code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import random
import gym
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization,Activation
from tensorflow.keras.optimizers import Adam
from scores.score_logger import ScoreLogger
ENV_NAME = "TimePilot-ram-v0"  # gym Atari environment (128-byte RAM observations)
GAMMA = 0.95  # discount factor: high = plan ahead, low = favor immediate reward
LEARNING_RATE = 0.001  # Adam learning rate
MEMORY_SIZE = 1000000  # intended replay-buffer capacity
BATCH_SIZE = 20  # minibatch size for experience replay
EXPLORATION_MAX = 1.0  # initial epsilon (fully random)
EXPLORATION_MIN = 0.01  # floor for epsilon
EXPLORATION_DECAY = 0.995  # multiplicative epsilon decay per replay step
NUM_EPISODES=2  # episodes to train for
WATCH_TRAINING=False  # set True to render the env while training
class DQNSolver:
    """Epsilon-greedy DQN agent backed by a small Keras MLP.

    Transitions are stored in a replay buffer and the network is trained on
    random minibatches (experience replay).  Epsilon decays multiplicatively
    from EXPLORATION_MAX down to EXPLORATION_MIN.
    """
    def __init__(self, observation_input, action_space):
        """
        :param observation_input: input shape of the observation (iterable of dims)
        :param action_space: number of discrete actions in the environment
        """
        self.exploration_rate = EXPLORATION_MAX
        self.observation_input = observation_input
        self.action_space = action_space
        self.memory = []
        self.model = Sequential()
        self.model.add(Dense(24, input_shape=observation_input, activation="relu"))
        self.model.add(Flatten())
        self.model.add(Dense(24, use_bias=False))
        self.model.add(BatchNormalization())
        self.model.add(Activation("relu"))
        self.model.add(Dense(action_space, use_bias=False))
        self.model.add(BatchNormalization())
        self.model.add(Activation("linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))

    def predict(self, state):
        """Return the network's Q-value predictions for a batch of states."""
        return self.model.predict(state)

    def remember(self, state, action, reward, next_state, done):
        """Append a transition to the replay buffer, bounded by MEMORY_SIZE."""
        self.memory.append((state, action, reward, next_state, done))
        # BUG FIX: the buffer previously grew without bound even though
        # MEMORY_SIZE was defined; drop the oldest transition when full.
        if len(self.memory) > MEMORY_SIZE:
            self.memory.pop(0)

    def act(self, state):
        """Pick an action: random with probability exploration_rate, else greedy."""
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.action_space)
        q_values = self.predict(state)[0]
        # BUG FIX: q_values is already the 1-D Q-vector for this state; the old
        # code took np.argmax(q_values[0]) -- the argmax of a scalar -- which
        # always returned action 0 when exploiting.
        return np.argmax(q_values)

    def experience_replay(self):
        """Sample a minibatch and fit the network toward bootstrapped targets."""
        if len(self.memory) < BATCH_SIZE:
            return
        batch = random.sample(self.memory, BATCH_SIZE)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                # New Q for this (state, action) pair is the immediate reward
                # plus the discounted best Q of the next state.  GAMMA trades
                # off long-term planning vs short-term reward.
                q_update = (reward + GAMMA * np.amax(self.predict(state_next)[0]))
            q_values = self.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.exploration_rate *= EXPLORATION_DECAY
        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)
def reshape_dims(obs_space):
    """Return [1, *obs_space.shape]: the observation shape with a leading
    batch dimension of 1 prepended, as a plain list."""
    return [1] + list(obs_space.shape)
def find_input_shape(env):
    """Return the model input shape for the env's observation space.

    Box spaces yield their shape tuple, Discrete spaces yield [n]; any other
    space type yields None.
    """
    space = env.observation_space
    if type(space) == gym.spaces.box.Box:
        return space.shape
    if type(space) == gym.spaces.discrete.Discrete:
        return [space.n]
    return None
class ActionSpaceError(Exception):
    """Raised when the chosen environment's action space is not discrete."""
    pass
def training():
    """Train a DQNSolver on ENV_NAME for NUM_EPISODES episodes.

    :return: the trained DQNSolver instance
    :raises ActionSpaceError: if the environment's action space is not discrete
    """
    env = gym.make(ENV_NAME)
    # DQN only supports discrete action spaces; fail fast otherwise.
    if type(env.action_space) != gym.spaces.discrete.Discrete:
        raise ActionSpaceError('This environment uses an action space that is not discrete. DQN can only be trained using discrete action spaces. Please select an envionment with a discrete action space.')
    num_actions = env.action_space.n
    score_logger = ScoreLogger(ENV_NAME)
    input_shape = find_input_shape(env)
    dims = reshape_dims(env.observation_space)
    dqn_solver = DQNSolver(input_shape, num_actions)
    for episode in range(NUM_EPISODES):
        state = env.reset()
        # add the batch dimension when the observation is multi-dimensional
        if len(dims) > 1:
            state = state.reshape(dims)
        step = 0
        while True:
            step += 1
            if WATCH_TRAINING:
                env.render()
            action = dqn_solver.act(state)
            state_next, reward, terminal, info = env.step(action)
            # penalize the terminal transition
            reward = reward if not terminal else -reward
            if len(dims) > 1:
                state_next = state_next.reshape(dims)
            dqn_solver.remember(state, action, reward, state_next, terminal)
            state = state_next
            if terminal:
                print("Run: " + str(episode+1) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
                score_logger.add_score(step, episode+1)
                break
            dqn_solver.experience_replay()
    return dqn_solver
def testing(dqn_solver):
    """Run one rendered episode following the learned greedy policy.

    :param dqn_solver: a trained DQNSolver
    """
    env = gym.make(ENV_NAME)
    dims = reshape_dims(env.observation_space)
    step = 0
    # exploration rate 0 => act purely greedily from the learned Q-function
    dqn_solver.exploration_rate = 0
    state = env.reset()
    # reshape state array if it has more than one dimension
    if len(dims) > 1:
        state = state.reshape(dims)
    while True:
        step += 1
        env.render()
        action = dqn_solver.act(state)
        next_state, reward, terminal, info = env.step(action)
        if terminal:
            break
        # reshape state array if it has more than one dimension
        if len(dims) > 1:
            next_state = next_state.reshape(dims)
        # BUG FIX: previously the state was only advanced inside the reshape
        # branch, so with a 1-D observation the agent acted on the stale
        # initial state every step.
        state = next_state
if __name__ == "__main__":
    # train first, then run a single rendered evaluation episode
    solution=training()
    testing(solution)
| [
"gym.make",
"tensorflow.keras.layers.BatchNormalization",
"numpy.argmax",
"random.sample",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Activation",
"random.randrange",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.optimizers.Adam",
"numpy.random.rand",
"scores.score_logge... | [((3202, 3220), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (3210, 3220), False, 'import gym\n'), ((3693, 3714), 'scores.score_logger.ScoreLogger', 'ScoreLogger', (['ENV_NAME'], {}), '(ENV_NAME)\n', (3704, 3714), False, 'from scores.score_logger import ScoreLogger\n'), ((4919, 4937), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (4927, 4937), False, 'import gym\n'), ((734, 746), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (744, 746), False, 'from tensorflow.keras.models import Sequential\n'), ((1633, 1655), 'numpy.argmax', 'np.argmax', (['q_values[0]'], {}), '(q_values[0])\n', (1642, 1655), True, 'import numpy as np\n'), ((1767, 1805), 'random.sample', 'random.sample', (['self.memory', 'BATCH_SIZE'], {}), '(self.memory, BATCH_SIZE)\n', (1780, 1805), False, 'import random\n'), ((770, 829), 'tensorflow.keras.layers.Dense', 'Dense', (['(24)'], {'input_shape': 'observation_input', 'activation': '"""relu"""'}), "(24, input_shape=observation_input, activation='relu')\n", (775, 829), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((854, 863), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (861, 863), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((888, 913), 'tensorflow.keras.layers.Dense', 'Dense', (['(24)'], {'use_bias': '(False)'}), '(24, use_bias=False)\n', (893, 913), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((938, 958), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (956, 958), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((983, 1001), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (993, 1001), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), 
((1026, 1061), 'tensorflow.keras.layers.Dense', 'Dense', (['action_space'], {'use_bias': '(False)'}), '(action_space, use_bias=False)\n', (1031, 1061), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((1086, 1106), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1104, 1106), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((1131, 1151), 'tensorflow.keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (1141, 1151), False, 'from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation\n'), ((1479, 1495), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1493, 1495), True, 'import numpy as np\n'), ((1540, 1575), 'random.randrange', 'random.randrange', (['self.action_space'], {}), '(self.action_space)\n', (1556, 1575), False, 'import random\n'), ((1202, 1224), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (1206, 1224), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import os
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as colors
from matplotlib import cm
from datetime import datetime
kerneldir ='../data/kernels/'  # directory holding the kernel text files
# sample kernel file used by the __main__ demo below
testkernel = os.path.join(kerneldir,
              'rsub-d2f9343c-tess2018360042939-s0006-1-1-0126_cal_img_bkgdsub-xtrns.fits-kernel')
def read_kernel_img(kernelpath):
    """Parse the '# Image:' section of a kernel text file into a DataFrame.

    Lines after the (last) '# Image:' marker look like
    '# u v p0-q0 p0-q1 p0-q2 p1-q1 p1-q2 p2-q2'; the leading '#' is stripped
    and the whitespace-separated fields become CSV columns.

    :param kernelpath: path to the kernel text file
    :return: DataFrame with columns u, v and the six polynomial orders
    :raises ValueError: if no '# Image:' marker is found in the file
    """
    import io
    with open(kernelpath, mode='r') as f:
        lines = f.readlines()
    # keep the index of the LAST marker, matching the original loop behavior
    startnum = None
    for ix, l in enumerate(lines):
        if l.startswith('# Image:'):
            startnum = ix
    if startnum is None:
        # previously this fell through to a confusing NameError
        raise ValueError('no "# Image:" marker found in {}'.format(kernelpath))
    imglines = lines[startnum+1:]
    # collapse whitespace runs to commas; each line starts with '# ', so the
    # joined string begins '#,' -- drop those two characters
    clines = [','.join(l.split())[2:] + '\n' for l in imglines]
    header = ['u', 'v', 'p0-q0', 'p0-q1', 'p0-q2', 'p1-q1', 'p1-q2', 'p2-q2']
    # parse from memory: the old temp-file round trip leaked the temp file
    # next to the kernel whenever read_csv raised
    return pd.read_csv(io.StringIO(''.join(clines)), names=header)
def _make_kernel_plot(_kernel, kernelsize, order):
    """Render one kernelsize x kernelsize kernel as an annotated heatmap.

    Saves the figure to test_kernel_plots/kernel_<order>.png (the directory
    is assumed to exist) and prints a timestamped confirmation line.
    """
    plt.close('all')
    plt.rcParams['xtick.direction'] = 'in'
    plt.rcParams['ytick.direction'] = 'in'
    fig, ax = plt.subplots(ncols=1, nrows=1)
    # symmetric color scale about zero
    vmin, vmax = -0.025, 0.025
    norm = colors.Normalize(vmin=vmin, vmax=vmax)
    # extent = [horizontal_min, horizontal_max, vertical_min, vertical_max]
    extent = [-3.5, 3.5, -3.5, 3.5]
    img = ax.imshow(_kernel.T, cmap='RdBu', vmin=vmin, vmax=vmax,
                    norm=norm, interpolation='none', origin='lower',
                    extent=extent)
    # annotate every cell with its value, centered on (u, v) coordinates
    half = int(np.floor(kernelsize/2))
    for row in range(kernelsize):
        for col in range(kernelsize):
            u, v = row - half, col - half
            ax.text(u, v-0.2, '{:.4f}'.format(_kernel[row, col]), ha='center',
                    va='top', fontsize='xx-small')
    ax.set_xlabel('u')
    ax.set_ylabel('v')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    fig.colorbar(img, ax=ax, cax=cax, extend='both')
    fig.tight_layout(h_pad=0, w_pad=-14, pad=0)
    outpath = 'test_kernel_plots/kernel_{}.png'.format(order)
    fig.savefig(outpath, bbox_inches='tight', dpi=400)
    print('{}: made {}'.format(datetime.utcnow().isoformat(), outpath))
def plot_kernel(kernelpath, kernelsize, order='p0-q0'):
    """Plot each polynomial-order kernel found in the file, plus their sum.

    NOTE(review): the `order` argument is effectively unused -- the loop
    below always iterates over all six orders regardless of its value.

    :param kernelpath: path to the kernel text file
    :param kernelsize: side length of the (square) kernel, e.g. 7
    """
    # read the kernel table into a DataFrame
    df = read_kernel_img(kernelpath)
    half = int(np.floor(kernelsize/2))
    sumkernel = np.zeros((kernelsize, kernelsize))
    for order_name in ['p0-q0', 'p0-q1', 'p0-q2', 'p1-q1', 'p1-q2', 'p2-q2']:
        kernel = np.zeros((kernelsize, kernelsize))
        # indices run e.g. -3..+3 in (u, v) for a kernelsize of 7
        for i in range(kernelsize):
            for j in range(kernelsize):
                sel = (df['u'] == i - half) & (df['v'] == j - half)
                value = float(df[sel][order_name])
                kernel[i, j] = value
                sumkernel[i, j] += value
        # plot this order's kernel
        _make_kernel_plot(kernel, kernelsize, order_name)
    # and the kernel summed over all orders
    _make_kernel_plot(sumkernel, kernelsize, 'summed')
if __name__ == "__main__":
    # demo: plot all orders of the checked-in 7x7 test kernel
    kernelpath = testkernel
    kernelsize = 7
    plot_kernel(kernelpath, kernelsize)
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"os.remove",
"os.path.abspath",
"matplotlib.colors.Normalize",
"pandas.read_csv",
"matplotlib.pyplot.close",
"numpy.floor",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"datetime.datetime.utcnow",
"os.path.join"
] | [((262, 378), 'os.path.join', 'os.path.join', (['kerneldir', '"""rsub-d2f9343c-tess2018360042939-s0006-1-1-0126_cal_img_bkgdsub-xtrns.fits-kernel"""'], {}), "(kerneldir,\n 'rsub-d2f9343c-tess2018360042939-s0006-1-1-0126_cal_img_bkgdsub-xtrns.fits-kernel'\n )\n", (274, 378), False, 'import os\n'), ((948, 983), 'pandas.read_csv', 'pd.read_csv', (['temppath'], {'names': 'header'}), '(temppath, names=header)\n', (959, 983), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((989, 1008), 'os.remove', 'os.remove', (['temppath'], {}), '(temppath)\n', (998, 1008), False, 'import os\n'), ((1082, 1098), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1091, 1098), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1200, 1230), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)'}), '(ncols=1, nrows=1)\n', (1212, 1230), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1325, 1363), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (1341, 1363), True, 'import matplotlib.colors as colors\n'), ((2003, 2026), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (2022, 2026), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((2557, 2591), 'numpy.zeros', 'np.zeros', (['(kernelsize, kernelsize)'], {}), '((kernelsize, kernelsize))\n', (2565, 2591), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((762, 789), 'os.path.abspath', 'os.path.abspath', (['kernelpath'], {}), '(kernelpath)\n', (777, 789), False, 'import os\n'), ((2706, 2740), 'numpy.zeros', 'np.zeros', (['(kernelsize, kernelsize)'], {}), '((kernelsize, kernelsize))\n', (2714, 2740), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1742, 1766), 'numpy.floor', 'np.floor', (['(kernelsize / 2)'], {}), '(kernelsize / 2)\n', (1750, 
1766), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((1790, 1814), 'numpy.floor', 'np.floor', (['(kernelsize / 2)'], {}), '(kernelsize / 2)\n', (1798, 1814), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((2349, 2366), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2364, 2366), False, 'from datetime import datetime\n'), ((2893, 2917), 'numpy.floor', 'np.floor', (['(kernelsize / 2)'], {}), '(kernelsize / 2)\n', (2901, 2917), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n'), ((2945, 2969), 'numpy.floor', 'np.floor', (['(kernelsize / 2)'], {}), '(kernelsize / 2)\n', (2953, 2969), True, 'import numpy as np, pandas as pd, matplotlib.pyplot as plt\n')] |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Module to test TF utils """
import unittest
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras.applications.vgg16 import VGG16
from keras.applications.resnet50 import ResNet50
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, \
iter_first_x, get_ordered_conv_linears, get_training_tensors
from aimet_tensorflow.utils.graph_saver import wrapper_func
from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, \
model_with_multiple_training_tensors
from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape
from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
# module-level logger scoped to the Test log area
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
class TestTrainingExtensionsTfUtils(unittest.TestCase):
""" Unittest class for testing Tf Utils """
    def test_wrapper_func_second_arg_without_args(self):
        """
        Test that a wrapper_func-wrapped callable raises ValueError when it is
        invoked with no arguments at all.
        """
        def dummy_eval_func():
            return 1
        dummy_eval_func = wrapper_func(dummy_eval_func)
        # calling dummy_eval_func without any arguments
        with self.assertRaises(ValueError):
            dummy_eval_func()
    def test_wrapper_func_second_arg_with_sess(self):
        """
        Test that a wrapper_func-wrapped callable raises ValueError when the
        tf.Session is passed as the second argument instead of the first.
        """
        def dummy_eval_func(model, _):
            return model
        # build a VGG16 graph purely to have a real initialized session
        g = tf.Graph()
        with g.as_default():
            _ = VGG16(weights=None, input_shape=(224, 224, 3))
            init = tf.global_variables_initializer()
        sess = tf.Session(graph=g)
        sess.run(init)
        dummy_eval_func = wrapper_func(dummy_eval_func)
        # calling dummy_eval_func with first random argument, and second argument tf.Session
        self.assertRaises(ValueError, lambda: dummy_eval_func('test', sess))
        sess.close()
    def test_wrapper_func_first_arg_with_sess(self):
        """
        Test wrapper_func with a tf.Session as the first argument: the wrapper
        should hand back an updated session object distinct from the one
        passed in.
        """
        def dummy_eval_func(model, _):
            return model
        g = tf.Graph()
        with g.as_default():
            _ = VGG16(weights=None, input_shape=(224, 224, 3))
            init = tf.global_variables_initializer()
        sess = tf.Session(graph=g)
        sess.run(init)
        dummy_eval_func = wrapper_func(dummy_eval_func)
        # calling dummy_eval_func with tf.Session first argument
        updated_sess = dummy_eval_func(sess, 'test')
        # the wrapper must not return the very session it was given
        self.assertNotEqual(sess, updated_sess)
        sess.close()
    def test_get_ordered_ops_with_single_residual(self):
        """
        Test get_ordered_ops on the simple single-residual model: a later conv
        must come after an earlier conv in the returned ordering.
        """
        g = tf.Graph()
        with g.as_default():
            single_residual()
        ordered_ops = get_ordered_ops(g, ['input_1'])
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('conv2d_4/Conv2D')) >
                        ordered_ops.index(g.get_operation_by_name('conv2d_1/Conv2D')))
    def test_get_ordered_ops_with_resnet50(self):
        """
        Test get_ordered_ops with the Keras ResNet50 model: ops must come back
        in forward-pass order, and ops unreachable from the input must be
        excluded.
        """
        g = tf.Graph()
        with g.as_default():
            _ = ResNet50(weights=None)
            inp_tensor = tf.get_variable('inp_tensor', shape=[1, 20, 5, 5],
                                         initializer=tf.random_normal_initializer())
            filter_tensor = tf.get_variable('filter_tensor', shape=[5, 5, 20, 50],
                                            initializer=tf.random_normal_initializer())
            # add random conv, which is not part of forward pass
            # pylint: disable=no-member
            _ = tf.nn.conv2d(input=inp_tensor, filter=filter_tensor, strides=[1, 1, 1, 1], padding='VALID',
                             data_format="NCHW", name='dangling/Conv2D')
        ordered_ops = get_ordered_ops(g, ['input_1'])
        for op in ordered_ops:
            if op.type == 'Conv2D':
                print(op.name)
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('res2a_branch2b/convolution')) >
                        ordered_ops.index(g.get_operation_by_name('res2a_branch1/convolution')))
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('activation_4/Relu')) >
                        ordered_ops.index(g.get_operation_by_name('add_1/add')))
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('res2a_branch2a/BiasAdd')) >
                        ordered_ops.index(g.get_operation_by_name('res2a_branch2a/convolution')))
        # the dangling conv is not reachable from input_1, so it must be omitted
        self.assertTrue(g.get_operation_by_name('dangling/Conv2D') not in ordered_ops)
    def test_get_ordered_ops_with_multiple_inputs(self):
        """
        Test get_ordered_ops on a model with two inputs: every op must appear
        after each of the input ops it depends on.
        """
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()
        ordered_ops = get_ordered_ops(g, ['input2', 'input1'])
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('conv1b/Conv2D')) >
                        ordered_ops.index(g.get_operation_by_name('input2')))
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('conv1a/Conv2D')) >
                        ordered_ops.index(g.get_operation_by_name('input1')))
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('add/add')) >
                        ordered_ops.index(g.get_operation_by_name('input1')))
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('add/add')) >
                        ordered_ops.index(g.get_operation_by_name('input2')))
    def test_create_input_feed_dict(self):
        """
        Test create_input_feed_dict with a single numpy array, a list and a
        tuple of arrays, a mismatched input list, and training-tensor feeding.
        """
        # 1) input_batch_data numpy array
        g = tf.Graph()
        with g.as_default():
            _ = single_residual()
        input_data = np.random.rand(1, 16, 16, 3)
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input_1'], input_data=input_data)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input_1:0')].shape, input_data.shape)
        tf.reset_default_graph()
        # 2) input_batch_data List of numpy array
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()
        input_data = list()
        input_data.append(np.random.rand(10, 10, 3))
        input_data.append(np.random.rand(12, 12, 3))
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input1', 'input2'],
                                           input_data=input_data)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input1:0')].shape, input_data[0].shape)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input2:0')].shape, input_data[1].shape)
        tf.reset_default_graph()
        # 3) input_batch_data Tuple of numpy array
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()
        input_data = (np.random.rand(10, 10, 3), np.random.rand(12, 12, 3))
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input1', 'input2'],
                                           input_data=input_data)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input1:0')].shape, input_data[0].shape)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input2:0')].shape, input_data[1].shape)
        tf.reset_default_graph()
        # 4) input_batch_data and input_op_names mismatch -> ValueError
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()
        input_data = (np.random.rand(10, 10, 3))
        self.assertRaises(ValueError, lambda: create_input_feed_dict(graph=g,
                                                                    input_op_names_list=['input1', 'input2'],
                                                                    input_data=input_data))
        tf.reset_default_graph()
        # 5) training=True must feed True into every training placeholder
        g = tf.Graph()
        with g.as_default():
            model_with_multiple_training_tensors()
        input_data = (np.random.rand(32, 32, 3))
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input_1'],
                                           input_data=input_data, training=True)
        keras_learning_phase_tensor = g.get_tensor_by_name('keras_learning_phase:0')
        is_training_tensor = g.get_tensor_by_name('is_training:0')
        is_training_2_tensor = g.get_tensor_by_name('is_training_2:0')
        self.assertEqual(feed_dict[keras_learning_phase_tensor], True)
        self.assertEqual(feed_dict[is_training_tensor], True)
        self.assertEqual(feed_dict[is_training_2_tensor], True)
        tf.reset_default_graph()
    def test_iter_first_x(self):
        """ Test iter_first_x generator for creating a dataset generator """
        tf.reset_default_graph()
        sess = tf.Session()
        # unbatched dataset: elements come back one at a time
        with sess.graph.as_default():
            dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
            dataset_iterator = iter_first_x(dataset, num_batches=5)
            for i, data in enumerate(dataset_iterator):
                self.assertEqual(i, data)   # Data has not been batched, so each element should be returned individually
                self.assertTrue(i < 5)      # Check that iterator stops at the correct point
        # batched dataset: each yielded item is a batch of 2
        with sess.graph.as_default():
            dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
            dataset = dataset.batch(2)
            dataset_iterator = iter_first_x(dataset, num_batches=5)
            for i, data in enumerate(dataset_iterator):
                self.assertEqual(len(data), 2)
                self.assertEqual(data[0], 2*i)
                self.assertEqual(data[1], 2*i+1)
        # Test that trying to extract more data than possible from the dataset is handled
        # since tensorflow OutOfRangeError is converted to StopIteration
        with sess.graph.as_default():
            dataset_iterator = iter_first_x(dataset, num_batches=6)
            for i, data in enumerate(dataset_iterator):
                self.assertEqual(len(data), 2)
                self.assertEqual(data[0], 2*i)
                self.assertEqual(data[1], 2*i+1)
        sess.close()
    def test_update_to_weight_tensor_with_load_var(self):
        """
        Test that WeightTensorUtils.update_tensor_for_op overwrites a conv
        op's weight tensor with supplied numpy data (via tf variable load).
        """
        # create conv op
        tf.reset_default_graph()
        inputs = tf.keras.Input(shape=(32, 32, 3,))
        _ = tf.keras.layers.Conv2D(32, (3, 3), kernel_initializer=tf.random_uniform_initializer(-1, 2))(inputs)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        original_weights = WeightTensorUtils.get_tensor_as_numpy_data(sess, conv_op)
        # add dummy weight tensor data
        np.random.seed(0)
        w_shape = WeightTensorUtils.get_tensor_shape(conv_op)
        numpy_data = np.random.rand(3, w_shape[1], w_shape[2], w_shape[3])
        # send in numpy data to overwrite previous value
        WeightTensorUtils.update_tensor_for_op(sess, conv_op, numpy_data)
        updated_weight_tensor = WeightTensorUtils.get_tensor_as_numpy_data(sess, conv_op)
        # validate they are not the same
        self.assertFalse(np.allclose(original_weights, updated_weight_tensor))
        self.assertTrue(np.allclose(numpy_data, updated_weight_tensor))
        sess.close()
    def test_update_to_bias_with_load_var(self):
        """
        Test that BiasUtils.update_bias_for_op overwrites a conv op's bias
        parameter with supplied numpy data (via tf variable load).
        """
        # create conv op
        tf.reset_default_graph()
        inputs = tf.keras.Input(shape=(32, 32, 3,))
        conv_op = tf.keras.layers.Conv2D(32, (3, 3),
                                         kernel_initializer=tf.random_uniform_initializer(-1, 2))(inputs)
        bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
        _ = tf.nn.relu(bn_op)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        original_bias = BiasUtils.get_bias_as_numpy_data(sess, conv_op)
        # add dummy weight tensor data
        np.random.seed(0)
        b_shape = BiasUtils.get_shape(conv_op)
        numpy_data = np.random.rand(b_shape[0])
        # send in numpy data to overwrite previous value
        BiasUtils.update_bias_for_op(sess, conv_op, numpy_data)
        updated_bias = BiasUtils.get_bias_as_numpy_data(sess, conv_op)
        # validate they are not the same
        self.assertFalse(np.allclose(original_bias, updated_bias))
        self.assertTrue(np.allclose(numpy_data, updated_bias))
        sess.close()
    def test_bias_add_with_conv(self):
        """
        Test adding a bias to a conv op that was created without one, and that
        the bias survives a graph save/load round trip.
        """
        tf.reset_default_graph()
        inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
        # create a conv without bias param
        conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
        bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
        # pylint: disable=no-member
        _ = tf.nn.relu(bn_op)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        self.assertTrue(BiasUtils.is_bias_none(conv_op))
        # new_sess = BiasUtils.initialize_model_with_bias(sess)
        shape = BiasUtils.get_shape(conv_op)
        numpy_data = np.random.rand(shape[0])
        BiasUtils.update_bias_for_op(sess, conv_op, bias_as_numpy_array=numpy_data)
        # round-trip the graph through disk; the added bias must persist
        new_sess = save_and_load_graph('./temp_bn_fold', sess)
        conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
        bias_as_numpy_data = BiasUtils.get_bias_as_numpy_data(new_sess, conv_op)
        assert(not BiasUtils.is_bias_none(conv_op))
        new_sess.close()
    def test_bias_update_to_dense(self):
        """
        Test that BiasUtils.initialize_model_with_bias adds a bias to a
        MatMul (Dense) layer created without one.
        """
        tf.reset_default_graph()
        inputs = tf.keras.Input(shape=(32, 32, 3,))
        x = tf.keras.layers.Flatten()(inputs)
        dense = tf.keras.layers.Dense(2, use_bias=False, activation=tf.nn.softmax, name="single_residual")(x)
        # pylint: disable=no-member
        _ = tf.nn.relu(dense)
        init = tf.global_variables_initializer()
        sess = tf.Session(graph=tf.get_default_graph())
        sess.run(init)
        dense_op = sess.graph.get_operation_by_name('single_residual/MatMul')
        self.assertTrue(BiasUtils.is_bias_none(dense_op))
        new_sess = BiasUtils.initialize_model_with_bias(sess)
        dense_op = new_sess.graph.get_operation_by_name('single_residual/MatMul')
        self.assertTrue(not BiasUtils.is_bias_none(dense_op))
        new_sess.close()
    def test_get_ordered_conv_linears(self):
        """
        Test that get_ordered_conv_linears returns the conv/linear ops of a
        two-conv graph in forward-pass order.
        """
        tf.reset_default_graph()
        inputs = tf.keras.Input(shape=(32, 32, 3,))
        conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
        # pylint: disable=no-member
        relu_1 = tf.nn.relu(conv_op)
        conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
        _ = tf.nn.relu(conv2_op)
        init = tf.global_variables_initializer()
        sess = tf.Session(graph=tf.get_default_graph())
        sess.run(init)
        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        # check if we get ordered list
        input_op = conv_op.inputs[0].op.name
        selected_ops = get_ordered_conv_linears(sess, [input_op])
        self.assertEqual(2, len(selected_ops))
        conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
        conv_1_op = sess.graph.get_operation_by_name('conv2d_1/Conv2D')
        self.assertEqual(selected_ops[0], conv_op)
        self.assertEqual(selected_ops[1], conv_1_op)
    def test_get_training_tensors(self):
        """ Test for obtaining all training tensors in a graph """
        tf.reset_default_graph()
        _ = model_with_multiple_training_tensors()
        training_tensors = get_training_tensors(tf.get_default_graph())
        # the example model defines three training placeholders
        self.assertEqual(3, len(training_tensors))
    def test_param_read_bn_training_true(self):
        """
        Test that BNUtils can fetch moving mean/variance from a BN op built
        with training=True.
        """
        tf.reset_default_graph()
        sess = tf.Session(graph=tf.get_default_graph())
        with sess.as_default():
            inputs = tf.keras.Input(shape=(32, 32, 3,))
            conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
            bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op, training=True)
            init = tf.global_variables_initializer()
            sess.run(init)
            moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op.op)
            moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op.op)
            assert moving_mean is not None
            assert moving_var is not None
        sess.close()
    def test_param_read_keras_bn_op_default(self):
        """
        Test that BNUtils can fetch beta, gamma, moving mean and moving
        variance from a Keras BN op built without an explicit training flag.
        """
        tf.reset_default_graph()
        sess = tf.Session(graph=tf.get_default_graph())
        with sess.as_default():
            inputs = tf.keras.Input(shape=(32, 32, 3,))
            conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
            bn = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
            init = tf.global_variables_initializer()
            sess.run(init)
            # _ = tf.summary.FileWriter('./keras_model_bn_op', sess.graph)
            # we use the bn op with is_training attribute set to false
            bn_op_tensor = sess.graph.get_tensor_by_name('batch_normalization/cond/FusedBatchNormV3_1:0')
            moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op_tensor.op)
            moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op_tensor.op)
            beta = BNUtils.get_beta_as_numpy_data(sess, bn_op_tensor.op)
            gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op_tensor.op)
            assert beta is not None
            assert gamma is not None
            assert moving_mean is not None
            assert moving_var is not None
        sess.close()
    def test_param_read_keras_bn_training_true(self):
        """
        Test that BNUtils can fetch beta, gamma, moving mean and moving
        variance from a Keras BN op built with training=True.
        """
        tf.reset_default_graph()
        sess = tf.Session(graph=tf.get_default_graph())
        with sess.as_default():
            inputs = tf.keras.Input(shape=(32, 32, 3,))
            conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
            bn_op_tensor = tf.keras.layers.BatchNormalization(fused=True, name="bn_op_1/")(conv_op, training=True)
            init = tf.global_variables_initializer()
            sess.run(init)
            moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op_tensor.op)
            moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op_tensor.op)
            beta = BNUtils.get_beta_as_numpy_data(sess, bn_op_tensor.op)
            gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op_tensor.op)
            assert beta is not None
            assert gamma is not None
            assert moving_mean is not None
            assert moving_var is not None
        sess.close()
def test_param_read_keras_bn_training_false(self):
"""
test we can fetch the params from a bn op that has training set to false.
:return:
"""
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
with sess.as_default():
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
bn_op_tensor = tf.keras.layers.BatchNormalization(fused=True, name="bn_op_1")(conv_op, training=False)
init = tf.global_variables_initializer()
sess.run(init)
moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op_tensor.op)
moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op_tensor.op)
beta = BNUtils.get_beta_as_numpy_data(sess, bn_op_tensor.op)
gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op_tensor.op)
assert beta is not None
assert gamma is not None
assert moving_mean is not None
assert moving_var is not None
sess.close()
def test_with_keras_resnet50_with_weights(self):
"""
Test to replicate SFTI issue reported with Keras Resnet50 BN layer param extraction
:return:
"""
from tensorflow.python.keras.applications.resnet import ResNet50
tf.keras.backend.clear_session()
_ = ResNet50(weights='imagenet', input_shape=(224, 224, 3))
sess = tf.keras.backend.get_session()
# error reported by SFTI
# tensorflow.python.framework.errors_impl.InvalidArgumentError
# (0) Invalid argument: You must feed a value for placeholder tensor
# 'Placeholder_5' with dtype float and shape [?]
with sess.as_default():
bn_op_name = "conv1_bn/cond/FusedBatchNormV3_1"
bn_op = sess.graph.get_operation_by_name(bn_op_name)
moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op)
moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op)
beta = BNUtils.get_beta_as_numpy_data(sess, bn_op)
gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op)
assert beta is not None
assert gamma is not None
assert moving_mean is not None
assert moving_var is not None
sess.close()
def test_with_tf_bn_op(self):
"""
Test with TF BN op
:return:
"""
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
inp = tf.placeholder(tf.float32, [1, 32, 32, 3])
net = tf.layers.conv2d(inp, 32, [3, 3])
_ = tf.compat.v1.layers.batch_normalization(net)
# _ = tf.summary.FileWriter('./keras_model_bn_op', sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
bn_op = sess.graph.get_operation_by_name('batch_normalization/FusedBatchNormV3')
moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op)
moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op)
beta = BNUtils.get_beta_as_numpy_data(sess, bn_op)
gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op)
assert beta is not None
assert gamma is not None
assert moving_mean is not None
assert moving_var is not None
def test_with_slim_bn_op(self):
"""
Test with Tf Slim BN op
:return:
"""
tf.reset_default_graph()
sess = tf.Session(graph=tf.get_default_graph())
inp = tf.placeholder(tf.float32, [1, 32, 32, 3])
net = slim.conv2d(inp, 32, [3, 3])
_ = slim.batch_norm(net, decay=.7, epsilon=.65, is_training=True)
init = tf.global_variables_initializer()
sess.run(init)
# _ = tf.summary.FileWriter('./keras_model_bn_op', sess.graph)
bn_op = sess.graph.get_operation_by_name('BatchNorm/FusedBatchNormV3')
moving_mean = BNUtils.get_moving_mean_as_numpy_data(sess, bn_op)
moving_var = BNUtils.get_moving_variance_as_numpy_data(sess, bn_op)
beta = BNUtils.get_beta_as_numpy_data(sess, bn_op)
gamma = BNUtils.get_gamma_as_numpy_data(sess, bn_op)
assert beta is not None
assert gamma is not None
assert moving_mean is not None
assert moving_var is not None
def test_get_output_activation_shape(self):
"""Test for getting output activation shapes"""
# 1) dynamic shape
graph = tf.Graph()
filter_data = np.ones([5, 5, 3, 32], dtype=np.float32)
with graph.as_default():
input_tensor = tf.placeholder(tf.float32, [1, None, None, None], 'input')
filter_tensor = tf.Variable(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)
_ = tf.nn.conv2d(input=input_tensor, filter=filter_tensor, padding='SAME', strides=[1, 1, 1, 1],
data_format="NCHW", name='Conv2D_1')
init = tf.global_variables_initializer()
sess = tf.Session(graph=graph)
sess.run(init)
conv_op = sess.graph.get_operation_by_name('Conv2D_1')
output_shape = get_output_activation_shape(sess=sess, op=conv_op, input_op_names=['input'],
input_shape=(1, 3, 10, 10))
batch_size, channels, activations_h, activations_w = output_shape
self.assertEqual(activations_h, 10)
self.assertEqual(activations_w, 10)
self.assertEqual(channels, 32)
sess.close()
# 2) static shape
graph = tf.Graph()
input_data = np.ones([1, 3, 10, 10], dtype=np.float32)
filter_data = np.ones([5, 5, 3, 32], dtype=np.float32)
with graph.as_default():
input_tensor = tf.Variable(initial_value=input_data, name='input', dtype=tf.float32)
filter_tensor = tf.Variable(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)
_ = tf.nn.conv2d(input=input_tensor, filter=filter_tensor, padding='SAME', strides=[1, 1, 1, 1],
data_format="NCHW", name='Conv2D_1')
init = tf.global_variables_initializer()
sess = tf.Session(graph=graph)
sess.run(init)
conv_op = sess.graph.get_operation_by_name('Conv2D_1')
output_shape = get_output_activation_shape(sess=sess, op=conv_op, input_op_names=['input'],
input_shape=(1, 3, 10, 10))
batch_size, channels, activations_h, activations_w = output_shape
self.assertEqual(activations_h, 10)
self.assertEqual(activations_w, 10)
self.assertEqual(channels, 32)
sess.close()
def test_get_output_activation_shape_channels_last(self):
"""Test for getting output activation shapes for channels_last format"""
# 1) dynamic shape
graph = tf.Graph()
filter_data = np.ones([5, 5, 3, 32], dtype=np.float32)
with graph.as_default():
input_tensor = tf.placeholder(tf.float32, [1, None, None, None], 'input')
filter_tensor = tf.Variable(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)
_ = tf.nn.conv2d(input=input_tensor, filter=filter_tensor, padding='SAME', strides=[1, 1, 1, 1],
data_format="NHWC", name='Conv2D_1')
init = tf.global_variables_initializer()
sess = tf.Session(graph=graph)
sess.run(init)
conv_op = sess.graph.get_operation_by_name('Conv2D_1')
output_shape = get_output_activation_shape(sess=sess, op=conv_op, input_op_names=['input'],
input_shape=(1, 10, 10, 3))
batch_size, channels, activations_h, activations_w = output_shape
self.assertEqual(activations_h, 10)
self.assertEqual(activations_w, 10)
self.assertEqual(channels, 32)
sess.close()
# 2) static shape
graph = tf.Graph()
# channels_last format
input_data = np.ones([1, 10, 10, 3], dtype=np.float32)
filter_data = np.ones([5, 5, 3, 32], dtype=np.float32)
with graph.as_default():
input_tensor = tf.Variable(initial_value=input_data, name='input', dtype=tf.float32)
filter_tensor = tf.Variable(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)
_ = tf.nn.conv2d(input=input_tensor, filter=filter_tensor, padding='SAME', strides=[1, 1, 1, 1],
data_format="NHWC", name='Conv2D_1')
init = tf.global_variables_initializer()
sess = tf.Session(graph=graph)
sess.run(init)
conv_op = sess.graph.get_operation_by_name('Conv2D_1')
output_shape = get_output_activation_shape(sess=sess, op=conv_op, input_op_names=['input'],
input_shape=(1, 10, 10, 3))
batch_size, channels, activations_h, activations_w = output_shape
self.assertEqual(activations_h, 10)
self.assertEqual(activations_w, 10)
self.assertEqual(channels, 32)
sess.close()
| [
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"tensorflow.reset_default_graph",
"numpy.allclose",
"numpy.ones",
"aimet_tensorflow.examples.test_models.model_with_multiple_training_tensors",
"aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data",
"aimet_common.utils.AimetLogg... | [((2746, 2800), 'aimet_common.utils.AimetLogger.get_area_logger', 'AimetLogger.get_area_logger', (['AimetLogger.LogAreas.Test'], {}), '(AimetLogger.LogAreas.Test)\n', (2773, 2800), False, 'from aimet_common.utils import AimetLogger\n'), ((3135, 3164), 'aimet_tensorflow.utils.graph_saver.wrapper_func', 'wrapper_func', (['dummy_eval_func'], {}), '(dummy_eval_func)\n', (3147, 3164), False, 'from aimet_tensorflow.utils.graph_saver import wrapper_func\n'), ((3529, 3539), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3537, 3539), True, 'import tensorflow as tf\n'), ((3701, 3720), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (3711, 3720), True, 'import tensorflow as tf\n'), ((3771, 3800), 'aimet_tensorflow.utils.graph_saver.wrapper_func', 'wrapper_func', (['dummy_eval_func'], {}), '(dummy_eval_func)\n', (3783, 3800), False, 'from aimet_tensorflow.utils.graph_saver import wrapper_func\n'), ((4297, 4307), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4305, 4307), True, 'import tensorflow as tf\n'), ((4469, 4488), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (4479, 4488), True, 'import tensorflow as tf\n'), ((4539, 4568), 'aimet_tensorflow.utils.graph_saver.wrapper_func', 'wrapper_func', (['dummy_eval_func'], {}), '(dummy_eval_func)\n', (4551, 4568), False, 'from aimet_tensorflow.utils.graph_saver import wrapper_func\n'), ((4906, 4916), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4914, 4916), True, 'import tensorflow as tf\n'), ((5000, 5031), 'aimet_tensorflow.utils.common.get_ordered_ops', 'get_ordered_ops', (['g', "['input_1']"], {}), "(g, ['input_1'])\n", (5015, 5031), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((5351, 5361), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5359, 5361), True, 'import tensorflow as tf\n'), ((6075, 6106), 
'aimet_tensorflow.utils.common.get_ordered_ops', 'get_ordered_ops', (['g', "['input_1']"], {}), "(g, ['input_1'])\n", (6090, 6106), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((7009, 7019), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7017, 7019), True, 'import tensorflow as tf\n'), ((7108, 7148), 'aimet_tensorflow.utils.common.get_ordered_ops', 'get_ordered_ops', (['g', "['input2', 'input1']"], {}), "(g, ['input2', 'input1'])\n", (7123, 7148), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((7956, 7966), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7964, 7966), True, 'import tensorflow as tf\n'), ((8052, 8080), 'numpy.random.rand', 'np.random.rand', (['(1)', '(16)', '(16)', '(3)'], {}), '(1, 16, 16, 3)\n', (8066, 8080), True, 'import numpy as np\n'), ((8101, 8193), 'aimet_tensorflow.utils.common.create_input_feed_dict', 'create_input_feed_dict', ([], {'graph': 'g', 'input_op_names_list': "['input_1']", 'input_data': 'input_data'}), "(graph=g, input_op_names_list=['input_1'], input_data\n =input_data)\n", (8123, 8193), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((8293, 8317), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (8315, 8317), True, 'import tensorflow as tf\n'), ((8381, 8391), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8389, 8391), True, 'import tensorflow as tf\n'), ((8611, 8711), 'aimet_tensorflow.utils.common.create_input_feed_dict', 'create_input_feed_dict', ([], {'graph': 'g', 'input_op_names_list': "['input1', 'input2']", 'input_data': 'input_data'}), "(graph=g, input_op_names_list=['input1', 'input2'],\n input_data=input_data)\n", (8633, 8711), False, 'from 
aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((8955, 8979), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (8977, 8979), True, 'import tensorflow as tf\n'), ((9044, 9054), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9052, 9054), True, 'import tensorflow as tf\n'), ((9217, 9317), 'aimet_tensorflow.utils.common.create_input_feed_dict', 'create_input_feed_dict', ([], {'graph': 'g', 'input_op_names_list': "['input1', 'input2']", 'input_data': 'input_data'}), "(graph=g, input_op_names_list=['input1', 'input2'],\n input_data=input_data)\n", (9239, 9317), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((9560, 9584), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (9582, 9584), True, 'import tensorflow as tf\n'), ((9656, 9666), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9664, 9666), True, 'import tensorflow as tf\n'), ((9754, 9779), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(3)'], {}), '(10, 10, 3)\n', (9768, 9779), True, 'import numpy as np\n'), ((10072, 10096), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10094, 10096), True, 'import tensorflow as tf\n'), ((10110, 10120), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10118, 10120), True, 'import tensorflow as tf\n'), ((10223, 10248), 'numpy.random.rand', 'np.random.rand', (['(32)', '(32)', '(3)'], {}), '(32, 32, 3)\n', (10237, 10248), True, 'import numpy as np\n'), ((10270, 10377), 'aimet_tensorflow.utils.common.create_input_feed_dict', 'create_input_feed_dict', ([], {'graph': 'g', 'input_op_names_list': "['input_1']", 'input_data': 'input_data', 'training': '(True)'}), "(graph=g, input_op_names_list=['input_1'], input_data\n =input_data, training=True)\n", (10292, 10377), False, 'from 
aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((10844, 10868), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10866, 10868), True, 'import tensorflow as tf\n'), ((10989, 11013), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (11011, 11013), True, 'import tensorflow as tf\n'), ((11029, 11041), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11039, 11041), True, 'import tensorflow as tf\n'), ((12609, 12633), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12631, 12633), True, 'import tensorflow as tf\n'), ((12651, 12684), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (12665, 12684), True, 'import tensorflow as tf\n'), ((12814, 12847), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12845, 12847), True, 'import tensorflow as tf\n'), ((12863, 12875), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (12873, 12875), True, 'import tensorflow as tf\n'), ((12996, 13053), 'aimet_tensorflow.utils.op.conv.WeightTensorUtils.get_tensor_as_numpy_data', 'WeightTensorUtils.get_tensor_as_numpy_data', (['sess', 'conv_op'], {}), '(sess, conv_op)\n', (13038, 13053), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((13102, 13119), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (13116, 13119), True, 'import numpy as np\n'), ((13138, 13181), 'aimet_tensorflow.utils.op.conv.WeightTensorUtils.get_tensor_shape', 'WeightTensorUtils.get_tensor_shape', (['conv_op'], {}), '(conv_op)\n', (13172, 13181), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((13203, 13256), 'numpy.random.rand', 'np.random.rand', (['(3)', 'w_shape[1]', 'w_shape[2]', 
'w_shape[3]'], {}), '(3, w_shape[1], w_shape[2], w_shape[3])\n', (13217, 13256), True, 'import numpy as np\n'), ((13323, 13388), 'aimet_tensorflow.utils.op.conv.WeightTensorUtils.update_tensor_for_op', 'WeightTensorUtils.update_tensor_for_op', (['sess', 'conv_op', 'numpy_data'], {}), '(sess, conv_op, numpy_data)\n', (13361, 13388), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((13422, 13479), 'aimet_tensorflow.utils.op.conv.WeightTensorUtils.get_tensor_as_numpy_data', 'WeightTensorUtils.get_tensor_as_numpy_data', (['sess', 'conv_op'], {}), '(sess, conv_op)\n', (13464, 13479), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((13892, 13916), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (13914, 13916), True, 'import tensorflow as tf\n'), ((13934, 13967), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (13948, 13967), True, 'import tensorflow as tf\n'), ((14207, 14224), 'tensorflow.nn.relu', 'tf.nn.relu', (['bn_op'], {}), '(bn_op)\n', (14217, 14224), True, 'import tensorflow as tf\n'), ((14241, 14274), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (14272, 14274), True, 'import tensorflow as tf\n'), ((14290, 14302), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (14300, 14302), True, 'import tensorflow as tf\n'), ((14420, 14467), 'aimet_tensorflow.utils.op.conv.BiasUtils.get_bias_as_numpy_data', 'BiasUtils.get_bias_as_numpy_data', (['sess', 'conv_op'], {}), '(sess, conv_op)\n', (14452, 14467), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((14516, 14533), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (14530, 14533), True, 'import numpy as np\n'), ((14552, 14580), 
'aimet_tensorflow.utils.op.conv.BiasUtils.get_shape', 'BiasUtils.get_shape', (['conv_op'], {}), '(conv_op)\n', (14571, 14580), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((14602, 14628), 'numpy.random.rand', 'np.random.rand', (['b_shape[0]'], {}), '(b_shape[0])\n', (14616, 14628), True, 'import numpy as np\n'), ((14695, 14750), 'aimet_tensorflow.utils.op.conv.BiasUtils.update_bias_for_op', 'BiasUtils.update_bias_for_op', (['sess', 'conv_op', 'numpy_data'], {}), '(sess, conv_op, numpy_data)\n', (14723, 14750), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((14775, 14822), 'aimet_tensorflow.utils.op.conv.BiasUtils.get_bias_as_numpy_data', 'BiasUtils.get_bias_as_numpy_data', (['sess', 'conv_op'], {}), '(sess, conv_op)\n', (14807, 14822), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((15139, 15163), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (15161, 15163), True, 'import tensorflow as tf\n'), ((15181, 15229), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)', 'name': '"""inputs"""'}), "(shape=(32, 32, 3), name='inputs')\n", (15195, 15229), True, 'import tensorflow as tf\n'), ((15471, 15488), 'tensorflow.nn.relu', 'tf.nn.relu', (['bn_op'], {}), '(bn_op)\n', (15481, 15488), True, 'import tensorflow as tf\n'), ((15505, 15538), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15536, 15538), True, 'import tensorflow as tf\n'), ((15554, 15566), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (15564, 15566), True, 'import tensorflow as tf\n'), ((15797, 15825), 'aimet_tensorflow.utils.op.conv.BiasUtils.get_shape', 'BiasUtils.get_shape', (['conv_op'], {}), '(conv_op)\n', (15816, 15825), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, 
BiasUtils, get_output_activation_shape\n'), ((15847, 15871), 'numpy.random.rand', 'np.random.rand', (['shape[0]'], {}), '(shape[0])\n', (15861, 15871), True, 'import numpy as np\n'), ((15880, 15955), 'aimet_tensorflow.utils.op.conv.BiasUtils.update_bias_for_op', 'BiasUtils.update_bias_for_op', (['sess', 'conv_op'], {'bias_as_numpy_array': 'numpy_data'}), '(sess, conv_op, bias_as_numpy_array=numpy_data)\n', (15908, 15955), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((15975, 16018), 'aimet_tensorflow.utils.graph_saver.save_and_load_graph', 'save_and_load_graph', (['"""./temp_bn_fold"""', 'sess'], {}), "('./temp_bn_fold', sess)\n", (15994, 16018), False, 'from aimet_tensorflow.utils.graph_saver import save_and_load_graph\n'), ((16120, 16171), 'aimet_tensorflow.utils.op.conv.BiasUtils.get_bias_as_numpy_data', 'BiasUtils.get_bias_as_numpy_data', (['new_sess', 'conv_op'], {}), '(new_sess, conv_op)\n', (16152, 16171), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((16386, 16410), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (16408, 16410), True, 'import tensorflow as tf\n'), ((16429, 16462), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (16443, 16462), True, 'import tensorflow as tf\n'), ((16668, 16685), 'tensorflow.nn.relu', 'tf.nn.relu', (['dense'], {}), '(dense)\n', (16678, 16685), True, 'import tensorflow as tf\n'), ((16702, 16735), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16733, 16735), True, 'import tensorflow as tf\n'), ((16972, 17014), 'aimet_tensorflow.utils.op.conv.BiasUtils.initialize_model_with_bias', 'BiasUtils.initialize_model_with_bias', (['sess'], {}), '(sess)\n', (17008, 17014), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, 
get_output_activation_shape\n'), ((17301, 17325), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (17323, 17325), True, 'import tensorflow as tf\n'), ((17343, 17376), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (17357, 17376), True, 'import tensorflow as tf\n'), ((17493, 17512), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv_op'], {}), '(conv_op)\n', (17503, 17512), True, 'import tensorflow as tf\n'), ((17588, 17608), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv2_op'], {}), '(conv2_op)\n', (17598, 17608), True, 'import tensorflow as tf\n'), ((17625, 17658), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (17656, 17658), True, 'import tensorflow as tf\n'), ((17915, 17957), 'aimet_tensorflow.utils.common.get_ordered_conv_linears', 'get_ordered_conv_linears', (['sess', '[input_op]'], {}), '(sess, [input_op])\n', (17939, 17957), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((18367, 18391), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (18389, 18391), True, 'import tensorflow as tf\n'), ((18404, 18442), 'aimet_tensorflow.examples.test_models.model_with_multiple_training_tensors', 'model_with_multiple_training_tensors', ([], {}), '()\n', (18440, 18442), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((18745, 18769), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (18767, 18769), True, 'import tensorflow as tf\n'), ((19636, 19660), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (19658, 19660), True, 'import tensorflow as tf\n'), ((20984, 21008), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (21006, 21008), True, 'import 
tensorflow as tf\n'), ((22124, 22148), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (22146, 22148), True, 'import tensorflow as tf\n'), ((23346, 23378), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (23376, 23378), True, 'import tensorflow as tf\n'), ((23391, 23446), 'tensorflow.python.keras.applications.resnet.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)'}), "(weights='imagenet', input_shape=(224, 224, 3))\n", (23399, 23446), False, 'from tensorflow.python.keras.applications.resnet import ResNet50\n'), ((23462, 23492), 'tensorflow.keras.backend.get_session', 'tf.keras.backend.get_session', ([], {}), '()\n', (23490, 23492), True, 'import tensorflow as tf\n'), ((24466, 24490), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (24488, 24490), True, 'import tensorflow as tf\n'), ((24561, 24603), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, 32, 32, 3]'], {}), '(tf.float32, [1, 32, 32, 3])\n', (24575, 24603), True, 'import tensorflow as tf\n'), ((24618, 24651), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['inp', '(32)', '[3, 3]'], {}), '(inp, 32, [3, 3])\n', (24634, 24651), True, 'import tensorflow as tf\n'), ((24664, 24708), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.compat.v1.layers.batch_normalization', (['net'], {}), '(net)\n', (24703, 24708), True, 'import tensorflow as tf\n'), ((24796, 24829), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (24827, 24829), True, 'import tensorflow as tf\n'), ((24964, 25014), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (25001, 25014), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((25036, 25090), 
'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (25077, 25090), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((25106, 25149), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (25136, 25149), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((25166, 25210), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (25197, 25210), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((25472, 25496), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (25494, 25496), True, 'import tensorflow as tf\n'), ((25567, 25609), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, 32, 32, 3]'], {}), '(tf.float32, [1, 32, 32, 3])\n', (25581, 25609), True, 'import tensorflow as tf\n'), ((25624, 25652), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['inp', '(32)', '[3, 3]'], {}), '(inp, 32, [3, 3])\n', (25635, 25652), True, 'import tensorflow.contrib.slim as slim\n'), ((25665, 25728), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['net'], {'decay': '(0.7)', 'epsilon': '(0.65)', 'is_training': '(True)'}), '(net, decay=0.7, epsilon=0.65, is_training=True)\n', (25680, 25728), True, 'import tensorflow.contrib.slim as slim\n'), ((25743, 25776), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (25774, 25776), True, 'import tensorflow as tf\n'), ((25972, 26022), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (26009, 26022), False, 'from 
aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((26044, 26098), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (26085, 26098), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((26114, 26157), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (26144, 26157), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((26174, 26218), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (26205, 26218), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((26511, 26521), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (26519, 26521), True, 'import tensorflow as tf\n'), ((26544, 26584), 'numpy.ones', 'np.ones', (['[5, 5, 3, 32]'], {'dtype': 'np.float32'}), '([5, 5, 3, 32], dtype=np.float32)\n', (26551, 26584), True, 'import numpy as np\n'), ((27059, 27082), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (27069, 27082), True, 'import tensorflow as tf\n'), ((27193, 27301), 'aimet_tensorflow.utils.op.conv.get_output_activation_shape', 'get_output_activation_shape', ([], {'sess': 'sess', 'op': 'conv_op', 'input_op_names': "['input']", 'input_shape': '(1, 3, 10, 10)'}), "(sess=sess, op=conv_op, input_op_names=['input'],\n input_shape=(1, 3, 10, 10))\n", (27220, 27301), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((27618, 27628), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (27626, 27628), True, 'import tensorflow as tf\n'), ((27650, 27691), 'numpy.ones', 'np.ones', (['[1, 3, 10, 10]'], {'dtype': 'np.float32'}), '([1, 3, 10, 10], dtype=np.float32)\n', (27657, 
27691), True, 'import numpy as np\n'), ((27714, 27754), 'numpy.ones', 'np.ones', (['[5, 5, 3, 32]'], {'dtype': 'np.float32'}), '([5, 5, 3, 32], dtype=np.float32)\n', (27721, 27754), True, 'import numpy as np\n'), ((28239, 28262), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (28249, 28262), True, 'import tensorflow as tf\n'), ((28373, 28481), 'aimet_tensorflow.utils.op.conv.get_output_activation_shape', 'get_output_activation_shape', ([], {'sess': 'sess', 'op': 'conv_op', 'input_op_names': "['input']", 'input_shape': '(1, 3, 10, 10)'}), "(sess=sess, op=conv_op, input_op_names=['input'],\n input_shape=(1, 3, 10, 10))\n", (28400, 28481), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((28942, 28952), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (28950, 28952), True, 'import tensorflow as tf\n'), ((28975, 29015), 'numpy.ones', 'np.ones', (['[5, 5, 3, 32]'], {'dtype': 'np.float32'}), '([5, 5, 3, 32], dtype=np.float32)\n', (28982, 29015), True, 'import numpy as np\n'), ((29490, 29513), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (29500, 29513), True, 'import tensorflow as tf\n'), ((29624, 29732), 'aimet_tensorflow.utils.op.conv.get_output_activation_shape', 'get_output_activation_shape', ([], {'sess': 'sess', 'op': 'conv_op', 'input_op_names': "['input']", 'input_shape': '(1, 10, 10, 3)'}), "(sess=sess, op=conv_op, input_op_names=['input'],\n input_shape=(1, 10, 10, 3))\n", (29651, 29732), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((30049, 30059), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (30057, 30059), True, 'import tensorflow as tf\n'), ((30112, 30153), 'numpy.ones', 'np.ones', (['[1, 10, 10, 3]'], {'dtype': 'np.float32'}), '([1, 10, 10, 3], dtype=np.float32)\n', (30119, 30153), True, 'import numpy as np\n'), ((30176, 30216), 'numpy.ones', 
'np.ones', (['[5, 5, 3, 32]'], {'dtype': 'np.float32'}), '([5, 5, 3, 32], dtype=np.float32)\n', (30183, 30216), True, 'import numpy as np\n'), ((30701, 30724), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (30711, 30724), True, 'import tensorflow as tf\n'), ((30835, 30943), 'aimet_tensorflow.utils.op.conv.get_output_activation_shape', 'get_output_activation_shape', ([], {'sess': 'sess', 'op': 'conv_op', 'input_op_names': "['input']", 'input_shape': '(1, 10, 10, 3)'}), "(sess=sess, op=conv_op, input_op_names=['input'],\n input_shape=(1, 10, 10, 3))\n", (30862, 30943), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((3585, 3631), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': 'None', 'input_shape': '(224, 224, 3)'}), '(weights=None, input_shape=(224, 224, 3))\n', (3590, 3631), False, 'from keras.applications.vgg16 import VGG16\n'), ((3651, 3684), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3682, 3684), True, 'import tensorflow as tf\n'), ((4353, 4399), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': 'None', 'input_shape': '(224, 224, 3)'}), '(weights=None, input_shape=(224, 224, 3))\n', (4358, 4399), False, 'from keras.applications.vgg16 import VGG16\n'), ((4419, 4452), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4450, 4452), True, 'import tensorflow as tf\n'), ((4959, 4976), 'aimet_tensorflow.examples.test_models.single_residual', 'single_residual', ([], {}), '()\n', (4974, 4976), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((5408, 5430), 'tensorflow.python.keras.applications.resnet.ResNet50', 'ResNet50', ([], {'weights': 'None'}), '(weights=None)\n', (5416, 5430), False, 'from tensorflow.python.keras.applications.resnet import ResNet50\n'), 
((5887, 6026), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'inp_tensor', 'filter': 'filter_tensor', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""dangling/Conv2D"""'}), "(input=inp_tensor, filter=filter_tensor, strides=[1, 1, 1, 1],\n padding='VALID', data_format='NCHW', name='dangling/Conv2D')\n", (5899, 6026), True, 'import tensorflow as tf\n'), ((7062, 7084), 'aimet_tensorflow.examples.test_models.multiple_input_model', 'multiple_input_model', ([], {}), '()\n', (7082, 7084), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((8012, 8029), 'aimet_tensorflow.examples.test_models.single_residual', 'single_residual', ([], {}), '()\n', (8027, 8029), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((8433, 8455), 'aimet_tensorflow.examples.test_models.multiple_input_model', 'multiple_input_model', ([], {}), '()\n', (8453, 8455), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((8511, 8536), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(3)'], {}), '(10, 10, 3)\n', (8525, 8536), True, 'import numpy as np\n'), ((8564, 8589), 'numpy.random.rand', 'np.random.rand', (['(12)', '(12)', '(3)'], {}), '(12, 12, 3)\n', (8578, 8589), True, 'import numpy as np\n'), ((9096, 9118), 'aimet_tensorflow.examples.test_models.multiple_input_model', 'multiple_input_model', ([], {}), '()\n', (9116, 9118), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((9142, 9167), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(3)'], {}), '(10, 10, 3)\n', (9156, 9167), True, 'import numpy as np\n'), ((9169, 9194), 'numpy.random.rand', 'np.random.rand', (['(12)', '(12)', 
'(3)'], {}), '(12, 12, 3)\n', (9183, 9194), True, 'import numpy as np\n'), ((9708, 9730), 'aimet_tensorflow.examples.test_models.multiple_input_model', 'multiple_input_model', ([], {}), '()\n', (9728, 9730), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((10162, 10200), 'aimet_tensorflow.examples.test_models.model_with_multiple_training_tensors', 'model_with_multiple_training_tensors', ([], {}), '()\n', (10198, 10200), False, 'from aimet_tensorflow.examples.test_models import single_residual, multiple_input_model, model_with_multiple_training_tensors\n'), ((11102, 11168), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (11136, 11168), True, 'import tensorflow as tf\n'), ((11200, 11236), 'aimet_tensorflow.utils.common.iter_first_x', 'iter_first_x', (['dataset'], {'num_batches': '(5)'}), '(dataset, num_batches=5)\n', (11212, 11236), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((11565, 11631), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (11599, 11631), True, 'import tensorflow as tf\n'), ((11702, 11738), 'aimet_tensorflow.utils.common.iter_first_x', 'iter_first_x', (['dataset'], {'num_batches': '(5)'}), '(dataset, num_batches=5)\n', (11714, 11738), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((12156, 12192), 'aimet_tensorflow.utils.common.iter_first_x', 'iter_first_x', (['dataset'], {'num_batches': '(6)'}), '(dataset, num_batches=6)\n', (12168, 12192), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, 
create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((13547, 13599), 'numpy.allclose', 'np.allclose', (['original_weights', 'updated_weight_tensor'], {}), '(original_weights, updated_weight_tensor)\n', (13558, 13599), True, 'import numpy as np\n'), ((13625, 13671), 'numpy.allclose', 'np.allclose', (['numpy_data', 'updated_weight_tensor'], {}), '(numpy_data, updated_weight_tensor)\n', (13636, 13671), True, 'import numpy as np\n'), ((14139, 14185), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(True)'}), '(fused=True)\n', (14173, 14185), True, 'import tensorflow as tf\n'), ((14890, 14930), 'numpy.allclose', 'np.allclose', (['original_bias', 'updated_bias'], {}), '(original_bias, updated_bias)\n', (14901, 14930), True, 'import numpy as np\n'), ((14956, 14993), 'numpy.allclose', 'np.allclose', (['numpy_data', 'updated_bias'], {}), '(numpy_data, updated_bias)\n', (14967, 14993), True, 'import numpy as np\n'), ((15292, 15342), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'use_bias': '(False)'}), '(32, (3, 3), use_bias=False)\n', (15314, 15342), True, 'import tensorflow as tf\n'), ((15367, 15413), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(True)'}), '(fused=True)\n', (15401, 15413), True, 'import tensorflow as tf\n'), ((15683, 15714), 'aimet_tensorflow.utils.op.conv.BiasUtils.is_bias_none', 'BiasUtils.is_bias_none', (['conv_op'], {}), '(conv_op)\n', (15705, 15714), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((16192, 16223), 'aimet_tensorflow.utils.op.conv.BiasUtils.is_bias_none', 'BiasUtils.is_bias_none', (['conv_op'], {}), '(conv_op)\n', (16214, 16223), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((16476, 16501), 'tensorflow.keras.layers.Flatten', 
'tf.keras.layers.Flatten', ([], {}), '()\n', (16499, 16501), True, 'import tensorflow as tf\n'), ((16526, 16621), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'use_bias': '(False)', 'activation': 'tf.nn.softmax', 'name': '"""single_residual"""'}), "(2, use_bias=False, activation=tf.nn.softmax, name=\n 'single_residual')\n", (16547, 16621), True, 'import tensorflow as tf\n'), ((16918, 16950), 'aimet_tensorflow.utils.op.conv.BiasUtils.is_bias_none', 'BiasUtils.is_bias_none', (['dense_op'], {}), '(dense_op)\n', (16940, 16950), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((17397, 17431), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (17419, 17431), True, 'import tensorflow as tf\n'), ((17533, 17567), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (17555, 17567), True, 'import tensorflow as tf\n'), ((18491, 18513), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (18511, 18513), True, 'import tensorflow as tf\n'), ((18881, 18914), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (18895, 18914), True, 'import tensorflow as tf\n'), ((19109, 19142), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (19140, 19142), True, 'import tensorflow as tf\n'), ((19197, 19250), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op.op'], {}), '(sess, bn_op.op)\n', (19234, 19250), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((19277, 19334), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op.op'], {}), '(sess, bn_op.op)\n', (19318, 19334), False, 'from 
aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((19771, 19804), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (19785, 19804), True, 'import tensorflow as tf\n'), ((19980, 20013), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (20011, 20013), True, 'import tensorflow as tf\n'), ((20320, 20380), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (20357, 20380), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((20406, 20470), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (20447, 20470), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((20490, 20543), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (20520, 20543), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((20564, 20618), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (20595, 20618), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((21119, 21152), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (21133, 21152), True, 'import tensorflow as tf\n'), ((21370, 21403), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (21401, 21403), True, 'import tensorflow as tf\n'), ((21458, 21518), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 
'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (21495, 21518), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((21544, 21608), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (21585, 21608), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((21628, 21681), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (21658, 21681), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((21702, 21756), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (21733, 21756), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((22259, 22292), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (22273, 22292), True, 'import tensorflow as tf\n'), ((22510, 22543), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (22541, 22543), True, 'import tensorflow as tf\n'), ((22598, 22658), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (22635, 22658), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((22684, 22748), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (22725, 22748), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((22768, 
22821), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (22798, 22821), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((22842, 22896), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op_tensor.op'], {}), '(sess, bn_op_tensor.op)\n', (22873, 22896), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((23915, 23965), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_mean_as_numpy_data', 'BNUtils.get_moving_mean_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (23952, 23965), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((23991, 24045), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_moving_variance_as_numpy_data', 'BNUtils.get_moving_variance_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (24032, 24045), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((24065, 24108), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_beta_as_numpy_data', 'BNUtils.get_beta_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (24095, 24108), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((24129, 24173), 'aimet_tensorflow.utils.op.fusedbatchnorm.BNUtils.get_gamma_as_numpy_data', 'BNUtils.get_gamma_as_numpy_data', (['sess', 'bn_op'], {}), '(sess, bn_op)\n', (24160, 24173), False, 'from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils\n'), ((26646, 26704), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, None, None, None]', '"""input"""'], {}), "(tf.float32, [1, None, None, None], 'input')\n", (26660, 26704), True, 'import tensorflow as tf\n'), ((26734, 26812), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'filter_data', 'name': '"""filter_tensor"""', 'dtype': 
'tf.float32'}), "(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)\n", (26745, 26812), True, 'import tensorflow as tf\n'), ((26830, 26963), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'filter_tensor', 'padding': '"""SAME"""', 'strides': '[1, 1, 1, 1]', 'data_format': '"""NCHW"""', 'name': '"""Conv2D_1"""'}), "(input=input_tensor, filter=filter_tensor, padding='SAME',\n strides=[1, 1, 1, 1], data_format='NCHW', name='Conv2D_1')\n", (26842, 26963), True, 'import tensorflow as tf\n'), ((27009, 27042), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (27040, 27042), True, 'import tensorflow as tf\n'), ((27816, 27885), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'input_data', 'name': '"""input"""', 'dtype': 'tf.float32'}), "(initial_value=input_data, name='input', dtype=tf.float32)\n", (27827, 27885), True, 'import tensorflow as tf\n'), ((27914, 27992), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'filter_data', 'name': '"""filter_tensor"""', 'dtype': 'tf.float32'}), "(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)\n", (27925, 27992), True, 'import tensorflow as tf\n'), ((28010, 28143), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'filter_tensor', 'padding': '"""SAME"""', 'strides': '[1, 1, 1, 1]', 'data_format': '"""NCHW"""', 'name': '"""Conv2D_1"""'}), "(input=input_tensor, filter=filter_tensor, padding='SAME',\n strides=[1, 1, 1, 1], data_format='NCHW', name='Conv2D_1')\n", (28022, 28143), True, 'import tensorflow as tf\n'), ((28189, 28222), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (28220, 28222), True, 'import tensorflow as tf\n'), ((29077, 29135), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, None, None, None]', '"""input"""'], {}), "(tf.float32, [1, None, None, None], 'input')\n", (29091, 29135), True, 
'import tensorflow as tf\n'), ((29165, 29243), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'filter_data', 'name': '"""filter_tensor"""', 'dtype': 'tf.float32'}), "(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)\n", (29176, 29243), True, 'import tensorflow as tf\n'), ((29261, 29394), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'filter_tensor', 'padding': '"""SAME"""', 'strides': '[1, 1, 1, 1]', 'data_format': '"""NHWC"""', 'name': '"""Conv2D_1"""'}), "(input=input_tensor, filter=filter_tensor, padding='SAME',\n strides=[1, 1, 1, 1], data_format='NHWC', name='Conv2D_1')\n", (29273, 29394), True, 'import tensorflow as tf\n'), ((29440, 29473), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (29471, 29473), True, 'import tensorflow as tf\n'), ((30278, 30347), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'input_data', 'name': '"""input"""', 'dtype': 'tf.float32'}), "(initial_value=input_data, name='input', dtype=tf.float32)\n", (30289, 30347), True, 'import tensorflow as tf\n'), ((30376, 30454), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'filter_data', 'name': '"""filter_tensor"""', 'dtype': 'tf.float32'}), "(initial_value=filter_data, name='filter_tensor', dtype=tf.float32)\n", (30387, 30454), True, 'import tensorflow as tf\n'), ((30472, 30605), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'filter_tensor', 'padding': '"""SAME"""', 'strides': '[1, 1, 1, 1]', 'data_format': '"""NHWC"""', 'name': '"""Conv2D_1"""'}), "(input=input_tensor, filter=filter_tensor, padding='SAME',\n strides=[1, 1, 1, 1], data_format='NHWC', name='Conv2D_1')\n", (30484, 30605), True, 'import tensorflow as tf\n'), ((30651, 30684), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (30682, 30684), True, 'import tensorflow as tf\n'), ((9828, 9928), 
'aimet_tensorflow.utils.common.create_input_feed_dict', 'create_input_feed_dict', ([], {'graph': 'g', 'input_op_names_list': "['input1', 'input2']", 'input_data': 'input_data'}), "(graph=g, input_op_names_list=['input1', 'input2'],\n input_data=input_data)\n", (9850, 9928), False, 'from aimet_tensorflow.utils.common import get_ordered_ops, create_input_feed_dict, iter_first_x, get_ordered_conv_linears, get_training_tensors\n'), ((16768, 16790), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (16788, 16790), True, 'import tensorflow as tf\n'), ((17126, 17158), 'aimet_tensorflow.utils.op.conv.BiasUtils.is_bias_none', 'BiasUtils.is_bias_none', (['dense_op'], {}), '(dense_op)\n', (17148, 17158), False, 'from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils, get_output_activation_shape\n'), ((17691, 17713), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (17711, 17713), True, 'import tensorflow as tf\n'), ((18803, 18825), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (18823, 18825), True, 'import tensorflow as tf\n'), ((18939, 18989), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'use_bias': '(False)'}), '(32, (3, 3), use_bias=False)\n', (18961, 18989), True, 'import tensorflow as tf\n'), ((19018, 19064), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(True)'}), '(fused=True)\n', (19052, 19064), True, 'import tensorflow as tf\n'), ((19693, 19715), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (19713, 19715), True, 'import tensorflow as tf\n'), ((19828, 19878), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'use_bias': '(False)'}), '(32, (3, 3), use_bias=False)\n', (19850, 19878), True, 'import tensorflow as tf\n'), ((19904, 19950), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], 
{'fused': '(True)'}), '(fused=True)\n', (19938, 19950), True, 'import tensorflow as tf\n'), ((21041, 21063), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (21061, 21063), True, 'import tensorflow as tf\n'), ((21176, 21226), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'use_bias': '(False)'}), '(32, (3, 3), use_bias=False)\n', (21198, 21226), True, 'import tensorflow as tf\n'), ((21262, 21325), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(True)', 'name': '"""bn_op_1/"""'}), "(fused=True, name='bn_op_1/')\n", (21296, 21325), True, 'import tensorflow as tf\n'), ((22181, 22203), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (22201, 22203), True, 'import tensorflow as tf\n'), ((22316, 22366), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'use_bias': '(False)'}), '(32, (3, 3), use_bias=False)\n', (22338, 22366), True, 'import tensorflow as tf\n'), ((22402, 22464), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'fused': '(True)', 'name': '"""bn_op_1"""'}), "(fused=True, name='bn_op_1')\n", (22436, 22464), True, 'import tensorflow as tf\n'), ((24523, 24545), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (24543, 24545), True, 'import tensorflow as tf\n'), ((25529, 25551), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (25549, 25551), True, 'import tensorflow as tf\n'), ((5561, 5591), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (5589, 5591), True, 'import tensorflow as tf\n'), ((5733, 5763), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (5761, 5763), True, 'import tensorflow as tf\n'), ((12752, 12788), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-1)', '(2)'], {}), '(-1, 
2)\n', (12781, 12788), True, 'import tensorflow as tf\n'), ((14076, 14112), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-1)', '(2)'], {}), '(-1, 2)\n', (14105, 14112), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
""" Class for creating trimmed received noise files to estimate H
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
class Trimmer(object):
    """Creates trimmed received-noise files used to estimate H."""

    # All input/output .bin files are read from / written to this directory.
    data_path = "../data/"

    @staticmethod
    def trim_both(fname, output_name, noise_length=100000, gap=10000, offset=10):
        """ Writes two files that each contain one of the two trimmed blocks
        of received noise

        Parameters
        ----------
        fname : str
            name of the binary file to be trimmed, without file extension
        output_name : str
            base name of the two output files; "1"/"2" suffixes are appended
        noise_length : int
            length of the noise block, in number of samples
        gap : int
            length of the gap between noise blocks, in number of samples
        offset : int
            number of samples used to accurately tune finding the blocks
        """
        received = np.fromfile(Trimmer.data_path+fname+".bin",
                                dtype=np.complex64)
        rec_length = range(len(received))
        rec_ampl = np.absolute(received)
        # Noise-floor estimate: peak amplitude over the leading 200k samples
        # (assumed to precede the transmitted blocks).
        noise_ampl = np.amax(rec_ampl[:200000])
        # First sample exceeding 3x the noise floor marks the start of block
        # one; `offset` lets the caller fine-tune that detection.
        beg1 = np.argmax(rec_ampl > 3 * noise_ampl) + offset
        end1 = beg1 + noise_length
        beg2 = end1 + gap
        end2 = beg2 + noise_length

        # Plot both detected blocks with surrounding context as a visual
        # sanity check before writing the trimmed files.
        plt.subplot(2, 1, 1)
        plt.plot(rec_length[beg1-gap:end1+gap], rec_ampl[beg1-gap:end1+gap],
                 '.', ms=2, label="received")
        plt.plot(rec_length[beg1:end1], rec_ampl[beg1:end1],
                 '.', ms=2, label="first")
        plt.title("FIRST")
        plt.subplot(2, 1, 2)
        plt.plot(rec_length[beg2-gap:end2+gap], rec_ampl[beg2-gap:end2+gap],
                 '.', ms=2, label="received")
        plt.plot(rec_length[beg2:end2], rec_ampl[beg2:end2],
                 '.', ms=2, label="second")
        plt.title("SECOND")
        plt.show()
        Trimmer.write_trimmed(output_name, received[beg1:end1], received[beg2:end2])

    @staticmethod
    def write_trimmed(output_name, first, second):
        """ Writes two binary complex64 files

        Parameters
        ----------
        output_name : str
            base name of the files to write; "1"/"2" suffixes are appended
        first : ndarray
            the first complex array to write to a file
        second : ndarray
            the second complex array to write to a file
        """
        # Context managers guarantee the files are closed even if a write fails.
        with open(Trimmer.data_path+output_name+"1.bin", 'wb') as output_file:
            output_file.write(first.tobytes())
        with open(Trimmer.data_path+output_name+"2.bin", 'wb') as output_file:
            output_file.write(second.tobytes())
if __name__ == "__main__":
    # Trim both recorded noise captures used for channel (H) estimation.
    for raw_name, trimmed_name in (("recnoise1", "noise_h1"),
                                   ("recnoise2", "noise_h2")):
        Trimmer.trim_both(raw_name, output_name=trimmed_name, offset=19)
| [
"matplotlib.pyplot.title",
"numpy.absolute",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.fromfile",
"numpy.amax"
] | [((899, 966), 'numpy.fromfile', 'np.fromfile', (["(Trimmer.data_path + fname + '.bin')"], {'dtype': 'np.complex64'}), "(Trimmer.data_path + fname + '.bin', dtype=np.complex64)\n", (910, 966), True, 'import numpy as np\n'), ((1036, 1057), 'numpy.absolute', 'np.absolute', (['received'], {}), '(received)\n', (1047, 1057), True, 'import numpy as np\n'), ((1079, 1105), 'numpy.amax', 'np.amax', (['rec_ampl[:200000]'], {}), '(rec_ampl[:200000])\n', (1086, 1105), True, 'import numpy as np\n'), ((1267, 1287), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1278, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1403), 'matplotlib.pyplot.plot', 'plt.plot', (['rec_length[beg1 - gap:end1 + gap]', 'rec_ampl[beg1 - gap:end1 + gap]', '"""."""'], {'ms': '(2)', 'label': '"""received"""'}), "(rec_length[beg1 - gap:end1 + gap], rec_ampl[beg1 - gap:end1 + gap],\n '.', ms=2, label='received')\n", (1302, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1490), 'matplotlib.pyplot.plot', 'plt.plot', (['rec_length[beg1:end1]', 'rec_ampl[beg1:end1]', '"""."""'], {'ms': '(2)', 'label': '"""first"""'}), "(rec_length[beg1:end1], rec_ampl[beg1:end1], '.', ms=2, label='first')\n", (1420, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1511, 1529), 'matplotlib.pyplot.title', 'plt.title', (['"""FIRST"""'], {}), "('FIRST')\n", (1520, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1558), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1549, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1565, 1674), 'matplotlib.pyplot.plot', 'plt.plot', (['rec_length[beg2 - gap:end2 + gap]', 'rec_ampl[beg2 - gap:end2 + gap]', '"""."""'], {'ms': '(2)', 'label': '"""received"""'}), "(rec_length[beg2 - gap:end2 + gap], rec_ampl[beg2 - gap:end2 + gap],\n '.', ms=2, label='received')\n", (1573, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1762), 'matplotlib.pyplot.plot', 'plt.plot', 
(['rec_length[beg2:end2]', 'rec_ampl[beg2:end2]', '"""."""'], {'ms': '(2)', 'label': '"""second"""'}), "(rec_length[beg2:end2], rec_ampl[beg2:end2], '.', ms=2, label='second')\n", (1691, 1762), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1802), 'matplotlib.pyplot.title', 'plt.title', (['"""SECOND"""'], {}), "('SECOND')\n", (1792, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1811, 1821), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1819, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1158), 'numpy.argmax', 'np.argmax', (['(rec_ampl > 3 * noise_ampl)'], {}), '(rec_ampl > 3 * noise_ampl)\n', (1131, 1158), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import numpy as np
from alphamind.utilities import aggregate
from alphamind.utilities import array_index
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_mean
from alphamind.utilities import simple_sqrsum
from alphamind.utilities import simple_std
from alphamind.utilities import transform
def standardize(x: np.ndarray, groups: np.ndarray = None, ddof=1) -> np.ndarray:
    """Demean ``x`` and scale it by its standard deviation.

    When ``groups`` is given, the mean/std are computed per group and
    broadcast back to the rows; otherwise column-wise over all of ``x``.
    The divisor is floored at 1e-8 to avoid division by zero.
    """
    if groups is None:
        center = simple_mean(x, axis=0)
        spread = simple_std(x, axis=0, ddof=ddof)
        return (x - center) / np.maximum(spread, 1e-8)

    mapped = group_mapping(groups)
    center = transform(mapped, x, 'mean')
    spread = transform(mapped, x, 'std', ddof)
    return (x - center) / np.maximum(spread, 1e-8)
def projection(x: np.ndarray, groups: np.ndarray = None, axis=1) -> np.ndarray:
    """Scale ``x`` by the square-root sum of squares along ``axis``.

    Grouped projection (via ``transform``) is only taken when ``groups``
    is supplied AND ``axis == 0``; every other combination falls back to
    dividing each row by its ``simple_sqrsum`` norm.
    """
    if groups is None or axis != 0:
        norms = simple_sqrsum(x, axis=axis).reshape((-1, 1))
        return x / norms
    return transform(group_mapping(groups), x, 'project')
class Standardizer(object):
    """Two-step (fit, then transform) standardizer with optional groups."""

    def __init__(self, ddof: int = 1):
        # Delta degrees of freedom forwarded to the std estimators.
        self.ddof = ddof
        # Populated by fit(); transform() requires fit() to have run first.
        self.mean = None
        self.std = None
        self.labels = None

    def fit(self, x: np.ndarray, groups: np.ndarray = None):
        """Estimate mean/std from ``x`` (per group when ``groups`` is given)."""
        if groups is None:
            self.mean = simple_mean(x, axis=0)
            self.std = simple_std(x, axis=0, ddof=self.ddof)
            return
        mapped = group_mapping(groups)
        self.mean = aggregate(mapped, x, 'mean')
        self.std = aggregate(mapped, x, 'std', self.ddof)
        self.labels = np.unique(groups)

    def transform(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
        """Standardize ``x`` using the statistics captured by ``fit``."""
        if groups is None:
            return (x - self.mean) / np.maximum(self.std, 1e-8)
        row_idx = array_index(self.labels, groups)
        return (x - self.mean[row_idx]) / np.maximum(self.std[row_idx], 1e-8)

    def __call__(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
        """One-shot standardization of ``x``; recomputes statistics from
        ``x`` itself (only ``self.ddof`` of the fitted state is used)."""
        return standardize(x, groups, self.ddof)
| [
"numpy.maximum",
"alphamind.utilities.array_index",
"alphamind.utilities.group_mapping",
"alphamind.utilities.transform",
"alphamind.utilities.simple_mean",
"alphamind.utilities.simple_sqrsum",
"alphamind.utilities.simple_std",
"alphamind.utilities.aggregate",
"numpy.unique"
] | [((527, 548), 'alphamind.utilities.group_mapping', 'group_mapping', (['groups'], {}), '(groups)\n', (540, 548), False, 'from alphamind.utilities import group_mapping\n'), ((571, 599), 'alphamind.utilities.transform', 'transform', (['groups', 'x', '"""mean"""'], {}), "(groups, x, 'mean')\n", (580, 599), False, 'from alphamind.utilities import transform\n'), ((621, 654), 'alphamind.utilities.transform', 'transform', (['groups', 'x', '"""std"""', 'ddof'], {}), "(groups, x, 'std', ddof)\n", (630, 654), False, 'from alphamind.utilities import transform\n'), ((967, 988), 'alphamind.utilities.group_mapping', 'group_mapping', (['groups'], {}), '(groups)\n', (980, 988), False, 'from alphamind.utilities import group_mapping\n'), ((1009, 1040), 'alphamind.utilities.transform', 'transform', (['groups', 'x', '"""project"""'], {}), "(groups, x, 'project')\n", (1018, 1040), False, 'from alphamind.utilities import transform\n'), ((691, 720), 'numpy.maximum', 'np.maximum', (['std_values', '(1e-08)'], {}), '(std_values, 1e-08)\n', (701, 720), True, 'import numpy as np\n'), ((1430, 1451), 'alphamind.utilities.group_mapping', 'group_mapping', (['groups'], {}), '(groups)\n', (1443, 1451), False, 'from alphamind.utilities import group_mapping\n'), ((1476, 1509), 'alphamind.utilities.aggregate', 'aggregate', (['group_index', 'x', '"""mean"""'], {}), "(group_index, x, 'mean')\n", (1485, 1509), False, 'from alphamind.utilities import aggregate\n'), ((1533, 1576), 'alphamind.utilities.aggregate', 'aggregate', (['group_index', 'x', '"""std"""', 'self.ddof'], {}), "(group_index, x, 'std', self.ddof)\n", (1542, 1576), False, 'from alphamind.utilities import aggregate\n'), ((1603, 1620), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (1612, 1620), True, 'import numpy as np\n'), ((1659, 1681), 'alphamind.utilities.simple_mean', 'simple_mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1670, 1681), False, 'from alphamind.utilities import simple_mean\n'), ((1705, 1742), 
'alphamind.utilities.simple_std', 'simple_std', (['x'], {'axis': '(0)', 'ddof': 'self.ddof'}), '(x, axis=0, ddof=self.ddof)\n', (1715, 1742), False, 'from alphamind.utilities import simple_std\n'), ((1876, 1908), 'alphamind.utilities.array_index', 'array_index', (['self.labels', 'groups'], {}), '(self.labels, groups)\n', (1887, 1908), False, 'from alphamind.utilities import array_index\n'), ((750, 772), 'alphamind.utilities.simple_mean', 'simple_mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (761, 772), False, 'from alphamind.utilities import simple_mean\n'), ((787, 819), 'alphamind.utilities.simple_std', 'simple_std', (['x'], {'axis': '(0)', 'ddof': 'ddof'}), '(x, axis=0, ddof=ddof)\n', (797, 819), False, 'from alphamind.utilities import simple_std\n'), ((1953, 1987), 'numpy.maximum', 'np.maximum', (['self.std[index]', '(1e-08)'], {}), '(self.std[index], 1e-08)\n', (1963, 1987), True, 'import numpy as np\n'), ((2038, 2065), 'numpy.maximum', 'np.maximum', (['self.std', '(1e-08)'], {}), '(self.std, 1e-08)\n', (2048, 2065), True, 'import numpy as np\n'), ((1095, 1122), 'alphamind.utilities.simple_sqrsum', 'simple_sqrsum', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (1108, 1122), False, 'from alphamind.utilities import simple_sqrsum\n')] |
#########################################################################
# 2020
# Author: <NAME>
#########################################################################
import numpy as np
import cv2
from dataset.types import Sample
class Flip:
    """Horizontally mirror the image and its lane annotations with a given probability."""

    def __init__(self, prob_to_apply=0.5):
        # Chance in [0, 1) that the flip is actually performed per call.
        self.prob = prob_to_apply

    def __call__(self, sample: Sample) -> Sample:
        if np.random.uniform() >= self.prob:
            # Skip the augmentation for this sample.
            return sample
        sample["image"] = cv2.flip(sample["image"], 1)  # 1 = flip around the vertical axis
        _, width, _ = sample["image"].shape
        # Mirror every lane point's x coordinate around the image width.
        for lane in sample["lane_list"]:
            lane[:, 0] = width - lane[:, 0]
        return sample
class Translate:
    """Randomly shift the image and its lane points by an integer (tx, ty) offset."""

    def __init__(self, prob_to_apply=0.5, tx_min=-50, tx_max=50, ty_min=-30, ty_max=30):
        self.prob = prob_to_apply
        self.tx = (tx_min, tx_max)  # horizontal shift range, passed to np.random.randint
        self.ty = (ty_min, ty_max)  # vertical shift range, passed to np.random.randint

    def __call__(self, sample: Sample) -> Sample:
        if np.random.uniform() >= self.prob:
            return sample
        shift_x = np.random.randint(*self.tx)
        shift_y = np.random.randint(*self.ty)
        height, width, _ = sample["image"].shape
        # Pure translation expressed as a 2x3 affine matrix.
        affine = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
        sample["image"] = cv2.warpAffine(sample["image"],
                                          affine,
                                          (width, height))
        for idx, lane in enumerate(sample["lane_list"]):
            lane[:, 0] = lane[:, 0] + shift_x
            lane[:, 1] = lane[:, 1] + shift_y
            # Keep only the points that are still inside the image after the shift.
            inside = (lane[:, 0] >= 0) & (lane[:, 0] < width) & (lane[:, 1] >= 0) & (lane[:, 1] < height)
            sample["lane_list"][idx] = lane[inside]
        return sample
class Rotate:
    """Rotate the image and its lane points around the image center with a given probability."""

    def __init__(self, prob_to_apply=0.5, angle_min=-10, angle_max=10):
        """
        :param prob_to_apply: from [0.0, 1.0)
        :param angle_min: [deg]
        :param angle_max: [deg]
        """
        self.prob = prob_to_apply
        self.angle = (angle_min, angle_max)

    def __call__(self, sample: Sample) -> Sample:
        if np.random.uniform() < self.prob:
            rows, cols, chs = sample["image"].shape
            # Integer angle in degrees sampled from [angle_min, angle_max).
            angle = np.random.randint(*self.angle)
            # 2x3 affine matrix rotating around the image center, scale 1.
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
            sample["image"] = cv2.warpAffine(sample["image"], M, (cols, rows))
            n_lanes = len(sample["lane_list"])
            for i in range(n_lanes):
                xy_array = sample["lane_list"][i]
                # Apply the same affine map to the (N, 2) point array:
                # row-vector form  p' = p @ R^T + t,  where M = [R | t].
                xy_array = np.dot(xy_array, M.T[:2, :2]) + M.T[-1, :]
                mask = (xy_array[:, 0] < 0) | (xy_array[:, 0] >= cols) | (xy_array[:, 1] < 0) | (xy_array[:, 1] >= rows)
                sample["lane_list"][i] = xy_array[~mask]  # remove transformed point if outside image boundary
        return sample
class AddGaussianNoise:
    """Add per-channel Gaussian noise (mean 0, stddev 20) to the image with a given probability."""

    def __init__(self, prob_to_apply=0.5):
        self.prob = prob_to_apply
        self.mean = (0, 0, 0)        # per-channel noise mean
        self.stddev = (20, 20, 20)   # per-channel noise standard deviation

    def __call__(self, sample: Sample) -> Sample:
        if np.random.uniform() >= self.prob:
            return sample
        # cv2.randn fills the buffer in place with Gaussian samples.
        noise = np.zeros_like(sample["image"], dtype=np.uint8)
        cv2.randn(noise, self.mean, self.stddev)
        # NOTE(review): uint8 addition wraps around on overflow — presumably intentional; confirm.
        sample["image"] = sample["image"] + noise
        return sample
class ChangeIntensity:
    """Randomly brighten or darken the image via the HSV value channel, with saturation at 0/255."""

    def __init__(self, prob_to_apply=0.5):
        self.prob = prob_to_apply
        # Range of the additive intensity change applied to the V channel.
        self.range = (-60.0, 60.0)

    def __call__(self, sample: Sample) -> Sample:
        if np.random.uniform() < self.prob:
            hsv = cv2.cvtColor(sample["image"], cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(hsv)
            value = int(np.random.uniform(*self.range))
            if value > 0:
                # Brighten: clamp pixels that would exceed 255, then add safely
                # (avoids uint8 wraparound).
                lim = 255 - value
                v[v > lim] = 255
                v[v <= lim] += value
            else:
                # Darken: clamp pixels that would go below 0, then subtract safely.
                lim = -1 * value
                v[v < lim] = 0
                v[v >= lim] -= lim
            final_hsv = cv2.merge((h, s, v))
            sample["image"] = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
        return sample
# class AddShadow:
# def __init__(self, prob_to_apply=0.5, alpha_min=0.5, alpha_max=0.75):
# self.prob = prob_to_apply
# self.alpha = (alpha_min, alpha_max)
#
# def __call__(self, sample: SampleBDD100K) -> SampleBDD100K:
# if np.random.uniform() < self.prob:
# rows, cols, chs = sample["image"].shape
# coin = np.random.randint(2)
# top_x, bottom_x = np.random.randint(0, 512, 2)
# shadow_img = sample["image"].copy()
# if coin == 0:
# rand = np.random.randint(2)
# vertices = np.array([[(50, 65), (45, 0), (145, 0), (150, 65)]], dtype=np.int32)
# if rand == 0:
# vertices = np.array([[top_x, 0], [0, 0], [0, rows], [bottom_x, rows]], dtype=np.int32)
# elif rand == 1:
# vertices = np.array([[top_x, 0], [cols, 0], [cols, rows], [bottom_x, rows]], dtype=np.int32)
# mask = sample["image"].copy()
# channel_count = sample["image"].shape[2] # i.e. 3 or 4 depending on your image
# ignore_mask_color = (0,) * channel_count
# cv2.fillPoly(mask, [vertices], ignore_mask_color)
# rand_alpha = np.random.uniform(*self.alpha)
# cv2.addWeighted(mask, rand_alpha, sample["image"], 1 - rand_alpha, 0., shadow_img)
# sample["image"] = shadow_img
# return sample
| [
"numpy.random.uniform",
"numpy.zeros_like",
"cv2.cvtColor",
"numpy.float32",
"cv2.warpAffine",
"numpy.random.randint",
"cv2.randn",
"cv2.split",
"numpy.dot",
"cv2.flip",
"cv2.merge",
"cv2.getRotationMatrix2D"
] | [((391, 410), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (408, 410), True, 'import numpy as np\n'), ((454, 482), 'cv2.flip', 'cv2.flip', (["sample['image']", '(1)'], {}), "(sample['image'], 1)\n", (462, 482), False, 'import cv2\n'), ((935, 954), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (952, 954), True, 'import numpy as np\n'), ((985, 1012), 'numpy.random.randint', 'np.random.randint', (['*self.tx'], {}), '(*self.tx)\n', (1002, 1012), True, 'import numpy as np\n'), ((1030, 1057), 'numpy.random.randint', 'np.random.randint', (['*self.ty'], {}), '(*self.ty)\n', (1047, 1057), True, 'import numpy as np\n'), ((2172, 2191), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2189, 2191), True, 'import numpy as np\n'), ((2277, 2307), 'numpy.random.randint', 'np.random.randint', (['*self.angle'], {}), '(*self.angle)\n', (2294, 2307), True, 'import numpy as np\n'), ((2324, 2379), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', 'angle', '(1)'], {}), '((cols / 2, rows / 2), angle, 1)\n', (2347, 2379), False, 'import cv2\n'), ((2410, 2458), 'cv2.warpAffine', 'cv2.warpAffine', (["sample['image']", 'M', '(cols, rows)'], {}), "(sample['image'], M, (cols, rows))\n", (2424, 2458), False, 'import cv2\n'), ((3148, 3167), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3165, 3167), True, 'import numpy as np\n'), ((3201, 3247), 'numpy.zeros_like', 'np.zeros_like', (["sample['image']"], {'dtype': 'np.uint8'}), "(sample['image'], dtype=np.uint8)\n", (3214, 3247), True, 'import numpy as np\n'), ((3260, 3300), 'cv2.randn', 'cv2.randn', (['noise', 'self.mean', 'self.stddev'], {}), '(noise, self.mean, self.stddev)\n', (3269, 3300), False, 'import cv2\n'), ((3576, 3595), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3593, 3595), True, 'import numpy as np\n'), ((3627, 3675), 'cv2.cvtColor', 'cv2.cvtColor', (["sample['image']", 'cv2.COLOR_BGR2HSV'], {}), "(sample['image'], 
cv2.COLOR_BGR2HSV)\n", (3639, 3675), False, 'import cv2\n'), ((3698, 3712), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (3707, 3712), False, 'import cv2\n'), ((4040, 4060), 'cv2.merge', 'cv2.merge', (['(h, s, v)'], {}), '((h, s, v))\n', (4049, 4060), False, 'import cv2\n'), ((4091, 4133), 'cv2.cvtColor', 'cv2.cvtColor', (['final_hsv', 'cv2.COLOR_HSV2BGR'], {}), '(final_hsv, cv2.COLOR_HSV2BGR)\n', (4103, 4133), False, 'import cv2\n'), ((1217, 1253), 'numpy.float32', 'np.float32', (['[[1, 0, tx], [0, 1, ty]]'], {}), '([[1, 0, tx], [0, 1, ty]])\n', (1227, 1253), True, 'import numpy as np\n'), ((3737, 3767), 'numpy.random.uniform', 'np.random.uniform', (['*self.range'], {}), '(*self.range)\n', (3754, 3767), True, 'import numpy as np\n'), ((2621, 2650), 'numpy.dot', 'np.dot', (['xy_array', 'M.T[:2, :2]'], {}), '(xy_array, M.T[:2, :2])\n', (2627, 2650), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from baselines.common.tf_util import get_session, save_variables, load_variables
from mpi4py import MPI
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
from baselines.common.constants import constants
# Helper function to build convnets
def conv2d(inputs, filters, kernel_size, strides, padding):
    """Thin wrapper around tf.layers.conv2d that always uses a square kernel.

    :param inputs: input tensor
    :param filters: number of output filters
    :param kernel_size: side length of the square convolution kernel
    :param strides: convolution stride
    :param padding: padding mode, e.g. "valid" or "same"
    :return: the convolution output tensor
    """
    square_kernel = (kernel_size, kernel_size)
    return tf.layers.conv2d(inputs=inputs,
                            filters=filters,
                            kernel_size=square_kernel,
                            strides=strides,
                            padding=padding)
class ICM(object):
    """Intrinsic Curiosity Module (ICM) built as a TensorFlow 1.x graph.

    Constructs a shared feature encoder phi(s), an optional inverse model
    (predicts the action from phi(s), phi(s')), and a forward model (predicts
    phi(s') from phi(s) and the action). The forward-model error serves as the
    intrinsic reward; parameters are trained with an MPI-synchronised Adam
    optimizer over the combined curiosity loss.
    """
    def __init__(self, ob_space, ac_space, max_grad_norm, beta, icm_lr_scale, idf):
        # :param ob_space: observation space; only .shape[0:3] is read here
        # :param max_grad_norm: global-norm clip value for gradients (may be None)
        # :param beta: weight between inverse loss (1-beta) and forward loss (beta)
        # :param idf: if True, build and train the inverse dynamics model as well
        # NOTE(review): ac_space and icm_lr_scale are accepted but never read in
        # this constructor (action count is hard-coded to 36 below) — confirm.
        sess = get_session()
        #TODO find a better way
        input_shape = [ob_space.shape[0], ob_space.shape[1], ob_space.shape[2]]
        # input_shape = ob_space
        print("ICM state Input shape ", np.shape(input_shape) , " ", input_shape)
        self.action_shape = 36
        self.idf=idf
        # Placeholders
        self.state_ = phi_state = tf.placeholder(tf.float32, [None, *input_shape], name="icm_state")
        self.next_state_ = phi_next_state = tf.placeholder(tf.float32, [None, *input_shape], name="icm_next_state")
        self.action_ = action = tf.placeholder(tf.float32, [None], name="icm_action")
        # self.R = rewards = tf.placeholder(tf.float32, shape=[None], name="maxR")
        with tf.variable_scope('icm_model'):
            # Feature encoding
            # Aka pass state and next_state to create phi(state), phi(next_state)
            # state --> phi(state)
            print("Feature Encodding of phi state with shape :: ",self.state_)
            phi_state = self.feature_encoding(self.state_)
            with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
                # next_state to phi(next_state)
                # AUTO_REUSE shares the encoder weights between s and s'.
                phi_next_state = self.feature_encoding(self.next_state_)
            # INVERSE MODEL
            if self.idf :
                pred_actions_logits, pred_actions_prob = self.inverse_model(phi_state, phi_next_state)
            # FORWARD MODEL
            pred_phi_next_state = self.forward_model(action, phi_state)
        # CALCULATE THE ICM LOSS
        # Inverse Loss LI
        # We calculate the cross entropy between our ât and at
        # Squeeze the labels (required)
        labels = tf.cast(action, tf.int32)
        print("prediction pred_actions_logits")
        if self.idf :
            self.inv_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_actions_logits, labels=labels),name="inverse_loss")
        # Foward Loss
        # LF = 1/2 || pred_phi_next_state - phi_next_state ||
        # TODO 0.5 * ?
        # forw_loss_axis keeps the per-sample mean (axis=-1) — this is what
        # calculate_intrinsic_reward feeds back as the per-step intrinsic reward.
        self.forw_loss_axis = tf.reduce_mean(tf.square(tf.subtract(pred_phi_next_state, phi_next_state)) , axis=-1 , name="forward_loss_axis")
        self.forw_loss = tf.reduce_mean(tf.square(tf.subtract(pred_phi_next_state, phi_next_state)) , name="forward_loss")
        # Todo predictor lr scale ?
        # ICM_LOSS = [(1 - beta) * LI + beta * LF ] * Predictor_Lr_scale
        if self.idf :
            self.icm_loss = ((1-beta) * self.inv_loss + beta * self.forw_loss)
        else :
            self.icm_loss = self.forw_loss
        ####
        # self.icm_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        # print("ICM var list ::: " , self.icm_var_list)
        ####
        #
        # if max_grad_norm is not None :
        #     t_icm_grads , _ = tf.clip_by_global_norm(self.icm_loss, constants['GRAD_NORM_CLIP'] )
        #     t_icm_grads_and_vars = list(zip(self.icm_loss , self.icm_var_list))
        #     print("\n\n\nit works \n\n\n")
        #
        # UPDATE THE PARAMETERS USING LOSS
        # 1. Get the model parameters
        # Collect every trainable variable created under the 'icm_model' scope.
        self.icm_params = tf.trainable_variables('icm_model') ## var_list same as
        ## testing phase
        self.predgrads = tf.gradients(self.icm_loss, self.icm_params)
        self.predgrads , _ = tf.clip_by_global_norm(self.predgrads ,max_grad_norm )
        self.pred_grads_and_vars = list(zip(self.predgrads, self.icm_params))
        ## testing phase
        # print("\n\nTrainable variables \n ",icm_params)
        # # 2. Build our trainer
        # MPI-aware Adam keeps gradients synchronised across workers.
        self.icm_trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=1e-3, epsilon=1e-5)
        # # 3. Calculate the gradients
        icm_grads_and_var = self.icm_trainer.compute_gradients(self.icm_loss, self.icm_params)
        # # t_grads_and_var = tf.gradients()
        icm_grads, icm_var = zip(*icm_grads_and_var)
        if max_grad_norm is not None:
            # # Clip the gradients (normalize)
            icm_grads, icm__grad_norm = tf.clip_by_global_norm(icm_grads, max_grad_norm)
            icm_grads_and_var= list(zip(icm_grads, icm_var))
        # # zip aggregate each gradient with parameters associated
        # # For instance zip(ABCD, xyza) => Ax, By, Cz, Da
        self._icm_train = self.icm_trainer.apply_gradients(icm_grads_and_var)
        if MPI.COMM_WORLD.Get_rank() == 0:
            print("Initialize")
            initialize()
        global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
        # print("GLOBAL VARIABLES", global_variables)
        # Broadcast the root worker's initial weights to all MPI workers.
        sync_from_root(sess, global_variables) #pylint: disable=E1101
    # We use batch normalization to do feature normalization as explained in the paper
    # using the universe head,
    def feature_encoding(self, x):
        """Encode an observation batch into a 256-d feature vector phi(x)
        via four conv+batchnorm+ELU stages, flatten, and a fully connected layer."""
        print("feature function called !!")
        x = tf.nn.elu(tf.layers.batch_normalization(conv2d(x, 8, 5, 4, "valid")))
        print(x)
        x = tf.nn.elu(tf.layers.batch_normalization(conv2d(x, 16, 3, 2, "valid")))
        print(x)
        x = tf.nn.elu(tf.layers.batch_normalization(conv2d(x, 32, 3, 2, "valid")))
        print(x)
        x = tf.nn.elu(tf.layers.batch_normalization(conv2d(x, 64, 3, 2, "valid")))
        print(x)
        x = tf.layers.flatten(x)
        x = tf.nn.elu(tf.contrib.layers.fully_connected(x, 256))
        return x
    # Inverse Model
    # Given phi(state) and phi(next_state) returns the predicted action ât
    """
    Parameters
    __________
    action: The real action taken by our agent
    phi_state: The feature representation of our state generated by our feature_encoding function.
    phi_next_state: The feature representation of our next_state generated by our feature_encoding function.
    returns pred_actions_logits: the logits and pred_actions_prob: the probability distribution of our actions
    """
    def inverse_model(self, phi_state, phi_next_state):
        # Concatenate phi(st) and phi(st+1)
        icm_inv_concatenate = tf.concat([phi_state, phi_next_state], 1)
        icm_inv_fc1 = tf.nn.relu(tf.layers.dense(icm_inv_concatenate, 256))
        # One logit per discrete action (self.action_shape of them).
        pred_actions_logits = tf.layers.dense(icm_inv_fc1, self.action_shape)
        pred_actions_prob = tf.nn.softmax(pred_actions_logits, dim=-1)
        return pred_actions_logits, pred_actions_prob
    # Foward Model
    # Given action and phi(st) must find pred_phi(st+1)
    """
    Parameters
    __________
    action: The action taken by our agent
    phi_state: The feature representation of our state generated by our feature_encoding function.
    phi_next_state: The feature representation of our next_state generated by our feature_encoding function.
    returns pred_phi_next_state: The feature representation prediction of our next_state.
    """
    def forward_model(self, action, phi_state):
        # Concatenate phi_state and action
        action = tf.expand_dims(action, axis=1) # Expand dimension to be able to concatenate
        icm_forw_concatenate = tf.concat(axis=1, values=[phi_state, action])
        # FC
        icm_forw_fc1 = tf.layers.dense(icm_forw_concatenate, 256)
        # FC (size of phi_state [1] aka the width) # size of 288
        # Output layer matches the feature width so the prediction lives in phi-space.
        icm_forw_pred_next_state = tf.layers.dense(icm_forw_fc1, phi_state.get_shape()[1].value)
        return icm_forw_pred_next_state
    # Calculate intrinsic reward
    """
    Parameters
    __________
    phi_next_state: The feature representation of our next_state generated by our feature_encoding function.
    pred_phi_next_state: The feature representation prediction of our next_state.
    returns intrinsic_reward: The intrinsic reward
    """
    def calculate_intrinsic_reward(self, state, next_state, action):
        # Runs the per-sample forward loss; the prediction error of the forward
        # model IS the intrinsic (curiosity) reward for each transition.
        # print("In the error function ")
        sess = tf.get_default_session()
        # print("passed states shape {} {} {} ".format(np.shape(state) , np.shape(next_state) , np.shape(action)))
        # passed states shape (2, 84, 84, 4) (2, 84, 84, 4) (2,)
        # print("action : {} , type {}".format(np.shape(action) , type(action)))
        nenvs = np.shape(state)[0]
        # print("nenvs ",nenvs)
        # tmp = []
        # for i in range(nenvs) :
        #     ac = [action[i]]
        #     tmp.append(sess.run(self.forw_loss,
        #             {self.state_: np.expand_dims(state[i,:,:,:], axis=0), self.next_state_: np.expand_dims(next_state[i,:,:,:],axis=0),
        #              self.action_: ac } ) )
            # print(" shape passed i {}, state {} , next_state {} , action _type {} , action {} ".
            #     format(i, np.shape(np.expand_dims(state[i,:,:,:] , axis=0)) , np.shape(next_state[i,:,:,:]) ,
            #         type(np.array(action[i] )) , np.shape([action[i]]) ) )
        # tmp = np.concatenate([sess.run(self.forw_loss,
        #     {self.state_: np.expand_dims(state[i,:,:,:], axis=0),
        #     self.next_state_: np.expand_dims(next_state[i,:,:,:],axis=0), self.action_: [action[i]]}) for i in range(nenvs)] , 0 )
        # print("tmp : ", np.shape(tmp) )
        error = sess.run(self.forw_loss_axis, {self.state_: state, self.next_state_: next_state, self.action_: action})
        # print("orignal error + error with axis -1 ")
        # print(list(zip(tmp,error)))
        # print("orignal Error ",error)
        # error = error * 0.5 #np.dot(error , 0.5)
        # print("Return error ",error)
        # Return intrinsic reward
        return error
    def train_curiosity_model(self, states , next_states , actions):# , rewards):
        """Run one optimisation step of the curiosity model on a batch of
        (state, next_state, action) transitions; returns the evaluated losses."""
        sess = tf.get_default_session()
        feed = {self.state_: states , self.next_state_ : next_states , self.action_ : actions }#, self.R :rewards }
        if self.idf :
            return sess.run((self.forw_loss, self.inv_loss, self.icm_loss, self._icm_train), feed_dict = feed)
        else :
            return sess.run((self.forw_loss, self.icm_loss, self._icm_train), feed_dict = feed)
    # pass
# pass
"""
Need implement train function
""" | [
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"baselines.common.tf_util.get_session",
"baselines.common.tf_util.initialize",
"tensorflow.get_variable_scope",
"numpy.shape",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.softmax",
"tensorflow.subtract",
"tensorflow.concat",
"te... | [((475, 601), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'inputs', 'filters': 'filters', 'kernel_size': '(kernel_size, kernel_size)', 'strides': 'strides', 'padding': 'padding'}), '(inputs=inputs, filters=filters, kernel_size=(kernel_size,\n kernel_size), strides=strides, padding=padding)\n', (491, 601), True, 'import tensorflow as tf\n'), ((840, 853), 'baselines.common.tf_util.get_session', 'get_session', ([], {}), '()\n', (851, 853), False, 'from baselines.common.tf_util import get_session, save_variables, load_variables\n'), ((1207, 1273), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, *input_shape]'], {'name': '"""icm_state"""'}), "(tf.float32, [None, *input_shape], name='icm_state')\n", (1221, 1273), True, 'import tensorflow as tf\n'), ((1319, 1390), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, *input_shape]'], {'name': '"""icm_next_state"""'}), "(tf.float32, [None, *input_shape], name='icm_next_state')\n", (1333, 1390), True, 'import tensorflow as tf\n'), ((1423, 1476), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""icm_action"""'}), "(tf.float32, [None], name='icm_action')\n", (1437, 1476), True, 'import tensorflow as tf\n'), ((2561, 2586), 'tensorflow.cast', 'tf.cast', (['action', 'tf.int32'], {}), '(action, tf.int32)\n', (2568, 2586), True, 'import tensorflow as tf\n'), ((4049, 4084), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""icm_model"""'], {}), "('icm_model')\n", (4071, 4084), True, 'import tensorflow as tf\n'), ((4168, 4212), 'tensorflow.gradients', 'tf.gradients', (['self.icm_loss', 'self.icm_params'], {}), '(self.icm_loss, self.icm_params)\n', (4180, 4212), True, 'import tensorflow as tf\n'), ((4242, 4295), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.predgrads', 'max_grad_norm'], {}), '(self.predgrads, max_grad_norm)\n', (4264, 4295), True, 'import tensorflow as tf\n'), ((4522, 4590), 
'baselines.common.mpi_adam_optimizer.MpiAdamOptimizer', 'MpiAdamOptimizer', (['MPI.COMM_WORLD'], {'learning_rate': '(0.001)', 'epsilon': '(1e-05)'}), '(MPI.COMM_WORLD, learning_rate=0.001, epsilon=1e-05)\n', (4538, 4590), False, 'from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer\n'), ((5388, 5446), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '""""""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='')\n", (5405, 5446), True, 'import tensorflow as tf\n'), ((5509, 5547), 'baselines.common.mpi_util.sync_from_root', 'sync_from_root', (['sess', 'global_variables'], {}), '(sess, global_variables)\n', (5523, 5547), False, 'from baselines.common.mpi_util import sync_from_root\n'), ((6198, 6218), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), '(x)\n', (6215, 6218), True, 'import tensorflow as tf\n'), ((6954, 6995), 'tensorflow.concat', 'tf.concat', (['[phi_state, phi_next_state]', '(1)'], {}), '([phi_state, phi_next_state], 1)\n', (6963, 6995), True, 'import tensorflow as tf\n'), ((7102, 7149), 'tensorflow.layers.dense', 'tf.layers.dense', (['icm_inv_fc1', 'self.action_shape'], {}), '(icm_inv_fc1, self.action_shape)\n', (7117, 7149), True, 'import tensorflow as tf\n'), ((7178, 7220), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['pred_actions_logits'], {'dim': '(-1)'}), '(pred_actions_logits, dim=-1)\n', (7191, 7220), True, 'import tensorflow as tf\n'), ((7874, 7904), 'tensorflow.expand_dims', 'tf.expand_dims', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (7888, 7904), True, 'import tensorflow as tf\n'), ((7982, 8027), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[phi_state, action]'}), '(axis=1, values=[phi_state, action])\n', (7991, 8027), True, 'import tensorflow as tf\n'), ((8065, 8107), 'tensorflow.layers.dense', 'tf.layers.dense', (['icm_forw_concatenate', '(256)'], {}), '(icm_forw_concatenate, 256)\n', (8080, 8107), True, 'import tensorflow as tf\n'), ((8791, 
8815), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8813, 8815), True, 'import tensorflow as tf\n'), ((10561, 10585), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (10583, 10585), True, 'import tensorflow as tf\n'), ((1041, 1062), 'numpy.shape', 'np.shape', (['input_shape'], {}), '(input_shape)\n', (1049, 1062), True, 'import numpy as np\n'), ((1575, 1605), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""icm_model"""'], {}), "('icm_model')\n", (1592, 1605), True, 'import tensorflow as tf\n'), ((4947, 4995), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['icm_grads', 'max_grad_norm'], {}), '(icm_grads, max_grad_norm)\n', (4969, 4995), True, 'import tensorflow as tf\n'), ((5272, 5297), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (5295, 5297), False, 'from mpi4py import MPI\n'), ((5348, 5360), 'baselines.common.tf_util.initialize', 'initialize', ([], {}), '()\n', (5358, 5360), False, 'from baselines.common.tf_util import initialize\n'), ((6241, 6282), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['x', '(256)'], {}), '(x, 256)\n', (6274, 6282), True, 'import tensorflow as tf\n'), ((7029, 7070), 'tensorflow.layers.dense', 'tf.layers.dense', (['icm_inv_concatenate', '(256)'], {}), '(icm_inv_concatenate, 256)\n', (7044, 7070), True, 'import tensorflow as tf\n'), ((9094, 9109), 'numpy.shape', 'np.shape', (['state'], {}), '(state)\n', (9102, 9109), True, 'import numpy as np\n'), ((2701, 2794), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'pred_actions_logits', 'labels': 'labels'}), '(logits=pred_actions_logits,\n labels=labels)\n', (2747, 2794), True, 'import tensorflow as tf\n'), ((2975, 3023), 'tensorflow.subtract', 'tf.subtract', (['pred_phi_next_state', 'phi_next_state'], {}), '(pred_phi_next_state, phi_next_state)\n', (2986, 3023), 
True, 'import tensorflow as tf\n'), ((3114, 3162), 'tensorflow.subtract', 'tf.subtract', (['pred_phi_next_state', 'phi_next_state'], {}), '(pred_phi_next_state, phi_next_state)\n', (3125, 3162), True, 'import tensorflow as tf\n'), ((1929, 1952), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (1950, 1952), True, 'import tensorflow as tf\n')] |
import numpy as np
from knn import KNN
############################################################################
# DO NOT MODIFY ABOVE CODES
############################################################################
# TODO: implement F1 score
def f1_score(real_labels, predicted_labels):
    """
    Compute the F1 score of a binary (0/1) prediction.

    Information on F1 score - https://en.wikipedia.org/wiki/F1_score
    :param real_labels: List[int] ground-truth labels (0 or 1)
    :param predicted_labels: List[int] predicted labels (0 or 1)
    :return: float F1 score in [0, 1]
    """
    assert len(real_labels) == len(predicted_labels)
    true_p = false_p = false_n = 0
    for real, pred in zip(real_labels, predicted_labels):
        if real == 1 and pred == 1:
            true_p += 1
        elif real == 0 and pred == 1:
            false_p += 1
        elif real == 1 and pred == 0:
            false_n += 1
    # F1 = 2*tp / (2*tp + fp + fn) is algebraically the harmonic mean of
    # precision and recall, but stays well-defined when either is undefined.
    # (The previous version crashed with ZeroDivisionError on inputs with only
    # false positives, and wrongly returned 1 for inputs with only false
    # negatives.)
    denominator = 2 * true_p + false_p + false_n
    if denominator == 0:
        # No positives anywhere: empty input counts as perfect agreement (1),
        # an all-true-negative input keeps the original convention of 0.
        return 1 if len(real_labels) == 0 else 0
    return 2 * true_p / denominator
class Distances:
    """Collection of stateless distance / similarity measures between two points.

    (Cleanup: removed unreachable statements after each return and the large
    blocks of commented-out alternatives; behavior is unchanged.)
    """

    @staticmethod
    def minkowski_distance(point1, point2):
        """
        Minkowski distance (L-p norm) with p = 3.

        Information on Minkowski distance - https://en.wikipedia.org/wiki/Minkowski_distance
        :param point1: List[float]
        :param point2: List[float]
        :return: float
        """
        p = 3
        diff = np.absolute(np.subtract(np.array(point1), np.array(point2)))
        return np.sum(diff ** p) ** (1 / p)

    @staticmethod
    def euclidean_distance(point1, point2):
        """
        Euclidean (L-2) distance.

        :param point1: List[float]
        :param point2: List[float]
        :return: float
        """
        diff = np.array(point1) - np.array(point2)
        return np.sqrt(np.sum(diff ** 2))

    @staticmethod
    def inner_product_distance(point1, point2):
        """
        Inner (dot) product of the two points.

        :param point1: List[float]
        :param point2: List[float]
        :return: float
        """
        return np.inner(np.array(point1), np.array(point2))

    @staticmethod
    def cosine_similarity_distance(point1, point2):
        """
        Cosine distance: 1 - cosine similarity.

        NOTE: division by zero if either point is the zero vector, matching
        the original behavior.
        :param point1: List[float]
        :param point2: List[float]
        :return: float
        """
        point1 = np.array(point1)
        point2 = np.array(point2)
        norm1 = np.sqrt(np.sum(point1 ** 2))
        norm2 = np.sqrt(np.sum(point2 ** 2))
        return 1 - np.dot(point1, point2) / (norm1 * norm2)

    @staticmethod
    def gaussian_kernel_distance(point1, point2):
        """
        Negative Gaussian (RBF) kernel: -exp(-0.5 * ||p1 - p2||^2).

        Identical points give -1 and far-apart points approach 0, so smaller
        values still mean "closer".
        :param point1: List[float]
        :param point2: List[float]
        :return: float
        """
        diff = np.array(point1) - np.array(point2)
        return -np.exp(-0.5 * np.sum(diff * diff))
class HyperparameterTuner:
def __init__(self):
self.best_k = None
self.best_distance_function = None
self.best_scaler = None
self.best_model = None
# TODO: find parameters with the best f1 score on validation dataset
def tuning_without_scaling(self, distance_funcs, x_train, y_train, x_val, y_val):
"""
In this part, you should try different distance function you implemented in part 1.1, and find the best k.
Use k range from 1 to 30 and increment by 2. Use f1-score to compare different models.
:param distance_funcs: dictionary of distance functions you must use to calculate the distance.
Make sure you loop over all distance functions for each data point and each k value.
You can refer to test.py file to see the format in which these functions will be
passed by the grading script
:param x_train: List[List[int]] training data set to train your KNN model
:param y_train: List[int] train labels to train your KNN model
:param x_val: List[List[int]] Validation data set will be used on your KNN predict function to produce
predicted labels and tune k and distance function.
:param y_val: List[int] validation labels
Find(tune) best k, distance_function and model (an instance of KNN) and assign to self.best_k,
self.best_distance_function and self.best_model respectively.
NOTE: self.best_scaler will be None
NOTE: When there is a tie, choose model based on the following priorities:
Then check distance function [euclidean > minkowski > gaussian > inner_prod > cosine_dist]
If they have same distance fuction, choose model which has a less k.
"""
# You need to assign the final values to these variables
self.best_k = None
self.best_distance_function = None
self.best_model = None
#
# dist_funcs_priority = {'euclidean' : 1,
# 'minkowski': 2,
# 'gaussian': 3,
# 'inner_prod': 4,
# 'cosine_dist': 5,}
best_f1 = 0
for selected_distance_func in distance_funcs:
for k in range(1, min(31, len(x_train) + 1), 2):
model = KNN(k, distance_funcs[selected_distance_func])
model.train(x_train, y_train)
y_val_dash = model.predict(x_val)
f1_score_value = f1_score(y_val, y_val_dash)
print (selected_distance_func, "-", f1_score_value)
if self.best_k == None:
self.best_k = k
# self.best_distance_function = distance_funcs[selected_distance_func]
self.best_distance_function = selected_distance_func
self.best_model = model
best_f1 = f1_score_value
elif f1_score_value > best_f1:
self.best_k = k
self.best_distance_function = selected_distance_func
self.best_model = model
best_f1 = f1_score_value
# elif f1_score_value == best_f1:
# current_best_dist_func = dist_funcs_priority[self.best_distance_function]
## maybe_best_dist_func = dist_funcs_priority[distance_funcs[selected_distance_func]]
# maybe_best_dist_func = dist_funcs_priority[selected_distance_func]
# if (maybe_best_dist_func < current_best_dist_func):
# self.best_k = k
## self.best_distance_function = distance_funcs[selected_distance_func]
# self.best_distance_function = selected_distance_func
# self.best_model = KNN(k, distance_funcs[selected_distance_func])
## print("zaala bc")
print("Best f1", best_f1, "Best Distance function", self.best_distance_function)
return
raise NotImplementedError
# # TODO: find parameters with the best f1 score on validation dataset, with normalized data
def tuning_with_scaling(self, distance_funcs, scaling_classes, x_train, y_train, x_val, y_val):
"""
This part is similar to Part 1.3 except that before passing your training and validation data to KNN model to
tune k and disrance function, you need to create the normalized data using these two scalers to transform your
data, both training and validation. Again, we will use f1-score to compare different models.
Here we have 3 hyperparameters i.e. k, distance_function and scaler.
:param distance_funcs: dictionary of distance funtions you use to calculate the distance. Make sure you
loop over all distance function for each data point and each k value.
You can refer to test.py file to see the format in which these functions will be
passed by the grading script
:param scaling_classes: dictionary of scalers you will use to normalized your data.
Refer to test.py file to check the format.
:param x_train: List[List[int]] training data set to train your KNN model
:param y_train: List[int] train labels to train your KNN model
:param x_val: List[List[int]] validation data set you will use on your KNN predict function to produce predicted
labels and tune your k, distance function and scaler.
:param y_val: List[int] validation labels
Find(tune) best k, distance_funtion, scaler and model (an instance of KNN) and assign to self.best_k,
self.best_distance_function, self.best_scaler and self.best_model respectively
NOTE: When there is a tie, choose model based on the following priorities:
For normalization, [min_max_scale > normalize];
Then check distance function [euclidean > minkowski > gaussian > inner_prod > cosine_dist]
If they have same distance function, choose model which has a less k.
"""
# You need to assign the final values to these variables
self.best_k = None
self.best_distance_function = None
self.best_scaler = None
self.best_model = None
self.scaling_classes_priority = [
'min_max_scale',
'normalize']
self.dist_funcs_priority = ['euclidean',
'minkowski',
'gaussian',
'inner_prod',
'cosine_dist']
best_f1 = 0
for scaling_class in self.scaling_classes_priority:
scaler = scaling_classes[scaling_class]()
x_train_scaled = scaler.__call__(x_train)
x_val_scaled = scaler.__call__(x_val)
for selected_distance_func in self.dist_funcs_priority:
for k in range(1, min(31, len(x_train) + 1), 2):
model = KNN(k, distance_funcs[selected_distance_func])
model.train(x_train_scaled, y_train)
y_val_dash = model.predict(x_val_scaled)
f1_score_value = f1_score(y_val, y_val_dash)
print (selected_distance_func, "-", f1_score_value)
if self.best_k == None:
self.best_k = k
# self.best_distance_function = distance_funcs[selected_distance_func]
self.best_distance_function = selected_distance_func
self.best_scaler = scaling_class
self.best_model = model
best_f1 = f1_score_value
elif f1_score_value > best_f1:
self.best_k = k
self.best_distance_function = selected_distance_func
self.best_scaler = scaling_class
self.best_model = model
best_f1 = f1_score_value
# elif f1_score_value == best_f1:
# if (self.scaling_classes_priority[scaling_class] < self.scaling_classes_priority[self.best_scaler]):
# self.best_k = k
# self.best_distance_function = selected_distance_func
# self.best_scaler = scaling_class
# self.best_model = model
# best_f1 = f1_score_value
# elif (self.dist_funcs_priority[selected_distance_func] < self.dist_funcs_priority[self.best_distance_function]):
# self.best_k = k
# # self.best_distance_function = distance_funcs[selected_distance_func]
# self.best_distance_function = selected_distance_func
# self.best_scaler = scaling_class
# self.best_model = model
# best_f1 = f1_score_value
#
# elif k < self.best_k:
# self.best_k = k
# self.best_distance_function = selected_distance_func
# self.best_scaler = scaling_class
# self.best_model = model
# best_f1 = f1_score_value
# elif f1_score_value == best_f1:
# current_best_dist_func = dist_funcs_priority[self.best_distance_function]
# # maybe_best_dist_func = dist_funcs_priority[distance_funcs[selected_distance_func]]
# maybe_best_dist_func = dist_funcs_priority[selected_distance_func]
# if (maybe_best_dist_func < current_best_dist_func):
# self.best_k = k
# # self.best_distance_function = distance_funcs[selected_distance_func]
# self.best_distance_function = selected_distance_func
# self.best_model = KNN(k, distance_funcs[selected_distance_func])
print ("best f1 score - ", best_f1)
return
raise NotImplementedError
class NormalizationScaler:
    """Scale every sample (row) to unit Euclidean (L2) length."""

    def __init__(self):
        pass

    def __call__(self, features):
        """
        Normalize features for every sample.

        Example:
            features = [[3, 4], [1, -1], [0, 0]]
            return [[0.6, 0.8], [0.707107, -0.707107], [0, 0]]

        :param features: List[List[float]]
        :return: List[List[float]]
        """
        # Kept as an attribute for backward compatibility with the original.
        self.features = np.array(features)
        # Per-row L2 norms; keepdims so the division broadcasts row-wise.
        norms = np.sqrt(np.sum(self.features * self.features, axis=1, keepdims=True))
        # All-zero rows have a zero norm: divide those rows by 1 instead so
        # they stay all-zero (same result as the original per-element check).
        safe_norms = np.where(norms == 0, 1, norms)
        normalized = self.features / safe_norms
        return normalized.tolist()
class MinMaxScaler:
    """
    Min-max feature scaling (https://en.wikipedia.org/wiki/Feature_scaling).
    The first __call__ is assumed to receive the training set: per-column
    min and max are computed then and reused for every later transform.
    Example:
        train_features = [[0, 10], [2, 0]]
        test_features = [[20, 1]]
        scaler1 = MinMaxScaler()
        scaler1(train_features)  # -> [[0, 1], [1, 0]]
        scaler1(test_features)   # -> [[10, 0.1]]
    """

    def __init__(self):
        # Column statistics are learned only on the first call.
        self.first_call = True
        self.col_max = 0
        self.col_min = 0

    def __call__(self, features):
        """
        Scale each column to [0, 1] using the stored training min/max.
        Example: [[2, -1], [-1, 5], [0, 0]] ->
                 [[1, 0], [0, 1], [0.333333, 0.16667]]
        :param features: List[List[float]]
        :return: List[List[float]]
        """
        # Kept as an attribute for backward compatibility with the original.
        self.features = np.array(features)
        if self.first_call:
            self.col_min = np.amin(self.features, axis=0)
            self.col_max = np.amax(self.features, axis=0)
            self.first_call = False
        span = self.col_max - self.col_min
        # Constant columns (span == 0) map to 1 -- same convention as the
        # original element-wise loop. The dummy span of 1 only guards the
        # division; its quotient is discarded by np.where.
        safe_span = np.where(span == 0, 1, span)
        scaled = np.where(span == 0, 1.0,
                          (self.features - self.col_min) / safe_span)
        return scaled.tolist()
| [
"numpy.sum",
"numpy.amin",
"numpy.subtract",
"numpy.multiply",
"numpy.amax",
"knn.KNN",
"numpy.array",
"numpy.reshape",
"numpy.inner",
"numpy.dot"
] | [((2508, 2524), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (2516, 2524), True, 'import numpy as np\n'), ((2542, 2558), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (2550, 2558), True, 'import numpy as np\n'), ((3231, 3247), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (3239, 3247), True, 'import numpy as np\n'), ((3265, 3281), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (3273, 3281), True, 'import numpy as np\n'), ((3583, 3599), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (3591, 3599), True, 'import numpy as np\n'), ((3617, 3633), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (3625, 3633), True, 'import numpy as np\n'), ((3650, 3674), 'numpy.inner', 'np.inner', (['point1', 'point2'], {}), '(point1, point2)\n', (3658, 3674), True, 'import numpy as np\n'), ((3922, 3938), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (3930, 3938), True, 'import numpy as np\n'), ((3956, 3972), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (3964, 3972), True, 'import numpy as np\n'), ((4485, 4501), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (4493, 4501), True, 'import numpy as np\n'), ((4519, 4535), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (4527, 4535), True, 'import numpy as np\n'), ((15536, 15554), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (15544, 15554), True, 'import numpy as np\n'), ((16415, 16490), 'numpy.reshape', 'np.reshape', (['norm_features', '(self.features.shape[0], self.features.shape[1])'], {}), '(norm_features, (self.features.shape[0], self.features.shape[1]))\n', (16425, 16490), True, 'import numpy as np\n'), ((18222, 18240), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (18230, 18240), True, 'import numpy as np\n'), ((19066, 19144), 'numpy.reshape', 'np.reshape', (['min_max_features', '(self.features.shape[0], self.features.shape[1])'], {}), '(min_max_features, 
(self.features.shape[0], self.features.shape[1]))\n', (19076, 19144), True, 'import numpy as np\n'), ((3305, 3335), 'numpy.sum', 'np.sum', (['((point1 - point2) ** 2)'], {}), '((point1 - point2) ** 2)\n', (3311, 3335), True, 'import numpy as np\n'), ((18411, 18441), 'numpy.amin', 'np.amin', (['self.features'], {'axis': '(0)'}), '(self.features, axis=0)\n', (18418, 18441), True, 'import numpy as np\n'), ((18471, 18501), 'numpy.amax', 'np.amax', (['self.features'], {'axis': '(0)'}), '(self.features, axis=0)\n', (18478, 18501), True, 'import numpy as np\n'), ((4087, 4109), 'numpy.dot', 'np.dot', (['point1', 'point2'], {}), '(point1, point2)\n', (4093, 4109), True, 'import numpy as np\n'), ((7021, 7067), 'knn.KNN', 'KNN', (['k', 'distance_funcs[selected_distance_func]'], {}), '(k, distance_funcs[selected_distance_func])\n', (7024, 7067), False, 'from knn import KNN\n'), ((11784, 11830), 'knn.KNN', 'KNN', (['k', 'distance_funcs[selected_distance_func]'], {}), '(k, distance_funcs[selected_distance_func])\n', (11787, 11830), False, 'from knn import KNN\n'), ((2593, 2620), 'numpy.subtract', 'np.subtract', (['point1', 'point2'], {}), '(point1, point2)\n', (2604, 2620), True, 'import numpy as np\n'), ((4119, 4138), 'numpy.sum', 'np.sum', (['(point1 ** 2)'], {}), '(point1 ** 2)\n', (4125, 4138), True, 'import numpy as np\n'), ((4152, 4171), 'numpy.sum', 'np.sum', (['(point2 ** 2)'], {}), '(point2 ** 2)\n', (4158, 4171), True, 'import numpy as np\n'), ((4576, 4621), 'numpy.multiply', 'np.multiply', (['(point1 - point2)', '(point1 - point2)'], {}), '(point1 - point2, point1 - point2)\n', (4587, 4621), True, 'import numpy as np\n'), ((16345, 16388), 'numpy.sum', 'np.sum', (['(self.features[i] * self.features[i])'], {}), '(self.features[i] * self.features[i])\n', (16351, 16388), True, 'import numpy as np\n')] |
import numpy as np
import math
from scipy.spatial.distance import mahalanobis
class homie_particle:
    """One particle in a FastSLAM-style filter for a differential-drive
    robot: a pose hypothesis (x, y, theta) plus a per-particle landmark map
    where each landmark is tracked by its own 2x2 mean/covariance estimate.
    Units appear to be centimetres (wheel base is given in cm)."""

    def __init__(self, xpos, ypos, orientation):
        self._x = xpos
        self._y = ypos
        self._theta = orientation # in radians wrt positive x-axis
        self._land_mu = [] # list of landmark mean positions, each element 1D-array with 2 elements
        self._land_cov = [] # list of landmark covariance (estimate of landmark position), 2D-array with 4 elements 2X2 vector
        self._dist_bet_wheels = 16 # cm distance between the right and left wheels
        self._id = None

    ### move the robot by rotation of left and right wheels
    def move(self, l_dist, r_dist, del_t):
        """Advance the pose given left/right wheel travel (cm) over del_t.

        Uses the Instantaneous-Center-of-Curvature model: the robot rotates
        about the ICC by omega_delt radians.
        NOTE(review): R divides by (v_r - v_l), so perfectly straight motion
        (l_dist == r_dist != 0) raises ZeroDivisionError -- confirm callers
        always pass slightly unequal wheel distances.
        """
        if l_dist != 0 or r_dist != 0:
            # Rotation accumulated over the interval (radians).
            omega_delt = (r_dist - l_dist)/self._dist_bet_wheels
            v_r = r_dist/del_t
            v_l = l_dist/del_t
            # Signed distance from robot centre to the ICC.
            R = (self._dist_bet_wheels/2)*((v_l + v_r)/(v_r - v_l))
            # Angle-sum expansions used to rotate the pose about the ICC.
            term1 = R*(math.sin(self._theta)*math.cos(omega_delt) + math.cos(self._theta)*math.sin(omega_delt))
            term2 = R*(math.sin(self._theta)*math.sin(omega_delt) - math.cos(self._theta)*math.cos(omega_delt))
            term3 = self._theta
            ICC_x = self._x - R*math.sin(self._theta)
            ICC_y = self._y + R*math.cos(self._theta)
            # update values
            self._x = ICC_x + term1
            self._y = ICC_y + term2
            self._theta = omega_delt + term3
            # ensure theta is between 0 and 2*pi
            # self.constrain_theta()
            # print(self._x, self._y, self._theta*(180/3.14159))

    def constrain_theta(self):
        """Wrap self._theta into [0, 2*pi) (currently unused, see move)."""
        if self._theta < 0:
            self._theta += (2*3.14159)
        if self._theta > (2*3.14159):
            self._theta -= (2*3.14159)

    ### assign weight to this robot based on recent sensor value
    # NOTE(review): mutable default `exclude_idxs=[]` is shared across calls;
    # safe only as long as no caller mutates it.
    def assign_weight(self, r, bearing, exclude_idxs=[]): # exclude idx for mutual exclusion
        """
        r being distance and bearing is angle to landmark

        Converts the (range, bearing-in-degrees) measurement to a world XY
        point, associates it with an existing landmark (or creates one),
        updates that landmark, and returns (importance_weight, landmark_idx).
        """
        self.clean_landmarks()
        bearing = np.deg2rad(bearing)
        sense_xy = self.polar_to_xy(r, bearing)
        # print(r, bearing)
        # print('wall location:'+str(sense_xy))
        landmark_idx = self.associate_sense(sense_xy, exclude_idxs=exclude_idxs)
        landmark_idx = self.update_landmark(sense_xy, landmark_idx, r) # will assign a landmark idx if none exists
        imp_weight = self.get_imp_weight(sense_xy, landmark_idx)
        return imp_weight, landmark_idx

    def polar_to_xy(self, r, bearing):
        """
        polar is (r, theta) pair

        Rotate the robot's heading vector by `bearing` (radians) and walk
        distance r along it to get the sensed point in world coordinates.
        """
        head_vec_x = math.cos(self._theta)
        head_vec_y = math.sin(self._theta)
        sense_vec_x = head_vec_x*math.cos(bearing) - head_vec_y*math.sin(bearing)
        sense_vec_y = head_vec_x*math.sin(bearing) + head_vec_y*math.cos(bearing)
        # print(self._theta*180/3.14159)
        # print(sense_vec_x, sense_vec_y)
        mark_x = self._x + r*sense_vec_x
        mark_y = self._y + r*sense_vec_y
        return np.array([mark_x, mark_y])

    def associate_sense(self, sense_xy, exclude_idxs=[]):
        """Return the index of the closest existing landmark (Mahalanobis
        distance under that landmark's covariance, gated at < 2), or None
        when nothing matches / no landmarks exist."""
        if len(self._land_mu) == 0: # if the particle has no landmarks
            return None
        else:
            min_dist = np.inf
            min_idx = None
            for qq in range(len(self._land_mu)):
                if qq not in exclude_idxs:
                    this_cov = self._land_cov[qq]
                    this_mu = self._land_mu[qq]
                    cov_inv = np.linalg.inv(this_cov)
                    this_dist = mahalanobis(sense_xy, this_mu, cov_inv)
                    if this_dist < min_dist:
                        min_dist = this_dist
                        min_idx = qq
            if min_dist < 2: # found an acceptable match to an existing landmark
                return min_idx
            else: # did not find any matching landmarks
                return None

    def update_landmark(self, sense_xy, landmark_idx, r):
        """Create a landmark at sense_xy (when landmark_idx is None) or run a
        Kalman update on the existing one; returns the landmark's index.

        Measurement noise is modelled as diag(r, r) -- presumably
        "uncertainty grows with range"; confirm against the sensor model.
        The observation model is the identity (the landmark position is
        observed directly), hence the np.identity(2) factors below.
        """
        obs_cov = np.array([np.array([r,0]),np.array([0,r])])
        if landmark_idx is None:
            self._land_mu.append(sense_xy)
            self._land_cov.append(obs_cov)
            return (len(self._land_mu) - 1)
        else:
            ## kalman update for belief of landmark location and covariance
            this_cov = self._land_cov[landmark_idx]
            this_mu = self._land_mu[landmark_idx]
            # Innovation: measurement minus predicted landmark position.
            y_k = sense_xy - this_mu
            # Innovation covariance S = H P H^T + R with H = I.
            s_k = np.matmul(np.matmul(np.identity(2), this_cov),
                            np.identity(2).transpose())+ obs_cov
            # Kalman gain K = P H^T S^-1.
            k_k = np.matmul(np.matmul(this_cov,np.identity(2).transpose()),
                            np.linalg.inv(s_k))
            # K @ y_k written out element-wise.
            k_k_y_k = np.array([k_k[0,0]*y_k[0] + k_k[0,1]*y_k[1], k_k[1,0]*y_k[0] + k_k[1,1]*y_k[1]])
            next_mu = this_mu + k_k_y_k
            # Posterior covariance (I - K H) P.
            next_cov = np.matmul((np.identity(2) - np.matmul(k_k, np.identity(2))),
                            this_cov)
            self._land_mu[landmark_idx] = next_mu
            self._land_cov[landmark_idx] = next_cov
            return landmark_idx

    def get_imp_weight(self, sense_xy, landmark_idx):
        """Importance weight: Gaussian likelihood of the measurement under
        the landmark's belief.
        NOTE(review): the normalizer sqrt(2*pi*det) differs from the exact
        2D-Gaussian constant 2*pi*sqrt(det); weights are presumably only
        compared relatively, so this may be intentional -- confirm.
        """
        this_mu = self._land_mu[landmark_idx]
        this_cov = self._land_cov[landmark_idx]
        # Debug trace for runaway covariances.
        if np.sum(np.abs(this_cov)) > 4000:
            print(this_cov)
        this_num = np.exp(-1*0.5*(mahalanobis(sense_xy, this_mu, np.linalg.inv(this_cov))**2))
        this_den = np.sqrt(2*3.14159*np.linalg.det(this_cov))
        this_weight = this_num/this_den
        return this_weight

    def clean_landmarks(self):
        """Drop landmarks whose covariance has blown up.
        NOTE(review): np.abs(self._land_cov) sums over the WHOLE list of
        covariances, not just entry idx -- once the total exceeds the
        threshold this pops landmarks one by one from the front until the
        total drops. Verify this aggregate behaviour is intended rather
        than a per-landmark check on self._land_cov[idx].
        """
        idx = 0
        while idx < len(self._land_cov):
            if np.sum(np.abs(self._land_cov)) > 40000:
                self._land_mu.pop(idx)
                self._land_cov.pop(idx)
            else:
                idx += 1
"numpy.abs",
"numpy.deg2rad",
"scipy.spatial.distance.mahalanobis",
"math.sin",
"numpy.identity",
"numpy.linalg.inv",
"numpy.array",
"math.cos",
"numpy.linalg.det"
] | [((2138, 2157), 'numpy.deg2rad', 'np.deg2rad', (['bearing'], {}), '(bearing)\n', (2148, 2157), True, 'import numpy as np\n'), ((2740, 2761), 'math.cos', 'math.cos', (['self._theta'], {}), '(self._theta)\n', (2748, 2761), False, 'import math\n'), ((2783, 2804), 'math.sin', 'math.sin', (['self._theta'], {}), '(self._theta)\n', (2791, 2804), False, 'import math\n'), ((3149, 3175), 'numpy.array', 'np.array', (['[mark_x, mark_y]'], {}), '([mark_x, mark_y])\n', (3157, 3175), True, 'import numpy as np\n'), ((4882, 4979), 'numpy.array', 'np.array', (['[k_k[0, 0] * y_k[0] + k_k[0, 1] * y_k[1], k_k[1, 0] * y_k[0] + k_k[1, 1] *\n y_k[1]]'], {}), '([k_k[0, 0] * y_k[0] + k_k[0, 1] * y_k[1], k_k[1, 0] * y_k[0] + k_k\n [1, 1] * y_k[1]])\n', (4890, 4979), True, 'import numpy as np\n'), ((2838, 2855), 'math.cos', 'math.cos', (['bearing'], {}), '(bearing)\n', (2846, 2855), False, 'import math\n'), ((2869, 2886), 'math.sin', 'math.sin', (['bearing'], {}), '(bearing)\n', (2877, 2886), False, 'import math\n'), ((2920, 2937), 'math.sin', 'math.sin', (['bearing'], {}), '(bearing)\n', (2928, 2937), False, 'import math\n'), ((2951, 2968), 'math.cos', 'math.cos', (['bearing'], {}), '(bearing)\n', (2959, 2968), False, 'import math\n'), ((4179, 4195), 'numpy.array', 'np.array', (['[r, 0]'], {}), '([r, 0])\n', (4187, 4195), True, 'import numpy as np\n'), ((4195, 4211), 'numpy.array', 'np.array', (['[0, r]'], {}), '([0, r])\n', (4203, 4211), True, 'import numpy as np\n'), ((4840, 4858), 'numpy.linalg.inv', 'np.linalg.inv', (['s_k'], {}), '(s_k)\n', (4853, 4858), True, 'import numpy as np\n'), ((5466, 5482), 'numpy.abs', 'np.abs', (['this_cov'], {}), '(this_cov)\n', (5472, 5482), True, 'import numpy as np\n'), ((5652, 5675), 'numpy.linalg.det', 'np.linalg.det', (['this_cov'], {}), '(this_cov)\n', (5665, 5675), True, 'import numpy as np\n'), ((1252, 1273), 'math.sin', 'math.sin', (['self._theta'], {}), '(self._theta)\n', (1260, 1273), False, 'import math\n'), ((1306, 1327), 'math.cos', 
'math.cos', (['self._theta'], {}), '(self._theta)\n', (1314, 1327), False, 'import math\n'), ((3635, 3658), 'numpy.linalg.inv', 'np.linalg.inv', (['this_cov'], {}), '(this_cov)\n', (3648, 3658), True, 'import numpy as np\n'), ((3691, 3730), 'scipy.spatial.distance.mahalanobis', 'mahalanobis', (['sense_xy', 'this_mu', 'cov_inv'], {}), '(sense_xy, this_mu, cov_inv)\n', (3702, 3730), False, 'from scipy.spatial.distance import mahalanobis\n'), ((5037, 5051), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (5048, 5051), True, 'import numpy as np\n'), ((5864, 5886), 'numpy.abs', 'np.abs', (['self._land_cov'], {}), '(self._land_cov)\n', (5870, 5886), True, 'import numpy as np\n'), ((987, 1008), 'math.sin', 'math.sin', (['self._theta'], {}), '(self._theta)\n', (995, 1008), False, 'import math\n'), ((1009, 1029), 'math.cos', 'math.cos', (['omega_delt'], {}), '(omega_delt)\n', (1017, 1029), False, 'import math\n'), ((1032, 1053), 'math.cos', 'math.cos', (['self._theta'], {}), '(self._theta)\n', (1040, 1053), False, 'import math\n'), ((1054, 1074), 'math.sin', 'math.sin', (['omega_delt'], {}), '(omega_delt)\n', (1062, 1074), False, 'import math\n'), ((1099, 1120), 'math.sin', 'math.sin', (['self._theta'], {}), '(self._theta)\n', (1107, 1120), False, 'import math\n'), ((1121, 1141), 'math.sin', 'math.sin', (['omega_delt'], {}), '(omega_delt)\n', (1129, 1141), False, 'import math\n'), ((1144, 1165), 'math.cos', 'math.cos', (['self._theta'], {}), '(self._theta)\n', (1152, 1165), False, 'import math\n'), ((1166, 1186), 'math.cos', 'math.cos', (['omega_delt'], {}), '(omega_delt)\n', (1174, 1186), False, 'import math\n'), ((4643, 4657), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4654, 4657), True, 'import numpy as np\n'), ((5069, 5083), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (5080, 5083), True, 'import numpy as np\n'), ((5585, 5608), 'numpy.linalg.inv', 'np.linalg.inv', (['this_cov'], {}), '(this_cov)\n', (5598, 5608), True, 'import numpy 
as np\n'), ((4698, 4712), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4709, 4712), True, 'import numpy as np\n'), ((4782, 4796), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4793, 4796), True, 'import numpy as np\n')] |
"""
Pre-processing functions for the ST Analysis packages.
Mainly function to aggregate datasets and filtering
functions (noisy spots and noisy genes)
"""
import numpy as np
import pandas as pd
import math
import os
from stanalysis.normalization import *
def merge_datasets(counts_tableA, counts_tableB, merging_action="SUM"):
    """ This function merges two ST datasets (matrix of counts)
    assuming that they are consecutive sections, aligned so that spots in
    the same position refer to the same tissue location and both tables
    have the same number of spots.
    The type of merging can be SUM (sum both counts) or AVG (average
    of both counts).
    It returns the merged matrix of counts for the common spots/genes.
    :param counts_tableA: a ST matrix of counts
    :param counts_tableB: a ST matrix of counts
    :param merging_action: Either SUM or AVG (for the merging of counts)
    :return: a ST matrix of counts with the merged counts (for common genes/spots)
    """
    merged_table = counts_tableA.copy()
    # Pair gene COLUMNS positionally and validate their names once, up
    # front. (The original zipped over the count values of each row, which
    # compared counts instead of gene names and indexed columns by counts;
    # dropping columns mid-iteration could also KeyError on later spots.)
    common_genes = []
    for geneA, geneB in zip(counts_tableA.columns, counts_tableB.columns):
        if geneA != geneB:
            print("Genes {} and {} dot not match and will be skipped".format(geneA, geneB))
            merged_table.drop(geneA, axis=1, inplace=True)
        else:
            common_genes.append(geneA)
    for indexA, indexB in zip(counts_tableA.index, counts_tableB.index):
        # Spot ids are "XxY" coordinate strings.
        tokens = indexA.split("x")
        assert(len(tokens) == 2)
        x_a = float(tokens[0])
        y_a = float(tokens[1])
        tokens = indexB.split("x")
        assert(len(tokens) == 2)
        x_b = float(tokens[0])
        y_b = float(tokens[1])
        # Spots further than 0.6 array units apart are not the same spot.
        if abs(x_a - x_b) > 0.6 or abs(y_a - y_b) > 0.6:
            print("Spots {} and {} dot not match and will be skipped".format(indexA, indexB))
            merged_table.drop(indexA, axis=0, inplace=True)
            continue
        for gene in common_genes:
            if merging_action == "SUM":
                merged_table.loc[indexA, gene] += counts_tableB.loc[indexB, gene]
            else:
                # AVG: (A + B) / 2
                merged_table.loc[indexA, gene] += counts_tableB.loc[indexB, gene]
                merged_table.loc[indexA, gene] /= 2
    return merged_table
def aggregate_datatasets(counts_table_files, plot_hist=False):
    """ This functions takes a list of ST count files (genes as columns
    and spots as rows) and merges them into one data frame using the genes
    as merging criteria.
    The dataset index is prepended to each spot name so spots can be traced
    back to their file of origin. Optionally, histograms of the reads/spot
    and genes/spot distributions can be generated for each dataset.
    :param counts_table_files: a list of file names of the datasets
    :param plot_hist: True if we want to generate the histogram plots
    :return: a Pandas data frame with the merged data frames
    """
    frames = []
    for i, counts_file in enumerate(counts_table_files):
        if not os.path.isfile(counts_file):
            raise IOError("Error parsing data frame", "Invalid input file")
        new_counts = pd.read_table(counts_file, sep="\t", header=0, index_col=0)
        # Plot reads/genes distributions per spot.
        # NOTE(review): `histogram` and `outdir` are not defined in this
        # module's visible scope -- presumably provided by the star import.
        if plot_hist:
            histogram(x_points=new_counts.sum(axis=1).values,
                      output=os.path.join(outdir, "hist_reads_{}.png".format(i)))
            histogram(x_points=(new_counts != 0).sum(axis=1).values,
                      output=os.path.join(outdir, "hist_genes_{}.png".format(i)))
        # Append dataset index to the spots (indexes) so they can be traced
        new_counts.index = ["{0}_{1}".format(i, spot) for spot in new_counts.index]
        frames.append(new_counts)
    # DataFrame.append was removed in pandas 2.0; concat keeps the union of
    # gene columns, filling entries missing from a dataset with NaN.
    counts = pd.concat(frames) if frames else pd.DataFrame()
    # Replace NaN and Inf by zeroes. The original called replace() without
    # inplace=True and discarded the result, so Inf values survived.
    counts.replace([np.inf, -np.inf], np.nan, inplace=True)
    counts.fillna(0.0, inplace=True)
    return counts
def remove_noise(counts, num_exp_genes=0.01, num_exp_spots=0.01, min_expression=1):
    """Filter out noisy spots and noisy genes from a counts matrix
    (genes as columns, spots as rows).

    Spots are kept when their number of expressed genes reaches the
    num_exp_genes quantile of the per-spot expressed-gene distribution.
    Genes are kept when they reach a count of at least min_expression in at
    least (num_exp_spots * number-of-kept-spots) spots.

    :param counts: a Pandas data frame with the counts
    :param num_exp_genes: quantile (0-1) of the expressed-genes-per-spot
                          distribution a spot must reach to be kept
    :param num_exp_spots: fraction (0-1) of spots in which a gene must be
                          expressed (>= min_expression) to be kept
    :param min_expression: minimum count for a gene to count as expressed
    :return: a new Pandas data frame with noisy spots/genes removed
    """
    total_spots = len(counts.index)
    total_genes = len(counts.columns)
    # --- Spot filter ---
    genes_per_spot = (counts != 0).sum(axis=1)
    spot_threshold = round(genes_per_spot.quantile(num_exp_genes))
    print("Number of expressed genes a spot must have to be kept " \
          "({}% of total expressed genes) {}".format(num_exp_genes, spot_threshold))
    counts = counts[genes_per_spot >= spot_threshold]
    print("Dropped {} spots".format(total_spots - len(counts.index)))
    # --- Gene filter (genes as rows after the transpose) ---
    genes_view = counts.transpose()
    gene_threshold = round(len(genes_view.columns) * num_exp_spots)
    print("Removing genes that are expressed in less than {} " \
          "spots with a count of at least {}".format(gene_threshold, min_expression))
    expressed_spots = (genes_view >= min_expression).sum(axis=1)
    genes_view = genes_view[expressed_spots >= gene_threshold]
    print("Dropped {} genes".format(total_genes - len(genes_view.index)))
    return genes_view.transpose()
def keep_top_genes(counts, num_genes_keep, criteria="Variance"):
    """ This function takes a Pandas data frame
    with ST data (genes as columns and spots as rows)
    and returns a new data frame where only the top
    genes are kept, ranked by variance or by total count.
    Genes at or above the num_genes_keep quantile of the chosen statistic
    are retained (so num_genes_keep is the fraction REMOVED).
    :param counts: a Pandas data frame with the counts
    :param num_genes_keep: fraction (0-1) of genes to remove
    :param criteria: the criteria used to select ("Variance" or "TopRanked")
    :raises RuntimeError: for an unknown criteria
    :return: a new Pandas data frame with only the top ranked genes.
    """
    # Spots as columns and genes as rows
    counts = counts.transpose()
    num_genes = len(counts.index)
    print("Removing {}% of genes based on the {}".format(num_genes_keep * 100, criteria))
    if criteria == "Variance":
        min_genes_spot_var = counts.var(axis=1).quantile(num_genes_keep)
        if math.isnan(min_genes_spot_var):
            print("Computed variance is NaN! Check your normalization factors..")
        else:
            print("Min normalized variance a gene must have over all spots " \
                  "to be kept ({0}% of total) {1}".format(num_genes_keep, min_genes_spot_var))
            counts = counts[counts.var(axis=1) >= min_genes_spot_var]
    elif criteria == "TopRanked":
        min_genes_spot_sum = counts.sum(axis=1).quantile(num_genes_keep)
        # BUGFIX: the original tested/filtered with the undefined name
        # min_genes_spot_var here, raising NameError on this branch.
        if math.isnan(min_genes_spot_sum):
            print("Computed sum is NaN! Check your normalization factors..")
        else:
            print("Min normalized total count a gene must have over all spots " \
                  "to be kept ({0}% of total) {1}".format(num_genes_keep, min_genes_spot_sum))
            counts = counts[counts.sum(axis=1) >= min_genes_spot_sum]
    else:
        # BUGFIX: was the misspelled (undefined) RunTimeError.
        raise RuntimeError("Error, incorrect criteria method\n")
    print("Dropped {} genes".format(num_genes - len(counts.index)))
    return counts.transpose()
def compute_size_factors(counts, normalization, scran_clusters=True):
    """ Helper function to compute per-spot normalization size factors.
    :param counts: a Pandas data frame (spots as rows, genes as columns)
    :param normalization: one of DESeq2, DESeq2Linear, DESeq2PseudoCount,
                          DESeq2SizeAdjusted, TMM, RLE, REL, RAW, Scran
    :param scran_clusters: forwarded to computeSumFactors for "Scran"
    :raises RuntimeError: for an unknown normalization name
    :return: the size factors (a Series/array per spot, or the scalar 1 for RAW)
    """
    counts = counts.transpose()
    # BUGFIX: the original used substring tests (`normalization in "DESeq2"`),
    # which accepted any substring such as "Seq2"; exact comparison instead.
    if normalization == "DESeq2":
        size_factors = computeSizeFactors(counts)
    elif normalization == "DESeq2Linear":
        size_factors = computeSizeFactorsLinear(counts)
    elif normalization == "DESeq2PseudoCount":
        size_factors = computeSizeFactors(counts + 1)
    elif normalization == "DESeq2SizeAdjusted":
        size_factors = computeSizeFactorsSizeAdjusted(counts)
    elif normalization == "TMM":
        size_factors = computeTMMFactors(counts)
    elif normalization == "RLE":
        size_factors = computeRLEFactors(counts)
    elif normalization == "REL":
        # Relative: total count per spot.
        size_factors = counts.sum(axis=0)
    elif normalization == "RAW":
        size_factors = 1
    elif normalization == "Scran":
        size_factors = computeSumFactors(counts, scran_clusters)
    else:
        # BUGFIX: was the misspelled (undefined) RunTimeError.
        raise RuntimeError("Error, incorrect normalization method\n")
    # Sanitize degenerate factors so downstream divisions stay finite.
    if np.isnan(size_factors).any() or np.isinf(size_factors).any():
        print("Warning: Computed size factors contained NaN or Inf."
              "\nThey will be replaced by 1.0!")
        size_factors[np.isnan(size_factors)] = 1.0
        size_factors[np.isinf(size_factors)] = 1.0
    if np.any(size_factors <= 0.0):
        print("Warning: Computed size factors contained zeroes or negative values."
              "\nThey will be replaced by 1.0!")
        size_factors[size_factors <= 0.0] = 1.0
    return size_factors
def normalize_data(counts, normalization, center=False, adjusted_log=False):
    """Normalize a counts matrix (genes as columns, spots as rows).

    Size factors are computed with the requested method and each spot's
    counts are divided by its factor (or log-adjusted instead when
    adjusted_log is True).

    :param counts: a Pandas data frame with the counts
    :param normalization: the normalization method to use
    :param center: if True the size factors are centered by their mean
    :param adjusted_log: return adjusted logged normalized counts if True
    (DESeq2, DESeq2Linear, DESeq2PseudoCount, DESeq2SizeAdjusted,
    RLE, REL, RAW, TMM, Scran)
    :return: a Pandas data frame with the normalized counts (genes as columns)
    """
    factors = compute_size_factors(counts, normalization)
    # All-ones factors (e.g. RAW) leave the counts untouched.
    if np.all(factors == 1.0):
        return counts
    # Work with spots as columns / genes as rows.
    transposed = counts.transpose()
    if center:
        factors = factors / np.mean(factors)
    if adjusted_log:
        normalized = logCountsWithFactors(transposed, factors)
    else:
        normalized = transposed / factors
    # Hand back the conventional orientation (genes as columns).
    return normalized.transpose()
def normalize_samples(counts, number_datasets):
    """ This function takes a data frame
    with ST data (genes as columns and spots as rows)
    composed of several datasets (each spot name is prefixed with its
    dataset index, "i_XxY"). It aggregates the counts of each gene per
    dataset, computes per-dataset normalization factors with DESeq, and
    divides every spot's counts by its dataset's factor.
    :param counts: a Pandas dataframe composed of several ST datasets
    :param number_datasets: the number of different datasets merged in the input data frame
    :return: the same dataframe as input with the counts normalized
    """
    # First store the aggregated gene counts for each dataset in a dictionary
    sample_counts = dict()
    tot_spots = counts.index
    # BUGFIX: was Python-2-only xrange().
    for i in range(number_datasets):
        spots_to_keep = [spot for spot in tot_spots if spot.startswith("{}_".format(i))]
        slice_counts = counts.loc[spots_to_keep]
        sample_counts[i] = slice_counts.sum(axis=0)
    # Now build up a data frame with the accumulated gene counts per sample
    per_sample_factors = pd.DataFrame(index=sample_counts.keys(), columns=counts.columns)
    # BUGFIX: was Python-2-only iteritems().
    for key, value in sample_counts.items():
        per_sample_factors.loc[key] = value
    # Replace NaN and Inf by zeroes. BUGFIX: the original discarded the
    # return value of replace() (no inplace), so Inf values survived.
    per_sample_factors.replace([np.inf, -np.inf], np.nan, inplace=True)
    per_sample_factors.fillna(0.0, inplace=True)
    # Spots are columns and genes are rows
    per_sample_factors = per_sample_factors.transpose()
    # Compute normalization factors for each dataset (sample) using DESeq
    per_sample_size_factors = computeSizeFactors(per_sample_factors)
    # One factor per sample: divide every gene count of each spot by the
    # factor of the dataset the spot belongs to.
    for spot in counts.index:
        # spot is i_XxY
        tokens = spot.split("x")
        assert(len(tokens) == 2)
        index = int(tokens[0].split("_")[0])
        factor = per_sample_size_factors[index]
        counts.loc[spot] = counts.loc[spot] / factor
    # Replace NaN and Inf by zeroes (same inplace fix as above).
    counts.replace([np.inf, -np.inf], np.nan, inplace=True)
    counts.fillna(0.0, inplace=True)
    return counts
"pandas.DataFrame",
"math.isnan",
"numpy.isinf",
"numpy.isnan",
"numpy.any",
"os.path.isfile",
"numpy.mean",
"pandas.read_table",
"numpy.all"
] | [((2883, 2897), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2895, 2897), True, 'import pandas as pd\n'), ((9304, 9331), 'numpy.any', 'np.any', (['(size_factors <= 0.0)'], {}), '(size_factors <= 0.0)\n', (9310, 9331), True, 'import numpy as np\n'), ((10353, 10380), 'numpy.all', 'np.all', (['(size_factors == 1.0)'], {}), '(size_factors == 1.0)\n', (10359, 10380), True, 'import numpy as np\n'), ((3122, 3181), 'pandas.read_table', 'pd.read_table', (['counts_file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(counts_file, sep='\\t', header=0, index_col=0)\n", (3135, 3181), True, 'import pandas as pd\n'), ((6934, 6964), 'math.isnan', 'math.isnan', (['min_genes_spot_var'], {}), '(min_genes_spot_var)\n', (6944, 6964), False, 'import math\n'), ((2996, 3023), 'os.path.isfile', 'os.path.isfile', (['counts_file'], {}), '(counts_file)\n', (3010, 3023), False, 'import os\n'), ((7418, 7448), 'math.isnan', 'math.isnan', (['min_genes_spot_var'], {}), '(min_genes_spot_var)\n', (7428, 7448), False, 'import math\n'), ((9216, 9238), 'numpy.isnan', 'np.isnan', (['size_factors'], {}), '(size_factors)\n', (9224, 9238), True, 'import numpy as np\n'), ((9267, 9289), 'numpy.isinf', 'np.isinf', (['size_factors'], {}), '(size_factors)\n', (9275, 9289), True, 'import numpy as np\n'), ((10590, 10611), 'numpy.mean', 'np.mean', (['size_factors'], {}), '(size_factors)\n', (10597, 10611), True, 'import numpy as np\n'), ((9015, 9037), 'numpy.isnan', 'np.isnan', (['size_factors'], {}), '(size_factors)\n', (9023, 9037), True, 'import numpy as np\n'), ((9047, 9069), 'numpy.isinf', 'np.isinf', (['size_factors'], {}), '(size_factors)\n', (9055, 9069), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
class QCriticFF(tf.keras.Model):
    """Feed-forward Q-value critic.

    Maps a (state, action) pair to a scalar Q estimate: the state passes
    through a layer-normalized hidden layer, the action is concatenated to
    that hidden representation, and a second layer-normalized hidden layer
    feeds the final linear Q output.
    """

    def __init__(self, state_shape, action_shape, name='critic'):
        """Create the critic's layers (variables are built lazily on first call).

        Args:
            state_shape: shape tuple of a single state observation.
            action_shape: shape tuple of a single action.
            name: TF name/variable scope for this model.
        """
        super(QCriticFF, self).__init__(name=name)
        self.state_shape = state_shape
        self.state_size = np.prod(state_shape)
        self.action_shape = action_shape
        self.action_size = np.prod(self.action_shape)
        # Hidden and output layers. The output kernel is initialized in a
        # small uniform range, keeping initial Q estimates near zero.
        glorot = tf.keras.initializers.glorot_uniform
        self.dense1 = tf.layers.Dense(128, kernel_initializer=glorot())
        self.dense1_activation = tf.keras.layers.Activation('relu')
        self.dense2 = tf.layers.Dense(64, kernel_initializer=glorot())
        self.dense2_activation = tf.keras.layers.Activation('relu')
        self.q_layer = tf.layers.Dense(
            1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))

    def call(self, inputs):
        """Return Q(state, action); `inputs` is a (state, action) tuple."""
        state, action = inputs
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            hidden = self.dense1_activation(
                tf.contrib.layers.layer_norm(self.dense1(state)))
            # The action enters the network only after the first hidden layer.
            hidden = tf.concat([hidden, action], axis=-1)
            hidden = self.dense2_activation(
                tf.contrib.layers.layer_norm(self.dense2(hidden)))
            q_value = self.q_layer(hidden)
        return q_value
| [
"tensorflow.random_uniform_initializer",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.initializers.glorot_uniform",
"numpy.prod"
] | [((260, 280), 'numpy.prod', 'np.prod', (['state_shape'], {}), '(state_shape)\n', (267, 280), True, 'import numpy as np\n'), ((349, 375), 'numpy.prod', 'np.prod', (['self.action_shape'], {}), '(self.action_shape)\n', (356, 375), True, 'import numpy as np\n'), ((528, 562), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (554, 562), True, 'import tensorflow as tf\n'), ((697, 731), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (723, 731), True, 'import tensorflow as tf\n'), ((923, 972), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {'reuse': 'tf.AUTO_REUSE'}), '(self.name, reuse=tf.AUTO_REUSE)\n', (940, 972), True, 'import tensorflow as tf\n'), ((1025, 1056), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['x'], {}), '(x)\n', (1053, 1056), True, 'import tensorflow as tf\n'), ((1115, 1146), 'tensorflow.concat', 'tf.concat', (['[x, action]'], {'axis': '(-1)'}), '([x, action], axis=-1)\n', (1124, 1146), True, 'import tensorflow as tf\n'), ((1194, 1225), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['x'], {}), '(x)\n', (1222, 1225), True, 'import tensorflow as tf\n'), ((455, 493), 'tensorflow.keras.initializers.glorot_uniform', 'tf.keras.initializers.glorot_uniform', ([], {}), '()\n', (491, 493), True, 'import tensorflow as tf\n'), ((624, 662), 'tensorflow.keras.initializers.glorot_uniform', 'tf.keras.initializers.glorot_uniform', ([], {}), '()\n', (660, 662), True, 'import tensorflow as tf\n'), ((792, 850), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.003)', 'maxval': '(0.003)'}), '(minval=-0.003, maxval=0.003)\n', (821, 850), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import os, sys
import tensorflow.compat.v1 as tf
import numpy as np
import pytest
import unittest
import shutil, tempfile
from tensorflow.python.tools.freeze_graph import freeze_graph
from tensorflow.tools.graph_transforms import TransformGraph
import coremltools
# local to pytest
from test_utils import generate_data, tf_transpose
DEBUG = False
def _parse_coreml_input_shapes(mlmodel):
return {x.name : list(x.type.multiArrayType.shape) for x in
mlmodel._spec.description.input}
def _parse_coreml_name_to_tf(coreml_name):
if coreml_name.endswith('__invar__'):
tf_name = coreml_name.replace('__invar__', '')
elif coreml_name.endswith('__outvar__'):
tf_name = coreml_name.replace('__outvar__', '')
else:
tf_name = coreml_name
return tf_name
class TFNetworkTest(unittest.TestCase):
    """Harness that runs a TF graph and its CoreML conversion side by side.

    Subclasses build a TensorFlow graph, then call `_test_tf_model` (graphs
    with variables; supports freezing and quantization) or
    `_test_tf_model_constant` (variable-free graphs) to convert the frozen
    graph with coremltools and compare the CoreML predictions against the
    TensorFlow session outputs element-wise.
    """
    @classmethod
    def setUpClass(self):
        """
        Set up the unit test by loading common utilities.
        """
        # Currently a no-op: the docstring above is the entire method body.
    def _get_tf_tensor_name(self, graph, name):
        """
        Convenience function to get the name of first output tensor of an op with name
        """
        return graph.get_operation_by_name(name).outputs[0].name
    def _simple_freeze(self, input_graph, input_checkpoint, output_graph, output_node_names):
        # output_node_names is a string of names separated by comma
        # Thin wrapper over TF's freeze_graph utility: folds the checkpoint
        # weights into the graph definition and writes the frozen graph file.
        freeze_graph(
            input_graph=input_graph,
            input_saver="",
            input_binary=True,
            input_checkpoint=input_checkpoint,
            output_node_names=output_node_names,
            restore_op_name="save/restore_all",
            filename_tensor_name="save/Const:0",
            output_graph=output_graph,
            clear_devices=True,
            initializer_nodes="")
    def _quantize_static_tf_model(self, logdir, model_path, output_names):
        """Quantize a frozen TF graph on disk; return the quantized model path.

        Applies the standard TransformGraph quantization pipeline
        (constant folding, batch-norm folding, weight and node quantization)
        and writes the result to `logdir`.
        """
        with open(model_path, 'rb') as f:
            serialized = f.read()
        gdef = tf.GraphDef()
        gdef.ParseFromString(serialized)
        tf.reset_default_graph()
        graph = tf.Graph()
        with graph.as_default() as g:
            transforms = [
                "add_default_attributes", "remove_nodes(op=Identity, op=CheckNumerics)",
                "fold_constants(ignore_errors=true)", "fold_batch_norms", "fold_old_batch_norms",
                "quantize_weights(minimum_size=1)", "quantize_nodes", "strip_unused_nodes",
                "sort_by_execution_order"
            ]
            transformed_graph_def = TransformGraph(gdef, [], output_names, transforms)
            tf.import_graph_def(transformed_graph_def, name='')
        tf.train.write_graph(graph, logdir, "./tf_quantized_frozen.pb", as_text=False)
        return os.path.join(logdir, 'tf_quantized_frozen.pb')
    def _test_tf_model(
            self,
            graph,
            input_shapes,
            output_node_names,
            data_mode='random',
            input_refs=None,
            delta=1e-2,
            use_cpu_only=False,
            graph_optimizations="freeze",  # one of ["freeze", "convert_variables_to_constants", None]
            quantize_tf_model=False,
            quantize_mlmodel=False,
            quantize_config={}):
        """
        Common entry to testing routine.
        graph - defined TensorFlow graph.
        input_shapes - dict str:shape for each input op (placeholder)
        output_node_names - output_node_names, a list of strings
        data_mode - auto-generated input vectors, can be 'random', 'zeros', 'ones', 'linear', etc.
        input_refs - a dictionary of reference input in tensorFlow axis order, each entry is str:shape.
            When using auto-generated input vectors, set input_refs to None.
        delta - maximum difference of normalized TensorFlow and CoreML outputs
        use_cpu_only - If True, instantiate and run CoreML model with CPU only
        graph_optimizations == "freeze" - Force TensorFlow graph to be frozen before converting.
        quantize_tf_model - If True, try to quantize TensorFlow model before converting
        quantize_mlmodel - If True, quantize the mlmodel after converting.
        quantize_config - Dictionary with test quantization parameters
        """
        # Some file processing: all artifacts go into a throwaway temp dir.
        model_dir = tempfile.mkdtemp()
        graph_def_file = os.path.join(model_dir, 'tf_graph.pb')
        checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
        static_model_file = os.path.join(model_dir, 'tf_static.pb')
        coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
        # add a saver (only needed for the freeze path, which restores from a
        # checkpoint)
        tf.reset_default_graph()
        if graph_optimizations == "freeze":
            with graph.as_default() as g:
                saver = tf.train.Saver()
        # Build the feed dict either from auto-generated data or from the
        # caller-provided reference inputs.
        if input_refs is None:
            feed_dict = {
                self._get_tf_tensor_name(graph, name): generate_data(input_shapes[name], data_mode)
                for name in input_shapes
            }
        else:
            feed_dict = {
                self._get_tf_tensor_name(graph, name): input_refs[name]
                for name in list(input_refs.keys())
            }
        with tf.Session(graph=graph) as sess:
            # initialize
            initializer_op = tf.global_variables_initializer()
            sess.run(initializer_op)
            # run the result
            fetches = [graph.get_operation_by_name(name).outputs[0] for name in output_node_names]
            result = sess.run(fetches, feed_dict=feed_dict)
            # save graph definition somewhere
            tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
            # save the weights if freezing is needed
            if not graph_optimizations:
                static_model_file = graph_def_file
            elif graph_optimizations == "freeze":
                saver.save(sess, checkpoint_file)
                self._simple_freeze(
                    input_graph=graph_def_file,
                    input_checkpoint=checkpoint_file,
                    output_graph=static_model_file,
                    output_node_names=",".join(output_node_names))
            else:
                # "convert_variables_to_constants" path: inline variables
                # without going through a checkpoint on disk.
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, graph.as_graph_def(), output_node_names)
                with tf.gfile.GFile(static_model_file, "wb") as f:
                    f.write(output_graph_def.SerializeToString())
        # if TF needs to be quantized, quantize the graph
        if quantize_tf_model:
            static_model_file = self._quantize_static_tf_model(
                model_dir, static_model_file, output_node_names)
        # convert to CoreML
        mlmodel = coremltools.converters.tensorflow.convert(
            static_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)
        # Quantize MLModel if needed
        if quantize_mlmodel:
            from coremltools.models.neural_network.quantization_utils import quantize_weights
            nbits = quantize_config['nbits']
            mode = quantize_config['mode']
            mlmodel = quantize_weights(mlmodel, nbits, quantization_mode=mode)
        if DEBUG:
            print('\n mlmodel description: \n')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(mlmodel.get_spec(), style='coding')
            mlmodel.save(coreml_model_file)
            print('\n mlmodel saved at %s' % coreml_model_file)
        coreml_input_names = [str(x) for x in mlmodel.input_description]
        coreml_input_shapes = _parse_coreml_input_shapes(mlmodel)
        # Transpose input data as CoreML requires. Inputs added by the
        # converter (no matching TF placeholder) are fed zeros.
        coreml_inputs = {}
        for name in coreml_input_names:
            tfop_name = _parse_coreml_name_to_tf(name)
            if tfop_name in input_shapes:
                coreml_inputs[name] = tf_transpose(
                    feed_dict[self._get_tf_tensor_name(graph, tfop_name)])
            else:
                coreml_inputs[name] = np.zeros(coreml_input_shapes[name])
        # Run predict in CoreML
        coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)
        for idx, out_name in enumerate(output_node_names):
            tf_out = result[idx]
            # Scalars are promoted to 1-element arrays so flatten()/indexing
            # below works uniformly.
            if len(tf_out.shape) == 0:
                tf_out = np.array([tf_out])
            tp = tf_out.flatten()
            # The converter may have renamed the output with an '__outvar__'
            # suffix; accept either spelling.
            if out_name in coreml_output:
                coreml_out = coreml_output[out_name]
            elif out_name+'__outvar__' in coreml_output:
                coreml_out = coreml_output[out_name+'__outvar__']
            else:
                self.assertTrue(False, 'CoreML output not found')
            cp = coreml_out.flatten()
            self.assertTrue(tf_out.shape == coreml_out.shape)
            # Compare values normalized by max(1, tf, coreml) so `delta` acts
            # as a relative tolerance for large magnitudes.
            for i in range(len(tp)):
                max_den = max(1.0, tp[i], cp[i])
                self.assertAlmostEqual(tp[i] / max_den, cp[i] / max_den, delta=delta)
        # Cleanup files - models on disk no longer useful
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
    def _test_tf_model_constant(
            self,
            graph,
            input_shapes,
            output_node_names,
            data_mode='random_zero_mean',
            delta=1e-2,
            use_cpu_only=False,
            validate_bool_only=False):
        """
        Common entry to testing routine for graphs that have no variables.
        Parameters
        ----------
        graph: tf.Graph()
            TensorFlow graph.
        input_shapes: dict [str : shape]
            Shapes for each input (placeholder).
        output_node_names: list of str
            Output tensor names.
        data_mode: str
            Data mode for the placeholder data generation.
        input_refs: a dictionary of reference input in tensorFlow axis order.
            Each entry is str:shape. When using auto-generated input vectors,
            set input_refs to None.
        delta: float
            Delta for error checking, default 1e-2.
        use_cpu_only: bool
            If true, force use CPU only, default False.
        validate_bool_only: bool
            If true, only validate it's zero or non-zero, otherwise, validate
            float values, default False.
        """
        model_dir = tempfile.mkdtemp()
        frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
        coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
        # Replace unknown (non-positive) dimensions with a fixed size of 10 so
        # concrete data can be generated.
        feed_input_shapes = { k : tuple([i if i > 0 else 10 for i in ashape]) for (k,ashape) in input_shapes.items()}
        feed_dict = {
            self._get_tf_tensor_name(graph, name): generate_data(feed_input_shapes[name], data_mode)
            for name in feed_input_shapes
        }
        with tf.Session(graph=graph) as sess:
            # initialize
            sess.run(tf.global_variables_initializer())
            # run the result; note every output of each named op is fetched,
            # not just the first.
            fetches = []
            for name in output_node_names:
                fetches += graph.get_operation_by_name(name).outputs
            result = sess.run(fetches, feed_dict=feed_dict)
            output_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,  # The session is used to retrieve the weights
                tf.get_default_graph().as_graph_def(
                ),  # The graph_def is used to retrieve the nodes
                output_node_names  # The output node names are used to select the useful nodes
            )
            with tf.gfile.GFile(frozen_model_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
        # convert to CoreML
        mlmodel = coremltools.converters.tensorflow.convert(
            frozen_model_file,
            inputs=input_shapes,
            outputs=output_node_names,
            use_cpu_only=use_cpu_only)
        if DEBUG:
            print('\n mlmodel description: \n')
            from coremltools.models.neural_network.printer import print_network_spec
            print_network_spec(mlmodel.get_spec(), style='coding')
            mlmodel.save(coreml_model_file)
            print('\n mlmodel saved at %s' % coreml_model_file)
        # Transpose input data as CoreML requires
        coreml_inputs = {
            name: tf_transpose(feed_dict[self._get_tf_tensor_name(graph, name)])
            for name in feed_input_shapes
        }
        # Run predict in CoreML
        coreml_output = mlmodel.predict(coreml_inputs, useCPUOnly=use_cpu_only)
        # `idx` walks the flat `result` list while the nested loops walk the
        # (op, output) pairs it was built from.
        idx = 0
        for node_name in output_node_names:
            num_outputs = len(graph.get_operation_by_name(node_name).outputs)
            # A Merge op reports two outputs but only the first carries data.
            if graph.get_operation_by_name(node_name).type == 'Merge':
                num_outputs = 1
            for out_id in range(num_outputs):
                tf_out = result[idx]
                if len(tf_out.shape) == 0:
                    tf_out = np.array([tf_out])
                tp = tf_out.flatten()
                out_name = node_name if num_outputs == 1 else node_name + '_' + str(out_id)
                coreml_out = coreml_output[out_name]
                cp = coreml_out.flatten()
                self.assertTrue(tf_out.shape == coreml_out.shape, msg=(tf_out.shape, 'vs.', coreml_out.shape))
                if validate_bool_only:
                    # Collapse values to booleans; only zero/non-zero matters.
                    cp = np.logical_and(cp, cp)
                for i in range(len(tp)):
                    max_den = max(1.0, tp[i], cp[i])
                    self.assertAlmostEqual(tp[i] / max_den, cp[i] / max_den, delta=delta)
                idx += 1
        # Cleanup files - models on disk no longer useful
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
        return mlmodel
class TFNetworkBatchTest(TFNetworkTest):
    """TFNetworkTest variant that expands a dynamic (None) batch dimension."""

    def _test_tf_model(
            self,
            graph,
            input_tensor_shapes,
            output_node_names,
            data_mode='random',
            delta=1e-2,
            use_cpu_only=False,
            graph_optimizations="freeze",
            quantize_tf_model=False,
            batch_sizes=None):
        """Run the base conversion test, once per requested batch size.

        graph: TensorFlow graph representing the model.
        input_tensor_shapes: dict of input op name -> tensor shape; a leading
            None dimension marks a variable batch size.
        output_node_names: list of output node names.
        graph_optimizations: one of ["freeze", "convert_variables_to_constants",
            None]; optimization applied to the TF graph before conversion.
        quantize_tf_model: if True, run the TF quantization utility first.
        batch_sizes: batch sizes to substitute for the None dimension; batch
            size 1 is always tested and is prepended when missing.
        """
        common_kwargs = dict(
            data_mode=data_mode,
            input_refs=None,
            delta=delta,
            use_cpu_only=use_cpu_only,
            graph_optimizations=graph_optimizations,
            quantize_tf_model=quantize_tf_model)
        has_dynamic_batch = any(
            len(shape) > 1 and shape[0] is None
            for shape in input_tensor_shapes.values())
        if not has_dynamic_batch:
            # Fully static shapes: a single pass-through run suffices.
            super(TFNetworkBatchTest, self)._test_tf_model(
                graph, input_tensor_shapes, output_node_names, **common_kwargs)
            return
        # Always include batch size 1, and keep it first.
        if not batch_sizes:
            batch_sizes = [1]
        elif 1 not in batch_sizes:
            batch_sizes = [1] + batch_sizes
        for batch in batch_sizes:
            resolved_shapes = {
                name: ([batch] + list(shape[1:]) if shape[0] is None else shape)
                for name, shape in input_tensor_shapes.items()
            }
            super(TFNetworkBatchTest, self)._test_tf_model(
                graph, resolved_shapes, output_node_names, **common_kwargs)
| [
"coremltools.models.neural_network.quantization_utils.quantize_weights",
"test_utils.generate_data",
"tensorflow.compat.v1.train.write_graph",
"tensorflow.compat.v1.Graph",
"shutil.rmtree",
"os.path.join",
"tensorflow.compat.v1.import_graph_def",
"tensorflow.compat.v1.global_variables_initializer",
... | [((1511, 1809), 'tensorflow.python.tools.freeze_graph.freeze_graph', 'freeze_graph', ([], {'input_graph': 'input_graph', 'input_saver': '""""""', 'input_binary': '(True)', 'input_checkpoint': 'input_checkpoint', 'output_node_names': 'output_node_names', 'restore_op_name': '"""save/restore_all"""', 'filename_tensor_name': '"""save/Const:0"""', 'output_graph': 'output_graph', 'clear_devices': '(True)', 'initializer_nodes': '""""""'}), "(input_graph=input_graph, input_saver='', input_binary=True,\n input_checkpoint=input_checkpoint, output_node_names=output_node_names,\n restore_op_name='save/restore_all', filename_tensor_name='save/Const:0',\n output_graph=output_graph, clear_devices=True, initializer_nodes='')\n", (1523, 1809), False, 'from tensorflow.python.tools.freeze_graph import freeze_graph\n'), ((2088, 2101), 'tensorflow.compat.v1.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2099, 2101), True, 'import tensorflow.compat.v1 as tf\n'), ((2152, 2176), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2174, 2176), True, 'import tensorflow.compat.v1 as tf\n'), ((2193, 2203), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2201, 2203), True, 'import tensorflow.compat.v1 as tf\n'), ((2765, 2843), 'tensorflow.compat.v1.train.write_graph', 'tf.train.write_graph', (['graph', 'logdir', '"""./tf_quantized_frozen.pb"""'], {'as_text': '(False)'}), "(graph, logdir, './tf_quantized_frozen.pb', as_text=False)\n", (2785, 2843), True, 'import tensorflow.compat.v1 as tf\n'), ((2859, 2905), 'os.path.join', 'os.path.join', (['logdir', '"""tf_quantized_frozen.pb"""'], {}), "(logdir, 'tf_quantized_frozen.pb')\n", (2871, 2905), False, 'import os, sys\n'), ((4415, 4433), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4431, 4433), False, 'import shutil, tempfile\n'), ((4459, 4497), 'os.path.join', 'os.path.join', (['model_dir', '"""tf_graph.pb"""'], {}), "(model_dir, 'tf_graph.pb')\n", (4471, 4497), False, 'import os, 
sys\n'), ((4524, 4564), 'os.path.join', 'os.path.join', (['model_dir', '"""tf_model.ckpt"""'], {}), "(model_dir, 'tf_model.ckpt')\n", (4536, 4564), False, 'import os, sys\n'), ((4593, 4632), 'os.path.join', 'os.path.join', (['model_dir', '"""tf_static.pb"""'], {}), "(model_dir, 'tf_static.pb')\n", (4605, 4632), False, 'import os, sys\n'), ((4661, 4708), 'os.path.join', 'os.path.join', (['model_dir', '"""coreml_model.mlmodel"""'], {}), "(model_dir, 'coreml_model.mlmodel')\n", (4673, 4708), False, 'import os, sys\n'), ((4740, 4764), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4762, 4764), True, 'import tensorflow.compat.v1 as tf\n'), ((6842, 6982), 'coremltools.converters.tensorflow.convert', 'coremltools.converters.tensorflow.convert', (['static_model_file'], {'inputs': 'input_shapes', 'outputs': 'output_node_names', 'use_cpu_only': 'use_cpu_only'}), '(static_model_file, inputs=\n input_shapes, outputs=output_node_names, use_cpu_only=use_cpu_only)\n', (6883, 6982), False, 'import coremltools\n'), ((9226, 9251), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (9240, 9251), False, 'import os, sys\n'), ((10514, 10532), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (10530, 10532), False, 'import shutil, tempfile\n'), ((10561, 10600), 'os.path.join', 'os.path.join', (['model_dir', '"""tf_frozen.pb"""'], {}), "(model_dir, 'tf_frozen.pb')\n", (10573, 10600), False, 'import os, sys\n'), ((10629, 10676), 'os.path.join', 'os.path.join', (['model_dir', '"""coreml_model.mlmodel"""'], {}), "(model_dir, 'coreml_model.mlmodel')\n", (10641, 10676), False, 'import os, sys\n'), ((11873, 12013), 'coremltools.converters.tensorflow.convert', 'coremltools.converters.tensorflow.convert', (['frozen_model_file'], {'inputs': 'input_shapes', 'outputs': 'output_node_names', 'use_cpu_only': 'use_cpu_only'}), '(frozen_model_file, inputs=\n input_shapes, outputs=output_node_names, use_cpu_only=use_cpu_only)\n', 
(11914, 12013), False, 'import coremltools\n'), ((13829, 13854), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (13843, 13854), False, 'import os, sys\n'), ((2641, 2691), 'tensorflow.tools.graph_transforms.TransformGraph', 'TransformGraph', (['gdef', '[]', 'output_names', 'transforms'], {}), '(gdef, [], output_names, transforms)\n', (2655, 2691), False, 'from tensorflow.tools.graph_transforms import TransformGraph\n'), ((2704, 2755), 'tensorflow.compat.v1.import_graph_def', 'tf.import_graph_def', (['transformed_graph_def'], {'name': '""""""'}), "(transformed_graph_def, name='')\n", (2723, 2755), True, 'import tensorflow.compat.v1 as tf\n'), ((5297, 5320), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5307, 5320), True, 'import tensorflow.compat.v1 as tf\n'), ((5384, 5417), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5415, 5417), True, 'import tensorflow.compat.v1 as tf\n'), ((5701, 5775), 'tensorflow.compat.v1.train.write_graph', 'tf.train.write_graph', (['sess.graph', 'model_dir', 'graph_def_file'], {'as_text': '(False)'}), '(sess.graph, model_dir, graph_def_file, as_text=False)\n', (5721, 5775), True, 'import tensorflow.compat.v1 as tf\n'), ((7298, 7354), 'coremltools.models.neural_network.quantization_utils.quantize_weights', 'quantize_weights', (['mlmodel', 'nbits'], {'quantization_mode': 'mode'}), '(mlmodel, nbits, quantization_mode=mode)\n', (7314, 7354), False, 'from coremltools.models.neural_network.quantization_utils import quantize_weights\n'), ((9265, 9289), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (9278, 9289), False, 'import shutil, tempfile\n'), ((10869, 10918), 'test_utils.generate_data', 'generate_data', (['feed_input_shapes[name]', 'data_mode'], {}), '(feed_input_shapes[name], data_mode)\n', (10882, 10918), False, 'from test_utils import generate_data, tf_transpose\n'), ((10985, 
11008), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10995, 11008), True, 'import tensorflow.compat.v1 as tf\n'), ((13868, 13892), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (13881, 13892), False, 'import shutil, tempfile\n'), ((4875, 4891), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4889, 4891), True, 'import tensorflow.compat.v1 as tf\n'), ((5005, 5049), 'test_utils.generate_data', 'generate_data', (['input_shapes[name]', 'data_mode'], {}), '(input_shapes[name], data_mode)\n', (5018, 5049), False, 'from test_utils import generate_data, tf_transpose\n'), ((8220, 8255), 'numpy.zeros', 'np.zeros', (['coreml_input_shapes[name]'], {}), '(coreml_input_shapes[name])\n', (8228, 8255), True, 'import numpy as np\n'), ((8526, 8544), 'numpy.array', 'np.array', (['[tf_out]'], {}), '([tf_out])\n', (8534, 8544), True, 'import numpy as np\n'), ((11064, 11097), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11095, 11097), True, 'import tensorflow.compat.v1 as tf\n'), ((11718, 11757), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['frozen_model_file', '"""wb"""'], {}), "(frozen_model_file, 'wb')\n", (11732, 11757), True, 'import tensorflow.compat.v1 as tf\n'), ((13105, 13123), 'numpy.array', 'np.array', (['[tf_out]'], {}), '([tf_out])\n', (13113, 13123), True, 'import numpy as np\n'), ((13526, 13548), 'numpy.logical_and', 'np.logical_and', (['cp', 'cp'], {}), '(cp, cp)\n', (13540, 13548), True, 'import numpy as np\n'), ((6465, 6504), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['static_model_file', '"""wb"""'], {}), "(static_model_file, 'wb')\n", (6479, 6504), True, 'import tensorflow.compat.v1 as tf\n'), ((11489, 11511), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (11509, 11511), True, 'import tensorflow.compat.v1 as tf\n')] |
"""
select_top
This file is a part of BdPy.
"""
__all__ = ['select_top']
import numpy as np
from .util import print_start_msg, print_finish_msg
def select_top(data, value, num, axis=0, verbose=True):
"""
Select top `num` features of `value` from `data`
Parameters
----------
data : array
Data matrix
value : array_like
Vector of values
num : int
Number of selected features
Returns
-------
selected_data : array
Selected data matrix
selected_index : array
Index of selected data
"""
if verbose:
print_start_msg()
num_elem = data.shape[axis]
sorted_index = np.argsort(value)[::-1]
rank = np.zeros(num_elem, dtype=np.int)
rank[sorted_index] = np.array(range(0, num_elem))
selected_index_bool = rank < num
if axis == 0:
selected_data = data[selected_index_bool, :]
selected_index = np.array(range(0, num_elem), dtype=np.int)[selected_index_bool]
elif axis == 1:
selected_data = data[:, selected_index_bool]
selected_index = np.array(range(0, num_elem), dtype=np.int)[selected_index_bool]
else:
raise ValueError('Invalid axis')
if verbose:
print_finish_msg()
return selected_data, selected_index
| [
"numpy.argsort",
"numpy.zeros"
] | [((709, 741), 'numpy.zeros', 'np.zeros', (['num_elem'], {'dtype': 'np.int'}), '(num_elem, dtype=np.int)\n', (717, 741), True, 'import numpy as np\n'), ((673, 690), 'numpy.argsort', 'np.argsort', (['value'], {}), '(value)\n', (683, 690), True, 'import numpy as np\n')] |
"""Implements a GymAdapter that converts Gym envs into SoftlearningEnv."""
import numpy as np
import gym
from gym import spaces, wrappers
from .softlearning_env import SoftlearningEnv
from softlearning.environments.gym import register_environments
from softlearning.environments.gym.wrappers import NormalizeActionWrapper
from collections import defaultdict
def parse_domain_task(gym_id):
    """Split a gym env id at its first dash into (domain, task).

    E.g. 'HalfCheetah-Custom-v0' -> ('HalfCheetah', 'Custom-v0'); an id with
    no dash yields an empty task string.
    """
    domain, _, task = gym_id.partition('-')
    return domain, task
# Register the project's custom gym environments at import time and index
# their task names by domain.
CUSTOM_GYM_ENVIRONMENT_IDS = register_environments()
CUSTOM_GYM_ENVIRONMENTS = defaultdict(list)
for gym_id in CUSTOM_GYM_ENVIRONMENT_IDS:
    domain, task = parse_domain_task(gym_id)
    CUSTOM_GYM_ENVIRONMENTS[domain].append(task)
# Freeze to a plain dict so lookups of unknown domains raise KeyError rather
# than silently creating empty entries.
CUSTOM_GYM_ENVIRONMENTS = dict(CUSTOM_GYM_ENVIRONMENTS)
# Build the same domain -> tasks index over everything already registered
# with gym itself.
GYM_ENVIRONMENT_IDS = tuple(gym.envs.registry.env_specs.keys())
GYM_ENVIRONMENTS = defaultdict(list)
for gym_id in GYM_ENVIRONMENT_IDS:
    domain, task = parse_domain_task(gym_id)
    GYM_ENVIRONMENTS[domain].append(task)
GYM_ENVIRONMENTS = dict(GYM_ENVIRONMENTS)
class GymAdapter(SoftlearningEnv):
    """Adapter that implements the SoftlearningEnv for Gym envs."""
    def __init__(self,
                 domain,
                 task,
                 *args,
                 env=None,
                 normalize=True,
                 observation_keys=None,
                 unwrap_time_limit=True,
                 **kwargs):
        """Wrap an existing gym env, or build one from a (domain, task) pair.

        Exactly one of the two modes is valid: either `env` is given and
        `domain`/`task` are None, or `env` is None and both `domain` and
        `task` name the environment to construct as "{domain}-{task}".
        """
        assert not args, (
            "Gym environments don't support args. Use kwargs instead.")
        self.normalize = normalize
        self.observation_keys = observation_keys
        self.unwrap_time_limit = unwrap_time_limit
        # Name-mangled call into the Serializable base: snapshots the
        # constructor's local variables (i.e. the init arguments) so the
        # adapter can be re-instantiated later. NOTE(review): because
        # `locals()` is captured, local variable names here are part of the
        # behavior -- do not rename them.
        self._Serializable__initialize(locals())
        super(GymAdapter, self).__init__(domain, task, *args, **kwargs)
        if env is None:
            assert (domain is not None and task is not None), (domain, task)
            env_id = f"{domain}-{task}"
            env = gym.envs.make(env_id, **kwargs)
        else:
            assert domain is None and task is None, (domain, task)
        if isinstance(env, wrappers.TimeLimit) and unwrap_time_limit:
            # Remove the TimeLimit wrapper that sets 'done = True' when
            # the time limit specified for each environment has been passed and
            # therefore the environment is not Markovian (terminal condition
            # depends on time rather than state).
            env = env.env
        if isinstance(env.observation_space, spaces.Dict):
            # Default to all observation keys when none were requested.
            observation_keys = (
                observation_keys or list(env.observation_space.spaces.keys()))
        if normalize:
            env = NormalizeActionWrapper(env)
        self._env = env

    @property
    def observation_space(self):
        # Pass-through of the wrapped env's observation space.
        observation_space = self._env.observation_space
        return observation_space

    @property
    def active_observation_shape(self):
        """Shape for the active observation based on observation_keys."""
        if not isinstance(self._env.observation_space, spaces.Dict):
            return super(GymAdapter, self).active_observation_shape
        observation_keys = (
            self.observation_keys
            or list(self._env.observation_space.spaces.keys()))
        # Flattened size of the concatenation of the selected sub-spaces.
        active_size = sum(
            np.prod(self._env.observation_space.spaces[key].shape)
            for key in observation_keys)
        active_observation_shape = (active_size, )
        return active_observation_shape

    def convert_to_active_observation(self, observation):
        """Concatenate the selected keys of a dict observation into one array.

        Non-dict observation spaces are returned unchanged.
        """
        if not isinstance(self._env.observation_space, spaces.Dict):
            return observation
        observation_keys = (
            self.observation_keys
            or list(self._env.observation_space.spaces.keys()))
        observation = np.concatenate([
            observation[key] for key in observation_keys
        ], axis=-1)
        return observation

    @property
    def action_space(self, *args, **kwargs):
        action_space = self._env.action_space
        # Downstream code assumes a flat (1-D) action space.
        if len(action_space.shape) > 1:
            raise NotImplementedError(
                "Action space ({}) is not flat, make sure to check the"
                " implemenation.".format(action_space))
        return action_space

    def step(self, action, *args, **kwargs):
        # TODO(hartikainen): refactor this to always return an OrderedDict,
        # such that the observations for all the envs is consistent. Right now
        # some of the gym envs return np.array whereas others return dict.
        #
        # Something like:
        # observation = OrderedDict()
        # observation['observation'] = env.step(action, *args, **kwargs)
        # return observation
        return self._env.step(action, *args, **kwargs)

    @property
    def is_multiworld_env(self):
        # Multiworld envs expose a batched `compute_rewards` on the inner env.
        return hasattr(self._env.env, 'compute_rewards')

    def compute_reward(self,
                       achieved_goal=None,
                       desired_goal=None,
                       info=None,
                       actions=None,
                       observations=None):
        """Delegate reward computation to the wrapped env.

        Multiworld envs take (actions, observations); goal-based gym envs
        take (achieved_goal, desired_goal, info).
        """
        if self.is_multiworld_env:
            return self._env.env.compute_rewards(actions, observations)[0]
        else:
            return self._env.compute_reward(achieved_goal, desired_goal, info)

    def reset(self, *args, **kwargs):
        return self._env.reset(*args, **kwargs)

    def render(self, *args, **kwargs):
        return self._env.render(*args, **kwargs)

    def close(self, *args, **kwargs):
        return self._env.close(*args, **kwargs)

    def seed(self, *args, **kwargs):
        return self._env.seed(*args, **kwargs)

    @property
    def unwrapped(self):
        return self._env.unwrapped

    def get_param_values(self, *args, **kwargs):
        raise NotImplementedError

    def set_param_values(self, *args, **kwargs):
        raise NotImplementedError
| [
"gym.envs.make",
"softlearning.environments.gym.register_environments",
"gym.envs.registry.env_specs.keys",
"numpy.prod",
"collections.defaultdict",
"softlearning.environments.gym.wrappers.NormalizeActionWrapper",
"numpy.concatenate"
] | [((579, 602), 'softlearning.environments.gym.register_environments', 'register_environments', ([], {}), '()\n', (600, 602), False, 'from softlearning.environments.gym import register_environments\n'), ((629, 646), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (640, 646), False, 'from collections import defaultdict\n'), ((925, 942), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (936, 942), False, 'from collections import defaultdict\n'), ((870, 904), 'gym.envs.registry.env_specs.keys', 'gym.envs.registry.env_specs.keys', ([], {}), '()\n', (902, 904), False, 'import gym\n'), ((3821, 3892), 'numpy.concatenate', 'np.concatenate', (['[observation[key] for key in observation_keys]'], {'axis': '(-1)'}), '([observation[key] for key in observation_keys], axis=-1)\n', (3835, 3892), True, 'import numpy as np\n'), ((1997, 2028), 'gym.envs.make', 'gym.envs.make', (['env_id'], {}), '(env_id, **kwargs)\n', (2010, 2028), False, 'import gym\n'), ((2698, 2725), 'softlearning.environments.gym.wrappers.NormalizeActionWrapper', 'NormalizeActionWrapper', (['env'], {}), '(env)\n', (2720, 2725), False, 'from softlearning.environments.gym.wrappers import NormalizeActionWrapper\n'), ((3322, 3376), 'numpy.prod', 'np.prod', (['self._env.observation_space.spaces[key].shape'], {}), '(self._env.observation_space.spaces[key].shape)\n', (3329, 3376), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .mask_rcnn import MaskRCNNAdapter
from ..config import StringField, NumberField
from ..representation import TextDetectionPrediction
class MaskRCNNWithTextAdapter(MaskRCNNAdapter):
    """Adapter for a Mask R-CNN model that additionally predicts text per instance.

    Converts raw network outputs (classes, scores, boxes, raw masks, texts)
    into ``TextDetectionPrediction`` results: instances are filtered by score
    and non-empty text, masks are resized/pasted into image space, and each
    mask is reduced to a minimum-area rotated rectangle.
    """
    __provider__ = 'mask_rcnn_with_text'

    @classmethod
    def parameters(cls):
        """Extend the base adapter configuration with text-model output names."""
        parameters = super().parameters()
        parameters.update({
            'classes_out': StringField(
                description="Name of output layer with information about classes.",
                optional=False
            ),
            'scores_out': StringField(
                description="Name of output layer with bbox scores.",
                optional=True
            ),
            'boxes_out': StringField(
                description="Name of output layer with bboxes.",
                optional=False
            ),
            'raw_masks_out': StringField(
                description='Name of output layer with raw instances masks.',
                optional=False
            ),
            'texts_out': StringField(
                description='Name of output layer with texts.',
                optional=False
            ),
            'confidence_threshold': NumberField(
                description='Confidence threshold that is used to filter out detected instances.',
                optional=False
            ),
        })
        return parameters

    def configure(self):
        """Read configured output-layer names and pick the mask decoding routine."""
        self.classes_out = self.get_value_from_config('classes_out')
        self.scores_out = self.get_value_from_config('scores_out')
        self.boxes_out = self.get_value_from_config('boxes_out')
        # NOTE(review): 'num_detections_out' is not declared in parameters()
        # above -- presumably it comes from the base adapter; confirm.
        self.num_detections_out = self.get_value_from_config('num_detections_out')
        self.raw_masks_out = self.get_value_from_config('raw_masks_out')
        self.texts_out = self.get_value_from_config('texts_out')
        self.confidence_threshold = self.get_value_from_config('confidence_threshold')
        # A separate scores output selects the legacy mask decoding path.
        self.mask_processor = self.mask_to_result if not self.scores_out else self.mask_to_result_old
        self.outputs_verified = False

    def select_output_blob(self, outputs):
        """Resolve configured output names against the actual network outputs."""
        super().select_output_blob(outputs)
        self.texts_out = self.check_output_name(self.texts_out, outputs)

    def process(self, raw, identifiers, frame_meta):
        """Turn raw model outputs into per-image TextDetectionPrediction objects.

        :param raw: raw network output (format handled by _extract_predictions)
        :param identifiers: dataset identifiers, one per image in the batch
        :param frame_meta: per-image metadata with 'scale_x', 'scale_y',
            'image_size'
        :return: list of TextDetectionPrediction
        """
        raw_outputs = self._extract_predictions(raw, frame_meta)
        # NOTE(review): nothing here sets outputs_verified to True, so output
        # verification presumably happens in the base class -- confirm.
        if not self.outputs_verified:
            self.select_output_blob(raw_outputs)
        classes = raw_outputs[self.classes_out]
        # Keep only valid detections: either class > 0 (separate scores output)
        # or score > 0 (score packed as 5th column of the boxes output).
        if self.scores_out:
            valid_detections_mask = classes > 0
            scores = raw_outputs[self.scores_out][valid_detections_mask]
        else:
            scores = raw_outputs[self.boxes_out][:, 4]
            valid_detections_mask = scores > 0
            scores = scores[valid_detections_mask]
        classes = classes[valid_detections_mask].astype(np.uint32)
        boxes = raw_outputs[self.boxes_out][valid_detections_mask, :4]
        raw_masks = raw_outputs[self.raw_masks_out][valid_detections_mask]
        texts = raw_outputs[self.texts_out][valid_detections_mask]
        # Drop low-confidence instances and instances with empty text.
        confidence_filter = scores > self.confidence_threshold
        classes = classes[confidence_filter]
        boxes = boxes[confidence_filter]
        texts = texts[confidence_filter]
        raw_masks = raw_masks[confidence_filter]
        text_filter = texts != ''
        classes = classes[text_filter]
        boxes = boxes[text_filter]
        texts = texts[text_filter]
        raw_masks = raw_masks[text_filter]
        results = []
        for identifier, image_meta in zip(identifiers, frame_meta):
            im_scale_x, im_scale_y = image_meta['scale_x'], image_meta['scale_y']
            img_h, img_w = image_meta['image_size'][:2]
            # NOTE(review): boxes/texts are shared across the whole batch and
            # boxes is rescaled in place on every loop iteration, so this only
            # behaves correctly for a single image per batch -- confirm.
            boxes[:, :4] /= np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y])
            # Clamp x coordinates (even columns) and y coordinates (odd columns)
            # to the image bounds.
            boxes[:, 0:4:2] = np.clip(boxes[:, 0:4:2], 0, img_w - 1)
            boxes[:, 1:4:2] = np.clip(boxes[:, 1:4:2], 0, img_h - 1)
            segms = self.mask_processor(
                boxes,
                classes,
                raw_masks,
                num_classes=1,
                mask_thr_binary=0.5,
                img_size=(img_h, img_w)
            )
            rectangles = self.masks_to_rects(segms[0])
            results.append(
                TextDetectionPrediction(identifier, points=rectangles, description=texts))
        return results

    @staticmethod
    def masks_to_rects(masks):
        """Reduce each binary mask to the 4 corner points of its min-area rect."""
        rects = []
        for mask in masks:
            decoded_mask = mask.astype(np.uint8)
            # [-2] selects the contours element for both the 3-tuple (OpenCV 3)
            # and 2-tuple (OpenCV 4) return signatures of findContours.
            contours = cv2.findContours(decoded_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
            # Take the contour with the largest area.
            contour = sorted(contours, key=lambda x: -cv2.contourArea(x))[0]
            xys = cv2.boxPoints(cv2.minAreaRect(contour))
            rects.append(xys)
        return rects

    @staticmethod
    def mask_to_result(det_bboxes,
                       det_labels,
                       det_masks,
                       num_classes,
                       mask_thr_binary=0.5,
                       img_size=None):
        """Paste raw instance masks into image space via an affine warp.

        :param det_bboxes: array of boxes; only the first 4 columns are used
        :param det_labels: class label per detection (indexes cls_masks)
        :param det_masks: raw mask per detection
        :param num_classes: number of per-class mask buckets to create
        :param mask_thr_binary: threshold for binarizing the warped mask
        :param img_size: (height, width) of the target image
        :return: list of per-class lists of binary uint8 masks
        """
        masks = det_masks
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        cls_masks = [[] for _ in range(num_classes)]
        for bbox, label, mask in zip(bboxes, labels, masks):
            x0, y0, x1, y1 = bbox
            # Map three mask corners onto the box corners; the 0.5 shift moves
            # from pixel-index to pixel-center coordinates.
            src_points = np.float32([[0, 0], [0, mask.shape[0]], [mask.shape[1], mask.shape[0]]]) - 0.5
            dst_points = np.float32([[x0, y0], [x0, y1], [x1, y1]]) - 0.5
            transform_matrix = cv2.getAffineTransform(src_points, dst_points)
            # img_size is (height, width); cv2.warpAffine wants (width, height).
            mask = cv2.warpAffine(mask, transform_matrix, img_size[::-1])
            mask = (mask >= mask_thr_binary).astype(np.uint8)
            cls_masks[label].append(mask)
        return cls_masks

    @staticmethod
    def mask_to_result_old(det_bboxes,
                           det_labels,
                           det_masks,
                           num_classes,
                           mask_thr_binary=0.5,
                           img_size=None):
        """Legacy mask decoding: resize each raw mask into its (expanded) box.

        :param det_bboxes: array of boxes (x0, y0, x1, y1 per row)
        :param det_labels: class label per detection, used to pick the mask
            channel from det_masks
        :param det_masks: per-detection stack of per-class raw masks
        :param num_classes: unused here (kept for signature parity with
            mask_to_result)
        :param mask_thr_binary: unused; the binarization threshold is the
            hard-coded 0.5 in segm_postprocess
        :param img_size: (height, width) of the target image
        :return: single-element list with the list of full-image masks
        """
        def expand_boxes(boxes, scale):
            """Expand an array of boxes by a given scale."""
            w_half = (boxes[:, 2] - boxes[:, 0]) * .5
            h_half = (boxes[:, 3] - boxes[:, 1]) * .5
            x_c = (boxes[:, 2] + boxes[:, 0]) * .5
            y_c = (boxes[:, 3] + boxes[:, 1]) * .5
            w_half *= scale
            h_half *= scale
            boxes_exp = np.zeros(boxes.shape)
            boxes_exp[:, 0] = x_c - w_half
            boxes_exp[:, 2] = x_c + w_half
            boxes_exp[:, 1] = y_c - h_half
            boxes_exp[:, 3] = y_c + h_half
            return boxes_exp

        def segm_postprocess(box, raw_cls_mask, im_h, im_w, full_image_mask=False, encode=False):
            # NOTE(review): the `encode` parameter is never used here.
            # Add zero border to prevent upsampling artifacts on segment borders.
            raw_cls_mask = np.pad(raw_cls_mask, ((1, 1), (1, 1)), 'constant', constant_values=0)
            # Expand the box by the same ratio the padding grew the mask, so the
            # padded mask still maps onto the (expanded) box.
            extended_box = expand_boxes(box[np.newaxis, :], raw_cls_mask.shape[0] / (raw_cls_mask.shape[0] - 2.0))[
                0]
            extended_box = extended_box.astype(int)
            w, h = np.maximum(extended_box[2:] - extended_box[:2] + 1, 1)  # pylint: disable=E0633
            # Clip the paste region to the image bounds.
            x0, y0 = np.clip(extended_box[:2], a_min=0, a_max=[im_w, im_h])
            x1, y1 = np.clip(extended_box[2:] + 1, a_min=0, a_max=[im_w, im_h])
            raw_cls_mask = cv2.resize(raw_cls_mask, (w, h)) > 0.5
            mask = raw_cls_mask.astype(np.uint8)
            if full_image_mask:
                # Put an object mask in an image mask.
                im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
                mask_start_y = y0 - extended_box[1]
                mask_end_y = y1 - extended_box[1]
                mask_start_x = x0 - extended_box[0]
                mask_end_x = x1 - extended_box[0]
                im_mask[y0:y1, x0:x1] = mask[mask_start_y:mask_end_y, mask_start_x:mask_end_x]
            else:
                # Crop the mask back to the original (unexpanded) box.
                original_box = box.astype(int)
                x0, y0 = np.clip(original_box[:2], a_min=0, a_max=[im_w, im_h])
                x1, y1 = np.clip(original_box[2:] + 1, a_min=0, a_max=[im_w, im_h])
                im_mask = np.ascontiguousarray(
                    mask[(y0 - original_box[1]):(y1 - original_box[1]), (x0 - original_box[0]):(x1 - original_box[0])]
                )
            return im_mask

        masks = []
        per_obj_raw_masks = []
        # Pick the mask channel matching each detection's predicted class.
        for cls, raw_mask in zip(det_labels, det_masks):
            per_obj_raw_masks.append(raw_mask[cls, ...])
        for box, raw_cls_mask in zip(det_bboxes, per_obj_raw_masks):
            masks.append(segm_postprocess(box, raw_cls_mask, *img_size, True, False))
        return [masks]
| [
"numpy.pad",
"cv2.resize",
"cv2.contourArea",
"numpy.maximum",
"numpy.float32",
"numpy.zeros",
"numpy.clip",
"cv2.warpAffine",
"numpy.array",
"cv2.getAffineTransform",
"cv2.minAreaRect",
"numpy.ascontiguousarray",
"cv2.findContours"
] | [((4341, 4399), 'numpy.array', 'np.array', (['[im_scale_x, im_scale_y, im_scale_x, im_scale_y]'], {}), '([im_scale_x, im_scale_y, im_scale_x, im_scale_y])\n', (4349, 4399), True, 'import numpy as np\n'), ((4430, 4468), 'numpy.clip', 'np.clip', (['boxes[:, 0:4:2]', '(0)', '(img_w - 1)'], {}), '(boxes[:, 0:4:2], 0, img_w - 1)\n', (4437, 4468), True, 'import numpy as np\n'), ((4499, 4537), 'numpy.clip', 'np.clip', (['boxes[:, 1:4:2]', '(0)', '(img_h - 1)'], {}), '(boxes[:, 1:4:2], 0, img_h - 1)\n', (4506, 4537), True, 'import numpy as np\n'), ((6098, 6144), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['src_points', 'dst_points'], {}), '(src_points, dst_points)\n', (6120, 6144), False, 'import cv2\n'), ((6164, 6218), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'transform_matrix', 'img_size[::-1]'], {}), '(mask, transform_matrix, img_size[::-1])\n', (6178, 6218), False, 'import cv2\n'), ((7009, 7030), 'numpy.zeros', 'np.zeros', (['boxes.shape'], {}), '(boxes.shape)\n', (7017, 7030), True, 'import numpy as np\n'), ((7441, 7510), 'numpy.pad', 'np.pad', (['raw_cls_mask', '((1, 1), (1, 1))', '"""constant"""'], {'constant_values': '(0)'}), "(raw_cls_mask, ((1, 1), (1, 1)), 'constant', constant_values=0)\n", (7447, 7510), True, 'import numpy as np\n'), ((7717, 7771), 'numpy.maximum', 'np.maximum', (['(extended_box[2:] - extended_box[:2] + 1)', '(1)'], {}), '(extended_box[2:] - extended_box[:2] + 1, 1)\n', (7727, 7771), True, 'import numpy as np\n'), ((7818, 7872), 'numpy.clip', 'np.clip', (['extended_box[:2]'], {'a_min': '(0)', 'a_max': '[im_w, im_h]'}), '(extended_box[:2], a_min=0, a_max=[im_w, im_h])\n', (7825, 7872), True, 'import numpy as np\n'), ((7894, 7952), 'numpy.clip', 'np.clip', (['(extended_box[2:] + 1)'], {'a_min': '(0)', 'a_max': '[im_w, im_h]'}), '(extended_box[2:] + 1, a_min=0, a_max=[im_w, im_h])\n', (7901, 7952), True, 'import numpy as np\n'), ((5144, 5216), 'cv2.findContours', 'cv2.findContours', (['decoded_mask', 'cv2.RETR_EXTERNAL', 
'cv2.CHAIN_APPROX_NONE'], {}), '(decoded_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (5160, 5216), False, 'import cv2\n'), ((5330, 5354), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (5345, 5354), False, 'import cv2\n'), ((5914, 5986), 'numpy.float32', 'np.float32', (['[[0, 0], [0, mask.shape[0]], [mask.shape[1], mask.shape[0]]]'], {}), '([[0, 0], [0, mask.shape[0]], [mask.shape[1], mask.shape[0]]])\n', (5924, 5986), True, 'import numpy as np\n'), ((6018, 6060), 'numpy.float32', 'np.float32', (['[[x0, y0], [x0, y1], [x1, y1]]'], {}), '([[x0, y0], [x0, y1], [x1, y1]])\n', (6028, 6060), True, 'import numpy as np\n'), ((7981, 8013), 'cv2.resize', 'cv2.resize', (['raw_cls_mask', '(w, h)'], {}), '(raw_cls_mask, (w, h))\n', (7991, 8013), False, 'import cv2\n'), ((8183, 8221), 'numpy.zeros', 'np.zeros', (['(im_h, im_w)'], {'dtype': 'np.uint8'}), '((im_h, im_w), dtype=np.uint8)\n', (8191, 8221), True, 'import numpy as np\n'), ((8611, 8665), 'numpy.clip', 'np.clip', (['original_box[:2]'], {'a_min': '(0)', 'a_max': '[im_w, im_h]'}), '(original_box[:2], a_min=0, a_max=[im_w, im_h])\n', (8618, 8665), True, 'import numpy as np\n'), ((8691, 8749), 'numpy.clip', 'np.clip', (['(original_box[2:] + 1)'], {'a_min': '(0)', 'a_max': '[im_w, im_h]'}), '(original_box[2:] + 1, a_min=0, a_max=[im_w, im_h])\n', (8698, 8749), True, 'import numpy as np\n'), ((8776, 8892), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['mask[y0 - original_box[1]:y1 - original_box[1], x0 - original_box[0]:x1 -\n original_box[0]]'], {}), '(mask[y0 - original_box[1]:y1 - original_box[1], x0 -\n original_box[0]:x1 - original_box[0]])\n', (8796, 8892), True, 'import numpy as np\n'), ((5275, 5293), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (5290, 5293), False, 'import cv2\n')] |
import os
from typing import Tuple
import cv2
import numpy as np
from keras.utils import Sequence
from pycocotools import mask
from pycocotools.coco import COCO
from util.array_utils import down_size_batch
class DataLoader(Sequence):
    """Loads data/labels from a coco annotated dataset.

    The images are stored in the 'images/' directory and the annotations in the 'annotation.json' file.

    # Arguments:
        data_directory: string
            absolute path to the directory
        batch_size: int
            amount of data points which are processed as one batch
        shuffle: boolean
            Shuffle data points after each epoch
        augment: boolean
            augment the data points with random horizontal/vertical flips and 90° rotations
        down_sample_factor: int
            factor used for down sampling. The image size has to be divisible by the factor without remainder
        subset_size: int
            size of the subset which is used instead of the whole dataset
        random_state: numpy.random.RandomState
            state which will be used for shuffling and augmentation
    """

    def __init__(self: 'DataLoader',
                 data_directory: str,
                 batch_size: int = 32,
                 shuffle: bool = True,
                 augment: bool = True,
                 down_sample_factor: int = 1,
                 subset_size: int = 0,
                 random_state: np.random.RandomState = None):
        self.directory = os.path.join(data_directory, 'images')
        # NOTE(review): the class docstring says 'annotation.json' but the code
        # loads 'annotation-small.json' -- confirm which file is intended.
        self.coco = COCO(os.path.join(data_directory, 'annotation-small.json'))
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.augment = augment
        self.down_sample_factor = down_sample_factor
        # Init random state
        if isinstance(random_state, np.random.RandomState):
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()
        # Get ids
        self.image_ids = self.coco.getImgIds()
        if 0 < subset_size < len(self.image_ids):
            # NOTE(review): choice() samples with replacement by default, so the
            # subset may contain duplicate image ids -- verify this is intended.
            self.image_ids = self.random_state.choice(self.image_ids, subset_size)
        # Get data shape (derived from the first image; all images are assumed
        # to share the same dimensions -- TODO confirm for this dataset).
        self.height = self.coco.loadImgs(self.image_ids[0])[0]['height']
        self.width = self.coco.loadImgs(self.image_ids[0])[0]['width']
        self.data_shape = (self.height, self.width, 3)
        self.labels_shape = (self.height, self.width, 1)

    def __len__(self):
        # Number of full batches per epoch; a trailing partial batch is dropped.
        return int(np.floor(len(self.image_ids) / self.batch_size))

    def __getitem__(self, index):
        """Return one `(data, labels)` batch, optionally augmented/down-sampled."""
        indexes = self.image_ids[index * self.batch_size:(index + 1) * self.batch_size]
        data = np.empty((self.batch_size, *self.data_shape))
        labels = np.empty((self.batch_size, *self.labels_shape))
        for i, ID in enumerate(indexes):
            x = self._get_image(ID)
            y = self._get_labels(ID)
            if self.augment:
                # Draw augmentation parameters in a fixed order so results are
                # reproducible for a given random_state; apply the identical
                # transform to both image and mask.
                rotations = self.random_state.randint(4)
                flip_lr = self.random_state.randint(2)
                flip_ud = self.random_state.randint(2)
                if flip_lr:
                    x = np.fliplr(x)
                    y = np.fliplr(y)
                if flip_ud:
                    x = np.flipud(x)
                    y = np.flipud(y)
                x = np.rot90(x, k=rotations)
                y = np.rot90(y, k=rotations)
            data[i,] = x
            labels[i,] = y
        if self.down_sample_factor > 1:
            # NOTE(review): the configured factor is not forwarded to
            # down_size_batch -- confirm the helper's default matches it.
            data = down_size_batch(data)
            labels = down_size_batch(labels)
        return data, labels

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested.
        if self.shuffle:
            self.random_state.shuffle(self.image_ids)

    def _get_image(self, image_id):
        """Load one image by coco id, scaled to [0, 1]."""
        path = self.coco.loadImgs([image_id])[0]['file_name']
        # NOTE(review): cv2.imread returns None for unreadable files, which
        # would raise here on the division -- no explicit handling.
        image = cv2.imread(os.path.join(self.directory, path)) / 255
        return image

    def _get_labels(self, image_id):
        """Build a binary segmentation mask from all annotations of an image."""
        annotations = self.coco.loadAnns(self.coco.getAnnIds(image_id))
        labels = np.zeros((self.height, self.width), dtype=float)
        for a in annotations:
            # Decode the RLE for each annotation and merge it into one mask.
            rle = mask.frPyObjects(a['segmentation'], self.height, self.width)
            m = np.squeeze(mask.decode(rle))
            labels = np.logical_or(labels, m)
        return labels.reshape((*labels.shape, 1))
class NumpyDataLoader(Sequence):
    """Loads data/labels where each data point is stored as a numpy array (.npy) on the file system.

    The data points/labels have to be stored with a given prefix and incrementing numbers for
    identification, for example with the prefix 'x_':
        x_1.npy, x_2.npy, ...

    # Arguments:
        data_directory: string
            absolute path to the directory; must end with a path separator,
            since file paths are built by plain string concatenation
        batch_size: int
            amount of data points which are processed as one batch
        data_prefix: string
            prefix for the data files
        label_prefix: string
            prefix for the label files
        shuffle: boolean
            Shuffle data points after each epoch
        augment: boolean
            augment the data points with random horizontal/vertical flips and 90° rotations
        random_state: numpy.random.RandomState
            state which will be used for shuffling and augmentation
    """

    def __init__(self: 'NumpyDataLoader',
                 data_directory: str,
                 batch_size: int = 32,
                 data_prefix: str = 'x_',
                 label_prefix: str = 'y_',
                 shuffle: bool = True,
                 augment: bool = True,
                 random_state: np.random.RandomState = None):
        self.data_directory = data_directory
        self.batch_size = batch_size
        self.data_prefix = data_prefix
        self.label_prefix = label_prefix
        self.shuffle = shuffle
        self.augment = augment
        # Get amount of data points by counting files with the data prefix.
        data_size = len([f for f in os.listdir(data_directory) if f.startswith(data_prefix)])
        self.image_ids = np.arange(data_size)
        # Get data/label dimensionality from first sample.
        # Currently only one channeled labels are supported (binary segmentation);
        # augmentation might crash when the label shape is not 2d.
        self.data_shape = np.load(data_directory + data_prefix + '0.npy').shape
        self.labels_shape = (*np.load(data_directory + label_prefix + '0.npy').shape, 1)
        # Init random state
        if isinstance(random_state, np.random.RandomState):
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState()

    def __len__(self):
        # Number of full batches per epoch; a trailing partial batch is dropped.
        return int(np.floor(len(self.image_ids) / self.batch_size))

    def __getitem__(self, index):
        """Return one `(data, labels)` batch, optionally augmented."""
        indexes = self.image_ids[index * self.batch_size:(index + 1) * self.batch_size]
        data = np.empty((self.batch_size, *self.data_shape))
        labels = np.empty((self.batch_size, *self.labels_shape))
        for i, ID in enumerate(indexes):
            # Bug fix: use the configured prefixes instead of the previously
            # hard-coded 'x_'/'y_', which silently ignored data_prefix/label_prefix.
            x = np.load(self.data_directory + self.data_prefix + str(ID) + '.npy') / 255
            y = np.load(self.data_directory + self.label_prefix + str(ID) + '.npy').reshape(self.labels_shape)
            if self.augment:
                # Draw augmentation parameters in a fixed order so results are
                # reproducible for a given random_state; apply the identical
                # transform to both image and mask.
                rotations = self.random_state.randint(4)
                flip_lr = self.random_state.randint(2)
                flip_ud = self.random_state.randint(2)
                if flip_lr:
                    x = np.fliplr(x)
                    y = np.fliplr(y)
                if flip_ud:
                    x = np.flipud(x)
                    y = np.flipud(y)
                x = np.rot90(x, k=rotations)
                y = np.rot90(y, k=rotations)
            data[i,] = x
            labels[i,] = y
        return data, labels

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested.
        if self.shuffle:
            self.random_state.shuffle(self.image_ids)
class RandomLoader(Sequence):
    """Serves batches of uniformly distributed random data points.

    Useful for analysis tasks such as computing the effective receptive field
    with random input. No labels are produced.

    # Arguments
        dataset_size: int
            desired amount of data points
        batch_size: int
            amount of data points which are processed as one batch
        data_shape: Tuple
            shape of the data points (channels last)
        random_state: np.random.RandomState
            state which will be used for data point generation
    """

    def __init__(self: 'RandomLoader',
                 dataset_size: int = 100,
                 batch_size: int = 32,
                 data_shape: Tuple = (150, 150, 3),
                 random_state: np.random.RandomState = None):
        self.dataset_size = dataset_size
        self.batch_size = batch_size
        self.data_shape = data_shape
        # Fall back to a fresh generator unless a usable one was handed in.
        if not isinstance(random_state, np.random.RandomState):
            random_state = np.random.RandomState()
        self.random_state = random_state
        # Snapshot the RNG state so every epoch replays identical batches.
        self._state = self.random_state.get_state()

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, index):
        # A fresh random batch; labels are intentionally absent.
        batch = self.random_state.rand(self.batch_size, *self.data_shape)
        return batch, None

    def on_epoch_end(self):
        # Rewind the generator so the next epoch produces the same sequence.
        self.random_state.set_state(self._state)
| [
"numpy.load",
"pycocotools.mask.decode",
"numpy.empty",
"numpy.zeros",
"numpy.flipud",
"numpy.random.RandomState",
"numpy.fliplr",
"util.array_utils.down_size_batch",
"numpy.arange",
"numpy.logical_or",
"numpy.rot90",
"pycocotools.mask.frPyObjects",
"os.path.join",
"os.listdir"
] | [((1502, 1540), 'os.path.join', 'os.path.join', (['data_directory', '"""images"""'], {}), "(data_directory, 'images')\n", (1514, 1540), False, 'import os\n'), ((2689, 2734), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.data_shape)'], {}), '((self.batch_size, *self.data_shape))\n', (2697, 2734), True, 'import numpy as np\n'), ((2752, 2799), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.labels_shape)'], {}), '((self.batch_size, *self.labels_shape))\n', (2760, 2799), True, 'import numpy as np\n'), ((4042, 4090), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {'dtype': 'float'}), '((self.height, self.width), dtype=float)\n', (4050, 4090), True, 'import numpy as np\n'), ((6124, 6144), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (6133, 6144), True, 'import numpy as np\n'), ((6952, 6997), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.data_shape)'], {}), '((self.batch_size, *self.data_shape))\n', (6960, 6997), True, 'import numpy as np\n'), ((7015, 7062), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.labels_shape)'], {}), '((self.batch_size, *self.labels_shape))\n', (7023, 7062), True, 'import numpy as np\n'), ((1566, 1619), 'os.path.join', 'os.path.join', (['data_directory', '"""annotation-small.json"""'], {}), "(data_directory, 'annotation-small.json')\n", (1578, 1619), False, 'import os\n'), ((1953, 1976), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1974, 1976), True, 'import numpy as np\n'), ((3522, 3543), 'util.array_utils.down_size_batch', 'down_size_batch', (['data'], {}), '(data)\n', (3537, 3543), False, 'from util.array_utils import down_size_batch\n'), ((3565, 3588), 'util.array_utils.down_size_batch', 'down_size_batch', (['labels'], {}), '(labels)\n', (3580, 3588), False, 'from util.array_utils import down_size_batch\n'), ((4139, 4199), 'pycocotools.mask.frPyObjects', 'mask.frPyObjects', (["a['segmentation']", 'self.height', 'self.width'], {}), 
"(a['segmentation'], self.height, self.width)\n", (4155, 4199), False, 'from pycocotools import mask\n'), ((4266, 4290), 'numpy.logical_or', 'np.logical_or', (['labels', 'm'], {}), '(labels, m)\n', (4279, 4290), True, 'import numpy as np\n'), ((6374, 6421), 'numpy.load', 'np.load', (["(data_directory + data_prefix + '0.npy')"], {}), "(data_directory + data_prefix + '0.npy')\n", (6381, 6421), True, 'import numpy as np\n'), ((6697, 6720), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (6718, 6720), True, 'import numpy as np\n'), ((9062, 9085), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (9083, 9085), True, 'import numpy as np\n'), ((3339, 3363), 'numpy.rot90', 'np.rot90', (['x'], {'k': 'rotations'}), '(x, k=rotations)\n', (3347, 3363), True, 'import numpy as np\n'), ((3384, 3408), 'numpy.rot90', 'np.rot90', (['y'], {'k': 'rotations'}), '(y, k=rotations)\n', (3392, 3408), True, 'import numpy as np\n'), ((3852, 3886), 'os.path.join', 'os.path.join', (['self.directory', 'path'], {}), '(self.directory, path)\n', (3864, 3886), False, 'import os\n'), ((4227, 4243), 'pycocotools.mask.decode', 'mask.decode', (['rle'], {}), '(rle)\n', (4238, 4243), False, 'from pycocotools import mask\n'), ((7704, 7728), 'numpy.rot90', 'np.rot90', (['x'], {'k': 'rotations'}), '(x, k=rotations)\n', (7712, 7728), True, 'import numpy as np\n'), ((7749, 7773), 'numpy.rot90', 'np.rot90', (['y'], {'k': 'rotations'}), '(y, k=rotations)\n', (7757, 7773), True, 'import numpy as np\n'), ((3165, 3177), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (3174, 3177), True, 'import numpy as np\n'), ((3202, 3214), 'numpy.fliplr', 'np.fliplr', (['y'], {}), '(y)\n', (3211, 3214), True, 'import numpy as np\n'), ((3268, 3280), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (3277, 3280), True, 'import numpy as np\n'), ((3305, 3317), 'numpy.flipud', 'np.flipud', (['y'], {}), '(y)\n', (3314, 3317), True, 'import numpy as np\n'), ((6041, 6067), 'os.listdir', 
'os.listdir', (['data_directory'], {}), '(data_directory)\n', (6051, 6067), False, 'import os\n'), ((6458, 6506), 'numpy.load', 'np.load', (["(data_directory + label_prefix + '0.npy')"], {}), "(data_directory + label_prefix + '0.npy')\n", (6465, 6506), True, 'import numpy as np\n'), ((7530, 7542), 'numpy.fliplr', 'np.fliplr', (['x'], {}), '(x)\n', (7539, 7542), True, 'import numpy as np\n'), ((7567, 7579), 'numpy.fliplr', 'np.fliplr', (['y'], {}), '(y)\n', (7576, 7579), True, 'import numpy as np\n'), ((7633, 7645), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (7642, 7645), True, 'import numpy as np\n'), ((7670, 7682), 'numpy.flipud', 'np.flipud', (['y'], {}), '(y)\n', (7679, 7682), True, 'import numpy as np\n')] |
"""Small demo of the mean_average_precision package on dummy detections."""
import numpy as np
from mean_average_precision import MetricBuilder

# print list of available metrics
print(MetricBuilder.get_metrics_list())

num_classes = 5

# create metric_fn
metric_fn = MetricBuilder.build_evaluation_metric(
    "map_2d", async_mode=True, num_classes=num_classes
)

# Ground-truth boxes: [xmin, ymin, xmax, ymax] per row.
gt_boxes = [
    [439, 157, 556, 241],
    [437, 246, 518, 351],
    [515, 306, 595, 375],
    [407, 386, 531, 476],
    [544, 419, 621, 476],
    [609, 297, 636, 392],
]
# Predicted boxes: [xmin, ymin, xmax, ymax, confidence] per row.
pred_boxes = [
    [429, 219, 528, 247, 0.460851],
    [433, 260, 506, 336, 0.269833],
    [518, 314, 603, 369, 0.462608],
    [592, 310, 634, 388, 0.298196],
    [403, 384, 517, 461, 0.382881],
    [405, 429, 519, 470, 0.369369],
    [433, 272, 499, 341, 0.272826],
    [413, 390, 515, 459, 0.619459],
]

# add some samples to evaluation: the same boxes are reused for every class id
for class_id in range(num_classes):
    # gt rows: [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
    gt = np.array([box + [class_id, 0, 0] for box in gt_boxes])
    # prediction rows: [xmin, ymin, xmax, ymax, class_id, confidence]
    preds = np.array([box[:4] + [class_id, box[4]] for box in pred_boxes])
    metric_fn.add(preds, gt)

# compute PASCAL VOC metric
print(
    f"VOC PASCAL mAP: {metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.1, 0.1))}"
)
# compute PASCAL VOC metric at the all points
print(f"VOC PASCAL mAP in all points: {metric_fn.value(iou_thresholds=0.5)}")
| [
"numpy.array",
"mean_average_precision.MetricBuilder.get_metrics_list",
"mean_average_precision.MetricBuilder.build_evaluation_metric",
"numpy.arange"
] | [((191, 284), 'mean_average_precision.MetricBuilder.build_evaluation_metric', 'MetricBuilder.build_evaluation_metric', (['"""map_2d"""'], {'async_mode': '(True)', 'num_classes': 'num_classes'}), "('map_2d', async_mode=True,\n num_classes=num_classes)\n", (228, 284), False, 'from mean_average_precision import MetricBuilder\n'), ((109, 141), 'mean_average_precision.MetricBuilder.get_metrics_list', 'MetricBuilder.get_metrics_list', ([], {}), '()\n', (139, 141), False, 'from mean_average_precision import MetricBuilder\n'), ((418, 623), 'numpy.array', 'np.array', (['[[439, 157, 556, 241, i, 0, 0], [437, 246, 518, 351, i, 0, 0], [515, 306, \n 595, 375, i, 0, 0], [407, 386, 531, 476, i, 0, 0], [544, 419, 621, 476,\n i, 0, 0], [609, 297, 636, 392, i, 0, 0]]'], {}), '([[439, 157, 556, 241, i, 0, 0], [437, 246, 518, 351, i, 0, 0], [\n 515, 306, 595, 375, i, 0, 0], [407, 386, 531, 476, i, 0, 0], [544, 419,\n 621, 476, i, 0, 0], [609, 297, 636, 392, i, 0, 0]])\n', (426, 623), True, 'import numpy as np\n'), ((778, 1088), 'numpy.array', 'np.array', (['[[429, 219, 528, 247, i, 0.460851], [433, 260, 506, 336, i, 0.269833], [518,\n 314, 603, 369, i, 0.462608], [592, 310, 634, 388, i, 0.298196], [403, \n 384, 517, 461, i, 0.382881], [405, 429, 519, 470, i, 0.369369], [433, \n 272, 499, 341, i, 0.272826], [413, 390, 515, 459, i, 0.619459]]'], {}), '([[429, 219, 528, 247, i, 0.460851], [433, 260, 506, 336, i, \n 0.269833], [518, 314, 603, 369, i, 0.462608], [592, 310, 634, 388, i, \n 0.298196], [403, 384, 517, 461, i, 0.382881], [405, 429, 519, 470, i, \n 0.369369], [433, 272, 499, 341, i, 0.272826], [413, 390, 515, 459, i, \n 0.619459]])\n', (786, 1088), True, 'import numpy as np\n'), ((1333, 1357), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (1342, 1357), True, 'import numpy as np\n')] |
"""Core functions."""
import os
import nibabel as nb
import numpy as np
from matplotlib.cm import get_cmap
from imageio import mimwrite
from skimage.transform import resize
def parse_filename(filepath):
    """Parse input file path into directory, basename and extension.

    Parameters
    ----------
    filepath: string
        Input name that will be parsed into directory, basename and extension.

    Returns
    -------
    dirname: str
        File directory.
    basename: str
        File name without directory and extension.
    ext: str
        File extension (everything after the first extension separator, so
        'image.nii.gz' yields 'nii.gz'). Empty string when there is none.
    """
    path = os.path.normpath(filepath)
    dirname = os.path.dirname(path)
    filename = path.split(os.sep)[-1]
    # partition (unlike split + tuple unpacking) never raises when the
    # separator is missing, so extension-less names return ext == ''
    # instead of crashing with a ValueError.
    basename, _, ext = filename.partition(os.extsep)
    return dirname, basename, ext
def load_and_prepare_image(filename, size=1):
    """Load a NIfTI volume, zero-pad it to a cube and optionally resize it.

    Parameters
    ----------
    filename: str
        Input file (eg. /john/home/image.nii.gz)
    size: float
        Image resizing factor.

    Returns
    -------
    out_img: numpy array
        Cubic volume with intensities scaled to [0, 1].
    maximum: int
        Edge length of the (possibly resized) cube.
    """
    # Load NIfTI file. get_fdata() replaces get_data(), which was deprecated
    # and removed in nibabel 5.0.
    data = nb.load(filename).get_fdata()
    # Pad data array with zeros to make the shape isometric.
    maximum = int(np.max(data.shape))
    out_img = np.zeros([maximum] * 3)
    a, b, c = data.shape
    # Center the original volume inside the cube.
    x, y, z = (maximum - a) // 2, (maximum - b) // 2, (maximum - c) // 2
    out_img[x:a + x,
            y:b + y,
            z:c + z] = data
    # Scale image values between 0-1; guard against an all-zero volume,
    # which previously produced NaNs via division by zero.
    max_value = out_img.max()
    if max_value > 0:
        out_img /= max_value
    # Resize image by the following factor
    if size != 1:
        out_img = resize(out_img, [int(size * maximum)] * 3)
        maximum = int(maximum * size)
    return out_img, maximum
def create_mosaic_normal(out_img, maximum):
    """Create grayscale image.

    For every index i, three orthogonal slices of the cubic volume are
    placed side by side into one 2D frame.

    Parameters
    ----------
    out_img: numpy array
        Cubic image volume.
    maximum: int
        Edge length of the cube / number of frames.

    Returns
    -------
    new_img: numpy array
    """
    frames = []
    for i in range(maximum):
        first = np.flip(out_img[i, :, :], 1).T
        second = np.flip(out_img[:, maximum - i - 1, :], 1).T
        third = np.flip(out_img[:, :, maximum - i - 1], 1).T
        frames.append(np.hstack((first, second, third)))
    return np.array(frames)
def create_mosaic_depth(out_img, maximum):
    """Create an image with concurrent slices represented with colors.

    The image shows you in color what the value of the next slice will be. If
    the color is slightly red or blue it means that the value on the next slide
    is brighter or darker, respectifely. It therefore encodes a certain kind of
    depth into the gif.

    Parameters
    ----------
    out_img: numpy array
    maximum: int

    Returns
    -------
    new_img: numpy array
    """
    # Load normal mosaic image
    new_img = create_mosaic_normal(out_img, maximum)
    # Pack three consecutive grayscale frames into the R, G and B channels
    # (where red and blue mean a positive or negative shift in the direction
    # of the depicted axis).
    rgb_img = [new_img[i:i + 3, ...] for i in range(maximum - 3)]
    # Move the channel axis to the end: (frames, 3, h, w) -> (frames, h, w, 3).
    # moveaxis replaces the discouraged np.rollaxis.
    out_img = np.moveaxis(np.array(rgb_img), 1, -1)
    # Add 3 black frames for the images lost by the 3-frame window above.
    out_img = np.vstack(
        (out_img, np.zeros((3, *out_img[-1].shape))))
    return out_img
def create_mosaic_RGB(out_img1, out_img2, out_img3, maximum):
    """Create RGB image from three volumes, one per color channel.

    Parameters
    ----------
    out_img1: numpy array
        Volume used for the red channel.
    out_img2: numpy array
        Volume used for the green channel.
    out_img3: numpy array
        Volume used for the blue channel.
    maximum: int
        Edge length of the cubes / number of frames.

    Returns
    -------
    new_img: numpy array
    """
    # Load normal mosaic image for each channel
    new_img1 = create_mosaic_normal(out_img1, maximum)
    new_img2 = create_mosaic_normal(out_img2, maximum)
    new_img3 = create_mosaic_normal(out_img3, maximum)
    # Combine the three grayscale mosaics as R, G and B channels.
    rgb_img = [[new_img1[i, ...], new_img2[i, ...], new_img3[i, ...]]
               for i in range(maximum)]
    # Move the channel axis to the end: (frames, 3, h, w) -> (frames, h, w, 3).
    # moveaxis replaces the discouraged np.rollaxis.
    out_img = np.moveaxis(np.array(rgb_img), 1, -1)
    # NOTE(review): unlike the depth variant no frames are lost here, so these
    # 3 appended black frames look like a copy-paste leftover -- kept for
    # behavioral compatibility; confirm whether they are intended.
    out_img = np.vstack(
        (out_img, np.zeros((3, *out_img[-1].shape))))
    return out_img
def write_gif_normal(filename, size=1, fps=18):
    """Write a grayscale mosaic gif next to the input NIfTI file.

    Parameters
    ----------
    filename: str
        Input file (eg. /john/home/image.nii.gz)
    size: float
        Between 0 and 1.
    fps: int
        Frames per second
    """
    # Load the NIfTI volume as a normalized cube.
    volume, maximum = load_and_prepare_image(filename, size)
    # Build the grayscale mosaic.
    mosaic = create_mosaic_normal(volume, maximum)
    # Derive the output name by swapping the input extension for '.gif'.
    ext = f'.{parse_filename(filename)[2]}'
    out_path = filename.replace(ext, '.gif')
    mimwrite(out_path, mosaic, format='gif', fps=int(fps * size))
def write_gif_depth(filename, size=1, fps=18):
    """Write a depth-encoded gif next to the input NIfTI file.

    The image shows you in color what the value of the next slice will be. If
    the color is slightly red or blue it means that the value on the next slide
    is brighter or darker, respectifely. It therefore encodes a certain kind of
    depth into the gif.

    Parameters
    ----------
    filename: str
        Input file (eg. /john/home/image.nii.gz)
    size: float
        Between 0 and 1.
    fps: int
        Frames per second
    """
    # Load the NIfTI volume as a normalized cube.
    volume, maximum = load_and_prepare_image(filename, size)
    # Build the depth-colored mosaic.
    mosaic = create_mosaic_depth(volume, maximum)
    # Derive the output name by swapping the input extension for '_depth.gif'.
    ext = f'.{parse_filename(filename)[2]}'
    out_path = filename.replace(ext, '_depth.gif')
    mimwrite(out_path, mosaic, format='gif', fps=int(fps * size))
def write_gif_rgb(filename1, filename2, filename3, size=1, fps=18):
    """Procedure for writing RGB image.

    Parameters
    ----------
    filename1: str
        Input file for red channel.
    filename2: str
        Input file for green channel.
    filename3: str
        Input file for blue channel.
    size: float
        Between 0 and 1.
    fps: int
        Frames per second

    Raises
    ------
    ValueError
        If the three (resized) volumes do not have the same dimensions.
    """
    # Load NIfTI and put it in right shape
    out_img1, maximum1 = load_and_prepare_image(filename1, size)
    out_img2, maximum2 = load_and_prepare_image(filename2, size)
    out_img3, maximum3 = load_and_prepare_image(filename3, size)
    # Previously a size mismatch left `maximum` unbound and the function
    # crashed later with a NameError; fail early with a clear message instead.
    if not maximum1 == maximum2 == maximum3:
        raise ValueError(
            'Input images must have the same dimensions, got {}, {} and {}.'.format(
                maximum1, maximum2, maximum3))
    maximum = maximum1
    # Create output mosaic
    new_img = create_mosaic_RGB(out_img1, out_img2, out_img3, maximum)
    # Generate output path from the three input basenames
    out_filename = '{}_{}_{}_rgb.gif'.format(parse_filename(filename1)[1],
                                             parse_filename(filename2)[1],
                                             parse_filename(filename3)[1])
    out_path = os.path.join(parse_filename(filename1)[0], out_filename)
    # Write gif file
    mimwrite(out_path, new_img, format='gif', fps=int(fps * size))
def write_gif_pseudocolor(filename, size=1, fps=18, colormap='hot'):
    """Procedure for writing a pseudo color gif.
    The colormap can be any colormap from matplotlib.
    Parameters
    ----------
    filename: str
        Input file (eg. /john/home/image.nii.gz)
    size: float
        Between 0 and 1.
    fps: int
        Frames per second
    colormap: str
        Name of the colormap that will be used.
    """
    # Load the NIfTI volume and bring it into mosaic-ready shape
    volume, maximum = load_and_prepare_image(filename, size)
    mosaic = create_mosaic_normal(volume, maximum)
    # Map intensities through the matplotlib colormap, frame by frame
    cmap = get_cmap(colormap)
    colored_frames = [cmap(mosaic[i, ...]) for i in range(maximum)]
    # Drop the alpha channel appended by the colormap
    cmap_img = np.delete(colored_frames, 3, 3)
    # Swap the original extension for the '_<colormap>.gif' suffix
    extension = '.{}'.format(parse_filename(filename)[2])
    output_path = filename.replace(extension, '_{}.gif'.format(colormap))
    mimwrite(output_path, cmap_img, format='gif', fps=int(fps * size))
| [
"numpy.flip",
"matplotlib.cm.get_cmap",
"nibabel.load",
"os.path.dirname",
"numpy.zeros",
"numpy.max",
"numpy.array",
"os.path.normpath",
"numpy.delete"
] | [((594, 620), 'os.path.normpath', 'os.path.normpath', (['filepath'], {}), '(filepath)\n', (610, 620), False, 'import os\n'), ((635, 656), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (650, 656), False, 'import os\n'), ((1203, 1221), 'numpy.max', 'np.max', (['data.shape'], {}), '(data.shape)\n', (1209, 1221), True, 'import numpy as np\n'), ((1237, 1260), 'numpy.zeros', 'np.zeros', (['([maximum] * 3)'], {}), '([maximum] * 3)\n', (1245, 1260), True, 'import numpy as np\n'), ((7631, 7649), 'matplotlib.cm.get_cmap', 'get_cmap', (['colormap'], {}), '(colormap)\n', (7639, 7649), False, 'from matplotlib.cm import get_cmap\n'), ((7737, 7771), 'numpy.delete', 'np.delete', (['color_transformed', '(3)', '(3)'], {}), '(color_transformed, 3, 3)\n', (7746, 7771), True, 'import numpy as np\n'), ((3042, 3059), 'numpy.array', 'np.array', (['rgb_img'], {}), '(rgb_img)\n', (3050, 3059), True, 'import numpy as np\n'), ((3939, 3956), 'numpy.array', 'np.array', (['rgb_img'], {}), '(rgb_img)\n', (3947, 3956), True, 'import numpy as np\n'), ((1099, 1116), 'nibabel.load', 'nb.load', (['filename'], {}), '(filename)\n', (1106, 1116), True, 'import nibabel as nb\n'), ((3150, 3196), 'numpy.zeros', 'np.zeros', (['([3] + [o for o in out_img[-1].shape])'], {}), '([3] + [o for o in out_img[-1].shape])\n', (3158, 3196), True, 'import numpy as np\n'), ((4047, 4093), 'numpy.zeros', 'np.zeros', (['([3] + [o for o in out_img[-1].shape])'], {}), '([3] + [o for o in out_img[-1].shape])\n', (4055, 4093), True, 'import numpy as np\n'), ((2088, 2130), 'numpy.flip', 'np.flip', (['out_img[:, :, maximum - i - 1]', '(1)'], {}), '(out_img[:, :, maximum - i - 1], 1)\n', (2095, 2130), True, 'import numpy as np\n'), ((1980, 2008), 'numpy.flip', 'np.flip', (['out_img[i, :, :]', '(1)'], {}), '(out_img[i, :, :], 1)\n', (1987, 2008), True, 'import numpy as np\n'), ((2028, 2070), 'numpy.flip', 'np.flip', (['out_img[:, maximum - i - 1, :]', '(1)'], {}), '(out_img[:, maximum - i - 1, :], 1)\n', 
(2035, 2070), True, 'import numpy as np\n')] |
import copy
import importlib
import itertools
from typing import Tuple, Dict, Callable
import numpy as np
from highway_env.types import Vector, Interval
def do_every(duration: float, timer: float) -> bool:
    """Return True when the elapsed timer strictly exceeds the given duration."""
    return timer > duration
def lmap(v: float, x: Interval, y: Interval) -> float:
    """Linearly map the value v from the source range x onto the target range y."""
    scaled = (v - x[0]) * (y[1] - y[0])
    return y[0] + scaled / (x[1] - x[0])
def class_from_path(path: str) -> Callable:
    """Resolve a dotted path 'package.module.Name' to the named attribute."""
    module_path, _, attribute_name = path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
def constrain(x: float, a: float, b: float) -> np.ndarray:
    """Clamp x into the closed interval [a, b]."""
    return np.clip(x, a_min=a, a_max=b)
def not_zero(x: float, eps: float = 1e-2) -> float:
    """Return x, pushed away from zero to a magnitude of at least eps.

    Values with |x| > eps pass through unchanged; otherwise the signed eps is
    returned (non-positive x maps to -eps, matching the original behaviour).
    """
    if abs(x) > eps:
        return x
    return eps if x > 0 else -eps
def wrap_to_pi(x: float) -> float:
    """Wrap an angle (in radians) into the interval [-pi, pi)."""
    shifted = (x + np.pi) % (2 * np.pi)
    return shifted - np.pi
def point_in_rectangle(point: Vector, rect_min: Vector, rect_max: Vector) -> bool:
    """
    Check if a point is inside an axis-aligned rectangle
    :param point: a point (x, y)
    :param rect_min: x_min, y_min
    :param rect_max: x_max, y_max
    """
    inside_x = rect_min[0] <= point[0] <= rect_max[0]
    inside_y = rect_min[1] <= point[1] <= rect_max[1]
    return inside_x and inside_y
def point_in_rotated_rectangle(point: np.ndarray, center: np.ndarray, length: float, width: float, angle: float) \
        -> bool:
    """
    Check if a point is inside a rotated rectangle
    :param point: a point
    :param center: rectangle center
    :param length: rectangle length
    :param width: rectangle width
    :param angle: rectangle angle [rad]
    :return: is the point inside the rectangle
    """
    # Express the point in the rectangle's local frame, then do an
    # axis-aligned containment test against the half-extents.
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    local_point = rotation.dot(point - center)
    half_length, half_width = length / 2, width / 2
    return point_in_rectangle(local_point,
                              (-half_length, -half_width),
                              (half_length, half_width))
def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:
    """
    Check if a point is inside an ellipse
    :param point: a point
    :param center: ellipse center
    :param angle: ellipse main axis angle
    :param length: ellipse big axis
    :param width: ellipse small axis
    :return: is the point inside the ellipse
    """
    c, s = np.cos(angle), np.sin(angle)
    # np.matrix is deprecated in NumPy; a plain 2x2 ndarray produces the
    # identical rotation result via .dot().
    r = np.array([[c, -s], [s, c]])
    ru = r.dot(point - center)
    # Inside iff the rotated offset, normalised by the axes, lies in the unit disc
    return np.sum(np.square(ru / np.array([length, width]))) < 1
def rotated_rectangles_intersect(rect1: Tuple[Vector, float, float, float],
                                 rect2: Tuple[Vector, float, float, float]) -> bool:
    """
    Do two rotated rectangles intersect?
    :param rect1: (center, length, width, angle)
    :param rect2: (center, length, width, angle)
    :return: do they?
    """
    # Symmetric corner test: either rectangle having a corner inside the
    # other counts as an intersection.
    if has_corner_inside(rect1, rect2):
        return True
    return has_corner_inside(rect2, rect1)
def has_corner_inside(rect1: Tuple[Vector, float, float, float],
                      rect2: Tuple[Vector, float, float, float]) -> bool:
    """
    Check if rect1 has a corner inside rect2
    :param rect1: (center, length, width, angle)
    :param rect2: (center, length, width, angle)
    """
    (center1, length1, width1, angle1) = rect1
    (center2, length2, width2, angle2) = rect2
    center1 = np.array(center1)
    half_l = np.array([length1 / 2, 0])
    half_w = np.array([0, width1 / 2])
    # Centre, edge midpoints and the four corners, in rect1's local frame
    local_points = np.array([[0, 0],
                             - half_l, half_l, -half_w, half_w,
                             - half_l - half_w, - half_l + half_w, + half_l - half_w, + half_l + half_w])
    cos_a, sin_a = np.cos(angle1), np.sin(angle1)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    world_points = rotation.dot(local_points.transpose()).transpose()
    return any(point_in_rotated_rectangle(center1 + np.squeeze(p), center2, length2, width2, angle2)
               for p in world_points)
def confidence_ellipsoid(data: Dict[str, np.ndarray], lambda_: float = 1e-5, delta: float = 0.1, sigma: float = 0.1,
                         param_bound: float = 1.0) -> Tuple[np.ndarray, np.ndarray, float]:
    """
    Compute a confidence ellipsoid over the parameter theta, where y = theta^T phi
    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param lambda_: l2 regularization parameter
    :param delta: confidence level
    :param sigma: noise covariance
    :param param_bound: an upper-bound on the parameter norm
    :return: estimated theta, Gramian matrix G_N_lambda, radius beta_N
    """
    phi = np.array(data["features"])
    y = np.array(data["outputs"])
    # Regularised Gramian of the features
    g_n_lambda = 1/sigma * np.transpose(phi) @ phi + lambda_ * np.identity(phi.shape[-1])
    # Regularised least-squares estimate of theta
    theta_n_lambda = np.linalg.inv(g_n_lambda) @ np.transpose(phi) @ y / sigma
    d = theta_n_lambda.shape[0]
    # Ellipsoid radius: self-normalised concentration bound plus the
    # regularisation-induced bias term.
    det_ratio = np.linalg.det(g_n_lambda) / lambda_ ** d
    beta_n = np.sqrt(2 * np.log(np.sqrt(det_ratio) / delta)) + np.sqrt(lambda_ * d) * param_bound
    return theta_n_lambda, g_n_lambda, beta_n
def confidence_polytope(data: dict, parameter_box: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
    """
    Compute a confidence polytope over the parameter theta, where y = theta^T phi
    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param parameter_box: a box [theta_min, theta_max] containing the parameter theta
    :return: estimated theta, polytope vertices, Gramian matrix G_N_lambda, radius beta_N
    """
    param_bound = np.amax(np.abs(parameter_box))
    theta_n_lambda, g_n_lambda, beta_n = confidence_ellipsoid(data, param_bound=param_bound)
    # Ellipsoid axes come from the eigendecomposition of the Gramian
    eigenvalues, eigenvectors = np.linalg.eig(g_n_lambda)
    radius_matrix = np.sqrt(beta_n) * np.linalg.inv(eigenvectors) @ np.diag(np.sqrt(1 / eigenvalues))
    # Enumerate all sign combinations to get the polytope vertices
    sign_corners = np.array(list(itertools.product([-1, 1], repeat=theta_n_lambda.shape[0])))
    d_theta = np.array([radius_matrix @ corner for corner in sign_corners])
    # Clip the parameter and confidence region within the prior parameter box.
    theta_n_lambda = np.clip(theta_n_lambda, parameter_box[0], parameter_box[1])
    for index in range(len(d_theta)):
        d_theta[index] = np.clip(d_theta[index], parameter_box[0] - theta_n_lambda, parameter_box[1] - theta_n_lambda)
    return theta_n_lambda, d_theta, g_n_lambda, beta_n
def is_valid_observation(y: np.ndarray, phi: np.ndarray, theta: np.ndarray, gramian: np.ndarray,
                         beta: float, sigma: float = 0.1) -> bool:
    """
    Check if a new observation (phi, y) is valid according to a confidence ellipsoid on theta.
    :param y: observation
    :param phi: feature
    :param theta: estimated parameter
    :param gramian: Gramian matrix
    :param beta: ellipsoid radius
    :param sigma: noise covariance
    :return: validity of the observation
    """
    # Prediction under the estimated parameter and its residual
    prediction = np.tensordot(theta, phi, axes=[0, 0])
    residual = np.linalg.norm(y - prediction)
    # Bound the residual using the extreme eigenvalues of the feature
    # Gramians, scaled by the ellipsoid radius plus the noise level.
    eigenvalues_phi, _ = np.linalg.eig(phi.transpose() @ phi)
    eigenvalues_g, _ = np.linalg.eig(gramian)
    scale = np.sqrt(np.amax(eigenvalues_phi) / np.amin(eigenvalues_g))
    return residual < scale * beta + sigma
def is_consistent_dataset(data: dict, parameter_box: np.ndarray = None) -> bool:
    """
    Check whether a dataset {phi_n, y_n} is consistent
    The last observation should be in the confidence ellipsoid obtained by the N-1 first observations.
    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param parameter_box: a box [theta_min, theta_max] containing the parameter theta
    :return: consistency of the dataset
    """
    # Work on a deep copy so the caller's dataset is left untouched
    candidate = copy.deepcopy(data)
    y_last = candidate["outputs"].pop(-1)
    phi_last = candidate["features"].pop(-1)
    y_last = np.array(y_last)[..., np.newaxis]
    phi_last = np.array(phi_last)[..., np.newaxis]
    # A dataset with a single observation is trivially consistent
    if not (candidate["outputs"] and candidate["features"]):
        return True
    theta, _, gramian, beta = confidence_polytope(candidate, parameter_box=parameter_box)
    return is_valid_observation(y_last, phi_last, theta, gramian, beta)
| [
"numpy.abs",
"numpy.amin",
"numpy.clip",
"numpy.sin",
"numpy.linalg.norm",
"numpy.transpose",
"numpy.identity",
"numpy.linalg.eig",
"itertools.product",
"numpy.linalg.det",
"copy.deepcopy",
"importlib.import_module",
"numpy.tensordot",
"numpy.linalg.inv",
"numpy.cos",
"numpy.squeeze",
... | [((690, 706), 'numpy.clip', 'np.clip', (['x', 'a', 'b'], {}), '(x, a, b)\n', (697, 706), True, 'import numpy as np\n'), ((1756, 1783), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (1764, 1783), True, 'import numpy as np\n'), ((2326, 2354), 'numpy.matrix', 'np.matrix', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (2335, 2354), True, 'import numpy as np\n'), ((3238, 3250), 'numpy.array', 'np.array', (['c1'], {}), '(c1)\n', (3246, 3250), True, 'import numpy as np\n'), ((3261, 3282), 'numpy.array', 'np.array', (['[l1 / 2, 0]'], {}), '([l1 / 2, 0])\n', (3269, 3282), True, 'import numpy as np\n'), ((3291, 3312), 'numpy.array', 'np.array', (['[0, w1 / 2]'], {}), '([0, w1 / 2])\n', (3299, 3312), True, 'import numpy as np\n'), ((3327, 3419), 'numpy.array', 'np.array', (['[[0, 0], -l1v, l1v, -w1v, w1v, -l1v - w1v, -l1v + w1v, +l1v - w1v, +l1v + w1v]'], {}), '([[0, 0], -l1v, l1v, -w1v, w1v, -l1v - w1v, -l1v + w1v, +l1v - w1v,\n +l1v + w1v])\n', (3335, 3419), True, 'import numpy as np\n'), ((3515, 3542), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (3523, 3542), True, 'import numpy as np\n'), ((4373, 4399), 'numpy.array', 'np.array', (["data['features']"], {}), "(data['features'])\n", (4381, 4399), True, 'import numpy as np\n'), ((4408, 4433), 'numpy.array', 'np.array', (["data['outputs']"], {}), "(data['outputs'])\n", (4416, 4433), True, 'import numpy as np\n'), ((5458, 5483), 'numpy.linalg.eig', 'np.linalg.eig', (['g_n_lambda'], {}), '(g_n_lambda)\n', (5471, 5483), True, 'import numpy as np\n'), ((5668, 5714), 'numpy.array', 'np.array', (['[(radius_matrix @ h_k) for h_k in h]'], {}), '([(radius_matrix @ h_k) for h_k in h])\n', (5676, 5714), True, 'import numpy as np\n'), ((5814, 5873), 'numpy.clip', 'np.clip', (['theta_n_lambda', 'parameter_box[0]', 'parameter_box[1]'], {}), '(theta_n_lambda, parameter_box[0], parameter_box[1])\n', (5821, 5873), True, 'import numpy as np\n'), ((6599, 
6636), 'numpy.tensordot', 'np.tensordot', (['theta', 'phi'], {'axes': '[0, 0]'}), '(theta, phi, axes=[0, 0])\n', (6611, 6636), True, 'import numpy as np\n'), ((6649, 6674), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - y_hat)'], {}), '(y - y_hat)\n', (6663, 6674), True, 'import numpy as np\n'), ((6744, 6766), 'numpy.linalg.eig', 'np.linalg.eig', (['gramian'], {}), '(gramian)\n', (6757, 6766), True, 'import numpy as np\n'), ((7364, 7383), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (7377, 7383), False, 'import copy\n'), ((544, 580), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (567, 580), False, 'import importlib\n'), ((1719, 1732), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1725, 1732), True, 'import numpy as np\n'), ((1734, 1747), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1740, 1747), True, 'import numpy as np\n'), ((2289, 2302), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2295, 2302), True, 'import numpy as np\n'), ((2304, 2317), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2310, 2317), True, 'import numpy as np\n'), ((3484, 3494), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (3490, 3494), True, 'import numpy as np\n'), ((3496, 3506), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (3502, 3506), True, 'import numpy as np\n'), ((5324, 5345), 'numpy.abs', 'np.abs', (['parameter_box'], {}), '(parameter_box)\n', (5330, 5345), True, 'import numpy as np\n'), ((5931, 6024), 'numpy.clip', 'np.clip', (['d_theta[k]', '(parameter_box[0] - theta_n_lambda)', '(parameter_box[1] - theta_n_lambda)'], {}), '(d_theta[k], parameter_box[0] - theta_n_lambda, parameter_box[1] -\n theta_n_lambda)\n', (5938, 6024), True, 'import numpy as np\n'), ((4497, 4523), 'numpy.identity', 'np.identity', (['phi.shape[-1]'], {}), '(phi.shape[-1])\n', (4508, 4523), True, 'import numpy as np\n'), ((4737, 4757), 'numpy.sqrt', 'np.sqrt', (['(lambda_ * d)'], {}), '(lambda_ * d)\n', (4744, 
4757), True, 'import numpy as np\n'), ((5504, 5519), 'numpy.sqrt', 'np.sqrt', (['beta_n'], {}), '(beta_n)\n', (5511, 5519), True, 'import numpy as np\n'), ((5522, 5539), 'numpy.linalg.inv', 'np.linalg.inv', (['pp'], {}), '(pp)\n', (5535, 5539), True, 'import numpy as np\n'), ((5550, 5569), 'numpy.sqrt', 'np.sqrt', (['(1 / values)'], {}), '(1 / values)\n', (5557, 5569), True, 'import numpy as np\n'), ((5593, 5651), 'itertools.product', 'itertools.product', (['[-1, 1]'], {'repeat': 'theta_n_lambda.shape[0]'}), '([-1, 1], repeat=theta_n_lambda.shape[0])\n', (5610, 5651), False, 'import itertools\n'), ((7470, 7481), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7478, 7481), True, 'import numpy as np\n'), ((7500, 7513), 'numpy.array', 'np.array', (['phi'], {}), '(phi)\n', (7508, 7513), True, 'import numpy as np\n'), ((4461, 4478), 'numpy.transpose', 'np.transpose', (['phi'], {}), '(phi)\n', (4473, 4478), True, 'import numpy as np\n'), ((4545, 4570), 'numpy.linalg.inv', 'np.linalg.inv', (['g_n_lambda'], {}), '(g_n_lambda)\n', (4558, 4570), True, 'import numpy as np\n'), ((4573, 4590), 'numpy.transpose', 'np.transpose', (['phi'], {}), '(phi)\n', (4585, 4590), True, 'import numpy as np\n'), ((2419, 2444), 'numpy.array', 'np.array', (['[length, width]'], {}), '([length, width])\n', (2427, 2444), True, 'import numpy as np\n'), ((3654, 3667), 'numpy.squeeze', 'np.squeeze', (['p'], {}), '(p)\n', (3664, 3667), True, 'import numpy as np\n'), ((6793, 6809), 'numpy.amax', 'np.amax', (['eig_phi'], {}), '(eig_phi)\n', (6800, 6809), True, 'import numpy as np\n'), ((6812, 6826), 'numpy.amin', 'np.amin', (['eig_g'], {}), '(eig_g)\n', (6819, 6826), True, 'import numpy as np\n'), ((4673, 4698), 'numpy.linalg.det', 'np.linalg.det', (['g_n_lambda'], {}), '(g_n_lambda)\n', (4686, 4698), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""This module contains the pyposmat engine for parameterization"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2016,2017,2018"
__license__ = "Simplified BSD License"
__version__ = "1.0"
import time,sys,os,copy,shutil,importlib
from collections import OrderedDict
import numpy as np
import scipy.stats
from pypospack.kde import Chiu1999_h
from pypospack.pyposmat.engines import PyposmatEngine
from pypospack.pyposmat.data import PyposmatConfigurationFile
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.pyposmat.data import PyposmatLogFile
from pypospack.pyposmat.data import PyposmatBadParametersFile
from pypospack.task.task_manager import PypospackTaskManagerError
from pypospack.potential import PotentialObjectMap
# necessary exceptions to handle for this class
from numpy.linalg import LinAlgError
from pypospack.exceptions import LammpsSimulationError
from pypospack.exceptions import PyposmatBadParameterError
from pypospack.exceptions import PypospackBadKdeBandwidthType
from pypospack.exceptions import PypospackBadEamEosError
class PyposmatMonteCarloSampler(PyposmatEngine):
def __init__(self,
filename_in='pyposmat.config.in',
filename_out='pyposmat.results.out',
o_log=None,
mpi_rank=None,
mpi_size=None,
base_directory=None):
"""Additional attributes are set by the base class :obj:PyposmatEngine
Args:
filename_in (str) - path of the configuration file
filename_out (str) - path of the output file
o_log (PyposmatLogFile) - if type(o_log) is a string, then the string is treated as a path in which to log information to. If type(o_log) is PyposmatLogFile then it is set as an attribute for the refernce.
mpi_rank (int)
mpi_size (int)
base_directory (str,optional): Either the relative or full path which provides a
unique drive addressing space for simultaneously running simulations.
Attributes:
mpi_rank (int) - this is passed in
mpi_size (int) - this is passed in
pyposmat_data_in_filename (str) - the path of the datafile to read in
pyposmat_data_out_filename (str) - the path of the datafile to write simulation results to
"""
assert isinstance(filename_in,str)
assert isinstance(filename_out,str)
assert type(base_directory) in [str,type(None)]
PyposmatEngine.__init__(self,
filename_in=filename_in,
filename_out=filename_out,
base_directory=base_directory,
fullauto=False)
if mpi_rank is None:
self.mpi_rank = 0
else:
self.mpi_rank = mpi_rank
if mpi_size is None:
self.mpi_size = 1
else:
self.mpi_size = mpi_size
assert self.mpi_rank < self.mpi_size
self.mpi_rank=mpi_rank
self.mpi_size=mpi_size
self.pyposmat_data_in_filename = None
self.pyposmat_data_out_filename = filename_out
self.pyposmat_data_bad_filename = 'pypospack.badparameters.out'
try:
self.configure_logger(o_log)
except TypeError as e:
m = "Unable to to configure obj_log based on attribute log:{}".format(str(o_log))
raise TypeError(m)
def configure_logger(self,o_log=None):
"""
Configurtion of the log object has different behavior based upon the type passed
into the argument o_log. If o_log is PyposmatLogFile, that object will be accessed
by reference. A string is assumed to be a filename location. By default the
argument for o_log is None, which means logging will go to standard out by means of
the print() function.
Args:
o_log (str,PyposmatLogFile,None): default: None
"""
if type(o_log) is PyposmatLogFile:
self.obj_log = o_log
elif type(o_log) is str:
self.obj_log = PyposmatLogFile(filename=o_log)
elif o_log is None:
self.obj_log = None
else:
m = "log object must be str, PyposmatLogFile, or None"
raise TypeError(m)
def log(self,str_msg):
if type(str_msg) is str:
m = str_msg
elif type(str_msg) is list:
m = "\n".join(str_msg)
if type(self.obj_log) is PyposmatLogFile:
self.obj_log.write(m)
print(m)
def configure_pyposmat_datafile_in(self,filename):
self.pyposmat_data_in_filename = filename
self.pyposmat_datafile_in = PyposmatDataFile(filename)
def configure_pyposmat_datafile_out(self,filename=None):
if filename is not None:
assert type(filename) is str
self.pyposmat_data_out_filename = filename
self.pyposmat_datafile_out = PyposmatDataFile(filename)
def configure_pyposmat_badparameters_file(self,filename=None):
if filename is not None:
assert type(filename) is str
self.pyposmat_badparameters_filename = filename
self.pyposmat_badparameters = PyposmatBadParametersFile(filename)
def read_configuration_file(self,filename=None):
PyposmatEngine.read_configuration_file(self,filename=filename)
# self.structure_directory = self.configuration.structures['structure_directory']
self.n_iterations = self.configuration.sampling_type['n_iterations']
self.parameter_names = [p for p in self.configuration.sampling_distribution]
self.qoi_names = [k for k in self.configuration.qois]
self.error_names = ['{}.err'.format(k) for k in self.qoi_names]
self.parameter_distribution_definition =\
self.configuration.sampling_distribution
try:
self.free_parameter_names = [k for k,v in self.parameter_distribution_definition.items() if v[0] != 'equals']
except KeyError as e:
print(self.parameter_distribution_definition.items())
raise
if self.configuration.sampling_constraints is not None:
self.parameter_constraints = copy.deepcopy(self.configuration.sampling_constraints)
else:
self.parameter_constraints = OrderedDict()
self.constrained_parameter_names = []
for p in self.parameter_names:
if p not in self.free_parameter_names:
self.constrained_parameter_names.append(p)
def run_simulations(self,i_iteration,n_samples=None,filename=None):
"""
Args:
i_iteration(int): the iteration cycle we are on.
n_samples(int,optional): the number of parameters to evaluate
filename(str,optional): the filename
"""
assert type(i_iteration) is int
assert type(n_samples) in [type(None),int]
assert type(filename) in [type(None),str]
i = i_iteration
_sampling_type = self.configuration.sampling_type[i]['type']
_n_samples = self.configuration.sampling_type[i]['n_samples']
if self.mpi_rank == 0:
m = ["R{}: Starting iteration N={}".format(self.mpi_rank,i_iteration)]
if _sampling_type is "from_file":
m += ["R{}: Sampling parameters from {}".format(
self.mpi_rank,filename)]
else:
m += ["R{}: Attemping n_samples={} with sampling_type={}".format(
self.mpi_rank,_n_samples,_sampling_type)]
if filename is not None:
m += ["R{}: Using file:{}".format(self.mpi_rank,filename)]
self.log(m)
if n_samples is not None:
_n_samples = n_samples
if _sampling_type == 'parametric':
self.run_parameteric_sampling(n_samples=_n_samples)
elif _sampling_type == 'kde':
if filename is None:
raise ValueError('cannot do kde sampling with out filename')
self.run_kde_sampling(n_samples=_n_samples,filename_in=filename)
elif _sampling_type == 'from_file':
if filename is None:
raise ValueError('cannot do filesampling without file')
self.run_file_sampling(filename)
else:
raise ValueError(
'unknown sampling type:{}'.format(
_sampling_type)
)
def write_badparameters_header(self):
self.pyposmat_badparameters.write_header_section(
filename=self.pyposmat_badparameters_filename,
parameter_names=self.parameter_names)
def write_data_out_header(self):
self.pyposmat_datafile_out.write_header_section(
filename=self.pyposmat_data_out_filename,
parameter_names=self.parameter_names,
qoi_names=self.qoi_names,
error_names=self.error_names)
def get_sim_id(self,i,s=None):
if s is not None:
return s
elif isinstance(i,int):
return str(i)
else:
m = 'cannot determine sim_id from i:{} and s:{}'.format(i,s)
raise TypeError(m)
def run_parameteric_sampling(self,n_samples):
# create random number generator
_rv_generators = OrderedDict()
for p in self.free_parameter_names:
distribution_type = self.parameter_distribution_definition[p][0]
if distribution_type == 'uniform':
_a = self.parameter_distribution_definition[p][1]['a']
_b = self.parameter_distribution_definition[p][1]['b']
_loc = _a
_scale = _b-_a
_rv_generators[p] = scipy.stats.uniform(loc=_loc,scale=_scale)
elif distribution_type == 'normal':
_mu = self.parameter_distribution_definition[p][1]['mu']
_sigma = self.parameter_distribution_definition[p][1]['sigma']
_loc = _mu
_scale = _sigma
_rv_generators[p] = scipy.stats.norm(loc=_loc,scale=_scale)
else:
raise ValueError('unknown distribution type: {}'.format(
distribution_type))
self.write_data_out_header()
time_start_iteration = time.time()
_n_errors = 0
for i_sample in range(n_samples):
# determin sim_id
self.get_sim_id(i=i_sample)
# new OrderedDict to hold in parameter values
_parameters = OrderedDict([(p,None) for p in self.parameter_names])
# generate free parameters for ordered dictionary
for p in self.free_parameter_names:
_parameters[p] = _rv_generators[p].rvs(size=1)[0]
# determine parameters determined from equality constraints
for p in self.constrained_parameter_names:
_constraint_type = self.parameter_distribution_definition[p][0]
if _constraint_type == 'equals':
# this condition is for fitting EoS for EAM function which
# requires a refernce ground state crystal structure
if p.endswith('latticetype'):
_v = self.parameter_distribution_definition[p][1]
_parameters[p] = _v
# process evaluation strings
elif type(self.parameter_distribution_definition[p][1]) is not list:
_str_eval = str(self.parameter_distribution_definition[p][1])
# replace string values with numerical values
for fp in self.free_parameter_names:
if fp in _str_eval:
_str_eval = _str_eval.replace(fp,str(_parameters[fp]))
# evaluate the string into a float
_parameters[p] = eval(_str_eval)
else:
raise ValueError("oops")
# additional tasks added here
for p in self.constrained_parameter_names:
if self.parameter_distribution_definition[p][0] == 'equals':
if type(self.parameter_distribution_definition[p][1]) is list:
# required for EAM potentials to calculate dens_max for embedding function
if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density':
a0 = self.parameter_distribution_definition[p][1][1]
latt = self.parameter_distribution_definition[p][1][2]
_parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters)
try:
# check constraints
for k,v in self.parameter_constraints.items():
_eval_str = v
for pn,pv in _parameters.items():
_eval_str = _eval_str.replace(pn,str(pv))
if eval(_eval_str) is False:
m = "failed parameter constraint, {}".format(k)
raise PyposmatBadParameterError()
_results = self.evaluate_parameter_set(parameters=_parameters)
except PyposmatBadParameterError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except LammpsSimulationError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except PypospackTaskManagerError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except PypospackBadEamEosError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
else:
#if type(sim_id) is float:
# _sim_id = int(sim_id)
_sim_id = "{}".format(i_sample)
self.pyposmat_datafile_out.write_simulation_results(
filename=self.pyposmat_data_out_filename,
sim_id=_sim_id,
results=_results)
finally:
# print out summaries every 10 solutions
if (i_sample+1)%10 == 0:
n_samples_completed = i_sample+1
time_end = time.time()
time_total = time_end-time_start_iteration
avg_time = time_total/n_samples_completed
_str_msg = 'R{}:{} samples completed in {:.4f}s. Avg_time = {:.4f}. n_errors = {}'.format(
self.mpi_rank,
n_samples_completed,
time_total,
avg_time,
_n_errors)
self.log(_str_msg)
def get_options_kde_bandwidth(self):
"""
Returns:
OrderedDict
"""
kde_options = OrderedDict()
kde_options['chiu1999'] = OrderedDict()
kde_options['chiu1999']['reference'] = '<NAME>. Ann. Stat. 1991, Vol. 19, No 4. 1883-1905'
kde_options['chiu1999']['doi'] = '10.1214/aos/1176348376'
kde_options['chiu1999']['description'] = ""
kde_options['silverman1984'] = OrderedDict()
kde_options['silverman1984']['reference'] = '<NAME>. (1986). Density Estimation for Statistics and Data Analysis. London: Chapman & Hall/CRC. p. 48'
kde_options['silverman1984']['isbn'] = '0-412-24620-1'
def determine_kde_bandwidth(self,X,kde_bw_type):
""" determine kde bandwidth
Args:
X(np.ndarray): array of data to determine the KDE bandwidth
kde_bw_type(str): the method of estimating the optimal bandwidth
"""
if self.mpi_rank == 0:
self.log('determine kde bandwidth...')
if kde_bw_type == 'chiu1999':
try:
h = Chiu1999_h(X)
except ValueError as e:
print(X)
raise
elif kde_bw_type == 'silverman1985':
h = Silverman1986
else:
m = 'kde_bw_type, {}, is not an implemented bandwidth type'
raise PypospackBadKdeBandwidthType(m)
if self.mpi_rank == 0:
self.log('{}:{}'.format(kde_bw_type,h))
self.kde_bw_type = kde_bw_type
self.kde_bw = h
return self.kde_bw
    def run_kde_sampling(self,n_samples,filename_in,cluster_id=None,kde_bw_type='chiu1999'):
        """ sample from a KDE distribution

        Fits a Gaussian KDE to the parameter sets in an existing datafile,
        draws candidate parameter sets from it, resolves constrained
        parameters, evaluates each candidate, and records failures in the
        bad-parameters file.

        Args:
            n_samples(int): the number of samples to draw from the KDE distribution
            filename_in(str): the path to the datafile from which the parameters will be drawn from
            cluster_id(int): if we need to use a specific cluster_id, we specify it here.
                otherwise, it will be drawn from all parameters contained within the set.
            kde_bw_type(str): the method of estimating the optimal bandwidth
        """
        _datafile_in = PyposmatDataFile()
        _datafile_in.read(filename_in)
        if cluster_id is None:
            # columns of the dataframe indexed by free parameter name;
            # transposed so each row of _X is one parameter dimension
            _free_parameter_names = [str(v) for v in self.free_parameter_names]
            _X = _datafile_in.df[_free_parameter_names].values.T
        else:
            # subselect the dataframe by the cluster_id of interest
            _datafile_in.df = _datafile_in.df.loc[_datafile_in.df['cluster_id'] == cluster_id]
            _X = _datafile_in.df[self.free_parameter_names].loc[_datafile_in.df['cluster_id'] == cluster_id].values.T
            # self.log.write("cluster_id {c} _X.shape={x}".format(c=cluster_id, x=_X.shape))
        # estimate the bandwidth, then fit the KDE to the previous samples
        kde_bw = self.determine_kde_bandwidth(X=_X,kde_bw_type=kde_bw_type)
        _rv_generator = scipy.stats.gaussian_kde(_X,kde_bw)
        self.write_data_out_header()
        time_start_iteration = time.time()
        _n_errors = 0
        for i_sample in range(n_samples):
            # determine sim_id
            sim_id = str(i_sample)
            # new OrderedDict to hold in parameter values
            _parameters = OrderedDict([(p,None) for p in self.parameter_names])
            # generate free parameters for ordered dictionary
            _free_parameters = _rv_generator.resample(1)
            for i,v in enumerate(self.free_parameter_names):
                _parameters[v] = float(_free_parameters[i,0])
            # determine parameters determined from equality constraints
            for p in self.constrained_parameter_names:
                _constraint_type = self.parameter_distribution_definition[p][0]
                if _constraint_type == 'equals':
                    # this condition is for fitting EoS for EAM function which
                    # requires a refernce ground state crystal structure
                    if p.endswith('latticetype'):
                        _v = self.parameter_distribution_definition[p][1]
                        _parameters[p] = _v
                    # process evaluation strings
                    elif type(self.parameter_distribution_definition[p][1]) is not list:
                        _str_eval = str(self.parameter_distribution_definition[p][1])
                        # replace string values with numerical values
                        for fp in self.free_parameter_names:
                            if fp in _str_eval:
                                _str_eval = _str_eval.replace(fp,str(_parameters[fp]))
                        # evaluate the string into a float
                        _parameters[p] = eval(_str_eval)
                    else:
                        # NOTE(review): list-valued 'equals' definitions raise
                        # here, which appears to make the equilibrium_density
                        # handling in the loop below unreachable -- confirm
                        raise ValueError("oops")
            for p in self.constrained_parameter_names:
                if self.parameter_distribution_definition[p][0] == 'equals':
                    # some EAM potentials have a normalizing equilbirum density
                    # which have to be determined based upon the parameterization of
                    # the electron density function
                    if type(self.parameter_distribution_definition[p][1]) is list:
                        if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density':
                            a0 = self.parameter_distribution_definition[p][1][1]
                            latt = self.parameter_distribution_definition[p][1][2]
                            _parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters)
            try:
                # now we check parameter constraints
                for k,v in self.parameter_constraints.items():
                    _eval_str = v
                    for pn,pv in _parameters.items():
                        _eval_str = _eval_str.replace(pn,str(pv))
                    if eval(_eval_str) is False:
                        s = 'parameter constraint failed, {}'.format(k)
                        raise PyposmatBadParameterError(s)
                _results = self.evaluate_parameter_set(parameters=_parameters)
            except PyposmatBadParameterError as e:
                self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
                _n_errors += 1
            except LammpsSimulationError as e:
                self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
                _n_errors += 1
            except PypospackTaskManagerError as e:
                self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
                _n_errors += 1
            except PypospackBadEamEosError as e:
                self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
                _n_errors += 1
            else:
                # determine sim_id
                # NOTE(review): _sim_id is computed but unused; the raw
                # i_sample is passed as sim_id below -- confirm intent
                _sim_id = int(i_sample)
                self.pyposmat_datafile_out.write_simulation_results(
                        filename=self.pyposmat_data_out_filename,
                        sim_id=i_sample,
                        cluster_id=cluster_id,
                        results=_results)
            finally:
                # print out summaries every 10 solutions
                if (i_sample+1)%10 == 0:
                    n_samples_completed = i_sample+1
                    time_end = time.time()
                    time_total = time_end-time_start_iteration
                    avg_time = time_total/n_samples_completed
                    _str_msg = 'R{}:{} samples completed in {:.4f}s. Avg_time = {:.4f}. n_errors = {}'.format(
                        self.mpi_rank,
                        n_samples_completed,
                        time_total,
                        avg_time,
                        _n_errors)
                    self.log(_str_msg)
        # NOTE(review): this dictionary is built but never returned or used --
        # presumably it was meant to be returned to report the bandwidth used
        d = OrderedDict()
        d['kde_bandwidth'] = OrderedDict()
        d['kde_bandwidth']['type'] = self.kde_bw_type
        d['kde_bandwidth']['h'] = self.kde_bw
def run_file_sampling(self,filename_in):
_datafile_in = PyposmatDataFile(filename=filename_in)
_datafile_in.read()
# configure random number generator
self.write_data_out_header()
time_start_iteration = time.time()
_n_errors = 0
i_sample = 0
for row in _datafile_in.df.iterrows():
if self.mpi_rank != i_sample%self.mpi_size:
i_sample += 1
continue
else:
i_sample += 1
_parameters = OrderedDict([(p,row[1][p]) for p in self.parameter_names])
_sim_id = row[1]['sim_id']
# generate wierd things
for p in self.constrained_parameter_names:
if self.parameter_distribution_definition[p][0] == 'equals':
if type(self.parameter_distribution_definition[p][1]) is list:
if self.parameter_distribution_definition[p][1][0] == 'equilibrium_density':
a0 = self.parameter_distribution_definition[p][1][1]
latt = self.parameter_distribution_definition[p][1][2]
_parameters[p] = self.calculate_equilibrium_density(a0,latt,_parameters)
try:
# check constraints
for k,v in self.parameter_constraints.items():
_eval_str = v
for pn,pv in _parameters.items():
_eval_str = _eval_str.replace(pn,str(pv))
if eval(_eval_str) is False:
raise PyposmatBadParameterError()
_results = self.evaluate_parameter_set(parameters=_parameters)
except PyposmatBadParameterError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except LammpsSimulationError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except PypospackTaskManagerError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
except PypospackBadEamEosError as e:
self.pyposmat_badparameters.write_simulation_exception(sim_id=sim_id,exception=e)
_n_errors += 1
else:
if type(_sim_id) is float: _sim_id = int(sim_id)
self.pyposmat_datafile_out.write_simulation_results(
filename=self.pyposmat_data_out_filename,
sim_id=_sim_id,
results=_results)
finally:
# print out summaries every 10 solutions
i_sample = i_sample+1
if (i_sample)%10 == 0:
n_samples_completed = i_sample
time_end = time.time()
time_total = time_end-time_start_iteration
avg_time = time_total/n_samples_completed
_str_msg = '{} samples completed in {:.4f}s. Avg_time = {:.4f}. n_errors = {}'.format(
n_samples_completed,
time_total,
avg_time,
_n_errors)
print('rank{}:'.format(self.mpi_rank)+_str_msg)
    def calculate_equilibrium_density(self,a0,latt,parameters):
        """Estimate the equilibrium electron density at a lattice site.

        Builds the density-potential class named in the configuration,
        evaluates it on a radial grid, and sums the contributions of the
        nearest-neighbor shells that fall inside the cutoff radius.

        Args:
            a0: lattice constant.
            latt (str): lattice type; shell distances are only defined
                for 'fcc'.
            parameters: mapping of potential parameters; keys prefixed
                with 'd_' select the density-function parameters.

        Returns:
            float: accumulated equilibrium electron density rho_e.
        """
        # collect the density-function parameters (keys prefixed 'd_'),
        # stripping the prefix
        _parameters = OrderedDict()
        for k,v in parameters.items():
            if k.startswith('d_'):
                _parameters[k[2:]] = v
                # NOTE(review): `s` retains only the symbol of the LAST
                # 'd_' parameter seen and indexes `rho` below -- verify this
                # is intended for multi-symbol potentials
                s = k[2:].split('_')[0]
        _potential_type = self.configuration.potential['density_type']
        _symbols = self.configuration.potential['symbols']
        # resolve the density-potential implementation (module, class) by name
        _module_name,_class_name = PotentialObjectMap(
                potential_type=_potential_type)
        try:
            _module = importlib.import_module(_module_name)
            _class = getattr(_module,_class_name)
            _dens_potential = _class(symbols=_symbols)
        except:
            raise
        # nearest-neighbor shell distances (d) and coordination numbers (Z)
        if latt == 'fcc':
            d = OrderedDict([
                ('1NN',2/(2**0.5)*a0),
                ('2NN',1.000*a0),
                ('3NN',1.225*a0)])
            Z= OrderedDict([
                ('1NN',12),
                ('2NN',6),
                ('3NN',24)])
            # cutoff halfway between the 2nd and 3rd neighbor shells
            rcut = (d['2NN']+d['3NN'])/2.
            rmax = 10.
        # NOTE(review): d, Z, rcut and rmax are only defined when
        # latt=='fcc'; any other lattice raises NameError here -- confirm
        # the fcc-only intent
        r = np.linspace(1,10,5000)*rmax/10
        rho = _dens_potential.evaluate(r,_parameters,rcut)
        rho_e = 0
        # sum the shell contributions inside the cutoff, interpolating the
        # tabulated density at each shell distance
        for m in Z:
            if d[m] < rcut:
                rho_e += Z[m]*np.interp(d[m],r,rho[s])
        return rho_e
def print_structure_database(self):
m = [
80*'-',
'{:^80}'.format('STRUCTURE DATABASE'),
80*'-',
'structure_directory:{}'.format(self.structure_directory),
'',
'{:^20} {:^20}'.format('name','filename'),
'{} {}'.format(20*'-',20*'-')
]
m += ['{:20} {:20}'.format(k,v) for k,v in self.structures['structures'].items()]
self.log(m)
def print_sampling_configuration(self):
print(80*'-')
print('{:^80}'.format('SAMPLING CONFIGURATION'))
print(80*'-')
print('{:^10} {:^10} {:^20}'.format(
'iteration',
'n_samples',
'sampling_type'))
print('{} {} {}'.format(10*'-',10*'-',20*'-'))
for i in range(self.n_iterations):
_sample_type = self.configuration.sampling_type[i]['type']
if _sample_type == 'kde_w_clusters':
_n_samples = self.configuration.sampling_type[i]['n_samples_per_cluster']
else:
_n_samples = self.configuration.sampling_type[i]['n_samples']
print('{:^10} {:^10} {:^20}'.format(i,_n_samples,_sample_type))
def print_initial_parameter_distribution(self):
print(80*'-')
print('{:80}'.format('INITIAL PARAMETER DISTRIBUTION'))
print(80*'-')
for p in self.parameter_distribution_definition:
if p in self.free_parameter_names:
str_free = 'free'
if self.parameter_distribution_definition[p][0] == 'uniform':
print('{:^20} {:^10} {:^10} {:^10} {:^10}'.format(
p,
str_free,
self.parameter_distribution_definition[p][0],
self.parameter_distribution_definition[p][1]['a'],
self.parameter_distribution_definition[p][1]['b']))
elif self.parameter_distribution_definition[p][0] == 'normal':
print('{:^20} {:^10} {:^10} {:^10} {:^10}'.format(
p,
str_free,
self.parameter_distribution_definition[p][0],
self.parameter_distribution_definition[p][1]['mu'],
self.parameter_distribution_definition[p][1]['sigma']))
else:
_distribution_type = self.parameter_distribution_defintion[p][0]
s = "incorrection parameter distribution for parameter {}. probability distribution function, {}, is not supported"
s = s.format(p,_distribution_type)
raise ValueError(s)
else:
str_free = 'not_free'
print('{:^20} {:^10}'.format(p,str_free))
| [
"copy.deepcopy",
"importlib.import_module",
"pypospack.pyposmat.engines.PyposmatEngine.read_configuration_file",
"pypospack.exceptions.PypospackBadKdeBandwidthType",
"numpy.interp",
"pypospack.pyposmat.data.PyposmatLogFile",
"pypospack.kde.Chiu1999_h",
"pypospack.pyposmat.engines.PyposmatEngine.__init... | [((2493, 2626), 'pypospack.pyposmat.engines.PyposmatEngine.__init__', 'PyposmatEngine.__init__', (['self'], {'filename_in': 'filename_in', 'filename_out': 'filename_out', 'base_directory': 'base_directory', 'fullauto': '(False)'}), '(self, filename_in=filename_in, filename_out=\n filename_out, base_directory=base_directory, fullauto=False)\n', (2516, 2626), False, 'from pypospack.pyposmat.engines import PyposmatEngine\n'), ((4687, 4713), 'pypospack.pyposmat.data.PyposmatDataFile', 'PyposmatDataFile', (['filename'], {}), '(filename)\n', (4703, 4713), False, 'from pypospack.pyposmat.data import PyposmatDataFile\n'), ((4942, 4968), 'pypospack.pyposmat.data.PyposmatDataFile', 'PyposmatDataFile', (['filename'], {}), '(filename)\n', (4958, 4968), False, 'from pypospack.pyposmat.data import PyposmatDataFile\n'), ((5210, 5245), 'pypospack.pyposmat.data.PyposmatBadParametersFile', 'PyposmatBadParametersFile', (['filename'], {}), '(filename)\n', (5235, 5245), False, 'from pypospack.pyposmat.data import PyposmatBadParametersFile\n'), ((5308, 5371), 'pypospack.pyposmat.engines.PyposmatEngine.read_configuration_file', 'PyposmatEngine.read_configuration_file', (['self'], {'filename': 'filename'}), '(self, filename=filename)\n', (5346, 5371), False, 'from pypospack.pyposmat.engines import PyposmatEngine\n'), ((9358, 9371), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9369, 9371), False, 'from collections import OrderedDict\n'), ((10353, 10364), 'time.time', 'time.time', ([], {}), '()\n', (10362, 10364), False, 'import time, sys, os, copy, shutil, importlib\n'), ((15272, 15285), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15283, 15285), False, 'from collections import OrderedDict\n'), ((15320, 15333), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15331, 15333), False, 'from collections import OrderedDict\n'), ((15590, 15603), 'collections.OrderedDict', 'OrderedDict', ([], {}), 
'()\n', (15601, 15603), False, 'from collections import OrderedDict\n'), ((17367, 17385), 'pypospack.pyposmat.data.PyposmatDataFile', 'PyposmatDataFile', ([], {}), '()\n', (17383, 17385), False, 'from pypospack.pyposmat.data import PyposmatDataFile\n'), ((18198, 18209), 'time.time', 'time.time', ([], {}), '()\n', (18207, 18209), False, 'import time, sys, os, copy, shutil, importlib\n'), ((23176, 23189), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23187, 23189), False, 'from collections import OrderedDict\n'), ((23219, 23232), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23230, 23232), False, 'from collections import OrderedDict\n'), ((23407, 23445), 'pypospack.pyposmat.data.PyposmatDataFile', 'PyposmatDataFile', ([], {'filename': 'filename_in'}), '(filename=filename_in)\n', (23423, 23445), False, 'from pypospack.pyposmat.data import PyposmatDataFile\n'), ((23588, 23599), 'time.time', 'time.time', ([], {}), '()\n', (23597, 23599), False, 'import time, sys, os, copy, shutil, importlib\n'), ((26849, 26862), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26860, 26862), False, 'from collections import OrderedDict\n'), ((27177, 27227), 'pypospack.potential.PotentialObjectMap', 'PotentialObjectMap', ([], {'potential_type': '_potential_type'}), '(potential_type=_potential_type)\n', (27195, 27227), False, 'from pypospack.potential import PotentialObjectMap\n'), ((6227, 6281), 'copy.deepcopy', 'copy.deepcopy', (['self.configuration.sampling_constraints'], {}), '(self.configuration.sampling_constraints)\n', (6240, 6281), False, 'import time, sys, os, copy, shutil, importlib\n'), ((6337, 6350), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6348, 6350), False, 'from collections import OrderedDict\n'), ((10585, 10639), 'collections.OrderedDict', 'OrderedDict', (['[(p, None) for p in self.parameter_names]'], {}), '([(p, None) for p in self.parameter_names])\n', (10596, 10639), False, 'from collections import 
OrderedDict\n'), ((18447, 18501), 'collections.OrderedDict', 'OrderedDict', (['[(p, None) for p in self.parameter_names]'], {}), '([(p, None) for p in self.parameter_names])\n', (18458, 18501), False, 'from collections import OrderedDict\n'), ((23876, 23935), 'collections.OrderedDict', 'OrderedDict', (['[(p, row[1][p]) for p in self.parameter_names]'], {}), '([(p, row[1][p]) for p in self.parameter_names])\n', (23887, 23935), False, 'from collections import OrderedDict\n'), ((27280, 27317), 'importlib.import_module', 'importlib.import_module', (['_module_name'], {}), '(_module_name)\n', (27303, 27317), False, 'import time, sys, os, copy, shutil, importlib\n'), ((27500, 27585), 'collections.OrderedDict', 'OrderedDict', (["[('1NN', 2 / 2 ** 0.5 * a0), ('2NN', 1.0 * a0), ('3NN', 1.225 * a0)]"], {}), "([('1NN', 2 / 2 ** 0.5 * a0), ('2NN', 1.0 * a0), ('3NN', 1.225 *\n a0)])\n", (27511, 27585), False, 'from collections import OrderedDict\n'), ((27637, 27688), 'collections.OrderedDict', 'OrderedDict', (["[('1NN', 12), ('2NN', 6), ('3NN', 24)]"], {}), "([('1NN', 12), ('2NN', 6), ('3NN', 24)])\n", (27648, 27688), False, 'from collections import OrderedDict\n'), ((4079, 4110), 'pypospack.pyposmat.data.PyposmatLogFile', 'PyposmatLogFile', ([], {'filename': 'o_log'}), '(filename=o_log)\n', (4094, 4110), False, 'from pypospack.pyposmat.data import PyposmatLogFile\n'), ((16249, 16262), 'pypospack.kde.Chiu1999_h', 'Chiu1999_h', (['X'], {}), '(X)\n', (16259, 16262), False, 'from pypospack.kde import Chiu1999_h\n'), ((16526, 16557), 'pypospack.exceptions.PypospackBadKdeBandwidthType', 'PypospackBadKdeBandwidthType', (['m'], {}), '(m)\n', (16554, 16557), False, 'from pypospack.exceptions import PypospackBadKdeBandwidthType\n'), ((14666, 14677), 'time.time', 'time.time', ([], {}), '()\n', (14675, 14677), False, 'import time, sys, os, copy, shutil, importlib\n'), ((22687, 22698), 'time.time', 'time.time', ([], {}), '()\n', (22696, 22698), False, 'import time, sys, os, copy, shutil, 
importlib\n'), ((26300, 26311), 'time.time', 'time.time', ([], {}), '()\n', (26309, 26311), False, 'import time, sys, os, copy, shutil, importlib\n'), ((27817, 27841), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(5000)'], {}), '(1, 10, 5000)\n', (27828, 27841), True, 'import numpy as np\n'), ((13236, 13263), 'pypospack.exceptions.PyposmatBadParameterError', 'PyposmatBadParameterError', ([], {}), '()\n', (13261, 13263), False, 'from pypospack.exceptions import PyposmatBadParameterError\n'), ((21301, 21329), 'pypospack.exceptions.PyposmatBadParameterError', 'PyposmatBadParameterError', (['s'], {}), '(s)\n', (21326, 21329), False, 'from pypospack.exceptions import PyposmatBadParameterError\n'), ((24941, 24968), 'pypospack.exceptions.PyposmatBadParameterError', 'PyposmatBadParameterError', ([], {}), '()\n', (24966, 24968), False, 'from pypospack.exceptions import PyposmatBadParameterError\n'), ((28024, 28050), 'numpy.interp', 'np.interp', (['d[m]', 'r', 'rho[s]'], {}), '(d[m], r, rho[s])\n', (28033, 28050), True, 'import numpy as np\n')] |
# Plot cumulative Zika case data against the enriched-model prediction bands,
# with an inset zooming into the epidemic take-off region.
import matplotlib
from matplotlib import rc
import matplotlib.pyplot as plt
import numpy as np
from numpy import loadtxt
from scipy.interpolate import interp1d
from operator import add
from operator import sub
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes

# render all text with LaTeX
rc('text', usetex=True)
font = {'family': 'serif',
        'weight': 'normal',
        'size': 16}
matplotlib.rc('font', **font)

n_s = 7          # number of model states
dim = n_s + 1
n_weeks = 52
SAVE = 0         # save figures (1) or not (0)

# load case corresponding to reporting factor
# 0: no under-reporting, 1: 10% under, 2: 50% under
rep = 0
if rep == 0:
    rep_factor = 1.
elif rep == 1:
    rep_factor = 10./9.
elif rep == 2:
    rep_factor = 2.

dataFile = "../inputs/data.txt"; data = loadtxt(dataFile, comments="%")
times = np.linspace(1, 52, 52, endpoint=True)
state = np.linspace(1, 52, 52, endpoint=True)
# accumulate the weekly counts into a cumulative series, scaled by the
# under-reporting factor
state[0] = rep_factor * data[0, 1]
for i in range(1, len(state)):
    state[i] = state[i-1] + rep_factor * data[i, 1]

# load reduced model output if needed
dataFile = "../inputs/data-red.txt"
data = loadtxt(dataFile, comments="%")
red = data[7:417:8, 2]

# QoI statistics file matching the chosen reporting factor
if rep == 0:
    dataFile = "qoi-stats"
elif rep == 1:
    dataFile = "qoi-stats-rep10p"
elif rep == 2:
    dataFile = "qoi-stats-rep50p"
q = loadtxt(dataFile, comments="%")

# extract the mean and the four quantile bands of the cumulative-cases QoI
rmean = []
r1 = []; r2 = []; r3 = []; r4 = []
for j in range(0, n_weeks):
    rmean.append(q[j*dim+7, 0])
    r1.append(q[j*dim+7, 1])
    r2.append(q[j*dim+7, 2])
    r3.append(q[j*dim+7, 3])
    r4.append(q[j*dim+7, 4])

fig, ax = plt.subplots()
# NOTE: labels use raw strings so the LaTeX '\%' escape survives intact
if rep == 0:
    ax.plot(times, state, '^', color='C0', markersize=7, label='Data')
elif rep == 1:
    ax.plot(times, state, '^', color='C0', markersize=7,
            label=r'Modified data, with 10\% under-reporting')
elif rep == 2:
    ax.plot(times, state, '^', color='C0', markersize=7,
            label=r'Modified data, with 50\% under-reporting')
ax.fill_between(times, r2, r3, facecolor='C3', alpha=.4)
ax.fill_between(times, r1, r4, facecolor='C3', alpha=.1)
ax.plot(times, rmean, color='C3', linewidth=2, label='Enriched model')

plt.xlabel('Epidemiological week')
plt.ylabel('Cumulative number of cases')  # fixed typo: was "Cummulative"
plt.ticklabel_format(axis='y', style='sci', scilimits=(3, 0))
plt.tight_layout()
ax.locator_params(nbins=10)
ax.legend(loc=0)

# inset axes zooming into the take-off region; replot the same series
axins = zoomed_inset_axes(ax, 1.4, loc='center right')
axins.plot(times, state, '^', color='C0', markersize=7, label='Data')
axins.fill_between(times, r2, r3, facecolor='C3', alpha=.4)
axins.fill_between(times, r1, r4, facecolor='C3', alpha=.1)
axins.plot(times, rmean, color='C3', linewidth=2, label='Enriched model')
# set zoom limits
if rep == 2:
    x1, x2, y1, y2 = 12, 30, 320000, 550000
else:
    x1, x2, y1, y2 = 12, 30, 175000, 300000
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
plt.xticks(visible=False)
plt.yticks(visible=False)
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")

# BUG FIX: the original tested `rep == 0 & SAVE == 1`, which Python parses as
# the chained comparison `rep == (0 & SAVE) == 1` because `&` binds tighter
# than `==`; that is always False, so figures were never saved.
if rep == 0 and SAVE == 1:
    plt.savefig('/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-enr.pdf')
elif rep == 1 and SAVE == 1:
    plt.savefig('/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep10p.pdf')
elif rep == 2 and SAVE == 1:
    plt.savefig('/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep50p.pdf')
plt.show()
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.rc",
"matplotlib.pyplot.show",
"mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes",
"matplotlib.pyplot.yticks",
"numpy.loadtxt",
"numpy.linspace",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"mp... | [((280, 303), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (282, 303), False, 'from matplotlib import rc\n'), ((370, 399), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (383, 399), False, 'import matplotlib\n'), ((710, 741), 'numpy.loadtxt', 'loadtxt', (['dataFile'], {'comments': '"""%"""'}), "(dataFile, comments='%')\n", (717, 741), False, 'from numpy import loadtxt\n'), ((781, 818), 'numpy.linspace', 'np.linspace', (['(1)', '(52)', '(52)'], {'endpoint': '(True)'}), '(1, 52, 52, endpoint=True)\n', (792, 818), True, 'import numpy as np\n'), ((824, 861), 'numpy.linspace', 'np.linspace', (['(1)', '(52)', '(52)'], {'endpoint': '(True)'}), '(1, 52, 52, endpoint=True)\n', (835, 861), True, 'import numpy as np\n'), ((1071, 1102), 'numpy.loadtxt', 'loadtxt', (['dataFile'], {'comments': '"""%"""'}), "(dataFile, comments='%')\n", (1078, 1102), False, 'from numpy import loadtxt\n'), ((1291, 1322), 'numpy.loadtxt', 'loadtxt', (['dataFile'], {'comments': '"""%"""'}), "(dataFile, comments='%')\n", (1298, 1322), False, 'from numpy import loadtxt\n'), ((1542, 1556), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1554, 1556), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epidemiological week"""'], {}), "('Epidemiological week')\n", (2243, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cummulative number of cases"""'], {}), "('Cummulative number of cases')\n", (2278, 2309), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2371), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""y"""', 'style': '"""sci"""', 'scilimits': '(3, 0)'}), "(axis='y', style='sci', scilimits=(3, 0))\n", (2330, 2371), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2387), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', 
(2385, 2387), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2763), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax', '(1.4)'], {'loc': '"""center right"""'}), "(ax, 1.4, loc='center right')\n", (2734, 2763), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\n'), ((3238, 3263), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'visible': '(False)'}), '(visible=False)\n', (3248, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3264, 3289), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'visible': '(False)'}), '(visible=False)\n', (3274, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3410), 'mpl_toolkits.axes_grid1.inset_locator.mark_inset', 'mark_inset', (['ax', 'axins'], {'loc1': '(1)', 'loc2': '(3)', 'fc': '"""none"""', 'ec': '"""0.5"""'}), "(ax, axins, loc1=1, loc2=3, fc='none', ec='0.5')\n", (3362, 3410), False, 'from mpl_toolkits.axes_grid1.inset_locator import mark_inset\n'), ((3779, 3789), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3787, 3789), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3540), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-enr.pdf"""'], {}), "(\n '/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-enr.pdf'\n )\n", (3450, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3664), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep10p.pdf"""'], {}), "(\n '/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep10p.pdf'\n )\n", (3571, 3664), True, 'import matplotlib.pyplot as plt\n'), ((3684, 3788), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep50p.pdf"""'], {}), "(\n '/users/rebeccam/repos/documents/papers/zika-discrepancy/rawfigs/zika-rep50p.pdf'\n )\n", (3695, 3788), True, 'import 
matplotlib.pyplot as plt\n')] |
from __future__ import print_function, absolute_import
from reid.models import model_utils as mu
from reid.utils.data import data_process as dp
from reid.utils.serialization import save_checkpoint
from reid import datasets
from reid import models
from reid.config import Config
import torch
import numpy as np
import os
import argparse
# Command-line interface: a single integer seed controls every source of
# randomness so that co-training runs are reproducible.
parser = argparse.ArgumentParser(description='Cotrain args')
parser.add_argument('-s', '--seed', type=int, default=0)
args = parser.parse_args()
# seed the CPU RNG, and the GPU RNG when CUDA is available
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
def cotrain(configs,data,iter_step=1,train_ratio=0.2):
    """Co-train two re-identification models on partially labeled data.

    Starting from a labeled subset of ``data.trainval``, each iteration
    trains both model views on the current labeled pool, checkpoints them,
    and moves the confidently pseudo-labeled samples from the unlabeled
    pool into the labeled pool.

    Args:
        configs: sequence of two model configuration objects, one per view.
        data: dataset object exposing ``trainval`` and ``images_dir``.
        iter_step: maximum number of co-training iterations (>= 1).
        train_ratio: fraction of ``data.trainval`` used as the initial
            labeled training set.
    """
    assert iter_step >= 1
    # split into an initial labeled pool and an unlabeled pool
    train_data,untrain_data = dp.split_dataset(data.trainval, train_ratio, args.seed)
    data_dir = data.images_dir
    new_train_data = train_data
    for step in range(iter_step):
        pred_probs = []
        add_ids = []
        for view in range(2):
            configs[view].set_training(True)
            # train this view's model on the current labeled pool
            model = mu.train(new_train_data, data_dir, configs[view])
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': step + 1,
                'train_data': new_train_data}, False,
                fpath = os.path.join(configs[view].logs_dir, configs[view].model_name, 'cotrain.epoch%d' % step)
            )
            if len(untrain_data) == 0:
                continue
            # predict labels for the unlabeled pool and select the samples
            # this view is confident about
            pred_probs.append(mu.predict_prob(
                model,untrain_data,data_dir,configs[view]))
            add_ids.append(dp.sel_idx(pred_probs[view], train_data))
            # calculate predicted probability on all data and report this
            # view's accuracy on the full trainval set
            p_b = mu.predict_prob(model, data.trainval, data_dir, configs[view])
            p_y = np.argmax(p_b, axis=1)
            t_y = [c for (_,c,_,_) in data.trainval]
            print(np.mean(t_y == p_y))
        if len(untrain_data) == 0:
            break
        # update training data: combine both views' predictions and move the
        # jointly selected samples from the unlabeled to the labeled pool
        pred_y = np.argmax(sum(pred_probs), axis=1)
        add_id = sum(add_ids)
        new_train_data, untrain_data = dp.update_train_untrain(
            add_id,new_train_data,untrain_data,pred_y)
# candidate model configurations
# NOTE(review): config1 (the default Config) is defined but never used below
config1 = Config()
config2 = Config(model_name='densenet121', height=224, width=224)
config3 = Config(model_name='resnet101', img_translation=2)
# resolve dataset paths relative to the current working directory
dataset = 'market1501std'
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path,'data',dataset)
data = datasets.create(dataset, data_dir)
# co-train the densenet121 and resnet101 configurations for 5 iterations
cotrain([config2,config3], data, 5)
| [
"reid.config.Config",
"argparse.ArgumentParser",
"numpy.argmax",
"os.getcwd",
"torch.manual_seed",
"reid.models.model_utils.predict_prob",
"torch.cuda.manual_seed",
"reid.models.model_utils.train",
"reid.utils.data.data_process.split_dataset",
"reid.datasets.create",
"torch.cuda.is_available",
... | [((346, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Cotrain args"""'}), "(description='Cotrain args')\n", (369, 397), False, 'import argparse\n'), ((483, 511), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (500, 511), False, 'import torch\n'), ((515, 540), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (538, 540), False, 'import torch\n'), ((2386, 2394), 'reid.config.Config', 'Config', ([], {}), '()\n', (2392, 2394), False, 'from reid.config import Config\n'), ((2405, 2460), 'reid.config.Config', 'Config', ([], {'model_name': '"""densenet121"""', 'height': '(224)', 'width': '(224)'}), "(model_name='densenet121', height=224, width=224)\n", (2411, 2460), False, 'from reid.config import Config\n'), ((2471, 2520), 'reid.config.Config', 'Config', ([], {'model_name': '"""resnet101"""', 'img_translation': '(2)'}), "(model_name='resnet101', img_translation=2)\n", (2477, 2520), False, 'from reid.config import Config\n'), ((2558, 2569), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2567, 2569), False, 'import os\n'), ((2581, 2611), 'os.path.join', 'os.path.join', (['cur_path', '"""logs"""'], {}), "(cur_path, 'logs')\n", (2593, 2611), False, 'import os\n'), ((2623, 2662), 'os.path.join', 'os.path.join', (['cur_path', '"""data"""', 'dataset'], {}), "(cur_path, 'data', dataset)\n", (2635, 2662), False, 'import os\n'), ((2668, 2702), 'reid.datasets.create', 'datasets.create', (['dataset', 'data_dir'], {}), '(dataset, data_dir)\n', (2683, 2702), False, 'from reid import datasets\n'), ((546, 579), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (568, 579), False, 'import torch\n'), ((937, 992), 'reid.utils.data.data_process.split_dataset', 'dp.split_dataset', (['data.trainval', 'train_ratio', 'args.seed'], {}), '(data.trainval, train_ratio, args.seed)\n', (953, 992), True, 'from reid.utils.data import data_process as dp\n'), ((2293, 
2362), 'reid.utils.data.data_process.update_train_untrain', 'dp.update_train_untrain', (['add_id', 'new_train_data', 'untrain_data', 'pred_y'], {}), '(add_id, new_train_data, untrain_data, pred_y)\n', (2316, 2362), True, 'from reid.utils.data import data_process as dp\n'), ((1231, 1280), 'reid.models.model_utils.train', 'mu.train', (['new_train_data', 'data_dir', 'configs[view]'], {}), '(new_train_data, data_dir, configs[view])\n', (1239, 1280), True, 'from reid.models import model_utils as mu\n'), ((1890, 1952), 'reid.models.model_utils.predict_prob', 'mu.predict_prob', (['model', 'data.trainval', 'data_dir', 'configs[view]'], {}), '(model, data.trainval, data_dir, configs[view])\n', (1905, 1952), True, 'from reid.models import model_utils as mu\n'), ((1971, 1993), 'numpy.argmax', 'np.argmax', (['p_b'], {'axis': '(1)'}), '(p_b, axis=1)\n', (1980, 1993), True, 'import numpy as np\n'), ((1671, 1732), 'reid.models.model_utils.predict_prob', 'mu.predict_prob', (['model', 'untrain_data', 'data_dir', 'configs[view]'], {}), '(model, untrain_data, data_dir, configs[view])\n', (1686, 1732), True, 'from reid.models import model_utils as mu\n'), ((1775, 1815), 'reid.utils.data.data_process.sel_idx', 'dp.sel_idx', (['pred_probs[view]', 'train_data'], {}), '(pred_probs[view], train_data)\n', (1785, 1815), True, 'from reid.utils.data import data_process as dp\n'), ((2065, 2084), 'numpy.mean', 'np.mean', (['(t_y == p_y)'], {}), '(t_y == p_y)\n', (2072, 2084), True, 'import numpy as np\n'), ((1474, 1567), 'os.path.join', 'os.path.join', (['configs[view].logs_dir', 'configs[view].model_name', "('cotrain.epoch%d' % step)"], {}), "(configs[view].logs_dir, configs[view].model_name, \n 'cotrain.epoch%d' % step)\n", (1486, 1567), False, 'import os\n')] |
#
# Raster Fairy v1.0.3,
# released 22.01.2016
#
# The purpose of Raster Fairy is to transform any kind of 2D point cloud into
# a regular raster whilst trying to preserve the neighborhood relations that
# were present in the original cloud. If you feel the name is a bit silly,
# you can also call it "RF-Transform".
#
# NOTICE: if you use this algorithm in an academic publication, paper or
# research project please cite it either as "Raster Fairy by <NAME>"
# or "RF-Transform by <NAME>"
#
#
#
# Copyright (c) 2016, <NAME>, <EMAIL>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Mario Klingemann nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from .prime import Prime
import math
def transformPointCloud2D( points2d, target = None, autoAdjustCount = True, proportionThreshold = 0.4):
    """Map a 2D point cloud onto a regular raster grid.

    Recursively slices the point cloud into quadrants until every point is
    assigned exactly one grid cell, trying to preserve the original
    neighborhood relations.

    Args:
        points2d: array of 2D points (must support len() and .copy()).
        target: None (auto-choose a rectangle), a (width, height) tuple,
            a PIL image used as a raster mask, or a raster-mask dict with
            'mask', 'count', 'width', 'height' and 'hex' keys.
        autoAdjustCount: when the mask cell count differs from the point
            count, randomly open/close mask cells so the counts match.
        proportionThreshold: minimum width/height ratio accepted for an
            auto-chosen rectangle before falling back to an incomplete
            square.

    Returns:
        (gridPoints2d, (width, height)) on success, or False when the
        target cannot hold all points.
    """
    pointCount = len(points2d)
    rasterMask = None
    if target is None:
        # pick the best rectangle arrangement; fall back to an incomplete
        # square when the rectangle is too elongated
        target = getRectArrangements(pointCount)[0]
        if (float(target[0]) / float(target[1])<proportionThreshold):
            width = int(math.sqrt(pointCount))
            height = int(math.ceil(float(pointCount)/float(width)))
            print("no good rectangle found for",pointCount,"points, using incomplete square",width,"*",height)
            target = {'width':width,'height':height,'mask':np.zeros((height,width),dtype=int), 'count':width*height, 'hex': False}
    if type(target) is tuple and len(target)==2:
        # (width, height) rectangle target
        if target[0] * target[1] < pointCount:
            print("ERROR: target rectangle is too small to hold data: Rect is",target[0],"*",target[1],"=",target[0] * target[1]," vs ",pointCount," data points")
            return False
        width = target[0]
        height = target[1]
    elif "PIL." in str(type(target)):
        # PIL image target: derive a raster mask from the bitmap
        rasterMask = getRasterMaskFromImage(target)
        width = rasterMask['width']
        height = rasterMask['height']
    elif 'mask' in target and 'count' in target and 'width' in target and 'height' in target:
        # pre-built raster mask target
        rasterMask = target
        width = rasterMask['width']
        height = rasterMask['height']
    # a grid cell is available where mask==0, so the number of free cells is
    # total cells minus sum(mask)
    if not (rasterMask is None) and rasterMask['mask'].shape[0]*rasterMask['mask'].shape[1]-np.sum( rasterMask['mask'].flat) < len(points2d):
        print("ERROR: raster mask target does not have enough grid points to hold data")
        return False
    if not (rasterMask is None) and (rasterMask['count']!=len(points2d)):
        # adjust the mask so its free-cell count matches the point count
        mask = rasterMask['mask'].flatten()
        count = len(points2d)
        if autoAdjustCount is True:
            if count > rasterMask['count']:
                # too few free cells: randomly re-open blocked cells
                ones = np.nonzero(mask)[0]
                np.random.shuffle(ones)
                mask[ones[0:count-rasterMask['count']]] = 0
            elif count < rasterMask['count']:
                # too many free cells: randomly block free cells
                zeros = np.nonzero(1-mask)[0]
                np.random.shuffle(zeros)
                mask[zeros[0:rasterMask['count']-count]] = 1
        else:
            # NOTE(review): this deterministic branch's index arithmetic
            # differs from the randomized branch above and looks suspicious
            # (may raise IndexError) -- verify before relying on
            # autoAdjustCount=False
            if count > rasterMask['count']:
                ones = np.nonzero(mask)[0]
                mask[ones[rasterMask['count']]-count:] = 0
            elif count < rasterMask['count']:
                zeros = np.nonzero(1-mask)[0]
                mask[zeros[rasterMask['count']-count]] = 1
        mask = mask.reshape((rasterMask['height'], rasterMask['width']))
        rasterMask = {'width':rasterMask['width'],'height':rasterMask['height'],'mask':mask, 'count':count, 'hex': rasterMask['hex']}
    # start with one quadrant holding every point, then keep slicing
    # multi-point quadrants until each one contains a single point
    quadrants = [{'points':points2d, 'grid':[0,0,width,height], 'indices':np.arange(pointCount)}]
    i = 0
    failedSlices = 0
    while i < len(quadrants) and len(quadrants) < pointCount:
        if ( len(quadrants[i]['points']) > 1 ):
            slices = sliceQuadrant(quadrants[i], mask = rasterMask)
            if len(slices)>1:
                # quadrant was split: replace it and rescan from the start
                del quadrants[i]
                quadrants += slices
                i = 0
            else:
                failedSlices += 1
        else:
            i+=1
    if failedSlices>0:
        print("WARNING - There might be a problem with the data. Try using autoAdjustCount=True as a workaround or check if you have points with identical coordinates in your set.")
    gridPoints2d = points2d.copy()
    if not (rasterMask is None) and rasterMask['hex'] is True:
        # hexagonal grid: compress rows vertically by sqrt(3)/2 and shift
        # every other row horizontally by half a cell
        f = math.sqrt(3.0)/2.0
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for q in quadrants:
            if (q['grid'][1]%2==0):
                q['grid'][0]-=offset
            q['grid'][1] *= f
    # each quadrant now holds one point; write its grid cell position back
    # BUG FIX: dtype was np.float, an alias removed in NumPy 1.24; the
    # builtin float is the exact replacement
    for q in quadrants:
        gridPoints2d[q['indices'][0]] = np.array(q['grid'][0:2],dtype=float)
    return gridPoints2d, (width, height)
def sliceQuadrant( quadrant, mask = None ):
    """Split one quadrant into smaller slices along a single axis.

    quadrant: dict with
        'points'  - (N, 2) array of 2d points assigned to this quadrant
        'grid'    - [x, y, width, height] grid rectangle of the quadrant
        'indices' - original point indices, kept aligned with 'points'
    mask: optional raster-mask dict ({'mask': 2d 0/1 array, ...}); when
        given, blocked (1) cells do not count toward slice capacity and
        each slice's grid is cropped to the mask via cropGrid.

    Returns a list of quadrant dicts with the same keys as the input.
    """
    xy = quadrant['points']
    grid = quadrant['grid']
    indices = quadrant['indices']
    slices = []
    if mask is None:
        # Pick the smallest divisor (>= 2) of each side as the candidate
        # slice count; a side of length 1 can only split along the other axis.
        if grid[2]>1:
            sliceXCount = 2
            while (grid[2]%sliceXCount!=0):
                sliceXCount+=1
        else:
            sliceXCount = grid[3]
        if grid[3]>1:
            sliceYCount = 2
            while (grid[3]%sliceYCount!=0):
                sliceYCount+=1
        else:
            sliceYCount = grid[2]
        # Prefer the axis needing fewer slices; break ties toward the wider side.
        splitX = (sliceXCount<sliceYCount or (sliceXCount==sliceYCount and grid[2]>grid[3]))
        if splitX:
            # lexsort's LAST key is primary: sort by x, then y, so equal-sized
            # runs of the sorted order map onto consecutive vertical strips.
            order = np.lexsort((xy[:,1].astype(int),xy[:,0].astype(int)))
            sliceCount = sliceXCount
            sliceSize = grid[2] // sliceCount
            pointsPerSlice = grid[3] * sliceSize
            gridOffset = grid[0]
        else:
            # Primary sort by y for horizontal strips.
            order = np.lexsort((xy[:,0].astype(int),xy[:,1].astype(int)))
            sliceCount = sliceYCount
            sliceSize = grid[3] // sliceCount
            pointsPerSlice = grid[2] * sliceSize
            gridOffset = grid[1]
        for i in range(sliceCount):
            sliceObject = {}
            sliceObject['points'] = xy[order[i*pointsPerSlice:(i+1)*pointsPerSlice]]
            if len(sliceObject['points'])>0:
                sliceObject['indices'] = indices[order[i*pointsPerSlice:(i+1)*pointsPerSlice]]
                if splitX:
                    sliceObject['grid'] = [gridOffset,grid[1],sliceSize,grid[3]]
                    gridOffset += sliceObject['grid'][2]
                else:
                    sliceObject['grid'] = [grid[0],gridOffset,grid[2],sliceSize]
                    gridOffset += sliceObject['grid'][3]
                slices.append(sliceObject)
    else:
        # Masked case: a strip's capacity is its number of free (0) cells.
        # NOTE(review): 'cols'/'rows' are swapped relative to their usual
        # meaning (cols == number of rows, rows == number of columns); the
        # arithmetic below is internally consistent with that swap.
        maskSlice = mask['mask'][grid[1]:grid[1]+grid[3],grid[0]:grid[0]+grid[2]]
        cols,rows = maskSlice.shape
        pointCountInMask = min(cols*rows - np.sum(maskSlice),len(indices))
        # Advance the column cut until about half the points fit to the left...
        columnCounts = cols - np.sum(maskSlice, axis=0)
        splitColumn = countX = 0
        while splitColumn < rows and countX < (pointCountInMask>>1):
            countX += columnCounts[splitColumn]
            splitColumn+=1
        # ...and the row cut until about half fit above.
        rowCounts = rows-np.sum(maskSlice,axis=1)
        splitRow = countY = 0
        while splitRow < cols and countY < (pointCountInMask>>1):
            countY += rowCounts[splitRow]
            splitRow+=1
        # Candidate vertical split: the first countX points (sorted by x) go left.
        order = np.lexsort((xy[:,1].astype(int),xy[:,0].astype(int)))
        slicesX = []
        if countX > 0:
            sliceObject = {}
            newOrder = order[:countX]
            sliceObject['points'] = xy[newOrder]
            sliceObject['indices'] = indices[newOrder]
            sliceObject['grid'] = [grid[0], grid[1], splitColumn, grid[3]]
            cropGrid(mask['mask'],sliceObject['grid'])
            slicesX.append(sliceObject)
        if countX < len(order):
            sliceObject = {}
            newOrder = order[countX:]
            sliceObject['points'] = xy[newOrder]
            sliceObject['indices'] = indices[newOrder]
            sliceObject['grid'] = [grid[0]+splitColumn, grid[1], grid[2]-splitColumn, grid[3]]
            cropGrid(mask['mask'],sliceObject['grid'])
            slicesX.append(sliceObject)
        # Candidate horizontal split: the first countY points (sorted by y) go top.
        order = np.lexsort((xy[:,0].astype(int),xy[:,1].astype(int)))
        slicesY = []
        if countY > 0:
            sliceObject = {}
            newOrder = order[:countY]
            sliceObject['points'] = xy[newOrder]
            sliceObject['indices'] = indices[newOrder]
            sliceObject['grid'] = [grid[0], grid[1], grid[2],splitRow]
            cropGrid(mask['mask'],sliceObject['grid'])
            slicesY.append(sliceObject)
        if countY < len(order):
            sliceObject = {}
            newOrder = order[countY:]
            sliceObject['points'] = xy[newOrder]
            sliceObject['indices'] = indices[newOrder]
            sliceObject['grid'] = [grid[0], grid[1]+splitRow, grid[2], grid[3]-splitRow]
            cropGrid(mask['mask'],sliceObject['grid'])
            slicesY.append(sliceObject)
        # Keep whichever direction actually produced two slices; if both did,
        # keep the pair whose halves have the more square aspect ratios.
        if len(slicesX)==1:
            slices = slicesY
        elif len(slicesY)==1:
            slices = slicesX
        else:
            prop1 = float(slicesX[0]['grid'][2]) / float(slicesX[0]['grid'][3])
            prop2 = float(slicesX[1]['grid'][2]) / float(slicesX[1]['grid'][3])
            if prop1 > 1.0:
                prop1 = 1.0 / prop1
            if prop2 > 1.0:
                prop2 = 1.0 / prop2
            ratioX = max(abs(1.0 - prop1),abs(1.0 - prop2))
            prop1 = float(slicesY[0]['grid'][2]) / float(slicesY[0]['grid'][3])
            prop2 = float(slicesY[1]['grid'][2]) / float(slicesY[1]['grid'][3])
            if prop1 > 1.0:
                prop1 = 1.0 / prop1
            if prop2 > 1.0:
                prop2 = 1.0 / prop2
            ratioY = max(abs(1.0 - prop1),abs(1.0 - prop2))
            if ratioX < ratioY:
                slices = slicesX
            else:
                slices = slicesY
    return slices
def cropGrid(mask,grid):
    """Shrink grid = [x, y, w, h] in place so it tightly bounds the free
    (zero) cells of mask within the current rectangle.

    mask holds 1 for blocked cells and 0 for free cells; border rows and
    columns of the window that contain no free cell are trimmed away.
    Mutates grid, returns None.
    """
    window = mask[grid[1]:grid[1] + grid[3], grid[0]:grid[0] + grid[2]]
    height, width = window.shape
    # Number of free cells in each column / row of the window.
    freePerColumn = height - np.sum(window, axis=0)
    freePerRow = width - np.sum(window, axis=1)
    # Trim empty columns from the left...
    for free in freePerColumn:
        if free > 0:
            break
        grid[0] += 1
        grid[2] -= 1
    # ...and from the right.
    for free in freePerColumn[::-1]:
        if free > 0:
            break
        grid[2] -= 1
    # Trim empty rows from the top...
    for free in freePerRow:
        if free > 0:
            break
        grid[1] += 1
        grid[3] -= 1
    # ...and from the bottom.
    for free in freePerRow[::-1]:
        if free > 0:
            break
        grid[3] -= 1
def getRasterMaskFromImage( img ):
    """Build a raster-mask dict from a PIL-style image.

    The image is converted to 1-bit mode (pixels become 0 or 255) and each
    pixel's low bit is kept, so dark pixels map to 0 (free grid cells) and
    bright pixels to 1 (blocked). 'count' is the number of free cells.
    """
    bw = img.convert('1')
    flat = np.array(bw.getdata()) & 1
    return {'width': bw.width,
            'height': bw.height,
            'mask': flat.reshape((bw.height, bw.width)),
            'count': len(flat) - np.sum(flat),
            'hex': False}
def getCircleRasterMask( r, innerRingRadius = 0, rasterCount = None, autoAdjustCount = True ):
    """Build a (2r+1)x(2r+1) raster mask whose free cells (0) fill a circle.

    r: outer circle radius in grid cells.
    innerRingRadius: if in (0, r), an inner disc of that radius is blocked
        again, leaving a free ring.
    rasterCount: optional target number of free cells; with
        autoAdjustCount=True, random cells are flipped until the free-cell
        count matches exactly.
    Returns {'width', 'height', 'mask', 'count'}.
    NOTE(review): unlike arrangementToRasterMask this dict carries no 'hex'
    key; callers that read rasterMask['hex'] must add it themselves.
    """
    d = r*2+1
    p = np.ones((d,d),dtype=int)
    # Integer midpoint-circle style scan: IG is the decision variable and
    # IDGR/IDGD its increments; each step clears four symmetric spans so the
    # whole disc is filled, not just its outline.
    IG = (r<<1) - 3
    IDGR = -6
    IDGD = (r<<2) - 10
    IX = 0
    IY = r
    while IY >= IX:
        p[r-IX:r+IX+1,r+IY] = p[r-IX:r+IX+1,r-IY] = p[r-IY:r+IY+1,r+IX] = p[r-IY:r+IY+1,r-IX] = 0
        if IG<0:
            IG = IG+IDGD
            IDGD -= 8
            IY-=1
        else:
            IG += IDGR
            IDGD -=4
        IDGR -= 4
        IX+=1
    if innerRingRadius > 0 and innerRingRadius < r:
        # Same scan with the smaller radius, writing 1s to re-block the
        # inner disc (still centred at r2 == outer radius).
        r2 = r
        r = innerRingRadius
        IG = (r<<1) - 3
        IDGR = -6
        IDGD = (r<<2) - 10
        IX = 0
        IY = r
        while IY >= IX:
            p[r2-IX:r2+IX+1,r2+IY] = p[r2-IX:r2+IX+1,r2-IY] = p[r2-IY:r2+IY+1,r2+IX] = p[r2-IY:r2+IY+1,r2-IX] = 1
            if IG<0:
                IG = IG+IDGD
                IDGD -= 8
                IY-=1
            else:
                IG += IDGR
                IDGD -=4
            IDGR -= 4
            IX+=1
    # Free cells = total cells minus blocked cells.
    count = p.shape[0] * p.shape[1] - np.sum(p.flat)
    if not rasterCount is None and autoAdjustCount and count != rasterCount:
        p = p.flatten()&1
        if count < rasterCount:
            # Too few free cells: randomly free some blocked ones.
            ones = np.nonzero(p)[0]
            np.random.shuffle(ones)
            p[ones[0:rasterCount-count]] = 0
            count = rasterCount
        elif count > rasterCount:
            # Too many free cells: randomly block some free ones.
            zeros = np.nonzero(1-p)[0]
            np.random.shuffle(zeros)
            p[zeros[0:count-rasterCount]] = 1
            count = rasterCount
        p = p.reshape((d,d))
        #print "adjusted count",p.shape[0] * p.shape[1]- np.sum(p)
    return {'width':d,'height':d,'mask':p, 'count':count}
def getRectArrangements(n):
    """Return all (width, height) factor pairs of n, width <= height,
    sorted so the most nearly square pair comes first.

    Pairs are generated by splitting every permutation of n's prime
    factorization into a left and a right product.
    """
    prime = Prime()
    factors = prime.getPrimeFactors(n)
    total = multiplyArray(factors)
    arrangements = set([(1, total)])
    if len(factors) > 1:
        for perm in set(prime.getPermutations(factors)):
            for split in range(1, len(factors)):
                left = multiplyArray(perm[0:split])
                right = multiplyArray(perm[split:])
                arrangements.add((min(left, right), max(left, right)))
    # proportion_sort is larger for squarer pairs; reverse puts them first.
    return sorted(list(arrangements), key=proportion_sort, reverse=True)
def getShiftedAlternatingRectArrangements(n):
    """Row layouts for a shifted (hex) grid that alternate two row lengths.

    Candidates (x, x+1) and (x, x-1) are found via the divisibility test
    n % (2x +- 1) == x; each hit is expanded into the actual alternating
    row list summing to n.

    Returns a list of {'hex': True, 'rows': [...], 'type': 'alternating'}.
    """
    arrangements = set([])
    for x in range(1, n >> 1):
        v = 2 * x + 1
        if n % v == x:
            # n // v keeps this an int: the original "(n / v) | 0" is a
            # JS-ism that raises TypeError under true division (float | int).
            arrangements.add((x, x + 1, (n // v) * 2 + 1))
    for x in range(2, 1 + (n >> 1)):
        v = 2 * x - 1
        if n % v == x:
            arrangements.add((x, x - 1, (n // v) * 2 + 1))
    result = []
    for a in arrangements:
        remaining = n
        rows = []
        i = 0
        while remaining > 0:
            rows.append(a[i])
            remaining -= a[i]
            i = 1 - i  # alternate between the two row lengths
        result.append({'hex': True, 'rows': rows, 'type': 'alternating'})
    return result
def getShiftedSymmetricArrangements(n):
    """Symmetric ("pyramid") row layouts for a shifted (hex) grid.

    Starting from each first-row length i, rows grow by 1 until the
    remaining count equals the peak row; the first half is then mirrored,
    giving rows like [i, i+1, ..., peak, ..., i+1, i] that sum to n.

    Returns a list of {'hex': True, 'rows': [...], 'type': 'symmetric'}.
    """
    hits = []
    for i in range(1, n >> 1):
        d = []
        count = n
        row = i
        d.append(row)
        while True:
            count -= row * 2
            if count == row + 1:
                # Only record a hit when the peak differs from the first row.
                # The original flat-indented version also recorded the
                # i == row case as rows [i], whose sum is i != n (e.g. n=16
                # produced rows [5]); that degenerate layout is dropped —
                # the valid [i, i+1, i] shape is already produced by
                # getShiftedAlternatingRectArrangements.
                if i != row:
                    d.append(row + 1)
                    d += d[0:-1][::-1]
                    hits.append({'hex': True, 'rows': d, 'type': 'symmetric'})
                break
            elif count <= 0:
                break
            row += 1
            d.append(row)
    return hits
def getShiftedTriangularArrangement(n):
    """Triangular row layout (1, 2, 3, ...) for a shifted (hex) grid.

    Such a layout exists only when n is a triangular number k*(k+1)/2,
    detected by 8n+1 being a perfect square; otherwise returns [].
    """
    root = math.sqrt(8 * n + 1)
    if root != math.floor(root):
        return []
    rows = []
    size = 1
    while n > 0:
        rows.append(size)
        n -= size
        size += 1
    return [{'hex': True, 'rows': rows, 'type': 'triangular'}]
def getAlternatingRectArrangements(n):
    """Row layouts for a rectangular grid that alternate two row lengths.

    Candidates (x, x+2) and (x, x-2) are found via the divisibility test
    n % (2x +- 2) == x; each hit is expanded into the alternating row list
    summing to n.

    Returns a list of {'hex': False, 'rows': [...], 'type': 'alternating'}.
    """
    arrangements = set([])
    for x in range(1, n >> 1):
        v = 2 * x + 2
        if n % v == x:
            # n // v keeps this an int: the original "(n / v) | 0" is a
            # JS-ism that raises TypeError under true division (float | int).
            arrangements.add((x, x + 2, (n // v) * 2 + 1))
    for x in range(2, 1 + (n >> 1)):
        v = 2 * x - 2
        if n % v == x:
            arrangements.add((x, x - 2, (n // v) * 2 + 1))
    result = []
    for a in arrangements:
        remaining = n
        rows = []
        i = 0
        while remaining > 0:
            rows.append(a[i])
            remaining -= a[i]
            i = 1 - i  # alternate between the two row lengths
        result.append({'hex': False, 'rows': rows, 'type': 'alternating'})
    return result
def getSymmetricArrangements(n):
    """Symmetric ("pyramid") row layouts for a rectangular grid.

    Starting from each first-row length i, rows grow by 2 until the
    remaining count equals the peak row; the first half is then mirrored,
    giving rows like [i, i+2, ..., peak, ..., i+2, i] that sum to n.

    Returns a list of {'hex': False, 'rows': [...], 'type': 'symmetric'}.
    """
    hits = []
    for i in range(1, n >> 1):
        d = []
        count = n
        row = i
        d.append(row)
        while True:
            count -= row * 2
            if count == row + 2:
                # Only record a hit when the peak differs from the first row.
                # The original flat-indented version also recorded the
                # i == row case as rows [i], whose sum is i != n (e.g. n=17
                # produced rows [5]); that degenerate layout is dropped —
                # the valid [i, i+2, i] shape is already produced by
                # getAlternatingRectArrangements.
                if i != row:
                    d.append(row + 2)
                    d += d[0:-1][::-1]
                    hits.append({'hex': False, 'rows': d, 'type': 'symmetric'})
                break
            elif count <= 0:
                break
            row += 2
            d.append(row)
    return hits
def getTriangularArrangement(n):
    """Triangular row layout with odd rows 1, 3, 5, ... for a square grid.

    Such a layout exists only when n is a perfect square (the sum of the
    first k odd numbers is k*k); otherwise returns [].
    """
    root = math.sqrt(n)
    if root != math.floor(root):
        return []
    rows = []
    size = 1
    while n > 0:
        rows.append(size)
        n -= size
        size += 2
    return [{'hex': False, 'rows': rows, 'type': 'triangular'}]
def getArrangements(n, includeHexagonalArrangements = True,includeRectangularArrangements = True):
    """Collect every candidate row arrangement for n points.

    Hexagonal candidates come from the shifted alternating/symmetric/
    triangular generators; rectangular candidates additionally include a
    circular layout when a circle matches n exactly.
    """
    result = []
    if includeHexagonalArrangements:
        result += getShiftedAlternatingRectArrangements(n)
        result += getShiftedSymmetricArrangements(n)
        result += getShiftedTriangularArrangement(n)
    if includeRectangularArrangements:
        result += getAlternatingRectArrangements(n)
        result += getSymmetricArrangements(n)
        result += getTriangularArrangement(n)
        radius, adjust, matched = getBestCircularMatch(n)
        if matched == n:
            result.append(getCircularArrangement(radius, adjust))
    return result
def arrangementListToRasterMasks( arrangements ):
    """Render every arrangement into a raster mask and sort the masks so
    the most nearly square one comes first."""
    masks = [arrangementToRasterMask(arrangement) for arrangement in arrangements]
    return sorted(masks, key=arrangement_sort, reverse=True)
def arrangementToRasterMask( arrangement ):
    """Render an arrangement's row lengths into a centred 0/1 raster mask.

    Each row's cells are cleared (0) centred within the mask width; all
    other cells stay blocked (1). Hex arrangements reserve one extra
    column for the half-cell row shift.
    """
    rows = np.array(arrangement['rows'])
    height = len(rows)
    width = np.max(rows)
    if arrangement['hex'] is True:
        width += 1
    mask = np.ones((height, width), dtype=int)
    for y in range(height):
        rowLength = rows[y]
        left = (width - rowLength) >> 1
        mask[y, left:left + rowLength] = 0
    return {'width': width, 'height': height, 'mask': mask,
            'count': np.sum(rows), 'hex': arrangement['hex'],
            'type': arrangement['type']}
def rasterMaskToGrid( rasterMask ):
    """Convert a raster-mask dict into an array of free grid coordinates.

    Every cell where rasterMask['mask'] is 0 yields one [x, y] point, in
    row-major order. For hexagonal masks, even/odd rows are shifted by
    half a cell (direction chosen from the first two mask rows) and row
    spacing is compressed to sqrt(3)/2 so neighbouring rows interlock.

    Returns an (N, 2) float array of grid positions.
    """
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y,x]==0:
                grid.append([x,y])
    # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
    grid = np.array(grid,dtype=float)
    if not (rasterMask is None) and rasterMask['hex'] is True:
        f = math.sqrt(3.0)/2.0
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if (grid[i][1]%2.0==0.0):
                grid[i][0]-=offset
            grid[i][1] *= f
    return grid
def getBestCircularMatch(n):
    """Search for a circle whose enclosed lattice-point count best matches n.

    Tries radii near sqrt(n/pi) with fractional adjustments 0.0 .. 0.9 and
    counts lattice points strictly inside each circle. Returns
    (radius, adjust, count): an exact match if one exists, otherwise the
    smallest count that still exceeds n.
    """
    best_count = n * 2
    best_radius = 0
    best_adjust = 0.0
    base_radius = int(math.sqrt(n / math.pi))
    for tenth in range(0, 10):
        adjust = float(tenth) / 10.0
        for radius in range(base_radius, base_radius + 3):
            limit = (radius + adjust) * (radius + adjust)
            inside = 0
            for y in range(-radius, radius + 1):
                yy = y * y
                for x in range(-radius, radius + 1):
                    if x * x + yy < limit:
                        inside += 1
            if inside == n:
                return radius, adjust, inside
            if inside > n and inside < best_count:
                best_adjust = adjust
                best_radius = radius
                best_count = inside
    return best_radius, best_adjust, best_count
def getCircularArrangement(radius,adjustFactor):
    """Row layout of the lattice points strictly inside a circle of
    radius (radius + adjustFactor); rows[k] is the point count on the
    horizontal line y = k - radius."""
    rows = np.zeros( radius*2+1,dtype=int )
    limit = (radius + adjustFactor) * (radius + adjustFactor)
    for y in range(-radius, radius + 1):
        yy = y * y
        rows[radius + y] = sum(1 for x in range(-radius, radius + 1)
                               if x * x + yy < limit)
    return {'hex': False, 'rows': rows, 'type': 'circular'}
def arrangement_sort(x):
    """Sort key for mask dicts: squareness of width/height as a scaled int
    (larger means more nearly square)."""
    shorter = float(min(x['width'], x['height']))
    longer = float(max(x['width'], x['height']))
    return int(100000000 * abs(shorter / longer))
def proportion_sort(x):
    """Sort key for (w, h) pairs: squareness as a scaled int (larger
    means more nearly square)."""
    shorter = float(min(x[0], x[1]))
    longer = float(max(x[0], x[1]))
    return int(100000000 * abs(shorter / longer))
def multiplyArray(a):
    """Return the product of all values in *a* (1 for an empty sequence)."""
    product = 1
    for value in a:
        product *= value
    return product
| [
"numpy.sum",
"math.sqrt",
"numpy.zeros",
"numpy.ones",
"math.floor",
"numpy.argmin",
"numpy.nonzero",
"numpy.max",
"numpy.array",
"numpy.arange",
"numpy.random.shuffle"
] | [((12550, 12576), 'numpy.ones', 'np.ones', (['(d, d)'], {'dtype': 'int'}), '((d, d), dtype=int)\n', (12557, 12576), True, 'import numpy as np\n'), ((15969, 15989), 'math.sqrt', 'math.sqrt', (['(8 * n + 1)'], {}), '(8 * n + 1)\n', (15978, 15989), False, 'import math\n'), ((17432, 17444), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (17441, 17444), False, 'import math\n'), ((18570, 18599), 'numpy.array', 'np.array', (["arrangement['rows']"], {}), "(arrangement['rows'])\n", (18578, 18599), True, 'import numpy as np\n'), ((18612, 18624), 'numpy.max', 'np.max', (['rows'], {}), '(rows)\n', (18618, 18624), True, 'import numpy as np\n'), ((18711, 18746), 'numpy.ones', 'np.ones', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (18718, 18746), True, 'import numpy as np\n'), ((19230, 19260), 'numpy.array', 'np.array', (['grid'], {'dtype': 'np.float'}), '(grid, dtype=np.float)\n', (19238, 19260), True, 'import numpy as np\n'), ((20356, 20391), 'numpy.zeros', 'np.zeros', (['(radius * 2 + 1)'], {'dtype': 'int'}), '(radius * 2 + 1, dtype=int)\n', (20364, 20391), True, 'import numpy as np\n'), ((6166, 6206), 'numpy.array', 'np.array', (["q['grid'][0:2]"], {'dtype': 'np.float'}), "(q['grid'][0:2], dtype=np.float)\n", (6174, 6206), True, 'import numpy as np\n'), ((11624, 11649), 'numpy.sum', 'np.sum', (['maskSlice'], {'axis': '(0)'}), '(maskSlice, axis=0)\n', (11630, 11649), True, 'import numpy as np\n'), ((11920, 11945), 'numpy.sum', 'np.sum', (['maskSlice'], {'axis': '(1)'}), '(maskSlice, axis=1)\n', (11926, 11945), True, 'import numpy as np\n'), ((13565, 13579), 'numpy.sum', 'np.sum', (['p.flat'], {}), '(p.flat)\n', (13571, 13579), True, 'import numpy as np\n'), ((16003, 16016), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (16013, 16016), False, 'import math\n'), ((17458, 17471), 'math.floor', 'math.floor', (['t'], {}), '(t)\n', (17468, 17471), False, 'import math\n'), ((18916, 18928), 'numpy.sum', 'np.sum', (['rows'], {}), '(rows)\n', 
(18922, 18928), True, 'import numpy as np\n'), ((19744, 19766), 'math.sqrt', 'math.sqrt', (['(n / math.pi)'], {}), '(n / math.pi)\n', (19753, 19766), False, 'import math\n'), ((5056, 5077), 'numpy.arange', 'np.arange', (['pointCount'], {}), '(pointCount)\n', (5065, 5077), True, 'import numpy as np\n'), ((5823, 5837), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (5832, 5837), False, 'import math\n'), ((5876, 5908), 'numpy.argmin', 'np.argmin', (["rasterMask['mask'][0]"], {}), "(rasterMask['mask'][0])\n", (5885, 5908), True, 'import numpy as np\n'), ((5911, 5943), 'numpy.argmin', 'np.argmin', (["rasterMask['mask'][1]"], {}), "(rasterMask['mask'][1])\n", (5920, 5943), True, 'import numpy as np\n'), ((8325, 8350), 'numpy.sum', 'np.sum', (['maskSlice'], {'axis': '(0)'}), '(maskSlice, axis=0)\n', (8331, 8350), True, 'import numpy as np\n'), ((8563, 8588), 'numpy.sum', 'np.sum', (['maskSlice'], {'axis': '(1)'}), '(maskSlice, axis=1)\n', (8569, 8588), True, 'import numpy as np\n'), ((12397, 12409), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (12403, 12409), True, 'import numpy as np\n'), ((13764, 13787), 'numpy.random.shuffle', 'np.random.shuffle', (['ones'], {}), '(ones)\n', (13781, 13787), True, 'import numpy as np\n'), ((19335, 19349), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (19344, 19349), False, 'import math\n'), ((19388, 19420), 'numpy.argmin', 'np.argmin', (["rasterMask['mask'][0]"], {}), "(rasterMask['mask'][0])\n", (19397, 19420), True, 'import numpy as np\n'), ((19423, 19455), 'numpy.argmin', 'np.argmin', (["rasterMask['mask'][1]"], {}), "(rasterMask['mask'][1])\n", (19432, 19455), True, 'import numpy as np\n'), ((2434, 2455), 'math.sqrt', 'math.sqrt', (['pointCount'], {}), '(pointCount)\n', (2443, 2455), False, 'import math\n'), ((2695, 2731), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'int'}), '((height, width), dtype=int)\n', (2703, 2731), True, 'import numpy as np\n'), ((3704, 3735), 'numpy.sum', 'np.sum', 
(["rasterMask['mask'].flat"], {}), "(rasterMask['mask'].flat)\n", (3710, 3735), True, 'import numpy as np\n'), ((4169, 4192), 'numpy.random.shuffle', 'np.random.shuffle', (['ones'], {}), '(ones)\n', (4186, 4192), True, 'import numpy as np\n'), ((8263, 8280), 'numpy.sum', 'np.sum', (['maskSlice'], {}), '(maskSlice)\n', (8269, 8280), True, 'import numpy as np\n'), ((13735, 13748), 'numpy.nonzero', 'np.nonzero', (['p'], {}), '(p)\n', (13745, 13748), True, 'import numpy as np\n'), ((13950, 13974), 'numpy.random.shuffle', 'np.random.shuffle', (['zeros'], {}), '(zeros)\n', (13967, 13974), True, 'import numpy as np\n'), ((4133, 4149), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4143, 4149), True, 'import numpy as np\n'), ((4361, 4385), 'numpy.random.shuffle', 'np.random.shuffle', (['zeros'], {}), '(zeros)\n', (4378, 4385), True, 'import numpy as np\n'), ((4528, 4544), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4538, 4544), True, 'import numpy as np\n'), ((13919, 13936), 'numpy.nonzero', 'np.nonzero', (['(1 - p)'], {}), '(1 - p)\n', (13929, 13936), True, 'import numpy as np\n'), ((4323, 4343), 'numpy.nonzero', 'np.nonzero', (['(1 - mask)'], {}), '(1 - mask)\n', (4333, 4343), True, 'import numpy as np\n'), ((4677, 4697), 'numpy.nonzero', 'np.nonzero', (['(1 - mask)'], {}), '(1 - mask)\n', (4687, 4697), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
    # Fitting one GBM per target column by hand must match the predictions
    # of MultiOutputRegressor wrapping the same base estimator.
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]
    expected = np.zeros_like(y_test)
    for target in range(3):
        single = GradientBoostingRegressor(random_state=0)
        single.fit(X_train, y_train[:, target])
        expected[:, target] = single.predict(X_test)
    multi = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    multi.fit(X_train, y_train)
    assert_almost_equal(expected, multi.predict(X_test))
def test_multi_target_regression_partial_fit():
    # Incremental fitting per column must match MultiOutputRegressor.partial_fit.
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]
    expected = np.zeros_like(y_test)
    split = 25
    for target in range(3):
        single = SGDRegressor(random_state=0)
        single.partial_fit(X_train[:split], y_train[:split, target])
        single.partial_fit(X_train[split:], y_train[split:, target])
        expected[:, target] = single.predict(X_test)
    multi = MultiOutputRegressor(SGDRegressor(random_state=0))
    multi.partial_fit(X_train[:split], y_train[:split])
    multi.partial_fit(X_train[split:], y_train[split:])
    assert_almost_equal(expected, multi.predict(X_test))
def test_multi_target_regression_one_target():
    # A single-target y is rejected by MultiOutputRegressor.fit.
    X, y = datasets.make_regression(n_targets=1)
    estimator = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    assert_raises(ValueError, estimator.fit, X, y)
def test_multi_target_sparse_regression():
    # Training on sparse data (any format) must give the same predictions
    # as training on the equivalent dense data.
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test = X[50:]
    sparse_formats = (sp.csr_matrix, sp.csc_matrix, sp.coo_matrix,
                      sp.dok_matrix, sp.lil_matrix)
    for to_sparse in sparse_formats:
        dense_model = MultiOutputRegressor(Lasso(random_state=0))
        sparse_model = MultiOutputRegressor(Lasso(random_state=0))
        dense_model.fit(X_train, y_train)
        sparse_model.fit(to_sparse(X_train), y_train)
        assert_almost_equal(dense_model.predict(X_test),
                            sparse_model.predict(to_sparse(X_test)))
def test_multi_target_sample_weights_api():
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [0.8, 0.6]
    # A base estimator without sample_weight support must raise ...
    unsupported = MultiOutputRegressor(Lasso())
    assert_raises_regex(ValueError, "does not support sample weights",
                        unsupported.fit, X, y, w)
    # ... while a weight-aware base estimator fits without error.
    supported = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    supported.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    # Two partial_fit runs that differ only in the sample weights must not
    # produce identical predictions.
    weighted = MultiOutputRegressor(SGDRegressor(random_state=0))
    weighted.partial_fit(X, y, [2., 1.])
    uniform = MultiOutputRegressor(SGDRegressor(random_state=0))
    uniform.partial_fit(X, y, [2., 2.])
    assert_not_equal(uniform.predict(X)[0][0], weighted.predict(X)[0][0])
def test_multi_target_sample_weights():
    # Weighting a sample by 2 must be equivalent to duplicating it.
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3.141, 2.718], [2.718, 3.141]]
    weighted = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    weighted.fit(Xw, yw, [2., 1.])
    X_rep = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y_rep = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
    repeated = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    repeated.fit(X_rep, y_rep)
    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(repeated.predict(X_test), weighted.predict(X_test))
# Shared module-level fixture for the classification tests below.
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
# y2/y3 are label permutations of y1, giving three correlated outputs.
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
# All three outputs share the same class set, so one count suffices.
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
    # With n_jobs=-1 each partial_fit call must work on fresh estimator
    # objects rather than reusing the previous call's fitted estimators.
    base = SGDClassifier(loss='log', random_state=1)
    mor = MultiOutputClassifier(base, n_jobs=-1)
    mor.partial_fit(X, y, classes)
    first = mor.estimators_[0]
    mor.partial_fit(X, y)
    second = mor.estimators_[0]
    assert_false(first is second)
def test_multi_output_classification_partial_fit():
    # partial_fit on the multi-output wrapper must match per-column
    # partial_fit on clones of the same base SGD classifier.
    base = SGDClassifier(loss='log', random_state=1)
    wrapper = MultiOutputClassifier(base)
    split = X.shape[0] // 2
    # First half (classes must be declared on the first call).
    wrapper.partial_fit(X[:split], y[:split], classes=classes)
    first_predictions = wrapper.predict(X)
    assert_equal((n_samples, n_outputs), first_predictions.shape)
    # Second half.
    wrapper.partial_fit(X[split:], y[split:])
    second_predictions = wrapper.predict(X)
    assert_equal((n_samples, n_outputs), second_predictions.shape)
    for column in range(3):
        column_clf = clone(base)  # fresh, unfitted copy with identical params
        column_clf.partial_fit(X[:split], y[:split, column],
                               classes=classes[column])
        assert_array_equal(column_clf.predict(X), first_predictions[:, column])
        column_clf.partial_fit(X[split:], y[split:, column])
        assert_array_equal(column_clf.predict(X), second_predictions[:, column])
def test_mutli_output_classifiation_partial_fit_no_first_classes_exception():
    # Omitting classes= on the very first partial_fit call must raise.
    wrapper = MultiOutputClassifier(SGDClassifier(loss='log', random_state=1))
    assert_raises_regex(ValueError,
                        "classes must be passed on the first call "
                        "to partial_fit.",
                        wrapper.partial_fit, X, y)
def test_multi_output_classification():
    # fit/predict/predict_proba of MultiOutputClassifier must match
    # per-column forests fitted with the same parameters.
    forest = RandomForestClassifier(n_estimators=10, random_state=1)
    wrapper = MultiOutputClassifier(forest)
    wrapper.fit(X, y)
    predictions = wrapper.predict(X)
    assert_equal((n_samples, n_outputs), predictions.shape)
    predict_proba = wrapper.predict_proba(X)
    assert len(predict_proba) == n_outputs
    for class_probabilities in predict_proba:
        assert_equal((n_samples, n_classes), class_probabilities.shape)
    # argmax over the stacked probabilities must reproduce predict().
    assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
                       predictions)
    for column in range(3):
        column_forest = clone(forest)  # fresh copy with identical params
        column_forest.fit(X, y[:, column])
        assert_equal(list(column_forest.predict(X)),
                     list(predictions[:, column]))
        assert_array_equal(list(column_forest.predict_proba(X)),
                           list(predict_proba[column]))
def test_multiclass_multioutput_estimator():
    # Meta-of-meta estimator: MultiOutputClassifier wrapping a OneVsRest
    # classifier must match per-column OvR fits.
    svc = LinearSVC(random_state=0)
    ovr = OneVsRestClassifier(svc)
    wrapper = MultiOutputClassifier(ovr)
    wrapper.fit(X, y)
    predictions = wrapper.predict(X)
    assert_equal((n_samples, n_outputs), predictions.shape)
    for column in range(3):
        column_ovr = clone(ovr)  # fresh copy with identical params
        column_ovr.fit(X, y[:, column])
        assert_equal(list(column_ovr.predict(X)),
                     list(predictions[:, column]))
def test_multiclass_multioutput_estimator_predict_proba():
    # Fixed-seed logistic regressions on a deterministic fixture must
    # reproduce the reference probabilities recorded below.
    seed = 542
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(5, 5))
    # Two outputs: a binary and a ternary string label.
    y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1)
    y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1)
    Y = np.concatenate([y1, y2], axis=1)
    clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
    clf.fit(X, Y)
    y_result = clf.predict_proba(X)
    y_actual = [np.array([[0.23481764, 0.76518236],
                          [0.67196072, 0.32803928],
                          [0.54681448, 0.45318552],
                          [0.34883923, 0.65116077],
                          [0.73687069, 0.26312931]]),
                np.array([[0.5171785, 0.23878628, 0.24403522],
                          [0.22141451, 0.64102704, 0.13755846],
                          [0.16751315, 0.18256843, 0.64991843],
                          [0.27357372, 0.55201592, 0.17441036],
                          [0.65745193, 0.26062899, 0.08191907]])]
    for got, expected in zip(y_result, y_actual):
        assert_almost_equal(got, expected)
def test_multi_output_classification_sample_weights():
    # Weighting a sample by 2 must be equivalent to duplicating it.
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3, 2], [2, 3]]
    weighted = MultiOutputClassifier(
        RandomForestClassifier(n_estimators=10, random_state=1))
    weighted.fit(Xw, yw, np.asarray([2., 1.]))
    X_rep = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y_rep = [[3, 2], [3, 2], [2, 3]]
    repeated = MultiOutputClassifier(
        RandomForestClassifier(n_estimators=10, random_state=1))
    repeated.fit(X_rep, y_rep)
    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(repeated.predict(X_test), weighted.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
    # Sample weight 2 must be equivalent to feeding the sample twice.
    Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
    yw = [[3, 2], [2, 3], [3, 2]]
    weighted = MultiOutputClassifier(SGDClassifier(random_state=1))
    weighted.fit(Xw, yw, np.asarray([2., 1., 1.]))
    X_rep = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
    y_rep = [[3, 2], [3, 2], [2, 3], [3, 2]]
    repeated = MultiOutputClassifier(SGDClassifier(random_state=1))
    repeated.fit(X_rep, y_rep)
    X_test = [[1.5, 2.5, 3.5]]
    assert_array_almost_equal(repeated.predict(X_test),
                              weighted.predict(X_test))
def test_multi_output_exceptions():
    # Before fit, every inference method must raise NotFittedError.
    moc = MultiOutputClassifier(LinearSVC(random_state=0))
    assert_raises(NotFittedError, moc.predict, y)
    assert_raises(NotFittedError, moc.predict_proba, y)
    assert_raises(NotFittedError, moc.score, X, y)
    # After fit, scoring with a different number of outputs must raise.
    moc.fit(X, y)
    y_new = np.column_stack((y1, y2))
    assert_raises(ValueError, moc.score, X, y_new)
| [
"sklearn.datasets.load_iris",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.utils.testing.assert_equal",
"sklearn.base.clone",
"numpy.unique",
"sklearn.utils.testing.assert_raises",
"numpy.zeros_like",
"sklearn.linear_model.SGDClassifier",
"sklearn.datasets.make_regression",
"numpy.random... | [((4786, 4806), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (4804, 4806), False, 'from sklearn import datasets\n'), ((4916, 4943), 'sklearn.utils.shuffle', 'shuffle', (['y1'], {'random_state': '(1)'}), '(y1, random_state=1)\n', (4923, 4943), False, 'from sklearn.utils import shuffle\n'), ((4949, 4976), 'sklearn.utils.shuffle', 'shuffle', (['y1'], {'random_state': '(2)'}), '(y1, random_state=2)\n', (4956, 4976), False, 'from sklearn.utils import shuffle\n'), ((4981, 5010), 'numpy.column_stack', 'np.column_stack', (['(y1, y2, y3)'], {}), '((y1, y2, y3))\n', (4996, 5010), True, 'import numpy as np\n'), ((1104, 1141), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_targets': '(3)'}), '(n_targets=3)\n', (1128, 1141), False, 'from sklearn import datasets\n'), ((1234, 1255), 'numpy.zeros_like', 'np.zeros_like', (['y_test'], {}), '(y_test)\n', (1247, 1255), True, 'import numpy as np\n'), ((1565, 1604), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['references', 'y_pred'], {}), '(references, y_pred)\n', (1584, 1604), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((1666, 1703), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_targets': '(3)'}), '(n_targets=3)\n', (1690, 1703), False, 'from sklearn import datasets\n'), ((1796, 1817), 'numpy.zeros_like', 'np.zeros_like', (['y_test'], {}), '(y_test)\n', (1809, 1817), True, 'import numpy as np\n'), ((2322, 2361), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['references', 'y_pred'], {}), '(references, y_pred)\n', (2341, 2361), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((2464, 2501), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_targets': '(1)'}), '(n_targets=1)\n', (2488, 2501), False, 'from sklearn import datasets\n'), ((2581, 2621), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'rgr.fit', 
'X', 'y'], {}), '(ValueError, rgr.fit, X, y)\n', (2594, 2621), False, 'from sklearn.utils.testing import assert_raises\n'), ((2678, 2715), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_targets': '(3)'}), '(n_targets=3)\n', (2702, 2715), False, 'from sklearn import datasets\n'), ((3392, 3480), 'sklearn.utils.testing.assert_raises_regex', 'assert_raises_regex', (['ValueError', '"""does not support sample weights"""', 'rgr.fit', 'X', 'y', 'w'], {}), "(ValueError, 'does not support sample weights', rgr.fit,\n X, y, w)\n", (3411, 3480), False, 'from sklearn.utils.testing import assert_raises_regex\n'), ((5082, 5095), 'numpy.unique', 'np.unique', (['y1'], {}), '(y1)\n', (5091, 5095), True, 'import numpy as np\n'), ((5229, 5270), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'random_state': '(1)'}), "(loss='log', random_state=1)\n", (5242, 5270), False, 'from sklearn.linear_model import SGDClassifier\n'), ((5281, 5329), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['sgd_linear_clf'], {'n_jobs': '(-1)'}), '(sgd_linear_clf, n_jobs=-1)\n', (5302, 5329), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((5528, 5554), 'sklearn.utils.testing.assert_false', 'assert_false', (['(est1 is est2)'], {}), '(est1 is est2)\n', (5540, 5554), False, 'from sklearn.utils.testing import assert_false\n'), ((5762, 5803), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'random_state': '(1)'}), "(loss='log', random_state=1)\n", (5775, 5803), False, 'from sklearn.linear_model import SGDClassifier\n'), ((5830, 5867), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['sgd_linear_clf'], {}), '(sgd_linear_clf)\n', (5851, 5867), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((6122, 6183), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(n_samples, n_outputs)', 
'first_predictions.shape'], {}), '((n_samples, n_outputs), first_predictions.shape)\n', (6134, 6183), False, 'from sklearn.utils.testing import assert_equal\n'), ((6313, 6375), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(n_samples, n_outputs)', 'second_predictions.shape'], {}), '((n_samples, n_outputs), second_predictions.shape)\n', (6325, 6375), False, 'from sklearn.utils.testing import assert_equal\n'), ((7071, 7112), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'random_state': '(1)'}), "(loss='log', random_state=1)\n", (7084, 7112), False, 'from sklearn.linear_model import SGDClassifier\n'), ((7139, 7176), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['sgd_linear_clf'], {}), '(sgd_linear_clf)\n', (7160, 7176), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((7181, 7319), 'sklearn.utils.testing.assert_raises_regex', 'assert_raises_regex', (['ValueError', '"""classes must be passed on the first call to partial_fit."""', 'multi_target_linear.partial_fit', 'X', 'y'], {}), "(ValueError,\n 'classes must be passed on the first call to partial_fit.',\n multi_target_linear.partial_fit, X, y)\n", (7200, 7319), False, 'from sklearn.utils.testing import assert_raises_regex\n'), ((7587, 7642), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(1)'}), '(n_estimators=10, random_state=1)\n', (7609, 7642), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((7669, 7698), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['forest'], {}), '(forest)\n', (7690, 7698), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((7854, 7909), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(n_samples, n_outputs)', 'predictions.shape'], {}), '((n_samples, n_outputs), predictions.shape)\n', (7866, 7909), 
False, 'from sklearn.utils.testing import assert_equal\n'), ((8722, 8747), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (8731, 8747), False, 'from sklearn.svm import LinearSVC\n'), ((8770, 8794), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['svc'], {}), '(svc)\n', (8789, 8794), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((8818, 8856), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['multi_class_svc'], {}), '(multi_class_svc)\n', (8839, 8856), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((8940, 8995), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(n_samples, n_outputs)', 'predictions.shape'], {}), '((n_samples, n_outputs), predictions.shape)\n', (8952, 8995), False, 'from sklearn.utils.testing import assert_equal\n'), ((9426, 9453), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (9447, 9453), True, 'import numpy as np\n'), ((9683, 9715), 'numpy.concatenate', 'np.concatenate', (['[y1, y2]'], {'axis': '(1)'}), '([y1, y2], axis=1)\n', (9697, 9715), True, 'import numpy as np\n'), ((10666, 10688), 'numpy.asarray', 'np.asarray', (['[2.0, 1.0]'], {}), '([2.0, 1.0])\n', (10676, 10688), True, 'import numpy as np\n'), ((10700, 10755), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(1)'}), '(n_estimators=10, random_state=1)\n', (10722, 10755), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((10768, 10797), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['forest'], {}), '(forest)\n', (10789, 10797), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((10956, 11011), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(1)'}), '(n_estimators=10, 
random_state=1)\n', (10978, 11011), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((11022, 11051), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['forest'], {}), '(forest)\n', (11043, 11051), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((11373, 11400), 'numpy.asarray', 'np.asarray', (['[2.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0])\n', (11383, 11400), True, 'import numpy as np\n'), ((11419, 11448), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (11432, 11448), False, 'from sklearn.linear_model import SGDClassifier\n'), ((11461, 11498), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['sgd_linear_clf'], {}), '(sgd_linear_clf)\n', (11482, 11498), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((11690, 11719), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (11703, 11719), False, 'from sklearn.linear_model import SGDClassifier\n'), ((11730, 11767), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['sgd_linear_clf'], {}), '(sgd_linear_clf)\n', (11751, 11767), False, 'from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier\n'), ((12092, 12137), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['NotFittedError', 'moc.predict', 'y'], {}), '(NotFittedError, moc.predict, y)\n', (12105, 12137), False, 'from sklearn.utils.testing import assert_raises\n'), ((12142, 12193), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['NotFittedError', 'moc.predict_proba', 'y'], {}), '(NotFittedError, moc.predict_proba, y)\n', (12155, 12193), False, 'from sklearn.utils.testing import assert_raises\n'), ((12198, 12244), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['NotFittedError', 'moc.score', 'X', 'y'], {}), '(NotFittedError, 
moc.score, X, y)\n', (12211, 12244), False, 'from sklearn.utils.testing import assert_raises\n'), ((12334, 12359), 'numpy.column_stack', 'np.column_stack', (['(y1, y2)'], {}), '((y1, y2))\n', (12349, 12359), True, 'import numpy as np\n'), ((12382, 12428), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'moc.score', 'X', 'y_new'], {}), '(ValueError, moc.score, X, y_new)\n', (12395, 12428), False, 'from sklearn.utils.testing import assert_raises\n'), ((1293, 1334), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1318, 1334), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((1454, 1495), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1479, 1495), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((1875, 1903), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (1887, 1903), False, 'from sklearn.linear_model import SGDRegressor\n'), ((2125, 2153), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2137, 2153), False, 'from sklearn.linear_model import SGDRegressor\n'), ((2534, 2575), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2559, 2575), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((3379, 3386), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (3384, 3386), False, 'from sklearn.linear_model import Lasso\n'), ((3608, 3649), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3633, 3649), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), 
((3872, 3900), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3884, 3900), False, 'from sklearn.linear_model import SGDRegressor\n'), ((4020, 4048), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4032, 4048), False, 'from sklearn.linear_model import SGDRegressor\n'), ((4338, 4379), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4363, 4379), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((4581, 4622), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4606, 4622), False, 'from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier\n'), ((6616, 6637), 'sklearn.base.clone', 'clone', (['sgd_linear_clf'], {}), '(sgd_linear_clf)\n', (6621, 6637), False, 'from sklearn.base import clone\n'), ((8066, 8129), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(n_samples, n_classes)', 'class_probabilities.shape'], {}), '((n_samples, n_classes), class_probabilities.shape)\n', (8078, 8129), False, 'from sklearn.utils.testing import assert_equal\n'), ((8355, 8368), 'sklearn.base.clone', 'clone', (['forest'], {}), '(forest)\n', (8360, 8368), False, 'from sklearn.base import clone\n'), ((9125, 9147), 'sklearn.base.clone', 'clone', (['multi_class_svc'], {}), '(multi_class_svc)\n', (9130, 9147), False, 'from sklearn.base import clone\n'), ((9749, 9786), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': 'seed'}), '(random_state=seed)\n', (9767, 9786), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9860, 10005), 'numpy.array', 'np.array', (['[[0.23481764, 0.76518236], [0.67196072, 0.32803928], [0.54681448, \n 0.45318552], [0.34883923, 0.65116077], [0.73687069, 0.26312931]]'], {}), 
'([[0.23481764, 0.76518236], [0.67196072, 0.32803928], [0.54681448, \n 0.45318552], [0.34883923, 0.65116077], [0.73687069, 0.26312931]])\n', (9868, 10005), True, 'import numpy as np\n'), ((10122, 10331), 'numpy.array', 'np.array', (['[[0.5171785, 0.23878628, 0.24403522], [0.22141451, 0.64102704, 0.13755846],\n [0.16751315, 0.18256843, 0.64991843], [0.27357372, 0.55201592, \n 0.17441036], [0.65745193, 0.26062899, 0.08191907]]'], {}), '([[0.5171785, 0.23878628, 0.24403522], [0.22141451, 0.64102704, \n 0.13755846], [0.16751315, 0.18256843, 0.64991843], [0.27357372, \n 0.55201592, 0.17441036], [0.65745193, 0.26062899, 0.08191907]])\n', (10130, 10331), True, 'import numpy as np\n'), ((10471, 10516), 'sklearn.utils.testing.assert_almost_equal', 'assert_almost_equal', (['y_result[i]', 'y_actual[i]'], {}), '(y_result[i], y_actual[i])\n', (10490, 10516), False, 'from sklearn.utils.testing import assert_almost_equal\n'), ((12061, 12086), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12070, 12086), False, 'from sklearn.svm import LinearSVC\n'), ((2924, 2945), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2929, 2945), False, 'from sklearn.linear_model import Lasso\n'), ((2989, 3010), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2994, 3010), False, 'from sklearn.linear_model import Lasso\n'), ((8164, 8188), 'numpy.dstack', 'np.dstack', (['predict_proba'], {}), '(predict_proba)\n', (8173, 8188), True, 'import numpy as np\n'), ((9539, 9574), 'numpy.array', 'np.array', (["['b', 'a', 'a', 'b', 'a']"], {}), "(['b', 'a', 'a', 'b', 'a'])\n", (9547, 9574), True, 'import numpy as np\n'), ((9611, 9646), 'numpy.array', 'np.array', (["['d', 'e', 'f', 'e', 'd']"], {}), "(['d', 'e', 'f', 'e', 'd'])\n", (9619, 9646), True, 'import numpy as np\n')] |
"""Provide a thead-direction turning curve analysis"""
import warnings

import numpy as np

import opexebo
import opexebo.defaults as default
def tuning_curve(angular_occupancy, spike_angles, **kwargs):
    """Analogous to a RateMap - i.e. mapping spike activity to spatial position
    map spike rate as a function of angle

    Parameters
    ----------
    angular_occupancy : np.ma.MaskedArray
        unsmoothed histogram of time spent at each angular range
        Nx1 array, covering the range [0, 2pi] radians
        Masked at angles of zero occupancy
    spike_angles : np.ndarray
        Mx1 array, where the m'th value is the angle of the animal (in radians)
        associated with the m'th spike
    kwargs
        bin_width : float
            width of histogram bin in DEGREES
            Must match that used in calculating angular_occupancy
            In the case of a non-exact divisor of 360 deg, the bin size will be
            shrunk to yield an integer bin number.

    Returns
    -------
    tuning_curve : np.ma.MaskedArray
        unsmoothed array of firing rate as a function of angle
        Nx1 array

    Raises
    ------
    ValueError
        If either input is not 1-dimensional, or if ``bin_width`` does not
        match the value used to generate ``angular_occupancy``.

    Notes
    --------
    BNT.+analyses.turningcurve

    Copyright (C) 2019 by <NAME>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
    """
    occ_ndim = angular_occupancy.ndim
    spk_ndim = spike_angles.ndim
    if occ_ndim != 1:
        raise ValueError("angular_occupancy must be a 1D array. You provided a"\
                         " %d dimensional array" % occ_ndim)
    if spk_ndim != 1:
        raise ValueError("spike_angles must be a 1D array. You provided a %d"\
                         " dimensional array" % spk_ndim)
    if np.nanmax(spike_angles) > 2*np.pi:
        # Bug fix: the original `raise Warning(...)` aborted execution.
        # This condition is advisory (angles > 2pi usually mean the caller
        # passed degrees rather than radians), so warn without raising.
        warnings.warn("Angles higher than 2pi detected. Please check that your"
                      " spike_angle array is in radians. If it is in degrees,"
                      " you can convert with 'np.radians(array)'",
                      RuntimeWarning)

    bin_width = kwargs.get("bin_width", default.bin_angle)  # in degrees
    # num_bins is for validation ONLY, the value is not passed onwards
    num_bins = opexebo.general.bin_width_to_bin_number(360., bin_width)
    if num_bins != angular_occupancy.size:
        raise ValueError("Keyword 'bin_width' must match the value used to"\
                         " generate angular_occupancy")

    # UNITS: bin_width must share units with the (hardcoded) 2*pi arena size,
    # and `limits` must share units with spike_angles -> convert to radians.
    bin_width = np.radians(bin_width)
    spike_histogram, _ = opexebo.general.accumulate_spatial(
            spike_angles, arena_size=2*np.pi, limits=(0, 2*np.pi),
            bin_width=bin_width)
    # np.spacing(1) guards against division by zero in zero-occupancy bins.
    tuning_curve = spike_histogram / (angular_occupancy + np.spacing(1))

    return tuning_curve
| [
"numpy.radians",
"numpy.spacing",
"opexebo.general.accumulate_spatial",
"opexebo.general.bin_width_to_bin_number",
"numpy.nanmax"
] | [((2228, 2285), 'opexebo.general.bin_width_to_bin_number', 'opexebo.general.bin_width_to_bin_number', (['(360.0)', 'bin_width'], {}), '(360.0, bin_width)\n', (2267, 2285), False, 'import opexebo\n'), ((2777, 2798), 'numpy.radians', 'np.radians', (['bin_width'], {}), '(bin_width)\n', (2787, 2798), True, 'import numpy as np\n'), ((2833, 2951), 'opexebo.general.accumulate_spatial', 'opexebo.general.accumulate_spatial', (['spike_angles'], {'arena_size': '(2 * np.pi)', 'limits': '(0, 2 * np.pi)', 'bin_width': 'bin_width'}), '(spike_angles, arena_size=2 * np.pi,\n limits=(0, 2 * np.pi), bin_width=bin_width)\n', (2867, 2951), False, 'import opexebo\n'), ((1877, 1900), 'numpy.nanmax', 'np.nanmax', (['spike_angles'], {}), '(spike_angles)\n', (1886, 1900), True, 'import numpy as np\n'), ((3019, 3032), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (3029, 3032), True, 'import numpy as np\n')] |
import sys
from os import path
sys.path.append(path.dirname(__file__))
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from Stopwords import Stopwords
from Ylabeler import Ylabeler
from gensim.models import Word2Vec
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import r2_score, mean_squared_log_error
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import GlobalMaxPool1D
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
class Preprocessor:
    """Preprocess Korean disclosure/news titles and train Word2Vec + 1D-CNN
    models (regression or classification) against price-movement labels.

    Pipeline per run: load csv -> tokenize titles & resolve company names ->
    strip stopwords -> label Y via Ylabeler -> embed with Word2Vec -> pad,
    split and train a small Keras CNN, then report metrics.
    """

    def __init__(self, path="../Data/news_datatest.csv"):
        # raw_csv = pandas instance
        self.raw_csv = self.__get_csv(path)
        # company name list from csv
        self.co_name_list = set(self.__get_company_names())
        """ initializing tools for preprocess (stopwords, vectorization, y_labeling) """
        self.stopwords = Stopwords()
        self.y_labeler = Ylabeler()
        """ output = CSV file with X and Y """
        self.output = None

    @staticmethod
    def __get_csv(path):
        """
        Load data from a csv file.
        :param path: String type
        :return: pandas instance
        """
        return pd.read_csv(path, engine='python')

    def _test_open_csv(self):
        """
        Print the loaded csv data (debug helper).
        :return:
        """
        print(self.raw_csv)

    def __get_company_names(self):
        """
        Get company names from the listing csv, sorted by Korean short name.
        :return: list
        """
        loaded_csv = pd.read_csv("../Data/data_1213_20211114.csv", engine='python')
        sorted_data = loaded_csv[['단축코드', '상장주식수', '한글 종목약명']].sort_values('한글 종목약명')
        return sorted_data['한글 종목약명'].tolist()

    def _get_token_and_company_name(self):
        """
        Lightly tokenize each disclosure title and extract the company name.
        Entries where zero or two-plus company names match are marked False.
        :return: title dict, date list, and list of company names mentioned
            (a date per row is collected too, to match news to prices).
            2021.10.10, 13
        """
        print(">> 총 데이터 수 = {}".format(len(self.raw_csv)))
        # dictionary used only to extract the company name from each title
        title_dict_for_getting_company = {}
        # final dictionary of titles after replacement and stopword removal
        title_dict = {}
        # publication dates, one per row
        date_list = []
        # company names resolved against the listing csv
        company_name = []
        # tokenize every title into title_dict_for_getting_company
        # and append its date to date_list
        for idx, row in self.raw_csv.iterrows():
            if idx % 1000 == 0:
                print(">> {}번째 기업명 추출 작업 실행 완료..".format(idx))
            title_dict_for_getting_company[str(idx)] = word_tokenize(row['title'])
            date_list.append(word_tokenize(row['date'])[0])
        # mark False when no company name (or more than one) is found
        for t in title_dict_for_getting_company.values():
            company_name.append(list(set(t) & self.co_name_list) if len(set(t) & self.co_name_list) == 1 else False)
        # skip rows marked False; for the rest, drop the company name from the
        # title and remove stopwords
        for idx, row in self.raw_csv.iterrows():
            if idx % 1000 == 0:
                print(">> {}번째 기업명 제거 작업 실행 완료..".format(idx))
            if company_name[idx] != False:
                row['title'] = row['title'].replace(company_name[idx][0], '')
                title_dict[str(idx)] = self.stopwords._get_delete_stopwords(self.stopwords._get_replace_stopwords(row['title']))
        print(">> result")
        for i in range(3):
            print(title_dict[str(i)], date_list[i], company_name[i])
        return title_dict, date_list, company_name

    def __get_processed_csv(self):
        """
        get csv after preprocessed
        :return:
        """
        pass

    def _process_step_reg(self):
        """
        this is function preprocess step
        1) raw csv ->
        2) eliminate symbol stopword ->
        3) get y labeled data from pandas naver api ->
        4) return preprocessed csv

        Regression variant: trains Word2Vec + 1D-CNN with MSE loss and
        reports the R^2 score on the held-out split.
        :return:
        """
        tokenized_dict, date_dict, csv_co_list = self._get_token_and_company_name()
        y_data = self.y_labeler._ylaber_step("reg", date_dict, csv_co_list)
        not_coname_count = 0
        # drop X rows whose company name (and therefore Y label) is missing
        for idx, co in enumerate(csv_co_list):
            if co == False:
                not_coname_count=not_coname_count+1
                del tokenized_dict[str(idx)]
        print(">> Y값이 도출되지 않은 공시 데이터 개수 =", not_coname_count)
        print(">> 유의미한 X 데이터의 개수 =", len(tokenized_dict), '유의미한 Y 데이터의 개수 =', len(y_data))
        lst = list(tokenized_dict.values())
        # ----------------------------- assemble the dataset
        x_y_data = pd.DataFrame({'X': lst, 'Y': y_data})
        print("Dataframe")
        print(x_y_data.head(3))
        result = list(x_y_data['X'])
        NewsModel = Word2Vec(sg=1, size=300, window=1, min_count=1, workers=-1)
        NewsModel.build_vocab(result)  # build the vocabulary
        NewsModel.train(result, total_examples=len(result), epochs=100)  # train the embedding
        # ----------------- extract embedding results from Word2vec as a dict()
        embedding_results = dict()
        for token in NewsModel.wv.vocab.keys():
            embedding_results[token] = NewsModel.wv[token]
        vocab_size = len(NewsModel.wv.vocab.keys()) + 1
        embedding_matrix = np.zeros((vocab_size, NewsModel.wv.vector_size))
        # ------------------------------- integer-encode the vocabulary
        word_to_id = dict()
        id_to_word = dict()
        all_words = NewsModel.wv.vocab.keys()
        for word in all_words:
            if word not in word_to_id:
                new_id = len(word_to_id) + 1
                word_to_id[word] = new_id
                id_to_word[new_id] = word
        # print('\nword_to_id and id_to_word')
        # print(word_to_id)
        # print(id_to_word)
        # ----------------------- store embeddings at each word's integer index
        for word, i in word_to_id.items():
            embedding_matrix[i, :] = embedding_results[word]
        # ------------------- CNN-based analysis
        encoded_result = []
        for sent in result:
            temp_id = []
            for word in sent:
                temp_id.append(word_to_id[word])
            encoded_result.append(temp_id)
        max_len = max(len(encoded_title) for encoded_title in encoded_result)
        min_len = min(len(encoded_title) for encoded_title in encoded_result)
        # ----------------------------------- padding
        padded_encoded_result = pad_sequences(encoded_result, padding='post')  # zero-pad; length is tunable via the maxlen parameter
        print('\n인코딩 결과 확인')
        print(encoded_result[0])
        print(padded_encoded_result[0].shape, padded_encoded_result[0])
        # ------------------------------------- train/test split
        indices = np.arange(len(padded_encoded_result))
        Y = x_y_data['Y']
        indices_train, indicies_test = train_test_split(indices, test_size=0.2, shuffle=True,
                                                        random_state=0)
        train_X = padded_encoded_result[indices_train]
        train_Y = Y.iloc[indices_train]
        test_X = padded_encoded_result[indicies_test]
        test_Y = Y.iloc[indicies_test]
        print('\nWord2Vec info')
        print("vocab size=", vocab_size, "vector size=", NewsModel.wv.vector_size, "max len=", max_len, "min len=",
              min_len)
        # ------------------------------- build the CNN model
        embedding_layer = Embedding(vocab_size, NewsModel.wv.vector_size, weights=[embedding_matrix],
                                    input_length=max_len, trainable=False)  # word2vec weights are pre-trained; trainable=True would fine-tune
        CNN = Sequential()
        CNN.add(embedding_layer)
        CNN.add(Conv1D(filters=50, kernel_size=1, activation='relu'))  # width is fixed, so Conv1D; kernel_size is the height
        CNN.add(GlobalMaxPool1D())
        CNN.add(Flatten())
        # CNN.add(Dense(100, activation='relu'))
        # CNN.add(Dense(20, activation='relu'))
        CNN.add(Dense(1))
        print(CNN.summary())
        # ----------------------------- train the CNN
        epoch_num = 100
        CNN.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        history = CNN.fit(x=train_X, y=train_Y, epochs=epoch_num, verbose=0,
                          batch_size=32,
                          validation_data=(test_X, test_Y))
        loss_train = history.history['loss']
        loss_val = history.history['val_loss']
        epochs = range(1, epoch_num+1)
        plt.plot(epochs, loss_train, 'b-o', label='Training loss')
        plt.plot(epochs, loss_val, 'r-o', label='Valid loss')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        plt.legend()
        plt.show()
        y_pred = CNN.predict(test_X)
        print(y_pred)
        r2 = r2_score(test_Y, y_pred)
        #RMSLE = np.sqrt(mean_squared_log_error(test_Y, y_pred))
        print("r_square score=", r2)
        #print("RMSLE score=", RMSLE)

    def _process_step_cls(self):
        """
        this is function preprocess step
        1) raw csv ->
        2) eliminate symbol stopword ->
        3) get y labeled data from pandas naver api ->
        4) return preprocessed csv

        Classification variant: balances the three classes (down/flat/up) by
        down-sampling, trains Word2Vec + 1D-CNN with softmax output, and
        prints a confusion matrix and classification report.
        :return:
        """
        tokenized_dict, date_dict, csv_co_list = self._get_token_and_company_name()
        y_data = self.y_labeler._ylaber_step("cls", date_dict, csv_co_list)
        not_coname_count = 0
        # drop X rows whose company name (and therefore Y label) is missing
        for idx, co in enumerate(csv_co_list):
            if co == False:
                not_coname_count = not_coname_count + 1
                del tokenized_dict[str(idx)]
        print(">> Y값이 도출되지 않은 공시 데이터 개수 =", not_coname_count)
        print(">> 유의미한 X 데이터의 개수 =", len(tokenized_dict), '유의미한 Y 데이터의 개수 =', len(y_data))
        print('하락의 개수:', y_data.count(0), '\n보합의 개수:', y_data.count(1), '\n상승의 개수:', y_data.count(2))
        lst = list(tokenized_dict.values())
        # ----------------------------- assemble the dataset
        x_y_data = pd.DataFrame({'X': lst, 'Y': y_data})
        # balance classes by down-sampling each class to the smallest count
        zero_x_y_data = x_y_data[x_y_data['Y'] == 0]
        one_x_y_data = x_y_data[x_y_data['Y'] == 1]
        two_x_y_data = x_y_data[x_y_data['Y'] == 2]
        min_count = min(y_data.count(0), y_data.count(1), y_data.count(2))
        zero_x_y_data = zero_x_y_data.sample(frac=min_count / y_data.count(0), random_state=0)
        one_x_y_data = one_x_y_data.sample(frac=min_count / y_data.count(1), random_state=0)
        two_x_y_data = two_x_y_data.sample(frac=min_count / y_data.count(2), random_state=0)
        x_y_data = pd.concat([zero_x_y_data, one_x_y_data, two_x_y_data])
        print(">> 데이터 불균형 해결 이후 ")
        print(x_y_data['Y'].value_counts())
        print("Dataframe")
        print(x_y_data.head(3))
        result = list(x_y_data['X'])
        NewsModel = Word2Vec(sg=1, size=300, window=3, min_count=1, workers=-1)
        NewsModel.build_vocab(result)  # build the vocabulary
        NewsModel.train(result, total_examples=len(result), epochs=100)  # train the embedding
        # ----------------- extract embedding results from Word2vec as a dict()
        embedding_results = dict()
        for token in NewsModel.wv.vocab.keys():
            embedding_results[token] = NewsModel.wv[token]
        vocab_size = len(NewsModel.wv.vocab.keys()) + 1
        embedding_matrix = np.zeros((vocab_size, NewsModel.wv.vector_size))
        # ------------------------------- integer-encode the vocabulary
        word_to_id = dict()
        id_to_word = dict()
        all_words = NewsModel.wv.vocab.keys()
        for word in all_words:
            if word not in word_to_id:
                new_id = len(word_to_id) + 1
                word_to_id[word] = new_id
                id_to_word[new_id] = word
        # print('\nword_to_id and id_to_word')
        # print(word_to_id)
        # print(id_to_word)
        # ----------------------- store embeddings at each word's integer index
        for word, i in word_to_id.items():
            embedding_matrix[i, :] = embedding_results[word]
        # ------------------- CNN-based analysis
        encoded_result = []
        for sent in result:
            temp_id = []
            for word in sent:
                temp_id.append(word_to_id[word])
            encoded_result.append(temp_id)
        max_len = max(len(encoded_email) for encoded_email in encoded_result)
        min_len = min(len(encoded_email) for encoded_email in encoded_result)
        # ----------------------------------- padding
        padded_encoded_result = pad_sequences(encoded_result, padding='post')  # zero-pad; length is tunable via the maxlen parameter
        print('\n인코딩 결과 확인')
        print(encoded_result[0])
        print(padded_encoded_result[0].shape, padded_encoded_result[0])
        # ------------------------------------- train/test split (stratified)
        indices = np.arange(len(padded_encoded_result))
        Y = x_y_data['Y']
        indices_train, indicies_test = train_test_split(indices, test_size=0.2, shuffle=True, random_state=0,stratify=Y)
        train_X = padded_encoded_result[indices_train]
        train_Y = Y.iloc[indices_train]
        test_X = padded_encoded_result[indicies_test]
        test_Y = Y.iloc[indicies_test]
        print('\nWord2Vec info')
        print("vocab size=", vocab_size, "vector size=", NewsModel.wv.vector_size, "max len=", max_len, "min len=",
              min_len)
        # ------------------------------- build the CNN model
        embedding_layer = Embedding(vocab_size, NewsModel.wv.vector_size, weights=[embedding_matrix],
                                    input_length=max_len, trainable=False)  # word2vec weights are pre-trained; trainable=True would fine-tune
        """
        CNN 모델의 최적 파라미터를 학습해보기 위하여 파라미터가 될 수 있는 값들을
        반복문으로 실행하여 최적의 파라미터를 도출해본 코드
        # filter_list = [10, 30, 50]
        # kernel_list = [1, 2]
        # conv_activation_list = ['relu', 'sigmoid']
        # optimizer_list = ['adam', 'sgd']
        # batch_size_list = [32, 64]
        # info_list = []
        # result_list = []
        # class_result_list = []
        # n = 0
        # for f in filter_list:
        #     for k in kernel_list:
        #         for c in conv_activation_list:
        #             for o in optimizer_list:
        #                 for b in batch_size_list:
        """
        CNN = Sequential()
        CNN.add(embedding_layer)
        CNN.add(Conv1D(filters=50, kernel_size=1, activation='relu'))  # width is fixed, so Conv1D; kernel_size is the height
        CNN.add(GlobalMaxPool1D())
        CNN.add(Flatten())
        CNN.add(Dense(3, activation='softmax'))
        print(CNN.summary())
        # ----------------------------- train the CNN
        epoch_num = 100
        CNN.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        history = CNN.fit(x=train_X, y=to_categorical(np.array(train_Y)), epochs=epoch_num, verbose=0, batch_size=32,
                          validation_data=(test_X, to_categorical(np.array(test_Y))))
        loss_train = history.history['loss']
        loss_val = history.history['val_loss']
        epochs = range(1, epoch_num+1)
        plt.plot(epochs, loss_train, 'b-o', label='Training loss')
        plt.plot(epochs, loss_val, 'r-o', label='Valid loss')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        plt.legend()
        plt.show()
        y_pred = CNN.predict(test_X)
        y_pred = np.argmax(y_pred, axis=1)
        print(y_pred, type(y_pred))
        print("0 =", np.count_nonzero(y_pred == 0), "1 =", np.count_nonzero(y_pred == 1),"2 =", np.count_nonzero(y_pred == 2))
        print(confusion_matrix(test_Y, y_pred))
        print(classification_report(test_Y, y_pred, target_names=['0', '1', '2']))
        """
        CNN 모델의 최적 파라미터의 결과를 확인해보기 위하여 반복문으로 실행 결과를 저장하여 출력한 코드
        # info_list.append([f,k,c,o,b])
        # result_list.append(confusion_matrix(test_Y, predy))
        # class_result_list.append(classification_report(test_Y, predy, target_names=['0', '1', '2']))
        # print(confusion_matrix(test_Y, predy))
        # print(classification_report(test_Y, predy, target_names=['0', '1', '2']))
        # n=n+1
        # print(n,"번째 완료")
        # for a in range(len(result_list)):
        #     print("\n",info_list[a],"\n",result_list[a],"\n",class_result_list[a])
        """
if __name__ == '__main__':
    # Script entry point: build the preprocessor and run the regression
    # pipeline. (Call _process_step_cls() instead for the classification run.)
    runner = Preprocessor()
    runner._process_step_reg()
| [
"numpy.argmax",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Dense",
"sklearn.metrics.r2_score",
"sklearn.metrics.classification_report",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"pandas.DataFrame",
"Stopwords.Stopwords",
... | [((47, 69), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (59, 69), False, 'from os import path\n'), ((1257, 1268), 'Stopwords.Stopwords', 'Stopwords', ([], {}), '()\n', (1266, 1268), False, 'from Stopwords import Stopwords\n'), ((1294, 1304), 'Ylabeler.Ylabeler', 'Ylabeler', ([], {}), '()\n', (1302, 1304), False, 'from Ylabeler import Ylabeler\n'), ((1556, 1590), 'pandas.read_csv', 'pd.read_csv', (['path'], {'engine': '"""python"""'}), "(path, engine='python')\n", (1567, 1590), True, 'import pandas as pd\n'), ((1847, 1909), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/data_1213_20211114.csv"""'], {'engine': '"""python"""'}), "('../Data/data_1213_20211114.csv', engine='python')\n", (1858, 1909), True, 'import pandas as pd\n'), ((4827, 4864), 'pandas.DataFrame', 'pd.DataFrame', (["{'X': lst, 'Y': y_data}"], {}), "({'X': lst, 'Y': y_data})\n", (4839, 4864), True, 'import pandas as pd\n'), ((4984, 5043), 'gensim.models.Word2Vec', 'Word2Vec', ([], {'sg': '(1)', 'size': '(300)', 'window': '(1)', 'min_count': '(1)', 'workers': '(-1)'}), '(sg=1, size=300, window=1, min_count=1, workers=-1)\n', (4992, 5043), False, 'from gensim.models import Word2Vec\n'), ((5460, 5508), 'numpy.zeros', 'np.zeros', (['(vocab_size, NewsModel.wv.vector_size)'], {}), '((vocab_size, NewsModel.wv.vector_size))\n', (5468, 5508), True, 'import numpy as np\n'), ((6625, 6670), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_result'], {'padding': '"""post"""'}), "(encoded_result, padding='post')\n", (6638, 6670), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((7026, 7096), 'sklearn.model_selection.train_test_split', 'train_test_split', (['indices'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(0)'}), '(indices, test_size=0.2, shuffle=True, random_state=0)\n', (7042, 7096), False, 'from sklearn.model_selection import train_test_split\n'), ((7595, 7713), 
'tensorflow.keras.layers.Embedding', 'Embedding', (['vocab_size', 'NewsModel.wv.vector_size'], {'weights': '[embedding_matrix]', 'input_length': 'max_len', 'trainable': '(False)'}), '(vocab_size, NewsModel.wv.vector_size, weights=[embedding_matrix],\n input_length=max_len, trainable=False)\n', (7604, 7713), False, 'from tensorflow.keras.layers import Embedding\n'), ((7802, 7814), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7812, 7814), False, 'from tensorflow.keras.models import Sequential\n'), ((8632, 8690), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_train', '"""b-o"""'], {'label': '"""Training loss"""'}), "(epochs, loss_train, 'b-o', label='Training loss')\n", (8640, 8690), True, 'import matplotlib.pyplot as plt\n'), ((8699, 8752), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_val', '"""r-o"""'], {'label': '"""Valid loss"""'}), "(epochs, loss_val, 'r-o', label='Valid loss')\n", (8707, 8752), True, 'import matplotlib.pyplot as plt\n'), ((8761, 8781), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (8771, 8781), True, 'import matplotlib.pyplot as plt\n'), ((8790, 8808), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8800, 8808), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8829), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8827, 8829), True, 'import matplotlib.pyplot as plt\n'), ((8838, 8848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8846, 8848), True, 'import matplotlib.pyplot as plt\n'), ((8923, 8947), 'sklearn.metrics.r2_score', 'r2_score', (['test_Y', 'y_pred'], {}), '(test_Y, y_pred)\n', (8931, 8947), False, 'from sklearn.metrics import r2_score, mean_squared_log_error\n'), ((10099, 10136), 'pandas.DataFrame', 'pd.DataFrame', (["{'X': lst, 'Y': y_data}"], {}), "({'X': lst, 'Y': y_data})\n", (10111, 10136), True, 'import pandas as pd\n'), ((10673, 10727), 'pandas.concat', 'pd.concat', (['[zero_x_y_data, 
one_x_y_data, two_x_y_data]'], {}), '([zero_x_y_data, one_x_y_data, two_x_y_data])\n', (10682, 10727), True, 'import pandas as pd\n'), ((10926, 10985), 'gensim.models.Word2Vec', 'Word2Vec', ([], {'sg': '(1)', 'size': '(300)', 'window': '(3)', 'min_count': '(1)', 'workers': '(-1)'}), '(sg=1, size=300, window=3, min_count=1, workers=-1)\n', (10934, 10985), False, 'from gensim.models import Word2Vec\n'), ((11404, 11452), 'numpy.zeros', 'np.zeros', (['(vocab_size, NewsModel.wv.vector_size)'], {}), '((vocab_size, NewsModel.wv.vector_size))\n', (11412, 11452), True, 'import numpy as np\n'), ((12568, 12613), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_result'], {'padding': '"""post"""'}), "(encoded_result, padding='post')\n", (12581, 12613), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((12969, 13055), 'sklearn.model_selection.train_test_split', 'train_test_split', (['indices'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(0)', 'stratify': 'Y'}), '(indices, test_size=0.2, shuffle=True, random_state=0,\n stratify=Y)\n', (12985, 13055), False, 'from sklearn.model_selection import train_test_split\n'), ((13494, 13612), 'tensorflow.keras.layers.Embedding', 'Embedding', (['vocab_size', 'NewsModel.wv.vector_size'], {'weights': '[embedding_matrix]', 'input_length': 'max_len', 'trainable': '(False)'}), '(vocab_size, NewsModel.wv.vector_size, weights=[embedding_matrix],\n input_length=max_len, trainable=False)\n', (13503, 13612), False, 'from tensorflow.keras.layers import Embedding\n'), ((14349, 14361), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14359, 14361), False, 'from tensorflow.keras.models import Sequential\n'), ((15151, 15209), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_train', '"""b-o"""'], {'label': '"""Training loss"""'}), "(epochs, loss_train, 'b-o', label='Training loss')\n", (15159, 15209), True, 'import matplotlib.pyplot as plt\n'), 
((15218, 15271), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_val', '"""r-o"""'], {'label': '"""Valid loss"""'}), "(epochs, loss_val, 'r-o', label='Valid loss')\n", (15226, 15271), True, 'import matplotlib.pyplot as plt\n'), ((15280, 15300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (15290, 15300), True, 'import matplotlib.pyplot as plt\n'), ((15309, 15327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (15319, 15327), True, 'import matplotlib.pyplot as plt\n'), ((15336, 15348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15346, 15348), True, 'import matplotlib.pyplot as plt\n'), ((15357, 15367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15365, 15367), True, 'import matplotlib.pyplot as plt\n'), ((15423, 15448), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (15432, 15448), True, 'import numpy as np\n'), ((2885, 2912), 'nltk.tokenize.word_tokenize', 'word_tokenize', (["row['title']"], {}), "(row['title'])\n", (2898, 2912), False, 'from nltk.tokenize import word_tokenize\n'), ((7864, 7916), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(50)', 'kernel_size': '(1)', 'activation': '"""relu"""'}), "(filters=50, kernel_size=1, activation='relu')\n", (7870, 7916), False, 'from tensorflow.keras.layers import Conv1D\n'), ((7970, 7987), 'tensorflow.keras.layers.GlobalMaxPool1D', 'GlobalMaxPool1D', ([], {}), '()\n', (7985, 7987), False, 'from tensorflow.keras.layers import GlobalMaxPool1D\n'), ((8005, 8014), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8012, 8014), False, 'from tensorflow.keras.layers import Flatten\n'), ((8129, 8137), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (8134, 8137), False, 'from tensorflow.keras.layers import Dense\n'), ((14411, 14463), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(50)', 'kernel_size': '(1)', 'activation': 
'"""relu"""'}), "(filters=50, kernel_size=1, activation='relu')\n", (14417, 14463), False, 'from tensorflow.keras.layers import Conv1D\n'), ((14517, 14534), 'tensorflow.keras.layers.GlobalMaxPool1D', 'GlobalMaxPool1D', ([], {}), '()\n', (14532, 14534), False, 'from tensorflow.keras.layers import GlobalMaxPool1D\n'), ((14552, 14561), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (14559, 14561), False, 'from tensorflow.keras.layers import Flatten\n'), ((14579, 14609), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (14584, 14609), False, 'from tensorflow.keras.layers import Dense\n'), ((15508, 15537), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_pred == 0)'], {}), '(y_pred == 0)\n', (15524, 15537), True, 'import numpy as np\n'), ((15546, 15575), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_pred == 1)'], {}), '(y_pred == 1)\n', (15562, 15575), True, 'import numpy as np\n'), ((15583, 15612), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_pred == 2)'], {}), '(y_pred == 2)\n', (15599, 15612), True, 'import numpy as np\n'), ((15628, 15660), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_Y', 'y_pred'], {}), '(test_Y, y_pred)\n', (15644, 15660), False, 'from sklearn.metrics import confusion_matrix\n'), ((15676, 15743), 'sklearn.metrics.classification_report', 'classification_report', (['test_Y', 'y_pred'], {'target_names': "['0', '1', '2']"}), "(test_Y, y_pred, target_names=['0', '1', '2'])\n", (15697, 15743), False, 'from sklearn.metrics import classification_report\n'), ((2942, 2968), 'nltk.tokenize.word_tokenize', 'word_tokenize', (["row['date']"], {}), "(row['date'])\n", (2955, 2968), False, 'from nltk.tokenize import word_tokenize\n'), ((14861, 14878), 'numpy.array', 'np.array', (['train_Y'], {}), '(train_Y)\n', (14869, 14878), True, 'import numpy as np\n'), ((14991, 15007), 'numpy.array', 'np.array', (['test_Y'], {}), '(test_Y)\n', (14999, 15007), True, 
'import numpy as np\n')] |
import numpy as np
import torch
class PointCloudShuffle(object):
def __init__(self, num_point):
self.num_point = num_point
def __call__(self, sample):
pt_idxs = np.arange(0, self.num_point)
np.random.shuffle(pt_idxs)
sample['point_clouds'] = sample['point_clouds'][pt_idxs]
sample['rot_label'] = sample['rot_label'][pt_idxs]
sample['trans_label'] = sample['trans_label'][pt_idxs]
sample['cls_label'] = sample['cls_label'][pt_idxs]
if 'vis_label' in sample:
sample['vis_label'] = sample['vis_label'][pt_idxs]
return sample
class PointCloudJitter(object):
def __init__(self, scale):
self.scale = scale
def __call__(self, sample):
all_noise = np.random.standard_normal(sample['point_clouds'].shape) * self.scale
sample['point_clouds'] = sample['point_clouds'] + all_noise
sample['point_clouds'] = sample['point_clouds'].astype(np.float32)
return sample
class ToTensor(object):
def __call__(self, sample):
sample['point_clouds'] = torch.from_numpy(sample['point_clouds'])
sample['rot_label'] = torch.from_numpy(sample['rot_label'])
sample['trans_label'] = torch.from_numpy(sample['trans_label'])
sample['cls_label'] = torch.from_numpy(sample['cls_label'])
if 'vis_label' in sample:
sample['vis_label'] = torch.from_numpy(sample['vis_label'])
return sample
| [
"numpy.random.standard_normal",
"numpy.arange",
"numpy.random.shuffle",
"torch.from_numpy"
] | [((195, 223), 'numpy.arange', 'np.arange', (['(0)', 'self.num_point'], {}), '(0, self.num_point)\n', (204, 223), True, 'import numpy as np\n'), ((233, 259), 'numpy.random.shuffle', 'np.random.shuffle', (['pt_idxs'], {}), '(pt_idxs)\n', (250, 259), True, 'import numpy as np\n'), ((1128, 1168), 'torch.from_numpy', 'torch.from_numpy', (["sample['point_clouds']"], {}), "(sample['point_clouds'])\n", (1144, 1168), False, 'import torch\n'), ((1200, 1237), 'torch.from_numpy', 'torch.from_numpy', (["sample['rot_label']"], {}), "(sample['rot_label'])\n", (1216, 1237), False, 'import torch\n'), ((1271, 1310), 'torch.from_numpy', 'torch.from_numpy', (["sample['trans_label']"], {}), "(sample['trans_label'])\n", (1287, 1310), False, 'import torch\n'), ((1342, 1379), 'torch.from_numpy', 'torch.from_numpy', (["sample['cls_label']"], {}), "(sample['cls_label'])\n", (1358, 1379), False, 'import torch\n'), ((795, 850), 'numpy.random.standard_normal', 'np.random.standard_normal', (["sample['point_clouds'].shape"], {}), "(sample['point_clouds'].shape)\n", (820, 850), True, 'import numpy as np\n'), ((1450, 1487), 'torch.from_numpy', 'torch.from_numpy', (["sample['vis_label']"], {}), "(sample['vis_label'])\n", (1466, 1487), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# (mostly translation, see implementation details)
# License: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.abs(np.asarray(d, dtype=np.float64))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
n
theta, d --> r(theta, d) = 1 if sum |d_i| == 0
i = 1
0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
def linear(theta, d):
"""
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
| [
"numpy.abs",
"numpy.sum",
"numpy.asarray",
"numpy.zeros",
"numpy.prod",
"numpy.all",
"numpy.repeat"
] | [((1063, 1098), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (1073, 1098), True, 'import numpy as np\n'), ((2367, 2402), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (2377, 2402), True, 'import numpy as np\n'), ((2411, 2442), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (2421, 2442), True, 'import numpy as np\n'), ((3688, 3723), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (3698, 3723), True, 'import numpy as np\n'), ((3732, 3763), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (3742, 3763), True, 'import numpy as np\n'), ((5049, 5084), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (5059, 5084), True, 'import numpy as np\n'), ((5093, 5124), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (5103, 5124), True, 'import numpy as np\n'), ((5158, 5174), 'numpy.zeros', 'np.zeros', (['n_eval'], {}), '(n_eval)\n', (5166, 5174), True, 'import numpy as np\n'), ((5948, 5983), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (5958, 5983), True, 'import numpy as np\n'), ((5992, 6023), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (6002, 6023), True, 'import numpy as np\n'), ((6416, 6430), 'numpy.prod', 'np.prod', (['ss', '(1)'], {}), '(ss, 1)\n', (6423, 6430), True, 'import numpy as np\n'), ((7156, 7191), 'numpy.asarray', 'np.asarray', (['theta'], {'dtype': 'np.float64'}), '(theta, dtype=np.float64)\n', (7166, 7191), True, 'import numpy as np\n'), ((7200, 7231), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (7210, 7231), True, 'import numpy as np\n'), ((7598, 7612), 'numpy.prod', 
'np.prod', (['ss', '(1)'], {}), '(ss, 1)\n', (7605, 7612), True, 'import numpy as np\n'), ((1114, 1145), 'numpy.asarray', 'np.asarray', (['d'], {'dtype': 'np.float64'}), '(d, dtype=np.float64)\n', (1124, 1145), True, 'import numpy as np\n'), ((5181, 5205), 'numpy.all', 'np.all', (['(d == 0.0)'], {'axis': '(1)'}), '(d == 0.0, axis=1)\n', (5187, 5205), True, 'import numpy as np\n'), ((4186, 4195), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (4192, 4195), True, 'import numpy as np\n'), ((4229, 4242), 'numpy.sum', 'np.sum', (['td', '(1)'], {}), '(td, 1)\n', (4235, 4242), True, 'import numpy as np\n'), ((6161, 6170), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (6167, 6170), True, 'import numpy as np\n'), ((7369, 7378), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (7375, 7378), True, 'import numpy as np\n'), ((1292, 1309), 'numpy.sum', 'np.sum', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (1298, 1309), True, 'import numpy as np\n'), ((2587, 2609), 'numpy.sum', 'np.sum', (['(d ** 2)'], {'axis': '(1)'}), '(d ** 2, axis=1)\n', (2593, 2609), True, 'import numpy as np\n'), ((3934, 3965), 'numpy.repeat', 'np.repeat', (['theta[0]', 'n_features'], {}), '(theta[0], n_features)\n', (3943, 3965), True, 'import numpy as np\n'), ((6305, 6314), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (6311, 6314), True, 'import numpy as np\n'), ((7510, 7519), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (7516, 7519), True, 'import numpy as np\n')] |
"""
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
@author: <NAME>
"""
import argparse
import configparser as CP
import os
from datetime import datetime
import numpy as np
import pandas as pd
import scaleapi
now = datetime.now() # current date and time
def main(args, scale_api_key, scale_account_name):
client = scaleapi.ScaleClient(scale_api_key)
# create output folder
output_dir = args.project
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# create list to collect all tasks
all_tasks = []
counter = 0
next_token = None
while (True):
tasks = client.tasks(
batch=args.project,
status='completed',
next_token=next_token,
project=scale_account_name,
)
for task in tasks:
counter += 1
# print(f'Downloading Task {counter} | ${task.task_id}')
all_tasks.append(task)
next_token = tasks.next_token
if (next_token == None):
break
if args.method == 'acr':
results, rater_stats = parse_acr(all_tasks)
df = pd.DataFrame(results)
df.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_per_clip_results.csv'), index=False)
# TODO: This doesn't mesh well with other uses of P.808 ratings, something needs to be figured out
"""
model_pivot_table = df.pivot_table(
values='MOS', index='model', columns='clipset', margins=True, margins_name='Overall', aggfunc=[np.mean, len, np.std])
model_pivot_table = model_pivot_table.swaplevel(axis=1)
model_pivot_table.drop('Overall', inplace=True)
for cols in model_pivot_table.columns.levels[0]:
model_pivot_table.loc[:, (cols, 'CI')] = model_pivot_table.loc[:, cols].apply(
lambda x: 1.96 * x['std']/np.sqrt(x['len']), axis=1)
model_pivot_table.loc[:, (cols, 'DMOS')] = model_pivot_table.loc[:, cols]. apply(
lambda x: x['mean'] - model_pivot_table.loc['noisy', (cols, 'mean')], axis=1)
model_pivot_table = model_pivot_table.sort_values(
('Overall', 'mean'), ascending=False).sort_index(axis=1, ascending=False)
model_pivot_table.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_per_condition_results.csv'))
"""
elif args.method == 'echo':
echo_results, deg_results, rater_stats = parse_echo(all_tasks)
df_echo = pd.DataFrame(echo_results)
df_echo.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_per_clip_results_echo.csv'), index=False)
df_deg = pd.DataFrame(deg_results)
df_deg.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_per_clip_results_deg.csv'), index=False)
elif args.method == 'p835':
p835_results, rater_stats = parse_p835(all_tasks)
df = pd.DataFrame(p835_results)
df.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_per_clip_results.csv'), index=False)
else:
raise Exception(f'Unknown method {args.method}')
df_rater = pd.DataFrame(rater_stats)
df_rater.to_csv(os.path.join(
output_dir, f'{args.project}_Batch_{now.strftime("%Y%m%d")}_rater_stats.csv'))
def parse_acr(tasks):
results = list()
rater_stats = list()
for task in tasks:
if 'response' not in task.as_dict():
print('Found task that has not been rated yet')
continue
valid_file_urls = [f for f in task['response'] if f != 'annotations' and f != 'is_customer_fix']
for file_url in valid_file_urls:
clip_dict = {'short_file_name': task.as_dict()['metadata']['file_shortname']}
clip_dict['model'] = file_url
clip_dict['file_url'] = task.as_dict()['metadata']['file_urls'][file_url]
ratings = task.as_dict()['response'][file_url]['responses']
rater_stats.extend(ratings)
for i in range(len(ratings)):
vote = 'vote_' + str(i+1)
clip_dict[vote] = ratings[i]['rating']
clip_dict['MOS'] = np.mean([rating['rating']
for rating in ratings])
clip_dict['n'] = len(ratings)
clip_dict['std'] = np.std([rating['rating']
for rating in ratings], ddof=1)
clip_dict['95%CI'] = 1.96 * \
clip_dict['std'] / np.sqrt(len(ratings))
"""
clipset_match = re.match(
'.*[/](?P<clipset>audioset|ms_realrec|noreverb_clnsp|reverb_clnsp|stationary)', clip_dict['file_url'])
clip_dict['clipset'] = clipset_match.groupdict()['clipset']
"""
results.append(clip_dict)
return results, rater_stats
def parse_p835(tasks):
results = list()
rater_stats = list()
for task in tasks:
for file_url in task.as_dict()['metadata']['file_urls']:
clip_dict = {
'short_file_name': task.as_dict()['metadata']['file_shortname']}
clip_dict['model'] = file_url
clip_dict['file_url'] = task.as_dict()['metadata']['file_urls'][file_url]
if 'response' not in task.as_dict():
print('Found task that has not been rated yet')
continue
ratings = task.as_dict()['response'][file_url]['responses']
rater_stats.extend(ratings)
clip_dict.update(get_labelled_rating(ratings, 'distortion'))
clip_dict.update(get_labelled_rating(ratings, 'background'))
clip_dict.update(get_labelled_rating(ratings, 'overall'))
results.append(clip_dict)
return results, rater_stats
def get_labelled_rating(ratings, rating_label):
votes_dict = dict()
mos = np.mean([rating[rating_label] for rating in ratings])
num_votes = len(ratings)
stdev = np.std([rating[rating_label] for rating in ratings], ddof=1)
ci95 = 1.96 * stdev / np.sqrt(num_votes)
votes_dict = {
f'MOS_{rating_label}': mos,
'n': num_votes,
f'std_{rating_label}': stdev,
f'95%CI_{rating_label}': ci95,
}
for i, rating in enumerate(ratings):
votes_dict[f'vote_{rating_label}_{i+1}'] = rating[rating_label]
return votes_dict
def parse_echo(tasks):
echo_results = list()
deg_results = list()
rater_stats = list()
for task in tasks:
for file_url in task.as_dict()['metadata']['file_urls']:
clip_dict = {
'short_file_name': task.as_dict()['metadata']['file_shortname']}
clip_dict['model'] = file_url
clip_dict['file_url'] = remove_query_string_from_url(task.as_dict()['metadata']['file_urls'][file_url])
if 'response' not in task.as_dict():
print('Found task that has not been rated yet')
continue
ratings = task.as_dict()['response'][file_url]['responses']
rater_stats.extend(ratings)
clip_dict_echo = dict(clip_dict)
clip_dict_deg = dict(clip_dict)
for i, rating in enumerate(ratings):
vote = f'vote_{i+1}'
clip_dict_echo[vote] = rating['rating_echo']
clip_dict_deg[vote] = rating['rating_deg']
clip_dict_echo['MOS_ECHO'] = np.mean(
[rating['rating_echo'] for rating in ratings])
clip_dict_echo['n'] = len(ratings)
clip_dict_echo['std_echo'] = np.std(
[rating['rating_echo'] for rating in ratings], ddof=1)
clip_dict_echo['95%CI_echo'] = 1.96 * \
clip_dict_echo['std_echo'] / np.sqrt(len(ratings))
echo_results.append(clip_dict_echo)
clip_dict_deg['MOS_OTHER'] = np.mean(
[rating['rating_deg'] for rating in ratings])
clip_dict_deg['n'] = len(ratings)
clip_dict_deg['std_other'] = np.std(
[rating['rating_deg'] for rating in ratings], ddof=1)
clip_dict_deg['95%CI_other'] = 1.96 * \
clip_dict_deg['std_other'] / np.sqrt(len(ratings))
deg_results.append(clip_dict_deg)
return echo_results, deg_results, rater_stats
# TODO: some sort of structured URL parsing is more reasonable than this hack
def remove_query_string_from_url(url):
filename_cutoff_index = url.index('.wav') + 4
return url[:filename_cutoff_index]
if __name__ == '__main__':
print("Welcome to the Scale result parsing script for ACR test.")
parser = argparse.ArgumentParser(
description='Master script to prepare the ACR test')
parser.add_argument(
"--project", help="Name of the batch to club results by", required=True)
parser.add_argument(
"--cfg", help="Configuration file, see master.cfg", required=True)
parser.add_argument(
"--method", default="acr", const="acr", nargs="?",
choices=("acr", "echo", "p835"), help="Use regular ACR questions or echo questions")
# check input arguments
args = parser.parse_args()
assert os.path.exists(args.cfg), f"No config file in {args.cfg}"
cfg = CP.ConfigParser()
cfg._interpolation = CP.ExtendedInterpolation()
cfg.read(args.cfg)
main(
args=args,
scale_api_key = cfg.get("CommonAccountKeys", 'ScaleAPIKey'),
scale_account_name = cfg.get("CommonAccountKeys", 'ScaleAccountName')
)
| [
"pandas.DataFrame",
"os.mkdir",
"argparse.ArgumentParser",
"numpy.std",
"os.path.exists",
"numpy.mean",
"configparser.ExtendedInterpolation",
"configparser.ConfigParser",
"datetime.datetime.now",
"scaleapi.ScaleClient",
"numpy.sqrt"
] | [((534, 548), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (546, 548), False, 'from datetime import datetime\n'), ((644, 679), 'scaleapi.ScaleClient', 'scaleapi.ScaleClient', (['scale_api_key'], {}), '(scale_api_key)\n', (664, 679), False, 'import scaleapi\n'), ((3652, 3677), 'pandas.DataFrame', 'pd.DataFrame', (['rater_stats'], {}), '(rater_stats)\n', (3664, 3677), True, 'import pandas as pd\n'), ((6463, 6516), 'numpy.mean', 'np.mean', (['[rating[rating_label] for rating in ratings]'], {}), '([rating[rating_label] for rating in ratings])\n', (6470, 6516), True, 'import numpy as np\n'), ((6560, 6620), 'numpy.std', 'np.std', (['[rating[rating_label] for rating in ratings]'], {'ddof': '(1)'}), '([rating[rating_label] for rating in ratings], ddof=1)\n', (6566, 6620), True, 'import numpy as np\n'), ((9302, 9378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Master script to prepare the ACR test"""'}), "(description='Master script to prepare the ACR test')\n", (9325, 9378), False, 'import argparse\n'), ((9856, 9880), 'os.path.exists', 'os.path.exists', (['args.cfg'], {}), '(args.cfg)\n', (9870, 9880), False, 'import os\n'), ((9927, 9944), 'configparser.ConfigParser', 'CP.ConfigParser', ([], {}), '()\n', (9942, 9944), True, 'import configparser as CP\n'), ((9971, 9997), 'configparser.ExtendedInterpolation', 'CP.ExtendedInterpolation', ([], {}), '()\n', (9995, 9997), True, 'import configparser as CP\n'), ((753, 779), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (767, 779), False, 'import os\n'), ((790, 810), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (798, 810), False, 'import os\n'), ((1474, 1495), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (1486, 1495), True, 'import pandas as pd\n'), ((6648, 6666), 'numpy.sqrt', 'np.sqrt', (['num_votes'], {}), '(num_votes)\n', (6655, 6666), True, 'import numpy as np\n'), ((2910, 2936), 'pandas.DataFrame', 
'pd.DataFrame', (['echo_results'], {}), '(echo_results)\n', (2922, 2936), True, 'import pandas as pd\n'), ((3110, 3135), 'pandas.DataFrame', 'pd.DataFrame', (['deg_results'], {}), '(deg_results)\n', (3122, 3135), True, 'import pandas as pd\n'), ((4708, 4757), 'numpy.mean', 'np.mean', (["[rating['rating'] for rating in ratings]"], {}), "([rating['rating'] for rating in ratings])\n", (4715, 4757), True, 'import numpy as np\n'), ((4874, 4930), 'numpy.std', 'np.std', (["[rating['rating'] for rating in ratings]"], {'ddof': '(1)'}), "([rating['rating'] for rating in ratings], ddof=1)\n", (4880, 4930), True, 'import numpy as np\n'), ((8053, 8107), 'numpy.mean', 'np.mean', (["[rating['rating_echo'] for rating in ratings]"], {}), "([rating['rating_echo'] for rating in ratings])\n", (8060, 8107), True, 'import numpy as np\n'), ((8216, 8277), 'numpy.std', 'np.std', (["[rating['rating_echo'] for rating in ratings]"], {'ddof': '(1)'}), "([rating['rating_echo'] for rating in ratings], ddof=1)\n", (8222, 8277), True, 'import numpy as np\n'), ((8510, 8563), 'numpy.mean', 'np.mean', (["[rating['rating_deg'] for rating in ratings]"], {}), "([rating['rating_deg'] for rating in ratings])\n", (8517, 8563), True, 'import numpy as np\n'), ((8671, 8731), 'numpy.std', 'np.std', (["[rating['rating_deg'] for rating in ratings]"], {'ddof': '(1)'}), "([rating['rating_deg'] for rating in ratings], ddof=1)\n", (8677, 8731), True, 'import numpy as np\n'), ((3395, 3421), 'pandas.DataFrame', 'pd.DataFrame', (['p835_results'], {}), '(p835_results)\n', (3407, 3421), True, 'import pandas as pd\n')] |
import numpy as np
import torch
import torch.utils.data
import config as c
verts = [
(-2.4142, 1.),
(-1., 2.4142),
( 1., 2.4142),
( 2.4142, 1.),
( 2.4142, -1.),
( 1., -2.4142),
(-1., -2.4142),
(-2.4142, -1.)
]
label_maps = {
'all': [0,1,2,3,4,5,6,7],
'some': [0,0,0,0,1,1,2,3],
'none': [0,0,0,0,0,0,0,0],
}
def make_loaders(setup_type, batch_size):
np.random.seed(0)
N = int(1e6)
test_split = 10000
mapping = label_maps[setup_type]
pos = np.random.normal(size=(N, 2), scale = 0.2)
labels = np.zeros((N, 8))
n = N//8
for i, v in enumerate(verts):
pos[i*n:(i+1)*n,:] += v
labels[i*n:(i+1)*n,mapping[i]] = 1.
shuffling = np.random.permutation(N)
pos = torch.Tensor(pos[shuffling])
labels = torch.Tensor(labels[shuffling])
test_loader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(pos[:test_split], labels[:test_split]),
batch_size=batch_size, shuffle=True, drop_last=True)
train_loader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(pos[test_split:], labels[test_split:]),
batch_size=batch_size, shuffle=True, drop_last=True)
return test_loader, train_loader
from visdom import Visdom
viz = Visdom()
scatter_plot = 999
def show_live_posteriors(out_x, out_y, x, y):
colors = torch.mm(torch.round(y[:, c.ndim_z:].cpu()), torch.Tensor([np.arange(8)+1]).t()).numpy().astype(int).flatten()
viz.scatter(X=out_x[:, :2].cpu().data.numpy(),
Y=colors, win=scatter_plot, opts={'markersize':9})
c.test_loader, c.train_loader = make_loaders('some', c.batch_size)
c.test_time_functions = [show_live_posteriors]
c.ndim_x = 2
c.ndim_pad_x = 8
c.ndim_z = 2
c.ndim_y = 8
c.ndim_pad_zy = 0
c.lambd_fit_forw = 1.
c.lambd_mmd_forw = 50.
c.lambd_reconstruct = 1.
c.lambd_mmd_back = 250.
c.init_scale = 0.2
c.hidden_layer_sizes = 64
c.N_blocks = 3
c.filename_out = F'output/toy_modes_test_{c.N_blocks}.pt'
if c.N_blocks == 1:
c.exponent_clamping = 8.
c.mmd_back_kernels = [(0.1, 0.1), (0.8, 0.5), (0.2, 2)]
if __name__ == "__main__":
import train
c.lr_init = 3e-4
c.final_decay = 0.2
c.n_epochs = 60
c.train_backward_mmd = False
train.main()
c.filename_in = c.filename_out
c.filename_out += '_2'
c.lr_init = 5e-5
c.final_decay = 0.05
c.n_epochs = 100
c.train_backward_mmd = True
train.main()
| [
"numpy.random.seed",
"train.main",
"numpy.zeros",
"visdom.Visdom",
"torch.Tensor",
"numpy.arange",
"torch.utils.data.TensorDataset",
"numpy.random.normal",
"numpy.random.permutation"
] | [((1377, 1385), 'visdom.Visdom', 'Visdom', ([], {}), '()\n', (1383, 1385), False, 'from visdom import Visdom\n'), ((498, 515), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (512, 515), True, 'import numpy as np\n'), ((606, 646), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, 2)', 'scale': '(0.2)'}), '(size=(N, 2), scale=0.2)\n', (622, 646), True, 'import numpy as np\n'), ((662, 678), 'numpy.zeros', 'np.zeros', (['(N, 8)'], {}), '((N, 8))\n', (670, 678), True, 'import numpy as np\n'), ((820, 844), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (841, 844), True, 'import numpy as np\n'), ((855, 883), 'torch.Tensor', 'torch.Tensor', (['pos[shuffling]'], {}), '(pos[shuffling])\n', (867, 883), False, 'import torch\n'), ((897, 928), 'torch.Tensor', 'torch.Tensor', (['labels[shuffling]'], {}), '(labels[shuffling])\n', (909, 928), False, 'import torch\n'), ((2393, 2405), 'train.main', 'train.main', ([], {}), '()\n', (2403, 2405), False, 'import train\n'), ((2573, 2585), 'train.main', 'train.main', ([], {}), '()\n', (2583, 2585), False, 'import train\n'), ((985, 1054), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['pos[:test_split]', 'labels[:test_split]'], {}), '(pos[:test_split], labels[:test_split])\n', (1015, 1054), False, 'import torch\n'), ((1174, 1243), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['pos[test_split:]', 'labels[test_split:]'], {}), '(pos[test_split:], labels[test_split:])\n', (1204, 1243), False, 'import torch\n'), ((1524, 1536), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (1533, 1536), True, 'import numpy as np\n')] |
"""
Computation of the demagnetising field using the Fredkin-Koehler
technique and the infamous magpar method.
Rationale: The previous implementation in FemBemFKSolver (child class
of FemBemDeMagSolver) was kind of a mess. This does the same thing in the same
time with less code. Should be more conducive to further optimisation or as
a template for other techniques like the GCR.
"""
import numpy as np
import dolfin as df
import logging
from aeon import timer, Timer
from finmag.util.consts import mu0
from finmag.native.llg import compute_bem_fk
from finmag.util.meshes import nodal_volume
from finmag.util import helpers, configuration
from finmag.field import Field
from fk_demag_pbc import BMatrixPBC
# Module-level logger shared by the demag code in this file.
logger = logging.getLogger('finmag')
# Dedicated timer instance used to profile the individual steps of the
# FK demag computation (BEM assembly, linear solves, ...).
fk_timer = Timer()
class FKDemag(object):
    """
    Computation of the demagnetising field using the Fredkin-Koehler hybrid
    FEM/BEM technique.

    Fredkin, D.R. and Koehler, T.R., "`Hybrid method for computing
    demagnetizing fields`_", IEEE Transactions on Magnetics, vol.26, no.2,
    pp.415-417, Mar 1990.

    .. _Hybrid method for computing demagnetizing fields:
        http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=106342
    """

    def __init__(self, name='Demag', thin_film=False, macrogeometry=None,
                 solver_type=None, parameters=None):
        """
        Create a new FKDemag instance.

        The attribute `parameters` is a dict that contains the settings for the
        solvers for the Neumann (potential phi_1) and Laplace (potential phi_2)
        problems.

        Setting the method used by the solvers:
        Change the entries `phi_1_solver` and `phi_2_solver` to a value from
        `df.list_krylov_solver_methods()`. Default is dolfin's default.

        Setting the preconditioners:
        Change the entries `phi_1_preconditioner` and `phi_2_preconditioner` to
        a value from `df.list_krylov_solver_preconditioners()`. Default is
        dolfin's default. There is a set of parameters optimised for thin films
        (cg/ilu followed by default without preconditioner) that can be used by
        passing in the argument `thin_film` set to True.

        Setting the tolerances:
        Change the existing entries inside `phi_1` and `phi_2` which are
        themselves dicts. You can add new entries to these dicts as well.
        Everything which is understood by `df.KrylovSolver` is valid.

        Allowed values for `solver_type` are 'Krylov', 'LU' and `None` (the
        latter uses the value set in the .finmagrc file, defaulting to 'Krylov'
        if no value is provided there).
        """
        self.name = name
        # The demag interaction does not contribute to the Jacobian used by
        # the implicit time integrator.
        self.in_jacobian = False
        default_parameters = {
            'absolute_tolerance': 1e-6,
            'relative_tolerance': 1e-6,
            'maximum_iterations': int(1e4)
        }
        self.parameters = {
            'phi_1_solver': 'default',
            'phi_1_preconditioner': 'default',
            'phi_1': default_parameters,
            'phi_2_solver': 'default',
            'phi_2_preconditioner': 'default',
            'phi_2': default_parameters.copy()
        }
        if parameters is not None:
            for (k, v) in parameters.items():
                logger.debug(
                    "Setting demag solver parameter {}='{}'".format(k, v))
                if k in ['phi_1', 'phi_2']:
                    # Since self.parameters['phi_1'] is a dictionary itself,
                    # only update the keys that are given (and similarly for
                    # 'phi_2').
                    for (k2, v2) in v.items():
                        self.parameters[k][k2] = v2
                else:
                    self.parameters[k] = v
        logger.debug("Demag parameters now: {}".format(self.parameters))
        self.solver_type = solver_type
        if thin_film:
            # Settings found to work well for thin films: cg/ilu for the
            # Neumann problem, no preconditioner for the Laplace problem.
            self.parameters["phi_1_solver"] = "cg"
            self.parameters["phi_1_preconditioner"] = "ilu"
            self.parameters["phi_2_preconditioner"] = "none"
        self.macrogeometry = macrogeometry

    @timer.method
    def setup(self, m, Ms, unit_length=1):
        """
        Setup the FKDemag instance. Usually called automatically by the
        Simulation object.

        *Arguments*
        m: finmag.Field
            The unit magnetisation on a finite element space.
        Ms: finmag.Field
            The saturation magnetisation in A/m.
        unit_length: float
            The length (in m) represented by one unit on the mesh. Default 1.
        """
        assert isinstance(m, Field)
        assert isinstance(Ms, Field)
        self.m = m
        self.Ms = Ms
        self.unit_length = unit_length
        self.S1 = df.FunctionSpace(self.m.mesh(), "Lagrange", 1)
        self._test1 = df.TestFunction(self.S1)
        self._trial1 = df.TrialFunction(self.S1)
        self._test3 = df.TestFunction(self.m.functionspace)
        self._trial3 = df.TrialFunction(self.m.functionspace)
        # for computation of energy
        self._nodal_volumes = nodal_volume(self.S1, unit_length)
        # The demag field is copied into this function when the energy is
        # computed.
        self._H_func = df.Function(m.functionspace)
        self._E_integrand = -0.5 * mu0 * \
            df.dot(self._H_func, self.m.f * self.Ms.f)
        self._E = self._E_integrand * df.dx
        self._nodal_E = df.dot(self._E_integrand, self._test1) * df.dx
        self._nodal_E_func = df.Function(self.S1)
        # for computation of field and scalar magnetic potential
        # NOTE: this assignment replaces the *method* _poisson_matrix with the
        # assembled matrix, so setup() can only be called once per instance.
        self._poisson_matrix = self._poisson_matrix()
        self._laplace_zeros = df.Function(self.S1).vector()
        # determine the solver type to be used (Krylov or LU); if the kwarg
        # 'solver_type' is not provided, try to read the setting from the
        # .finmagrc file; use 'Krylov' if this fails.
        solver_type = self.solver_type
        if solver_type is None:
            solver_type = configuration.get_config_option(
                'demag', 'solver_type', 'Krylov')
            if solver_type == 'None':  # if the user set 'solver_type = None' in
                                       # the .finmagrc file, solver_type will be a
                                       # string so we need to catch this here.
                solver_type = 'Krylov'
        logger.debug("Using {} solver for demag.".format(solver_type))
        if solver_type == 'Krylov':
            self._poisson_solver = df.KrylovSolver(self._poisson_matrix.copy(),
                                                   self.parameters['phi_1_solver'], self.parameters['phi_1_preconditioner'])
            self._poisson_solver.parameters.update(self.parameters['phi_1'])
            self._laplace_solver = df.KrylovSolver(
                self.parameters['phi_2_solver'], self.parameters['phi_2_preconditioner'])
            self._laplace_solver.parameters.update(self.parameters['phi_2'])
            # We're setting 'same_nonzero_pattern=True' to enforce the
            # same matrix sparsity pattern across different demag solves,
            # which should speed up things.
            #self._laplace_solver.parameters["preconditioner"][
            #    "structure"] = "same_nonzero_pattern"
        elif solver_type == 'LU':
            self._poisson_solver = df.LUSolver(self._poisson_matrix.copy())
            self._laplace_solver = df.LUSolver()
            self._poisson_solver.parameters["reuse_factorization"] = True
            self._laplace_solver.parameters["reuse_factorization"] = True
        else:
            raise ValueError("Argument 'solver_type' must be either 'Krylov' or 'LU'. "
                             "Got: '{}'".format(solver_type))
        with fk_timer('compute BEM'):
            if not hasattr(self, "_bem"):
                if self.macrogeometry is not None:
                    Ts = self.macrogeometry.compute_Ts(self.m.mesh())
                    pbc = BMatrixPBC(self.m.mesh(), Ts)
                    # Use the builtin int here: the alias np.int was
                    # deprecated in NumPy 1.20 and removed in NumPy 1.24.
                    self._b2g_map = np.array(pbc.b2g_map, dtype=int)
                    self._bem = pbc.bm
                else:
                    self._bem, self._b2g_map = compute_bem_fk(
                        df.BoundaryMesh(self.m.mesh(), 'exterior', False))
        logger.debug("Boundary element matrix uses {:.2f} MB of memory.".format(
            self._bem.nbytes / 1024. ** 2))
        # solution of inhomogeneous Neumann problem
        self._phi_1 = df.Function(self.S1)
        # solution of Laplace equation inside domain
        self._phi_2 = df.Function(self.S1)
        self._phi = df.Function(self.S1)  # magnetic potential phi_1 + phi_2
        # To be applied to the vector field m as first step of computation of
        # _phi_1. This gives us div(M), which is equal to Laplace(_phi_1),
        # equation which is then solved using _poisson_solver.
        self._Ms_times_divergence = df.assemble(
            self.Ms.f * df.inner(self._trial3, df.grad(self._test1)) * df.dx)
        # we move the boundary condition here to avoid creating an instance
        # each time the magnetic potential is computed
        self.boundary_condition = df.DirichletBC(
            self.S1, self._phi_2, df.DomainBoundary())
        self.boundary_condition.apply(self._poisson_matrix)
        self._setup_gradient_computation()

    @timer.method
    def precomputed_bem(self, bem, b2g_map):
        """
        If the BEM and a boundary to global vertices map are known, they can be
        passed to the FKDemag object with this method so it will skip
        re-computing them.
        """
        self._bem, self._b2g_map = bem, b2g_map

    @timer.method
    def compute_potential(self):
        """
        Compute the magnetic potential.

        *Returns*
        df.Function
            The magnetic potential.
        """
        self._compute_magnetic_potential()
        return self._phi

    @timer.method
    def compute_field(self):
        """
        Compute the demagnetising field.

        *Returns*
        numpy.ndarray
            The demagnetising field.
        """
        self._compute_magnetic_potential()
        return self._compute_gradient()

    def average_field(self):
        """
        Compute the average demag field.
        """
        return helpers.average_field(self.compute_field())

    @timer.method
    def compute_energy(self):
        """
        Compute the total energy of the field.

        .. math::
            E_\\mathrm{d} = -\\frac12 \\mu_0 \\int_\\Omega
            H_\\mathrm{d} \\cdot \\vec M \\mathrm{d}x

        *Returns*
        Float
            The energy of the demagnetising field.
        """
        self._H_func.vector()[:] = self.compute_field()
        # Scale from mesh units to SI units (unit_length per dimension).
        return df.assemble(self._E) * self.unit_length ** self.m.mesh_dim()

    @timer.method
    def energy_density(self):
        """
        Compute the energy density in the field.

        .. math::
            \\rho = \\frac{E_{\\mathrm{d}, i}}{V_i},

        where V_i is the volume associated with the node i.

        *Returns*
        numpy.ndarray
            The energy density of the demagnetising field.
        """
        self._H_func.vector()[:] = self.compute_field()
        nodal_E = df.assemble(self._nodal_E).array() * \
            self.unit_length ** self.m.mesh_dim()
        return nodal_E / self._nodal_volumes

    @timer.method
    def energy_density_function(self):
        """
        Returns the energy density in the field as a dolfin function to allow probing.

        *Returns*
        dolfin.Function
            The energy density of the demagnetising field.
        """
        self._nodal_E_func.vector()[:] = self.energy_density()
        return self._nodal_E_func

    @fk_timer.method
    def _poisson_matrix(self):
        # Stiffness matrix for the Poisson equation; assembled once in setup().
        A = df.dot(df.grad(self._trial1), df.grad(self._test1)) * df.dx
        return df.assemble(A)  # stiffness matrix for Poisson equation

    def _compute_magnetic_potential(self):
        """Compute the scalar magnetic potential phi = phi_1 + phi_2 using the
        Fredkin-Koehler splitting and store it in self._phi."""
        # compute _phi_1 on the whole domain
        g_1 = self._Ms_times_divergence * self.m.f.vector()
        with fk_timer("first linear solve"):
            self._poisson_solver.solve(self._phi_1.vector(), g_1)
        # compute _phi_2 on the boundary using the Dirichlet boundary
        # conditions we get from BEM * _phi_1 on the boundary.
        with fk_timer("using boundary conditions"):
            phi_1 = self._phi_1.vector()[self._b2g_map]
            self._phi_2.vector()[self._b2g_map[:]] = np.dot(
                self._bem, phi_1)
        A = self._poisson_matrix
        b = self._laplace_zeros
        self.boundary_condition.set_value(self._phi_2)
        self.boundary_condition.apply(A, b)
        # compute _phi_2 on the whole domain
        with fk_timer("second linear solve"):
            self._laplace_solver.solve(A, self._phi_2.vector(), b)
        # add _phi_1 and _phi_2 to obtain magnetic potential
        self._phi.vector()[:] = self._phi_1.vector() + self._phi_2.vector()

    @fk_timer.method
    def _setup_gradient_computation(self):
        """
        Prepare the discretised gradient to use in :py:meth:`FKDemag._compute_gradient`.

        We don't need the gradient field as a continuous field, we are only
        interested in the values at specific points. It is thus a waste of
        computational effort to use a projection of the gradient field, since
        it performs the fairly large operation of assembling a matrix and
        solving a linear system of equations.
        """
        A = df.inner(self._test3, - df.grad(self._trial1)) * df.dx
        # This can be applied to scalar functions.
        self._gradient = df.assemble(A)
        # The `A` above is in fact not quite the gradient, since we integrated
        # over the volume as well. We will divide by the volume later, after
        # the multiplication of the scalar magnetic potential. Since the two
        # operations are symmetric (multiplying by volume, dividing by volume)
        # we don't have to care for the units, i.e. unit_length.
        b = df.dot(self._test3, df.Constant((1, 1, 1))) * df.dx
        self._nodal_volumes_S3_no_units = df.assemble(b).array()

    @fk_timer.method
    def _compute_gradient(self):
        """
        Get the demagnetising field from the magnetic scalar potential.

        .. math::
            \\vec{H}_{\\mathrm{d}} = - \\nabla \\phi (\\vec{r})

        Using dolfin, we would translate this to

        .. sourcecode::
            H_d = df.project(- df.grad(self._phi), self.m.functionspace)

        but the method used here is computationally less expensive.
        """
        H = self._gradient * self._phi.vector()
        return H.array() / self._nodal_volumes_S3_no_units
| [
"finmag.util.configuration.get_config_option",
"dolfin.grad",
"dolfin.LUSolver",
"dolfin.TrialFunction",
"dolfin.TestFunction",
"dolfin.DomainBoundary",
"dolfin.Function",
"dolfin.Constant",
"finmag.util.meshes.nodal_volume",
"numpy.array",
"aeon.Timer",
"numpy.dot",
"dolfin.KrylovSolver",
... | [((721, 748), 'logging.getLogger', 'logging.getLogger', (['"""finmag"""'], {}), "('finmag')\n", (738, 748), False, 'import logging\n'), ((760, 767), 'aeon.Timer', 'Timer', ([], {}), '()\n', (765, 767), False, 'from aeon import timer, Timer\n'), ((4786, 4810), 'dolfin.TestFunction', 'df.TestFunction', (['self.S1'], {}), '(self.S1)\n', (4801, 4810), True, 'import dolfin as df\n'), ((4834, 4859), 'dolfin.TrialFunction', 'df.TrialFunction', (['self.S1'], {}), '(self.S1)\n', (4850, 4859), True, 'import dolfin as df\n'), ((4882, 4919), 'dolfin.TestFunction', 'df.TestFunction', (['self.m.functionspace'], {}), '(self.m.functionspace)\n', (4897, 4919), True, 'import dolfin as df\n'), ((4943, 4981), 'dolfin.TrialFunction', 'df.TrialFunction', (['self.m.functionspace'], {}), '(self.m.functionspace)\n', (4959, 4981), True, 'import dolfin as df\n'), ((5049, 5083), 'finmag.util.meshes.nodal_volume', 'nodal_volume', (['self.S1', 'unit_length'], {}), '(self.S1, unit_length)\n', (5061, 5083), False, 'from finmag.util.meshes import nodal_volume\n'), ((5107, 5135), 'dolfin.Function', 'df.Function', (['m.functionspace'], {}), '(m.functionspace)\n', (5118, 5135), True, 'import dolfin as df\n'), ((5454, 5474), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (5465, 5474), True, 'import dolfin as df\n'), ((8422, 8442), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (8433, 8442), True, 'import dolfin as df\n'), ((8518, 8538), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (8529, 8538), True, 'import dolfin as df\n'), ((8559, 8579), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (8570, 8579), True, 'import dolfin as df\n'), ((11886, 11900), 'dolfin.assemble', 'df.assemble', (['A'], {}), '(A)\n', (11897, 11900), True, 'import dolfin as df\n'), ((13906, 13920), 'dolfin.assemble', 'df.assemble', (['A'], {}), '(A)\n', (13917, 13920), True, 'import dolfin as df\n'), ((5267, 5309), 'dolfin.dot', 'df.dot', 
(['self._H_func', '(self.m.f * self.Ms.f)'], {}), '(self._H_func, self.m.f * self.Ms.f)\n', (5273, 5309), True, 'import dolfin as df\n'), ((5378, 5416), 'dolfin.dot', 'df.dot', (['self._E_integrand', 'self._test1'], {}), '(self._E_integrand, self._test1)\n', (5384, 5416), True, 'import dolfin as df\n'), ((5957, 6022), 'finmag.util.configuration.get_config_option', 'configuration.get_config_option', (['"""demag"""', '"""solver_type"""', '"""Krylov"""'], {}), "('demag', 'solver_type', 'Krylov')\n", (5988, 6022), False, 'from finmag.util import helpers, configuration\n'), ((6731, 6825), 'dolfin.KrylovSolver', 'df.KrylovSolver', (["self.parameters['phi_2_solver']", "self.parameters['phi_2_preconditioner']"], {}), "(self.parameters['phi_2_solver'], self.parameters[\n 'phi_2_preconditioner'])\n", (6746, 6825), True, 'import dolfin as df\n'), ((9175, 9194), 'dolfin.DomainBoundary', 'df.DomainBoundary', ([], {}), '()\n', (9192, 9194), True, 'import dolfin as df\n'), ((10736, 10756), 'dolfin.assemble', 'df.assemble', (['self._E'], {}), '(self._E)\n', (10747, 10756), True, 'import dolfin as df\n'), ((12497, 12521), 'numpy.dot', 'np.dot', (['self._bem', 'phi_1'], {}), '(self._bem, phi_1)\n', (12503, 12521), True, 'import numpy as np\n'), ((5625, 5645), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (5636, 5645), True, 'import dolfin as df\n'), ((7368, 7381), 'dolfin.LUSolver', 'df.LUSolver', ([], {}), '()\n', (7379, 7381), True, 'import dolfin as df\n'), ((11818, 11839), 'dolfin.grad', 'df.grad', (['self._trial1'], {}), '(self._trial1)\n', (11825, 11839), True, 'import dolfin as df\n'), ((11841, 11861), 'dolfin.grad', 'df.grad', (['self._test1'], {}), '(self._test1)\n', (11848, 11861), True, 'import dolfin as df\n'), ((14331, 14353), 'dolfin.Constant', 'df.Constant', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (14342, 14353), True, 'import dolfin as df\n'), ((14405, 14419), 'dolfin.assemble', 'df.assemble', (['b'], {}), '(b)\n', (14416, 14419), True, 'import 
dolfin as df\n'), ((7988, 8023), 'numpy.array', 'np.array', (['pbc.b2g_map'], {'dtype': 'np.int'}), '(pbc.b2g_map, dtype=np.int)\n', (7996, 8023), True, 'import numpy as np\n'), ((11235, 11261), 'dolfin.assemble', 'df.assemble', (['self._nodal_E'], {}), '(self._nodal_E)\n', (11246, 11261), True, 'import dolfin as df\n'), ((13799, 13820), 'dolfin.grad', 'df.grad', (['self._trial1'], {}), '(self._trial1)\n', (13806, 13820), True, 'import dolfin as df\n'), ((8930, 8950), 'dolfin.grad', 'df.grad', (['self._test1'], {}), '(self._test1)\n', (8937, 8950), True, 'import dolfin as df\n')] |
# -*- coding: utf-8 -*-
"""
Root module of tanuna package.
@author: <NAME>
"""
# ignore warning 'line break after binary operator'
# as line break *before* binary operator *also* creates a warning ...
# flake8: noqa: W504
# XXX refactor to use numpy arrays with "@" multiplication instead of matrices
# XXX have to decide whether lists of vectors have axes in "matrix-order"
# (useful for matrix multiplication) or in "plot-order" (useful for
# plotting)
# XXX refactor to consistantly use either "x" or "s" as variable name for state
# (consider that s is usually also used for omega * j + r)
import numpy as np
from scipy.linalg import expm
from scipy.integrate import solve_ivp
import functools
class ApproximationError(Exception):
    """Raised when a system cannot be approximated, e.g. when
    linearization via ``tangentLTI`` fails."""
    pass
class MatrixError(Exception):
    """Raised when state-space matrices or an initial state have
    inconsistent shapes (see ``CT_LTI_System.__init__``)."""
    pass
class ConnectionError(Exception):
    """Raised when systems cannot be connected (mismatched or unconnected
    input/output ports).

    NOTE(review): this shadows the builtin ``ConnectionError`` (Python
    3.3+); renaming would be cleaner but would change the public API.
    """
    pass
class SolverError(Exception):
    """Raised when the ODE solver (``scipy.integrate.solve_ivp``) reports
    failure (``sol.success`` is False)."""
    pass
def minor(A, i, j):
    """Return the matrix obtained by deleting row *i* and column *j* from
    matrix *A*.

    *Arguments*
    A: 2D matrix-like supporting ``.shape`` and fancy indexing
    i, j: int
        Index of the row and column to remove.

    *Returns*
    Matrix of shape ``(rows - 1, cols - 1)``.
    """
    rows, cols = A.shape
    # Build the index lists explicitly in ascending order. The previous
    # implementation iterated over a set difference, whose ordering is an
    # implementation detail of CPython's integer hashing and is not
    # guaranteed by the language.
    keep_rows = [r for r in range(rows) if r != i]
    keep_cols = [c for c in range(cols) if c != j]
    M = A[keep_rows, :]
    M = M[:, keep_cols]
    return(M)
def determinant(A):
    """Determinant of square matrix A, computed by Laplace (cofactor)
    expansion along the first row. Works entry-wise, so matrices of
    poly1d objects are supported as well.
    """
    if A.shape == (1, 1):
        return A[0, 0]
    if A.shape == (0, 0):
        # The determinant of the empty matrix is 1 by convention.
        return 1.
    # Expand along row 0, alternating the cofactor sign.
    total = 0.
    sign = 1
    for col in range(A.shape[1]):
        total = total + sign * A[0, col] * determinant(minor(A, 0, col))
        sign = -sign
    return total
def cofactorMat(A):
    """Cofactor matrix of matrix A. Can handle matrices of poly1d."""
    nrows, ncols = A.shape
    # dtype=object keeps poly1d entries intact instead of coercing them.
    C = np.zeros((nrows, ncols), dtype=object)
    for row in range(nrows):
        for col in range(ncols):
            sign = (-1) ** (row + col)
            C[row, col] = sign * determinant(minor(A, row, col))
    return C
def polyDiag(polyList):
    """Construct a diagonal matrix whose diagonal entries are taken from
    *polyList* (typically a list of numpy.poly1d objects).
    """
    size = len(polyList)
    # dtype=object so the entries may be poly1d instances, not just floats.
    D = np.matrix(np.zeros((size, size), dtype=object))
    for idx, entry in enumerate(polyList):
        D[idx, idx] = entry
    return D
def _normalizePartialConnections(H, G, Gout, Hin):
try:
Gshape = G.shape
except AttributeError as e:
if issubclass(type(G), (float, int)):
Gshape = (H.shape[1], H.shape[1])
else:
raise e
try:
Hshape = H.shape
except AttributeError as e:
if issubclass(type(H), (float, int)):
Hshape = (G.shape[0], G.shape[0])
else:
raise e
if Gout is None:
Gout = tuple(range(Gshape[0]))
if Hin is None:
Hin = tuple(range(Hshape[1]))
return Gout, Hin
def connect(H, G, Gout=None, Hin=None):
    """
    Connect outputs Gout of G to inputs Hin of H. The ports of the
    connected system are arranged as follows:
    - remaining outputs of G get the lower, the outputs of H the higher
      indices
    - inputs of G get the lower, remaining inputs of H the higher indices
    connect(H, G) is equivalent to H * G.
    """
    Gout, Hin = _normalizePartialConnections(H, G, Gout, Hin)
    # Ask H to perform the connection; operands that do not know how to
    # connect themselves to G delegate to G's __rconnect__ instead.
    try:
        combined = H.__connect__(G, Gout=Gout, Hin=Hin)
    except (AttributeError, NotImplementedError):
        combined = G.__rconnect__(H, Gout=Gout, Hin=Hin)
    return combined
def feedback(G, Gout, Gin):
    """Close a feedback loop on *G*, feeding its outputs ``Gout`` back into
    its inputs ``Gin``. Delegates to ``G.__feedback__``.
    """
    closed_loop = G.__feedback__(Gout, Gin)
    return closed_loop
class Function(object):
    """
    Function that supports function arithmetics.

    Let f, g Functions, and M a matrix, then

    (f*g)(t, x, u) = f(t, x, g(t, x, u))
    (f+g)(t, x, u) = f(t, x, u) + g(t, x, u)
    (M*f)(t, x, u) = M*f(t, x, u)
    (f*M)(t, x, u) = f(t, x, M*u)
    """

    def __init__(self, func):
        """Wrap the callable *func*, expected signature ``func(t, x, u)``."""
        if not callable(func):
            raise TypeError('func must be callable')
        self.func = func

    def __call__(self, t, x, u):
        return self.func(t, x, u)

    def __mul__(self, right):
        """Composition f*g, or input-side scaling f*M / f*c."""
        f = self.func
        if issubclass(type(right), Function):
            g = right.func

            def fg(t, x, u):
                return f(t, x, g(t, x, u))
            return Function(fg)
        if issubclass(type(right), (np.matrix, float, int)):
            g = right

            def fg(t, x, u):
                return f(t, x, g * u)
            return Function(fg)
        # BUGFIX: this used to return the NotImplementedError *class* as the
        # result of the operation. Returning the NotImplemented sentinel lets
        # Python try the reflected operation and raise TypeError otherwise.
        return NotImplemented

    def __rmul__(self, left):
        """Scalar c*f (same as f*c) or output-side matrix scaling M*f."""
        if issubclass(type(left), (float, int)):
            # Scalars commute: c*f is defined as f*c (input-side scaling).
            return self.__mul__(left)
        if issubclass(type(left), np.matrix):
            M = left
            g = self.func

            def fg(t, x, u):
                return M * g(t, x, u)
            return Function(fg)
        return NotImplemented

    def __add__(self, right):
        """Pointwise sum with another Function, a matrix, or a scalar."""
        f = self.func
        if issubclass(type(right), Function):
            g = right.func

            def fg(t, x, u):
                return f(t, x, u) + g(t, x, u)
            return Function(fg)
        if issubclass(type(right), (np.matrix, float, int)):
            g = right

            def fg(t, x, u):
                return f(t, x, u) + g
            return Function(fg)
        return NotImplemented

    def __radd__(self, left):
        # Addition is commutative for the supported operand types.
        return self.__add__(left)

    def __getitem__(self, slc):
        """Select output components: ``f[slc](t, x, u) = f(t, x, u)[slc]``."""
        return Function(lambda t, x, u: self.func(t, x, u)[slc])

    def offset_inputs(self, right):
        """Level-shift inputs"""
        return Function(lambda t, x, du: self.func(t, x, right + du))

    def offset_outputs(self, left):
        """Level-shift outputs"""
        return Function(lambda t, x, u: left + self.func(t, x, u))

    def reorder_xputs(self, outs, ins):
        """Return a Function with outputs reordered to *outs* and inputs
        reordered according to *ins* (permutations given as index tuples).
        """
        # find inverse permutation of ins:
        pairs = zip(ins, range(len(ins)))
        pairs_sorted = sorted(pairs, key=lambda p: p[0])
        ins_inv = list(zip(*pairs_sorted))[1]

        def f_reordered(t, x, u):
            u_new = u.take(ins_inv, axis=0)
            y_new = self.func(t, x, u_new).take(outs, axis=0)
            return y_new
        return Function(f_reordered)
class CT_System(object):
    """
    Describes a continuous-time system with dynamics described by ordinary
    differential equations.
    s: Internal state (vector) of the system
    s0: Initial state of the system
    u: External input (vector)
    f(t, s, u): Dynamics of the system (ds/dt = f(t, s, u))
    g(t, s, u): Function that maps state s to output y = g(t, s, u)
    order: Order of the system (dimension of vector f(t, s, u))
    shape: = (n_outputs, n_inputs) determines the dimensions of
    y and u.
    It is solved by simply calling it with arguments t and u. t is
    either a float or array-like. In the latter case, the system is
    solved for all the times t in the array. u is a function with call
    signature u(t) returning an external input (vector).
    """
    def __init__(self, f, g, order, shape, s0=None):
        # Wrap f and g as Function objects so that they support the function
        # arithmetic used by the connection operators below.
        self.f = Function(f)
        self.g = Function(g)
        self.order = order
        self.shape = shape
        if s0 is None:
            # Default initial state: the origin.
            s0 = np.matrix(np.zeros((order, 1)))
        else:
            # Accept any array-like and normalise it to a column vector.
            s0 = np.matrix(s0).reshape(-1, 1)
        self.s = s0
    def __call__(self, t, return_state=False, method='RK45'):
        # Only a fully driven system (no open input ports) can be simulated.
        if self.shape[1] != 0:
            raise ConnectionError(
                'Not all inputs connected to sources: Can\'t solve!')
        # NOTE(review): isinstance(t, (float, int)) would be the idiomatic
        # test and would also accept numpy scalars; kept as-is.
        if type(t) in [float, int]:
            t = np.array([float(t)])
        s0 = np.array(self.s).reshape(-1)
        if t[-1] == 0:
            # Nothing to integrate; the state keeps its initial value.
            s = self.s.repeat(len(t), axis=1)
        else:
            # All inputs are connected, so f is evaluated with an empty
            # (0 x 1) input matrix.
            sol = solve_ivp(self.f, (0., t[-1]), s0, method=method,
                            t_eval=t, dense_output=False, vectorized=True,
                            args=(np.matrix([[]]).reshape(0, 1),))
            if not sol.success:
                raise SolverError()
            s = sol.y
        y = self.g(t, s, np.matrix([[]]).reshape(0, len(t)))
        if not return_state:
            return y
        else:
            return (y, np.matrix(s))
    def steadyStates(self, u0, t):
        """Returns a list of tuples (s_i, stability_i) with:
        - s_i: A steady-state at time t, i.e. f(t, s_i, u0) = 0
        - stability_i: True if this steady-state is stable, false otherwise
        """
        raise NotImplementedError
    def observable(self, t):
        """Returns whether the system is observable at time t (i.e. its
        internal state is determinable from inputs u and outputs y)."""
        raise NotImplementedError
    def reachable(self, t):
        """Returns whether the system is reachable at time t (i.e. all states
        are reachable by providing an appropriate input u(t))."""
        raise NotImplementedError
    def tangentLTI(self, t, s0, u0):
        """
        Approximates the CT_System at time t near state s0 and input u0
        by an LTISystem (linear, time-invariant system).
        Raises ApproximationError if the system can not be linearized.
        """
        raise NotImplementedError
    @staticmethod
    def Sh(H, G, Hin, Gout):
        """Selection matrix routing the connected outputs of G to the inputs
        of H: u_h = Sh * y_g."""
        # u_h = Sh * y_g:
        Sh = np.matrix(np.zeros((H.shape[1], G.shape[0])))
        for k in range(len(Hin)):
            i = Hin[k]
            j = Gout[k]
            Sh[i, j] = 1.
        return Sh
    @staticmethod
    def Shbar(H, G, Hin, Gout):
        """Selection matrix embedding the unconnected inputs of H into the
        full input vector of H: u_h = S_h_bar * u_h,unconnected."""
        # u_h = S_h_bar * u_h,unconnected:
        sh = np.matrix(np.zeros((H.shape[1], H.shape[1] - len(Hin))))
        # NOTE(review): the row order relies on the iteration order of a set
        # of small ints (an implementation detail of CPython's hashing).
        u_h_unconnected = list(set(range(H.shape[1])) - set(Hin))
        sh[u_h_unconnected, :] = np.eye(H.shape[1] - len(Hin))
        return sh
    @staticmethod
    def Sgbar(G, Gout):
        """Selection matrix extracting the unconnected outputs of G:
        y_g,unconnected = S_g_bar * y_g."""
        # y_g,unconnected = S_g_bar * y_g:
        sg = np.matrix(np.zeros((G.shape[0] - len(Gout), G.shape[0])))
        # NOTE(review): see Shbar — column order relies on set iteration order.
        y_g_unconnected = list(set(range(G.shape[0])) - set(Gout))
        sg[:, y_g_unconnected] = np.eye(G.shape[0] - len(Gout))
        return sg
    def __feedback__(self, Gout, Gin):
        # Close a feedback loop: outputs Gout are fed back into inputs Gin.
        # The connected ports are removed from the resulting system.
        Nports = np.min(self.shape)
        if len(Gout) >= Nports:
            # cannot connect _all_ ports:
            raise ConnectionError('at least 1 input and at least 1 output '
                                  'must remain unconnected')
        if len(Gout) != len(Gin):
            raise ConnectionError(
                "No. outputs to connect must match no. inputs to connect")
        # Re-arange ports so that feedback outputs and inputs come last:
        out_all = set(range(self.shape[0]))
        in_all = set(range(self.shape[1]))
        outs = tuple(out_all - set(Gout)) + Gout
        ins = tuple(in_all - set(Gin)) + Gin
        f_ord = self.f.reorder_xputs(range(self.shape[0]), ins)
        g_ord = self.g.reorder_xputs(outs, ins)
        # Split the reordered output map into the open outputs and the
        # outputs that are fed back:
        g_ord_open = g_ord[:-len(Gout)]
        g_ord_clsd = g_ord[-len(Gout):]
        # Connect feedback:
        def f_fb(t, x, u_open):
            # Augment the open inputs with the fed-back outputs, which are
            # evaluated with zeros in place of the feedback inputs.
            u = np.vstack(
                [u_open,
                 g_ord_clsd(t, x,
                            np.vstack(
                                [u_open,
                                 np.zeros((len(Gout), u_open.shape[1]))])
                            )
                 ]
            )
            return f_ord(t, x, u)
        def g_fb(t, x, u_open):
            u = np.vstack(
                [u_open,
                 g_ord_clsd(t, x,
                            np.vstack(
                                [u_open,
                                 np.zeros((len(Gout), u_open.shape[1]))])
                            )
                 ]
            )
            return g_ord_open(t, x, u)
        # The fed-back ports disappear from the external interface.
        shape = (self.shape[0] - len(Gout), self.shape[1] - len(Gout))
        return CT_System(f_fb, g_fb, self.order, shape, self.s)
    def __connect__(self, right, Gout=None, Hin=None):
        # Series connection: self acts as H, right as G (H * G).
        H = self
        G = right
        Gout, Hin = _normalizePartialConnections(self, right, Gout, Hin)
        if issubclass(type(right), CT_System):
            if len(Gout) != len(Hin):
                raise ConnectionError('Number of inputs does not match '
                                      'number of outputs')
            def f_hg(t, x_hg, u_hg):
                # Combined state: G's state stacked on top of H's state.
                x_g = x_hg[:G.order, :]  # <= this creates ref to G! XXX
                x_h = x_hg[G.order:, :]
                # NOTE(review): the input vector is split at G.shape[0]
                # (G's number of *outputs*); the number of inputs of G is
                # G.shape[1] — presumably correct only for square G. Verify.
                u_g = u_hg[:G.shape[0], :]
                u_hopen = u_hg[G.shape[0]:, :]
                S_h_bar = self.Shbar(H, G, Hin, Gout)
                S_h = self.Sh(H, G, Hin, Gout)
                x_hg_dot = np.vstack([
                    G.f(t, x_g, u_g),
                    H.f(t, x_h,
                        S_h_bar * u_hopen + S_h * G.g(t, x_g, u_g))])
                return x_hg_dot
            def g_hg(t, x_hg, u_hg):
                x_g = x_hg[:G.order, :]
                x_h = x_hg[G.order:, :]
                # NOTE(review): same G.shape[0]-based split as in f_hg above.
                u_g = u_hg[:G.shape[0], :]
                u_hopen = u_hg[G.shape[0]:, :]
                S_g_bar = self.Sgbar(G, Gout)
                S_h_bar = self.Shbar(H, G, Hin, Gout)
                S_h = self.Sh(H, G, Hin, Gout)
                y_hg = np.vstack([
                    S_g_bar * G.g(t, x_g, u_g),
                    H.g(t, x_h, S_h_bar * u_hopen + S_h * G.g(t, x_g, u_g))])
                return y_hg
            s0 = np.vstack([G.s, H.s])
            order = s0.shape[0]
            # Unconnected outputs of G plus all outputs of H:
            n_outputs = self.Sgbar(G, Gout).shape[0] + H.shape[0]
            # All inputs of G plus the unconnected inputs of H:
            n_inputs = G.shape[1] + H.shape[1] - len(Hin)
            shape = (n_outputs, n_inputs)
            return CT_System(f_hg, g_hg, order, shape, s0=s0)
        if issubclass(type(right), np.matrix):
            if self.shape[1] != right.shape[0]:
                raise ConnectionError('Number of inputs does not match '
                                      'number of outputs')
        if issubclass(type(right), (np.matrix, float, int)):
            # A matrix or scalar on the right scales the inputs of self.
            f_hg = self.f * right
            g_hg = self.g * right
            order = self.order
            s0 = self.s
            if issubclass(type(right), np.matrix):
                shape = (self.shape[0], right.shape[1])
            else:
                shape = self.shape
            return CT_System(f_hg, g_hg, order, shape, s0=s0)
        raise NotImplementedError(
            f"Don't know how to connect {self} and {right}")
    def __rconnect__(self, left, Gout=None, Hin=None):
        # Reflected connection: left * self (self acts as G).
        Gout, Hin = _normalizePartialConnections(left, self, Gout, Hin)
        if issubclass(type(left), type(self)):
            return type(self).__connect__(left, self, Gout=Gout, Hin=Hin)
        if left.shape[1] != self.shape[0]:
            raise ConnectionError('Number of inputs does not match '
                                  'number of outputs')
        if issubclass(type(left), (np.matrix, float, int)):
            if len(Gout) != self.shape[0] or len(Hin) != left.shape[1]:
                raise ConnectionError(
                    "Partial connections with matrix, float, int not supported")
            # A matrix or scalar on the left scales the outputs of self.
            f_hg = self.f
            g_hg = left * self.g
            shape = (left.shape[0], self.shape[1])
            return CT_System(f_hg, g_hg, self.order, shape, s0=self.s)
        raise NotImplementedError
    def __mul__(self, right):
        # H * G is series connection of all ports (see connect()).
        return(self.__connect__(right))
    def __rmul__(self, left):
        return(self.__rconnect__(left))
    def __add__(self, right):
        # Parallel sum: same inputs, outputs are added.
        if issubclass(type(right), type(self)):
            if self.shape != right.shape:
                raise ConnectionError('Systems must have same shape')
            def f_hg(t, x, u):
                return np.vstack([self.f(t, x, u),
                                  right.f(t, x, u)])
            g_hg = self.g + right.g
            order = self.order + right.order
            s0 = np.vstack([self.s, right.s])
            return CT_System(f_hg, g_hg, order, self.shape, s0=s0)
        if issubclass(type(right), (np.matrix, float, int)):
            # NOTE(review): unlike the branch above, no s0 is passed here,
            # so the current state self.s is not carried over — verify.
            f_hg = self.f + right
            g_hg = self.g + right
            return CT_System(f_hg, g_hg, self.order, self.shape)
        raise NotImplementedError
    def __radd__(self, left):
        if issubclass(type(self), type(left)):
            return(self + left)
        if issubclass(type(left), (np.matrix, float, int)):
            g_hg = left + self.g
            return CT_System(self.f, g_hg, self.order, self.shape, s0=self.s)
        raise NotImplementedError
    def __sub__(self, right):
        return(self + -right)
    def __rsub__(self, left):
        return(left + -self)
    def __neg__(self):
        return(self * (-1))
    def __truediv__(self, right):
        # Only division by a scalar is supported.
        if type(right) in [float, int]:
            invright = 1. / float(right)
            return(self * invright)
        raise NotImplementedError
    def __or__(self, right):
        """Connect systems in parallel"""
        if not issubclass(type(self), type(right)):
            raise NotImplementedError
        def f_hg(t, x, u):
            # NOTE(review): inputs are split at self.shape[0] (number of
            # *outputs*, not inputs) and states at self.shape[0] (not
            # self.order) — this looks correct only when order == n_out
            # == n_in. Verify against callers.
            u_self = u[:self.shape[0]]
            u_right = u[self.shape[0]:]
            x_self = x[:self.shape[0]]
            x_right = x[self.shape[0]:]
            return np.vstack([self.f(t, x_self, u_self),
                              right.f(t, x_right, u_right)])
        def g_hg(t, x, u):
            # NOTE(review): same suspicious shape[0]-based split as f_hg.
            u_self = u[:self.shape[0]]
            u_right = u[self.shape[0]:]
            x_self = x[:self.shape[0]]
            x_right = x[self.shape[0]:]
            return np.vstack([self.g(t, x_self, u_self),
                              right.g(t, x_right, u_right)])
        order = self.order + right.order
        shape = (self.shape[0] + right.shape[0], self.shape[1] + right.shape[1])
        s0 = np.vstack([self.s, right.s])
        return CT_System(f_hg, g_hg, order, shape, s0=s0)
    def __ror__(self, left):
        # Reflected parallel connection (left | self).
        if not issubclass(type(self), type(left)):
            raise NotImplementedError
        def f_hg(t, x, u):
            # NOTE(review): same shape[0]-based split as in __or__ — verify.
            u_left = u[:left.shape[0]]
            u_right = u[left.shape[0]:]
            x_left = x[:left.shape[0]]
            x_right = x[left.shape[0]:]
            return np.vstack([left.f(t, x_left, u_left),
                              self.f(t, x_right, u_right)])
        def g_hg(t, x, u):
            u_left = u[:left.shape[0]]
            u_right = u[left.shape[0]:]
            x_left = x[:left.shape[0]]
            x_right = x[left.shape[0]:]
            return np.vstack([left.g(t, x_left, u_left),
                              self.g(t, x_right, u_right)])
        order = left.order + self.order
        shape = (left.shape[0] + self.shape[0], left.shape[1] + self.shape[1])
        s0 = np.vstack([left.s, self.s])
        return CT_System(f_hg, g_hg, order, shape, s0=s0)
    def __pow__(self, power):
        """Raise system to integer power"""
        if type(power) is not int:
            raise NotImplementedError
        if power < 1:
            raise NotImplementedError
        if power == 1:
            return(self)
        else:
            # Repeated series connection: self * self * ... * self.
            return(self * self**(power - 1))
    def offset_inputs(self, right):
        """Level-shift inputs"""
        if issubclass(type(right), (float, int)):
            right = np.matrix([[right]])
        if issubclass(type(right), np.matrix):
            if right.shape != (self.shape[1], 1):
                raise ConnectionError(
                    'Level-shift vector does not match shape of input')
            f_shifted = self.f.offset_inputs(right)
            g_shifted = self.g.offset_inputs(right)
            return CT_System(f_shifted, g_shifted, self.order, self.shape,
                             self.s)
        raise NotImplementedError
    def offset_outputs(self, left):
        """Level-shift outputs"""
        if issubclass(type(left), (float, int)):
            left = np.matrix([[left]])
        if issubclass(type(left), np.matrix):
            if left.shape != (self.shape[0], 1):
                # NOTE(review): leftover debug output; logging would be
                # preferable to print here.
                print(left)
                print(self.shape)
                raise ConnectionError(
                    'Level-shift vector does not match shape of output')
            g_shifted = self.g.offset_outputs(left)
            return CT_System(self.f, g_shifted, self.order, self.shape, self.s)
        raise NotImplementedError
class CT_LTI_System(CT_System):
    """Continuous-time, Linear, time-invariant system"""
    def __init__(self, A, B, C, D, x0=None):
        # State-space model  dx/dt = A x + B u,  y = C x + D u.
        # Coerce everything to float matrices so integer input does not
        # silently truncate later arithmetic.
        A, B, C, D = map(np.asmatrix, (A, B, C, D))
        A, B, C, D = map(lambda M: M.astype('float'), (A, B, C, D))
        order = A.shape[1]
        if x0 is None:
            # Default initial state: the origin.
            x0 = np.matrix(np.zeros((order, 1)))
        # Verify dimensions:
        nout, nin = D.shape
        if not (A.shape == (order, order) and B.shape == (order, nin)
                and C.shape == (nout, order) and D.shape == (nout, nin)):
            raise MatrixError('State matrices do not have proper shapes')
        if not x0.shape == (order, 1):
            raise MatrixError('Initial state has wrong shape')
        self._A, self._B, self._C, self._D = A, B, C, D
        self.x = np.matrix(x0)
        self.s = self.x  # XXX refactor to use same state variable in CT_LTI and CT
    def f(self, t, s, u):
        # State derivative; the system is time-invariant, so t is unused.
        return self._A * s + self._B * u
    def g(self, t, s, u):
        # Output equation; t is unused (time-invariant).
        return self._C * s + self._D * u
    @property
    def ABCD(self):
        """The state-space matrices as a list [A, B, C, D]."""
        return([self._A, self._B, self._C, self._D])
    @property
    def order(self):
        """The order of the system"""
        return(self._A.shape[0])
    @property
    def shape(self):
        """Number of outputs and inputs"""
        return(self._D.shape)
    @property
    def poles(self):
        """Eigenvalues of the state matrix"""
        return(self.zpk[1])
    @property
    def stable(self):
        # Strictly stable iff all poles lie in the open left half-plane.
        return(np.all(self.poles.real < 0))
    @property
    def Wo(self):
        """Observability matrix"""
        # Stack C, C*A, C*A^2, ... row-wise for powers 0 .. order-1.
        W = np.matrix(np.zeros((0, self._C.shape[1])))
        for n in range(self.order):
            W = np.vstack((W, self._C * self._A**n))
        return(W)
    @property
    def Wr(self):
        """Reachability matrix"""
        # Stack B, A*B, A^2*B, ... column-wise for powers 0 .. order-1.
        W = np.matrix(np.zeros((self._B.shape[0], 0)))
        for n in range(self.order):
            W = np.hstack((W, self._A**n * self._B))
        return(W)
    @property
    def reachable(self):
        """Returns True if the system is reachable."""
        return(np.linalg.matrix_rank(self.Wr) == self.order)
    @property
    def observable(self):
        """Returns True if the system is observable."""
        return(np.linalg.matrix_rank(self.Wo) == self.order)
    def _tResponse(self):
        """Automatically determines appropriate time axis for step- and
        impulse-response plotting"""
        # Decay times from the real parts of the poles.
        tau = np.abs(1. / self.poles.real)
        f = self.poles.imag / (2 * np.pi)
        # Oscillation periods from the imaginary parts (non-oscillating
        # poles with zero imaginary part are skipped).
        period = np.abs(1. / f[f != 0])
        timescales = np.concatenate([tau, period])
        # Resolve the fastest timescale; cover ten times the slowest decay.
        dt = timescales.min() / 20.
        T = tau.max() * 10.
        return(np.arange(0., T, dt))
    def stepResponse(self, t=None):
        """
        Returns (t, ystep), where
        ystep : Step response
        t : Corresponding array of times
        t is either provided as an argument to this function or determined
        automatically.
        """
        if t is None:
            t = self._tResponse()
        A, B, C, D = self.ABCD
        # Closed form  y(t) = C A^-1 e^{At} B + (D - C A^-1 B); A.I is the
        # matrix inverse.  expm is the matrix exponential imported at module
        # level -- presumably scipy.linalg.expm (not visible in this chunk).
        steady = D - C * A.I * B
        y = [C * A.I * expm(A * ti) * B + steady for ti in t]
        return((t, np.array(y).reshape((-1,) + self.shape)))
    def impulseResponse(self, t=None):
        """
        Returns (t, yimpulse), where
        yimpulse : Impulse response (*without* direct term D)
        t : Corresponding array of times
        t is either provided as an argument to this function or determined
        automatically.
        """
        if t is None:
            t = self._tResponse()
        A, B, C = self._A, self._B, self._C
        # y(t) = C e^{At} B; the direct feedthrough D is deliberately
        # omitted (see docstring).
        y = [C * expm(A * ti) * B for ti in t]
        return((t, np.array(y).reshape((-1,) + self.shape)))
    def freqResponse(self, f=None):
        """
        Returns (f, r), where
        f : Array of frequencies
        r : (Complex) frequency response
        f is either provided as an argument to this function or determined
        automatically.
        """
        # see [astrom_feedback]_, page 153
        # Automatically determine frequency axis:
        if f is None:
            # Derive the frequency span from the automatic time axis:
            # fmin from the total duration, fmax from the Nyquist limit.
            t = self._tResponse()
            dt = t[1] - t[0]
            T = t[-1] + dt
            fmax = 1. / (2 * dt)
            fmin = 1. / T
            f = np.logspace(np.log10(fmin), np.log10(fmax), 200)
        b, a = self.tf
        # Evaluate the transfer function on the imaginary axis s = j*2*pi*f.
        s = 2 * np.pi * 1j * f
        resp = np.zeros((len(f),) + b.shape, dtype=complex)
        for i in range(b.shape[0]):
            for j in range(b.shape[1]):
                resp[:, i, j] = b[i, j](s) / a(s)
        return(f, resp)
    @property
    def tf(self):
        """
        Transfer-function representation [b, a] of the system. Returns
        numerator (b) and denominator (a) coefficients.
        .. math::
            G(s) = \\frac{b[0] * s^0 + ... + b[m] * s^m}
                        {a[0] * s^0 + ... + a[n] * s^n}
        """
        A, B, C, D = self.ABCD
        # G(s) = C (sI - A)^-1 B + D, computed symbolically with np.poly1d
        # entries.  polyDiag, determinant and cofactorMat are module-level
        # helpers defined elsewhere in this file.
        Aprime = polyDiag([np.poly1d([1, 0])] * self.order) - A
        det = determinant(Aprime)
        nout = self.shape[0]
        nominator = C * cofactorMat(Aprime).T * B + polyDiag([det] * nout) * D
        denominator = det
        return(nominator, denominator)
    @property
    def zpk(self):
        """
        Gain, Pole, Zero representation of the system. Returns a tuple
        (z, p, k) with z the zeros, p the poles, and k the gain of the system.
        p is an array. The format of z and k depends on the number of inputs
        and outputs of the system:
        For a SISO system z is an array and k is float. For a system with more
        inputs or outputs, z and k are lists of 'shape' (nout, nin) containing
        arrays and floats, respectively.
        """
        b, a = self.tf
        zeros = np.zeros(b.shape, dtype=list)
        gains = np.zeros(b.shape)
        for i in range(b.shape[0]):
            for j in range(b.shape[1]):
                zeros[i, j] = np.roots(b[i, j])
                # np.poly1d indexing: p[k] is the coefficient of s^k, so
                # p[p.order] is the leading coefficient (the gain).
                gains[i, j] = b[i, j][b[i, j].order]
        poles = np.roots(a)
        return(zeros, poles, gains)
    def __feedback__(self, Gout, Gin):
        # Close feedback loops from outputs Gout onto inputs Gin, one
        # channel at a time.
        G = self
        Nports = np.min(G.shape)
        if len(Gout) >= Nports:
            # cannot connect _all_ ports:
            raise ConnectionError('at least 1 input and at least 1 output '
                                  'must remain unconnected')
        if len(Gout) != len(Gin):
            raise ConnectionError(
                "No. outputs to connect must match no. inputs to connect")
        # connect one channel at a time. Start with Gout[0] => Hin[0]
        iout = Gout[0]
        jin = Gin[0]
        # Re-arrange ports so that iout and jin are the last output
        # and the last input, respectively:
        outorder = list(range(G.shape[0]))
        outorder.pop(iout)
        outorder += [iout]
        inorder = list(range(G.shape[1]))
        inorder.pop(jin)
        inorder += [jin]
        a, b, c, d = G.ABCD
        b = b[:, inorder]
        c = c[outorder, :]
        d = d[:, inorder]
        d = d[outorder, :]
        # Connect feedback:
        A = a + b[:, -1] * c[-1, :]
        B = b[:, :-1] + b[:, -1] * d[-1, :-1]
        C = c[:-1, :] + d[:-1, -1] * c[-1, :]
        D = d[:-1, :-1] + d[:-1, -1] * d[-1, :-1]
        if len(Gout) == 1:
            # work done => return result
            return(CT_LTI_System(A, B, C, D, G.x))
        else:
            # More ports have to be connected => recurse
            # NOTE(review): this recurses through __connect__(self, ...)
            # with self passed as the `right` operand, rather than calling
            # __feedback__ on the partially closed system -- confirm this
            # is the intended recursion.
            return(self.__connect__(self, Gout[1:], Gin[1:]))
    def __connect__(self, right, Gout=None, Hin=None):
        # Series connection H * G: feed outputs Gout of `right` (G) into
        # inputs Hin of `self` (H).  Also handles matrix and scalar gains
        # on the input side.
        H = self
        G = right
        # _normalizePartialConnections is a module-level helper (defined
        # elsewhere in this file) that fills in default port lists.
        Gout, Hin = _normalizePartialConnections(self, right, Gout, Hin)
        if issubclass(type(G), CT_LTI_System):
            if len(Gout) != len(Hin):
                raise ConnectionError('Number of inputs does not match '
                                      'number of outputs')
            Gout = np.asarray(Gout)
            Hin = np.asarray(Hin)
            # Prepare connection matrices:
            # ===============================
            # u_h = Sh * y_g:
            Sh = np.matrix(np.zeros((H.shape[1], G.shape[0])))
            for k in range(len(Hin)):
                i = Hin[k]
                j = Gout[k]
                Sh[i, j] = 1.
            # u_h = sh * u_h,unconnected:
            sh = np.matrix(np.zeros((H.shape[1], H.shape[1] - len(Hin))))
            u_h_unconnected = list(set(range(H.shape[1])) - set(Hin))
            sh[u_h_unconnected, :] = np.eye(H.shape[1] - len(Hin))
            # y_g,unconnected = sg * y_g:
            sg = np.matrix(np.zeros((G.shape[0] - len(Gout), G.shape[0])))
            y_g_unconnected = list(set(range(G.shape[0])) - set(Gout))
            sg[:, y_g_unconnected] = np.eye(G.shape[0] - len(Gout))
            # Setup state matrices:
            # ===============================
            nH = H.order
            nG = G.order
            A = np.bmat([[G._A, np.zeros((nG, nH))],
                         [H._B * Sh * G._C, H._A]])
            B = np.bmat([[G._B, np.zeros((nG, len(u_h_unconnected)))],
                         [H._B * Sh * G._D, H._B * sh]])
            C = np.bmat([[sg * G._C, np.zeros((len(y_g_unconnected), nH))],
                         [H._D * Sh * G._C, H._C]])
            D = np.bmat([[sg * G._D, np.zeros((len(y_g_unconnected),
                                                len(u_h_unconnected)))],
                         [H._D * Sh * G._D, H._D * sh]])
            x0 = np.vstack([G.x, H.x])
        elif issubclass(type(G), np.matrix):
            if H.shape[1] != G.shape[0]:
                raise ConnectionError('No. inputs and outputs do not match')
            # Multiply u by matrix before feeding into H:
            A = np.matrix(H._A)
            B = H._B * G
            C = np.matrix(H._C)
            D = H._D * G
            x0 = np.matrix(H.x)
        elif type(G) in [float, int]:
            # Apply gain G on input side:
            A = np.matrix(H._A)
            B = np.matrix(H._B)
            C = G * H._C
            D = G * H._D
            x0 = np.matrix(H.x)
        else:
            raise NotImplementedError
        return(CT_LTI_System(A, B, C, D, x0))
    def __rconnect__(self, left, Gout=None, Hin=None):
        # Reflected connection: `left` acts on the outputs of `self`.
        G = self
        H = left
        if issubclass(type(H), CT_LTI_System):
            # Delegate to the forward implementation with roles swapped.
            return(H.__connect__(G, Gout, Hin))
        elif issubclass(type(H), np.matrix):
            if H.shape[1] != G.shape[0]:
                raise ConnectionError('No. inputs and outputs do not match')
            # Multiply output of G by matrix:
            A = np.matrix(G._A)
            B = np.matrix(G._B)
            C = H * G._C
            D = H * G._D
            x0 = np.matrix(G.x)
        elif type(H) in [float, int]:
            # Apply gain H on output side:
            A = np.matrix(G._A)
            B = np.matrix(G._B)
            C = H * G._C
            D = H * G._D
            x0 = np.matrix(G.x)
        else:
            raise NotImplementedError
        return(CT_LTI_System(A, B, C, D, x0))
    def __add__(self, right):
        # Parallel sum: same input drives both systems, outputs are added.
        G = self
        nG = G.order
        if issubclass(type(right), CT_LTI_System):
            H = right
            nH = H.order
            if self.shape != H.shape:
                raise ConnectionError('System shapes must be equal')
            A = np.bmat([[G._A, np.zeros((nG, nH))],
                         [np.zeros((nH, nG)), H._A]])
            B = np.vstack([G._B, H._B])
            C = np.hstack([G._C, H._C])
            D = G._D + H._D
            x0 = np.vstack([G.x, H.x])
            return(CT_LTI_System(A, B, C, D, x0))
        if issubclass(type(right), np.matrix):
            # (G + M)(t, x, u) = G(t, x, u) + M*u
            if right.shape != G._D.shape:
                raise MatrixError(
                    f'Shapes of {right} and self._D have to match')
            A = G._A
            B = G._B
            C = G._C
            D = G._D + right
            x0 = G.x
            return(CT_LTI_System(A, B, C, D, x0))
        if type(right) in [float, int]:
            # Promote the scalar to a constant matrix and re-dispatch.
            right = np.matrix(np.ones(self.shape) * right)
            return(self + right)
        return super().__add__(right)
    def __radd__(self, left):
        # Addition is commutative for all supported operand types.
        if issubclass(type(left), (CT_LTI_System, np.matrix, float, int)):
            return(self + left)
        return super().__radd__(left)
    def __or__(self, right):
        """Connect systems in parallel"""
        # XXX Does not work if right is not a subclass of type(self)!
        Ag, Bg, Cg, Dg = self.ABCD
        gout, gin = self.shape
        ng = self.order
        Ah, Bh, Ch, Dh = right.ABCD
        hout, hin = right.shape
        nh = right.order
        # Block-diagonal stacking of the two systems.
        # NOTE(review): the off-diagonal zero blocks of A use [ng, ng] and
        # [nh, nh]; for systems of different order (ng != nh) these should
        # presumably be [ng, nh] and [nh, ng] -- confirm.
        A = np.bmat([[Ag, np.zeros([ng, ng])],
                     [np.zeros([nh, nh]), Ah]])
        B = np.bmat([[Bg, np.zeros([ng, gin])],
                     [np.zeros([nh, hin]), Bh]])
        C = np.bmat([[Cg, np.zeros([gout, ng])],
                     [np.zeros([hout, nh]), Ch]])
        D = np.bmat([[Dg, np.zeros([gout, gin])],
                     [np.zeros([hout, hin]), Dh]])
        x = np.vstack([self.x, right.x])
        return(CT_LTI_System(A, B, C, D, x))
def Thetaphi(b, a):
    """Convert filter-coefficient arrays b and a to the Theta, phi
    (back-shift operator) representation:
        phi(B)*y_t = Theta(B)*x_t
    where index i of each returned array belongs to B^i.
    Returns [Theta, phi].
    """
    theta = np.array(b)
    phi = np.array(a)
    # Move the non-leading denominator terms to the other side of the
    # equation by flipping their sign (no-op for arrays of length <= 1).
    phi[1:] *= -1
    return [theta, phi]
def ba(Theta, phi):
    """Convert backshift-operator polynomials Theta and phi back to the
    filter-coefficient arrays b, a with the convention
    a[0]*y[t] = a[1]*y[t-1] + ... + a[n]*y[t-n] + b[0]*x[t] + ... + b[m]*x[t-m]
    """
    # Thetaphi() only flips the signs of the non-leading phi terms, so it
    # is an involution: applying it again recovers (b, a).
    # XXX these b and a are not compatible with scipy.lfilter. Apparently,
    # scipy.lfilter expects Theta and phi
    return Thetaphi(Theta, phi)
def differenceEquation(b, a):
    """Render the difference equation for filter coefficients b and a as a
    string in powers of B, the backshift operator."""
    Theta, phi = Thetaphi(b, a)
    # Build each side as '%.2f B^%d+' terms; the [:-1] slices drop the
    # trailing '+' exactly as the original accumulation loop did.
    lhs = '(' + ''.join('%.2f B^%d+' % (coeff, k)
                        for k, coeff in enumerate(phi))
    both = lhs[:-1] + ')*y_t = (' + ''.join('%.2f B^%d+' % (coeff, k)
                                            for k, coeff in enumerate(Theta))
    return both[:-1] + ')*x_t'
class DT_LTV_System():
    """Implements the discrete linear, time-variant system with input vector
    u[t], internal state vector x[t], and output vector y[t]:
        x[t+1] = A[t]*x[t] + B[t]*u[t]
        y[t] = C*x[t] + D*u[t]
    where
        A[t]: state matrices
        B[t]: input matrices
        C[t]: output matrices
        D[t]: feedthrough matrices
    The system is initialized with state vector x[0] = X0.
    When stepped beyond the last supplied matrix set, the final set is
    reused (the system becomes time-invariant from then on).
    """
    def __init__(self, At, Bt, Ct, Dt, X0):
        self.At = At
        self.Bt = Bt
        self.Ct = Ct
        self.Dt = Dt
        self.X = X0
        self.t = 0
    def update(self, U):
        """Advance the system by one time step with input U; return y[t].

        U is reshaped to a column vector internally; unlike the previous
        in-place ``U.shape = (-1, 1)``, the caller's array is not mutated.
        """
        U = np.reshape(U, (-1, 1))
        # BUG FIX: clamp to the last *valid* index.  The previous
        # min(self.t, len(self.At)) allowed t == len(At), which raised
        # IndexError on the first step past the final matrix set.
        t = min(self.t, len(self.At) - 1)
        self.X = np.dot(self.At[t], self.X) + np.dot(self.Bt[t], U)
        self.t += 1
        return np.dot(self.Ct[t], self.X) + np.dot(self.Dt[t], U)
    def feed(self, Ut):
        """Feed the input sequence Ut (one column per time step) and return
        the horizontally stacked outputs, one column per step."""
        return np.concatenate([self.update(U) for U in Ut.T]).T
class DT_LTI_System(object):
    """Implements the discrete-time linear, time-invariant system with input
    vector u[t], internal state vector x[t], and output vector y[t]:
        x[t+1] = A * x[t] + B * u[t]
        y[t] = C * x[t] + D * u[t]
    where
        A: state matrix
        B: input matrix
        C: output matrix
        D: feedthrough matrix
    The system is initialized with state vector x[0] = x0.
    """
    def __init__(self, A, B, C, D, x0=None):
        # BUG FIX: the matrices were previously unpacked as
        # "self.A, self.B, self.C, self.C = A, B, C, D", so D silently
        # overwrote C and self.D never existed.
        self.A, self.B, self.C, self.D = A, B, C, D
        # BUG FIX: the old default x0=np.matrix([0., 0.]).T was a mutable
        # default argument shared by every instance; build a fresh zero
        # state per instance instead (same default value as before).
        self.x = np.matrix([0., 0.]).T if x0 is None else x0
    @staticmethod
    def fromTransferFunction(Theta, phi):
        """Initialize DiscreteLTI instance from transfer-function coefficients
        'Theta' and 'phi'."""
        raise NotImplementedError
    def __repr__(self):
        raise NotImplementedError
    def stable(self):
        """Returns True if the system is strictly stable"""
        raise NotImplementedError
    def observable(self):
        """Returns true if the system is observable"""
        raise NotImplementedError
    def reachable(self):
        """Returns True if the system is reachable"""
        raise NotImplementedError
    def tf(self):
        """Returns the transfer function (b, a) where 'b' are the coefficients
        of the nominator polynomial and 'a' are the coefficients of the
        denominator polynomial."""
        raise NotImplementedError
    def proper(self):
        """Returns true if the system's transfer function is strictly proper,
        i.e. the degree of the numerator is less than the degree of the
        denominator."""
        raise NotImplementedError
    def __add__(self, right):
        raise NotImplementedError
    def __radd__(self, left):
        raise NotImplementedError
    def __rsub__(self, left):
        raise NotImplementedError
    def __mul__(self, right):
        raise NotImplementedError
    def __rmul__(self, left):
        raise NotImplementedError
    def __iadd__(self, right):
        raise NotImplementedError
    def __isub__(self, right):
        raise NotImplementedError
    def __imul__(self, right):
        raise NotImplementedError
    def __idiv__(self, right):
        raise NotImplementedError
if __name__ == '__main__':
    # Demo script: builds a couple of systems and plots step, impulse,
    # Bode, and Nyquist responses.
    import matplotlib.pyplot as pl
    pl.close('all')
    from tanuna.CT_LTI import LowPass, HighPass
    # Two low-passes combined via connect() (a module-level helper defined
    # elsewhere in this file; no ports are closed here), then collapsed to
    # a SISO system by the input/output combination matrices.
    J = connect(LowPass(10.), LowPass(10.), Gout=(), Hin=())
    J = np.matrix([[1, 1]]) * J * np.matrix([[1], [1]])
    # Hand-built second-order (damped-oscillator) state-space model.
    w0 = 2 * np.pi * 10
    zeta = 0.5
    k = 1.
    A = np.matrix([[0, w0], [-w0, -2 * zeta * w0]])
    B = np.matrix([0, k * w0]).T
    C = np.matrix([k, 0.])
    D = np.matrix([0.])
    G = CT_LTI_System(A, B, C, D)
    # NOTE(review): G is immediately overwritten, so the hand-built system
    # above is dead code -- all plots below use HighPass(10, 2).
    G = HighPass(10, 2)
    pl.figure()
    # STEP RESPONSE
    pl.subplot(4, 1, 1)
    pl.title('Step-Response')
    pl.plot(*G.stepResponse())
    pl.xlabel('Time After Step (s)')
    pl.ylabel('y')
    # IMPULSE RESPONSE
    pl.subplot(4, 1, 2)
    pl.title('Impulse-Response')
    pl.plot(*G.impulseResponse())
    pl.xlabel('Time After Impulse (s)')
    pl.ylabel('y')
    # BODE PLOT
    ax1 = pl.subplot(4, 1, 3)
    ax1.set_title('Bode Plot')
    f, Chi = G.freqResponse()
    # Flatten the (nf, 1, 1) SISO response to a 1-D array for plotting.
    Chi.shape = (-1)
    ax1.semilogx(f, 20 * np.log10(np.abs(Chi)), r'b-')
    ax1.set_xlabel('Frequency (Hz)')
    ax1.set_ylabel('Magnitude (dB)')
    ax2 = ax1.twinx()
    # Phase on a twin y-axis, in units of pi.
    ax2.semilogx(f, np.angle(Chi) / np.pi, r'r-')
    ax2.set_ylabel(r'Phase ($\pi$)')
    # NYQUIST PLOT
    ax = pl.subplot(4, 1, 4)
    pl.title('Nyquist Plot')
    pl.plot(np.real(Chi), np.imag(Chi))
    # Mark the critical point -1 + 0j used for stability assessment.
    pl.plot([-1], [0], r'ro')
    pl.xlim([-2.5, 2])
    pl.ylim([-1.5, 0.5])
    ax.set_aspect('equal')
    pl.axhline(y=0, color='k')
    pl.axvline(x=0, color='k')
    pl.xlabel('Real Part')
    pl.ylabel('Imaginary Part')
| [
"matplotlib.pyplot.title",
"numpy.roots",
"numpy.abs",
"tanuna.CT_LTI.LowPass",
"numpy.angle",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.imag",
"numpy.arange",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.close",
"numpy.linalg.matrix_rank",
"numpy.real",
"numpy.log10",
"matplot... | [((1598, 1629), 'numpy.zeros', 'np.zeros', (['A.shape'], {'dtype': 'object'}), '(A.shape, dtype=object)\n', (1606, 1629), True, 'import numpy as np\n'), ((34085, 34096), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (34093, 34096), True, 'import numpy as np\n'), ((34157, 34168), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (34165, 34168), True, 'import numpy as np\n'), ((38280, 38295), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (38288, 38295), True, 'import matplotlib.pyplot as pl\n'), ((38522, 38565), 'numpy.matrix', 'np.matrix', (['[[0, w0], [-w0, -2 * zeta * w0]]'], {}), '([[0, w0], [-w0, -2 * zeta * w0]])\n', (38531, 38565), True, 'import numpy as np\n'), ((38607, 38626), 'numpy.matrix', 'np.matrix', (['[k, 0.0]'], {}), '([k, 0.0])\n', (38616, 38626), True, 'import numpy as np\n'), ((38634, 38650), 'numpy.matrix', 'np.matrix', (['[0.0]'], {}), '([0.0])\n', (38643, 38650), True, 'import numpy as np\n'), ((38693, 38708), 'tanuna.CT_LTI.HighPass', 'HighPass', (['(10)', '(2)'], {}), '(10, 2)\n', (38701, 38708), False, 'from tanuna.CT_LTI import LowPass, HighPass\n'), ((38714, 38725), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (38723, 38725), True, 'import matplotlib.pyplot as pl\n'), ((38751, 38770), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (38761, 38770), True, 'import matplotlib.pyplot as pl\n'), ((38775, 38800), 'matplotlib.pyplot.title', 'pl.title', (['"""Step-Response"""'], {}), "('Step-Response')\n", (38783, 38800), True, 'import matplotlib.pyplot as pl\n'), ((38836, 38868), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time After Step (s)"""'], {}), "('Time After Step (s)')\n", (38845, 38868), True, 'import matplotlib.pyplot as pl\n'), ((38873, 38887), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""y"""'], {}), "('y')\n", (38882, 38887), True, 'import matplotlib.pyplot as pl\n'), ((38916, 38935), 'matplotlib.pyplot.subplot', 'pl.subplot', 
(['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (38926, 38935), True, 'import matplotlib.pyplot as pl\n'), ((38940, 38968), 'matplotlib.pyplot.title', 'pl.title', (['"""Impulse-Response"""'], {}), "('Impulse-Response')\n", (38948, 38968), True, 'import matplotlib.pyplot as pl\n'), ((39007, 39042), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time After Impulse (s)"""'], {}), "('Time After Impulse (s)')\n", (39016, 39042), True, 'import matplotlib.pyplot as pl\n'), ((39047, 39061), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""y"""'], {}), "('y')\n", (39056, 39061), True, 'import matplotlib.pyplot as pl\n'), ((39089, 39108), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (39099, 39108), True, 'import matplotlib.pyplot as pl\n'), ((39458, 39477), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (39468, 39477), True, 'import matplotlib.pyplot as pl\n'), ((39482, 39506), 'matplotlib.pyplot.title', 'pl.title', (['"""Nyquist Plot"""'], {}), "('Nyquist Plot')\n", (39490, 39506), True, 'import matplotlib.pyplot as pl\n'), ((39551, 39575), 'matplotlib.pyplot.plot', 'pl.plot', (['[-1]', '[0]', '"""ro"""'], {}), "([-1], [0], 'ro')\n", (39558, 39575), True, 'import matplotlib.pyplot as pl\n'), ((39581, 39599), 'matplotlib.pyplot.xlim', 'pl.xlim', (['[-2.5, 2]'], {}), '([-2.5, 2])\n', (39588, 39599), True, 'import matplotlib.pyplot as pl\n'), ((39604, 39624), 'matplotlib.pyplot.ylim', 'pl.ylim', (['[-1.5, 0.5]'], {}), '([-1.5, 0.5])\n', (39611, 39624), True, 'import matplotlib.pyplot as pl\n'), ((39656, 39682), 'matplotlib.pyplot.axhline', 'pl.axhline', ([], {'y': '(0)', 'color': '"""k"""'}), "(y=0, color='k')\n", (39666, 39682), True, 'import matplotlib.pyplot as pl\n'), ((39687, 39713), 'matplotlib.pyplot.axvline', 'pl.axvline', ([], {'x': '(0)', 'color': '"""k"""'}), "(x=0, color='k')\n", (39697, 39713), True, 'import matplotlib.pyplot as pl\n'), ((39718, 39740), 'matplotlib.pyplot.xlabel', 
'pl.xlabel', (['"""Real Part"""'], {}), "('Real Part')\n", (39727, 39740), True, 'import matplotlib.pyplot as pl\n'), ((39745, 39772), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Imaginary Part"""'], {}), "('Imaginary Part')\n", (39754, 39772), True, 'import matplotlib.pyplot as pl\n'), ((1901, 1931), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'object'}), '((N, N), dtype=object)\n', (1909, 1931), True, 'import numpy as np\n'), ((10114, 10132), 'numpy.min', 'np.min', (['self.shape'], {}), '(self.shape)\n', (10120, 10132), True, 'import numpy as np\n'), ((17878, 17906), 'numpy.vstack', 'np.vstack', (['[self.s, right.s]'], {}), '([self.s, right.s])\n', (17887, 17906), True, 'import numpy as np\n'), ((18831, 18858), 'numpy.vstack', 'np.vstack', (['[left.s, self.s]'], {}), '([left.s, self.s])\n', (18840, 18858), True, 'import numpy as np\n'), ((21276, 21289), 'numpy.matrix', 'np.matrix', (['x0'], {}), '(x0)\n', (21285, 21289), True, 'import numpy as np\n'), ((21979, 22006), 'numpy.all', 'np.all', (['(self.poles.real < 0)'], {}), '(self.poles.real < 0)\n', (21985, 22006), True, 'import numpy as np\n'), ((22932, 22961), 'numpy.abs', 'np.abs', (['(1.0 / self.poles.real)'], {}), '(1.0 / self.poles.real)\n', (22938, 22961), True, 'import numpy as np\n'), ((23020, 23043), 'numpy.abs', 'np.abs', (['(1.0 / f[f != 0])'], {}), '(1.0 / f[f != 0])\n', (23026, 23043), True, 'import numpy as np\n'), ((23064, 23093), 'numpy.concatenate', 'np.concatenate', (['[tau, period]'], {}), '([tau, period])\n', (23078, 23093), True, 'import numpy as np\n'), ((23173, 23194), 'numpy.arange', 'np.arange', (['(0.0)', 'T', 'dt'], {}), '(0.0, T, dt)\n', (23182, 23194), True, 'import numpy as np\n'), ((26312, 26341), 'numpy.zeros', 'np.zeros', (['b.shape'], {'dtype': 'list'}), '(b.shape, dtype=list)\n', (26320, 26341), True, 'import numpy as np\n'), ((26358, 26375), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (26366, 26375), True, 'import numpy as np\n'), ((26569, 26580), 
'numpy.roots', 'np.roots', (['a'], {}), '(a)\n', (26577, 26580), True, 'import numpy as np\n'), ((26691, 26706), 'numpy.min', 'np.min', (['G.shape'], {}), '(G.shape)\n', (26697, 26706), True, 'import numpy as np\n'), ((33738, 33766), 'numpy.vstack', 'np.vstack', (['[self.x, right.x]'], {}), '([self.x, right.x])\n', (33747, 33766), True, 'import numpy as np\n'), ((38361, 38374), 'tanuna.CT_LTI.LowPass', 'LowPass', (['(10.0)'], {}), '(10.0)\n', (38368, 38374), False, 'from tanuna.CT_LTI import LowPass, HighPass\n'), ((38375, 38388), 'tanuna.CT_LTI.LowPass', 'LowPass', (['(10.0)'], {}), '(10.0)\n', (38382, 38388), False, 'from tanuna.CT_LTI import LowPass, HighPass\n'), ((38440, 38461), 'numpy.matrix', 'np.matrix', (['[[1], [1]]'], {}), '([[1], [1]])\n', (38449, 38461), True, 'import numpy as np\n'), ((38574, 38596), 'numpy.matrix', 'np.matrix', (['[0, k * w0]'], {}), '([0, k * w0])\n', (38583, 38596), True, 'import numpy as np\n'), ((39519, 39531), 'numpy.real', 'np.real', (['Chi'], {}), '(Chi)\n', (39526, 39531), True, 'import numpy as np\n'), ((39533, 39545), 'numpy.imag', 'np.imag', (['Chi'], {}), '(Chi)\n', (39540, 39545), True, 'import numpy as np\n'), ((9216, 9250), 'numpy.zeros', 'np.zeros', (['(H.shape[1], G.shape[0])'], {}), '((H.shape[1], G.shape[0]))\n', (9224, 9250), True, 'import numpy as np\n'), ((13468, 13489), 'numpy.vstack', 'np.vstack', (['[G.s, H.s]'], {}), '([G.s, H.s])\n', (13477, 13489), True, 'import numpy as np\n'), ((15963, 15991), 'numpy.vstack', 'np.vstack', (['[self.s, right.s]'], {}), '([self.s, right.s])\n', (15972, 15991), True, 'import numpy as np\n'), ((19385, 19405), 'numpy.matrix', 'np.matrix', (['[[right]]'], {}), '([[right]])\n', (19394, 19405), True, 'import numpy as np\n'), ((20011, 20030), 'numpy.matrix', 'np.matrix', (['[[left]]'], {}), '([[left]])\n', (20020, 20030), True, 'import numpy as np\n'), ((22098, 22129), 'numpy.zeros', 'np.zeros', (['(0, self._C.shape[1])'], {}), '((0, self._C.shape[1]))\n', (22106, 22129), True, 
'import numpy as np\n'), ((22183, 22221), 'numpy.vstack', 'np.vstack', (['(W, self._C * self._A ** n)'], {}), '((W, self._C * self._A ** n))\n', (22192, 22221), True, 'import numpy as np\n'), ((22327, 22358), 'numpy.zeros', 'np.zeros', (['(self._B.shape[0], 0)'], {}), '((self._B.shape[0], 0))\n', (22335, 22358), True, 'import numpy as np\n'), ((22412, 22450), 'numpy.hstack', 'np.hstack', (['(W, self._A ** n * self._B)'], {}), '((W, self._A ** n * self._B))\n', (22421, 22450), True, 'import numpy as np\n'), ((22577, 22607), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.Wr'], {}), '(self.Wr)\n', (22598, 22607), True, 'import numpy as np\n'), ((22735, 22765), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.Wo'], {}), '(self.Wo)\n', (22756, 22765), True, 'import numpy as np\n'), ((28492, 28508), 'numpy.asarray', 'np.asarray', (['Gout'], {}), '(Gout)\n', (28502, 28508), True, 'import numpy as np\n'), ((28527, 28542), 'numpy.asarray', 'np.asarray', (['Hin'], {}), '(Hin)\n', (28537, 28542), True, 'import numpy as np\n'), ((30072, 30093), 'numpy.vstack', 'np.vstack', (['[G.x, H.x]'], {}), '([G.x, H.x])\n', (30081, 30093), True, 'import numpy as np\n'), ((32059, 32082), 'numpy.vstack', 'np.vstack', (['[G._B, H._B]'], {}), '([G._B, H._B])\n', (32068, 32082), True, 'import numpy as np\n'), ((32099, 32122), 'numpy.hstack', 'np.hstack', (['[G._C, H._C]'], {}), '([G._C, H._C])\n', (32108, 32122), True, 'import numpy as np\n'), ((32168, 32189), 'numpy.vstack', 'np.vstack', (['[G.x, H.x]'], {}), '([G.x, H.x])\n', (32177, 32189), True, 'import numpy as np\n'), ((35746, 35772), 'numpy.dot', 'np.dot', (['self.At[t]', 'self.X'], {}), '(self.At[t], self.X)\n', (35752, 35772), True, 'import numpy as np\n'), ((35775, 35796), 'numpy.dot', 'np.dot', (['self.Bt[t]', 'U'], {}), '(self.Bt[t], U)\n', (35781, 35796), True, 'import numpy as np\n'), ((35832, 35858), 'numpy.dot', 'np.dot', (['self.Ct[t]', 'self.X'], {}), '(self.Ct[t], self.X)\n', (35838, 35858), True, 
'import numpy as np\n'), ((35861, 35882), 'numpy.dot', 'np.dot', (['self.Dt[t]', 'U'], {}), '(self.Dt[t], U)\n', (35867, 35882), True, 'import numpy as np\n'), ((36445, 36466), 'numpy.matrix', 'np.matrix', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (36454, 36466), True, 'import numpy as np\n'), ((38414, 38433), 'numpy.matrix', 'np.matrix', (['[[1, 1]]'], {}), '([[1, 1]])\n', (38423, 38433), True, 'import numpy as np\n'), ((39362, 39375), 'numpy.angle', 'np.angle', (['Chi'], {}), '(Chi)\n', (39370, 39375), True, 'import numpy as np\n'), ((7114, 7134), 'numpy.zeros', 'np.zeros', (['(order, 1)'], {}), '((order, 1))\n', (7122, 7134), True, 'import numpy as np\n'), ((7515, 7531), 'numpy.array', 'np.array', (['self.s'], {}), '(self.s)\n', (7523, 7531), True, 'import numpy as np\n'), ((8091, 8103), 'numpy.matrix', 'np.matrix', (['s'], {}), '(s)\n', (8100, 8103), True, 'import numpy as np\n'), ((20803, 20823), 'numpy.zeros', 'np.zeros', (['(order, 1)'], {}), '((order, 1))\n', (20811, 20823), True, 'import numpy as np\n'), ((24823, 24837), 'numpy.log10', 'np.log10', (['fmin'], {}), '(fmin)\n', (24831, 24837), True, 'import numpy as np\n'), ((24839, 24853), 'numpy.log10', 'np.log10', (['fmax'], {}), '(fmax)\n', (24847, 24853), True, 'import numpy as np\n'), ((26482, 26499), 'numpy.roots', 'np.roots', (['b[i, j]'], {}), '(b[i, j])\n', (26490, 26499), True, 'import numpy as np\n'), ((28691, 28725), 'numpy.zeros', 'np.zeros', (['(H.shape[1], G.shape[0])'], {}), '((H.shape[1], G.shape[0]))\n', (28699, 28725), True, 'import numpy as np\n'), ((30331, 30346), 'numpy.matrix', 'np.matrix', (['H._A'], {}), '(H._A)\n', (30340, 30346), True, 'import numpy as np\n'), ((30388, 30403), 'numpy.matrix', 'np.matrix', (['H._C'], {}), '(H._C)\n', (30397, 30403), True, 'import numpy as np\n'), ((30446, 30460), 'numpy.matrix', 'np.matrix', (['H.x'], {}), '(H.x)\n', (30455, 30460), True, 'import numpy as np\n'), ((31196, 31211), 'numpy.matrix', 'np.matrix', (['G._A'], {}), '(G._A)\n', (31205, 31211), 
True, 'import numpy as np\n'), ((31228, 31243), 'numpy.matrix', 'np.matrix', (['G._B'], {}), '(G._B)\n', (31237, 31243), True, 'import numpy as np\n'), ((31311, 31325), 'numpy.matrix', 'np.matrix', (['G.x'], {}), '(G.x)\n', (31320, 31325), True, 'import numpy as np\n'), ((39225, 39236), 'numpy.abs', 'np.abs', (['Chi'], {}), '(Chi)\n', (39231, 39236), True, 'import numpy as np\n'), ((7167, 7180), 'numpy.matrix', 'np.matrix', (['s0'], {}), '(s0)\n', (7176, 7180), True, 'import numpy as np\n'), ((7967, 7982), 'numpy.matrix', 'np.matrix', (['[[]]'], {}), '([[]])\n', (7976, 7982), True, 'import numpy as np\n'), ((23678, 23689), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (23686, 23689), True, 'import numpy as np\n'), ((24160, 24172), 'scipy.linalg.expm', 'expm', (['(A * ti)'], {}), '(A * ti)\n', (24164, 24172), False, 'from scipy.linalg import expm\n'), ((24209, 24220), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (24217, 24220), True, 'import numpy as np\n'), ((30557, 30572), 'numpy.matrix', 'np.matrix', (['H._A'], {}), '(H._A)\n', (30566, 30572), True, 'import numpy as np\n'), ((30589, 30604), 'numpy.matrix', 'np.matrix', (['H._B'], {}), '(H._B)\n', (30598, 30604), True, 'import numpy as np\n'), ((30672, 30686), 'numpy.matrix', 'np.matrix', (['H.x'], {}), '(H.x)\n', (30681, 30686), True, 'import numpy as np\n'), ((31423, 31438), 'numpy.matrix', 'np.matrix', (['G._A'], {}), '(G._A)\n', (31432, 31438), True, 'import numpy as np\n'), ((31455, 31470), 'numpy.matrix', 'np.matrix', (['G._B'], {}), '(G._B)\n', (31464, 31470), True, 'import numpy as np\n'), ((31538, 31552), 'numpy.matrix', 'np.matrix', (['G.x'], {}), '(G.x)\n', (31547, 31552), True, 'import numpy as np\n'), ((32729, 32748), 'numpy.ones', 'np.ones', (['self.shape'], {}), '(self.shape)\n', (32736, 32748), True, 'import numpy as np\n'), ((33360, 33378), 'numpy.zeros', 'np.zeros', (['[ng, ng]'], {}), '([ng, ng])\n', (33368, 33378), True, 'import numpy as np\n'), ((33403, 33421), 'numpy.zeros', 
'np.zeros', (['[nh, nh]'], {}), '([nh, nh])\n', (33411, 33421), True, 'import numpy as np\n'), ((33455, 33474), 'numpy.zeros', 'np.zeros', (['[ng, gin]'], {}), '([ng, gin])\n', (33463, 33474), True, 'import numpy as np\n'), ((33499, 33518), 'numpy.zeros', 'np.zeros', (['[nh, hin]'], {}), '([nh, hin])\n', (33507, 33518), True, 'import numpy as np\n'), ((33552, 33572), 'numpy.zeros', 'np.zeros', (['[gout, ng]'], {}), '([gout, ng])\n', (33560, 33572), True, 'import numpy as np\n'), ((33597, 33617), 'numpy.zeros', 'np.zeros', (['[hout, nh]'], {}), '([hout, nh])\n', (33605, 33617), True, 'import numpy as np\n'), ((33651, 33672), 'numpy.zeros', 'np.zeros', (['[gout, gin]'], {}), '([gout, gin])\n', (33659, 33672), True, 'import numpy as np\n'), ((33697, 33718), 'numpy.zeros', 'np.zeros', (['[hout, hin]'], {}), '([hout, hin])\n', (33705, 33718), True, 'import numpy as np\n'), ((23620, 23632), 'scipy.linalg.expm', 'expm', (['(A * ti)'], {}), '(A * ti)\n', (23624, 23632), False, 'from scipy.linalg import expm\n'), ((25505, 25522), 'numpy.poly1d', 'np.poly1d', (['[1, 0]'], {}), '([1, 0])\n', (25514, 25522), True, 'import numpy as np\n'), ((29528, 29546), 'numpy.zeros', 'np.zeros', (['(nG, nH)'], {}), '((nG, nH))\n', (29536, 29546), True, 'import numpy as np\n'), ((31968, 31986), 'numpy.zeros', 'np.zeros', (['(nG, nH)'], {}), '((nG, nH))\n', (31976, 31986), True, 'import numpy as np\n'), ((32015, 32033), 'numpy.zeros', 'np.zeros', (['(nH, nG)'], {}), '((nH, nG))\n', (32023, 32033), True, 'import numpy as np\n'), ((7804, 7819), 'numpy.matrix', 'np.matrix', (['[[]]'], {}), '([[]])\n', (7813, 7819), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class PointNetCls(nn.Module):
    """PointNet-style point-cloud classifier.

    Shared per-point MLPs implemented as 1x1 convolutions, a symmetric
    max-pool over points, and a fully-connected classification head.

    Args:
        output_classes: size of the final linear layer (number of classes).
        input_dims: per-point feature dimension (3 for raw XYZ).
        conv1_dim: base channel width; later layers are multiples of it.
        dropout_prob: dropout probability in the classifier head.
        use_transform: if True, apply the learned input/feature alignment
            networks (TransformNet) before each conv block.
    """
    def __init__(self, output_classes, input_dims=3, conv1_dim=64,
                 dropout_prob=0.5, use_transform=True):
        super(PointNetCls, self).__init__()
        self.input_dims = input_dims
        # First shared MLP block: three 1x1 convs at the base width.
        self.conv1 = nn.ModuleList()
        self.conv1.append(nn.Conv1d(input_dims, conv1_dim, 1))
        self.conv1.append(nn.Conv1d(conv1_dim, conv1_dim, 1))
        self.conv1.append(nn.Conv1d(conv1_dim, conv1_dim, 1))
        self.bn1 = nn.ModuleList()
        self.bn1.append(nn.BatchNorm1d(conv1_dim))
        self.bn1.append(nn.BatchNorm1d(conv1_dim))
        self.bn1.append(nn.BatchNorm1d(conv1_dim))
        # Second shared MLP block widens to conv1_dim * 16 (1024 by default).
        self.conv2 = nn.ModuleList()
        self.conv2.append(nn.Conv1d(conv1_dim, conv1_dim * 2, 1))
        self.conv2.append(nn.Conv1d(conv1_dim * 2, conv1_dim * 16, 1))
        self.bn2 = nn.ModuleList()
        self.bn2.append(nn.BatchNorm1d(conv1_dim * 2))
        self.bn2.append(nn.BatchNorm1d(conv1_dim * 16))
        # Symmetric aggregation over points via max-pool.
        # NOTE(review): the kernel size equals the feature width, so the
        # .view() in forward() assumes the number of input points makes the
        # pooled length exactly 1 -- confirm the expected point-cloud size.
        self.maxpool = nn.MaxPool1d(conv1_dim * 16)
        self.pool_feat_len = conv1_dim * 16
        # Classifier head.
        self.mlp3 = nn.ModuleList()
        self.mlp3.append(nn.Linear(conv1_dim * 16, conv1_dim * 8))
        self.mlp3.append(nn.Linear(conv1_dim * 8, conv1_dim * 4))
        self.bn3 = nn.ModuleList()
        self.bn3.append(nn.BatchNorm1d(conv1_dim * 8))
        self.bn3.append(nn.BatchNorm1d(conv1_dim * 4))
        # BUG FIX: dropout_prob was accepted but ignored -- the rate was
        # hard-coded to nn.Dropout(0.3).  Honor the parameter instead.
        self.dropout = nn.Dropout(dropout_prob)
        self.mlp_out = nn.Linear(conv1_dim * 4, output_classes)
        self.use_transform = use_transform
        if use_transform:
            self.transform1 = TransformNet(input_dims)
            self.trans_bn1 = nn.BatchNorm1d(input_dims)
            self.transform2 = TransformNet(conv1_dim)
            self.trans_bn2 = nn.BatchNorm1d(conv1_dim)
    def forward(self, x):
        """Classify a batch of point clouds.

        Args:
            x: tensor of shape (batch, num_points, input_dims).

        Returns:
            Raw class scores (logits) of shape (batch, output_classes).
        """
        # Convolutions expect (batch, channels, points).
        h = x.permute(0, 2, 1)
        if self.use_transform:
            # Learned input alignment: each point is multiplied by a
            # predicted input_dims x input_dims matrix.
            trans = self.transform1(h)
            h = h.transpose(2, 1)
            h = torch.bmm(h, trans)
            h = h.transpose(2, 1)
            h = F.relu(self.trans_bn1(h))
        for conv, bn in zip(self.conv1, self.bn1):
            h = F.relu(bn(conv(h)))
        if self.use_transform:
            # Feature alignment in conv1_dim-dimensional feature space.
            trans = self.transform2(h)
            h = h.transpose(2, 1)
            h = torch.bmm(h, trans)
            h = h.transpose(2, 1)
            h = F.relu(self.trans_bn2(h))
        for conv, bn in zip(self.conv2, self.bn2):
            h = F.relu(bn(conv(h)))
        # Global feature: max over points, flattened to (batch, features).
        h = self.maxpool(h).view(-1, self.pool_feat_len)
        for mlp, bn in zip(self.mlp3, self.bn3):
            h = F.relu(bn(mlp(h)))
        h = self.dropout(h)
        return self.mlp_out(h)
class TransformNet(nn.Module):
    """Spatial/feature transformer (T-Net) used by PointNet.

    Predicts an (input_dims x input_dims) alignment matrix per batch item;
    the raw prediction is biased towards the identity matrix.
    """
    def __init__(self, input_dims=3, conv1_dim=64):
        super(TransformNet, self).__init__()
        # Point-wise conv stack: input_dims -> d -> 2d -> 16d channels.
        self.conv = nn.ModuleList()
        for c_in, c_out in ((input_dims, conv1_dim),
                            (conv1_dim, conv1_dim * 2),
                            (conv1_dim * 2, conv1_dim * 16)):
            self.conv.append(nn.Conv1d(c_in, c_out, 1))
        self.bn = nn.ModuleList()
        for width in (conv1_dim, conv1_dim * 2, conv1_dim * 16):
            self.bn.append(nn.BatchNorm1d(width))
        self.maxpool = nn.MaxPool1d(conv1_dim * 16)
        self.pool_feat_len = conv1_dim * 16
        # Two fully connected layers squeeze the pooled feature down.
        self.mlp2 = nn.ModuleList()
        self.mlp2.append(nn.Linear(conv1_dim * 16, conv1_dim * 8))
        self.mlp2.append(nn.Linear(conv1_dim * 8, conv1_dim * 4))
        self.bn2 = nn.ModuleList()
        self.bn2.append(nn.BatchNorm1d(conv1_dim * 8))
        self.bn2.append(nn.BatchNorm1d(conv1_dim * 4))
        self.input_dims = input_dims
        self.mlp_out = nn.Linear(conv1_dim * 4, input_dims * input_dims)
    def forward(self, h):
        n_batch = h.shape[0]
        for conv, bn in zip(self.conv, self.bn):
            h = F.relu(bn(conv(h)))
        h = self.maxpool(h).view(-1, self.pool_feat_len)
        for fc, bn in zip(self.mlp2, self.bn2):
            h = F.relu(bn(fc(h)))
        raw = self.mlp_out(h)
        # Bias the prediction towards the identity transform.
        eye_flat = np.eye(self.input_dims).flatten().astype(np.float32)
        iden = torch.from_numpy(eye_flat).view(1, self.input_dims * self.input_dims)
        iden = iden.repeat(n_batch, 1)
        if raw.is_cuda:
            iden = iden.cuda()
        out = raw + iden
        return out.view(-1, self.input_dims, self.input_dims)
| [
"torch.nn.Dropout",
"torch.bmm",
"numpy.eye",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.MaxPool1d",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.functional.relu"
] | [((378, 393), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (391, 393), True, 'import torch.nn as nn\n'), ((601, 616), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (614, 616), True, 'import torch.nn as nn\n'), ((792, 807), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (805, 807), True, 'import torch.nn as nn\n'), ((965, 980), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (978, 980), True, 'import torch.nn as nn\n'), ((1116, 1144), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(conv1_dim * 16)'], {}), '(conv1_dim * 16)\n', (1128, 1144), True, 'import torch.nn as nn\n'), ((1210, 1225), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1223, 1225), True, 'import torch.nn as nn\n'), ((1379, 1394), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1392, 1394), True, 'import torch.nn as nn\n'), ((1529, 1544), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (1539, 1544), True, 'import torch.nn as nn\n'), ((1568, 1608), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 4)', 'output_classes'], {}), '(conv1_dim * 4, output_classes)\n', (1577, 1608), True, 'import torch.nn as nn\n'), ((3075, 3090), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3088, 3090), True, 'import torch.nn as nn\n'), ((3307, 3322), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3320, 3322), True, 'import torch.nn as nn\n'), ((3506, 3534), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(conv1_dim * 16)'], {}), '(conv1_dim * 16)\n', (3518, 3534), True, 'import torch.nn as nn\n'), ((3600, 3615), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3613, 3615), True, 'import torch.nn as nn\n'), ((3769, 3784), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3782, 3784), True, 'import torch.nn as nn\n'), ((3956, 4005), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 4)', '(input_dims * input_dims)'], {}), '(conv1_dim * 4, input_dims * input_dims)\n', (3965, 4005), True, 'import torch.nn as nn\n'), 
((420, 455), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_dims', 'conv1_dim', '(1)'], {}), '(input_dims, conv1_dim, 1)\n', (429, 455), True, 'import torch.nn as nn\n'), ((483, 517), 'torch.nn.Conv1d', 'nn.Conv1d', (['conv1_dim', 'conv1_dim', '(1)'], {}), '(conv1_dim, conv1_dim, 1)\n', (492, 517), True, 'import torch.nn as nn\n'), ((545, 579), 'torch.nn.Conv1d', 'nn.Conv1d', (['conv1_dim', 'conv1_dim', '(1)'], {}), '(conv1_dim, conv1_dim, 1)\n', (554, 579), True, 'import torch.nn as nn\n'), ((641, 666), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['conv1_dim'], {}), '(conv1_dim)\n', (655, 666), True, 'import torch.nn as nn\n'), ((692, 717), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['conv1_dim'], {}), '(conv1_dim)\n', (706, 717), True, 'import torch.nn as nn\n'), ((743, 768), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['conv1_dim'], {}), '(conv1_dim)\n', (757, 768), True, 'import torch.nn as nn\n'), ((834, 872), 'torch.nn.Conv1d', 'nn.Conv1d', (['conv1_dim', '(conv1_dim * 2)', '(1)'], {}), '(conv1_dim, conv1_dim * 2, 1)\n', (843, 872), True, 'import torch.nn as nn\n'), ((900, 943), 'torch.nn.Conv1d', 'nn.Conv1d', (['(conv1_dim * 2)', '(conv1_dim * 16)', '(1)'], {}), '(conv1_dim * 2, conv1_dim * 16, 1)\n', (909, 943), True, 'import torch.nn as nn\n'), ((1005, 1034), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 2)'], {}), '(conv1_dim * 2)\n', (1019, 1034), True, 'import torch.nn as nn\n'), ((1060, 1090), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 16)'], {}), '(conv1_dim * 16)\n', (1074, 1090), True, 'import torch.nn as nn\n'), ((1251, 1291), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 16)', '(conv1_dim * 8)'], {}), '(conv1_dim * 16, conv1_dim * 8)\n', (1260, 1291), True, 'import torch.nn as nn\n'), ((1318, 1357), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 8)', '(conv1_dim * 4)'], {}), '(conv1_dim * 8, conv1_dim * 4)\n', (1327, 1357), True, 'import torch.nn as nn\n'), ((1419, 1448), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', 
(['(conv1_dim * 8)'], {}), '(conv1_dim * 8)\n', (1433, 1448), True, 'import torch.nn as nn\n'), ((1474, 1503), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 4)'], {}), '(conv1_dim * 4)\n', (1488, 1503), True, 'import torch.nn as nn\n'), ((1763, 1789), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['input_dims'], {}), '(input_dims)\n', (1777, 1789), True, 'import torch.nn as nn\n'), ((1873, 1898), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['conv1_dim'], {}), '(conv1_dim)\n', (1887, 1898), True, 'import torch.nn as nn\n'), ((2109, 2128), 'torch.bmm', 'torch.bmm', (['h', 'trans'], {}), '(h, trans)\n', (2118, 2128), False, 'import torch\n'), ((2319, 2328), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2325, 2328), True, 'import torch.nn.functional as F\n'), ((2450, 2469), 'torch.bmm', 'torch.bmm', (['h', 'trans'], {}), '(h, trans)\n', (2459, 2469), False, 'import torch\n'), ((2660, 2669), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2666, 2669), True, 'import torch.nn.functional as F\n'), ((2838, 2847), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (2844, 2847), True, 'import torch.nn.functional as F\n'), ((3116, 3151), 'torch.nn.Conv1d', 'nn.Conv1d', (['input_dims', 'conv1_dim', '(1)'], {}), '(input_dims, conv1_dim, 1)\n', (3125, 3151), True, 'import torch.nn as nn\n'), ((3178, 3216), 'torch.nn.Conv1d', 'nn.Conv1d', (['conv1_dim', '(conv1_dim * 2)', '(1)'], {}), '(conv1_dim, conv1_dim * 2, 1)\n', (3187, 3216), True, 'import torch.nn as nn\n'), ((3243, 3286), 'torch.nn.Conv1d', 'nn.Conv1d', (['(conv1_dim * 2)', '(conv1_dim * 16)', '(1)'], {}), '(conv1_dim * 2, conv1_dim * 16, 1)\n', (3252, 3286), True, 'import torch.nn as nn\n'), ((3346, 3371), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['conv1_dim'], {}), '(conv1_dim)\n', (3360, 3371), True, 'import torch.nn as nn\n'), ((3396, 3425), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 2)'], {}), '(conv1_dim * 2)\n', (3410, 3425), True, 'import torch.nn as 
nn\n'), ((3450, 3480), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 16)'], {}), '(conv1_dim * 16)\n', (3464, 3480), True, 'import torch.nn as nn\n'), ((3641, 3681), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 16)', '(conv1_dim * 8)'], {}), '(conv1_dim * 16, conv1_dim * 8)\n', (3650, 3681), True, 'import torch.nn as nn\n'), ((3708, 3747), 'torch.nn.Linear', 'nn.Linear', (['(conv1_dim * 8)', '(conv1_dim * 4)'], {}), '(conv1_dim * 8, conv1_dim * 4)\n', (3717, 3747), True, 'import torch.nn as nn\n'), ((3809, 3838), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 8)'], {}), '(conv1_dim * 8)\n', (3823, 3838), True, 'import torch.nn as nn\n'), ((3864, 3893), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(conv1_dim * 4)'], {}), '(conv1_dim * 4)\n', (3878, 3893), True, 'import torch.nn as nn\n'), ((4176, 4185), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (4182, 4185), True, 'import torch.nn.functional as F\n'), ((4362, 4371), 'torch.nn.functional.relu', 'F.relu', (['h'], {}), '(h)\n', (4368, 4371), True, 'import torch.nn.functional as F\n'), ((4445, 4468), 'numpy.eye', 'np.eye', (['self.input_dims'], {}), '(self.input_dims)\n', (4451, 4468), True, 'import numpy as np\n')] |
"""
A collection of function handling faces.
"""
import cv2
import dlib
import numpy
PREDICTOR_PATH = "predictor_data\\shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS + JAW_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
# OVERLAY_POINTS = [LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS + JAW_POINTS]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
# Returns the default face detector
detector = dlib.get_frontal_face_detector()
# This object is a tool that takes in an image region containing some object
# and outputs a set of point locations that define the pose of the object.
# The classic example of this is human face pose prediction, where you take
# an image of a human face as input and are expected to identify the locations
# of important facial landmarks such as the corners of the mouth and eyes,
# tip of the nose, and so forth.
predictor = dlib.shape_predictor(PREDICTOR_PATH)
class ImproperNumber(Exception):
    """Raised when the number of faces detected in an image is not exactly two."""
    pass
def get_landmarks(img):
    """
    Detect faces in `img` and return their facial landmarks.

    Args:
        img: a cv2 image object.
    Return:
        A list with one 68x2 numpy matrix of (x, y) landmark points per
        detected face.
    """
    face_rects = detector(img, 1)  # 1 = upsample the image once before detecting
    all_landmarks = []
    for rect in face_rects:
        shape = predictor(img, rect)
        all_landmarks.append(numpy.matrix([[pt.x, pt.y] for pt in shape.parts()]))
    return all_landmarks
def annotate_landmarks(img, landmarks, font_scale = 0.4):
    """
    Draw landmark indices and markers on a copy of the image.

    Args:
        img: a cv2 image object.
        landmarks: numpy matrix of (x, y) points.
        font_scale: text size for the index labels.
    Return:
        A new annotated image (the input is not modified).
    """
    annotated = img.copy()
    for idx, point in enumerate(landmarks):
        position = (point[0, 0], point[0, 1])
        # Label each landmark with its index, then mark it with a small circle.
        cv2.putText(annotated, str(idx), position,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=font_scale,
                    color=(0, 0, 255))
        cv2.circle(annotated, position, 3, color=(0, 255, 255))
    return annotated
def draw_convex_hull(img, points, color):
    """
    Draw convex hull on img. Figure img will be changed after calling this function.
    """
    # cv2.fillConvexPoly fills in place, so `img` is mutated (no return value).
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(img, points, color=color)
def partial_blur(img, points, kenel_size = 9, type = 1):
    """
    Partial blur within the convex hull of `points`.

    Args:
        img: BGR image (numpy array, 3 channels).
        points: landmark points; their convex hull delimits the blurred area.
        kenel_size: blur kernel size (name kept for interface compatibility).
        type: 1 for average blur, 0 for Gaussian blur (shadows the builtin
            `type`; name kept for interface compatibility).
    Return:
        A copy of `img` with the hull region blurred.
    """
    points = cv2.convexHull(points)
    # Bug fix: cv2.blur / cv2.GaussianBlur return a *new* image rather than
    # blurring in place; the original discarded the result and later read an
    # undefined name `blur_img`, raising NameError.
    if type:
        blur_img = cv2.blur(img, (kenel_size, kenel_size))
    else:
        blur_img = cv2.GaussianBlur(img, (kenel_size, kenel_size), 0)
    copy_img = img.copy()
    black = (0, 0, 0)
    cv2.fillConvexPoly(copy_img, points, color = black)
    # Pixels painted black mark the hull; replace them with blurred pixels.
    # (Vectorized equivalent of the original per-pixel loop, which also
    # matched any pixel that happened to be pure black.)
    mask = numpy.all(copy_img == black, axis=-1)
    copy_img[mask] = blur_img[mask]
    return copy_img
def get_face_mask(img, landmarks):
    """
    Build a feathered float mask (mask area = 1) covering the eye/brow and
    nose/mouth regions defined by OVERLAY_POINTS.
    """
    mask = numpy.zeros(img.shape[:2], dtype=numpy.float64)
    for group in OVERLAY_POINTS:
        draw_convex_hull(mask,
                         landmarks[group],
                         color=1)
    # Replicate to 3 channels, (H, W, 3).
    mask = numpy.array([mask] * 3).transpose((1, 2, 0))
    # Binarise after a first blur, then blur again to feather the edge.
    mask = (cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    mask = cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
    return mask
def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that
    sum ||s*R*p1,i + T - p2,i||^2 is minimized.

    Solves the orthogonal Procrustes problem: subtract centroids, normalise
    by the standard deviation, then recover the rotation from an SVD. See
    https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
    """
    p1 = points1.astype(numpy.float64)
    p2 = points2.astype(numpy.float64)
    # Centre both point sets on their centroids.
    c1 = numpy.mean(p1, axis=0)
    c2 = numpy.mean(p2, axis=0)
    p1 = p1 - c1
    p2 = p2 - c2
    # Normalise scale.
    s1 = numpy.std(p1)
    s2 = numpy.std(p2)
    p1 = p1 / s1
    p2 = p2 / s2
    U, S, Vt = numpy.linalg.svd(p1.T * p2)
    # We need the transpose of U * Vt because the derivation assumes the
    # rotation multiplies row vectors on the right, while we apply it to
    # column vectors on the left.
    R = (U * Vt).T
    scale = s2 / s1
    affine = numpy.hstack((scale * R, c2.T - scale * R * c1.T))
    return numpy.vstack([affine, numpy.matrix([0., 0., 1.])])
def warp_im(im, M, dshape):
    """
    Warp `im` into a zero-initialised array of shape `dshape` using the
    affine matrix `M` (applied as an inverse map).
    """
    warped = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],  # cv2 expects a 2x3 matrix
                   (dshape[1], dshape[0]),
                   dst=warped,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return warped
def correct_colours(im1, im2, landmarks1):
    """
    Attempt to change the colouring of im2 to match that of im1 by dividing
    im2 by a Gaussian blur of im2 and multiplying by a Gaussian blur of im1.
    The blur size is a fraction of the pupillary distance from landmarks1.
    """
    left_eye = numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0)
    right_eye = numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0)
    blur_amount = int(COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(left_eye - right_eye))
    # Gaussian kernels must have odd size.
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))
def align_face(src, landmark_src, dest, landmark_dest):
    """
    Warp the face from `src` onto `dest`: estimate the alignment transform,
    warp the source face and its mask, colour-correct it, and blend using a
    feathered combined mask. Returns the blended uint8 image.
    """
    M = transformation_from_points(landmark_dest[ALIGN_POINTS], landmark_src[ALIGN_POINTS])
    warped_mask = warp_im(get_face_mask(src, landmark_src), M, src.shape)
    # Union of the destination face mask and the warped source mask.
    combined_mask = numpy.max([get_face_mask(dest, landmark_dest), warped_mask], axis=0)
    warped_src = warp_im(src, M, dest.shape)
    corrected_src = correct_colours(dest, warped_src, landmark_dest)
    blended = dest * (1 - combined_mask) + corrected_src * combined_mask
    return blended.astype(numpy.uint8)
def to_uint8(img):
    """
    Cast data type of numpy array to unsigned int8.
    """
    # astype truncates floats; out-of-range values wrap per numpy casting rules.
    return img.astype(numpy.uint8)
def switch_face(img_path):
    """
    Swap the two faces detected in the image at `img_path`.

    Raises:
        ImproperNumber: if the image does not contain exactly two faces.
    Returns:
        The image with the two faces swapped (uint8 array).
    """
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    landmarks = get_landmarks(img)
    # Bug fix: the original tested `< 1` but the message (and the use of
    # landmarks[1] below) show that two faces are required.
    if len(landmarks) < 2:
        raise ImproperNumber("Faces detected is less than 2!")
    if len(landmarks) > 2:
        raise ImproperNumber("Faces detected is more than 2!")
    # Paste face 0 over face 1, then face 1 over face 0.
    output = align_face(img, landmarks[0], img, landmarks[1])
    output = align_face(img, landmarks[1], output, landmarks[0])
    return output
| [
"cv2.GaussianBlur",
"numpy.matrix",
"cv2.circle",
"numpy.std",
"numpy.zeros",
"cv2.blur",
"numpy.hstack",
"cv2.warpAffine",
"cv2.imread",
"numpy.mean",
"numpy.linalg.svd",
"cv2.convexHull",
"dlib.get_frontal_face_detector",
"numpy.array",
"numpy.array_equal",
"cv2.fillConvexPoly",
"d... | [((1328, 1360), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1358, 1360), False, 'import dlib\n'), ((1796, 1832), 'dlib.shape_predictor', 'dlib.shape_predictor', (['PREDICTOR_PATH'], {}), '(PREDICTOR_PATH)\n', (1816, 1832), False, 'import dlib\n'), ((3041, 3063), 'cv2.convexHull', 'cv2.convexHull', (['points'], {}), '(points)\n', (3055, 3063), False, 'import cv2\n'), ((3068, 3112), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img', 'points'], {'color': 'color'}), '(img, points, color=color)\n', (3086, 3112), False, 'import cv2\n'), ((3335, 3357), 'cv2.convexHull', 'cv2.convexHull', (['points'], {}), '(points)\n', (3349, 3357), False, 'import cv2\n'), ((3543, 3592), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['copy_img', 'points'], {'color': 'black'}), '(copy_img, points, color=black)\n', (3561, 3592), False, 'import cv2\n'), ((3927, 3974), 'numpy.zeros', 'numpy.zeros', (['img.shape[:2]'], {'dtype': 'numpy.float64'}), '(img.shape[:2], dtype=numpy.float64)\n', (3938, 3974), False, 'import numpy\n'), ((4269, 4327), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(FEATHER_AMOUNT, FEATHER_AMOUNT)', '(0)'], {}), '(img, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n', (4285, 4327), False, 'import cv2\n'), ((4891, 4918), 'numpy.mean', 'numpy.mean', (['points1'], {'axis': '(0)'}), '(points1, axis=0)\n', (4901, 4918), False, 'import numpy\n'), ((4928, 4955), 'numpy.mean', 'numpy.mean', (['points2'], {'axis': '(0)'}), '(points2, axis=0)\n', (4938, 4955), False, 'import numpy\n'), ((5021, 5039), 'numpy.std', 'numpy.std', (['points1'], {}), '(points1)\n', (5030, 5039), False, 'import numpy\n'), ((5049, 5067), 'numpy.std', 'numpy.std', (['points2'], {}), '(points2)\n', (5058, 5067), False, 'import numpy\n'), ((5122, 5159), 'numpy.linalg.svd', 'numpy.linalg.svd', (['(points1.T * points2)'], {}), '(points1.T * points2)\n', (5138, 5159), False, 'import numpy\n'), ((5770, 5805), 'numpy.zeros', 'numpy.zeros', (['dshape'], {'dtype': 
'im.dtype'}), '(dshape, dtype=im.dtype)\n', (5781, 5805), False, 'import numpy\n'), ((5825, 5957), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'M[:2]', '(dshape[1], dshape[0])'], {'dst': 'output_im', 'borderMode': 'cv2.BORDER_TRANSPARENT', 'flags': 'cv2.WARP_INVERSE_MAP'}), '(im, M[:2], (dshape[1], dshape[0]), dst=output_im, borderMode\n =cv2.BORDER_TRANSPARENT, flags=cv2.WARP_INVERSE_MAP)\n', (5839, 5957), False, 'import cv2\n'), ((6707, 6759), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im1', '(blur_amount, blur_amount)', '(0)'], {}), '(im1, (blur_amount, blur_amount), 0)\n', (6723, 6759), False, 'import cv2\n'), ((6775, 6827), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im2', '(blur_amount, blur_amount)', '(0)'], {}), '(im2, (blur_amount, blur_amount), 0)\n', (6791, 6827), False, 'import cv2\n'), ((7983, 8021), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (7993, 8021), False, 'import cv2\n'), ((2809, 2853), 'cv2.circle', 'cv2.circle', (['img', 'pos', '(3)'], {'color': '(0, 255, 255)'}), '(img, pos, 3, color=(0, 255, 255))\n', (2819, 2853), False, 'import cv2\n'), ((3429, 3468), 'cv2.blur', 'cv2.blur', (['img', '(kenel_size, kenel_size)'], {}), '(img, (kenel_size, kenel_size))\n', (3437, 3468), False, 'import cv2\n'), ((3488, 3538), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(kenel_size, kenel_size)', '(0)'], {}), '(img, (kenel_size, kenel_size), 0)\n', (3504, 3538), False, 'import cv2\n'), ((3694, 3738), 'numpy.array_equal', 'numpy.array_equal', (['copy_img[row][col]', 'black'], {}), '(copy_img[row][col], black)\n', (3711, 3738), False, 'import numpy\n'), ((4127, 4155), 'numpy.array', 'numpy.array', (['[img, img, img]'], {}), '([img, img, img])\n', (4138, 4155), False, 'import numpy\n'), ((4189, 4247), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(FEATHER_AMOUNT, FEATHER_AMOUNT)', '(0)'], {}), '(img, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)\n', (4205, 4247), False, 'import cv2\n'), ((5474, 5528), 
'numpy.hstack', 'numpy.hstack', (['(s2 / s1 * R, c2.T - s2 / s1 * R * c1.T)'], {}), '((s2 / s1 * R, c2.T - s2 / s1 * R * c1.T))\n', (5486, 5528), False, 'import numpy\n'), ((5598, 5627), 'numpy.matrix', 'numpy.matrix', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5610, 5627), False, 'import numpy\n'), ((6473, 6520), 'numpy.mean', 'numpy.mean', (['landmarks1[LEFT_EYE_POINTS]'], {'axis': '(0)'}), '(landmarks1[LEFT_EYE_POINTS], axis=0)\n', (6483, 6520), False, 'import numpy\n'), ((6553, 6601), 'numpy.mean', 'numpy.mean', (['landmarks1[RIGHT_EYE_POINTS]'], {'axis': '(0)'}), '(landmarks1[RIGHT_EYE_POINTS], axis=0)\n', (6563, 6601), False, 'import numpy\n')] |
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
from collections import defaultdict
from heapq import heappush, heappop
from contextlib import contextmanager
import h5py
import numpy as np
from skbio.io.util import _is_string_or_bytes
from future.utils import viewitems
from natsort import natsorted
def per_sample_sequences(iter_, max_seqs, min_seqs=1, random_buf_size=100000):
    """Get a max random subset of per sample sequences

    Parameters
    ----------
    iter_ : skbio.parse.sequences.SequenceIterator
        The sequences to walk over
    max_seqs : unsigned int
        The maximum number of sequences per sample.
    min_seqs : unsigned int, optional
        The minimum number of sequences that must exist in a sample.
    random_buf_size : unsigned int, optional
        The size of the random value buffer.

    Notes
    -----
    Randomly get ``max_seqs`` for each sample. If the sample has less than
    ``max_seqs``, only those samples that have > ``min_seqs`` are returned.
    This method will at most hold ``max_seqs`` * N data, where N is the number
    of samples. All sequences associated to a sample have an equal probability
    of being retained.

    Raises
    ------
    ValueError
        If ``min_seqs`` is > ``max_seqs``.
    ValueError
        If ``min_seqs`` < 1 or if ``max_seqs`` < 1.

    Returns
    -------
    generator
        (sequence_id, sequence) where ``sequence_id`` is of the form
        sampleid_integer.
    """
    if min_seqs > max_seqs:
        raise ValueError("min_seqs cannot be > max_seqs!")
    if min_seqs < 1 or max_seqs < 1:
        raise ValueError("min_seqs and max_seqs must be > 0!")
    # Buffer some random values. Fix: sys.maxint only exists on Python 2;
    # sys.maxsize is available on both Python 2 and 3.
    random_values = np.random.randint(0, sys.maxsize, random_buf_size)
    random_idx = 0
    result = defaultdict(list)
    for record in iter_:
        # get sequence ID, sample_id, sequence and heap
        sequence_id = record['SequenceID']
        sample_id = sequence_id.rsplit('_', 1)[0]
        sequence = record['Sequence']
        heap = result[sample_id]
        # pull a random value, and recompute random values if we've consumed
        # our buffer
        random_value = random_values[random_idx]
        random_idx += 1
        if random_idx >= random_buf_size:
            random_values = np.random.randint(0, sys.maxsize, random_buf_size)
            random_idx = 0
        # Push the sequence onto the sample's heap keyed by the random value;
        # dropping the smallest key keeps a uniform random subset of max_seqs.
        heappush(heap, (random_value, sequence_id, sequence))
        if len(heap) > max_seqs:
            heappop(heap)
    # yield the sequences (dict.items() replaces the future.utils.viewitems
    # dependency; iteration behaviour is identical)
    for sid, heap in result.items():
        if len(heap) < min_seqs:
            continue
        for _, sequence_id, sequence in heap:
            yield (sequence_id, sequence)
def stats_from_df(df):
    """Create a dictionary of summary statistics for a sample or prep template

    Note: if a 'study_id' column is present it is dropped *in place*, so the
    caller's DataFrame is mutated.

    Parameters
    ----------
    df : pandas.DataFrame
        Sample or prep template metadata to summarize

    Returns
    -------
    dict of list of tuples
        {category: [(value1, count1), (value2, count2), ...], ...} where each
        tuple pairs an observed value with its frequency, in natural sort
        order of the values.
    """
    summary = {}
    if 'study_id' in df.columns:
        df.drop('study_id', axis=1, inplace=True)
    for column in list(df.columns):
        counts = df[column].value_counts()
        pairs = []
        for key in natsorted(counts.index):
            pairs.append((str(key), counts[key]))
        summary[str(column)] = pairs
    return summary
def _get_filehandle(filepath_or, *args, **kwargs):
    """Open file if `filepath_or` looks like a string/unicode/bytes, else
    pass through.

    Returns (filehandle, own_fh) where own_fh is True when this call opened
    the handle (so the caller is responsible for closing it).
    """
    if not _is_string_or_bytes(filepath_or):
        # Already a file-like object; hand it back untouched.
        return filepath_or, False
    if h5py.is_hdf5(filepath_or):
        return h5py.File(filepath_or, *args, **kwargs), True
    return open(filepath_or, *args, **kwargs), True
@contextmanager
def open_file(filepath_or, *args, **kwargs):
    """Context manager, like ``open``, but lets file handles and file like
    objects pass untouched.
    It is useful when implementing a function that can accept both
    strings and file-like objects (like numpy.loadtxt, etc).
    This method differs slightly from scikit-bio's implementation in that it
    handles HDF5 files appropriately.
    Parameters
    ----------
    filepath_or : str/bytes/unicode string or file-like
         If string, file to be opened using ``h5py.File`` if the file is an
         HDF5 file, otherwise builtin ``open`` will be used. If it is not a
         string, the object is just returned untouched.
    Other parameters
    ----------------
    args, kwargs : tuple, dict
        When `filepath_or` is a string, any extra arguments are passed
        on to the ``open`` builtin.
    Examples
    --------
    >>> with open_file('filename') as f:  # doctest: +SKIP
    ...     pass
    >>> fh = open('filename')             # doctest: +SKIP
    >>> with open_file(fh) as f:          # doctest: +SKIP
    ...     pass
    >>> fh.closed                         # doctest: +SKIP
    False
    >>> fh.close()                        # doctest: +SKIP
    """
    fh, own_fh = _get_filehandle(filepath_or, *args, **kwargs)
    try:
        yield fh
    finally:
        # Close only handles opened by this function; caller-supplied
        # file objects are left open for the caller to manage.
        if own_fh:
            fh.close()
| [
"skbio.io.util._is_string_or_bytes",
"h5py.File",
"heapq.heappush",
"future.utils.viewitems",
"heapq.heappop",
"collections.defaultdict",
"numpy.random.randint",
"h5py.is_hdf5",
"natsort.natsorted"
] | [((2067, 2116), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sys.maxint', 'random_buf_size'], {}), '(0, sys.maxint, random_buf_size)\n', (2084, 2116), True, 'import numpy as np\n'), ((2150, 2167), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2161, 2167), False, 'from collections import defaultdict\n'), ((2980, 2997), 'future.utils.viewitems', 'viewitems', (['result'], {}), '(result)\n', (2989, 2997), False, 'from future.utils import viewitems\n'), ((4284, 4316), 'skbio.io.util._is_string_or_bytes', '_is_string_or_bytes', (['filepath_or'], {}), '(filepath_or)\n', (4303, 4316), False, 'from skbio.io.util import _is_string_or_bytes\n'), ((2819, 2872), 'heapq.heappush', 'heappush', (['heap', '(random_value, sequence_id, sequence)'], {}), '(heap, (random_value, sequence_id, sequence))\n', (2827, 2872), False, 'from heapq import heappush, heappop\n'), ((4329, 4354), 'h5py.is_hdf5', 'h5py.is_hdf5', (['filepath_or'], {}), '(filepath_or)\n', (4341, 4354), False, 'import h5py\n'), ((2655, 2704), 'numpy.random.randint', 'np.random.randint', (['(0)', 'sys.maxint', 'random_buf_size'], {}), '(0, sys.maxint, random_buf_size)\n', (2672, 2704), True, 'import numpy as np\n'), ((2918, 2931), 'heapq.heappop', 'heappop', (['heap'], {}), '(heap)\n', (2925, 2931), False, 'from heapq import heappush, heappop\n'), ((4083, 4106), 'natsort.natsorted', 'natsorted', (['counts.index'], {}), '(counts.index)\n', (4092, 4106), False, 'from natsort import natsorted\n'), ((4381, 4420), 'h5py.File', 'h5py.File', (['filepath_or', '*args'], {}), '(filepath_or, *args, **kwargs)\n', (4390, 4420), False, 'import h5py\n')] |
import os
import glob
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from PIL import Image
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision import datasets, transforms
from util.tools import *
class dataset(Dataset):
    """IAM word-level dataset.

    Reads the IAM ``words.txt`` annotation file and resolves each entry to its
    image under ``image_root`` (layout: <a>/<a>-<b>/<a>-<b>-<c>.png). Images
    are resized to (img_x, img_y), converted to tensors and normalized to
    mean/std 0.5 per channel.
    """
    def __init__(self, image_root, label_root, img_x, img_y):
        """Init function should not do any heavy lifting, but
        must initialize how many items are available in this data set.
        """
        self.images_path = image_root
        self.labels_path = label_root
        self.images = []
        # Fix: read the annotation file with a context manager so the file
        # handle is closed (the original left it open until GC).
        with open(self.labels_path, "r") as fh:
            self.labels = fh.readlines()
        self.data_len = len(self.labels)
        self.transform = transforms.Compose([
            transforms.Resize((img_x, img_y)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        for line in self.labels:
            word_id = line.split(" ")[0]   # e.g. "a01-000u-00-00"
            parts = word_id.split("-")
            self.images.append(self.images_path + parts[0] + '/' + parts[0] + "-" + parts[1] + "/" + word_id + ".png")
    def __len__(self):
        """return number of points in our dataset"""
        return(self.data_len)
    def __getitem__(self, idx):
        """Return (image_tensor, label_string) for entry `idx`.
        The PyTorch DataLoader class will use this method to make an iterable
        for our training or validation loop.
        """
        # The label is the last whitespace field of the line, newline stripped.
        label = self.labels[idx].split(" ")[-1]
        img = Image.open(self.images[idx])
        img = img.convert('RGB')
        img = self.transform(img)
        return(img, label[:-1])
def loader_param(batch_size=8):
    """Return the (img_x, img_y, batch_size) tuple used to build data loaders.

    Bug fix: the `batch_size` argument was previously overwritten with a
    hard-coded 8, so callers could never change it; it is now honored
    (the default is still 8, so existing callers see no change).
    """
    img_x = 32
    img_y = 32
    return(img_x, img_y, batch_size)
def word_rep(word, letter2index, max_out_chars, device = 'cpu'):
    """Encode `word` as a dense index tensor of length `max_out_chars`.

    Each character is mapped through `letter2index`; the slot immediately
    after the last character is set to the '-PAD-' index (when there is room).

    Fixes: the original raised NameError on an empty word (the loop variable
    was never bound) and IndexError when len(word) == max_out_chars.

    Args:
        word: label string; must fit in `max_out_chars` slots.
        letter2index: mapping from character to index, containing '-PAD-'.
        max_out_chars: fixed output length.
        device: torch device for the output tensor.
    Returns:
        (tensor of shape (max_out_chars,), len(word))
    """
    pad_char = '-PAD-'
    rep = torch.zeros(max_out_chars).to(device)
    for pos, letter in enumerate(word):
        rep[pos] = letter2index[letter]
    # Terminate with the pad index when the word does not fill the tensor.
    if len(word) < max_out_chars:
        rep[len(word)] = letter2index[pad_char]
    return(rep, len(word))
def words_rep(labels_str, max_out_chars = 20, batch_size = 8, eng_alpha2index = None):
    """Batch-encode a sequence of label strings via `word_rep`.

    Fixes: the original shadowed the function name with a dead local list
    (`words_rep = []`) and kept an unused `lengths` accumulator; both removed.

    Args:
        labels_str: sequence of label strings.
        max_out_chars: fixed encoded length per word.
        batch_size: unused; kept for interface compatibility.
        eng_alpha2index: mapping from character to index (with '-PAD-').
    Returns:
        (output_cat, lengths_tensor): stacked encodings of shape
        (len(labels_str), max_out_chars) and an int tensor of word lengths.
        Both are None when `labels_str` is empty.
    """
    output_cat = None
    lengths_tensor = None
    for i, label in enumerate(labels_str):
        rep, length = word_rep(label, eng_alpha2index, max_out_chars)
        if lengths_tensor is None:
            lengths_tensor = torch.empty(len(labels_str), dtype=torch.int)
        if output_cat is None:
            # Allocate the batch tensor lazily so dtype/device follow `rep`.
            output_cat_size = list(rep.size())
            output_cat_size.insert(0, len(labels_str))
            output_cat = torch.empty(*output_cat_size, dtype=rep.dtype, device=rep.device)
        output_cat[i, :] = rep
        lengths_tensor[i] = length
    return(output_cat, lengths_tensor)
class IAMDataSet(Dataset):
    """IAM word-image dataset with CRNN-style preprocessing.

    Each sample is an image resized/right-padded to `target_size`
    (width, height), returned transposed to (width, height, channels),
    together with the label encoded as per-character indices into
    `characters`.

    NOTE(review): `cv2` is not imported in this file directly; it presumably
    comes from the `from util.tools import *` star import -- confirm.
    """
    def __init__(self, image_root, label_root, target_size=(200, 32), characters="'>' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"!#&\'()*+,-./0123456789:;?'", transform=None):
        # NOTE(review): the default `characters` literally contains the text
        # "'>' + '..." -- this looks like a quoting mistake when the default
        # was pasted in; confirm the intended alphabet.
        super(IAMDataSet, self).__init__()
        self.images_path = image_root
        self.labels_path = label_root
        self.target_size = target_size  # (width, height)
        self.height = self.target_size[1]
        self.width = self.target_size[0]
        self.characters = characters
        self.imgs = []      # "path index" strings, one per annotated word
        self.lexicons = []  # raw lines of the annotation file
        self.parse_txt()
        self.transform = transform
    def __len__(self):
        return len(self.imgs)
    def __getitem__(self, item):
        img_path, lexicon_index = self.imgs[item].split(" ")
        #print(img_path)
        # The label is the last whitespace field of the line, newline stripped.
        lexicon = self.lexicons[int(lexicon_index)].split(" ")[-1]
        lexicon = lexicon[:-1]
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_size = img.shape
        # Keep aspect ratio: scale height to `self.height`, then right-pad
        # with zeros up to `self.width`; only squash when wider than 6.4:1.
        # NOTE(review): the scale factor hard-codes 32.0 instead of
        # self.height, so the padding math breaks if the target height is
        # not 32 -- confirm.
        if (img_size[1] / (img_size[0] * 1.0)) < 6.4:
            img_reshape = cv2.resize(img, (int(32.0 / img_size[0] * img_size[1]), self.height))
            mat_ori = np.zeros((self.height, self.width - int(32.0 / img_size[0] * img_size[1]), 3), dtype=np.uint8)
            out_img = np.concatenate([img_reshape, mat_ori], axis=1).transpose([1, 0, 2])
        else:
            out_img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
            out_img = np.asarray(out_img).transpose([1, 0, 2])
        # str.find returns -1 for characters missing from the alphabet.
        label = [self.characters.find(c) for c in lexicon]
        if self.transform:
            out_img = self.transform(out_img)
        return out_img, label
    def parse_txt(self):
        """Read the annotation file and build 'image-path line-index' pairs."""
        # self.imgs = open(os.path.join(self.dataset_root, self.anno_txt_path), 'r').readlines()
        # self.lexicons = open(os.path.join(self.dataset_root, self.lexicon_path), 'r').readlines()
        self.lexicons = open(self.labels_path, "r").readlines()
        for file in range(len(self.lexicons)):
            tem = self.lexicons[file].split(" ")[0]
            temp = tem.split("-")
            self.imgs.append(self.images_path + temp[0] + '/' + temp[0] + "-" + temp[1] + "/" + tem + ".png" + " " + str(file))
if __name__ == '__main__':
img_x, img_y, batch_size = loader_param()
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
# train_set = dataset(image_root="/home/skyatmoon/COMP4550/IAM/words/", label_root = "/home/skyatmoon/COMP4550/IAM/words.txt", img_x = img_x, img_y = img_y)
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)
train_set = IAMDataSet(image_root="/home/skyatmoon/COMP4550/IAM/words/", label_root = "/home/skyatmoon/COMP4550/IAM/words.txt",
target_size=(300, 32), characters="'>' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"!#&\'()*+,-./0123456789:;?'", transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True, num_workers=0, collate_fn=custom_collate_fn)
train_iter = iter(train_loader)
samples, labels, target_lengths, input_lengths = next(train_iter)
def imshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
trial_num = np.random.randint(0, batch_size)
imshow(samples[0])
print(labels)
# eng_alphabets = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"!#&\'()*+,-./0123456789:;?'
# pad_char = '-PAD-'
# eng_alpha2index = {pad_char: 0}
# for index, alpha in enumerate(eng_alphabets):
# eng_alpha2index[alpha] = index+1
# print(eng_alpha2index)
# input_lengths = torch.full(size=(batch_size,), fill_value=img_y, dtype=torch.int)
# print(input_lengths)
# for j, (images, labels_str) in enumerate(iter(train_loader)):
# labels, target_lengths = words_rep(labels_str, max_out_chars = 20, batch_size = batch_size , eng_alpha2index = eng_alpha2index)
# print(target_lengths) | [
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"numpy.asarray",
"torch.empty",
"numpy.transpose",
"PIL.Image.open",
"numpy.random.randint",
"torch.zeros",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"numpy.concatenate",
"torchvision.transforms.ToTensor"
] | [((6136, 6251), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': '(8)', 'shuffle': '(True)', 'num_workers': '(0)', 'collate_fn': 'custom_collate_fn'}), '(train_set, batch_size=8, shuffle=True,\n num_workers=0, collate_fn=custom_collate_fn)\n', (6163, 6251), False, 'import torch\n'), ((6500, 6532), 'numpy.random.randint', 'np.random.randint', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (6517, 6532), True, 'import numpy as np\n'), ((1645, 1660), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (1655, 1660), False, 'from PIL import Image\n'), ((6468, 6478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6476, 6478), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2005), 'torch.zeros', 'torch.zeros', (['max_out_chars'], {}), '(max_out_chars)\n', (1990, 2005), False, 'import torch\n'), ((2812, 2877), 'torch.empty', 'torch.empty', (['*output_cat_size'], {'dtype': 'rep.dtype', 'device': 'rep.device'}), '(*output_cat_size, dtype=rep.dtype, device=rep.device)\n', (2823, 2877), False, 'import torch\n'), ((5399, 5420), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5418, 5420), False, 'from torchvision import datasets, transforms\n'), ((5458, 5533), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (5478, 5533), False, 'from torchvision import datasets, transforms\n'), ((6428, 6458), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (6440, 6458), True, 'import numpy as np\n'), ((817, 850), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_x, img_y)'], {}), '((img_x, img_y))\n', (834, 850), False, 'from torchvision import datasets, transforms\n'), ((866, 887), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (885, 887), False, 'from torchvision import 
datasets, transforms\n'), ((901, 955), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (921, 955), False, 'from torchvision import datasets, transforms\n'), ((4330, 4376), 'numpy.concatenate', 'np.concatenate', (['[img_reshape, mat_ori]'], {'axis': '(1)'}), '([img_reshape, mat_ori], axis=1)\n', (4344, 4376), True, 'import numpy as np\n'), ((4530, 4549), 'numpy.asarray', 'np.asarray', (['out_img'], {}), '(out_img)\n', (4540, 4549), True, 'import numpy as np\n')] |
import os
import numpy as np
if not os.path.exists('../npydata'):
os.makedirs('../npydata')
'''please set your dataset path'''
try:
VisDrone_train_path='../dataset/VisDrone/train_data_class8/images/'
VisDrone_test_path='../dataset/VisDrone/test_data_class8/images/'
train_list = []
for filename in os.listdir(VisDrone_train_path):
if filename.split('.')[1] == 'jpg':
train_list.append(VisDrone_train_path.replace('..','.')+filename)
train_list.sort()
np.save('../npydata/VisDrone_train.npy', train_list)
test_list = []
for filename in os.listdir(VisDrone_test_path):
if filename.split('.')[1] == 'jpg':
test_list.append(VisDrone_test_path.replace('..','.')+filename)
test_list.sort()
np.save('../npydata/VisDrone_test.npy', test_list)
print("Generate VisDrone image list successfully")
except:
print("The VisDrone dataset path is wrong. Please check your path.")
| [
"os.listdir",
"numpy.save",
"os.path.exists",
"os.makedirs"
] | [((40, 68), 'os.path.exists', 'os.path.exists', (['"""../npydata"""'], {}), "('../npydata')\n", (54, 68), False, 'import os\n'), ((75, 100), 'os.makedirs', 'os.makedirs', (['"""../npydata"""'], {}), "('../npydata')\n", (86, 100), False, 'import os\n'), ((333, 364), 'os.listdir', 'os.listdir', (['VisDrone_train_path'], {}), '(VisDrone_train_path)\n', (343, 364), False, 'import os\n'), ((518, 570), 'numpy.save', 'np.save', (['"""../npydata/VisDrone_train.npy"""', 'train_list'], {}), "('../npydata/VisDrone_train.npy', train_list)\n", (525, 570), True, 'import numpy as np\n'), ((616, 646), 'os.listdir', 'os.listdir', (['VisDrone_test_path'], {}), '(VisDrone_test_path)\n', (626, 646), False, 'import os\n'), ((797, 847), 'numpy.save', 'np.save', (['"""../npydata/VisDrone_test.npy"""', 'test_list'], {}), "('../npydata/VisDrone_test.npy', test_list)\n", (804, 847), True, 'import numpy as np\n')] |
import pyarrow as pa
import pandas as pd
import pyarrow.plasma as plasma
import numpy as np
client = plasma.connect('/tmp/plasma.db', '', 3)
# Create a Pandas DataFrame
d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
# Convert the Pandas DataFrame into a PyArrow RecordBatch
record_batch = pa.RecordBatch.from_pandas(df)
# Create the Plasma object from the PyArrow RecordBatch. Most of the work here
# is done to determine the size of buffer to request from the object store.
object_id = plasma.ObjectID(np.random.bytes(20))
mock_sink = pa.MockOutputStream()
stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
stream_writer.write_batch(record_batch)
stream_writer.close()
data_size = mock_sink.size()
buf = client.create(object_id, data_size)
stream = pa.FixedSizeBufferWriter(buf)
stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
stream_writer.write_batch(record_batch)
stream_writer.close()
client.seal(object_id)
data, = client.get_buffers([object_id])
buf = pa.BufferReader(data)
reader = pa.RecordBatchStreamReader(buf)
record_batch = reader.read_next_batch()
df2 = record_batch.to_pandas()
print(df2)
| [
"pandas.DataFrame",
"pyarrow.RecordBatchStreamWriter",
"pyarrow.RecordBatch.from_pandas",
"pyarrow.RecordBatchStreamReader",
"pyarrow.MockOutputStream",
"pyarrow.FixedSizeBufferWriter",
"pandas.Series",
"numpy.random.bytes",
"pyarrow.BufferReader",
"pyarrow.plasma.connect"
] | [((102, 141), 'pyarrow.plasma.connect', 'plasma.connect', (['"""/tmp/plasma.db"""', '""""""', '(3)'], {}), "('/tmp/plasma.db', '', 3)\n", (116, 141), True, 'import pyarrow.plasma as plasma\n'), ((308, 323), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (320, 323), True, 'import pandas as pd\n'), ((398, 428), 'pyarrow.RecordBatch.from_pandas', 'pa.RecordBatch.from_pandas', (['df'], {}), '(df)\n', (424, 428), True, 'import pyarrow as pa\n'), ((646, 667), 'pyarrow.MockOutputStream', 'pa.MockOutputStream', ([], {}), '()\n', (665, 667), True, 'import pyarrow as pa\n'), ((684, 742), 'pyarrow.RecordBatchStreamWriter', 'pa.RecordBatchStreamWriter', (['mock_sink', 'record_batch.schema'], {}), '(mock_sink, record_batch.schema)\n', (710, 742), True, 'import pyarrow as pa\n'), ((886, 915), 'pyarrow.FixedSizeBufferWriter', 'pa.FixedSizeBufferWriter', (['buf'], {}), '(buf)\n', (910, 915), True, 'import pyarrow as pa\n'), ((932, 987), 'pyarrow.RecordBatchStreamWriter', 'pa.RecordBatchStreamWriter', (['stream', 'record_batch.schema'], {}), '(stream, record_batch.schema)\n', (958, 987), True, 'import pyarrow as pa\n'), ((1120, 1141), 'pyarrow.BufferReader', 'pa.BufferReader', (['data'], {}), '(data)\n', (1135, 1141), True, 'import pyarrow as pa\n'), ((1151, 1182), 'pyarrow.RecordBatchStreamReader', 'pa.RecordBatchStreamReader', (['buf'], {}), '(buf)\n', (1177, 1182), True, 'import pyarrow as pa\n'), ((185, 234), 'pandas.Series', 'pd.Series', (['[1.0, 2.0, 3.0]'], {'index': "['a', 'b', 'c']"}), "([1.0, 2.0, 3.0], index=['a', 'b', 'c'])\n", (194, 234), True, 'import pandas as pd\n'), ((246, 305), 'pandas.Series', 'pd.Series', (['[1.0, 2.0, 3.0, 4.0]'], {'index': "['a', 'b', 'c', 'd']"}), "([1.0, 2.0, 3.0, 4.0], index=['a', 'b', 'c', 'd'])\n", (255, 305), True, 'import pandas as pd\n'), ((613, 632), 'numpy.random.bytes', 'np.random.bytes', (['(20)'], {}), '(20)\n', (628, 632), True, 'import numpy as np\n')] |
import pytest
import adlib27.elem_function as ef
from adlib27.autodiff import AutoDiff as AD
import numpy as np
import math
def test_sin0():
x = 1
assert ef.sin(x) == pytest.approx(np.sin(x))
# one value for one variable
def test_sin1():
# default AD object with .val=[0.0]
x = AD()
y = ef.sin(x)
assert y.val == pytest.approx(np.sin(0))
assert y.der == pytest.approx(np.cos(0) * 1)
# multiple value for one variable
def test_sin2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.sin(x)
assert y.val == pytest.approx(np.sin([1,2]))
assert y.der[0] == pytest.approx(np.cos([1,2]) * 1)
# one value for multiple variable
def test_sin3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.sin(x1+x2)
assert y.val == pytest.approx(np.sin([3]))
assert y.der[0] == pytest.approx(np.cos(3) * 1)
assert y.der[1] == pytest.approx(np.cos(3) * 1)
# multiple value for multiple variable
def test_sin4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.sin(x1 + x2)
assert y.val == pytest.approx(np.sin([3,5]))
assert y.der[0] == pytest.approx(np.cos([3,5]) * 1)
assert y.der[1] == pytest.approx(np.cos([3,5]) * 1)
def test_cos0():
x = 1
assert ef.cos(x) == pytest.approx(np.cos(x))
# one value for one variable
def test_cos1():
# default AD object with .val=[0.0]
x = AD()
y = ef.cos(x)
assert y.val == pytest.approx(np.cos(0))
assert y.der == pytest.approx(- np.sin(0) * 1)
# multiple value for one variable
def test_cos2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.cos(x)
assert y.val == pytest.approx(np.cos([1,2]))
assert y.der[0] == pytest.approx(-np.sin([1,2]) * 1)
# one value for multiple variable
def test_cos3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.cos(x1+x2)
assert y.val == pytest.approx(np.cos([3]))
assert y.der[0] == pytest.approx(-np.sin(3) * 1)
assert y.der[1] == pytest.approx(-np.sin(3) * 1)
# multiple value for multiple variable
def test_cos4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.cos(x1 + x2)
assert y.val == pytest.approx(np.cos([3,5]))
assert y.der[0] == pytest.approx(-np.sin([3,5]) * 1)
assert y.der[1] == pytest.approx(-np.sin([3,5]) * 1)
def test_tan0():
x = 1
assert ef.tan(x) == pytest.approx(np.tan(x))
# one value for one variable
def test_tan1():
# default AD object with .val=[0.0]
x = AD()
y = ef.tan(x)
assert y.val == pytest.approx(np.tan(0))
assert y.der == pytest.approx((1 / np.cos(0))**2 * 1)
# multiple value for one variable
def test_tan2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.tan(x)
assert y.val == pytest.approx(np.tan([1,2]))
assert y.der[0] == pytest.approx((1 / np.cos([1,2]))**2 * 1)
# one value for multiple variable
def test_tan3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.tan(x1+x2)
assert y.val == pytest.approx(np.tan([3]))
assert y.der[0] == pytest.approx((1 / np.cos(3))**2 * 1)
assert y.der[1] == pytest.approx((1 / np.cos(3))**2 * 1)
# multiple value for multiple variable
def test_tan4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.tan(x1 + x2)
assert y.val == pytest.approx(np.tan([3,5]))
assert y.der[0] == pytest.approx((1 / np.cos([3,5]))**2 * 1)
assert y.der[1] == pytest.approx((1 / np.cos([3,5]))**2 * 1)
# Inverse trig functions
def test_arcsin0():
x = 1
assert ef.arcsin(x) == pytest.approx(np.arcsin(x))
# one value for one variable
def test_arcsin1():
# default AD object with .val=[0.0]
x = AD()
y = ef.arcsin(x)
assert y.val == pytest.approx(np.arcsin(0))
assert y.der[0] == pytest.approx(1 / np.sqrt(1 - 0 **2) * 1)
# multiple value for multiple variable
def test_arcsin4():
# AD object with .val=[1,2]
x1 = AD(val=[0.1,0.2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[0.2,0.3], index=1, magnitude=2)
y = ef.arcsin(x1 + x2)
assert y.val == pytest.approx(np.arcsin([0.3,0.5]))
assert y.der[0] == pytest.approx([1.0482848367219182, 1.1547005383792517])
assert y.der[1] == pytest.approx([1.0482848367219182, 1.1547005383792517])
def test_arccos0():
x = 1
assert ef.arccos(x) == pytest.approx(np.arccos(x))
# one value for one variable
def test_arccos1():
# default AD object with .val=[0.0]
x = AD(val=[0.8])
y = ef.arccos(x)
assert y.val == pytest.approx(np.arccos(0.8))
assert y.der[0] == pytest.approx(-1 / np.sqrt(1 - 0.8 **2) * 1)
# multiple value for multiple variable
def test_arccos4():
# AD object with .val=[1,2]
x1 = AD(val=[0.3,0.4], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[0.4,0.5], index=1, magnitude=2)
y = ef.arccos(x1 + x2)
assert y.val == pytest.approx(np.arccos([0.7,0.9]))
assert y.der[0] == pytest.approx([-1.4002800840280099, -2.294157338705618])
assert y.der[1] == pytest.approx([-1.4002800840280099, -2.294157338705618])
def test_arctan0():
x = 1
assert ef.arctan(x) == pytest.approx(np.arctan(x))
# one value for one variable
def test_arctan1():
# default AD object with .val=[0.0]
x = AD(val=[0.8])
y = ef.arctan(x)
assert y.val == pytest.approx(np.arctan(0.8))
assert y.der[0][0] == pytest.approx(1 / (1 + 0.8 **2) * 1)
# multiple value for multiple variable
def test_arctan4():
# AD object with .val=[1,2]
x1 = AD(val=[0.3,0.4], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[0.4,0.5], index=1, magnitude=2)
y = ef.arctan(x1 + x2)
assert y.val == pytest.approx(np.arctan([0.7,0.9]))
assert y.der[0] == pytest.approx([0.6711409395973155, 0.5524861878453039])
assert y.der[1] == pytest.approx([0.6711409395973155, 0.5524861878453039])
def test_exp0():
x = 1
assert ef.exp(x) == pytest.approx(np.exp(x))
# one value for one variable
def test_exp1():
# default AD object with .val=[0.0]
x = AD()
y = ef.exp(x)
assert y.val == pytest.approx(np.exp(0))
assert y.der == pytest.approx(np.exp(0) * 1)
# multiple value for one variable
def test_exp2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.exp(x)
assert y.val == pytest.approx(np.exp([1,2]))
assert y.der[0] == pytest.approx(np.exp([1,2]) * 1)
# one value for multiple variable
def test_exp3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.exp(x1+x2)
assert y.val == pytest.approx(np.exp([3]))
assert y.der[0] == pytest.approx(np.exp(3) * 1)
assert y.der[1] == pytest.approx(np.exp(3) * 1)
# multiple value for multiple variable
def test_exp4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.exp(x1 + x2)
assert y.val == pytest.approx(np.exp([3,5]))
assert y.der[0] == pytest.approx(np.exp([3,5]) * 1)
assert y.der[1] == pytest.approx(np.exp([3,5]) * 1)
# Hyperbolic functions (sinh, cosh, tanh)
def test_sinh0():
x = 1
assert ef.sinh(x) == pytest.approx(np.sinh(x))
# one value for one variable
def test_sinh1():
# default AD object with .val=[0.0]
x = AD()
y = ef.sinh(x)
assert y.val == pytest.approx(np.sinh(0))
assert y.der == pytest.approx(np.cosh(0) * 1)
# multiple value for one variable
def test_sinh2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.sinh(x)
assert y.val == pytest.approx(np.sinh([1,2]))
assert y.der[0] == pytest.approx(np.cosh([1,2]) * 1)
# one value for multiple variable
def test_sinh3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.sinh(x1+x2)
assert y.val == pytest.approx(np.sinh([3]))
assert y.der[0] == pytest.approx(np.cosh(3) * 1)
assert y.der[1] == pytest.approx(np.cosh(3) * 1)
# multiple value for multiple variable
def test_sinh4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.sinh(x1 + x2)
assert y.val == pytest.approx(np.sinh([3,5]))
assert y.der[0] == pytest.approx(np.cosh([3,5]) * 1)
assert y.der[1] == pytest.approx(np.cosh([3,5]) * 1)
def test_cosh0():
x = 1
assert ef.cosh(x) == pytest.approx(np.cosh(x))
# one value for one variable
def test_cosh1():
# default AD object with .val=[0.0]
x = AD()
y = ef.cosh(x)
assert y.val == pytest.approx(np.cosh(0))
assert y.der == pytest.approx(np.sinh(0) * 1)
# multiple value for one variable
def test_cosh2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.cosh(x)
assert y.val == pytest.approx(np.cosh([1,2]))
assert y.der[0] == pytest.approx(np.sinh([1,2]) * 1)
# one value for multiple variable
def test_cosh3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.cosh(x1+x2)
assert y.val == pytest.approx(np.cosh([3]))
assert y.der[0] == pytest.approx(np.sinh(3) * 1)
assert y.der[1] == pytest.approx(np.sinh(3) * 1)
# multiple value for multiple variable
def test_cosh4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.cosh(x1 + x2)
assert y.val == pytest.approx(np.cosh([3,5]))
assert y.der[0] == pytest.approx(np.sinh([3,5]) * 1)
assert y.der[1] == pytest.approx(np.sinh([3,5]) * 1)
def test_tanh0():
x = 1
assert ef.tanh(x) == pytest.approx(np.tanh(x))
# one value for one variable
def test_tanh1():
# default AD object with .val=[0.0]
x = AD()
y = ef.tanh(x)
assert y.val == pytest.approx(np.tanh(0))
assert y.der == pytest.approx(1 / np.cosh(0))
# multiple value for one variable
def test_tanh2():
# AD object with .val=[1,2]
x = AD(val=[1,2], index=0, magnitude=1)
y = ef.tanh(x)
assert y.val == pytest.approx(np.tanh([1,2]))
assert y.der[0] == pytest.approx(1 / np.cosh([1,2]))
# one value for multiple variable
def test_tanh3():
# AD object with .val=[1]
x1 = AD(val=[1], index=0, magnitude=2)
# AD object with .val=[2]
x2 = AD(val=[2], index=1, magnitude=2)
y = ef.tanh(x1+x2)
assert y.val == pytest.approx(np.tanh([3]))
assert y.der[0] == pytest.approx(1 / np.cosh(3))
assert y.der[1] == pytest.approx(1 / np.cosh(3))
# multiple value for multiple variable
def test_tanh4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.tanh(x1 + x2)
assert y.val == pytest.approx(np.tanh([3,5]))
assert y.der[0] == pytest.approx(np.sinh(1 / np.cosh([3,5])),rel=1e-2)
assert y.der[1] == pytest.approx(np.sinh(1 / np.cosh([3,5])),rel=1e-2)
# Logistic function
# one value for one variable
def test_logistic1():
# default AD object with .val=[0.0]
x = AD()
y = ef.logistic(x)
assert y.val[0] == pytest.approx(0.5)
assert y.der[0][0] == pytest.approx(0.25)
# multiple value for multiple variable
def test_logistic4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.logistic(x1 + x2)
assert y.val == pytest.approx([0.95257412,0.99330714])
assert y.der[0] == pytest.approx([0.045176659730, 0.006648056670])
assert y.der[1] == pytest.approx([0.045176659730, 0.006648056670])
def test_log0():
x = 1
assert ef.log(x) == pytest.approx(np.log(x))
# one value for one variable
def test_log1():
# default AD object with .val=[0.0]
x = AD(val=[2])
y = ef.log(x)
assert y.val[0] == pytest.approx(np.log(2))
assert y.der[0][0] == pytest.approx((1/2*1))
# multiple value for multiple variable
def test_log4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.log(x1 + x2)
assert y.val == pytest.approx(np.log([3,5]))
assert y.der[0] == pytest.approx([0.3333333333333333, 0.2])
assert y.der[1] == pytest.approx([0.3333333333333333, 0.2])
def test_log2_0():
x = 1
assert ef.log2(x) == pytest.approx(np.log2(x))
# one value for one variable
def test_log2_1():
# default AD object with .val=[0.0]
x = AD(val=[2])
y = ef.log2(x)
assert y.val[0] == pytest.approx(np.log2(2))
assert y.der[0][0] == pytest.approx((1/(2* np.log(2)) *1))
# multiple value for multiple variable
def test_log2_4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.log2(x1 + x2)
assert y.val == pytest.approx(np.log2([3,5]))
assert y.der[0] == pytest.approx([0.48089834696298783, 0.28853900817779266])
assert y.der[1] == pytest.approx([0.48089834696298783, 0.28853900817779266])
def test_log10_0():
x = 1
assert ef.log10(x) == pytest.approx(np.log10(x))
# one value for one variable
def test_log10_1():
# default AD object with .val=[0.0]
x = AD(val=[2])
y = ef.log10(x)
assert y.val[0] == pytest.approx(np.log10(2))
assert y.der[0][0] == pytest.approx((1/(2* np.log(10)) *1))
# multiple value for multiple variable
def test_log10_4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.log10(x1 + x2)
assert y.val == pytest.approx(np.log10([3,5]))
assert y.der[0] == pytest.approx([0.14476482730108392, 0.08685889638065035])
assert y.der[1] == pytest.approx([0.14476482730108392, 0.08685889638065035])
def test_logb():
x = 1
assert ef.logb(x,2) == pytest.approx(math.log(x,2))
# one value for one variable
def test_logb1():
# default AD object with .val=[0.0]
x = AD(val=[2])
y = ef.logb(x,3)
assert y.val[0] == pytest.approx(math.log(2,3))
assert y.der[0][0] == pytest.approx((1/(2* np.log(3)) *1))
# multiple value for multiple variable
def test_logb4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.logb(x1 + x2,3)
assert y.val[0] == pytest.approx(math.log(3,3))
assert y.val[1] == pytest.approx(math.log(5, 3))
assert y.der[0] == pytest.approx([0.30341307554227914, 0.18204784532536747])
assert y.der[1] == pytest.approx([0.30341307554227914, 0.18204784532536747])
def test_sqrt0():
x = 1
assert ef.sqrt(x) == pytest.approx(np.sqrt(x))
# one value for one variable
def test_sqrt1():
# default AD object with .val=[0.0]
x = AD(val=[2])
y = ef.sqrt(x)
assert y.val == pytest.approx(np.sqrt(2))
assert y.der[0] == pytest.approx(0.5/np.sqrt(2) *1)
# multiple value for multiple variable
def test_sqrt4():
# AD object with .val=[1,2]
x1 = AD(val=[1,2], index=0, magnitude=2)
# AD object with .val=[2,3]
x2 = AD(val=[2,3], index=1, magnitude=2)
y = ef.sqrt(x1 + x2)
assert y.val == pytest.approx(np.sqrt([3,5]))
assert y.der[0] == pytest.approx(0.5/np.sqrt([3,5]) *1)
assert y.der[1] == pytest.approx(0.5/np.sqrt([3,5]) *1)
| [
"adlib27.elem_function.sinh",
"adlib27.elem_function.arccos",
"adlib27.elem_function.log10",
"numpy.sin",
"numpy.exp",
"adlib27.elem_function.sqrt",
"adlib27.elem_function.log",
"adlib27.elem_function.exp",
"numpy.arcsin",
"adlib27.elem_function.cos",
"numpy.tan",
"adlib27.autodiff.AutoDiff",
... | [((297, 301), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (299, 301), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((310, 319), 'adlib27.elem_function.sin', 'ef.sin', (['x'], {}), '(x)\n', (316, 319), True, 'import adlib27.elem_function as ef\n'), ((506, 542), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (508, 542), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((550, 559), 'adlib27.elem_function.sin', 'ef.sin', (['x'], {}), '(x)\n', (556, 559), True, 'import adlib27.elem_function as ef\n'), ((756, 789), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (758, 789), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((829, 862), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (831, 862), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((871, 886), 'adlib27.elem_function.sin', 'ef.sin', (['(x1 + x2)'], {}), '(x1 + x2)\n', (877, 886), True, 'import adlib27.elem_function as ef\n'), ((1135, 1171), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (1137, 1171), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((1212, 1248), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (1214, 1248), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((1256, 1271), 'adlib27.elem_function.sin', 'ef.sin', (['(x1 + x2)'], {}), '(x1 + x2)\n', (1262, 1271), True, 'import adlib27.elem_function as ef\n'), ((1605, 1609), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (1607, 1609), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((1618, 1627), 'adlib27.elem_function.cos', 'ef.cos', (['x'], {}), '(x)\n', 
(1624, 1627), True, 'import adlib27.elem_function as ef\n'), ((1816, 1852), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (1818, 1852), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((1860, 1869), 'adlib27.elem_function.cos', 'ef.cos', (['x'], {}), '(x)\n', (1866, 1869), True, 'import adlib27.elem_function as ef\n'), ((2067, 2100), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (2069, 2100), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((2140, 2173), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (2142, 2173), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((2182, 2197), 'adlib27.elem_function.cos', 'ef.cos', (['(x1 + x2)'], {}), '(x1 + x2)\n', (2188, 2197), True, 'import adlib27.elem_function as ef\n'), ((2448, 2484), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (2450, 2484), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((2525, 2561), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (2527, 2561), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((2569, 2584), 'adlib27.elem_function.cos', 'ef.cos', (['(x1 + x2)'], {}), '(x1 + x2)\n', (2575, 2584), True, 'import adlib27.elem_function as ef\n'), ((2920, 2924), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (2922, 2924), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((2933, 2942), 'adlib27.elem_function.tan', 'ef.tan', (['x'], {}), '(x)\n', (2939, 2942), True, 'import adlib27.elem_function as ef\n'), ((3138, 3174), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], 
index=0, magnitude=1)\n', (3140, 3174), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((3182, 3191), 'adlib27.elem_function.tan', 'ef.tan', (['x'], {}), '(x)\n', (3188, 3191), True, 'import adlib27.elem_function as ef\n'), ((3397, 3430), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (3399, 3430), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((3470, 3503), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (3472, 3503), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((3512, 3527), 'adlib27.elem_function.tan', 'ef.tan', (['(x1 + x2)'], {}), '(x1 + x2)\n', (3518, 3527), True, 'import adlib27.elem_function as ef\n'), ((3794, 3830), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (3796, 3830), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((3871, 3907), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (3873, 3907), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((3915, 3930), 'adlib27.elem_function.tan', 'ef.tan', (['(x1 + x2)'], {}), '(x1 + x2)\n', (3921, 3930), True, 'import adlib27.elem_function as ef\n'), ((4321, 4325), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (4323, 4325), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((4334, 4346), 'adlib27.elem_function.arcsin', 'ef.arcsin', (['x'], {}), '(x)\n', (4343, 4346), True, 'import adlib27.elem_function as ef\n'), ((4561, 4601), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.1, 0.2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[0.1, 0.2], index=0, magnitude=2)\n', (4563, 4601), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((4642, 4682), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.2, 0.3]', 
'index': '(1)', 'magnitude': '(2)'}), '(val=[0.2, 0.3], index=1, magnitude=2)\n', (4644, 4682), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((4690, 4708), 'adlib27.elem_function.arcsin', 'ef.arcsin', (['(x1 + x2)'], {}), '(x1 + x2)\n', (4699, 4708), True, 'import adlib27.elem_function as ef\n'), ((5108, 5121), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.8]'}), '(val=[0.8])\n', (5110, 5121), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((5130, 5142), 'adlib27.elem_function.arccos', 'ef.arccos', (['x'], {}), '(x)\n', (5139, 5142), True, 'import adlib27.elem_function as ef\n'), ((5362, 5402), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.3, 0.4]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[0.3, 0.4], index=0, magnitude=2)\n', (5364, 5402), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((5443, 5483), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.4, 0.5]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[0.4, 0.5], index=1, magnitude=2)\n', (5445, 5483), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((5491, 5509), 'adlib27.elem_function.arccos', 'ef.arccos', (['(x1 + x2)'], {}), '(x1 + x2)\n', (5500, 5509), True, 'import adlib27.elem_function as ef\n'), ((5911, 5924), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.8]'}), '(val=[0.8])\n', (5913, 5924), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((5933, 5945), 'adlib27.elem_function.arctan', 'ef.arctan', (['x'], {}), '(x)\n', (5942, 5945), True, 'import adlib27.elem_function as ef\n'), ((6160, 6200), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.3, 0.4]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[0.3, 0.4], index=0, magnitude=2)\n', (6162, 6200), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((6241, 6281), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[0.4, 0.5]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[0.4, 0.5], index=1, magnitude=2)\n', (6243, 6281), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((6289, 
6307), 'adlib27.elem_function.arctan', 'ef.arctan', (['(x1 + x2)'], {}), '(x1 + x2)\n', (6298, 6307), True, 'import adlib27.elem_function as ef\n'), ((6694, 6698), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (6696, 6698), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((6707, 6716), 'adlib27.elem_function.exp', 'ef.exp', (['x'], {}), '(x)\n', (6713, 6716), True, 'import adlib27.elem_function as ef\n'), ((6903, 6939), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (6905, 6939), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((6947, 6956), 'adlib27.elem_function.exp', 'ef.exp', (['x'], {}), '(x)\n', (6953, 6956), True, 'import adlib27.elem_function as ef\n'), ((7153, 7186), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (7155, 7186), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((7226, 7259), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (7228, 7259), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((7268, 7283), 'adlib27.elem_function.exp', 'ef.exp', (['(x1 + x2)'], {}), '(x1 + x2)\n', (7274, 7283), True, 'import adlib27.elem_function as ef\n'), ((7532, 7568), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (7534, 7568), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((7609, 7645), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (7611, 7645), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((7653, 7668), 'adlib27.elem_function.exp', 'ef.exp', (['(x1 + x2)'], {}), '(x1 + x2)\n', (7659, 7668), True, 'import adlib27.elem_function as ef\n'), ((8049, 8053), 'adlib27.autodiff.AutoDiff', 
'AD', ([], {}), '()\n', (8051, 8053), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((8062, 8072), 'adlib27.elem_function.sinh', 'ef.sinh', (['x'], {}), '(x)\n', (8069, 8072), True, 'import adlib27.elem_function as ef\n'), ((8262, 8298), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (8264, 8298), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((8306, 8316), 'adlib27.elem_function.sinh', 'ef.sinh', (['x'], {}), '(x)\n', (8313, 8316), True, 'import adlib27.elem_function as ef\n'), ((8516, 8549), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (8518, 8549), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((8589, 8622), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (8591, 8622), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((8631, 8647), 'adlib27.elem_function.sinh', 'ef.sinh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (8638, 8647), True, 'import adlib27.elem_function as ef\n'), ((8900, 8936), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (8902, 8936), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((8977, 9013), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (8979, 9013), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((9021, 9037), 'adlib27.elem_function.sinh', 'ef.sinh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (9028, 9037), True, 'import adlib27.elem_function as ef\n'), ((9378, 9382), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (9380, 9382), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((9391, 9401), 'adlib27.elem_function.cosh', 'ef.cosh', (['x'], {}), '(x)\n', (9398, 
9401), True, 'import adlib27.elem_function as ef\n'), ((9591, 9627), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (9593, 9627), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((9635, 9645), 'adlib27.elem_function.cosh', 'ef.cosh', (['x'], {}), '(x)\n', (9642, 9645), True, 'import adlib27.elem_function as ef\n'), ((9845, 9878), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (9847, 9878), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((9918, 9951), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (9920, 9951), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((9960, 9976), 'adlib27.elem_function.cosh', 'ef.cosh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (9967, 9976), True, 'import adlib27.elem_function as ef\n'), ((10229, 10265), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (10231, 10265), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((10306, 10342), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (10308, 10342), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((10350, 10366), 'adlib27.elem_function.cosh', 'ef.cosh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (10357, 10366), True, 'import adlib27.elem_function as ef\n'), ((10707, 10711), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (10709, 10711), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((10720, 10730), 'adlib27.elem_function.tanh', 'ef.tanh', (['x'], {}), '(x)\n', (10727, 10730), True, 'import adlib27.elem_function as ef\n'), ((10920, 10956), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': 
'(1)'}), '(val=[1, 2], index=0, magnitude=1)\n', (10922, 10956), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((10964, 10974), 'adlib27.elem_function.tanh', 'ef.tanh', (['x'], {}), '(x)\n', (10971, 10974), True, 'import adlib27.elem_function as ef\n'), ((11174, 11207), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1], index=0, magnitude=2)\n', (11176, 11207), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((11247, 11280), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2], index=1, magnitude=2)\n', (11249, 11280), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((11289, 11305), 'adlib27.elem_function.tanh', 'ef.tanh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (11296, 11305), True, 'import adlib27.elem_function as ef\n'), ((11558, 11594), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (11560, 11594), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((11635, 11671), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (11637, 11671), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((11679, 11695), 'adlib27.elem_function.tanh', 'ef.tanh', (['(x1 + x2)'], {}), '(x1 + x2)\n', (11686, 11695), True, 'import adlib27.elem_function as ef\n'), ((12018, 12022), 'adlib27.autodiff.AutoDiff', 'AD', ([], {}), '()\n', (12020, 12022), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((12031, 12045), 'adlib27.elem_function.logistic', 'ef.logistic', (['x'], {}), '(x)\n', (12042, 12045), True, 'import adlib27.elem_function as ef\n'), ((12238, 12274), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (12240, 12274), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((12315, 
12351), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (12317, 12351), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((12359, 12379), 'adlib27.elem_function.logistic', 'ef.logistic', (['(x1 + x2)'], {}), '(x1 + x2)\n', (12370, 12379), True, 'import adlib27.elem_function as ef\n'), ((12753, 12764), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]'}), '(val=[2])\n', (12755, 12764), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((12773, 12782), 'adlib27.elem_function.log', 'ef.log', (['x'], {}), '(x)\n', (12779, 12782), True, 'import adlib27.elem_function as ef\n'), ((12979, 13015), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (12981, 13015), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((13056, 13092), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (13058, 13092), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((13100, 13115), 'adlib27.elem_function.log', 'ef.log', (['(x1 + x2)'], {}), '(x1 + x2)\n', (13106, 13115), True, 'import adlib27.elem_function as ef\n'), ((13472, 13483), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]'}), '(val=[2])\n', (13474, 13483), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((13492, 13502), 'adlib27.elem_function.log2', 'ef.log2', (['x'], {}), '(x)\n', (13499, 13502), True, 'import adlib27.elem_function as ef\n'), ((13716, 13752), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (13718, 13752), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((13793, 13829), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (13795, 13829), True, 'from 
adlib27.autodiff import AutoDiff as AD\n'), ((13837, 13853), 'adlib27.elem_function.log2', 'ef.log2', (['(x1 + x2)'], {}), '(x1 + x2)\n', (13844, 13853), True, 'import adlib27.elem_function as ef\n'), ((14248, 14259), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]'}), '(val=[2])\n', (14250, 14259), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((14268, 14279), 'adlib27.elem_function.log10', 'ef.log10', (['x'], {}), '(x)\n', (14276, 14279), True, 'import adlib27.elem_function as ef\n'), ((14496, 14532), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (14498, 14532), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((14573, 14609), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (14575, 14609), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((14617, 14634), 'adlib27.elem_function.log10', 'ef.log10', (['(x1 + x2)'], {}), '(x1 + x2)\n', (14625, 14634), True, 'import adlib27.elem_function as ef\n'), ((15028, 15039), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]'}), '(val=[2])\n', (15030, 15039), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((15048, 15061), 'adlib27.elem_function.logb', 'ef.logb', (['x', '(3)'], {}), '(x, 3)\n', (15055, 15061), True, 'import adlib27.elem_function as ef\n'), ((15276, 15312), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (15278, 15312), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((15353, 15389), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (15355, 15389), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((15397, 15416), 'adlib27.elem_function.logb', 'ef.logb', (['(x1 + x2)', '(3)'], {}), '(x1 + x2, 3)\n', (15404, 15416), 
True, 'import adlib27.elem_function as ef\n'), ((15860, 15871), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2]'}), '(val=[2])\n', (15862, 15871), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((15880, 15890), 'adlib27.elem_function.sqrt', 'ef.sqrt', (['x'], {}), '(x)\n', (15887, 15890), True, 'import adlib27.elem_function as ef\n'), ((16093, 16129), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[1, 2]', 'index': '(0)', 'magnitude': '(2)'}), '(val=[1, 2], index=0, magnitude=2)\n', (16095, 16129), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((16170, 16206), 'adlib27.autodiff.AutoDiff', 'AD', ([], {'val': '[2, 3]', 'index': '(1)', 'magnitude': '(2)'}), '(val=[2, 3], index=1, magnitude=2)\n', (16172, 16206), True, 'from adlib27.autodiff import AutoDiff as AD\n'), ((16214, 16230), 'adlib27.elem_function.sqrt', 'ef.sqrt', (['(x1 + x2)'], {}), '(x1 + x2)\n', (16221, 16230), True, 'import adlib27.elem_function as ef\n'), ((164, 173), 'adlib27.elem_function.sin', 'ef.sin', (['x'], {}), '(x)\n', (170, 173), True, 'import adlib27.elem_function as ef\n'), ((1472, 1481), 'adlib27.elem_function.cos', 'ef.cos', (['x'], {}), '(x)\n', (1478, 1481), True, 'import adlib27.elem_function as ef\n'), ((2787, 2796), 'adlib27.elem_function.tan', 'ef.tan', (['x'], {}), '(x)\n', (2793, 2796), True, 'import adlib27.elem_function as ef\n'), ((4179, 4191), 'adlib27.elem_function.arcsin', 'ef.arcsin', (['x'], {}), '(x)\n', (4188, 4191), True, 'import adlib27.elem_function as ef\n'), ((4788, 4843), 'pytest.approx', 'pytest.approx', (['[1.0482848367219182, 1.1547005383792517]'], {}), '([1.0482848367219182, 1.1547005383792517])\n', (4801, 4843), False, 'import pytest\n'), ((4867, 4922), 'pytest.approx', 'pytest.approx', (['[1.0482848367219182, 1.1547005383792517]'], {}), '([1.0482848367219182, 1.1547005383792517])\n', (4880, 4922), False, 'import pytest\n'), ((4966, 4978), 'adlib27.elem_function.arccos', 'ef.arccos', (['x'], {}), '(x)\n', (4975, 4978), True, 'import 
adlib27.elem_function as ef\n'), ((5589, 5645), 'pytest.approx', 'pytest.approx', (['[-1.4002800840280099, -2.294157338705618]'], {}), '([-1.4002800840280099, -2.294157338705618])\n', (5602, 5645), False, 'import pytest\n'), ((5669, 5725), 'pytest.approx', 'pytest.approx', (['[-1.4002800840280099, -2.294157338705618]'], {}), '([-1.4002800840280099, -2.294157338705618])\n', (5682, 5725), False, 'import pytest\n'), ((5769, 5781), 'adlib27.elem_function.arctan', 'ef.arctan', (['x'], {}), '(x)\n', (5778, 5781), True, 'import adlib27.elem_function as ef\n'), ((6022, 6059), 'pytest.approx', 'pytest.approx', (['(1 / (1 + 0.8 ** 2) * 1)'], {}), '(1 / (1 + 0.8 ** 2) * 1)\n', (6035, 6059), False, 'import pytest\n'), ((6387, 6442), 'pytest.approx', 'pytest.approx', (['[0.6711409395973155, 0.5524861878453039]'], {}), '([0.6711409395973155, 0.5524861878453039])\n', (6400, 6442), False, 'import pytest\n'), ((6466, 6521), 'pytest.approx', 'pytest.approx', (['[0.6711409395973155, 0.5524861878453039]'], {}), '([0.6711409395973155, 0.5524861878453039])\n', (6479, 6521), False, 'import pytest\n'), ((6561, 6570), 'adlib27.elem_function.exp', 'ef.exp', (['x'], {}), '(x)\n', (6567, 6570), True, 'import adlib27.elem_function as ef\n'), ((7913, 7923), 'adlib27.elem_function.sinh', 'ef.sinh', (['x'], {}), '(x)\n', (7920, 7923), True, 'import adlib27.elem_function as ef\n'), ((9242, 9252), 'adlib27.elem_function.cosh', 'ef.cosh', (['x'], {}), '(x)\n', (9249, 9252), True, 'import adlib27.elem_function as ef\n'), ((10571, 10581), 'adlib27.elem_function.tanh', 'ef.tanh', (['x'], {}), '(x)\n', (10578, 10581), True, 'import adlib27.elem_function as ef\n'), ((12069, 12087), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (12082, 12087), False, 'import pytest\n'), ((12114, 12133), 'pytest.approx', 'pytest.approx', (['(0.25)'], {}), '(0.25)\n', (12127, 12133), False, 'import pytest\n'), ((12400, 12439), 'pytest.approx', 'pytest.approx', (['[0.95257412, 0.99330714]'], {}), 
'([0.95257412, 0.99330714])\n', (12413, 12439), False, 'import pytest\n'), ((12462, 12507), 'pytest.approx', 'pytest.approx', (['[0.04517665973, 0.00664805667]'], {}), '([0.04517665973, 0.00664805667])\n', (12475, 12507), False, 'import pytest\n'), ((12533, 12578), 'pytest.approx', 'pytest.approx', (['[0.04517665973, 0.00664805667]'], {}), '([0.04517665973, 0.00664805667])\n', (12546, 12578), False, 'import pytest\n'), ((12620, 12629), 'adlib27.elem_function.log', 'ef.log', (['x'], {}), '(x)\n', (12626, 12629), True, 'import adlib27.elem_function as ef\n'), ((12857, 12881), 'pytest.approx', 'pytest.approx', (['(1 / 2 * 1)'], {}), '(1 / 2 * 1)\n', (12870, 12881), False, 'import pytest\n'), ((13188, 13228), 'pytest.approx', 'pytest.approx', (['[0.3333333333333333, 0.2]'], {}), '([0.3333333333333333, 0.2])\n', (13201, 13228), False, 'import pytest\n'), ((13252, 13292), 'pytest.approx', 'pytest.approx', (['[0.3333333333333333, 0.2]'], {}), '([0.3333333333333333, 0.2])\n', (13265, 13292), False, 'import pytest\n'), ((13335, 13345), 'adlib27.elem_function.log2', 'ef.log2', (['x'], {}), '(x)\n', (13342, 13345), True, 'import adlib27.elem_function as ef\n'), ((13927, 13984), 'pytest.approx', 'pytest.approx', (['[0.48089834696298783, 0.28853900817779266]'], {}), '([0.48089834696298783, 0.28853900817779266])\n', (13940, 13984), False, 'import pytest\n'), ((14008, 14065), 'pytest.approx', 'pytest.approx', (['[0.48089834696298783, 0.28853900817779266]'], {}), '([0.48089834696298783, 0.28853900817779266])\n', (14021, 14065), False, 'import pytest\n'), ((14108, 14119), 'adlib27.elem_function.log10', 'ef.log10', (['x'], {}), '(x)\n', (14116, 14119), True, 'import adlib27.elem_function as ef\n'), ((14709, 14766), 'pytest.approx', 'pytest.approx', (['[0.14476482730108392, 0.08685889638065035]'], {}), '([0.14476482730108392, 0.08685889638065035])\n', (14722, 14766), False, 'import pytest\n'), ((14790, 14847), 'pytest.approx', 'pytest.approx', (['[0.14476482730108392, 
0.08685889638065035]'], {}), '([0.14476482730108392, 0.08685889638065035])\n', (14803, 14847), False, 'import pytest\n'), ((14887, 14900), 'adlib27.elem_function.logb', 'ef.logb', (['x', '(2)'], {}), '(x, 2)\n', (14894, 14900), True, 'import adlib27.elem_function as ef\n'), ((15544, 15601), 'pytest.approx', 'pytest.approx', (['[0.30341307554227914, 0.18204784532536747]'], {}), '([0.30341307554227914, 0.18204784532536747])\n', (15557, 15601), False, 'import pytest\n'), ((15625, 15682), 'pytest.approx', 'pytest.approx', (['[0.30341307554227914, 0.18204784532536747]'], {}), '([0.30341307554227914, 0.18204784532536747])\n', (15638, 15682), False, 'import pytest\n'), ((15723, 15733), 'adlib27.elem_function.sqrt', 'ef.sqrt', (['x'], {}), '(x)\n', (15730, 15733), True, 'import adlib27.elem_function as ef\n'), ((191, 200), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (197, 200), True, 'import numpy as np\n'), ((354, 363), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (360, 363), True, 'import numpy as np\n'), ((594, 608), 'numpy.sin', 'np.sin', (['[1, 2]'], {}), '([1, 2])\n', (600, 608), True, 'import numpy as np\n'), ((919, 930), 'numpy.sin', 'np.sin', (['[3]'], {}), '([3])\n', (925, 930), True, 'import numpy as np\n'), ((1306, 1320), 'numpy.sin', 'np.sin', (['[3, 5]'], {}), '([3, 5])\n', (1312, 1320), True, 'import numpy as np\n'), ((1499, 1508), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1505, 1508), True, 'import numpy as np\n'), ((1662, 1671), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (1668, 1671), True, 'import numpy as np\n'), ((1904, 1918), 'numpy.cos', 'np.cos', (['[1, 2]'], {}), '([1, 2])\n', (1910, 1918), True, 'import numpy as np\n'), ((2230, 2241), 'numpy.cos', 'np.cos', (['[3]'], {}), '([3])\n', (2236, 2241), True, 'import numpy as np\n'), ((2619, 2633), 'numpy.cos', 'np.cos', (['[3, 5]'], {}), '([3, 5])\n', (2625, 2633), True, 'import numpy as np\n'), ((2814, 2823), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (2820, 2823), True, 'import numpy as 
np\n'), ((2977, 2986), 'numpy.tan', 'np.tan', (['(0)'], {}), '(0)\n', (2983, 2986), True, 'import numpy as np\n'), ((3226, 3240), 'numpy.tan', 'np.tan', (['[1, 2]'], {}), '([1, 2])\n', (3232, 3240), True, 'import numpy as np\n'), ((3560, 3571), 'numpy.tan', 'np.tan', (['[3]'], {}), '([3])\n', (3566, 3571), True, 'import numpy as np\n'), ((3965, 3979), 'numpy.tan', 'np.tan', (['[3, 5]'], {}), '([3, 5])\n', (3971, 3979), True, 'import numpy as np\n'), ((4209, 4221), 'numpy.arcsin', 'np.arcsin', (['x'], {}), '(x)\n', (4218, 4221), True, 'import numpy as np\n'), ((4381, 4393), 'numpy.arcsin', 'np.arcsin', (['(0)'], {}), '(0)\n', (4390, 4393), True, 'import numpy as np\n'), ((4743, 4764), 'numpy.arcsin', 'np.arcsin', (['[0.3, 0.5]'], {}), '([0.3, 0.5])\n', (4752, 4764), True, 'import numpy as np\n'), ((4996, 5008), 'numpy.arccos', 'np.arccos', (['x'], {}), '(x)\n', (5005, 5008), True, 'import numpy as np\n'), ((5177, 5191), 'numpy.arccos', 'np.arccos', (['(0.8)'], {}), '(0.8)\n', (5186, 5191), True, 'import numpy as np\n'), ((5544, 5565), 'numpy.arccos', 'np.arccos', (['[0.7, 0.9]'], {}), '([0.7, 0.9])\n', (5553, 5565), True, 'import numpy as np\n'), ((5799, 5811), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (5808, 5811), True, 'import numpy as np\n'), ((5980, 5994), 'numpy.arctan', 'np.arctan', (['(0.8)'], {}), '(0.8)\n', (5989, 5994), True, 'import numpy as np\n'), ((6342, 6363), 'numpy.arctan', 'np.arctan', (['[0.7, 0.9]'], {}), '([0.7, 0.9])\n', (6351, 6363), True, 'import numpy as np\n'), ((6588, 6597), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6594, 6597), True, 'import numpy as np\n'), ((6751, 6760), 'numpy.exp', 'np.exp', (['(0)'], {}), '(0)\n', (6757, 6760), True, 'import numpy as np\n'), ((6991, 7005), 'numpy.exp', 'np.exp', (['[1, 2]'], {}), '([1, 2])\n', (6997, 7005), True, 'import numpy as np\n'), ((7316, 7327), 'numpy.exp', 'np.exp', (['[3]'], {}), '([3])\n', (7322, 7327), True, 'import numpy as np\n'), ((7703, 7717), 'numpy.exp', 'np.exp', 
(['[3, 5]'], {}), '([3, 5])\n', (7709, 7717), True, 'import numpy as np\n'), ((7941, 7951), 'numpy.sinh', 'np.sinh', (['x'], {}), '(x)\n', (7948, 7951), True, 'import numpy as np\n'), ((8107, 8117), 'numpy.sinh', 'np.sinh', (['(0)'], {}), '(0)\n', (8114, 8117), True, 'import numpy as np\n'), ((8351, 8366), 'numpy.sinh', 'np.sinh', (['[1, 2]'], {}), '([1, 2])\n', (8358, 8366), True, 'import numpy as np\n'), ((8680, 8692), 'numpy.sinh', 'np.sinh', (['[3]'], {}), '([3])\n', (8687, 8692), True, 'import numpy as np\n'), ((9072, 9087), 'numpy.sinh', 'np.sinh', (['[3, 5]'], {}), '([3, 5])\n', (9079, 9087), True, 'import numpy as np\n'), ((9270, 9280), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (9277, 9280), True, 'import numpy as np\n'), ((9436, 9446), 'numpy.cosh', 'np.cosh', (['(0)'], {}), '(0)\n', (9443, 9446), True, 'import numpy as np\n'), ((9680, 9695), 'numpy.cosh', 'np.cosh', (['[1, 2]'], {}), '([1, 2])\n', (9687, 9695), True, 'import numpy as np\n'), ((10009, 10021), 'numpy.cosh', 'np.cosh', (['[3]'], {}), '([3])\n', (10016, 10021), True, 'import numpy as np\n'), ((10401, 10416), 'numpy.cosh', 'np.cosh', (['[3, 5]'], {}), '([3, 5])\n', (10408, 10416), True, 'import numpy as np\n'), ((10599, 10609), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (10606, 10609), True, 'import numpy as np\n'), ((10765, 10775), 'numpy.tanh', 'np.tanh', (['(0)'], {}), '(0)\n', (10772, 10775), True, 'import numpy as np\n'), ((11009, 11024), 'numpy.tanh', 'np.tanh', (['[1, 2]'], {}), '([1, 2])\n', (11016, 11024), True, 'import numpy as np\n'), ((11338, 11350), 'numpy.tanh', 'np.tanh', (['[3]'], {}), '([3])\n', (11345, 11350), True, 'import numpy as np\n'), ((11730, 11745), 'numpy.tanh', 'np.tanh', (['[3, 5]'], {}), '([3, 5])\n', (11737, 11745), True, 'import numpy as np\n'), ((12647, 12656), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (12653, 12656), True, 'import numpy as np\n'), ((12820, 12829), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (12826, 12829), True, 'import numpy as 
np\n'), ((13150, 13164), 'numpy.log', 'np.log', (['[3, 5]'], {}), '([3, 5])\n', (13156, 13164), True, 'import numpy as np\n'), ((13363, 13373), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (13370, 13373), True, 'import numpy as np\n'), ((13540, 13550), 'numpy.log2', 'np.log2', (['(2)'], {}), '(2)\n', (13547, 13550), True, 'import numpy as np\n'), ((13888, 13903), 'numpy.log2', 'np.log2', (['[3, 5]'], {}), '([3, 5])\n', (13895, 13903), True, 'import numpy as np\n'), ((14137, 14148), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (14145, 14148), True, 'import numpy as np\n'), ((14317, 14328), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (14325, 14328), True, 'import numpy as np\n'), ((14669, 14685), 'numpy.log10', 'np.log10', (['[3, 5]'], {}), '([3, 5])\n', (14677, 14685), True, 'import numpy as np\n'), ((14917, 14931), 'math.log', 'math.log', (['x', '(2)'], {}), '(x, 2)\n', (14925, 14931), False, 'import math\n'), ((15098, 15112), 'math.log', 'math.log', (['(2)', '(3)'], {}), '(2, 3)\n', (15106, 15112), False, 'import math\n'), ((15453, 15467), 'math.log', 'math.log', (['(3)', '(3)'], {}), '(3, 3)\n', (15461, 15467), False, 'import math\n'), ((15505, 15519), 'math.log', 'math.log', (['(5)', '(3)'], {}), '(5, 3)\n', (15513, 15519), False, 'import math\n'), ((15751, 15761), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (15758, 15761), True, 'import numpy as np\n'), ((15925, 15935), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (15932, 15935), True, 'import numpy as np\n'), ((16265, 16280), 'numpy.sqrt', 'np.sqrt', (['[3, 5]'], {}), '([3, 5])\n', (16272, 16280), True, 'import numpy as np\n'), ((399, 408), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (405, 408), True, 'import numpy as np\n'), ((646, 660), 'numpy.cos', 'np.cos', (['[1, 2]'], {}), '([1, 2])\n', (652, 660), True, 'import numpy as np\n'), ((969, 978), 'numpy.cos', 'np.cos', (['(3)'], {}), '(3)\n', (975, 978), True, 'import numpy as np\n'), ((1021, 1030), 'numpy.cos', 'np.cos', (['(3)'], {}), 
'(3)\n', (1027, 1030), True, 'import numpy as np\n'), ((1358, 1372), 'numpy.cos', 'np.cos', (['[3, 5]'], {}), '([3, 5])\n', (1364, 1372), True, 'import numpy as np\n'), ((1414, 1428), 'numpy.cos', 'np.cos', (['[3, 5]'], {}), '([3, 5])\n', (1420, 1428), True, 'import numpy as np\n'), ((6796, 6805), 'numpy.exp', 'np.exp', (['(0)'], {}), '(0)\n', (6802, 6805), True, 'import numpy as np\n'), ((7043, 7057), 'numpy.exp', 'np.exp', (['[1, 2]'], {}), '([1, 2])\n', (7049, 7057), True, 'import numpy as np\n'), ((7366, 7375), 'numpy.exp', 'np.exp', (['(3)'], {}), '(3)\n', (7372, 7375), True, 'import numpy as np\n'), ((7418, 7427), 'numpy.exp', 'np.exp', (['(3)'], {}), '(3)\n', (7424, 7427), True, 'import numpy as np\n'), ((7755, 7769), 'numpy.exp', 'np.exp', (['[3, 5]'], {}), '([3, 5])\n', (7761, 7769), True, 'import numpy as np\n'), ((7811, 7825), 'numpy.exp', 'np.exp', (['[3, 5]'], {}), '([3, 5])\n', (7817, 7825), True, 'import numpy as np\n'), ((8153, 8163), 'numpy.cosh', 'np.cosh', (['(0)'], {}), '(0)\n', (8160, 8163), True, 'import numpy as np\n'), ((8404, 8419), 'numpy.cosh', 'np.cosh', (['[1, 2]'], {}), '([1, 2])\n', (8411, 8419), True, 'import numpy as np\n'), ((8731, 8741), 'numpy.cosh', 'np.cosh', (['(3)'], {}), '(3)\n', (8738, 8741), True, 'import numpy as np\n'), ((8784, 8794), 'numpy.cosh', 'np.cosh', (['(3)'], {}), '(3)\n', (8791, 8794), True, 'import numpy as np\n'), ((9125, 9140), 'numpy.cosh', 'np.cosh', (['[3, 5]'], {}), '([3, 5])\n', (9132, 9140), True, 'import numpy as np\n'), ((9182, 9197), 'numpy.cosh', 'np.cosh', (['[3, 5]'], {}), '([3, 5])\n', (9189, 9197), True, 'import numpy as np\n'), ((9482, 9492), 'numpy.sinh', 'np.sinh', (['(0)'], {}), '(0)\n', (9489, 9492), True, 'import numpy as np\n'), ((9733, 9748), 'numpy.sinh', 'np.sinh', (['[1, 2]'], {}), '([1, 2])\n', (9740, 9748), True, 'import numpy as np\n'), ((10060, 10070), 'numpy.sinh', 'np.sinh', (['(3)'], {}), '(3)\n', (10067, 10070), True, 'import numpy as np\n'), ((10113, 10123), 'numpy.sinh', 
'np.sinh', (['(3)'], {}), '(3)\n', (10120, 10123), True, 'import numpy as np\n'), ((10454, 10469), 'numpy.sinh', 'np.sinh', (['[3, 5]'], {}), '([3, 5])\n', (10461, 10469), True, 'import numpy as np\n'), ((10511, 10526), 'numpy.sinh', 'np.sinh', (['[3, 5]'], {}), '([3, 5])\n', (10518, 10526), True, 'import numpy as np\n'), ((10815, 10825), 'numpy.cosh', 'np.cosh', (['(0)'], {}), '(0)\n', (10822, 10825), True, 'import numpy as np\n'), ((11066, 11081), 'numpy.cosh', 'np.cosh', (['[1, 2]'], {}), '([1, 2])\n', (11073, 11081), True, 'import numpy as np\n'), ((11393, 11403), 'numpy.cosh', 'np.cosh', (['(3)'], {}), '(3)\n', (11400, 11403), True, 'import numpy as np\n'), ((11446, 11456), 'numpy.cosh', 'np.cosh', (['(3)'], {}), '(3)\n', (11453, 11456), True, 'import numpy as np\n'), ((1709, 1718), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (1715, 1718), True, 'import numpy as np\n'), ((1957, 1971), 'numpy.sin', 'np.sin', (['[1, 2]'], {}), '([1, 2])\n', (1963, 1971), True, 'import numpy as np\n'), ((2281, 2290), 'numpy.sin', 'np.sin', (['(3)'], {}), '(3)\n', (2287, 2290), True, 'import numpy as np\n'), ((2334, 2343), 'numpy.sin', 'np.sin', (['(3)'], {}), '(3)\n', (2340, 2343), True, 'import numpy as np\n'), ((2672, 2686), 'numpy.sin', 'np.sin', (['[3, 5]'], {}), '([3, 5])\n', (2678, 2686), True, 'import numpy as np\n'), ((2729, 2743), 'numpy.sin', 'np.sin', (['[3, 5]'], {}), '([3, 5])\n', (2735, 2743), True, 'import numpy as np\n'), ((4436, 4455), 'numpy.sqrt', 'np.sqrt', (['(1 - 0 ** 2)'], {}), '(1 - 0 ** 2)\n', (4443, 4455), True, 'import numpy as np\n'), ((5235, 5256), 'numpy.sqrt', 'np.sqrt', (['(1 - 0.8 ** 2)'], {}), '(1 - 0.8 ** 2)\n', (5242, 5256), True, 'import numpy as np\n'), ((11795, 11810), 'numpy.cosh', 'np.cosh', (['[3, 5]'], {}), '([3, 5])\n', (11802, 11810), True, 'import numpy as np\n'), ((11870, 11885), 'numpy.cosh', 'np.cosh', (['[3, 5]'], {}), '([3, 5])\n', (11877, 11885), True, 'import numpy as np\n'), ((15978, 15988), 'numpy.sqrt', 'np.sqrt', 
(['(2)'], {}), '(2)\n', (15985, 15988), True, 'import numpy as np\n'), ((16322, 16337), 'numpy.sqrt', 'np.sqrt', (['[3, 5]'], {}), '([3, 5])\n', (16329, 16337), True, 'import numpy as np\n'), ((16382, 16397), 'numpy.sqrt', 'np.sqrt', (['[3, 5]'], {}), '([3, 5])\n', (16389, 16397), True, 'import numpy as np\n'), ((3027, 3036), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (3033, 3036), True, 'import numpy as np\n'), ((3283, 3297), 'numpy.cos', 'np.cos', (['[1, 2]'], {}), '([1, 2])\n', (3289, 3297), True, 'import numpy as np\n'), ((3615, 3624), 'numpy.cos', 'np.cos', (['(3)'], {}), '(3)\n', (3621, 3624), True, 'import numpy as np\n'), ((3676, 3685), 'numpy.cos', 'np.cos', (['(3)'], {}), '(3)\n', (3682, 3685), True, 'import numpy as np\n'), ((4022, 4036), 'numpy.cos', 'np.cos', (['[3, 5]'], {}), '([3, 5])\n', (4028, 4036), True, 'import numpy as np\n'), ((4087, 4101), 'numpy.cos', 'np.cos', (['[3, 5]'], {}), '([3, 5])\n', (4093, 4101), True, 'import numpy as np\n'), ((13599, 13608), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (13605, 13608), True, 'import numpy as np\n'), ((14377, 14387), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (14383, 14387), True, 'import numpy as np\n'), ((15160, 15169), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (15166, 15169), True, 'import numpy as np\n')] |
from unittest.mock import patch, Mock
import numpy as np
from numpy.testing import assert_allclose
import pytest
from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain
from pyqumo.errors import CellValueError, RowSumError, MatrixShapeError
# ############################################################################
# TEST DiscreteTimeMarkovChain
# ############################################################################
@pytest.mark.parametrize('matrix, order, string', [
    ([[1.0]], 1, '(DTMC: t=[[1]])'),
    ([[0.5, 0.5], [0.8, 0.2]], 2, '(DTMC: t=[[0.5, 0.5], [0.8, 0.2]])')
])
def test_dtmc__props(matrix, order, string):
    """Matrix, order and string form of a DTMC must reflect the input."""
    dtmc = DiscreteTimeMarkovChain(matrix)
    assert_allclose(dtmc.matrix, matrix)
    assert order == dtmc.order
    assert string == str(dtmc)
@pytest.mark.parametrize('matrix, tol, exc_type', [
    ([[0.9]], 0.05, RowSumError),
    ([[-0.0005, 0.9995], [0.5, 0.5]], 1e-4, CellValueError),
    ([[0.5, 0.5]], 0.1, MatrixShapeError)
])
def test_dtmc__bad_matrix_raise_error(matrix, tol, exc_type):
    """A non-stochastic matrix beyond `tol` must raise a MatrixError."""
    with pytest.raises(exc_type):
        _ = DiscreteTimeMarkovChain(matrix, tol=tol)
def test_dtmc__bad_matrix_fix():
    """
    A slightly broken matrix (all errors within `tol`) must be routed
    through `fix_stochastic` instead of raising an error.
    """
    tol = 0.12
    matrix = np.asarray([
        [-0.08, 0.02, 0.72, 0.12],
        [-0.04, -0.01, 0.03, 0.76],
        [1.09, 0.00, 0.02, 0.00],
        [0.40, 0.40, 0.00, 0.20]
    ])
    with patch('pyqumo.chains.fix_stochastic', return_value=matrix) as fixer:
        DiscreteTimeMarkovChain(matrix, tol=tol)
        fixer.assert_called_once()
        # Inspect the recorded call: positional matrix, keyword tolerance.
        args, kwargs = fixer.call_args
        assert_allclose(args[0], matrix)
        assert_allclose(kwargs['tol'], tol)
#
# fix.assert_called_once_with(matrix, tol=tol)
def test_dtmc__bad_matrix_no_raise_when_safe_true():
    """
    With `safe=True` a non-stochastic matrix is accepted verbatim and no
    exception is raised.
    """
    bad_matrix = [[-0.2, 1.1], [0.4, 0.4]]  # clearly not stochastic
    chain = DiscreteTimeMarkovChain(bad_matrix, safe=True, tol=0.01)
    assert_allclose(bad_matrix, chain.matrix)
def test_dtmc__matrix_copied():
    """
    The chain must keep its own copy of the matrix: mutating the source
    array after construction must not leak into the chain.
    """
    source = np.asarray([[0.5, 0.5], [0.5, 0.5]])
    chain = DiscreteTimeMarkovChain(source)
    source[0, 0] = 0.42
    assert chain.matrix[0, 0] == 0.5
#
# Testing steady-state PMF and traces.
# ------------------------------------
# DTMC FIXTURES:
# --------------
# To test these properties, we will use three fixtures:
# - dtmc1: chain with single (absorbing) state
# - dtmc2: periodic chain with interchanging states 0-1-0-1...
# - dtmc9: large non-trivial chain
# - dtmc4_line: a DTMC with sequential deterministic transitions
# 0 -> 1 -> 2 -> 3 -> 3 -> ... (3 - absorbing state)
# - dtmc4_circle: a periodic DTMC with sequential deterministic transitions:
# 0 -> 1 -> 2 -> 3 -> 0 -> ... (no absorbing states)
#
def dtmc1():
    """Degenerate single-state DTMC (the only state is absorbing)."""
    return DiscreteTimeMarkovChain([[1.0]])


def dtmc2():
    """Periodic two-state DTMC that alternates 0-1-0-1..."""
    return DiscreteTimeMarkovChain([[0, 1], [1, 0]])


def dtmc9():
    """Nine-state DTMC with a non-trivial transition matrix."""
    matrix = [
        [.00, .50, .00, .50, .00, .00, .00, .00, .00],
        [.25, .00, .25, .00, .25, .00, .00, .25, .00],
        [.00, .50, .00, .00, .00, .50, .00, .00, .00],
        [1/3, .00, .00, .00, 1/3, .00, 1/3, .00, .00],
        [.00, .25, .00, .25, .00, .25, .00, .25, .00],
        [.00, .00, 1/3, .00, 1/3, .00, .00, .00, 1/3],
        [.00, .00, .00, .50, .00, .00, .00, .50, .00],
        [.00, .25, .00, .00, .25, .00, .25, .00, .25],
        [.00, .00, .00, .00, .00, .50, .00, .50, .00]]
    return DiscreteTimeMarkovChain(matrix)


def dtmc4_line():
    """Four-state DTMC 0 -> 1 -> 2 -> 3, where state 3 is absorbing."""
    transitions = [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1]]
    return DiscreteTimeMarkovChain(transitions)


def dtmc4_circle():
    """Periodic four-state DTMC cycling 0 -> 1 -> 2 -> 3 -> 0 -> ..."""
    transitions = [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]
    return DiscreteTimeMarkovChain(transitions)
# (end of fixtures)
# -----------------
# Testing DiscreteTimeMarkovChain.steady_pmf
# ------------------------------------------
@pytest.mark.parametrize('chain, pmf, comment', [
    (dtmc1(), [1.0], 'trivial DTMC of order 1 PMF is also trivial'),
    (dtmc2(), [0.5, 0.5], 'periodic DTMC of order 2 states are equivalent'),
    (
        dtmc9(), [.077, .154, .077, .115, 0.154, 0.115, 0.077, 0.154, 0.077],
        'DTMC of order 9 with non-trivial matrix and steady-state PMF'
    )
])
def test_dtmc__steady_pmf(chain: DiscreteTimeMarkovChain, pmf, comment):
    """
    Steady-state PMF of a DTMC must match the expected distribution.
    """
    estimated = chain.steady_pmf
    assert_allclose(estimated, pmf, rtol=0.01, err_msg=comment)
# Testing DiscreteTimeMarkovChain.trace()
# ---------------------------------------
@pytest.mark.parametrize('chain, init, path', [
    (dtmc2(), 0, (0, 1, 0, 1, 0, 1, 0, 1)),
    (dtmc2(), 1, (1, 0, 1, 0)),
    (dtmc4_circle(), 2, (2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0)),
])
def test_dtmc__trace_from_concrete_state(chain, init, path):
    """
    trace() started from a concrete state must follow the expected path.
    """
    num_steps = len(path) - 1
    trace = chain.trace(num_steps, init=init)
    for i, expected_state in enumerate(path[:num_steps]):
        step = next(trace)
        assert step[0] == expected_state, \
            f"step #{i} = {step}, expected path: {path}"
def test_dtmc__trace_generates_exactly_size_steps():
    """
    trace() must yield exactly the requested number of steps.
    """
    chain = dtmc2()
    steps = list(chain.trace(7))
    assert len(steps) == 7
@pytest.mark.parametrize('chain, size, init, ends, path, comment', [
    (dtmc2(), 10, 0, (0,), (), 'if initial state in ends, return empty trace'),
    (dtmc2(), 10, 0, (1,), ((0, 1),), 'path with single step till end'),
    (dtmc4_line(), 2, 0, (3,), ((0, 1), (1, 2)), 'trace stop before ends'),
    (dtmc4_line(), 8, 0, (3,), ((0, 1), (1, 2), (2, 3)), 'path of size 3'),
])
def test_dtmc__trace_till_ends(chain, size, init, ends, path, comment):
    """
    trace() must terminate as soon as any of the `ends` states is reached.
    """
    assert isinstance(chain, DiscreteTimeMarkovChain)
    actual_path = tuple(chain.trace(size, init=init, ends=ends))
    assert path == actual_path, comment
@pytest.mark.parametrize('chain, pmf, comment', [
    (dtmc2(), [0.2, 0.8], 'DTMC-2 with non-equal initial probabilities'),
    (dtmc2(), None, 'DTMC-2 with steady-state probability [0.5, 0.5]'),
    (dtmc9(), [0.1, 0.3, 0.2, 0.0, 0.0, 0.1, 0.2, 0.1, 0.0], 'DTMC-9'),
])
def test_dtmc__trace_from_pmf(chain, pmf, comment):
    """
    trace() must draw its initial state either from the given PMF, or from
    the steady-state PMF when `pmf is None`. Estimated by running many
    one-step traces and counting which state each of them started from.
    """
    num_runs = 10000
    if pmf is None:
        expected_pmf = chain.steady_pmf.copy()
    else:
        expected_pmf = np.asarray(pmf)
    hits = np.zeros(chain.order)
    for _ in range(num_runs):
        first_transition = next(chain.trace(1, init=pmf))
        hits[first_transition[0]] += 1
    # Compare the empirical start-state frequencies with the expectation:
    est_pmf = hits / num_runs
    assert_allclose(est_pmf, expected_pmf, rtol=0.1, err_msg=comment)
@pytest.mark.parametrize('chain, comment', [
    (dtmc2(), 'Periodic DTMC-2'),
    (dtmc9(), 'Arbitrary DTMC-9'),
])
def test_dtmc__trace_visits_states_as_steady_pmf(chain, comment):
    """
    On a long run, the frequencies of visited states (both the source and
    the destination of each step) must approach the steady-state PMF.
    """
    assert isinstance(chain, DiscreteTimeMarkovChain)
    trace_size = 10000
    from_hits = np.zeros(chain.order)
    to_hits = np.zeros(chain.order)
    for step in chain.trace(trace_size):
        from_hits[step[0]] += 1
        to_hits[step[1]] += 1
    from_freq = from_hits / trace_size
    to_freq = to_hits / trace_size
    assert_allclose(from_freq, chain.steady_pmf, rtol=5e-2, atol=0.01,
                    err_msg=f"{comment} (hits_prev)")
    assert_allclose(to_freq, chain.steady_pmf, rtol=5e-2, atol=0.01,
                    err_msg=f"{comment} (hits_next)")
# Testing DiscreteTimeMarkovChain.random_path()
# ---------------------------------------------
@pytest.mark.parametrize('chain, path, comment', [
    (dtmc1(), (0, 0, 0), 'degenerate case of DTMC-1'),
])
def test_dtmc__random_path(chain, path, comment):
    """
    random_path() on a fully deterministic chain must reproduce the
    expected path exactly.
    """
    num_steps = len(path) - 1
    assert chain.random_path(num_steps, init=path[0]) == path, comment
# ############################################################################
# TEST DiscreteTimeMarkovChain
# ############################################################################
@pytest.mark.parametrize('matrix, order, dtmc_matrix, string', [
    ([[0.0]], 1, [[1.0]], '(CTMC: g=[[0]])'),
    ([[-1, 1], [5, -5]], 2, [[0, 1], [1, 0]], '(CTMC: g=[[-1, 1], [5, -5]])'),
    (
        [[-2, 1.5, 0.5], [3, -3, 0], [0, 0, 0]],
        3,
        [[0, 0.75, 0.25], [1, 0, 0], [0, 0, 1]],
        '(CTMC: g=[[-2, 1.5, 0.5], [3, -3, 0], [0, 0, 0]])'
    ),
])
def test_ctmc__props(matrix, order, dtmc_matrix, string):
    """
    CTMC construction: generator, order, string form and embedded DTMC
    must all reflect the input.
    """
    ctmc = ContinuousTimeMarkovChain(matrix)
    assert_allclose(ctmc.matrix, matrix, err_msg=string)
    assert order == ctmc.order, string
    assert string == str(ctmc), string
    assert_allclose(ctmc.embedded_dtmc.matrix, dtmc_matrix, err_msg=string)
@pytest.mark.parametrize('matrix, tol, exc_type', [
    (
        [[-1.0, -0.101, 1.0], [1.0, -1.0, -0.11], [0, -0.09, 0]],
        0.1, CellValueError
    ),
    ([[-1.21, 1.0], [5, -5.23]], 0.2, RowSumError),
    ([[-1, 1, 0], [0, 0, 0]], 0.1, MatrixShapeError)
])
def test_ctmc__bad_matrix_raise_error(matrix, tol, exc_type):
    """
    Validate CTMC creation raises MatrixError if matrix is not infinitesimal.

    Each matrix is crafted so that its errors exceed the given `tol`.
    """
    with pytest.raises(exc_type):
        # FIX: pass the parametrized tolerance (it was previously ignored,
        # so the test ran with the default tolerance instead) — mirrors the
        # DTMC counterpart test_dtmc__bad_matrix_raise_error.
        ContinuousTimeMarkovChain(matrix, tol=tol)
def test_ctmc__bad_matrix_fix():
    """
    An almost-infinitesimal matrix (largest error below `tol`) must be
    routed through `fix_infinitesimal` instead of raising an error.
    Same data as in the fix_infinitesimal() test (could be mocked).
    """
    tol = 0.21
    matrix = np.asarray([
        [-2.0, 1.5, 0.5, 0.0, 0.0],    # valid row, no modification
        [-0.1, -0.9, 0.4, 0.5, 0.0],   # -0.1 -> 0.0, err = 0.1
        [0.0, 2.0, -3.0, 1.2, 0.0],    # diagonal -> -3.2, err = 0.2
        [0.5, -0.15, 3.4, -4.0, 0.0],  # -0.15 -> 0.0, -4.0 -> -3.9, err = .15
        [0.0, 0.0, 0.0, 0.0, 0.0]      # valid row, no modification
    ])
    with patch('pyqumo.chains.fix_infinitesimal', return_value=matrix) as fixer:
        ContinuousTimeMarkovChain(matrix, tol=tol)
        fixer.assert_called_once()
        # Inspect the recorded call: positional matrix, keyword tolerance.
        args, kwargs = fixer.call_args
        assert_allclose(args[0], matrix)
        assert_allclose(kwargs['tol'], tol)
def test_ctmc__bad_matrix_no_raise_when_safe_true():
    """
    With `safe=True` a non-infinitesimal matrix is accepted verbatim.
    """
    generator = [[-2, 1, 0], [1, -1, 1], [0, 0, 0.5]]  # clearly invalid
    chain = ContinuousTimeMarkovChain(generator, safe=True, tol=0.01)
    assert_allclose(generator, chain.matrix)
def test_ctmc__matrix_copied():
    """
    The chain must store its own copy of the generator: mutating the
    source array after construction must not affect the chain.
    """
    source = np.asarray([[-1, 1], [1, -1]])
    chain = ContinuousTimeMarkovChain(source)
    source[0, 0] = -42
    assert chain.matrix[0, 0] == -1
#
# Testing steady-state PMF and traces of CTMC
# -------------------------------------------
# CTMC FIXTURES:
# --------------
# To test these properties, we will use three fixtures:
# - dtmc1: chain with single (absorbing) state
# - dtmc2: periodic chain with interchanging states 0-1-0-1...
# - dtmc3: non-trivial non-periodic chain with three states
#
def ctmc1():
    """Trivial CTMC with a single (absorbing) state."""
    return ContinuousTimeMarkovChain([[0.0]])


def ctmc2():
    """Symmetric two-state CTMC with unit transition rates."""
    return ContinuousTimeMarkovChain([[-1, 1], [1, -1]])


def ctmc3():
    """Non-trivial non-periodic three-state CTMC."""
    generator = [[-.025, .02, .005], [.3, -.5, .2], [.02, .4, -.42]]
    return ContinuousTimeMarkovChain(generator)
# (end of fixtures)
# -----------------
# Testing ContinuousTimeMarkovChain.steady_pmf
# --------------------------------------------
@pytest.mark.parametrize('chain, pmf, comment', [
    (ctmc1(), [1.0], 'trivial CTMC with 1 state'),
    (ctmc2(), [0.5, 0.5], 'periodic CTMC with 2 states'),
    (ctmc3(), [.885, .071, .044], 'CTMC with 3 states'),
])
def test_ctmc__steady_pmf(chain, pmf, comment):
    """
    Steady-state PMF of a CTMC must match the expected distribution.
    """
    assert isinstance(chain, ContinuousTimeMarkovChain)
    estimated = chain.steady_pmf
    assert_allclose(estimated, pmf, rtol=0.01, err_msg=comment)
# Testing ContinuousTimeMarkovChain.trace()
# -----------------------------------------
@pytest.mark.parametrize('chain, size, init, ends, safe', [
    (ctmc2(), 5, 0, (), True),
    (ctmc3(), 8, None, [3], False),
    (ctmc3(), 42, [0.3, 0.3, 0.4], (), True)
])
def test_ctmc__trace_calls_dtmc_trace(chain, size, init, ends, safe):
    """
    ctmc.trace() must delegate to the embedded DTMC's trace() method.
    """
    stub = Mock(return_value=())
    # Patch the embedded chain's trace() directly (crude, but sufficient).
    chain.embedded_dtmc.trace = stub
    # trace() is a generator, so exhaust it to force the delegated call:
    list(chain.trace(size, init, ends, safe))
    # The embedded DTMC trace() must have been called exactly once:
    stub.assert_called_once_with(size, init=init, ends=ends, safe=safe)
def test_ctmc__trace_intervals_converge_to_matrix_means():
    """
    Mean sojourn time in each state along a long trace must converge to
    -1/M[i,i] of the generator M, i.e. the estimated exit rates must
    approach `chain.rates`.
    """
    chain = ctmc3()
    num_steps = 10000
    total_time = np.zeros(chain.order)
    visits = np.zeros(chain.order)
    # noinspection PyTypeChecker
    for (state, _, interval) in chain.trace(num_steps):
        total_time[state] += interval
        visits[state] += 1
    mean_intervals = total_time / visits
    assert_allclose(1 / mean_intervals, chain.rates, rtol=0.1)
| [
"numpy.asarray",
"numpy.testing.assert_allclose",
"numpy.zeros",
"unittest.mock.Mock",
"pyqumo.chains.ContinuousTimeMarkovChain",
"unittest.mock.patch",
"pytest.raises",
"pyqumo.chains.DiscreteTimeMarkovChain",
"pytest.mark.parametrize"
] | [((456, 616), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix, order, string"""', "[([[1.0]], 1, '(DTMC: t=[[1]])'), ([[0.5, 0.5], [0.8, 0.2]], 2,\n '(DTMC: t=[[0.5, 0.5], [0.8, 0.2]])')]"], {}), "('matrix, order, string', [([[1.0]], 1,\n '(DTMC: t=[[1]])'), ([[0.5, 0.5], [0.8, 0.2]], 2,\n '(DTMC: t=[[0.5, 0.5], [0.8, 0.2]])')])\n", (479, 616), False, 'import pytest\n'), ((817, 1003), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix, tol, exc_type"""', '[([[0.9]], 0.05, RowSumError), ([[-0.0005, 0.9995], [0.5, 0.5]], 0.0001,\n CellValueError), ([[0.5, 0.5]], 0.1, MatrixShapeError)]'], {}), "('matrix, tol, exc_type', [([[0.9]], 0.05,\n RowSumError), ([[-0.0005, 0.9995], [0.5, 0.5]], 0.0001, CellValueError),\n ([[0.5, 0.5]], 0.1, MatrixShapeError)])\n", (840, 1003), False, 'import pytest\n'), ((8870, 9207), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix, order, dtmc_matrix, string"""', "[([[0.0]], 1, [[1.0]], '(CTMC: g=[[0]])'), ([[-1, 1], [5, -5]], 2, [[0, 1],\n [1, 0]], '(CTMC: g=[[-1, 1], [5, -5]])'), ([[-2, 1.5, 0.5], [3, -3, 0],\n [0, 0, 0]], 3, [[0, 0.75, 0.25], [1, 0, 0], [0, 0, 1]],\n '(CTMC: g=[[-2, 1.5, 0.5], [3, -3, 0], [0, 0, 0]])')]"], {}), "('matrix, order, dtmc_matrix, string', [([[0.0]], 1,\n [[1.0]], '(CTMC: g=[[0]])'), ([[-1, 1], [5, -5]], 2, [[0, 1], [1, 0]],\n '(CTMC: g=[[-1, 1], [5, -5]])'), ([[-2, 1.5, 0.5], [3, -3, 0], [0, 0, 0\n ]], 3, [[0, 0.75, 0.25], [1, 0, 0], [0, 0, 1]],\n '(CTMC: g=[[-2, 1.5, 0.5], [3, -3, 0], [0, 0, 0]])')])\n", (8893, 9207), False, 'import pytest\n'), ((9630, 9872), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix, tol, exc_type"""', '[([[-1.0, -0.101, 1.0], [1.0, -1.0, -0.11], [0, -0.09, 0]], 0.1,\n CellValueError), ([[-1.21, 1.0], [5, -5.23]], 0.2, RowSumError), ([[-1,\n 1, 0], [0, 0, 0]], 0.1, MatrixShapeError)]'], {}), "('matrix, tol, exc_type', [([[-1.0, -0.101, 1.0], [\n 1.0, -1.0, -0.11], [0, -0.09, 0]], 0.1, CellValueError), 
([[-1.21, 1.0],\n [5, -5.23]], 0.2, RowSumError), ([[-1, 1, 0], [0, 0, 0]], 0.1,\n MatrixShapeError)])\n", (9653, 9872), False, 'import pytest\n'), ((676, 707), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (699, 707), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((712, 749), 'numpy.testing.assert_allclose', 'assert_allclose', (['chain.matrix', 'matrix'], {}), '(chain.matrix, matrix)\n', (727, 749), False, 'from numpy.testing import assert_allclose\n'), ((1295, 1413), 'numpy.asarray', 'np.asarray', (['[[-0.08, 0.02, 0.72, 0.12], [-0.04, -0.01, 0.03, 0.76], [1.09, 0.0, 0.02, \n 0.0], [0.4, 0.4, 0.0, 0.2]]'], {}), '([[-0.08, 0.02, 0.72, 0.12], [-0.04, -0.01, 0.03, 0.76], [1.09, \n 0.0, 0.02, 0.0], [0.4, 0.4, 0.0, 0.2]])\n', (1305, 1413), True, 'import numpy as np\n'), ((2030, 2079), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['mat'], {'safe': '(True)', 'tol': '(0.01)'}), '(mat, safe=True, tol=0.01)\n', (2053, 2079), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((2084, 2118), 'numpy.testing.assert_allclose', 'assert_allclose', (['mat', 'chain.matrix'], {}), '(mat, chain.matrix)\n', (2099, 2118), False, 'from numpy.testing import assert_allclose\n'), ((2309, 2345), 'numpy.asarray', 'np.asarray', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (2319, 2345), True, 'import numpy as np\n'), ((2358, 2389), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (2381, 2389), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((3051, 3083), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['[[1.0]]'], {}), '([[1.0]])\n', (3074, 3083), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((3110, 3151), 'pyqumo.chains.DiscreteTimeMarkovChain', 
'DiscreteTimeMarkovChain', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (3133, 3151), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((3688, 3719), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (3711, 3719), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((3821, 3852), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (3844, 3852), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((3956, 3987), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (3979, 3987), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((4649, 4715), 'numpy.testing.assert_allclose', 'assert_allclose', (['chain.steady_pmf', 'pmf'], {'rtol': '(0.01)', 'err_msg': 'comment'}), '(chain.steady_pmf, pmf, rtol=0.01, err_msg=comment)\n', (4664, 4715), False, 'from numpy.testing import assert_allclose\n'), ((6898, 6919), 'numpy.zeros', 'np.zeros', (['chain.order'], {}), '(chain.order)\n', (6906, 6919), True, 'import numpy as np\n'), ((7159, 7224), 'numpy.testing.assert_allclose', 'assert_allclose', (['est_pmf', 'expected_pmf'], {'rtol': '(0.1)', 'err_msg': 'comment'}), '(est_pmf, expected_pmf, rtol=0.1, err_msg=comment)\n', (7174, 7224), False, 'from numpy.testing import assert_allclose\n'), ((7676, 7697), 'numpy.zeros', 'np.zeros', (['chain.order'], {}), '(chain.order)\n', (7684, 7697), True, 'import numpy as np\n'), ((7714, 7735), 'numpy.zeros', 'np.zeros', (['chain.order'], {}), '(chain.order)\n', (7722, 7735), True, 'import numpy as np\n'), ((7924, 8029), 'numpy.testing.assert_allclose', 'assert_allclose', (['hits_prev', 'chain.steady_pmf'], {'rtol': '(0.05)', 'atol': '(0.01)', 'err_msg': 'f"""{comment} (hits_prev)"""'}), "(hits_prev, chain.steady_pmf, rtol=0.05, atol=0.01, 
err_msg=\n f'{comment} (hits_prev)')\n", (7939, 8029), False, 'from numpy.testing import assert_allclose\n'), ((8049, 8154), 'numpy.testing.assert_allclose', 'assert_allclose', (['hits_next', 'chain.steady_pmf'], {'rtol': '(0.05)', 'atol': '(0.01)', 'err_msg': 'f"""{comment} (hits_next)"""'}), "(hits_next, chain.steady_pmf, rtol=0.05, atol=0.01, err_msg=\n f'{comment} (hits_next)')\n", (8064, 8154), False, 'from numpy.testing import assert_allclose\n'), ((9378, 9411), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (9403, 9411), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((9416, 9469), 'numpy.testing.assert_allclose', 'assert_allclose', (['chain.matrix', 'matrix'], {'err_msg': 'string'}), '(chain.matrix, matrix, err_msg=string)\n', (9431, 9469), False, 'from numpy.testing import assert_allclose\n'), ((9554, 9626), 'numpy.testing.assert_allclose', 'assert_allclose', (['chain.embedded_dtmc.matrix', 'dtmc_matrix'], {'err_msg': 'string'}), '(chain.embedded_dtmc.matrix, dtmc_matrix, err_msg=string)\n', (9569, 9626), False, 'from numpy.testing import assert_allclose\n'), ((10364, 10527), 'numpy.asarray', 'np.asarray', (['[[-2.0, 1.5, 0.5, 0.0, 0.0], [-0.1, -0.9, 0.4, 0.5, 0.0], [0.0, 2.0, -3.0, \n 1.2, 0.0], [0.5, -0.15, 3.4, -4.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[-2.0, 1.5, 0.5, 0.0, 0.0], [-0.1, -0.9, 0.4, 0.5, 0.0], [0.0, \n 2.0, -3.0, 1.2, 0.0], [0.5, -0.15, 3.4, -4.0, 0.0], [0.0, 0.0, 0.0, 0.0,\n 0.0]])\n', (10374, 10527), True, 'import numpy as np\n'), ((11256, 11310), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['matrix'], {'safe': '(True)', 'tol': '(0.01)'}), '(matrix, safe=True, tol=0.01)\n', (11281, 11310), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((11315, 11352), 'numpy.testing.assert_allclose', 'assert_allclose', (['matrix', 'chain.matrix'], {}), '(matrix, 
chain.matrix)\n', (11330, 11352), False, 'from numpy.testing import assert_allclose\n'), ((11543, 11573), 'numpy.asarray', 'np.asarray', (['[[-1, 1], [1, -1]]'], {}), '([[-1, 1], [1, -1]])\n', (11553, 11573), True, 'import numpy as np\n'), ((11586, 11619), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (11611, 11619), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((12062, 12096), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['[[0.0]]'], {}), '([[0.0]])\n', (12087, 12096), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((12123, 12168), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['[[-1, 1], [1, -1]]'], {}), '([[-1, 1], [1, -1]])\n', (12148, 12168), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((12195, 12288), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['[[-0.025, 0.02, 0.005], [0.3, -0.5, 0.2], [0.02, 0.4, -0.42]]'], {}), '([[-0.025, 0.02, 0.005], [0.3, -0.5, 0.2], [0.02, \n 0.4, -0.42]])\n', (12220, 12288), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((12845, 12911), 'numpy.testing.assert_allclose', 'assert_allclose', (['chain.steady_pmf', 'pmf'], {'rtol': '(0.01)', 'err_msg': 'comment'}), '(chain.steady_pmf, pmf, rtol=0.01, err_msg=comment)\n', (12860, 12911), False, 'from numpy.testing import assert_allclose\n'), ((13357, 13378), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '()'}), '(return_value=())\n', (13361, 13378), False, 'from unittest.mock import patch, Mock\n'), ((14053, 14074), 'numpy.zeros', 'np.zeros', (['chain.order'], {}), '(chain.order)\n', (14061, 14074), True, 'import numpy as np\n'), ((14086, 14107), 'numpy.zeros', 'np.zeros', (['chain.order'], {}), '(chain.order)\n', (14094, 14107), True, 'import numpy as np\n'), ((14339, 
14388), 'numpy.testing.assert_allclose', 'assert_allclose', (['est_rates', 'chain.rates'], {'rtol': '(0.1)'}), '(est_rates, chain.rates, rtol=0.1)\n', (14354, 14388), False, 'from numpy.testing import assert_allclose\n'), ((1079, 1102), 'pytest.raises', 'pytest.raises', (['exc_type'], {}), '(exc_type)\n', (1092, 1102), False, 'import pytest\n'), ((1112, 1152), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {'tol': 'tol'}), '(matrix, tol=tol)\n', (1135, 1152), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((1478, 1536), 'unittest.mock.patch', 'patch', (['"""pyqumo.chains.fix_stochastic"""'], {'return_value': 'matrix'}), "('pyqumo.chains.fix_stochastic', return_value=matrix)\n", (1483, 1536), False, 'from unittest.mock import patch, Mock\n'), ((1553, 1593), 'pyqumo.chains.DiscreteTimeMarkovChain', 'DiscreteTimeMarkovChain', (['matrix'], {'tol': 'tol'}), '(matrix, tol=tol)\n', (1576, 1593), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((1635, 1679), 'numpy.testing.assert_allclose', 'assert_allclose', (['fix.call_args[0][0]', 'matrix'], {}), '(fix.call_args[0][0], matrix)\n', (1650, 1679), False, 'from numpy.testing import assert_allclose\n'), ((1688, 1733), 'numpy.testing.assert_allclose', 'assert_allclose', (["fix.call_args[1]['tol']", 'tol'], {}), "(fix.call_args[1]['tol'], tol)\n", (1703, 1733), False, 'from numpy.testing import assert_allclose\n'), ((6871, 6886), 'numpy.asarray', 'np.asarray', (['pmf'], {}), '(pmf)\n', (6881, 6886), True, 'import numpy as np\n'), ((10061, 10084), 'pytest.raises', 'pytest.raises', (['exc_type'], {}), '(exc_type)\n', (10074, 10084), False, 'import pytest\n'), ((10094, 10127), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['matrix'], {}), '(matrix)\n', (10119, 10127), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((10764, 10825), 
'unittest.mock.patch', 'patch', (['"""pyqumo.chains.fix_infinitesimal"""'], {'return_value': 'matrix'}), "('pyqumo.chains.fix_infinitesimal', return_value=matrix)\n", (10769, 10825), False, 'from unittest.mock import patch, Mock\n'), ((10842, 10884), 'pyqumo.chains.ContinuousTimeMarkovChain', 'ContinuousTimeMarkovChain', (['matrix'], {'tol': 'tol'}), '(matrix, tol=tol)\n', (10867, 10884), False, 'from pyqumo.chains import DiscreteTimeMarkovChain, ContinuousTimeMarkovChain\n'), ((10926, 10970), 'numpy.testing.assert_allclose', 'assert_allclose', (['fix.call_args[0][0]', 'matrix'], {}), '(fix.call_args[0][0], matrix)\n', (10941, 10970), False, 'from numpy.testing import assert_allclose\n'), ((10979, 11024), 'numpy.testing.assert_allclose', 'assert_allclose', (["fix.call_args[1]['tol']", 'tol'], {}), "(fix.call_args[1]['tol'], tol)\n", (10994, 11024), False, 'from numpy.testing import assert_allclose\n')] |
"""
Synthetic test 1
A Python program to compute the Synthetic test 1
Distinct SIs and strong nonlinear magnetic base level
This code is released from the paper:
Reliable Euler deconvolution estimates throughout the
vertical derivatives of the total-field anomaly
The program is under the conditions terms in the file README.txt
authors: <NAME> and <NAME>, 2019
email: <EMAIL>, <EMAIL>
"""
"""
Input:
input/synthetic_data.dat - 2d-array with "n" rows by 4 columns:
x-coordinate, y-coordinate, z-coordinate, anomaly. Where "n" rows
correspond to the size of the data.
Parameters:
Size of the moving data window:
winsize - an odd integer number.
Ex.: for a moving data window of 5 x 5 grid points -> winsize = 5
Percentage of the solutions that will be keep:
filt - a float number ranging from 0.0 to 1.0.
Ex.: to keep 10% of the solutions -> filt = 0.1
Structural indices used:
SI_vet - an array that can store any of the four SIs.
Ex.: to test only the SI = 1 -> SI_vet = [1]
to test the four SIs -> SI_vet = [0.01,1,2,3]
The areas to compute the statistics about the mean of the northing,
easting and depth estimates:
area_cla - array defining the four vertices of a polygon
[south,north,west,east]
"""
import numpy as np
import plot_functions as plt_fc
import euler_python as euler
import estimates_statistics as est_stats
# Input data
# Each row of the input file holds: x, y, z coordinates and the
# total-field anomaly; the survey is a regular 120 x 140 grid.
data_input=np.loadtxt('input/synthetic_data.dat')
shape = (120, 140)
area = [0, 24000, 0, 28000]
xi=data_input[:,0]
yi=data_input[:,1]
zi=data_input[:,2]
data=data_input[:,3]
'''
Plot input data - Figure 2d
'''
plt_fc.plot_input_data(data,xi,yi,zi,shape)
'''
These are the two parameters of our methodology for Euler deconvolution:
window size and the percentage of solutions to keep
'''
#moving data window size
winsize=7
#percentage of the solutions that will be keep
filt=0.1
#empty array for multiple SIs
est_classic=[]
#Define below the SIs to be tested
SI_vet=[0.001,1,2,3]
'''
Euler deconvolution for multiple SIs
'''
# Run the moving-window Euler deconvolution once per structural index
# and collect the kept estimates of each run.
for SI in (SI_vet):
    classic = euler.euler_deconv(data,xi,yi,zi,shape,area,SI,winsize,
                                       filt)
    est_classic.append(classic)
#Here finishes Euler deconvolution
'''
Plot Figures 4 and 7 - Selected depth and base level estimates for all SIs
'''
plt_fc.plot_classic(data,est_classic,xi,yi,zi,shape)
'''
Areas used to get the statistics - Defined after the classic plot
south,north,west,east
'''
# Rectangular areas [south, north, west, east] over which statistics of
# the estimates are computed, one output label per area.
area_cla0=[0.,25000,24000,28000]
area_cla1=[9200,25000,15000,20000]
area_cla2=[14000,18000,5000,10000]
area_cla3=[5000,8000,5000,8000]
est_stats.classic(est_classic,area_cla0,SI_vet,'classic_plt0')
est_stats.classic(est_classic,area_cla1,SI_vet,'classic_plt1')
est_stats.classic(est_classic,area_cla2,SI_vet,'classic_plt2')
est_stats.classic(est_classic,area_cla3,SI_vet,'classic_plt3')
est_stats.classic(est_classic,area_cla3,SI_vet,'classic_plt3') | [
"plot_functions.plot_input_data",
"euler_python.euler_deconv",
"estimates_statistics.classic",
"plot_functions.plot_classic",
"numpy.loadtxt"
] | [((1558, 1596), 'numpy.loadtxt', 'np.loadtxt', (['"""input/synthetic_data.dat"""'], {}), "('input/synthetic_data.dat')\n", (1568, 1596), True, 'import numpy as np\n'), ((1770, 1817), 'plot_functions.plot_input_data', 'plt_fc.plot_input_data', (['data', 'xi', 'yi', 'zi', 'shape'], {}), '(data, xi, yi, zi, shape)\n', (1792, 1817), True, 'import plot_functions as plt_fc\n'), ((2499, 2556), 'plot_functions.plot_classic', 'plt_fc.plot_classic', (['data', 'est_classic', 'xi', 'yi', 'zi', 'shape'], {}), '(data, est_classic, xi, yi, zi, shape)\n', (2518, 2556), True, 'import plot_functions as plt_fc\n'), ((2796, 2861), 'estimates_statistics.classic', 'est_stats.classic', (['est_classic', 'area_cla0', 'SI_vet', '"""classic_plt0"""'], {}), "(est_classic, area_cla0, SI_vet, 'classic_plt0')\n", (2813, 2861), True, 'import estimates_statistics as est_stats\n'), ((2860, 2925), 'estimates_statistics.classic', 'est_stats.classic', (['est_classic', 'area_cla1', 'SI_vet', '"""classic_plt1"""'], {}), "(est_classic, area_cla1, SI_vet, 'classic_plt1')\n", (2877, 2925), True, 'import estimates_statistics as est_stats\n'), ((2924, 2989), 'estimates_statistics.classic', 'est_stats.classic', (['est_classic', 'area_cla2', 'SI_vet', '"""classic_plt2"""'], {}), "(est_classic, area_cla2, SI_vet, 'classic_plt2')\n", (2941, 2989), True, 'import estimates_statistics as est_stats\n'), ((2988, 3053), 'estimates_statistics.classic', 'est_stats.classic', (['est_classic', 'area_cla3', 'SI_vet', '"""classic_plt3"""'], {}), "(est_classic, area_cla3, SI_vet, 'classic_plt3')\n", (3005, 3053), True, 'import estimates_statistics as est_stats\n'), ((2235, 2303), 'euler_python.euler_deconv', 'euler.euler_deconv', (['data', 'xi', 'yi', 'zi', 'shape', 'area', 'SI', 'winsize', 'filt'], {}), '(data, xi, yi, zi, shape, area, SI, winsize, filt)\n', (2253, 2303), True, 'import euler_python as euler\n')] |
################################################################################
# Numba-DPPY
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import dpctl
import numpy as np
from numba import njit
import pytest
from numba_dppy.tests.skip_tests import skip_test
# SYCL device filter strings every test is parametrized over
# (OpenCL GPU/CPU and Level Zero GPU).
list_of_filter_strs = [
    "opencl:gpu:0",
    "level0:gpu:0",
    "opencl:cpu:0",
]
@pytest.fixture(params=list_of_filter_strs)
def filter_str(request):
    """Parametrized fixture yielding one device filter string per run."""
    return request.param
list_of_binary_ops = [
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"left_shift",
"right_shift",
]
@pytest.fixture(params=list_of_binary_ops)
def binary_op(request):
return request.param
list_of_unary_ops = [
"bitwise_not",
"invert",
]
@pytest.fixture(params=list_of_unary_ops)
def unary_op(request):
return request.param
list_of_dtypes = [
np.int32,
np.int64,
]
@pytest.fixture(params=list_of_dtypes)
def input_arrays(request):
# The size of input and out arrays to be used
N = 2048
a = np.array(np.random.random(N), request.param)
b = np.array(np.random.random(N), request.param)
return a, b
def test_binary_ops(filter_str, binary_op, input_arrays):
if skip_test(filter_str):
pytest.skip()
a, b = input_arrays
binop = getattr(np, binary_op)
actual = np.empty(shape=a.shape, dtype=a.dtype)
expected = np.empty(shape=a.shape, dtype=a.dtype)
@njit
def f(a, b):
return binop(a, b)
with dpctl.device_context(filter_str):
actual = f(a, b)
expected = binop(a, b)
np.testing.assert_allclose(actual, expected, rtol=1e-5, atol=0)
def test_unary_ops(filter_str, unary_op, input_arrays):
if skip_test(filter_str):
pytest.skip()
a = input_arrays[0]
uop = getattr(np, unary_op)
actual = np.empty(shape=a.shape, dtype=a.dtype)
expected = np.empty(shape=a.shape, dtype=a.dtype)
@njit
def f(a):
return uop(a)
with dpctl.device_context(filter_str):
actual = f(a)
expected = uop(a)
np.testing.assert_allclose(actual, expected, rtol=1e-5, atol=0)
| [
"numba_dppy.tests.skip_tests.skip_test",
"numpy.empty",
"dpctl.device_context",
"pytest.fixture",
"pytest.skip",
"numpy.random.random",
"numpy.testing.assert_allclose"
] | [((1004, 1046), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_filter_strs'}), '(params=list_of_filter_strs)\n', (1018, 1046), False, 'import pytest\n'), ((1220, 1261), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_binary_ops'}), '(params=list_of_binary_ops)\n', (1234, 1261), False, 'import pytest\n'), ((1373, 1413), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_unary_ops'}), '(params=list_of_unary_ops)\n', (1387, 1413), False, 'import pytest\n'), ((1516, 1553), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_dtypes'}), '(params=list_of_dtypes)\n', (1530, 1553), False, 'import pytest\n'), ((1833, 1854), 'numba_dppy.tests.skip_tests.skip_test', 'skip_test', (['filter_str'], {}), '(filter_str)\n', (1842, 1854), False, 'from numba_dppy.tests.skip_tests import skip_test\n'), ((1951, 1989), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (1959, 1989), True, 'import numpy as np\n'), ((2005, 2043), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (2013, 2043), True, 'import numpy as np\n'), ((2200, 2264), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {'rtol': '(1e-05)', 'atol': '(0)'}), '(actual, expected, rtol=1e-05, atol=0)\n', (2226, 2264), True, 'import numpy as np\n'), ((2329, 2350), 'numba_dppy.tests.skip_tests.skip_test', 'skip_test', (['filter_str'], {}), '(filter_str)\n', (2338, 2350), False, 'from numba_dppy.tests.skip_tests import skip_test\n'), ((2444, 2482), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (2452, 2482), True, 'import numpy as np\n'), ((2498, 2536), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (2506, 2536), True, 'import numpy as np\n'), ((2677, 2741), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['actual', 'expected'], {'rtol': '(1e-05)', 'atol': '(0)'}), '(actual, expected, rtol=1e-05, atol=0)\n', (2703, 2741), True, 'import numpy as np\n'), ((1661, 1680), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (1677, 1680), True, 'import numpy as np\n'), ((1714, 1733), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (1730, 1733), True, 'import numpy as np\n'), ((1864, 1877), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1875, 1877), False, 'import pytest\n'), ((2109, 2141), 'dpctl.device_context', 'dpctl.device_context', (['filter_str'], {}), '(filter_str)\n', (2129, 2141), False, 'import dpctl\n'), ((2360, 2373), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (2371, 2373), False, 'import pytest\n'), ((2594, 2626), 'dpctl.device_context', 'dpctl.device_context', (['filter_str'], {}), '(filter_str)\n', (2614, 2626), False, 'import dpctl\n')] |
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems and the Max Planck Institute for Biological
# Cybernetics. All rights reserved.
#
# Contact: <EMAIL>
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from .utils import to_tensor
class VertexJointSelector(nn.Module):
def __init__(self, vertex_ids=None,
use_hands=True,
use_feet_keypoints=True, **kwargs):
super(VertexJointSelector, self).__init__()
extra_joints_idxs = []
face_keyp_idxs = np.array([
vertex_ids['nose'],
vertex_ids['reye'],
vertex_ids['leye'],
vertex_ids['rear'],
vertex_ids['lear']], dtype=np.int64)
extra_joints_idxs = np.concatenate([extra_joints_idxs,
face_keyp_idxs])
if use_feet_keypoints:
feet_keyp_idxs = np.array([vertex_ids['LBigToe'],
vertex_ids['LSmallToe'],
vertex_ids['LHeel'],
vertex_ids['RBigToe'],
vertex_ids['RSmallToe'],
vertex_ids['RHeel']], dtype=np.int32)
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, feet_keyp_idxs])
if use_hands:
self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
tips_idxs = []
for hand_id in ['l', 'r']:
for tip_name in self.tip_names:
tips_idxs.append(vertex_ids[hand_id + tip_name])
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, tips_idxs])
self.register_buffer('extra_joints_idxs',
to_tensor(extra_joints_idxs, dtype=torch.long))
def forward(self, vertices, joints):
extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)
joints = torch.cat([joints, extra_joints], dim=1)
return joints
| [
"torch.index_select",
"numpy.array",
"torch.cat",
"numpy.concatenate"
] | [((1193, 1323), 'numpy.array', 'np.array', (["[vertex_ids['nose'], vertex_ids['reye'], vertex_ids['leye'], vertex_ids[\n 'rear'], vertex_ids['lear']]"], {'dtype': 'np.int64'}), "([vertex_ids['nose'], vertex_ids['reye'], vertex_ids['leye'],\n vertex_ids['rear'], vertex_ids['lear']], dtype=np.int64)\n", (1201, 1323), True, 'import numpy as np\n'), ((1410, 1461), 'numpy.concatenate', 'np.concatenate', (['[extra_joints_idxs, face_keyp_idxs]'], {}), '([extra_joints_idxs, face_keyp_idxs])\n', (1424, 1461), True, 'import numpy as np\n'), ((2601, 2656), 'torch.index_select', 'torch.index_select', (['vertices', '(1)', 'self.extra_joints_idxs'], {}), '(vertices, 1, self.extra_joints_idxs)\n', (2619, 2656), False, 'import torch\n'), ((2674, 2714), 'torch.cat', 'torch.cat', (['[joints, extra_joints]'], {'dim': '(1)'}), '([joints, extra_joints], dim=1)\n', (2683, 2714), False, 'import torch\n'), ((1567, 1741), 'numpy.array', 'np.array', (["[vertex_ids['LBigToe'], vertex_ids['LSmallToe'], vertex_ids['LHeel'],\n vertex_ids['RBigToe'], vertex_ids['RSmallToe'], vertex_ids['RHeel']]"], {'dtype': 'np.int32'}), "([vertex_ids['LBigToe'], vertex_ids['LSmallToe'], vertex_ids[\n 'LHeel'], vertex_ids['RBigToe'], vertex_ids['RSmallToe'], vertex_ids[\n 'RHeel']], dtype=np.int32)\n", (1575, 1741), True, 'import numpy as np\n'), ((1960, 2011), 'numpy.concatenate', 'np.concatenate', (['[extra_joints_idxs, feet_keyp_idxs]'], {}), '([extra_joints_idxs, feet_keyp_idxs])\n', (1974, 2011), True, 'import numpy as np\n'), ((2344, 2390), 'numpy.concatenate', 'np.concatenate', (['[extra_joints_idxs, tips_idxs]'], {}), '([extra_joints_idxs, tips_idxs])\n', (2358, 2390), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
RSR Algorithm by <NAME> <<EMAIL>>
Adapted from DCGAN and E2GAN
"""
import sys, os
import imageio
import cfg
import models_search
# from functions import validate
from utils.utils import set_log_dir, create_logger
# if not os.path.isfile('gan-vae-pretrained-pytorch'):
# os.system('git clone https://github.com/csinva/gan-vae-pretrained-pytorch.git')
# os.chdir('/home/jose/code/20201001_dcgan_rsr/gan-vae-pretrained-pytorch/cifar10_dcgan')
# if not os.path.isfile('pytorch-fid'):
# os.system('git clone https://github.com/mseitzer/pytorch-fid.git')
sys.path.append('pytorch-fid/pytorch_fid')
from inception import InceptionV3
dims = 2048
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
inception_model = InceptionV3([block_idx], normalize_input=False)
# print('Number of model parameters: {}'.format(
# sum([p.data.nelement() for p in inception_model.parameters()])))
# exit()
inception_model.cuda()
inception_model.eval()
"""Download statistics and compute whitening matrix (cholesky of inverse) so that
$$W.(x-\mu) \sim \mathcal{N}(0,I)$$
"""
def preprocess_for_inception(img):
#return (img+1.0)/2.0
return img
import numpy as np
if not os.path.isfile('fid_stats_cifar10_train.npz'):
os.system('wget http://bioinf.jku.at/research/ttur/ttur_stats/fid_stats_cifar10_train.npz')
precomputed_fid_stats = np.load('fid_stats_cifar10_train.npz')
# option 1 cholesky of inverse
# sigma_inverse = np.linalg.inv(precomputed_fid_stats['sigma'])
# W = np.linalg.cholesky(sigma_inverse) # whitening matrix
# option 2 inverse of cholesky
Q = np.linalg.cholesky(precomputed_fid_stats['sigma'])
W = np.linalg.inv(Q)
# Commented out IPython magic to ensure Python compatibility.
import os
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torchvision.datasets as dset
import matplotlib.pyplot as plt
import pylab
import numpy as np
# %load_ext autoreload
# %autoreload 2
num_gpu = 1 if torch.cuda.is_available() else 0
# load the E2GAN models
args = cfg.parse_args()
args.img_size = 32
args.bottom_width = 4
args.gen_model = 'shared_gan_leaky'
args.latent_dim = 128
args.gf_dim = 256
args.g_spectral_norm = False
args.load_path = 'checkpoints/e2gan_cifar.pth'
args.arch = [0, 1, 0, 1, 0, 1, 2, 1, 0, 0, 1, 0, 1, 2] # e2gan architecture is defined this way (see paper)
G = eval('models_search.'+args.gen_model+'.Generator')(args=args).cuda()
G.set_arch(args.arch, cur_stage=2)
# print('Number of model parameters: {}'.format(
# sum([p.data.nelement() for p in G.parameters()])))
# exit()
# load weights
checkpoint_file = args.load_path
assert os.path.exists(checkpoint_file)
checkpoint = torch.load(checkpoint_file)
if 'avg_gen_state_dict' in checkpoint:
G.load_state_dict(checkpoint['avg_gen_state_dict'])
epoch = checkpoint['epoch'] - 1
print(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
else:
G.load_state_dict(checkpoint)
print(f'=> loaded checkpoint {checkpoint_file}')
print(G)
G.train()
description='full_lbs_%i_%i' % (args.lbs_init, args.lbs_end)
print('running experiment %s' % description)
outdir_images = 'outimgs/fakes/%s/' % (description)
outdir_weights = 'weights/%s/' % (description)
os.system('mkdir -p %s' % outdir_images)
os.system('mkdir -p %s' % outdir_weights)
if torch.cuda.is_available():
G = G.cuda()
batch_size = args.gen_batch_size
latent_size = args.latent_dim
small_batch_size = batch_size
large_batch_size = args.lbs_init
large_batch_size_init = large_batch_size
large_batch_size_end = args.lbs_end + batch_size
max_epoch = args.max_epoch
Nsteps = (large_batch_size_end-large_batch_size_init)//small_batch_size
step_length = max_epoch//Nsteps
NS = large_batch_size//small_batch_size
N_rotmat = 4000 # number of random projections
d_img = 2048 # inception feature dimension
G_opt = torch.optim.Adam(G.parameters(), lr=args.g_lr, weight_decay=0) # set LR and weight decay here
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(G_opt, max_epoch)
if args.dataset == "LSUN_bedrooms":
dataset = dset.LSUN(root='.data/',
classes=['bedroom_train'],
transform=transforms.Compose([
# transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif args.dataset == "LSUN_churches":
dataset = dset.LSUN(root='.data/',
classes=['church_outdoor_train'],
transform=transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif args.dataset == "celeba":
dataset = dset.CelebA(root='.data/', download=True,
transform=transforms.Compose([
# transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif args.dataset == "imagenet":
dataset = dset.ImageNet(root='.data/', download=True,
transform=transforms.Compose([
# transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
else:
dataset = dset.CIFAR10(root='.data/', download=True,
transform=transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
dataloader = torch.utils.data.DataLoader(dataset, batch_size=small_batch_size,
shuffle=True, num_workers=2, drop_last=True)
dataloader_iterator = iter(dataloader)
losses = []
try_gt = False # set to True to use real samples as fake samples, helps to get an upper bound on performance
for epoch in range(max_epoch):
scheduler.step()
new_large_batch_size = large_batch_size_init + (epoch//step_length)*small_batch_size
# enarge large_batch_size accoring to lbs_init and lbs_end
if new_large_batch_size != large_batch_size:
large_batch_size = new_large_batch_size
threshold, std_threshold = compute_threshold(large_batch_size)
threshold += args.thresh_std * std_threshold
NS = large_batch_size//small_batch_size
print('epoch %i, new large batch size is %i, threshold is %f, NS is %i' % (epoch, new_large_batch_size, threshold, NS))
new_lbs = 1
else:
new_lbs = 0
if epoch==0: # first iteration uses random projections
rotmat_img = torch.randn(d_img, N_rotmat).cuda()
rotmat_img = rotmat_img/torch.sqrt(torch.sum(rotmat_img**2, dim=0))
elif epoch >0: # following iterations uses pairs and worst projections from previous iteration
with torch.no_grad():
pworst = 1/3.0
# keep 1/3rd of worst projections, add 2/3rd new ones
worst_values_img, worst_index_img = torch.sort(G_loss_all_img, descending=True)
rotmat_img_prev = rotmat_img[:,worst_index_img[:int(N_rotmat*pworst)]]
# rotmatimg will be taken from pairs of gt, output
N_rotmat_new = int(N_rotmat*(1-pworst))
ix_gt = np.random.randint(0,large_batch_size - small_batch_size*new_lbs, N_rotmat_new)
ix_output = np.random.randint(0,large_batch_size - small_batch_size*new_lbs, N_rotmat_new)
vectors_gt = all_gt[ix_gt, :].detach().t().cuda()
vectors_out = all_output_img[ix_output, :].detach().t()
rotmat_img = (vectors_gt-vectors_out)
# worst_image_np = rotmat_img[:,0].cpu().detach().numpy()
# worst_image_np = worst_image_np.reshape(3, 32, 32)
# worst_image_np = ((worst_image_np.transpose((1, 2, 0))/2.0 + .5)*255).astype(np.uint8)
# imageio.imwrite('%s/worst_img_%06i.png' % (outdir_images, epoch), worst_image_np)
# normalize
rotmat_img = rotmat_img/torch.sqrt(torch.sum(rotmat_img**2, dim=0))
rotmat_img = torch.cat((rotmat_img, rotmat_img_prev), dim=1)
print('DEBUG: worst values img', worst_values_img[:15])
# initialize tensors for noise vectors, real data and fake data
all_z = torch.randn(large_batch_size, latent_size).cuda()
all_gt = torch.zeros(large_batch_size, d_img).cuda()
all_output_img = torch.zeros(large_batch_size, d_img).cuda()
gt_images = [] # auxiliary list for use_gt = True
####################################################
# STEP 1. RUN
with torch.no_grad():
for idx in range(NS):
if epoch>=0:
try:
images, _ = next(dataloader_iterator)
except:
dataloader_iterator = iter(dataloader)
images, _ = next(dataloader_iterator)
images = images.cuda()
inception_features_gt = inception_model(images)[0].view(batch_size, -1)
all_gt[idx*batch_size:(idx+1)*batch_size,:] = inception_features_gt
z = all_z[idx*batch_size:(idx+1)*batch_size,:]
if try_gt: # for debugging
try:
images, _ = next(dataloader_iterator)
except:
dataloader_iterator = iter(dataloader)
images, _ = next(dataloader_iterator)
fake_images = images.cuda()
gt_images.append(fake_images)
else:
fake_images = G(z)
# compute inception feature
inception_features = inception_model(preprocess_for_inception(fake_images))[0].view(batch_size, -1)
# all_output_img[idx*batch_size:(idx+1)*batch_size,:] = fake_images.view(batch_size,-1) # for raw pixel values use this
all_output_img[idx*batch_size:(idx+1)*batch_size,:] = inception_features
## finished computing features, now project
with torch.no_grad():
all_output_img_projected = all_output_img.mm(rotmat_img)
all_gt_projected = all_gt.mm(rotmat_img)
####################################################
# STEP 2. SORT
with torch.no_grad():
# move to cpu, sort, move back to gpu
# [_, out_img_sort_ix] = torch.sort(all_output_img_projected.cpu(), dim=0)
# out_img_sort_relative = out_img_sort_ix.argsort(0)
# out_img_sort_relative = out_img_sort_relative.cuda()
[_, out_img_sort_ix] = torch.sort(all_output_img_projected, dim=0)
out_img_sort_relative = out_img_sort_ix.argsort(0)
# [gt_sort_val, _] = torch.sort(all_gt_projected.cpu(), dim=0)
# gt_sort_val = gt_sort_val.cuda()
[gt_sort_val, _] = torch.sort(all_gt_projected, dim=0)
####################################################
# STEP 3. RE-RUN
# initialize gradient
G_opt.zero_grad()
full_batch_loss = 0
G_loss_all_feat = 0
G_loss_all_img = 0
SQRT2 = 1.4142135623731
# now do actual comparison
for idx in range(0,large_batch_size,small_batch_size):
z = all_z[idx:idx+small_batch_size,:]
if try_gt:
print('try gt!')
fake_images = gt_images[idx//small_batch_size]
else:
fake_images = G(z)
# compute inception feature
inception_features = inception_model(preprocess_for_inception(fake_images))[0].view(batch_size, -1)
output_img = inception_features.mm(rotmat_img) # project
# get the relative position of the output
rel_ix_img = out_img_sort_relative[idx:idx+small_batch_size,:]
# now get the equivalent positions of the gt
gt = gt_sort_val.gather(0, rel_ix_img).cuda()
diff_img = (gt-output_img)**2
threshold_img = 1e-4 # don't penalize too small differences, this is normal even for samples of the same distribution. Trying other values for this hyperparameter might be interesing
diff_img = (torch.clamp(diff_img, min=threshold_img)-threshold_img)
G_loss_row_img = torch.sum(diff_img, dim=0) / large_batch_size
G_loss_img = torch.sum(G_loss_row_img)/ rotmat_img.shape[1]
# print('DEBUG: dist loss img: %f' % (G_loss_img.item()))
# print('DEBUG: --')
G_loss = G_loss_img
if not try_gt:
G_loss.backward()
G_loss_all_img += G_loss_row_img.detach().cpu()
full_batch_loss += G_loss.item()
if not try_gt: #epoch>1:
G_opt.step()
losses.append(full_batch_loss)
## RSR ENDS HERE
# what follows is for logging/saving/debugging
if 1:
print('DEBUG: large_batch_size', large_batch_size, 'epoch', epoch, 'loss', losses[-1], 'lr', scheduler.get_lr())
if epoch % 50 ==0 and epoch >0:
#if epoch % 1 ==0 and epoch >0:
if epoch % 100 == 0:
# save model
torch.save(G.state_dict(), '%s/G_%06i.pth' % (outdir_weights, epoch))
count_imgs = 0
G.eval()
with torch.no_grad():
Nb = 11000//batch_size
for i in range(Nb):
print('processing batch %i of %i' % (i, Nb))
z = torch.randn(batch_size, latent_size).cuda()
fake_images = G(z)
fake_images_np = fake_images.cpu().detach().numpy()
fake_images_np = fake_images_np.reshape(fake_images_np.shape[0], 3, 32, 32)
fake_images_np = ((fake_images_np.transpose((0, 2, 3, 1))/2.0 + .5)*255).astype(np.uint8)
for i in range(batch_size):
imageio.imwrite('%s/img_%06i.png' % (outdir_images, count_imgs), fake_images_np[i])
count_imgs+=1
G.train()
print('wrote images to %s' % outdir_images)
torch.cuda.empty_cache()
###################
# Compute FID score
# requires https://github.com/mseitzer/pytorch-fid
fid_command = 'python ../pytorch-fid/pytorch_fid/fid_score.py %s fid_stats_cifar10_train.npz --device cuda:0 ' % outdir_images
os.system(fid_command)
# END RSR
| [
"numpy.load",
"torch.randn",
"torch.cat",
"os.path.isfile",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torch.no_grad",
"sys.path.append",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.zeros",
"numpy.lin... | [((590, 632), 'sys.path.append', 'sys.path.append', (['"""pytorch-fid/pytorch_fid"""'], {}), "('pytorch-fid/pytorch_fid')\n", (605, 632), False, 'import sys, os\n'), ((748, 795), 'inception.InceptionV3', 'InceptionV3', (['[block_idx]'], {'normalize_input': '(False)'}), '([block_idx], normalize_input=False)\n', (759, 795), False, 'from inception import InceptionV3\n'), ((1364, 1402), 'numpy.load', 'np.load', (['"""fid_stats_cifar10_train.npz"""'], {}), "('fid_stats_cifar10_train.npz')\n", (1371, 1402), True, 'import numpy as np\n'), ((1594, 1644), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (["precomputed_fid_stats['sigma']"], {}), "(precomputed_fid_stats['sigma'])\n", (1612, 1644), True, 'import numpy as np\n'), ((1649, 1665), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (1662, 1665), True, 'import numpy as np\n'), ((2125, 2141), 'cfg.parse_args', 'cfg.parse_args', ([], {}), '()\n', (2139, 2141), False, 'import cfg\n'), ((2728, 2759), 'os.path.exists', 'os.path.exists', (['checkpoint_file'], {}), '(checkpoint_file)\n', (2742, 2759), False, 'import os\n'), ((2773, 2800), 'torch.load', 'torch.load', (['checkpoint_file'], {}), '(checkpoint_file)\n', (2783, 2800), False, 'import torch\n'), ((3352, 3392), 'os.system', 'os.system', (["('mkdir -p %s' % outdir_images)"], {}), "('mkdir -p %s' % outdir_images)\n", (3361, 3392), False, 'import os\n'), ((3393, 3434), 'os.system', 'os.system', (["('mkdir -p %s' % outdir_weights)"], {}), "('mkdir -p %s' % outdir_weights)\n", (3402, 3434), False, 'import os\n'), ((3443, 3468), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3466, 3468), False, 'import torch\n'), ((4093, 4153), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['G_opt', 'max_epoch'], {}), '(G_opt, max_epoch)\n', (4135, 4153), False, 'import torch\n'), ((6016, 6131), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 
'small_batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'drop_last': '(True)'}), '(dataset, batch_size=small_batch_size, shuffle=\n True, num_workers=2, drop_last=True)\n', (6043, 6131), False, 'import torch\n'), ((1198, 1243), 'os.path.isfile', 'os.path.isfile', (['"""fid_stats_cifar10_train.npz"""'], {}), "('fid_stats_cifar10_train.npz')\n", (1212, 1243), False, 'import os\n'), ((1248, 1349), 'os.system', 'os.system', (['"""wget http://bioinf.jku.at/research/ttur/ttur_stats/fid_stats_cifar10_train.npz"""'], {}), "(\n 'wget http://bioinf.jku.at/research/ttur/ttur_stats/fid_stats_cifar10_train.npz'\n )\n", (1257, 1349), False, 'import os\n'), ((2060, 2085), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2083, 2085), False, 'import torch\n'), ((8930, 8945), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8943, 8945), False, 'import torch\n'), ((10189, 10204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10202, 10204), False, 'import torch\n'), ((10399, 10414), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10412, 10414), False, 'import torch\n'), ((10685, 10728), 'torch.sort', 'torch.sort', (['all_output_img_projected'], {'dim': '(0)'}), '(all_output_img_projected, dim=0)\n', (10695, 10728), False, 'import torch\n'), ((10918, 10953), 'torch.sort', 'torch.sort', (['all_gt_projected'], {'dim': '(0)'}), '(all_gt_projected, dim=0)\n', (10928, 10953), False, 'import torch\n'), ((13859, 13883), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13881, 13883), False, 'import torch\n'), ((14124, 14146), 'os.system', 'os.system', (['fid_command'], {}), '(fid_command)\n', (14133, 14146), False, 'import os\n'), ((8625, 8667), 'torch.randn', 'torch.randn', (['large_batch_size', 'latent_size'], {}), '(large_batch_size, latent_size)\n', (8636, 8667), False, 'import torch\n'), ((8686, 8722), 'torch.zeros', 'torch.zeros', (['large_batch_size', 'd_img'], {}), '(large_batch_size, d_img)\n', (8697, 8722), False, 
'import torch\n'), ((8749, 8785), 'torch.zeros', 'torch.zeros', (['large_batch_size', 'd_img'], {}), '(large_batch_size, d_img)\n', (8760, 8785), False, 'import torch\n'), ((12188, 12228), 'torch.clamp', 'torch.clamp', (['diff_img'], {'min': 'threshold_img'}), '(diff_img, min=threshold_img)\n', (12199, 12228), False, 'import torch\n'), ((12269, 12295), 'torch.sum', 'torch.sum', (['diff_img'], {'dim': '(0)'}), '(diff_img, dim=0)\n', (12278, 12295), False, 'import torch\n'), ((12335, 12360), 'torch.sum', 'torch.sum', (['G_loss_row_img'], {}), '(G_loss_row_img)\n', (12344, 12360), False, 'import torch\n'), ((13173, 13188), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13186, 13188), False, 'import torch\n'), ((7044, 7072), 'torch.randn', 'torch.randn', (['d_img', 'N_rotmat'], {}), '(d_img, N_rotmat)\n', (7055, 7072), False, 'import torch\n'), ((7120, 7153), 'torch.sum', 'torch.sum', (['(rotmat_img ** 2)'], {'dim': '(0)'}), '(rotmat_img ** 2, dim=0)\n', (7129, 7153), False, 'import torch\n'), ((7259, 7274), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7272, 7274), False, 'import torch\n'), ((7402, 7445), 'torch.sort', 'torch.sort', (['G_loss_all_img'], {'descending': '(True)'}), '(G_loss_all_img, descending=True)\n', (7412, 7445), False, 'import torch\n'), ((7637, 7722), 'numpy.random.randint', 'np.random.randint', (['(0)', '(large_batch_size - small_batch_size * new_lbs)', 'N_rotmat_new'], {}), '(0, large_batch_size - small_batch_size * new_lbs,\n N_rotmat_new)\n', (7654, 7722), True, 'import numpy as np\n'), ((7733, 7818), 'numpy.random.randint', 'np.random.randint', (['(0)', '(large_batch_size - small_batch_size * new_lbs)', 'N_rotmat_new'], {}), '(0, large_batch_size - small_batch_size * new_lbs,\n N_rotmat_new)\n', (7750, 7818), True, 'import numpy as np\n'), ((8392, 8439), 'torch.cat', 'torch.cat', (['(rotmat_img, rotmat_img_prev)'], {'dim': '(1)'}), '((rotmat_img, rotmat_img_prev), dim=1)\n', (8401, 8439), False, 'import torch\n'), ((4413, 4434), 
'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4432, 4434), False, 'from torchvision import transforms\n'), ((4463, 4517), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (4483, 4517), False, 'from torchvision import transforms\n'), ((13677, 13764), 'imageio.imwrite', 'imageio.imwrite', (["('%s/img_%06i.png' % (outdir_images, count_imgs))", 'fake_images_np[i]'], {}), "('%s/img_%06i.png' % (outdir_images, count_imgs),\n fake_images_np[i])\n", (13692, 13764), False, 'import imageio\n'), ((4765, 4794), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (4782, 4794), False, 'from torchvision import transforms\n'), ((4823, 4844), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4842, 4844), False, 'from torchvision import transforms\n'), ((4873, 4927), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (4893, 4927), False, 'from torchvision import transforms\n'), ((8341, 8374), 'torch.sum', 'torch.sum', (['(rotmat_img ** 2)'], {'dim': '(0)'}), '(rotmat_img ** 2, dim=0)\n', (8350, 8374), False, 'import torch\n'), ((13308, 13344), 'torch.randn', 'torch.randn', (['batch_size', 'latent_size'], {}), '(batch_size, latent_size)\n', (13319, 13344), False, 'import torch\n'), ((5177, 5198), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5196, 5198), False, 'from torchvision import transforms\n'), ((5227, 5281), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (5247, 5281), False, 'from torchvision import transforms\n'), ((5535, 5556), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5554, 5556), False, 'from torchvision import 
transforms\n'), ((5585, 5639), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (5605, 5639), False, 'from torchvision import transforms\n'), ((5813, 5834), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (5830, 5834), False, 'from torchvision import transforms\n'), ((5863, 5884), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5882, 5884), False, 'from torchvision import transforms\n'), ((5913, 5967), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (5933, 5967), False, 'from torchvision import transforms\n')] |
import numpy as np
import spiderman
def spiderman_rock(params, t, etc = []):
"""
This function generates the Kreidberg & Loeb 2016 phase curve model
Parameters
----------
t0: time of conjunction
per: orbital period
a_abs: semi-major axis (AU)
cos(i): cosine of the orbital inclination
ecc: eccentricity
w: arg of periastron (deg)
rp: planet radius (stellar radii)
a: semi-major axis (stellar radii)
p_u1: planet linear limb darkening parameter
p_u2: planet quadratic limb darkening
T_s: stellar Teff
l1: short wavelength (m)
l2: long wavelength (m)
insol: insolation (relative to Earth)
albedo: Bond albedo
redist: fraction of incident flux redistributed to nightside
npoints: number of phase bins for light curve interpolation
Returns
-------
This function returns planet-to-star flux at each time t.
Revisions
---------
2016-02-25 <NAME>
<EMAIL>
Original version
TODO add response function and nlayers to etc
"""
p = spiderman.ModelParams(brightness_model = 'kreidberg', stellar_model = 'blackbody')
p.n_layers = 5
p.t0 = params[0]
p.per = params[1]
p.a_abs = params[2]
p.inc = np.arccos(params[3])*180./np.pi
p.ecc = params[4]
p.w = params[5]
p.rp = params[6]
p.a = params[7]
p.p_u1 = params[8]
p.p_u2 = params[9]
p.T_s = params[10]
p.l1 = params[11]
p.l2 = params[12]
p.insol = 1361.*params[13] #W/m^2
p.albedo = params[14]
p.redist = params[15]
npoints = int(params[16])
#TODO: add filter path to etc
#p.filter = "/Users/lkreidberg/Desktop/Util/Throughput/spitzer_irac_ch2.txt"
#calculate light curve over npoints phase bins
phase = (t - p.t0)/p.per
phase -= np.round(phase)
phase_bin = np.linspace(phase.min(), phase.max(), npoints)
t_bin = phase_bin*p.per + p.t0
lc_bin = spiderman.web.lightcurve(t_bin, p)
#interpolate the binned light curve to the original t array
lc = np.interp(phase, phase_bin, lc_bin)
return lc
| [
"spiderman.ModelParams",
"numpy.interp",
"spiderman.web.lightcurve",
"numpy.arccos",
"numpy.round"
] | [((1104, 1182), 'spiderman.ModelParams', 'spiderman.ModelParams', ([], {'brightness_model': '"""kreidberg"""', 'stellar_model': '"""blackbody"""'}), "(brightness_model='kreidberg', stellar_model='blackbody')\n", (1125, 1182), False, 'import spiderman\n'), ((1956, 1971), 'numpy.round', 'np.round', (['phase'], {}), '(phase)\n', (1964, 1971), True, 'import numpy as np\n'), ((2082, 2116), 'spiderman.web.lightcurve', 'spiderman.web.lightcurve', (['t_bin', 'p'], {}), '(t_bin, p)\n', (2106, 2116), False, 'import spiderman\n'), ((2189, 2224), 'numpy.interp', 'np.interp', (['phase', 'phase_bin', 'lc_bin'], {}), '(phase, phase_bin, lc_bin)\n', (2198, 2224), True, 'import numpy as np\n'), ((1327, 1347), 'numpy.arccos', 'np.arccos', (['params[3]'], {}), '(params[3])\n', (1336, 1347), True, 'import numpy as np\n')] |
""" test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
    """Scalar-access (.at / .iat) smoke tests driven by the fixture
    frames/series that the shared ``Base`` class provides per dtype kind."""

    @pytest.mark.parametrize("kind", ["series", "frame"])
    def test_at_and_iat_get(self, kind):
        # Reads through .at/.iat must agree with Base.get_value for every
        # index the fixture object generates.
        def _check(f, func, values=False):
            # f is None when the fixture lacks that dtype flavour; skip it.
            if f is not None:
                indices = self.generate_indices(f, values)
                for i in indices:
                    result = getattr(f, func)[i]
                    expected = self.get_value(func, f, i, values)
                    tm.assert_almost_equal(result, expected)

        d = getattr(self, kind)

        # iat
        for f in [d["ints"], d["uints"]]:
            _check(f, "iat", values=True)

        # .iat is positional-only: non-integer indexers must raise.
        for f in [d["labels"], d["ts"], d["floats"]]:
            if f is not None:
                msg = "iAt based indexing can only have integer indexers"
                with pytest.raises(ValueError, match=msg):
                    self.check_values(f, "iat")

        # at
        for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
            _check(f, "at")

    @pytest.mark.parametrize("kind", ["series", "frame"])
    def test_at_and_iat_set(self, kind):
        # Writes through .at/.iat must land on exactly the addressed element.
        def _check(f, func, values=False):
            if f is not None:
                indices = self.generate_indices(f, values)
                for i in indices:
                    getattr(f, func)[i] = 1
                    expected = self.get_value(func, f, i, values)
                    tm.assert_almost_equal(expected, 1)

        d = getattr(self, kind)

        # iat
        for f in [d["ints"], d["uints"]]:
            _check(f, "iat", values=True)

        # Setting via .iat with label-style indexers must also raise.
        for f in [d["labels"], d["ts"], d["floats"]]:
            if f is not None:
                msg = "iAt based indexing can only have integer indexers"
                with pytest.raises(ValueError, match=msg):
                    _check(f, "iat")

        # at
        for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
            _check(f, "at")
class TestAtAndiAT:
    """Standalone .at / .iat behaviour tests (no ``Base`` fixtures needed)."""

    # at and iat tests that don't need Base class
    def test_float_index_at_iat(self):
        # Label-based .at and positional .iat agree on a float index.
        ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
        for el, item in ser.items():
            assert ser.at[el] == item
        for i in range(len(ser)):
            assert ser.iat[i] == i + 1

    def test_at_iat_coercion(self):
        # as timestamp is not a tuple!
        dates = date_range("1/1/2000", periods=8)
        df = DataFrame(np.random.randn(8, 4), index=dates, columns=["A", "B", "C", "D"])
        s = df["A"]
        result = s.at[dates[5]]
        xp = s.values[5]
        assert result == xp
        # GH 7729
        # make sure we are boxing the returns: datetime64/timedelta64 scalars
        # come back as Timestamp / Timedelta, not raw numpy scalars.
        s = Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]")
        expected = Timestamp("2014-02-02")
        for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
            result = r()
            assert result == expected
        s = Series(["1 days", "2 days"], dtype="timedelta64[ns]")
        expected = Timedelta("2 days")
        for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
            result = r()
            assert result == expected

    def test_iat_invalid_args(self):
        # NOTE(review): placeholder — no invalid-argument cases are
        # exercised here yet.
        pass

    def test_imethods_with_dups(self):
        # GH6493
        # iat/iloc with dups
        s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
        result = s.iloc[2]
        assert result == 2
        result = s.iat[2]
        assert result == 2
        # Out-of-bounds positional access raises IndexError, both signs.
        msg = "index 10 is out of bounds for axis 0 with size 5"
        with pytest.raises(IndexError, match=msg):
            s.iat[10]
        msg = "index -10 is out of bounds for axis 0 with size 5"
        with pytest.raises(IndexError, match=msg):
            s.iat[-10]
        result = s.iloc[[2, 3]]
        expected = Series([2, 3], [2, 2], dtype="int64")
        tm.assert_series_equal(result, expected)
        df = s.to_frame()
        result = df.iloc[2]
        expected = Series(2, index=[0], name=2)
        tm.assert_series_equal(result, expected)
        result = df.iat[2, 0]
        assert result == 2

    def test_frame_at_with_duplicate_axes(self):
        # GH#33041
        # With duplicated column labels, .at returns all matches as a Series.
        arr = np.random.randn(6).reshape(3, 2)
        df = DataFrame(arr, columns=["A", "A"])
        result = df.at[0, "A"]
        expected = df.iloc[0]
        tm.assert_series_equal(result, expected)
        result = df.T.at["A", 0]
        tm.assert_series_equal(result, expected)
        # setter
        df.at[1, "A"] = 2
        expected = Series([2.0, 2.0], index=["A", "A"], name=1)
        tm.assert_series_equal(df.iloc[1], expected)

    def test_at_getitem_dt64tz_values(self):
        # gh-15822
        # tz-aware datetime values round-trip identically through .loc and .at
        df = DataFrame(
            {
                "name": ["John", "Anderson"],
                "date": [
                    Timestamp(2017, 3, 13, 13, 32, 56),
                    Timestamp(2017, 2, 16, 12, 10, 3),
                ],
            }
        )
        df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
        expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
        result = df.loc[0, "date"]
        assert result == expected
        result = df.at[0, "date"]
        assert result == expected

    def test_mixed_index_at_iat_loc_iloc_series(self):
        # GH 19860
        # Index mixing strings and ints: label access works per element,
        # but an int label that is absent raises KeyError (no positional fallback).
        s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
        for el, item in s.items():
            assert s.at[el] == s.loc[el] == item
        for i in range(len(s)):
            assert s.iat[i] == s.iloc[i] == i + 1
        with pytest.raises(KeyError, match="^4$"):
            s.at[4]
        with pytest.raises(KeyError, match="^4$"):
            s.loc[4]

    def test_mixed_index_at_iat_loc_iloc_dataframe(self):
        # GH 19860
        df = DataFrame(
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
        )
        for rowIdx, row in df.iterrows():
            for el, item in row.items():
                assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
        for row in range(2):
            for i in range(5):
                assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
        with pytest.raises(KeyError, match="^3$"):
            df.at[0, 3]
        with pytest.raises(KeyError, match="^3$"):
            df.loc[0, 3]

    def test_iat_setter_incompatible_assignment(self):
        # GH 23236
        # Assigning None into an int column upcasts to object, not an error.
        result = DataFrame({"a": [0, 1], "b": [4, 5]})
        result.iat[0, 0] = None
        expected = DataFrame({"a": [None, 1], "b": [4, 5]})
        tm.assert_frame_equal(result, expected)
def test_iat_dont_wrap_object_datetimelike():
    # GH#32809 .iat calls go through DataFrame._get_value, should not
    # call maybe_box_datetimelike
    stamps = date_range("2016-01-01", periods=3)
    deltas = stamps - stamps
    col_a = Series(stamps.to_pydatetime(), dtype=object)
    col_b = Series(deltas.to_pytimedelta(), dtype=object)
    frame = DataFrame({"A": col_a, "B": col_b})
    assert (frame.dtypes == object).all()

    # Every scalar accessor must hand back the stored stdlib object itself,
    # never a pandas-boxed Timestamp/Timedelta.
    first_row = (frame.at[0, "A"], frame.iat[0, 0], frame.loc[0, "A"], frame.iloc[0, 0])
    for value in first_row:
        assert value is col_a[0]
        assert isinstance(value, datetime)
        assert not isinstance(value, Timestamp)

    second_row = (frame.at[1, "B"], frame.iat[1, 1], frame.loc[1, "B"], frame.iloc[1, 1])
    for value in second_row:
        assert value is col_b[1]
        assert isinstance(value, timedelta)
        assert not isinstance(value, Timedelta)
def test_at_with_tuple_index_get():
    # GH 26989
    # DataFrame.at getter works with Index of tuples
    frame = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
    assert frame.index.nlevels == 1
    assert frame.at[(1, 2), "a"] == 1

    # Series.at getter works with Index of tuples
    column = frame["a"]
    assert column.index.nlevels == 1
    assert column.at[(1, 2)] == 1
def test_at_with_tuple_index_set():
    # GH 26989
    # DataFrame.at setter works with Index of tuples
    frame = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
    assert frame.index.nlevels == 1
    frame.at[(1, 2), "a"] = 2
    assert frame.at[(1, 2), "a"] == 2

    # Series.at setter works with Index of tuples
    column = frame["a"]
    assert column.index.nlevels == 1
    column.at[1, 2] = 3
    assert column.at[1, 2] == 3
class TestMultiIndexScalar:
    """Scalar .at/.loc access against a two-level MultiIndex."""

    def test_multiindex_at_get(self):
        # GH 26989
        # DataFrame.at and DataFrame.loc getter works with MultiIndex
        frame = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
        assert frame.index.nlevels == 2
        assert frame.at[(1, 3), "a"] == 1
        assert frame.loc[(1, 3), "a"] == 1

        # Series.at and Series.loc getter works with MultiIndex
        column = frame["a"]
        assert column.index.nlevels == 2
        assert column.at[1, 3] == 1
        assert column.loc[1, 3] == 1

    def test_multiindex_at_set(self):
        # GH 26989
        # DataFrame.at and DataFrame.loc setter works with MultiIndex
        frame = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
        assert frame.index.nlevels == 2
        frame.at[(1, 3), "a"] = 3
        assert frame.at[(1, 3), "a"] == 3
        frame.loc[(1, 3), "a"] = 4
        assert frame.loc[(1, 3), "a"] == 4

        # Series.at and Series.loc setter works with MultiIndex
        column = frame["a"]
        assert column.index.nlevels == 2
        column.at[1, 3] = 5
        assert column.at[1, 3] == 5
        column.loc[1, 3] = 6
        assert column.loc[1, 3] == 6

    def test_multiindex_at_get_one_level(self):
        # GH#38053
        # A single-level "MultiIndex-style" construction still supports .at.
        flagged = Series((0, 1), index=[[False, True]])
        assert flagged.at[False] == 0
| [
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.date_range",
"pandas._testing.assert_almost_equal",
"numpy.random.randn",
"pandas._testing.assert_series_equal",
"pytest.raises",
"pandas.Series",
"pandas._testing.assert_frame_equal",
"pandas.Timedelta",
"pytest.mark.parametrize"
] | [((342, 394), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['series', 'frame']"], {}), "('kind', ['series', 'frame'])\n", (365, 394), False, 'import pytest\n'), ((1302, 1354), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['series', 'frame']"], {}), "('kind', ['series', 'frame'])\n", (1325, 1354), False, 'import pytest\n'), ((6982, 7017), 'pandas.date_range', 'date_range', (['"""2016-01-01"""'], {'periods': '(3)'}), "('2016-01-01', periods=3)\n", (6992, 7017), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((7153, 7185), 'pandas.DataFrame', 'DataFrame', (["{'A': ser, 'B': ser2}"], {}), "({'A': ser, 'B': ser2})\n", (7162, 7185), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((7754, 7802), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2]}"], {'index': '[(1, 2), (3, 4)]'}), "({'a': [1, 2]}, index=[(1, 2), (3, 4)])\n", (7763, 7802), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((8129, 8177), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2]}"], {'index': '[(1, 2), (3, 4)]'}), "({'a': [1, 2]}, index=[(1, 2), (3, 4)])\n", (8138, 8177), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((2361, 2401), 'pandas.Series', 'Series', (['[1, 2, 3]'], {'index': '[0.1, 0.2, 0.3]'}), '([1, 2, 3], index=[0.1, 0.2, 0.3])\n', (2367, 2401), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((2643, 2676), 'pandas.date_range', 'date_range', (['"""1/1/2000"""'], {'periods': '(8)'}), "('1/1/2000', periods=8)\n", (2653, 2676), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((2949, 3009), 'pandas.Series', 'Series', (["['2014-01-01', '2014-02-02']"], {'dtype': '"""datetime64[ns]"""'}), "(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')\n", (2955, 3009), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, 
date_range\n'), ((3029, 3052), 'pandas.Timestamp', 'Timestamp', (['"""2014-02-02"""'], {}), "('2014-02-02')\n", (3038, 3052), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((3186, 3239), 'pandas.Series', 'Series', (["['1 days', '2 days']"], {'dtype': '"""timedelta64[ns]"""'}), "(['1 days', '2 days'], dtype='timedelta64[ns]')\n", (3192, 3239), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((3259, 3278), 'pandas.Timedelta', 'Timedelta', (['"""2 days"""'], {}), "('2 days')\n", (3268, 3278), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((4043, 4080), 'pandas.Series', 'Series', (['[2, 3]', '[2, 2]'], {'dtype': '"""int64"""'}), "([2, 3], [2, 2], dtype='int64')\n", (4049, 4080), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((4089, 4129), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4111, 4129), True, 'import pandas._testing as tm\n'), ((4204, 4232), 'pandas.Series', 'Series', (['(2)'], {'index': '[0]', 'name': '(2)'}), '(2, index=[0], name=2)\n', (4210, 4232), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((4241, 4281), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4263, 4281), True, 'import pandas._testing as tm\n'), ((4469, 4503), 'pandas.DataFrame', 'DataFrame', (['arr'], {'columns': "['A', 'A']"}), "(arr, columns=['A', 'A'])\n", (4478, 4503), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((4575, 4615), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (4597, 4615), True, 'import pandas._testing as tm\n'), ((4658, 4698), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, 
expected)\n', (4680, 4698), True, 'import pandas._testing as tm\n'), ((4762, 4806), 'pandas.Series', 'Series', (['[2.0, 2.0]'], {'index': "['A', 'A']", 'name': '(1)'}), "([2.0, 2.0], index=['A', 'A'], name=1)\n", (4768, 4806), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((4815, 4859), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['df.iloc[1]', 'expected'], {}), '(df.iloc[1], expected)\n', (4837, 4859), True, 'import pandas._testing as tm\n'), ((5273, 5330), 'pandas.Timestamp', 'Timestamp', (['"""2017-03-13 13:32:56+0800"""'], {'tz': '"""Asia/Shanghai"""'}), "('2017-03-13 13:32:56+0800', tz='Asia/Shanghai')\n", (5282, 5330), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((5557, 5609), 'pandas.Series', 'Series', (['[1, 2, 3, 4, 5]'], {'index': "['a', 'b', 'c', 1, 2]"}), "([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])\n", (5563, 5609), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((6011, 6087), 'pandas.DataFrame', 'DataFrame', (['[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]'], {'columns': "['a', 'b', 'c', 1, 2]"}), "([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=['a', 'b', 'c', 1, 2])\n", (6020, 6087), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((6641, 6678), 'pandas.DataFrame', 'DataFrame', (["{'a': [0, 1], 'b': [4, 5]}"], {}), "({'a': [0, 1], 'b': [4, 5]})\n", (6650, 6678), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((6730, 6770), 'pandas.DataFrame', 'DataFrame', (["{'a': [None, 1], 'b': [4, 5]}"], {}), "({'a': [None, 1], 'b': [4, 5]})\n", (6739, 6770), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((6779, 6818), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (6800, 6818), True, 'import pandas._testing as tm\n'), ((8608, 8656), 'pandas.DataFrame', 
'DataFrame', (["{'a': [1, 2]}"], {'index': '[[1, 2], [3, 4]]'}), "({'a': [1, 2]}, index=[[1, 2], [3, 4]])\n", (8617, 8656), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((9118, 9166), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2]}"], {'index': '[[1, 2], [3, 4]]'}), "({'a': [1, 2]}, index=[[1, 2], [3, 4]])\n", (9127, 9166), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((9688, 9725), 'pandas.Series', 'Series', (['(0, 1)'], {'index': '[[False, True]]'}), '((0, 1), index=[[False, True]])\n', (9694, 9725), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((2700, 2721), 'numpy.random.randn', 'np.random.randn', (['(8)', '(4)'], {}), '(8, 4)\n', (2715, 2721), True, 'import numpy as np\n'), ((3791, 3827), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': 'msg'}), '(IndexError, match=msg)\n', (3804, 3827), False, 'import pytest\n'), ((3930, 3966), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': 'msg'}), '(IndexError, match=msg)\n', (3943, 3966), False, 'import pytest\n'), ((5790, 5826), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""^4$"""'}), "(KeyError, match='^4$')\n", (5803, 5826), False, 'import pytest\n'), ((5861, 5897), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""^4$"""'}), "(KeyError, match='^4$')\n", (5874, 5897), False, 'import pytest\n'), ((6411, 6447), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""^3$"""'}), "(KeyError, match='^3$')\n", (6424, 6447), False, 'import pytest\n'), ((6486, 6522), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""^3$"""'}), "(KeyError, match='^3$')\n", (6499, 6522), False, 'import pytest\n'), ((4423, 4441), 'numpy.random.randn', 'np.random.randn', (['(6)'], {}), '(6)\n', (4438, 4441), True, 'import numpy as np\n'), ((738, 778), 'pandas._testing.assert_almost_equal', 'tm.assert_almost_equal', (['result', 'expected'], {}), '(result, 
expected)\n', (760, 778), True, 'import pandas._testing as tm\n'), ((1091, 1127), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (1104, 1127), False, 'import pytest\n'), ((1693, 1728), 'pandas._testing.assert_almost_equal', 'tm.assert_almost_equal', (['expected', '(1)'], {}), '(expected, 1)\n', (1715, 1728), True, 'import pandas._testing as tm\n'), ((2041, 2077), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2054, 2077), False, 'import pytest\n'), ((5055, 5089), 'pandas.Timestamp', 'Timestamp', (['(2017)', '(3)', '(13)', '(13)', '(32)', '(56)'], {}), '(2017, 3, 13, 13, 32, 56)\n', (5064, 5089), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n'), ((5111, 5144), 'pandas.Timestamp', 'Timestamp', (['(2017)', '(2)', '(16)', '(12)', '(10)', '(3)'], {}), '(2017, 2, 16, 12, 10, 3)\n', (5120, 5144), False, 'from pandas import DataFrame, Series, Timedelta, Timestamp, date_range\n')] |
import argparse
import time
import cv2
import torch
import pandas as pd
import numpy as np
from conda.exports import get_index
from torch import optim
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from tqdm import tqdm
from model import *
dice_total_list = []
rvd_total_list = []
jaccard_total_list = []
voe_total_list = []
sen_total_list = []
centerlist = []
widthlist = []
def get_data(i):
    """Return the (image_path, mask_path) pair of the i-th test sample.

    :param i: index of the sample within the test split of the current
        (center, width) window (module-level globals).
    :return: tuple ``(image_path, mask_path)``.
    """
    import dataset
    # make_dataset yields (image_path, mask_path) pairs; index the pair
    # directly instead of materializing two parallel lists on every call.
    imgs = dataset.make_dataset(r"E:/LITS/tumour/%d-%d/test" % (center, width))
    return imgs[i][0], imgs[i][1]
def train_model(model, criterion, optimizer, dataload, num_epochs=30):
    """Train ``model``, checkpointing weights and loss history every epoch.

    After each epoch the weights are saved under the per-(center, width)
    weights folder, the cumulative loss history is flushed to CSV, and the
    new checkpoint is evaluated via ``test``.  Training stops early once
    the epoch loss drops below 0.1.

    :param model: network to optimize.
    :param criterion: loss applied to (outputs, labels).
    :param optimizer: optimizer stepping the model's parameters.
    :param dataload: DataLoader yielding (input, label) batches.
    :param num_epochs: maximum number of epochs (default 30).
    :return: the trained model.
    """
    # BUG FIX: the original called ``x.to(torch.device)`` — passing the
    # ``torch.device`` *class* instead of a device instance raises
    # TypeError.  Resolve a concrete device once up front.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    train_loss = []
    for epoch in range(num_epochs):
        dt_size = len(dataload.dataset)
        epoch_loss = 0
        num_iter = (dt_size - 1) // dataload.batch_size + 1
        with tqdm(total=num_iter, ncols=80, desc="训练epoch %d/%d" % (epoch + 1, num_epochs)) as t:
            for x, y in dataload:
                t.set_postfix(loss='{:^7.3f}'.format(epoch_loss))
                inputs = x.to(device)
                labels = y.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward / backward / update
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
                t.update()
        torch.save(model.state_dict(), r"E:/LITS/tumour/%d-%d/weights/unet_weights_%d.pth" % (center, width, epoch))
        train_loss.append(epoch_loss)
        train_loss_csv = pd.DataFrame(data=train_loss)
        train_loss_csv.to_csv("E:/LITS/tumour/%d-%d/unet_train_loss.csv" % (center, width), encoding='utf-8',
                              index=False)
        if epoch_loss < 0.1:
            # BUG FIX: original was ``print('Trian_loss' % epoch_loss)``,
            # which raises TypeError (no conversion specifier in the string).
            print('Train_loss %f' % epoch_loss)
            break
        test(epoch)
    return model
def train():
    """Build the model/criterion/optimizer/dataloader for the current
    (center, width) window and run ``train_model`` on the train split.

    Reads the module-level globals ``center``, ``width``, ``args``,
    ``x_transforms`` and ``y_transforms``.
    """
    print(center, width)
    # BUG FIX: the original passed the ``torch.device`` class itself to
    # ``.to(...)``, which raises TypeError; use a concrete device instance.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = resnet34(3, 1).to(device)
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r"E:/LITS/tumour/%d-%d/train/" % (center, width), transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
def test(k):
    """Evaluate the epoch-``k`` checkpoint on the test split.

    Loads the saved weights, runs inference over the test DataLoader,
    writes raw and Otsu-binarized predictions under ./result/, accumulates
    Dice/RVD/Jaccard/VOE/TPR, displays each overlay, and hands the
    per-epoch averages to ``val`` for CSV logging.

    :param k: epoch number whose weight file is evaluated.
    :return: the evaluated model.
    """
    args.ckp = r"E:/LITS/tumour/%d-%d/weights/unet_weights_%d.pth" % (center, width, k)
    # NOTE(review): ``torch.device`` here is the *class*, not a device
    # instance — ``.to(torch.device)`` raises TypeError; should be e.g.
    # ``torch.device("cpu")``.  Confirm intended device.
    model = resnet34(3, 1).to(torch.device)
    model.load_state_dict(torch.load(args.ckp, map_location='cpu'))
    liver_dataset = LiverDataset(r"E:/LITS/tumour/%d-%d/test/" % (center, width), transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=1)
    model.eval()
    import matplotlib.pyplot as plt
    plt.ion()
    with torch.no_grad():
        i = 0
        rvd_total = 0
        dice_total = 0
        jaccard_total = 0
        sen_total = 0
        voe_total = 0
        num = len(dataloaders)
        for x, _ in tqdm(dataloaders, ncols=80, desc="epoch %d" % k):
            # NOTE(review): same ``torch.device`` class-vs-instance issue.
            x = x.to(torch.device)
            y = model(x)
            img_y = torch.squeeze(y).cpu().numpy()
            # get_data(i) -> (image_path, mask_path); called three times per
            # sample, each call rescans the dataset directory.
            mask_path = get_data(i)[1]
            # NOTE(review): ``liver`` is read but never used below.
            liver = cv2.imread(get_data(i)[0], 0)
            tumour = cv2.imread(get_data(i)[1], 0)
            body = cv2.imread("E:/LITS/tumour/%d-%d/test/%d_body.png" % (center, width, i), 0)
            # Round-trip the prediction through disk, then Otsu-binarize it.
            cv2.imwrite("./result/%d.png" % i, img_y * 255)
            seg = cv2.imread("./result/%d.png" % i, 0)
            ret, seg = cv2.threshold(seg, 128, 255, cv2.THRESH_OTSU)
            cv2.imwrite("./result/%d_b.png" % i, seg)
            dice, sen, rvd = get_index(mask_path, seg)
            rvd_total += rvd
            dice_total += dice
            sen_total += sen
            # VOE is the complement of the Jaccard index.
            jaccard = dice / (2 - dice)
            jaccard_total += jaccard
            voe = 1 - jaccard
            voe_total += voe
            # NOTE(review): guard caps i at num, but get_data(num) would be
            # out of range — verify the off-by-one when i reaches num.
            if i < num: i += 1
            image_mask = draw_mask_edge_on_image_cv2(body, tumour, seg, color1=(0, 255, 0), color2=(255, 0, 0))
            print('epoch %d - Dice:%f - RVD:%f - Jaccard:%f - Voe:%f - TPR:%f' % (
                k, dice_total / num, rvd_total / num, jaccard_total / num, voe_total / num, sen_total / num))
            plt.imshow(image_mask)
            plt.pause(0.01)
        plt.show()
        # Persist the per-epoch averages to the metric CSVs.
        val(dice_total / num, rvd_total / num, jaccard_total / num, voe_total / num, sen_total / num)
        time.sleep(1)
        return model
def draw_mask_edge_on_image_cv2(image, mask, seg, color1, color2):
    """Overlay ground-truth and prediction contours on a grayscale image.

    :param image: grayscale image (rescaled by 255 if its max is < 3).
    :param mask: ground-truth binary mask.
    :param seg: predicted binary mask.
    :param color1: BGR contour color for the ground truth.
    :param color2: BGR contour color for the prediction.
    :return: float BGR image in [0, 1] with both contour sets drawn.
    """
    scale = 255 if np.max(image) < 3 else 1
    canvas = (image * scale).astype(np.float32)
    gt_contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    pred_contours, _ = cv2.findContours(seg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    canvas = cv2.cvtColor(canvas, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(canvas, gt_contours, -1, color1, 2)
    cv2.drawContours(canvas, pred_contours, -1, color2, 2)
    return canvas / 255
def val(x, y, z, p, q):
    """Record one epoch's average metrics and persist each history to CSV.

    Appends the values to the module-level history lists and rewrites one
    CSV per metric under the (center, width) index folder.

    :param x: average Dice coefficient.
    :param y: average RVD.
    :param z: average Jaccard index.
    :param p: average VOE.
    :param q: average sensitivity (TPR).
    """
    # (history list, new value, file-name stem) — one entry per metric;
    # replaces five verbatim copies of the same append + to_csv block.
    metrics = (
        (dice_total_list, x, "dice"),
        (rvd_total_list, y, "rvd"),
        (jaccard_total_list, z, "jaccard"),
        (voe_total_list, p, "voe"),
        (sen_total_list, q, "sen"),
    )
    for history, value, stem in metrics:
        history.append(value)
        pd.DataFrame(data=history).to_csv(
            "E:/LITS/tumour/%d-%d/index/unet_%s_loss.csv" % (center, width, stem),
            encoding='utf-8', index=False)
if __name__ == "__main__":
    x_transforms = transforms.Compose([
        transforms.ToTensor(),  # -> [0,1]
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # ->[-1,1]
    ])
    y_transforms = transforms.ToTensor()
    parse = argparse.ArgumentParser()
    parse.add_argument("--action", type=str, help="train or test", default="train")
    parse.add_argument("--batch_size", type=int, default=4)
    parse.add_argument("--ckp", type=str, help="the path of model weight file")
    args = parse.parse_known_args()[0]
    # BUG FIX: the original looped ``for i in range():`` — ``range()`` with
    # no arguments raises TypeError.  Iterate over the configured window
    # lists instead (``centerlist``/``widthlist`` module globals).
    for i in range(len(centerlist)):
        for j in range(len(widthlist)):
            center = centerlist[i]
            width = widthlist[j]
            train()
| [
"argparse.ArgumentParser",
"torch.no_grad",
"pandas.DataFrame",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"torch.load",
"cv2.imwrite",
"torch.squeeze",
"numpy.max",
"cv2.drawContours",
"matplotlib.pyplot.pause",
"tqdm.tqdm",
"matplotli... | [((467, 534), 'dataset.make_dataset', 'dataset.make_dataset', (["('E:/LITS/tumour/%d-%d/test' % (center, width))"], {}), "('E:/LITS/tumour/%d-%d/test' % (center, width))\n", (487, 534), False, 'import dataset\n'), ((2210, 2228), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (2226, 2228), False, 'import torch\n'), ((2465, 2542), 'torch.utils.data.DataLoader', 'DataLoader', (['liver_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n', (2475, 2542), False, 'from torch.utils.data import DataLoader\n'), ((3004, 3043), 'torch.utils.data.DataLoader', 'DataLoader', (['liver_dataset'], {'batch_size': '(1)'}), '(liver_dataset, batch_size=1)\n', (3014, 3043), False, 'from torch.utils.data import DataLoader\n'), ((3101, 3110), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3108, 3110), True, 'import matplotlib.pyplot as plt\n'), ((4960, 5022), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (4976, 5022), False, 'import cv2\n'), ((5042, 5103), 'cv2.findContours', 'cv2.findContours', (['seg', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(seg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (5058, 5103), False, 'import cv2\n'), ((5121, 5165), 'cv2.cvtColor', 'cv2.cvtColor', (['image_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(image_mask, cv2.COLOR_GRAY2BGR)\n', (5133, 5165), False, 'import cv2\n'), ((5170, 5224), 'cv2.drawContours', 'cv2.drawContours', (['image_mask', 'contours1', '(-1)', 'color1', '(2)'], {}), '(image_mask, contours1, -1, color1, 2)\n', (5186, 5224), False, 'import cv2\n'), ((5229, 5283), 'cv2.drawContours', 'cv2.drawContours', (['image_mask', 'contours2', '(-1)', 'color2', '(2)'], {}), '(image_mask, contours2, -1, color2, 2)\n', (5245, 5283), False, 'import cv2\n'), ((5537, 5571), 'pandas.DataFrame', 'pd.DataFrame', ([], 
{'data': 'dice_total_list'}), '(data=dice_total_list)\n', (5549, 5571), True, 'import pandas as pd\n'), ((5742, 5775), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rvd_total_list'}), '(data=rvd_total_list)\n', (5754, 5775), True, 'import pandas as pd\n'), ((5947, 5984), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'jaccard_total_list'}), '(data=jaccard_total_list)\n', (5959, 5984), True, 'import pandas as pd\n'), ((6164, 6197), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'voe_total_list'}), '(data=voe_total_list)\n', (6176, 6197), True, 'import pandas as pd\n'), ((6365, 6398), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'sen_total_list'}), '(data=sen_total_list)\n', (6377, 6398), True, 'import pandas as pd\n'), ((6760, 6781), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6779, 6781), False, 'from torchvision.transforms import transforms\n'), ((6795, 6820), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6818, 6820), False, 'import argparse\n'), ((1765, 1794), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'train_loss'}), '(data=train_loss)\n', (1777, 1794), True, 'import pandas as pd\n'), ((2774, 2814), 'torch.load', 'torch.load', (['args.ckp'], {'map_location': '"""cpu"""'}), "(args.ckp, map_location='cpu')\n", (2784, 2814), False, 'import torch\n'), ((3121, 3136), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3134, 3136), False, 'import torch\n'), ((3318, 3366), 'tqdm.tqdm', 'tqdm', (['dataloaders'], {'ncols': '(80)', 'desc': "('epoch %d' % k)"}), "(dataloaders, ncols=80, desc='epoch %d' % k)\n", (3322, 3366), False, 'from tqdm import tqdm\n'), ((4567, 4589), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_mask'], {}), '(image_mask)\n', (4577, 4589), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4613), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (4607, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4632), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4630, 4632), True, 'import matplotlib.pyplot as plt\n'), ((4743, 4756), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4753, 4756), False, 'import time\n'), ((951, 1029), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_iter', 'ncols': '(80)', 'desc': "('训练epoch %d/%d' % (epoch + 1, num_epochs))"}), "(total=num_iter, ncols=80, desc='训练epoch %d/%d' % (epoch + 1, num_epochs))\n", (955, 1029), False, 'from tqdm import tqdm\n'), ((3639, 3714), 'cv2.imread', 'cv2.imread', (["('E:/LITS/tumour/%d-%d/test/%d_body.png' % (center, width, i))", '(0)'], {}), "('E:/LITS/tumour/%d-%d/test/%d_body.png' % (center, width, i), 0)\n", (3649, 3714), False, 'import cv2\n'), ((3727, 3774), 'cv2.imwrite', 'cv2.imwrite', (["('./result/%d.png' % i)", '(img_y * 255)'], {}), "('./result/%d.png' % i, img_y * 255)\n", (3738, 3774), False, 'import cv2\n'), ((3793, 3829), 'cv2.imread', 'cv2.imread', (["('./result/%d.png' % i)", '(0)'], {}), "('./result/%d.png' % i, 0)\n", (3803, 3829), False, 'import cv2\n'), ((3853, 3898), 'cv2.threshold', 'cv2.threshold', (['seg', '(128)', '(255)', 'cv2.THRESH_OTSU'], {}), '(seg, 128, 255, cv2.THRESH_OTSU)\n', (3866, 3898), False, 'import cv2\n'), ((3911, 3952), 'cv2.imwrite', 'cv2.imwrite', (["('./result/%d_b.png' % i)", 'seg'], {}), "('./result/%d_b.png' % i, seg)\n", (3922, 3952), False, 'import cv2\n'), ((3983, 4008), 'conda.exports.get_index', 'get_index', (['mask_path', 'seg'], {}), '(mask_path, seg)\n', (3992, 4008), False, 'from conda.exports import get_index\n'), ((4865, 4878), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4871, 4878), True, 'import numpy as np\n'), ((6623, 6644), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6642, 6644), False, 'from torchvision.transforms import transforms\n'), ((6666, 6720), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['[0.5, 0.5, 0.5]', '[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5], 
[0.5, 0.5, 0.5])\n', (6686, 6720), False, 'from torchvision.transforms import transforms\n'), ((3448, 3464), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (3461, 3464), False, 'import torch\n')] |
import os
import sys
import pickle
import argparse
import numpy as np
from numpy.lib.format import open_memmap
from utils.ntu_read_skeleton import read_xyz
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# training_subjects = [
# 1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38
# ]
training_subjects = [1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38,
45, 46, 47, 49, 50, 52, 53, 54, 55, 56, 57, 58, 59, 70, 74, 78, 80, 81, 82,
83, 84, 85, 86, 89, 91, 92, 93, 94, 95, 97, 98, 100, 103]
# training_cameras = [2, 3]
training_cameras = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32] # For ntu 120 cross-setup
max_body = 2
num_joint = 25
max_frame = 100
toolbar_width = 30
def ntu_tranform_skeleton(test):
    """Canonicalize an NTU skeleton clip into a body-centered frame.

    Builds an orthonormal basis from the first frame in which both the
    spine vector (joint 1 - joint 0) and the joint-12 - joint-16 vector
    are non-zero, then rotates/translates every frame into that basis.
    Frames whose first-body joints contain a zero coordinate are dropped.

    :param test: array-like of shape (T, 150) — per-frame flattened
        (x, y, z) coordinates of 25 joints for up to two bodies.
    :return: np.ndarray of transformed frames with degenerate frames
        removed, passed through ``np.squeeze`` (so a single surviving
        frame comes back with shape (150,)).
    """
    test = np.asarray(test)

    # Locate the first frame with usable reference geometry.
    frame = 0
    for t in range(test.shape[0]):
        v1 = test[t, 3:6] - test[t, 0:3]
        v2_ = test[t, 36:39] - test[t, 48:51]
        if not np.all(v1 == 0) and not np.all(v2_ == 0):
            frame = t
            break

    # Origin and orthonormal axes from the reference frame
    # (Gram-Schmidt of the spine and joint-12/16 vectors).
    d = test[frame, 0:3]
    v1 = test[frame, 3:6] - test[frame, 0:3]
    v1 = v1 / np.linalg.norm(v1)
    v2_ = test[frame, 36:39] - test[frame, 48:51]
    proj_v2_v1 = np.dot(v1.T, v2_) * v1 / np.linalg.norm(v1)
    v2 = v2_ - np.squeeze(proj_v2_v1)
    v2 = v2 / np.linalg.norm(v2)
    v3 = np.cross(v2, v1) / np.linalg.norm(np.cross(v2, v1))
    R = np.hstack([np.reshape(v2, (3, 1)), np.reshape(v3, (3, 1)), np.reshape(v1, (3, 1))])
    # PERF: R is constant for the whole clip — invert it once instead of
    # once per joint per frame as the original did.
    R_inv = np.linalg.inv(R)

    transform_test = []
    for i in range(test.shape[0]):
        xyzs = []
        remove_frame = False
        for j in range(50):
            # NOTE(review): ``.all() == 0`` drops the frame when ANY
            # coordinate of a first-body joint is exactly zero; if "all
            # joints zero" was intended, this should be ``not ....any()``.
            if j < 25 and test[i][j * 3:j * 3 + 3].all() == 0:
                remove_frame = True
                break
            xyz = np.squeeze(np.matmul(R_inv, np.reshape(test[i][j * 3:j * 3 + 3] - d, (3, 1))))
            xyzs.append(xyz)
        if not remove_frame:
            transform_test.append(np.reshape(np.asarray(xyzs), (-1, 150)))
    return np.squeeze(np.asarray(transform_test))
def print_toolbar(rate, annotation=''):
    """Render a one-line textual progress bar on stdout.

    :param rate: completed fraction in [0, 1]; positions past it are blank.
    :param annotation: text printed in front of the bar.
    """
    sys.stdout.write("{}[".format(annotation))
    for pos in range(toolbar_width):
        marker = ' ' if pos * 1.0 / toolbar_width > rate else '-'
        sys.stdout.write(marker)
    sys.stdout.flush()
    # Carriage return so the next call redraws over this line.
    sys.stdout.write(']\r')
def end_toolbar():
    """Finish the progress bar by moving stdout to a fresh line."""
    sys.stdout.write("\n")
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='cross_view_data',
            part='eval'):
    """Convert raw NTU .skeleton files into a memory-mapped tensor + pickles.

    Writes three artifacts into out_path:
      * '{part}_label.pkl'        - pickled list of 0-based action labels
      * 'trans_{part}_data.npy'   - float32 memmap of shape
                                    (N, max_frame, 3 * num_joint * max_body)
      * '{part}_sample_len.pkl'   - pickled list of per-sample frame counts

    :param data_path: directory containing the raw skeleton files.
    :param out_path: output directory (must already exist).
    :param ignored_sample_path: optional text file listing sample names
        (without extension) to skip.
    :param benchmark: 'cross_view_data' or 'cross_subject_data' - selects
        which filename field decides the train/test split.
    :param part: 'train' or 'test'.
    :raises ValueError: for an unknown benchmark or part.
    """
    # NOTE(review): prefer `is not None`; behaviour unchanged, left as-is.
    if ignored_sample_path != None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [
                line.strip() + '.skeleton' for line in f.readlines()
            ]
    else:
        ignored_samples = []
    sample_name = []
    sample_label = []
    data_lens = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Filenames encode action (A###), subject (P###) and setup (S###).
        action_class = int(
            filename[filename.find('A') + 1:filename.find('A') + 4])
        subject_id = int(
            filename[filename.find('P') + 1:filename.find('P') + 4])
        # camera_id = int(
        #     filename[filename.find('C') + 1:filename.find('C') + 4])
        # Despite the name, this reads the S (setup) field - cross-setup split.
        camera_id = int(
            filename[filename.find('S') + 1:filename.find('S') + 4])
        if benchmark == 'cross_view_data':
            istraining = (camera_id in training_cameras)
        elif benchmark == 'cross_subject_data':
            istraining = (subject_id in training_subjects)
        else:
            raise ValueError()
        if part == 'train':
            issample = istraining
        elif part == 'test':
            issample = not (istraining)
        else:
            raise ValueError()
        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels become 0-based
    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        # pickle.dump((sample_name, list(sample_label)), f)
        pickle.dump( list(sample_label ), f)
    # np.save('{}/{}_label.npy'.format(out_path, part), sample_label)
    # Memory-mapped output so the whole dataset never sits in RAM at once.
    fp = open_memmap(
        '{}/trans_{}_data.npy'.format(out_path, part),
        dtype='float32',
        mode='w+',
        shape=(len(sample_label), max_frame, 3 * num_joint * max_body))
    for i, s in enumerate(sample_name):
        print_toolbar(i * 1.0 / len(sample_label),
                      '({:>5}/{:<5}) Processing {:>5}-{:<5} data: '.format(
                          i + 1, len(sample_name), benchmark, part))
        data = read_xyz(
            os.path.join(data_path, s), max_body=max_body, num_joint=num_joint)  # C, T, V, M
        C, T, V, M = data.shape
        # Flatten to (T, M * V * C) then rotate into the body-centred frame.
        data = data.transpose([1, -1, -2, 0]).reshape(T, -1)  # T, M * V * C
        data = ntu_tranform_skeleton(data)
        # Truncate long sequences; short ones stay zero-padded by the memmap.
        if data.shape[0] > max_frame:
            fp[i] = data[0:max_frame]
        else:
            fp[i, 0:data.shape[0]] = data
        data_lens.append(data.shape[0])
    with open('{}/{}_sample_len.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump( data_lens, f)
    end_toolbar()
if __name__ == '__main__':
    # Command-line entry point: convert the raw NTU skeleton directory into
    # train/test tensors for every benchmark split.
    parser = argparse.ArgumentParser(description='NTU-RGB-D Data Converter.')
    parser.add_argument(
        '--data_path', default='/data5/xushihao/data/ntu_raw_data/nturgb+d_skeletons/')
    parser.add_argument(
        '--ignored_sample_path',
        default='/data5/xushihao/data/NTU_RGBD120_samples_with_missing_skeletons_new.txt')
    parser.add_argument('--out_folder', default='/data5/xushihao/projects/my_gcn_lstm/Good_project_from_other_people/Predict-Cluster/pc_pytorch/data_for_pytorch/ntu120_2person')
    args = parser.parse_args()

    benchmarks = ['cross_subject_data', 'cross_view_data']
    splits = ['train', 'test']
    for bench in benchmarks:
        for split in splits:
            target_dir = os.path.join(args.out_folder, bench)
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            gendata(
                args.data_path,
                target_dir,
                args.ignored_sample_path,
                benchmark=bench,
                part=split)
| [
"sys.stdout.write",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.asarray",
"numpy.cross",
"os.path.exists",
"numpy.hstack",
"numpy.linalg.norm",
"numpy.reshape",
"sys.stdout.flush",
"numpy.linalg.inv",
"numpy.squeeze",
"numpy.dot",
"os.path.join",
"os.listdir",
"nu... | [((950, 966), 'numpy.asarray', 'np.asarray', (['test'], {}), '(test)\n', (960, 966), True, 'import numpy as np\n'), ((1816, 1838), 'numpy.reshape', 'np.reshape', (['v1', '(3, 1)'], {}), '(v1, (3, 1))\n', (1826, 1838), True, 'import numpy as np\n'), ((1847, 1869), 'numpy.reshape', 'np.reshape', (['v2', '(3, 1)'], {}), '(v2, (3, 1))\n', (1857, 1869), True, 'import numpy as np\n'), ((1878, 1900), 'numpy.reshape', 'np.reshape', (['v3', '(3, 1)'], {}), '(v3, (3, 1))\n', (1888, 1900), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.hstack', 'np.hstack', (['[v2, v3, v1]'], {}), '([v2, v3, v1])\n', (1923, 1937), True, 'import numpy as np\n'), ((2884, 2907), 'sys.stdout.write', 'sys.stdout.write', (["']\\r'"], {}), "(']\\r')\n", (2900, 2907), False, 'import sys\n'), ((2937, 2959), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2953, 2959), False, 'import sys\n'), ((3452, 3473), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (3462, 3473), False, 'import os\n'), ((6074, 6138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""NTU-RGB-D Data Converter."""'}), "(description='NTU-RGB-D Data Converter.')\n", (6097, 6138), False, 'import argparse\n'), ((1523, 1541), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (1537, 1541), True, 'import numpy as np\n'), ((1645, 1663), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (1659, 1663), True, 'import numpy as np\n'), ((1679, 1701), 'numpy.squeeze', 'np.squeeze', (['proj_v2_v1'], {}), '(proj_v2_v1)\n', (1689, 1701), True, 'import numpy as np\n'), ((1715, 1733), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (1729, 1733), True, 'import numpy as np\n'), ((1751, 1767), 'numpy.cross', 'np.cross', (['v2', 'v1'], {}), '(v2, v1)\n', (1759, 1767), True, 'import numpy as np\n'), ((2517, 2543), 'numpy.asarray', 'np.asarray', (['transform_test'], {}), '(transform_test)\n', (2527, 2543), True, 'import numpy as 
np\n'), ((2860, 2878), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2876, 2878), False, 'import sys\n'), ((5968, 5993), 'pickle.dump', 'pickle.dump', (['data_lens', 'f'], {}), '(data_lens, f)\n', (5979, 5993), False, 'import pickle\n'), ((1301, 1316), 'numpy.all', 'np.all', (['(v1 == 0)'], {}), '(v1 == 0)\n', (1307, 1316), True, 'import numpy as np\n'), ((1625, 1642), 'numpy.dot', 'np.dot', (['v1.T', 'v2_'], {}), '(v1.T, v2_)\n', (1631, 1642), True, 'import numpy as np\n'), ((1782, 1798), 'numpy.cross', 'np.cross', (['v2', 'v1'], {}), '(v2, v1)\n', (1790, 1798), True, 'import numpy as np\n'), ((2779, 2800), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (2795, 2800), False, 'import sys\n'), ((2829, 2850), 'sys.stdout.write', 'sys.stdout.write', (['"""-"""'], {}), "('-')\n", (2845, 2850), False, 'import sys\n'), ((5396, 5422), 'os.path.join', 'os.path.join', (['data_path', 's'], {}), '(data_path, s)\n', (5408, 5422), False, 'import os\n'), ((6782, 6813), 'os.path.join', 'os.path.join', (['arg.out_folder', 'b'], {}), '(arg.out_folder, b)\n', (6794, 6813), False, 'import os\n'), ((1343, 1359), 'numpy.all', 'np.all', (['(v2_ == 0)'], {}), '(v2_ == 0)\n', (1349, 1359), True, 'import numpy as np\n'), ((2358, 2374), 'numpy.asarray', 'np.asarray', (['xyzs'], {}), '(xyzs)\n', (2368, 2374), True, 'import numpy as np\n'), ((6834, 6858), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (6848, 6858), False, 'import os\n'), ((6877, 6898), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (6888, 6898), False, 'import os\n'), ((2209, 2225), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (2222, 2225), True, 'import numpy as np\n'), ((2226, 2274), 'numpy.reshape', 'np.reshape', (['(test[i][j * 3:j * 3 + 3] - d)', '(3, 1)'], {}), '(test[i][j * 3:j * 3 + 3] - d, (3, 1))\n', (2236, 2274), True, 'import numpy as np\n')] |
import os
import pickle
from datetime import datetime
from json import dumps
from typing import Dict, List, NamedTuple, Union
import numpy as np
import pandas as pd
from fastapi import FastAPI
from kafka import KafkaProducer
from pydantic import BaseModel
from gamma import BayesianGaussianMixture, GaussianMixture
from gamma.utils import association, convert_picks_csv, from_seconds, to_seconds
# Kafka producer: try the in-cluster broker first, then a local one; if both
# fail the service still starts with use_kafka = False (events not mirrored).
use_kafka = False
try:
    print('Connecting to k8s kafka')
    BROKER_URL = 'quakeflow-kafka-headless:9092'
    # BROKER_URL = "172.16.17.32:9094"
    # Keys and values are JSON-serialised to UTF-8 bytes.
    producer = KafkaProducer(
        bootstrap_servers=[BROKER_URL],
        key_serializer=lambda x: dumps(x).encode('utf-8'),
        value_serializer=lambda x: dumps(x).encode('utf-8'),
    )
    use_kafka = True
    print('k8s kafka connection success!')
# NOTE(review): BaseException is very broad (also catches KeyboardInterrupt);
# presumably meant as "any connection failure" - confirm before narrowing.
except BaseException:
    print('k8s Kafka connection error')
    try:
        print('Connecting to local kafka')
        producer = KafkaProducer(
            bootstrap_servers=['localhost:9092'],
            key_serializer=lambda x: dumps(x).encode('utf-8'),
            value_serializer=lambda x: dumps(x).encode('utf-8'),
        )
        use_kafka = True
        print('local kafka connection success!')
    except BaseException:
        print('local Kafka connection error')
print(f"Kafka status: {use_kafka}")
app = FastAPI()
# Paths are resolved relative to this file's parent directory.
PROJECT_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
STATION_CSV = os.path.join(PROJECT_ROOT, "tests/stations_hawaii.csv")
# STATION_CSV = os.path.join(PROJECT_ROOT, "tests/stations.csv") ## ridgecrest
def default_config(config):
    """Fill *config* in place with GaMMA defaults for any missing keys.

    :param config: dict of association parameters; mutated and returned.
    :return: the same dict, with every absent key set to its default.
    """
    defaults = {
        "degree2km": 111.195,
        "use_amplitude": True,
        "use_dbscan": True,
        "dbscan_eps": 6,
        "dbscan_min_samples": 3,
        "oversample_factor": 10,
        "min_picks_per_eq": 10,
        "dims": ["x(km)", "y(km)", "z(km)"],
    }
    for key, value in defaults.items():
        config.setdefault(key, value)
    return config
## set config: study region (lon/lat box and depth range) for Hawaii.
config = {'xlim_degree': [-156.32, -154.32], 'ylim_degree': [18.39, 20.39], "z(km)": [0, 41]} ## hawaii
# config = {'xlim_degree': [-118.004, -117.004], 'ylim_degree': [35.205, 36.205], "z(km)": [0, 41]} ## ridgecrest
config = default_config(config)
# Convert the lon/lat box to a local km-based Cartesian frame centred on it.
config["center"] = [np.mean(config["xlim_degree"]), np.mean(config["ylim_degree"])]
config["x(km)"] = (np.array(config["xlim_degree"]) - config["center"][0]) * config["degree2km"]
config["y(km)"] = (np.array(config["ylim_degree"]) - config["center"][1]) * config["degree2km"]
# Optimiser bounds per spatial dim, plus an unbounded slot (origin time).
config["bfgs_bounds"] = [list(config[x]) for x in config["dims"]] + [[None, None]]
for k, v in config.items():
    print(f"{k}: {v}")
## read stations and project them into the same local km frame.
stations = pd.read_csv(STATION_CSV, delimiter="\t")
stations = stations.rename(columns={"station": "id"})
stations["x(km)"] = stations["longitude"].apply(lambda x: (x - config["center"][0]) * config["degree2km"])
stations["y(km)"] = stations["latitude"].apply(lambda x: (x - config["center"][1]) * config["degree2km"])
stations["z(km)"] = stations["elevation(m)"].apply(lambda x: -x / 1e3)  # metres up -> km down
print(stations)
class Data(BaseModel):
    """Request body for /predict: phase picks, station metadata and config."""
    picks: List[Dict[str, Union[float, str]]]
    stations: List[Dict[str, Union[float, str]]]
    config: Dict[str, Union[List[float], List[int], List[str], float, int, str]]
class Pick(BaseModel):
    """Request body for /predict_stream: picks only (module-level config/stations are used)."""
    picks: List[Dict[str, Union[float, str]]]
def run_gamma(data, config, stations):
    """Associate phase picks into earthquake events with GaMMA.

    :param data: request object with a ``picks`` list of dicts (must include
        a ``timestamp`` string '%Y-%m-%dT%H:%M:%S.%f').
    :param config: association parameters (see default_config).
    :param stations: DataFrame of stations with local x/y/z(km) columns.
    :return: (catalogs, picks_gamma) - a DataFrame of located events and the
        input picks annotated with event_idx / prob_gmma (-1 if unassigned).
    """
    picks = pd.DataFrame(data.picks)
    picks["timestamp"] = picks["timestamp"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f"))
    event_idx0 = 0 ## current earthquake index
    assignments = []
    # Small batches are associated in one shot; large ones hour by hour.
    if (len(picks) > 0) and (len(picks) < 5000):
        data, locs, phase_type, phase_weight, phase_index = convert_picks_csv(picks, stations, config)
        catalogs, assignments = association(
            data, locs, phase_type, phase_weight, len(stations), phase_index, event_idx0, config
        )
        event_idx0 += len(catalogs)
    else:
        catalogs = []
        picks["time_idx"] = picks["timestamp"].apply(lambda x: x.strftime("%Y-%m-%dT%H")) ## process by hours
        for hour in sorted(list(set(picks["time_idx"]))):
            picks_ = picks[picks["time_idx"] == hour]
            if len(picks_) == 0:
                continue
            data, locs, phase_type, phase_weight, phase_index = convert_picks_csv(picks_, stations, config)
            catalog, assign = association(
                data, locs, phase_type, phase_weight, len(stations), phase_index, event_idx0, config
            )
            event_idx0 += len(catalog)  # keep event indices unique across hours
            catalogs.extend(catalog)
            assignments.extend(assign)
    ## create catalog: convert local km coordinates back to lon/lat/depth.
    print(catalogs)
    catalogs = pd.DataFrame(catalogs, columns=["time(s)"] + config["dims"] + ["magnitude", "covariance"])
    catalogs["time"] = catalogs["time(s)"].apply(lambda x: from_seconds(x))
    catalogs["longitude"] = catalogs["x(km)"].apply(lambda x: x / config["degree2km"] + config["center"][0])
    catalogs["latitude"] = catalogs["y(km)"].apply(lambda x: x / config["degree2km"] + config["center"][1])
    catalogs["depth(m)"] = catalogs["z(km)"].apply(lambda x: x * 1e3)
    catalogs["event_idx"] = range(event_idx0)
    # Covariance is flattened to a compact CSV string for transport.
    if config["use_amplitude"]:
        catalogs["covariance"] = catalogs["covariance"].apply(lambda x: f"{x[0][0]:.3f},{x[1][1]:.3f},{x[0][1]:.3f}")
    else:
        catalogs["covariance"] = catalogs["covariance"].apply(lambda x: f"{x[0][0]:.3f}")
    # catalogs.drop(columns=["x(km)", "y(km)", "z(km)", "time(s)"], inplace=True)
    catalogs = catalogs[['time', 'magnitude', 'longitude', 'latitude', 'depth(m)', 'covariance', "event_idx"]]
    ## add assignment to picks; unassigned picks get -1.
    assignments = pd.DataFrame(assignments, columns=["pick_idx", "event_idx", "prob_gmma"])
    picks_gamma = picks.join(assignments.set_index("pick_idx")).fillna(-1).astype({'event_idx': int})
    picks_gamma["timestamp"] = picks_gamma["timestamp"].apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])
    if "time_idx" in picks_gamma:
        picks_gamma.drop(columns=["time_idx"], inplace=True)
    return catalogs,picks_gamma
@app.post('/predict_stream')
def predict(data: Pick):
    """Associate streamed picks into events using the module-level config/stations."""
    if not data.picks:
        return {"catalog": [], "picks": []}
    catalogs, picks_gamma = run_gamma(data, config, stations)
    event_records = catalogs.to_dict(orient="records")
    if use_kafka:
        # Mirror every associated event onto the Kafka topic as well.
        print("Push events to kafka...")
        for event in event_records:
            producer.send('gmma_events', key=event["time"], value=event)
    return {"catalog": event_records, "picks": picks_gamma.to_dict(orient="records")}
# NOTE(review): this second `predict` shadows the one above at module level;
# both routes still work because FastAPI registers handlers at decoration time.
@app.post('/predict')
def predict(data: Data):
    """Associate picks into events using per-request stations and config."""
    if len(data.picks) == 0:
        return {"catalog": [], "picks": []}
    stations = pd.DataFrame(data.stations)
    if len(stations) == 0:
        return {"catalog": [], "picks": []}
    assert "latitude" in stations
    assert "longitude" in stations
    assert "elevation(m)" in stations
    config = data.config
    config = default_config(config)
    # Derive any missing geometry from the supplied station extent, mirroring
    # the module-level setup above.
    if "xlim_degree" not in config:
        config["xlim_degree"] = (stations["longitude"].min(), stations["longitude"].max())
    if "ylim_degree" not in config:
        config["ylim_degree"] = (stations["latitude"].min(), stations["latitude"].max())
    if "center" not in config:
        config["center"] = [np.mean(config["xlim_degree"]), np.mean(config["ylim_degree"])]
    if "x(km)" not in config:
        config["x(km)"] = (np.array(config["xlim_degree"]) - config["center"][0]) * config["degree2km"]
    if "y(km)" not in config:
        config["y(km)"] = (np.array(config["ylim_degree"]) - config["center"][1]) * config["degree2km"]
    if "z(km)" not in config:
        config["z(km)"] = (0, 41)
    if "bfgs_bounds" not in config:
        config["bfgs_bounds"] = [list(config[x]) for x in config["dims"]] + [[None, None]]
    # Project stations into the local km frame.
    stations["x(km)"] = stations["longitude"].apply(lambda x: (x - config["center"][0]) * config["degree2km"])
    stations["y(km)"] = stations["latitude"].apply(lambda x: (x - config["center"][1]) * config["degree2km"])
    stations["z(km)"] = stations["elevation(m)"].apply(lambda x: -x / 1e3)
    catalogs, picks_gamma = run_gamma(data, config, stations)
    if use_kafka:
        print("Push events to kafka...")
        for event in catalogs.to_dict(orient="records"):
            producer.send('gmma_events', key=event["time"], value=event)
    return {"catalog": catalogs.to_dict(orient="records"), "picks": picks_gamma.to_dict(orient="records")}
@app.get("/healthz")
def healthz():
return {"status": "ok"} | [
"pandas.DataFrame",
"gamma.utils.convert_picks_csv",
"pandas.read_csv",
"os.path.dirname",
"json.dumps",
"datetime.datetime.strptime",
"numpy.mean",
"numpy.array",
"os.path.join",
"gamma.utils.from_seconds",
"fastapi.FastAPI"
] | [((1350, 1359), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (1357, 1359), False, 'from fastapi import FastAPI\n'), ((1454, 1509), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""tests/stations_hawaii.csv"""'], {}), "(PROJECT_ROOT, 'tests/stations_hawaii.csv')\n", (1466, 1509), False, 'import os\n'), ((2965, 3005), 'pandas.read_csv', 'pd.read_csv', (['STATION_CSV'], {'delimiter': '"""\t"""'}), "(STATION_CSV, delimiter='\\t')\n", (2976, 3005), True, 'import pandas as pd\n'), ((2545, 2575), 'numpy.mean', 'np.mean', (["config['xlim_degree']"], {}), "(config['xlim_degree'])\n", (2552, 2575), True, 'import numpy as np\n'), ((2577, 2607), 'numpy.mean', 'np.mean', (["config['ylim_degree']"], {}), "(config['ylim_degree'])\n", (2584, 2607), True, 'import numpy as np\n'), ((3686, 3710), 'pandas.DataFrame', 'pd.DataFrame', (['data.picks'], {}), '(data.picks)\n', (3698, 3710), True, 'import pandas as pd\n'), ((4979, 5073), 'pandas.DataFrame', 'pd.DataFrame', (['catalogs'], {'columns': "(['time(s)'] + config['dims'] + ['magnitude', 'covariance'])"}), "(catalogs, columns=['time(s)'] + config['dims'] + ['magnitude',\n 'covariance'])\n", (4991, 5073), True, 'import pandas as pd\n'), ((5976, 6049), 'pandas.DataFrame', 'pd.DataFrame', (['assignments'], {'columns': "['pick_idx', 'event_idx', 'prob_gmma']"}), "(assignments, columns=['pick_idx', 'event_idx', 'prob_gmma'])\n", (5988, 6049), True, 'import pandas as pd\n'), ((7026, 7053), 'pandas.DataFrame', 'pd.DataFrame', (['data.stations'], {}), '(data.stations)\n', (7038, 7053), True, 'import pandas as pd\n'), ((1406, 1431), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1421, 1431), False, 'import os\n'), ((2628, 2659), 'numpy.array', 'np.array', (["config['xlim_degree']"], {}), "(config['xlim_degree'])\n", (2636, 2659), True, 'import numpy as np\n'), ((2724, 2755), 'numpy.array', 'np.array', (["config['ylim_degree']"], {}), "(config['ylim_degree'])\n", (2732, 2755), True, 'import numpy as 
np\n'), ((3996, 4038), 'gamma.utils.convert_picks_csv', 'convert_picks_csv', (['picks', 'stations', 'config'], {}), '(picks, stations, config)\n', (4013, 4038), False, 'from gamma.utils import association, convert_picks_csv, from_seconds, to_seconds\n'), ((3771, 3815), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S.%f')\n", (3788, 3815), False, 'from datetime import datetime\n'), ((4604, 4647), 'gamma.utils.convert_picks_csv', 'convert_picks_csv', (['picks_', 'stations', 'config'], {}), '(picks_, stations, config)\n', (4621, 4647), False, 'from gamma.utils import association, convert_picks_csv, from_seconds, to_seconds\n'), ((5129, 5144), 'gamma.utils.from_seconds', 'from_seconds', (['x'], {}), '(x)\n', (5141, 5144), False, 'from gamma.utils import association, convert_picks_csv, from_seconds, to_seconds\n'), ((7607, 7637), 'numpy.mean', 'np.mean', (["config['xlim_degree']"], {}), "(config['xlim_degree'])\n", (7614, 7637), True, 'import numpy as np\n'), ((7639, 7669), 'numpy.mean', 'np.mean', (["config['ylim_degree']"], {}), "(config['ylim_degree'])\n", (7646, 7669), True, 'import numpy as np\n'), ((7728, 7759), 'numpy.array', 'np.array', (["config['xlim_degree']"], {}), "(config['xlim_degree'])\n", (7736, 7759), True, 'import numpy as np\n'), ((7862, 7893), 'numpy.array', 'np.array', (["config['ylim_degree']"], {}), "(config['ylim_degree'])\n", (7870, 7893), True, 'import numpy as np\n'), ((668, 676), 'json.dumps', 'dumps', (['x'], {}), '(x)\n', (673, 676), False, 'from json import dumps\n'), ((729, 737), 'json.dumps', 'dumps', (['x'], {}), '(x)\n', (734, 737), False, 'from json import dumps\n'), ((1060, 1068), 'json.dumps', 'dumps', (['x'], {}), '(x)\n', (1065, 1068), False, 'from json import dumps\n'), ((1125, 1133), 'json.dumps', 'dumps', (['x'], {}), '(x)\n', (1130, 1133), False, 'from json import dumps\n')] |
import array
import numpy as np
from sys import getsizeof
###############################################################################
# EXAMPLES OF THE ARRAY LIBRARY AND NUMPY ARRAYS.
###############################################################################
# array.array stores unboxed C values; typecode 'i' means signed int.
my_python_array = array.array('i') # creates empty array of integers
my_python_array.append(10) # Add values to the array one at a time.
my_python_array.append(9)
my_python_array.append(8)
my_python_array.append(7)
my_python_array.append(5)
print(f"my_python_array[0] = {my_python_array[0]}") # Access the elements by their index.
print(f"my_python_array[4] = {my_python_array[4]}") # Access the elements by their index.
my_python_array[0] = 100 # Change Values by index.
print(f"my_python_array[0] = {my_python_array[0]}") # Access the elements by their index.
y = array.array('i', [10, 9, 8, 7, 5]) # creates an initialized array
print(f"Initialized Array: {y}")
############################
# NUMPY
############################
# SAMPLE CREATION OF NUMPY ARRAY
my_numpy_array = np.array([10, 9, 8, 7, 5]) # INITIALIZE A NUMPY ARRAY
# ACCESS THE ELEMENTS BY THEIR INDEX.
for i in range(0, my_numpy_array.size):
    print(f"my_numpy_array[{i}] = {my_numpy_array[i]}")
# Change values by index
print("Before Change:")
print(f"my_numpy_array[3] = {my_numpy_array[3]}")
my_numpy_array[3] = 10
print("After Change:")
print(f"my_numpy_array[3] = {my_numpy_array[3]}")
# CREATING A MULTI-DIMENSIONAL NP ARRAY
r1 = [1, 2, 3]
r2 = [4, 5, 6]
my_2d_array = np.array([r1, r2])
print(my_2d_array)
# ACCESS BY ROW THEN COLUMN
print(f"Row: 0, Column: 2 = {my_2d_array[0][2]}")
# JAGGED ARRAYS (ARRAYS WITH ROWS OF DIFFERENT LENGTHS)
r1_short = np.array([1, 2, 3]) # INPUTS SHOULD BE NP ARRAYS NOT LISTS
r2_long = np.array([4, 5, 6, 7])
my_jagged_array = np.array([r1_short, r2_long], dtype="object") # NEED TO SET DATATYPE
print("Implemented as an array of arrays:")
print(my_jagged_array)
# PERFORMING MATH ON NUMPY ARRAYS (SYNTACTIC SUGAR, BUT FAST IMPLEMENTATION TOO)
print(my_2d_array)
print("Times Two:")
print(my_2d_array * 2)
print("Element Wise Squared:")
print(my_2d_array ** 2)
print("Even works on arrays of arrays:")
print(my_jagged_array)
print("Times Two:")
print(my_jagged_array * 2)
# COMPARING MEMORY FOOTPRINT OF DIFFERENT ARRAY TYPES, PYTHON ARRAY, NUMPY ARRAY AND PYTHON LIST.
ARRAY_SIZE = 1000
array_data = np.linspace(1, ARRAY_SIZE, ARRAY_SIZE, dtype=int)
# CREATE A NEW ARRAYS
my_big_python_array = array.array('i', array_data)
my_big_numpy_array = np.array(array_data)
my_big_list = my_big_numpy_array.tolist()
# COMPARE THE SIZES OF THE ARRAYS (getsizeof is shallow but fine for these flat types)
print(f"      Array Size: {getsizeof(my_big_python_array)} bytes for {ARRAY_SIZE} elements.")
print(f"Numpy Array Size: {getsizeof(my_big_numpy_array)} bytes bytes for {ARRAY_SIZE} elements..")
print(f"       List Size: {getsizeof(my_big_list)} bytes bytes for {ARRAY_SIZE} elements.")
###############################################################################
# BASIC NODE INTRODUCTIONS, A BASIC NODE, AND A MAP NODE AND SOME SAMPLE CODE
###############################################################################
class BasicNode:
    """A minimal linked-list node: one payload plus one forward reference.

    Demonstrates how objects chain together through attribute references.
    """

    def __init__(self, data=None, link=None):
        """Create a node carrying *data*, optionally linked to another node."""
        self.data = data  # payload carried by this node
        self.link = link  # next node in the chain, or None
# CREATING BASIC NODES AND LINKING THEM TOGETHER
n1 = BasicNode("Hello")
n2 = BasicNode("World")
n1.link = n2
print(f"{n1.data}, {n1.link.data}")
# DEMONSTRATION OF POINTERS, CHANGING THE DATA OF N2 AND ACCESSING IT THROUGH N1
n2.data = "Universe"
print(f"{n1.data}, {n1.link.data}")
# DEMONSTRATION OF MULTIPLE LINKS TOGETHER AND LOOPING THROUGH THEM.
# Built back-to-front so each node can link to the one created before it.
n4 = BasicNode("Moon")
n3 = BasicNode("Goodnight", n4)
n2 = BasicNode("World", n3)
n1 = BasicNode("Hello", n2)
print(f"{n1.data}, {n2.data}, {n3.data}, {n4.data}")
print(f"{n1.data}, {n1.link.data}, {n1.link.link.data}, {n1.link.link.link.data}")
# Hello, World, Goodnight, Moon
# Demonstrate looping through nodes using current pointer.
curr = n1
while curr:
    print(f"{curr.data}", end=", ")
    curr = curr.link
# Two backspaces erase the trailing ", " left by the loop above.
print("\b\b")
class MapCellNode:
    """Toy map-grid node: one payload plus four compass-direction links.

    Shows that node structures need not be linear - each cell may reference
    up to four neighbours, wired up by the caller.
    """

    def __init__(self, data=None):
        """Create an unconnected cell holding *data*; all neighbour links start as None."""
        self.data = data
        # Neighbour references, assigned externally as the map is built.
        self.north = self.east = self.south = self.west = None
# DEMONSTRATION OF CREATING NODES THAT RELATE TO EACH OTHER IN A NON LINEAR WAY.
n1 = MapCellNode("Danger")
n2 = MapCellNode("Danger")
n3 = MapCellNode("Safe")
n4 = MapCellNode("Safe")
n5 = MapCellNode("Danger")
# Wire up a small 2x2 neighbourhood (n5 is left unconnected on purpose).
n1.east = n2
n1.south = n3
n2.west = n1
n2.south = n4
##################################################
# SIMPLE IMPLEMENTATION OF A SINGLY LINKED LIST
##################################################
class SinglyLinkedList:
    """
    A basic singly linked list supporting append, search, contains, delete,
    clear, iteration and size queries.  first/last pointers make append O(1);
    all searches are linear scans.
    """

    class SingleLinkNode:
        """
        A simple node to be used for the SinglyLinkedList containing a next pointer and data field.
        """

        def __init__(self, data):
            """
            Creates a new node containing the provided data.
            :param data: Any datatype that supports equality (for search/contains/delete).
            """
            self.data = data
            self.next = None

    def __init__(self):
        """
        Creates a new empty LinkedList.
        """
        self.first = None  # head node
        self.last = None   # tail node, kept for O(1) append
        self.count = 0     # number of elements

    def __str__(self):
        """
        Generates the string representation of the linked list: a
        comma-separated list of the element values followed by the size.
        Example: "String1, String2, String3: Size: 3"
        :return: A string representation of the linked list.
        """
        # str(x) so non-string elements render instead of raising TypeError.
        r = ', '.join(str(x) for x in self.iter())
        r = "[Empty]" if len(r) == 0 else r
        r += f": Size: {self.size()}"
        return r

    def append(self, data):
        """
        Adds a new element to the end of the list.
        :param data: The data to add to the end of the list.
        :return: None
        """
        n = SinglyLinkedList.SingleLinkNode(data)
        if self.first is None:  # empty list: new node is both head and tail
            self.first = n
        else:
            self.last.next = n
        self.last = n
        self.count += 1

    def contains(self, data):
        """
        Searches the linked list for the provided data.
        :param data: The data (needle) for which to search.
        :return: True if a node contains matching data, False otherwise.
        """
        return self.search(data) is not None

    def clear(self):
        """
        Removes all elements from the list.
        :return: None
        """
        self.first = None
        self.last = None
        self.count = 0

    def delete(self, data):
        """
        Removes the first element whose data equals the provided data.
        :param data: The data for which to search for and delete.
        :return: True if the deletion was successful, False otherwise.
        """
        prev = None
        curr = self.first
        while curr:
            if curr.data == data:
                if prev is None:       # deleting the head
                    self.first = curr.next
                else:
                    prev.next = curr.next
                if curr is self.last:  # deleting the tail
                    # BUG FIX: previously self.last could be left pointing at
                    # the removed node when the list became empty.
                    self.last = prev
                self.count -= 1
                return True
            prev = curr
            curr = curr.next
        return False

    def is_empty(self):
        """
        Determine if the list contains any elements.
        :return: True if the list contains no elements, False otherwise.
        """
        return self.count == 0

    def iter(self):
        """
        Generator over the element values, front to back.
        :return: Yields each element's data in list order.
        """
        curr = self.first
        while curr:
            ret = curr.data
            curr = curr.next  # advance before yielding, as the original did
            yield ret

    def search(self, data):
        """
        Searches linearly through the list for an element containing data that
        matches the data being searched for.
        :param data: The data for which the function will search.
        :return: The first node containing the data, or None if not found.
        """
        curr = self.first
        while curr:
            if curr.data == data:
                return curr
            curr = curr.next
        return None

    def size(self):
        """
        Return the number of elements in the linked list.
        :return: The number of elements in the list; 0 if the list is empty.
        """
        return self.count
# Exercise the SinglyLinkedList: append, contains, search, delete, clear.
my_singly_linked_list = SinglyLinkedList()
my_singly_linked_list.append("Hello")
print(f"Size Should be 1: {my_singly_linked_list.size()}")
print(f"First and Last should be Hello: {my_singly_linked_list.first.data}, {my_singly_linked_list.last.data}")
my_singly_linked_list.append("World")
print(f"Size Should be 2: {my_singly_linked_list.size()}")
print(f"First (Hello), Last(World): {my_singly_linked_list.first.data}, {my_singly_linked_list.last.data}")
my_singly_linked_list.append("Goodnight")
my_singly_linked_list.append("Moon")
print(f"")
print(f"Should find Hello: {my_singly_linked_list.contains('Hello')}")
print(f"Should NOT find Hi: {my_singly_linked_list.contains('Hi')}")
results = my_singly_linked_list.search("Hello")
print(f"Results Should be Hello: {results.data}")
deleted = my_singly_linked_list.delete("Nope")
print(f"Deleting NOPE should be False, {deleted}")
print(f"Deleting Hello:{my_singly_linked_list.delete('Hello')}")
print(my_singly_linked_list)
print("Clearing...")
my_singly_linked_list.clear()
print(my_singly_linked_list)
##################################################
# SIMPLE IMPLEMENTATION OF A DOUBLY LINKED LIST
##################################################
class DoublyLinkedList:
    """
    A basic doubly linked list supporting append, search, contains, delete,
    clear, iteration and size queries.  Each node keeps prev/next pointers,
    so deletion needs no separate predecessor tracker.
    """

    class DoubleLinkNode:
        """
        A simple node for the DoublyLinkedList containing next/prev pointers and a data field.
        """

        def __init__(self, data):
            """
            Creates a new node containing the provided data.
            :param data: Any datatype that supports equality (for search/contains/delete).
            """
            self.data = data
            self.next = None
            self.prev = None

    def __init__(self):
        """
        Creates a new empty LinkedList.
        """
        self.first = None  # head node
        self.last = None   # tail node, kept for O(1) append
        self.count = 0     # number of elements

    def __str__(self):
        """
        Generates the string representation of the linked list: a
        comma-separated list of the element values followed by the size.
        Example: "String1, String2, String3: Size: 3"
        :return: A string representation of the linked list.
        """
        # str(x) so non-string elements render instead of raising TypeError.
        r = ', '.join(str(x) for x in self.iter())
        r = "[Empty]" if len(r) == 0 else r
        r += f": Size: {self.size()}"
        return r

    def append(self, data):
        """
        Adds a new element to the end of the list.
        :param data: The data to add to the end of the list.
        :return: None
        """
        n = DoublyLinkedList.DoubleLinkNode(data)
        if self.first is None:  # empty list: new node is both head and tail
            self.first = n
        else:
            n.prev = self.last   # back-link to the old tail
            self.last.next = n   # old tail links forward to the new node
        self.last = n
        self.count += 1

    def contains(self, data):
        """
        Searches the linked list for the provided data.
        :param data: The data (needle) for which to search.
        :return: True if a node contains matching data, False otherwise.
        """
        return self.search(data) is not None

    def clear(self):
        """
        Removes all elements from the list.
        :return: None
        """
        self.first = None
        self.last = None
        self.count = 0

    def delete(self, data):
        """
        Removes the first element whose data equals the provided data.
        :param data: The data for which to search for and delete.
        :return: True if the deletion was successful, False otherwise.
        """
        # BUG FIX: the old implementation removed *every* middle occurrence
        # while decrementing count only once, and left self.last pointing at
        # a removed node when the list became empty.  This version removes
        # exactly the first occurrence and keeps first/last consistent.
        curr = self.first
        while curr:
            if curr.data == data:
                if curr.prev is None:      # removing the head
                    self.first = curr.next
                else:
                    curr.prev.next = curr.next
                if curr.next is None:      # removing the tail
                    self.last = curr.prev  # becomes None when list empties
                else:
                    curr.next.prev = curr.prev
                self.count -= 1
                return True
            curr = curr.next
        return False

    def is_empty(self):
        """
        Determine if the list contains any elements.
        :return: True if the list contains no elements, False otherwise.
        """
        return self.count == 0

    def iter(self):
        """
        Generator over the element values, front to back.
        :return: Yields each element's data in list order.
        """
        curr = self.first
        while curr:
            ret = curr.data
            curr = curr.next  # advance before yielding, as the original did
            yield ret

    def search(self, data):
        """
        Searches linearly through the list for an element containing data that
        matches the data being searched for.
        :param data: The data for which the function will search.
        :return: The first node containing the data, or None if not found.
        """
        curr = self.first
        while curr:
            if curr.data == data:
                return curr
            curr = curr.next
        return None

    def size(self):
        """
        Return the number of elements in the linked list.
        :return: The number of elements in the list; 0 if the list is empty.
        """
        return self.count
# Quick smoke-test / demo of the DoublyLinkedList API.
dll = DoublyLinkedList()
dll.append("Hello")
print(f"Size Should be 1: {dll.size()}")
print(f"First and Last should be Hello: {dll.first.data}, {dll.last.data}")
dll.append("World")
print(f"Size Should be 2: {dll.size()}")
print(f"First (Hello), Last(World): {dll.first.data}, {dll.last.data}")
dll.append("Goodnight")
dll.append("Moon")
print(f"Should find Hello: {dll.contains('Hello')}")
print(f"Should NOT find Hi: {dll.contains('Hi')}")
results = dll.search("Hello")
print(f"Results Should be Hello: {results.data}")
deleted = dll.delete("Nope")
print(f"Deleting NOPE should be False, {deleted}")
print(f"Deleting Hello:{dll.delete('Hello')}")
print(dll)
print("Clearing...")
dll.clear()
print(dll)
"numpy.array",
"array.array",
"numpy.linspace",
"sys.getsizeof"
] | [((290, 306), 'array.array', 'array.array', (['"""i"""'], {}), "('i')\n", (301, 306), False, 'import array\n'), ((848, 882), 'array.array', 'array.array', (['"""i"""', '[10, 9, 8, 7, 5]'], {}), "('i', [10, 9, 8, 7, 5])\n", (859, 882), False, 'import array\n'), ((1066, 1092), 'numpy.array', 'np.array', (['[10, 9, 8, 7, 5]'], {}), '([10, 9, 8, 7, 5])\n', (1074, 1092), True, 'import numpy as np\n'), ((1537, 1555), 'numpy.array', 'np.array', (['[r1, r2]'], {}), '([r1, r2])\n', (1545, 1555), True, 'import numpy as np\n'), ((1721, 1740), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1729, 1740), True, 'import numpy as np\n'), ((1792, 1814), 'numpy.array', 'np.array', (['[4, 5, 6, 7]'], {}), '([4, 5, 6, 7])\n', (1800, 1814), True, 'import numpy as np\n'), ((1833, 1878), 'numpy.array', 'np.array', (['[r1_short, r2_long]'], {'dtype': '"""object"""'}), "([r1_short, r2_long], dtype='object')\n", (1841, 1878), True, 'import numpy as np\n'), ((2412, 2461), 'numpy.linspace', 'np.linspace', (['(1)', 'ARRAY_SIZE', 'ARRAY_SIZE'], {'dtype': 'int'}), '(1, ARRAY_SIZE, ARRAY_SIZE, dtype=int)\n', (2423, 2461), True, 'import numpy as np\n'), ((2507, 2535), 'array.array', 'array.array', (['"""i"""', 'array_data'], {}), "('i', array_data)\n", (2518, 2535), False, 'import array\n'), ((2557, 2577), 'numpy.array', 'np.array', (['array_data'], {}), '(array_data)\n', (2565, 2577), True, 'import numpy as np\n'), ((2677, 2707), 'sys.getsizeof', 'getsizeof', (['my_big_python_array'], {}), '(my_big_python_array)\n', (2686, 2707), False, 'from sys import getsizeof\n'), ((2772, 2801), 'sys.getsizeof', 'getsizeof', (['my_big_numpy_array'], {}), '(my_big_numpy_array)\n', (2781, 2801), False, 'from sys import getsizeof\n'), ((2866, 2888), 'sys.getsizeof', 'getsizeof', (['my_big_list'], {}), '(my_big_list)\n', (2875, 2888), False, 'from sys import getsizeof\n')] |
import random
import numpy as np
from codit.outbreak import Outbreak
from codit.outbreak_recorder import WardComponent, MorbidityComponent
from codit.society import TestingTracingSociety
from codit.society.alternatives import StrategicTester
from codit.society.strategic import TwoTrackTester
from codit.society.lateral import LateralFlowUK
from codit.disease import Covid
from codit.population.networks.radial_age import RadialAgePopulation
from codit.population.networks.household_workplace import HouseholdWorkplacePopulation
from codit.population.networks.city import CityPopulation
from codit.config import CFG
ALL_TIME_DAYS = 15
def test_mutation():
    """Simulate a 150-day outbreak of the B.1.1.7 variant under two-track testing."""
    random.seed(42)
    variant = Covid(
        pr_transmission_per_day=CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY["B.1.1.7"],
        name="B.1.1.7",
    )
    outbreak = Outbreak(TwoTrackTester(), variant, pop_size=5000, seed_size=50, n_days=150)
    outbreak.simulate()
def test_multiple_strain_society():
    """Simulate an outbreak with two co-circulating strains (wild type + B.1.1.7)."""
    random.seed(42)
    variant = Covid(
        pr_transmission_per_day=CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY["B.1.1.7"],
        name="B.1.1.7",
    )
    strains = {Covid(), variant}
    outbreak = Outbreak(TwoTrackTester(), strains, pop_size=5000, seed_size=50, n_days=150)
    outbreak.simulate()
def test_two_track_society():
    """Smoke-test a plain two-track-testing society over 150 days."""
    random.seed(42)
    outbreak = Outbreak(TwoTrackTester(), Covid(), pop_size=5000, seed_size=50, n_days=150)
    outbreak.simulate()
def test_two_track_hetero_society():
    """Two-track testing on a radial/age-structured population."""
    random.seed(42)
    outbreak = Outbreak(TwoTrackTester(), Covid(),
                        pop_size=5000, seed_size=50, n_days=150,
                        population_type=RadialAgePopulation)
    outbreak.simulate()
def test_two_track_hw_society():
    """Two-track testing on a household/workplace network population."""
    random.seed(42)
    outbreak = Outbreak(TwoTrackTester(), Covid(),
                        pop_size=5000, seed_size=50, n_days=150,
                        population_type=HouseholdWorkplacePopulation)
    outbreak.simulate()
def test_two_track_city_society():
    """City-scale outbreak under UK lateral-flow testing, recording ward and morbidity data."""
    random.seed(42)
    society = LateralFlowUK(
        config=dict(SIMULATOR_PERIODS_PER_DAY=4, DAILY_TEST_CAPACITY_PER_HEAD=1)
    )
    outbreak = Outbreak(society, Covid(),
                        pop_size=8000, seed_size=8000 // 80, n_days=150,
                        population_type=CityPopulation)
    outbreak.recorder.add_component(WardComponent(outbreak))
    outbreak.recorder.add_component(MorbidityComponent(outbreak.pop.people))
    outbreak.simulate()
def test_smart_society():
    """Smoke-test the strategic-testing society over 150 days."""
    random.seed(42)
    outbreak = Outbreak(StrategicTester(), Covid(), pop_size=5000, seed_size=50, n_days=150)
    outbreak.simulate()
def test_covid_model():
    """Regression test: a tiny seeded 8-person outbreak must reproduce a known trajectory."""
    society = TestingTracingSociety(episodes_per_day=2, config=dict(PROB_TEST_IF_REQUESTED=0.4))
    random.seed(42)
    np.random.seed(42)
    outbreak = Outbreak(society, Covid(), pop_size=8, seed_size=1, n_days=ALL_TIME_DAYS)
    outbreak.simulate()
    # for k, v in outbreak.pop.contacts.items():
    #     print(k, len(v))
    # Columns (per the original annotation): t, cov, risks, tests, isol
    # (six columns are recorded; the exact meaning of each should be
    # confirmed against the recorder implementation).
    expected = [
        [0.5, 0.25, 0.125, 0.0, 0.125, 0.0],
        [1.0, 0.25, 0.125, 0.25, 0.0, 0.0],
        [1.5, 0.25, 0.125, 0.0, 0.0, 0.0],
        [2.0, 0.375, 0.125, 0.0, 0.0, 0.0],
        [2.5, 0.375, 0.125, 0.0, 0.0, 0.0],
        [3.0, 0.375, 0.125, 0.0, 0.0, 0.0],
        [3.5, 0.375, 0.125, 0.0, 0.0, 0.0],
        [4.0, 0.375, 0.125, 0.0, 0.0, 0.125],
        [4.5, 0.375, 0.25, 0.0, 0.0, 0.125],
        [5.0, 0.5, 0.25, 0.0, 0.0, 0.125],
        [5.5, 0.5, 0.125, 0.0, 0.0, 0.125],
        [6.0, 0.5, 0.25, 0.0, 0.0, 0.125],
        [6.5, 0.5, 0.25, 0.0, 0.0, 0.125],
        [7.0, 0.5, 0.25, 0.0, 0.0, 0.125],
        [7.5, 0.5, 0.25, 0.0, 0.0, 0.125],
    ]
    np.testing.assert_allclose(outbreak.recorder.main_component.story[:15], expected)
#temptest
test_two_track_society() | [
"numpy.random.seed",
"codit.society.strategic.TwoTrackTester",
"codit.outbreak_recorder.WardComponent",
"codit.disease.Covid",
"codit.outbreak_recorder.MorbidityComponent",
"random.seed",
"codit.society.alternatives.StrategicTester",
"numpy.testing.assert_allclose"
] | [((664, 679), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (675, 679), False, 'import random\n'), ((695, 794), 'codit.disease.Covid', 'Covid', ([], {'pr_transmission_per_day': "CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY['B.1.1.7']", 'name': '"""B.1.1.7"""'}), "(pr_transmission_per_day=CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY[\n 'B.1.1.7'], name='B.1.1.7')\n", (700, 794), False, 'from codit.disease import Covid\n'), ((935, 950), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (946, 950), False, 'import random\n'), ((967, 1066), 'codit.disease.Covid', 'Covid', ([], {'pr_transmission_per_day': "CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY['B.1.1.7']", 'name': '"""B.1.1.7"""'}), "(pr_transmission_per_day=CFG.PROB_INFECT_IF_TOGETHER_ON_A_DAY[\n 'B.1.1.7'], name='B.1.1.7')\n", (972, 1066), False, 'from codit.disease import Covid\n'), ((1237, 1252), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1248, 1252), False, 'import random\n'), ((1398, 1413), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1409, 1413), False, 'import random\n'), ((1609, 1624), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1620, 1624), False, 'import random\n'), ((1831, 1846), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1842, 1846), False, 'import random\n'), ((2234, 2249), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2245, 2249), False, 'import random\n'), ((2474, 2489), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2485, 2489), False, 'import random\n'), ((2494, 2512), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2508, 2512), True, 'import numpy as np\n'), ((2742, 3382), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['o.recorder.main_component.story[:15]', '[[0.5, 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.25, 0.0, 0.0], [\n 1.5, 0.25, 0.125, 0.0, 0.0, 0.0], [2.0, 0.375, 0.125, 0.0, 0.0, 0.0], [\n 2.5, 0.375, 0.125, 0.0, 0.0, 0.0], [3.0, 0.375, 0.125, 0.0, 0.0, 0.0],\n [3.5, 
0.375, 0.125, 0.0, 0.0, 0.0], [4.0, 0.375, 0.125, 0.0, 0.0, 0.125\n ], [4.5, 0.375, 0.25, 0.0, 0.0, 0.125], [5.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [5.5, 0.5, 0.125, 0.0, 0.0, 0.125], [6.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [6.5, 0.5, 0.25, 0.0, 0.0, 0.125], [7.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [7.5, 0.5, 0.25, 0.0, 0.0, 0.125]]'], {}), '(o.recorder.main_component.story[:15], [[0.5, \n 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.25, 0.0, 0.0], [1.5,\n 0.25, 0.125, 0.0, 0.0, 0.0], [2.0, 0.375, 0.125, 0.0, 0.0, 0.0], [2.5, \n 0.375, 0.125, 0.0, 0.0, 0.0], [3.0, 0.375, 0.125, 0.0, 0.0, 0.0], [3.5,\n 0.375, 0.125, 0.0, 0.0, 0.0], [4.0, 0.375, 0.125, 0.0, 0.0, 0.125], [\n 4.5, 0.375, 0.25, 0.0, 0.0, 0.125], [5.0, 0.5, 0.25, 0.0, 0.0, 0.125],\n [5.5, 0.5, 0.125, 0.0, 0.0, 0.125], [6.0, 0.5, 0.25, 0.0, 0.0, 0.125],\n [6.5, 0.5, 0.25, 0.0, 0.0, 0.125], [7.0, 0.5, 0.25, 0.0, 0.0, 0.125], [\n 7.5, 0.5, 0.25, 0.0, 0.0, 0.125]])\n', (2768, 3382), True, 'import numpy as np\n'), ((807, 823), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {}), '()\n', (821, 823), False, 'from codit.society.strategic import TwoTrackTester\n'), ((1078, 1085), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (1083, 1085), False, 'from codit.disease import Covid\n'), ((1115, 1131), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {}), '()\n', (1129, 1131), False, 'from codit.society.strategic import TwoTrackTester\n'), ((1270, 1286), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {}), '()\n', (1284, 1286), False, 'from codit.society.strategic import TwoTrackTester\n'), ((1288, 1295), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (1293, 1295), False, 'from codit.disease import Covid\n'), ((1431, 1447), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {}), '()\n', (1445, 1447), False, 'from codit.society.strategic import TwoTrackTester\n'), ((1449, 1456), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (1454, 1456), False, 
'from codit.disease import Covid\n'), ((1642, 1658), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {}), '()\n', (1656, 1658), False, 'from codit.society.strategic import TwoTrackTester\n'), ((1660, 1667), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (1665, 1667), False, 'from codit.disease import Covid\n'), ((1953, 1960), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (1958, 1960), False, 'from codit.disease import Covid\n'), ((2104, 2120), 'codit.outbreak_recorder.WardComponent', 'WardComponent', (['o'], {}), '(o)\n', (2117, 2120), False, 'from codit.outbreak_recorder import WardComponent, MorbidityComponent\n'), ((2151, 2183), 'codit.outbreak_recorder.MorbidityComponent', 'MorbidityComponent', (['o.pop.people'], {}), '(o.pop.people)\n', (2169, 2183), False, 'from codit.outbreak_recorder import WardComponent, MorbidityComponent\n'), ((2267, 2284), 'codit.society.alternatives.StrategicTester', 'StrategicTester', ([], {}), '()\n', (2282, 2284), False, 'from codit.society.alternatives import StrategicTester\n'), ((2286, 2293), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (2291, 2293), False, 'from codit.disease import Covid\n'), ((2533, 2540), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (2538, 2540), False, 'from codit.disease import Covid\n')] |
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
File: pooling.py
func: Pooling operations for feature description. refer https://github.com/filipradenovic/cnnimageretrieval-pytorch/blob/master/cirtorch/layers/pooling.py
Author: yuwei09(<EMAIL>)
Date: 2021/06/15
"""
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class MAC(nn.Layer):
    """Global max-pooling descriptor (MAC): one value per channel,
    taken as the maximum over the full spatial extent."""
    def __init__(self):
        super(MAC, self).__init__()

    def forward(self, x):
        """Max-pool x over its entire (H, W) spatial extent."""
        spatial = (x.shape[-2], x.shape[-1])
        return F.max_pool2d(x, spatial)
class SPoC(nn.Layer):
    """Global average-pooling descriptor (SPoC): one value per channel,
    taken as the mean over the full spatial extent."""
    def __init__(self):
        super(SPoC, self).__init__()

    def forward(self, x):
        """Average-pool x over its entire (H, W) spatial extent."""
        spatial = (x.shape[-2], x.shape[-1])
        return F.avg_pool2d(x, spatial)
class GeM(nn.Layer):
    """Generalized-mean (GeM) pooling descriptor with a fixed exponent.

    Each activation is clipped to at least ``eps``, raised to the power
    ``p``, average-pooled over the whole spatial extent, then the ``1/p``
    root is taken.
    """
    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        self.p = p      # pooling exponent (fixed, not learned)
        self.eps = eps  # clamp floor, keeps pow() away from zero

    def forward(self, x, p=3, eps=1e-6):
        """forward

        Note: the ``p`` and ``eps`` arguments are accepted for interface
        compatibility but ignored; the instance attributes are used.
        """
        clamped = x.clip(min=self.eps).pow(self.p)
        pooled = F.avg_pool2d(clamped, (x.shape[-2], x.shape[-1]))
        return pooled.pow(1. / self.p)
class GeMmp(nn.Layer):
    """GeM pooling descriptor with a learnable per-channel exponent.

    Unlike ``GeM``, the exponent ``p`` here is a trainable parameter with
    one entry per channel, all initialised to the given ``p``.
    """
    def __init__(self, p=3, eps=1e-6, channels=2048):
        """
        :param p: initial value for every per-channel exponent
        :param eps: clamp floor, keeps pow() away from zero
        :param channels: number of feature channels (previously hard-coded
            to 2048; kept as the default for backward compatibility)
        """
        super(GeMmp, self).__init__()
        # One learnable exponent per channel, initialised to p.
        self.p = self.create_parameter(shape=(channels,))
        self.p.set_value(np.ones(channels).astype("float32") * p)
        self.eps = eps

    def forward(self, x):
        """forward"""
        # Broadcast the per-channel exponents over the spatial dims.
        p_unsqueeze = self.p.unsqueeze(-1).unsqueeze(-1)
        return F.avg_pool2d(x.clip(min=self.eps).pow(p_unsqueeze), (x.shape[-2], x.shape[-1])).pow(1. / p_unsqueeze)
if __name__ == "__main__":
    # Quick manual check: pool a random feature map and show the result.
    feats = paddle.randn((10, 2048, 14, 14))
    pool = GeMmp()
    pooled = pool(feats)
    print(pool.p)
    print(pooled.shape)
| [
"numpy.ones",
"paddle.nn.functional.avg_pool2d",
"paddle.randn",
"paddle.nn.functional.max_pool2d"
] | [((1719, 1751), 'paddle.randn', 'paddle.randn', (['(10, 2048, 14, 14)'], {}), '((10, 2048, 14, 14))\n', (1731, 1751), False, 'import paddle\n'), ((544, 587), 'paddle.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(x.shape[-2], x.shape[-1])'], {}), '(x, (x.shape[-2], x.shape[-1]))\n', (556, 587), True, 'import paddle.nn.functional as F\n'), ((763, 806), 'paddle.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(x.shape[-2], x.shape[-1])'], {}), '(x, (x.shape[-2], x.shape[-1]))\n', (775, 806), True, 'import paddle.nn.functional as F\n'), ((1396, 1409), 'numpy.ones', 'np.ones', (['(2048)'], {}), '(2048)\n', (1403, 1409), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import scipy.ndimage as spim
from scipy.sparse import csgraph
from scipy.spatial import ConvexHull
from openpnm.utils import logging, Workspace
logger = logging.getLogger(__name__)
ws = Workspace()
def isoutside(coords, shape):
    r"""
    Flags points that lie outside the specified domain shape.

    Parameters
    ----------
    coords : array_like
        The coordinates which are to be checked
    shape : array_like
        The shape of the domain beyond which points should be trimmed.
        The argument is treated as follows:

        **sphere** : A single value [r] is the radius of a sphere centered
        on [0, 0, 0].

        **cylinder** : Two values [r, z] are the radius and height of a
        cylinder whose central axis starts at [0, 0, 0] and extends in the
        positive z-direction.

        **rectangle** : Three values [x, y, z] give the outer corner of a
        box whose opposite corner lies at [0, 0, 0].  A 3-by-2 array of
        per-axis [lo, hi] bounds is also accepted.

    Returns
    -------
    An Np-long mask of ``True`` values indicating pores that lie outside
    the domain.
    """
    if len(shape) == 1:  # Spherical
        dist = np.linalg.norm(coords, axis=1)
        mask = dist > shape[0]
    elif len(shape) == 2:  # Cylindrical
        # Radial distance in the xy-plane only
        dist = np.linalg.norm(coords[:, :2], axis=1)
        mask = dist > shape[0]
        if shape[1] > 0:
            # Finite height: also trim points above the cap or below z=0
            z = coords[:, 2]
            mask = mask + (z > shape[1]) + (z < 0)
    elif len(shape) == 3:  # Rectilinear
        shape = np.array(shape, dtype=float)
        try:
            # shape given as per-axis [lo, hi] bounds
            lo_lim, hi_lim = shape[:, 0], shape[:, 1]
        except IndexError:
            # shape given as the far corner; the origin is the near corner
            lo_lim, hi_lim = np.zeros(3), shape
        mask = np.any(coords > hi_lim, axis=1) + np.any(coords < lo_lim, axis=1)
    return mask
def rotate_coords(network, a=0, b=0, c=0, R=None):
    r"""
    Rotates the network's pore coordinates about the coordinate axes.

    Parameters
    ----------
    network : OpenPNM Network object
        The network whose pore coordinates should be transformed
    a : scalar
        The amount in degrees to rotate about the x-axis
    b : scalar
        The amount in degrees to rotate about the y-axis
    c : scalar
        The amount in degrees to rotate about the z-axis
    R : array_like
        Rotation matrix. Must be a 3-by-3 matrix since pore coordinates are
        always in 3D. If this is given then the other individual arguments
        are ignored.

    See Also
    --------
    shear_coords

    Notes
    -----
    When rotating about several axes via ``a``, ``b`` and/or ``c``, the
    rotations are applied in sequence: x first, then y, then z.
    """
    if R is not None:
        # Explicit matrix given: apply it directly and skip the angle logic
        network['pore.coords'] = np.tensordot(network['pore.coords'], R,
                                              axes=(1, 1))
        return
    for angle, axis in ((a, 0), (b, 1), (c, 2)):
        if not angle:
            continue
        ct = np.cos(np.deg2rad(angle))
        st = np.sin(np.deg2rad(angle))
        if axis == 0:    # about x
            M = np.array([[1, 0, 0],
                          [0, ct, -st],
                          [0, st, ct]])
        elif axis == 1:  # about y (sign placement kept from the original)
            M = np.array([[ct, 0, -st],
                          [0, 1, 0],
                          [st, 0, ct]])
        else:            # about z
            M = np.array([[ct, -st, 0],
                          [st, ct, 0],
                          [0, 0, 1]])
        network['pore.coords'] = np.tensordot(network['pore.coords'], M,
                                              axes=(1, 1))
def shear_coords(network, ay=0, az=0, bx=0, bz=0, cx=0, cy=0, S=None):
    r"""
    Shears the network's pore coordinates along each axis.

    Parameters
    ----------
    network : OpenPNM Network object
        The network whose pore coordinates should be transformed
    ay : scalar
        The factor by which to shear along the x-axis as a function of y
    az : scalar
        The factor by which to shear along the x-axis as a function of z
    bx : scalar
        The factor by which to shear along the y-axis as a function of x
    bz : scalar
        The factor by which to shear along the y-axis as a function of z
    cx : scalar
        The factor by which to shear along the z-axis as a function of x
    cy : scalar
        The factor by which to shear along the z-axis as a function of y
    S : array_like
        The shear matrix. Must be a 3-by-3 matrix since pore coordinates
        are always in 3D. If this is given then the other individual
        arguments are ignored.

    See Also
    --------
    rotate_coords

    Notes
    -----
    The shear along the i *th* -axis is i* = i + a*j, i.e. the new i
    coordinate is the old one plus a linear factor of the j coordinate.
    A factor of 0 means no shear; a factor of 1 tilts neighboring layers
    by 45 degrees, and so on.

    If ``S`` is given, it should have the form::

        S = [[1 , ay, az],
             [bx, 1 , bz],
             [cx, cy, 1 ]]

    where any of the off-diagonal components can be 0.
    """
    if S is None:
        S = np.array([[1, ay, az],
                      [bx, 1, bz],
                      [cx, cy, 1]])
    # coords @ S.T is the transpose-free equivalent of (S @ coords.T).T
    network['pore.coords'] = network['pore.coords'] @ S.T
def trim(network, pores=[], throats=[]):
    '''
    Remove pores or throats from the network

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network from which pores or throats should be removed
    pores (or throats) : array_like
        The indices of the pores or throats to be removed from the
        network.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn.Np
    125
    >>> pn.Nt
    300
    >>> op.topotools.trim(network=pn, pores=[1])
    >>> pn.Np
    124
    >>> pn.Nt
    296
    '''
    # NOTE(review): the mutable default args ([]) are never mutated below,
    # so the shared-default pitfall is benign here.
    pores = network._parse_indices(pores)
    throats = network._parse_indices(throats)
    # Start from "keep everything" masks and mark deletions as False
    Pkeep = np.copy(network['pore.all'])
    Tkeep = np.copy(network['throat.all'])
    if np.size(pores) > 0:
        Pkeep[pores] = False
        if not np.any(Pkeep):
            raise Exception('Cannot delete ALL pores')
        # # Performing customized find_neighbor_throats which is much faster, but
        # # not general for other types of queries
        # temp = np.in1d(network['throat.conns'].flatten(), pores)
        # temp = np.reshape(temp, (network.Nt, 2))
        # Ts = np.any(temp, axis=1)
        # Ts = network.Ts[Ts]
        # Any throat touching a deleted pore must be deleted as well
        Ts = network.find_neighbor_throats(pores=~Pkeep, mode='union')
        if len(Ts) > 0:
            Tkeep[Ts] = False
    if np.size(throats) > 0:
        Tkeep[throats] = False
    # The following IF catches the special case of deleting ALL throats
    # It removes all throat props, adds 'all', and skips rest of function
    if not np.any(Tkeep):
        logger.info('Removing ALL throats from network')
        for item in list(network.keys()):
            if item.split('.')[0] == 'throat':
                del network[item]
        network['throat.all'] = np.array([], ndmin=1)
        return
    # Temporarily store throat conns and pore map for processing later
    Np_old = network.Np
    Nt_old = network.Nt
    Pkeep_inds = np.where(Pkeep)[0]
    Tkeep_inds = np.where(Tkeep)[0]
    # Pmap translates old pore numbers to new ones; -1 marks deleted pores
    Pmap = np.ones((network.Np,), dtype=int)*-1
    tpore1 = network['throat.conns'][:, 0]
    tpore2 = network['throat.conns'][:, 1]
    # Delete specified pores and throats from all objects
    for obj in network.project[::-1]:
        if (obj.Np == Np_old) and (obj.Nt == Nt_old):
            # Object spans the full domain: global indices apply directly
            Ps = Pkeep_inds
            Ts = Tkeep_inds
        else:
            # Subdomain object: translate the global keep-masks to its
            # local numbering before filtering its arrays
            Ps = obj.map_pores(pores=Pkeep, origin=network)
            Ts = obj.map_throats(throats=Tkeep, origin=network)
        for key in list(obj.keys()):
            temp = obj.pop(key)
            if key.split('.')[0] == 'throat':
                obj.update({key: temp[Ts]})
            if key.split('.')[0] == 'pore':
                obj.update({key: temp[Ps]})
    # Remap throat connections onto the new, renumbered pore indices
    Pmap[Pkeep] = np.arange(0, np.sum(Pkeep))
    Tnew1 = Pmap[tpore1[Tkeep]]
    Tnew2 = Pmap[tpore2[Tkeep]]
    network.update({'throat.conns': np.vstack((Tnew1, Tnew2)).T})
    # Clear adjacency and incidence matrices which will be out of date now
    network._am.clear()
    network._im.clear()
def extend(network, coords=[], conns=[], labels=[], **kwargs):
    r'''
    Add pores or throats to the network from a list of coords or conns.

    Parameters
    ----------
    network : OpenPNM Network Object
        The network to which pores or throats should be added
    coords : array_like
        The coordinates of the pores to add. These will be appended to the
        'pore.coords' array so should be of shape N-by-3, where N is the
        number of pores in the list.
    conns : array_like
        The throat connections to add. These will be appended to the
        'throat.conns' array so should be of shape N-by-2. Note that the
        numbering must point to existing pores.
    labels : string, or list of strings, optional
        A list of labels to apply to the new pores and throats
    '''
    # Accept the legacy keyword names for backward compatibility
    if 'throat_conns' in kwargs.keys():
        conns = kwargs['throat_conns']
    if 'pore_coords' in kwargs.keys():
        coords = kwargs['pore_coords']
    coords = np.array(coords)
    conns = np.array(conns)
    # New totals after the additions
    Np_old = network.num_pores()
    Nt_old = network.num_throats()
    Np = Np_old + coords.shape[0]
    Nt = Nt_old + conns.shape[0]
    if np.any(conns > Np):
        raise Exception('Some throat conns point to non-existent pores')
    network.update({'pore.all': np.ones([Np, ], dtype=bool),
                    'throat.all': np.ones([Nt, ], dtype=bool)})
    # Add coords and conns
    if np.size(coords) > 0:
        coords = np.vstack((network['pore.coords'], coords))
        network['pore.coords'] = coords
    if np.size(conns) > 0:
        conns = np.vstack((network['throat.conns'], conns))
        network['throat.conns'] = conns
    # Increase size of any prop or label arrays already on network and phases
    objs = list(network.project.phases().values())
    objs.append(network)
    for obj in objs:
        obj.update({'pore.all': np.ones([Np, ], dtype=bool),
                    'throat.all': np.ones([Nt, ], dtype=bool)})
        for item in list(obj.keys()):
            N = obj._count(element=item.split('.')[0])
            if obj[item].shape[0] < N:
                # Pad the array out to the new length: labels get False,
                # numeric props get NaN for the newly added elements
                arr = obj.pop(item)
                s = arr.shape
                if arr.dtype == bool:
                    obj[item] = np.zeros(shape=(N, *s[1:]), dtype=bool)
                else:
                    obj[item] = np.ones(shape=(N, *s[1:]), dtype=float)*np.nan
                obj[item][:arr.shape[0]] = arr
    # Regenerate models on all objects to fill new elements
    for obj in network.project.phases().values():
        if hasattr(obj, 'models'):
            obj.regenerate_models()
    # Apply labels, if supplied
    if labels != []:
        # Convert labels to list if necessary
        if isinstance(labels, str):
            labels = [labels]
        for label in labels:
            # Remove pore or throat from label, if present
            label = label.split('.')[-1]
            if np.size(coords) > 0:
                Ps = np.r_[Np_old:Np]
                if 'pore.'+label not in network.labels():
                    network['pore.'+label] = False
                network['pore.'+label][Ps] = True
            if np.size(conns) > 0:
                Ts = np.r_[Nt_old:Nt]
                if 'throat.'+label not in network.labels():
                    network['throat.'+label] = False
                network['throat.'+label][Ts] = True
    # Clear adjacency and incidence matrices which will be out of date now
    network._am.clear()
    network._im.clear()
def reduce_coordination(network, z):
    r"""
    Deletes throats from the network until the average coordination number
    matches ``z``.

    Parameters
    ----------
    network : OpenPNM Network object
        The network whose throats are to be trimmed
    z : scalar
        The desired average coordination number.  Only the mean value can
        be specified, not the distribution.

    Notes
    -----
    A minimum spanning tree is first built using random throat weights and
    its throats are protected from deletion, which keeps the network fully
    connected.
    """
    # Random weights make the resulting spanning tree itself random
    weights = np.random.rand(network.Nt)
    adj = network.create_adjacency_matrix(weights=weights, triu=False)
    tree = csgraph.minimum_spanning_tree(adj, overwrite=True).tocoo()
    # Flag the spanning tree's throats so the trim below cannot touch them
    keep = np.hstack(network.find_connecting_throat(tree.row, tree.col))
    network['throat.mst'] = False
    network['throat.mst'][keep] = True
    # Randomly pick non-tree throats to delete until the target is reached
    candidates = np.random.permutation(network.throats('mst', mode='nor'))
    doomed = candidates[:int(network.Nt - network.Np*(z/2))]
    trim(network=network, throats=doomed)
def label_faces(network, tol=0.0, label='surface'):
    r"""
    Labels surface pores on each flat face of the network as 'top',
    'bottom', 'left', 'right', 'front' or 'back'.  This assumes the
    network is cubic in shape (i.e. has six flat sides).

    Parameters
    ----------
    network : OpenPNM Network object
        The network to apply the labels to
    tol : scalar
        Tolerance for counting a pore as being on a face, intended for
        random networks: all pores within ``tol`` (as a fraction of the
        span along that axis) of the minimum or maximum coordinate count.
        The default is 0.
    label : string
        The label used to identify surface pores; created via
        ``find_surface_pores`` if not already present.  Default 'surface'.
    """
    label = label.split('.', 1)[-1]
    if 'pore.' + label not in network.labels():
        find_surface_pores(network, label=label)
    on_surface = network['pore.' + label]
    xyz = network['pore.coords']
    active = dimensionality(network)
    # Axis 0 -> left/right, axis 1 -> front/back, axis 2 -> bottom/top
    face_names = (('left', 'right'), ('front', 'back'), ('bottom', 'top'))
    for axis, (lo_name, hi_name) in enumerate(face_names):
        if not active[axis]:
            # Skip axes along which the network has no extent
            continue
        lo = np.amin(xyz[:, axis])
        hi = np.amax(xyz[:, axis])
        span = hi - lo
        network['pore.' + lo_name] = (xyz[:, axis] <= (lo + tol*span)) * on_surface
        network['pore.' + hi_name] = (xyz[:, axis] >= (hi - tol*span)) * on_surface
def find_surface_pores(network, markers=None, label='surface'):
    r"""
    Find the pores on the surface of the domain by performing a Delaunay
    triangulation between the network pores and some external ``markers``. All
    pores connected to these external marker points are considered surface
    pores.

    Parameters
    ----------
    network: OpenPNM Network Object
        The network for which the surface pores are to be found
    markers: array_like
        3 x N array of the marker coordinates to use in the triangulation. The
        labeling is performed in one step, so all points are added, and then
        any pores connected to at least one marker is given the provided label.
        By default, this function will automatically generate 6 points outside
        each axis of the network domain.

        Users may wish to specify a single external marker point and provide an
        appropriate label in order to identify specific faces. For instance,
        the marker may be *above* the domain, and the label might be
        'top_surface'.
    label : string
        The label to apply to the pores. The default is 'surface'.

    Notes
    -----
    This function does not check whether the given markers actually lie outside
    the domain, allowing the labeling of *internal* sufaces.

    If this method fails to mark some surface pores, consider sending more
    markers on each face.

    Examples
    --------
    >>> import openpnm as op
    >>> net = op.network.Cubic(shape=[5, 5, 5])
    >>> op.topotools.find_surface_pores(network=net)
    >>> net.num_pores('surface')
    98

    When cubic networks are created, the surfaces are already labeled:

    >>> net.num_pores(['top','bottom', 'left', 'right', 'front','back'])
    98
    """
    import scipy.spatial as sptl
    # Work only in the axes along which the network actually extends
    dims = dimensionality(network)
    coords = network['pore.coords'][:, dims]
    if markers is None:
        # normalize coords to a 1 unit cube centered on origin
        coords -= np.amin(coords, axis=0)
        coords /= np.amax(coords, axis=0)
        coords -= 0.5
        npts = max((network.Np/10, 100))
        if sum(dims) == 1:
            # 1D network: every pore is trivially a surface pore
            network['pore.'+label] = True
            return
        if sum(dims) == 2:
            # Markers on a circle of radius 0.75 surrounding the unit square
            r = 0.75
            theta = np.linspace(0, 2*np.pi, int(npts), dtype=float)
            x = r*np.cos(theta)
            y = r*np.sin(theta)
            markers = np.vstack((x, y)).T
        if sum(dims) == 3:
            # Markers spread over a unit sphere using golden-angle spacing
            # (the (1 + 5**0.5) factor is the golden-ratio increment)
            r = 1.00
            indices = np.arange(0, int(npts), dtype=float) + 0.5
            phi = np.arccos(1 - 2*indices/npts)
            theta = np.pi * (1 + 5**0.5) * indices
            x = r*np.cos(theta) * np.sin(phi)
            y = r*np.sin(theta) * np.sin(phi)
            z = r*np.cos(phi)
            markers = np.vstack((x, y, z)).T
    else:
        # User-supplied markers: only validate their dimensionality
        if sum(dims) == 1:
            pass
        if sum(dims) == 2:
            markers = np.atleast_2d(markers)
            if markers.shape[1] != 2:
                raise Exception('Network appears planar, so markers must be 2D')
        if sum(dims) == 3:
            markers = np.atleast_2d(markers)
            if markers.shape[1] != 3:
                raise Exception('Markers must be 3D for this network')
    # Triangulate the pores together with the markers; any pore sharing a
    # Delaunay edge with a marker point is a surface pore
    pts = np.vstack((coords, markers))
    tri = sptl.Delaunay(pts, incremental=False)
    (indices, indptr) = tri.vertex_neighbor_vertices
    for k in range(network.Np, tri.npoints):
        # Neighbors of marker k; keep only indices that are network pores
        neighbors = indptr[indices[k]:indices[k+1]]
        inds = np.where(neighbors < network.Np)
        neighbors = neighbors[inds]
        if 'pore.'+label not in network.keys():
            network['pore.'+label] = False
        network['pore.'+label][neighbors] = True
def dimensionality(network=None, coords=None):
    r"""
    Determines along which axes the points are actually spread out.

    Parameters
    ----------
    network : OpenPNM Network object
        The network whose dimensionality is to be checked.  Alternatively
        raw ``coords`` may be passed instead.

    Returns
    -------
    dims : list
        A 3-by-1 array containing ``True`` for each axis that contains
        multiple values, indicating that the pores are spatially distributed
        in that direction.
    """
    if network is not None:
        pts = network["pore.coords"]
    elif coords is not None:
        pts = coords
    tol = np.finfo(float).resolution
    flags = []
    for axis_vals in pts.T:
        # An axis counts if its values are not all (numerically) identical
        collapsed = np.allclose(axis_vals, axis_vals.mean(), atol=0, rtol=tol)
        flags.append(not collapsed)
    return np.array(flags)
def clone_pores(network, pores, labels=['clone'], mode='parents'):
    r"""
    Clones the specified pores and adds them to the network

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network object to which the new pores are to be added
    pores : array_like
        List of pores to clone
    labels : string, or list of strings
        The labels to apply to the clones, default is 'clone'
    mode : string
        Controls the connections between parents and clones.  Options are:
        - 'parents': (Default) Each clone is connected only to its parent
        - 'siblings': Clones are only connected to each other in the same
          manner as parents were connected
        - 'isolated': No connections between parents or siblings
    """
    if isinstance(labels, str):
        labels = [labels]
    network._parse_indices(pores)
    # Snapshot the element counts before anything is added
    Np_orig = network.Np
    Nt_orig = network.Nt
    parents = np.array(pores, ndmin=1)
    pclone = network['pore.coords'][pores, :]
    Np_final = Np_orig + np.shape(pclone)[0]
    clones = np.arange(Np_orig, Np_final)
    # Add the cloned pores to the network
    extend(network=network, pore_coords=pclone)
    # Apply the requested labels to the freshly created pores
    for item in labels:
        network.set_label(label=item, pores=range(Np_orig, Np_final))
    # Create throats according to the chosen mode
    if mode == 'parents':
        # One throat per clone, joining it to its parent
        extend(network=network, conns=np.vstack((parents, clones)).T)
    elif mode == 'siblings':
        # Reproduce the parent-parent connectivity among the clones
        ts = network.find_neighbor_throats(pores=pores, mode='xnor')
        mapping = np.zeros([network.Np, ], dtype=int)
        mapping[pores] = np.arange(Np_orig, network.Np)
        extend(network=network,
               throat_conns=mapping[network['throat.conns'][ts]])
    elif mode == 'isolated':
        pass
    # Label any throats created above
    for item in labels:
        network.set_label(label=item, throats=range(Nt_orig, network.Nt))
    # Clear adjacency and incidence matrices which will be out of date now
    network._am.clear()
    network._im.clear()
def merge_networks(network, donor=[]):
    r"""
    Combine multiple networks into one

    This does not attempt any topological manipulations (such as stitching
    nearby pores to each other).  Coordinates, conns, labels and properties
    of each donor are appended to the recipient's arrays.

    Parameters
    ----------
    network : OpenPNM Network Object
        The network to which all the other networks should be added.
    donor : OpenPNM Network Object or list of Objects
        The network object(s) to add to the given network

    See Also
    --------
    extend
    trim
    stitch
    """
    # Normalize ``donor`` to a list so a single network may also be passed
    if isinstance(donor, list):
        donors = donor
    else:
        donors = [donor]
    # First fix up geometries
    # main_proj = network.project
    # main_geoms = main_proj.geometries()
    # Move each donor's geometry objects into the recipient's project,
    # renaming any geometry whose name collides with an existing one
    for donor in donors:
        proj = donor.project
        geoms = proj.geometries().values()
        for geo in geoms:
            if geo.name in network.project.names:
                geo.name = network.project._generate_name(geo)
            network.project.append(geo)
    for donor in donors:
        # Append donor coords; donor conns are shifted by network.Np.
        # NOTE(review): this relies on network.Np still reflecting the
        # pre-append pore count here (presumably it is derived from
        # 'pore.all', which is only resized below) — confirm.
        network['pore.coords'] = np.vstack((network['pore.coords'],
                                            donor['pore.coords']))
        network['throat.conns'] = np.vstack((network['throat.conns'],
                                             donor['throat.conns']
                                             + network.Np))
        # Resize the 'all' label arrays to the new element counts
        p_all = np.ones((np.shape(network['pore.coords'])[0],), dtype=bool)
        t_all = np.ones((np.shape(network['throat.conns'])[0],), dtype=bool)
        network.update({'pore.all': p_all})
        network.update({'throat.all': t_all})
        # Merge every remaining property/label present on either object
        for key in set(network.keys()).union(set(donor.keys())):
            if key.split('.')[1] not in ['conns', 'coords', '_id', 'all']:
                if key in network.keys():
                    pop_flag = False
                    # If key not on donor add it first with dummy values to
                    # simplify merging later
                    if key not in donor.keys():
                        logger.debug('Adding ' + key + ' to donor')
                        if network[key].dtype == bool:  # Deal with labels
                            donor[key] = False
                        else:  # Deal with numerical data
                            element = key.split('.')[0]
                            shape = list(network[key].shape)
                            N = donor._count(element)
                            shape[0] = N
                            donor[key] = np.empty(shape=shape)*np.nan
                        # Remember to remove the dummy array afterwards so
                        # the donor is left unmodified
                        pop_flag = True
                    # Then merge it with existing array on network
                    if len(network[key].shape) == 1:
                        temp = np.hstack((network[key], donor[key]))
                    else:
                        temp = np.vstack((network[key], donor[key]))
                    network[key] = temp
                    if pop_flag:
                        donor.pop(key, None)
                else:
                    # If key not on network add it first
                    logger.debug('Adding ' + key + ' to network')
                    if donor[key].dtype == bool:
                        network[key] = False
                    else:
                        # Pre-size with NaNs so pre-existing elements have
                        # placeholder values for the donor-only property
                        data_shape = list(donor[key].shape)
                        pore_prop = True if key.split(".")[0] == "pore" else False
                        data_shape[0] = network.Np if pore_prop else network.Nt
                        network[key] = np.empty(data_shape) * np.nan
                    # Then append donor values to network
                    s = np.shape(donor[key])[0]
                    network[key][-s:] = donor[key]
    # Clear adjacency and incidence matrices which will be out of date now
    network._am.clear()
    network._im.clear()
def stitch(network, donor, P_network, P_donor, method='nearest',
           len_max=np.inf, label_suffix='', label_stitches='stitched'):
    r'''
    Stitches a second network to the current network.

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network to which the donor Network will be attached
    donor : OpenPNM Network Object
        The Network to stitch on to the current Network
    P_network : array_like
        The pores on the current Network
    P_donor : array_like
        The pores on the donor Network
    label_suffix : str or None
        Some text to append to each label in the donor Network before
        inserting them into the recipient.  The default is to append no
        text, but a common option would be to append the donor Network's
        name. To insert none of the donor labels, use ``None``.
    label_stitches : str or list of strings
        The label to apply to the newly created 'stitch' throats.  The
        default is 'stitched'.  If performing multiple stitches in a row it
        might be helpful to label the throats created during each step
        uniquely for later identification.
    len_max : float
        Set a length limit on length of new throats
    method : string (default = 'nearest')
        The method to use when making pore to pore connections.  Options are:
        - 'radius' : Connects each pore on the recipient network to the
          nearest pores on the donor network, within ``len_max``
        - 'nearest' : Connects each pore on the recipient network to the
          nearest pore on the donor network.

    Notes
    -----
    Before stitching it is necessary to translate the pore coordinates of
    one of the Networks so that it is positioned correctly relative to the
    other.  This is illustrated in the example below.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn2 = op.network.Cubic(shape=[5, 5, 5])
    >>> [pn.Np, pn.Nt]
    [125, 300]
    >>> [pn2.Np, pn2.Nt]
    [125, 300]
    >>> pn2['pore.coords'][:, 2] += 5.0
    >>> op.topotools.stitch(network=pn, donor=pn2, P_network=pn.pores('top'),
    ...                     P_donor=pn2.pores('bottom'), method='radius',
    ...                     len_max=1.0)
    >>> [pn.Np, pn.Nt]
    [250, 625]
    '''
    # NOTE(review): ``label_suffix`` is documented above but never used in
    # this body — confirm whether suffixing was lost or handled elsewhere.
    # Parse inputs
    if isinstance(label_stitches, str):
        label_stitches = [label_stitches]
    # Pre-create the stitch labels so extend() can fill them in later
    for s in label_stitches:
        if s not in network.keys():
            network['throat.' + s] = False
    # Get the initial number of pores and throats
    N_init = {}
    N_init['pore'] = network.Np
    N_init['throat'] = network.Nt
    if method == 'nearest':
        P1 = P_network
        P2 = P_donor + N_init['pore']  # Increment pores on donor
        C1 = network['pore.coords'][P_network]
        C2 = donor['pore.coords'][P_donor]
        # Pairwise distances between the two pore sets; D.min(axis=0) picks
        # the closest recipient pore for each donor pore
        D = sp.spatial.distance.cdist(C1, C2)
        [P1_ind, P2_ind] = np.where(D == D.min(axis=0))
        conns = np.vstack((P1[P1_ind], P2[P2_ind])).T
    elif method == 'radius':
        P1 = P_network
        P2 = P_donor + N_init['pore']  # Increment pores on donor
        C1 = network['pore.coords'][P_network]
        C2 = donor['pore.coords'][P_donor]
        # Connect every pair whose separation is within len_max
        D = sp.spatial.distance.cdist(C1, C2)
        [P1_ind, P2_ind] = np.where(D <= len_max)
        conns = np.vstack((P1[P1_ind], P2[P2_ind])).T
    else:
        raise Exception('<{}> method not supported'.format(method))
    # Absorb donor data into the recipient, then add the stitch throats
    merge_networks(network, donor)
    # Add the new stitch throats to the Network
    extend(network=network, throat_conns=conns, labels=label_stitches)
    if len(network.project.geometries()) > 0:
        logger.warning(str(conns.shape[0]) + ' newly created throats are not '
                       + 'assigned to a geometry')
    # Remove donor from Workspace, if present
    # This check allows for the reuse of a donor Network multiple times
    for sim in list(ws.values()):
        if donor in sim:
            del ws[sim.name]
def stitch_pores(network, pores1, pores2, mode='gabriel'):
    r"""
    Stitches together pores in a network with disconnected clusters

    Parameters
    ----------
    network : OpenPNM Network
        The network to operate upon
    pores1 and pores2: array_like
        The pore indices of the disconnected clusters to be joined
    mode : str
        Dictates which tesselation method is used to identify which pores to
        stitch together.  Options are 'gabriel' (default) or 'delaunay'.

    Returns
    -------
    None
        The network is operated on 'in-place' so nothing is returned.

    Raises
    ------
    Exception
        If ``mode`` is not 'gabriel' or 'delaunay'.
    """
    from openpnm.network import Delaunay, Gabriel
    pores1 = network._parse_indices(pores1)
    pores2 = network._parse_indices(pores2)
    C1 = network.coords[pores1, :]
    C2 = network.coords[pores2, :]
    crds = np.vstack((C1, C2))
    # Build a temporary tessellation network spanning both clusters
    if mode == 'delaunay':
        net = Delaunay(points=crds, settings={'trim': False})
    elif mode == 'gabriel':
        net = Gabriel(points=crds, settings={'trim': False})
    else:
        # Previously an unrecognized mode fell through to a NameError on
        # ``net``; raise a clear error instead.
        raise Exception('Unsupported mode: ' + str(mode))
    # Tag which temporary pores came from cluster one.
    # NOTE(review): the 'two' label below overlaps 'one' (both ranges start
    # at 0); it is never read afterwards so this is harmless, but it looks
    # unintended — confirm.
    net.set_label(pores=range(len(pores1)), label='pore.one')
    net.set_label(pores=range(len(pores2)), label='pore.two')
    # Throats joining a cluster-one pore to a non-cluster-one pore are the
    # candidate stitches
    Ts = net.find_neighbor_throats(pores=net.pores('one'), mode='xor')
    conns = net.conns[Ts]
    # Map temporary pore indices back onto the original network's indices
    mapped_conns = np.vstack((pores1[conns[:, 0]],
                              pores2[conns[:, 1] - len(pores1)])).T
    mapped_conns = np.sort(mapped_conns, axis=1)
    extend(network=network, conns=mapped_conns, labels='stitched')
def connect_pores(network, pores1, pores2, labels=[], add_conns=True):
    r'''
    Returns the possible connections between two groups of pores, and
    optionally makes the connections.

    Parameters
    ----------
    network : OpenPNM Network Object
    pores1 : array_like
        The first group of pores on the network
    pores2 : array_like
        The second group of pores on the network
    labels : list of strings
        The labels to apply to the new throats.  Only used when
        ``add_conns`` is True.
    add_conns : bool
        If True (default) the connections are added to the supplied
        network; otherwise they are returned as an Nt x 2 array suitable
        for passing directly to ``extend``.

    Notes
    -----
    (1) ``pores1`` and ``pores2`` may also be lists of lists, in which case
    corresponding members of the two lists are connected 1-to-1.  For
    example, pores1 = [[0, 1], [2, 3]] and pores2 = [[5], [7, 9]] yields:
    ::
        0 --> 5     2 --> 7     3 --> 7
        1 --> 5     2 --> 9     3 --> 9

    (2) To use the batch functionality, each element of ``pores1`` and
    ``pores2`` must be a list or ndarray.

    (3) The connections are produced in the format expected by the default
    OpenPNM 'throat.conns' array.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> pn.Nt
    300
    >>> op.topotools.connect_pores(network=pn, pores1=[22, 32],
    ...                            pores2=[16, 80, 68])
    >>> pn.Nt
    306
    >>> pn['throat.conns'][300:306]
    array([[16, 22],
           [22, 80],
           [22, 68],
           [16, 32],
           [32, 80],
           [32, 68]])
    '''
    # Promote bare index groups to lists-of-lists so both call styles
    # (single pair of groups, or batch of pairs) take the same path
    try:
        len(pores1[0])
    except (TypeError, IndexError):
        pores1 = [pores1]
    try:
        len(pores2[0])
    except (TypeError, IndexError):
        pores2 = [pores2]
    if len(pores1) != len(pores2):
        raise Exception('Running in batch mode! pores1 and pores2 must be'
                        + ' of the same length.')
    # Build the cartesian product of each group pair
    heads, tails = [], []
    for group1, group2 in zip(pores1, pores2):
        n1, n2 = np.size(group1), np.size(group2)
        heads.append(np.repeat(group1, n2))
        tails.append(np.tile(group2, n1))
    conns = np.vstack([np.concatenate(heads), np.concatenate(tails)]).T
    if not add_conns:
        return conns
    extend(network=network, throat_conns=conns, labels=labels)
def find_pore_to_pore_distance(network, pores1=None, pores2=None):
    r'''
    Find the distance between all pores in set 1 and each pore in set 2

    Parameters
    ----------
    network : OpenPNM Network Object
        The network object containing the pore coordinates
    pores1 : array_like
        The pore indices of the first set
    pores2 : array_Like
        The pore indices of the second set.  These may partially or fully
        overlap with ``pores1``.

    Returns
    -------
    dist : array_like
        A dense distance matrix with ``len(pores1)`` rows and
        ``len(pores2)`` columns: entry *(i, j)* holds the distance between
        pore *i* of ``pores1`` and pore *j* of ``pores2``.

    Notes
    -----
    The returned matrix is Np_1 by Np_2 so it can grow large; for big sets
    a KD-tree approach (available in ``scipy.spatial``) is preferable.
    '''
    from scipy.spatial.distance import cdist
    coords = network['pore.coords']
    set1 = np.array(pores1, ndmin=1)
    set2 = np.array(pores2, ndmin=1)
    return cdist(coords[set1], coords[set2])
def filter_pores_by_z(network, pores, z=1):
    r"""
    Find pores with a given number of neighbors

    Parameters
    ----------
    network : OpenPNM Network object
        The network on which the query is to be performed
    pores : array_like
        The pores to be filtered
    z : int
        The coordination number to filter by

    Returns
    -------
    pores : array_like
        The subset of ``pores`` whose coordination number equals ``z``
    """
    pores = network._parse_indices(pores)
    # Coordination number of each candidate pore
    coordination = network.num_neighbors(pores=pores)
    matches = np.where(coordination == z)[0]
    return pores[matches]
def subdivide(network, pores, shape, labels=[]):
    r'''
    Trims the given pores and replaces them with cubic sub-networks of the
    given shape.

    Parameters
    ----------
    network : OpenPNM Network Object
    pores : array_like
        The first group of pores to be replaced
    shape : array_like
        The shape of cubic networks in the target locations

    Notes
    -----
    It works only for cubic networks, and a check is performed to ensure this
    is the case.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 6, 5], spacing=0.001)
    >>> pn.Np
    150
    >>> nano_pores = [2, 13, 14, 15]
    >>> op.topotools.subdivide(network=pn, pores=nano_pores, shape=[4, 7, 3],
    ...                        labels='nano')
    >>> pn.Np
    482
    '''
    # Only Cubic networks have the regular spacing this method relies on
    mro = network._mro()
    if 'Cubic' not in mro:
        raise Exception('Subdivide is only supported for Cubic Networks')
    from openpnm.network import Cubic
    pores = network._parse_indices(pores)
    # Checks to find boundary pores in the selected pores
    if 'pore.boundary' in network.labels():
        if (np.in1d(pores, network.pores('boundary'))).any():
            raise Exception('boundary pores cannot be subdivided!')
    # Only one subdivision per network is supported; flag it on first use
    if not hasattr(network, '_subdivide_flag'):
        network._subdivide_flag = True
    else:
        raise Exception('The network has subdivided pores, so the method \
                         does not support another subdivision')
    # Assigning right shape and division
    if np.size(shape) != 2 and np.size(shape) != 3:
        raise Exception('Subdivide not implemented for Networks other than 2D and 3D')
    if np.size(shape) == 3 and 1 not in shape:
        # Fully 3D subdivision: use the shape as given
        div = np.array(shape, ndmin=1)
        single_dim = None
    else:
        # Planar case: find which axis of the host network is flat (if any)
        single_dim = np.where(np.array(get_shape(network)) == 1)[0]
        if np.size(single_dim) == 0:
            single_dim = None
        if np.size(shape) == 3:
            div = np.array(shape, ndmin=1)
        else:
            div = np.zeros(3, dtype=np.int32)
        if single_dim is None:
            dim = 2
        else:
            dim = single_dim
        # Force one cell along the flat axis, then fill the remaining axes
        # from ``shape`` via the boolean mask of still-zero entries
        div[dim] = 1
        div[-np.array(div, ndmin=1, dtype=bool)] = np.array(shape, ndmin=1)
    # Creating small network and handling labels
    networkspacing = get_spacing(network)
    new_netspacing = networkspacing/div
    new_net = Cubic(shape=div, spacing=new_netspacing)
    main_labels = ['front', 'back', 'left', 'right', 'top', 'bottom']
    if single_dim is not None:
        # For a planar host, only the four in-plane face-label pairs apply
        label_groups = np.array([['left', 'right'],
                                 ['front', 'back'],
                                 ['top', 'bottom']])
        non_single_labels = label_groups[np.array([0, 1, 2]) != single_dim]
    # Mark the surface pores of the small network with temporary labels
    for label in main_labels:
        new_net['pore.surface_' + label] = False
        network['pore.surface_' + label] = False
        if single_dim is None:
            new_net['pore.surface_' + label][new_net.pores(labels=label)] = True
        else:
            for ind in [0, 1]:
                loc = (non_single_labels[ind] == label)
                temp_pores = new_net.pores(non_single_labels[ind][loc])
                new_net['pore.surface_' + label][temp_pores] = True
    # Keep the unshifted template coordinates for reuse on each pore
    old_coords = np.copy(new_net['pore.coords'])
    if labels == []:
        labels = ['pore.subdivided_' + new_net.name]
    for P in pores:
        # Shifting the new network to the right location and attaching it to
        # the main network
        shift = network['pore.coords'][P] - networkspacing/2
        new_net['pore.coords'] += shift
        Pn = network.find_neighbor_pores(pores=P)
        try:
            Pn_new_net = network.pores(labels)
        except KeyError:
            Pn_new_net = []
        # Separate neighbors that are original pores from previously added
        # subdivision pores
        Pn_old_net = Pn[~np.in1d(Pn, Pn_new_net)]
        Np1 = network.Np
        extend(pore_coords=new_net['pore.coords'],
               throat_conns=new_net['throat.conns'] + Np1,
               labels=labels, network=network)
        # Moving the temporary labels to the big network
        for label in main_labels:
            network['pore.surface_' + label][Np1:] = new_net['pore.surface_' + label]
        # Stitching the old pores of the main network to the new extended pores
        surf_pores = network.pores('surface_*')
        surf_coord = network['pore.coords'][surf_pores]
        for neighbor in Pn:
            neighbor_coord = network['pore.coords'][neighbor]
            # Squared distance (rounded to suppress float noise) from this
            # neighbor to every temporary surface pore
            dist = [round(np.inner(neighbor_coord-x, neighbor_coord-x),
                          20) for x in surf_coord]
            nearest_neighbor = surf_pores[dist == np.amin(dist)]
            if neighbor in Pn_old_net:
                # Connect to all surface pores on the same face as the
                # nearest one
                coplanar_labels = network.labels(pores=nearest_neighbor)
                new_neighbors = network.pores(coplanar_labels,
                                              mode='and')
                # This might happen to the edge of the small network
                if np.size(new_neighbors) == 0:
                    labels = network.labels(pores=nearest_neighbor,
                                            mode='and')
                    common_label = [label for label in labels if 'surface_' in label]
                    new_neighbors = network.pores(common_label)
            elif neighbor in Pn_new_net:
                new_neighbors = nearest_neighbor
            connect_pores(network=network, pores1=neighbor,
                          pores2=new_neighbors, labels=labels)
        # Removing temporary labels
        for label in main_labels:
            network['pore.surface_' + label] = False
        new_net['pore.coords'] = np.copy(old_coords)
    # Refresh the face labels, then drop the temporary surface labels
    label_faces(network=network)
    for label in main_labels:
        del network['pore.surface_' + label]
    # Finally remove the original (now replaced) pores
    trim(network=network, pores=pores)
    ws = network.project.workspace
    ws.close_project(new_net.project)
def trim_occluded_throats(network, mask='all'):
    r"""
    Remove throats with zero area from the network, along with any pores
    left isolated as a result.

    Parameters
    ----------
    network : OpenPNM Network Object
    mask : string
        Applies the routine only to pores and throats carrying this label
    """
    # Throats whose area has collapsed to exactly zero
    blocked = network['throat.area'] == 0
    if np.sum(blocked) > 0:
        # Restrict to the masked subset before trimming
        blocked *= network["throat."+mask]
        trim(network=network, throats=blocked)
def merge_pores(network, pores, labels=['merged']):
    r"""
    Combines a selection of pores into a new single pore located at the
    centroid of the selected pores and connected to all of their neighbors.

    Parameters
    ----------
    network : OpenPNM Network Object
    pores : array_like
        The list of pores which are to be combined into a new single pore
    labels : string or list of strings
        The labels to apply to the new pore and new throat connections

    Notes
    -----
    (1) The method also works if a list of lists is passed, in which case
    it consecutively merges the given selections of pores.

    (2) The selection of pores should be chosen carefully, preferably so
    that they all form a continuous cluster.  For instance, it is
    recommended to use the ``find_nearby_pores`` method to find all pores
    within a certain distance of a given pore, and these can then be merged
    without causing any abnormal connections.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[20, 20, 1])
    >>> Ps = pn.find_nearby_pores(pores=111, r=5, flatten=True)
    >>> op.topotools.merge_pores(network=pn, pores=Ps, labels=['merged'])
    >>> print(pn.Np)
    321
    >>> pn.pores('merged')
    array([320])
    >>> pn.num_throats('merged')
    32
    """
    # Assert that `pores` is list of lists
    try:
        len(pores[0])
    except (TypeError, IndexError):
        pores = [pores]
    N = len(pores)
    NBs, XYZs = [], []
    # For each group, record its external neighbors and the centroid of the
    # group plus those neighbors (location for the replacement pore)
    for Ps in pores:
        temp = network.find_neighbor_pores(pores=Ps, mode='union', flatten=True,
                                           include_input=False)
        NBs.append(temp)
        points = np.concatenate((temp, Ps))
        XYZs.append(hull_centroid(network["pore.coords"][points]))
    # Append one new pore per group; they occupy the last N indices
    extend(network, pore_coords=XYZs, labels=labels)
    Pnew = network.Ps[-N::]
    # Possible throats between new pores: This only happens when running in
    # batch mode, i.e. multiple groups of pores are to be merged. In case
    # some of these groups share elements, possible throats between the
    # intersecting elements is not captured and must be added manually.
    pores_set = [set(items) for items in pores]
    NBs_set = [set(items) for items in NBs]
    ps1, ps2 = [], []
    from itertools import combinations
    for i, j in combinations(range(N), 2):
        if not NBs_set[i].isdisjoint(pores_set[j]):
            ps1.append([network.Ps[-N+i]])
            ps2.append([network.Ps[-N+j]])
    # Add (possible) connections between the new pores
    connect_pores(network, pores1=ps1, pores2=ps2, labels=labels)
    # Add connections between the new pores and the rest of the network
    connect_pores(network, pores2=np.split(Pnew, N), pores1=NBs, labels=labels)
    # Trim merged pores from the network
    trim(network=network, pores=np.concatenate(pores))
def hull_centroid(points):
    r"""
    Computes the centroid of the convex hull enclosing the given
    coordinates.

    Parameters
    ----------
    points : Np by 3 ndarray
        Coordinates (xyz)

    Returns
    -------
    centroid : array
        A 3 by 1 Numpy array containing coordinates of the centroid.
    """
    # Only axes along which the points actually vary take part in the hull;
    # flat axes keep the plain mean of the input points instead
    varying = [np.unique(points[:, axis]).size != 1 for axis in range(3)]
    hull = ConvexHull(points[:, varying])
    centroid = points.mean(axis=0)
    centroid[varying] = hull.points[hull.vertices].mean(axis=0)
    return centroid
def _template_sphere_disc(dim, outer_radius, inner_radius):
r"""
This private method generates an image array of a sphere/shell-disc/ring.
It is useful for passing to Cubic networks as a ``template`` to make
networks with desired shapes.
Parameters
----------
dim : int
Network dimension
outer_radius : int
Number of the nodes in the outer radius of the network
inner_radius : int
Number of the nodes in the inner radius of the network
Returns
-------
im : array_like
A Numpy array containing 1's to demarcate the desired shape, and 0's
elsewhere.
"""
rmax = np.array(outer_radius, ndmin=1)
rmin = np.array(inner_radius, ndmin=1)
ind = 2 * rmax - 1
coord = np.indices((ind * np.ones(dim, dtype=int)))
coord = coord - (ind - 1)/2
x = coord[0, :]
y = coord[1, :]
if dim == 2:
img = (x ** 2 + y ** 2) < rmax ** 2
elif dim == 3:
z = coord[2, :]
img = (x ** 2 + y ** 2 + z ** 2) < rmax ** 2
if rmin[0] != 0:
if dim == 2:
img_min = (x ** 2 + y ** 2) > rmin ** 2
elif dim == 3:
img_min = (x ** 2 + y ** 2 + z ** 2) > rmin ** 2
img = img * img_min
return img
def template_sphere_shell(outer_radius, inner_radius=0, dim=3):
    r"""
    This method generates an image array of a sphere-shell.

    It is useful for passing to Cubic networks as a ``template`` to make
    spherical shaped networks.

    Parameters
    ----------
    outer_radius : int
        Number of nodes in the outer radius of the sphere.
    inner_radius : int
        Number of nodes in the inner radius of the shell.  A value of 0
        will result in a solid sphere.
    dim : scalar
        Controls the number of dimensions of the result.  3 returns a
        sphere, while 2 returns a disk.

    Returns
    -------
    im : array_like
        A Numpy array containing 1's to demarcate the sphere-shell, and 0's
        elsewhere.
    """
    # Delegate directly to the shared sphere/disc template generator
    return _template_sphere_disc(dim=dim, outer_radius=outer_radius,
                                 inner_radius=inner_radius)
def template_cylinder_annulus(height, outer_radius, inner_radius=0):
    r"""
    This method generates an image array of a disc-ring.

    It is useful for passing to Cubic networks as a ``template`` to make
    circular-shaped 2D networks.

    Parameters
    ----------
    height : int
        The height of the cylinder
    outer_radius : int
        Number of nodes in the outer radius of the cylinder
    inner_radius : int
        Number of the nodes in the inner radius of the annulus.  A value
        of 0 will result in a solid cylinder.

    Returns
    -------
    im : array_like
        A Numpy array containing 1's to demarcate the disc-ring, and 0's
        elsewhere.
    """
    # Build a single 2D disc/ring slice, then stack it ``height`` times
    disc = _template_sphere_disc(dim=2, outer_radius=outer_radius,
                                 inner_radius=inner_radius)
    return np.tile(np.atleast_3d(disc), reps=height)
def generate_base_points(num_points, domain_size, density_map=None,
                         reflect=True):
    r"""
    Generates a set of base points for passing into the Tessellation-based
    Network classes.  The points can be distributed in spherical,
    cylindrical, or rectilinear patterns, as well as 2D and 3D (disks and
    squares).

    Parameters
    ----------
    num_points : scalar
        The number of base points that lie within the domain.  Note that the
        actual number of points returned will be larger, with the extra
        points lying outside the domain.
    domain_size : list or array
        Controls the size and shape of the domain, as follows:

        **sphere** : If a single value is received, its treated as the radius
        [r] of a sphere centered on [0, 0, 0].

        **cylinder** : If a two-element list is received it's treated as the
        radius and height of a cylinder [r, z] positioned at [0, 0, 0] and
        extending in the positive z-direction.  If the z dimension is 0, a
        disk of radius r is created.

        **rectangle** : If a three element list is received, it's treated
        as the outer corner of rectangle [x, y, z] whose opposite corner
        lies at [0, 0, 0].  If the z dimension is 0, a rectangle of size
        X-by-Y is created.
    density_map : array, optional
        An array that contains fractional values (0 < i < 1) indicating the
        likelihood that a point in that region should be kept.  The size of
        this array can be anything, but the shape must match the
        ``domain_size``; that is for a 3D network the shape of the
        ``density_map`` can be [10, 10, 10] or [50, 50, 50], depending on
        how important the resolution of the density distribution is.  For a
        2D network the ``density_map`` should be [10, 10].

        When specifying a custom probability map is it recommended to also
        set values outside the given domain to zero.  If not, then the
        correct shape will still be returned, but with too few points in it.
    reflect : boolean
        If ``True``, the base points are generated as specified, then
        reflected about each face of the domain.  This essentially tricks
        the tessellation functions into creating smooth faces at the
        boundaries once these excess pores are trimmed.

    Notes
    -----
    The reflection approach tends to create larger pores near the surfaces,
    so it might be necessary to use the ``density_map`` argument to specify
    a slightly higher density of points near the surfaces.

    The ``Voronoi``, ``Delaunay``, ``Gabriel``, and ``DelaunayVoronoiDual``
    classes can *technically* handle base points with spherical or
    cylindrical domains, but the reflection across round surfaces does not
    create perfect Voronoi cells so the surfaces will not be smooth.

    Examples
    --------
    The following generates a spherical array with higher values near the
    core.  It uses a distance transform to create a sphere of radius 10,
    then a second distance transform to create larger values in the center
    away from the sphere surface.  These distance values could be further
    skewed by applying a power, with values higher than 1 resulting in
    higher values in the core, and fractional values smoothing them out a
    bit.

    >>> import openpnm as op
    >>> import scipy as sp
    >>> import scipy.ndimage as spim
    >>> im = np.ones([21, 21, 21], dtype=int)
    >>> im[10, 10, 10] = 0
    >>> im = spim.distance_transform_edt(im) <= 20  # Create sphere of 1's
    >>> prob = spim.distance_transform_edt(im)
    >>> prob = prob / np.amax(prob)  # Normalize between 0 and 1
    >>> pts = op.topotools.generate_base_points(num_points=50,
    ...                                         domain_size=[1, 1, 1],
    ...                                         density_map=prob)
    >>> net = op.network.DelaunayVoronoiDual(points=pts, shape=[1, 1, 1])
    """
    def _try_points(num_points, prob):
        # Rejection-sample points in the unit cube against the probability
        # image until ``num_points`` have been accepted
        prob = np.atleast_3d(prob)
        prob = np.array(prob)/np.amax(prob)  # Ensure prob is normalized
        base_pts = []
        N = 0
        while N < num_points:
            pt = np.random.rand(3)  # Generate a point
            # Test whether to keep it or not
            [indx, indy, indz] = np.floor(pt*np.shape(prob)).astype(int)
            if np.random.rand(1) <= prob[indx][indy][indz]:
                base_pts.append(pt)
                N += 1
        base_pts = np.array(base_pts)
        return base_pts
    if len(domain_size) == 1:  # Spherical
        domain_size = np.array(domain_size)
        r = domain_size[0]
        if density_map is None:
            # Make an image of a sphere filled with ones and use _try_points
            density_map = np.ones([41, 41, 41])
            density_map[20, 20, 20] = 0
            density_map = spim.distance_transform_edt(density_map) < 20
        base_pts = _try_points(num_points, density_map)
        # Convert to spherical coordinates
        X, Y, Z = np.array(base_pts - [0.5, 0.5, 0.5]).T
        # The factor of 2 rescales the half-unit centered cube to the
        # requested radius
        r = 2*np.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]
        theta = 2*np.arctan(Y/X)
        phi = 2*np.arctan(np.sqrt(X**2 + Y**2)/Z)
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, phi] = [r[inds], theta[inds], phi[inds]]
        # Reflect base points across perimeter
        if reflect:
            r, theta, phi = reflect_base_points(np.vstack((r, theta, phi)),
                                                domain_size)
        # Convert to Cartesian coordinates
        X, Y, Z = from_sph(r, theta, phi)
        base_pts = np.vstack([X, Y, Z]).T
    elif len(domain_size) == 2:  # Cylindrical or Disk
        domain_size = np.array(domain_size)
        if density_map is None:
            density_map = np.ones([41, 41, 41])
            density_map[20, 20, :] = 0
            if domain_size[1] == 0:  # Disk
                density_map = density_map[:, :, 0]
            density_map = spim.distance_transform_edt(density_map) < 20
        base_pts = _try_points(num_points, density_map)
        # Convert to cylindrical coordinates
        X, Y, Z = np.array(base_pts - [0.5, 0.5, 0]).T  # Center on z-axis
        r = 2*np.sqrt(X**2 + Y**2)*domain_size[0]
        theta = 2*np.arctan(Y/X)
        z = Z*domain_size[1]
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        # Also discard points above or below the cylinder's height
        inds = ~((z > domain_size[1]) + (z < 0))
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        if reflect:
            r, theta, z = reflect_base_points(np.vstack([r, theta, z]),
                                              domain_size)
        # Convert to Cartesian coordinates
        X, Y, Z = from_cyl(r, theta, z)
        base_pts = np.vstack([X, Y, Z]).T
    elif len(domain_size) == 3:  # Cube or square
        if density_map is None:
            density_map = np.ones([41, 41, 41])
            if domain_size[2] == 0:
                density_map = density_map[:, :, 0]
        base_pts = _try_points(num_points, density_map)
        # Scale the unit-cube samples up to the requested domain size
        base_pts = base_pts*domain_size
        if reflect:
            base_pts = reflect_base_points(base_pts, domain_size)
    return base_pts
def to_cyl(X, Y, Z):
    r"""Convert Cartesian coordinates to the (r, theta, z) cylindrical form
    used by ``generate_base_points`` (note the factor of 2 on r and theta,
    which presumably matches the half-unit centering used there — see
    ``generate_base_points``)."""
    radial = 2*np.sqrt(X**2 + Y**2)
    azimuth = 2*np.arctan(Y/X)
    return np.vstack((radial, azimuth, Z))
def from_cyl(r, theta, z):
    r"""Convert cylindrical (r, theta, z) coordinates back to Cartesian
    (X, Y, Z), stacked as a 3-row array."""
    return np.vstack((r*np.cos(theta), r*np.sin(theta), z))
def to_sph(X, Y, Z):
    r"""Convert Cartesian coordinates to the (r, theta, phi) spherical form
    used by ``generate_base_points`` (note the factor of 2 on each angle and
    radius, which presumably matches the half-unit centering used there)."""
    radial = 2*np.sqrt(X**2 + Y**2 + Z**2)
    azimuth = 2*np.arctan(Y/X)
    polar = 2*np.arctan(np.sqrt(X**2 + Y**2)/Z)
    return np.vstack((radial, azimuth, polar))
def from_sph(r, theta, phi):
    r"""Convert spherical (r, theta, phi) coordinates back to Cartesian
    (X, Y, Z), stacked as a 3-row array."""
    sin_phi = np.sin(phi)
    X = r*np.cos(theta)*sin_phi
    Y = r*np.sin(theta)*sin_phi
    Z = r*np.cos(phi)
    return np.vstack([X, Y, Z])
def reflect_base_points(base_pts, domain_size):
    r'''
    Helper function for reflecting a set of points about the faces of a
    given domain.

    Parameters
    ----------
    base_pts : array_like
        The points to reflect, expressed in the coordinate system matching
        the domain: spherical domains take a 3-row array of [r, theta, phi],
        cylindrical/circular domains a 3-row array of [r, theta, z], and
        rectangular/square domains an N-by-3 array of [x, y, z].
    domain_size : list or array
        Controls the size and shape of the domain, as follows:

        **sphere** : If a single value is received, its treated as the
        radius [r] of a sphere centered on [0, 0, 0].

        **cylinder** : If a two-element list is received it's treated as
        the radius and height of a cylinder [r, z] positioned at [0, 0, 0]
        and extending in the positive z-direction.  If the z dimension is
        0, a disk of radius r is created.

        **rectangle** : If a three element list is received, it's treated
        as the outer corner of rectangle [x, y, z] whose opposite corner
        lies at [0, 0, 0].  If the z dimension is 0, a rectangle of size
        X-by-Y is created.

    Returns
    -------
    base_pts : ndarray
        The original points together with their reflections, in the same
        coordinate system and layout as the input.
    '''
    domain_size = np.array(domain_size)
    n_dims = len(domain_size)
    if n_dims == 1:
        # Sphere: mirror every point radially across the spherical surface
        r, theta, phi = base_pts
        mirrored = 2*domain_size[0] - r
        base_pts = np.vstack((np.hstack([r, mirrored]),
                              np.hstack([theta, theta]),
                              np.hstack([phi, phi])))
    elif n_dims == 2:
        # Cylinder/disk: mirror radially first
        r, theta, z = base_pts
        mirrored = 2*domain_size[0] - r
        r = np.hstack([r, mirrored])
        theta = np.hstack([theta, theta])
        z = np.hstack([z, z])
        if domain_size[1] != 0:
            # Full cylinder: also mirror across both end caps (z=0, z=H)
            r = np.hstack([r, r, r])
            theta = np.hstack([theta, theta, theta])
            z = np.hstack([z, -z, 2*domain_size[1]-z])
        base_pts = np.vstack((r, theta, z))
    elif n_dims == 3:
        Nx, Ny, Nz = domain_size
        # Reflect base points about all 6 faces
        orig_pts = base_pts
        base_pts = np.vstack((base_pts,
                              [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0]))
        base_pts = np.vstack((base_pts, [-1, 1, 1] * orig_pts))
        base_pts = np.vstack((base_pts,
                              [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0]))
        base_pts = np.vstack((base_pts, [1, -1, 1] * orig_pts))
        if domain_size[2] != 0:
            # Skip the z-face reflections for flat (2D) domains
            base_pts = np.vstack((base_pts,
                                  [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz]))
            base_pts = np.vstack((base_pts, [1, 1, -1] * orig_pts))
    return base_pts
def add_boundary_pores(network, pores, offset=None, move_to=None,
                       apply_label='boundary'):
    r"""
    This method uses ``clone_pores`` to clone the input pores, then shifts
    them the specified amount and direction, then applies the given label.

    Parameters
    ----------
    pores : array_like
        List of pores to offset. If no pores are specified, then it
        assumes that all surface pores are to be cloned.
    offset : 3 x 1 array
        The distance in vector form which the cloned boundary pores should
        be offset. Either this, or ``move_to`` must be specified.
    move_to : 3 x 1 array
        The location to move the boundary pores to. A value of ``None``
        indicates that no translation should be applied in that axis. For
        instance, ``[None, None, 0]`` indicates that the boundary pores should
        moved along the z-axis to the specified location. Either this or
        ``offset`` must be specified.
    apply_label : string
        This label is applied to the boundary pores. Default is
        'boundary'.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> print(pn.Np)  # Confirm initial Network size
    125
    >>> Ps = pn.pores('top')  # Select pores on top face
    >>> pn.add_boundary_pores(labels=['top'])
    >>> print(pn.Np)  # Confirm addition of 25 new pores
    150
    """
    # Parse the input pores
    Ps = np.array(pores, ndmin=1)
    # BUGFIX: ``Ps.dtype is bool`` was always False because a numpy dtype
    # object is never identical to the builtin ``bool``; use equality so
    # boolean masks are actually converted to indices.
    if Ps.dtype == bool:
        Ps = network.toindices(Ps)
    if np.size(pores) == 0:  # Handle an empty array if given
        return np.array([], dtype=np.int64)
    # Clone the specified pores; clone_pores marks them with 'clone' labels
    clone_pores(network=network, pores=Ps)
    newPs = network.pores('pore.clone')
    del network['pore.clone']
    newTs = network.throats('clone')
    del network['throat.clone']
    if offset is not None:  # Offset the cloned pores
        network['pore.coords'][newPs] += offset
    if move_to is not None:  # Move the cloned pores along selected axes
        for i, d in enumerate(move_to):
            if d is not None:
                # Fancy-indexed reads return a copy, so modify and write back
                temp = network['pore.coords'][newPs]
                temp[:, i] = d
                network['pore.coords'][newPs] = temp
    # Apply labels to boundary pores (trim leading 'pore.'/'throat.' if present)
    label = apply_label.split('.')[-1]
    plabel = 'pore.' + label
    tlabel = 'throat.' + label
    network[plabel] = False
    network[plabel][newPs] = True
    network[tlabel] = False
    network[tlabel][newTs] = True
def iscoplanar(coords):
    r'''
    Determines if given pores are coplanar with each other

    Parameters
    ----------
    coords : array_like
        List of pore coords to check for coplanarity. At least 3 pores are
        required.

    Returns
    -------
    results : bool
        A boolean value of whether given points are coplanar (``True``) or
        not (``False``)
    '''
    coords = np.array(coords, ndmin=1)
    if np.shape(coords)[0] < 3:
        raise Exception('At least 3 input pores are required')
    Px = coords[:, 0]
    Py = coords[:, 1]
    Pz = coords[:, 2]
    # Do easy check first, for common coordinate
    if np.shape(np.unique(Px))[0] == 1:
        return True
    if np.shape(np.unique(Py))[0] == 1:
        return True
    if np.shape(np.unique(Pz))[0] == 1:
        return True
    # Perform rigorous check using vector algebra
    # Grab first basis vector from list of coords
    n1 = np.array((Px[1] - Px[0], Py[1] - Py[0], Pz[1] - Pz[0])).T
    n = np.array([0.0, 0.0, 0.0])
    i = 1
    # BUGFIX: the loop previously tested ``n.sum() == 0``, which wrongly
    # re-enters for valid non-zero normals whose components cancel
    # (e.g. [0, -1, 1]); test whether any component is non-zero instead.
    while not n.any():
        if i >= (np.size(Px) - 1):
            logger.warning('No valid basis vectors found')
            return False
        # Choose a second basis vector
        n2 = np.array((Px[i+1] - Px[i], Py[i+1] - Py[i], Pz[i+1] - Pz[i])).T
        # Find their cross product, the candidate plane normal
        n = np.cross(n1, n2)
        i += 1
    # Create vectors from the first point to all other points
    # BUGFIX: previously sliced ``[1:-1]`` which silently excluded the last
    # point, so an off-plane final point went undetected.
    r = np.array((Px[1:] - Px[0], Py[1:] - Py[0], Pz[1:] - Pz[0]))
    # Ensure they all lie on the same plane (all perpendicular to the normal)
    n_dot = np.dot(n, r)
    return bool(np.sum(np.absolute(n_dot)) == 0)
def is_fully_connected(network, pores_BC=None):
    r"""
    Checks whether network is fully connected, i.e. not clustered.

    Parameters
    ----------
    network : GenericNetwork
        The network whose connectivity to check.
    pores_BC : array_like (optional)
        The pore indices of boundary conditions (inlets/outlets).

    Returns
    -------
    bool
        If ``pores_BC`` is not specified, then returns ``True`` only if
        the entire network is connected to the same cluster. If
        ``pores_BC`` is given, then returns ``True`` only if all clusters
        are connected to the given boundary condition pores.
    """
    am = network.get_adjacency_matrix(fmt='lil').copy()
    # Label each pore with its cluster number and see if only one exists
    labels = csgraph.connected_components(am, directed=False)[1]
    single_cluster = np.unique(labels).size == 1
    if pores_BC is not None and not single_cluster:
        # Append a phantom pore connected to every BC pore, so clusters
        # that each touch a BC pore merge into one component
        am.resize(network.Np + 1, network.Np + 1)
        pores_BC = network._parse_indices(pores_BC)
        am.rows[-1] = pores_BC.tolist()
        am.data[-1] = np.arange(network.Nt, network.Nt + len(pores_BC)).tolist()
        labels = csgraph.connected_components(am, directed=False)[1]
        single_cluster = np.unique(labels).size == 1
    return single_cluster
def get_spacing(network):
    r"""
    Determine the spacing along each axis of a simple cubic network

    Parameters
    ----------
    network : OpenPNM network
        The network for which spacing is desired

    Returns
    -------
    spacing : ndarray
        The spacing along each axis in the form of ``[Lx, Ly, Lz]``, where
        ``L`` is the physical dimension in the implied units (i.e. meters)
    """
    # Delegate to the generator-tools implementation, which works on a
    # plain dict of vertex coords and edge connections
    from openpnm.topotools.generators.tools import get_spacing as _get_spacing
    net_dict = {'vert.coords': network.coords, 'edge.conns': network.conns}
    return _get_spacing(net_dict)
def get_shape(network):
    r"""
    Determine the shape of each axis of a simple cubic network

    Parameters
    ----------
    network : OpenPNM network
        The network for which shape is desired

    Returns
    -------
    shape : ndarray
        The shape along each axis in the form of ``[Nx, Ny, Nz]`` where
        ``N`` is the number of pores
    """
    # Delegate to the generator-tools implementation, which works on a
    # plain dict of vertex coords and edge connections
    from openpnm.topotools.generators.tools import get_shape as _get_shape
    net_dict = {'vert.coords': network.coords, 'edge.conns': network.conns}
    return _get_shape(net_dict)
| [
"numpy.absolute",
"numpy.sum",
"numpy.amin",
"numpy.empty",
"openpnm.network.Delaunay",
"numpy.ones",
"numpy.shape",
"numpy.sin",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"numpy.tile",
"numpy.inner",
"numpy.unique",
"scipy.spatial.Delaunay",
"numpy.atleast_2d",
"sci... | [((191, 218), 'openpnm.utils.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'from openpnm.utils import logging, Workspace\n'), ((224, 235), 'openpnm.utils.Workspace', 'Workspace', ([], {}), '()\n', (233, 235), False, 'from openpnm.utils import logging, Workspace\n'), ((6923, 6951), 'numpy.copy', 'np.copy', (["network['pore.all']"], {}), "(network['pore.all'])\n", (6930, 6951), True, 'import numpy as np\n'), ((6964, 6994), 'numpy.copy', 'np.copy', (["network['throat.all']"], {}), "(network['throat.all'])\n", (6971, 6994), True, 'import numpy as np\n'), ((10338, 10354), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (10346, 10354), True, 'import numpy as np\n'), ((10367, 10382), 'numpy.array', 'np.array', (['conns'], {}), '(conns)\n', (10375, 10382), True, 'import numpy as np\n'), ((10525, 10543), 'numpy.any', 'np.any', (['(conns > Np)'], {}), '(conns > Np)\n', (10531, 10543), True, 'import numpy as np\n'), ((13694, 13743), 'scipy.sparse.csgraph.minimum_spanning_tree', 'csgraph.minimum_spanning_tree', (['am'], {'overwrite': '(True)'}), '(am, overwrite=True)\n', (13723, 13743), False, 'from scipy.sparse import csgraph\n'), ((13894, 13907), 'numpy.hstack', 'np.hstack', (['Ts'], {}), '(Ts)\n', (13903, 13907), True, 'import numpy as np\n'), ((19297, 19325), 'numpy.vstack', 'np.vstack', (['(coords, markers)'], {}), '((coords, markers))\n', (19306, 19325), True, 'import numpy as np\n'), ((19336, 19373), 'scipy.spatial.Delaunay', 'sptl.Delaunay', (['pts'], {'incremental': '(False)'}), '(pts, incremental=False)\n', (19349, 19373), True, 'import scipy.spatial as sptl\n'), ((20451, 20472), 'numpy.array', 'np.array', (['dims_unique'], {}), '(dims_unique)\n', (20459, 20472), True, 'import numpy as np\n'), ((21438, 21462), 'numpy.array', 'np.array', (['pores'], {'ndmin': '(1)'}), '(pores, ndmin=1)\n', (21446, 21462), True, 'import numpy as np\n'), ((21544, 21586), 'numpy.concatenate', 'np.concatenate', 
(['(pcurrent, pclone)'], {'axis': '(0)'}), '((pcurrent, pclone), axis=0)\n', (21558, 21586), True, 'import numpy as np\n'), ((21630, 21650), 'numpy.arange', 'np.arange', (['Np', 'Npnew'], {}), '(Np, Npnew)\n', (21639, 21650), True, 'import numpy as np\n'), ((31291, 31310), 'numpy.vstack', 'np.vstack', (['(C1, C2)'], {}), '((C1, C2))\n', (31300, 31310), True, 'import numpy as np\n'), ((31846, 31875), 'numpy.sort', 'np.sort', (['mapped_conns'], {'axis': '(1)'}), '(mapped_conns, axis=1)\n', (31853, 31875), True, 'import numpy as np\n'), ((35846, 35871), 'numpy.array', 'np.array', (['pores1'], {'ndmin': '(1)'}), '(pores1, ndmin=1)\n', (35854, 35871), True, 'import numpy as np\n'), ((35881, 35906), 'numpy.array', 'np.array', (['pores2'], {'ndmin': '(1)'}), '(pores2, ndmin=1)\n', (35889, 35906), True, 'import numpy as np\n'), ((35954, 35983), 'scipy.spatial.distance.cdist', 'cdist', (['coords[p1]', 'coords[p2]'], {}), '(coords[p1], coords[p2])\n', (35959, 35983), False, 'from scipy.spatial.distance import cdist\n'), ((38952, 38972), 'openpnm.topotools.generators.tools.get_spacing', 'get_spacing', (['network'], {}), '(network)\n', (38963, 38972), False, 'from openpnm.topotools.generators.tools import get_spacing\n'), ((39027, 39067), 'openpnm.network.Cubic', 'Cubic', ([], {'shape': 'div', 'spacing': 'new_netspacing'}), '(shape=div, spacing=new_netspacing)\n', (39032, 39067), False, 'from openpnm.network import Cubic\n'), ((39901, 39932), 'numpy.copy', 'np.copy', (["new_net['pore.coords']"], {}), "(new_net['pore.coords'])\n", (39908, 39932), True, 'import numpy as np\n'), ((46323, 46349), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points[:, dim]'], {}), '(points[:, dim])\n', (46333, 46349), False, 'from scipy.spatial import ConvexHull\n'), ((47129, 47160), 'numpy.array', 'np.array', (['outer_radius'], {'ndmin': '(1)'}), '(outer_radius, ndmin=1)\n', (47137, 47160), True, 'import numpy as np\n'), ((47172, 47203), 'numpy.array', 'np.array', (['inner_radius'], {'ndmin': 
'(1)'}), '(inner_radius, ndmin=1)\n', (47180, 47203), True, 'import numpy as np\n'), ((57017, 57041), 'numpy.vstack', 'np.vstack', (['(r, theta, z)'], {}), '((r, theta, z))\n', (57026, 57041), True, 'import numpy as np\n'), ((57140, 57160), 'numpy.vstack', 'np.vstack', (['(X, Y, Z)'], {}), '((X, Y, Z))\n', (57149, 57160), True, 'import numpy as np\n'), ((57308, 57334), 'numpy.vstack', 'np.vstack', (['(r, theta, phi)'], {}), '((r, theta, phi))\n', (57317, 57334), True, 'import numpy as np\n'), ((57471, 57491), 'numpy.vstack', 'np.vstack', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (57480, 57491), True, 'import numpy as np\n'), ((58990, 59011), 'numpy.array', 'np.array', (['domain_size'], {}), '(domain_size)\n', (58998, 59011), True, 'import numpy as np\n'), ((61926, 61950), 'numpy.array', 'np.array', (['pores'], {'ndmin': '(1)'}), '(pores, ndmin=1)\n', (61934, 61950), True, 'import numpy as np\n'), ((63393, 63418), 'numpy.array', 'np.array', (['coords'], {'ndmin': '(1)'}), '(coords, ndmin=1)\n', (63401, 63418), True, 'import numpy as np\n'), ((63987, 64012), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (63995, 64012), True, 'import numpy as np\n'), ((64422, 64486), 'numpy.array', 'np.array', (['(Px[1:-1] - Px[0], Py[1:-1] - Py[0], Pz[1:-1] - Pz[0])'], {}), '((Px[1:-1] - Px[0], Py[1:-1] - Py[0], Pz[1:-1] - Pz[0]))\n', (64430, 64486), True, 'import numpy as np\n'), ((64543, 64555), 'numpy.dot', 'np.dot', (['n', 'r'], {}), '(n, r)\n', (64549, 64555), True, 'import numpy as np\n'), ((66450, 66464), 'openpnm.topotools.generators.tools.get_spacing', 'get_spacing', (['d'], {}), '(d)\n', (66461, 66464), False, 'from openpnm.topotools.generators.tools import get_spacing\n'), ((66988, 67000), 'openpnm.topotools.generators.tools.get_shape', 'get_shape', (['d'], {}), '(d)\n', (66997, 67000), False, 'from openpnm.topotools.generators.tools import get_shape\n'), ((4169, 4221), 'numpy.tensordot', 'np.tensordot', (["network['pore.coords']", 'R'], {'axes': 
'(1, 1)'}), "(network['pore.coords'], R, axes=(1, 1))\n", (4181, 4221), True, 'import numpy as np\n'), ((6063, 6112), 'numpy.array', 'np.array', (['[[1, ay, az], [bx, 1, bz], [cx, cy, 1]]'], {}), '([[1, ay, az], [bx, 1, bz], [cx, cy, 1]])\n', (6071, 6112), True, 'import numpy as np\n'), ((7002, 7016), 'numpy.size', 'np.size', (['pores'], {}), '(pores)\n', (7009, 7016), True, 'import numpy as np\n'), ((7585, 7601), 'numpy.size', 'np.size', (['throats'], {}), '(throats)\n', (7592, 7601), True, 'import numpy as np\n'), ((8232, 8247), 'numpy.where', 'np.where', (['Pkeep'], {}), '(Pkeep)\n', (8240, 8247), True, 'import numpy as np\n'), ((8268, 8283), 'numpy.where', 'np.where', (['Tkeep'], {}), '(Tkeep)\n', (8276, 8283), True, 'import numpy as np\n'), ((8298, 8331), 'numpy.ones', 'np.ones', (['(network.Np,)'], {'dtype': 'int'}), '((network.Np,), dtype=int)\n', (8305, 8331), True, 'import numpy as np\n'), ((9076, 9089), 'numpy.sum', 'np.sum', (['Pkeep'], {}), '(Pkeep)\n', (9082, 9089), True, 'import numpy as np\n'), ((10777, 10792), 'numpy.size', 'np.size', (['coords'], {}), '(coords)\n', (10784, 10792), True, 'import numpy as np\n'), ((10815, 10858), 'numpy.vstack', 'np.vstack', (["(network['pore.coords'], coords)"], {}), "((network['pore.coords'], coords))\n", (10824, 10858), True, 'import numpy as np\n'), ((10906, 10920), 'numpy.size', 'np.size', (['conns'], {}), '(conns)\n', (10913, 10920), True, 'import numpy as np\n'), ((10942, 10985), 'numpy.vstack', 'np.vstack', (["(network['throat.conns'], conns)"], {}), "((network['throat.conns'], conns))\n", (10951, 10985), True, 'import numpy as np\n'), ((15278, 15297), 'numpy.amin', 'np.amin', (['crds[:, 0]'], {}), '(crds[:, 0])\n', (15285, 15297), True, 'import numpy as np\n'), ((15299, 15318), 'numpy.amax', 'np.amax', (['crds[:, 0]'], {}), '(crds[:, 0])\n', (15306, 15318), True, 'import numpy as np\n'), ((15360, 15379), 'numpy.amin', 'np.amin', (['crds[:, 1]'], {}), '(crds[:, 1])\n', (15367, 15379), True, 'import numpy as 
np\n'), ((15381, 15400), 'numpy.amax', 'np.amax', (['crds[:, 1]'], {}), '(crds[:, 1])\n', (15388, 15400), True, 'import numpy as np\n'), ((15442, 15461), 'numpy.amin', 'np.amin', (['crds[:, 2]'], {}), '(crds[:, 2])\n', (15449, 15461), True, 'import numpy as np\n'), ((15463, 15482), 'numpy.amax', 'np.amax', (['crds[:, 2]'], {}), '(crds[:, 2])\n', (15470, 15482), True, 'import numpy as np\n'), ((18043, 18066), 'numpy.amin', 'np.amin', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (18050, 18066), True, 'import numpy as np\n'), ((18085, 18108), 'numpy.amax', 'np.amax', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (18092, 18108), True, 'import numpy as np\n'), ((19539, 19571), 'numpy.where', 'np.where', (['(neighbors < network.Np)'], {}), '(neighbors < network.Np)\n', (19547, 19571), True, 'import numpy as np\n'), ((20328, 20343), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (20336, 20343), True, 'import numpy as np\n'), ((21599, 21613), 'numpy.shape', 'np.shape', (['pnew'], {}), '(pnew)\n', (21607, 21613), True, 'import numpy as np\n'), ((23663, 23720), 'numpy.vstack', 'np.vstack', (["(network['pore.coords'], donor['pore.coords'])"], {}), "((network['pore.coords'], donor['pore.coords']))\n", (23672, 23720), True, 'import numpy as np\n'), ((23799, 23871), 'numpy.vstack', 'np.vstack', (["(network['throat.conns'], donor['throat.conns'] + network.Np)"], {}), "((network['throat.conns'], donor['throat.conns'] + network.Np))\n", (23808, 23871), True, 'import numpy as np\n'), ((29340, 29373), 'scipy.spatial.distance.cdist', 'sp.spatial.distance.cdist', (['C1', 'C2'], {}), '(C1, C2)\n', (29365, 29373), True, 'import scipy as sp\n'), ((31352, 31399), 'openpnm.network.Delaunay', 'Delaunay', ([], {'points': 'crds', 'settings': "{'trim': False}"}), "(points=crds, settings={'trim': False})\n", (31360, 31399), False, 'from openpnm.network import Delaunay, Gabriel\n'), ((31440, 31486), 'openpnm.network.Gabriel', 'Gabriel', ([], {'points': 'crds', 'settings': 
"{'trim': False}"}), "(points=crds, settings={'trim': False})\n", (31447, 31486), False, 'from openpnm.network import Delaunay, Gabriel\n'), ((34437, 34449), 'numpy.size', 'np.size', (['ps1'], {}), '(ps1)\n', (34444, 34449), True, 'import numpy as np\n'), ((34466, 34478), 'numpy.size', 'np.size', (['ps2'], {}), '(ps2)\n', (34473, 34478), True, 'import numpy as np\n'), ((36546, 36563), 'numpy.where', 'np.where', (['(Nz == z)'], {}), '(Nz == z)\n', (36554, 36563), True, 'import numpy as np\n'), ((38335, 38359), 'numpy.array', 'np.array', (['shape'], {'ndmin': '(1)'}), '(shape, ndmin=1)\n', (38343, 38359), True, 'import numpy as np\n'), ((39192, 39259), 'numpy.array', 'np.array', (["[['left', 'right'], ['front', 'back'], ['top', 'bottom']]"], {}), "([['left', 'right'], ['front', 'back'], ['top', 'bottom']])\n", (39200, 39259), True, 'import numpy as np\n'), ((42262, 42281), 'numpy.copy', 'np.copy', (['old_coords'], {}), '(old_coords)\n', (42269, 42281), True, 'import numpy as np\n'), ((42898, 42917), 'numpy.sum', 'np.sum', (['occluded_ts'], {}), '(occluded_ts)\n', (42904, 42917), True, 'import numpy as np\n'), ((44751, 44777), 'numpy.concatenate', 'np.concatenate', (['(temp, Ps)'], {}), '((temp, Ps))\n', (44765, 44777), True, 'import numpy as np\n'), ((49484, 49502), 'numpy.atleast_3d', 'np.atleast_3d', (['img'], {}), '(img)\n', (49497, 49502), True, 'import numpy as np\n'), ((53589, 53608), 'numpy.atleast_3d', 'np.atleast_3d', (['prob'], {}), '(prob)\n', (53602, 53608), True, 'import numpy as np\n'), ((54059, 54077), 'numpy.array', 'np.array', (['base_pts'], {}), '(base_pts)\n', (54067, 54077), True, 'import numpy as np\n'), ((54168, 54189), 'numpy.array', 'np.array', (['domain_size'], {}), '(domain_size)\n', (54176, 54189), True, 'import numpy as np\n'), ((56946, 56970), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (56953, 56970), True, 'import numpy as np\n'), ((56981, 56997), 'numpy.arctan', 'np.arctan', (['(Y / X)'], {}), '(Y / 
X)\n', (56990, 56997), True, 'import numpy as np\n'), ((57081, 57094), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (57087, 57094), True, 'import numpy as np\n'), ((57105, 57118), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (57111, 57118), True, 'import numpy as np\n'), ((57194, 57227), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2 + Z ** 2)'], {}), '(X ** 2 + Y ** 2 + Z ** 2)\n', (57201, 57227), True, 'import numpy as np\n'), ((57236, 57252), 'numpy.arctan', 'np.arctan', (['(Y / X)'], {}), '(Y / X)\n', (57245, 57252), True, 'import numpy as np\n'), ((57390, 57401), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (57396, 57401), True, 'import numpy as np\n'), ((57426, 57437), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (57432, 57437), True, 'import numpy as np\n'), ((57448, 57459), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (57454, 57459), True, 'import numpy as np\n'), ((59124, 59145), 'numpy.hstack', 'np.hstack', (['[r, new_r]'], {}), '([r, new_r])\n', (59133, 59145), True, 'import numpy as np\n'), ((59162, 59187), 'numpy.hstack', 'np.hstack', (['[theta, theta]'], {}), '([theta, theta])\n', (59171, 59187), True, 'import numpy as np\n'), ((59202, 59223), 'numpy.hstack', 'np.hstack', (['[phi, phi]'], {}), '([phi, phi])\n', (59211, 59223), True, 'import numpy as np\n'), ((59243, 59269), 'numpy.vstack', 'np.vstack', (['(r, theta, phi)'], {}), '((r, theta, phi))\n', (59252, 59269), True, 'import numpy as np\n'), ((59380, 59401), 'numpy.hstack', 'np.hstack', (['[r, new_r]'], {}), '([r, new_r])\n', (59389, 59401), True, 'import numpy as np\n'), ((59418, 59443), 'numpy.hstack', 'np.hstack', (['[theta, theta]'], {}), '([theta, theta])\n', (59427, 59443), True, 'import numpy as np\n'), ((59456, 59473), 'numpy.hstack', 'np.hstack', (['[z, z]'], {}), '([z, z])\n', (59465, 59473), True, 'import numpy as np\n'), ((59687, 59711), 'numpy.vstack', 'np.vstack', (['(r, theta, z)'], {}), '((r, theta, z))\n', (59696, 59711), True, 'import numpy as np\n'), 
((62018, 62032), 'numpy.size', 'np.size', (['pores'], {}), '(pores)\n', (62025, 62032), True, 'import numpy as np\n'), ((62088, 62116), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (62096, 62116), True, 'import numpy as np\n'), ((63921, 63976), 'numpy.array', 'np.array', (['(Px[1] - Px[0], Py[1] - Py[0], Pz[1] - Pz[0])'], {}), '((Px[1] - Px[0], Py[1] - Py[0], Pz[1] - Pz[0]))\n', (63929, 63976), True, 'import numpy as np\n'), ((64327, 64343), 'numpy.cross', 'np.cross', (['n1', 'n2'], {}), '(n1, n2)\n', (64335, 64343), True, 'import numpy as np\n'), ((65324, 65372), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['am'], {'directed': '(False)'}), '(am, directed=False)\n', (65352, 65372), False, 'from scipy.sparse import csgraph\n'), ((1366, 1393), 'numpy.sum', 'np.sum', (['(coords ** 2)'], {'axis': '(1)'}), '(coords ** 2, axis=1)\n', (1372, 1393), True, 'import numpy as np\n'), ((3329, 3381), 'numpy.tensordot', 'np.tensordot', (["network['pore.coords']", 'R'], {'axes': '(1, 1)'}), "(network['pore.coords'], R, axes=(1, 1))\n", (3341, 3381), True, 'import numpy as np\n'), ((3676, 3728), 'numpy.tensordot', 'np.tensordot', (["network['pore.coords']", 'R'], {'axes': '(1, 1)'}), "(network['pore.coords'], R, axes=(1, 1))\n", (3688, 3728), True, 'import numpy as np\n'), ((4023, 4075), 'numpy.tensordot', 'np.tensordot', (["network['pore.coords']", 'R'], {'axes': '(1, 1)'}), "(network['pore.coords'], R, axes=(1, 1))\n", (4035, 4075), True, 'import numpy as np\n'), ((7066, 7079), 'numpy.any', 'np.any', (['Pkeep'], {}), '(Pkeep)\n', (7072, 7079), True, 'import numpy as np\n'), ((7807, 7820), 'numpy.any', 'np.any', (['Tkeep'], {}), '(Tkeep)\n', (7813, 7820), True, 'import numpy as np\n'), ((8054, 8075), 'numpy.array', 'np.array', (['[]'], {'ndmin': '(1)'}), '([], ndmin=1)\n', (8062, 8075), True, 'import numpy as np\n'), ((10650, 10675), 'numpy.ones', 'np.ones', (['[Np]'], {'dtype': 'bool'}), '([Np], 
dtype=bool)\n', (10657, 10675), True, 'import numpy as np\n'), ((10713, 10738), 'numpy.ones', 'np.ones', (['[Nt]'], {'dtype': 'bool'}), '([Nt], dtype=bool)\n', (10720, 10738), True, 'import numpy as np\n'), ((13603, 13629), 'numpy.random.rand', 'np.random.rand', (['network.Nt'], {}), '(network.Nt)\n', (13617, 13629), True, 'import numpy as np\n'), ((18613, 18646), 'numpy.arccos', 'np.arccos', (['(1 - 2 * indices / npts)'], {}), '(1 - 2 * indices / npts)\n', (18622, 18646), True, 'import numpy as np\n'), ((18964, 18986), 'numpy.atleast_2d', 'np.atleast_2d', (['markers'], {}), '(markers)\n', (18977, 18986), True, 'import numpy as np\n'), ((19155, 19177), 'numpy.atleast_2d', 'np.atleast_2d', (['markers'], {}), '(markers)\n', (19168, 19177), True, 'import numpy as np\n'), ((21953, 21981), 'numpy.vstack', 'np.vstack', (['(parents, clones)'], {}), '((parents, clones))\n', (21962, 21981), True, 'import numpy as np\n'), ((22146, 22179), 'numpy.zeros', 'np.zeros', (['[network.Np]'], {'dtype': 'int'}), '([network.Np], dtype=int)\n', (22154, 22179), True, 'import numpy as np\n'), ((22207, 22232), 'numpy.arange', 'np.arange', (['Np', 'network.Np'], {}), '(Np, network.Np)\n', (22216, 22232), True, 'import numpy as np\n'), ((29446, 29481), 'numpy.vstack', 'np.vstack', (['(P1[P1_ind], P2[P2_ind])'], {}), '((P1[P1_ind], P2[P2_ind]))\n', (29455, 29481), True, 'import numpy as np\n'), ((29704, 29737), 'scipy.spatial.distance.cdist', 'sp.spatial.distance.cdist', (['C1', 'C2'], {}), '(C1, C2)\n', (29729, 29737), True, 'import scipy as sp\n'), ((29765, 29787), 'numpy.where', 'np.where', (['(D <= len_max)'], {}), '(D <= len_max)\n', (29773, 29787), True, 'import numpy as np\n'), ((34499, 34520), 'numpy.repeat', 'np.repeat', (['ps1', 'size2'], {}), '(ps1, size2)\n', (34508, 34520), True, 'import numpy as np\n'), ((34542, 34561), 'numpy.tile', 'np.tile', (['ps2', 'size1'], {}), '(ps2, size1)\n', (34549, 34561), True, 'import numpy as np\n'), ((38142, 38156), 'numpy.size', 'np.size', 
(['shape'], {}), '(shape)\n', (38149, 38156), True, 'import numpy as np\n'), ((38166, 38180), 'numpy.size', 'np.size', (['shape'], {}), '(shape)\n', (38173, 38180), True, 'import numpy as np\n'), ((38281, 38295), 'numpy.size', 'np.size', (['shape'], {}), '(shape)\n', (38288, 38295), True, 'import numpy as np\n'), ((38475, 38494), 'numpy.size', 'np.size', (['single_dim'], {}), '(single_dim)\n', (38482, 38494), True, 'import numpy as np\n'), ((38542, 38556), 'numpy.size', 'np.size', (['shape'], {}), '(shape)\n', (38549, 38556), True, 'import numpy as np\n'), ((38581, 38605), 'numpy.array', 'np.array', (['shape'], {'ndmin': '(1)'}), '(shape, ndmin=1)\n', (38589, 38605), True, 'import numpy as np\n'), ((38638, 38665), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int32'}), '(3, dtype=np.int32)\n', (38646, 38665), True, 'import numpy as np\n'), ((38856, 38880), 'numpy.array', 'np.array', (['shape'], {'ndmin': '(1)'}), '(shape, ndmin=1)\n', (38864, 38880), True, 'import numpy as np\n'), ((45784, 45801), 'numpy.split', 'np.split', (['Pnew', 'N'], {}), '(Pnew, N)\n', (45792, 45801), True, 'import numpy as np\n'), ((45903, 45924), 'numpy.concatenate', 'np.concatenate', (['pores'], {}), '(pores)\n', (45917, 45924), True, 'import numpy as np\n'), ((47257, 47280), 'numpy.ones', 'np.ones', (['dim'], {'dtype': 'int'}), '(dim, dtype=int)\n', (47264, 47280), True, 'import numpy as np\n'), ((53624, 53638), 'numpy.array', 'np.array', (['prob'], {}), '(prob)\n', (53632, 53638), True, 'import numpy as np\n'), ((53639, 53652), 'numpy.amax', 'np.amax', (['prob'], {}), '(prob)\n', (53646, 53652), True, 'import numpy as np\n'), ((53765, 53782), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (53779, 53782), True, 'import numpy as np\n'), ((54352, 54373), 'numpy.ones', 'np.ones', (['[41, 41, 41]'], {}), '([41, 41, 41])\n', (54359, 54373), True, 'import numpy as np\n'), ((54603, 54639), 'numpy.array', 'np.array', (['(base_pts - [0.5, 0.5, 0.5])'], {}), '(base_pts - [0.5, 
0.5, 0.5])\n', (54611, 54639), True, 'import numpy as np\n'), ((54717, 54733), 'numpy.arctan', 'np.arctan', (['(Y / X)'], {}), '(Y / X)\n', (54726, 54733), True, 'import numpy as np\n'), ((55254, 55274), 'numpy.vstack', 'np.vstack', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (55263, 55274), True, 'import numpy as np\n'), ((55355, 55376), 'numpy.array', 'np.array', (['domain_size'], {}), '(domain_size)\n', (55363, 55376), True, 'import numpy as np\n'), ((57376, 57389), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (57382, 57389), True, 'import numpy as np\n'), ((57412, 57425), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (57418, 57425), True, 'import numpy as np\n'), ((59539, 59559), 'numpy.hstack', 'np.hstack', (['[r, r, r]'], {}), '([r, r, r])\n', (59548, 59559), True, 'import numpy as np\n'), ((59580, 59612), 'numpy.hstack', 'np.hstack', (['[theta, theta, theta]'], {}), '([theta, theta, theta])\n', (59589, 59612), True, 'import numpy as np\n'), ((59629, 59671), 'numpy.hstack', 'np.hstack', (['[z, -z, 2 * domain_size[1] - z]'], {}), '([z, -z, 2 * domain_size[1] - z])\n', (59638, 59671), True, 'import numpy as np\n'), ((59872, 59935), 'numpy.vstack', 'np.vstack', (['(base_pts, [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0])'], {}), '((base_pts, [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0]))\n', (59881, 59935), True, 'import numpy as np\n'), ((59985, 60029), 'numpy.vstack', 'np.vstack', (['(base_pts, [-1, 1, 1] * orig_pts)'], {}), '((base_pts, [-1, 1, 1] * orig_pts))\n', (59994, 60029), True, 'import numpy as np\n'), ((60049, 60112), 'numpy.vstack', 'np.vstack', (['(base_pts, [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0])'], {}), '((base_pts, [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0]))\n', (60058, 60112), True, 'import numpy as np\n'), ((60162, 60206), 'numpy.vstack', 'np.vstack', (['(base_pts, [1, -1, 1] * orig_pts)'], {}), '((base_pts, [1, -1, 1] * orig_pts))\n', (60171, 60206), True, 'import numpy as np\n'), ((63426, 63442), 'numpy.shape', 'np.shape', (['coords'], {}), 
'(coords)\n', (63434, 63442), True, 'import numpy as np\n'), ((64216, 64283), 'numpy.array', 'np.array', (['(Px[i + 1] - Px[i], Py[i + 1] - Py[i], Pz[i + 1] - Pz[i])'], {}), '((Px[i + 1] - Px[i], Py[i + 1] - Py[i], Pz[i + 1] - Pz[i]))\n', (64224, 64283), True, 'import numpy as np\n'), ((65395, 65410), 'numpy.unique', 'np.unique', (['temp'], {}), '(temp)\n', (65404, 65410), True, 'import numpy as np\n'), ((65763, 65811), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['am'], {'directed': '(False)'}), '(am, directed=False)\n', (65791, 65811), False, 'from scipy.sparse import csgraph\n'), ((1525, 1563), 'numpy.sum', 'np.sum', (['(coords[:, [0, 1]] ** 2)'], {'axis': '(1)'}), '(coords[:, [0, 1]] ** 2, axis=1)\n', (1531, 1563), True, 'import numpy as np\n'), ((1846, 1874), 'numpy.array', 'np.array', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1854, 1874), True, 'import numpy as np\n'), ((2063, 2094), 'numpy.any', 'np.any', (['(coords > hi_lim)'], {'axis': '(1)'}), '(coords > hi_lim, axis=1)\n', (2069, 2094), True, 'import numpy as np\n'), ((2109, 2140), 'numpy.any', 'np.any', (['(coords < lo_lim)'], {'axis': '(1)'}), '(coords < lo_lim, axis=1)\n', (2115, 2140), True, 'import numpy as np\n'), ((9191, 9216), 'numpy.vstack', 'np.vstack', (['(Tnew1, Tnew2)'], {}), '((Tnew1, Tnew2))\n', (9200, 9216), True, 'import numpy as np\n'), ((11234, 11259), 'numpy.ones', 'np.ones', (['[Np]'], {'dtype': 'bool'}), '([Np], dtype=bool)\n', (11241, 11259), True, 'import numpy as np\n'), ((11297, 11322), 'numpy.ones', 'np.ones', (['[Nt]'], {'dtype': 'bool'}), '([Nt], dtype=bool)\n', (11304, 11322), True, 'import numpy as np\n'), ((12275, 12290), 'numpy.size', 'np.size', (['coords'], {}), '(coords)\n', (12282, 12290), True, 'import numpy as np\n'), ((12508, 12522), 'numpy.size', 'np.size', (['conns'], {}), '(conns)\n', (12515, 12522), True, 'import numpy as np\n'), ((18394, 18407), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (18400, 
18407), True, 'import numpy as np\n'), ((18426, 18439), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (18432, 18439), True, 'import numpy as np\n'), ((18462, 18479), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (18471, 18479), True, 'import numpy as np\n'), ((18728, 18739), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (18734, 18739), True, 'import numpy as np\n'), ((18774, 18785), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (18780, 18785), True, 'import numpy as np\n'), ((18804, 18815), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (18810, 18815), True, 'import numpy as np\n'), ((18838, 18858), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (18847, 18858), True, 'import numpy as np\n'), ((29804, 29839), 'numpy.vstack', 'np.vstack', (['(P1[P1_ind], P2[P2_ind])'], {}), '((P1[P1_ind], P2[P2_ind]))\n', (29813, 29839), True, 'import numpy as np\n'), ((34586, 34606), 'numpy.concatenate', 'np.concatenate', (['arr1'], {}), '(arr1)\n', (34600, 34606), True, 'import numpy as np\n'), ((34608, 34628), 'numpy.concatenate', 'np.concatenate', (['arr2'], {}), '(arr2)\n', (34622, 34628), True, 'import numpy as np\n'), ((39367, 39386), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (39375, 39386), True, 'import numpy as np\n'), ((40420, 40443), 'numpy.in1d', 'np.in1d', (['Pn', 'Pn_new_net'], {}), '(Pn, Pn_new_net)\n', (40427, 40443), True, 'import numpy as np\n'), ((46259, 46282), 'numpy.unique', 'np.unique', (['points[:, i]'], {}), '(points[:, i])\n', (46268, 46282), True, 'import numpy as np\n'), ((53936, 53953), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (53950, 53953), True, 'import numpy as np\n'), ((54440, 54480), 'scipy.ndimage.distance_transform_edt', 'spim.distance_transform_edt', (['density_map'], {}), '(density_map)\n', (54467, 54480), True, 'import scipy.ndimage as spim\n'), ((54656, 54689), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2 + Z ** 2)'], {}), '(X ** 2 + Y ** 2 + Z ** 
2)\n', (54663, 54689), True, 'import numpy as np\n'), ((55061, 55087), 'numpy.vstack', 'np.vstack', (['(r, theta, phi)'], {}), '((r, theta, phi))\n', (55070, 55087), True, 'import numpy as np\n'), ((55435, 55456), 'numpy.ones', 'np.ones', (['[41, 41, 41]'], {}), '([41, 41, 41])\n', (55442, 55456), True, 'import numpy as np\n'), ((55782, 55816), 'numpy.array', 'np.array', (['(base_pts - [0.5, 0.5, 0])'], {}), '(base_pts - [0.5, 0.5, 0])\n', (55790, 55816), True, 'import numpy as np\n'), ((55907, 55923), 'numpy.arctan', 'np.arctan', (['(Y / X)'], {}), '(Y / X)\n', (55916, 55923), True, 'import numpy as np\n'), ((56469, 56489), 'numpy.vstack', 'np.vstack', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (56478, 56489), True, 'import numpy as np\n'), ((57273, 57297), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (57280, 57297), True, 'import numpy as np\n'), ((60262, 60325), 'numpy.vstack', 'np.vstack', (['(base_pts, [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz])'], {}), '((base_pts, [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz]))\n', (60271, 60325), True, 'import numpy as np\n'), ((60383, 60427), 'numpy.vstack', 'np.vstack', (['(base_pts, [1, 1, -1] * orig_pts)'], {}), '((base_pts, [1, 1, -1] * orig_pts))\n', (60392, 60427), True, 'import numpy as np\n'), ((63647, 63660), 'numpy.unique', 'np.unique', (['Px'], {}), '(Px)\n', (63656, 63660), True, 'import numpy as np\n'), ((63707, 63720), 'numpy.unique', 'np.unique', (['Py'], {}), '(Py)\n', (63716, 63720), True, 'import numpy as np\n'), ((63767, 63780), 'numpy.unique', 'np.unique', (['Pz'], {}), '(Pz)\n', (63776, 63780), True, 'import numpy as np\n'), ((64064, 64075), 'numpy.size', 'np.size', (['Px'], {}), '(Px)\n', (64071, 64075), True, 'import numpy as np\n'), ((64580, 64598), 'numpy.absolute', 'np.absolute', (['n_dot'], {}), '(n_dot)\n', (64591, 64598), True, 'import numpy as np\n'), ((65838, 65853), 'numpy.unique', 'np.unique', (['temp'], {}), '(temp)\n', (65847, 65853), True, 'import numpy as np\n'), 
((11595, 11634), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, *s[1:])', 'dtype': 'bool'}), '(shape=(N, *s[1:]), dtype=bool)\n', (11603, 11634), True, 'import numpy as np\n'), ((18712, 18725), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (18718, 18725), True, 'import numpy as np\n'), ((18758, 18771), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (18764, 18771), True, 'import numpy as np\n'), ((23987, 24019), 'numpy.shape', 'np.shape', (["network['pore.coords']"], {}), "(network['pore.coords'])\n", (23995, 24019), True, 'import numpy as np\n'), ((24063, 24096), 'numpy.shape', 'np.shape', (["network['throat.conns']"], {}), "(network['throat.conns'])\n", (24071, 24096), True, 'import numpy as np\n'), ((38818, 38852), 'numpy.array', 'np.array', (['div'], {'ndmin': '(1)', 'dtype': 'bool'}), '(div, ndmin=1, dtype=bool)\n', (38826, 38852), True, 'import numpy as np\n'), ((41106, 41154), 'numpy.inner', 'np.inner', (['(neighbor_coord - x)', '(neighbor_coord - x)'], {}), '(neighbor_coord - x, neighbor_coord - x)\n', (41114, 41154), True, 'import numpy as np\n'), ((41253, 41266), 'numpy.amin', 'np.amin', (['dist'], {}), '(dist)\n', (41260, 41266), True, 'import numpy as np\n'), ((41589, 41611), 'numpy.size', 'np.size', (['new_neighbors'], {}), '(new_neighbors)\n', (41596, 41611), True, 'import numpy as np\n'), ((54758, 54782), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (54765, 54782), True, 'import numpy as np\n'), ((55617, 55657), 'scipy.ndimage.distance_transform_edt', 'spim.distance_transform_edt', (['density_map'], {}), '(density_map)\n', (55644, 55657), True, 'import scipy.ndimage as spim\n'), ((55853, 55877), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (55860, 55877), True, 'import numpy as np\n'), ((56282, 56306), 'numpy.vstack', 'np.vstack', (['[r, theta, z]'], {}), '([r, theta, z])\n', (56291, 56306), True, 'import numpy as np\n'), ((56601, 56622), 'numpy.ones', 'np.ones', (['[41, 41, 
41]'], {}), '([41, 41, 41])\n', (56608, 56622), True, 'import numpy as np\n'), ((2002, 2021), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2010, 2021), True, 'import numpy as np\n'), ((11689, 11728), 'numpy.ones', 'np.ones', ([], {'shape': '(N, *s[1:])', 'dtype': 'float'}), '(shape=(N, *s[1:]), dtype=float)\n', (11696, 11728), True, 'import numpy as np\n'), ((25314, 25351), 'numpy.hstack', 'np.hstack', (['(network[key], donor[key])'], {}), '((network[key], donor[key]))\n', (25323, 25351), True, 'import numpy as np\n'), ((25409, 25446), 'numpy.vstack', 'np.vstack', (['(network[key], donor[key])'], {}), '((network[key], donor[key]))\n', (25418, 25446), True, 'import numpy as np\n'), ((26204, 26224), 'numpy.shape', 'np.shape', (['donor[key]'], {}), '(donor[key])\n', (26212, 26224), True, 'import numpy as np\n'), ((38435, 38453), 'openpnm.topotools.generators.tools.get_shape', 'get_shape', (['network'], {}), '(network)\n', (38444, 38453), False, 'from openpnm.topotools.generators.tools import get_shape\n'), ((3173, 3186), 'numpy.deg2rad', 'np.deg2rad', (['a'], {}), '(a)\n', (3183, 3186), True, 'import numpy as np\n'), ((3251, 3264), 'numpy.deg2rad', 'np.deg2rad', (['a'], {}), '(a)\n', (3261, 3264), True, 'import numpy as np\n'), ((3274, 3287), 'numpy.deg2rad', 'np.deg2rad', (['a'], {}), '(a)\n', (3284, 3287), True, 'import numpy as np\n'), ((3480, 3493), 'numpy.deg2rad', 'np.deg2rad', (['b'], {}), '(b)\n', (3490, 3493), True, 'import numpy as np\n'), ((3595, 3608), 'numpy.deg2rad', 'np.deg2rad', (['b'], {}), '(b)\n', (3605, 3608), True, 'import numpy as np\n'), ((3621, 3634), 'numpy.deg2rad', 'np.deg2rad', (['b'], {}), '(b)\n', (3631, 3634), True, 'import numpy as np\n'), ((3827, 3840), 'numpy.deg2rad', 'np.deg2rad', (['c'], {}), '(c)\n', (3837, 3840), True, 'import numpy as np\n'), ((3905, 3918), 'numpy.deg2rad', 'np.deg2rad', (['c'], {}), '(c)\n', (3915, 3918), True, 'import numpy as np\n'), ((3928, 3941), 'numpy.deg2rad', 'np.deg2rad', (['c'], 
{}), '(c)\n', (3938, 3941), True, 'import numpy as np\n'), ((26092, 26112), 'numpy.empty', 'np.empty', (['data_shape'], {}), '(data_shape)\n', (26100, 26112), True, 'import numpy as np\n'), ((53893, 53907), 'numpy.shape', 'np.shape', (['prob'], {}), '(prob)\n', (53901, 53907), True, 'import numpy as np\n'), ((3197, 3210), 'numpy.deg2rad', 'np.deg2rad', (['a'], {}), '(a)\n', (3207, 3210), True, 'import numpy as np\n'), ((3507, 3520), 'numpy.deg2rad', 'np.deg2rad', (['b'], {}), '(b)\n', (3517, 3520), True, 'import numpy as np\n'), ((3851, 3864), 'numpy.deg2rad', 'np.deg2rad', (['c'], {}), '(c)\n', (3861, 3864), True, 'import numpy as np\n'), ((25094, 25115), 'numpy.empty', 'np.empty', ([], {'shape': 'shape'}), '(shape=shape)\n', (25102, 25115), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from sklearn.metrics import jaccard_similarity_score, roc_auc_score, precision_score, f1_score, average_precision_score, average_precision_score
import numpy as np
from models import GMNN
from util import llprint, multi_label_metric
import dill
import time
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
import os
import torch.nn.functional as F
torch.manual_seed(1203)
model_name = 'GMNN_1'
resume_name = 'Epoch_4_Loss1_1.3970.model'
def eval(model, data_eval, voc_size, epoch):
# evaluate
print('')
model.eval()
smm_record = []
auc, p_1, p_3, p_5, f1, prauc = [[] for _ in range(6)]
for step, input in enumerate(data_eval):
y_gt = []
y_pred = []
y_pred_prob = []
y_pred_label = []
input1_hidden, input2_hidden, target_hidden= None, None, None
for adm in input:
y_pred_label_tmp = []
y_gt_tmp = np.zeros(voc_size[2])
y_gt_tmp[adm[2]] = 1
y_gt.append(y_gt_tmp)
target_output1, output_logits, output_labels, [input1_hidden, input2_hidden, target_hidden] = model(adm, [input1_hidden, input2_hidden, target_hidden])
target_output1 = F.sigmoid(target_output1).detach().cpu().numpy()[0]
a = np.argsort(target_output1)[::-1]
b = np.max(output_logits, axis=-1)
y_pred_prob.append(target_output1)
y_pred_tmp = target_output1.copy()
y_pred_tmp[y_pred_tmp>=0.5] = 1
y_pred_tmp[y_pred_tmp<0.5] = 0
y_pred.append(y_pred_tmp)
for idx, value in enumerate(y_pred_tmp):
if value == 1:
y_pred_label_tmp.append(idx)
y_pred_label.append(y_pred_label_tmp)
smm_record.append(y_pred_label)
adm_auc, adm_p_1, adm_p_3, adm_p_5, adm_f1, adm_prauc = multi_label_metric(np.array(y_gt), np.array(y_pred), np.array(y_pred_prob))
auc.append(adm_auc)
p_1.append(adm_p_1)
p_3.append(adm_p_3)
p_5.append(adm_p_5)
f1.append(adm_f1)
prauc.append(adm_prauc)
llprint('\rEval--Epoch: %d, Step: %d/%d' % (epoch, step, len(data_eval)))
llprint('\tAUC: %.4f, P1: %.4f, P3: %.4f, P5: %.4f, F1: %.4f, PRAUC: %.4f\n' % (
np.mean(auc), np.mean(p_1), np.mean(p_3), np.mean(p_5), np.mean(f1), np.mean(prauc)
))
dill.dump(obj=smm_record, file=open('../data/smm_records.pkl', 'wb'))
def main():
if not os.path.exists(os.path.join("saved", model_name)):
os.makedirs(os.path.join("saved", model_name))
data_path = '../data/records.pkl'
voc_path = '../data/voc.pkl'
ehr_adj_path = '../data/ehr_adj.pkl'
ddi_adj_path = '../data/ddi_A.pkl'
device = torch.device('cuda:0')
ehr_adj = dill.load(open(ehr_adj_path, 'rb'))
ddi_adj = dill.load(open(ddi_adj_path, 'rb'))
data = dill.load(open(data_path, 'rb'))
voc = dill.load(open(voc_path, 'rb'))
diag_voc, pro_voc, med_voc = voc['diag_voc'], voc['pro_voc'], voc['med_voc']
split_point = int(len(data) * 2 / 3)
data_train = data[:split_point]
eval_len = int(len(data[split_point:]) / 2)
# data_eval = data[split_point:split_point + eval_len]
data_eval = data[split_point+eval_len:]
EPOCH = 30
LR = 0.001
EVAL = True
voc_size = (len(diag_voc.idx2word), len(pro_voc.idx2word), len(med_voc.idx2word))
model = GMNN(voc_size, ehr_adj, ddi_adj, emb_dim=64, device=device)
if EVAL:
model.load_state_dict(torch.load(open(os.path.join("saved", model_name, resume_name), 'rb')))
model.to(device=device)
optimizer = Adam(list(model.parameters()), lr=LR)
if EVAL:
eval(model, data_eval, voc_size, 0)
else:
for epoch in range(EPOCH):
loss_record1 = []
loss_record2 = []
start_time = time.time()
model.train()
for step, input in enumerate(data_train):
input1_hidden, input2_hidden, target_hidden = None, None, None
loss = 0
for adm in input:
loss1_target = np.zeros((1, voc_size[2]))
loss1_target[:, adm[2]] = 1
loss2_target = adm[2] + [adm[2][0]]
loss3_target = np.full((1, voc_size[2]), -1)
for idx, item in enumerate(adm[2]):
loss3_target[0][idx] = item
target_output1, target_output2, [input1_hidden, input2_hidden, target_hidden], batch_pos_loss, batch_neg_loss = model(adm, [input1_hidden, input2_hidden, target_hidden])
loss1 = F.binary_cross_entropy_with_logits(target_output1, torch.FloatTensor(loss1_target).to(device))
loss2 = F.cross_entropy(target_output2, torch.LongTensor(loss2_target).to(device))
# loss = 9*loss1/10 + loss2/10
loss3 = F.multilabel_margin_loss(F.sigmoid(target_output1), torch.LongTensor(loss3_target).to(device))
loss += loss1 + 0.1*loss3 + 0.01*batch_neg_loss
loss_record1.append(loss.item())
loss_record2.append(loss3.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
llprint('\rTrain--Epoch: %d, Step: %d/%d' % (epoch, step, len(data_train)))
eval(model, data_eval, voc_size, epoch)
end_time = time.time()
elapsed_time = (end_time - start_time) / 60
llprint('\tEpoch: %d, Loss1: %.4f, Loss2: %.4f, One Epoch Time: %.2fm, Appro Left Time: %.2fh\n' % (epoch,
np.mean(loss_record1),
np.mean(loss_record2),
elapsed_time,
elapsed_time * (
EPOCH - epoch - 1)/60))
torch.save(model.state_dict(), open( os.path.join('saved', model_name, 'Epoch_%d_Loss1_%.4f.model' % (epoch, np.mean(loss_record1))), 'wb'))
print('')
# test
torch.save(model.state_dict(), open(
os.path.join('saved', model_name, 'final.model'), 'wb'))
if __name__ == '__main__':
main() | [
"models.GMNN",
"numpy.full",
"torch.LongTensor",
"torch.manual_seed",
"numpy.zeros",
"torch.FloatTensor",
"time.time",
"numpy.argsort",
"numpy.max",
"numpy.mean",
"numpy.array",
"torch.nn.functional.sigmoid",
"torch.device",
"os.path.join"
] | [((413, 436), 'torch.manual_seed', 'torch.manual_seed', (['(1203)'], {}), '(1203)\n', (430, 436), False, 'import torch\n'), ((2843, 2865), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2855, 2865), False, 'import torch\n'), ((3528, 3587), 'models.GMNN', 'GMNN', (['voc_size', 'ehr_adj', 'ddi_adj'], {'emb_dim': '(64)', 'device': 'device'}), '(voc_size, ehr_adj, ddi_adj, emb_dim=64, device=device)\n', (3532, 3587), False, 'from models import GMNN\n'), ((978, 999), 'numpy.zeros', 'np.zeros', (['voc_size[2]'], {}), '(voc_size[2])\n', (986, 999), True, 'import numpy as np\n'), ((1387, 1417), 'numpy.max', 'np.max', (['output_logits'], {'axis': '(-1)'}), '(output_logits, axis=-1)\n', (1393, 1417), True, 'import numpy as np\n'), ((1954, 1968), 'numpy.array', 'np.array', (['y_gt'], {}), '(y_gt)\n', (1962, 1968), True, 'import numpy as np\n'), ((1970, 1986), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (1978, 1986), True, 'import numpy as np\n'), ((1988, 2009), 'numpy.array', 'np.array', (['y_pred_prob'], {}), '(y_pred_prob)\n', (1996, 2009), True, 'import numpy as np\n'), ((2580, 2613), 'os.path.join', 'os.path.join', (['"""saved"""', 'model_name'], {}), "('saved', model_name)\n", (2592, 2613), False, 'import os\n'), ((2637, 2670), 'os.path.join', 'os.path.join', (['"""saved"""', 'model_name'], {}), "('saved', model_name)\n", (2649, 2670), False, 'import os\n'), ((3987, 3998), 'time.time', 'time.time', ([], {}), '()\n', (3996, 3998), False, 'import time\n'), ((5636, 5647), 'time.time', 'time.time', ([], {}), '()\n', (5645, 5647), False, 'import time\n'), ((1337, 1363), 'numpy.argsort', 'np.argsort', (['target_output1'], {}), '(target_output1)\n', (1347, 1363), True, 'import numpy as np\n'), ((2367, 2379), 'numpy.mean', 'np.mean', (['auc'], {}), '(auc)\n', (2374, 2379), True, 'import numpy as np\n'), ((2381, 2393), 'numpy.mean', 'np.mean', (['p_1'], {}), '(p_1)\n', (2388, 2393), True, 'import numpy as np\n'), ((2395, 2407), 
'numpy.mean', 'np.mean', (['p_3'], {}), '(p_3)\n', (2402, 2407), True, 'import numpy as np\n'), ((2409, 2421), 'numpy.mean', 'np.mean', (['p_5'], {}), '(p_5)\n', (2416, 2421), True, 'import numpy as np\n'), ((2423, 2434), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (2430, 2434), True, 'import numpy as np\n'), ((2436, 2450), 'numpy.mean', 'np.mean', (['prauc'], {}), '(prauc)\n', (2443, 2450), True, 'import numpy as np\n'), ((6679, 6727), 'os.path.join', 'os.path.join', (['"""saved"""', 'model_name', '"""final.model"""'], {}), "('saved', model_name, 'final.model')\n", (6691, 6727), False, 'import os\n'), ((3649, 3695), 'os.path.join', 'os.path.join', (['"""saved"""', 'model_name', 'resume_name'], {}), "('saved', model_name, resume_name)\n", (3661, 3695), False, 'import os\n'), ((4258, 4284), 'numpy.zeros', 'np.zeros', (['(1, voc_size[2])'], {}), '((1, voc_size[2]))\n', (4266, 4284), True, 'import numpy as np\n'), ((4431, 4460), 'numpy.full', 'np.full', (['(1, voc_size[2])', '(-1)'], {}), '((1, voc_size[2]), -1)\n', (4438, 4460), True, 'import numpy as np\n'), ((5102, 5127), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['target_output1'], {}), '(target_output1)\n', (5111, 5127), True, 'import torch.nn.functional as F\n'), ((5922, 5943), 'numpy.mean', 'np.mean', (['loss_record1'], {}), '(loss_record1)\n', (5929, 5943), True, 'import numpy as np\n'), ((6042, 6063), 'numpy.mean', 'np.mean', (['loss_record2'], {}), '(loss_record2)\n', (6049, 6063), True, 'import numpy as np\n'), ((4846, 4877), 'torch.FloatTensor', 'torch.FloatTensor', (['loss1_target'], {}), '(loss1_target)\n', (4863, 4877), False, 'import torch\n'), ((4951, 4981), 'torch.LongTensor', 'torch.LongTensor', (['loss2_target'], {}), '(loss2_target)\n', (4967, 4981), False, 'import torch\n'), ((5129, 5159), 'torch.LongTensor', 'torch.LongTensor', (['loss3_target'], {}), '(loss3_target)\n', (5145, 5159), False, 'import torch\n'), ((6547, 6568), 'numpy.mean', 'np.mean', (['loss_record1'], {}), 
'(loss_record1)\n', (6554, 6568), True, 'import numpy as np\n'), ((1268, 1293), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['target_output1'], {}), '(target_output1)\n', (1277, 1293), True, 'import torch.nn.functional as F\n')] |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import networkx as nx
import numpy as np
from numpy.testing import assert_allclose
# Module under test
import bokeh.plotting.graph as bpg # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_from_networkx_method() -> None:
G=nx.Graph()
G.add_nodes_from([0,1,2,3])
G.add_edges_from([[0,1], [0,2], [2,3]])
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.node_renderer.data_source.data["index"] == [0,1,2,3]
assert renderer.edge_renderer.data_source.data["start"] == [0,0,2]
assert renderer.edge_renderer.data_source.data["end"] == [1,2,3]
gl = renderer.layout_provider.graph_layout
assert set(gl.keys()) == {0, 1, 2, 3}
assert_allclose(gl[0], np.array([1.0, 0.0]), atol=1e-7)
def test_from_networkx_method_with_kwargs() -> None:
G=nx.Graph()
G.add_nodes_from([0,1,2,3])
G.add_edges_from([[0,1], [0,2], [2,3]])
renderer = bpg.from_networkx(G, nx.circular_layout, scale=2)
gl = renderer.layout_provider.graph_layout
assert set(gl.keys()) == {0, 1, 2, 3}
assert_allclose(gl[0], np.array([2.0, 0.0]), atol=1e-7)
def test_from_networkx_with_scalar_attributes() -> None:
G = nx.Graph()
G.add_nodes_from([(0, {"attr_1": "a", "attr_2": 10}),
(1, {"attr_1": "b"}),
(2, {"attr_1": "c", "attr_2": 30})])
G.add_edges_from([(0, 1, {"attr_1": "A"}),
(0, 2, {"attr_1": "B", "attr_2": 10})])
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.node_renderer.data_source.data["index"] == [0, 1, 2]
assert renderer.node_renderer.data_source.data["attr_1"] == ["a", "b", "c"]
assert renderer.node_renderer.data_source.data["attr_2"] == [10, None, 30]
assert renderer.edge_renderer.data_source.data["start"] == [0, 0]
assert renderer.edge_renderer.data_source.data["end"] == [1, 2]
assert renderer.edge_renderer.data_source.data["attr_1"] == ["A", "B"]
assert renderer.edge_renderer.data_source.data["attr_2"] == [None, 10]
@pytest.mark.parametrize('typ', [list, tuple])
def test_from_networkx_with_sequence_attributes(typ) -> None:
G = nx.Graph()
G.add_nodes_from([(0, {"attr_1": typ([1, 2]), "attr_2": 10}),
(1, {}),
(2, {"attr_1": typ([3]), "attr_2": 30})])
G.add_edges_from([(0, 1, {"attr_1": typ([1, 11])}),
(0, 2, {"attr_1": typ([2, 22]), "attr_2": 10})])
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.node_renderer.data_source.data["index"] == [0, 1, 2]
assert renderer.node_renderer.data_source.data["attr_1"] == [[1, 2], [], [3]]
assert renderer.node_renderer.data_source.data["attr_2"] == [10, None, 30]
assert renderer.edge_renderer.data_source.data["start"] == [0, 0]
assert renderer.edge_renderer.data_source.data["end"] == [1, 2]
assert renderer.edge_renderer.data_source.data["attr_1"] == [[1, 11], [2, 22]]
assert renderer.edge_renderer.data_source.data["attr_2"] == [None, 10]
def test_from_networkx_errors_with_mixed_attributes() -> None:
G = nx.Graph()
G.add_nodes_from([(0, {"attr_1": [1, 2], "attr_2": 10}),
(1, {}),
(2, {"attr_1": 3, "attr_2": 30})])
with pytest.raises(ValueError):
bpg.from_networkx(G, nx.circular_layout)
G = nx.Graph()
G.add_edges_from([(0, 1, {"attr_1": [1, 11]}),
(0, 2, {"attr_1": 2, "attr_2": 10})])
with pytest.raises(ValueError):
bpg.from_networkx(G, nx.circular_layout)
def test_from_networkx_with_bad_attributes() -> None:
G = nx.Graph()
G.add_nodes_from([(0, {"index": "a", "attr_1": 10}),
(1, {"index": "b", "attr_1": 20})])
G.add_edges_from([[0, 1]])
with pytest.warns(UserWarning):
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.node_renderer.data_source.data["index"] == [0, 1]
assert renderer.node_renderer.data_source.data["attr_1"] == [10, 20]
G = nx.Graph()
G.add_nodes_from([0, 1])
G.add_edges_from([(0, 1, {"start": "A", "attr_1": 10})])
with pytest.warns(UserWarning):
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.edge_renderer.data_source.data["start"] == [0]
assert renderer.edge_renderer.data_source.data["end"] == [1]
assert renderer.edge_renderer.data_source.data["attr_1"] == [10]
G = nx.Graph()
G.add_nodes_from([0, 1])
G.add_edges_from([(0, 1, {"end": "A", "attr_1": 10})])
with pytest.warns(UserWarning):
renderer = bpg.from_networkx(G, nx.circular_layout)
assert renderer.edge_renderer.data_source.data["start"] == [0]
assert renderer.edge_renderer.data_source.data["end"] == [1]
assert renderer.edge_renderer.data_source.data["attr_1"] == [10]
def test_from_networkx_fixed_layout() -> None:
G = nx.Graph()
G.add_nodes_from([0, 1, 2])
G.add_edges_from([[0, 1], [0, 2]])
fixed_layout = {0: [0, 1],
1: [-1, 0],
2: [1, 0]}
renderer = bpg.from_networkx(G, fixed_layout)
assert renderer.node_renderer.data_source.data["index"] == [0, 1, 2]
assert renderer.edge_renderer.data_source.data["start"] == [0, 0]
assert renderer.edge_renderer.data_source.data["end"] == [1, 2]
gl = renderer.layout_provider.graph_layout
assert set(gl.keys()) == {0, 1, 2}
assert renderer.layout_provider.graph_layout[0] == fixed_layout[0]
assert renderer.layout_provider.graph_layout[1] == fixed_layout[1]
assert renderer.layout_provider.graph_layout[2] == fixed_layout[2]
def test_from_networkx_with_missing_layout() -> None:
G = nx.Graph()
G.add_nodes_from([0, 1, 2])
G.add_edges_from([[0, 1], [0, 2]])
missing_fixed_layout = {0: [0, 1],
1: [-1, 0]}
with pytest.warns(UserWarning):
renderer = bpg.from_networkx(G, missing_fixed_layout)
gl = renderer.layout_provider.graph_layout
assert set(gl.keys()) == {0, 1}
assert renderer.layout_provider.graph_layout[0] == missing_fixed_layout[0]
assert renderer.layout_provider.graph_layout[1] == missing_fixed_layout[1]
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
"pytest.warns",
"pytest.raises",
"networkx.Graph",
"numpy.array",
"pytest.mark.parametrize",
"bokeh.plotting.graph.from_networkx"
] | [((3056, 3101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""typ"""', '[list, tuple]'], {}), "('typ', [list, tuple])\n", (3079, 3101), False, 'import pytest\n'), ((1256, 1266), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1264, 1266), True, 'import networkx as nx\n'), ((1359, 1399), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (1376, 1399), True, 'import bokeh.plotting.graph as bpg\n'), ((1824, 1834), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1832, 1834), True, 'import networkx as nx\n'), ((1927, 1976), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {'scale': '(2)'}), '(G, nx.circular_layout, scale=2)\n', (1944, 1976), True, 'import bokeh.plotting.graph as bpg\n'), ((2194, 2204), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2202, 2204), True, 'import networkx as nx\n'), ((2491, 2531), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (2508, 2531), True, 'import bokeh.plotting.graph as bpg\n'), ((3172, 3182), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3180, 3182), True, 'import networkx as nx\n'), ((3487, 3527), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (3504, 3527), True, 'import bokeh.plotting.graph as bpg\n'), ((4132, 4142), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4140, 4142), True, 'import networkx as nx\n'), ((4387, 4397), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4395, 4397), True, 'import networkx as nx\n'), ((4658, 4668), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4666, 4668), True, 'import networkx as nx\n'), ((5072, 5082), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5080, 5082), True, 'import networkx as nx\n'), ((5492, 5502), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5500, 5502), True, 'import networkx as nx\n'), 
((5957, 5967), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5965, 5967), True, 'import networkx as nx\n'), ((6150, 6184), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'fixed_layout'], {}), '(G, fixed_layout)\n', (6167, 6184), True, 'import bokeh.plotting.graph as bpg\n'), ((6759, 6769), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (6767, 6769), True, 'import networkx as nx\n'), ((1730, 1750), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (1738, 1750), True, 'import numpy as np\n'), ((2094, 2114), 'numpy.array', 'np.array', (['[2.0, 0.0]'], {}), '([2.0, 0.0])\n', (2102, 2114), True, 'import numpy as np\n'), ((4302, 4327), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4315, 4327), False, 'import pytest\n'), ((4337, 4377), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (4354, 4377), True, 'import bokeh.plotting.graph as bpg\n'), ((4519, 4544), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4532, 4544), False, 'import pytest\n'), ((4554, 4594), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (4571, 4594), True, 'import bokeh.plotting.graph as bpg\n'), ((4825, 4850), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (4837, 4850), False, 'import pytest\n'), ((4871, 4911), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (4888, 4911), True, 'import bokeh.plotting.graph as bpg\n'), ((5183, 5208), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (5195, 5208), False, 'import pytest\n'), ((5229, 5269), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (5246, 5269), True, 'import bokeh.plotting.graph as bpg\n'), ((5601, 5626), 
'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (5613, 5626), False, 'import pytest\n'), ((5647, 5687), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'nx.circular_layout'], {}), '(G, nx.circular_layout)\n', (5664, 5687), True, 'import bokeh.plotting.graph as bpg\n'), ((6931, 6956), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (6943, 6956), False, 'import pytest\n'), ((6977, 7019), 'bokeh.plotting.graph.from_networkx', 'bpg.from_networkx', (['G', 'missing_fixed_layout'], {}), '(G, missing_fixed_layout)\n', (6994, 7019), True, 'import bokeh.plotting.graph as bpg\n')] |
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
import os
import cv2
import math
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
class WaterPipeline(Pipeline):
def __init__(self, device, batch_size, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y,
num_threads=3, device_id=0, num_gpus=1, dtype=types.UINT8, prime_size=False,
do_mask=False):
super(WaterPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.dtype = dtype
self.prime_size = prime_size
self.do_mask = do_mask
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
self.water = ops.Water(device = self.device, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
interp_type = dali.types.INTERP_LINEAR)
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
mask = fn.random.coin_flip(seed=42) if self.do_mask else None
images = fn.cast(images, dtype=self.dtype)
images = self.water(images, mask=mask)
return images
def python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y):
nh,nw=img.shape[:2]
img_x=np.zeros((nh,nw),np.float32)
img_y=np.zeros((nh,nw),np.float32)
x_idx = np.arange(0, nw, 1, np.float32)
y_idx = np.arange(0, nh, 1, np.float32)
x_wave = ampl_y * np.cos(freq_y * x_idx + phase_y)
y_wave = ampl_x * np.sin(freq_x * y_idx + phase_x)
for x in range(nw):
img_x[:,x] = y_wave + x - 0.5
for y in range(nh):
img_y[y,:] = x_wave + y - 0.5
return cv2.remap(img, img_x, img_y, cv2.INTER_LINEAR)
class WaterPythonPipeline(Pipeline):
def __init__(self, batch_size, function, num_threads=1, device_id=0, num_gpus=1,
dtype=types.UINT8, prime_size=False):
super(WaterPythonPipeline, self).__init__(batch_size,
num_threads,
device_id,
exec_async=False,
exec_pipelined=False)
self.dtype = dtype
self.prime_size = prime_size
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
self.water = ops.PythonFunction(function=function, output_layouts="HWC")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
images = fn.cast(images, dtype=self.dtype)
images = self.water(images)
return images
def check_water_cpu_vs_gpu(batch_size, niter, dtype, do_mask):
phase_y=0.5
phase_x=0.2
freq_x=0.06
freq_y=0.08
ampl_x=2.0
ampl_y=3.0
compare_pipelines(WaterPipeline('cpu', batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, do_mask=do_mask),
WaterPipeline('gpu', batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, do_mask=do_mask),
batch_size=batch_size, N_iterations=niter, eps=1)
def test_water_cpu_vs_gpu():
niter = 3
for batch_size in [1, 3]:
for do_mask in [False, True]:
for dtype in [types.UINT8, types.FLOAT]:
yield check_water_cpu_vs_gpu, batch_size, niter, dtype, do_mask
def check_water_vs_cv(device, batch_size, niter, dtype, prime_size):
phase_y=0.5
phase_x=0.2
freq_x=0.06
freq_y=0.08
ampl_x=2.0
ampl_y=3.0
python_func = lambda img: python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y)
compare_pipelines(WaterPipeline(device, batch_size, ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y, freq_x=freq_x, freq_y=freq_y,
dtype=dtype, prime_size=prime_size),
WaterPythonPipeline(batch_size, python_func, dtype=dtype,
prime_size=prime_size),
batch_size=batch_size, N_iterations=niter, eps=8)
def test_water_vs_cv():
niter = 3
for device in ['cpu', 'gpu']:
for batch_size in [1, 3]:
for dtype in [types.UINT8, types.FLOAT]:
for prime_size in [False, True]:
yield check_water_vs_cv, device, batch_size, niter, dtype, prime_size
| [
"nvidia.dali.ops.readers.Caffe",
"nvidia.dali.fn.cast",
"nvidia.dali.ops.decoders.Image",
"nvidia.dali.fn.resize",
"nvidia.dali.fn.random.coin_flip",
"nvidia.dali.ops.Water",
"numpy.zeros",
"cv2.remap",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"nvidia.dali.ops.PythonFunction",
"test_utils.g... | [((943, 964), 'test_utils.get_dali_extra_path', 'get_dali_extra_path', ([], {}), '()\n', (962, 964), False, 'from test_utils import get_dali_extra_path\n'), ((983, 1025), 'os.path.join', 'os.path.join', (['test_data_root', '"""db"""', '"""lmdb"""'], {}), "(test_data_root, 'db', 'lmdb')\n", (995, 1025), False, 'import os\n'), ((2498, 2528), 'numpy.zeros', 'np.zeros', (['(nh, nw)', 'np.float32'], {}), '((nh, nw), np.float32)\n', (2506, 2528), True, 'import numpy as np\n'), ((2537, 2567), 'numpy.zeros', 'np.zeros', (['(nh, nw)', 'np.float32'], {}), '((nh, nw), np.float32)\n', (2545, 2567), True, 'import numpy as np\n'), ((2578, 2609), 'numpy.arange', 'np.arange', (['(0)', 'nw', '(1)', 'np.float32'], {}), '(0, nw, 1, np.float32)\n', (2587, 2609), True, 'import numpy as np\n'), ((2622, 2653), 'numpy.arange', 'np.arange', (['(0)', 'nh', '(1)', 'np.float32'], {}), '(0, nh, 1, np.float32)\n', (2631, 2653), True, 'import numpy as np\n'), ((2901, 2947), 'cv2.remap', 'cv2.remap', (['img', 'img_x', 'img_y', 'cv2.INTER_LINEAR'], {}), '(img, img_x, img_y, cv2.INTER_LINEAR)\n', (2910, 2947), False, 'import cv2\n'), ((1503, 1588), 'nvidia.dali.ops.readers.Caffe', 'ops.readers.Caffe', ([], {'path': 'caffe_db_folder', 'shard_id': 'device_id', 'num_shards': 'num_gpus'}), '(path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus\n )\n', (1520, 1588), True, 'import nvidia.dali.ops as ops\n'), ((1612, 1667), 'nvidia.dali.ops.decoders.Image', 'ops.decoders.Image', ([], {'device': '"""cpu"""', 'output_type': 'types.RGB'}), "(device='cpu', output_type=types.RGB)\n", (1630, 1667), True, 'import nvidia.dali.ops as ops\n'), ((1693, 1863), 'nvidia.dali.ops.Water', 'ops.Water', ([], {'device': 'self.device', 'ampl_x': 'ampl_x', 'ampl_y': 'ampl_y', 'phase_x': 'phase_x', 'phase_y': 'phase_y', 'freq_x': 'freq_x', 'freq_y': 'freq_y', 'interp_type': 'dali.types.INTERP_LINEAR'}), '(device=self.device, ampl_x=ampl_x, ampl_y=ampl_y, phase_x=phase_x,\n phase_y=phase_y, 
freq_x=freq_x, freq_y=freq_y, interp_type=dali.types.\n INTERP_LINEAR)\n', (1702, 1863), True, 'import nvidia.dali.ops as ops\n'), ((2287, 2320), 'nvidia.dali.fn.cast', 'fn.cast', (['images'], {'dtype': 'self.dtype'}), '(images, dtype=self.dtype)\n', (2294, 2320), True, 'import nvidia.dali.fn as fn\n'), ((2676, 2708), 'numpy.cos', 'np.cos', (['(freq_y * x_idx + phase_y)'], {}), '(freq_y * x_idx + phase_y)\n', (2682, 2708), True, 'import numpy as np\n'), ((2731, 2763), 'numpy.sin', 'np.sin', (['(freq_x * y_idx + phase_x)'], {}), '(freq_x * y_idx + phase_x)\n', (2737, 2763), True, 'import numpy as np\n'), ((3510, 3595), 'nvidia.dali.ops.readers.Caffe', 'ops.readers.Caffe', ([], {'path': 'caffe_db_folder', 'shard_id': 'device_id', 'num_shards': 'num_gpus'}), '(path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus\n )\n', (3527, 3595), True, 'import nvidia.dali.ops as ops\n'), ((3619, 3674), 'nvidia.dali.ops.decoders.Image', 'ops.decoders.Image', ([], {'device': '"""cpu"""', 'output_type': 'types.RGB'}), "(device='cpu', output_type=types.RGB)\n", (3637, 3674), True, 'import nvidia.dali.ops as ops\n'), ((3701, 3760), 'nvidia.dali.ops.PythonFunction', 'ops.PythonFunction', ([], {'function': 'function', 'output_layouts': '"""HWC"""'}), "(function=function, output_layouts='HWC')\n", (3719, 3760), True, 'import nvidia.dali.ops as ops\n'), ((3991, 4024), 'nvidia.dali.fn.cast', 'fn.cast', (['images'], {'dtype': 'self.dtype'}), '(images, dtype=self.dtype)\n', (3998, 4024), True, 'import nvidia.dali.fn as fn\n'), ((2155, 2199), 'nvidia.dali.fn.resize', 'fn.resize', (['images'], {'resize_x': '(101)', 'resize_y': '(43)'}), '(images, resize_x=101, resize_y=43)\n', (2164, 2199), True, 'import nvidia.dali.fn as fn\n'), ((2215, 2243), 'nvidia.dali.fn.random.coin_flip', 'fn.random.coin_flip', ([], {'seed': '(42)'}), '(seed=42)\n', (2234, 2243), True, 'import nvidia.dali.fn as fn\n'), ((3929, 3973), 'nvidia.dali.fn.resize', 'fn.resize', (['images'], {'resize_x': '(101)', 
'resize_y': '(43)'}), '(images, resize_x=101, resize_y=43)\n', (3938, 3973), True, 'import nvidia.dali.fn as fn\n')] |
import os
import numpy as np
import warnings
from keras.callbacks import Callback
from keras import backend as K
# Code is ported from https://github.com/fastai/fastai
class OneCycleLR(Callback):
    def __init__(self,
                 num_samples,
                 batch_size,
                 max_lr,
                 end_percentage=0.1,
                 scale_percentage=None,
                 maximum_momentum=0.95,
                 minimum_momentum=0.85,
                 verbose=True):
        """ This callback implements a cyclical learning rate policy (CLR).
        This is a special case of Cyclic Learning Rates, where we have only 1 cycle.
        After the completion of 1 cycle, the learning rate will decrease rapidly to
        100th its initial lowest value.
        # Arguments:
            num_samples: Integer. Number of samples in the dataset.
            batch_size: Integer. Batch size during training.
            max_lr: Float. Initial learning rate. This also sets the
                starting learning rate (which will be 10x smaller than
                this), and will increase to this value during the first cycle.
            end_percentage: Float. The percentage of all the epochs of training
                that will be dedicated to sharply decreasing the learning
                rate after the completion of 1 cycle. Must be between 0 and 1.
            scale_percentage: Float or None. If float, must be between 0 and 1.
                If None, it will compute the scale_percentage automatically
                based on the `end_percentage`.
            maximum_momentum: Optional. Sets the maximum momentum (initial)
                value, which gradually drops to its lowest value in half-cycle,
                then gradually increases again to stay constant at this max value.
                Can only be used with SGD Optimizer.
            minimum_momentum: Optional. Sets the minimum momentum at the end of
                the half-cycle. Can only be used with SGD Optimizer.
            verbose: Bool. Whether to print the current learning rate after every
                epoch.
        # Reference
            - [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, weight_decay, and weight decay](https://arxiv.org/abs/1803.09820)
            - [Super-Convergence: Very Fast Training of Residual Networks Using Large Learning Rates](https://arxiv.org/abs/1708.07120)
        """
        super(OneCycleLR, self).__init__()
        if end_percentage < 0. or end_percentage > 1.:
            raise ValueError("`end_percentage` must be between 0 and 1")
        if scale_percentage is not None and (scale_percentage < 0. or scale_percentage > 1.):
            raise ValueError("`scale_percentage` must be between 0 and 1")
        self.initial_lr = max_lr
        self.end_percentage = end_percentage
        self.scale = float(scale_percentage) if scale_percentage is not None else float(end_percentage)
        self.max_momentum = maximum_momentum
        self.min_momentum = minimum_momentum
        self.verbose = verbose
        # Momentum cycling requires both bounds; otherwise leave momentum alone.
        if self.max_momentum is not None and self.min_momentum is not None:
            self._update_momentum = True
        else:
            self._update_momentum = False
        self.clr_iterations = 0.
        self.history = {}
        # Filled in by `on_train_begin` once `self.params` is available.
        self.epochs = None
        self.batch_size = batch_size
        self.samples = num_samples
        self.steps = None
        self.num_iterations = None
        self.mid_cycle_id = None

    def _reset(self):
        """
        Reset the callback.
        """
        self.clr_iterations = 0.
        self.history = {}

    def compute_lr(self):
        """
        Compute the learning rate based on which phase of the cycle it is in.
        - If in the first half of training, the learning rate gradually increases.
        - If in the second half of training, the learning rate gradually decreases.
        - If in the final `end_percentage` portion of training, the learning rate
          is quickly reduced to near 100th of the original min learning rate.
        # Returns:
            the new learning rate
        """
        if self.clr_iterations > 2 * self.mid_cycle_id:
            # Final annealing phase: decay linearly towards 1/100th of the base.
            current_percentage = (self.clr_iterations - 2 * self.mid_cycle_id)
            current_percentage /= float((self.num_iterations - 2 * self.mid_cycle_id))
            new_lr = self.initial_lr * (1. + (current_percentage *
                                              (1. - 100.) / 100.)) * self.scale
        elif self.clr_iterations > self.mid_cycle_id:
            # Second half-cycle: percentage runs 1 -> 0, so LR ramps down.
            current_percentage = 1. - (
                self.clr_iterations - self.mid_cycle_id) / self.mid_cycle_id
            new_lr = self.initial_lr * (1. + current_percentage *
                                        (self.scale * 100 - 1.)) * self.scale
        else:
            # First half-cycle: percentage runs 0 -> 1, so LR ramps up.
            current_percentage = self.clr_iterations / self.mid_cycle_id
            new_lr = self.initial_lr * (1. + current_percentage *
                                        (self.scale * 100 - 1.)) * self.scale
        if self.clr_iterations == self.num_iterations:
            self.clr_iterations = 0
        return new_lr

    def compute_momentum(self):
        """
        Compute the momentum based on which phase of the cycle it is in.
        - If in the first half of training, the momentum gradually decreases.
        - If in the second half of training, the momentum gradually increases.
        - If in the final `end_percentage` portion of training, the momentum value
          is kept constant at the maximum initial value.
        # Returns:
            the new momentum value
        """
        if self.clr_iterations > 2 * self.mid_cycle_id:
            new_momentum = self.max_momentum
        elif self.clr_iterations > self.mid_cycle_id:
            current_percentage = 1. - ((self.clr_iterations - self.mid_cycle_id) / float(
                self.mid_cycle_id))
            new_momentum = self.max_momentum - current_percentage * (
                self.max_momentum - self.min_momentum)
        else:
            current_percentage = self.clr_iterations / float(self.mid_cycle_id)
            new_momentum = self.max_momentum - current_percentage * (
                self.max_momentum - self.min_momentum)
        return new_momentum

    def on_train_begin(self, logs=None):
        # FIX: was `logs={}` — a mutable default argument. `None` plus the
        # fallback below is the safe, behavior-identical form.
        logs = logs or {}
        self.epochs = self.params['epochs']
        # When fit generator is used
        # self.params don't have the elements 'batch_size' and 'samples'
        # self.batch_size = self.params['batch_size']
        # self.samples = self.params['samples']
        self.steps = self.params['steps']
        if self.steps is not None:
            self.num_iterations = self.epochs * self.steps
        else:
            if (self.samples % self.batch_size) == 0:
                remainder = 0
            else:
                remainder = 1
            # NOTE(review): `(epochs + remainder) * samples // batch_size` looks
            # like it was meant to be `epochs * ((samples // batch_size) + remainder)`
            # (one extra step per epoch for the partial batch) — confirm upstream.
            self.num_iterations = (self.epochs + remainder) * self.samples // self.batch_size
        self.mid_cycle_id = int(self.num_iterations * ((1. - self.end_percentage)) / float(2))
        self._reset()
        K.set_value(self.model.optimizer.lr, self.compute_lr())
        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")
            new_momentum = self.compute_momentum()
            K.set_value(self.model.optimizer.momentum, new_momentum)

    def on_batch_end(self, epoch, logs=None):
        # NOTE: the first positional argument is actually the batch index
        # (Keras passes it positionally); kept as `epoch` for interface stability.
        logs = logs or {}
        self.clr_iterations += 1
        new_lr = self.compute_lr()
        # Record the LR that was actually used for this batch, then step it.
        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)
        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")
            new_momentum = self.compute_momentum()
            self.history.setdefault('momentum', []).append(
                K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

    def on_epoch_end(self, epoch, logs=None):
        if self.verbose:
            if self._update_momentum:
                print(" - lr: %0.5f - momentum: %0.2f " %
                      (self.history['lr'][-1], self.history['momentum'][-1]))
            else:
                print(" - lr: %0.5f " % (self.history['lr'][-1]))
class LRFinder(Callback):
    def __init__(self,
                 num_samples,
                 batch_size,
                 minimum_lr=1e-5,
                 maximum_lr=10.,
                 lr_scale='exp',
                 validation_data=None,
                 validation_sample_rate=5,
                 stopping_criterion_factor=4.,
                 loss_smoothing_beta=0.98,
                 save_dir=None,
                 verbose=True):
        """
        This class uses the Cyclic Learning Rate history to find a
        set of learning rates that can be good initializations for the
        One-Cycle training proposed by Leslie Smith in the paper referenced
        below.
        A port of the Fast.ai implementation for Keras.
        # Note
            This requires that the model be trained for exactly 1 epoch. If the model
            is trained for more epochs, then the metric calculations are only done for
            the first epoch.
        # Interpretation
            Upon visualizing the loss plot, check where the loss starts to increase
            rapidly. Choose a learning rate at somewhat prior to the corresponding
            position in the plot for faster convergence. This will be the maximum_lr lr.
            Choose the max value as this value when passing the `max_val` argument
            to OneCycleLR callback.
            Since the plot is in log-scale, you need to compute 10 ^ (-k) of the x-axis
        # Arguments:
            num_samples: Integer. Number of samples in the dataset.
            batch_size: Integer. Batch size during training.
            minimum_lr: Float. Initial learning rate (and the minimum).
            maximum_lr: Float. Final learning rate (and the maximum).
            lr_scale: Can be one of ['exp', 'linear']. Chooses the type of
                scaling for each update to the learning rate during subsequent
                batches. Choose 'exp' for large range and 'linear' for small range.
            validation_data: Requires the validation dataset as a tuple of
                (X, y) belonging to the validation set. If provided, will use the
                validation set to compute the loss metrics. Else uses the training
                batch loss. Will warn if not provided to alert the user.
            validation_sample_rate: Positive or Negative Integer. Number of batches to sample from the
                validation set per iteration of the LRFinder. Larger number of
                samples will reduce the variance but will take longer time to execute
                per batch.
                If Positive > 0, will sample from the validation dataset
                If Negative, will use the entire dataset
            stopping_criterion_factor: Integer or None. A factor which is used
                to measure large increase in the loss value during training.
                Since callbacks cannot stop training of a model, it will simply
                stop logging the additional values from the epochs after this
                stopping criterion has been met.
                If None, this check will not be performed.
            loss_smoothing_beta: Float. The smoothing factor for the moving
                average of the loss function.
            save_dir: Optional, String. If passed a directory path, the callback
                will save the running loss and learning rates to two separate numpy
                arrays inside this directory. If the directory in this path does not
                exist, they will be created.
            verbose: Whether to print the learning rate after every batch of training.
        # References:
            - [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, weight_decay, and weight decay](https://arxiv.org/abs/1803.09820)
        """
        super(LRFinder, self).__init__()
        if lr_scale not in ['exp', 'linear']:
            raise ValueError("`lr_scale` must be one of ['exp', 'linear']")
        if validation_data is not None:
            self.validation_data = validation_data
            self.use_validation_set = True
            if validation_sample_rate > 0 or validation_sample_rate < 0:
                self.validation_sample_rate = validation_sample_rate
            else:
                # FIX: error message previously read "other than o" (typo for 0).
                raise ValueError("`validation_sample_rate` must be a positive or negative integer other than 0")
        else:
            self.use_validation_set = False
            self.validation_sample_rate = 0
        self.num_samples = num_samples
        self.batch_size = batch_size
        self.initial_lr = minimum_lr
        self.final_lr = maximum_lr
        self.lr_scale = lr_scale
        self.stopping_criterion_factor = stopping_criterion_factor
        self.loss_smoothing_beta = loss_smoothing_beta
        self.save_dir = save_dir
        self.verbose = verbose
        self.num_batches_ = num_samples // batch_size
        self.current_lr_ = minimum_lr
        if lr_scale == 'exp':
            # Multiplicative step so `num_batches_` updates sweep
            # [minimum_lr, maximum_lr] on a log scale.
            self.lr_multiplier_ = (maximum_lr / float(minimum_lr)) ** (
                1. / float(self.num_batches_))
        else:
            extra_batch = int((num_samples % batch_size) != 0)
            self.lr_multiplier_ = np.linspace(
                minimum_lr, maximum_lr, num=self.num_batches_ + extra_batch)
        # If negative, use entire validation set
        if self.validation_sample_rate < 0:
            self.validation_sample_rate = self.validation_data[0].shape[0] // batch_size
        self.current_batch_ = 0
        self.current_epoch_ = 0
        self.best_loss_ = 1e6
        self.running_loss_ = 0.
        self.history = {}

    def on_train_begin(self, logs=None):
        self.current_epoch_ = 1
        K.set_value(self.model.optimizer.lr, self.initial_lr)
        warnings.simplefilter("ignore")

    def on_epoch_begin(self, epoch, logs=None):
        self.current_batch_ = 0
        if self.current_epoch_ > 1:
            warnings.warn(
                "\n\nLearning rate finder should be used only with a single epoch. "
                "Hereafter, the callback will not measure the losses.\n\n")

    def on_batch_begin(self, batch, logs=None):
        self.current_batch_ += 1

    def on_batch_end(self, batch, logs=None):
        # Only the first epoch is measured; see class docstring.
        if self.current_epoch_ > 1:
            return
        if self.use_validation_set:
            X, Y = self.validation_data[0], self.validation_data[1]
            # use a few random batches from the validation set for a fast
            # approximation of the validation loss
            num_samples = self.batch_size * self.validation_sample_rate
            if num_samples > X.shape[0]:
                num_samples = X.shape[0]
            idx = np.random.choice(X.shape[0], num_samples, replace=False)
            x = X[idx]
            y = Y[idx]
            values = self.model.evaluate(x, y, batch_size=self.batch_size, verbose=False)
            loss = values[0]
        else:
            loss = logs['loss']
        # smooth the loss value and bias correct
        # BUG FIX: the exponential moving average must blend the *previous*
        # running loss with the new loss. The original code computed
        # `beta * loss + (1 - beta) * loss` (== loss, a no-op) and never
        # updated `self.running_loss_`, so `loss_smoothing_beta` had no effect.
        running_loss = self.loss_smoothing_beta * self.running_loss_ + (
            1. - self.loss_smoothing_beta) * loss
        self.running_loss_ = running_loss
        # Adam-style bias correction for the zero-initialized average.
        running_loss = running_loss / (
            1. - self.loss_smoothing_beta**self.current_batch_)
        # stop logging if loss is too large
        if self.current_batch_ > 1 and self.stopping_criterion_factor is not None and (
                running_loss >
                self.stopping_criterion_factor * self.best_loss_):
            if self.verbose:
                print(" - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)"
                      % (self.stopping_criterion_factor, self.best_loss_))
            return
        if running_loss < self.best_loss_ or self.current_batch_ == 1:
            self.best_loss_ = running_loss
        current_lr = K.get_value(self.model.optimizer.lr)
        self.history.setdefault('running_loss_', []).append(running_loss)
        if self.lr_scale == 'exp':
            self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
        else:
            self.history.setdefault('log_lrs', []).append(current_lr)
        # compute the lr for the next batch and update the optimizer lr
        if self.lr_scale == 'exp':
            current_lr *= self.lr_multiplier_
        else:
            current_lr = self.lr_multiplier_[self.current_batch_ - 1]
        K.set_value(self.model.optimizer.lr, current_lr)
        # save the other metrics as well
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        if self.verbose:
            if self.use_validation_set:
                print(" - LRFinder: val_loss: %1.4f - lr = %1.8f " %
                      (values[0], current_lr))
            else:
                print(" - LRFinder: lr = %1.8f " % current_lr)

    def on_epoch_end(self, epoch, logs=None):
        if self.save_dir is not None and self.current_epoch_ <= 1:
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            losses_path = os.path.join(self.save_dir, 'losses.npy')
            lrs_path = os.path.join(self.save_dir, 'lrs.npy')
            np.save(losses_path, self.losses)
            np.save(lrs_path, self.lrs)
            if self.verbose:
                print("\tLR Finder : Saved the losses and learning rate values in path : {%s}"
                      % (self.save_dir))
        self.current_epoch_ += 1
        warnings.simplefilter("default")

    def plot_schedule(self, clip_beginning=None, clip_endding=None):
        """
        Plots the schedule from the callback itself.
        # Arguments:
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print(
                "Matplotlib not found. Please use `pip install matplotlib` first."
            )
            return
        # Normalize clip arguments to the sign the slicing below expects.
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding
        losses = self.losses
        lrs = self.lrs
        if clip_beginning:
            losses = losses[clip_beginning:]
            lrs = lrs[clip_beginning:]
        if clip_endding:
            losses = losses[:clip_endding]
            lrs = lrs[:clip_endding]
        plt.plot(lrs, losses)
        plt.title('Learning rate vs Loss')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.show()

    @classmethod
    def restore_schedule_from_dir(cls,
                                  directory,
                                  clip_beginning=None,
                                  clip_endding=None):
        """
        Loads the training history from the saved numpy files in the given directory.
        # Arguments:
            directory: String. Path to the directory where the serialized numpy
                arrays of the loss and learning rates are saved.
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.
        Returns:
            tuple of (losses, learning rates)
        """
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding
        losses_path = os.path.join(directory, 'losses.npy')
        lrs_path = os.path.join(directory, 'lrs.npy')
        if not os.path.exists(losses_path) or not os.path.exists(lrs_path):
            print("%s and %s could not be found at directory : {%s}" %
                  (losses_path, lrs_path, directory))
            losses = None
            lrs = None
        else:
            losses = np.load(losses_path)
            lrs = np.load(lrs_path)
            if clip_beginning:
                losses = losses[clip_beginning:]
                lrs = lrs[clip_beginning:]
            if clip_endding:
                losses = losses[:clip_endding]
                lrs = lrs[:clip_endding]
        return losses, lrs

    @classmethod
    def plot_schedule_from_file(cls,
                                directory,
                                clip_beginning=None,
                                clip_endding=None):
        """
        Plots the schedule from the saved numpy arrays of the loss and learning
        rate values in the specified directory.
        # Arguments:
            directory: String. Path to the directory where the serialized numpy
                arrays of the loss and learning rates are saved.
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print("Matplotlib not found. Please use `pip install matplotlib` first.")
            return
        losses, lrs = cls.restore_schedule_from_dir(
            directory,
            clip_beginning=clip_beginning,
            clip_endding=clip_endding)
        if losses is None or lrs is None:
            return
        else:
            plt.plot(lrs, losses)
            plt.title('Learning rate vs Loss')
            plt.xlabel('learning rate')
            plt.ylabel('loss')
            plt.show()

    @property
    def lrs(self):
        # Learning-rate axis recorded so far (log10 values when lr_scale='exp').
        return np.array(self.history['log_lrs'])

    @property
    def losses(self):
        # Bias-corrected smoothed losses recorded so far.
        return np.array(self.history['running_loss_'])
| [
"matplotlib.pyplot.title",
"numpy.load",
"keras.backend.set_value",
"matplotlib.pyplot.style.use",
"os.path.join",
"warnings.simplefilter",
"os.path.exists",
"numpy.linspace",
"numpy.random.choice",
"numpy.log10",
"numpy.save",
"matplotlib.pyplot.show",
"keras.backend.get_value",
"matplotl... | [((7833, 7877), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'new_lr'], {}), '(self.model.optimizer.lr, new_lr)\n', (7844, 7877), True, 'from keras import backend as K\n'), ((14445, 14498), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'self.initial_lr'], {}), '(self.model.optimizer.lr, self.initial_lr)\n', (14456, 14498), True, 'from keras import backend as K\n'), ((14508, 14539), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (14529, 14539), False, 'import warnings\n'), ((16521, 16557), 'keras.backend.get_value', 'K.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (16532, 16557), True, 'from keras import backend as K\n'), ((17079, 17127), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'current_lr'], {}), '(self.model.optimizer.lr, current_lr)\n', (17090, 17127), True, 'from keras import backend as K\n'), ((18154, 18186), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (18175, 18186), False, 'import warnings\n'), ((19560, 19581), 'matplotlib.pyplot.plot', 'plt.plot', (['lrs', 'losses'], {}), '(lrs, losses)\n', (19568, 19581), True, 'import matplotlib.pyplot as plt\n'), ((19590, 19624), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning rate vs Loss"""'], {}), "('Learning rate vs Loss')\n", (19599, 19624), True, 'import matplotlib.pyplot as plt\n'), ((19633, 19660), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""learning rate"""'], {}), "('learning rate')\n", (19643, 19660), True, 'import matplotlib.pyplot as plt\n'), ((19669, 19687), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (19679, 19687), True, 'import matplotlib.pyplot as plt\n'), ((19696, 19706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19704, 19706), True, 'import matplotlib.pyplot as plt\n'), ((20942, 20979), 'os.path.join', 'os.path.join', 
(['directory', '"""losses.npy"""'], {}), "(directory, 'losses.npy')\n", (20954, 20979), False, 'import os\n'), ((20999, 21033), 'os.path.join', 'os.path.join', (['directory', '"""lrs.npy"""'], {}), "(directory, 'lrs.npy')\n", (21011, 21033), False, 'import os\n'), ((23316, 23349), 'numpy.array', 'np.array', (["self.history['log_lrs']"], {}), "(self.history['log_lrs'])\n", (23324, 23349), True, 'import numpy as np\n'), ((23402, 23441), 'numpy.array', 'np.array', (["self.history['running_loss_']"], {}), "(self.history['running_loss_'])\n", (23410, 23441), True, 'import numpy as np\n'), ((7525, 7581), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.momentum', 'new_momentum'], {}), '(self.model.optimizer.momentum, new_momentum)\n', (7536, 7581), True, 'from keras import backend as K\n'), ((7787, 7823), 'keras.backend.get_value', 'K.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (7798, 7823), True, 'from keras import backend as K\n'), ((8244, 8300), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.momentum', 'new_momentum'], {}), '(self.model.optimizer.momentum, new_momentum)\n', (8255, 8300), True, 'from keras import backend as K\n'), ((13935, 14007), 'numpy.linspace', 'np.linspace', (['minimum_lr', 'maximum_lr'], {'num': '(self.num_batches_ + extra_batch)'}), '(minimum_lr, maximum_lr, num=self.num_batches_ + extra_batch)\n', (13946, 14007), True, 'import numpy as np\n'), ((14670, 14819), 'warnings.warn', 'warnings.warn', (['"""\n\nLearning rate finder should be used only with a single epoch. Hereafter, the callback will not measure the losses.\n\n"""'], {}), '(\n """\n\nLearning rate finder should be used only with a single epoch. 
Hereafter, the callback will not measure the losses.\n\n"""\n )\n', (14683, 14819), False, 'import warnings\n'), ((15388, 15444), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', 'num_samples'], {'replace': '(False)'}), '(X.shape[0], num_samples, replace=False)\n', (15404, 15444), True, 'import numpy as np\n'), ((17754, 17795), 'os.path.join', 'os.path.join', (['self.save_dir', '"""losses.npy"""'], {}), "(self.save_dir, 'losses.npy')\n", (17766, 17795), False, 'import os\n'), ((17819, 17857), 'os.path.join', 'os.path.join', (['self.save_dir', '"""lrs.npy"""'], {}), "(self.save_dir, 'lrs.npy')\n", (17831, 17857), False, 'import os\n'), ((17871, 17904), 'numpy.save', 'np.save', (['losses_path', 'self.losses'], {}), '(losses_path, self.losses)\n', (17878, 17904), True, 'import numpy as np\n'), ((17917, 17944), 'numpy.save', 'np.save', (['lrs_path', 'self.lrs'], {}), '(lrs_path, self.lrs)\n', (17924, 17944), True, 'import numpy as np\n'), ((18878, 18908), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (18891, 18908), True, 'import matplotlib.pyplot as plt\n'), ((21322, 21342), 'numpy.load', 'np.load', (['losses_path'], {}), '(losses_path)\n', (21329, 21342), True, 'import numpy as np\n'), ((21361, 21378), 'numpy.load', 'np.load', (['lrs_path'], {}), '(lrs_path)\n', (21368, 21378), True, 'import numpy as np\n'), ((22693, 22723), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (22706, 22723), True, 'import matplotlib.pyplot as plt\n'), ((23104, 23125), 'matplotlib.pyplot.plot', 'plt.plot', (['lrs', 'losses'], {}), '(lrs, losses)\n', (23112, 23125), True, 'import matplotlib.pyplot as plt\n'), ((23138, 23172), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning rate vs Loss"""'], {}), "('Learning rate vs Loss')\n", (23147, 23172), True, 'import matplotlib.pyplot as plt\n'), ((23185, 23212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""learning 
rate"""'], {}), "('learning rate')\n", (23195, 23212), True, 'import matplotlib.pyplot as plt\n'), ((23225, 23243), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (23235, 23243), True, 'import matplotlib.pyplot as plt\n'), ((23256, 23266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23264, 23266), True, 'import matplotlib.pyplot as plt\n'), ((8188, 8230), 'keras.backend.get_value', 'K.get_value', (['self.model.optimizer.momentum'], {}), '(self.model.optimizer.momentum)\n', (8199, 8230), True, 'from keras import backend as K\n'), ((16726, 16746), 'numpy.log10', 'np.log10', (['current_lr'], {}), '(current_lr)\n', (16734, 16746), True, 'import numpy as np\n'), ((17653, 17682), 'os.path.exists', 'os.path.exists', (['self.save_dir'], {}), '(self.save_dir)\n', (17667, 17682), False, 'import os\n'), ((17700, 17726), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (17711, 17726), False, 'import os\n'), ((21050, 21077), 'os.path.exists', 'os.path.exists', (['losses_path'], {}), '(losses_path)\n', (21064, 21077), False, 'import os\n'), ((21085, 21109), 'os.path.exists', 'os.path.exists', (['lrs_path'], {}), '(lrs_path)\n', (21099, 21109), False, 'import os\n')] |
import numpy as np
class RunningStat(object):
    """Streaming mean/variance accumulator (Welford's online algorithm).

    http://www.johndcook.com/blog/standard_deviation/
    """

    def __init__(self, shape):
        """
        :param shape: (tuple) the shape of the data stream's output
        """
        self._step = 0
        self._mean = np.zeros(shape)
        self._std = np.zeros(shape)  # accumulates sum of squared deviations (M2)

    def push(self, value):
        """Fold a single observation into the running statistics.

        :param value: (numpy Number) the data
        """
        sample = np.asarray(value)
        assert sample.shape == self._mean.shape
        self._step += 1
        if self._step == 1:
            self._mean[...] = sample
        else:
            prev_mean = self._mean.copy()
            delta = sample - prev_mean
            self._mean[...] = prev_mean + delta / self._step
            # M2 update uses the deviation from both old and new means.
            self._std[...] = self._std + delta * (sample - self._mean)

    @property
    def n(self):
        """Number of observations pushed so far.

        :return: (int)
        """
        return self._step

    @property
    def mean(self):
        """Running mean of the stream.

        :return: (float)
        """
        return self._mean

    @property
    def var(self):
        """Sample variance; falls back to mean squared with < 2 samples.

        :return: (float)
        """
        if self._step > 1:
            return self._std / (self._step - 1)
        return np.square(self._mean)

    @property
    def std(self):
        """Sample standard deviation.

        :return: (float)
        """
        return np.sqrt(self.var)

    @property
    def shape(self):
        """Shape of a single observation.

        :return: (tuple)
        """
        return self._mean.shape
| [
"numpy.asarray",
"numpy.square",
"numpy.zeros",
"numpy.sqrt"
] | [((334, 349), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (342, 349), True, 'import numpy as np\n'), ((370, 385), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (378, 385), True, 'import numpy as np\n'), ((541, 558), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (551, 558), True, 'import numpy as np\n'), ((1533, 1550), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (1540, 1550), True, 'import numpy as np\n'), ((1362, 1383), 'numpy.square', 'np.square', (['self._mean'], {}), '(self._mean)\n', (1371, 1383), True, 'import numpy as np\n')] |
#!/usr/bin/python
'''
numpy2geotiff.py
<NAME>
----------------
Input: - ASCII file where spaces, line separate numeric 2D array values.
Such as that generated by numpy.savetxt() with default formatting.
- Geotiff raster with target extent, coordinate system.
- File to save output raster
Output: - Geotiff raster of same extent, coordinate system as input.
Raster values are just a copy of the input ASCII file.
Todo: Multiple input ASCII files, each a band of the output raster.
'''
from optparse import OptionParser
import numpy as np
from osgeo import gdal
def main():
    """Copy an ASCII 2D array into a GeoTIFF with the extent/CRS of a template raster.

    Reads -g (template geotiff), -n (ASCII grid), -o (output path) from the
    command line; exits with status 1 on missing options or shape mismatch.
    """
    # Options
    parser = OptionParser()
    parser.add_option("-g", "--geotiff_in", default = None,
        help = "Raster (geotiff) of region [required].")
    parser.add_option("-n", "--numpy", default = None,
        help = "ASCII numeric 2D array file [required].")
    parser.add_option("-o", "--geotiff_out", default = None,
        help = "Output raster (geotiff) [required].")
    (options, args) = parser.parse_args()
    # Verify presence of required options
    if options.geotiff_in is None \
        or options.numpy is None \
        or options.geotiff_out is None:
        parser.print_help()
        exit(1)
    # Load input raster
    in_raster = gdal.Open(options.geotiff_in)
    in_grid = in_raster.GetRasterBand(1).ReadAsArray()
    # Load input ASCII (atleast_2d so a single-row file still has 2 dims
    # and fails the shape check cleanly instead of raising IndexError)
    in_ascii = np.atleast_2d(np.loadtxt(options.numpy))
    # Verify same dimensions
    # BUG FIX: the original compared in_grid's shape against itself
    # (always equal), so mismatched grids were never rejected.
    if in_ascii.shape[0] != in_grid.shape[0] or \
        in_ascii.shape[1] != in_grid.shape[1]:
        print("Input geotiff (-g) and input numpy (-n) must have same row, column lengths")
        exit(1)
    # Create output raster with the template's geotransform and projection
    out_raster = gdal.GetDriverByName('GTiff').Create(
        options.geotiff_out, in_grid.shape[1], in_grid.shape[0],
        1, gdal.GDT_Float32)
    out_raster.SetGeoTransform(in_raster.GetGeoTransform())
    out_raster.SetProjection(in_raster.GetProjection())
    out_band = out_raster.GetRasterBand(1)
    # Write ASCII grid to output raster
    out_band.WriteArray(in_ascii)
    # Write output raster to disk
    out_raster.FlushCache()
if __name__ == '__main__':
    main()
| [
"osgeo.gdal.Open",
"osgeo.gdal.GetDriverByName",
"numpy.loadtxt",
"optparse.OptionParser"
] | [((640, 654), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (652, 654), False, 'from optparse import OptionParser\n'), ((1306, 1335), 'osgeo.gdal.Open', 'gdal.Open', (['options.geotiff_in'], {}), '(options.geotiff_in)\n', (1315, 1335), False, 'from osgeo import gdal\n'), ((1432, 1457), 'numpy.loadtxt', 'np.loadtxt', (['options.numpy'], {}), '(options.numpy)\n', (1442, 1457), True, 'import numpy as np\n'), ((1733, 1762), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1753, 1762), False, 'from osgeo import gdal\n')] |
from .CoderBase import CoderBase
import numpy as np
class TBR(CoderBase):
    """Threshold-Based Representation coder.

    Emits a spike (+1/-1/0) whenever the signal's first difference exceeds
    an adaptive threshold M + f_factor * sqrt(V), where M and V are the
    running mean and variance of the differences seen so far.
    """

    def __init__(self, f_factor):
        self.f_factor = f_factor
        self.threshold = None
        self.start_point = None
        self.previous_signal = None
        self.enc_N = 0
        self.M = 0
        self.V = 0
        self.dec_isFirst = True

    def encode(self, signal, start_point=None, threshold=None):
        """Encode one sample; returns a spike in {-1, 0, 1}."""
        if start_point is not None:
            self.start_point = start_point
        elif self.start_point is None:
            self.start_point = signal
        # First sample has no predecessor: treat the difference as zero.
        prev = signal if self.previous_signal is None else self.previous_signal
        diff = signal - prev
        self.previous_signal = signal
        self.enc_N += 1
        n = self.enc_N
        # Update running variance before the mean: it needs the old M.
        self.V = self.V * (n - 1) / n + ((self.M - diff) ** 2) * (n - 1) / (n ** 2)
        self.M = self.M * (n - 1) / n + diff / n
        if threshold is None:
            self.threshold = self.M + self.f_factor * np.sqrt(self.V)
        else:
            self.threshold = threshold
        if diff > self.threshold:
            return 1
        if diff < -self.threshold:
            return -1
        return 0

    def decode(self, spikes, start_point=None, threshold=None):
        """Reconstruct one sample from a spike; inverse of `encode`."""
        if start_point is not None:
            self.start_point = start_point
        elif self.start_point is None:
            raise ValueError('start_point is not set')
        if threshold is not None:
            self.threshold = threshold
        elif self.threshold is None:
            raise ValueError('threshold is not set')
        if self.dec_isFirst:
            self.dec_isFirst = False
            reconstructed = self.start_point
        else:
            step = np.sign(spikes) * self.threshold
            reconstructed = self.previous_signal + step
        self.previous_signal = reconstructed
        return reconstructed
class SF(CoderBase):
    """Step-Forward coder.

    Fires a spike when the signal leaves a baseline band of width
    `threshold`; the baseline then steps one threshold in that direction.
    """

    def __init__(self, threshold):
        self.threshold = threshold
        self.start_point = None
        self.base = None
        self.dec_isFirst = True

    def encode(self, signal, start_point=None, base=None):
        """Encode one sample; returns a spike in {-1, 0, 1}."""
        if start_point is not None:
            self.start_point = start_point
        elif self.start_point is None:
            self.start_point = signal
        if base is not None:
            self.base = base
        elif self.base is None:
            self.base = self.start_point
        if signal > self.base + self.threshold:
            spike = 1
        elif signal < self.base - self.threshold:
            spike = -1
        else:
            spike = 0
        # Walk the baseline one threshold step in the spike direction.
        self.base += spike * self.threshold
        return spike

    def decode(self, spikes, start_point=None, threshold=None):
        """Reconstruct one sample from a spike; inverse of `encode`."""
        if start_point is not None:
            self.start_point = start_point
        elif self.start_point is None:
            raise ValueError('start_point is not set')
        if threshold is not None:
            self.threshold = threshold
        elif self.threshold is None:
            raise ValueError('threshold is not set')
        if self.dec_isFirst:
            self.dec_isFirst = False
            reconstructed = self.start_point
        else:
            step = np.sign(spikes) * self.threshold
            reconstructed = self.previous_signal + step
        self.previous_signal = reconstructed
        return reconstructed
class MW(CoderBase):
    """Moving-window spike coder.

    Emits +1/-1 when the current sample leaves a band of +-threshold
    around the mean of (up to) the last ``window`` samples.
    """
    def __init__(self, threshold, window):
        self.threshold = threshold
        self.window = window
        self.signals = np.zeros(self.window)  # circular sample buffer
        self.start_point = None
        self.enc_N = 0
        self.dec_isFirst = True
    def encode(self, signal, start_point=None):
        """Encode one sample; returns a spike in {-1, 0, 1}."""
        # Keep the first sample (or an explicit override) as the origin.
        if start_point is not None:
            self.start_point = start_point
        elif self.start_point is None:
            self.start_point = signal
        # Baseline = mean of samples seen so far, capped at `window`.
        if self.enc_N == 0:
            base = signal
        elif self.enc_N//self.window == 0:
            # fewer than `window` samples buffered: average only those
            base = np.sum(self.signals)/self.enc_N
        else:
            base = np.mean(self.signals)
        self.signals[self.enc_N % self.window] = signal
        self.enc_N += 1
        if signal > base + self.threshold:
            return 1
        if signal < base - self.threshold:
            return -1
        return 0
    def decode(self, spikes, start_point=None, threshold=None):
        """Reconstruct one sample: first call returns start_point, later
        calls step by sign(spike) * threshold."""
        if start_point is not None:
            self.start_point = start_point
        if self.start_point is None:
            raise ValueError('start_point is not set')
        if threshold is not None:
            self.threshold = threshold
        if self.threshold is None:
            raise ValueError('threshold is not set')
        if self.dec_isFirst:
            self.dec_isFirst = False
            reconstructed = self.start_point
        else:
            reconstructed = self.previous_signal + np.sign(spikes)*self.threshold
        self.previous_signal = reconstructed
        return reconstructed
class BSA(CoderBase):
    """FIR-matching spike coder (the name suggests Ben's Spiker
    Algorithm -- TODO confirm).

    A spike is fired whenever subtracting the FIR kernel from the recent
    signal history lowers the accumulated error by at least ``threshold``.
    """
    def __init__(self, threshold, fir):
        self.threshold = threshold
        self.fir = fir  # FIR kernel; both histories share its length
        self.shift = None  # additive normalization, set via encode/decode
        self.gain = None  # multiplicative normalization, set via encode/decode
        self.sig_hist = np.zeros(len(self.fir))  # recent encoder input window
        self.spk_hist = np.zeros(len(self.fir))  # recent decoder spike window
        self.err1 = 0  # error when the FIR is subtracted from the history
        self.err2 = 0  # error when it is not
        self.enc_N = 0  # samples seen by encode()
    def encode(self, signal, shift=None, gain=None):
        """Encode one sample; returns 1 (spike) or 0."""
        if shift is not None:
            self.shift = shift
        elif self.shift is None:
            raise ValueError('shift is not set')
        if gain is not None:
            self.gain = gain
        elif self.gain is None:
            raise ValueError('gain is not set')
        # normalize the input and clamp negatives to zero
        signal = (signal - self.shift)/self.gain
        if signal < 0:
            signal = 0
        self.enc_N += 1
        spikes = 0
        # shift the newest sample into the front of the history window
        self.sig_hist = np.roll(self.sig_hist, 1)
        self.sig_hist[0] = signal
        # only the part of the window filled so far takes part in the errors
        idx = min(len(self.fir), self.enc_N)
        self.err1 = np.sum(np.abs(self.sig_hist - self.fir)[:idx])
        self.err2 = np.sum(np.abs(self.sig_hist)[:idx])
        if self.err1 <= (self.err2 - self.threshold):
            spikes = 1
            # a spike "explains" one FIR worth of signal: remove it
            self.sig_hist -= self.fir
        return spikes
    def decode(self, spikes, shift=None, gain=None):
        """Reconstruct one sample: FIR-filter the spike history and undo
        the encoder normalization."""
        if shift is not None:
            self.shift = shift
        elif self.shift is None:
            raise ValueError('shift is not set')
        if gain is not None:
            self.gain = gain
        elif self.gain is None:
            raise ValueError('gain is not set')
        self.spk_hist = np.roll(self.spk_hist, 1)
        self.spk_hist[0] = spikes
        # dot product of spike history with the FIR kernel, de-normalized
        signal = np.sum(self.spk_hist*self.fir)*self.gain + self.shift
        return signal
"numpy.abs",
"numpy.sum",
"numpy.roll",
"numpy.zeros",
"numpy.mean",
"numpy.sign",
"numpy.sqrt"
] | [((3421, 3442), 'numpy.zeros', 'np.zeros', (['self.window'], {}), '(self.window)\n', (3429, 3442), True, 'import numpy as np\n'), ((5718, 5743), 'numpy.roll', 'np.roll', (['self.sig_hist', '(1)'], {}), '(self.sig_hist, 1)\n', (5725, 5743), True, 'import numpy as np\n'), ((6443, 6468), 'numpy.roll', 'np.roll', (['self.spk_hist', '(1)'], {}), '(self.spk_hist, 1)\n', (6450, 6468), True, 'import numpy as np\n'), ((3943, 3964), 'numpy.mean', 'np.mean', (['self.signals'], {}), '(self.signals)\n', (3950, 3964), True, 'import numpy as np\n'), ((5850, 5882), 'numpy.abs', 'np.abs', (['(self.sig_hist - self.fir)'], {}), '(self.sig_hist - self.fir)\n', (5856, 5882), True, 'import numpy as np\n'), ((5917, 5938), 'numpy.abs', 'np.abs', (['self.sig_hist'], {}), '(self.sig_hist)\n', (5923, 5938), True, 'import numpy as np\n'), ((6520, 6552), 'numpy.sum', 'np.sum', (['(self.spk_hist * self.fir)'], {}), '(self.spk_hist * self.fir)\n', (6526, 6552), True, 'import numpy as np\n'), ((1024, 1039), 'numpy.sqrt', 'np.sqrt', (['self.V'], {}), '(self.V)\n', (1031, 1039), True, 'import numpy as np\n'), ((1764, 1779), 'numpy.sign', 'np.sign', (['spikes'], {}), '(spikes)\n', (1771, 1779), True, 'import numpy as np\n'), ((3177, 3192), 'numpy.sign', 'np.sign', (['spikes'], {}), '(spikes)\n', (3184, 3192), True, 'import numpy as np\n'), ((3870, 3890), 'numpy.sum', 'np.sum', (['self.signals'], {}), '(self.signals)\n', (3876, 3890), True, 'import numpy as np\n'), ((4792, 4807), 'numpy.sign', 'np.sign', (['spikes'], {}), '(spikes)\n', (4799, 4807), True, 'import numpy as np\n')] |
import struct
from abc import abstractclassmethod
import numpy as np
import scipy.sparse as sparse
import h5py
def load_sparse_data(fname):
    """Read a custom binary sparse-volume file.

    Layout: 4-byte magic, three uint32 dimensions, an 8-byte element
    count ``nnz``, then ``nnz`` flat indices followed by ``nnz`` doubles.
    Returns ``(size, index, value)`` as tuples of unpacked numbers.
    """
    with open(fname, 'rb') as fd:
        # magic number identifying the file format
        magic = b''.join(struct.unpack('4c', fd.read(4)))
        if magic != b'\x00\x00\xae\xfd':
            raise RuntimeError("Filetype does not match expected for sparse data")
        # array dimensions
        size = struct.unpack('3I', fd.read(12))
        # number of stored (non-zero) elements
        (nnz,) = struct.unpack('L', fd.read(8))
        # flat indices, then the values themselves
        index = struct.unpack('%dL' % nnz, fd.read(8 * nnz))
        value = struct.unpack('%dd' % nnz, fd.read(8 * nnz))
    return size, index, value
def sparse2dense(size, index, value):
    """convert sparse data (COO) to dense array

    ``size`` is the (x, y, z) extent; ``index``/``value`` are parallel
    sequences of flat indices and their values.  Returns an ndarray of
    shape ``size[::-1]``.
    """
    # np.product was deprecated and removed in NumPy 2.0 -- use np.prod
    arr = np.zeros(np.prod(size))
    for idx, val in zip(index, value):
        if idx >= arr.size:
            # hack to fix off-by-one error in simulation code that allowed
            # one extra element to be stored (note: this stops the loop,
            # it does not skip single elements)
            break
        arr[idx] = val
    return arr.reshape(size[::-1])
class SparseMatrixBase():
    """Common interface for incrementally-built sparse matrices.

    Subclasses append one flattened volume per call to ``add_column`` and
    can export the accumulated data as scipy sparse matrices.
    """
    def __init__(self, outfile, *args, drop_thresh=1e-4, **kwargs):
        self.outfile = outfile  # backing file written by subclasses
        self.drop_thresh = drop_thresh  # relative magnitude below which values are dropped
        self.coo = None  # cached COO representation
    # NOTE(review): abstractclassmethod has been deprecated since Python 3.3
    # (use @classmethod + @abstractmethod), and because this class does not
    # use ABCMeta the abstractness is not actually enforced anyway.
    @abstractclassmethod
    def add_column(self, vol):
        # invalidate coo cache
        self.coo = None
        pass
    @abstractclassmethod
    def tocoo(self):
        pass
    def tocsc(self):
        """Accumulated matrix in CSC form."""
        return self.tocoo().tocsc()
    def tocsr(self):
        """Accumulated matrix in CSR form."""
        return self.tocoo().tocsr()
class SparseMatrixCOO(SparseMatrixBase):
    """COO sparse matrix accumulated column-by-column and buffered to an
    HDF5 file to bound runtime memory usage."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.data = [] # non-zero values
        self.rowi = [] # row indices
        self.coli = [] # col indices
        self.nrows = 0 # y-dimension (M)
        self.ncols = 0 # x-dimension (N)
        # NOTE(review): this limit is compared against len(self.data) (an
        # element count), so the flush point is ~358M entries rather than
        # 1GB of bytes as the comment implies.
        self._buf_limit = (1024.0)**3 * 1.0/3.0 # 1GB total bufsize (split between three arrays)
        self._chunksize = (10000,)
        self.initialized = False
    @classmethod
    def fromFile(cls, infile):
        """Alternate constructor: load a previously-written matrix file."""
        self = cls(infile)
        self._read_from_file()
        return self
    def add_column(self, vol):
        """Append one flattened volume as the next column.

        ``vol`` may be None (or all zeros) to append an empty column.
        """
        if not self.initialized:
            self._init_h5file()
        super().add_column(vol)
        if vol is None:
            self.ncols += 1
            return
        # flush data after reaching buf size limit
        if len(self.data) >= self._buf_limit:
            self._flush_buf_to_file()
        # flatten vol to column
        nrows = vol.size
        if self.nrows and self.nrows != nrows:
            raise RuntimeError("number of rows in column doesn't match existing columns")
        elif self.nrows == 0:
            # set column size for first column addition
            self.nrows = nrows
        if np.all(vol == 0):
            self.ncols += 1
            return
        data = np.ravel(vol).T
        # filter small values
        # NOTE(review): "data + amin(data)" looks like it was meant to be
        # "data - amin(data)" (shift to non-negative before normalizing) --
        # confirm against the simulation code.
        if self.drop_thresh is not None:
            tmp = data+np.amin(data)
            tmax = np.amax(tmp)
            if tmax != 0:
                tmp /= tmax
            data[tmp<self.drop_thresh] = 0.0
        # convert to sparse column
        row_indices = data.nonzero()[0].tolist()
        self.rowi.extend( row_indices )
        self.coli.extend( [self.ncols]*len(row_indices) )
        self.data.extend( data[row_indices].tolist() )
        self.ncols += 1
    def tocoo(self):
        """Return (and cache) the accumulated matrix as scipy COO."""
        if self.coo is not None:
            return self.coo
        if self.initialized:
            # need to write remainder of buffer first
            self.finish()
        self._read_from_file()
        self.coo = sparse.coo_matrix((self.data, (self.rowi, self.coli)), shape=(self.nrows, self.ncols))
        return self.coo
    def _init_h5file(self):
        """opens h5file and inits resizable, chunked datasets for incremental flushes to
        reduce runtime memory usage"""
        self.h5file = h5py.File(self.outfile, 'w')
        self._dsdata = self.h5file.create_dataset('data', shape=(0,), maxshape=(None,), chunks=self._chunksize, dtype='f4')
        self._dsrowi = self.h5file.create_dataset('i', shape=(0,), maxshape=(None,), chunks=self._chunksize, dtype='u4')
        self._dscoli = self.h5file.create_dataset('j', shape=(0,), maxshape=(None,), chunks=self._chunksize, dtype='u4')
        self._dsnrows = self.h5file.create_dataset('nrows', shape=(), dtype='u4')
        self._dsncols = self.h5file.create_dataset('ncols', shape=(), dtype='u4')
        self.initialized = True
    def _read_from_file(self):
        """Load the full COO arrays and dimensions back from the HDF5 file."""
        with h5py.File(self.outfile, 'r') as fd:
            self.data = fd['data'][()]
            self.rowi = fd['i'][()]
            self.coli = fd['j'][()]
            self.nrows = fd['nrows'][()]
            self.ncols = fd['ncols'][()]
    def _flush_buf_to_file(self):
        """Append the in-memory buffers to the HDF5 datasets, then clear them."""
        assert len(self.data) == len(self.rowi)
        assert len(self.data) == len(self.coli)
        if len(self.data):
            # resize datasets in h5file
            self._dsdata.resize((self._dsdata.len()+len(self.data), ))
            self._dsrowi.resize((self._dsrowi.len()+len(self.data), ))
            self._dscoli.resize((self._dscoli.len()+len(self.data), ))
            # copy memory buffer to h5file
            self._dsdata[-len(self.data):] = self.data
            self._dsrowi[-len(self.data):] = self.rowi
            self._dscoli[-len(self.data):] = self.coli
        # reset memory buffers
        self.data = []
        self.rowi = []
        self.coli = []
    def _finalize_h5file(self):
        """Flush the buffers, write metadata and close the HDF5 file."""
        # check if file is open and writable
        # NOTE(review): self.h5file is only assigned in _init_h5file(), so
        # calling finish() before any add_column() raises AttributeError.
        if self.h5file:
            self._flush_buf_to_file()
            self._dsnrows[()] = self.nrows
            self._dsncols[()] = self.ncols
            self.h5file.create_dataset('sparse_threshold', shape=(), dtype='f4', data=self.drop_thresh if self.drop_thresh is not None else 0.0)
            self.h5file.close()
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.finish()
    def finish(self):
        """Flush remaining buffers and close the backing file."""
        self._finalize_h5file()
| [
"h5py.File",
"numpy.amin",
"numpy.ravel",
"numpy.amax",
"scipy.sparse.coo_matrix",
"numpy.product",
"numpy.all"
] | [((855, 871), 'numpy.product', 'np.product', (['size'], {}), '(size)\n', (865, 871), True, 'import numpy as np\n'), ((2865, 2881), 'numpy.all', 'np.all', (['(vol == 0)'], {}), '(vol == 0)\n', (2871, 2881), True, 'import numpy as np\n'), ((3739, 3829), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(self.data, (self.rowi, self.coli))'], {'shape': '(self.nrows, self.ncols)'}), '((self.data, (self.rowi, self.coli)), shape=(self.nrows,\n self.ncols))\n', (3756, 3829), True, 'import scipy.sparse as sparse\n'), ((4029, 4057), 'h5py.File', 'h5py.File', (['self.outfile', '"""w"""'], {}), "(self.outfile, 'w')\n", (4038, 4057), False, 'import h5py\n'), ((2946, 2959), 'numpy.ravel', 'np.ravel', (['vol'], {}), '(vol)\n', (2954, 2959), True, 'import numpy as np\n'), ((3090, 3102), 'numpy.amax', 'np.amax', (['tmp'], {}), '(tmp)\n', (3097, 3102), True, 'import numpy as np\n'), ((4677, 4705), 'h5py.File', 'h5py.File', (['self.outfile', '"""r"""'], {}), "(self.outfile, 'r')\n", (4686, 4705), False, 'import h5py\n'), ((3057, 3070), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (3064, 3070), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Slot/VentilationPolar.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Slot/VentilationPolar
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from copy import deepcopy
from .Hole import Hole
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Slot.VentilationPolar.build_geometry import build_geometry
except ImportError as error:
build_geometry = error
try:
from ..Methods.Slot.VentilationPolar.check import check
except ImportError as error:
check = error
try:
from ..Methods.Slot.VentilationPolar.comp_radius import comp_radius
except ImportError as error:
comp_radius = error
try:
from ..Methods.Slot.VentilationPolar.comp_surface import comp_surface
except ImportError as error:
comp_surface = error
try:
from ..Methods.Slot.VentilationPolar.get_center import get_center
except ImportError as error:
get_center = error
try:
from ..Methods.Slot.VentilationPolar._comp_point_coordinate import (
_comp_point_coordinate,
)
except ImportError as error:
_comp_point_coordinate = error
try:
from ..Methods.Slot.VentilationPolar.plot_schematics import plot_schematics
except ImportError as error:
plot_schematics = error
from numpy import isnan
from ._check import InitUnKnowClassError
class VentilationPolar(Hole):
    """Polar axial ventilation duct"""
    # NOTE: per the file header this class is generated from a CSV class
    # description -- manual changes will be lost on regeneration.
    VERSION = 1
    # Check ImportError to remove unnecessary dependencies in unused method
    # Each method import that failed at module load is replaced by a
    # property that raises a descriptive ImportError on access.
    # cf Methods.Slot.VentilationPolar.build_geometry
    if isinstance(build_geometry, ImportError):
        build_geometry = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method build_geometry: "
                    + str(build_geometry)
                )
            )
        )
    else:
        build_geometry = build_geometry
    # cf Methods.Slot.VentilationPolar.check
    if isinstance(check, ImportError):
        check = property(
            fget=lambda x: raise_(
                ImportError("Can't use VentilationPolar method check: " + str(check))
            )
        )
    else:
        check = check
    # cf Methods.Slot.VentilationPolar.comp_radius
    if isinstance(comp_radius, ImportError):
        comp_radius = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method comp_radius: " + str(comp_radius)
                )
            )
        )
    else:
        comp_radius = comp_radius
    # cf Methods.Slot.VentilationPolar.comp_surface
    if isinstance(comp_surface, ImportError):
        comp_surface = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method comp_surface: "
                    + str(comp_surface)
                )
            )
        )
    else:
        comp_surface = comp_surface
    # cf Methods.Slot.VentilationPolar.get_center
    if isinstance(get_center, ImportError):
        get_center = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method get_center: " + str(get_center)
                )
            )
        )
    else:
        get_center = get_center
    # cf Methods.Slot.VentilationPolar._comp_point_coordinate
    if isinstance(_comp_point_coordinate, ImportError):
        _comp_point_coordinate = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method _comp_point_coordinate: "
                    + str(_comp_point_coordinate)
                )
            )
        )
    else:
        _comp_point_coordinate = _comp_point_coordinate
    # cf Methods.Slot.VentilationPolar.plot_schematics
    if isinstance(plot_schematics, ImportError):
        plot_schematics = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use VentilationPolar method plot_schematics: "
                    + str(plot_schematics)
                )
            )
        )
    else:
        plot_schematics = plot_schematics
    # generic save method is available in all object
    save = save
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self,
        D0=1,
        H0=1,
        W1=1,
        Zh=36,
        mat_void=-1,
        magnetization_dict_offset=None,
        Alpha0=0,
        init_dict=None,
        init_str=None,
    ):
        """Constructor of the class. Can be use in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for pyleecan type, -1 will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with property names as keys
        - __init__ (init_str = s) s must be a string
            s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None:  # Load from a file
            init_dict = load_init_dict(init_str)[1]
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "D0" in list(init_dict.keys()):
                D0 = init_dict["D0"]
            if "H0" in list(init_dict.keys()):
                H0 = init_dict["H0"]
            if "W1" in list(init_dict.keys()):
                W1 = init_dict["W1"]
            if "Zh" in list(init_dict.keys()):
                Zh = init_dict["Zh"]
            if "mat_void" in list(init_dict.keys()):
                mat_void = init_dict["mat_void"]
            if "magnetization_dict_offset" in list(init_dict.keys()):
                magnetization_dict_offset = init_dict["magnetization_dict_offset"]
            if "Alpha0" in list(init_dict.keys()):
                Alpha0 = init_dict["Alpha0"]
        # Set the properties (value check and convertion are done in setter)
        self.D0 = D0
        self.H0 = H0
        self.W1 = W1
        # Call Hole init
        super(VentilationPolar, self).__init__(
            Zh=Zh,
            mat_void=mat_void,
            magnetization_dict_offset=magnetization_dict_offset,
            Alpha0=Alpha0,
        )
        # The class is frozen (in Hole init), for now it's impossible to
        # add new properties
    def __str__(self):
        """Convert this object in a readeable string (for print)"""
        VentilationPolar_str = ""
        # Get the properties inherited from Hole
        VentilationPolar_str += super(VentilationPolar, self).__str__()
        VentilationPolar_str += "D0 = " + str(self.D0) + linesep
        VentilationPolar_str += "H0 = " + str(self.H0) + linesep
        VentilationPolar_str += "W1 = " + str(self.W1) + linesep
        return VentilationPolar_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        # Check the properties inherited from Hole
        if not super(VentilationPolar, self).__eq__(other):
            return False
        if other.D0 != self.D0:
            return False
        if other.H0 != self.H0:
            return False
        if other.W1 != self.W1:
            return False
        return True
    def compare(self, other, name="self", ignore_list=None, is_add_value=False):
        """Compare two objects and return list of differences"""
        if ignore_list is None:
            ignore_list = list()
        if type(other) != type(self):
            return ["type(" + name + ")"]
        diff_list = list()
        # Check the properties inherited from Hole
        diff_list.extend(
            super(VentilationPolar, self).compare(
                other, name=name, ignore_list=ignore_list, is_add_value=is_add_value
            )
        )
        # Two NaN values are treated as equal for each float property below.
        if (
            other._D0 is not None
            and self._D0 is not None
            and isnan(other._D0)
            and isnan(self._D0)
        ):
            pass
        elif other._D0 != self._D0:
            if is_add_value:
                val_str = " (self=" + str(self._D0) + ", other=" + str(other._D0) + ")"
                diff_list.append(name + ".D0" + val_str)
            else:
                diff_list.append(name + ".D0")
        if (
            other._H0 is not None
            and self._H0 is not None
            and isnan(other._H0)
            and isnan(self._H0)
        ):
            pass
        elif other._H0 != self._H0:
            if is_add_value:
                val_str = " (self=" + str(self._H0) + ", other=" + str(other._H0) + ")"
                diff_list.append(name + ".H0" + val_str)
            else:
                diff_list.append(name + ".H0")
        if (
            other._W1 is not None
            and self._W1 is not None
            and isnan(other._W1)
            and isnan(self._W1)
        ):
            pass
        elif other._W1 != self._W1:
            if is_add_value:
                val_str = " (self=" + str(self._W1) + ", other=" + str(other._W1) + ")"
                diff_list.append(name + ".W1" + val_str)
            else:
                diff_list.append(name + ".W1")
        # Filter ignore differences
        diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
        return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""
        S = 0  # Full size of the object
        # Get size of the properties inherited from Hole
        S += super(VentilationPolar, self).__sizeof__()
        S += getsizeof(self.D0)
        S += getsizeof(self.H0)
        S += getsizeof(self.W1)
        return S
    def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
        """
        Convert this object in a json serializable dict (can be use in __init__).
        type_handle_ndarray: int
            How to handle ndarray (0: tolist, 1: copy, 2: nothing)
        keep_function : bool
            True to keep the function object, else return str
        Optional keyword input parameter is for internal use only
        and may prevent json serializability.
        """
        # Get the properties inherited from Hole
        VentilationPolar_dict = super(VentilationPolar, self).as_dict(
            type_handle_ndarray=type_handle_ndarray,
            keep_function=keep_function,
            **kwargs
        )
        VentilationPolar_dict["D0"] = self.D0
        VentilationPolar_dict["H0"] = self.H0
        VentilationPolar_dict["W1"] = self.W1
        # The class name is added to the dict for deserialisation purpose
        # Overwrite the mother class name
        VentilationPolar_dict["__class__"] = "VentilationPolar"
        return VentilationPolar_dict
    def copy(self):
        """Creates a deepcopy of the object"""
        # Handle deepcopy of all the properties
        D0_val = self.D0
        H0_val = self.H0
        W1_val = self.W1
        Zh_val = self.Zh
        if self.mat_void is None:
            mat_void_val = None
        else:
            mat_void_val = self.mat_void.copy()
        if self.magnetization_dict_offset is None:
            magnetization_dict_offset_val = None
        else:
            magnetization_dict_offset_val = self.magnetization_dict_offset.copy()
        Alpha0_val = self.Alpha0
        # Creates new object of the same type with the copied properties
        obj_copy = type(self)(
            D0=D0_val,
            H0=H0_val,
            W1=W1_val,
            Zh=Zh_val,
            mat_void=mat_void_val,
            magnetization_dict_offset=magnetization_dict_offset_val,
            Alpha0=Alpha0_val,
        )
        return obj_copy
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.D0 = None
        self.H0 = None
        self.W1 = None
        # Set to None the properties inherited from Hole
        super(VentilationPolar, self)._set_None()
    def _get_D0(self):
        """getter of D0"""
        return self._D0
    def _set_D0(self, value):
        """setter of D0"""
        check_var("D0", value, "float", Vmin=0)
        self._D0 = value
    D0 = property(
        fget=_get_D0,
        fset=_set_D0,
        doc=u"""Height of the hole
        :Type: float
        :min: 0
        """,
    )
    def _get_H0(self):
        """getter of H0"""
        return self._H0
    def _set_H0(self, value):
        """setter of H0"""
        check_var("H0", value, "float", Vmin=0)
        self._H0 = value
    H0 = property(
        fget=_get_H0,
        fset=_set_H0,
        doc=u"""Radius of the bottom of Hole
        :Type: float
        :min: 0
        """,
    )
    def _get_W1(self):
        """getter of W1"""
        return self._W1
    def _set_W1(self, value):
        """setter of W1"""
        check_var("W1", value, "float", Vmin=0, Vmax=6.29)
        self._W1 = value
    W1 = property(
        fget=_get_W1,
        fset=_set_W1,
        doc=u"""Hole angular width
        :Type: float
        :min: 0
        :max: 6.29
        """,
    )
| [
"numpy.isnan",
"sys.getsizeof"
] | [((10487, 10505), 'sys.getsizeof', 'getsizeof', (['self.D0'], {}), '(self.D0)\n', (10496, 10505), False, 'from sys import getsizeof\n'), ((10520, 10538), 'sys.getsizeof', 'getsizeof', (['self.H0'], {}), '(self.H0)\n', (10529, 10538), False, 'from sys import getsizeof\n'), ((10553, 10571), 'sys.getsizeof', 'getsizeof', (['self.W1'], {}), '(self.W1)\n', (10562, 10571), False, 'from sys import getsizeof\n'), ((8771, 8787), 'numpy.isnan', 'isnan', (['other._D0'], {}), '(other._D0)\n', (8776, 8787), False, 'from numpy import isnan\n'), ((8805, 8820), 'numpy.isnan', 'isnan', (['self._D0'], {}), '(self._D0)\n', (8810, 8820), False, 'from numpy import isnan\n'), ((9236, 9252), 'numpy.isnan', 'isnan', (['other._H0'], {}), '(other._H0)\n', (9241, 9252), False, 'from numpy import isnan\n'), ((9270, 9285), 'numpy.isnan', 'isnan', (['self._H0'], {}), '(self._H0)\n', (9275, 9285), False, 'from numpy import isnan\n'), ((9701, 9717), 'numpy.isnan', 'isnan', (['other._W1'], {}), '(other._W1)\n', (9706, 9717), False, 'from numpy import isnan\n'), ((9735, 9750), 'numpy.isnan', 'isnan', (['self._W1'], {}), '(self._W1)\n', (9740, 9750), False, 'from numpy import isnan\n')] |
import os
import numpy as np
import pandas as pd
data_path = ""
data_dir = data_path+os.sep+"GeneNetworks"
HiNTfile = data_dir+os.sep+"HomoSapiens_htb_hq.txt"
def generate_HiNT_adjacency():
    """Build a gene-by-gene adjacency matrix from the HiNT interaction
    table (identity on the diagonal, 1 for each listed A->B interaction)
    and write it to disk as space-separated text."""
    interactions = pd.read_csv(HiNTfile, sep="\t")
    genes = set(interactions['Gene_A'].tolist()) | set(interactions['Gene_B'].tolist())
    identity = np.zeros((len(genes), len(genes)))
    np.fill_diagonal(identity, 1)
    adjacency = pd.DataFrame(identity, index=genes, columns=genes)
    # mark every listed interaction (zip is much faster than iterrows)
    for gene_a, gene_b in zip(interactions['Gene_A'], interactions['Gene_B']):
        adjacency.at[gene_a, gene_b] = 1
    adjacency.to_csv(data_dir+os.sep+"HomoSapients_htb_hq_adj.txt", sep=" ")
def generate_pathway_adjacency(gmt_file):
    """Parse a GMT gene-set file and write a binary gene-by-pathway
    membership matrix next to it (output suffix: ``adj.txt``)."""
    pathways = {}
    all_genes = set()
    with open(gmt_file, 'r') as handle:
        for raw in handle:
            # GMT line: name <tab> description <tab> gene1 <tab> gene2 ...
            fields = raw.strip().split('\t')
            pathways[fields[0]] = fields[2:]
            all_genes.update(fields[2:])
    genes = sorted(all_genes)
    table = pd.DataFrame(
        np.zeros((len(genes), len(pathways))),
        index=genes,
        columns=list(pathways),
    )
    for name, members in pathways.items():
        table.loc[members, name] = 1
    table.to_csv(gmt_file.replace('.gmt', '') + 'adj.txt', sep=" ")
def load_adjacency():
    """Read the previously generated HiNT adjacency matrix back into a
    DataFrame (genes as both index and columns)."""
    adj_df = pd.read_csv(data_dir+os.sep+"HomoSapients_htb_hq_adj.txt",
                         sep=" ", index_col=0)
    return adj_df
if __name__ == "__main__":
    # Build pathway membership matrices for both GMT collections when the
    # module is run as a script.
    gmt_file = data_dir+os.sep+"c5.go.v7.2.symbols.gmt"
    generate_pathway_adjacency(gmt_file)
    gmt_file = data_dir+os.sep+"c2.cp.v7.2.symbols.gmt"
    generate_pathway_adjacency(gmt_file)
| [
"pandas.read_csv",
"numpy.fill_diagonal",
"pandas.DataFrame"
] | [((206, 237), 'pandas.read_csv', 'pd.read_csv', (['HiNTfile'], {'sep': '"""\t"""'}), "(HiNTfile, sep='\\t')\n", (217, 237), True, 'import pandas as pd\n'), ((407, 435), 'numpy.fill_diagonal', 'np.fill_diagonal', (['adj_mat', '(1)'], {}), '(adj_mat, 1)\n', (423, 435), True, 'import numpy as np\n'), ((449, 510), 'pandas.DataFrame', 'pd.DataFrame', (['adj_mat'], {'index': 'total_genes', 'columns': 'total_genes'}), '(adj_mat, index=total_genes, columns=total_genes)\n', (461, 510), True, 'import pandas as pd\n'), ((1496, 1584), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + os.sep + 'HomoSapients_htb_hq_adj.txt')"], {'sep': '""" """', 'index_col': '(0)'}), "(data_dir + os.sep + 'HomoSapients_htb_hq_adj.txt', sep=' ',\n index_col=0)\n", (1507, 1584), True, 'import pandas as pd\n')] |
# Test of subplot plotting direction
# Plots from: https://matplotlib.org/3.2.1/gallery/images_contours_and_fields/plot_streamplot.html#sphx-glr-gallery-images-contours-and-fields-plot-streamplot-py
import os
import numpy as np
from figpager import FigPager
# Reference:
# https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/subplots_demo.html
def test_main():
    """Fill two 2x2 pages, one per subplot fill direction, and write the
    result to ./tests/out_2.pdf."""
    # Initalize with a page size, grid shape and an output file.
    outfile = "./tests/out_2.pdf"
    fp = FigPager(
        "letter", 2, 2, outfile=outfile, orientation="portrait", overwrite=True,
    )
    # Some example data to display
    x = np.linspace(0, 2 * np.pi, 400)
    y = np.sin(x ** 2)
    # (curve, optional matplotlib format) for the four panels of each page
    panels = [(y, None), (y, "tab:orange"), (-y, "tab:green"), (-y, "tab:red")]
    for page in range(2):
        direction = "top-to-bottom" if page > 0 else "left-to-right"
        fp.add_page()
        for num, (curve, fmt) in enumerate(panels, start=1):
            ax = fp.add_subplot(direction=direction)
            if fmt is None:
                ax.plot(x, curve)
            else:
                ax.plot(x, curve, fmt)
            ax.set_title("Plot {} {}".format(num, direction))
    print("outfile: " + outfile)
    # close the figure
    fp.close()
    print("--Done!--")
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_main()
| [
"numpy.linspace",
"numpy.sin",
"figpager.FigPager"
] | [((620, 706), 'figpager.FigPager', 'FigPager', (['"""letter"""', '(2)', '(2)'], {'outfile': 'outfile', 'orientation': '"""portrait"""', 'overwrite': '(True)'}), "('letter', 2, 2, outfile=outfile, orientation='portrait', overwrite\n =True)\n", (628, 706), False, 'from figpager import FigPager\n'), ((764, 794), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(400)'], {}), '(0, 2 * np.pi, 400)\n', (775, 794), True, 'import numpy as np\n'), ((804, 818), 'numpy.sin', 'np.sin', (['(x ** 2)'], {}), '(x ** 2)\n', (810, 818), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Purpose:
# Status: Developing
# Dependence: Python 3.6
# Version: ALPHA
# Created Date: 10:21h, 20/12/2018
# Usage:
#
#
# Author: <NAME>, https://github.com/SeisPider
# Email: <EMAIL>
# Copyright (C) 2017-2018 <NAME>
# -------------------------------------------------------------------------------
def import_measurement(logfile):
    """Load polarization measurements from *logfile* and keep only the
    high-quality picks: finite angle, weight >= 0.7 and (after converting
    to degrees) angle <= 60.

    Returns (angles_deg, ray_parameters, weights) as numpy arrays.
    """
    raw = np.loadtxt(logfile, skiprows=2, usecols=(1, 2, 3, 4))
    ang, p, wt = raw[:, 0], raw[:, 1], raw[:, 2]
    # discard rows without an angle measurement
    keep = ~np.isnan(ang)
    ang, p, wt = ang[keep], p[keep], wt[keep]
    # keep well-constrained picks only (weight >= 0.7)
    keep = wt >= 0.7
    ang, p, wt = np.rad2deg(ang[keep]), p[keep], wt[keep]
    # restrict to polarization angles no larger than 60 degrees
    keep = ang <= 60
    return ang[keep], p[keep], wt[keep]
def syn_ang(meas_p, vs):
    """compute synthetic angle based on given vs and ray
    parameter

    The predicted polarization angle is 2 * asin(vs * p), in degrees.
    """
    return np.degrees(2.0 * np.arcsin(meas_p * vs))
def misfit(vs, meas_p, meas_ang, meas_wt):
    """self-defined misfit function

    Weighted mean of the squared angle residuals for a trial vs.
    """
    residual = syn_ang(meas_p, vs) - meas_ang
    return (meas_wt * residual ** 2).sum() / meas_wt.sum()
def inversion(staid, meas_p, meas_ang, meas_wt, save=True, bootstrap_num=5000):
    """perform inversion and give uncertainty of vs

    Minimizes the weighted misfit over Vs in [0, 4.5] km/s, rejects
    2-sigma residual outliers once and refits, then bootstraps the
    measurements to estimate the uncertainty.  When ``save`` is True a
    diagnostic figure is written to ./MOD/<staid>.png.

    Returns (bootstrap_mean_vs, bootstrap_std_vs, measurement_num).
    """
    # popt, pcov = curve_fit(syn_ang, meas_p, meas_ang, bounds=(0, 4))
    # initial bounded fit starting from 3 km/s
    m = minimize(
        misfit, 3, args=(meas_p, meas_ang, meas_wt), method="SLSQP", bounds=((0, 4.5),)
    )
    mean_vs = m.x[0]
    # Filter the outliers
    residuals = syn_ang(meas_p, mean_vs) - meas_ang
    meanresidual, stdresidual = residuals.mean(), residuals.std()
    msk = np.abs(residuals - meanresidual) <= 2 * stdresidual
    meas_p, meas_ang, meas_wt = meas_p[msk], meas_ang[msk], meas_wt[msk]
    # refit on the cleaned measurements, warm-started at the first fit
    m = minimize(
        misfit,
        mean_vs,
        args=(meas_p, meas_ang, meas_wt),
        method="SLSQP",
        bounds=((0, 4.5),),
    )
    mean_vs = m.x[0]
    # use bootstrap to estimate uncertainty or not
    vss = np.zeros(bootstrap_num)
    measurement_num = len(meas_p)
    # NOTE(review): p, angle and weight are resampled independently, which
    # breaks the pairing of each measurement's three values -- confirm this
    # is intended (a paired bootstrap would resample row indices once).
    for idx in range(bootstrap_num):
        sub_meas_p = choice(meas_p, measurement_num)
        sub_meas_ang = choice(meas_ang, measurement_num)
        sub_meas_wt = choice(meas_wt, measurement_num)
        m = minimize(
            misfit,
            mean_vs,
            args=(sub_meas_p, sub_meas_ang, sub_meas_wt),
            method="SLSQP",
            bounds=((0, 4.5),),
        )
        vss[idx] = m.x[0]
    if save:
        # depict data
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 9))
        # depict uncertainty
        axes[0].hist(vss, bins=40, density=True, facecolor="g", alpha=0.5)
        axes[0].vlines(
            [mean_vs],
            0,
            1,
            transform=axes[0].get_xaxis_transform(),
            colors="r",
            label="Inverted_Mean",
        )
        boot_mean = vss.mean()
        boot_unc = vss.std()
        axes[0].vlines(
            [boot_mean, boot_mean + boot_unc * 3, boot_mean - boot_unc * 3],
            0,
            1,
            transform=axes[0].get_xaxis_transform(),
            colors="b",
            label="Boot_Mean_Unc",
        )
        axes[0].set_xlabel("Vs (km/s)")
        axes[0].set_ylabel("Density")
        axes[0].legend()
        # scatter of the (cleaned) measurements colored by weight, with
        # the prediction of the *last* bootstrap fit (m.x) overlaid
        cax = axes[1].scatter(meas_p, meas_ang, c=meas_wt)
        xarray = np.linspace(meas_p.min(), meas_p.max(), 100)
        axes[1].plot(xarray, syn_ang(xarray, m.x), "r-")
        cbar = fig.colorbar(cax)
        axes[1].set_xlabel("Ray parameter")
        axes[1].set_ylabel("Polarization angle (deg.)")
        axes[1].set_title("Polarization Analysis ({:.3f} km/s)".format(boot_mean))
        axes[1].set_ylim(-40, 90)
        fig.savefig("./MOD/{}.png".format(staid))
        plt.close()
    print("Suc. invert {}".format(staid))
    return vss.mean(), vss.std(), measurement_num
def one_station(logfile):
    """Invert the Vs of a single station from its polarization log file.

    Parameters
    ==========
    logfile: str
        path of log file

    Returns
    =======
    str or None
        "<staid> <vs> <std> <num>" on success, None when anything fails.
    """
    # Fix: derive the station id *before* the try-block.  Previously it was
    # computed inside the try, so an early failure left `staid` unbound and
    # the except handler itself raised NameError.
    staid = ".".join(basename(logfile).split(".")[0:2])
    try:
        meas_ang, meas_p, meas_wt = import_measurement(logfile)
        vs, std, num = inversion(staid, meas_p, meas_ang, meas_wt, save=True)
        return "{} {:.5f} {:.5f} {}".format(staid, vs, std, num)
    except Exception as err:
        # best-effort: report and return None so the pool keeps running
        print("Unhandled Error [{}] {}".format(err, staid))
if __name__ == "__main__":
    # NOTE(review): all third-party imports live inside this guard, so the
    # functions defined above only resolve np/minimize/plt/choice when the
    # file is run as a script -- importing it as a module would leave them
    # broken.
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.optimize import minimize
    from glob import glob
    from os.path import basename
    from numpy.random import choice
    import multiprocessing as mp
    plt.switch_backend("agg")  # headless backend for batch figure output
    NB_PROCESSES = 40
    logfiles = glob("./POL/*.POL")
    msg = ["#staid vs(km/s) vs_unc(km/s) measurement_num"]
    # one worker call per log file; each returns a result line or None
    pool = mp.Pool(NB_PROCESSES)
    msgs = pool.starmap(one_station, zip(logfiles))
    msgs = msg + msgs
    outmsg = [x for x in msgs if x is not None]
    # export to inverted log file
    with open("./MOD/test.csv", "w") as f:
        f.write("\n".join(outmsg))
| [
"matplotlib.pyplot.switch_backend",
"numpy.random.choice",
"scipy.optimize.minimize",
"numpy.abs",
"os.path.basename",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.arcsin",
"numpy.isnan",
"numpy.rad2deg",
"multiprocessing.Pool",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.subplots... | [((559, 612), 'numpy.loadtxt', 'np.loadtxt', (['logfile'], {'skiprows': '(2)', 'usecols': '(1, 2, 3, 4)'}), '(logfile, skiprows=2, usecols=(1, 2, 3, 4))\n', (569, 612), True, 'import numpy as np\n'), ((1690, 1783), 'scipy.optimize.minimize', 'minimize', (['misfit', '(3)'], {'args': '(meas_p, meas_ang, meas_wt)', 'method': '"""SLSQP"""', 'bounds': '((0, 4.5),)'}), "(misfit, 3, args=(meas_p, meas_ang, meas_wt), method='SLSQP',\n bounds=((0, 4.5),))\n", (1698, 1783), False, 'from scipy.optimize import minimize\n'), ((2104, 2203), 'scipy.optimize.minimize', 'minimize', (['misfit', 'mean_vs'], {'args': '(meas_p, meas_ang, meas_wt)', 'method': '"""SLSQP"""', 'bounds': '((0, 4.5),)'}), "(misfit, mean_vs, args=(meas_p, meas_ang, meas_wt), method='SLSQP',\n bounds=((0, 4.5),))\n", (2112, 2203), False, 'from scipy.optimize import minimize\n'), ((2330, 2353), 'numpy.zeros', 'np.zeros', (['bootstrap_num'], {}), '(bootstrap_num)\n', (2338, 2353), True, 'import numpy as np\n'), ((5042, 5067), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (5060, 5067), True, 'import matplotlib.pyplot as plt\n'), ((5105, 5124), 'glob.glob', 'glob', (['"""./POL/*.POL"""'], {}), "('./POL/*.POL')\n", (5109, 5124), False, 'from glob import glob\n'), ((5195, 5216), 'multiprocessing.Pool', 'mp.Pool', (['NB_PROCESSES'], {}), '(NB_PROCESSES)\n', (5202, 5216), True, 'import multiprocessing as mp\n'), ((676, 689), 'numpy.isnan', 'np.isnan', (['ang'], {}), '(ang)\n', (684, 689), True, 'import numpy as np\n'), ((843, 867), 'numpy.rad2deg', 'np.rad2deg', (['eff_ang[msk]'], {}), '(eff_ang[msk])\n', (853, 867), True, 'import numpy as np\n'), ((1970, 2002), 'numpy.abs', 'np.abs', (['(residuals - meanresidual)'], {}), '(residuals - meanresidual)\n', (1976, 2002), True, 'import numpy as np\n'), ((2446, 2477), 'numpy.random.choice', 'choice', (['meas_p', 'measurement_num'], {}), '(meas_p, measurement_num)\n', (2452, 2477), False, 'from 
numpy.random import choice\n'), ((2501, 2534), 'numpy.random.choice', 'choice', (['meas_ang', 'measurement_num'], {}), '(meas_ang, measurement_num)\n', (2507, 2534), False, 'from numpy.random import choice\n'), ((2557, 2589), 'numpy.random.choice', 'choice', (['meas_wt', 'measurement_num'], {}), '(meas_wt, measurement_num)\n', (2563, 2589), False, 'from numpy.random import choice\n'), ((2602, 2713), 'scipy.optimize.minimize', 'minimize', (['misfit', 'mean_vs'], {'args': '(sub_meas_p, sub_meas_ang, sub_meas_wt)', 'method': '"""SLSQP"""', 'bounds': '((0, 4.5),)'}), "(misfit, mean_vs, args=(sub_meas_p, sub_meas_ang, sub_meas_wt),\n method='SLSQP', bounds=((0, 4.5),))\n", (2610, 2713), False, 'from scipy.optimize import minimize\n'), ((2862, 2909), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(18, 9)'}), '(nrows=1, ncols=2, figsize=(18, 9))\n', (2874, 2909), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4126), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4124, 4126), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1235), 'numpy.arcsin', 'np.arcsin', (['(vs * meas_p)'], {}), '(vs * meas_p)\n', (1222, 1235), True, 'import numpy as np\n'), ((4432, 4449), 'os.path.basename', 'basename', (['logfile'], {}), '(logfile)\n', (4440, 4449), False, 'from os.path import basename\n')] |
import numpy as np
from pymoo.model.individual import Individual
def interleaving_args(*args, kwargs=None):
    """Fold interleaved key/value positional arguments into a dict.

    Example: ("X", x, "F", f) -> {"X": x, "F": f}. When `kwargs` is
    supplied, the pairs are inserted into it in place and it is returned.

    Raises:
        Exception: if an odd number of positional arguments is given.
    """
    if len(args) % 2 != 0:
        raise Exception(f"Even number of arguments are required but {len(args)} arguments were provided.")
    out = kwargs if kwargs is not None else {}
    for key, value in zip(args[::2], args[1::2]):
        out[key] = value
    return out
class Population(np.ndarray):
    """A collection of Individual objects stored as a 1-D numpy object array.

    Subclassing ndarray gives slicing/masking for free while each element
    remains a full Individual instance.
    """

    def __new__(cls, n_individuals=0):
        # `dtype=cls` appears to coerce to object dtype so entries can hold
        # arbitrary Individual instances — TODO confirm; `view(cls)` keeps
        # the Population subclass type on the resulting array.
        obj = super(Population, cls).__new__(cls, n_individuals, dtype=cls).view(cls)
        for i in range(n_individuals):
            obj[i] = Individual()
        return obj

    def copy(self, deep=False):
        """Return a new Population whose individuals are copies of these (deep if requested)."""
        pop = Population(n_individuals=len(self))
        for i in range(len(self)):
            pop[i] = self[i].copy(deep=deep)
        return pop

    def has(self, key):
        """True if every individual in the population has attribute `key`."""
        return all([ind.has(key) for ind in self])

    def collect(self, func, to_numpy=True):
        """Apply `func` to each individual and gather the results (as ndarray by default)."""
        val = []
        for i in range(len(self)):
            val.append(func(self[i]))
        if to_numpy:
            val = np.array(val)
        return val

    def set(self, *args, **kwargs):
        """Set attributes on every individual.

        Accepts interleaved positional pairs ("key", values, ...) and/or
        keyword arguments. Iterable values (non-string, matching the
        population length) are distributed element-wise; scalars are
        broadcast to all individuals. Returns self (None if empty).
        """
        # if population is empty just return
        if self.size == 0:
            return

        # done for the old interface with the interleaving variable definition
        kwargs = interleaving_args(*args, kwargs=kwargs)

        # for each entry in the dictionary set it to each individual
        for key, values in kwargs.items():
            # strings are treated as scalars even though they have __len__
            is_iterable = hasattr(values, '__len__') and not isinstance(values, str)

            if is_iterable and len(values) != len(self):
                raise Exception("Population Set Attribute Error: Number of values and population size do not match!")

            for i in range(len(self)):
                val = values[i] if is_iterable else values
                self[i].set(key, val)

        return self

    def get(self, *args, to_numpy=True):
        """Gather one or more attributes from every individual.

        Returns a single array/list for one key, otherwise a tuple of
        arrays/lists, in the order the keys were given.
        """
        val = {}
        for c in args:
            val[c] = []

        # for each individual
        for i in range(len(self)):
            # for each argument
            for c in args:
                val[c].append(self[i].get(c))

        # convert the results to a list
        res = [val[c] for c in args]

        # to numpy array if desired - default true
        if to_numpy:
            res = [np.array(e) for e in res]

        # return as tuple or single value
        if len(args) == 1:
            return res[0]
        else:
            return tuple(res)

    def __deepcopy__(self, memo):
        # deepcopy of the array delegates to the element-wise deep copy above
        return self.copy(deep=True)

    @classmethod
    def merge(cls, a, b):
        """Concatenate two populations (each input may also be an ndarray or Individual)."""
        a, b = pop_from_array_or_individual(a), pop_from_array_or_individual(b)

        if len(a) == 0:
            return b
        elif len(b) == 0:
            return a
        else:
            obj = np.concatenate([a, b]).view(Population)
            return obj

    @classmethod
    def create(cls, *args):
        """Build one Population by concatenating any mix of arrays/individuals/populations."""
        pop = np.concatenate([pop_from_array_or_individual(arg) for arg in args]).view(Population)
        return pop

    @classmethod
    def new(cls, *args, **kwargs):
        """Create a Population either by size (single int arg) or from attribute data.

        With interleaved/keyword attribute values, all iterable values must
        share one length n; a Population of n individuals is created and the
        attributes are assigned via `set`.
        """
        if len(args) == 1:
            return Population(n_individuals=args[0], **kwargs)
        else:
            kwargs = interleaving_args(*args, kwargs=kwargs)
            iterable = [v for _, v in kwargs.items() if hasattr(v, '__len__') and not isinstance(v, str)]

            if len(iterable) == 0:
                return Population()
            else:
                # all iterable inputs must agree on a single length
                n = np.unique(np.array([len(v) for v in iterable]))
                if len(n) == 1:
                    n = n[0]
                    pop = Population(n_individuals=n)
                    pop.set(*args, **kwargs)
                    return pop
                else:
                    raise Exception(f"Population.new needs to be called with same-sized inputs, but the sizes are {n}")
def pop_from_array_or_individual(array, pop=None):
    """Coerce an ndarray, Individual, or Population into a Population.

    Returns None for any other input type. A custom (sub-)population object
    may be supplied via `pop` so its `new` implementation is used.
    """
    # the population type can be different - (different type of individuals)
    if pop is None:
        pop = Population()

    if isinstance(array, Population):
        # already a population (individuals might be evaluated already)
        return array
    if isinstance(array, np.ndarray):
        return pop.new("X", np.atleast_2d(array))
    if isinstance(array, Individual):
        single = Population(1)
        single[0] = array
        return single
    return None
if __name__ == '__main__':
    # Ad-hoc smoke test (runs only when the module is executed directly):
    # build a population of 10 default individuals and exercise the main
    # accessors without asserting anything.
    pop = Population(10)
    pop.get("F")
    pop.new()
    print("")
| [
"numpy.array",
"pymoo.model.individual.Individual",
"numpy.concatenate",
"numpy.atleast_2d"
] | [((646, 658), 'pymoo.model.individual.Individual', 'Individual', ([], {}), '()\n', (656, 658), False, 'from pymoo.model.individual import Individual\n'), ((1110, 1123), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (1118, 1123), True, 'import numpy as np\n'), ((2390, 2401), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (2398, 2401), True, 'import numpy as np\n'), ((4305, 4325), 'numpy.atleast_2d', 'np.atleast_2d', (['array'], {}), '(array)\n', (4318, 4325), True, 'import numpy as np\n'), ((2876, 2898), 'numpy.concatenate', 'np.concatenate', (['[a, b]'], {}), '([a, b])\n', (2890, 2898), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from ..builder import LOSSES
from .accuracy import accuracy
'''
Adaptive Class Supression Loss
Author: changewt
Source: https://github.com/CASIA-IVA-Lab/ACSL
Paper: https://openaccess.thecvf.com/content/CVPR2021/papers/Wang_Adaptive_Class_Suppression_Loss_for_Long-Tail_Object_Detection_CVPR_2021_paper.pdf
'''
@LOSSES.register_module()
class ACSL(nn.Module):
    """Adaptive Class Suppression Loss (Wang et al., CVPR 2021) for long-tailed detection.

    Unrelated classes contribute gradients only when the model predicts them
    with probability >= `score_thr`; background samples additionally suppress
    the rare/common/frequent LVIS category groups with different sampling
    ratios. Several activation `variant`s are supported: 'sigmoid', 'gumbel',
    'normal' (Gaussian CDF) and 'softmax'.
    """

    def __init__(self, score_thr=0.7, json_file='../../datasets/lvis/data/lvis_v1_train.json', loss_weight=1.0,variant='sigmoid'):
        super(ACSL, self).__init__()
        # probability threshold above which an unrelated class is suppressed
        self.score_thr = score_thr
        assert self.score_thr > 0 and self.score_thr < 1
        self.loss_weight = loss_weight
        assert len(json_file) != 0
        # rare/common/frequent category ids parsed from the LVIS annotation file
        self.freq_group = self.get_freq_info(json_file)
        self.variant = variant

        self.custom_activation = True
        # custom accuracy of the classifier
        self.custom_accuracy = True

    def get_activation(self, cls_score):
        """Get custom activation of cls_score.
        Args:
            cls_score (torch.Tensor): The prediction with shape (N, C).
        Returns:
            torch.Tensor: The custom activation of cls_score with shape
                (N, C).
        """
        # NOTE(review): `scores` is undefined when `self.variant` is not one
        # of the four handled values — an unknown variant raises NameError.
        if self.variant=='gumbel':
            scores = 1/(torch.exp(torch.exp(-cls_score)))
        elif self.variant=='normal':
            # Gaussian CDF computed via the error function
            scores=1/2+torch.erf(cls_score/(2**(1/2)))/2
        elif self.variant=='softmax':
            scores= torch.softmax(cls_score,dim=-1)
        elif self.variant=='sigmoid':
            scores= torch.sigmoid(cls_score)
        return scores

    def get_accuracy(self, cls_score, labels):
        """Get custom accuracy w.r.t. cls_score and labels.
        Args:
            cls_score (torch.Tensor): The prediction with shape (N, C).
            labels (torch.Tensor): The learning label of the prediction.
        Returns:
            Dict [str, torch.Tensor]: The accuracy for objectness and classes,
                respectively.
        """
        acc_classes = accuracy(cls_score, labels)
        acc = dict()
        acc['acc_Cls'] = acc_classes
        return acc

    def get_freq_info(self, json_file):
        """Group LVIS category ids by their annotated frequency ('r'/'c'/'f')."""
        cats = json.load(open(json_file, 'r'))['categories']

        freq_dict = {'rare': [], 'common': [], 'freq': []}

        for cat in cats:
            if cat['frequency'] == 'r':
                freq_dict['rare'].append(cat['id'])
            elif cat['frequency'] == 'c':
                freq_dict['common'].append(cat['id'])
            elif cat['frequency'] == 'f':
                freq_dict['freq'].append(cat['id'])
            else:
                print('Something wrong with the json file.')

        return freq_dict

    def forward(self, cls_logits, labels, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the ACSL classification loss.

        Args:
            cls_logits (torch.Tensor): raw logits of shape (N, C); background
                is assumed to be the LAST column on input — TODO confirm.
            labels (torch.Tensor): per-sample class indices.

        Returns:
            torch.Tensor: scalar loss (mean CE for the 'softmax' variant,
            masked BCE sum / N for the others).
        """
        device = cls_logits.device
        self.n_i, self.n_c = cls_logits.size()
        # per-sample binary target vector over all C classes
        target = cls_logits.new_zeros(self.n_i, self.n_c)
        # weight mask, decide which class should be ignored
        #weight_mask = cls_logits.new_zeros(self.n_i, self.n_c)
        # Move the last (background) column to the front and shift the labels
        # so that class id 0 means background in the code below.
        #convert 1204 to 0 for bg index
        # NOTE(review): device is hard-coded to 'cuda' here — breaks CPU runs.
        index_inversion = torch.tensor([cls_logits.shape[1]-1]+(torch.arange(cls_logits.shape[1]-1)).tolist(),device='cuda')
        cls_logits = torch.index_select(cls_logits, 1, index_inversion)
        labels = (labels+1)%self.n_c
        unique_label = torch.unique(labels)

        with torch.no_grad():
            # probabilities used only for thresholding; no gradients needed
            if self.variant =='sigmoid':
                sigmoid_cls_logits = torch.sigmoid(cls_logits)
            elif self.variant =='gumbel':
                sigmoid_cls_logits = 1/(torch.exp(torch.exp(-(torch.clamp(cls_logits,min=-4,max=10)))))
            elif self.variant =='normal':
                sigmoid_cls_logits = 1/2+torch.erf(torch.clamp(cls_logits,min=-5,max=5)/(2**(1/2)))/2
            elif self.variant =='softmax':
                sigmoid_cls_logits = torch.softmax(cls_logits,dim=-1)

        # for each sample, if its score on unrealated class hight than score_thr, their gradient should not be ignored
        # this is also applied to negative samples
        high_score_inds = torch.nonzero(sigmoid_cls_logits>=self.score_thr)
        # dense 0/1 mask built from the indices of confident predictions
        weight_mask = torch.sparse_coo_tensor(high_score_inds.t(), cls_logits.new_ones(high_score_inds.shape[0]), size=(self.n_i, self.n_c), device=device).to_dense()

        for cls in unique_label:
            cls = cls.item()
            cls_inds = torch.nonzero(labels == cls).squeeze(1)
            if cls == 0:
                # construct target vector for background samples
                target[cls_inds, 0] = 1
                # reset bg rows, then selectively re-enable category groups below
                weight_mask[cls_inds] = 0
                cls_inds_cpu = cls_inds.cpu()

                # Randomly choose a small subset of bg samples to suppress rare categories
                rare_cats = self.freq_group['rare']
                rare_cats = torch.tensor(rare_cats, device=cls_logits.device)
                # 1% of bg samples get gradients on the rare group
                choose_bg_num = int(len(cls_inds) * 0.01)
                choose_bg_inds = torch.tensor(np.random.choice(cls_inds_cpu, size=(choose_bg_num), replace=False), device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, rare_cats] = 1
                weight_mask[choose_bg_inds] = tmp_weight_mask

                # 10% of bg samples get gradients on the common group
                common_cats = self.freq_group['common']
                common_cats = torch.tensor(common_cats, device=cls_logits.device)
                choose_bg_num = int(len(cls_inds) * 0.1)
                choose_bg_inds = torch.tensor(np.random.choice(cls_inds_cpu, size=(choose_bg_num), replace=False), device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, common_cats] = 1
                weight_mask[choose_bg_inds] = tmp_weight_mask

                # all bg samples get gradients on the frequent group
                freq_cats = self.freq_group['freq']
                freq_cats = torch.tensor(freq_cats, device=cls_logits.device)
                choose_bg_num = int(len(cls_inds) * 1.0)
                choose_bg_inds = torch.tensor(np.random.choice(cls_inds_cpu, size=(choose_bg_num), replace=False), device=device)

                tmp_weight_mask = weight_mask[choose_bg_inds]
                tmp_weight_mask[:, freq_cats] = 1
                weight_mask[choose_bg_inds] = tmp_weight_mask

                # Set the weight for bg to 1
                weight_mask[cls_inds, 0] = 1
            else:
                # construct target vector for foreground samples
                cur_labels = [cls]
                cur_labels = torch.tensor(cur_labels, device=cls_logits.device)
                tmp_label_vec = cls_logits.new_zeros(self.n_c)
                tmp_label_vec[cur_labels] = 1
                tmp_label_vec = tmp_label_vec.expand(cls_inds.numel(), self.n_c)
                target[cls_inds] = tmp_label_vec
                # construct weight mask for fg samples
                tmp_weight_mask_vec = weight_mask[cls_inds]
                # set the weight for ground truth category
                tmp_weight_mask_vec[:, cur_labels] = 1
                weight_mask[cls_inds] = tmp_weight_mask_vec

        if self.variant =='sigmoid':
            cls_loss = F.binary_cross_entropy_with_logits(cls_logits, target.float(), reduction='none')
        elif self.variant =='gumbel':
            pestim = 1/(torch.exp(torch.exp(-(torch.clamp(cls_logits,min=-4,max=10)))))
            cls_loss = F.binary_cross_entropy(pestim, target.float(), reduction='none')
        elif self.variant =='normal':
            pestim=1/2+torch.erf(torch.clamp(cls_logits,min=-5,max=5)/(2**(1/2)))/2
            cls_loss = F.binary_cross_entropy(pestim, target.float(), reduction='none')
        elif self.variant =='softmax':
            # softmax variant returns early with a mean cross-entropy loss
            cls_loss = F.cross_entropy(weight_mask*cls_logits, target.argmax(axis=1), reduction='mean')
            return cls_loss
        return torch.sum(weight_mask * cls_loss) / self.n_i
| [
"torch.unique",
"torch.erf",
"torch.nonzero",
"torch.softmax",
"torch.index_select",
"torch.sigmoid",
"torch.exp",
"torch.clamp",
"torch.arange",
"numpy.random.choice",
"torch.no_grad",
"torch.sum",
"torch.tensor"
] | [((3461, 3511), 'torch.index_select', 'torch.index_select', (['cls_logits', '(1)', 'index_inversion'], {}), '(cls_logits, 1, index_inversion)\n', (3479, 3511), False, 'import torch\n'), ((3581, 3601), 'torch.unique', 'torch.unique', (['labels'], {}), '(labels)\n', (3593, 3601), False, 'import torch\n'), ((4357, 4408), 'torch.nonzero', 'torch.nonzero', (['(sigmoid_cls_logits >= self.score_thr)'], {}), '(sigmoid_cls_logits >= self.score_thr)\n', (4370, 4408), False, 'import torch\n'), ((3624, 3639), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3637, 3639), False, 'import torch\n'), ((8414, 8447), 'torch.sum', 'torch.sum', (['(weight_mask * cls_loss)'], {}), '(weight_mask * cls_loss)\n', (8423, 8447), False, 'import torch\n'), ((3719, 3744), 'torch.sigmoid', 'torch.sigmoid', (['cls_logits'], {}), '(cls_logits)\n', (3732, 3744), False, 'import torch\n'), ((5163, 5212), 'torch.tensor', 'torch.tensor', (['rare_cats'], {'device': 'cls_logits.device'}), '(rare_cats, device=cls_logits.device)\n', (5175, 5212), False, 'import torch\n'), ((5768, 5819), 'torch.tensor', 'torch.tensor', (['common_cats'], {'device': 'cls_logits.device'}), '(common_cats, device=cls_logits.device)\n', (5780, 5819), False, 'import torch\n'), ((6388, 6437), 'torch.tensor', 'torch.tensor', (['freq_cats'], {'device': 'cls_logits.device'}), '(freq_cats, device=cls_logits.device)\n', (6400, 6437), False, 'import torch\n'), ((7056, 7106), 'torch.tensor', 'torch.tensor', (['cur_labels'], {'device': 'cls_logits.device'}), '(cur_labels, device=cls_logits.device)\n', (7068, 7106), False, 'import torch\n'), ((1402, 1423), 'torch.exp', 'torch.exp', (['(-cls_score)'], {}), '(-cls_score)\n', (1411, 1423), False, 'import torch\n'), ((1578, 1610), 'torch.softmax', 'torch.softmax', (['cls_score'], {'dim': '(-1)'}), '(cls_score, dim=-1)\n', (1591, 1610), False, 'import torch\n'), ((4660, 4688), 'torch.nonzero', 'torch.nonzero', (['(labels == cls)'], {}), '(labels == cls)\n', (4673, 4688), False, 'import 
torch\n'), ((5317, 5382), 'numpy.random.choice', 'np.random.choice', (['cls_inds_cpu'], {'size': 'choose_bg_num', 'replace': '(False)'}), '(cls_inds_cpu, size=choose_bg_num, replace=False)\n', (5333, 5382), True, 'import numpy as np\n'), ((5923, 5988), 'numpy.random.choice', 'np.random.choice', (['cls_inds_cpu'], {'size': 'choose_bg_num', 'replace': '(False)'}), '(cls_inds_cpu, size=choose_bg_num, replace=False)\n', (5939, 5988), True, 'import numpy as np\n'), ((6541, 6606), 'numpy.random.choice', 'np.random.choice', (['cls_inds_cpu'], {'size': 'choose_bg_num', 'replace': '(False)'}), '(cls_inds_cpu, size=choose_bg_num, replace=False)\n', (6557, 6606), True, 'import numpy as np\n'), ((1486, 1521), 'torch.erf', 'torch.erf', (['(cls_score / 2 ** (1 / 2))'], {}), '(cls_score / 2 ** (1 / 2))\n', (1495, 1521), False, 'import torch\n'), ((1668, 1692), 'torch.sigmoid', 'torch.sigmoid', (['cls_score'], {}), '(cls_score)\n', (1681, 1692), False, 'import torch\n'), ((3379, 3416), 'torch.arange', 'torch.arange', (['(cls_logits.shape[1] - 1)'], {}), '(cls_logits.shape[1] - 1)\n', (3391, 3416), False, 'import torch\n'), ((4115, 4148), 'torch.softmax', 'torch.softmax', (['cls_logits'], {'dim': '(-1)'}), '(cls_logits, dim=-1)\n', (4128, 4148), False, 'import torch\n'), ((7879, 7918), 'torch.clamp', 'torch.clamp', (['cls_logits'], {'min': '(-4)', 'max': '(10)'}), '(cls_logits, min=-4, max=10)\n', (7890, 7918), False, 'import torch\n'), ((3849, 3888), 'torch.clamp', 'torch.clamp', (['cls_logits'], {'min': '(-4)', 'max': '(10)'}), '(cls_logits, min=-4, max=10)\n', (3860, 3888), False, 'import torch\n'), ((8080, 8118), 'torch.clamp', 'torch.clamp', (['cls_logits'], {'min': '(-5)', 'max': '(5)'}), '(cls_logits, min=-5, max=5)\n', (8091, 8118), False, 'import torch\n'), ((3984, 4022), 'torch.clamp', 'torch.clamp', (['cls_logits'], {'min': '(-5)', 'max': '(5)'}), '(cls_logits, min=-5, max=5)\n', (3995, 4022), False, 'import torch\n')] |
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import numpy as np
import os
import sys
import torch
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
# Public interface of this utility module: dict/list helpers, padding math,
# download/logging utilities, and (distributed) metric bookkeeping.
__all__ = [
    "sort_dict",
    "get_same_padding",
    "get_split_list",
    "list_sum",
    "list_mean",
    "list_join",
    "subset_mean",
    "sub_filter_start_end",
    "min_divisible_value",
    "val2list",
    "download_url",
    "write_log",
    "pairwise_accuracy",
    "accuracy",
    "AverageMeter",
    "MultiClassAverageMeter",
    "DistributedMetric",
    "DistributedTensor",
]
def sort_dict(src_dict, reverse=False, return_dict=True):
    """Sort a dict by value.

    Returns an insertion-ordered dict by default, or the sorted list of
    (key, value) pairs when `return_dict` is False.
    """
    pairs = sorted(src_dict.items(), key=lambda kv: kv[1], reverse=reverse)
    return dict(pairs) if return_dict else pairs
def get_same_padding(kernel_size, dilation=1):
    """Padding that preserves spatial size ('same') for an odd kernel.

    Accepts an int or a 2-tuple of ints; tuples are handled per dimension.
    """
    assert isinstance(dilation, int), f"dilation has to be a number and not tuple"
    if isinstance(kernel_size, tuple):
        assert len(kernel_size) == 2, "invalid kernel size: %s" % kernel_size
        # handle height and width independently
        return (get_same_padding(kernel_size[0], dilation),
                get_same_padding(kernel_size[1], dilation))
    assert isinstance(kernel_size, int), "kernel size should be either `int` or `tuple`"
    assert kernel_size % 2 > 0, "kernel size should be odd number"
    return kernel_size // 2 * dilation
def get_split_list(in_dim, child_num, accumulate=False):
    """Split `in_dim` into `child_num` near-equal integer parts.

    The first (in_dim % child_num) parts get one extra unit. With
    `accumulate`, return the running prefix sums instead.
    """
    base, extra = divmod(in_dim, child_num)
    splits = [base + 1 if idx < extra else base for idx in range(child_num)]
    if accumulate:
        for idx in range(1, child_num):
            splits[idx] += splits[idx - 1]
    return splits
def list_sum(x):
    """Sum the elements of a non-empty sequence with `+`.

    Works for any type supporting addition (numbers, tensors, lists) and
    deliberately does NOT start from 0, so no type coercion happens.

    FIX: the original recursed once per element with `x[1:]` slicing, which
    is O(n^2) and hits Python's recursion limit (~1000) on long lists; this
    iterative form is O(n) and depth-safe.

    Raises:
        IndexError: if `x` is empty (same as the original behavior).
    """
    total = x[0]
    for item in x[1:]:
        total = total + item
    return total
def list_mean(x):
    """Arithmetic mean of a non-empty sequence (any type supporting + and /)."""
    return list_sum(x) / len(x)
def list_join(val_list, sep="\t"):
    """Join the items of `val_list` as strings, separated by `sep` (tab by default)."""
    return sep.join(str(item) for item in val_list)
def subset_mean(val_list, sub_indexes):
    """Mean of `val_list` restricted to `sub_indexes` (a scalar index is allowed)."""
    # val2list turns a scalar index into a one-element list
    sub_indexes = val2list(sub_indexes, 1)
    return list_mean([val_list[idx] for idx in sub_indexes])
def sub_filter_start_end(kernel_size, sub_kernel_size):
    """Start/end slice indices of a centered sub-kernel inside a larger kernel.

    Both sizes are expected to be odd; the assertion fails otherwise.
    """
    half = sub_kernel_size // 2
    lo = kernel_size // 2 - half
    hi = lo + 2 * half + 1
    assert hi - lo == sub_kernel_size
    return lo, hi
def min_divisible_value(n1, v1):
    """Largest value <= v1 that divides n1 (returns n1 itself when v1 >= n1)."""
    if v1 >= n1:
        return n1
    candidate = v1
    while n1 % candidate:
        candidate -= 1
    return candidate
def val2list(val, repeat_time=1):
    """Normalize a value into a list.

    Lists/ndarrays pass through unchanged, tuples become lists, and any
    other value is repeated `repeat_time` times.
    """
    if isinstance(val, (list, np.ndarray)):
        return val
    if isinstance(val, tuple):
        return list(val)
    return [val] * repeat_time
def download_url(url, model_dir="~/.torch/", overwrite=False):
    """Download `url` into `model_dir` unless already cached.

    Returns the local file path on success, or None (after logging to
    stderr) on failure.

    FIX: on error the original unconditionally called
    os.remove(os.path.join(model_dir, "download.lock")); by that point
    `model_dir` had been rebound to the cached FILE path and the lock file
    never exists, so os.remove raised FileNotFoundError inside the handler
    and masked the real error. The removal is now guarded.
    """
    target_dir = url.split("/")[-1]
    model_dir = os.path.expanduser(model_dir)
    try:
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model_dir = os.path.join(model_dir, target_dir)
        cached_file = model_dir
        if not os.path.exists(cached_file) or overwrite:
            sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
            urlretrieve(url, cached_file)
        return cached_file
    except Exception as e:
        # Best-effort cleanup of a stale lock file so a later download can
        # be executed; never let the cleanup itself raise.
        lock_path = os.path.join(model_dir, "download.lock")
        if os.path.exists(lock_path):
            os.remove(lock_path)
        sys.stderr.write("Failed to download from url %s" % url + "\n" + str(e) + "\n")
        return None
def write_log(logs_path, log_str, prefix="valid", should_print=True, mode="a"):
    """Append `log_str` to the appropriate log file under `logs_path`.

    prefix 'valid'/'test' goes to valid_console.txt AND train_console.txt
    (with a '=' * 10 marker in the latter); 'train' goes to
    train_console.txt only; anything else goes to '<prefix>.txt'.
    Optionally echoes the line to stdout.
    """
    if not os.path.exists(logs_path):
        os.makedirs(logs_path, exist_ok=True)
    if prefix in ("valid", "test"):
        with open(os.path.join(logs_path, "valid_console.txt"), mode) as fout:
            fout.write(log_str + "\n")
            fout.flush()
    if prefix in ("valid", "test", "train"):
        with open(os.path.join(logs_path, "train_console.txt"), mode) as fout:
            if prefix in ("valid", "test"):
                # visually separate eval lines inside the training log
                fout.write("=" * 10)
            fout.write(log_str + "\n")
            fout.flush()
    else:
        with open(os.path.join(logs_path, "%s.txt" % prefix), mode) as fout:
            fout.write(log_str + "\n")
            fout.flush()
    if should_print:
        print(log_str)
def pairwise_accuracy(la, lb, n_samples=200000):
n = len(la)
assert n == len(lb)
total = 0
count = 0
for _ in range(n_samples):
i = np.random.randint(n)
j = np.random.randint(n)
while i == j:
j = np.random.randint(n)
if la[i] >= la[j] and lb[i] >= lb[j]:
count += 1
if la[i] < la[j] and lb[i] < lb[j]:
count += 1
total += 1
return float(count) / total
def accuracy(output, target, topk=(1,)):
    """Top-k precision (in percent) of `output` logits against `target` labels.

    Returns one 1-element tensor per k in `topk`.
    """
    k_max = max(topk)
    n = target.size(0)
    # indices of the k_max highest-scoring classes per sample, transposed
    # to shape (k_max, N) so row slicing selects "within top-k" below
    _, top_idx = output.topk(k_max, 1, True, True)
    top_idx = top_idx.t()
    hits = top_idx.eq(target.reshape(1, -1).expand_as(top_idx))
    results = []
    for k in topk:
        n_hits = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(n_hits.mul_(100.0 / n))
    return results
class AverageMeter(object):
    """Track the latest value, sum, count, and running average of a metric.

    Adapted from the PyTorch ImageNet example.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class MultiClassAverageMeter:
    """ Multi Binary Classification Tasks """
    # Tracks one 2x2 confusion matrix per class for a batch of independent
    # binary decisions; value() reports the mean per-class accuracy in %.

    def __init__(self, num_classes, balanced=False, **kwargs):
        super(MultiClassAverageMeter, self).__init__()
        self.num_classes = num_classes
        # balanced=True averages per-target-class recall instead of raw accuracy
        self.balanced = balanced

        self.counts = []
        for k in range(self.num_classes):
            # np.ndarray(...) allocates UNINITIALIZED memory; reset() zeroes it
            self.counts.append(np.ndarray((2, 2), dtype=np.float32))

        self.reset()

    def reset(self):
        # zero every per-class confusion matrix
        for k in range(self.num_classes):
            self.counts[k].fill(0)

    def add(self, outputs, targets):
        # assumes outputs has shape (batch, num_classes, 2) of scores and
        # targets has shape (batch, num_classes) of 0/1 labels — TODO confirm
        outputs = outputs.data.cpu().numpy()
        targets = targets.data.cpu().numpy()

        for k in range(self.num_classes):
            output = np.argmax(outputs[:, k, :], axis=1)
            target = targets[:, k]

            # encode (target, output) pairs as 0..3, then histogram into a
            # 2x2 matrix with rows = target, cols = predicted
            x = output + 2 * target
            bincount = np.bincount(x.astype(np.int32), minlength=2 ** 2)

            self.counts[k] += bincount.reshape((2, 2))

    def value(self):
        """Mean per-class accuracy in percent (recall-balanced if configured)."""
        mean = 0
        for k in range(self.num_classes):
            if self.balanced:
                # normalize each row by its target count, then average the
                # diagonal (mean of per-class recalls)
                value = np.mean((self.counts[k] / np.maximum(np.sum(self.counts[k], axis=1), 1)[:, None]).diagonal())
            else:
                # plain accuracy: correct / total (clamped to avoid 0-division)
                value = np.sum(self.counts[k].diagonal()) / np.maximum(np.sum(self.counts[k]), 1)

            mean += value / self.num_classes * 100.0

        return mean
class DistributedMetric(object):
    """Running average of a metric that is all-reduced across Horovod workers."""

    def __init__(self, name):
        self.name = name
        # scalar float accumulators (0-dim tensors)
        self.sum = torch.zeros(1)[0]
        self.count = torch.zeros(1)[0]

    def update(self, val, delta_n=1):
        """Accumulate `val` (scaled in place by `delta_n`), averaged over all workers."""
        import horovod.torch as hvd
        val *= delta_n
        reduced = hvd.allreduce(val.detach().cpu(), name=self.name)
        self.sum += reduced
        self.count += delta_n

    @property
    def avg(self):
        """Mean over everything recorded so far."""
        return self.sum / self.count
class DistributedTensor(object):
    """Accumulate a tensor locally; all-reduce lazily on first read of `avg`."""

    def __init__(self, name):
        self.name = name
        self.sum = None
        self.count = torch.zeros(1)[0]
        self.synced = False

    def update(self, val, delta_n=1):
        """Add `val` (scaled in place by `delta_n`) into the local running sum."""
        val *= delta_n
        if self.sum is not None:
            self.sum += val.detach()
        else:
            self.sum = val.detach()
        self.count += delta_n

    @property
    def avg(self):
        """Workers-averaged mean; the horovod all-reduce happens only once."""
        import horovod.torch as hvd
        if not self.synced:
            self.sum = hvd.allreduce(self.sum, name=self.name)
            self.synced = True
        return self.sum / self.count
| [
"numpy.sum",
"os.makedirs",
"os.path.join",
"numpy.argmax",
"horovod.torch.allreduce",
"numpy.ndarray",
"os.path.exists",
"urllib.request.urlretrieve",
"numpy.random.randint",
"torch.zeros",
"os.path.expanduser"
] | [((2985, 3014), 'os.path.expanduser', 'os.path.expanduser', (['model_dir'], {}), '(model_dir)\n', (3003, 3014), False, 'import os\n'), ((3121, 3156), 'os.path.join', 'os.path.join', (['model_dir', 'target_dir'], {}), '(model_dir, target_dir)\n', (3133, 3156), False, 'import os\n'), ((3752, 3777), 'os.path.exists', 'os.path.exists', (['logs_path'], {}), '(logs_path)\n', (3766, 3777), False, 'import os\n'), ((3787, 3824), 'os.makedirs', 'os.makedirs', (['logs_path'], {'exist_ok': '(True)'}), '(logs_path, exist_ok=True)\n', (3798, 3824), False, 'import os\n'), ((4669, 4689), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (4686, 4689), True, 'import numpy as np\n'), ((4702, 4722), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (4719, 4722), True, 'import numpy as np\n'), ((3039, 3064), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (3053, 3064), False, 'import os\n'), ((3078, 3100), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (3089, 3100), False, 'import os\n'), ((3341, 3370), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'cached_file'], {}), '(url, cached_file)\n', (3352, 3370), False, 'from urllib.request import urlretrieve\n'), ((4761, 4781), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (4778, 4781), True, 'import numpy as np\n'), ((6708, 6743), 'numpy.argmax', 'np.argmax', (['outputs[:, k, :]'], {'axis': '(1)'}), '(outputs[:, k, :], axis=1)\n', (6717, 6743), True, 'import numpy as np\n'), ((7546, 7560), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (7557, 7560), False, 'import torch\n'), ((7585, 7599), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (7596, 7599), False, 'import torch\n'), ((8008, 8022), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (8019, 8022), False, 'import torch\n'), ((8384, 8423), 'horovod.torch.allreduce', 'hvd.allreduce', (['self.sum'], {'name': 'self.name'}), '(self.sum, name=self.name)\n', 
(8397, 8423), True, 'import horovod.torch as hvd\n'), ((3204, 3231), 'os.path.exists', 'os.path.exists', (['cached_file'], {}), '(cached_file)\n', (3218, 3231), False, 'import os\n'), ((3509, 3549), 'os.path.join', 'os.path.join', (['model_dir', '"""download.lock"""'], {}), "(model_dir, 'download.lock')\n", (3521, 3549), False, 'import os\n'), ((3918, 3962), 'os.path.join', 'os.path.join', (['logs_path', '"""valid_console.txt"""'], {}), "(logs_path, 'valid_console.txt')\n", (3930, 3962), False, 'import os\n'), ((4106, 4150), 'os.path.join', 'os.path.join', (['logs_path', '"""train_console.txt"""'], {}), "(logs_path, 'train_console.txt')\n", (4118, 4150), False, 'import os\n'), ((4340, 4382), 'os.path.join', 'os.path.join', (['logs_path', "('%s.txt' % prefix)"], {}), "(logs_path, '%s.txt' % prefix)\n", (4352, 4382), False, 'import os\n'), ((6357, 6393), 'numpy.ndarray', 'np.ndarray', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (6367, 6393), True, 'import numpy as np\n'), ((7263, 7285), 'numpy.sum', 'np.sum', (['self.counts[k]'], {}), '(self.counts[k])\n', (7269, 7285), True, 'import numpy as np\n'), ((7117, 7147), 'numpy.sum', 'np.sum', (['self.counts[k]'], {'axis': '(1)'}), '(self.counts[k], axis=1)\n', (7123, 7147), True, 'import numpy as np\n')] |
import sys
import numpy as np
class BFGS:
    """
    Class to execute a BFGS optimization to a local minimum
    """

    def __init__(self, step_tol=1E-7, grad_tol=1E-7, line_tol=1E-10,
                 inhess=None, max_step=100, max_lin_step=1000,
                 use_grad_tol=1, use_step_tol=1):
        """
        Initialize the BFGS algorithm
        step_tol: tolerance on the minimum step size to continue the
                  optimization (absolute L2 norm of the step)
        grad_tol: tolerance on the minimum gradient to continue the
                  optimization (absolute L2 norm of the gradient)
        line_tol: tolerance on the alpha in the line search
        inhess: initial guess for the hessian (default is identity matrix)
        max_step: maximum number of Gradient iterations
        max_lin_step: maximum number of iterations in the linear search
        use_grad_tol: use the gradient tolerance convergance criterion
        use_step_tol: use the step size tolerance convergance criterion
        """
        self.step_tol = step_tol
        self.grad_tol = grad_tol
        self.line_tol = line_tol
        if inhess is None:
            self.inhess = []
        else:
            self.inhess = inhess
        self.max_step = max_step
        self.max_lin_step = max_lin_step
        self.use_grad_tol = use_grad_tol
        self.use_step_tol = use_step_tol
        # at least one convergence criterion must be active
        if not use_grad_tol and not use_step_tol:
            sys.exit('Cannot execute an optimization if neither the step ' +
                     'nor the gradient tolerance can be used')

    def converged(self, step, grad):
        # Returns 1 when any enabled criterion (step norm or gradient norm)
        # is below its tolerance, 0 otherwise.
        if np.linalg.norm(step) < self.step_tol and self.use_step_tol:
            return 1
        if np.linalg.norm(grad) < self.grad_tol and self.use_grad_tol:
            return 1
        return 0

    def optimize(self, f, x):
        """
        Optimize a function to the closest local minimum using the BFGS method.
        f: a function of x which needs to have
            f.eval(x): function evaluation of f
            f.gradient(x): gradient df/dx in x
        x: initial guess
        """
        # NOTE(review): if max_step == 0 the loop never runs and the final
        # `return xk, ...` would raise NameError — max_step is assumed > 0.
        # intermediate coordinates
        x_i = [x]
        if len(self.inhess) == 0:
            H = np.eye(len(x))
        else:
            H = self.inhess
        Hinv = np.linalg.inv(H)
        g = f.gradient(x)
        # intermediate forces
        g_i = [g]
        # initial search direction: -H^-1 g (H inverted a second time here)
        p = np.dot(np.linalg.inv(H), -g)
        it = 0
        while it < self.max_step:
            # step length along p from the line search
            ak = self.line_search(f, x, p)
            sk = ak * p
            xk = x + sk
            gk = f.gradient(xk)
            yk = gk - g
            # curvature s_k . y_k; skip the inverse-Hessian update when zero
            sy = np.dot(sk, yk)
            if sy:
                # BFGS inverse-Hessian update:
                # Hinv += (sy + y.Hinv.y) (s s^T) / sy^2
                #       - (Hinv y s^T + s y^T Hinv) / sy
                t1 = (sy + np.dot(yk, np.dot(Hinv, yk))) * np.outer(sk, sk)
                t1 = t1 / np.power(sy, 2)
                t2 = np.dot(Hinv, np.outer(yk, sk))
                t2 = t2 + np.dot(np.outer(sk, yk), Hinv)
                t2 = t2 / sy
                Hinvk = Hinv + t1 - t2
            else:
                Hinvk = Hinv
            x_i.append(xk)
            g_i.append(gk)
            converged = self.converged(sk, gk)
            if converged:
                return xk, x_i, g_i
            g = gk
            x = xk
            # next search direction with the updated inverse Hessian
            p = np.dot(Hinvk, -gk)
            Hinv = Hinvk
            it += 1
        # return final x values, list of geometric steps
        # and list of gradients
        return xk, x_i, g_i

    def line_search(self, f, x, p):
        """
        Perform a line serach of function f from point x along direction p
        """
        # Shrinks alpha geometrically (factor nu) from 1 and stops once the
        # function starts increasing again, returning the previous alpha.
        a = 1.
        nu = .9
        fx = f.eval(x)
        fx0 = fx
        it = 0
        while it < self.max_lin_step:
            xk = x+a*p
            fxk = f.eval(xk)
            if it > 0:
                if fxk > fx and fx < fx0:
                    # starting to climb again, return the second to last value
                    return a / nu
            fx = fxk
            a *= nu
            if a < self.line_tol:
                break
            it += 1
        return a
| [
"numpy.outer",
"numpy.power",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.dot",
"sys.exit"
] | [((2287, 2303), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (2300, 2303), True, 'import numpy as np\n'), ((1439, 1549), 'sys.exit', 'sys.exit', (["('Cannot execute an optimization if neither the step ' +\n 'nor the gradient tolerance can be used')"], {}), "('Cannot execute an optimization if neither the step ' +\n 'nor the gradient tolerance can be used')\n", (1447, 1549), False, 'import sys\n'), ((2398, 2414), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (2411, 2414), True, 'import numpy as np\n'), ((2635, 2649), 'numpy.dot', 'np.dot', (['sk', 'yk'], {}), '(sk, yk)\n', (2641, 2649), True, 'import numpy as np\n'), ((3230, 3248), 'numpy.dot', 'np.dot', (['Hinvk', '(-gk)'], {}), '(Hinvk, -gk)\n', (3236, 3248), True, 'import numpy as np\n'), ((1616, 1636), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (1630, 1636), True, 'import numpy as np\n'), ((1708, 1728), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (1722, 1728), True, 'import numpy as np\n'), ((2728, 2744), 'numpy.outer', 'np.outer', (['sk', 'sk'], {}), '(sk, sk)\n', (2736, 2744), True, 'import numpy as np\n'), ((2771, 2786), 'numpy.power', 'np.power', (['sy', '(2)'], {}), '(sy, 2)\n', (2779, 2786), True, 'import numpy as np\n'), ((2821, 2837), 'numpy.outer', 'np.outer', (['yk', 'sk'], {}), '(yk, sk)\n', (2829, 2837), True, 'import numpy as np\n'), ((2872, 2888), 'numpy.outer', 'np.outer', (['sk', 'yk'], {}), '(sk, yk)\n', (2880, 2888), True, 'import numpy as np\n'), ((2707, 2723), 'numpy.dot', 'np.dot', (['Hinv', 'yk'], {}), '(Hinv, yk)\n', (2713, 2723), True, 'import numpy as np\n')] |
"""
A script to assess model precision, accuracy, recall, and f1 score via cross validation
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import cross_validate, StratifiedKFold
import argparse
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Dropout, Activation
from keras.optimizers import adam
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
# Command-line interface: which model to evaluate, paths to the data and
# label CSVs, and the number of cross-validation folds.
parser = argparse.ArgumentParser()
parser.add_argument('-model', action='store', dest='mod', help='Select the model to run grid search on '
                                                               '[rf, xgboost, nn].')
parser.add_argument('-data', action='store', dest='data', help='Path to transcriptome data.')
parser.add_argument('-labels', action='store', dest='labels', help='Path to transcriptome labels.')
parser.add_argument('-k', action='store', dest='folds', help='Number of cross validation folds.')
class InvalidArgError(Exception):
    """Raised when a command-line argument value is not recognized."""
def preprocess_data_sklearn(data_file, label_file):
    """
    Load transcriptome features and labels, drop unlabeled samples, and
    min-max scale the features for use with sklearn classifiers.
    :param data_file: training dataset
    :param label_file: training labels
    :return: scaled feature matrix and the 'Trophic mode' label series
    """
    features = pd.read_csv(data_file, index_col=0).reset_index(drop=True)
    targets = pd.read_csv(label_file, index_col=0).reset_index(drop=True)
    features.drop(columns=["M_id"], inplace=True)
    # 'Un' marks transcriptomes with no training label — remove them
    unlabeled = targets.index[targets['Trophic mode'] == 'Un']
    features = features.drop(unlabeled)
    targets = targets.drop(unlabeled)
    X = MinMaxScaler().fit_transform(features)
    y = targets['Trophic mode']
    return X, y
def cross_validate_sklearn(model, data_file, label_file, folds):
    """
    Run stratified k-fold cross validation for an sklearn-compatible classifier.
    :param model: sklearn model object.
    :param data_file: training data
    :param label_file: training labels
    :param folds: number of cross validation folds.
    :return: dict. results.
    """
    features, targets = preprocess_data_sklearn(data_file, label_file)
    # Macro averaging weights every class equally regardless of support.
    metrics = {
        'accuracy': make_scorer(accuracy_score),
        'precision': make_scorer(precision_score, average='macro'),
        'recall': make_scorer(recall_score, average='macro'),
        'f1_score': make_scorer(f1_score, average='macro'),
    }
    splitter = StratifiedKFold(n_splits=folds)
    return cross_validate(estimator=model, X=features, y=targets,
                          cv=splitter, scoring=metrics)
def preprocess_data_keras(data_file, label_file):
    """
    Prepare data for cross-validation with keras models; the target is
    one-hot encoded for the categorical cross-entropy loss.
    :param data_file: training data
    :param label_file: training labels
    :return: features and one-hot encoded targets
    """
    features, labels = preprocess_data_sklearn(data_file, label_file)
    # Map the string class labels onto integer codes, then one-hot encode.
    label_encoder = LabelEncoder()
    label_encoder.fit(labels)
    one_hot = np_utils.to_categorical(label_encoder.transform(labels))
    return features, one_hot
def generate_data(training_data, labels):
    """
    Data generator for cross validation with keras: draws a fresh random
    train/test split on every call.
    :param training_data: preprocessed training data.
    :param labels: one-hot encoded training labels.
    :return: X_train, y_train, X_test, y_test
    """
    train_X, test_X, train_y, test_y = train_test_split(training_data, labels, test_size=0.33)
    # Fit the scaler on the training split only to avoid information leakage.
    rescaler = MinMaxScaler()
    train_X = rescaler.fit_transform(train_X)
    test_X = rescaler.transform(test_X)
    return train_X, train_y, test_X, test_y
def get_model(X_train, y_train):
    """
    Build, compile and fit a small feed-forward classifier.
    :param X_train: scaled training features
    :param y_train: one-hot encoded training labels
    :return: fitted keras model
    """
    layers = [
        # Input layer, tuneable number of neurons (neurons1)
        Dense(6, input_dim=X_train.shape[1]),
        Activation('relu'),
        BatchNormalization(),
        # Hidden layer, tuneable number of neurons (neurons2)
        Dense(6),
        Activation('relu'),
        BatchNormalization(),
        # Softmax output over the three classes.
        Dense(3, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer=adam(lr=0.001), metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=20, batch_size=12, verbose=0)
    return model
def cross_validate_keras(folds, training_data, labels):
    """
    Cross validation for keras models: each fold refits the model on a fresh
    random split and scores it on the held-out portion.
    :param folds: Number of folds for cross validation
    :param training_data: preprocessed training data
    :param labels: one-hot encoded labels.
    :return: dict. results
    """
    scores = {'accuracy': [], 'precision': [], 'recall': [], 'f1': []}
    for _ in range(folds):
        X_train, y_train, X_test, y_test = generate_data(training_data, labels)
        model = get_model(X_train, y_train)
        predicted = model.predict_classes(X_test)
        # Recover integer class labels from the one-hot targets.
        truth = np.argmax(y_test, axis=1)
        scores['accuracy'].append(accuracy_score(truth, predicted))
        scores['precision'].append(precision_score(truth, predicted, average='macro'))
        scores['recall'].append(recall_score(truth, predicted, average='macro'))
        scores['f1'].append(f1_score(truth, predicted, average='macro'))
    return scores
def run():
    """Parse CLI arguments, cross-validate the selected model and print results.

    Raises
    ------
    InvalidArgError
        If -model is not one of [rf, xgboost, nn].
    """
    args = parser.parse_args()
    mod = args.mod
    data_file = args.data
    label_file = args.labels
    # argparse returns strings unless type= is set; StratifiedKFold(n_splits=...)
    # and range(folds) both need an integer fold count.
    folds = int(args.folds)
    if mod == 'rf':
        model = RandomForestClassifier(n_estimators=100, max_depth=1000)
        res = cross_validate_sklearn(model=model, data_file=data_file, label_file=label_file, folds=folds)
    elif mod == 'xgboost':
        model = XGBClassifier(n_estimators=10, learning_rate=0.5, reg_lambda=0)
        res = cross_validate_sklearn(model=model, data_file=data_file, label_file=label_file, folds=folds)
    elif mod == 'nn':
        X, y = preprocess_data_keras(data_file=data_file, label_file=label_file)
        res = cross_validate_keras(folds=folds, training_data=X, labels=y)
    else:
        raise InvalidArgError("model must be one of [xgboost, rf, nn].")
    print(res)


if __name__=='__main__':
    run()
| [
"argparse.ArgumentParser",
"numpy.argmax",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.MinMaxScaler",
"pandas.read_csv",
"sklearn.metrics.f1_score",
"keras.optimizers.adam",
"sklearn.preprocessing.La... | [((736, 761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (759, 761), False, 'import argparse\n'), ((1925, 1939), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1937, 1939), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2665, 2696), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'folds'}), '(n_splits=folds)\n', (2680, 2696), False, 'from sklearn.model_selection import cross_validate, StratifiedKFold\n'), ((2803, 2889), 'sklearn.model_selection.cross_validate', 'cross_validate', ([], {'estimator': 'model', 'X': 'features', 'y': 'targets', 'cv': 'kfold', 'scoring': 'scoring'}), '(estimator=model, X=features, y=targets, cv=kfold, scoring=\n scoring)\n', (2817, 2889), False, 'from sklearn.model_selection import cross_validate, StratifiedKFold\n'), ((3293, 3307), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3305, 3307), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3378, 3412), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['encoded_Y'], {}), '(encoded_Y)\n', (3401, 3412), False, 'from keras.utils import np_utils\n'), ((3773, 3811), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)'}), '(X, y, test_size=0.33)\n', (3789, 3811), False, 'from sklearn.model_selection import train_test_split\n'), ((3826, 3840), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3838, 3840), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4015, 4027), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4025, 4027), False, 'from keras.models import Sequential\n'), ((4444, 4458), 'keras.optimizers.adam', 'adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4448, 4458), False, 'from keras.optimizers import adam\n'), ((2412, 2439), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), 
'(accuracy_score)\n', (2423, 2439), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((2469, 2514), 'sklearn.metrics.make_scorer', 'make_scorer', (['precision_score'], {'average': '"""macro"""'}), "(precision_score, average='macro')\n", (2480, 2514), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((2541, 2583), 'sklearn.metrics.make_scorer', 'make_scorer', (['recall_score'], {'average': '"""macro"""'}), "(recall_score, average='macro')\n", (2552, 2583), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((2612, 2650), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {'average': '"""macro"""'}), "(f1_score, average='macro')\n", (2623, 2650), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((4099, 4135), 'keras.layers.Dense', 'Dense', (['(6)'], {'input_dim': 'X_train.shape[1]'}), '(6, input_dim=X_train.shape[1])\n', (4104, 4135), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4151, 4169), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4161, 4169), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4185, 4205), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4203, 4205), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4280, 4288), 'keras.layers.Dense', 'Dense', (['(6)'], {}), '(6)\n', (4285, 4288), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4304, 4322), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4314, 4322), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4338, 4358), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], 
{}), '()\n', (4356, 4358), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((4375, 4405), 'keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (4380, 4405), False, 'from keras.layers import Dense, BatchNormalization, Dropout, Activation\n'), ((5253, 5278), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (5262, 5278), True, 'import numpy as np\n'), ((5299, 5334), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testy', 'yhat_classes'], {}), '(testy, yhat_classes)\n', (5313, 5334), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((5395, 5448), 'sklearn.metrics.precision_score', 'precision_score', (['testy', 'yhat_classes'], {'average': '"""macro"""'}), "(testy, yhat_classes, average='macro')\n", (5410, 5448), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((5508, 5558), 'sklearn.metrics.recall_score', 'recall_score', (['testy', 'yhat_classes'], {'average': '"""macro"""'}), "(testy, yhat_classes, average='macro')\n", (5520, 5558), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((5608, 5654), 'sklearn.metrics.f1_score', 'f1_score', (['testy', 'yhat_classes'], {'average': '"""macro"""'}), "(testy, yhat_classes, average='macro')\n", (5616, 5654), False, 'from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\n'), ((6000, 6056), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'max_depth': '(1000)'}), '(n_estimators=100, max_depth=1000)\n', (6022, 6056), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1622, 1657), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'index_col': '(0)'}), '(data_file, index_col=0)\n', (1633, 1657), True, 'import pandas 
as pd\n'), ((1694, 1730), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {'index_col': '(0)'}), '(label_file, index_col=0)\n', (1705, 1730), True, 'import pandas as pd\n'), ((6207, 6270), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'n_estimators': '(10)', 'learning_rate': '(0.5)', 'reg_lambda': '(0)'}), '(n_estimators=10, learning_rate=0.5, reg_lambda=0)\n', (6220, 6270), False, 'from xgboost import XGBClassifier\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt

# Model to fit: a simple exponential.
f = lambda x, A, B: A * np.exp(B * x)

# Column layout (from the plotting calls below): x, y, x-error, y-error.
data = np.loadtxt('../data/boltz.dat', skiprows=1, delimiter=',')

# curve_fit arguments:
# - sigma: uncertainties of the y coordinates
# - p0: initial parameter guesses (same order as in the model definition)
popt, cov = opt.curve_fit(
    f, data[:, 0], data[:, 1], sigma=data[:, 3], p0=[1, 1e-10])

# Parameter errors are the square roots of the covariance-matrix diagonal.
perr = np.sqrt(np.diag(cov))
print(popt)
print(perr)

plt.errorbar(data[:, 0], data[:, 1], xerr=data[:, 2], yerr=data[:, 3])
plt.plot(data[:, 0], f(data[:, 0], *popt))
plt.show()
| [
"matplotlib.pyplot.show",
"scipy.optimize.curve_fit",
"numpy.loadtxt",
"numpy.exp",
"numpy.diag",
"matplotlib.pyplot.errorbar"
] | [((164, 222), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/boltz.dat"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('../data/boltz.dat', skiprows=1, delimiter=',')\n", (174, 222), True, 'import numpy as np\n'), ((378, 451), 'scipy.optimize.curve_fit', 'opt.curve_fit', (['f', 'data[:, 0]', 'data[:, 1]'], {'sigma': 'data[:, 3]', 'p0': '[1, 1e-10]'}), '(f, data[:, 0], data[:, 1], sigma=data[:, 3], p0=[1, 1e-10])\n', (391, 451), True, 'import scipy.optimize as opt\n'), ((574, 644), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['data[:, 0]', 'data[:, 1]'], {'xerr': 'data[:, 2]', 'yerr': 'data[:, 3]'}), '(data[:, 0], data[:, 1], xerr=data[:, 2], yerr=data[:, 3])\n', (586, 644), True, 'import matplotlib.pyplot as plt\n'), ((700, 710), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (708, 710), True, 'import matplotlib.pyplot as plt\n'), ((534, 546), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (541, 546), True, 'import numpy as np\n'), ((142, 155), 'numpy.exp', 'np.exp', (['(B * x)'], {}), '(B * x)\n', (148, 155), True, 'import numpy as np\n')] |
from opts import opts
import h5py
import json
import numpy as np
from vocab import vocabulary
import cPickle
import random
import os
class DataLoader(object):
    '''
    dataloader for loading data

    Reads image features/labels from HDF5 and split metadata from JSON
    (Python 2 code: uses xrange). Batches are served sequentially per split
    via get_batch, reshuffling at the end of every epoch.
    '''
    def __init__(self,opt,phase):
        # opt: global options object; phase: one of 'train' / 'val' / 'test'
        assert isinstance(opt,opts)
        self.opt = opt
        # new attributes are extracted by resnet
        # store in h5 file dataset feats
        self.attributes = h5py.File(opt.attributes_h5)['feats']
        data = h5py.File(opt.data_h5)
        with open(opt.data_json) as f:
            json_out = json.load(f)
        # split codes: 0 = train, 1 = val, 2 = test
        splits = [image_info['split'] for image_info in json_out['images']]
        image_ids = [image_info['image_id'] for image_info in json_out['images']]
        self.image_ids = image_ids
        self.train_ix = [ i for i,split in enumerate(splits) if split==0]
        self.val_ix = [i for i,split in enumerate(splits) if split==1]
        self.test_ix = [i for i,split in enumerate(splits) if split==2]
        # pt: cursor into the current split's index list
        self.pt = 0
        self.phase = phase
        self.features = data['features']
        self.labels = data['labels']
        # per-image caption ranges into self.labels (see get_batch for the
        # 1-based vs 0-based handling noted there)
        self.label_start_ix = data['label_start_ix']
        self.label_end_ix = data['label_end_ix']
        assert self.labels.shape[-1] == self.opt.seq_length
        self.ix_to_word = json_out['ix_to_word']
        self.ix_to_attr = json_out['ix_to_attr']
        # decide the value of start token and end token
        # START and END share a single id: vocab size + 1 (0 is padding),
        # so the total vocabulary size becomes vocab size + 2
        if self.opt.END_TOKEN and self.opt.START_TOKEN:
            assert self.opt.END_TOKEN == self.opt.START_TOKEN
            assert self.opt.START_TOKEN == len(json_out['ix_to_word'])+1
        else:
            self.opt.vocab_size = len(json_out['ix_to_word'])+2
            self.opt.END_TOKEN = self.opt.START_TOKEN = len(json_out['ix_to_word'])+1
    def get_batch(self,batch_size):
        # Assemble one minibatch of attributes, region features, image feature
        # and caption sequences. In 'train' phase returns
        # (attributes, features, image_feature, input_seqs, target_seqs, image_ids),
        # otherwise (attributes, features, image_feature, image_ids, None).
        if self.phase == 'train':
            data_ix = self.train_ix
        elif self.phase == 'val':
            data_ix = self.val_ix
        else:
            data_ix = self.test_ix
        attributes = np.zeros((batch_size,self.opt.attr_size)).astype(np.float32)
        features = np.zeros((batch_size,self.opt.nRegions,self.opt.image_encoding_size)).astype(np.float32)
        image_feature = np.zeros((batch_size,self.opt.image_encoding_size)).astype(np.float32)
        # seq_length + 1 leaves room for the START (input) / END (target) token
        input_seqs = np.zeros((batch_size,self.opt.nSeqs_per_img,self.opt.seq_length+1)).astype(np.int32)
        target_seqs = np.zeros((batch_size,self.opt.nSeqs_per_img,self.opt.seq_length+1)).astype(np.int32)
        nRegions = self.opt.nRegions
        image_ids = []
        # first step: start token
        input_seqs[:,:,0] = self.opt.START_TOKEN
        for i in xrange(batch_size):
            # reset iterator if it has reached the end
            if self.pt>=len(data_ix):
                # shuffle each epoch
                random.shuffle(data_ix)
                self.pt = 0
            index = data_ix[self.pt]
            # region features are stored contiguously per image; the last row
            # is used below as the whole-image feature
            features[i] = np.reshape(self.features[index*nRegions:(index+1)*nRegions,:]
                ,(nRegions,-1))
            image_feature[i] = features[i][-1]
            # the index is 1-based in hdf5, but it is 0-based in python
            st = self.label_start_ix[index]
            ed = self.label_end_ix[index]
            # randomly pick five labels
            pick = np.random.randint(st,ed-self.opt.nSeqs_per_img+1)
            nseqs = self.opt.nSeqs_per_img
            # input gets the caption after the START token; target gets the
            # caption shifted one step earlier (END token appended later)
            input_seqs[i,:,1:] = self.labels[pick:pick+nseqs,:]
            target_seqs[i,:,0:-1] = self.labels[pick:pick+nseqs,:]
            image_ids.append(self.image_ids[index])
            attributes[i] = self.attributes[index]
            self.pt +=1
        # language model batch_size: batch_size * self.opt.nSeqs_per_img
        if self.phase == 'train':
            attributes = np.repeat(attributes,self.opt.nSeqs_per_img,axis=0)
            features = np.repeat(features,self.opt.nSeqs_per_img,axis=0)
            image_feature = np.repeat(image_feature,self.opt.nSeqs_per_img,axis=0)
            input_seqs = np.reshape(input_seqs,[-1,self.opt.seq_length+1])
            target_seqs = np.reshape(target_seqs,[-1,self.opt.seq_length+1])
            # END TOKEN at last
            # place END_TOKEN at the first padding (0) position of each row
            cols = np.argmax(np.int32(target_seqs==0),axis=1)
            rows = np.array(xrange(cols.shape[0]))
            #for row,col in enumerate(indices):
            #target_seqs[row,col] = self.opt.END_TOKEN
            target_seqs[rows,cols] = self.opt.END_TOKEN
            #vocab = vocabulary(self.opt)
            return attributes, features, image_feature, input_seqs, target_seqs, image_ids
        else:
            return attributes,features, image_feature, image_ids, None
| [
"h5py.File",
"json.load",
"random.shuffle",
"numpy.zeros",
"numpy.random.randint",
"numpy.reshape",
"numpy.int32",
"numpy.repeat"
] | [((473, 495), 'h5py.File', 'h5py.File', (['opt.data_h5'], {}), '(opt.data_h5)\n', (482, 495), False, 'import h5py\n'), ((420, 448), 'h5py.File', 'h5py.File', (['opt.attributes_h5'], {}), '(opt.attributes_h5)\n', (429, 448), False, 'import h5py\n'), ((558, 570), 'json.load', 'json.load', (['f'], {}), '(f)\n', (567, 570), False, 'import json\n'), ((2986, 3076), 'numpy.reshape', 'np.reshape', (['self.features[index * nRegions:(index + 1) * nRegions, :]', '(nRegions, -1)'], {}), '(self.features[index * nRegions:(index + 1) * nRegions, :], (\n nRegions, -1))\n', (2996, 3076), True, 'import numpy as np\n'), ((3365, 3419), 'numpy.random.randint', 'np.random.randint', (['st', '(ed - self.opt.nSeqs_per_img + 1)'], {}), '(st, ed - self.opt.nSeqs_per_img + 1)\n', (3382, 3419), True, 'import numpy as np\n'), ((3870, 3923), 'numpy.repeat', 'np.repeat', (['attributes', 'self.opt.nSeqs_per_img'], {'axis': '(0)'}), '(attributes, self.opt.nSeqs_per_img, axis=0)\n', (3879, 3923), True, 'import numpy as np\n'), ((3945, 3996), 'numpy.repeat', 'np.repeat', (['features', 'self.opt.nSeqs_per_img'], {'axis': '(0)'}), '(features, self.opt.nSeqs_per_img, axis=0)\n', (3954, 3996), True, 'import numpy as np\n'), ((4023, 4079), 'numpy.repeat', 'np.repeat', (['image_feature', 'self.opt.nSeqs_per_img'], {'axis': '(0)'}), '(image_feature, self.opt.nSeqs_per_img, axis=0)\n', (4032, 4079), True, 'import numpy as np\n'), ((4103, 4156), 'numpy.reshape', 'np.reshape', (['input_seqs', '[-1, self.opt.seq_length + 1]'], {}), '(input_seqs, [-1, self.opt.seq_length + 1])\n', (4113, 4156), True, 'import numpy as np\n'), ((4179, 4233), 'numpy.reshape', 'np.reshape', (['target_seqs', '[-1, self.opt.seq_length + 1]'], {}), '(target_seqs, [-1, self.opt.seq_length + 1])\n', (4189, 4233), True, 'import numpy as np\n'), ((2059, 2101), 'numpy.zeros', 'np.zeros', (['(batch_size, self.opt.attr_size)'], {}), '((batch_size, self.opt.attr_size))\n', (2067, 2101), True, 'import numpy as np\n'), ((2139, 2210), 
'numpy.zeros', 'np.zeros', (['(batch_size, self.opt.nRegions, self.opt.image_encoding_size)'], {}), '((batch_size, self.opt.nRegions, self.opt.image_encoding_size))\n', (2147, 2210), True, 'import numpy as np\n'), ((2252, 2304), 'numpy.zeros', 'np.zeros', (['(batch_size, self.opt.image_encoding_size)'], {}), '((batch_size, self.opt.image_encoding_size))\n', (2260, 2304), True, 'import numpy as np\n'), ((2344, 2415), 'numpy.zeros', 'np.zeros', (['(batch_size, self.opt.nSeqs_per_img, self.opt.seq_length + 1)'], {}), '((batch_size, self.opt.nSeqs_per_img, self.opt.seq_length + 1))\n', (2352, 2415), True, 'import numpy as np\n'), ((2451, 2522), 'numpy.zeros', 'np.zeros', (['(batch_size, self.opt.nSeqs_per_img, self.opt.seq_length + 1)'], {}), '((batch_size, self.opt.nSeqs_per_img, self.opt.seq_length + 1))\n', (2459, 2522), True, 'import numpy as np\n'), ((2871, 2894), 'random.shuffle', 'random.shuffle', (['data_ix'], {}), '(data_ix)\n', (2885, 2894), False, 'import random\n'), ((4291, 4317), 'numpy.int32', 'np.int32', (['(target_seqs == 0)'], {}), '(target_seqs == 0)\n', (4299, 4317), True, 'import numpy as np\n')] |
# general imports
import pickle
import random
import mdtraj as md
import numpy as np
# Imports from the openff toolkit
import openff.toolkit
import torch
from mdtraj import Trajectory
from openff.toolkit.typing.engines.smirnoff import ForceField
from torchani.models import ANI2x
from tqdm import tqdm
# SMIRNOFF force field defining the MM end state.
forcefield = ForceField('openff_unconstrained-2.0.0.offxml')
print(openff.toolkit._version.get_versions())
# Imports from openMM
from NNPOps import OptimizedTorchANI
from openmm import LangevinIntegrator, Platform, unit
from openmm.app import Simulation
##################################
# Unit system and simulation constants shared by all routines below.
# Conversion factor: 1 Hartree = 2625.5 kJ/mol (applied to ANI energies).
hartree_to_kJ_mol = 2625.5
# Atomic masses in daltons for the element symbols handled by this script.
mass_dict_in_daltons = {"H": 1.0, "C": 12.0, "N": 14.0, "O": 16.0}
distance_unit = unit.angstrom
time_unit = unit.femtoseconds
speed_unit = distance_unit / time_unit
energy_unit = unit.kilojoule_per_mole
# Langevin integration parameters: 1 fs timestep, 1/ps friction, 300 K bath.
stepsize = 1 * time_unit
collision_rate = 1 / unit.picosecond
temperature = 300 * unit.kelvin
from openmmtools.constants import kB
# Thermal energy at the simulation temperature.
kBT = kB * temperature
##################################
class NNP(torch.nn.Module):
    """ANI-2x potential for a single molecule, accelerated with NNPOps."""

    def __init__(self, molecule, platform):
        """
        Parameters
        ----------
        molecule : openff molecule with .atoms carrying atomic_number/element
        platform : str, 'cpu' or 'cuda'
        """
        super().__init__()
        self.platform = platform
        # Atomic numbers of the molecule, batched for torchani.
        self.species = torch.tensor([[atom.atomic_number for atom in molecule.atoms]], device=self.platform)
        # Build an ANI-2x model and accelerate it with NNPOps.
        base_model = ANI2x(periodic_table_index=True)
        self.model = OptimizedTorchANI(base_model, self.species).to(device=self.platform)
        # Element symbols concatenated into one string, one char per atom.
        self.atoms = ''.join([a.element.symbol for a in molecule.atoms])

    def _calculate_energy(self, coordinates: torch.tensor):
        """
        Helper that evaluates the potential for a coordinate tensor.

        Parameters
        ----------
        coordinates : torch.tensor
            coordinates in angstrom without units attached

        Returns
        -------
        energy_in_kJ_mol : torch.tensor
        """
        hartree_energy = self.model((self.species, coordinates)).energies
        # Convert from hartrees to kJ/mol.
        return hartree_energy * hartree_to_kJ_mol

    def calculate_force(self, coordinates: unit.Quantity):
        """
        Return forces (via autograd) and energy for a unit'd coordinate set.

        Parameters
        ----------
        coordinates : numpy array in angstrom
            initial configuration

        Returns
        -------
        F, E : float, in kJ/mol/A, kJ/mol
        """
        assert type(coordinates) is unit.Quantity
        xyz = torch.tensor([coordinates.value_in_unit(unit.angstrom)], dtype=torch.float32, requires_grad=True, device=self.platform)
        energy_in_kJ_mol = self._calculate_energy(xyz)
        # Gradient of E (kJ/mol) w.r.t. coordinates (angstrom); force = -grad.
        gradient = torch.autograd.grad(energy_in_kJ_mol.sum(), xyz)[0]
        if self.platform == 'cpu':
            forces = -np.array(gradient)
        elif self.platform == 'cuda':
            forces = -np.array(gradient.cpu())
        else:
            raise RuntimeError('Platform needs to be specified. Either CPU or CUDA.')
        return forces * (unit.kilojoule_per_mole / unit.angstrom), energy_in_kJ_mol * unit.kilojoule_per_mole

    def calculate_energy(self, coordinates: unit.Quantity):
        """
        Return the potential energy of a unit'd coordinate set.

        Parameters
        ----------
        coordinates : unit'd
            coordinates

        Returns
        -------
        energy : unit'd numpy value in kJ/mol
        """
        assert type(coordinates) is unit.Quantity
        xyz = torch.tensor([coordinates.value_in_unit(unit.angstrom)], dtype=torch.float32, requires_grad=True, device=self.platform)
        return self._calculate_energy(xyz).detach().cpu().numpy() * unit.kilojoule_per_mole
class LangevinDynamics(object):
    """Unadjusted Langevin dynamics driven by an external force provider."""

    def __init__(self, atoms: str, energy_and_force):
        """
        Parameters
        ----------
        atoms : str
            concatenated element symbols, one character per atom
        energy_and_force : object
            provider exposing calculate_force(x) -> (F, E)
        """
        self.energy_and_force = energy_and_force
        self.temperature = temperature
        self.atoms = atoms

    def run_dynamics(
        self,
        x0: np.ndarray,
        n_steps: int = 100,
        stepsize: unit.quantity.Quantity = 1.0 * unit.femtosecond,
        collision_rate: unit.quantity.Quantity = 10 /unit.picoseconds,
        progress_bar: bool = False,
        temperature=None
    ):
        """Unadjusted Langevin dynamics.

        Parameters
        ----------
        x0 : array of floats, in Angstrom
            initial configuration
        n_steps : integer
            number of Langevin steps
        stepsize : float > 0, in units of femtoseconds
        collision_rate : float > 0, in units of 1/ps
            controls the rate of interaction with the heat bath
        progress_bar : bool
            use tqdm to show progress bar
        temperature : unit.Quantity, optional
            heat-bath temperature; defaults to the instance temperature

        Returns
        -------
        traj : [n_steps + 1 x dim] array of floats, unit'd
            trajectory of samples generated by Langevin dynamics
        energy : list of floats in kJ/mol
        """
        assert type(x0) == unit.Quantity
        assert type(stepsize) == unit.Quantity
        assert type(collision_rate) == unit.Quantity

        # 'is None' (not '== None'): identity check avoids invoking
        # Quantity.__eq__ on a unit'd temperature.
        if temperature is None:
            temperature = self.temperature
        assert type(temperature) == unit.Quantity

        # generate mass arrays
        masses = np.array([mass_dict_in_daltons[a] for a in self.atoms]) * unit.dalton
        # per-atom Maxwell-Boltzmann velocity scale sqrt(kB*T/m)
        sigma_v = np.array([unit.sqrt(kB * temperature / m) / speed_unit for m in masses]) * speed_unit
        v0 = np.random.randn(len(sigma_v), 3) * sigma_v[:, None]
        # convert initial state numpy arrays with correct attached units
        x = np.array(x0.value_in_unit(distance_unit)) * distance_unit  # x.shape = [N_atoms][3]
        v = np.array(v0.value_in_unit(speed_unit)) * speed_unit  # v.shape = [N_atoms][3]
        # traj is accumulated as a list of arrays with attached units
        traj = [x]
        # dimensionless scalars of the Ornstein-Uhlenbeck (O) substep
        a = np.exp(-collision_rate * stepsize)
        b = np.sqrt(1 - np.exp(-2 * collision_rate * stepsize))
        # compute force on initial configuration
        F, E = self.energy_and_force.calculate_force(x)  # F.shape = [1][N_atoms][3]
        F = F[0]  # to transform F.shape to [N_atoms][3]
        # energy is saved as a list
        energy = [E]
        trange = range(n_steps)
        if progress_bar:
            trange = tqdm(trange)
        # main loop: half kick / half drift / thermostat / half drift / half kick
        for _ in trange:
            # v
            v += (stepsize * 0.5) * F / masses[:, None]
            # r
            x += (stepsize * 0.5) * v
            # o
            v = (a * v) + (b * sigma_v[:, None] * np.random.randn(*x.shape))
            # r
            x += (stepsize * 0.5) * v
            F, E = self.energy_and_force.calculate_force(x)
            F = F[0]  # to transform F.shape to [N_atoms][3]
            energy.append(E)
            # v, with the force at the updated positions
            v += (stepsize * 0.5) * F / masses[:, None]

            norm_F = np.linalg.norm(F)
            # report gradient norm
            if progress_bar:
                trange.set_postfix({"|force|": norm_F})
            # abort early if positions or forces blow up
            if (not np.isfinite(x).all()) or (not np.isfinite(norm_F)):
                print("Numerical instability encountered!")
                return traj, energy
            traj.append(x)
        return traj, energy
def save_traj(samples, molecule, name=''):
    """Write samples as a DCD trajectory; the topology is round-tripped
    through a temporary PDB file ('test.pdb') because mdtraj needs one.

    :param samples: list of coordinate frames
    :param molecule: openff molecule providing the topology
    :param name: optional output file name; defaults to 'test.dcd'
    :return: the mdtraj Trajectory that was written
    """
    molecule.to_file('test.pdb', file_format='pdb')
    topology = md.load("test.pdb").topology
    traj = Trajectory(samples, topology=topology)
    out_file = name if name else 'test.dcd'
    traj.save_dcd(out_file)
    return traj
def create_mm_sim(molecule):
    """Create vacuum simulation system"""
    # Run the MM reference on the CPU with two threads.
    cpu_platform = Platform.getPlatformByName('CPU')
    cpu_properties = {"Threads": "2"}
    openff_topology = molecule.to_topology()
    system = forcefield.create_openmm_system(openff_topology)
    integrator = LangevinIntegrator(temperature, collision_rate, stepsize)
    sim = Simulation(openff_topology, system, integrator, platform=cpu_platform, platformProperties=cpu_properties)
    # Start from a minimized conformer with thermalized velocities.
    molecule.generate_conformers()
    sim.context.setPositions(molecule.conformers[0])
    sim.minimizeEnergy()
    sim.context.setVelocitiesToTemperature(temperature)
    return sim
def collect_samples_ani(molecule, n_samples=1_000, n_steps_per_sample=10_000, platform='cuda'):
    """generate samples using ANI2x

    Runs n_samples short Langevin trajectories of n_steps_per_sample steps
    each and keeps the last frame of every run, continuing each run from the
    previous one's end point (mirrors collect_samples_mm).
    """
    print(f'Generate samples with QML: {n_samples=}, {n_steps_per_sample=}')
    energy_and_force = NNP(molecule, platform=platform)
    langevine = LangevinDynamics(energy_and_force.atoms,energy_and_force)
    # starting position for ANI
    positions = molecule.conformers[0]
    samples = []
    for _ in tqdm(range(n_samples)):
        # BUG FIX: the loop used to overwrite `samples` with each run's whole
        # trajectory (discarding earlier runs) and restart from the same
        # positions. Accumulate one decorrelated frame per run instead.
        traj, energy = langevine.run_dynamics(positions, n_steps_per_sample, stepsize=stepsize)
        positions = traj[-1]
        samples.append(positions)
    return samples
def get_positions(sim):
    """get position of system in a state"""
    current_state = sim.context.getState(getPositions=True)
    return current_state.getPositions(asNumpy=True)
def collect_samples_mm(_, sim, n_samples=1_000, n_steps_per_sample=10_000):
    """Draw n_samples frames from an MM simulation.

    The first (unused) argument keeps the call signature interchangeable with
    collect_samples_ani, which receives the molecule there.
    """
    print(f'Generate samples with MM: {n_samples=}, {n_steps_per_sample=}')
    frames = []
    for _ in tqdm(range(n_samples)):
        # advance the simulation between snapshots so frames decorrelate
        sim.step(n_steps_per_sample)
        frames.append(get_positions(sim))
    return frames
def compute_mm_energy(sim, positions):
    """compute mm energy for given positions"""
    sim.context.setPositions(positions)
    state = sim.context.getState(getEnergy=True)
    return state.getPotentialEnergy()
def compute_mm_force(sim, positions):
    """compute mm forces given a position"""
    sim.context.setPositions(positions)
    state = sim.context.getState(getForces=True)
    return state.getForces(asNumpy=True)
def mixing(a, b, lambda_value):
    """Linearly interpolate between a (at lambda=0) and b (at lambda=1)."""
    weight_a = 1. - lambda_value
    return (weight_a * a) + (lambda_value * b)
def neq_from_ani_to_mm(molecule,
                       n_samples: int,
                       switching_length: int,
                       n_steps_per_sample: int,
                       nr_of_switches: int,
                       save_samples: str = "",
                       load_samples: str = "",
                       platform: str = 'cuda'):
    """NEQ switching from ANI to MM.

    Equilibrium samples are drawn at the ANI end state (lambda=0) and switched
    to the MM end state (lambda=1) over `switching_length` lambda steps.

    Args:
        molecule: openff molecule describing the system.
        n_samples: number of equilibrium samples to generate.
        switching_length: number of lambda steps per switching trajectory.
        n_steps_per_sample: MD steps between consecutive equilibrium samples.
        nr_of_switches: number of independent switching trajectories.
        save_samples: optional pickle path to store generated samples.
        load_samples: optional pickle path to reuse pregenerated samples.
        platform: 'cpu' or 'cuda' for the ANI potential.

    Returns:
        work values collected by _noneq_sampling_and_switching.
    """
    from functools import partial
    print('Performing NEQ switching from ANI to MM representation of molecule')
    # define end-state systems
    energy_and_force = NNP(molecule, platform=platform)
    sim = create_mm_sim(molecule)

    # mixed potential: (1 - lambda) * ANI + lambda * MM
    def _calculate_energy(x: unit.Quantity, lamb: float):
        assert type(x) == unit.Quantity
        assert lamb >= 0.0 and lamb <= 1.0
        ani_e = energy_and_force.calculate_energy(x)
        mm_e = compute_mm_energy(sim, x)
        return mixing(ani_e, mm_e, lamb)

    def _calculate_forces(x: unit.Quantity, lamb: float):
        assert type(x) == unit.Quantity
        assert lamb >= 0.0 and lamb <= 1.0
        ani_f = energy_and_force.calculate_force(x)[0]
        mm_f = compute_mm_force(sim, x)
        return mixing(ani_f[0], mm_f, lamb)

    # Sampling happens at the ANI end state. BUG FIX: forward `platform` so
    # sampling runs on the same device as the switching potential; previously
    # collect_samples_ani always fell back to its default 'cuda'.
    generate_samples = partial(collect_samples_ani, n_samples=n_samples,
                               n_steps_per_sample=n_steps_per_sample,
                               platform=platform)
    # call switching routine
    ws = _noneq_sampling_and_switching(nr_of_switches=nr_of_switches,
                                       molecule=molecule,
                                       generate_samples=generate_samples,
                                       calculate_force=_calculate_forces,
                                       calculate_energy=_calculate_energy,
                                       switching_length=switching_length,
                                       save_samples=save_samples,
                                       load_samples=load_samples
                                       )
    return ws
def neq_from_mm_to_ani(molecule,
                       n_samples: int,
                       n_steps_per_sample: int,
                       nr_of_switches: int,
                       switching_length: int,
                       save_samples: str = "",
                       load_samples: str = "",
                       platform: str = 'cuda'):
    """NEQ switching from MM to ANI.

    Equilibrium samples are drawn with the classical force field (lambda=0)
    and switched to the ANI representation (lambda=1) over `switching_length`
    lambda steps.
    """
    from functools import partial
    print('Performing NEQ switching from MM to ANI representation of molecule')
    # end-state potentials
    qml_potential = NNP(molecule, platform=platform)
    mm_simulation = create_mm_sim(molecule)

    # mixed potential: (1 - lambda) * MM + lambda * ANI
    def _mixed_energy(x: unit.Quantity, lamb: float):
        assert type(x) == unit.Quantity
        assert lamb >= 0.0 and lamb <= 1.0
        mm_e = compute_mm_energy(mm_simulation, x)
        qml_e = qml_potential.calculate_energy(x)
        return mixing(mm_e, qml_e, lamb)

    def _mixed_forces(x: unit.Quantity, lamb: float):
        assert type(x) == unit.Quantity
        assert lamb >= 0.0 and lamb <= 1.0
        mm_f = compute_mm_force(mm_simulation, x)
        qml_f = qml_potential.calculate_force(x)[0]
        return mixing(mm_f, qml_f[0], lamb)

    # equilibrium sampling at the MM end state
    sampler = partial(collect_samples_mm, sim=mm_simulation,
                      n_samples=n_samples, n_steps_per_sample=n_steps_per_sample)
    # hand everything to the shared switching routine
    return _noneq_sampling_and_switching(nr_of_switches=nr_of_switches,
                                         molecule=molecule,
                                         generate_samples=sampler,
                                         calculate_force=_mixed_forces,
                                         calculate_energy=_mixed_energy,
                                         switching_length=switching_length,
                                         save_samples=save_samples,
                                         load_samples=load_samples)
def _noneq_sampling_and_switching(
    nr_of_switches:int,
    molecule,
    generate_samples,
    calculate_force,
    calculate_energy,
    switching_length:int,
    save_samples:str,
    load_samples:str,
    ):
    """
    Use nonequilibrium switching to calculate work values from the
    'from_system' to the 'to_system', starting with 'from_system' samples.

    Lambda is driven linearly from 0 to 1 over ``switching_length`` steps
    while propagating Langevin dynamics; the protocol work accumulated along
    each of the ``nr_of_switches`` trajectories is returned in kJ/mol.
    """
    # linear lambda schedule: 0 = initial Hamiltonian, 1 = target Hamiltonian
    lam_values = np.linspace(0,1,switching_length)
    print(f'Perform NEQ using: {nr_of_switches=}, {switching_length=}')
    # generate mass arrays
    atoms = ''.join([a.element.symbol for a in molecule.atoms])
    masses = np.array([mass_dict_in_daltons[a] for a in atoms]) * unit.daltons
    # dimensionless scalars for the Langevin friction (O) step
    a = np.exp(- collision_rate * stepsize)
    b = np.sqrt(1 - np.exp(-2 * collision_rate * stepsize))
    # generate sigma_v: per-atom Maxwell-Boltzmann velocity standard deviations
    sigma_v = np.array([unit.sqrt(kB * temperature / m) / speed_unit for m in masses]) * speed_unit
    # generate samples
    print('Start generating samples ...')
    if load_samples:
        print('Loading prgenerated samples ...')
        # NOTE(review): pickle.load executes code on untrusted data — only
        # load sample files you generated yourself.
        samples = pickle.load(open(load_samples, 'rb'))
    else:
        samples = generate_samples(molecule)
        if save_samples:
            pickle.dump(samples, open(save_samples, 'wb+'))
    print('Samples generated ...')
    # w_list contains the work values for each switching protocol
    w_list = []
    print("Start with switching protocoll ...")
    for switch_nr in tqdm(range(nr_of_switches)):
        # traj accumulates the samples
        traj = []
        # select starting conformations
        x = np.array(random.choice(samples).value_in_unit(distance_unit)) * distance_unit
        # initial force
        F = calculate_force(x, lamb=0.)
        # seed velocities from boltzmann distribution
        v0 = np.random.randn(len(sigma_v), 3) * sigma_v[:, None]
        v = np.array(v0.value_in_unit(speed_unit)) * speed_unit
        w = 0.0 * unit.kilojoule_per_mole
        # one V-R-O-R-V Langevin step per lambda increment
        for idx in range(1, switching_length):
            # v
            v += (stepsize * 0.5) * F / masses[:, None]
            # r
            x += (stepsize * 0.5) * v
            # o
            v = (a * v) + (b * sigma_v[:, None] * np.random.randn(*x.shape))
            # r
            x += (stepsize * 0.5) * v
            # calculate F
            F = calculate_force(x, lamb=lam_values[idx])
            # v
            v += (stepsize * 0.5) * F / masses[:, None]
            traj.append(x)
            # calculate work
            # evaluate u_t(x_t) - u_{t-1}(x_t)
            u_now = calculate_energy(x, lamb=lam_values[idx])
            u_before = calculate_energy(x, lamb=lam_values[idx-1])
            w += (u_now - u_before)
        w_list.append(w.value_in_unit(unit.kilojoule_per_mole))
        if save_samples:
            print(f'NEQ switching work: {w}')
            print('##################')
            save_traj(traj, molecule, name=f'{save_samples.split(".")[0]}_{switch_nr}.dcd')
    return np.array(w_list) * unit.kilojoule_per_mole
| [
"mdtraj.load",
"numpy.linalg.norm",
"openmm.Platform.getPlatformByName",
"numpy.exp",
"numpy.random.randn",
"openff.toolkit.typing.engines.smirnoff.ForceField",
"numpy.isfinite",
"torchani.models.ANI2x",
"numpy.linspace",
"functools.partial",
"tqdm.tqdm",
"openmm.unit.sqrt",
"openmm.app.Simu... | [((317, 364), 'openff.toolkit.typing.engines.smirnoff.ForceField', 'ForceField', (['"""openff_unconstrained-2.0.0.offxml"""'], {}), "('openff_unconstrained-2.0.0.offxml')\n", (327, 364), False, 'from openff.toolkit.typing.engines.smirnoff import ForceField\n'), ((7870, 7903), 'mdtraj.Trajectory', 'Trajectory', (['samples'], {'topology': 'top'}), '(samples, topology=top)\n', (7880, 7903), False, 'from mdtraj import Trajectory\n'), ((8110, 8143), 'openmm.Platform.getPlatformByName', 'Platform.getPlatformByName', (['"""CPU"""'], {}), "('CPU')\n", (8136, 8143), False, 'from openmm import LangevinIntegrator, Platform, unit\n'), ((8209, 8266), 'openmm.LangevinIntegrator', 'LangevinIntegrator', (['temperature', 'collision_rate', 'stepsize'], {}), '(temperature, collision_rate, stepsize)\n', (8227, 8266), False, 'from openmm import LangevinIntegrator, Platform, unit\n'), ((8372, 8466), 'openmm.app.Simulation', 'Simulation', (['topology', 'system', 'integrator'], {'platform': 'platform', 'platformProperties': 'properties'}), '(topology, system, integrator, platform=platform,\n platformProperties=properties)\n', (8382, 8466), False, 'from openmm.app import Simulation\n'), ((11675, 11768), 'functools.partial', 'partial', (['collect_samples_ani'], {'n_samples': 'n_samples', 'n_steps_per_sample': 'n_steps_per_sample'}), '(collect_samples_ani, n_samples=n_samples, n_steps_per_sample=\n n_steps_per_sample)\n', (11682, 11768), False, 'from functools import partial\n'), ((13817, 13917), 'functools.partial', 'partial', (['collect_samples_mm'], {'sim': 'sim', 'n_samples': 'n_samples', 'n_steps_per_sample': 'n_steps_per_sample'}), '(collect_samples_mm, sim=sim, n_samples=n_samples,\n n_steps_per_sample=n_steps_per_sample)\n', (13824, 13917), False, 'from functools import partial\n'), ((14904, 14939), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'switching_length'], {}), '(0, 1, switching_length)\n', (14915, 14939), True, 'import numpy as np\n'), ((15232, 
15266), 'numpy.exp', 'np.exp', (['(-collision_rate * stepsize)'], {}), '(-collision_rate * stepsize)\n', (15238, 15266), True, 'import numpy as np\n'), ((1212, 1302), 'torch.tensor', 'torch.tensor', (['[[atom.atomic_number for atom in molecule.atoms]]'], {'device': 'self.platform'}), '([[atom.atomic_number for atom in molecule.atoms]], device=self\n .platform)\n', (1224, 1302), False, 'import torch\n'), ((1352, 1384), 'torchani.models.ANI2x', 'ANI2x', ([], {'periodic_table_index': '(True)'}), '(periodic_table_index=True)\n', (1357, 1384), False, 'from torchani.models import ANI2x\n'), ((6242, 6276), 'numpy.exp', 'np.exp', (['(-collision_rate * stepsize)'], {}), '(-collision_rate * stepsize)\n', (6248, 6276), True, 'import numpy as np\n'), ((7829, 7849), 'mdtraj.load', 'md.load', (['f"""test.pdb"""'], {}), "(f'test.pdb')\n", (7836, 7849), True, 'import mdtraj as md\n'), ((15129, 15179), 'numpy.array', 'np.array', (['[mass_dict_in_daltons[a] for a in atoms]'], {}), '([mass_dict_in_daltons[a] for a in atoms])\n', (15137, 15179), True, 'import numpy as np\n'), ((17520, 17536), 'numpy.array', 'np.array', (['w_list'], {}), '(w_list)\n', (17528, 17536), True, 'import numpy as np\n'), ((5612, 5667), 'numpy.array', 'np.array', (['[mass_dict_in_daltons[a] for a in self.atoms]'], {}), '([mass_dict_in_daltons[a] for a in self.atoms])\n', (5620, 5667), True, 'import numpy as np\n'), ((6669, 6681), 'tqdm.tqdm', 'tqdm', (['trange'], {}), '(trange)\n', (6673, 6681), False, 'from tqdm import tqdm\n'), ((7305, 7322), 'numpy.linalg.norm', 'np.linalg.norm', (['F'], {}), '(F)\n', (7319, 7322), True, 'import numpy as np\n'), ((15288, 15326), 'numpy.exp', 'np.exp', (['(-2 * collision_rate * stepsize)'], {}), '(-2 * collision_rate * stepsize)\n', (15294, 15326), True, 'import numpy as np\n'), ((1437, 1480), 'NNPOps.OptimizedTorchANI', 'OptimizedTorchANI', (['self.model', 'self.species'], {}), '(self.model, self.species)\n', (1454, 1480), False, 'from NNPOps import OptimizedTorchANI\n'), 
((3095, 3115), 'numpy.array', 'np.array', (['derivative'], {}), '(derivative)\n', (3103, 3115), True, 'import numpy as np\n'), ((6301, 6339), 'numpy.exp', 'np.exp', (['(-2 * collision_rate * stepsize)'], {}), '(-2 * collision_rate * stepsize)\n', (6307, 6339), True, 'import numpy as np\n'), ((6980, 7005), 'numpy.random.randn', 'np.random.randn', (['*x.shape'], {}), '(*x.shape)\n', (6995, 7005), True, 'import numpy as np\n'), ((7545, 7564), 'numpy.isfinite', 'np.isfinite', (['norm_F'], {}), '(norm_F)\n', (7556, 7564), True, 'import numpy as np\n'), ((15376, 15407), 'openmm.unit.sqrt', 'unit.sqrt', (['(kB * temperature / m)'], {}), '(kB * temperature / m)\n', (15385, 15407), False, 'from openmm import LangevinIntegrator, Platform, unit\n'), ((16727, 16752), 'numpy.random.randn', 'np.random.randn', (['*x.shape'], {}), '(*x.shape)\n', (16742, 16752), True, 'import numpy as np\n'), ((5710, 5741), 'openmm.unit.sqrt', 'unit.sqrt', (['(kB * temperature / m)'], {}), '(kB * temperature / m)\n', (5719, 5741), False, 'from openmm import LangevinIntegrator, Platform, unit\n'), ((16123, 16145), 'random.choice', 'random.choice', (['samples'], {}), '(samples)\n', (16136, 16145), False, 'import random\n'), ((7515, 7529), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (7526, 7529), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import numpy
from pyscf import gto, scf, ao2mo

'''
Customizing the Hamiltonian for the SCF module.

Three steps to define a model Hamiltonian for SCF:
1. Specify the number of electrons (the mole object must be "built" before
   doing this step).
2. Overwrite three attributes of the scf object:
       .get_hcore
       .get_ovlp
       ._eri
3. Specify an initial guess (to overwrite the default atomic density
   initial guess).

A warning like the following is expected on screen:
    overwrite keys get_ovlp get_hcore of <class 'pyscf.scf.hf.RHF'>
'''

mol = gto.M()
n = 10
mol.nelectron = n

mf = scf.RHF(mol)

# Nearest-neighbour hopping matrix (t = -1) with a wrap-around term for PBC.
h1 = numpy.zeros((n, n))
sites = numpy.arange(n - 1)
h1[sites, sites + 1] = -1.0
h1[sites + 1, sites] = -1.0
h1[n - 1, 0] = h1[0, n - 1] = -1.0  # PBC

# On-site two-electron repulsion (U = 4) on the diagonal only.
eri = numpy.zeros((n, n, n, n))
diag = numpy.arange(n)
eri[diag, diag, diag, diag] = 4.0

mf.get_hcore = lambda *args: h1
mf.get_ovlp = lambda *args: numpy.eye(n)
# ao2mo.restore(8, eri, n) to get 8-fold permutation symmetry of the integrals
# ._eri only supports the two-electron integrals in 4-fold or 8-fold symmetry.
mf._eri = ao2mo.restore(8, eri, n)

mf.kernel()
| [
"pyscf.ao2mo.restore",
"numpy.zeros",
"pyscf.gto.M",
"pyscf.scf.RHF",
"numpy.eye"
] | [((582, 589), 'pyscf.gto.M', 'gto.M', ([], {}), '()\n', (587, 589), False, 'from pyscf import gto, scf, ao2mo\n'), ((621, 633), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (628, 633), False, 'from pyscf import gto, scf, ao2mo\n'), ((639, 658), 'numpy.zeros', 'numpy.zeros', (['(n, n)'], {}), '((n, n))\n', (650, 658), False, 'import numpy\n'), ((754, 779), 'numpy.zeros', 'numpy.zeros', (['(n, n, n, n)'], {}), '((n, n, n, n))\n', (765, 779), False, 'import numpy\n'), ((1061, 1085), 'pyscf.ao2mo.restore', 'ao2mo.restore', (['(8)', 'eri', 'n'], {}), '(8, eri, n)\n', (1074, 1085), False, 'from pyscf import gto, scf, ao2mo\n'), ((880, 892), 'numpy.eye', 'numpy.eye', (['n'], {}), '(n)\n', (889, 892), False, 'import numpy\n')] |
import time
import warnings
from collections import deque
from enum import IntEnum
import numpy as np
from numpy import array
from recordtype import recordtype
from flatland.envs.agent_utils import RailAgentStatus
from flatland.utils.graphics_pil import PILGL, PILSVG
from flatland.utils.graphics_pgl import PGLGL
# TODO: suggested renaming to RailEnvRenderTool, as it will only work with RailEnv!
class AgentRenderVariant(IntEnum):
    """How agents are drawn by the renderer.

    BOX_ONLY: only mark the occupied cell with a box.
    ONE_STEP_BEHIND: draw the agent at its previous (old) position.
    AGENT_SHOWS_OPTIONS: draw the agent plus its possible transitions.
    The *_AND_BOX variants additionally mark the occupied cell with a box.
    """
    BOX_ONLY = 0
    ONE_STEP_BEHIND = 1
    AGENT_SHOWS_OPTIONS = 2
    ONE_STEP_BEHIND_AND_BOX = 3
    AGENT_SHOWS_OPTIONS_AND_BOX = 4
class RenderTool(object):
    """ RenderTool is a facade to a renderer.
    (This was introduced for the Browser / JS renderer which has now been removed.)
    """

    def __init__(self, env, gl="PGL", jupyter=False,
                 agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
                 show_debug=False, clear_debug_text=True, screen_width=800, screen_height=600,
                 host="localhost", port=None):
        """Create the facade and its delegate renderer.

        gl selects the graphics layer ("PIL", "PILSVG" or "PGL"); any other
        value falls back to "PGL".
        """
        self.env = env
        self.frame_nr = 0
        self.start_time = time.time()
        self.times_list = deque()
        self.agent_render_variant = agent_render_variant

        if gl not in ["PIL", "PILSVG", "PGL"]:
            # BUGFIX: previously an unknown gl only printed a message and left
            # self.renderer / self.gl unset, so the first render_env() or
            # reset() call raised AttributeError. Now we really fall back.
            print("[", gl, "] not found, switch to PGL")
            gl = "PGL"
        self.renderer = RenderLocal(env, gl, jupyter,
                                    agent_render_variant,
                                    show_debug, clear_debug_text, screen_width, screen_height)
        self.gl = self.renderer.gl

    def render_env(self,
                   show=False,  # whether to call matplotlib show() or equivalent after completion
                   show_agents=True,  # whether to include agents
                   show_inactive_agents=False,  # whether to show agents before they start
                   show_observations=True,  # whether to include observations
                   show_predictions=False,  # whether to include predictions
                   show_rowcols=False,  # label the rows and columns
                   frames=False,  # frame counter to show (intended since invocation)
                   episode=None,  # int episode number to show
                   step=None,  # int step number to show in image
                   selected_agent=None,  # indicate which agent is "selected" in the editor
                   return_image=False):  # indicate if image is returned for use in monitor
        """Delegate rendering to the underlying renderer."""
        return self.renderer.render_env(show, show_agents, show_inactive_agents, show_observations,
                                        show_predictions, show_rowcols, frames, episode, step, selected_agent, return_image)

    def close_window(self):
        self.renderer.close_window()

    def reset(self):
        self.renderer.reset()

    def set_new_rail(self):
        """Signal the delegate that the rail layout has changed."""
        self.renderer.set_new_rail()
        self.renderer.env = self.env  # bit of a hack - copy our env to the delegate

    def update_background(self):
        self.renderer.update_background()

    def get_endpoint_URL(self):
        """ Returns a string URL for the root of the HTTP server
            TODO: Need to update this work work on a remote server!  May be tricky...
        """
        # return "http://localhost:{}".format(self.renderer.get_port())
        if hasattr(self.renderer, "get_endpoint_url"):
            return self.renderer.get_endpoint_url()
        else:
            print("Attempt to get_endpoint_url from RenderTool - only supported with BROWSER")
            return None

    def get_image(self):
        """Return the current frame as an image, or None if unsupported."""
        if hasattr(self.renderer, "gl"):
            return self.renderer.gl.get_image()
        else:
            print("Attempt to retrieve image from RenderTool - not supported with BROWSER")
            return None
class RenderBase(object):
    """Interface for environment renderers; every hook is a no-op default.

    Concrete renderers (e.g. RenderLocal) override these methods.
    """

    def __init__(self, env):
        """Accept the environment; the base class keeps no state."""

    def render_env(self):
        """Render the environment (no-op in the base class)."""

    def close_window(self):
        """Close any open rendering window (no-op in the base class)."""

    def reset(self):
        """Reset renderer state (no-op in the base class)."""

    def set_new_rail(self):
        """ Signal to the renderer that the env has changed and will need re-rendering.
        """

    def update_background(self):
        """ A lesser version of set_new_rail?
            TODO: can update_background be pruned for simplicity?
        """
class RenderLocal(RenderBase):
""" Class to render the RailEnv and agents.
Uses two layers, layer 0 for rails (mostly static), layer 1 for agents etc (dynamic)
The lower / rail layer 0 is only redrawn after set_new_rail() has been called.
Created with a "GraphicsLayer" or gl - now either PIL or PILSVG
"""
visit = recordtype("visit", ["rc", "iDir", "iDepth", "prev"])
color_list = list("brgcmyk")
# \delta RC for NESW
transitions_row_col = np.array([[-1, 0], [0, 1], [1, 0], [0, -1]])
pix_per_cell = 1 # misnomer...
half_pix_per_cell = pix_per_cell / 2
x_y_half = array([half_pix_per_cell, -half_pix_per_cell])
row_col_to_xy = array([[0, -pix_per_cell], [pix_per_cell, 0]])
grid = array(np.meshgrid(np.arange(10), -np.arange(10))) * array([[[pix_per_cell]], [[pix_per_cell]]])
theta = np.linspace(0, np.pi / 2, 5)
arc = array([np.cos(theta), np.sin(theta)]).T # from [1,0] to [0,1]
def __init__(self, env, gl="PILSVG", jupyter=False,
agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
show_debug=False, clear_debug_text=True, screen_width=800, screen_height=600):
self.env = env
self.frame_nr = 0
self.start_time = time.time()
self.times_list = deque()
self.agent_render_variant = agent_render_variant
self.gl_str = gl
if gl == "PIL":
self.gl = PILGL(env.width, env.height, jupyter, screen_width=screen_width, screen_height=screen_height)
elif gl == "PILSVG":
self.gl = PILSVG(env.width, env.height, jupyter, screen_width=screen_width, screen_height=screen_height)
else:
if gl != "PGL":
print("[", gl, "] not found, switch to PGL, PILSVG")
print("Using PGL")
self.gl = PGLGL(env.width, env.height, jupyter, screen_width=screen_width, screen_height=screen_height)
self.new_rail = True
self.show_debug = show_debug
self.clear_debug_text = clear_debug_text
self.update_background()
def reset(self):
"""
Resets the environment
:return:
"""
self.set_new_rail()
self.frame_nr = 0
self.start_time = time.time()
self.times_list = deque()
return
def update_background(self):
# create background map
targets = {}
for agent_idx, agent in enumerate(self.env.agents):
if agent is None:
continue
#print(f"updatebg: {agent_idx} {agent.target}")
targets[tuple(agent.target)] = agent_idx
self.gl.build_background_map(targets)
def resize(self):
self.gl.resize(self.env)
def set_new_rail(self):
""" Tell the renderer that the rail has changed.
eg when the rail has been regenerated, or updated in the editor.
"""
self.new_rail = True
def plot_agents(self, targets=True, selected_agent=None):
color_map = self.gl.get_cmap('hsv', lut=(len(self.env.agents) + 1))
for agent_idx, agent in enumerate(self.env.agents):
if agent is None:
continue
color = color_map(agent_idx)
self.plot_single_agent(agent.position, agent.direction, color, target=agent.target if targets else None,
static=True, selected=agent_idx == selected_agent)
for agent_idx, agent in enumerate(self.env.agents):
if agent is None:
continue
color = color_map(agent_idx)
self.plot_single_agent(agent.position, agent.direction, color, target=agent.target if targets else None)
def get_transition_row_col(self, row_col_pos, direction, bgiTrans=False):
"""
Get the available transitions for row_col_pos in direction direction,
as row & col deltas.
If bgiTrans is True, return a grid of indices of available transitions.
eg for a cell row_col_pos = (4,5), in direction direction = 0 (N),
where the available transitions are N and E, returns:
[[-1,0], [0,1]] ie N=up one row, and E=right one col.
and if bgiTrans is True, returns a tuple:
(
[[-1,0], [0,1]], # deltas as before
[0, 1] # available transition indices, ie N, E
)
"""
transitions = self.env.rail.get_transitions(*row_col_pos, direction)
transition_list = np.where(transitions)[0] # RC list of transitions
# HACK: workaround dead-end transitions
if len(transition_list) == 0:
reverse_direciton = (direction + 2) % 4
transitions = tuple(int(tmp_dir == reverse_direciton) for tmp_dir in range(4))
transition_list = np.where(transitions)[0] # RC list of transitions
transition_grid = self.__class__.transitions_row_col[transition_list]
if bgiTrans:
return transition_grid, transition_list
else:
return transition_grid
def plot_single_agent(self, position_row_col, direction, color="r", target=None, static=False, selected=False):
"""
Plot a simple agent.
Assumes a working graphics layer context (cf a MPL figure).
"""
if position_row_col is None:
return
rt = self.__class__
direction_row_col = rt.transitions_row_col[direction] # agent direction in RC
direction_xy = np.matmul(direction_row_col, rt.row_col_to_xy) # agent direction in xy
xyPos = np.matmul(position_row_col - direction_row_col / 2, rt.row_col_to_xy) + rt.x_y_half
if static:
color = self.gl.adapt_color(color, lighten=True)
color = color
self.gl.scatter(*xyPos, color=color, layer=1, marker="o", s=100) # agent location
xy_dir_line = array([xyPos, xyPos + direction_xy / 2]).T # line for agent orient.
self.gl.plot(*xy_dir_line, color=color, layer=1, lw=5, ms=0, alpha=0.6)
if selected:
self._draw_square(xyPos, 1, color)
if target is not None:
target_row_col = array(target)
target_xy = np.matmul(target_row_col, rt.row_col_to_xy) + rt.x_y_half
self._draw_square(target_xy, 1 / 3, color, layer=1)
def plot_transition(self, position_row_col, transition_row_col, color="r", depth=None):
"""
plot the transitions in transition_row_col at position position_row_col.
transition_row_col is a 2d numpy array containing a list of RC transitions,
eg [[-1,0], [0,1]] means N, E.
"""
rt = self.__class__
position_xy = np.matmul(position_row_col, rt.row_col_to_xy) + rt.x_y_half
transition_xy = position_xy + np.matmul(transition_row_col, rt.row_col_to_xy / 2.4)
self.gl.scatter(*transition_xy.T, color=color, marker="o", s=50, alpha=0.2)
if depth is not None:
for x, y in transition_xy:
self.gl.text(x, y, depth)
def draw_transition(self,
line,
center,
rotation,
dead_end=False,
curves=False,
color="gray",
arrow=True,
spacing=0.1):
"""
gLine is a numpy 2d array of points,
in the plotting space / coords.
eg:
[[0,.5],[1,0.2]] means a line
from x=0, y=0.5
to x=1, y=0.2
"""
if not curves and not dead_end:
# just a straigt line, no curve nor dead_end included in this basic rail element
self.gl.plot(
[line[0][0], line[1][0]], # x
[line[0][1], line[1][1]], # y
color=color
)
else:
# it was not a simple line to draw: the rail has a curve or dead_end included.
rt = self.__class__
straight = rotation in [0, 2]
dx, dy = np.squeeze(np.diff(line, axis=0)) * spacing / 2
if straight:
if color == "auto":
if dx > 0 or dy > 0:
color = "C1" # N or E
else:
color = "C2" # S or W
if dead_end:
line_xy = array([
line[1] + [dy, dx],
center,
line[1] - [dy, dx],
])
self.gl.plot(*line_xy.T, color=color)
else:
line_xy = line + [-dy, dx]
self.gl.plot(*line_xy.T, color=color)
if arrow:
middle_xy = np.sum(line_xy * [[1 / 4], [3 / 4]], axis=0)
arrow_xy = array([
middle_xy + [-dx - dy, +dx - dy],
middle_xy,
middle_xy + [-dx + dy, -dx - dy]])
self.gl.plot(*arrow_xy.T, color=color)
else:
middle_xy = np.mean(line, axis=0)
dxy = middle_xy - center
corner = middle_xy + dxy
if rotation == 1:
arc_factor = 1 - spacing
color_auto = "C1"
else:
arc_factor = 1 + spacing
color_auto = "C2"
dxy2 = (center - corner) * arc_factor # for scaling the arc
if color == "auto":
color = color_auto
self.gl.plot(*(rt.arc * dxy2 + corner).T, color=color)
if arrow:
dx, dy = np.squeeze(np.diff(line, axis=0)) / 20
iArc = int(len(rt.arc) / 2)
middle_xy = corner + rt.arc[iArc] * dxy2
arrow_xy = array([
middle_xy + [-dx - dy, +dx - dy],
middle_xy,
middle_xy + [-dx + dy, -dx - dy]])
self.gl.plot(*arrow_xy.T, color=color)
def render_observation(self, agent_handles, observation_dict):
"""
Render the extent of the observation of each agent. All cells that appear in the agent
observation will be highlighted.
:param agent_handles: List of agent indices to adapt color and get correct observation
:param observation_dict: dictionary containing sets of cells of the agent observation
"""
rt = self.__class__
# Check if the observation builder provides an observation
if len(observation_dict) < 1:
warnings.warn(
"Predictor did not provide any predicted cells to render. \
Observation builder needs to populate: env.dev_obs_dict")
else:
for agent in agent_handles:
color = self.gl.get_agent_color(agent)
for visited_cell in observation_dict[agent]:
cell_coord = array(visited_cell[:2])
cell_coord_trans = np.matmul(cell_coord, rt.row_col_to_xy) + rt.x_y_half
self._draw_square(cell_coord_trans, 1 / (agent + 1.1), color, layer=1, opacity=100)
def render_prediction(self, agent_handles, prediction_dict):
"""
Render the extent of the observation of each agent. All cells that appear in the agent
observation will be highlighted.
:param agent_handles: List of agent indices to adapt color and get correct observation
:param observation_dict: dictionary containing sets of cells of the agent observation
"""
rt = self.__class__
if len(prediction_dict) < 1:
warnings.warn(
"Predictor did not provide any predicted cells to render. \
Predictors builder needs to populate: env.dev_pred_dict")
else:
for agent in agent_handles:
color = self.gl.get_agent_color(agent)
for visited_cell in prediction_dict[agent]:
cell_coord = array(visited_cell[:2])
if type(self.gl) is PILSVG:
# TODO : Track highlighting (Adrian)
r = cell_coord[0]
c = cell_coord[1]
transitions = self.env.rail.grid[r, c]
self.gl.set_predicion_path_at(r, c, transitions, agent_rail_color=color)
else:
cell_coord_trans = np.matmul(cell_coord, rt.row_col_to_xy) + rt.x_y_half
self._draw_square(cell_coord_trans, 1 / (agent + 1.1), color, layer=1, opacity=100)
def render_rail(self, spacing=False, rail_color="gray", curves=True, arrows=False):
cell_size = 1 # TODO: remove cell_size
env = self.env
# Draw cells grid
grid_color = [0.95, 0.95, 0.95]
for row in range(env.height + 1):
self.gl.plot([0, (env.width + 1) * cell_size],
[-row * cell_size, -row * cell_size],
color=grid_color, linewidth=2)
for col in range(env.width + 1):
self.gl.plot([col * cell_size, col * cell_size],
[0, -(env.height + 1) * cell_size],
color=grid_color, linewidth=2)
# Draw each cell independently
for row in range(env.height):
for col in range(env.width):
# bounding box of the grid cell
x0 = cell_size * col # left
x1 = cell_size * (col + 1) # right
y0 = cell_size * -row # top
y1 = cell_size * -(row + 1) # bottom
# centres of cell edges
coords = [
((x0 + x1) / 2.0, y0), # N middle top
(x1, (y0 + y1) / 2.0), # E middle right
((x0 + x1) / 2.0, y1), # S middle bottom
(x0, (y0 + y1) / 2.0) # W middle left
]
# cell centre
center_xy = array([x0, y1]) + cell_size / 2
# cell transition values
cell = env.rail.get_full_transitions(row, col)
cell_valid = env.rail.cell_neighbours_valid((row, col), check_this_cell=True)
# Special Case 7, with a single bit; terminate at center
nbits = 0
tmp = cell
while tmp > 0:
nbits += (tmp & 1)
tmp = tmp >> 1
# as above - move the from coord to the centre
# it's a dead env.
is_dead_end = nbits == 1
if not cell_valid:
self.gl.scatter(*center_xy, color="r", s=30)
for orientation in range(4): # ori is where we're heading
from_ori = (orientation + 2) % 4 # 0123=NESW -> 2301=SWNE
from_xy = coords[from_ori]
moves = env.rail.get_transitions(row, col, orientation)
for to_ori in range(4):
to_xy = coords[to_ori]
rotation = (to_ori - from_ori) % 4
if (moves[to_ori]): # if we have this transition
self.draw_transition(
array([from_xy, to_xy]), center_xy,
rotation, dead_end=is_dead_end, curves=curves and not is_dead_end, spacing=spacing,
color=rail_color)
def render_env(self,
show=False, # whether to call matplotlib show() or equivalent after completion
show_agents=True, # whether to include agents
show_inactive_agents=False,
show_observations=True, # whether to include observations
show_predictions=False, # whether to include predictions
show_rowcols=False, # label the rows and columns
frames=False, # frame counter to show (intended since invocation)
episode=None, # int episode number to show
step=None, # int step number to show in image
selected_agent=None, # indicate which agent is "selected" in the editor
return_image=False): # indicate if image is returned for use in monitor:
""" Draw the environment using the GraphicsLayer this RenderTool was created with.
(Use show=False from a Jupyter notebook with %matplotlib inline)
"""
# if type(self.gl) is PILSVG:
if self.gl_str in ["PILSVG", "PGL"]:
return self.render_env_svg(show=show,
show_observations=show_observations,
show_predictions=show_predictions,
selected_agent=selected_agent,
show_agents=show_agents,
show_inactive_agents=show_inactive_agents,
show_rowcols=show_rowcols,
return_image=return_image
)
else:
return self.render_env_pil(show=show,
show_agents=show_agents,
show_inactive_agents=show_inactive_agents,
show_observations=show_observations,
show_predictions=show_predictions,
show_rowcols=show_rowcols,
frames=frames,
episode=episode,
step=step,
selected_agent=selected_agent,
return_image=return_image
)
def _draw_square(self, center, size, color, opacity=255, layer=0):
x0 = center[0] - size / 2
x1 = center[0] + size / 2
y0 = center[1] - size / 2
y1 = center[1] + size / 2
self.gl.plot([x0, x1, x1, x0, x0], [y0, y0, y1, y1, y0], color=color, layer=layer, opacity=opacity)
def get_image(self):
return self.gl.get_image()
def render_env_pil(self,
show=False, # whether to call matplotlib show() or equivalent after completion
# use false when calling from Jupyter. (and matplotlib no longer supported!)
show_agents=True, # whether to include agents
show_inactive_agents=False,
show_observations=True, # whether to include observations
show_predictions=False, # whether to include predictions
show_rowcols=False, # label the rows and columns
frames=False, # frame counter to show (intended since invocation)
episode=None, # int episode number to show
step=None, # int step number to show in image
selected_agent=None, # indicate which agent is "selected" in the editor
return_image=False # indicate if image is returned for use in monitor:
):
if type(self.gl) is PILGL:
self.gl.begin_frame()
env = self.env
self.render_rail()
# Draw each agent + its orientation + its target
if show_agents:
self.plot_agents(targets=True, selected_agent=selected_agent)
if show_observations:
self.render_observation(range(env.get_num_agents()), env.dev_obs_dict)
if show_predictions and len(env.dev_pred_dict) > 0:
self.render_prediction(range(env.get_num_agents()), env.dev_pred_dict)
# Draw some textual information like fps
text_y = [-0.3, -0.6, -0.9]
if frames:
self.gl.text(0.1, text_y[2], "Frame:{:}".format(self.frame_nr))
self.frame_nr += 1
if episode is not None:
self.gl.text(0.1, text_y[1], "Ep:{}".format(episode))
if step is not None:
self.gl.text(0.1, text_y[0], "Step:{}".format(step))
time_now = time.time()
self.gl.text(2, text_y[2], "elapsed:{:.2f}s".format(time_now - self.start_time))
self.times_list.append(time_now)
if len(self.times_list) > 20:
self.times_list.popleft()
if len(self.times_list) > 1:
rFps = (len(self.times_list) - 1) / (self.times_list[-1] - self.times_list[0])
self.gl.text(2, text_y[1], "fps:{:.2f}".format(rFps))
self.gl.prettify2(env.width, env.height, self.pix_per_cell)
# TODO: for MPL, we don't want to call clf (called by endframe)
# if not show:
if show and type(self.gl) is PILGL:
self.gl.show()
self.gl.pause(0.00001)
if return_image:
return self.get_image()
return
def render_env_svg(
self, show=False, show_observations=True, show_predictions=False, selected_agent=None,
show_agents=True, show_inactive_agents=False, show_rowcols=False, return_image=False
):
"""
Renders the environment with SVG support (nice image)
"""
env = self.env
self.gl.begin_frame()
if self.new_rail:
self.new_rail = False
self.gl.clear_rails()
# store the targets
targets = {}
selected = {}
for agent_idx, agent in enumerate(self.env.agents):
if agent is None:
continue
targets[tuple(agent.target)] = agent_idx
selected[tuple(agent.target)] = (agent_idx == selected_agent)
# Draw each cell independently
for r in range(env.height):
for c in range(env.width):
transitions = env.rail.grid[r, c]
if (r, c) in targets:
target = targets[(r, c)]
is_selected = selected[(r, c)]
else:
target = None
is_selected = False
self.gl.set_rail_at(r, c, transitions, target=target, is_selected=is_selected,
rail_grid=env.rail.grid, num_agents=env.get_num_agents(),
show_debug=self.show_debug)
self.gl.build_background_map(targets)
if show_rowcols:
# label rows, cols
for iRow in range(env.height):
self.gl.text_rowcol((iRow, 0), str(iRow), layer=self.gl.RAIL_LAYER)
for iCol in range(env.width):
self.gl.text_rowcol((0, iCol), str(iCol), layer=self.gl.RAIL_LAYER)
if show_agents:
for agent_idx, agent in enumerate(self.env.agents):
if agent is None:
continue
# Show an agent even if it hasn't already started
if agent.position is None:
if show_inactive_agents:
# print("agent ", agent_idx, agent.position, agent.old_position, agent.initial_position)
self.gl.set_agent_at(agent_idx, *(agent.initial_position),
agent.initial_direction, agent.initial_direction,
is_selected=(selected_agent == agent_idx),
rail_grid=env.rail.grid,
show_debug=self.show_debug, clear_debug_text=self.clear_debug_text,
malfunction=False)
continue
is_malfunction = agent.malfunction_data["malfunction"] > 0
if self.agent_render_variant == AgentRenderVariant.BOX_ONLY:
self.gl.set_cell_occupied(agent_idx, *(agent.position))
elif self.agent_render_variant == AgentRenderVariant.ONE_STEP_BEHIND or \
self.agent_render_variant == AgentRenderVariant.ONE_STEP_BEHIND_AND_BOX: # noqa: E125
# Most common case - the agent has been running for >1 steps
if agent.old_position is not None:
position = agent.old_position
direction = agent.direction
old_direction = agent.old_direction
# the agent's first step - it doesn't have an old position yet
elif agent.position is not None:
position = agent.position
direction = agent.direction
old_direction = agent.direction
# When the editor has just added an agent
elif agent.initial_position is not None:
position = agent.initial_position
direction = agent.initial_direction
old_direction = agent.initial_direction
# set_agent_at uses the agent index for the color
if self.agent_render_variant == AgentRenderVariant.ONE_STEP_BEHIND_AND_BOX:
self.gl.set_cell_occupied(agent_idx, *(agent.position))
self.gl.set_agent_at(agent_idx, *position, old_direction, direction,
selected_agent == agent_idx, rail_grid=env.rail.grid,
show_debug=self.show_debug, clear_debug_text=self.clear_debug_text,
malfunction=is_malfunction)
else:
position = agent.position
direction = agent.direction
for possible_direction in range(4):
# Is a transition along movement `desired_movement_from_new_cell` to the current cell possible?
isValid = env.rail.get_transition((*agent.position, agent.direction), possible_direction)
if isValid:
direction = possible_direction
# set_agent_at uses the agent index for the color
self.gl.set_agent_at(agent_idx, *position, agent.direction, direction,
selected_agent == agent_idx, rail_grid=env.rail.grid,
show_debug=self.show_debug, clear_debug_text=self.clear_debug_text,
malfunction=is_malfunction)
# set_agent_at uses the agent index for the color
if self.agent_render_variant == AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX:
self.gl.set_cell_occupied(agent_idx, *(agent.position))
if show_inactive_agents:
show_this_agent=True
else:
show_this_agent = agent.status == RailAgentStatus.ACTIVE
if show_this_agent:
self.gl.set_agent_at(agent_idx, *position, agent.direction, direction,
selected_agent == agent_idx,
rail_grid=env.rail.grid, malfunction=is_malfunction)
if show_observations:
self.render_observation(range(env.get_num_agents()), env.dev_obs_dict)
if show_predictions:
self.render_prediction(range(env.get_num_agents()), env.dev_pred_dict)
if show:
self.gl.show()
for i in range(3):
self.gl.process_events()
self.frame_nr += 1
if return_image:
return self.get_image()
return
    def close_window(self):
        """Close the rendering window by delegating to the graphics layer (self.gl)."""
        self.gl.close_window()
| [
"numpy.sum",
"flatland.utils.graphics_pgl.PGLGL",
"flatland.utils.graphics_pil.PILGL",
"recordtype.recordtype",
"time.time",
"flatland.utils.graphics_pil.PILSVG",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.matmul",
"numpy.cos",
"warnings.warn",
"numpy.sin",
"n... | [((4619, 4672), 'recordtype.recordtype', 'recordtype', (['"""visit"""', "['rc', 'iDir', 'iDepth', 'prev']"], {}), "('visit', ['rc', 'iDir', 'iDepth', 'prev'])\n", (4629, 4672), False, 'from recordtype import recordtype\n'), ((4758, 4802), 'numpy.array', 'np.array', (['[[-1, 0], [0, 1], [1, 0], [0, -1]]'], {}), '([[-1, 0], [0, 1], [1, 0], [0, -1]])\n', (4766, 4802), True, 'import numpy as np\n'), ((4895, 4941), 'numpy.array', 'array', (['[half_pix_per_cell, -half_pix_per_cell]'], {}), '([half_pix_per_cell, -half_pix_per_cell])\n', (4900, 4941), False, 'from numpy import array\n'), ((4962, 5008), 'numpy.array', 'array', (['[[0, -pix_per_cell], [pix_per_cell, 0]]'], {}), '([[0, -pix_per_cell], [pix_per_cell, 0]])\n', (4967, 5008), False, 'from numpy import array\n'), ((5128, 5156), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', '(5)'], {}), '(0, np.pi / 2, 5)\n', (5139, 5156), True, 'import numpy as np\n'), ((1091, 1102), 'time.time', 'time.time', ([], {}), '()\n', (1100, 1102), False, 'import time\n'), ((1129, 1136), 'collections.deque', 'deque', ([], {}), '()\n', (1134, 1136), False, 'from collections import deque\n'), ((5072, 5115), 'numpy.array', 'array', (['[[[pix_per_cell]], [[pix_per_cell]]]'], {}), '([[[pix_per_cell]], [[pix_per_cell]]])\n', (5077, 5115), False, 'from numpy import array\n'), ((5533, 5544), 'time.time', 'time.time', ([], {}), '()\n', (5542, 5544), False, 'import time\n'), ((5571, 5578), 'collections.deque', 'deque', ([], {}), '()\n', (5576, 5578), False, 'from collections import deque\n'), ((6535, 6546), 'time.time', 'time.time', ([], {}), '()\n', (6544, 6546), False, 'import time\n'), ((6573, 6580), 'collections.deque', 'deque', ([], {}), '()\n', (6578, 6580), False, 'from collections import deque\n'), ((9761, 9807), 'numpy.matmul', 'np.matmul', (['direction_row_col', 'rt.row_col_to_xy'], {}), '(direction_row_col, rt.row_col_to_xy)\n', (9770, 9807), True, 'import numpy as np\n'), ((24674, 24685), 'time.time', 'time.time', 
([], {}), '()\n', (24683, 24685), False, 'import time\n'), ((5710, 5807), 'flatland.utils.graphics_pil.PILGL', 'PILGL', (['env.width', 'env.height', 'jupyter'], {'screen_width': 'screen_width', 'screen_height': 'screen_height'}), '(env.width, env.height, jupyter, screen_width=screen_width,\n screen_height=screen_height)\n', (5715, 5807), False, 'from flatland.utils.graphics_pil import PILGL, PILSVG\n'), ((8763, 8784), 'numpy.where', 'np.where', (['transitions'], {}), '(transitions)\n', (8771, 8784), True, 'import numpy as np\n'), ((9850, 9919), 'numpy.matmul', 'np.matmul', (['(position_row_col - direction_row_col / 2)', 'rt.row_col_to_xy'], {}), '(position_row_col - direction_row_col / 2, rt.row_col_to_xy)\n', (9859, 9919), True, 'import numpy as np\n'), ((10152, 10192), 'numpy.array', 'array', (['[xyPos, xyPos + direction_xy / 2]'], {}), '([xyPos, xyPos + direction_xy / 2])\n', (10157, 10192), False, 'from numpy import array\n'), ((10430, 10443), 'numpy.array', 'array', (['target'], {}), '(target)\n', (10435, 10443), False, 'from numpy import array\n'), ((10963, 11008), 'numpy.matmul', 'np.matmul', (['position_row_col', 'rt.row_col_to_xy'], {}), '(position_row_col, rt.row_col_to_xy)\n', (10972, 11008), True, 'import numpy as np\n'), ((11061, 11114), 'numpy.matmul', 'np.matmul', (['transition_row_col', '(rt.row_col_to_xy / 2.4)'], {}), '(transition_row_col, rt.row_col_to_xy / 2.4)\n', (11070, 11114), True, 'import numpy as np\n'), ((14998, 15153), 'warnings.warn', 'warnings.warn', (['"""Predictor did not provide any predicted cells to render. Observation builder needs to populate: env.dev_obs_dict"""'], {}), "(\n 'Predictor did not provide any predicted cells to render. Observation builder needs to populate: env.dev_obs_dict'\n )\n", (15011, 15153), False, 'import warnings\n'), ((16080, 16235), 'warnings.warn', 'warnings.warn', (['"""Predictor did not provide any predicted cells to render. 
Predictors builder needs to populate: env.dev_pred_dict"""'], {}), "(\n 'Predictor did not provide any predicted cells to render. Predictors builder needs to populate: env.dev_pred_dict'\n )\n", (16093, 16235), False, 'import warnings\n'), ((5038, 5051), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5047, 5051), True, 'import numpy as np\n'), ((5174, 5187), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5180, 5187), True, 'import numpy as np\n'), ((5189, 5202), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5195, 5202), True, 'import numpy as np\n'), ((5855, 5953), 'flatland.utils.graphics_pil.PILSVG', 'PILSVG', (['env.width', 'env.height', 'jupyter'], {'screen_width': 'screen_width', 'screen_height': 'screen_height'}), '(env.width, env.height, jupyter, screen_width=screen_width,\n screen_height=screen_height)\n', (5861, 5953), False, 'from flatland.utils.graphics_pil import PILGL, PILSVG\n'), ((6118, 6215), 'flatland.utils.graphics_pgl.PGLGL', 'PGLGL', (['env.width', 'env.height', 'jupyter'], {'screen_width': 'screen_width', 'screen_height': 'screen_height'}), '(env.width, env.height, jupyter, screen_width=screen_width,\n screen_height=screen_height)\n', (6123, 6215), False, 'from flatland.utils.graphics_pgl import PGLGL\n'), ((9074, 9095), 'numpy.where', 'np.where', (['transitions'], {}), '(transitions)\n', (9082, 9095), True, 'import numpy as np\n'), ((10468, 10511), 'numpy.matmul', 'np.matmul', (['target_row_col', 'rt.row_col_to_xy'], {}), '(target_row_col, rt.row_col_to_xy)\n', (10477, 10511), True, 'import numpy as np\n'), ((13429, 13450), 'numpy.mean', 'np.mean', (['line'], {'axis': '(0)'}), '(line, axis=0)\n', (13436, 13450), True, 'import numpy as np\n'), ((5054, 5067), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5063, 5067), True, 'import numpy as np\n'), ((12662, 12717), 'numpy.array', 'array', (['[line[1] + [dy, dx], center, line[1] - [dy, dx]]'], {}), '([line[1] + [dy, dx], center, line[1] - [dy, dx]])\n', (12667, 
12717), False, 'from numpy import array\n'), ((14215, 14306), 'numpy.array', 'array', (['[middle_xy + [-dx - dy, +dx - dy], middle_xy, middle_xy + [-dx + dy, -dx - dy]]'], {}), '([middle_xy + [-dx - dy, +dx - dy], middle_xy, middle_xy + [-dx + dy, \n -dx - dy]])\n', (14220, 14306), False, 'from numpy import array\n'), ((15366, 15389), 'numpy.array', 'array', (['visited_cell[:2]'], {}), '(visited_cell[:2])\n', (15371, 15389), False, 'from numpy import array\n'), ((16447, 16470), 'numpy.array', 'array', (['visited_cell[:2]'], {}), '(visited_cell[:2])\n', (16452, 16470), False, 'from numpy import array\n'), ((18472, 18487), 'numpy.array', 'array', (['[x0, y1]'], {}), '([x0, y1])\n', (18477, 18487), False, 'from numpy import array\n'), ((12341, 12362), 'numpy.diff', 'np.diff', (['line'], {'axis': '(0)'}), '(line, axis=0)\n', (12348, 12362), True, 'import numpy as np\n'), ((13065, 13109), 'numpy.sum', 'np.sum', (['(line_xy * [[1 / 4], [3 / 4]])'], {'axis': '(0)'}), '(line_xy * [[1 / 4], [3 / 4]], axis=0)\n', (13071, 13109), True, 'import numpy as np\n'), ((13146, 13237), 'numpy.array', 'array', (['[middle_xy + [-dx - dy, +dx - dy], middle_xy, middle_xy + [-dx + dy, -dx - dy]]'], {}), '([middle_xy + [-dx - dy, +dx - dy], middle_xy, middle_xy + [-dx + dy, \n -dx - dy]])\n', (13151, 13237), False, 'from numpy import array\n'), ((15429, 15468), 'numpy.matmul', 'np.matmul', (['cell_coord', 'rt.row_col_to_xy'], {}), '(cell_coord, rt.row_col_to_xy)\n', (15438, 15468), True, 'import numpy as np\n'), ((14047, 14068), 'numpy.diff', 'np.diff', (['line'], {'axis': '(0)'}), '(line, axis=0)\n', (14054, 14068), True, 'import numpy as np\n'), ((16893, 16932), 'numpy.matmul', 'np.matmul', (['cell_coord', 'rt.row_col_to_xy'], {}), '(cell_coord, rt.row_col_to_xy)\n', (16902, 16932), True, 'import numpy as np\n'), ((19764, 19787), 'numpy.array', 'array', (['[from_xy, to_xy]'], {}), '([from_xy, to_xy])\n', (19769, 19787), False, 'from numpy import array\n')] |
"""
Figure 3
Plot source patterns
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne import EvokedArray
from config import path_data
from sklearn.decomposition import PCA
from jr.plot.base import alpha_cmap, LinearSegmentedColormap
from surfer import Brain
from webcolors import hex_to_rgb
# Define recordings sfreq
sfreq = 120
# Define colors
colors = ['#1f77b4', '#d62728', '#ff7f0e']
# Define pattern to plot (here left_spatial frequency)
mean_morph_patterns = mne.read_source_estimate(path_data +
'morph_source_patterns/Target_left_sfreq_patterns-rh.stc')
n_components = 2 # keep the first 2 components
pca = PCA(n_components, random_state=0).fit(mean_morph_patterns.data.T)
n_components = len(pca.components_)
time_course = mne.EvokedArray(
data=pca.transform(mean_morph_patterns.data.T).T,
info=mne.create_info(len(pca.components_), sfreq=sfreq),
tmin=-0.2)
colors = np.array(hex_to_rgb('#ff7f0e'))/255.
colors = np.concatenate((colors, [1]), axis=0)
colors_ = (colors, colors)
# Show brain
brain = Brain('fsaverage', 'both', 'inflated_pre', background='w',
cortex='low_contrast')
# Show patterns
for side, slic in (('lh', slice(0, 10242)), ('rh', slice(10242, None))):
for roi, color in zip(pca.components_, colors_):
roi = np.abs(roi[slic])
cmap = alpha_cmap(LinearSegmentedColormap.from_list(
'RdBu', [color, color]), diverge=False)
brain.add_data(roi, hemi=side, colormap=cmap, smoothing_steps=5,
vertices=mean_morph_patterns.vertices[side == 'rh'],
alpha=0.5)
brain.show_view(view='caudal')
# brain.show_view(view='lateral')
| [
"surfer.Brain",
"numpy.abs",
"webcolors.hex_to_rgb",
"mne.read_source_estimate",
"sklearn.decomposition.PCA",
"jr.plot.base.LinearSegmentedColormap.from_list",
"numpy.concatenate"
] | [((545, 644), 'mne.read_source_estimate', 'mne.read_source_estimate', (["(path_data + 'morph_source_patterns/Target_left_sfreq_patterns-rh.stc')"], {}), "(path_data +\n 'morph_source_patterns/Target_left_sfreq_patterns-rh.stc')\n", (569, 644), False, 'import mne\n'), ((1019, 1056), 'numpy.concatenate', 'np.concatenate', (['(colors, [1])'], {'axis': '(0)'}), '((colors, [1]), axis=0)\n', (1033, 1056), True, 'import numpy as np\n'), ((1105, 1191), 'surfer.Brain', 'Brain', (['"""fsaverage"""', '"""both"""', '"""inflated_pre"""'], {'background': '"""w"""', 'cortex': '"""low_contrast"""'}), "('fsaverage', 'both', 'inflated_pre', background='w', cortex=\n 'low_contrast')\n", (1110, 1191), False, 'from surfer import Brain\n'), ((700, 733), 'sklearn.decomposition.PCA', 'PCA', (['n_components'], {'random_state': '(0)'}), '(n_components, random_state=0)\n', (703, 733), False, 'from sklearn.decomposition import PCA\n'), ((982, 1003), 'webcolors.hex_to_rgb', 'hex_to_rgb', (['"""#ff7f0e"""'], {}), "('#ff7f0e')\n", (992, 1003), False, 'from webcolors import hex_to_rgb\n'), ((1357, 1374), 'numpy.abs', 'np.abs', (['roi[slic]'], {}), '(roi[slic])\n', (1363, 1374), True, 'import numpy as np\n'), ((1401, 1458), 'jr.plot.base.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""RdBu"""', '[color, color]'], {}), "('RdBu', [color, color])\n", (1434, 1458), False, 'from jr.plot.base import alpha_cmap, LinearSegmentedColormap\n')] |
import numpy as np
import cv2
def translate(image, x, y):
    """Return a copy of `image` shifted by (x, y) pixels via an affine warp."""
    transform = np.float32([[1, 0, x], [0, 1, y]])
    return cv2.warpAffine(image, transform, (image.shape[1], image.shape[0]))
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees about `center` (image midpoint by default)."""
    height, width = image.shape[:2]
    if center is None:
        center = (width // 2, height // 2)
    matrix = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(image, matrix, (width, height))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image`, preserving aspect ratio when only one dimension is given.

    Parameters:
        image: input array with ``shape[:2] == (h, w)``.
        width/height: target size in pixels; if only one is given the other
            is derived from the aspect ratio; if both are None the image is
            returned unchanged; if both are given they are used as-is.
        inter: OpenCV interpolation flag.
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Bug fix: derive the missing width from the image's own width `w`
        # (the original computed int(width * r) with width == None -> TypeError).
        r = height / float(h)
        dim = (int(w * r), height)
    elif height is None:
        # Symmetric fix: derive the missing height from the image height `h`.
        r = width / float(w)
        dim = (width, int(h * r))
    else:
        # Robustness: both dimensions given -- original left `dim` unbound here.
        dim = (width, height)
    # Bug fix: `inter` must be passed as `interpolation=`; the third positional
    # argument of cv2.resize is the `dst` output buffer, not the interpolation.
    resized = cv2.resize(image, dim, interpolation=inter)
return resized | [
"cv2.warpAffine",
"numpy.float32",
"cv2.getRotationMatrix2D",
"cv2.resize"
] | [((67, 101), 'numpy.float32', 'np.float32', (['[[1, 0, x], [0, 1, y]]'], {}), '([[1, 0, x], [0, 1, y]])\n', (77, 101), True, 'import numpy as np\n'), ((117, 175), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(image.shape[1], image.shape[0])'], {}), '(image, M, (image.shape[1], image.shape[0]))\n', (131, 175), False, 'import cv2\n'), ((351, 396), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', 'scale'], {}), '(center, angle, scale)\n', (374, 396), False, 'import cv2\n'), ((410, 442), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (424, 442), False, 'import cv2\n'), ((824, 853), 'cv2.resize', 'cv2.resize', (['image', 'dim', 'inter'], {}), '(image, dim, inter)\n', (834, 853), False, 'import cv2\n')] |
# code-checked
# server-checked
import os
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.autograd import Variable
from model import DepthCompletionNet
from datasets import DatasetVirtualKITTIValSeq
from criterion import MaskedL2Gauss, RMSE
import numpy as np
import cv2
# Evaluate an ensemble of independently trained depth-completion models on
# Virtual KITTI validation sequences, logging loss/RMSE and dumping
# visualizations (depth maps + uncertainty maps) plus a video per sequence.
model_id = "ensembling_virtual"
# Indices of the ensemble members (one trained checkpoint per index)
model_is = [0, 1, 2, 3, 4, 5, 6, 7]
print (model_is)
snapshot_dir = "/workspace/evaluating_bdl/depthCompletion/training_logs/%s_%s_eval_seq_virtual" % (model_id, str(model_is))
virtualkitti_path = "/root/data/virtualkitti"
batch_size = 4
# Load every ensemble member onto the GPU in eval mode
models = []
for i in model_is:
    restore_from = "/workspace/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth" % (model_id, i)
    model = DepthCompletionNet().cuda()
    model = torch.nn.DataParallel(model)
    model.load_state_dict(torch.load(restore_from))
    model.eval()
    models.append(model)
# Number of ensemble members, as float for averaging
M = float(len(models))
print (M)
criterion = MaskedL2Gauss().cuda()
rmse_criterion = RMSE().cuda()
val_sequences = ["0002"]
variations = ["clone", "30-deg-left", "30-deg-right"]
#variations = ["15-deg-left", "15-deg-right", "30-deg-left", "30-deg-right", "clone", "fog", "morning", "overcast", "rain", "sunset"]
# For every (sequence, variation): run the ensemble over the whole sequence,
# average predictions, decompose predictive uncertainty into aleatoric and
# epistemic parts, save per-frame visualizations and assemble them into a video.
for step, seq in enumerate(val_sequences):
    print ("##################################################################")
    print ("seq: %d/%d, %s" % (step+1, len(val_sequences), seq))
    for variation_step, variation in enumerate(variations):
        print ("#############################")
        print ("variation: %d/%d, %s" % (variation_step+1, len(variations), variation))
        snapshot_dir_seq = snapshot_dir + "/" + seq + "_" + variation
        if not os.path.exists(snapshot_dir):
            os.makedirs(snapshot_dir)
        if not os.path.exists(snapshot_dir_seq):
            os.makedirs(snapshot_dir_seq)
        eval_dataset = DatasetVirtualKITTIValSeq(virtualkitti_path=virtualkitti_path, seq=seq, variation=variation)
        eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
        batch_losses = []
        batch_rmses = []
        for i_iter, batch in enumerate(eval_loader):
            with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)
                imgs, sparses, targets, file_ids, imgs_color = batch
                imgs = Variable(imgs.cuda()) # (shape: (batch_size, h, w))
                sparses = Variable(sparses.cuda()) # (shape: (batch_size, h, w))
                targets = Variable(targets.cuda()) # (shape: (batch_size, h, w))
                # Forward pass through every ensemble member
                means = []
                sigma_2_aleas = []
                for model in models:
                    mean, log_var = model(imgs, sparses) # (both of shape: (batch_size, 1, h, w))
                    sigma_2_alea = torch.exp(log_var) # (sigma_alea^2) # (shape: (batch_size, 1, h, w))
                    means.append(mean)
                    sigma_2_aleas.append(sigma_2_alea)
                # Ensemble mean prediction: average of member means
                mean = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w))
                for value in means:
                    mean = mean + value/M
                # Aleatoric uncertainty: average of member variances
                sigma_2_alea = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_alea^2)
                for value in sigma_2_aleas:
                    sigma_2_alea = sigma_2_alea + value/M
                # Epistemic uncertainty: variance of member means around the ensemble mean
                sigma_2_epi = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_epi^2)
                for value in means:
                    sigma_2_epi = sigma_2_epi + torch.pow(mean - value, 2)/M
                sigma_2_pred = sigma_2_alea + sigma_2_epi # (sigma_pred^2)
                loss = criterion(mean, torch.log(sigma_2_pred), targets)
                rmse = rmse_criterion(mean, targets)
                print('iter = {}/{} completed, loss = {}, rmse = {}'.format(i_iter, len(eval_dataset)/batch_size, loss.data.cpu().numpy(), rmse.data.cpu().numpy()))
                batch_losses.append(loss.data.cpu().numpy())
                batch_rmses.append(rmse.data.cpu().numpy())
                ########################################################################
                # visualization:
                ########################################################################
                mean = mean.data.cpu().numpy() # (shape: (batch_size, 1, h, w))
                sigma_2_alea = sigma_2_alea.data.cpu().numpy() # (shape: (batch_size, 1, h, w))
                sigma_2_epi = sigma_2_epi.data.cpu().numpy() # (shape: (batch_size, 1, h, w))
                sigma_2_pred = sigma_2_pred.data.cpu().numpy() # (shape: (batch_size, 1, h, w))
                targets = targets.data.cpu().numpy() # (shape: (batch_size, h, w))
                imgs = imgs.data.cpu().numpy() # (shape: (batch_size, h, w))
                sparses = sparses.data.cpu().numpy() # (shape: (batch_size, h, w))
                imgs_color = imgs_color.numpy()
                # Save one set of PNGs per example in the batch
                for i in range(mean.shape[0]):
                    file_id = file_ids[i] # (file_id == "0002/clone/00007.png" (e.g.))
                    file_id = file_id.split("/")[-1] # (file_id == "00007.png")
                    file_id = file_id.split(".png")[0] # (file_id == "00007")
                    pred = mean[i] # (shape: (1, h, w))
                    pred = pred.squeeze(0) # (shape: (h, w))
                    sigma_2_alea_ = sigma_2_alea[i] # (shape: (1, h, w))
                    sigma_2_alea_ = sigma_2_alea_.squeeze(0) # (shape: (h, w))
                    sigma_alea = np.sqrt(sigma_2_alea_)
                    sigma_2_epi_ = sigma_2_epi[i] # (shape: (1, h, w))
                    sigma_2_epi_ = sigma_2_epi_.squeeze(0) # (shape: (h, w))
                    sigma_epi = np.sqrt(sigma_2_epi_)
                    sigma_2_pred_ = sigma_2_pred[i] # (shape: (1, h, w))
                    sigma_2_pred_ = sigma_2_pred_.squeeze(0) # (shape: (h, w))
                    sigma_pred = np.sqrt(sigma_2_pred_)
                    img = imgs[i] # (shape: (h, w))
                    img = img.astype(np.uint8)
                    img_color = imgs_color[i]
                    img_color = img_color.astype(np.uint8)
                    # Clamp depths and map to [0, 255] for 8-bit visualization
                    max_distance = 65.0
                    target = targets[i] # (shape: (h, w))
                    target[target > max_distance] = max_distance
                    target = (target/max_distance)*255
                    target = target.astype(np.uint8)
                    sparse = sparses[i] # (shape: (h, w))
                    sparse[sparse > max_distance] = max_distance
                    sparse = (sparse/max_distance)*255
                    sparse = sparse.astype(np.uint8)
                    pred[pred > max_distance] = max_distance
                    pred = (pred/max_distance)*255
                    pred = pred.astype(np.uint8)
                    sparse_color = cv2.applyColorMap(sparse, cv2.COLORMAP_SUMMER)
                    sparse_color[sparse == 0] = 0
                    target_color = cv2.applyColorMap(target, cv2.COLORMAP_SUMMER)
                    target_color[target == 0] = 0
                    pred_color = cv2.applyColorMap(pred, cv2.COLORMAP_SUMMER)
                    max_interval_length = 75.0 # (corresponds to the maximum length of a 95% conf interval)
                    max_sigma = max_interval_length/(2.0*1.96)
                    sigma_alea[sigma_alea > max_sigma] = max_sigma
                    sigma_alea = (sigma_alea/max_sigma)*255
                    sigma_alea = sigma_alea.astype(np.uint8)
                    sigma_alea_color = cv2.applyColorMap(sigma_alea, cv2.COLORMAP_HOT)
                    sigma_epi[sigma_epi > max_sigma] = max_sigma
                    sigma_epi = (sigma_epi/max_sigma)*255
                    sigma_epi = sigma_epi.astype(np.uint8)
                    sigma_epi_color = cv2.applyColorMap(sigma_epi, cv2.COLORMAP_HOT)
                    sigma_pred[sigma_pred > max_sigma] = max_sigma
                    sigma_pred = (sigma_pred/max_sigma)*255
                    sigma_pred = sigma_pred.astype(np.uint8)
                    sigma_pred_color = cv2.applyColorMap(sigma_pred, cv2.COLORMAP_HOT)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_img.png", img)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_img_color.png", img_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_sparse_color.png", sparse_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_target_color.png", target_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_pred_color.png", pred_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_sigma_alea_color.png", sigma_alea_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_sigma_epi_color.png", sigma_epi_color)
                    cv2.imwrite(snapshot_dir_seq + "/" + file_id + "_sigma_pred_color.png", sigma_pred_color)
        val_loss = np.mean(batch_losses)
        print ("val loss: %g" % val_loss)
        val_rmse = np.mean(batch_rmses)
        print ("val rmse: %g" % val_rmse)
        # Assemble the saved PNGs into a tiled video (images + colorbars)
        img_h = 352
        img_w = 1216
        colorbar_w = 30
        colorbar_row = np.linspace(start=255.0, stop=0.0, num=img_h) # (shape: (img_h, ))
        colorbar = np.zeros((colorbar_w, img_h)) # (shape: (colorbar_w, img_h))
        colorbar = colorbar + colorbar_row
        colorbar = colorbar.T # (shape: (img_h, colorbar_w))
        colorbar = colorbar.astype(np.uint8)
        colorbar_SUMMER = cv2.applyColorMap(colorbar, cv2.COLORMAP_SUMMER) # (shape: (img_h, colorbar_w, 3))
        colorbar_HOT = cv2.applyColorMap(colorbar, cv2.COLORMAP_HOT) # (shape: (img_h, colorbar_w, 3))
        ids = eval_dataset.ids # (contains e.g. "00007.png" and so on)
        ids_sorted = sorted(ids)
        out = cv2.VideoWriter("%s/%s_%s.avi" % (snapshot_dir_seq, seq, variation), cv2.VideoWriter_fourcc(*"MJPG"), 12, (2*(img_w + colorbar_w), 4*img_h))
        # NOTE(review): `step` shadows the outer sequence-loop variable and `id`
        # shadows the builtin; harmless here since the outer `step` is not read
        # again after this loop, but worth renaming.
        for step, id in enumerate(ids_sorted):
            if step % 10 == 0:
                print ("step: %d/%d" % (step+1, len(ids)))
            # (id == "00007.png" e.g.)
            id = id.split(".png")[0] # (id == "00007")
            img_color = cv2.imread(snapshot_dir_seq + "/" + id + "_img_color.png", -1) # (shape: (img_h, img_w, 3))
            sparse_color = cv2.imread(snapshot_dir_seq + "/" + id + "_sparse_color.png", -1) # (shape: (img_h, img_w, 3))
            target_color = cv2.imread(snapshot_dir_seq + "/" + id + "_target_color.png", -1) # (shape: (img_h, img_w, 3))
            pred_color = cv2.imread(snapshot_dir_seq + "/" + id + "_pred_color.png", -1) # (shape: (img_h, img_w, 3))
            sigma_alea_color = cv2.imread(snapshot_dir_seq + "/" + id + "_sigma_alea_color.png", -1) # (shape: (img_h, img_w, 3))
            sigma_epi_color = cv2.imread(snapshot_dir_seq + "/" + id + "_sigma_epi_color.png", -1) # (shape: (img_h, img_w, 3))
            sigma_pred_color = cv2.imread(snapshot_dir_seq + "/" + id + "_sigma_pred_color.png", -1) # (shape: (img_h, img_w, 3))
            # 4x2 tile layout: row 0 = rgb | sparse, row 1 = target | pred,
            # row 2 = total uncertainty (centered), row 3 = aleatoric | epistemic
            combined_img = np.zeros((4*img_h, 2*(img_w + colorbar_w), 3), dtype=np.uint8)
            #
            combined_img[0:img_h, 0:img_w] = img_color
            combined_img[0:img_h, (img_w + colorbar_w):(2*img_w + colorbar_w)] = sparse_color
            combined_img[0:img_h, (2*img_w + colorbar_w):(2*img_w + 2*colorbar_w)] = colorbar_SUMMER
            #
            combined_img[img_h:(2*img_h), 0:img_w] = target_color
            combined_img[img_h:(2*img_h), img_w:(img_w + colorbar_w)] = colorbar_SUMMER
            combined_img[img_h:(2*img_h), (img_w + colorbar_w):(2*img_w + colorbar_w)] = pred_color
            combined_img[img_h:(2*img_h), (2*img_w + colorbar_w):(2*img_w + 2*colorbar_w)] = colorbar_SUMMER
            #
            combined_img[(2*img_h):(3*img_h), int(img_w+colorbar_w - (img_w+colorbar_w)/2):int(img_w+colorbar_w - (img_w+colorbar_w)/2 + img_w)] = sigma_pred_color
            combined_img[(2*img_h):(3*img_h), int(img_w+colorbar_w - (img_w+colorbar_w)/2 + img_w):int(img_w+colorbar_w - (img_w+colorbar_w)/2 + img_w + colorbar_w)] = colorbar_HOT
            #
            combined_img[(3*img_h):(4*img_h), 0:img_w] = sigma_alea_color
            combined_img[(3*img_h):(4*img_h), img_w:(img_w + colorbar_w)] = colorbar_HOT
            combined_img[(3*img_h):(4*img_h), (img_w + colorbar_w):(2*img_w + colorbar_w)] = sigma_epi_color
            combined_img[(3*img_h):(4*img_h), (2*img_w + colorbar_w):(2*img_w + 2*colorbar_w)] = colorbar_HOT
            out.write(combined_img)
        out.release()
| [
"cv2.VideoWriter_fourcc",
"numpy.mean",
"torch.no_grad",
"torch.utils.data.DataLoader",
"cv2.imwrite",
"torch.load",
"os.path.exists",
"torch.exp",
"numpy.linspace",
"model.DepthCompletionNet",
"torch.log",
"datasets.DatasetVirtualKITTIValSeq",
"torch.pow",
"cv2.applyColorMap",
"os.maked... | [((832, 860), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (853, 860), False, 'import torch\n'), ((888, 912), 'torch.load', 'torch.load', (['restore_from'], {}), '(restore_from)\n', (898, 912), False, 'import torch\n'), ((1010, 1025), 'criterion.MaskedL2Gauss', 'MaskedL2Gauss', ([], {}), '()\n', (1023, 1025), False, 'from criterion import MaskedL2Gauss, RMSE\n'), ((1051, 1057), 'criterion.RMSE', 'RMSE', ([], {}), '()\n', (1055, 1057), False, 'from criterion import MaskedL2Gauss, RMSE\n'), ((1955, 2051), 'datasets.DatasetVirtualKITTIValSeq', 'DatasetVirtualKITTIValSeq', ([], {'virtualkitti_path': 'virtualkitti_path', 'seq': 'seq', 'variation': 'variation'}), '(virtualkitti_path=virtualkitti_path, seq=seq,\n variation=variation)\n', (1980, 2051), False, 'from datasets import DatasetVirtualKITTIValSeq\n'), ((2071, 2177), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'eval_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=eval_dataset, batch_size=batch_size,\n shuffle=False, num_workers=4)\n', (2098, 2177), False, 'import torch\n'), ((9318, 9339), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (9325, 9339), True, 'import numpy as np\n'), ((9403, 9423), 'numpy.mean', 'np.mean', (['batch_rmses'], {}), '(batch_rmses)\n', (9410, 9423), True, 'import numpy as np\n'), ((9563, 9608), 'numpy.linspace', 'np.linspace', ([], {'start': '(255.0)', 'stop': '(0.0)', 'num': 'img_h'}), '(start=255.0, stop=0.0, num=img_h)\n', (9574, 9608), True, 'import numpy as np\n'), ((9650, 9679), 'numpy.zeros', 'np.zeros', (['(colorbar_w, img_h)'], {}), '((colorbar_w, img_h))\n', (9658, 9679), True, 'import numpy as np\n'), ((9888, 9936), 'cv2.applyColorMap', 'cv2.applyColorMap', (['colorbar', 'cv2.COLORMAP_SUMMER'], {}), '(colorbar, cv2.COLORMAP_SUMMER)\n', (9905, 9936), False, 'import cv2\n'), ((9994, 10039), 'cv2.applyColorMap', 'cv2.applyColorMap', 
(['colorbar', 'cv2.COLORMAP_HOT'], {}), '(colorbar, cv2.COLORMAP_HOT)\n', (10011, 10039), False, 'import cv2\n'), ((791, 811), 'model.DepthCompletionNet', 'DepthCompletionNet', ([], {}), '()\n', (809, 811), False, 'from model import DepthCompletionNet\n'), ((1767, 1795), 'os.path.exists', 'os.path.exists', (['snapshot_dir'], {}), '(snapshot_dir)\n', (1781, 1795), False, 'import os\n'), ((1810, 1835), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {}), '(snapshot_dir)\n', (1821, 1835), False, 'import os\n'), ((1852, 1884), 'os.path.exists', 'os.path.exists', (['snapshot_dir_seq'], {}), '(snapshot_dir_seq)\n', (1866, 1884), False, 'import os\n'), ((1899, 1928), 'os.makedirs', 'os.makedirs', (['snapshot_dir_seq'], {}), '(snapshot_dir_seq)\n', (1910, 1928), False, 'import os\n'), ((10267, 10298), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (10289, 10298), False, 'import cv2\n'), ((10605, 10667), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_img_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_img_color.png', -1)\n", (10615, 10667), False, 'import cv2\n'), ((10725, 10790), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_sparse_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_sparse_color.png', -1)\n", (10735, 10790), False, 'import cv2\n'), ((10848, 10913), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_target_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_target_color.png', -1)\n", (10858, 10913), False, 'import cv2\n'), ((10969, 11032), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_pred_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_pred_color.png', -1)\n", (10979, 11032), False, 'import cv2\n'), ((11094, 11163), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_sigma_alea_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_sigma_alea_color.png', -1)\n", (11104, 11163), False, 
'import cv2\n'), ((11224, 11292), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_sigma_epi_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_sigma_epi_color.png', -1)\n", (11234, 11292), False, 'import cv2\n'), ((11354, 11423), 'cv2.imread', 'cv2.imread', (["(snapshot_dir_seq + '/' + id + '_sigma_pred_color.png')", '(-1)'], {}), "(snapshot_dir_seq + '/' + id + '_sigma_pred_color.png', -1)\n", (11364, 11423), False, 'import cv2\n'), ((11483, 11549), 'numpy.zeros', 'np.zeros', (['(4 * img_h, 2 * (img_w + colorbar_w), 3)'], {'dtype': 'np.uint8'}), '((4 * img_h, 2 * (img_w + colorbar_w), 3), dtype=np.uint8)\n', (11491, 11549), True, 'import numpy as np\n'), ((2301, 2316), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2314, 2316), False, 'import torch\n'), ((2987, 3005), 'torch.exp', 'torch.exp', (['log_var'], {}), '(log_var)\n', (2996, 3005), False, 'import torch\n'), ((3902, 3925), 'torch.log', 'torch.log', (['sigma_2_pred'], {}), '(sigma_2_pred)\n', (3911, 3925), False, 'import torch\n'), ((5775, 5797), 'numpy.sqrt', 'np.sqrt', (['sigma_2_alea_'], {}), '(sigma_2_alea_)\n', (5782, 5797), True, 'import numpy as np\n'), ((5983, 6004), 'numpy.sqrt', 'np.sqrt', (['sigma_2_epi_'], {}), '(sigma_2_epi_)\n', (5990, 6004), True, 'import numpy as np\n'), ((6195, 6217), 'numpy.sqrt', 'np.sqrt', (['sigma_2_pred_'], {}), '(sigma_2_pred_)\n', (6202, 6217), True, 'import numpy as np\n'), ((7151, 7197), 'cv2.applyColorMap', 'cv2.applyColorMap', (['sparse', 'cv2.COLORMAP_SUMMER'], {}), '(sparse, cv2.COLORMAP_SUMMER)\n', (7168, 7197), False, 'import cv2\n'), ((7287, 7333), 'cv2.applyColorMap', 'cv2.applyColorMap', (['target', 'cv2.COLORMAP_SUMMER'], {}), '(target, cv2.COLORMAP_SUMMER)\n', (7304, 7333), False, 'import cv2\n'), ((7421, 7465), 'cv2.applyColorMap', 'cv2.applyColorMap', (['pred', 'cv2.COLORMAP_SUMMER'], {}), '(pred, cv2.COLORMAP_SUMMER)\n', (7438, 7465), False, 'import cv2\n'), ((7874, 7921), 'cv2.applyColorMap', 'cv2.applyColorMap', 
(['sigma_alea', 'cv2.COLORMAP_HOT'], {}), '(sigma_alea, cv2.COLORMAP_HOT)\n', (7891, 7921), False, 'import cv2\n'), ((8148, 8194), 'cv2.applyColorMap', 'cv2.applyColorMap', (['sigma_epi', 'cv2.COLORMAP_HOT'], {}), '(sigma_epi, cv2.COLORMAP_HOT)\n', (8165, 8194), False, 'import cv2\n'), ((8428, 8475), 'cv2.applyColorMap', 'cv2.applyColorMap', (['sigma_pred', 'cv2.COLORMAP_HOT'], {}), '(sigma_pred, cv2.COLORMAP_HOT)\n', (8445, 8475), False, 'import cv2\n'), ((8499, 8562), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_img.png')", 'img'], {}), "(snapshot_dir_seq + '/' + file_id + '_img.png', img)\n", (8510, 8562), False, 'import cv2\n'), ((8584, 8659), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_img_color.png')", 'img_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_img_color.png', img_color)\n", (8595, 8659), False, 'import cv2\n'), ((8681, 8766), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_sparse_color.png')", 'sparse_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_sparse_color.png',\n sparse_color)\n", (8692, 8766), False, 'import cv2\n'), ((8784, 8869), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_target_color.png')", 'target_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_target_color.png',\n target_color)\n", (8795, 8869), False, 'import cv2\n'), ((8887, 8964), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_pred_color.png')", 'pred_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_pred_color.png', pred_color)\n", (8898, 8964), False, 'import cv2\n'), ((8986, 9079), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_sigma_alea_color.png')", 'sigma_alea_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_sigma_alea_color.png',\n sigma_alea_color)\n", (8997, 9079), False, 'import cv2\n'), ((9097, 9188), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_sigma_epi_color.png')", 
'sigma_epi_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_sigma_epi_color.png',\n sigma_epi_color)\n", (9108, 9188), False, 'import cv2\n'), ((9206, 9299), 'cv2.imwrite', 'cv2.imwrite', (["(snapshot_dir_seq + '/' + file_id + '_sigma_pred_color.png')", 'sigma_pred_color'], {}), "(snapshot_dir_seq + '/' + file_id + '_sigma_pred_color.png',\n sigma_pred_color)\n", (9217, 9299), False, 'import cv2\n'), ((3753, 3779), 'torch.pow', 'torch.pow', (['(mean - value)', '(2)'], {}), '(mean - value, 2)\n', (3762, 3779), False, 'import torch\n')] |
import csv
from pymatgen.core.structure import Structure
from pymatgen.io.ase import AseAtomsAdaptor
import os
import dscribe
from dscribe.descriptors import SineMatrix
import argparse
import sys
import numpy as np
import time
parser = argparse.ArgumentParser(description='generate 1d sine matrix description of materials')
parser.add_argument('root', help='path to the directory of CIF files.')
args = parser.parse_args(sys.argv[1:])
start_all = time.time()
cif_path = args.root
# ---------------------- Loading id-data -----------------------------
id_prop_file = os.path.join(cif_path, 'id_prop.csv')
with open(id_prop_file) as f:
reader = csv.reader(f)
id_prop_data = [row for row in reader]
id_data = []
for i in range(len(id_prop_data)):
id_data.append(id_prop_data[i][0])
# ----------------------- bulk_dict, position_dict, atom_name_dict -----------------------------------------------------
# convert Structure type to ASE type
Ada = AseAtomsAdaptor()
bulk_dict = {} # store cif-id and corresponding ASE type structure
start = time.time()
for ids in id_data:
crystal = Structure.from_file(os.path.join(cif_path, ids + '.cif'))
bulk = Ada.get_atoms(crystal)
bulk_dict[ids] = bulk
all_species = ['Hg', 'He', 'Lu', 'I', 'Zr', 'Sc', 'Na', 'Bi', 'Cl', 'Ir', 'Tl',
'Cr', 'S', 'B', 'Tb', 'Ag', 'F', 'Sr', 'Li', 'Ba', 'Sb', 'Hf',
'N', 'Os', 'K', 'Mn', 'Ge', 'O', 'Tc', 'Cs', 'Sn', 'Mg', 'Ru',
'Pt', 'Cu', 'C', 'La', 'Ca', 'Au', 'Al', 'H', 'Mo', 'Nd', 'Ti',
'W', 'Re', 'Cd', 'Pb', 'P', 'Be', 'Co', 'Xe', 'In', 'Pd', 'Nb',
'Ta', 'Br', 'As', 'Ga', 'V', 'Ni', 'Kr', 'Rb', 'Fe', 'Y', 'Se',
'Rh', 'Si', 'Te', 'Zn']
sm = SineMatrix(
n_atoms_max=50,
permutation="sorted_l2",
sparse=False,
flatten=True
)
print("The length of the feature vector is: ", sm.get_number_of_features())
print("*********************************************************************")
# --------------------- Store sine rep of materials in a dictionary sine_material ---------------------------
start = time.time()
sine_material = {}
for cif_id in list(bulk_dict.keys()):
sine_material[cif_id] = sm.create(bulk_dict[cif_id])
print("Spend ", time.time() - start, ' s to store sine rep of materials in a dictionary sine_material')
print("*********************************************************************")
# --------------------------- Save as .npy file ----------------------------------
np.save('Sine_mat.npy', sine_material)
print("Over !!!!! This script takes: ", time.time() - start_all, ' s')
| [
"numpy.save",
"csv.reader",
"argparse.ArgumentParser",
"time.time",
"pymatgen.io.ase.AseAtomsAdaptor",
"dscribe.descriptors.SineMatrix",
"os.path.join"
] | [((237, 329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""generate 1d sine matrix description of materials"""'}), "(description=\n 'generate 1d sine matrix description of materials')\n", (260, 329), False, 'import argparse\n'), ((449, 460), 'time.time', 'time.time', ([], {}), '()\n', (458, 460), False, 'import time\n'), ((570, 607), 'os.path.join', 'os.path.join', (['cif_path', '"""id_prop.csv"""'], {}), "(cif_path, 'id_prop.csv')\n", (582, 607), False, 'import os\n'), ((961, 978), 'pymatgen.io.ase.AseAtomsAdaptor', 'AseAtomsAdaptor', ([], {}), '()\n', (976, 978), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((1056, 1067), 'time.time', 'time.time', ([], {}), '()\n', (1065, 1067), False, 'import time\n'), ((1740, 1819), 'dscribe.descriptors.SineMatrix', 'SineMatrix', ([], {'n_atoms_max': '(50)', 'permutation': '"""sorted_l2"""', 'sparse': '(False)', 'flatten': '(True)'}), "(n_atoms_max=50, permutation='sorted_l2', sparse=False, flatten=True)\n", (1750, 1819), False, 'from dscribe.descriptors import SineMatrix\n'), ((2112, 2123), 'time.time', 'time.time', ([], {}), '()\n', (2121, 2123), False, 'import time\n'), ((2505, 2543), 'numpy.save', 'np.save', (['"""Sine_mat.npy"""', 'sine_material'], {}), "('Sine_mat.npy', sine_material)\n", (2512, 2543), True, 'import numpy as np\n'), ((651, 664), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (661, 664), False, 'import csv\n'), ((1123, 1159), 'os.path.join', 'os.path.join', (['cif_path', "(ids + '.cif')"], {}), "(cif_path, ids + '.cif')\n", (1135, 1159), False, 'import os\n'), ((2255, 2266), 'time.time', 'time.time', ([], {}), '()\n', (2264, 2266), False, 'import time\n'), ((2585, 2596), 'time.time', 'time.time', ([], {}), '()\n', (2594, 2596), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 16:48:26 2020
@author: Hooooaaanng
"""
from Bio.Seq import Seq
import time
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import itertools
import random
import numpy as np
import numpy.linalg as linalg
import scipy.linalg as la
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from copy import copy
from tqdm import tqdm_notebook
# Dictionary with bacteria full name as keys and variable name/fna file name as values
bacteria_dict = {'Acinetobacter baumannii':'abauman','Bacteroides fragilis':'bfragil','Escherichia coli':'ecolik12','Enterobacter cloacae':'entcloac', 'E. faecium':'entfaec', 'Klebsiella pneumoniae':'klebpneu','Proteus mirabilis':'pmirabil','Pseudomonas aeruginosia':'pseudaer','Staph. epidermidis':'stapepid','Staph. aureus':'staphaur','Staphylococcus saprophyticus':'stapsapr','Streptococcus agalactiae':'strepaga','Streptococcus pneumoniae':'strepneu'}
# Import all bacteria file and set up global variables with bacteria files as name
for bacteria in bacteria_dict.values():
record = 0
globals()[bacteria] = []
for seq_record in SeqIO.parse(bacteria + ".fna", "fasta"):
#print(seq_record)
globals()[bacteria + '_' + str(record)] = seq_record
globals()[bacteria].append(globals()[bacteria + '_' + str(record)])
#print(globals()[bacteria])
record += 1
# Function to generate a set containing all possible permutation of a kmer given k, as DNA format
def kmer_perm(k):
kmer_set = set([''.join(i) for i in itertools.product('ATCG', repeat = k)])
return kmer_set
# Random search function for maximizing fitness score
def random_search(cycles, kmer_enumerate, kmer_num, bacteria_dict, seed, pop_size, alpha, offset):
random.seed(seed)
bacteria_num = len(bacteria_dict)
cycle_track = []
min_L2norm_track = []
fitness_count = []
total_records_length = np.zeros((1,bacteria_num))
pop_best_fitness = []
pop_average_fitness = []
L2min_pairwise_diff_track = []
L2min_column_track = []
for cycle_num in range(cycles):
cycle_track.append(cycle_num)
pop = []
pop_count = []
pop_fitness_cycle = []
pairwise_cycle = []
min_column_cycle = []
for i in range(pop_size):
# Choose kmer_num random 8-mer from the set of all possible 8-mer
kmer_set = random.sample(tuple(kmer_enumerate),kmer_num)
pop.append(kmer_set)
# Build count matrix for the given set of probe
Count_Matrix = kmer_count(kmer_num, bacteria_num, bacteria_dict, kmer_set)
pop_count.append(Count_Matrix)
# Calculate L2 norms for pairwise differences among bacteria's column vector
# Note: bacteria's column vector is the length of the number of probes
fitness = UMD_fitness_score(Count_Matrix, bacteria_dict)
pop_fitness_cycle.append(fitness.L2norm_min_pairwise_diff() + alpha*np.log(fitness.L2norm_min_column() - offset))
min_L2norm_track.append(fitness.L2norm_min_column())
pairwise_cycle.append(fitness.L2norm_min_pairwise_diff())
min_column_cycle.append(fitness.L2norm_min_column())
if max(min_L2norm_track) == min_L2norm_track[-1]:
best_probe_set = kmer_set
if cycle_num == 0:
fitness_count.append(fitness.L2norm_min_pairwise_diff() + alpha*np.log(fitness.L2norm_min_column() - offset))
else:
fitness_count.append(fitness_count[-1])
best_count = Count_Matrix
else:
fitness_count.append(fitness.L2norm_min_pairwise_diff() + alpha*np.log(fitness.L2norm_min_column() - offset))
pop_best_fitness.append(max(pop_fitness_cycle))
pop_average_fitness.append(sum(pop_fitness_cycle)/len(pop_fitness_cycle))
L2min_pairwise_diff_track.append(max(pairwise_cycle))
L2min_column_track.append(max(min_column_cycle))
return(cycle_track, best_probe_set, best_count, pop_best_fitness, pop_average_fitness, L2min_pairwise_diff_track, L2min_column_track)
# genetic algorithm for maximizing fitness score
# a single kmer is representative of a genes, a sets of kmer constitute a chromosome, a set of chromosomes represent a population
# initial population size is randomly chosen, initial chromosome size is the number of kmers
def gene_alg(cycles, chrom_per_pop, genes_per_chrom, kmer_enumerate, bacteria_dict, seed, alpha, offset):
random.seed(seed)
start_time = time.time()
bacteria_num = len(bacteria_dict)
gen_alg = genetic_alg(cycles, chrom_per_pop, genes_per_chrom, kmer_enumerate, bacteria_num, bacteria_dict, alpha, offset)
gen_alg.pop_ini()
cycle_track = []
fitness_score = []
pop_fitness_average = []
exe_time = []
L2_pairwise = []
L2_column = []
for generation in range(cycles):
cycle_track.append(generation)
gen_alg.pop_fitness_cal()
gen_alg.select_mating_pool()
gen_alg.crossover_mutation()
fitness_score.append(np.max(gen_alg.pop_fitness))
L2_pairwise.append(np.max(gen_alg.pop_L2pairwise))
L2_column.append(np.max(gen_alg.pop_L2column))
if generation == 0:
best_probe_set = gen_alg.best_probe_set_cycle
best_count = gen_alg.best_count_cycle
if (np.max(gen_alg.pop_fitness) > np.max(fitness_score)):
best_probe_set = gen_alg.best_probe_set_cycle
best_count = gen_alg.best_count_cycle
pop_fitness_average.append(gen_alg.pop_fitness_average)
# print(gen_alg.pop_fitness)
exe_time.append(time.time() - start_time)
return(cycle_track, fitness_score, pop_fitness_average, best_probe_set, best_count, exe_time, L2_pairwise, L2_column)
# Construct the count matrix
def kmer_count(kmer_num,bacteria_num, bacteria_dict, kmer_set):
start_time = time.time()
total_records_length = np.zeros((1,bacteria_num))
bacteria_index = 0
Count_Matrix = np.zeros((kmer_num,bacteria_num))
for bacteria in bacteria_dict.values():
for records in globals()[bacteria]:
sequence = records.seq
probe_index = 0
total_records_length[0,bacteria_index] += len(sequence)
for probe in kmer_set:
true_probe_index = list(kmer_set).index(probe)
Count_Matrix[true_probe_index,bacteria_index] += sequence.count(probe)
probe_index += 1
if (probe_index%1000) == 0:
print(time.time() - start_time)
bacteria_index += 1
return(Count_Matrix)
# Obtain the top ranked N probes in a set of kmer
# a class containing different ways the fitness score can be calculated
class UMD_fitness_score:
def __init__(self, Count_Matrix, bacteria_dict):
self.Count_Matrix = Count_Matrix
self.bacteria_num = len(Count_Matrix[1,:])
self.bacteria_dict = bacteria_dict
self.L2norm_pairwise = {}
for i in range(self.bacteria_num):
for j in range(self.bacteria_num):
if i > j:
self. L2norm_pairwise[str(list(self.bacteria_dict.keys())[i]) + '-' + str(list(self.bacteria_dict.keys())[j])] = linalg.norm(self.Count_Matrix[:,i] - self.Count_Matrix[:,j])
def __str__(self):
return('Fitness score class to calculate different fitness score for a kmer count matrixfor universal microbial diagnostic platform')
def L2norm_pairwise(self):
return(self.L2norm_pairwise)
def L2norm_max(self):
return(max(list(self.L2norm_pairwise.values())))
def L2norm_ave(self):
return(sum(list(self.L2norm_pairwise.values()))/len(list(self.L2norm_pairwise.values())))
def L2norm_min_pairwise_diff(self):
return(min(list(self.L2norm_pairwise.values())))
def L2norm_min_column(self):
self.column_norm = []
for i in range(len(self.Count_Matrix[0])):
self.column_norm.append(linalg.norm(self.Count_Matrix[:,i]))
self.min_column = min(self.column_norm)
return(self.min_column)
# class of genetic algorithm containing the steps for genetic algorithms
# a single kmer is representative of a genes, a sets of kmer constitute a chromosome, a set of chromosomes represent a population
# initial population size is randomly chosen, initial chromosome size is the number of kmers
class genetic_alg:
def __init__(self, cycles, chrom_per_pop, genes_per_chrom, kmer_enumerate, bacteria_num, bacteria_dict, alpha, offset):
self.kmer_enumerate = kmer_enumerate
self.bacteria_num = bacteria_num
self.bacteria_dict = bacteria_dict
self.cycles = cycles
self.chrom_per_pop = chrom_per_pop
self.genes_per_chrom = genes_per_chrom
self.pop_size = (chrom_per_pop, self.genes_per_chrom)
self.pop = []
self.pop_fitness = []
self.pop_L2pairwise = []
self.pop_L2column = []
self.num_parent = 2
self.parents = []
self.offsprings = []
self.pop_fitness_average = 0
self.best_probe_set_cycle = 0
self.best_count_cycle = 0
self.alpha = alpha
self.offset = offset
def __str__(self):
return('Class of genetic algorithm containing steps customized for optimizing kmer for Universal Microbial Diagnostic platform')
# initialize the initial population
def pop_ini(self):
for index in range(self.chrom_per_pop):
# for kmerindex in range(self.genes_per_chrom):
# chrom = random.sample(tuple(self.kmer_enumerate), self.genes_per_chrom)
# self.pop[index][:] = chrom[kmerindex]
# self.pop[index][:] = list(random.sample(tuple(self.kmer_enumerate), self.genes_per_chrom))
self.pop.append(random.sample(tuple(self.kmer_enumerate), self.genes_per_chrom))
return self.pop
# calculate the fitness of each sets of kmer in the population
def pop_fitness_cal(self):
self.pop_fitness = []
for i in range(self.chrom_per_pop):
Count_Matrix = kmer_count(self.genes_per_chrom, self.bacteria_num, self.bacteria_dict, self.pop[i])
fitness = UMD_fitness_score(Count_Matrix, bacteria_dict)
self.pop_fitness.append(fitness.L2norm_min_pairwise_diff() + self.alpha*np.log(fitness.L2norm_min_column() - self.offset))
self.pop_L2pairwise.append(fitness.L2norm_min_pairwise_diff())
self.pop_L2column.append(fitness.L2norm_min_column())
if (fitness.L2norm_min_pairwise_diff() + self.alpha*np.log(fitness.L2norm_min_column() - self.offset)) >= np.max(self.pop_fitness):
self.best_probe_set_cycle = self.pop[i]
self.best_count_cycle = Count_Matrix
self.pop_fitness_average = sum(self.pop_fitness)/len(self.pop_fitness)
return(self.pop_fitness)
def select_mating_pool(self):
fitness = copy(self.pop_fitness)
self.parents = []
for parent_num in range(self.num_parent):
max_fitness_idx = np.argmax(fitness)
self.parents.append(self.pop[max_fitness_idx])
fitness[max_fitness_idx] = -999999
return self.parents
def crossover_mutation(self):
# point at which kmer set is switched between 2 sets, here I take the middle
crossover_point = int(self.genes_per_chrom/2)
offspring_size = self.chrom_per_pop - len(self.parents)
self.offsprings = []
self.offsprings.append(self.parents[0][:crossover_point] + self.parents[1][crossover_point:])
self.offsprings.append(self.parents[1][:crossover_point] + self.parents[0][crossover_point:])
# print('parent size',len(self.parents))
# print('offsprings size', len(self.offsprings))
for k in range(offspring_size - 2):
if int(k/2) == 0:
mutated_offspring = copy(self.offsprings[0])
else:
mutated_offspring = copy(self.offsprings[1])
mutated_offspring[random.randint(0,len(mutated_offspring)-1)] = random.sample(self.kmer_enumerate,1)[0]
#print(self.offsprings)
self.offsprings.append(mutated_offspring)
self.pop = []
for j in range(len(self.parents)):
self.pop.append(self.parents[j])
for k in range(len(self.offsprings)):
self.pop.append(self.offsprings[k])
return self.pop
| [
"Bio.SeqIO.parse",
"numpy.argmax",
"random.sample",
"numpy.zeros",
"copy.copy",
"time.time",
"numpy.max",
"random.seed",
"numpy.linalg.norm",
"itertools.product"
] | [((1168, 1207), 'Bio.SeqIO.parse', 'SeqIO.parse', (["(bacteria + '.fna')", '"""fasta"""'], {}), "(bacteria + '.fna', 'fasta')\n", (1179, 1207), False, 'from Bio import SeqIO\n'), ((1818, 1835), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1829, 1835), False, 'import random\n'), ((1982, 2009), 'numpy.zeros', 'np.zeros', (['(1, bacteria_num)'], {}), '((1, bacteria_num))\n', (1990, 2009), True, 'import numpy as np\n'), ((4759, 4776), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4770, 4776), False, 'import random\n'), ((4801, 4812), 'time.time', 'time.time', ([], {}), '()\n', (4810, 4812), False, 'import time\n'), ((6302, 6313), 'time.time', 'time.time', ([], {}), '()\n', (6311, 6313), False, 'import time\n'), ((6342, 6369), 'numpy.zeros', 'np.zeros', (['(1, bacteria_num)'], {}), '((1, bacteria_num))\n', (6350, 6369), True, 'import numpy as np\n'), ((6413, 6447), 'numpy.zeros', 'np.zeros', (['(kmer_num, bacteria_num)'], {}), '((kmer_num, bacteria_num))\n', (6421, 6447), True, 'import numpy as np\n'), ((11661, 11683), 'copy.copy', 'copy', (['self.pop_fitness'], {}), '(self.pop_fitness)\n', (11665, 11683), False, 'from copy import copy\n'), ((5380, 5407), 'numpy.max', 'np.max', (['gen_alg.pop_fitness'], {}), '(gen_alg.pop_fitness)\n', (5386, 5407), True, 'import numpy as np\n'), ((5437, 5467), 'numpy.max', 'np.max', (['gen_alg.pop_L2pairwise'], {}), '(gen_alg.pop_L2pairwise)\n', (5443, 5467), True, 'import numpy as np\n'), ((5495, 5523), 'numpy.max', 'np.max', (['gen_alg.pop_L2column'], {}), '(gen_alg.pop_L2column)\n', (5501, 5523), True, 'import numpy as np\n'), ((5701, 5728), 'numpy.max', 'np.max', (['gen_alg.pop_fitness'], {}), '(gen_alg.pop_fitness)\n', (5707, 5728), True, 'import numpy as np\n'), ((5731, 5752), 'numpy.max', 'np.max', (['fitness_score'], {}), '(fitness_score)\n', (5737, 5752), True, 'import numpy as np\n'), ((11793, 11811), 'numpy.argmax', 'np.argmax', (['fitness'], {}), '(fitness)\n', (11802, 11811), True, 'import 
numpy as np\n'), ((1595, 1630), 'itertools.product', 'itertools.product', (['"""ATCG"""'], {'repeat': 'k'}), "('ATCG', repeat=k)\n", (1612, 1630), False, 'import itertools\n'), ((6006, 6017), 'time.time', 'time.time', ([], {}), '()\n', (6015, 6017), False, 'import time\n'), ((8550, 8586), 'numpy.linalg.norm', 'linalg.norm', (['self.Count_Matrix[:, i]'], {}), '(self.Count_Matrix[:, i])\n', (8561, 8586), True, 'import numpy.linalg as linalg\n'), ((11328, 11352), 'numpy.max', 'np.max', (['self.pop_fitness'], {}), '(self.pop_fitness)\n', (11334, 11352), True, 'import numpy as np\n'), ((12671, 12695), 'copy.copy', 'copy', (['self.offsprings[0]'], {}), '(self.offsprings[0])\n', (12675, 12695), False, 'from copy import copy\n'), ((12752, 12776), 'copy.copy', 'copy', (['self.offsprings[1]'], {}), '(self.offsprings[1])\n', (12756, 12776), False, 'from copy import copy\n'), ((12868, 12905), 'random.sample', 'random.sample', (['self.kmer_enumerate', '(1)'], {}), '(self.kmer_enumerate, 1)\n', (12881, 12905), False, 'import random\n'), ((7726, 7788), 'numpy.linalg.norm', 'linalg.norm', (['(self.Count_Matrix[:, i] - self.Count_Matrix[:, j])'], {}), '(self.Count_Matrix[:, i] - self.Count_Matrix[:, j])\n', (7737, 7788), True, 'import numpy.linalg as linalg\n'), ((6989, 7000), 'time.time', 'time.time', ([], {}), '()\n', (6998, 7000), False, 'import time\n')] |
import logging
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn import base
logger = logging.getLogger("causalml")
NAN_INT = -98765 # A random integer to impute missing values with
class LabelEncoder(base.BaseEstimator):
"""Label Encoder that groups infrequent values into one label.
Code from https://github.com/jeongyoonlee/Kaggler/blob/master/kaggler/preprocessing/data.py
Attributes:
min_obs (int): minimum number of observation to assign a label.
label_encoders (list of dict): label encoders for columns
label_maxes (list of int): maximum of labels for columns
"""
def __init__(self, min_obs=10):
"""Initialize the LabelEncoder class object.
Args:
min_obs (int): minimum number of observation to assign a label.
"""
self.min_obs = min_obs
def __repr__(self):
return ("LabelEncoder(min_obs={})").format(self.min_obs)
def _get_label_encoder_and_max(self, x):
"""Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
"""
# NaN cannot be used as a key for dict. So replace it with a random integer.
label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]
label_count = label_count[label_count >= self.min_obs]
n_uniq_new = label_count.shape[0]
# If every label appears more than min_obs, new label starts from 0.
# Otherwise, new label starts from 1 and 0 is used for all old labels
# that appear less than min_obs.
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(
np.arange(n_uniq_new) + offset, index=label_count.index
)
max_label = label_encoder.max()
label_encoder = label_encoder.to_dict()
return label_encoder, max_label
def _transform_col(self, x, i):
"""Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels.
"""
return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)
def fit(self, X, y=None):
self.label_encoders = [None] * X.shape[1]
self.label_maxes = [None] * X.shape[1]
for i, col in enumerate(X.columns):
(
self.label_encoders[i],
self.label_maxes[i],
) = self._get_label_encoder_and_max(X[col])
return self
def transform(self, X):
"""Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
"""
for i, col in enumerate(X.columns):
X.loc[:, col] = self._transform_col(X[col], i)
return X
def fit_transform(self, X, y=None):
"""Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
"""
self.label_encoders = [None] * X.shape[1]
self.label_maxes = [None] * X.shape[1]
for i, col in enumerate(X.columns):
(
self.label_encoders[i],
self.label_maxes[i],
) = self._get_label_encoder_and_max(X[col])
X.loc[:, col] = X[col].fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)
return X
class OneHotEncoder(base.BaseEstimator):
"""One-Hot-Encoder that groups infrequent values into one dummy variable.
Code from https://github.com/jeongyoonlee/Kaggler/blob/master/kaggler/preprocessing/data.py
Attributes:
min_obs (int): minimum number of observation to create a dummy variable
label_encoders (list of (dict, int)): label encoders and their maximums
for columns
"""
def __init__(self, min_obs=10):
"""Initialize the OneHotEncoder class object.
Args:
min_obs (int): minimum number of observation to create a dummy variable
"""
self.min_obs = min_obs
self.label_encoder = LabelEncoder(min_obs)
def __repr__(self):
return ("OneHotEncoder(min_obs={})").format(self.min_obs)
def _transform_col(self, x, i):
"""Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
"""
labels = self.label_encoder._transform_col(x, i)
label_max = self.label_encoder.label_maxes[i]
# build row and column index for non-zero values of a sparse matrix
index = np.array(range(len(labels)))
i = index[labels > 0]
j = labels[labels > 0] - 1 # column index starts from 0
if len(i) > 0:
return sparse.coo_matrix(
(np.ones_like(i), (i, j)), shape=(x.shape[0], label_max)
)
else:
# if there is no non-zero value, return no matrix
return None
def fit(self, X, y=None):
self.label_encoder.fit(X)
return self
def transform(self, X):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables
"""
for i, col in enumerate(X.columns):
X_col = self._transform_col(X[col], i)
if X_col is not None:
if i == 0:
X_new = X_col
else:
X_new = sparse.hstack((X_new, X_col))
logger.debug(
"{} --> {} features".format(col, self.label_encoder.label_maxes[i])
)
return X_new
def fit_transform(self, X, y=None):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
sparse matrix encoding categorical variables into dummy variables
"""
self.label_encoder.fit(X)
return self.transform(X)
def load_data(data, features, transformations={}):
"""Load data and set the feature matrix and label vector.
Args:
data (pandas.DataFrame): total input data
features (list of str): column names to be used in the inference model
transformation (dict of (str, func)): transformations to be applied to features
Returns:
X (numpy.matrix): a feature matrix
"""
df = data[features].copy()
bool_cols = [col for col in df.columns if df[col].dtype == bool]
df.loc[:, bool_cols] = df[bool_cols].astype(np.int8)
for col, transformation in transformations.items():
logger.info("Applying {} to {}".format(transformation.__name__, col))
df[col] = df[col].apply(transformation)
cat_cols = [col for col in features if df[col].dtype == np.object]
num_cols = [col for col in features if col not in cat_cols]
logger.info("Applying one-hot-encoding to {}".format(cat_cols))
ohe = OneHotEncoder(min_obs=df.shape[0] * 0.001)
X_cat = ohe.fit_transform(df[cat_cols]).todense()
X = np.hstack([df[num_cols].values, X_cat])
return X
| [
"numpy.ones_like",
"numpy.hstack",
"numpy.arange",
"scipy.sparse.hstack",
"logging.getLogger"
] | [((115, 144), 'logging.getLogger', 'logging.getLogger', (['"""causalml"""'], {}), "('causalml')\n", (132, 144), False, 'import logging\n'), ((7998, 8037), 'numpy.hstack', 'np.hstack', (['[df[num_cols].values, X_cat]'], {}), '([df[num_cols].values, X_cat])\n', (8007, 8037), True, 'import numpy as np\n'), ((1905, 1926), 'numpy.arange', 'np.arange', (['n_uniq_new'], {}), '(n_uniq_new)\n', (1914, 1926), True, 'import numpy as np\n'), ((5502, 5517), 'numpy.ones_like', 'np.ones_like', (['i'], {}), '(i)\n', (5514, 5517), True, 'import numpy as np\n'), ((6373, 6402), 'scipy.sparse.hstack', 'sparse.hstack', (['(X_new, X_col)'], {}), '((X_new, X_col))\n', (6386, 6402), False, 'from scipy import sparse\n')] |
import theano
import theano.tensor as T
import numpy
from Logistic_Regression import LogisticRegression, load_data
import os
import sys
import timeit
from six.moves import cPickle as pickle
# This program will focus on a single-hidden-layer MLP.
# We start off by implementing a class that will represent a hidden layer.
# To construct the MLP we will then only need to throw a
# logistic regression layer on top.
# NOTE: For tanh activation function the interval from which weights should be
# randomly generated, between [- --> + sqrt(6/(fan_in + fan_out))]
# For sigmoid : between [- --> + 4 * sqrt(6/(fan_in + fan_out))]
class HiddenLayer(object):
def __init__ (self, rng, input, n_in, n_out, W = None, b=None, activation = T.tanh):
"""
Typical hidden layer of an MLP: units are fully connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in, n_out)
and the bias vector b is of shape (n_out,)
NOTE: The nonlinearity used here is tanh
Hidden unit activation is given by : tanh(dot(input, W) + b)
rng : numpy.random.RandomState
a random number generator used to initialize weights
input : theano.tensor.dmatrix
symbolic tensor of shape (n_examples, n_in)
n_in : int
dimesionality of input
n_out : int
number of hidden units
activation: theano operation or function
Non linearity to be applied in the layer
"""
self.input = input
if W is None :
W_values = numpy.asarray(
rng.uniform(
low = -numpy.sqrt(6. / (n_in + n_out)),
high = numpy.sqrt(6. / (n_in + n_out)),
size = (n_in, n_out)
),
dtype = theano.config.floatX
)
if activation == T.nnet.sigmoid:
W_values *= 4
W = theano.shared (value = W_values, name='W', borrow = True)
if b is None:
b_values = numpy.zeros((n_out,), dtype = theano.config.floatX)
b = theano.shared(value = b_values, name = 'b', borrow = True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
self.params = [self.W, self.b]
class MLP(object):
"""
MLP is an FFNN, has one or more hidden layers
and a nonlinear activation function.
The final layer, topmost layer is a softmax function.
"""
def __init__ (self, rng, input, n_in, n_hidden, n_out):
"""
initialize the parameters of the perceptron.
rng : numpy.random.RandomState
random number generator to initialize weights
input : theano.tensor.TensorType
input to the architecture (one minibatch)
n_in : int
the number of features of the input variable
n_hidden : int
number of hidden units
n_out : int
number of output units
"""
self.hiddenLayer = HiddenLayer(
rng = rng,
input = input,
n_in = n_in,
n_out = n_hidden,
activation = T.tanh
)
self.logRegressionLayer = LogisticRegression(
input = self.hiddenLayer.output,
n_in = n_hidden,
n_out = n_out
)
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
self.errors = self.logRegressionLayer.errors
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
self.input = input
def test_mlp (learning_rate = 0.01, L1_reg = 0.00, L2_reg = 0.0001, n_epochs = 1000,
dataset = 'mnist.pkl.gz', batch_size = 20, n_hidden = 500):
"""
Gradient descent on a multi-layer-perceptron
learning_rate : float
factor for gradient descent
L1_reg : float, L1-Norm of weights
L2_reg : float, L2-Norm of weights
n_epochs : int, maximal number of epochs to run on the system
dataset : string, path to the MNIST dataset
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x , test_set_y = datasets[2]
# compute the number of mini-batches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow = True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow = True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow = True).shape[0] // batch_size
#####################
# BUILD ACTUAL MODEL #
#####################
print ('Building the model ...')
# allocate symbolic variables for the data
index = T.lscalar()
# Generate symbolic varibales for input : x and labels : y
x = T.matrix('x')
y = T.ivector('y')
rng = numpy.random.RandomState(1234)
# Construct the MLP Class
classifier = MLP(
rng = rng,
input = x,
n_in = 28*28,
n_hidden = n_hidden,
n_out = 10
)
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# computing the gradient of cost with respect to theta
gparams = [T.grad(cost, param) for param in classifier.params]
# specifying the update expression as a list of tuples:
# (variable, update expression) pairs
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)]
train_model = theano.function(
inputs = [index],
outputs = cost,
updates = updates,
givens = {
x : train_set_x[ index * batch_size : (index + 1) * batch_size],
y : train_set_y[ index * batch_size : (index + 1) * batch_size]
}
)
test_model = theano.function(
inputs = [index],
outputs= classifier.errors(y),
givens = {
x : test_set_x[index * batch_size : (index + 1) * batch_size],
y : test_set_y[index * batch_size : (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs = [index],
outputs= classifier.errors(y),
givens = {
x : valid_set_x[index * batch_size : (index + 1) * batch_size],
y : valid_set_y[index * batch_size : (index + 1) * batch_size]
}
)
#################
## TRAIN MODEL ##
#################
print ('Training the model ...')
# Early stopping parameters
"""
Early Stopping Procedure
We'll have patience about the improvement in performance,
after the patience is over.
Early stopping rules provide guidance as to how many iterations can be
run before the learner begins to over-fit.
"""
# look at these many examples before patience is up
patience = 5000
# wait this much longer when a new best is found
patience_increase = 2
improvement_threshold = 0.995
# a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through these many mini-batches before checking the network
# on the validation set ; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
#iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
if this_validation_loss < best_validation_loss:
# improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in range(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
'epoch %i, minibatch %i / %i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_test_batches,
test_score * 100.
)
)
# save the best model
with open('best_model_mnist.pkl', 'wb') as f:
pickle.dump (classifier, f)
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print (
(
'Optimiation complete with best validation score of %f %%,'
'with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
print ('The code runs for %d epochs, with %f epochs/sec '%(
epoch, 1. * epoch / (end_time - start_time) / 60.0 ))
print (('The code for file ' + os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time))), file=sys.stderr)
if __name__ == '__main__':
    # Train and evaluate the MLP when this file is executed as a script.
    test_mlp()
| [
"six.moves.cPickle.dump",
"Logistic_Regression.load_data",
"theano.function",
"theano.tensor.lscalar",
"timeit.default_timer",
"theano.tensor.dot",
"os.path.split",
"theano.tensor.ivector",
"numpy.zeros",
"numpy.random.RandomState",
"theano.tensor.grad",
"theano.shared",
"numpy.mean",
"Log... | [((4632, 4650), 'Logistic_Regression.load_data', 'load_data', (['dataset'], {}), '(dataset)\n', (4641, 4650), False, 'from Logistic_Regression import LogisticRegression, load_data\n'), ((5283, 5294), 'theano.tensor.lscalar', 'T.lscalar', ([], {}), '()\n', (5292, 5294), True, 'import theano.tensor as T\n'), ((5367, 5380), 'theano.tensor.matrix', 'T.matrix', (['"""x"""'], {}), "('x')\n", (5375, 5380), True, 'import theano.tensor as T\n'), ((5389, 5403), 'theano.tensor.ivector', 'T.ivector', (['"""y"""'], {}), "('y')\n", (5398, 5403), True, 'import theano.tensor as T\n'), ((5415, 5445), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(1234)'], {}), '(1234)\n', (5439, 5445), False, 'import numpy\n'), ((6140, 6341), 'theano.function', 'theano.function', ([], {'inputs': '[index]', 'outputs': 'cost', 'updates': 'updates', 'givens': '{x: train_set_x[index * batch_size:(index + 1) * batch_size], y:\n train_set_y[index * batch_size:(index + 1) * batch_size]}'}), '(inputs=[index], outputs=cost, updates=updates, givens={x:\n train_set_x[index * batch_size:(index + 1) * batch_size], y:\n train_set_y[index * batch_size:(index + 1) * batch_size]})\n', (6155, 6341), False, 'import theano\n'), ((7952, 7974), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7972, 7974), False, 'import timeit\n'), ((10257, 10279), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10277, 10279), False, 'import timeit\n'), ((3441, 3518), 'Logistic_Regression.LogisticRegression', 'LogisticRegression', ([], {'input': 'self.hiddenLayer.output', 'n_in': 'n_hidden', 'n_out': 'n_out'}), '(input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out)\n', (3459, 3518), False, 'from Logistic_Regression import LogisticRegression, load_data\n'), ((5837, 5856), 'theano.tensor.grad', 'T.grad', (['cost', 'param'], {}), '(cost, param)\n', (5843, 5856), True, 'import theano.tensor as T\n'), ((2011, 2063), 'theano.shared', 'theano.shared', ([], {'value': 'W_values', 
'name': '"""W"""', 'borrow': '(True)'}), "(value=W_values, name='W', borrow=True)\n", (2024, 2063), False, 'import theano\n'), ((2115, 2164), 'numpy.zeros', 'numpy.zeros', (['(n_out,)'], {'dtype': 'theano.config.floatX'}), '((n_out,), dtype=theano.config.floatX)\n', (2126, 2164), False, 'import numpy\n'), ((2183, 2235), 'theano.shared', 'theano.shared', ([], {'value': 'b_values', 'name': '"""b"""', 'borrow': '(True)'}), "(value=b_values, name='b', borrow=True)\n", (2196, 2235), False, 'import theano\n'), ((2303, 2323), 'theano.tensor.dot', 'T.dot', (['input', 'self.W'], {}), '(input, self.W)\n', (2308, 2323), True, 'import theano.tensor as T\n'), ((8586, 8615), 'numpy.mean', 'numpy.mean', (['validation_losses'], {}), '(validation_losses)\n', (8596, 8615), False, 'import numpy\n'), ((9513, 9536), 'numpy.mean', 'numpy.mean', (['test_losses'], {}), '(test_losses)\n', (9523, 9536), False, 'import numpy\n'), ((10654, 10677), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (10667, 10677), False, 'import os\n'), ((1767, 1799), 'numpy.sqrt', 'numpy.sqrt', (['(6.0 / (n_in + n_out))'], {}), '(6.0 / (n_in + n_out))\n', (1777, 1799), False, 'import numpy\n'), ((10121, 10147), 'six.moves.cPickle.dump', 'pickle.dump', (['classifier', 'f'], {}), '(classifier, f)\n', (10132, 10147), True, 'from six.moves import cPickle as pickle\n'), ((1707, 1739), 'numpy.sqrt', 'numpy.sqrt', (['(6.0 / (n_in + n_out))'], {}), '(6.0 / (n_in + n_out))\n', (1717, 1739), False, 'import numpy\n')] |
import scipy.misc
import numpy as np
import os
from glob import glob
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras.datasets import cifar10, mnist
class ImageData:
    """Holds preprocessing settings and converts inputs into normalized image tensors."""

    def __init__(self, load_size, channels, custom_dataset):
        self.load_size = load_size            # target height/width after resizing
        self.channels = channels              # number of color channels to decode
        self.custom_dataset = custom_dataset  # True when inputs are file paths on disk

    def image_processing(self, filename):
        """Resize to [load_size, load_size] and rescale pixel values to [-1.0, 1.0]."""
        if self.custom_dataset:
            raw = tf.read_file(filename)
            decoded = tf.image.decode_jpeg(raw, channels=self.channels)
        else:
            # Built-in datasets already pass decoded image tensors.
            decoded = filename
        resized = tf.image.resize_images(decoded, [self.load_size, self.load_size])
        return tf.cast(resized, tf.float32) / 127.5 - 1
def load_mnist():
    """Return all MNIST images (train + test) stacked into one (N, 28, 28, 1) array."""
    (train_data, _), (test_data, _) = mnist.load_data()
    combined = np.concatenate((train_data, test_data), axis=0)
    # Add a trailing channel axis so the grayscale images are NHWC-shaped.
    return np.expand_dims(combined, axis=-1)
def load_cifar10():
    """Return all CIFAR-10 images (train + test) stacked into one array."""
    (train_data, _), (test_data, _) = cifar10.load_data()
    return np.concatenate((train_data, test_data), axis=0)
def load_data(dataset_name):
    """Load a dataset by name: 'mnist', 'cifar10', or a folder under ./dataset/<name>."""
    if dataset_name == 'mnist':
        return load_mnist()
    if dataset_name == 'cifar10':
        return load_cifar10()
    # Custom dataset: collect every file path inside ./dataset/<dataset_name>.
    pattern = os.path.join("./dataset", dataset_name, '*.*')
    return glob(pattern)
def preprocessing(x, size):
    """Read an RGB image from path *x*, resize it to size x size, and normalize to [-1, 1]."""
    # NOTE: scipy.misc.imread/imresize are deprecated; cv2 equivalents could replace them.
    image = scipy.misc.imread(x, mode='RGB')
    image = scipy.misc.imresize(image, [size, size])
    return normalize(image)
def normalize(x):
    """Map pixel values from [0, 255] to [-1.0, 1.0]."""
    return x / 127.5 - 1
def save_images(images, size, image_path):
    """Denormalize *images* from [-1, 1] back to [0, 1] and save them as one tiled image."""
    restored = inverse_transform(images)
    return imsave(restored, size, image_path)
def merge(images, size):
    """Tile a batch of images into one big (size[0] rows x size[1] cols) grid image.

    images: array of shape (N, H, W, C) with C in {1, 3, 4}.
    size:   (rows, cols) of the output grid.
    Raises ValueError for any other channel count.
    """
    h, w = images.shape[1], images.shape[2]
    channels = images.shape[3]
    # Color images (RGB or RGBA).
    if channels in (3, 4):
        canvas = np.zeros((h * size[0], w * size[1], channels))
        for idx, image in enumerate(images):
            # Row-major placement: column advances every image, row every size[1] images.
            col = idx % size[1]
            row = idx // size[1]
            canvas[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
        return canvas
    # Grayscale images: drop the channel axis in the output.
    elif channels == 1:
        canvas = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            col = idx % size[1]
            row = idx // size[1]
            canvas[row * h:(row + 1) * h, col * w:(col + 1) * w] = image[:, :, 0]
        return canvas
    else:
        raise ValueError('in merge(images,size) images parameter '
                         'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
    """Tile *images* into one grid via merge() and write the result to *path*."""
    # image = np.squeeze(merge(images, size)) # drop the channel axis when it is 1?
    return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
    """Map images from the network's [-1, 1] range back to [0, 1]."""
    return (images + 1.) / 2.
def check_folder(log_dir):
    """Ensure *log_dir* exists on disk, creating it when missing; return the path unchanged."""
    if os.path.exists(log_dir):
        return log_dir
    os.makedirs(log_dir)
    return log_dir
def show_all_variables():
    # Print a per-variable breakdown (name, shape, size) of all trainable TF variables.
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def str2bool(x):
    """Parse a string as a boolean flag: True only for 'true' (case-insensitive).

    Bug fix: the original `x.lower() in ('true')` performed a SUBSTRING test,
    because ('true') is a plain string, not a one-element tuple — so inputs
    like 'rue' or 't' wrongly returned True.
    """
    return x.lower() == 'true'
##################################################################################
# Regularization 正交(垂直)初始化
##################################################################################
# 用在卷積
def orthogonal_regularizer(scale):
    """Build a kernel regularizer for Conv layers that penalizes non-orthogonal filters."""
    def ortho_reg(w):
        """Return scale * l2_loss(Wt*W - I) for the 4D kernel flattened to 2D."""
        # Collapse the spatial and input-channel axes; each output channel becomes a column.
        _, _, _, out_channels = w.get_shape().as_list()
        w = tf.reshape(w, [-1, out_channels])
        # Orthogonal columns satisfy Wt*W == I, so the residual measures the violation.
        identity = tf.eye(out_channels)
        gram = tf.matmul(tf.transpose(w), w)
        residual = tf.subtract(gram, identity)
        return scale * tf.nn.l2_loss(residual)
    return ortho_reg
# 用在Dense
def orthogonal_regularizer_fully(scale):
    """Build a regularizer for fully connected layers enforcing orthogonal weight columns."""
    def ortho_reg_fully(w):
        """Return scale * l2_loss(Wt*W - I) for a 2D weight matrix."""
        _, units = w.get_shape().as_list()
        identity = tf.eye(units)
        gram = tf.matmul(tf.transpose(w), w)
        residual = tf.subtract(gram, identity)
        return scale * tf.nn.l2_loss(residual)
    return ortho_reg_fully
| [
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.matmul",
"os.path.join",
"keras.datasets.cifar10.load_data",
"tensorflow.subtract",
"os.path.exists",
"tensorflow.cast",
"tensorflow.image.resize_images",
"tensorflow.eye",
"tensorflow.transpose",
"tensorflow.contrib.slim.mode... | [((916, 933), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (931, 933), False, 'from keras.datasets import cifar10, mnist\n'), ((942, 989), 'numpy.concatenate', 'np.concatenate', (['(train_data, test_data)'], {'axis': '(0)'}), '((train_data, test_data), axis=0)\n', (956, 989), True, 'import numpy as np\n'), ((998, 1024), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1012, 1024), True, 'import numpy as np\n'), ((1120, 1139), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1137, 1139), False, 'from keras.datasets import cifar10, mnist\n'), ((1148, 1195), 'numpy.concatenate', 'np.concatenate', (['(train_data, test_data)'], {'axis': '(0)'}), '((train_data, test_data), axis=0)\n', (1162, 1195), True, 'import numpy as np\n'), ((3157, 3181), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3179, 3181), True, 'import tensorflow as tf\n'), ((3186, 3247), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['model_vars'], {'print_info': '(True)'}), '(model_vars, print_info=True)\n', (3218, 3247), True, 'import tensorflow.contrib.slim as slim\n'), ((699, 765), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['x_decode', '[self.load_size, self.load_size]'], {}), '(x_decode, [self.load_size, self.load_size])\n', (721, 765), True, 'import tensorflow as tf\n'), ((2036, 2075), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1], c)'], {}), '((h * size[0], w * size[1], c))\n', (2044, 2075), True, 'import numpy as np\n'), ((3040, 3063), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3054, 3063), False, 'import os\n'), ((3073, 3093), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3084, 3093), False, 'import os\n'), ((3865, 3887), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, c]'], {}), '(w, [-1, c])\n', (3875, 3887), True, 
'import tensorflow as tf\n'), ((3994, 4003), 'tensorflow.eye', 'tf.eye', (['c'], {}), '(c)\n', (4000, 4003), True, 'import tensorflow as tf\n'), ((4089, 4104), 'tensorflow.transpose', 'tf.transpose', (['w'], {}), '(w)\n', (4101, 4104), True, 'import tensorflow as tf\n'), ((4138, 4163), 'tensorflow.matmul', 'tf.matmul', (['w_transpose', 'w'], {}), '(w_transpose, w)\n', (4147, 4163), True, 'import tensorflow as tf\n'), ((4187, 4215), 'tensorflow.subtract', 'tf.subtract', (['w_mul', 'identity'], {}), '(w_mul, identity)\n', (4198, 4215), True, 'import tensorflow as tf\n'), ((4294, 4312), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['reg'], {}), '(reg)\n', (4307, 4312), True, 'import tensorflow as tf\n'), ((4770, 4779), 'tensorflow.eye', 'tf.eye', (['c'], {}), '(c)\n', (4776, 4779), True, 'import tensorflow as tf\n'), ((4802, 4817), 'tensorflow.transpose', 'tf.transpose', (['w'], {}), '(w)\n', (4814, 4817), True, 'import tensorflow as tf\n'), ((4834, 4859), 'tensorflow.matmul', 'tf.matmul', (['w_transpose', 'w'], {}), '(w_transpose, w)\n', (4843, 4859), True, 'import tensorflow as tf\n'), ((4874, 4902), 'tensorflow.subtract', 'tf.subtract', (['w_mul', 'identity'], {}), '(w_mul, identity)\n', (4885, 4902), True, 'import tensorflow as tf\n'), ((4962, 4980), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['reg'], {}), '(reg)\n', (4975, 4980), True, 'import tensorflow as tf\n'), ((590, 612), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (602, 612), True, 'import tensorflow as tf\n'), ((636, 683), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['x'], {'channels': 'self.channels'}), '(x, channels=self.channels)\n', (656, 683), True, 'import tensorflow as tf\n'), ((2441, 2477), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1])'], {}), '((h * size[0], w * size[1]))\n', (2449, 2477), True, 'import numpy as np\n'), ((780, 804), 'tensorflow.cast', 'tf.cast', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (787, 804), True, 'import 
tensorflow as tf\n'), ((1438, 1484), 'os.path.join', 'os.path.join', (['"""./dataset"""', 'dataset_name', '"""*.*"""'], {}), "('./dataset', dataset_name, '*.*')\n", (1450, 1484), False, 'import os\n')] |
"""Save the number of trainable parameter and inference speed of all available models."""
# =============================================================================
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
from pathlib import Path
from typing import Union
import numpy as np
import pandas as pd
import plotly.express as px
import torch
from tqdm import tqdm
import ptlflow
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.utils.timer import Timer
from ptlflow.utils.utils import count_parameters, get_list_of_available_models_list, make_divisible
TABLE_COLS = ['Model', 'Params', 'Time(ms)']
def _init_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the speed benchmark.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing model selection, input size, sample count, and output options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', type=str, default='all', choices=['all']+get_list_of_available_models_list(),
        # Bug fix: the original help text was copy-pasted from --csv_path.
        help=('Name of the model to benchmark, or "all" to benchmark every available model.'))
    parser.add_argument(
        '--csv_path', type=str, default=None,
        help=('Path to a csv file with the speed results.'))
    parser.add_argument(
        '--num_samples', type=int, default=20,
        help=('Number of forwards to estimate average time'))
    parser.add_argument(
        '--input_size', type=int, nargs=2, default=(500, 1000),
        help=('Resolution of the input to forward.'))
    parser.add_argument(
        '--output_path', type=str, default=str(Path('outputs/speed')),
        help=('Path to a directory where the outputs will be saved.'))
    return parser
def benchmark(
    args: argparse.Namespace
) -> pd.DataFrame:
    """Run the benchmark on all models.

    Parameters
    ----------
    args : argparse.Namespace
        Arguments for configuring the benchmark.

    Returns
    -------
    pd.DataFrame
        A DataFrame with one row per model: name, trainable parameter count,
        and average forward time in milliseconds.
    """
    output_path = Path(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    if args.model == 'all':
        model_names = ptlflow.models_dict.keys()
    else:
        model_names = [args.model]

    rows = []
    for mname in tqdm(model_names):
        model = ptlflow.get_model(mname)
        model = model.eval()
        if torch.cuda.is_available():
            model = model.cuda()
        model_params = count_parameters(model)
        infer_timer = estimate_inference_time(args, model)
        rows.append({
            TABLE_COLS[0]: mname,
            TABLE_COLS[1]: model_params,
            TABLE_COLS[2]: infer_timer * 1000})

    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect the rows first and build the frame in one shot instead.
    df = pd.DataFrame(rows, columns=TABLE_COLS)
    df = df.round(3)
    df.to_csv(output_path / f'speed_benchmark-{args.model}.csv', index=False)
    save_plot(output_path, args.model, df)
    return df
@torch.no_grad()
def estimate_inference_time(
    args: argparse.Namespace,
    model: BaseModel
) -> float:
    """Compute the average forward time for one model.

    Parameters
    ----------
    args : argparse.Namespace
        Arguments for configuring the benchmark.
    model : BaseModel
        The model to perform the estimation.

    Returns
    -------
    float
        The average time of the runs, in seconds.
    """
    timer = Timer('inference')
    # Round the requested resolution to a multiple of the model's output stride.
    height = make_divisible(args.input_size[0], model.output_stride)
    width = make_divisible(args.input_size[1], model.output_stride)
    for sample_idx in range(args.num_samples + 1):
        images = torch.rand(1, 2, 3, height, width)
        if torch.cuda.is_available():
            images = images.cuda()
        inputs = {'images': images}
        # The very first forward is excluded from timing: it pays one-off
        # memory-allocation costs and would skew the average.
        if sample_idx > 0:
            timer.tic()
        model(inputs)
        if sample_idx > 0:
            timer.toc()
    return timer.mean()
def save_plot(
    output_dir: Union[str, Path],
    model_name: str,
    df: pd.DataFrame
) -> None:
    """Create a scatter plot of the results and save it to disk as HTML.

    Parameters
    ----------
    output_dir : Union[str, Path]
        Path to the directory where the plot will be saved.
    model_name : str
        Name of the model. Used just to name the resulting file.
    df : pd.DataFrame
        A DataFrame with the benchmark results.
    """
    df = df.dropna()
    output_dir = Path(output_dir)
    # Dead code removed: the original built a df copy with a log10 column that
    # was never used — the plot already log-scales the x axis via log_x=True.
    fig = px.scatter(
        df, x=TABLE_COLS[1], y=TABLE_COLS[2], color=TABLE_COLS[0], symbol=TABLE_COLS[0], log_x=True,
        title='Parameters x Forward time')
    fig.update_traces(
        marker={
            'size': 20,
            'line': {'width': 2, 'color': 'DarkSlateGrey'}},
        selector={'mode': 'markers'})
    fig.update_layout(
        title_font_size=30
    )
    fig.write_html(output_dir / f'speed_plot-{model_name}.html')
if __name__ == '__main__':
    parser = _init_parser()
    args = parser.parse_args()
    if args.csv_path is None:
        # No previous results given: run the full benchmark (this also saves the plot).
        df = benchmark(args)
    else:
        # Reuse the results from a previous run and only regenerate the plot.
        df = pd.read_csv(args.csv_path)
        Path(args.output_path).mkdir(parents=True, exist_ok=True)
        save_plot(args.output_path, args.model, df)
    print(f'Results saved to {str(args.output_path)}.')
| [
"tqdm.tqdm",
"ptlflow.utils.utils.make_divisible",
"argparse.ArgumentParser",
"ptlflow.utils.utils.get_list_of_available_models_list",
"pandas.read_csv",
"ptlflow.models_dict.keys",
"ptlflow.get_model",
"pathlib.Path",
"ptlflow.utils.timer.Timer",
"torch.cuda.is_available",
"ptlflow.utils.utils.... | [((3420, 3435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3433, 3435), False, 'import torch\n'), ((1308, 1333), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1331, 1333), False, 'import argparse\n'), ((2626, 2648), 'pathlib.Path', 'Path', (['args.output_path'], {}), '(args.output_path)\n', (2630, 2648), False, 'from pathlib import Path\n'), ((2840, 2857), 'tqdm.tqdm', 'tqdm', (['model_names'], {}), '(model_names)\n', (2844, 2857), False, 'from tqdm import tqdm\n'), ((3853, 3871), 'ptlflow.utils.timer.Timer', 'Timer', (['"""inference"""'], {}), "('inference')\n", (3858, 3871), False, 'from ptlflow.utils.timer import Timer\n'), ((4897, 4913), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (4901, 4913), False, 'from pathlib import Path\n'), ((5002, 5029), 'numpy.log10', 'np.log10', (['df[TABLE_COLS[1]]'], {}), '(df[TABLE_COLS[1]])\n', (5010, 5029), True, 'import numpy as np\n'), ((5041, 5183), 'plotly.express.scatter', 'px.scatter', (['df'], {'x': 'TABLE_COLS[1]', 'y': 'TABLE_COLS[2]', 'color': 'TABLE_COLS[0]', 'symbol': 'TABLE_COLS[0]', 'log_x': '(True)', 'title': '"""Parameters x Forward time"""'}), "(df, x=TABLE_COLS[1], y=TABLE_COLS[2], color=TABLE_COLS[0],\n symbol=TABLE_COLS[0], log_x=True, title='Parameters x Forward time')\n", (5051, 5183), True, 'import plotly.express as px\n'), ((2751, 2777), 'ptlflow.models_dict.keys', 'ptlflow.models_dict.keys', ([], {}), '()\n', (2775, 2777), False, 'import ptlflow\n'), ((2875, 2899), 'ptlflow.get_model', 'ptlflow.get_model', (['mname'], {}), '(mname)\n', (2892, 2899), False, 'import ptlflow\n'), ((2940, 2965), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2963, 2965), False, 'import torch\n'), ((3023, 3046), 'ptlflow.utils.utils.count_parameters', 'count_parameters', (['model'], {}), '(model)\n', (3039, 3046), False, 'from ptlflow.utils.utils import count_parameters, get_list_of_available_models_list, 
make_divisible\n'), ((4132, 4157), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4155, 4157), False, 'import torch\n'), ((5652, 5678), 'pandas.read_csv', 'pd.read_csv', (['args.csv_path'], {}), '(args.csv_path)\n', (5663, 5678), True, 'import pandas as pd\n'), ((2472, 2498), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""str"""'}), "([], dtype='str')\n", (2481, 2498), True, 'import pandas as pd\n'), ((2524, 2550), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (2533, 2550), True, 'import pandas as pd\n'), ((2576, 2604), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (2585, 2604), True, 'import pandas as pd\n'), ((1419, 1454), 'ptlflow.utils.utils.get_list_of_available_models_list', 'get_list_of_available_models_list', ([], {}), '()\n', (1452, 1454), False, 'from ptlflow.utils.utils import count_parameters, get_list_of_available_models_list, make_divisible\n'), ((1998, 2019), 'pathlib.Path', 'Path', (['"""outputs/speed"""'], {}), "('outputs/speed')\n", (2002, 2019), False, 'from pathlib import Path\n'), ((3990, 4045), 'ptlflow.utils.utils.make_divisible', 'make_divisible', (['args.input_size[0]', 'model.output_stride'], {}), '(args.input_size[0], model.output_stride)\n', (4004, 4045), False, 'from ptlflow.utils.utils import count_parameters, get_list_of_available_models_list, make_divisible\n'), ((4063, 4118), 'ptlflow.utils.utils.make_divisible', 'make_divisible', (['args.input_size[1]', 'model.output_stride'], {}), '(args.input_size[1], model.output_stride)\n', (4077, 4118), False, 'from ptlflow.utils.utils import count_parameters, get_list_of_available_models_list, make_divisible\n'), ((5687, 5709), 'pathlib.Path', 'Path', (['args.output_path'], {}), '(args.output_path)\n', (5691, 5709), False, 'from pathlib import Path\n')] |
# coding: utf-8
# @Author: oliver
# @Date: 2019-11-25 20:52:44
import os
import sys
import numpy as np
from shapely.geometry import *
labels_path = 'origin_labels'
output_dir = 'gt_labels'

# Convert every raw annotation file into a ground-truth label file:
# each output row is a 28-value polygon (14 absolute (x, y) points),
# normalized to clockwise orientation; invalid polygons are skipped.
labels_list = os.listdir(labels_path)
for file in labels_list:
    file_name = os.path.join(labels_path, file)
    with open(file_name, encoding='utf-8', mode='r') as f:
        boxes = []
        for line in f.readlines():
            # Strip a possible BOM (decoded and raw-byte forms) before splitting.
            params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
            # Bug fix: np.int was removed in NumPy >= 1.24; the builtin int is equivalent.
            x1 = int(params[0])
            y1 = int(params[1])
            # Fields 4..31 hold 14 (x, y) offsets relative to (x1, y1).
            box = [int(params[i]) for i in range(4, 32)]
            box = np.asarray(np.asarray(box) + ([x1 * 1.0, y1 * 1.0] * 14)).astype(int)
            box = [[int(box[j]), int(box[j + 1])] for j in range(0, len(box), 2)]
            try:
                pgt = Polygon(box)
            except Exception:
                # Bug fix: the original printed `pgt`, which is unbound when the
                # constructor raises; report the offending points instead.
                print('Not a valid polygon.', box)
                continue
            if not pgt.is_valid:
                # Bug fix: the original printed the undefined name `pts` (NameError).
                print('GT polygon has intersecting sides.', box)
                continue
            # shapely's LinearRing reports counter-clockwise order; flip to clockwise.
            pRing = LinearRing(box)
            if pRing.is_ccw:
                box.reverse()
            boxes.append(np.array(box).reshape(-1))
    boxes = np.asarray(boxes)
    saved_path = os.path.join(output_dir, file)
    np.savetxt(saved_path, boxes, fmt='%d', delimiter=',')
| [
"numpy.asarray",
"numpy.savetxt",
"numpy.int",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((208, 231), 'os.listdir', 'os.listdir', (['labels_path'], {}), '(labels_path)\n', (218, 231), False, 'import os\n'), ((273, 304), 'os.path.join', 'os.path.join', (['labels_path', 'file'], {}), '(labels_path, file)\n', (285, 304), False, 'import os\n'), ((1300, 1330), 'os.path.join', 'os.path.join', (['output_dir', 'file'], {}), '(output_dir, file)\n', (1312, 1330), False, 'import os\n'), ((1335, 1389), 'numpy.savetxt', 'np.savetxt', (['saved_path', 'boxes'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(saved_path, boxes, fmt='%d', delimiter=',')\n", (1345, 1389), True, 'import numpy as np\n'), ((1265, 1282), 'numpy.asarray', 'np.asarray', (['boxes'], {}), '(boxes)\n', (1275, 1282), True, 'import numpy as np\n'), ((518, 535), 'numpy.int', 'np.int', (['params[0]'], {}), '(params[0])\n', (524, 535), True, 'import numpy as np\n'), ((553, 570), 'numpy.int', 'np.int', (['params[1]'], {}), '(params[1])\n', (559, 570), True, 'import numpy as np\n'), ((591, 608), 'numpy.int', 'np.int', (['params[i]'], {}), '(params[i])\n', (597, 608), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (1230, 1235), True, 'import numpy as np\n'), ((661, 676), 'numpy.asarray', 'np.asarray', (['box'], {}), '(box)\n', (671, 676), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def Three_element_add(array):
    """Return the element-wise sum of each entry with its next two neighbors.

    Positions past the end are treated as 0; the result has len(array) - 2 entries.
    """
    shifted_one = np.append(array[1:], np.array([0]))
    shifted_two = np.append(array[2:], np.array([0, 0]))
    total = array + shifted_one + shifted_two
    return total[:-2]
def VThin(image, array):
    """One vertical thinning pass over *image*, modified in place and returned.

    image: binary image where foreground pixels are 0 and background is 255
           (inferred from the `image[i, j] == 0` test below).
    array: 256-entry lookup table; the index is the 8-neighborhood bitmask and
           the value (0 or 1) decides the pixel's new state.
    """
    NEXT = 1
    height, width = image.shape[:2]
    for i in range(1,height):
        M_all = Three_element_add(image[i])
        for j in range(1,width):
            if NEXT == 0:
                # The previous pixel was just rewritten; skip one to avoid over-thinning.
                NEXT = 1
            else:
                # M is nonzero when the pixel has a non-black neighbor in this row window.
                M = M_all[j-1] if j<width-1 else 1
                if image[i, j] == 0 and M != 0:
                    a = np.zeros(9,dtype=np.uint8)
                    if height-1 > i and width-1 > j:
                        kernel = image[i - 1:i + 2, j - 1:j + 2]
                        a = np.where(kernel == 255, 1, 0)
                        a = a.reshape(1, -1)[0]
                    # Encode the 3x3 neighborhood as an 8-bit index (center weight is 0).
                    NUM = np.array([1,2,4,8,0,16,32,64,128],dtype=np.uint8)
                    sumArr = np.sum(a*NUM)
                    image[i, j] = array[sumArr] * 255
                    if array[sumArr] == 1:
                        NEXT = 0
    return image
def HThin(image, array):
    """One horizontal thinning pass: same lookup-table scheme as VThin, but
    scanning column by column. Modifies *image* in place and returns it.
    """
    height, width = image.shape[:2]
    NEXT = 1
    for j in range(1,width):
        M_all = Three_element_add(image[:,j])
        for i in range(1,height):
            if NEXT == 0:
                # The previous pixel was just rewritten; skip one to avoid over-thinning.
                NEXT = 1
            else:
                # M is nonzero when the pixel has a non-black neighbor in this column window.
                M = M_all[i-1] if i < height - 1 else 1
                if image[i, j] == 0 and M != 0:
                    a = np.zeros(9,dtype=np.uint8)
                    if height - 1 > i and width - 1 > j:
                        kernel = image[i - 1:i + 2, j - 1:j + 2]
                        a = np.where(kernel == 255, 1, 0)
                        a = a.reshape(1, -1)[0]
                    # Encode the 3x3 neighborhood as an 8-bit index (center weight is 0).
                    NUM = np.array([1, 2, 4, 8, 0, 16, 32, 64, 128],dtype=np.uint8)
                    sumArr = np.sum(a * NUM)
                    image[i, j] = array[sumArr] * 255
                    if array[sumArr] == 1:
                        NEXT = 0
    return image
def Xihua(binary, array, num=10):
    """Skeletonize a binary image by alternating vertical and horizontal thinning passes.

    binary: source binary image (left unmodified; a padded copy is thinned).
    array:  256-entry thinning lookup table (see VThin/HThin).
    num:    number of V+H thinning iterations to run.
    """
    # Pad one black row/column on the top and left so 3x3 windows stay in bounds.
    padded = cv2.copyMakeBorder(binary.copy(), 1, 0, 1, 0, cv2.BORDER_CONSTANT, value=0)
    for _ in range(num):
        VThin(padded, array)
        HThin(padded, array)
    return padded
# 256-entry thinning lookup table indexed by the 8-neighborhood bitmask
# (see VThin/HThin): value 1 sets the pixel to 255, value 0 to 0.
array = [0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\
         1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,\
         0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\
         1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,\
         1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
         1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1,\
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
         0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\
         1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1,\
         0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1,\
         1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,\
         1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
         1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,\
         1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0,\
         1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0]
def edge_demo(image):
    """Canny edge detection followed by dilation and thinning; writes each stage to disk."""
    blurred = cv2.GaussianBlur(image,(3,3),0) # Gaussian denoising, kept moderate
    gray = cv2.cvtColor(blurred,cv2.COLOR_BGR2GRAY)
    # Compute horizontal and vertical gradients.
    xgrd = cv2.Sobel(gray,cv2.CV_16SC1,1,0)
    ygrd = cv2.Sobel(gray,cv2.CV_16SC1,0,1)
    egde_output = cv2.Canny(xgrd,ygrd,50,150) # 50 = low threshold, 150 = high threshold
    #egde_output = cv.Canny(gray,50,150) # also usable
    cv2.imwrite('canny_edge.jpg',255-egde_output)
    kernel = np.ones((3, 3), np.uint8)
    img_dilate = cv2.dilate(egde_output, kernel)
    cv2.imwrite('canny_edge_dilate.jpg',255-img_dilate)
    img_erode = Xihua(255-img_dilate,array)
    cv2.imwrite('canny_edge_erode.jpg',img_erode)
def edge_filter(image):
    """Apply the Roberts, Sobel, and Prewitt edge operators to *image*, threshold
    each response, save them to disk, then fuse all three by pixel-wise
    multiplication (SRP_M.png) and by averaging (SRP_A.png).
    """
    # Roberts cross kernel (2x2).
    kernel1 = np.array([[1,0],
                        [0,-1]])
    # Sobel kernel (3x3).
    kernel2 = np.array([[-1,0,1],
                        [-2,0,2],
                        [-1,0,1]])
    # Prewitt kernel (3x3).
    kernel3 = np.array([[-1,0,1],
                        [-1,0,1],
                        [-1,0,1]])
    threshold = 50
    l = []
    # Sobel: filter with the kernel and its transpose to get both gradient directions.
    res1 = cv2.filter2D(image,-1,kernel2)
    res2 = cv2.filter2D(image,-1,kernel2.T)
    res = np.clip(res1+res2,0,255)
    res[np.where(res>=threshold)] = 255
    l.append(255-res)
    cv2.imwrite('Sobel.png',255-res)
    # Roberts.
    res1 = cv2.filter2D(image,-1,kernel1)
    res2 = cv2.filter2D(image,-1,kernel1.T)
    res = np.clip(res1+res2,0,255)
    res[np.where(res>=threshold)] = 255
    l.append(255-res)
    cv2.imwrite('Roberts.png',255-res)
    # Prewitt.
    res1 = cv2.filter2D(image,-1,kernel3)
    res2 = cv2.filter2D(image,-1,kernel3.T)
    res = np.clip(res1+res2,0,255)
    res[np.where(res>=threshold)] = 255
    l.append(255-res)
    cv2.imwrite('Prewitt.png',255-res)
    # Fuse the three (inverted) responses: product emphasizes agreement,
    # average emphasizes any response.
    length = len(l)
    l1 = l.pop(0).astype(np.float32)
    res1 = l1.copy()
    res2 = l1.copy()
    for img in l:
        res1 = res1 * img
        res2 = res2 + img
    res1 = (res1/255/255).astype(np.uint8)
    res1[np.where(res1>=threshold)] = 255
    cv2.imwrite('SRP_M.png',res1)
    res2 = (res2 / length).astype(np.uint8)
    res2[np.where(res2>=threshold*3)] = 255
    cv2.imwrite('SRP_A.png',res2)
def edge(image):
    """Unsharp-mask style edge map: blur, subtract from the original, threshold,
    and save the result to DoG.png."""
    blur_sigma = 3
    # A (0, 0) kernel size lets OpenCV derive the window from sigma.
    low_pass = cv2.GaussianBlur(image, (0, 0), blur_sigma)
    detail = 255 - cv2.subtract(image, low_pass)
    # Keep only the strongest responses (near-255 on the inverted scale).
    detail[detail <= 250] = 0
    cv2.imwrite('DoG.png', detail)
if __name__ == '__main__':
    # Load the sample once in grayscale (for the filter detectors) and once in color.
    img = cv2.imread('sample.png',0)
    image = cv2.imread('sample.png')
    edge(img)
    edge_filter(img)
# edge_demo(image) | [
"cv2.GaussianBlur",
"cv2.Canny",
"cv2.subtract",
"numpy.sum",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.copyMakeBorder",
"numpy.ones",
"numpy.clip",
"numpy.zeros",
"cv2.imread",
"numpy.where",
"numpy.array",
"cv2.Sobel"
] | [((2132, 2206), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['binary_image', '(1)', '(0)', '(1)', '(0)', 'cv2.BORDER_CONSTANT'], {'value': '(0)'}), '(binary_image, 1, 0, 1, 0, cv2.BORDER_CONSTANT, value=0)\n', (2150, 2206), False, 'import cv2\n'), ((3270, 3304), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(3, 3)', '(0)'], {}), '(image, (3, 3), 0)\n', (3286, 3304), False, 'import cv2\n'), ((3323, 3364), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2GRAY'], {}), '(blurred, cv2.COLOR_BGR2GRAY)\n', (3335, 3364), False, 'import cv2\n'), ((3384, 3419), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_16SC1', '(1)', '(0)'], {}), '(gray, cv2.CV_16SC1, 1, 0)\n', (3393, 3419), False, 'import cv2\n'), ((3428, 3463), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_16SC1', '(0)', '(1)'], {}), '(gray, cv2.CV_16SC1, 0, 1)\n', (3437, 3463), False, 'import cv2\n'), ((3480, 3510), 'cv2.Canny', 'cv2.Canny', (['xgrd', 'ygrd', '(50)', '(150)'], {}), '(xgrd, ygrd, 50, 150)\n', (3489, 3510), False, 'import cv2\n'), ((3576, 3624), 'cv2.imwrite', 'cv2.imwrite', (['"""canny_edge.jpg"""', '(255 - egde_output)'], {}), "('canny_edge.jpg', 255 - egde_output)\n", (3587, 3624), False, 'import cv2\n'), ((3635, 3660), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (3642, 3660), True, 'import numpy as np\n'), ((3678, 3709), 'cv2.dilate', 'cv2.dilate', (['egde_output', 'kernel'], {}), '(egde_output, kernel)\n', (3688, 3709), False, 'import cv2\n'), ((3714, 3768), 'cv2.imwrite', 'cv2.imwrite', (['"""canny_edge_dilate.jpg"""', '(255 - img_dilate)'], {}), "('canny_edge_dilate.jpg', 255 - img_dilate)\n", (3725, 3768), False, 'import cv2\n'), ((3814, 3860), 'cv2.imwrite', 'cv2.imwrite', (['"""canny_edge_erode.jpg"""', 'img_erode'], {}), "('canny_edge_erode.jpg', img_erode)\n", (3825, 3860), False, 'import cv2\n'), ((3899, 3926), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (3907, 3926), True, 'import numpy as np\n'), 
((3963, 4009), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (3971, 4009), True, 'import numpy as np\n'), ((4066, 4112), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])\n', (4074, 4112), True, 'import numpy as np\n'), ((4196, 4228), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel2'], {}), '(image, -1, kernel2)\n', (4208, 4228), False, 'import cv2\n'), ((4238, 4272), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel2.T'], {}), '(image, -1, kernel2.T)\n', (4250, 4272), False, 'import cv2\n'), ((4281, 4309), 'numpy.clip', 'np.clip', (['(res1 + res2)', '(0)', '(255)'], {}), '(res1 + res2, 0, 255)\n', (4288, 4309), True, 'import numpy as np\n'), ((4372, 4407), 'cv2.imwrite', 'cv2.imwrite', (['"""Sobel.png"""', '(255 - res)'], {}), "('Sobel.png', 255 - res)\n", (4383, 4407), False, 'import cv2\n'), ((4416, 4448), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel1'], {}), '(image, -1, kernel1)\n', (4428, 4448), False, 'import cv2\n'), ((4458, 4492), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel1.T'], {}), '(image, -1, kernel1.T)\n', (4470, 4492), False, 'import cv2\n'), ((4501, 4529), 'numpy.clip', 'np.clip', (['(res1 + res2)', '(0)', '(255)'], {}), '(res1 + res2, 0, 255)\n', (4508, 4529), True, 'import numpy as np\n'), ((4592, 4629), 'cv2.imwrite', 'cv2.imwrite', (['"""Roberts.png"""', '(255 - res)'], {}), "('Roberts.png', 255 - res)\n", (4603, 4629), False, 'import cv2\n'), ((4638, 4670), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel3'], {}), '(image, -1, kernel3)\n', (4650, 4670), False, 'import cv2\n'), ((4680, 4714), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel3.T'], {}), '(image, -1, kernel3.T)\n', (4692, 4714), False, 'import cv2\n'), ((4723, 4751), 'numpy.clip', 'np.clip', (['(res1 + res2)', '(0)', '(255)'], {}), '(res1 + res2, 0, 255)\n', (4730, 4751), True, 
'import numpy as np\n'), ((4814, 4851), 'cv2.imwrite', 'cv2.imwrite', (['"""Prewitt.png"""', '(255 - res)'], {}), "('Prewitt.png', 255 - res)\n", (4825, 4851), False, 'import cv2\n'), ((5107, 5137), 'cv2.imwrite', 'cv2.imwrite', (['"""SRP_M.png"""', 'res1'], {}), "('SRP_M.png', res1)\n", (5118, 5137), False, 'import cv2\n'), ((5229, 5259), 'cv2.imwrite', 'cv2.imwrite', (['"""SRP_A.png"""', 'res2'], {}), "('SRP_A.png', res2)\n", (5240, 5259), False, 'import cv2\n'), ((5323, 5366), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', 'kernel_size', 'sigma'], {}), '(image, kernel_size, sigma)\n', (5339, 5366), False, 'import cv2\n'), ((5428, 5453), 'cv2.imwrite', 'cv2.imwrite', (['"""DoG.png"""', 'H'], {}), "('DoG.png', H)\n", (5439, 5453), False, 'import cv2\n'), ((5491, 5518), 'cv2.imread', 'cv2.imread', (['"""sample.png"""', '(0)'], {}), "('sample.png', 0)\n", (5501, 5518), False, 'import cv2\n'), ((5530, 5554), 'cv2.imread', 'cv2.imread', (['"""sample.png"""'], {}), "('sample.png')\n", (5540, 5554), False, 'import cv2\n'), ((117, 130), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (125, 130), True, 'import numpy as np\n'), ((165, 181), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (173, 181), True, 'import numpy as np\n'), ((4314, 4340), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (4322, 4340), True, 'import numpy as np\n'), ((4534, 4560), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (4542, 4560), True, 'import numpy as np\n'), ((4756, 4782), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (4764, 4782), True, 'import numpy as np\n'), ((5070, 5097), 'numpy.where', 'np.where', (['(res1 >= threshold)'], {}), '(res1 >= threshold)\n', (5078, 5097), True, 'import numpy as np\n'), ((5190, 5221), 'numpy.where', 'np.where', (['(res2 >= threshold * 3)'], {}), '(res2 >= threshold * 3)\n', (5198, 5221), True, 'import numpy as np\n'), ((5382, 5404), 
'cv2.subtract', 'cv2.subtract', (['image', 'L'], {}), '(image, L)\n', (5394, 5404), False, 'import cv2\n'), ((621, 648), 'numpy.zeros', 'np.zeros', (['(9)'], {'dtype': 'np.uint8'}), '(9, dtype=np.uint8)\n', (629, 648), True, 'import numpy as np\n'), ((898, 956), 'numpy.array', 'np.array', (['[1, 2, 4, 8, 0, 16, 32, 64, 128]'], {'dtype': 'np.uint8'}), '([1, 2, 4, 8, 0, 16, 32, 64, 128], dtype=np.uint8)\n', (906, 956), True, 'import numpy as np\n'), ((977, 992), 'numpy.sum', 'np.sum', (['(a * NUM)'], {}), '(a * NUM)\n', (983, 992), True, 'import numpy as np\n'), ((1520, 1547), 'numpy.zeros', 'np.zeros', (['(9)'], {'dtype': 'np.uint8'}), '(9, dtype=np.uint8)\n', (1528, 1547), True, 'import numpy as np\n'), ((1801, 1859), 'numpy.array', 'np.array', (['[1, 2, 4, 8, 0, 16, 32, 64, 128]'], {'dtype': 'np.uint8'}), '([1, 2, 4, 8, 0, 16, 32, 64, 128], dtype=np.uint8)\n', (1809, 1859), True, 'import numpy as np\n'), ((1888, 1903), 'numpy.sum', 'np.sum', (['(a * NUM)'], {}), '(a * NUM)\n', (1894, 1903), True, 'import numpy as np\n'), ((794, 823), 'numpy.where', 'np.where', (['(kernel == 255)', '(1)', '(0)'], {}), '(kernel == 255, 1, 0)\n', (802, 823), True, 'import numpy as np\n'), ((1697, 1726), 'numpy.where', 'np.where', (['(kernel == 255)', '(1)', '(0)'], {}), '(kernel == 255, 1, 0)\n', (1705, 1726), True, 'import numpy as np\n')] |
# plots.py
import numpy as np
import pandas as pd
import datetime
from bokeh.layouts import layout
from bokeh.models import (Range1d, ColumnDataSource, RangeTool,
LinearColorMapper, BasicTicker,
ColorBar, HoverTool, BoxSelectTool, Span, Paragraph,
DataRange1d)
from bokeh.models.widgets.tables import (NumberFormatter, DateFormatter,
TableColumn, DataTable)
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.models.tickers import DatetimeTicker, FixedTicker
from bokeh.palettes import brewer, mpl, d3, Colorblind
from bokeh.plotting import figure
from bokeh.transform import jitter
from itertools import zip_longest
from logging import getLogger, NullHandler
from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array
from pandas import notnull, DataFrame, Series
from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype
from bokeh_palettes import rainbow
from typing import Dict, Iterable, Tuple, Union, Sequence, List
# Add do-nothing handler to the module logger.
# This will prevent logged events being output in
# the absence of logging configuration by the user of the library.
getLogger(__name__).addHandler(NullHandler())
# color palettes: concatenations of brewer / d3 categorical palettes used as
# defaults by the plotting helpers below.
# NOTE(review): the `rainbow` list defined further down shadows the
# `rainbow` imported from bokeh_palettes above — confirm which one callers
# expect before renaming either.
discrete_brewer = (brewer['Set2'][8] +
                   brewer['Set1'][9] +
                   brewer['Set3'][12] +
                   brewer['Dark2'][8])
# interleave the two halves of Category20 so adjacent colours contrast
discrete_d3 = (d3['Category20'][20][::2] +
               d3['Category20'][20][1::2])
discrete_dark = (d3['Category10'][10] +
                 brewer['Dark2'][8] +
                 Colorblind[8])
rainbow = [
'#0034f8',
'#0037f6',
'#003af3',
'#003df0',
'#003fed',
'#0041ea',
'#0044e7',
'#0046e4',
'#0048e1',
'#004ade',
'#004cdb',
'#004fd8',
'#0051d5',
'#0053d2',
'#0054d0',
'#0056cd',
'#0058ca',
'#005ac7',
'#005cc4',
'#005ec1',
'#0060be',
'#0061bb',
'#0063b8',
'#0065b6',
'#0066b3',
'#0068b0',
'#006aad',
'#006baa',
'#006da7',
'#006ea5',
'#006fa2',
'#00719f',
'#00729d',
'#00739a',
'#007598',
'#007695',
'#077793',
'#0d7890',
'#13798e',
'#187a8b',
'#1c7b89',
'#1f7c87',
'#237d84',
'#267e82',
'#287f7f',
'#2b807d',
'#2d817b',
'#2f8278',
'#318376',
'#328473',
'#348571',
'#35866f',
'#36876c',
'#37886a',
'#388967',
'#398a65',
'#3a8b62',
'#3b8c60',
'#3c8e5d',
'#3c8f5b',
'#3d9058',
'#3d9155',
'#3e9253',
'#3e9350',
'#3e944d',
'#3e954a',
'#3e9647',
'#3f9745',
'#3f9842',
'#3e993e',
'#3e9a3b',
'#3e9b38',
'#3e9c35',
'#3e9d32',
'#3e9e2e',
'#3e9f2b',
'#3fa027',
'#3fa124',
'#40a221',
'#41a31d',
'#42a41a',
'#44a517',
'#45a615',
'#47a713',
'#4aa711',
'#4ca80f',
'#4fa90e',
'#51a90d',
'#54aa0d',
'#57ab0d',
'#5aab0d',
'#5dac0d',
'#5fad0d',
'#62ad0e',
'#65ae0e',
'#67ae0e',
'#6aaf0f',
'#6db00f',
'#6fb00f',
'#72b110',
'#74b110',
'#77b211',
'#79b211',
'#7cb311',
'#7eb412',
'#80b412',
'#83b512',
'#85b513',
'#88b613',
'#8ab613',
'#8cb714',
'#8fb814',
'#91b815',
'#93b915',
'#95b915',
'#98ba16',
'#9aba16',
'#9cbb16',
'#9fbb17',
'#a1bc17',
'#a3bc18',
'#a5bd18',
'#a7be18',
'#aabe19',
'#acbf19',
'#aebf19',
'#b0c01a',
'#b2c01a',
'#b5c11b',
'#b7c11b',
'#b9c21b',
'#bbc21c',
'#bdc31c',
'#c0c31c',
'#c2c41d',
'#c4c41d',
'#c6c51d',
'#c8c51e',
'#cac61e',
'#cdc61f',
'#cfc71f',
'#d1c71f',
'#d3c820',
'#d5c820',
'#d7c920',
'#d9c921',
'#dcca21',
'#deca22',
'#e0ca22',
'#e2cb22',
'#e4cb23',
'#e6cc23',
'#e8cc23',
'#eacc24',
'#eccd24',
'#eecd24',
'#f0cd24',
'#f2cd24',
'#f3cd24',
'#f5cc24',
'#f6cc24',
'#f8cb24',
'#f9ca24',
'#f9c923',
'#fac823',
'#fbc722',
'#fbc622',
'#fcc521',
'#fcc421',
'#fcc220',
'#fdc120',
'#fdc01f',
'#fdbe1f',
'#fdbd1e',
'#febb1d',
'#feba1d',
'#feb91c',
'#feb71b',
'#feb61b',
'#feb51a',
'#ffb31a',
'#ffb219',
'#ffb018',
'#ffaf18',
'#ffae17',
'#ffac16',
'#ffab16',
'#ffa915',
'#ffa815',
'#ffa714',
'#ffa513',
'#ffa413',
'#ffa212',
'#ffa111',
'#ff9f10',
'#ff9e10',
'#ff9c0f',
'#ff9b0e',
'#ff9a0e',
'#ff980d',
'#ff970c',
'#ff950b',
'#ff940b',
'#ff920a',
'#ff9109',
'#ff8f08',
'#ff8e08',
'#ff8c07',
'#ff8b06',
'#ff8905',
'#ff8805',
'#ff8604',
'#ff8404',
'#ff8303',
'#ff8102',
'#ff8002',
'#ff7e01',
'#ff7c01',
'#ff7b00',
'#ff7900',
'#ff7800',
'#ff7600',
'#ff7400',
'#ff7200',
'#ff7100',
'#ff6f00',
'#ff6d00',
'#ff6c00',
'#ff6a00',
'#ff6800',
'#ff6600',
'#ff6400',
'#ff6200',
'#ff6100',
'#ff5f00',
'#ff5d00',
'#ff5b00',
'#ff5900',
'#ff5700',
'#ff5500',
'#ff5300',
'#ff5000',
'#ff4e00',
'#ff4c00',
'#ff4a00',
'#ff4700',
'#ff4500',
'#ff4200',
'#ff4000',
'#ff3d00',
'#ff3a00',
'#ff3700',
'#ff3400',
'#ff3100',
'#ff2d00',
'#ff2a00']
# large repeated palettes (x10) so zip() against many groups never runs short
brewer_sets_123 = (brewer['Set1'][9] + brewer['Set2'][8] + brewer['Set3'][10][
    2:][::-1]) * 10
brewer_sets_12 = (brewer['Set1'][9] + brewer['Set2'][8]) * 10
brewer_sets_23 = (brewer['Set2'][8] + brewer['Set3'][12]) * 10
# default line/dot palette for the time_* plotting helpers
palette_dark = (brewer['Dark2'][8] + brewer['Set2'][8] + Colorblind[8] +
                d3['Category10'][10])
# diverging scale: yellow/orange/brown into greens (reversed)
red_to_green = (brewer['YlOrBr'][9] +
                brewer['YlGn'][9][::-1])
def plots_to_grid(plots: Iterable, n_columns: int = 4) -> figure:
    """Arrange plots in a grid with ``n_columns`` plots per row.

    :param plots: list of plots.
    :param n_columns: number of columns in the grid. Default 4.
    :author: <EMAIL>
    """
    # chunk the plots into rows of n_columns, padding the last row with None
    rows = zip_longest(*[iter(plots)] * n_columns, fillvalue=None)
    grid = [list(row) for row in rows]
    # strip the padding from the final (possibly short) row
    grid[-1] = [plot for plot in grid[-1] if plot]
    return layout(grid)
def heatmap(obj: DataFrame,
            xvar: str,
            yvar: str,
            value: str,
            width: int = None,
            height: int = None,
            hoover_format: Iterable[Tuple[str, str]] = None,
            color_low: Union[int, float] = None,
            color_high: Union[int, float] = None,
            title: str = None,
            colorbar: bool = True,
            xrange: Sequence = None,
            yrange: Sequence = None,
            palette: Union[str, List] = None,
            reverse: bool = False) -> figure:
    """Plot heatmap.
    :param obj: input data. The caller's frame is not modified (a filled
        copy is used internally).
    :param xvar: column from input data to plot on x axis.
    :param yvar: column from input data to plot on y axis.
    :param value: column from input data (of format int or float) to format the colors.
    :param width: width of plot.
    :param height: height of plot.
    :param hoover_format: variables to include in the hoover tool. Default None.
        v.g. [('label1', '@column_name1'), ('label2', '@column_name2')]
    :param color_low: value in data to match with the end of the color map.
    :param color_high: value in data to match with the start of the color map.
    :param title: title of plot. Default None.
    :param colorbar: plot color bar to the right of the plot.
    :param xrange: list of x axis tickers.
    :param yrange: list of y axis tickers.
    :param palette: name of color palette or list of HEX color codes.
        Default None. If None the mpl 'Viridis' 256-colour palette is used.
        Accepts any brewer palette name that contains at least 9 colours.
    :param reverse: reverse color mapping.
    :return: bokeh figure.
    :author: <EMAIL>
    """
    # work on a filled copy: the previous fillna(inplace=True) and the
    # astype(str) assignments below mutated the caller's DataFrame
    obj = obj.fillna(0)
    # set low and high threshold for color map; compare against None so an
    # explicit bound of 0 is honoured (0 is falsy, `or` would discard it)
    color_low = obj[value].min() if color_low is None else color_low
    color_high = obj[value].max() if color_high is None else color_high
    # continuous colormap from list of colours
    c = (brewer[palette][9]
         if isinstance(palette, str)
         else (palette or mpl['Viridis'][256]))
    c = c[::-1] if reverse else c
    mapper = LinearColorMapper(palette=c, low=color_low, high=color_high)
    # define axis range; categorical axes need string categories
    obj[xvar] = obj[xvar].astype(str)
    obj[yvar] = obj[yvar].astype(str)
    xrange = xrange or obj[xvar].unique()
    yrange = yrange or obj[yvar].unique()
    # default figure size grows with the number of categories per axis
    width = width or int(20 * len(xrange)) + 400
    height = height or int(20 * len(yrange)) + 400
    title = title or f'Heatmap | {xvar} - {yvar}'
    p = figure(title=title, x_range=xrange, y_range=yrange,
               plot_width=width, plot_height=height, tools='box_zoom,reset',
               toolbar_location=None, tooltips=hoover_format)
    # figure theme
    p.title.text_font_size = "12pt"
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_text_font_size = "10pt"
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = 1
    p.xaxis.axis_label = xvar
    p.yaxis.axis_label = yvar
    p.min_border_left = 50
    p.min_border_right = 50
    p.min_border_top = 50
    p.min_border_bottom = 50
    # plot data: one unit rect per (xvar, yvar) cell, coloured by `value`
    p.rect(x=xvar, y=yvar, width=1, height=1, source=obj,
           fill_color={'field': value, 'transform': mapper}, line_color=None)
    # plot color bar
    if colorbar:
        color_bar = ColorBar(color_mapper=mapper,
                             major_label_text_font_size="10pt",
                             ticker=BasicTicker(desired_num_ticks=10),
                             title=f'{value}',
                             label_standoff=10, border_line_color=None,
                             location=(0, 0))
        p.add_layout(color_bar, 'right')
    return p
def histograms(obj: Union[DataFrame, Series],
               bins: int = 10,
               width: int = None,
               height: int = None,
               groupby: Union[str, Series] = None,
               title: str = None,
               color: Union[str, Iterable[str]] = None,
               hoover: bool = True,
               **kwargs) -> figure:
    """Create a histogram figure for each column in obj that are of type bool, int or float.
    :param obj: input data.
    :param groupby: name of a column in obj, or a Series of group labels
        aligned with obj, used to split each histogram into one glyph per group.
    :param width: width of plot in pixels.
    :param height: height of plot in pixels.
    :param title: title of plot.
    :param color: color or palette of colors to use in the plot.
    :param bins: number of bins in histograms.
    :param hoover: include a hoover tool. Default True.
    :return: bokeh figure
    :author: <EMAIL>
    """
    # resolve the group labels without truth-testing `groupby`:
    # `if groupby` raises ValueError when groupby is the Series that the
    # recursive calls below pass through
    if groupby is None:
        groups, source = None, obj.copy()
    elif isinstance(groupby, str):
        groups, source = obj[groupby], obj.drop(groupby, axis=1)
    else:
        groups, source = groupby, obj.copy()
    if isinstance(source, DataFrame):
        source = source.select_dtypes(include='number')
        if source.shape[1] > 1:
            # several numeric columns: one sub-figure each, laid out in a grid
            list_of_plots = [histograms(source[col], title=col,
                                        width=width or 300,
                                        height=height or 200,
                                        hoover=hoover,
                                        groupby=groups,
                                        color=color, bins=bins)
                             for col in source]
            n = kwargs.pop('n_columns', None) or min([3, source.shape[1]])
            return plots_to_grid(plots=list_of_plots, n_columns=n)
        # single numeric column: squeeze to a Series so the scalar min/max
        # below work (builtin min() over a DataFrame iterates column labels)
        source = source.iloc[:, 0]
    # DataFrame has no .name attribute, hence getattr
    title = title or getattr(obj, 'name', None) or ''
    # x range padded by 5% of the spread between |min| and |max|;
    # Series.min()/max() skip NaN, unlike builtin min() over the values
    xmin, xmax = float(source.min()), float(source.max())
    xpad = abs(abs(xmin) - abs(xmax)) * 0.05
    xrange = (xmin - xpad, xmax + (xpad * 2))
    p = figure(title=title, tools='box_zoom,reset',
               background_fill_color="white",
               plot_height=height or 600, plot_width=width or 800,
               toolbar_location='above', x_range=xrange)
    data = [(title, source)] if groups is None else [(str(key), df) for key, df
                                                     in source.groupby(groups)]
    # a bare color string would otherwise be zipped character by character
    color = [color] if isinstance(color, str) else (color or brewer_sets_123)
    for c, (name, sample) in zip(color, data):
        hist, edges = histogram(sample.dropna().values, bins=bins)
        source = ColumnDataSource(
            data=dict(values=hist, left=edges[:-1], right=edges[1:]))
        gly = p.quad(top='values', bottom=0, left='left', right='right',
                     fill_color=c, line_color="white", source=source,
                     legend_label=str(name))
        if hoover:
            p.add_tools(HoverTool(renderers=[gly],
                                  tooltips=[('Freq(x)', '@values')],
                                  toggleable=False))
    p.y_range.start = 0
    p.xaxis.axis_label = 'x'
    p.yaxis.axis_label = 'Freq(x)'
    p.grid.grid_line_color = '#eeeeee'
    # borders
    p.min_border_left = 40
    p.min_border_right = 40
    p.min_border_top = 40
    p.min_border_bottom = 40
    # legend
    p.legend.click_policy = 'hide'
    p.legend.location = 'top_right'
    p.legend.label_text_font_size = '6pt'
    p.legend.background_fill_color = "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 0
    return p
def correlation(obj: DataFrame, method: str = 'spearman',
                plot_unique: bool = True,
                width: int = None, height: int = None, title: str = None,
                groupby: str = None,
                color_palette: Iterable[str] = None, add_text: bool = True,
                hoover: bool = True, **kwargs) -> figure:
    """Plot correlation matrix between columns in pandas data frame.
    :param obj: input data
    :param method: correlation method. Default 'spearman'. Accepted values are 'spearman',
        ‘pearson’, ‘kendall’.
    :param groupby: name of column to group columns in independent correlation matrices.
    :param plot_unique: plot unique correlation pairs only (triangular shape).
    :param width: width of plot in pixels.
    :param height: height of plot in pixels.
    :param title: title of plot.
    :param color_palette: color palette for plot.
    :param add_text: print the correlation coefficient inside each square.
    :param hoover: add hoover tool to plot.
    :return: bokeh figure
    :author: <EMAIL>
    """
    logger = getLogger(__name__)  # NOTE(review): unused in this function
    if groupby:
        # one correlation matrix per group, arranged in a grid
        list_of_plots = [
            correlation(df, method=method, plot_unique=plot_unique,
                        title=str(key),
                        width=width or 200, height=height or 200,
                        hoover=hoover,
                        add_text=False, color_palette=color_palette)
            for key, df
            in obj.groupby(groupby)]
        n_columns = kwargs.pop('n_columns', None) or 3
        return plots_to_grid(list_of_plots, n_columns)
    # correlation over numeric columns only, in alphabetical column order
    corr = (obj
            .reindex(sorted(obj.columns), axis=1)
            .select_dtypes(include=['int', 'float'])
            .corr(method=method))
    n = corr.shape[1]
    # long format: one row per (x, y, coefficient) cell
    source = (corr
              .stack()
              .reset_index()
              .rename({'level_0': 'x', 'level_1': 'y', 0: 'stat'}, axis=1))
    if plot_unique:
        # remove duplicate pairs of features: (a, b) and (b, a) share the
        # same hash sum, so groupby().first() keeps one of each pair.
        # NOTE(review): two distinct pairs could in principle collide on
        # the sum of hashes — unlikely for column-name strings, but verify
        # if cells ever go missing.
        source = source.groupby(
            source['x'].apply(hash) + source['y'].apply(hash)).first()
    # format rho value to string (signed, two decimals) for cell labels
    source.insert(0, 'stat_str', source.stat.map('{:+.2f}'.format))
    color_palette = color_palette or brewer['RdYlBu'][11]
    mapper = LinearColorMapper(palette=color_palette,
                               low=source['stat'].min(),
                               high=source['stat'].max())
    # create figure and hoover tool; y axis reversed so the diagonal runs
    # top-left to bottom-right
    xrange = sorted(set(source['x'].values))
    yrange = xrange[::-1]
    p = figure(title=title, plot_width=width or n * 40,
               plot_height=height or n * 40,
               x_range=xrange, y_range=yrange, toolbar_location=None)
    # plot data
    gly = p.rect('x', 'y', .9, .9, source=source, fill_alpha=0.5,
                 fill_color={'field': 'stat', 'transform': mapper})
    if add_text:
        p.text('x', 'y', text='stat_str', source=source, text_align="center",
               text_baseline="middle", text_font_size='8pt')
    if hoover:
        p.add_tools(HoverTool(renderers=[gly], toggleable=False,
                              tooltips=[("x", "@x"), ("y", "@y"),
                                        ("stat", "@stat_str")]))
    # format plot
    p.outline_line_color = None
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = 0.8
    p.yaxis.major_label_orientation = 0.8
    return p
def time_lines(obj: Union[DataFrame, Series],
               yvar: Union[str, Iterable[str]] = None,
               xvar: str = None,
               xrange: Iterable = None,
               yrange: Iterable = None,
               groupby: str = None,
               highlight: Union[str, Iterable[str]] = None,
               line_width: float = 1,
               color: str = None,
               color_palette: List[str] = None,
               height: int = 400, width: int = 1600,
               legend_location: str = 'top_right',
               title: str = None, toolbar: str = 'xpan,box_zoom,reset',
               hoover: bool = True,
               hoover_tips: Iterable[Tuple[str, str]] = None) -> figure:
    """Creates a time series line plot.
    :param obj: input data.
    :param yvar: columns names in data to plot in the y axis.
    :param xvar: column or index name to plot in the x axis. Default None. If None the index
        will plot on the x axis.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
        If None the x axis shows the first 1/5 of the x variable data.
    :param yrange: y range in the format (min, max). If None the default y axis range is shown.
    :param groupby: name of column to used to group the data. If None data is not grouped.
    :param highlight: columns names of variables in the y axis to be ploted with a thicker line.
    :param line_width: width of lines.
    :param color: color of line.
    :param color_palette: palette of color as list of HEX codes to use in case of more than one
        group of data or variable per plot. Default None. In None, a comination of brewer
        palettes is used.
    :param height: height of plot in pixels. Default 400.
    :param width: width of plot in pixels. Default 1600.
    :param legend_location: legend location.
    :param title: title of plot. Default None. If None the title is a list of the names of the
        variables plotted in the y axis.
    :param toolbar: tools to include in the tool bar.
    :param hoover: include a hoover tool. Default True.
    :param hoover_tips: variables to include in the hoover tool. Default None. If None the
        hoover tool shows the y axis value. v.g. [('label', '@column_name')]
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)  # NOTE(review): unused in this function
    # transform strings to list of strings; default to all columns
    # (DataFrame) or the single series (name or 0)
    yvar = (([yvar] if isinstance(yvar, str) else yvar) if yvar
            else (list(obj.columns) if isinstance(obj, DataFrame)
                  else ([obj.name or 0])))
    highlight = list(highlight) if highlight else []
    # define x variable
    # NOTE(review): assigning obj.index.name mutates the caller's object
    obj.index.name = obj.index.name or 'index'
    xvar = xvar or obj.index.name
    df = obj.reset_index()
    # define x range: default shows the first fifth of the x values
    if xrange is None:
        xr = sorted(set(df[xvar]))
        xrange = (xr[0], xr[int(len(xr) / 5)])
    # plot only the time component of datetime variables in the y axis
    ydtype = 'auto'
    for dtvar in [y for y in yvar if is_datetime64_any_dtype(df[y])]:
        df[dtvar] = df[dtvar].dt.time
        ydtype = 'datetime'
    # define title
    t = title or "; ".join([str(x) for x in yvar])
    # create figure
    p = figure(plot_height=height, plot_width=width, tools=toolbar,
               toolbar_location='above', y_axis_type=ydtype,
               x_axis_type="datetime", background_fill_color="#f8f9f9",
               x_range=xrange, title=t)
    # plot data: one line per y column; if grouped, one line per group
    # within each column (the same palette cycles over both loops)
    color_palette = color_palette or palette_dark
    for col, c in zip(yvar, color_palette):
        if groupby:
            groups = df[groupby].unique()
            for g, cc in zip(groups, color_palette):
                source = df[df[groupby] == g].rename(columns={col: 'y'})
                if source['y'].empty or source['y'].isnull().all():
                    continue
                # highlighted groups get a thick (4px) line
                w = 4 if g in highlight else line_width
                gly = p.line(x=xvar, y='y', line_color=color or cc,
                             line_width=w, source=source, legend_label=str(g))
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
        else:
            source = df.rename(columns={col: 'y'})
            if source['y'].empty or source['y'].isnull().all():
                continue
            # highlighted columns get a thick (4px) line
            w = 4 if col in highlight else line_width
            gly = p.line(x=xvar, y='y', line_color=color or c,
                         line_width=w, source=source, legend_label=str(col))
            if hoover:
                tips = hoover_tips or [('value', '@y')]
                p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                      toggleable=False))
    # title format: hide the title entirely when none was given
    p.title.text_font_size = '10pt' if title else '0pt'
    # x axis format: roughly one tick per 100px
    p.xaxis.ticker = DatetimeTicker(desired_num_ticks=int(width / 100))
    p.xaxis.formatter = DatetimeTickFormatter(days=["%d-%b-%y"])
    # y axis format
    if yrange:
        p.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    # legend format
    p.legend.click_policy = 'hide'
    p.legend.location = legend_location
    p.legend.label_text_font_size = '8pt'
    p.legend.background_fill_color = "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 1
    # borders
    p.min_border_bottom = 50
    p.min_border_left = 50
    return p
def time_lines_and_dots(obj: Union[Series, DataFrame],
                        yvar: Union[List[str], str] = None,
                        xvar: str = None,
                        xrange: Tuple[float, float] = None,
                        yrange: Tuple[float, float] = None,
                        groupby: str = None,
                        line_color: str = None,
                        line_color_palette: List[str] = None,
                        line_width: float = 1,
                        dots_color: str = None,
                        dots_color_palette: List[str] = None,
                        dots: Union[Series, DataFrame] = None,
                        dots_groupby: str = None,
                        dots_xvar: Union[List[str], str] = None,
                        dots_yvar: Union[List[str], str] = None,
                        dots_size: float = 10,
                        height: float = 400, width: float = 1600,
                        title: str = None,
                        hoover: bool = True, hoover_tips=None,
                        toolbar: str = 'xpan,box_zoom,reset',
                        legend_location: str = 'top_right') -> figure:
    """Plot a combination of time series line and scatter graph.
    :param obj: input table for line plot.
    :param yvar: column name or list of column names from the input table to plot on the y axis.
    :param xvar: column name from table to plot on the x axis. Default None. If None, the index
        of the input table will be plotted on the x axis.
    :param xrange: range of x axis in format (min, max).
    :param yrange: range of y axis in format (min, max).
    :param groupby: name of column in input table to use to group samples in plot.
    :param line_color_palette: palette of color as list of HEX codes to use in case of more than
        one group of data or variables per plot. Default None. In None, a combination of brewer
        palettes is used.
    :param line_width: width of lines.
    :param dots_color_palette: palette of color as list of HEX codes to use in case of more than
        one group of data or variables per plot. Default None. In None, a combination of brewer
        palettes is used.
    :param dots: input table for scatter plot.
    :param dots_yvar: column name or list of column names from input scatter table to plot on the y axis.
    :param dots_size: size of dots.
    :param dots_groupby: name of column in input scatter table to use to group samples in plot.
    :param dots_xvar: column name from dots table to plot on the x axis. Default None. If None,
        the index of the input dots table will be plotted on the x axis.
    :param height: height of figure in pixels.
    :param width: width of figure in pixels.
    :param title: title of figure.
    :param hoover: include a hoover tool. Default True.
    :param hoover_tips: variables to include in the hoover tool. Default None. If None the
        hoover tool shows the y axis value. v.g. [('label', '@column_name')]
    :param toolbar: tools to include in the tool bar.
    :param legend_location: legend location in figure. Default 'top_right'.
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # transform strings to list of strings; default to all columns
    # (DataFrame) or the single series (name or 0)
    yvar = (([yvar] if isinstance(yvar, str) else yvar) if yvar
            else (list(obj.columns) if isinstance(obj, DataFrame)
                  else ([obj.name or 0])))
    # define datetime x variable
    obj.index.name = obj.index.name or 'index'
    xvar = xvar or obj.index.name
    df = obj.reset_index()
    # default x range shows the first tenth of the x values
    if xrange is None:
        xr = sorted(set(df[xvar]))
        xrange = (xr[0], xr[int(len(xr) / 10)])
    # plot only the time component of datetime variables in the y axis
    ydtype = 'auto'
    for dtvar in [y for y in yvar if is_datetime64_any_dtype(df[y])]:
        df[dtvar] = df[dtvar].dt.time
        ydtype = 'datetime'
    t = title or "; ".join([str(x) for x in yvar])
    p = figure(plot_height=height, plot_width=width, tools=toolbar,
               toolbar_location='above', x_axis_type="datetime",
               y_axis_type=ydtype,
               background_fill_color="#f8f9f9", x_range=xrange, title=t)
    # plot lines: one per y column; if grouped, one per group per column
    line_color_palette = line_color_palette or palette_dark
    for col, c in zip(yvar, line_color_palette):
        if groupby:
            groups = df[groupby].unique()
            for g, cc in zip(groups, line_color_palette):
                source = df[df[groupby] == g].rename(columns={col: 'y'})
                if source['y'].empty or source['y'].isnull().all():
                    continue
                else:
                    gly = p.line(x=xvar, y='y', line_color=line_color or cc,
                                 line_width=line_width,
                                 source=source, legend_label=str(g))
                    if hoover:
                        tips = hoover_tips or [('value', '@y')]
                        p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                              toggleable=False))
        else:
            source = df.rename(columns={col: 'y'})
            if source['y'].empty or source['y'].isnull().all():
                continue
            else:
                gly = p.line(x=xvar, y='y', line_color=line_color or c,
                             line_width=line_width,
                             source=source, legend_label=str(col))
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
    if dots is not None:
        dots.index.name = dots.index.name or 'UnnamedIndex'
        dots_xvar = dots_xvar or dots.index.name
        dots_yvar = [dots_yvar] if isinstance(dots_yvar, str) else (
            dots_yvar if dots_yvar is not None else [dots.name])
        df = dots.reset_index()
        dots_color_palette = dots_color_palette or palette_dark
        for col, c in zip(dots_yvar, dots_color_palette):
            # rename on a copy: the previous in-place rename stacked a
            # duplicate 'y' column onto df on every iteration after the
            # first when several dots_yvar columns were given
            ddf = df.rename(columns={col: 'y'})
            if dots_groupby:
                for g, cc in zip(ddf[dots_groupby].unique(),
                                 dots_color_palette):
                    source = ddf[ddf[dots_groupby] == g]
                    if source['y'].empty or source['y'].isnull().all():
                        continue
                    else:
                        gly = p.circle(dots_xvar, 'y', size=dots_size,
                                       fill_color=dots_color or cc,
                                       fill_alpha=0.8, line_color='white',
                                       source=source,
                                       legend_label=str(g))
                        if hoover:
                            tips = hoover_tips or [('value', '@y')]
                            p.add_tools(
                                HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
            else:
                if ddf['y'].empty or ddf['y'].isnull().all():
                    continue
                gly = p.circle(dots_xvar, 'y', size=dots_size, fill_alpha=0.8,
                               line_color='white',
                               fill_color=dots_color or c, source=ddf,
                               legend_label=str(col))
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
    else:
        logger.warning('Dots data is missing.')
    # title format: hide the title entirely when none was given
    p.title.text_font_size = '10pt' if title else '0pt'
    # x axis format: roughly one tick per 100px
    p.xaxis.ticker = DatetimeTicker(desired_num_ticks=int(width / 100))
    p.xaxis.formatter = DatetimeTickFormatter(days=["%d-%b-%y"])
    # y axis format
    if yrange:
        p.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    # legend format
    p.legend.click_policy = 'hide'
    p.legend.location = legend_location
    p.legend.label_text_font_size = '8pt'
    p.legend.background_fill_color = "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 1
    # borders
    p.min_border_bottom = 50
    p.min_border_left = 50
    return p
def time_bars(obj: Union[DataFrame, Series], yvar: Union[str, List[str]],
              xvar: str = None,
              xrange: Tuple = None,
              yrange: Tuple = None, groupby: str = None, color: str = None,
              color_palette: List[str] = None,
              height: int = 400, width: int = 1600, bar_width=1,
              title: str = None,
              toolbar: str = 'xpan,box_zoom,reset',
              legend_location: str = 'top_right',
              hoover: bool = True,
              hoover_tips: Iterable[Tuple[str, str]] = None) -> figure:
    """Plot a time series bar plot.
    :param obj: input data.
    :param yvar: columns names in data to plot in the y axis.
    :param xvar: column or index name to plot in the x axis. Default None. If None the index
        will plot on the x axis.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
        If None the x axis shows the first 1/10 of the x variable data.
    :param yrange: y range in the format (min, max). If None the default y axis range is shown.
    :param groupby: name of column to used to group the data. If None data is not grouped.
    :param color: color of bars, only effective when groupby is None. Default None. If None,
        the brewer['Set3'] palette is used.
    :param color_palette: palette of color as list of HEX codes to use in case of more than
        one group of data or variables per plot. Default None. In None, a combination of brewer
        palettes is used.
    :param bar_width: width of bars. Default 1.
    :param height: height of plot in pixels. Default 400.
    :param width: width of plot in pixels. Default 1600.
    :param title: title of plot. Default None. If None the title is a list of the names of the
        variables plotted in the y axis.
    :param toolbar: tools to include in the tool bar.
    :param legend_location: legend location.
    :param hoover: include a hoover tool. Default True.
    :param hoover_tips: variables to include in the hoover tool. Default None. If None the
        hoover tool shows the y axis value. v.g. [('label', '@column_name')]
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # normalize yvar to a list: wrap a single column name, otherwise default
    # to all columns (DataFrame) or the series name (Series)
    yvar = (([yvar] if isinstance(yvar, str) else yvar) if yvar
            else (list(obj.columns) if isinstance(obj, DataFrame)
                  else ([obj.name or 0])))
    # the index must be named so it survives reset_index() as a real column
    obj.index.name = obj.index.name or 'index'
    xvar = xvar or obj.index.name
    df = obj.reset_index()
    if xrange is None:
        # default view: the first 1/10 of the sorted, de-duplicated x values
        xr = sorted(set(df[xvar]))
        xrange = (xr[0], xr[int(len(xr) / 10)])
    # plot only the time component of datetime variables in the y axis
    ydtype = 'auto'
    for dtvar in [y for y in yvar if is_datetime64_any_dtype(df[y])]:
        df[dtvar] = df[dtvar].dt.time
        ydtype = 'datetime'
    # default title: the y variable names joined by "; "
    t = title or "; ".join([str(x) for x in yvar])
    p = figure(plot_height=height, plot_width=width, tools=toolbar,
               toolbar_location='above',
               x_axis_type="datetime", y_axis_type=ydtype,
               background_fill_color="#f8f9f9",
               x_range=xrange, title=t)
    # plot data: one vbar glyph per y variable (or per group when groupby)
    color_palette = color_palette or brewer_sets_23
    for col, c in zip(yvar, color_palette):
        if groupby:
            groups = df[groupby].unique()
            # NOTE(review): group colors come from brewer['Set3'] here rather
            # than color_palette (time_scatter uses color_palette) — confirm
            # this difference is intentional
            for g, cc in zip(groups, brewer['Set3'][12] * 10):
                source = df[df[groupby] == g].rename(columns={col: 'y'})
                # skip groups with no plottable values
                if source['y'].empty or source['y'].isnull().all():
                    continue
                gly = p.vbar(x=xvar, top='y', width=bar_width, source=source,
                             legend_label=str(g),
                             fill_color=color or cc, line_color=color or cc)
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
        else:
            source = df.rename(columns={col: 'y'})
            if source['y'].empty or source['y'].isnull().all():
                continue
            gly = p.vbar(x=xvar, top='y', width=bar_width, source=source,
                         legend_label=str(col),
                         fill_color=color or c, line_color=color or c)
            if hoover:
                tips = hoover_tips or [('value', '@y')]
                p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                      toggleable=False))
    # title format
    p.title.text_font_size = '10pt'
    # x axis format: roughly one tick per 100 px of width
    p.xaxis.ticker = DatetimeTicker(desired_num_ticks=int(width / 100))
    p.xaxis.formatter = DatetimeTickFormatter(days=["%d-%b-%y"])
    # y axis format
    if yrange:
        p.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    # legend format
    p.legend.click_policy = 'hide'
    p.legend.location = legend_location
    p.legend.label_text_font_size = '8pt'
    p.legend.background_fill_color = "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 1
    # borders
    p.min_border_bottom = 50
    p.min_border_left = 50
    return p
def time_scatter(obj: Union[DataFrame, Series],
                 yvar: Union[str, Iterable[str]] = None,
                 xvar: str = None, xrange: Iterable = None,
                 yrange: Iterable = None,
                 groupby: str = None, color: str = None,
                 color_palette: List[str] = None,
                 height: int = 400, width: int = 1600,
                 legend_location: str = 'top_right',
                 size: Union[int, float] = 12, alpha: float = .8,
                 title: str = None, toolbar: str = 'xpan,box_zoom,reset',
                 hoover: bool = True,
                 hoover_tips: Iterable[Tuple[str, str]] = None) -> figure:
    """Creates a time series scatter plot.
    :param obj: input data.
    :param yvar: columns names in data to plot in the y axis.
    :param xvar: column or index name to plot in the x axis. Default None. If None the index
        will plot on the x axis.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
        If None the x axis shows the first 1/10 of the x variable data.
    :param yrange: y range in the format (min, max). If None the default y axis range is shown.
    :param groupby: name of column to used to group the data. If None data is not grouped.
    :param color: color of dots, only effective when groupby is None. Default None. If None,
        the brewer['Set3'] palette is used.
    :param color_palette: palette of color as list of HEX codes to use in case of more than one
        group of data or variable per plot. Default None. In None, a combination of
        brewer 'Set' palettes is used.
    :param height: height of plot in pixels. Default 400.
    :param width: width of plot in pixels. Default 1600.
    :param legend_location: legend location.
    :param size: size of dots. Default 12.
    :param alpha: transparency factor of dots (0->transparent, 1->non-transparent). Default 0.8
    :param title: title of plot. Default None. If None the title is a list of the names of the
        variables plotted in the y axis.
    :param toolbar: tools to include in the tool bar.
    :param hoover: include a hoover tool. Default True.
    :param hoover_tips: variables to include in the hoover tool. Default None. If None the
        hoover tool shows the y axis value. v.g. [('label', '@column_name')]
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # normalize yvar to a list: wrap a single column name, otherwise default
    # to all columns (DataFrame) or the series name (Series)
    yvar = (([yvar] if isinstance(yvar, str) else yvar) if yvar
            else (list(obj.columns) if isinstance(obj, DataFrame)
                  else ([obj.name or 0])))
    # the index must be named so it survives reset_index() as a real column
    obj.index.name = obj.index.name or 'index'
    xvar = xvar or obj.index.name
    df = obj.reset_index()
    # define x range: default view is the first 1/10 of the sorted x values
    if xrange is None:
        xr = sorted(set(df[xvar]))
        xrange = (xr[0], xr[int(len(xr) / 10)])
    # plot only the time component of datetime variables in the y axis
    ydtype = 'auto'
    for dtvar in [y for y in yvar if is_datetime64_any_dtype(df[y])]:
        df[dtvar] = df[dtvar].dt.time
        ydtype = 'datetime'
    # default title: the y variable names joined by "; "
    t = title or "; ".join([str(x) for x in yvar])
    p = figure(plot_height=height, plot_width=width, tools=toolbar,
               toolbar_location='above', y_axis_type=ydtype,
               x_axis_type="datetime",
               background_fill_color="#f8f9f9", x_range=xrange, title=t)
    # plot data: one circle glyph per y variable (or per group when groupby)
    color_palette = color_palette or brewer_sets_23
    for col, c in zip(yvar, color_palette):
        if groupby:
            groups = df[groupby].unique()
            for g, cc in zip(groups, color_palette):
                source = df[df[groupby] == g].rename(columns={col: 'y'})
                # skip groups with no plottable values
                if source['y'].empty or source['y'].isnull().all():
                    continue
                gly = p.circle(x=xvar, y='y', source=source,
                               size=size, fill_color=color or cc,
                               fill_alpha=alpha, line_alpha=0,
                               legend_label=str(g))
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
        else:
            source = df.rename(columns={col: 'y'})
            if source['y'].empty or source['y'].isnull().all():
                continue
            gly = p.circle(x=xvar, y='y', source=source,
                           size=size, fill_color=color or c, fill_alpha=alpha,
                           line_alpha=0,
                           legend_label=str(col))
            if hoover:
                tips = hoover_tips or [('value', '@y')]
                p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                      toggleable=False))
    # title format
    p.title.text_font_size = '10pt'
    # x axis format: roughly one tick per 100 px of width
    p.xaxis.ticker = DatetimeTicker(desired_num_ticks=int(width / 100))
    p.xaxis.formatter = DatetimeTickFormatter(days=["%d-%b-%y"])
    # y axis format
    if yrange:
        p.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    # legend format
    p.legend.click_policy = 'hide'
    p.legend.location = legend_location
    p.legend.label_text_font_size = '8pt'
    p.legend.background_fill_color = "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 1
    # borders
    p.min_border_bottom = 50
    p.min_border_left = 50
    return p
def time_range_tool(obj: Union[DataFrame, Series], xrange: Iterable,
                    yvar: str = None, xvar: str = None,
                    yrange: Iterable = None,
                    height=120, width=1600) -> figure:
    """Build the draggable range-selection companion plot for time series.
    :param obj: input data.
    :param yvar: columns name in data to plot in the y axis.
    :param xrange: x range in the format (min, max) or a bokeh.figure.x_range.
    :param xvar: column or index name to plot in the x axis. Default None. If None the index
        will plot on the x axis.
    :param yrange: y range in the format (min, max). If None the default y axis range is shown.
    :param height: height of plot in pixels. Default 120.
    :param width: width of plot in pixels. Default 1600.
    :return: bokeh figure.
    :raises TypeError: if yvar column is not data type numeric.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # name the index so it becomes an addressable column after reset_index()
    obj.index.name = obj.index.name or 'index'
    x_col = xvar if xvar else obj.index.name
    frame = obj.sort_index().reset_index()
    # fall back to the series name when no y variable is given
    y_col = obj.name if yvar is None else yvar
    # the silhouette only makes sense for numeric or datetime values
    is_temporal = is_datetime64_any_dtype(frame[y_col])
    if not (is_numeric_dtype(frame[y_col]) or is_temporal):
        raise TypeError('Y axis variable column must be numeric or datetime.')
    # keep only the time-of-day component of datetime y values
    axis_type = None
    if is_temporal:
        frame[y_col] = frame[y_col].dt.time
        axis_type = 'datetime'
    fig = figure(
        title=f"Drag the box to change the temporal range. Variable displayed: {y_col}",
        plot_height=height, plot_width=width, x_axis_type="datetime",
        y_axis_type=axis_type,
        toolbar_location=None, background_fill_color="#f5f5f5")
    # a compact bar silhouette of the whole series
    fig.vbar(x=x_col, top=y_col, width=1, source=frame, fill_color="#aaafb3",
             line_color="#aaafb3")
    # x axis format: roughly one tick per 100 px of width
    fig.xaxis.ticker = DatetimeTicker(desired_num_ticks=int(width / 100))
    fig.xaxis.formatter = DatetimeTickFormatter(days=["%d-%b-%y"])
    # optional fixed y range
    if yrange:
        fig.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    fig.ygrid.grid_line_color = None
    fig.title.text_font_size = "10pt"
    # the draggable selection box drives the linked plot's x range
    selector = RangeTool(x_range=xrange)
    selector.overlay.fill_color = "#708090"
    selector.overlay.fill_alpha = 0.5
    fig.add_tools(selector)
    fig.toolbar.active_multi = selector
    return fig
def scatter(obj: DataFrame, xvar: str, yvar: str, xrange: Tuple = None,
            yrange: Tuple = None,
            size: Union[int, float, str] = 1, size_type: str = 'radius',
            colorvar: str = None, color_asc: bool = False,
            color_min: float = None, color_max: float = None,
            marker_color: str = None, color_palette: List[str] = None,
            color_alpha: float = 0.6, groupby: str = None,
            legend_location: str = 'top_right',
            height: int = 700, width: int = 700,
            x_jitter: float = 0, y_jitter: float = 0,
            hist_axes: bool = False, nbins: int = 10,
            get_regression: bool = False,
            deg: int = 1, add_regression: Dict = None, hoover: bool = True,
            title: str = '',
            hoover_tips: Iterable[Tuple] = None,
            toolbar: str = "pan,box_zoom,reset",
            xaxis_labels_map: Dict = None,
            yaxis_labels_map: Dict = None) -> Union[figure, None]:
    """Scatter plot + Histograms on x and y axis + regression line/curve.
    :param obj: input table.
    :param xvar: name of column in table to plot on the x axis.
    :param yvar: name of column in table to plot on the y axis.
    :param size: size of radius of markers in units, or name of the column on table to use
        as marker size. Default 1 unit.
    :param size_type: measure size in 'radius' or 'size'.
    :param colorvar: name of column in table to map the color of the markers to. Column must be
        dtype numerical.
    :param color_asc: indicate if maximum colors are red (True) or blue(False).
    :param color_min: indicate minimum value to colour, instead of variable 1% quantile.
    :param color_max: indicate maximum value to colour as red (if asc), instead of variable maximum
    :param marker_color: color of marker.
    :param color_palette: palette of color as list of HEX codes to use in case of more than one
        group of data or variable per plot. Default None. In None, a combination of
        brewer 'Set' palettes is used.
    :param color_alpha: transparency of fill color of marker (0-transparent, 1-opaque). Default 0.6.
    :param groupby: name of column in table to group data in plot. Groups are differentiated
        by marker color. Default None.
    :param legend_location: legend location in figure. Default 'center_right'.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
    :param yrange: y range in the format (min, max) or bokeh.figure.y_range. Default None.
    :param height: height of plot in pixels. Default 700.
    :param width: width of plot in pixels. Default 700.
    :param x_jitter: add jitter to categorical groups in x axis. Default 0, or no jitter.
    :param y_jitter: add jitter to categorical groups in y axis. Default 0, or no jitter.
    :param hist_axes: plot histograms at the x and y axes. Default False.
    :param nbins: number of bins to divided the histograms into. Default 10.
    :param get_regression: compute and plot a regression estimator for each group in the data.
        Default, False.
    :param deg: degrees of freedom of the regression estimator. Default 1.
    :param add_regression: regression fit function of the type returned by the numpy.poly1d.
        Default None.
    :param hoover: display hoover tool tips. Default True.
    :param hoover_tips: variables to include in the hoover tool v.g. [('label', '@column_name')].
        Default None. If None the hoover tool shows the y axis value.
    :param toolbar: bokeh figure tools to include. Default 'pan,box_zoom,reset'.
    :param title: title of plot.
    :param xaxis_labels_map: dict used to override major x axis labels. Default None.
    :param yaxis_labels_map: dict used to override major y axis labels. Default None.
    :return: bokeh figure. None when all x or y values are NaN (ungrouped case).
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # define axis range: pad numeric axes beyond the data extent; categorical
    # axes get the list of unique values as a factor range
    if not xrange:
        if is_numeric_dtype(obj[xvar]):
            xdiff = (1 + obj[xvar].max() - obj[xvar].min()) * 1.1
            xrange = [obj[xvar].max() - xdiff, obj[xvar].min() + xdiff]
        else:
            xrange = obj[xvar].unique()
    if not yrange:
        if is_numeric_dtype(obj[yvar]):
            ydiff = (1 + obj[yvar].max() - obj[yvar].min()) * 1.1
            yrange = [obj[yvar].max() - ydiff, obj[yvar].min() + ydiff]
        else:
            yrange = obj[yvar].unique()
    # create scatter plot
    p = figure(plot_height=height, plot_width=width, min_border=50,
               min_border_left=50,
               tools=toolbar, toolbar_location="above", title=title,
               x_range=xrange,
               y_range=yrange, background_fill_color="#ffffff")
    p.select(BoxSelectTool).select_every_mousemove = False
    color_palette = color_palette or brewer_sets_123
    # NOTE(review): scatter_params is built but never used below — the circle
    # calls pass size= directly, so size_type currently has no effect; confirm
    scatter_params = {size_type: size}
    if groupby:
        # one glyph per group; 'x'/'y' columns are inserted so jitter and the
        # hoover tool can reference them by a fixed name
        for g, cc in zip(obj[groupby].unique(), color_palette):
            source = obj[obj[groupby] == g]
            source.insert(0, 'x', source[xvar])
            source.insert(0, 'y', source[yvar])
            if source.y.isnull().all() or source.x.isnull().all():
                continue
            gly = p.circle(x=jitter('x', x_jitter, range=p.x_range),
                           y=jitter('y', y_jitter, range=p.y_range),
                           color=marker_color or cc, size=size,
                           fill_alpha=color_alpha, line_alpha=color_alpha,
                           source=source,
                           legend_label=str(g))
            if hoover:
                tips = hoover_tips or [('x', '@x'), ('y', '@y')]
                p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                      toggleable=False))
    else:
        # single glyph; optionally color-mapped to a numeric column
        source = obj.copy()
        source.insert(0, 'x', source[xvar])
        source.insert(0, 'y', source[yvar])
        if source.y.isnull().all() or source.x.isnull().all():
            logger.error('All NaN values in axis.')
            return
        if colorvar is None:
            cc = color_palette[0] or brewer['Set2'][8][1]
        else:
            # clip the color variable to [1%, 99%] quantiles (or the explicit
            # color_min/color_max) so outliers don't flatten the color scale
            source.insert(0, 'c', source[colorvar])
            clip_max = color_max or source['c'].quantile(.99)
            clip_min = color_min or source['c'].quantile(.01)
            source[colorvar] = source[colorvar].clip(clip_min, clip_max)
            pal = rainbow if not color_asc else rainbow[::-1]
            mapper = LinearColorMapper(palette=pal)
            cc = {'field': colorvar, 'transform': mapper}
            color_bar = ColorBar(color_mapper=mapper,
                                 major_label_text_font_size="10pt",
                                 ticker=BasicTicker(desired_num_ticks=10),
                                 label_standoff=10, border_line_color=None,
                                 location=(0, 0), width=10)
            p.add_layout(color_bar, 'right')
            p.title.text = p.title.text + f' | Color Bar: {colorvar}'
        gly = p.circle(jitter('x', x_jitter), 'y', color=cc,
                       fill_alpha=color_alpha, line_alpha=color_alpha,
                       source=source, size=size)
        if hoover:
            tips = hoover_tips or [('x', '@x'), ('y', '@y'), ('c', '@c')]
            p.add_tools(
                HoverTool(renderers=[gly], tooltips=tips, toggleable=False))
    # plot a regression function fitted on the non-null rows
    if get_regression:
        source = obj[(notnull(obj[xvar])) & (notnull(obj[yvar]))]
        if groupby:
            for g, cc in zip(source[groupby].unique(), color_palette):
                x = source[source[groupby] == g][xvar].values
                y = source[source[groupby] == g][yvar].values
                try:
                    fit = polyfit(x, y, deg)
                    fit_fn = poly1d(fit)
                    # fit_fn is a function which takes in x
                    # and returns an estimate for y
                    xx = linspace(min(x), max(x), 1000)
                    p.line(x=xx, y=fit_fn(xx), line_color=cc, line_width=2,
                           legend_label=str(g))
                except Exception as e:
                    # best-effort: a failed fit is logged, not raised
                    logger.error(f'Regression failed.\n{e}')
        else:
            cc = 'black'
            x = source[xvar].values
            y = source[yvar].values
            try:
                fit = polyfit(x, y, deg)
                fit_fn = poly1d(fit)
                # fit_fn is a function which takes in x
                # and returns an estimate for y
                xx = linspace(min(x), max(x), 1000)
                p.line(x=xx, y=fit_fn(xx), line_color=cc, line_width=2)
            except Exception as e:
                logger.error(f'Regression failed.\n{e}')
    # overlay externally-fitted regression models across the visible x range
    if add_regression:
        for (key, model), c in zip(add_regression.items(), brewer_sets_12):
            xx = linspace(p.x_range.start, p.x_range.end, 1000)
            p.line(x=xx, y=model.predict(array([[a] for a in xx])),
                   line_color=c, line_width=1, legend_label=str(key))
    # legend format (only meaningful when legend entries exist)
    if groupby or add_regression:
        p.legend.click_policy = 'hide'
        p.legend.location = legend_location
        p.legend.label_text_font_size = '8pt'
        p.legend.background_fill_color = "#ffffff"
        p.legend.background_fill_alpha = 1
        p.legend.label_text_line_height = 1
        p.legend.spacing = 1
    p.min_border_left = 50
    p.min_border_top = 50
    if not hist_axes:
        # axis format
        p.xaxis.axis_label = xvar
        p.yaxis.axis_label = yvar
        p.xaxis.major_label_overrides = xaxis_labels_map or {}
        p.yaxis.major_label_overrides = yaxis_labels_map or {}
        p.xaxis.axis_label_standoff = 20
        p.yaxis.axis_label_standoff = 20
        return p
    # add histograms to axis: the scatter's own axes are hidden and replaced
    # by marginal histogram panels sharing the same ranges
    p.axis.visible = False
    # create the horizontal histogram (x marginal), NaNs excluded
    xhist, xedges = histogram(obj[xvar].values[~isnan(obj[xvar].values)],
                              bins=nbins)
    xzeros = zeros(len(xedges) - 1)
    xmax = max(xhist) * 1.1
    xh = figure(toolbar_location=None, plot_width=p.plot_width,
                plot_height=200,
                x_range=p.x_range, y_range=(-xmax / 4, xmax), min_border=10,
                min_border_left=50,
                y_axis_location="right", background_fill_color="#fafafa")
    xh.xgrid.grid_line_color = None
    xh.yaxis.major_label_orientation = pi / 4
    xh.xaxis.axis_label = xvar
    xh.xaxis.major_label_overrides = xaxis_labels_map or {}
    xh.xaxis.axis_label_standoff = 20
    line_param = dict(color="#3A5785", line_color=None)
    if groupby:
        # overlay one translucent histogram per group
        for g, cc in zip(obj[groupby].unique(), brewer_sets_123):
            xx = obj[obj[groupby] == g][xvar].values
            xxhist, xxedges = histogram(xx[~isnan(xx)], bins=nbins)
            xh.quad(bottom=0, left=xxedges[:-1], right=xxedges[1:], top=xxhist,
                    color=cc,
                    fill_alpha=0.3)
    else:
        cc = brewer['Set2'][8][1]
        xh.quad(bottom=0, left=xedges[:-1], right=xedges[1:], top=xhist,
                color=cc,
                line_color="white")
    # zero-height quads kept as placeholders for linked-selection overlays
    xh.quad(bottom=0, left=xedges[:-1], right=xedges[1:], top=xzeros,
            alpha=0.5, **line_param)
    xh.quad(bottom=0, left=xedges[:-1], right=xedges[1:], top=xzeros,
            alpha=0.1, **line_param)
    # create the vertical histogram (y marginal), NaNs excluded
    yhist, yedges = histogram(obj[yvar].values[~isnan(obj[yvar].values)],
                              bins=nbins)
    yzeros = zeros(len(yedges) - 1)
    ymax = max(yhist) * 1.1
    yh = figure(toolbar_location=None, plot_width=200,
                plot_height=p.plot_height,
                x_range=(-ymax / 4, ymax), y_range=p.y_range, min_border=10,
                y_axis_location="right", background_fill_color="#fafafa")
    yh.ygrid.grid_line_color = None
    yh.xaxis.major_label_orientation = pi / 4
    yh.yaxis.major_label_overrides = yaxis_labels_map or {}
    yh.yaxis.axis_label = yvar
    yh.yaxis.axis_label_standoff = 20
    if groupby:
        for g, cc in zip(obj[groupby].unique(), brewer_sets_123):
            yy = obj[obj[groupby] == g][yvar].values
            yyhist, yyedges = histogram(yy[~isnan(yy)], bins=nbins)
            yh.quad(left=0, bottom=yyedges[:-1], top=yyedges[1:], right=yyhist,
                    color=cc,
                    fill_alpha=0.3)
    else:
        cc = brewer['Set2'][8][1]
        yh.quad(left=0, bottom=yedges[:-1], top=yedges[1:], right=yhist,
                color=cc,
                line_color="white")
    yh.quad(left=0, bottom=yedges[:-1], top=yedges[1:], right=yzeros,
            alpha=0.5, **line_param)
    yh.quad(left=0, bottom=yedges[:-1], top=yedges[1:], right=yzeros,
            alpha=0.1, **line_param)
    # grid layout: scatter with y marginal on its right, x marginal below
    return layout([[p, yh], [xh]])
def lines(obj: Union[DataFrame, Series],
          yvar: Union[str, Iterable[str]] = None, xvar: str = None,
          xrange: Tuple = None, yrange: Tuple = None, groupby: str = None,
          highlight: Union[str, Iterable[str]] = None, line_width: float = 1,
          hline: List[float] = None, vline: List[float] = None,
          color: str = None,
          color_palette: List[str] = None,
          legend_location: str = 'top_right', height: int = 400,
          width: int = 1600,
          title: str = None, toolbar: str = "pan,box_zoom,reset",
          background: str = None,
          hoover: bool = True,
          hoover_tips: Iterable[Tuple[str, str]] = None) -> figure:
    """Plot a line graph.
    :param obj: input data.
    :param yvar: columns names in data to plot in the y axis.
    :param xvar: column or index name to plot in the x axis. Default None. If None the index
        will plot on the x axis.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
        If None the x axis shows the full x variable data plus a 10% margin.
    :param yrange: y range in the format (min, max). If None the default y axis range is shown.
    :param groupby: name of column to used to group the data. If None data is not grouped.
    :param highlight: columns names of variables in the y axis to be plotted with a thicker line.
    :param line_width: width of lines.
    :param hline: positions in the y axis where to draw a span line.
    :param vline: positions in the x axis where to draw a span line.
    :param color: color of lines, only effective when groupby is None. Default None. If None,
        the brewer['Set1'] and brewer['Set2'] palette is used.
    :param color_palette: palette of color as list of HEX codes to use in case of more than
        one group of data or variables per plot. Default None. In None, a combination of brewer
        palettes is used.
    :param legend_location: legend location in figure. Default 'center_right'.
    :param height: height of plot in pixels. Default 400.
    :param width: width of plot in pixels. Default 1600.
    :param title: title of plot. Default None. If None the title is a list of the names of the
        variables plotted in the y axis.
    :param toolbar: tools to include in the tool bar.
    :param background: color of background. Default None. If None the background color is gray.
    :param hoover: include a hoover tool. Default True.
    :param hoover_tips: variables to include in the hoover tool. Default None. If None the
        hoover tool shows the y axis value. v.g. [('label', '@column_name')]
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    # normalize yvar to a list: wrap a single column name, otherwise default
    # to all columns (DataFrame) or the series name (Series)
    yvar = (([yvar] if isinstance(yvar, str) else yvar) if yvar
            else (list(obj.columns) if isinstance(obj, DataFrame)
                  else ([obj.name or 0])))
    # FIX: a single column name passed as a string used to be split into
    # characters by list(); wrap strings in a one-element list instead
    highlight = (([highlight] if isinstance(highlight, str)
                  else list(highlight)) if highlight else [])
    # the index must be named so it survives reset_index() as a real column
    obj.index.name = obj.index.name or 'index'
    xvar = xvar or obj.index.name
    df = obj.reset_index()
    # define axis range
    # FIX: read the x variable from df (where the index has been reset to a
    # column); obj[xvar] raised KeyError when xvar defaulted to the index name
    xdiff = (df[xvar].max() - df[xvar].min()) * 1.1
    xrange = xrange or [df[xvar].max() - xdiff, df[xvar].min() + xdiff]
    # define title: default is the y variable names joined by "; "
    t = title or "; ".join([str(x) for x in yvar])
    # create figure
    p = figure(plot_height=height, plot_width=width, tools=toolbar,
               toolbar_location="above",
               background_fill_color="#f8f9f9", x_range=xrange, title=t)
    # plot data: one line glyph per y variable (or per group when groupby)
    color_palette = color_palette or brewer_sets_123
    for col, c in zip(yvar, color_palette):
        if groupby:
            groups = df[groupby].unique()
            for g, cc in zip(groups, color_palette):
                source = df[df[groupby] == g].rename(columns={col: 'y'})
                # skip groups with no plottable values
                if source['y'].empty or source['y'].isnull().all():
                    continue
                # highlighted groups are drawn with a fixed thick line
                w = 4 if g in highlight else line_width
                gly = p.line(x=xvar, y='y', line_color=color or cc,
                             line_width=w, source=source,
                             legend_label=str(g))
                if hoover:
                    tips = hoover_tips or [('value', '@y')]
                    p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
        else:
            source = df.rename(columns={col: 'y'})
            if source['y'].empty or source['y'].isnull().all():
                continue
            w = 4 if col in highlight else line_width
            gly = p.line(x=xvar, y='y', line_color=color or c, line_width=w,
                         source=source,
                         legend_label=str(col))
            if hoover:
                tips = hoover_tips or [('value', '@y')]
                p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                      toggleable=False))
    # reference lines spanning the full plot
    if hline:
        for line in hline:
            new_line = Span(location=line, dimension='width',
                            line_color='black', line_width=3,
                            line_dash='dashed')
            p.renderers.extend([new_line])
    if vline:
        for line in vline:
            new_line = Span(location=line, dimension='height',
                            line_color='gray', line_width=3,
                            line_dash='dashed')
            p.renderers.extend([new_line])
    # y axis format
    if yrange:
        p.y_range = yrange if isinstance(yrange, DataRange1d) else Range1d(
            *yrange)
    # title format: hide the auto-generated title when none was requested
    p.title.text_font_size = '10pt' if title else '0pt'
    # axis format
    p.xaxis.axis_label = xvar
    # legend format
    p.legend.click_policy = 'hide'
    p.legend.location = legend_location
    p.legend.label_text_font_size = '8pt'
    p.legend.background_fill_color = background or "#ffffff"
    p.legend.background_fill_alpha = 1
    p.legend.label_text_line_height = 1
    p.legend.spacing = 1
    # borders
    p.min_border_top = 100
    p.min_border_left = 100
    return p
def plot_blanks(df: DataFrame, title: str, width: int = 400, height: int = 800,
                palette: str = "Blues8",
                datetime_index: bool = True) -> figure:
    """Plot data frame values as pixel intensity, using index as y axis values.
    :param df: input data.
    :param title: title of plot.
    :param width: plot width in pixels.
    :param height: plot height in pixels.
    :param palette: name of bokeh color palette.
    :param datetime_index: index of input data is of type datetime.
    :return: bokeh figure.
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    p = figure(plot_width=width, plot_height=height, toolbar_location=None)
    # the image fills the whole frame, so drop the default range padding
    p.x_range.range_padding = 0
    p.y_range.range_padding = 0
    p.xaxis.major_label_text_font_size = "8pt"
    p.yaxis.major_label_text_font_size = "8pt"
    # title
    p.title.text = title
    p.title.align = "left"
    p.title.text_color = "#374f80"
    p.title.text_font_size = "18px"
    # borders
    p.min_border_left = 10
    p.min_border_right = 20
    p.min_border_top = 50
    p.min_border_bottom = 150
    # format x axis: one tick at the center of each column, labelled with the
    # column name
    p.xaxis.ticker = FixedTicker(ticks=[x + .5 for x in range(df.shape[1])])
    p.xaxis.major_label_overrides = {x + .5: y for x, y
                                     in zip(range(df.shape[1]),
                                            df.columns.values)}
    p.xaxis.major_label_orientation = 1.2
    # format y axis
    # FIX: pandas Index has no `.value` attribute (AttributeError); iterate
    # the index itself to collect the unique sorted labels
    ylabels = sorted(set(df.index))
    if datetime_index:
        ylabels = [x.strftime("%d-%B-%Y") for x in ylabels]
    p.yaxis.ticker = FixedTicker(
        ticks=[int(y) for y in linspace(0, len(ylabels) - 1, 20)])
    # NOTE(review): the ticker places 20 ticks but overrides are generated at
    # height/50 positions — with the defaults some ticks have no label;
    # confirm whether both should use the same count
    p.yaxis.major_label_overrides = {int(y): ylabels[::-1][int(y)] for y
                                     in linspace(0, len(ylabels) - 1,
                                                 int(height / 50))}
    # plot data as a single image glyph; one pixel per cell
    p.image(image=[df.values], x=0, y=0, dw=df.shape[1], dh=df.shape[0],
            palette=palette)
    return p
def line_plot_grid(
        data: Dict[str, Tuple[Tuple[Iterable[str], Iterable[str]]]]) -> figure:
    """Lay out one small line plot per dictionary entry, four plots per row.
    :param data: Each entry of the dictionary will be an independent plot in the grid. Each entry
        contains name of plot (key), tuple of x,y pairs (value).
    :return: bokeh figure
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    entries = list(data.items())
    rows = []
    # walk the entries in chunks of four — one chunk per grid row
    for start in range(0, len(entries), 4):
        current_row = []
        for name, series_pairs in entries[start:start + 4]:
            fig = figure(title=name, tools='', background_fill_color="white",
                         plot_height=300,
                         plot_width=400, toolbar_location=None)
            # borders
            fig.min_border_left = 30
            fig.min_border_right = 40
            fig.min_border_top = 30
            fig.min_border_bottom = 40
            # one line per (x, y) pair, cycling through the Set2 palette
            for (xs, ys), line_color in zip(series_pairs, brewer['Set2'][8]):
                fig.line(x=xs, y=ys, line_color=line_color)
            current_row.append(fig)
        rows.append(current_row)
    return layout(rows)
def grid_of_plots(data: List[List[Union[DataFrame, Series]]], xvar: str = None,
                  yvar: str = None,
                  xrange: Union[Tuple, DataRange1d] = None,
                  yrange: Union[Tuple, DataRange1d] = None,
                  xlabels: List = None, ylabels: List = None,
                  graph: str = 'line',
                  marker_size: Union[int, float, str] = 1,
                  fill_alpha: Union[int, float] = 0.6,
                  background_color: str = None, color: str = None,
                  color_palette: List[str] = None,
                  line_width: float = 1, groupby: str = None,
                  add_legend: bool = False,
                  height: int = 200, width: int = 200, hoover: bool = False) -> \
        Union[
            figure, None]:
    """Create a figure formed by a grid of subplots, where n.rows = len(data) and the n.columns
    might vary for each row.
    :param data: input data of format [[[DataFrame], [DataFrame2]], [row2], ...].
        Each DataFrame (or Series) is the source data for an independent plot in the grid.
        If Dataframe, must define yvar to plot.
    :param xvar: variable to plot in x axis. Default None. If None, the index will be used.
    :param yvar: variable to plot in y axis. Default None. It must be defined if Dataframe.
        If None and Series, plot series values.
    :param xrange: x range in the format (min, max) or bokeh.figure.x_range. Default None.
    :param yrange: y range in the format (min, max) or bokeh.figure.y_range. Default None.
    :param xlabels: labels to plot as column headers in grid figure. Default None.
    :param ylabels: labels to plot as row index in grid figure. The must be of same length
        as len(data). Default None.
    :param graph: type of graph to plot. Default 'line'. Also accepted 'scatter'.
    :param marker_size: size of radius of scatter markers in units, or name of the column on table
        to use as marker size. Default 1 unit.
    :param fill_alpha: transparency of fill color of scatter marker (0-transparent, 1-opaque).
        Default 0.6.
    :param background_color: background color of each plot. Default None.
    :param color: color of line or scatter marker. Default None.
    :param color_palette: palette of color as list of HEX codes to use in case of more than one
        group of data or variable per plot. Default None. In None, a combination of
        brewer 'Set' palettes is used.
    :param line_width: width of line in line plot. Default 1.
    :param groupby: name of column in table to use to group data.
    :param add_legend: show legend of each subplot.
    :param height: height of each subplot.
    :param width: width of each subplot.
    :param hoover: enable hoover tool
    :return: bokeh figure
    :author: <EMAIL>
    """
    logger = getLogger(__name__)
    grid = []
    # a single explicit color only applies when no palette was requested
    color = None if color_palette else (color or brewer['Accent'][3][0])
    if xlabels is not None:
        # header row: one text cell per grid column
        grid.append([plot_text(label, width=width) for label in xlabels])
        if ylabels is not None:
            # empty corner cell above the row labels
            grid[0].insert(0, plot_text(''))
    for i, data_row in enumerate(data):
        row = []
        if ylabels is not None:
            # rotated row label on the left edge
            row.append(
                plot_text(ylabels[i], height=height, angle=3.1416 / 2))
        for obj in data_row:
            # define x and y variable; name the index so it survives
            # reset_index() as an addressable column
            obj.index.name = obj.index.name or 'index'
            xvar = xvar or obj.index.name
            df = obj.reset_index()
            # FIX: list() on a string split a column name into characters
            # (both for a string yvar and for the default df.columns[1]);
            # wrap single names in a one-element list instead
            yvar = (([yvar] if isinstance(yvar, str) else list(yvar))
                    if yvar is not None else [df.columns[1]])
            p = figure(plot_height=height, plot_width=width, min_border=10,
                       toolbar_location=None,
                       background_fill_color=background_color)
            color_palette = color_palette or brewer_sets_12
            if groupby:
                for (name, group_data), c in zip(df.groupby(groupby),
                                                 color_palette):
                    if group_data.empty:
                        continue
                    for y in yvar:
                        # skip all-NaN series — nothing to draw
                        if df[y].isnull().all() or df[xvar].isnull().all():
                            continue
                        xarray = group_data[xvar].values
                        yarray = group_data[y].values
                        if graph == 'line':
                            gly = p.line(xarray, yarray, line_color=color or c,
                                         line_width=line_width,
                                         legend_label=str(name))
                        if graph == 'scatter':
                            gly = p.circle(xarray, yarray, radius=marker_size,
                                           color=color or c,
                                           fill_alpha=fill_alpha,
                                           legend_label=str(name))
                        if hoover:
                            # NOTE(review): the glyph above is fed raw arrays,
                            # so the implicit data source may not expose a
                            # column named xvar — confirm the tooltip resolves
                            tips = [('x', f'@{xvar}'), ('y', '@y')]
                            p.add_tools(
                                HoverTool(renderers=[gly], tooltips=tips,
                                          toggleable=False))
            else:
                for y, c in zip(yvar, color_palette):
                    if df[y].isnull().all() or df[xvar].isnull().all():
                        continue
                    if graph == 'line':
                        gly = p.line(xvar, y, source=df, line_color=color or c,
                                     line_width=line_width,
                                     legend_label=str(y))
                    if graph == 'scatter':
                        gly = p.circle(xvar, y, source=df, radius=marker_size,
                                       color=color or c,
                                       fill_alpha=fill_alpha,
                                       legend_label=str(y))
                    if hoover:
                        tips = [('x', f'@{xvar}'), ('y', '@y')]
                        p.add_tools(HoverTool(renderers=[gly], tooltips=tips,
                                              toggleable=False))
            # remove axis and grids for a compact sparkline-style cell
            p.axis.visible = False
            p.grid.visible = False
            # axis format
            if xrange:
                p.x_range = xrange if isinstance(xrange,
                                                 DataRange1d) else Range1d(
                    *xrange)
            if yrange:
                p.y_range = yrange if isinstance(yrange,
                                                 DataRange1d) else Range1d(
                    *yrange)
            # legend format
            p.legend.visible = add_legend
            if add_legend:
                p.legend.click_policy = 'hide'
                p.legend.location = 'top_right'
                p.legend.label_text_font_size = '6pt'
            row.append(p)
        grid.append(row)
    # bottom spacer row
    grid.append(plot_text('', height=100))
    return layout(grid)
def plot_table(obj: Union[DataFrame, Series], column_names: List[str] = None,
               datetime_index: bool = True,
               height: int = 600, width: int = 1600, decimals: int = 2,
               title: str = None) -> figure:
    """Render input data as a bokeh column data table with an optional caption.

    :param obj: input data; its (sorted) index becomes the first table column.
    :param column_names: names of columns in obj to include. Default None,
        meaning all columns (or the Series name).
    :param height: table height in pixels.
    :param width: maximum table width in pixels.
    :param datetime_index: format the index column as datetime.
    :param decimals: decimal places to show on numeric columns.
    :param title: caption placed above the table. Default None.
    :return: bokeh figure.
    """
    logger = getLogger(__name__)
    # the index column needs a field name the DataTable can reference
    obj.index.name = obj.index.name or 'UnnamedIndex'
    if isinstance(obj, Series):
        column_names = column_names or [obj.name]
    elif isinstance(obj, DataFrame):
        column_names = column_names or list(obj.columns)
    source = obj.sort_index().reset_index()
    if datetime_index:
        index_format = DateFormatter(format='%d-%m-%Y %H:%M:%S')
    else:
        index_format = None
    columns = [TableColumn(field=obj.index.name, title=obj.index.name,
                           formatter=index_format, width=150)]
    table_width = 150
    number_format = '0.' + '0' * decimals
    for name in column_names:
        cell_format = None
        # crude width heuristic: ~5 px per character plus padding
        cell_width = len(name) * 5 + 40
        if is_numeric_dtype(source[name]):
            cell_format = NumberFormatter(format=number_format)
        if is_datetime64_any_dtype(source[name]):
            cell_format = DateFormatter(format='%d-%m-%Y %H:%M:%S')
        columns.append(TableColumn(field=name, title=name,
                                  formatter=cell_format, width=cell_width))
        table_width += cell_width
    data_table = DataTable(columns=columns, source=ColumnDataSource(source),
                           index_position=None,
                           reorderable=True,
                           height=height, fit_columns=False, header_row=True,
                           width=min(table_width, width))
    data_table.margin = (5, 5, 20, 40)
    caption = Paragraph(text=title or '')
    caption.margin = (20, 5, 5, 40)
    return layout(caption, data_table)
def plot_text(text: str, width: float = 50, height: float = 50,
              angle: float = 0,
              fontsize: int = 12, color: str = None,
              outline_color: str = None) -> figure:
    """Plot a bare figure containing only a centred text label.

    :param text: text to plot.
    :param width: width of the text box. Default 50.
    :param height: height of the text box. Default 50.
    :param angle: angle (radians) to rotate the text from the horizontal. Default 0.
    :param fontsize: font size of the text. Default 12.
    :param color: font color (HEX code or English name). Default 'black'.
    :param outline_color: color of the outline around the text box.
        Default None (no outline).
    :return: bokeh figure.
    """
    font_size = f'{fontsize}pt'
    text_color = color or 'black'
    # bare figure spanning [-1, 1] in both directions; the label sits at (0, 0)
    fig = figure(plot_height=height, plot_width=width, min_border=10,
                 min_border_left=10,
                 x_range=(-1, 1), y_range=(-1, 1), toolbar_location=None)
    coords = linspace(-1, 1, 3)
    # only the middle point carries the text; the outer two are empty anchors
    source = ColumnDataSource(dict(x=coords, y=coords, text=['', text, '']))
    fig.text('x', 'y', text='text', text_align="center", source=source,
             angle=angle,
             text_baseline="middle", text_font_size=font_size,
             text_color=text_color)
    # strip axes and grid lines so only the text is visible
    fig.axis.visible = False
    fig.xgrid.grid_line_color = None
    fig.ygrid.grid_line_color = None
    fig.outline_line_color = outline_color
    return fig
| [
"bokeh.models.ColumnDataSource",
"numpy.polyfit",
"numpy.isnan",
"bokeh.models.RangeTool",
"logging.NullHandler",
"bokeh.models.widgets.tables.DateFormatter",
"bokeh.models.widgets.tables.TableColumn",
"itertools.zip_longest",
"pandas.notnull",
"numpy.linspace",
"pandas.api.types.is_datetime64_a... | [((1289, 1302), 'logging.NullHandler', 'NullHandler', ([], {}), '()\n', (1300, 1302), False, 'from logging import getLogger, NullHandler\n'), ((6390, 6402), 'bokeh.layouts.layout', 'layout', (['grid'], {}), '(grid)\n', (6396, 6402), False, 'from bokeh.layouts import layout\n'), ((8489, 8549), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'c', 'low': 'color_low', 'high': 'color_high'}), '(palette=c, low=color_low, high=color_high)\n', (8506, 8549), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((8896, 9064), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'x_range': 'xrange', 'y_range': 'yrange', 'plot_width': 'width', 'plot_height': 'height', 'tools': '"""box_zoom,reset"""', 'toolbar_location': 'None', 'tooltips': 'hoover_format'}), "(title=title, x_range=xrange, y_range=yrange, plot_width=width,\n plot_height=height, tools='box_zoom,reset', toolbar_location=None,\n tooltips=hoover_format)\n", (8902, 9064), False, 'from bokeh.plotting import figure\n'), ((11067, 11086), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (11076, 11086), False, 'from logging import getLogger, NullHandler\n'), ((12215, 12392), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'tools': '"""box_zoom,reset"""', 'background_fill_color': '"""white"""', 'plot_height': '(height or 600)', 'plot_width': '(width or 800)', 'toolbar_location': '"""above"""', 'x_range': 'xrange'}), "(title=title, tools='box_zoom,reset', background_fill_color='white',\n plot_height=height or 600, plot_width=width or 800, toolbar_location=\n 'above', x_range=xrange)\n", (12221, 12392), False, 'from bokeh.plotting import figure\n'), ((14858, 14877), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (14867, 14877), False, 'from logging import getLogger, NullHandler\n'), 
((16311, 16448), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'plot_width': '(width or n * 40)', 'plot_height': '(height or n * 40)', 'x_range': 'xrange', 'y_range': 'yrange', 'toolbar_location': 'None'}), '(title=title, plot_width=width or n * 40, plot_height=height or n * \n 40, x_range=xrange, y_range=yrange, toolbar_location=None)\n', (16317, 16448), False, 'from bokeh.plotting import figure\n'), ((19674, 19693), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (19683, 19693), False, 'from logging import getLogger, NullHandler\n'), ((20546, 20741), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'y_axis_type': 'ydtype', 'x_axis_type': '"""datetime"""', 'background_fill_color': '"""#f8f9f9"""', 'x_range': 'xrange', 'title': 't'}), "(plot_height=height, plot_width=width, tools=toolbar,\n toolbar_location='above', y_axis_type=ydtype, x_axis_type='datetime',\n background_fill_color='#f8f9f9', x_range=xrange, title=t)\n", (20552, 20741), False, 'from bokeh.plotting import figure\n'), ((22344, 22384), 'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'days': "['%d-%b-%y']"}), "(days=['%d-%b-%y'])\n", (22365, 22384), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((26149, 26168), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (26158, 26168), False, 'from logging import getLogger, NullHandler\n'), ((26921, 27116), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'x_axis_type': '"""datetime"""', 'y_axis_type': 'ydtype', 'background_fill_color': '"""#f8f9f9"""', 'x_range': 'xrange', 'title': 't'}), "(plot_height=height, plot_width=width, tools=toolbar,\n toolbar_location='above', x_axis_type='datetime', y_axis_type=ydtype,\n background_fill_color='#f8f9f9', x_range=xrange, 
title=t)\n", (26927, 27116), False, 'from bokeh.plotting import figure\n'), ((30831, 30871), 'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'days': "['%d-%b-%y']"}), "(days=['%d-%b-%y'])\n", (30852, 30871), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((33638, 33657), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (33647, 33657), False, 'from logging import getLogger, NullHandler\n'), ((34410, 34605), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'x_axis_type': '"""datetime"""', 'y_axis_type': 'ydtype', 'background_fill_color': '"""#f8f9f9"""', 'x_range': 'xrange', 'title': 't'}), "(plot_height=height, plot_width=width, tools=toolbar,\n toolbar_location='above', x_axis_type='datetime', y_axis_type=ydtype,\n background_fill_color='#f8f9f9', x_range=xrange, title=t)\n", (34416, 34605), False, 'from bokeh.plotting import figure\n'), ((36216, 36256), 'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'days': "['%d-%b-%y']"}), "(days=['%d-%b-%y'])\n", (36237, 36256), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((39234, 39253), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (39243, 39253), False, 'from logging import getLogger, NullHandler\n'), ((40026, 40221), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'y_axis_type': 'ydtype', 'x_axis_type': '"""datetime"""', 'background_fill_color': '"""#f8f9f9"""', 'x_range': 'xrange', 'title': 't'}), "(plot_height=height, plot_width=width, tools=toolbar,\n toolbar_location='above', y_axis_type=ydtype, x_axis_type='datetime',\n background_fill_color='#f8f9f9', x_range=xrange, title=t)\n", (40032, 40221), False, 'from bokeh.plotting import figure\n'), ((41878, 41918), 
'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'days': "['%d-%b-%y']"}), "(days=['%d-%b-%y'])\n", (41899, 41918), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((43372, 43391), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (43381, 43391), False, 'from logging import getLogger, NullHandler\n'), ((43908, 43941), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[yvar]'], {}), '(df[yvar])\n', (43931, 43941), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((44016, 44253), 'bokeh.plotting.figure', 'figure', ([], {'title': 'f"""Drag the box to change the temporal range. Variable displayed: {yvar}"""', 'plot_height': 'height', 'plot_width': 'width', 'x_axis_type': '"""datetime"""', 'y_axis_type': 'ydtype', 'toolbar_location': 'None', 'background_fill_color': '"""#f5f5f5"""'}), "(title=\n f'Drag the box to change the temporal range. Variable displayed: {yvar}',\n plot_height=height, plot_width=width, x_axis_type='datetime',\n y_axis_type=ydtype, toolbar_location=None, background_fill_color='#f5f5f5')\n", (44022, 44253), False, 'from bokeh.plotting import figure\n'), ((44510, 44550), 'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'days': "['%d-%b-%y']"}), "(days=['%d-%b-%y'])\n", (44531, 44550), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((44799, 44824), 'bokeh.models.RangeTool', 'RangeTool', ([], {'x_range': 'xrange'}), '(x_range=xrange)\n', (44808, 44824), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((48736, 48755), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (48745, 48755), False, 'from logging import getLogger, NullHandler\n'), ((49317, 49525), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 
'plot_width': 'width', 'min_border': '(50)', 'min_border_left': '(50)', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'title': 'title', 'x_range': 'xrange', 'y_range': 'yrange', 'background_fill_color': '"""#ffffff"""'}), "(plot_height=height, plot_width=width, min_border=50, min_border_left\n =50, tools=toolbar, toolbar_location='above', title=title, x_range=\n xrange, y_range=yrange, background_fill_color='#ffffff')\n", (49323, 49525), False, 'from bokeh.plotting import figure\n'), ((54924, 55147), 'bokeh.plotting.figure', 'figure', ([], {'toolbar_location': 'None', 'plot_width': 'p.plot_width', 'plot_height': '(200)', 'x_range': 'p.x_range', 'y_range': '(-xmax / 4, xmax)', 'min_border': '(10)', 'min_border_left': '(50)', 'y_axis_location': '"""right"""', 'background_fill_color': '"""#fafafa"""'}), "(toolbar_location=None, plot_width=p.plot_width, plot_height=200,\n x_range=p.x_range, y_range=(-xmax / 4, xmax), min_border=10,\n min_border_left=50, y_axis_location='right', background_fill_color=\n '#fafafa')\n", (54930, 55147), False, 'from bokeh.plotting import figure\n'), ((56435, 56634), 'bokeh.plotting.figure', 'figure', ([], {'toolbar_location': 'None', 'plot_width': '(200)', 'plot_height': 'p.plot_height', 'x_range': '(-ymax / 4, ymax)', 'y_range': 'p.y_range', 'min_border': '(10)', 'y_axis_location': '"""right"""', 'background_fill_color': '"""#fafafa"""'}), "(toolbar_location=None, plot_width=200, plot_height=p.plot_height,\n x_range=(-ymax / 4, ymax), y_range=p.y_range, min_border=10,\n y_axis_location='right', background_fill_color='#fafafa')\n", (56441, 56634), False, 'from bokeh.plotting import figure\n'), ((57641, 57664), 'bokeh.layouts.layout', 'layout', (['[[p, yh], [xh]]'], {}), '([[p, yh], [xh]])\n', (57647, 57664), False, 'from bokeh.layouts import layout\n'), ((60415, 60434), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (60424, 60434), False, 'from logging import getLogger, NullHandler\n'), ((61096, 61248), 
'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'tools': 'toolbar', 'toolbar_location': '"""above"""', 'background_fill_color': '"""#f8f9f9"""', 'x_range': 'xrange', 'title': 't'}), "(plot_height=height, plot_width=width, tools=toolbar,\n toolbar_location='above', background_fill_color='#f8f9f9', x_range=\n xrange, title=t)\n", (61102, 61248), False, 'from bokeh.plotting import figure\n'), ((64467, 64486), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (64476, 64486), False, 'from logging import getLogger, NullHandler\n'), ((64495, 64562), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'width', 'plot_height': 'height', 'toolbar_location': 'None'}), '(plot_width=width, plot_height=height, toolbar_location=None)\n', (64501, 64562), False, 'from bokeh.plotting import figure\n'), ((66407, 66426), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (66416, 66426), False, 'from logging import getLogger, NullHandler\n'), ((67108, 67120), 'bokeh.layouts.layout', 'layout', (['grid'], {}), '(grid)\n', (67114, 67120), False, 'from bokeh.layouts import layout\n'), ((70021, 70040), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (70030, 70040), False, 'from logging import getLogger, NullHandler\n'), ((74203, 74215), 'bokeh.layouts.layout', 'layout', (['grid'], {}), '(grid)\n', (74209, 74215), False, 'from bokeh.layouts import layout\n'), ((74984, 75003), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (74993, 75003), False, 'from logging import getLogger, NullHandler\n'), ((76465, 76486), 'bokeh.models.Paragraph', 'Paragraph', ([], {'text': 'title'}), '(text=title)\n', (76474, 76486), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((76528, 76549), 'bokeh.layouts.layout', 'layout', (['p', 'data_table'], {}), '(p, 
data_table)\n', (76534, 76549), False, 'from bokeh.layouts import layout\n'), ((77374, 77515), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'min_border': '(10)', 'min_border_left': '(10)', 'x_range': '(-1, 1)', 'y_range': '(-1, 1)', 'toolbar_location': 'None'}), '(plot_height=height, plot_width=width, min_border=10, min_border_left\n =10, x_range=(-1, 1), y_range=(-1, 1), toolbar_location=None)\n', (77380, 77515), False, 'from bokeh.plotting import figure\n'), ((1258, 1277), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1267, 1277), False, 'from logging import getLogger, NullHandler\n'), ((48810, 48837), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['obj[xvar]'], {}), '(obj[xvar])\n', (48826, 48837), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((49061, 49088), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['obj[yvar]'], {}), '(obj[yvar])\n', (49077, 49088), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((66495, 66529), 'itertools.zip_longest', 'zip_longest', (['*args'], {'fillvalue': 'None'}), '(*args, fillvalue=None)\n', (66506, 66529), False, 'from itertools import zip_longest\n'), ((75298, 75339), 'bokeh.models.widgets.tables.DateFormatter', 'DateFormatter', ([], {'format': '"""%d-%m-%Y %H:%M:%S"""'}), "(format='%d-%m-%Y %H:%M:%S')\n", (75311, 75339), False, 'from bokeh.models.widgets.tables import NumberFormatter, DateFormatter, TableColumn, DataTable\n'), ((75392, 75487), 'bokeh.models.widgets.tables.TableColumn', 'TableColumn', ([], {'field': 'obj.index.name', 'title': 'obj.index.name', 'formatter': 'index_format', 'width': '(150)'}), '(field=obj.index.name, title=obj.index.name, formatter=\n index_format, width=150)\n', (75403, 75487), False, 'from bokeh.models.widgets.tables import NumberFormatter, DateFormatter, TableColumn, DataTable\n'), ((75683, 75712), 
'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['source[col]'], {}), '(source[col])\n', (75699, 75712), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((75788, 75824), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['source[col]'], {}), '(source[col])\n', (75811, 75824), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((77553, 77571), 'numpy.linspace', 'linspace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (77561, 77571), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((77573, 77591), 'numpy.linspace', 'linspace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (77581, 77591), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((16815, 16923), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'toggleable': '(False)', 'tooltips': "[('x', '@x'), ('y', '@y'), ('stat', '@stat_str')]"}), "(renderers=[gly], toggleable=False, tooltips=[('x', '@x'), ('y',\n '@y'), ('stat', '@stat_str')])\n", (16824, 16923), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((20349, 20379), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[y]'], {}), '(df[y])\n', (20372, 20379), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((22488, 22504), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (22495, 22504), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((26762, 26792), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[y]'], {}), '(df[y])\n', (26785, 26792), False, 'from pandas.api.types import is_numeric_dtype, 
is_datetime64_any_dtype\n'), ((30974, 30990), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (30981, 30990), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((34251, 34281), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[y]'], {}), '(df[y])\n', (34274, 34281), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((36359, 36375), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (36366, 36375), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((39867, 39897), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[y]'], {}), '(df[y])\n', (39890, 39897), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((42021, 42037), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (42028, 42037), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((43666, 43692), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['df[yvar]'], {}), '(df[yvar])\n', (43682, 43692), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((43696, 43729), 'pandas.api.types.is_datetime64_any_dtype', 'is_datetime64_any_dtype', (['df[yvar]'], {}), '(df[yvar])\n', (43719, 43729), False, 'from pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype\n'), ((44653, 44669), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (44660, 44669), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), 
((51317, 51347), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'pal'}), '(palette=pal)\n', (51334, 51347), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((51877, 51898), 'bokeh.transform.jitter', 'jitter', (['"""x"""', 'x_jitter'], {}), "('x', x_jitter)\n", (51883, 51898), False, 'from bokeh.transform import jitter\n'), ((53710, 53756), 'numpy.linspace', 'linspace', (['p.x_range.start', 'p.x_range.end', '(1000)'], {}), '(p.x_range.start, p.x_range.end, 1000)\n', (53718, 53756), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((62788, 62884), 'bokeh.models.Span', 'Span', ([], {'location': 'line', 'dimension': '"""width"""', 'line_color': '"""black"""', 'line_width': '(3)', 'line_dash': '"""dashed"""'}), "(location=line, dimension='width', line_color='black', line_width=3,\n line_dash='dashed')\n", (62792, 62884), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((63044, 63140), 'bokeh.models.Span', 'Span', ([], {'location': 'line', 'dimension': '"""height"""', 'line_color': '"""gray"""', 'line_width': '(3)', 'line_dash': '"""dashed"""'}), "(location=line, dimension='height', line_color='gray', line_width=3,\n line_dash='dashed')\n", (63048, 63140), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((63339, 63355), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (63346, 63355), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((66610, 66728), 'bokeh.plotting.figure', 'figure', ([], 
{'title': 'key', 'tools': '""""""', 'background_fill_color': '"""white"""', 'plot_height': '(300)', 'plot_width': '(400)', 'toolbar_location': 'None'}), "(title=key, tools='', background_fill_color='white', plot_height=300,\n plot_width=400, toolbar_location=None)\n", (66616, 66728), False, 'from bokeh.plotting import figure\n'), ((70821, 70947), 'bokeh.plotting.figure', 'figure', ([], {'plot_height': 'height', 'plot_width': 'width', 'min_border': '(10)', 'toolbar_location': 'None', 'background_fill_color': 'background_color'}), '(plot_height=height, plot_width=width, min_border=10,\n toolbar_location=None, background_fill_color=background_color)\n', (70827, 70947), False, 'from bokeh.plotting import figure\n'), ((75739, 75776), 'bokeh.models.widgets.tables.NumberFormatter', 'NumberFormatter', ([], {'format': 'number_format'}), '(format=number_format)\n', (75754, 75776), False, 'from bokeh.models.widgets.tables import NumberFormatter, DateFormatter, TableColumn, DataTable\n'), ((75851, 75892), 'bokeh.models.widgets.tables.DateFormatter', 'DateFormatter', ([], {'format': '"""%d-%m-%Y %H:%M:%S"""'}), "(format='%d-%m-%Y %H:%M:%S')\n", (75864, 75892), False, 'from bokeh.models.widgets.tables import NumberFormatter, DateFormatter, TableColumn, DataTable\n'), ((75916, 75988), 'bokeh.models.widgets.tables.TableColumn', 'TableColumn', ([], {'field': 'col', 'title': 'col', 'formatter': 'col_format', 'width': 'col_width'}), '(field=col, title=col, formatter=col_format, width=col_width)\n', (75927, 75988), False, 'from bokeh.models.widgets.tables import NumberFormatter, DateFormatter, TableColumn, DataTable\n'), ((76151, 76175), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['source'], {}), '(source)\n', (76167, 76175), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((9884, 9917), 'bokeh.models.BasicTicker', 'BasicTicker', ([], 
{'desired_num_ticks': '(10)'}), '(desired_num_ticks=10)\n', (9895, 9917), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((13072, 13151), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': "[('Freq(x)', '@values')]", 'toggleable': '(False)'}), "(renderers=[gly], tooltips=[('Freq(x)', '@values')], toggleable=False)\n", (13081, 13151), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((52169, 52228), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (52178, 52228), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((52309, 52327), 'pandas.notnull', 'notnull', (['obj[xvar]'], {}), '(obj[xvar])\n', (52316, 52327), False, 'from pandas import notnull, DataFrame, Series\n'), ((52332, 52350), 'pandas.notnull', 'notnull', (['obj[yvar]'], {}), '(obj[yvar])\n', (52339, 52350), False, 'from pandas import notnull, DataFrame, Series\n'), ((53217, 53235), 'numpy.polyfit', 'polyfit', (['x', 'y', 'deg'], {}), '(x, y, deg)\n', (53224, 53235), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((53261, 53272), 'numpy.poly1d', 'poly1d', (['fit'], {}), '(fit)\n', (53267, 53272), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((54782, 54805), 'numpy.isnan', 'isnan', (['obj[xvar].values'], {}), '(obj[xvar].values)\n', (54787, 54805), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((56293, 56316), 'numpy.isnan', 'isnan', (['obj[yvar].values'], 
{}), '(obj[yvar].values)\n', (56298, 56316), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((22054, 22113), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (22063, 22113), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((28485, 28544), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (28494, 28544), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((35946, 36005), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (35955, 36005), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((41608, 41667), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (41617, 41667), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((50070, 50108), 'bokeh.transform.jitter', 'jitter', (['"""x"""', 'x_jitter'], {'range': 'p.x_range'}), "('x', x_jitter, range=p.x_range)\n", (50076, 50108), False, 'from bokeh.transform import jitter\n'), ((50139, 50177), 'bokeh.transform.jitter', 'jitter', (['"""y"""', 'y_jitter'], {'range': 'p.y_range'}), "('y', y_jitter, range=p.y_range)\n", (50145, 
50177), False, 'from bokeh.transform import jitter\n'), ((50524, 50583), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (50533, 50583), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((51568, 51601), 'bokeh.models.BasicTicker', 'BasicTicker', ([], {'desired_num_ticks': '(10)'}), '(desired_num_ticks=10)\n', (51579, 51601), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((52615, 52633), 'numpy.polyfit', 'polyfit', (['x', 'y', 'deg'], {}), '(x, y, deg)\n', (52622, 52633), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((52663, 52674), 'numpy.poly1d', 'poly1d', (['fit'], {}), '(fit)\n', (52669, 52674), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((62602, 62661), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (62611, 62661), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((73628, 73644), 'bokeh.models.Range1d', 'Range1d', (['*xrange'], {}), '(*xrange)\n', (73635, 73644), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((73813, 73829), 'bokeh.models.Range1d', 'Range1d', (['*yrange'], {}), '(*yrange)\n', (73820, 73829), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, 
BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((21496, 21555), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (21505, 21555), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((27912, 27971), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (27921, 27971), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((30479, 30538), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (30488, 30538), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((35389, 35448), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (35398, 35448), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((41017, 41076), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (41026, 41076), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((53798, 53822), 'numpy.array', 'array', (['[[a] for a in xx]'], {}), 
'([[a] for a in xx])\n', (53803, 53822), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((55645, 55654), 'numpy.isnan', 'isnan', (['xx'], {}), '(xx)\n', (55650, 55654), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((57066, 57075), 'numpy.isnan', 'isnan', (['yy'], {}), '(yy)\n', (57071, 57075), False, 'from numpy import linspace, histogram, zeros, pi, polyfit, poly1d, isnan, array\n'), ((62019, 62078), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (62028, 62078), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((73242, 73301), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (73251, 73301), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((29896, 29955), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (29905, 29955), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n'), ((72291, 72350), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[gly]', 'tooltips': 'tips', 'toggleable': '(False)'}), '(renderers=[gly], tooltips=tips, toggleable=False)\n', (72300, 72350), False, 'from bokeh.models import Range1d, ColumnDataSource, RangeTool, LinearColorMapper, BasicTicker, ColorBar, HoverTool, BoxSelectTool, Span, Paragraph, DataRange1d\n')] |
## Python 3
import logging
import math
import numpy as np
import numpy.linalg
import numpy.random
from collections import defaultdict
from collections import Counter
from utility import *
import inside
RIGHT_ARROW = "->"
START_SYMBOL = "S"
UNARY_SYMBOL = "<A>"
SAMPLE_MAX_DEPTH=100
SAMPLE_CACHE_SIZE=1000
PARTITION_FUNCTION_MAX_ITERATIONS=100
PARTITION_FUNCTION_EPSILON=1e-9
class PCFG:
    """
    This class stores a PCFG where the underlying CFG is in CNF (more or less).

    Productions are tuples: (A, a) for a lexical rule and (A, B, C) for a
    binary rule.  ``parameters`` maps each production to its probability and
    ``log_parameters`` caches the corresponding logs.
    """
    def __init__(self, cfg=None):
        if cfg:
            self.nonterminals = set(cfg.nonterminals)
            self.terminals = set(cfg.terminals)
            self.start = cfg.start
            self.productions = list(cfg.productions)
        else:
            self.nonterminals = set()
            self.terminals = set()
            ## Productions are tuples (A, a) or (A, B,C)
            self.start = None
            self.productions = []
        self.parameters = {}
        self.log_parameters = {}
    def make_unary(self):
        """
        return a new grammar which has the same distribution over lengths and only one terminal symbol.
        """
        upcfg = PCFG()
        upcfg.terminals.add(UNARY_SYMBOL)
        upcfg.nonterminals = set(self.nonterminals)
        upcfg.start = self.start
        for prod in self.productions:
            p = self.parameters[prod]
            if len(prod) == 3:
                # binary
                upcfg.productions.append(prod)
                upcfg.parameters[prod] = p
            else:
                # lexical: collapse every terminal onto the single unary symbol,
                # summing the probabilities of the merged productions
                nt,a = prod
                newprod = (nt,UNARY_SYMBOL)
                if newprod in upcfg.parameters:
                    upcfg.parameters[newprod] += p
                else:
                    upcfg.productions.append(newprod)
                    upcfg.parameters[newprod] = p
        # parameters still sum to one per lhs, so only the logs need refreshing
        upcfg.set_log_parameters()
        return upcfg
    def store(self, filename,header=[]):
        """
        Store this to a file.

        header lines are written first, prefixed with '#'.
        """
        self.productions.sort()
        with open(filename,'w') as fhandle:
            if len(header) > 0:
                for line in header:
                    fhandle.write('#' + line + "\n")
            for prod in self.productions:
                p = self.parameters[prod]
                if len(prod) == 2:
                    fhandle.write( "%e %s %s %s \n" % ( p, prod[0], RIGHT_ARROW, prod[1] ))
                else:
                    fhandle.write( "%e %s %s %s %s \n" % ( p, prod[0], RIGHT_ARROW, prod[1],prod[2]))
    def store_mjio(self, filename):
        """
        Store this in a format suitable for use by MJ IO code.
        So introduce preterminals for all nonterminals.
        Assume in CNF.
        """
        with open(filename,'w') as fhandle:
            fhandle.write("1.0 S1 --> S \n")
            preterminals = { nt : ("PRE" + nt) for nt in self.nonterminals}
            ## now add up probs of preterminals
            preterminal_probs = { nt : 0.0 for nt in self.nonterminals}
            for prod in self.productions:
                if len(prod) == 2:
                    preterminal_probs[prod[0]] += self.parameters[prod]
            for nt in self.nonterminals:
                fhandle.write( "%0.12f %s --> %s \n" % ( preterminal_probs[nt], nt, preterminals[nt] ))
            for prod in self.productions:
                if len(prod) == 2:
                    # preterminal
                    p = self.parameters[prod] / preterminal_probs[prod[0]]
                    fhandle.write( "%0.12f %s --> %s \n" % ( p, preterminals[prod[0]], prod[1]))
                else:
                    # binary rule so
                    # BUGFIX: productions are tuples and have no .lhs attribute;
                    # the original ``prod.lhs`` raised AttributeError here.
                    p = self.parameters[prod] / (1.0 - preterminal_probs[prod[0]])
                    fhandle.write( "%0.12f %s --> %s %s \n" % ( p, prod[0], prod[1],prod[2]))
    def rhs_index(self, terminal):
        """
        Utility function that returns a list of all nonterminals that have a production
        with this terminal on its right hand side.
        """
        return [ prod[0] for prod in self.productions if terminal in prod]
    def copy(self):
        """
        return a new copy of this pcfg.
        """
        copypcfg = PCFG()
        copypcfg.nonterminals = set(self.nonterminals)
        copypcfg.terminals = set(self.terminals)
        copypcfg.start = self.start
        copypcfg.productions = list(self.productions)
        copypcfg.parameters = dict(self.parameters)
        copypcfg.log_parameters = dict(self.log_parameters)
        return copypcfg
    def trim_zeros(self, threshold = 0.0):
        """
        destructively remove all zero productions, zero nonterminals and zero terminals.
        """
        self.productions = [ prod for prod in self.productions if self.parameters[prod] > threshold]
        self.nonterminals = set( [ prod[0] for prod in self.productions])
        self.terminals = set( [ prod[1] for prod in self.productions if len(prod) == 2])
        self.parameters = { prod : self.parameters[prod] for prod in self.productions}
    def set_log_parameters(self):
        """Recompute the cached log of every parameter."""
        self.log_parameters = {}
        for prod in self.productions:
            self.log_parameters[prod] = math.log(self.parameters[prod])
    def normalise(self):
        """
        Rescale parameters so those with each lhs sum to one.
        Raises ValueError if any production has a zero parameter.
        """
        totals = defaultdict(float)
        for prod in self.productions:
            totals[prod[0]] += self.parameters[prod]
        for prod in self.productions:
            p = self.parameters[prod]
            if p == 0.0:
                raise ValueError("zero parameter",prod)
            param = p/ totals[prod[0]]
            self.parameters[prod] = param
            self.log_parameters[prod] = math.log(param)
    def check_normalisation(self):
        """Return a dict mapping each lhs to the sum of its parameters."""
        totals = defaultdict(float)
        for prod in self.productions:
            totals[prod[0]] += self.parameters[prod]
        return totals
    def is_normalised(self, epsilon= 1e-5):
        """True iff every lhs's parameters sum to 1 within epsilon."""
        totals = self.check_normalisation()
        for a in totals:
            if abs(1.0 - totals[a]) > epsilon:
                return False
        return True
    def log_probability_derivation(self, tree):
        """
        Compute the log prob of a derivation.
        """
        if len(tree) == 2:
            # lexical
            return self.log_parameters[tree]
        else:
            left_lp = self.log_probability_derivation(tree[1])
            right_lp = self.log_probability_derivation(tree[2])
            local_tree = (tree[0], tree[1][0], tree[2][0])
            local_lp = self.log_parameters[local_tree]
            return left_lp + right_lp + local_lp
    ## Various properties of the PCFG that may be useful to compute.
    def useful_information(self):
        """Return a list of human-readable summary statistics."""
        return [ 'Statistics of the final grammar',
            'Actual expected length %f' % self.expected_length(),
            'Derivational entropy %f' % self.derivational_entropy(),
            'Derivational entropy (split) binary %f lexical %f ' % self.derivational_entropy_split(),
            'Lexical Ambiguity (entropy) %f' % self.compute_lexical_ambiguity()
            ]
    def per_word_entropies(self):
        """
        dict mapping each word to the entropy of the posterior distribution of nonterminals given
        terminals.
        """
        result = defaultdict(float)
        te = self.terminal_expectations()
        pe = self.production_expectations()
        for prod, e in pe.items():
            if len(prod) == 2:
                a = prod[1]
                # posterior
                p = e/te[a]
                result[a] -= p * math.log(p)
        return result
    def entropy_unigram(self):
        """
        Entropy of the unigram distribution: compute exactly.
        """
        te = self.terminal_expectations()
        L = self.expected_length()
        e = 0.0
        for a in te.values():
            p = a/L
            e -= p * math.log(p)
        return e
    def entropy_preterminal(self):
        """
        Entropy of the preterminal distribution.
        """
        prode = self.production_expectations()
        preterminalexpectations = defaultdict(float)
        for prod,e in prode.items():
            if len(prod) == 2:
                preterminalexpectations[prod[0]] += e
        l = self.expected_length()
        e = 0.0
        for a in preterminalexpectations.values():
            p = a/l
            e -= p * math.log(p)
        return e
    def entropy_conditional_nonterminals(self):
        # entropy of the conditional productions, per nonterminal.
        e = defaultdict(float)
        for prod in self.productions:
            p = self.parameters[prod]
            if p > 0:
                e[prod[0]] -= p * math.log(p)
        return e
    def derivational_entropy(self):
        """
        entropy of the distribution over derivation trees.
        """
        nt_entropies = self.entropy_conditional_nonterminals()
        nt_expectations = self.nonterminal_expectations()
        return sum([ nt_entropies[nt] * nt_expectations[nt] for nt in self.nonterminals])
    def derivational_entropy_split(self):
        """
        Rerun binary and lexical entropies which sum to the derivational entropy.
        """
        lexical_totals = self.sum_lexical_probs()
        bentropies = 0.0
        lentropies = 0.0
        nt_expectations = self.nonterminal_expectations()
        for nt, p in lexical_totals.items():
            bentropies -= nt_expectations[nt] *p * math.log(p)
        for prod, alpha in self.parameters.items():
            if len(prod) == 2:
                p = alpha / lexical_totals[prod[0]]
                lentropies -= nt_expectations[prod[0]] * alpha * math.log(p)
            else:
                bentropies -= nt_expectations[prod[0]] * alpha * math.log(alpha)
        return (bentropies,lentropies)
    def sum_lexical_probs(self):
        """Return a dict mapping each lhs to its total lexical probability."""
        sums = defaultdict(float)
        for prod, alpha in self.parameters.items():
            if len(prod) == 2:
                sums[prod[0]] += alpha
        return sums
    def monte_carlo_entropy(self, n, sampler = None):
        """
        Use a Monte Carlo approximation; return string entropy, unlabeled entropy and derivation entropy.
        """
        string_entropy=0
        unlabeled_tree_entropy = 0
        labeled_tree_entropy = 0
        if sampler == None:
            sampler = Sampler(self)
        insidec = inside.InsideComputation(self)
        for i in range(n):
            tree = sampler.sample_tree()
            lp1 = self.log_probability_derivation(tree)
            sentence = collect_yield(tree)
            lp2 = insidec.inside_bracketed_log_probability(tree)
            lp3 = insidec.inside_log_probability(sentence)
            # BUGFIX: lp3 is the string log prob, lp2 the bracketed (unlabeled)
            # tree log prob, lp1 the derivation log prob; the original
            # accumulated lp1 into the string entropy and lp3 into the
            # derivation entropy, contradicting its own docstring.
            string_entropy -= lp3
            unlabeled_tree_entropy -= lp2
            labeled_tree_entropy -= lp1
        return string_entropy/n, unlabeled_tree_entropy/n, labeled_tree_entropy/n
    def compute_lexical_ambiguity(self):
        """
        Conditional entropy in nats of the preterminal given the terminal.
        Note that even if the grammar is unambiguous, this can be greater than zero.
        """
        l = self.expected_length()
        te = self.terminal_expectations()
        pe = self.production_expectations()
        ce = 0.0
        for prod, e in pe.items():
            if len(prod) ==2:
                le = te[prod[1]]
                ce -= (e/l) * math.log( e/le)
        return ce
    def estimate_ambiguity(self, samples = 1000, max_length=100, sampler = None):
        """
        Monte Carlo estimate of the conditional entropy H(tree|string)
        """
        if sampler==None:
            mysampler = Sampler(self)
        else:
            mysampler = sampler
        insider = inside.InsideComputation(self)
        total = 0.0
        n = 0.0
        for i in range(samples):
            tree = mysampler.sample_tree()
            s = collect_yield(tree)
            if len(s) > max_length:
                continue
            lp = insider.inside_log_probability(s)
            lpd = self.log_probability_derivation(tree)
            total += lp - lpd
            n += 1
        return total/n
    def estimate_communicability(self, samples = 1000, max_length=100, sampler = None):
        """
        Returns two estimates of the communicability; The second one is I think better in most cases.
        """
        if sampler==None:
            mysampler = Sampler(self)
        else:
            mysampler = sampler
        insider = inside.InsideComputation(self)
        same = 0.0
        ratio = 0.0
        n = 0
        for i in range(samples):
            # BUGFIX: must sample from mysampler; the original used
            # ``sampler`` which is None when the default is taken.
            t = mysampler.sample_tree()
            s =collect_yield(t)
            if len(s) <= max_length:
                n += 1
                mapt = insider.viterbi_parse(s)
                if t == mapt:
                    same += 1
                lpd = self.log_probability_derivation(mapt)
                lps = insider.inside_log_probability(s)
                ratio += math.exp(lpd - lps)
        return ( same/n, ratio/n)
    def partition_nonterminals(self):
        """
        Partition the sets of nonterminals into sets of mutually recursive nonterminals.
        """
        graph = defaultdict(list)
        for prod in self.productions:
            if len(prod) == 3:
                for i in [1,2]:
                    graph[prod[0]].append(prod[i])
        return strongly_connected_components(graph)
    def renormalise(self):
        """
        renormalise so that it is consistent.
        destructive
        """
        rn = self.compute_partition_function_fast()
        for prod in self.productions:
            if len(prod) == 3:
                a,b,c = prod
                self.parameters[prod] *= (rn[b] * rn[c])/rn[a]
            else:
                self.parameters[prod] *= 1.0/rn[prod[0]]
        self.normalise()
    def compute_partition_function_fast(self):
        """
        Solve the quadratic equations using Newton method.
        """
        ntindex = { nt:i for i,nt in enumerate(list(self.nonterminals))}
        n = len(ntindex)
        alpha = defaultdict(float)
        beta = np.zeros(n)
        for prod in self.productions:
            p = self.parameters[prod]
            if len(prod) == 2:
                beta[ ntindex[prod[0]]] += p
            else:
                alpha[ (ntindex[prod[0]],ntindex[prod[1]],ntindex[prod[2]])]+= p
        x = np.zeros(n)
        def f(y):
            ## evaluate f at this point.
            fy = beta - y
            for i,j,k in alpha:
                a = alpha[(i,j,k)]
                #i is lhs
                fy[i] += a * y[j] * y[k]
            return fy
        def J(y):
            # evaluate Jacobian
            J = -1 * np.eye(n)
            for i,j,k in alpha:
                a = alpha[(i,j,k)]
                J[i,j] += a * y[k]
                J[i,k] += a * y[j]
            return J
        for i in range(PARTITION_FUNCTION_MAX_ITERATIONS):
            y = f(x)
            x1 = x - np.dot(np.linalg.inv(J(x)),y)
            if numpy.linalg.norm(x - x1, 1) < PARTITION_FUNCTION_EPSILON:
                return { nt : x[ntindex[nt]] for nt in self.nonterminals}
            x = x1
        raise ValueError("failed to converge")
    def compute_partition_function_fp(self):
        """
        Return a dict mapping each nonterminal to the prob that a string terminates from that.
        Use the naive fixed point algorithm.
        """
        bprods = [ prod for prod in self.productions if len(prod) == 3]
        lprodmap = defaultdict(float)
        for prod in self.productions:
            if len(prod) == 2:
                lprodmap[prod[0]] += self.parameters[prod]
        z = defaultdict(float)
        for i in range(PARTITION_FUNCTION_MAX_ITERATIONS):
            z = self._compute_one_step_partition(z, bprods,lprodmap)
        return z
    def production_expectations(self):
        """Expected number of uses of each production per derivation."""
        nte = self.nonterminal_expectations()
        return { prod: (self.parameters[prod] * nte[prod[0]]) for prod in self.productions }
    def terminal_expectations(self):
        """Expected number of occurrences of each terminal per derivation."""
        answer = defaultdict(float)
        pe = self.production_expectations()
        for prod in pe:
            if len(prod) == 2:
                alpha = pe[prod]
                answer[prod[1]] += alpha
        return answer
    def expected_length(self):
        """Expected length of a sampled string (sum of lexical expectations)."""
        pe = self.production_expectations()
        return sum([ pe[prod] for prod in pe if len(prod) == 2 ])
    def nonterminal_expectations(self):
        """
        Compute the expected number of times each nonterminal will be used in a given derivation.
        return a dict mapping nonterminals to non-negative reals.
        """
        n = len(self.nonterminals)
        transitionMatrix = np.zeros([n,n])
        ntlist = list(self.nonterminals)
        index = { nt:i for i,nt in enumerate(ntlist)}
        for prod in self.productions:
            alpha = self.parameters[prod]
            lhs = index[prod[0]]
            if len(prod) == 3:
                transitionMatrix[lhs,index[prod[1]]] += alpha
                transitionMatrix[lhs,index[prod[2]]] += alpha
        # expectations solve N = (I - T)^-1 T, read off the start row
        result = np.dot(numpy.linalg.inv(np.eye(n) - transitionMatrix),transitionMatrix)
        si = index[self.start]
        resultD = { nt : result[si, index[nt]] for nt in self.nonterminals}
        resultD[self.start] += 1
        return resultD
    def expected_lengths(self):
        """
        Compute the expected length of a string generated by each nonterminal.
        Assume that the grammar is consistent.
        """
        n = len(self.nonterminals)
        m = len(self.terminals)
        ntlist = list(self.nonterminals)
        transitionMatrix = np.zeros([n,n])
        outputMatrix = np.zeros([n,m])
        index = {nt : i for i,nt in enumerate(ntlist)}
        terminalIndex = { a: i for i,a in enumerate(list(self.terminals))}
        # each element stores the expected number of times that
        # nonterminal will generate another nonterminal in a single derivation.
        for prod in self.productions:
            alpha = self.parameters[prod]
            lhs = index[prod[0]]
            if len(prod) == 2:
                outputMatrix[lhs,terminalIndex[prod[1]]] += alpha
            else:
                transitionMatrix[lhs,index[prod[1]]] += alpha
                transitionMatrix[lhs,index[prod[2]]] += alpha
        # n = o * (1- t)^-1
        result = np.dot(numpy.linalg.inv(np.eye(n) - transitionMatrix),outputMatrix)
        return result
    def _compute_one_step_partition(self, z, bprods,lprodmap):
        # one step of the fixed-point iteration for the partition function
        newz = defaultdict(float)
        for production in bprods:
            score = self.parameters[production] * z[production[1]] * z[production[2]]
            newz[production[0]] += score
        for nt in lprodmap:
            newz[nt] += lprodmap[nt]
        return newz
    def approximate_kernel(self):
        """
        return a map from nonterminals to (a, p) pairs
        where a is a terminal and p is the posterior probability.
        """
        pe = self.production_expectations()
        te = self.terminal_expectations()
        result = {}
        for prod, e in pe.items():
            if len(prod) == 2:
                nt,a = prod
                posteriora = e/te[a]
                if nt in result:
                    (b,posteriorb) = result[nt]
                    if posteriora > posteriorb:
                        result[nt] = (a,posteriora)
                else:
                    result[nt] = (a,posteriora)
        return result
    def oracle_fkp1(self):
        """
        Return a list of terminals that characterise the nonterminals,
        picking the most likely one.
        Start with "S"
        """
        lhs_counter = defaultdict(list)
        answer = []
        for prod in self.productions:
            if len(prod) == 2:
                lhs_counter[prod[1]].append(prod[0])
        nts = [ self.start ]
        for nt in self.nonterminals:
            if nt != self.start:
                nts.append(nt)
        for nt in nts:
            candidates = [ a for a in self.terminals if lhs_counter[a] == [nt] ]
            if len(candidates) == 0:
                return ()
            else:
                answer.append( max(candidates, key = lambda a : self.parameters[ (nt,a)]))
        return answer
    def oracle_kernel(self):
        """
        Return a map from nonterminals to anchors.
        picking the most likely one.
        Return empty map if it does not satisfy the condition.
        """
        lhs_counter = defaultdict(list)
        answer = {}
        for prod in self.productions:
            if len(prod) == 2:
                lhs_counter[prod[1]].append(prod[0])
        for nt in self.nonterminals:
            candidates = [ a for a in self.terminals if lhs_counter[a] == [nt] ]
            if len(candidates) == 0:
                return {}
            else:
                answer[nt] = max(candidates, key = lambda a : self.parameters[ (nt,a)])
        return answer
    def renormalise_convergent_wcfg(self):
        """
        Scale all of the productions with "S" to get a globally normalised pcfg.
        """
        scale = self.compute_partition_function_fast()[self.start]
        for prod in self.parameters:
            if prod[0]== self.start:
                self.parameters[prod] /= scale
        self.set_log_parameters()
    def renormalise_divergent_wcfg(self):
        """
        Algorithm from Smith and Johnson (1999)
        Weighted and Probabilistic Context-Free
        Grammars Are Equally Expressive.
        This gives us a wcfg which is convergent, then we can convert to a PCFG.
        """
        sigma = len(self.terminals)
        beta = max ( [ self.parameters[prod] for prod in self.productions if len(prod) == 3])
        nu = max ( [ self.parameters[prod] for prod in self.productions if len(prod) == 2])
        factor = 1.0 / ( 8 * sigma * beta * nu)
        pcfg1 = self.copy()
        for prod in self.productions:
            if len(prod) == 2:
                pcfg1.parameters[prod] *= factor
        pcfg1.set_log_parameters()
        return pcfg1
    def convert_parameters_pi2xi(self):
        """Convert pi (conditional) parameters to xi (expectation) form."""
        nte = self.nonterminal_expectations()
        xi = {}
        for prod in self.productions:
            if len(prod) == 3:
                a,b,c = prod
                param = self.parameters[prod]
                xib = param * nte[a]/ (nte[b] * nte[c])
                xi[prod] = xib
            else:
                a,b = prod
                param = self.parameters[prod]
                xib = param * nte[a]
                xi[prod] = xib
        xipcfg = self.copy()
        xipcfg.parameters = xi
        xipcfg.set_log_parameters()
        return xipcfg
    def convert_parameters_xi2pi(self):
        """
        Assume pcfg1 has parameters in xi format.
        Convert these to pi
        """
        pcfg1 = self.copy()
        expectations = pcfg1.compute_partition_function_fast()
        for prod in pcfg1.productions:
            param = pcfg1.parameters[prod]
            if len(prod) == 2:
                nt,a = prod
                newparam = param/expectations[nt]
            else:
                a,b,c = prod
                newparam = param * expectations[b] * expectations[c] / expectations[a]
            pcfg1.parameters[prod] = newparam
        pcfg1.set_log_parameters()
        return pcfg1
def estimate_pcfg_from_treebank(filename):
    """
    Read a treebank file and return the maximum-likelihood PCFG.

    Each line may start with numeric score fields, which are skipped
    before the tree itself is parsed.
    """
    production_counts = Counter()
    grammar = PCFG()
    with open(filename) as treebank:
        for line in treebank:
            tokens = line.split()
            # skip any leading floats (they start with '-' or '0')
            start = 0
            while tokens[start].startswith(('-', '0')):
                start += 1
            tree = string_to_tree(" ".join(tokens[start:]))
            grammar.start = tree[0]
            count_productions(tree, production_counts)
    for production, count in production_counts.items():
        grammar.productions.append(production)
        grammar.nonterminals.add(production[0])
        if len(production) == 2:
            grammar.terminals.add(production[1])
        grammar.parameters[production] = float(count)
    grammar.normalise()
    return grammar
def load_pcfg_from_file(filename, normalise=True, discard_zero=True):
    """
    Load a PCFG from a text file of productions.

    Lines have the form ``p A -> rhs...``; '#' lines are comments.
    Nonterminals are exactly the symbols appearing on a left hand side,
    the start symbol is S, and every other rhs symbol is a terminal.
    """
    lhs_symbols = set()
    raw_productions = []
    with open(filename) as source:
        for line in source:
            if line.startswith("#"):
                continue
            fields = line.split()
            if not fields:
                continue
            assert len(fields) >= 4
            assert fields[2] == RIGHT_ARROW
            weight = float(fields[0])
            assert weight >= 0
            left = fields[1]
            lhs_symbols.add(left)
            # zero-weight productions may be dropped entirely
            if weight == 0.0 and discard_zero:
                continue
            raw_productions.append((weight, left, tuple(fields[3:])))
    assert START_SYMBOL in lhs_symbols
    terminals = set()
    totals = {nt: 0.0 for nt in lhs_symbols}
    for weight, left, rhs in raw_productions:
        totals[left] += weight
        for symbol in rhs:
            if symbol not in lhs_symbols:
                terminals.add(symbol)
    grammar = PCFG()
    for weight, left, rhs in raw_productions:
        production = (left,) + rhs
        grammar.productions.append(production)
        if normalise:
            weight /= totals[left]
        grammar.parameters[production] = weight
        if discard_zero:
            # with zeros discarded the log is always defined
            grammar.log_parameters[production] = math.log(weight)
    grammar.start = START_SYMBOL
    grammar.terminals = terminals
    grammar.nonterminals = lhs_symbols
    return grammar
class Multinomial:
    """
    The collection of productions sharing one left hand side, with cached
    multinomial sampling over their conditional probabilities.
    """
    def __init__(self, pcfg, nonterminal, cache_size=SAMPLE_CACHE_SIZE, random=None):
        """
        pcfg: grammar to sample from.
        nonterminal: the shared left hand side.
        cache_size: how many samples to draw per batch.
        random: a numpy RandomState; a fresh one is created if omitted.

        BUGFIX: the old default bound the stdlib ``random`` module, whose
        ``choice`` does not accept (population, size, replace, p), so any
        use of the default crashed in _sample; default to numpy instead.
        """
        if random is None:
            random = np.random.RandomState()
        self.cache_size = cache_size
        self.productions = [ prod for prod in pcfg.productions if prod[0] == nonterminal ]
        self.n = len(self.productions)
        self.nonterminal = nonterminal
        parameters = [ pcfg.parameters[prod] for prod in self.productions]
        # renormalise to guard against rounding drift
        self.p = np.array(parameters)/np.sum(parameters)
        self.rng = random
        self._sample()
    def _sample(self):
        # draw a fresh batch of production indices in one vectorised call
        self.cache = self.rng.choice(range(self.n), self.cache_size,True,self.p)
        self.cache_index = 0
    def sample_production(self):
        """
        Return one production (a tuple of length 2 or 3) drawn according
        to the conditional distribution for this nonterminal.
        """
        if self.cache_index >= self.cache_size:
            self._sample()
        result = self.productions[self.cache[self.cache_index]]
        self.cache_index += 1
        return result
class Sampler:
    """
    Draws random derivation trees and strings from a PCFG.
    """
    def __init__(self, pcfg, cache_size=SAMPLE_CACHE_SIZE, max_depth=SAMPLE_MAX_DEPTH, random=None):
        ## construct indices for sampling
        if random is None:
            random = numpy.random.RandomState()
        assert pcfg.is_normalised()
        ## Sort the nonterminals so runs are reproducible for a fixed seed.
        self.multinomials = {nt: Multinomial(pcfg, nt, cache_size, random)
                             for nt in sorted(pcfg.nonterminals)}
        self.start = pcfg.start
        self.max_depth = max_depth
        self.insider = inside.InsideComputation(pcfg)
        self.mypcfg = pcfg
    def sample_production(self, lhs):
        """Draw one production with left hand side ``lhs``."""
        return self.multinomials[lhs].sample_production()
    def sample_tree(self):
        """Sample a complete derivation tree from the start symbol."""
        return self._sample_tree(self.start, 0)
    def sample_string(self):
        """Sample a tree and return its yield."""
        return collect_yield(self.sample_tree())
    def _sample_tree(self, nonterminal, current_depth):
        """
        Recursively expand ``nonterminal``; raise ValueError when the
        derivation exceeds max_depth.
        """
        if current_depth >= self.max_depth:
            raise ValueError("Too deep")
        production = self.sample_production(nonterminal)
        if len(production) == 2:
            # lexical rule: the production tuple itself is the leaf
            return production
        next_depth = current_depth + 1
        return (nonterminal,
                self._sample_tree(production[1], next_depth),
                self._sample_tree(production[2], next_depth))
    def estimate_string_entropy(self, samples, max_length=50, verbose=False):
        """
        Estimate the string entropy and perplexity.
        """
        length_sum = 0.0
        sample_count = 0
        sum_lps = 0.0
        sum_lpt = 0.0
        sum_lpb = 0.0
        for _ in range(samples):
            tree = self.sample_tree()
            sentence = collect_yield(tree)
            sample_count += 1
            lps = self.insider.inside_log_probability(sentence)
            lpt = self.mypcfg.log_probability_derivation(tree)
            lpb = self.insider._bracketed_log_probability(tree)[self.start]
            length_sum += len(sentence)
            if verbose:
                print(sentence, lpt, lpb, lps)
            sum_lps += lps
            sum_lpt += lpt
            sum_lpb += lpb
        sentential_entropy = -sum_lps / sample_count
        perplexity = math.exp(-sum_lps / (length_sum + sample_count))
        print("SentenceEntropy %f WordPerplexity %f " % (sentential_entropy, perplexity))
| [
"math.exp",
"numpy.sum",
"numpy.eye",
"inside.InsideComputation",
"numpy.zeros",
"collections.defaultdict",
"numpy.array",
"collections.Counter",
"math.log"
] | [((19495, 19504), 'collections.Counter', 'Counter', ([], {}), '()\n', (19502, 19504), False, 'from collections import Counter\n'), ((4405, 4423), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4416, 4423), False, 'from collections import defaultdict\n'), ((4775, 4793), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (4786, 4793), False, 'from collections import defaultdict\n'), ((6078, 6096), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (6089, 6096), False, 'from collections import defaultdict\n'), ((6719, 6737), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (6730, 6737), False, 'from collections import defaultdict\n'), ((7059, 7077), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (7070, 7077), False, 'from collections import defaultdict\n'), ((8173, 8191), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (8184, 8191), False, 'from collections import defaultdict\n'), ((8602, 8632), 'inside.InsideComputation', 'inside.InsideComputation', (['self'], {}), '(self)\n', (8626, 8632), False, 'import inside\n'), ((9719, 9749), 'inside.InsideComputation', 'inside.InsideComputation', (['self'], {}), '(self)\n', (9743, 9749), False, 'import inside\n'), ((10326, 10356), 'inside.InsideComputation', 'inside.InsideComputation', (['self'], {}), '(self)\n', (10350, 10356), False, 'import inside\n'), ((10873, 10890), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10884, 10890), False, 'from collections import defaultdict\n'), ((11595, 11613), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (11606, 11613), False, 'from collections import defaultdict\n'), ((11623, 11634), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (11631, 11634), True, 'import numpy as np\n'), ((11836, 11847), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (11844, 11847), True, 'import numpy as np\n'), ((12754, 
12772), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (12765, 12772), False, 'from collections import defaultdict\n'), ((12880, 12898), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (12891, 12898), False, 'from collections import defaultdict\n'), ((13235, 13253), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (13246, 13253), False, 'from collections import defaultdict\n'), ((13782, 13798), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (13790, 13798), True, 'import numpy as np\n'), ((14726, 14742), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (14734, 14742), True, 'import numpy as np\n'), ((14759, 14775), 'numpy.zeros', 'np.zeros', (['[n, m]'], {}), '([n, m])\n', (14767, 14775), True, 'import numpy as np\n'), ((15494, 15512), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (15505, 15512), False, 'from collections import defaultdict\n'), ((16396, 16413), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16407, 16413), False, 'from collections import defaultdict\n'), ((17044, 17061), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (17055, 17061), False, 'from collections import defaultdict\n'), ((22683, 22713), 'inside.InsideComputation', 'inside.InsideComputation', (['pcfg'], {}), '(pcfg)\n', (22707, 22713), False, 'import inside\n'), ((24176, 24229), 'math.exp', 'math.exp', (['(-total_lps / (total_length + total_samples))'], {}), '(-total_lps / (total_length + total_samples))\n', (24184, 24229), False, 'import math\n'), ((4339, 4370), 'math.log', 'math.log', (['self.parameters[prod]'], {}), '(self.parameters[prod])\n', (4347, 4370), False, 'import math\n'), ((4715, 4730), 'math.log', 'math.log', (['param'], {}), '(param)\n', (4723, 4730), False, 'import math\n'), ((21089, 21100), 'math.log', 'math.log', (['p'], {}), '(p)\n', (21097, 21100), False, 'import math\n'), ((21672, 21692), 'numpy.array', 
'np.array', (['parameters'], {}), '(parameters)\n', (21680, 21692), True, 'import numpy as np\n'), ((21693, 21711), 'numpy.sum', 'np.sum', (['parameters'], {}), '(parameters)\n', (21699, 21711), True, 'import numpy as np\n'), ((6539, 6550), 'math.log', 'math.log', (['p'], {}), '(p)\n', (6547, 6550), False, 'import math\n'), ((6940, 6951), 'math.log', 'math.log', (['p'], {}), '(p)\n', (6948, 6951), False, 'import math\n'), ((7837, 7848), 'math.log', 'math.log', (['p'], {}), '(p)\n', (7845, 7848), False, 'import math\n'), ((10684, 10703), 'math.exp', 'math.exp', (['(lpd - lps)'], {}), '(lpd - lps)\n', (10692, 10703), False, 'import math\n'), ((12059, 12068), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (12065, 12068), True, 'import numpy as np\n'), ((6292, 6303), 'math.log', 'math.log', (['p'], {}), '(p)\n', (6300, 6303), False, 'import math\n'), ((7174, 7185), 'math.log', 'math.log', (['p'], {}), '(p)\n', (7182, 7185), False, 'import math\n'), ((8010, 8021), 'math.log', 'math.log', (['p'], {}), '(p)\n', (8018, 8021), False, 'import math\n'), ((8084, 8099), 'math.log', 'math.log', (['alpha'], {}), '(alpha)\n', (8092, 8099), False, 'import math\n'), ((9442, 9458), 'math.log', 'math.log', (['(e / le)'], {}), '(e / le)\n', (9450, 9458), False, 'import math\n'), ((14272, 14281), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (14278, 14281), True, 'import numpy as np\n'), ((15361, 15370), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (15367, 15370), True, 'import numpy as np\n')] |
import tensorflow as tf
tf_version = int((tf.__version__).split('.')[0])
if tf_version >= 2:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
#trying to fix the cuDNN issue
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
tf.config.experimental.set_memory_growth(device, True)
##end of fixing
import numpy as np
import time
from cell import CellsConfig
import mrcnn.model as modellib
import skimage.io
import sys
import skimage
from PIL import Image
#from libtiff import TIFF
import time
def generate_inference_model(model_path, cropsize):
    """
    Build a Mask R-CNN model in inference mode and load trained weights.

    Args:
        model_path: path to the trained .h5 weights file; also passed as
            the model directory for logs. Must be non-empty.
        cropsize: side length of the square patch inference runs on;
            sets IMAGE_MIN_DIM / IMAGE_MAX_DIM. NOTE(review): presumably
            needs to be a multiple of 256 per the resizing convention the
            original commented-out math.ceil code implies -- confirm.

    Returns:
        A modellib.MaskRCNN instance ready for detect() calls.
    """
    # (removed an unused function-local ``import math``; its only uses
    # were in commented-out IMAGE_*_DIM computations)
    class InferenceConfig(CellsConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        # Large ROI / NMS budgets: tuned for full-size patches rather
        # than small crops.
        TRAIN_ROIS_PER_IMAGE = 2000
        POST_NMS_ROIS_INFERENCE = 10000
        DETECTION_MAX_INSTANCES = 200
        IMAGE_MIN_DIM = cropsize
        IMAGE_MAX_DIM = cropsize
    inference_config = InferenceConfig()
    # Recreate the model in inference mode, pinned to the first GPU.
    DEVICE = '/device:GPU:0'
    with tf.device(DEVICE):
        model = modellib.MaskRCNN(mode="inference",
                                  config=inference_config,
                                  model_dir=model_path)
    # Load trained weights (fill in path to trained weights here)
    assert model_path != "", "Provide path to trained weights"
    model.load_weights(model_path, by_name=True)
    return model
def run_inference(model, image):
    """
    Run the model on a single image and return its mask stack
    (an array of shape [h, w, num_instances]).
    """
    started = time.time()
    detections = model.detect([image], verbose=1)
    elapsed = time.time() - started  # timing kept for optional debugging
    first_result = detections[0]
    return first_result['masks']
def mask_stack_to_single_image(masks, checkpoint_id):
    """
    Flatten a stack of binary instance masks into one labelled id image.

    Args:
        masks: np array of shape [h, w, n], one binary mask per instance
            (spatial size of the inference patch, not the full image).
        checkpoint_id: first integer id to assign; each instance gets the
            next consecutive id. Where masks overlap, later instances
            overwrite earlier ones.

    Returns:
        (image, next_id): uint16 array of shape [h, w] where 0 is
        background and >0 is an instance id, plus the next unused id.
    """
    # NOTE(review): the original docstring documented an ``image``
    # argument that never existed; this version documents the real
    # signature. Behavior is unchanged.
    image = np.zeros(masks.shape[0:2], dtype=np.uint16)
    # reorder to [n, h, w] so each instance is a leading slice
    masks = np.moveaxis(masks.astype(np.uint16), -1, 0)
    for instance_mask in masks:
        # empty masks still consume an id, matching the original loop
        image[instance_mask > 0] = checkpoint_id
        checkpoint_id += 1
    return image, checkpoint_id
def add_mask_to_ids(image, mask, fill_int):
    """
    Write fill_int into every pixel of ``image`` where ``mask`` is set
    and the image is still background (0), so already-assigned instance
    ids are never overwritten.

    Mutates ``image`` in place and returns it.
    """
    # Vectorised form of the original per-pixel np.ndenumerate loop:
    # one boolean-mask assignment in C instead of a Python-level scan.
    image[(mask != 0) & (image == 0)] = fill_int
    return image
def pad(arrays, reference, offsets):
    """Place *arrays* into a zero array of shape *reference* at *offsets*.

    Args:
        arrays: array to be padded (embedded).
        reference: sequence giving the desired 2-D output shape
            (rows, cols).
        offsets: per-dimension insertion offsets; number of elements must
            equal ``arrays.ndim``.

    Returns:
        uint16 array of shape (reference[0], reference[1]) containing
        *arrays* at the given offsets and zeros elsewhere.
    """
    result = np.zeros((reference[0], reference[1]), dtype=np.uint16)
    # Bug fix: indexing with a *list* of slices is rejected by modern
    # NumPy (non-tuple multidimensional indices were removed); the index
    # must be a tuple of slices.
    insert_here = tuple(
        slice(offsets[dim], offsets[dim] + arrays.shape[dim])
        for dim in range(arrays.ndim)
    )
    result[insert_here] = arrays
    return result
def stitched_inference(image, cropsize, model, padding=40):
    """Run inference on overlapping square crops of *image* and stack the results.

    The image is tiled with ``cropsize`` x ``cropsize`` windows stepping by
    ``cropsize - padding`` so neighbouring crops overlap by ``padding``
    pixels; windows hanging over the edge are shifted back inside.  Each
    crop's instance masks are flattened to a labelled image (ids are unique
    across the whole run) and appended along axis 2.

    Args:
        image: full field-of-view array of shape (rows, cols, channels).
        cropsize: side length of the crop fed to the model; must be smaller
            than both image dimensions.
        model: detector passed through to ``run_inference``.
        padding: overlap in pixels between neighbouring crops.

    Returns:
        Tuple of (stack, num_times_visited): ``stack`` holds one labelled
        mask image per crop along axis 2 (slice 0 is an all-zero seed);
        ``num_times_visited`` counts how many crops covered each pixel.
    """
    # Removed unused locals of the original: ``visited`` and the per-crop
    # ``masks_with_ids`` were allocated but never read.
    stack = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.uint16)
    num_times_visited = np.zeros(image.shape[0:2])
    num_row = image.shape[0]
    num_col = image.shape[1]
    assert cropsize < num_row and cropsize < num_col, \
        'cropsize must be smaller than the image dimensions'
    checkpoint_id = 1
    for row in np.arange(0, num_row, cropsize - padding):
        for col in np.arange(0, num_col, cropsize - padding):
            upperbound = row
            lowerbound = row + cropsize
            leftbound = col
            rightbound = col + cropsize
            # clamp the window to the image by shifting it back inside
            if lowerbound > num_row:
                lowerbound = num_row
                upperbound = num_row - cropsize
            if rightbound > num_col:
                rightbound = num_col
                leftbound = num_col - cropsize
            num_times_visited[upperbound:lowerbound, leftbound:rightbound] += 1
            cropped_image = image[upperbound:lowerbound, leftbound:rightbound, :]
            masks = run_inference(model, cropped_image)
            one_inference_mask_image, checkpoint_id = \
                mask_stack_to_single_image(masks, checkpoint_id)
            padded_inference_mask = pad(one_inference_mask_image,
                                        [num_row, num_col],
                                        [upperbound, leftbound])
            padded_inference_mask = np.expand_dims(padded_inference_mask, axis=2)
            stack = np.concatenate((stack, padded_inference_mask), axis=2)
    return stack, num_times_visited
class CleanMask():
    """Merge the per-crop mask stack from ``stitched_inference`` into one
    labelled mask image with a unique id per cell.

    Pixels covered by several inference crops "vote" for merging the id
    sets observed there; sets supported by at least ``threshold`` pixels
    receive a single unique id.
    """

    def __init__(self, stack, threshold, num_times_visited):
        # stack of labelled mask images, shape (rows, cols, n_inferences)
        self.stack = stack
        # per-pixel count of how many inference crops covered that pixel
        self.num_times_visited = num_times_visited
        self.num_row = self.stack.shape[0]
        self.num_col = self.stack.shape[1]
        # merged output image: 0 = background, >0 = unique cell id
        self.masks = np.zeros((self.num_row, self.num_col), dtype=np.uint16)
        # Bug fix: visited() reads this attribute but its initialisation
        # had been commented out, so any call raised AttributeError.
        self.visitedPoints = np.zeros((self.num_row, self.num_col))
        self.id = 1
        # overlap counts for id sets seen on multi-inference pixels
        self.dict = {}
        # id set -> final unique cell id
        self.unique = {}
        self.cells = {}
        self.threshold = threshold

    def getMasks(self):
        """Return the merged (cleaned) mask image."""
        return self.masks

    def visited(self, row, col):
        """Return whether pixel (row, col) has been marked as visited."""
        # Returns an explicit boolean instead of the original True/None.
        return self.visitedPoints[row, col] == 1

    def get_ids(self, array):
        """Return the set of non-zero ids in *array* as a frozenset."""
        ids = set()
        for value in array:
            if value > 0:
                ids.add(value)
        return frozenset(ids)

    def at_least_one(self, values, ids):
        """Return True if any element of *values* occurs in *ids*."""
        for value in values:
            if value in ids:
                return True
        return False

    def build_connectivity_matrix(self):
        """Build the symmetric cell-id connectivity (overlap-count) matrix.

        After this call, ``self.conn_mat[i, j]`` holds the number of pixels
        on which ids i and j were both assigned (by different inference
        crops).
        """
        start = time.time()
        n_labels = np.max(self.stack)
        self.conn_mat = np.zeros((n_labels + 1, n_labels + 1))
        cells = self.stack > 0
        # pixels covered by more than one inference are merge candidates
        n_inference = np.sum(cells, 2)
        to_connect = (n_inference > 1)
        indexes = np.nonzero(to_connect)
        # Bug fix: the original referenced the undefined global ``stack``.
        to_process = self.stack[indexes[0], indexes[1], :]
        for pix in range(0, to_process.shape[0]):
            # by profiling, working with a list was faster than numpy arrays
            ids = np.unique(to_process[pix, :]).tolist()
            if 0 in ids:
                ids.remove(0)
            ids_size = len(ids)
            for i in range(ids_size - 1):
                for j in range(i + 1, ids_size):
                    self.conn_mat[ids[i], ids[j]] += 1
        # The connectivity matrix is symmetric; use a non-in-place add to
        # avoid aliasing against the transposed view.
        self.conn_mat = self.conn_mat + self.conn_mat.transpose()
        end = time.time()

    def merge_cells(self):
        """Group strongly connected cell ids across inferences.

        Connections weaker than ``self.threshold`` are dropped before the
        connected-components computation.

        Returns:
            Tuple of (number of components, per-id component labels).
        """
        self.build_connectivity_matrix()
        # Bug fix: the original referenced the nonexistent attribute
        # ``self.conn_matrix`` and the undefined name ``conn_matrix``.
        self.conn_mat[self.conn_mat < self.threshold] = 0
        # every id is trivially connected to itself
        np.fill_diagonal(self.conn_mat, 1)
        from scipy.sparse.csgraph import connected_components
        n_conn_comp, graph_labels = connected_components(self.conn_mat, False)
        return n_conn_comp, graph_labels

    def cleanup(self):
        """Assign one unique id per cell and write the merged mask image."""
        # count id-set occurrences on pixels covered by >1 inference
        for row in range(self.num_row):
            for col in range(self.num_col):
                if self.num_times_visited[row, col] > 1:
                    set_of_ids = self.get_ids(self.stack[row, col, :])
                    if len(set_of_ids) > 1:
                        if set_of_ids not in self.dict.keys():
                            self.dict[set_of_ids] = 0
                        self.dict[set_of_ids] += 1
        # count id-set occurrences on pixels covered by exactly one inference
        oneinf = {}
        for row in range(self.num_row):
            for col in range(self.num_col):
                if self.num_times_visited[row, col] == 1:
                    set_of_ids = self.get_ids(self.stack[row, col, :])
                    if len(set_of_ids) > 0:
                        if set_of_ids not in oneinf.keys():
                            oneinf[set_of_ids] = 0
                        oneinf[set_of_ids] += 1
        # single-inference sets with enough support get their own id
        for set_of_ids in oneinf:
            if oneinf[set_of_ids] >= self.threshold:
                self.unique[set_of_ids] = self.id
                self.id += 1
        # merge multi-id sets that overlap strongly enough; their member
        # ids inherit the merged id
        for set_of_ids in self.dict:
            if len(set_of_ids) > 1 and self.dict[set_of_ids] >= self.threshold:
                self.unique[set_of_ids] = self.id
                self.id += 1
                for cell_id in set_of_ids:
                    temp = frozenset([cell_id])
                    if temp in oneinf.keys():
                        if oneinf[temp] >= self.threshold:
                            self.unique[temp] = self.unique[set_of_ids]
        # relabel every pixel with its unique id
        for row in range(self.num_row):
            for col in range(self.num_col):
                set_of_ids = self.get_ids(self.stack[row, col, :])
                if len(set_of_ids) > 0:
                    if set_of_ids in self.unique.keys():
                        self.masks[row, col] = self.unique[set_of_ids]

    def save(self, save_path):
        """Persist the raw inference stack as 'inference-stack.npy'.

        Note: *save_path* is currently unused — TIFF export of the merged
        masks was disabled by an early return in the original code; the
        unreachable TIFF code (which referenced an undefined ``TIFF``
        name) has been removed.
        """
        np.save('inference-stack', self.stack)
        return
def map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):
    """Map a 16-bit image through a lookup table to convert it to 8-bit.

    Parameters
    ----------
    img: numpy.ndarray[np.uint16]
        image that should be mapped
    lower_bound: int, optional
        lower bound of the range mapped to ``[0, 255]``; must lie in
        ``[0, 65535]`` (defaults to ``numpy.min(img)``)
    upper_bound: int, optional
        upper bound of the range mapped to ``[0, 255]``; must lie in
        ``[0, 65535]`` (defaults to ``numpy.max(img)``)

    Returns
    -------
    numpy.ndarray[uint8]
    """
    lower_bound = np.min(img) if lower_bound is None else lower_bound
    if not (0 <= lower_bound < 2 ** 16):
        raise ValueError(
            '"lower_bound" must be in the range [0, 65535]')
    upper_bound = np.max(img) if upper_bound is None else upper_bound
    if not (0 <= upper_bound < 2 ** 16):
        raise ValueError(
            '"upper_bound" must be in the range [0, 65535]')
    # Build the 65536-entry lookup table in three named pieces:
    # flat zero below the window, a linear ramp inside it, flat 255 above.
    below = np.zeros(lower_bound, dtype=np.uint16)
    ramp = np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16)
    above = np.ones(2 ** 16 - upper_bound, dtype=np.uint16) * 255
    lut = np.concatenate([below, ramp, above])
    return lut[img].astype(np.uint8)
def preprocess(img):
    """Load an image file and prepare it for inference.

    Opens *img* from disk, expands the (grayscale) data to three RGB
    channels, and rescales the 16-bit intensities to 8-bit.

    Args:
        img: path to the image file.

    Returns:
        8-bit RGB numpy array.
    """
    raw = np.array(Image.open(img))
    rgb = skimage.color.gray2rgb(raw)
    return map_uint16_to_uint8(rgb)
| [
"numpy.moveaxis",
"numpy.sum",
"numpy.ones",
"tensorflow.compat.v1.__version__.split",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"mrcnn.model.MaskRCNN",
"numpy.unique",
"tensorflow.compat.v1.config.experimental.set_memory_growth",
"numpy.max",
"numpy.linspace",
"tensorflow.c... | [((202, 228), 'tensorflow.compat.v1.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (226, 228), True, 'import tensorflow.compat.v1 as tf\n'), ((275, 310), 'tensorflow.compat.v1.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (295, 310), True, 'import tensorflow.compat.v1 as tf\n'), ((326, 377), 'tensorflow.compat.v1.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (370, 377), True, 'import tensorflow.compat.v1 as tf\n'), ((135, 159), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (157, 159), True, 'import tensorflow.compat.v1 as tf\n'), ((413, 467), 'tensorflow.compat.v1.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (453, 467), True, 'import tensorflow.compat.v1 as tf\n'), ((2490, 2501), 'time.time', 'time.time', ([], {}), '()\n', (2499, 2501), False, 'import time\n'), ((2559, 2570), 'time.time', 'time.time', ([], {}), '()\n', (2568, 2570), False, 'import time\n'), ((3276, 3319), 'numpy.zeros', 'np.zeros', (['masks.shape[0:2]'], {'dtype': 'np.uint16'}), '(masks.shape[0:2], dtype=np.uint16)\n', (3284, 3319), True, 'import numpy as np\n'), ((3531, 3556), 'numpy.moveaxis', 'np.moveaxis', (['masks', '(-1)', '(0)'], {}), '(masks, -1, 0)\n', (3542, 3556), True, 'import numpy as np\n'), ((4326, 4346), 'numpy.ndenumerate', 'np.ndenumerate', (['mask'], {}), '(mask)\n', (4340, 4346), True, 'import numpy as np\n'), ((4761, 4816), 'numpy.zeros', 'np.zeros', (['(reference[0], reference[1])'], {'dtype': 'np.uint16'}), '((reference[0], reference[1]), dtype=np.uint16)\n', (4769, 4816), True, 'import numpy as np\n'), ((5488, 5550), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], 1)'], {'dtype': 'np.uint16'}), '((image.shape[0], image.shape[1], 1), dtype=np.uint16)\n', (5496, 5550), 
True, 'import numpy as np\n'), ((5630, 5656), 'numpy.zeros', 'np.zeros', (['image.shape[0:2]'], {}), '(image.shape[0:2])\n', (5638, 5656), True, 'import numpy as np\n'), ((5681, 5707), 'numpy.zeros', 'np.zeros', (['image.shape[0:2]'], {}), '(image.shape[0:2])\n', (5689, 5707), True, 'import numpy as np\n'), ((6145, 6186), 'numpy.arange', 'np.arange', (['(0)', 'num_row', '(cropsize - padding)'], {}), '(0, num_row, cropsize - padding)\n', (6154, 6186), True, 'import numpy as np\n'), ((15220, 15235), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (15230, 15235), False, 'from PIL import Image\n'), ((15250, 15265), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15258, 15265), True, 'import numpy as np\n'), ((15278, 15309), 'skimage.color.gray2rgb', 'skimage.color.gray2rgb', (['imarray'], {}), '(imarray)\n', (15300, 15309), False, 'import skimage\n'), ((41, 66), 'tensorflow.compat.v1.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (61, 66), True, 'import tensorflow.compat.v1 as tf\n'), ((1610, 1627), 'tensorflow.compat.v1.device', 'tf.device', (['DEVICE'], {}), '(DEVICE)\n', (1619, 1627), True, 'import tensorflow.compat.v1 as tf\n'), ((1645, 1732), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'model_path'}), "(mode='inference', config=inference_config, model_dir=\n model_path)\n", (1662, 1732), True, 'import mrcnn.model as modellib\n'), ((6246, 6287), 'numpy.arange', 'np.arange', (['(0)', 'num_col', '(cropsize - padding)'], {}), '(0, num_col, cropsize - padding)\n', (6255, 6287), True, 'import numpy as np\n'), ((8859, 8914), 'numpy.zeros', 'np.zeros', (['(self.num_row, self.num_col)'], {'dtype': 'np.uint16'}), '((self.num_row, self.num_col), dtype=np.uint16)\n', (8867, 8914), True, 'import numpy as np\n'), ((9781, 9792), 'time.time', 'time.time', ([], {}), '()\n', (9790, 9792), False, 'import time\n'), ((9812, 9830), 'numpy.max', 'np.max', 
(['self.stack'], {}), '(self.stack)\n', (9818, 9830), True, 'import numpy as np\n'), ((9856, 9894), 'numpy.zeros', 'np.zeros', (['(n_labels + 1, n_labels + 1)'], {}), '((n_labels + 1, n_labels + 1))\n', (9864, 9894), True, 'import numpy as np\n'), ((10010, 10026), 'numpy.sum', 'np.sum', (['cells', '(2)'], {}), '(cells, 2)\n', (10016, 10026), True, 'import numpy as np\n'), ((10083, 10105), 'numpy.nonzero', 'np.nonzero', (['to_connect'], {}), '(to_connect)\n', (10093, 10105), True, 'import numpy as np\n'), ((10724, 10735), 'time.time', 'time.time', ([], {}), '()\n', (10733, 10735), False, 'import time\n'), ((11193, 11230), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.conn_matrix', '(1)'], {}), '(self.conn_matrix, 1)\n', (11209, 11230), True, 'import numpy as np\n'), ((11351, 11391), 'scipy.sparse.csgraph.connected_components', 'connected_components', (['conn_matrix', '(False)'], {}), '(conn_matrix, False)\n', (11371, 11391), False, 'from scipy.sparse.csgraph import csgraph_from_dense, connected_components\n'), ((13509, 13547), 'numpy.save', 'np.save', (['"""inference-stack"""', 'self.stack'], {}), "('inference-stack', self.stack)\n", (13516, 13547), True, 'import numpy as np\n'), ((14476, 14487), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (14482, 14487), True, 'import numpy as np\n'), ((14663, 14674), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (14669, 14674), True, 'import numpy as np\n'), ((6348, 6374), 'numpy.zeros', 'np.zeros', (['image.shape[0:2]'], {}), '(image.shape[0:2])\n', (6356, 6374), True, 'import numpy as np\n'), ((8323, 8368), 'numpy.expand_dims', 'np.expand_dims', (['padded_inference_mask'], {'axis': '(2)'}), '(padded_inference_mask, axis=2)\n', (8337, 8368), True, 'import numpy as np\n'), ((8389, 8443), 'numpy.concatenate', 'np.concatenate', (['(stack, padded_inference_mask)'], {'axis': '(2)'}), '((stack, padded_inference_mask), axis=2)\n', (8403, 8443), True, 'import numpy as np\n'), ((14967, 15005), 'numpy.zeros', 'np.zeros', 
(['lower_bound'], {'dtype': 'np.uint16'}), '(lower_bound, dtype=np.uint16)\n', (14975, 15005), True, 'import numpy as np\n'), ((15089, 15136), 'numpy.ones', 'np.ones', (['(2 ** 16 - upper_bound)'], {'dtype': 'np.uint16'}), '(2 ** 16 - upper_bound, dtype=np.uint16)\n', (15096, 15136), True, 'import numpy as np\n'), ((10313, 10340), 'numpy.unique', 'np.unique', (['to_process[i, :]'], {}), '(to_process[i, :])\n', (10322, 10340), True, 'import numpy as np\n'), ((15015, 15061), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', '(upper_bound - lower_bound)'], {}), '(0, 255, upper_bound - lower_bound)\n', (15026, 15061), True, 'import numpy as np\n')] |
""" Unit tests for visibility operations
"""
import sys
import unittest
import logging
import numpy
from data_models.parameters import arl_path
from data_models.polarisation import PolarisationFrame
from processing_components.visibility.base import create_blockvisibility_from_uvfits, create_visibility_from_uvfits
from processing_components.visibility.operations import integrate_visibility_by_channel
from processing_components.imaging.base import invert_2d, create_image_from_visibility
from processing_components.visibility.coalesce import convert_visibility_to_blockvisibility, \
convert_blockvisibility_to_visibility
from processing_components.image.operations import export_image_to_fits
# Module-level logger emitting everything from DEBUG up.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# NOTE(review): attaching handlers to both stdout and stderr makes every
# record appear twice — presumably intentional for this test harness so
# output shows up whichever stream the runner captures; confirm.
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestCreateMS(unittest.TestCase):
    """Unit tests for reading visibilities from UVFITS files."""

    def setUp(self):
        # destination directory for optionally persisted test artefacts
        self.dir = arl_path('test_results')
        # set to True to export the dirty-image FITS file in test_invert
        self.persist = False
        return

    # def test_create_list(self):
    #     uvfitsfile = arl_path("data/vis/xcasa.fits")
    #     self.vis = create_blockvisibility_from_uvfits(uvfitsfile)
    #     for v in self.vis:
    #         assert v.vis.data.shape[-1] == 4
    #         assert v.polarisation_frame.type == "circular"

    def test_create_list_spectral(self):
        """Read the ASKAP example in 16-channel chunks as Visibility lists."""
        uvfitsfile = arl_path("data/vis/ASKAP_example.fits")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_visibility_from_uvfits(uvfitsfile, range(schan, max_chan))
            vis_by_channel.append(v[0])

        # 192 channels / 16 per chunk = 12 visibilities
        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "linear"

    def test_create_list_spectral_average(self):
        """Read chunks as BlockVisibility and integrate over frequency."""
        uvfitsfile = arl_path("data/vis/ASKAP_example.fits")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_blockvisibility_from_uvfits(uvfitsfile, range(schan, max_chan))
            vis_by_channel.append(integrate_visibility_by_channel(v[0]))

        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            # channel integration collapses the frequency axis to length 1
            assert v.vis.data.shape[-2] == 1
            assert v.polarisation_frame.type == "linear"

    def test_invert(self):
        """Invert each 32-channel chunk to a dirty image and sanity-check it."""
        uvfitsfile = arl_path("data/vis/ASKAP_example.fits")
        nchan_ave = 32
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            bv = create_blockvisibility_from_uvfits(uvfitsfile, range(schan, max_chan))[0]
            vis = convert_blockvisibility_to_visibility(bv)
            from processing_components.visibility.operations import convert_visibility_to_stokesI
            vis = convert_visibility_to_stokesI(vis)
            model = create_image_from_visibility(vis, npixel=256, polarisation_frame=PolarisationFrame('stokesI'))
            dirty, sumwt = invert_2d(vis, model, context='2d')
            # the dirty image must contain signal and have the expected shape
            assert (numpy.max(numpy.abs(dirty.data))) > 0.0
            assert dirty.shape == (nchan_ave, 1, 256, 256)
            import matplotlib.pyplot as plt
            from processing_components.image.operations import show_image
            show_image(dirty)
            plt.show()

            if self.persist: export_image_to_fits(dirty, '%s/test_visibility_uvfits_dirty.fits' % self.dir)
if __name__ == '__main__':
    # run the test suite when executed as a script
    unittest.main()
| [
"unittest.main",
"processing_components.image.operations.export_image_to_fits",
"matplotlib.pyplot.show",
"numpy.abs",
"data_models.polarisation.PolarisationFrame",
"data_models.parameters.arl_path",
"logging.StreamHandler",
"processing_components.visibility.coalesce.convert_blockvisibility_to_visibil... | [((723, 750), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (740, 750), False, 'import logging\n'), ((795, 828), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (816, 828), False, 'import logging\n'), ((845, 878), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (866, 878), False, 'import logging\n'), ((3813, 3828), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3826, 3828), False, 'import unittest\n'), ((966, 990), 'data_models.parameters.arl_path', 'arl_path', (['"""test_results"""'], {}), "('test_results')\n", (974, 990), False, 'from data_models.parameters import arl_path\n'), ((1429, 1468), 'data_models.parameters.arl_path', 'arl_path', (['"""data/vis/ASKAP_example.fits"""'], {}), "('data/vis/ASKAP_example.fits')\n", (1437, 1468), False, 'from data_models.parameters import arl_path\n'), ((2042, 2081), 'data_models.parameters.arl_path', 'arl_path', (['"""data/vis/ASKAP_example.fits"""'], {}), "('data/vis/ASKAP_example.fits')\n", (2050, 2081), False, 'from data_models.parameters import arl_path\n'), ((2708, 2747), 'data_models.parameters.arl_path', 'arl_path', (['"""data/vis/ASKAP_example.fits"""'], {}), "('data/vis/ASKAP_example.fits')\n", (2716, 2747), False, 'from data_models.parameters import arl_path\n'), ((3011, 3052), 'processing_components.visibility.coalesce.convert_blockvisibility_to_visibility', 'convert_blockvisibility_to_visibility', (['bv'], {}), '(bv)\n', (3048, 3052), False, 'from processing_components.visibility.coalesce import convert_visibility_to_blockvisibility, convert_blockvisibility_to_visibility\n'), ((3169, 3203), 'processing_components.visibility.operations.convert_visibility_to_stokesI', 'convert_visibility_to_stokesI', (['vis'], {}), '(vis)\n', (3198, 3203), False, 'from processing_components.visibility.operations import 
convert_visibility_to_stokesI\n'), ((3346, 3381), 'processing_components.imaging.base.invert_2d', 'invert_2d', (['vis', 'model'], {'context': '"""2d"""'}), "(vis, model, context='2d')\n", (3355, 3381), False, 'from processing_components.imaging.base import invert_2d, create_image_from_visibility\n'), ((3631, 3648), 'processing_components.image.operations.show_image', 'show_image', (['dirty'], {}), '(dirty)\n', (3641, 3648), False, 'from processing_components.image.operations import show_image\n'), ((3661, 3671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3669, 3671), True, 'import matplotlib.pyplot as plt\n'), ((2389, 2426), 'processing_components.visibility.operations.integrate_visibility_by_channel', 'integrate_visibility_by_channel', (['v[0]'], {}), '(v[0])\n', (2420, 2426), False, 'from processing_components.visibility.operations import integrate_visibility_by_channel\n'), ((3701, 3779), 'processing_components.image.operations.export_image_to_fits', 'export_image_to_fits', (['dirty', "('%s/test_visibility_uvfits_dirty.fits' % self.dir)"], {}), "(dirty, '%s/test_visibility_uvfits_dirty.fits' % self.dir)\n", (3721, 3779), False, 'from processing_components.image.operations import export_image_to_fits\n'), ((3289, 3317), 'data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (3306, 3317), False, 'from data_models.polarisation import PolarisationFrame\n'), ((3412, 3433), 'numpy.abs', 'numpy.abs', (['dirty.data'], {}), '(dirty.data)\n', (3421, 3433), False, 'import numpy\n')] |
from comet_ml import Experiment
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
# Normalise CIFAR-10 images from [0, 1] to [-1, 1] per RGB channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Training set/loader (downloads CIFAR-10 under ./data on first use).
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=500,
                         shuffle=True, num_workers=2)

# Held-out test set/loader; order fixed (shuffle=False) for evaluation.
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = DataLoader(testset, batch_size=500,
                        shuffle=False, num_workers=2)

# Human-readable names for the ten CIFAR-10 class indices.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def imshow(img):
    """Display a normalised CHW tensor image with matplotlib.

    Undoes the Normalize((0.5, ...), (0.5, ...)) transform, converts the
    tensor to an HWC numpy array, and shows it.
    """
    unnormalised = img / 2 + 0.5
    as_array = unnormalised.numpy()
    plt.imshow(np.transpose(as_array, (1, 2, 0)))
    plt.show()
# Peek at one random training batch.
dataiter = iter(trainloader)
# Bug fix: DataLoader iterators no longer expose a .next() method
# (removed in PyTorch 1.13; Python 3 iterators use the builtin next()).
images, labels = next(dataiter)

# show images
# imshow(torchvision.utils.make_grid(images))
# print labels
# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

# Number of batches per epoch (not the number of samples).
print("Dataset size = ", len(trainloader))
class Net(nn.Module):
    """Depthwise-separable CNN for CIFAR-10 (3x32x32 input, 10 logits out).

    Architecture used by ``forward``: depthwise 5x5 conv -> pointwise 1x1
    conv -> ReLU -> 2x2 max-pool, repeated twice, then three fully
    connected layers.  ``conv1``, ``conv2`` and ``test1x1`` are leftovers
    from earlier experiments and are not used in ``forward``; they are
    kept so state_dict keys and the optimiser's parameter list stay
    unchanged.
    """

    def __init__(self):
        super(Net, self).__init__()
        nin = 3
        kernels_per_layer = 6
        self.conv1 = nn.Conv2d(3, 6, 5)  # unused; kept for checkpoint compatibility
        # depthwise: one group per input channel, 6 kernels each -> 18 maps
        self.depthwise = nn.Conv2d(nin, nin * kernels_per_layer, kernel_size=5, groups=nin)
        # pointwise 1x1 conv mixing the 18 depthwise maps into 21 channels
        self.oneXoneDepthSeparable = nn.Conv2d(18, 21, kernel_size=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.test1x1 = nn.Conv2d(3, 1, kernel_size=1)  # unused

        # nin2 = 6*7/2 = 21 channels entering the second separable stage
        nin2 = int((kernels_per_layer * (kernels_per_layer + 1)) / 2)
        kernels_per_layer2 = 16
        self.conv2 = nn.Conv2d(6, 16, 5)  # unused; kept for checkpoint compatibility
        self.depthwise2 = nn.Conv2d(nin2, nin2 * kernels_per_layer2, kernel_size=5, groups=nin2)
        self.oneXoneDepthSeparable2 = nn.Conv2d(336, 136, kernel_size=1)

        # classifier head: 136 maps of 5x5 after two conv/pool stages
        self.fc1 = nn.Linear(136 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def permuted_oneXone_conv(self, x, nin):
        """Experimental permuted 1x1 convolution (NOT used by forward()).

        WARNING: this instantiates a fresh, randomly initialised
        ``nn.Conv2d`` on every inner-loop iteration, so its weights are
        untrained and invisible to the optimiser; it also depends on the
        module-level ``device`` global.  Kept for reference only.
        """
        num_filters = x.shape[1] / nin
        print("Num filters", int(num_filters))
        x_in = torch.zeros((x.shape[0], nin, x.shape[2], x.shape[3]))
        x_out = torch.zeros(x.shape[0], int((num_filters * (num_filters + 1)) / 2),
                            x.shape[2], x.shape[3])
        x_out = x_out.to(device)
        count = 0
        # pair channel-groups (i, j) with j >= i -> n*(n+1)/2 combinations
        for i in range(int(num_filters)):
            for j in range(i, int(num_filters)):
                x_in[:, 0, :, :] = x[:, j * nin, :, :]
                x_in[:, 1:, :, :] = x[:, (i * nin) + 1:(i + 1) * nin, :, :]
                x_out_channel = nn.Conv2d(nin, 1, kernel_size=1)(x_in)
                x_out[:, count, :, :] = x_out_channel
                count = count + 1
        return x_out

    def forward(self, x):
        """Compute class logits for a batch of 3x32x32 images.

        Args:
            x: tensor of shape (batch, 3, 32, 32).

        Returns:
            Logits tensor of shape (batch, 10).
        """
        # stage 1: 3x32x32 -> 18x28x28 -> 21x28x28 -> 21x14x14
        x = self.pool(F.relu(self.oneXoneDepthSeparable(self.depthwise(x))))
        # stage 2: 21x14x14 -> 336x10x10 -> 136x10x10 -> 136x5x5
        x = self.pool(F.relu(self.oneXoneDepthSeparable2(self.depthwise2(x))))
        x = x.view(-1, 136 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Select GPU if available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)

net = Net()
net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(500):  # loop over the dataset multiple times

    running_loss = 0.0
    print("Epoch", epoch)
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data[0].to(device), data[1].to(device)
        # NOTE(review): anomaly detection adds significant overhead on every
        # step; it is normally enabled once while debugging a NaN — confirm
        # it is meant to run inside the training loop.
        torch.autograd.set_detect_anomaly(True)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        # NOTE(review): with batch_size=500 there are only 100 batches per
        # epoch, so i % 2000 == 0 fires only at i == 0 and the division by
        # 2000 does not match the number of accumulated batches — presumably
        # copied from a batch_size=4 tutorial; confirm intended behaviour.
        running_loss += loss.item()
        if i % 2000 == 0:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch, i, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')

# persist the trained weights
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
# Qualitative check: show one test batch with ground-truth labels.
dataiter = iter(testloader)
# Bug fix: DataLoader iterators no longer expose a .next() method
# (removed in PyTorch 1.13); use the builtin next() instead.
images, labels = next(dataiter)

# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

# Reload the trained weights into a fresh (CPU) network; evaluation below
# keeps both model and data on CPU, so no .to(device) is needed.
net = Net()
net.load_state_dict(torch.load(PATH))

outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                              for j in range(4)))

# Overall accuracy on the full test set.
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

# Per-class accuracy.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        # Bug fix: the original tallied only the first 4 samples of each
        # 500-sample batch (range(4), copied from a batch_size=4 tutorial),
        # badly under-counting; tally the whole batch instead.
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
classes[i], 100 * class_correct[i] / class_total[i])) | [
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss",
"numpy.transpose",
"torch.zeros",
"torchvision.datasets.CIFAR10",
"torchvision.utils.make_grid",
"torch.max",
"torch.cuda.is_available",
"torch.autograd.set_detect_anomaly",
... | [((417, 512), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (445, 512), False, 'import torchvision\n'), ((563, 628), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': '(500)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=500, shuffle=True, num_workers=2)\n', (573, 628), False, 'from torch.utils.data import DataLoader\n'), ((682, 778), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (710, 778), False, 'import torchvision\n'), ((827, 892), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': '(500)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=500, shuffle=False, num_workers=2)\n', (837, 892), False, 'from torch.utils.data import DataLoader\n'), ((7358, 7379), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7377, 7379), True, 'import torch.nn as nn\n'), ((8639, 8660), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (8648, 8660), False, 'import torch\n'), ((1172, 1182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1180, 1182), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8469), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (8461, 8469), False, 'import torchvision\n'), ((8583, 8599), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (8593, 8599), False, 'import torch\n'), ((8800, 8815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8813, 8815), False, 'import torch\n'), ((9230, 9245), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9243, 9245), False, 'import torch\n'), 
((320, 341), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (339, 341), True, 'import torchvision.transforms as transforms\n'), ((348, 402), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (368, 402), True, 'import torchvision.transforms as transforms\n'), ((1136, 1166), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (1148, 1166), True, 'import numpy as np\n'), ((1615, 1633), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)'], {}), '(3, 6, 5)\n', (1624, 1633), True, 'import torch.nn as nn\n'), ((1660, 1726), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', '(nin * kernels_per_layer)'], {'kernel_size': '(5)', 'groups': 'nin'}), '(nin, nin * kernels_per_layer, kernel_size=5, groups=nin)\n', (1669, 1726), True, 'import torch.nn as nn\n'), ((1764, 1796), 'torch.nn.Conv2d', 'nn.Conv2d', (['(18)', '(21)'], {'kernel_size': '(1)'}), '(18, 21, kernel_size=1)\n', (1773, 1796), True, 'import torch.nn as nn\n'), ((1818, 1836), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1830, 1836), True, 'import torch.nn as nn\n'), ((1863, 1893), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)'], {'kernel_size': '(1)'}), '(3, 1, kernel_size=1)\n', (1872, 1893), True, 'import torch.nn as nn\n'), ((2014, 2033), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (2023, 2033), True, 'import torch.nn as nn\n'), ((2060, 2130), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin2', '(nin2 * kernels_per_layer2)'], {'kernel_size': '(5)', 'groups': 'nin2'}), '(nin2, nin2 * kernels_per_layer2, kernel_size=5, groups=nin2)\n', (2069, 2130), True, 'import torch.nn as nn\n'), ((2169, 2203), 'torch.nn.Conv2d', 'nn.Conv2d', (['(336)', '(136)'], {'kernel_size': '(1)'}), '(336, 136, kernel_size=1)\n', (2178, 2203), True, 'import torch.nn as nn\n'), ((2224, 2251), 'torch.nn.Linear', 'nn.Linear', (['(136 * 5 * 
5)', '(120)'], {}), '(136 * 5 * 5, 120)\n', (2233, 2251), True, 'import torch.nn as nn\n'), ((2267, 2285), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (2276, 2285), True, 'import torch.nn as nn\n'), ((2305, 2322), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (2314, 2322), True, 'import torch.nn as nn\n'), ((2469, 2523), 'torch.zeros', 'torch.zeros', (['(x.shape[0], nin, x.shape[2], x.shape[3])'], {}), '((x.shape[0], nin, x.shape[2], x.shape[3]))\n', (2480, 2523), False, 'import torch\n'), ((7191, 7216), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7214, 7216), False, 'import torch\n'), ((7738, 7777), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (7771, 7777), False, 'import torch\n'), ((8928, 8954), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (8937, 8954), False, 'import torch\n'), ((9358, 9379), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (9367, 9379), False, 'import torch\n'), ((6642, 6651), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6648, 6651), True, 'import torch.nn.functional as F\n'), ((7007, 7016), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (7013, 7016), True, 'import torch.nn.functional as F\n'), ((3136, 3168), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', '(1)'], {'kernel_size': '(1)'}), '(nin, 1, kernel_size=1)\n', (3145, 3168), True, 'import torch.nn as nn\n')] |
import unittest
from ..utils import check_for_sklearn_backend
class IoSklearnTest(unittest.TestCase):
    """Round-trip I/O test for the sklearn backend: save a fitted model,
    then load it back from disk."""

    @unittest.skipUnless(check_for_sklearn_backend(),
                        "Test should be only executed if sklearn backend is "
                        "installed and specified")
    def test_load_save(self):
        """Fit a tiny tree regressor, checkpoint it, and reload it."""
        from delira.io.sklearn import load_checkpoint, save_checkpoint
        from delira.models import SklearnEstimator
        from sklearn.tree import DecisionTreeRegressor
        import numpy as np

        # Wrap a plain sklearn estimator and fit it on random data.
        estimator = SklearnEstimator(DecisionTreeRegressor())
        estimator.fit(X=np.random.rand(2, 32), y=np.random.rand(2))

        save_checkpoint("./model_sklearn.pkl", model=estimator)
        # load_checkpoint returns a truthy object on success.
        self.assertTrue(load_checkpoint("./model_sklearn.pkl"))
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"sklearn.tree.DecisionTreeRegressor",
"numpy.random.rand",
"delira.io.sklearn.save_checkpoint",
"delira.io.sklearn.load_checkpoint"
] | [((800, 815), 'unittest.main', 'unittest.main', ([], {}), '()\n', (813, 815), False, 'import unittest\n'), ((653, 702), 'delira.io.sklearn.save_checkpoint', 'save_checkpoint', (['"""./model_sklearn.pkl"""'], {'model': 'net'}), "('./model_sklearn.pkl', model=net)\n", (668, 702), False, 'from delira.io.sklearn import load_checkpoint, save_checkpoint\n'), ((558, 581), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (579, 581), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((727, 765), 'delira.io.sklearn.load_checkpoint', 'load_checkpoint', (['"""./model_sklearn.pkl"""'], {}), "('./model_sklearn.pkl')\n", (742, 765), False, 'from delira.io.sklearn import load_checkpoint, save_checkpoint\n'), ((601, 622), 'numpy.random.rand', 'np.random.rand', (['(2)', '(32)'], {}), '(2, 32)\n', (615, 622), True, 'import numpy as np\n'), ((626, 643), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (640, 643), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import os
import time
import datetime
import sys
import shutil
import glob
import data_utils as utils
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
from data_utils import IMDBDataset
from text_cnn import TextCNN
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .2, "Percentage of the training data to use for validation")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 200, "Dimensionality of character embedding (300 for this example)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 128, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 100, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 3, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_boolean("preprocessing", False, "Whether to preprocess tweets or not")
# Specifics
tf.flags.DEFINE_string("data_path",
"/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/data/may20_9Klabels/data_binary_pos_neg_balanced",
"path to train and val data")
tf.flags.DEFINE_string("embeddings_path",
"/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/embeddings.npy",
"path to embeddings npy file")
tf.flags.DEFINE_string("label", "is_unemployed", "Label to train on")
tf.flags.DEFINE_string("vocab_path",
"/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/vocab.pckl",
"Path pickle file")
tf.flags.DEFINE_string("run_name", "default_run_name", "Name of folder in runs folder where models are saved")
tf.flags.DEFINE_string("output_dir", "", "Output directory where models are saved")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
print("Loading Dataset ...")
data_path = FLAGS.data_path
train_df = pd.read_csv(os.path.join(data_path, "train_{}.csv".format(FLAGS.label)), lineterminator = '\n')
eval_df = pd.read_csv(os.path.join(data_path, "val_{}.csv".format(FLAGS.label)), lineterminator= '\n')
def tokenizer(text):
    """Map a whitespace-separated string to vocabulary ids.

    Uses the module-level ``wdict`` lookup; words missing from the
    vocabulary are mapped to 0 (out-of-vocabulary id).
    """
    ids = []
    for token in text.split(' '):
        ids.append(wdict.get(token.lower(), 0))
    return ids
# Load the word -> id vocabulary (pickled dict) used by tokenizer().
with open(FLAGS.vocab_path, 'rb') as dfile:
    wdict = pickle.load(dfile)
text_processor = TextPreProcessor(
# terms that will be normalized
normalize=['url', 'email', 'le npercent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
# terms that will be annotated
annotate={"hashtag", "allcaps", "elongated", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="twitter",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="twitter",
unpack_hashtags=True, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
# select a tokenizer. You can use SocialTokenizer, or pass your own
# the tokenizer, should take as input a string and return a list of tokens
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# list of dictionaries, for replacing tokens extracted from the text,
# with other expressions. You can pass more than one dictionaries.
dicts=[emoticons]
)
def ekphrasis_preprocessing(tweet):
    """Normalize one tweet with the module-level ekphrasis ``text_processor``
    and join the resulting tokens back into a single space-separated string."""
    tokens = text_processor.pre_process_doc(tweet)
    return " ".join(tokens)
# Optionally normalize tweets with ekphrasis before tokenization.
if FLAGS.preprocessing:
    train_df['text'] = train_df['text'].apply(ekphrasis_preprocessing)
    eval_df['text'] = eval_df['text'].apply(ekphrasis_preprocessing)
    print("***********Text was successfully preprocessed***********")
# Convert raw text into lists of vocabulary ids.
train_df['text_tokenized'] = train_df['text'].apply(tokenizer)
eval_df['text_tokenized'] = eval_df['text'].apply(tokenizer)
def pad_dataset(dataset, maxlen):
    """Right-pad (with zeros) or truncate every token sequence to ``maxlen``.

    :param dataset: iterable of token-id sequences of varying length.
    :param maxlen: target length of every row.
    :return: numpy array of shape (len(dataset), maxlen).
    """
    rows = []
    for seq in dataset:
        if len(seq) < maxlen:
            rows.append(np.pad(seq, (0, maxlen - len(seq)), mode='constant'))
        else:
            rows.append(np.array(seq[:maxlen]))
    return np.array(rows)
x_train = pad_dataset(train_df.text_tokenized.values.tolist(), 128)
x_dev = pad_dataset(eval_df.text_tokenized.values.tolist(), 128)
def create_label(label):
    """One-hot encode a binary class label.

    :param label: class id, 0 or 1.
    :return: two-element one-hot list: [1, 0] for class 0, [0, 1] for class 1.
    :raises ValueError: for any other value (the original silently fell
        through and returned None, which would crash later in np.array()).
    """
    if label == 1:
        return [0, 1]
    if label == 0:
        return [1, 0]
    raise ValueError("expected binary label 0 or 1, got {!r}".format(label))
y_train = np.array((train_df['class'].apply(create_label)).values.tolist())
y_dev = np.array((eval_df['class'].apply(create_label)).values.tolist())
vocab_size = len(wdict)
embedding_path = FLAGS.embeddings_path
embedding = utils.load_embeddings(embedding_path, vocab_size, FLAGS.embedding_dim)
print("Embeddings loaded, Vocabulary Size: {:d}. Starting training ...".format(vocab_size))
def prepare_filepath_for_storing_model(output_dir: str) -> str:
    """Build (and create if necessary) the directory that will hold trained models.

    :param output_dir: Directory where to store outputs (trained models).
    :return: Path of the ``models`` subdirectory inside ``output_dir``.
    """
    model_dir = os.path.join(output_dir, 'models')
    # exist_ok makes the call idempotent across repeated runs.
    os.makedirs(model_dir, exist_ok=True)
    return model_dir
# Training
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Build the CNN text classifier on the padded id sequences.
        cnn = TextCNN(
            sequence_length=x_train.shape[1],
            num_classes=y_train.shape[1],
            vocab_size=vocab_size,
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)

        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)

        # Output directory for models and summaries.
        # BUG FIX: `args` was undefined here (NameError at runtime); the
        # flags object in this script is FLAGS. Also dropped the unused
        # `timestamp` local left over from the commented-out out_dir line.
        out_dir = prepare_filepath_for_storing_model(output_dir=FLAGS.output_dir)
        print("Writing to {}\n".format(out_dir))

        # Summaries for loss, accuracy, precision
        loss_summary = tf.summary.scalar("loss", cnn.loss)
        acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
        precision_summary = tf.summary.scalar("precision", cnn.precision)
        recall_summary = tf.summary.scalar("recall", cnn.recall)
        auc_summary = tf.summary.scalar("auc", cnn.auc)

        # Train Summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged, precision_summary, recall_summary, auc_summary])
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

        # Dev summaries
        dev_summary_op = tf.summary.merge([loss_summary, acc_summary, precision_summary, recall_summary, auc_summary])
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

        # Initialize all variables, then load the pretrained GloVe matrix
        # into the embedding layer via its placeholder.
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(cnn.embedding_init, feed_dict={cnn.embedding_placeholder: embedding})

        def train_step(x_batch, y_batch):
            """Run a single optimization step on one minibatch and log metrics."""
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summaries, loss, accuracy, precision, recall, auc = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy, cnn.precision, cnn.recall, cnn.auc], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}, precision {:g}, recall {:g}, auc {:g}".format(time_str, step, loss, accuracy, precision, recall, auc))
            train_summary_writer.add_summary(summaries, step)

        def dev_step(x_batch, y_batch, writer=None):
            """Evaluate the model on a dev set (dropout disabled); return the loss."""
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy, precision, recall, auc = sess.run(
                [global_step, dev_summary_op, cnn.loss, cnn.accuracy, cnn.precision, cnn.recall, cnn.auc], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}, precision {:g}, recall {:g}, auc {:g}".format(time_str, step, loss, accuracy, precision, recall, auc))
            if writer:
                writer.add_summary(summaries, step)
            return loss

        # Generate batches
        batches = utils.batch_iter(
            list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
        # Training loop. For each batch...
        lowest_eval_loss = 1
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                # BUG FIX: dev_step was previously called twice per
                # evaluation (once with the result discarded), doubling
                # eval cost and writing duplicate summaries.
                loss = dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print("")
                # Keep only the best (lowest dev loss) checkpoint on disk.
                if loss < lowest_eval_loss:
                    lowest_eval_loss = loss
                    checkpoint_folder = glob.glob(checkpoint_dir + '/*')
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved best model checkpoint to {}\n".format(path))
                    for f in checkpoint_folder:
                        head, tail = os.path.split(f)
                        if tail != 'checkpoint':
                            os.remove(f)
                    print("Removed former best model")
| [
"os.remove",
"tensorflow.nn.zero_fraction",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"pickle.load",
"tensorflow.Variable",
"glob.glob",
"tensorflow.summary.merge",
"os.path.join",
"data_utils.load_embeddings",
"ekphrasis.classes.tokeniz... | [((523, 635), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""dev_sample_percentage"""', '(0.2)', '"""Percentage of the training data to use for validation"""'], {}), "('dev_sample_percentage', 0.2,\n 'Percentage of the training data to use for validation')\n", (544, 635), True, 'import tensorflow as tf\n'), ((656, 769), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""embedding_dim"""', '(200)', '"""Dimensionality of character embedding (300 for this example)"""'], {}), "('embedding_dim', 200,\n 'Dimensionality of character embedding (300 for this example)')\n", (679, 769), True, 'import tensorflow as tf\n'), ((766, 868), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""filter_sizes"""', '"""3,4,5"""', '"""Comma-separated filter sizes (default: \'3,4,5\')"""'], {}), '(\'filter_sizes\', \'3,4,5\',\n "Comma-separated filter sizes (default: \'3,4,5\')")\n', (788, 868), True, 'import tensorflow as tf\n'), ((865, 964), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_filters"""', '(128)', '"""Number of filters per filter size (default: 128)"""'], {}), "('num_filters', 128,\n 'Number of filters per filter size (default: 128)')\n", (888, 964), True, 'import tensorflow as tf\n'), ((961, 1055), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""dropout_keep_prob"""', '(0.5)', '"""Dropout keep probability (default: 0.5)"""'], {}), "('dropout_keep_prob', 0.5,\n 'Dropout keep probability (default: 0.5)')\n", (982, 1055), True, 'import tensorflow as tf\n'), ((1052, 1142), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""l2_reg_lambda"""', '(0.0)', '"""L2 regularization lambda (default: 0.0)"""'], {}), "('l2_reg_lambda', 0.0,\n 'L2 regularization lambda (default: 0.0)')\n", (1073, 1142), True, 'import tensorflow as tf\n'), ((1162, 1232), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""Batch Size 
(default: 64)"""'], {}), "('batch_size', 128, 'Batch Size (default: 64)')\n", (1185, 1232), True, 'import tensorflow as tf\n'), ((1233, 1323), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_epochs"""', '(100)', '"""Number of training epochs (default: 200)"""'], {}), "('num_epochs', 100,\n 'Number of training epochs (default: 200)')\n", (1256, 1323), True, 'import tensorflow as tf\n'), ((1320, 1436), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""evaluate_every"""', '(500)', '"""Evaluate model on dev set after this many steps (default: 100)"""'], {}), "('evaluate_every', 500,\n 'Evaluate model on dev set after this many steps (default: 100)')\n", (1343, 1436), True, 'import tensorflow as tf\n'), ((1433, 1537), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""checkpoint_every"""', '(1000)', '"""Save model after this many steps (default: 100)"""'], {}), "('checkpoint_every', 1000,\n 'Save model after this many steps (default: 100)')\n", (1456, 1537), True, 'import tensorflow as tf\n'), ((1534, 1630), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_checkpoints"""', '(3)', '"""Number of checkpoints to store (default: 5)"""'], {}), "('num_checkpoints', 3,\n 'Number of checkpoints to store (default: 5)')\n", (1557, 1630), True, 'import tensorflow as tf\n'), ((1645, 1740), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (1668, 1740), True, 'import tensorflow as tf\n'), ((1737, 1830), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (1760, 1830), True, 'import tensorflow as tf\n'), ((1827, 1917), 'tensorflow.flags.DEFINE_boolean', 
'tf.flags.DEFINE_boolean', (['"""preprocessing"""', '(False)', '"""Whether to preprocess tweets or not"""'], {}), "('preprocessing', False,\n 'Whether to preprocess tweets or not')\n", (1850, 1917), True, 'import tensorflow as tf\n'), ((1926, 2118), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""data_path"""', '"""/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/data/may20_9Klabels/data_binary_pos_neg_balanced"""', '"""path to train and val data"""'], {}), "('data_path',\n '/home/manuto/Documents/world_bank/bert_twitter_labor/code/twitter/data/may20_9Klabels/data_binary_pos_neg_balanced'\n , 'path to train and val data')\n", (1948, 2118), True, 'import tensorflow as tf\n'), ((2156, 2330), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""embeddings_path"""', '"""/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/embeddings.npy"""', '"""path to embeddings npy file"""'], {}), "('embeddings_path',\n '/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/embeddings.npy'\n , 'path to embeddings npy file')\n", (2178, 2330), True, 'import tensorflow as tf\n'), ((2368, 2437), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""label"""', '"""is_unemployed"""', '"""Label to train on"""'], {}), "('label', 'is_unemployed', 'Label to train on')\n", (2390, 2437), True, 'import tensorflow as tf\n'), ((2438, 2592), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""vocab_path"""', '"""/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/vocab.pckl"""', '"""Path pickle file"""'], {}), "('vocab_path',\n '/home/manuto/Documents/world_bank/bert_twitter_labor/data/glove_embeddings/vocab.pckl'\n , 'Path pickle file')\n", (2460, 2592), True, 'import tensorflow as tf\n'), ((2630, 2744), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""run_name"""', '"""default_run_name"""', '"""Name of folder in runs folder where models are 
saved"""'], {}), "('run_name', 'default_run_name',\n 'Name of folder in runs folder where models are saved')\n", (2652, 2744), True, 'import tensorflow as tf\n'), ((2741, 2828), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""output_dir"""', '""""""', '"""Output directory where models are saved"""'], {}), "('output_dir', '',\n 'Output directory where models are saved')\n", (2763, 2828), True, 'import tensorflow as tf\n'), ((5757, 5827), 'data_utils.load_embeddings', 'utils.load_embeddings', (['embedding_path', 'vocab_size', 'FLAGS.embedding_dim'], {}), '(embedding_path, vocab_size, FLAGS.embedding_dim)\n', (5778, 5827), True, 'import data_utils as utils\n'), ((3428, 3446), 'pickle.load', 'pickle.load', (['dfile'], {}), '(dfile)\n', (3439, 3446), False, 'import pickle\n'), ((6235, 6269), 'os.path.join', 'os.path.join', (['output_dir', '"""models"""'], {}), "(output_dir, 'models')\n", (6247, 6269), False, 'import os\n'), ((6451, 6567), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (6465, 6567), True, 'import tensorflow as tf\n'), ((6592, 6623), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (6602, 6623), True, 'import tensorflow as tf\n'), ((6281, 6316), 'os.path.exists', 'os.path.exists', (['path_to_store_model'], {}), '(path_to_store_model)\n', (6295, 6316), False, 'import os\n'), ((6326, 6358), 'os.makedirs', 'os.makedirs', (['path_to_store_model'], {}), '(path_to_store_model)\n', (6337, 6358), False, 'import os\n'), ((7067, 7118), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (7078, 7118), True, 'import tensorflow as tf\n'), ((7139, 7168), 'tensorflow.train.AdamOptimizer', 
'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (7161, 7168), True, 'import tensorflow as tf\n'), ((7824, 7856), 'tensorflow.summary.merge', 'tf.summary.merge', (['grad_summaries'], {}), '(grad_summaries)\n', (7840, 7856), True, 'import tensorflow as tf\n'), ((8239, 8274), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'cnn.loss'], {}), "('loss', cnn.loss)\n", (8256, 8274), True, 'import tensorflow as tf\n'), ((8297, 8340), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'cnn.accuracy'], {}), "('accuracy', cnn.accuracy)\n", (8314, 8340), True, 'import tensorflow as tf\n'), ((8369, 8414), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""precision"""', 'cnn.precision'], {}), "('precision', cnn.precision)\n", (8386, 8414), True, 'import tensorflow as tf\n'), ((8440, 8479), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""recall"""', 'cnn.recall'], {}), "('recall', cnn.recall)\n", (8457, 8479), True, 'import tensorflow as tf\n'), ((8502, 8535), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""auc"""', 'cnn.auc'], {}), "('auc', cnn.auc)\n", (8519, 8535), True, 'import tensorflow as tf\n'), ((8590, 8710), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, acc_summary, grad_summaries_merged, precision_summary,\n recall_summary, auc_summary]'], {}), '([loss_summary, acc_summary, grad_summaries_merged,\n precision_summary, recall_summary, auc_summary])\n', (8606, 8710), True, 'import tensorflow as tf\n'), ((8735, 8778), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""train"""'], {}), "(out_dir, 'summaries', 'train')\n", (8747, 8778), False, 'import os\n'), ((8810, 8862), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_summary_dir', 'sess.graph'], {}), '(train_summary_dir, sess.graph)\n', (8831, 8862), True, 'import tensorflow as tf\n'), ((8913, 9010), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_summary, acc_summary, precision_summary, 
recall_summary, auc_summary]'], {}), '([loss_summary, acc_summary, precision_summary,\n recall_summary, auc_summary])\n', (8929, 9010), True, 'import tensorflow as tf\n'), ((9033, 9074), 'os.path.join', 'os.path.join', (['out_dir', '"""summaries"""', '"""dev"""'], {}), "(out_dir, 'summaries', 'dev')\n", (9045, 9074), False, 'import os\n'), ((9104, 9154), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['dev_summary_dir', 'sess.graph'], {}), '(dev_summary_dir, sess.graph)\n', (9125, 9154), True, 'import tensorflow as tf\n'), ((9368, 9405), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model"""'], {}), "(checkpoint_dir, 'model')\n", (9380, 9405), False, 'import os\n'), ((4427, 4458), 'ekphrasis.classes.tokenizer.SocialTokenizer', 'SocialTokenizer', ([], {'lowercase': '(True)'}), '(lowercase=True)\n', (4442, 4458), False, 'from ekphrasis.classes.tokenizer import SocialTokenizer\n'), ((6407, 6417), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6415, 6417), True, 'import tensorflow as tf\n'), ((9302, 9338), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (9314, 9338), False, 'import os\n'), ((9421, 9451), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9435, 9451), False, 'import os\n'), ((9465, 9492), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9476, 9492), False, 'import os\n'), ((9524, 9545), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (9543, 9545), True, 'import tensorflow as tf\n'), ((9726, 9759), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9757, 9759), True, 'import tensorflow as tf\n'), ((9778, 9810), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (9808, 9810), True, 'import tensorflow as tf\n'), ((11833, 11872), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 
'global_step'], {}), '(sess, global_step)\n', (11853, 11872), True, 'import tensorflow as tf\n'), ((5235, 5255), 'numpy.array', 'np.array', (['r[:maxlen]'], {}), '(r[:maxlen])\n', (5243, 5255), True, 'import numpy as np\n'), ((7938, 7949), 'time.time', 'time.time', ([], {}), '()\n', (7947, 7949), False, 'import time\n'), ((7655, 7677), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['g'], {}), '(g)\n', (7674, 7677), True, 'import tensorflow as tf\n'), ((10425, 10448), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10446, 10448), False, 'import datetime\n'), ((11187, 11210), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11208, 11210), False, 'import datetime\n'), ((12262, 12294), 'glob.glob', 'glob.glob', (["(checkpoint_dir + '/*')"], {}), "(checkpoint_dir + '/*')\n", (12271, 12294), False, 'import glob\n'), ((12547, 12563), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (12560, 12563), False, 'import os\n'), ((12641, 12653), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (12650, 12653), False, 'import os\n')] |
import numpy as np, os, sys
import torch
import matplotlib as mpl #patch-wise similarities, droi images
def datagen2d(w1, w2, eps, num):
    """Draw ``num`` 2-D standard-normal inputs and noisy linear targets.

    y = w1 * x[:, 0] + w2 * x[:, 1] + eps * noise, with noise ~ N(0, 1).
    Returns (x, y) where x.shape == (num, 2) and y.shape == (num,).
    """
    x = np.random.normal(size=(num, 2))
    noise = np.random.normal(size=(num))
    y = w1 * x[:, 0] + w2 * x[:, 1] + eps * noise
    print(x.shape, y.shape)
    return x, y
def rndsplit(x, y, numtr):
    """Randomly split (x, y) into ``numtr`` training samples and the rest.

    Returns (xtr, ytr, xv, yv); rows of x and entries of y stay aligned.
    """
    order = np.arange(y.size)
    np.random.shuffle(order)
    tr = order[0:numtr]
    va = order[numtr:]
    return x[tr, :], y[tr], x[va, :], y[va]
def gendata(numtotal):
    """Generate a training set of ``numtotal`` samples plus a fixed
    3000-sample test set from the same ground-truth weights.

    Returns (xtr, ytr, xv, yv, w1, w2) with w1 = 0.5, w2 = -2.
    """
    w1, w2 = 0.5, -2
    xtr, ytr = datagen2d(w1=w1, w2=w2, eps=0.8, num=int(numtotal))  # numtotal train samples
    xv, yv = datagen2d(w1=w1, w2=w2, eps=0.8, num=3000)  # fix 3k test samples
    return xtr, ytr, xv, yv, w1, w2
def gendata2(numtotal):
    """Like gendata(), but splits ``numtotal`` 70/30 into train and test
    instead of using a fixed-size test set.

    Returns (xtr, ytr, xv, yv, w1, w2) with w1 = 0.5, w2 = -2.
    """
    w1, w2 = 0.5, -2
    xtr, ytr = datagen2d(w1=w1, w2=w2, eps=0.8, num=int(0.7 * numtotal))  # 70% of numtotal
    xv, yv = datagen2d(w1=w1, w2=w2, eps=0.8, num=int(0.3 * numtotal))  # 30% of numtotal
    return xtr, ytr, xv, yv, w1, w2
def linreg_train(xtr, ytr, C):
    """Closed-form ridge regression: w = (X^T X + C I)^{-1} X^T y.

    :param xtr: training inputs, shape (num samples, dims).
    :param ytr: training targets, shape (num samples,).
    :param C: L2 regularization strength.
    :return: weight vector of shape (dims,).
    """
    gram = np.dot(xtr.T, xtr) + C * np.eye(xtr.shape[1])
    return np.dot(np.linalg.inv(gram), np.dot(xtr.T, ytr))
def linreg_apply(xv, w):
    """Predict targets for inputs ``xv`` as the matrix-vector product xv @ w."""
    return xv.dot(w)
def mse(ypred, ytrue):
    """Mean squared error between predictions and ground truth."""
    diff = ypred - ytrue
    return np.mean(diff ** 2)
#one setup: 100 train, 5000 train. always 3000 test
# other setup: 100 total, 5000 total, always 30% test, observe means variances of w and of mse averaged over 100 runs
# 30% as test
#variance of estimated w as function of sample size, barplots
#variance of mse as function of sample size, barplots
def lossonvalset(w, xv, yv):
    """Validation MSE of the linear model ``w`` on the set (xv, yv)."""
    return mse(linreg_apply(xv, w), yv)
def gradientononesample(w, x, y, C):
    """Gradient of the regularized squared error on one sample.

    Per-sample loss: L(w) = (w.x - y)^2 + C * ||w||^2, hence
    dL/dw = 2 * (w.x - y) * x + 2 * C * w.
    The previous body was a placeholder (``(x + y) / w``) that is not a
    gradient of any loss used here; this implements the intended one
    (note the sign: descending with w -= lr * g decreases the loss).

    :param w: current weight vector, shape (dims,).
    :param x: one input sample, shape (dims,).
    :param y: scalar target for this sample.
    :param C: L2 regularization strength.
    :return: gradient vector of shape (dims,).
    """
    residual = np.dot(x, w) - y
    return 2.0 * residual * x + 2.0 * C * w
def gradientonminibatch(w, xb, yb, C):
    """Average per-sample gradients over a minibatch.

    xb.shape = (num samples, dimensions), yb.shape = (num samples,).
    """
    g = np.zeros(2)
    scale = 1.0 / float(xb.shape[0])
    for i in range(xb.shape[0]):
        g += scale * gradientononesample(w, xb[i, :], yb[i], C)
    return g
def sgdforthis(xtr, ytr, C, learningrate, batchsize, maxnumepochs, stopthresh, xv, yv):
    """Mini-batch SGD for regularized linear regression.

    Completes the previously-stubbed loop (the TODOs): the shuffled index
    order is actually used to form minibatches, the minibatch gradient is
    computed and applied (w -= lr * g), and training stops early when the
    validation-loss change between epochs drops below ``stopthresh``.
    Debug prints from the unfinished version were removed.

    :param xtr: training inputs, shape (num samples, dims).
    :param ytr: training targets, shape (num samples,).
    :param C: L2 regularization strength.
    :param learningrate: SGD step size.
    :param batchsize: minibatch size.
    :param maxnumepochs: maximum number of passes over the training data.
    :param stopthresh: convergence threshold on the validation-loss change.
    :param xv: validation inputs used for the stopping criterion.
    :param yv: validation targets.
    :return: learned weight vector, shape (2,).
    """
    # Randomly initialize w, close to zero but not equal to zero.
    w = np.random.normal(size=2)
    oldloss = lossonvalset(w, xv, yv)  # initial validation loss
    conv = False  # flag whether SGD has converged
    for ep in range(maxnumepochs):
        # Randomize the order of samples in every epoch.
        inds = np.arange(xtr.shape[0])
        np.random.shuffle(inds)
        numbatches = int(xtr.shape[0] // batchsize)
        for b in range(numbatches):
            batchinds = inds[b * batchsize:(b + 1) * batchsize]
            g = gradientonminibatch(w, xtr[batchinds, :], ytr[batchinds], C)
            # Gradient-descent step.
            w = w - learningrate * g
        # Validation loss with the newly updated parameters.
        newloss = lossonvalset(w, xv, yv)
        # Stop if the loss change between epochs is too small.
        if abs(oldloss - newloss) < stopthresh:
            conv = True
            break
        oldloss = newloss
    if False == conv:
        print('maxnumepochs reached without convergence')
    return w
def linreg_train_sgd(xtr, ytr, C, learningrate, batchsize, maxnumepochs, stopthresh, xv, yv):
    """Thin wrapper: train linear-regression weights via minibatch SGD."""
    return sgdforthis(xtr, ytr, C, learningrate, batchsize, maxnumepochs,
                      stopthresh, xv, yv)
def run1(xtr, ytr, xv, yv, w1, w2, C):
    """Train by the closed form, report errors.

    Returns
    -------
    e : validation mse
    werr : squared distance between estimated and true weights
    """
    w = linreg_train(xtr, ytr, C=C)  # e.g. C=0.1
    wtrue = np.asarray([w1, w2])
    werr = np.dot((w - wtrue).T, w - wtrue)
    print('w', w, 'true w', [w1, w2], 'diff', werr)
    e = mse(linreg_apply(xv, w), yv)
    print('mse', e)
    return e, werr
def run2(xtr, ytr, xv, yv, w1, w2, C):
    """Train via SGD with fixed hyperparameters, report errors.

    Returns
    -------
    e : validation mse
    werr : squared distance between estimated and true weights
    """
    w = linreg_train_sgd(xtr, ytr, C, learningrate=0.01, batchsize=5,
                         maxnumepochs=1000, stopthresh=0.0001, xv=xv, yv=yv)
    wtrue = np.asarray([w1, w2])
    werr = np.dot((w - wtrue).T, w - wtrue)
    print('w', w, 'true w', [w1, w2], 'diff', werr)
    e = mse(linreg_apply(xv, w), yv)
    print('mse', e)
    return e, werr
if __name__ == '__main__':
    # generate a small 2d toy dataset (100 train, 3000 test samples) ...
    xtr, ytr, xv, yv, w1, w2 = gendata(100)
    # ... and fit it with SGD; use run1 instead for the closed-form solution
    run2(xtr, ytr, xv, yv, w1, w2, 1e-3)
| [
"numpy.eye",
"numpy.asarray",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"numpy.random.normal",
"numpy.dot",
"numpy.random.shuffle"
] | [((141, 172), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num, 2)'}), '(size=(num, 2))\n', (157, 172), True, 'import numpy as np, os, sys\n'), ((362, 379), 'numpy.arange', 'np.arange', (['y.size'], {}), '(y.size)\n', (371, 379), True, 'import numpy as np, os, sys\n'), ((382, 405), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (399, 405), True, 'import numpy as np, os, sys\n'), ((1303, 1316), 'numpy.dot', 'np.dot', (['xv', 'w'], {}), '(xv, w)\n', (1309, 1316), True, 'import numpy as np, os, sys\n'), ((1343, 1372), 'numpy.mean', 'np.mean', (['((ypred - ytrue) ** 2)'], {}), '((ypred - ytrue) ** 2)\n', (1350, 1372), True, 'import numpy as np, os, sys\n'), ((2010, 2021), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2018, 2021), True, 'import numpy as np, os, sys\n'), ((2331, 2355), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2)'}), '(size=2)\n', (2347, 2355), True, 'import numpy as np, os, sys\n'), ((3824, 3844), 'numpy.asarray', 'np.asarray', (['[w1, w2]'], {}), '([w1, w2])\n', (3834, 3844), True, 'import numpy as np, os, sys\n'), ((4232, 4252), 'numpy.asarray', 'np.asarray', (['[w1, w2]'], {}), '([w1, w2])\n', (4242, 4252), True, 'import numpy as np, os, sys\n'), ((1236, 1254), 'numpy.dot', 'np.dot', (['xtr.T', 'ytr'], {}), '(xtr.T, ytr)\n', (1242, 1254), True, 'import numpy as np, os, sys\n'), ((2503, 2526), 'numpy.arange', 'np.arange', (['xtr.shape[0]'], {}), '(xtr.shape[0])\n', (2512, 2526), True, 'import numpy as np, os, sys\n'), ((2697, 2720), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (2714, 2720), True, 'import numpy as np, os, sys\n'), ((3887, 3919), 'numpy.dot', 'np.dot', (['(w - wtrue).T', '(w - wtrue)'], {}), '((w - wtrue).T, w - wtrue)\n', (3893, 3919), True, 'import numpy as np, os, sys\n'), ((3993, 4025), 'numpy.dot', 'np.dot', (['(w - wtrue).T', '(w - wtrue)'], {}), '((w - wtrue).T, w - wtrue)\n', (3999, 4025), True, 'import numpy as np, os, sys\n'), 
((4295, 4327), 'numpy.dot', 'np.dot', (['(w - wtrue).T', '(w - wtrue)'], {}), '((w - wtrue).T, w - wtrue)\n', (4301, 4327), True, 'import numpy as np, os, sys\n'), ((4401, 4433), 'numpy.dot', 'np.dot', (['(w - wtrue).T', '(w - wtrue)'], {}), '((w - wtrue).T, w - wtrue)\n', (4407, 4433), True, 'import numpy as np, os, sys\n'), ((200, 226), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'num'}), '(size=num)\n', (216, 226), True, 'import numpy as np, os, sys\n'), ((1155, 1173), 'numpy.dot', 'np.dot', (['xtr.T', 'xtr'], {}), '(xtr.T, xtr)\n', (1161, 1173), True, 'import numpy as np, os, sys\n'), ((1176, 1196), 'numpy.eye', 'np.eye', (['xtr.shape[1]'], {}), '(xtr.shape[1])\n', (1182, 1196), True, 'import numpy as np, os, sys\n')] |
''' Fvtk module implements simple visualization functions using VTK.
The main idea is the following:
A window can have one or more renderers. A renderer can have none, one or more actors. Examples of actors are a sphere, line, point etc.
You basically add actors in a renderer and in that way you can visualize the forementioned objects e.g. sphere, line ...
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #fvtk.show(r)
For more information on VTK there are many neat examples in
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import
from dipy.utils.six.moves import xrange
import types
import numpy as np
from dipy.core.ndindex import ndindex
# Conditional import machinery for vtk
from ..utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
# Prefer matplotlib's colormaps when available, otherwise fall back to
# the small set of colormaps shipped with dipy.
if have_matplotlib:
    get_cmap = cm.get_cmap
else:
    # NOTE(review): this imports ``dipy.data.get_cmap`` as a *module*; if
    # ``get_cmap`` is a function inside ``dipy.data`` this should probably
    # read ``from dipy.data import get_cmap`` -- confirm against dipy.data.
    import dipy.data.get_cmap as get_cmap
# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# temporary renderer used only with picking tracks
tmp_ren = None
if have_vtk:
    # e.g. '5.10.1' -> used only for display; major_version drives the
    # SetInput (VTK<=5) vs SetInputData (VTK>=6) pipeline calls below
    version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
    major_version = vtk.vtkVersion.GetVTKMajorVersion()
    # Create a text mapper and actor to display the results of picking.
    textMapper = vtk.vtkTextMapper()
    tprop = textMapper.GetTextProperty()
    tprop.SetFontFamilyToArial()
    tprop.SetFontSize(10)
    # tprop.BoldOn()
    # tprop.ShadowOn()
    tprop.SetColor(1, 0, 0)
    textActor = vtk.vtkActor2D()
    textActor.VisibilityOff()
    textActor.SetMapper(textMapper)
    # Create a cell picker.
    picker = vtk.vtkCellPicker()
def ren():
    '''Create and return a new, empty VTK renderer.

    Returns
    -------
    v : vtkRenderer() object
        Renderer.

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> import numpy as np
    >>> r=fvtk.ren()
    >>> lines=[np.random.rand(10,3)]
    >>> c=fvtk.line(lines, fvtk.colors.red)
    >>> fvtk.add(r,c)
    >>> #fvtk.show(r)
    '''
    return vtk.vtkRenderer()
def add(ren, a):
    '''Add an actor (or a volume) to the renderer.
    '''
    # volumes go through a dedicated renderer slot
    if isinstance(a, vtk.vtkVolume):
        ren.AddVolume(a)
        return
    ren.AddActor(a)
def rm(ren, a):
    '''Remove the actor `a` from the renderer.
    '''
    ren.RemoveActor(a)
def clear(ren):
    '''Strip every actor/prop from the renderer.
    '''
    ren.RemoveAllViewProps()
def rm_all(ren):
    '''Remove all actors from the renderer (alias for ``clear``).
    '''
    clear(ren)
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
    '''Internal helper that builds a single arrow actor.
    '''
    src = vtk.vtkArrowSource()
    # arrow.SetTipLength(length)
    mapper = vtk.vtkPolyDataMapper()
    # VTK >= 6 split SetInput into SetInputData/SetInputConnection
    if major_version <= 5:
        mapper.SetInput(src.GetOutput())
    else:
        mapper.SetInputData(src.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)
    actor.GetProperty().SetOpacity(opacity)
    actor.SetScale(scale)
    return actor
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1),
         opacity=1):
    """ Create an actor with the coordinate's system axes where
    red = x, green = y, blue =z.

    Parameters
    ----------
    scale : tuple (3,)
        axes size e.g. (100, 100, 100)
    colorx : tuple (3,)
        x-axis color. Default red (1, 0, 0).
    colory : tuple (3,)
        y-axis color. Default green (0, 1, 0).
    colorz : tuple (3,)
        z-axis color. Default blue (0, 0, 1).
    opacity : float
        Opacity of all three arrows.

    Returns
    -------
    vtkAssembly
    """
    arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
    arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
    arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
    # the source arrow points along +x; rotate the copies onto y and z
    arrowy.RotateZ(90)
    arrowz.RotateY(-90)
    ass = vtk.vtkAssembly()
    ass.AddPart(arrowx)
    ass.AddPart(arrowy)
    ass.AddPart(arrowz)
    return ass
def _lookup(colors):
    '''Internal helper: build a vtkLookupTable from an RGB array.

    Parameters
    ----------
    colors : array, shape (N, 3) or (3,)
        Every triplet encodes red, green and blue, each in [0, 1].
        A 1d input is treated as a single color.

    Returns
    -------
    vtkLookupTable
    '''
    colors = np.asarray(colors, dtype=np.float32)
    if colors.ndim > 2:
        raise ValueError('Incorrect shape of array in colors')
    if colors.ndim == 1:
        n_colors = 1
    if colors.ndim == 2:
        n_colors = colors.shape[0]
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfColors(n_colors)
    lut.Build()
    if colors.ndim == 1:
        lut.SetTableValue(0, colors[0], colors[1], colors[2], 1.0)
    else:
        for idx, (r, g, b) in enumerate(colors):
            lut.SetTableValue(idx, r, g, b, 1.0)
    return lut
def streamtube(lines, colors, opacity=1, linewidth=0.15, tube_sides=8,
               lod=True, lod_points=10 ** 4, lod_points_size=5):
    """ Uses streamtubes to visualize polylines

    Parameters
    ----------
    lines : list
        list of N curves represented as 2D ndarrays
    colors : array (N, 3) or tuple (3,)
    opacity : float
    linewidth : float
        tube radius
    tube_sides : int
    lod : bool
        use vtkLODActor rather than vtkActor
    lod_points : int
        number of points to be used when LOD is in effect
    lod_points_size : int
        size of points when lod is in effect

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r=fvtk.ren()
    >>> lines=[np.random.rand(10, 3), np.random.rand(20, 3)]
    >>> colors=np.random.rand(2, 3)
    >>> c=fvtk.streamtube(lines, colors)
    >>> fvtk.add(r,c)
    >>> #fvtk.show(r)

    Notes
    -----
    Streamtubes can be heavy on GPU when loading many streamlines and
    therefore, you may experience slow rendering time depending on system GPU.
    A solution to this problem is to reduce the number of points in each
    streamline. In Dipy we provide an algorithm that will reduce the number of
    points on the straighter parts of the streamline but keep more points on
    the curvier parts. This can be used in the following way

    from dipy.tracking.distances import approx_polygon_track
    lines = [approx_polygon_track(line, 0.2) for line in lines]
    """
    points = vtk.vtkPoints()
    colors = np.asarray(colors)
    if colors.ndim == 1:
        # a single color was given: replicate it for every line
        colors = np.tile(colors, (len(lines), 1))
    # Create the polyline.
    streamlines = vtk.vtkCellArray()
    cols = vtk.vtkUnsignedCharArray()
    cols.SetName("Cols")
    cols.SetNumberOfComponents(3)
    len_lines = len(lines)
    prior_line_shape = 0
    for i in range(len_lines):
        line = lines[i]
        streamlines.InsertNextCell(line.shape[0])
        for j in range(line.shape[0]):
            points.InsertNextPoint(*line[j])
            streamlines.InsertCellPoint(j + prior_line_shape)
        color = (255 * colors[i]).astype('ubyte')
        cols.InsertNextTuple3(*color)
        prior_line_shape += line.shape[0]
    profileData = vtk.vtkPolyData()
    profileData.SetPoints(points)
    profileData.SetLines(streamlines)
    profileData.GetPointData().AddArray(cols)
    # Add thickness to the resulting line.
    profileTubes = vtk.vtkTubeFilter()
    profileTubes.SetNumberOfSides(tube_sides)
    # BUGFIX: SetInput() was removed in VTK 6; guard on the major version
    # exactly like every other pipeline connection in this module.
    if major_version <= 5:
        profileTubes.SetInput(profileData)
    else:
        profileTubes.SetInputData(profileData)
    profileTubes.SetRadius(linewidth)
    profileMapper = vtk.vtkPolyDataMapper()
    profileMapper.SetInputConnection(profileTubes.GetOutputPort())
    profileMapper.ScalarVisibilityOn()
    profileMapper.SetScalarModeToUsePointFieldData()
    profileMapper.SelectColorArray("Cols")
    profileMapper.GlobalImmediateModeRenderingOn()
    if lod:
        # level-of-detail actor falls back to a point cloud while interacting
        profile = vtk.vtkLODActor()
        profile.SetNumberOfCloudPoints(lod_points)
        profile.GetProperty().SetPointSize(lod_points_size)
    else:
        profile = vtk.vtkActor()
    profile.SetMapper(profileMapper)
    profile.GetProperty().SetAmbient(0)  # .3
    profile.GetProperty().SetSpecular(0)  # .3
    profile.GetProperty().SetSpecularPower(10)
    profile.GetProperty().SetInterpolationToGouraud()
    profile.GetProperty().BackfaceCullingOn()
    profile.GetProperty().SetOpacity(opacity)
    return profile
def line(lines, colors, opacity=1, linewidth=1):
    ''' Create an actor for one or more lines.

    Parameters
    ----------
    lines : list of arrays representing lines as 3d points  for example
            lines=[np.random.rand(10,3),np.random.rand(20,3)]
            represents 2 lines the first with 10 points and the second with
            20 points in x,y,z coordinates.
    colors : array, shape (N,3)
            Colormap where every triplet is encoding red, green and blue e.g.

            ::
              r1,g1,b1
              r2,g2,b2
              ...
              rN,gN,bN

            where

            ::
              0=<r<=1,
              0=<g<=1,
              0=<b<=1

    opacity : float, optional
        ``0 <= transparency <= 1``
    linewidth : float, optional
        Line thickness.

    Returns
    -------
    v : vtkActor object
        Line.

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r=fvtk.ren()
    >>> lines=[np.random.rand(10,3), np.random.rand(20,3)]
    >>> colors=np.random.rand(2,3)
    >>> c=fvtk.line(lines, colors)
    >>> fvtk.add(r,c)
    >>> #fvtk.show(r)
    '''
    # BUGFIX: ``types.ListType`` only exists on Python 2 and raises
    # AttributeError under Python 3; the builtin ``list`` works on both.
    if not isinstance(lines, list):
        lines = [lines]
    points = vtk.vtkPoints()
    lines_ = vtk.vtkCellArray()
    linescalars = vtk.vtkFloatArray()
    # lookuptable=vtk.vtkLookupTable()
    lookuptable = _lookup(colors)
    scalarmin = 0
    colors = np.asarray(colors)
    if colors.ndim == 2:
        scalarmax = colors.shape[0] - 1
    if colors.ndim == 1:
        scalarmax = 0
    curPointID = 0
    m = (0.0, 0.0, 0.0)
    n = (1.0, 0.0, 0.0)
    scalar = 0
    # many colors: each line gets its own scalar which indexes the lookup table
    if colors.ndim == 2:
        for Line in lines:
            inw = True
            mit = iter(Line)
            nit = iter(Line)
            next(nit)
            while(inw):
                try:
                    m = next(mit)
                    n = next(nit)
                    # scalar=sp.rand(1)
                    linescalars.SetNumberOfComponents(1)
                    points.InsertNextPoint(m)
                    linescalars.InsertNextTuple1(scalar)
                    points.InsertNextPoint(n)
                    linescalars.InsertNextTuple1(scalar)
                    lines_.InsertNextCell(2)
                    lines_.InsertCellPoint(curPointID)
                    lines_.InsertCellPoint(curPointID + 1)
                    curPointID += 2
                except StopIteration:
                    break
            scalar += 1
    # one color only: scalar stays 0 for every segment
    if colors.ndim == 1:
        for Line in lines:
            inw = True
            mit = iter(Line)
            nit = iter(Line)
            next(nit)
            while(inw):
                try:
                    m = next(mit)
                    n = next(nit)
                    # scalar=sp.rand(1)
                    linescalars.SetNumberOfComponents(1)
                    points.InsertNextPoint(m)
                    linescalars.InsertNextTuple1(scalar)
                    points.InsertNextPoint(n)
                    linescalars.InsertNextTuple1(scalar)
                    lines_.InsertNextCell(2)
                    lines_.InsertCellPoint(curPointID)
                    lines_.InsertCellPoint(curPointID + 1)
                    curPointID += 2
                except StopIteration:
                    break
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetLines(lines_)
    polydata.GetPointData().SetScalars(linescalars)
    mapper = vtk.vtkPolyDataMapper()
    if major_version <= 5:
        mapper.SetInput(polydata)
    else:
        mapper.SetInputData(polydata)
    mapper.SetLookupTable(lookuptable)
    mapper.SetColorModeToMapScalars()
    mapper.SetScalarRange(scalarmin, scalarmax)
    mapper.SetScalarModeToUsePointData()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetLineWidth(linewidth)
    actor.GetProperty().SetOpacity(opacity)
    return actor
def dots(points, color=(1, 0, 0), opacity=1, dot_size=5):
    """ Create one or more 3d points

    Parameters
    ----------
    points : ndarray, (N, 3)
    color : tuple (3,)
    opacity : float
    dot_size : int

    Returns
    --------
    vtkActor

    See Also
    ---------
    dipy.viz.fvtk.point
    """
    # a single point may also be passed as a 1d (3,) array
    if points.ndim == 2:
        points_no = points.shape[0]
    else:
        points_no = 1
    polyVertexPoints = vtk.vtkPoints()
    polyVertexPoints.SetNumberOfPoints(points_no)
    aPolyVertex = vtk.vtkPolyVertex()
    aPolyVertex.GetPointIds().SetNumberOfIds(points_no)
    cnt = 0
    if points.ndim > 1:
        for point in points:
            polyVertexPoints.InsertPoint(cnt, point[0], point[1], point[2])
            aPolyVertex.GetPointIds().SetId(cnt, cnt)
            cnt += 1
    else:
        polyVertexPoints.InsertPoint(cnt, points[0], points[1], points[2])
        aPolyVertex.GetPointIds().SetId(cnt, cnt)
        cnt += 1
    # wrap the single poly-vertex cell into an unstructured grid
    aPolyVertexGrid = vtk.vtkUnstructuredGrid()
    aPolyVertexGrid.Allocate(1, 1)
    aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),
                                   aPolyVertex.GetPointIds())
    aPolyVertexGrid.SetPoints(polyVertexPoints)
    aPolyVertexMapper = vtk.vtkDataSetMapper()
    # VTK >= 6 replaced SetInput with SetInputData
    if major_version <= 5:
        aPolyVertexMapper.SetInput(aPolyVertexGrid)
    else:
        aPolyVertexMapper.SetInputData(aPolyVertexGrid)
    aPolyVertexActor = vtk.vtkActor()
    aPolyVertexActor.SetMapper(aPolyVertexMapper)
    aPolyVertexActor.GetProperty().SetColor(color)
    aPolyVertexActor.GetProperty().SetOpacity(opacity)
    aPolyVertexActor.GetProperty().SetPointSize(dot_size)
    return aPolyVertexActor
def point(points, colors, opacity=1, point_radius=0.1, theta=8, phi=8):
    """ Visualize points as sphere glyphs

    Parameters
    ----------
    points : ndarray, shape (N, 3)
    colors : ndarray (N,3) or tuple (3,)
    point_radius : float
    theta : int
    phi : int

    Returns
    -------
    vtkActor

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> ren = fvtk.ren()
    >>> pts = np.random.rand(5, 3)
    >>> point_actor = fvtk.point(pts, fvtk.colors.coral)
    >>> fvtk.add(ren, point_actor)
    >>> #fvtk.show(ren)
    """
    if np.array(colors).ndim == 1:
        # a single color was given: replicate it for every point
        # return dots(points,colors,opacity)
        colors = np.tile(colors, (len(points), 1))
    scalars = vtk.vtkUnsignedCharArray()
    scalars.SetNumberOfComponents(3)
    pts = vtk.vtkPoints()
    cnt_colors = 0
    for p in points:
        pts.InsertNextPoint(p[0], p[1], p[2])
        # colors are expected in [0, 1]; scale to 0-255 ubyte triplets
        scalars.InsertNextTuple3(
            round(255 * colors[cnt_colors][0]), round(255 * colors[cnt_colors][1]), round(255 * colors[cnt_colors][2]))
        cnt_colors += 1
    src = vtk.vtkSphereSource()
    src.SetRadius(point_radius)
    src.SetThetaResolution(theta)
    src.SetPhiResolution(phi)
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(pts)
    polyData.GetPointData().SetScalars(scalars)
    # one sphere glyph per input point, colored by the scalar triplets
    glyph = vtk.vtkGlyph3D()
    glyph.SetSourceConnection(src.GetOutputPort())
    if major_version <= 5:
        glyph.SetInput(polyData)
    else:
        glyph.SetInputData(polyData)
    glyph.SetColorModeToColorByScalar()
    glyph.SetScaleModeToDataScalingOff()
    mapper = vtk.vtkPolyDataMapper()
    if major_version <= 5:
        mapper.SetInput(glyph.GetOutput())
    else:
        # NOTE(review): SetInputData on an algorithm's GetOutput() bypasses
        # the pipeline; SetInputConnection(glyph.GetOutputPort()) is the
        # usual VTK>=6 idiom -- confirm before changing.
        mapper.SetInputData(glyph.GetOutput())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor
def label(ren, text='Origin', pos=(0, 0, 0), scale=(0.2, 0.2, 0.2),
          color=(1, 1, 1)):
    '''Build a text label that always faces the camera and add it to `ren`.

    Parameters
    ----------
    ren : vtkRenderer() object
        Renderer as returned by ``ren()``.
    text : str
        Text for the label.
    pos : (3,) array_like, optional
        Left down position of the label.
    scale : (3,) array_like
        Changes the size of the label.
    color : (3,) array_like
        Label color as ``(r,g,b)`` tuple.

    Returns
    -------
    l : vtkActor object
        Label.

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r=fvtk.ren()
    >>> l=fvtk.label(r)
    >>> fvtk.add(r,l)
    >>> #fvtk.show(r)
    '''
    text_source = vtk.vtkVectorText()
    text_source.SetText(text)
    mapper = vtk.vtkPolyDataMapper()
    if major_version <= 5:
        mapper.SetInput(text_source.GetOutput())
    else:
        mapper.SetInputData(text_source.GetOutput())
    follower = vtk.vtkFollower()
    follower.SetMapper(mapper)
    follower.SetScale(scale)
    follower.GetProperty().SetColor(color)
    follower.SetPosition(pos)
    ren.AddActor(follower)
    # a vtkFollower keeps itself oriented towards the active camera
    follower.SetCamera(ren.GetActiveCamera())
    return follower
def volume(vol, voxsz=(1.0, 1.0, 1.0), affine=None, center_origin=1,
           info=0, maptype=0, trilinear=1, iso=0, iso_thr=100,
           opacitymap=None, colormap=None):
    ''' Create a volume and return a volumetric actor using volumetric
    rendering.

    This function has many different interesting capabilities. The maptype,
    opacitymap and colormap are the most crucial parameters here.

    Parameters
    ----------
    vol : array, shape (N, M, K), dtype uint8
        An array representing the volumetric dataset that we want to visualize
        using volumetric rendering.
    voxsz : (3,) array_like
        Voxel size.
    affine : (4, 4) ndarray
        As given by volumeimages.
    center_origin : int {0,1}
        It considers that the center of the volume is the
        point ``(-vol.shape[0]/2.0+0.5,-vol.shape[1]/2.0+0.5,-vol.shape[2]/2.0+0.5)``.
    info : int {0,1}
        If 1 it prints out some info about the volume, the method and the
        dataset.
    trilinear : int {0,1}
        Use trilinear interpolation, default 1, gives smoother rendering. If
        you want faster interpolation use 0 (Nearest).
    maptype : int {0,1}
        The maptype is a very important parameter which affects the
        raycasting algorithm in use for the rendering.
        The options are:
        If 0 then vtkVolumeTextureMapper2D is used.
        If 1 then vtkVolumeRayCastFunction is used.
    iso : int {0,1}
        If iso is 1 and maptype is 1 then we use
        ``vtkVolumeRayCastIsosurfaceFunction`` which generates an isosurface at
        the predefined iso_thr value. If iso is 0 and maptype is 1
        ``vtkVolumeRayCastCompositeFunction`` is used.
    iso_thr : int
        If iso is 1 then this threshold in the volume defines the value
        which will be used to create the isosurface.
    opacitymap : (2, 2) ndarray
        The opacity map assigns a transparency coefficient to every point in
        the volume. The default value uses the histogram of the volume to
        calculate the opacitymap.
    colormap : (4, 4) ndarray
        The color map assigns a color value to every point in the volume.
        When None from the histogram it uses a red-blue colormap.

    Returns
    -------
    v : vtkVolume
        Volume.

    Notes
    --------
    What is the difference between TextureMapper2D and RayCastFunction? Coming
    soon... See VTK user's guide [book] & The Visualization Toolkit [book] and
    VTK's online documentation & online docs.

    What is the difference between RayCastIsosurfaceFunction and
    RayCastCompositeFunction? Coming soon... See VTK user's guide [book] &
    The Visualization Toolkit [book] and VTK's online documentation &
    online docs.

    What about trilinear interpolation?
    Coming soon... well when time permits really ... :-)

    Examples
    --------
    First example random points.

    >>> from dipy.viz import fvtk
    >>> import numpy as np
    >>> vol=100*np.random.rand(100,100,100)
    >>> vol=vol.astype('uint8')
    >>> vol.min(), vol.max()
    (0, 99)
    >>> r = fvtk.ren()
    >>> v = fvtk.volume(vol)
    >>> fvtk.add(r,v)
    >>> #fvtk.show(r)

    Second example with a more complicated function

    >>> from dipy.viz import fvtk
    >>> import numpy as np
    >>> x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
    >>> s = np.sin(x*y*z)/(x*y*z)
    >>> r = fvtk.ren()
    >>> v = fvtk.volume(s)
    >>> fvtk.add(r,v)
    >>> #fvtk.show(r)

    If you find this function too complicated you can always use mayavi.
    Please do not forget to use the -wthread switch in ipython if you are
    running mayavi.

    from enthought.mayavi import mlab
    import numpy as np
    x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
    s = np.sin(x*y*z)/(x*y*z)
    mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
    mlab.show()

    More mayavi demos are available here:
    http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/mlab.html
    '''
    if vol.ndim != 3:
        raise ValueError('3d numpy arrays only please')
    if info:
        print('Datatype', vol.dtype, 'converted to uint8')
    # rescale intensities into the 0-255 ubyte range vtk expects
    vol = np.interp(vol, [vol.min(), vol.max()], [0, 255])
    vol = vol.astype('uint8')
    if opacitymap is None:
        # ``res`` holds the histogram bin edges, which serve as the
        # intensity breakpoints of the piecewise opacity function
        bin, res = np.histogram(vol.ravel())
        res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
        opacitymap = np.vstack((res, res2)).T
        opacitymap = opacitymap.astype('float32')
        '''
        opacitymap=np.array([[ 0.0, 0.0],
                          [50.0, 0.9]])
        '''
    if info:
        print('opacitymap', opacitymap)
    if colormap is None:
        # default red-blue ramp derived from the intensity histogram edges
        bin, res = np.histogram(vol.ravel())
        res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
        zer = np.zeros(res2.shape)
        colormap = np.vstack((res, res2, zer, res2[::-1])).T
        colormap = colormap.astype('float32')
        '''
        colormap=np.array([[0.0, 0.5, 0.0, 0.0],
                                        [64.0, 1.0, 0.5, 0.5],
                                        [128.0, 0.9, 0.2, 0.3],
                                        [196.0, 0.81, 0.27, 0.1],
                                        [255.0, 0.5, 0.5, 0.5]])
        '''
    if info:
        print('colormap', colormap)
    # copy the numpy volume voxel-by-voxel into a vtkImageData
    im = vtk.vtkImageData()
    im.SetScalarTypeToUnsignedChar()
    im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
    # im.SetOrigin(0,0,0)
    # im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
    im.AllocateScalars()
    for i in range(vol.shape[0]):
        for j in range(vol.shape[1]):
            for k in range(vol.shape[2]):
                im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
    if affine is not None:
        # resample the image through the given 4x4 affine
        aff = vtk.vtkMatrix4x4()
        aff.DeepCopy((affine[0, 0], affine[0, 1], affine[0, 2], affine[0, 3], affine[1, 0], affine[1, 1], affine[1, 2], affine[1, 3], affine[2, 0], affine[
            2, 1], affine[2, 2], affine[2, 3], affine[3, 0], affine[3, 1], affine[3, 2], affine[3, 3]))
        # aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],0,affine[1,0],affine[1,1],affine[1,2],0,affine[2,0],affine[2,1],affine[2,2],0,affine[3,0],affine[3,1],affine[3,2],1))
        # aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],127.5,affine[1,0],affine[1,1],affine[1,2],-127.5,affine[2,0],affine[2,1],affine[2,2],-127.5,affine[3,0],affine[3,1],affine[3,2],1))
        reslice = vtk.vtkImageReslice()
        if major_version <= 5:
            reslice.SetInput(im)
        else:
            reslice.SetInputData(im)
        # reslice.SetOutputDimensionality(2)
        # reslice.SetOutputOrigin(127,-145,147)
        reslice.SetResliceAxes(aff)
        # reslice.SetOutputOrigin(-127,-127,-127)
        # reslice.SetOutputExtent(-127,128,-127,128,-127,128)
        # reslice.SetResliceAxesOrigin(0,0,0)
        # print 'Get Reslice Axes Origin ', reslice.GetResliceAxesOrigin()
        # reslice.SetOutputSpacing(1.0,1.0,1.0)
        reslice.SetInterpolationModeToLinear()
        # reslice.UpdateWholeExtent()
        # print 'reslice GetOutputOrigin', reslice.GetOutputOrigin()
        # print 'reslice GetOutputExtent',reslice.GetOutputExtent()
        # print 'reslice GetOutputSpacing',reslice.GetOutputSpacing()
        changeFilter = vtk.vtkImageChangeInformation()
        if major_version <= 5:
            changeFilter.SetInput(reslice.GetOutput())
        else:
            changeFilter.SetInputData(reslice.GetOutput())
        # changeFilter.SetInput(im)
        if center_origin:
            changeFilter.SetOutputOrigin(
                -vol.shape[0] / 2.0 + 0.5, -vol.shape[1] / 2.0 + 0.5, -vol.shape[2] / 2.0 + 0.5)
            print('ChangeFilter ', changeFilter.GetOutputOrigin())
    # transfer functions built from the (intensity, value) rows above
    opacity = vtk.vtkPiecewiseFunction()
    for i in range(opacitymap.shape[0]):
        opacity.AddPoint(opacitymap[i, 0], opacitymap[i, 1])
    color = vtk.vtkColorTransferFunction()
    for i in range(colormap.shape[0]):
        color.AddRGBPoint(
            colormap[i, 0], colormap[i, 1], colormap[i, 2], colormap[i, 3])
    if(maptype == 0):
        # 2D texture-mapped volume rendering
        property = vtk.vtkVolumeProperty()
        property.SetColor(color)
        property.SetScalarOpacity(opacity)
        if trilinear:
            property.SetInterpolationTypeToLinear()
        else:
            property.SetInterpolationTypeToNearest()
        if info:
            print('mapper VolumeTextureMapper2D')
        mapper = vtk.vtkVolumeTextureMapper2D()
        if affine is None:
            if major_version <= 5:
                mapper.SetInput(im)
            else:
                mapper.SetInputData(im)
        else:
            if major_version <= 5:
                mapper.SetInput(changeFilter.GetOutput())
            else:
                mapper.SetInputData(changeFilter.GetOutput())
    if (maptype == 1):
        # ray-cast volume rendering (isosurface or composite)
        property = vtk.vtkVolumeProperty()
        property.SetColor(color)
        property.SetScalarOpacity(opacity)
        property.ShadeOn()
        if trilinear:
            property.SetInterpolationTypeToLinear()
        else:
            property.SetInterpolationTypeToNearest()
        if iso:
            isofunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
            isofunc.SetIsoValue(iso_thr)
        else:
            compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
        if info:
            print('mapper VolumeRayCastMapper')
        mapper = vtk.vtkVolumeRayCastMapper()
        if iso:
            mapper.SetVolumeRayCastFunction(isofunc)
            if info:
                print('Isosurface')
        else:
            mapper.SetVolumeRayCastFunction(compositeFunction)
            # mapper.SetMinimumImageSampleDistance(0.2)
            if info:
                print('Composite')
        if affine is None:
            if major_version <= 5:
                mapper.SetInput(im)
            else:
                mapper.SetInputData(im)
        else:
            # mapper.SetInput(reslice.GetOutput())
            if major_version <= 5:
                mapper.SetInput(changeFilter.GetOutput())
            else:
                mapper.SetInputData(changeFilter.GetOutput())
            # Return mid position in world space
            # im2=reslice.GetOutput()
            # index=im2.FindPoint(vol.shape[0]/2.0,vol.shape[1]/2.0,vol.shape[2]/2.0)
            # print 'Image Getpoint ' , im2.GetPoint(index)
    volum = vtk.vtkVolume()
    volum.SetMapper(mapper)
    volum.SetProperty(property)
    if info:
        print('Origin', volum.GetOrigin())
        print('Orientation', volum.GetOrientation())
        print('OrientationW', volum.GetOrientationWXYZ())
        print('Position', volum.GetPosition())
        print('Center', volum.GetCenter())
        print('Get XRange', volum.GetXRange())
        print('Get YRange', volum.GetYRange())
        print('Get ZRange', volum.GetZRange())
        print('Volume data type', vol.dtype)
    return volum
def contour(vol, voxsz=(1.0, 1.0, 1.0), affine=None, levels=None,
            colors=None, opacities=None):
    """ Take a volume and draw surface contours for any any number of
    thresholds (levels) where every contour has its own color and opacity

    Parameters
    ----------
    vol : (N, M, K) ndarray
        An array representing the volumetric dataset for which we will draw
        some beautiful contours .
    voxsz : (3,) array_like
        Voxel size.
    affine : None
        Not used.
    levels : array_like, optional
        Sequence of thresholds for the contours taken from image values needs
        to be same datatype as `vol`. Defaults to ``[50]``.
    colors : (N, 3) ndarray, optional
        RGB values in [0,1]. Defaults to a single red contour.
    opacities : array_like, optional
        Opacities of contours. Defaults to ``[0.5]``.

    Returns
    -------
    vtkAssembly

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.viz import fvtk
    >>> A=np.zeros((10,10,10))
    >>> A[3:-3,3:-3,3:-3]=1
    >>> r=fvtk.ren()
    >>> fvtk.add(r,fvtk.contour(A,levels=[1]))
    >>> #fvtk.show(r)
    """
    # BUGFIX: mutable default arguments replaced by None sentinels; the
    # previous defaults shared one list/array object across all calls.
    if levels is None:
        levels = [50]
    if colors is None:
        colors = [np.array([1.0, 0.0, 0.0])]
    if opacities is None:
        opacities = [0.5]
    # copy the numpy volume voxel-by-voxel into a vtkImageData
    im = vtk.vtkImageData()
    im.SetScalarTypeToUnsignedChar()
    im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
    # im.SetOrigin(0,0,0)
    # im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
    im.AllocateScalars()
    for i in range(vol.shape[0]):
        for j in range(vol.shape[1]):
            for k in range(vol.shape[2]):
                im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
    ass = vtk.vtkAssembly()
    # one isosurface actor per requested level
    for (i, l) in enumerate(levels):
        # print levels
        skinExtractor = vtk.vtkContourFilter()
        if major_version <= 5:
            skinExtractor.SetInput(im)
        else:
            skinExtractor.SetInputData(im)
        skinExtractor.SetValue(0, l)
        skinNormals = vtk.vtkPolyDataNormals()
        skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
        skinNormals.SetFeatureAngle(60.0)
        skinMapper = vtk.vtkPolyDataMapper()
        skinMapper.SetInputConnection(skinNormals.GetOutputPort())
        skinMapper.ScalarVisibilityOff()
        skin = vtk.vtkActor()
        skin.SetMapper(skinMapper)
        skin.GetProperty().SetOpacity(opacities[i])
        # print colors[i]
        skin.GetProperty().SetColor(colors[i][0], colors[i][1], colors[i][2])
        # skin.Update()
        ass.AddPart(skin)
        del skin
        del skinMapper
        del skinExtractor
    return ass
# map historical lowercase colormap names onto the capitalized names
# that matplotlib's get_cmap expects (backwards compatibility)
lowercase_cm_name = {'blues':'Blues', 'accent':'Accent'}
def create_colormap(v, name='jet', auto=True):
    """Map the values in the 1d array `v` onto RGB colors of a named colormap.

    Parameters
    ----------
    v : (N,) array
        vector of values to be mapped in RGB colors according to colormap
    name : str
        Name of the colormap. Currently implemented: 'jet', 'blues',
        'accent', 'bone' and matplotlib colormaps if you have matplotlib
        installed.
    auto : bool
        If True, `v` is rescaled from [v.min(), v.max()] to [0, 1];
        otherwise `v` is clipped to [0, 1].

    Returns
    -------
    rgb : (N, 3) array

    Notes
    -----
    Dipy supports a few colormaps for those who do not use Matplotlib, for
    more colormaps consider downloading Matplotlib.
    """
    if v.ndim > 1:
        raise ValueError('This function works only with 1d arrays. Use ravel()')
    if auto:
        v = np.interp(v, [v.min(), v.max()], [0, 1])
    else:
        v = np.clip(v, 0, 1)
    # For backwards compatibility with lowercase names
    colormap = get_cmap(lowercase_cm_name.get(name) or name)
    if colormap is None:
        raise ValueError("Colormap '%s' is not yet implemented " % name)
    # drop the alpha channel of the RGBA output
    rgba = colormap(v)
    return rgba[:, :3].copy()
def _makeNd(array, ndim):
"""Pads as many 1s at the beginning of array's shape as are need to give
array ndim dimensions."""
new_shape = (1,) * (ndim - array.ndim) + array.shape
return array.reshape(new_shape)
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet',
                 scale=2.2, norm=True, radial_scale=True):
    """Plot many morphed spherical functions simultaneously.

    Parameters
    ----------
    sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray
        Values on the sphere.
    sphere : Sphere
    image : None,
        Not yet supported.
    colormap : None or 'jet'
        If None then no color is used.
    scale : float,
        Distance between spheres.
    norm : bool,
        Normalize `sphere_values`.
    radial_scale : bool,
        Scale sphere points according to odf values.

    Returns
    -------
    actor : vtkActor
        Spheres.

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r = fvtk.ren()
    >>> odfs = np.ones((5, 5, 724))
    >>> odfs[..., 0] = 2.
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere))
    >>> #fvtk.show(r)
    """
    sphere_values = np.asarray(sphere_values)
    if sphere_values.ndim > 4:
        raise ValueError("Wrong shape")
    # Pad to 4 dims: always treat the data as a (X, Y, Z, M) grid.
    sphere_values = _makeNd(sphere_values, 4)
    grid_shape = np.array(sphere_values.shape[:3])
    faces = np.asarray(sphere.faces, dtype=int)
    vertices = sphere.vertices
    if sphere_values.shape[-1] != sphere.vertices.shape[0]:
        msg = 'Sphere.vertices.shape[0] should be the same as the '
        msg += 'last dimensions of sphere_values i.e. sphere_values.shape[-1]'
        raise ValueError(msg)
    list_sq = []
    list_cols = []
    # One morphed sphere per grid voxel: deform the unit sphere by the
    # per-vertex values, then offset it to the voxel's position on the grid.
    for ijk in np.ndindex(*grid_shape):
        m = sphere_values[ijk].copy()
        if norm:
            m /= abs(m).max()
        if radial_scale:
            # Radially scale each vertex by its function value.
            xyz = vertices.T * m
        else:
            xyz = vertices.T.copy()
        # Center the sphere at its voxel, spaced `scale` apart.
        xyz += scale * (ijk - grid_shape / 2.)[:, None]
        xyz = xyz.T
        list_sq.append(xyz)
        if colormap is not None:
            cols = create_colormap(m, colormap)
            # Convert [0, 1] floats to 8-bit RGB for VTK.
            cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte')
            list_cols.append(cols)
    # Assemble all spheres into a single polydata; vertex ids of sphere k are
    # offset by k * (vertices per sphere) so the shared face list can be reused.
    points = vtk.vtkPoints()
    triangles = vtk.vtkCellArray()
    if colormap is not None:
        colors = vtk.vtkUnsignedCharArray()
        colors.SetNumberOfComponents(3)
        colors.SetName("Colors")
    for k in xrange(len(list_sq)):
        xyz = list_sq[k]
        if colormap is not None:
            cols = list_cols[k]
        for i in xrange(xyz.shape[0]):
            points.InsertNextPoint(*xyz[i])
            if colormap is not None:
                colors.InsertNextTuple3(*cols[i])
        for j in xrange(faces.shape[0]):
            triangle = vtk.vtkTriangle()
            triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
            triangles.InsertNextCell(triangle)
            del triangle
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetPolys(triangles)
    if colormap is not None:
        polydata.GetPointData().SetScalars(colors)
    polydata.Modified()
    mapper = vtk.vtkPolyDataMapper()
    # VTK 6 replaced SetInput with SetInputData.
    if major_version <= 5:
        mapper.SetInput(polydata)
    else:
        mapper.SetInputData(polydata)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
    """ Visualize peak directions as given from ``peaks_from_model``

    Every peak is drawn as a symmetric line segment through its voxel
    center, optionally scaled by the corresponding peak value.

    Parameters
    ----------
    peaks_dirs : ndarray
        Peak directions. The shape of the array can be (M, 3) or (X, M, 3)
        or (X, Y, M, 3) or (X, Y, Z, M, 3)
    peaks_values : ndarray, optional
        Peak values. The shape of the array can be (M, ) or (X, M) or
        (X, Y, M) or (X, Y, Z, M)
    scale : float
        Distance between spheres
    colors : ndarray or tuple
        Peak colors

    Returns
    -------
    vtkActor

    See Also
    --------
    dipy.viz.fvtk.sphere_funcs
    """
    peaks_dirs = np.asarray(peaks_dirs)
    if peaks_dirs.ndim > 5:
        raise ValueError("Wrong shape")
    # Normalize to a full (X, Y, Z, M, 3) grid of directions.
    peaks_dirs = _makeNd(peaks_dirs, 5)
    if peaks_values is not None:
        peaks_values = _makeNd(peaks_values, 4)
    grid_dims = np.array(peaks_dirs.shape[:3])
    n_peaks = peaks_dirs.shape[-2]
    segments = []
    for voxel in np.ndindex(*grid_dims):
        # Voxel center on the display grid, spaced `scale` apart.
        center = (scale * (voxel - grid_dims / 2.)[:, None]).T
        for p in range(n_peaks):
            if peaks_values is not None:
                magnitude = peaks_values[voxel][p]
            else:
                magnitude = 1.
            direction = peaks_dirs[voxel][p] * magnitude
            # Symmetric segment: center - d to center + d.
            segments.append(np.vstack((center - direction,
                                       center + direction)))
    return line(segments, colors)
def tensor(evals, evecs, scalar_colors=None, sphere=None, scale=2.2, norm=True):
    """Plot many tensors as ellipsoids simultaneously.

    Parameters
    ----------
    evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
        eigenvalues
    evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray
        eigenvectors
    scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
        RGB colors used to show the tensors
        Default None, color the ellipsoids using ``color_fa``
    sphere : Sphere,
        this sphere will be transformed to the tensor ellipsoid
        Default is None which uses a symmetric sphere with 724 points.
    scale : float,
        distance between ellipsoids.
    norm : boolean,
        Normalize `evals`.

    Returns
    -------
    actor : vtkActor
        Ellipsoids

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r = fvtk.ren()
    >>> evals = np.array([1.4, .35, .35]) * 10 ** (-3)
    >>> evecs = np.eye(3)
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere))
    >>> #fvtk.show(r)
    """
    evals = np.asarray(evals)
    if evals.ndim > 4:
        raise ValueError("Wrong shape")
    # Pad to full grid shapes: (X, Y, Z, 3) evals and (X, Y, Z, 3, 3) evecs.
    evals = _makeNd(evals, 4)
    evecs = _makeNd(evecs, 5)
    grid_shape = np.array(evals.shape[:3])
    if sphere is None:
        from dipy.data import get_sphere
        sphere = get_sphere('symmetric724')
    faces = np.asarray(sphere.faces, dtype=int)
    vertices = sphere.vertices
    colors = vtk.vtkUnsignedCharArray()
    colors.SetNumberOfComponents(3)
    colors.SetName("Colors")
    if scalar_colors is None:
        # Default: color by FA-weighted principal direction.
        from dipy.reconst.dti import color_fa, fractional_anisotropy
        cfa = color_fa(fractional_anisotropy(evals), evecs)
    else:
        cfa = scalar_colors
    list_sq = []
    list_cols = []
    for ijk in ndindex(grid_shape):
        ea = evals[ijk]
        if norm:
            # NOTE(review): `ea` is a view into `evals`, so this in-place
            # division also mutates the caller-visible padded `evals` array
            # (and `cfa` was computed from the un-normalized values) —
            # presumably intentional for display, but verify.
            ea /= ea.max()
        ea = np.diag(ea.copy())
        ev = evecs[ijk].copy()
        # Deform the unit sphere into the tensor ellipsoid: V . diag(e) . X.
        xyz = np.dot(ev, np.dot(ea, vertices.T))
        # Offset the ellipsoid to its voxel position on the display grid.
        xyz += scale * (ijk - grid_shape / 2.)[:, None]
        xyz = xyz.T
        list_sq.append(xyz)
        # One constant RGB color per ellipsoid, scaled from [0, 1] to 8-bit.
        acolor = np.zeros(xyz.shape)
        acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255])
        list_cols.append(acolor.astype('ubyte'))
    # Merge all ellipsoids into one polydata; point ids of ellipsoid k are
    # offset by k * (points per ellipsoid) so the shared face list is reused.
    points = vtk.vtkPoints()
    triangles = vtk.vtkCellArray()
    for k in xrange(len(list_sq)):
        xyz = list_sq[k]
        cols = list_cols[k]
        for i in xrange(xyz.shape[0]):
            points.InsertNextPoint(*xyz[i])
            colors.InsertNextTuple3(*cols[i])
        for j in xrange(faces.shape[0]):
            triangle = vtk.vtkTriangle()
            triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
            triangles.InsertNextCell(triangle)
            del triangle
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetPolys(triangles)
    polydata.GetPointData().SetScalars(colors)
    polydata.Modified()
    mapper = vtk.vtkPolyDataMapper()
    # VTK 6 replaced SetInput with SetInputData.
    if major_version <= 5:
        mapper.SetInput(polydata)
    else:
        mapper.SetInputData(polydata)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    return actor
def slicer(vol, voxsz=(1.0, 1.0, 1.0), plane_i=[0], plane_j=None,
           plane_k=None, outline=True):
    """ Slice a 3D volume

    Parameters
    ----------
    vol : array, shape (N, M, K)
        An array representing the volumetric dataset that we want to slice
    voxsz : sequence of 3 floats
        Voxel size.
    plane_i : sequence of ints
        show plane or planes along the first dimension.
        NOTE(review): mutable default argument; harmless here because the
        list is never mutated, but a tuple default would be safer.
    plane_j : sequence of ints
        show plane or planes along the second dimension
    plane_k : sequence of ints
        show plane or planes along the third(last) dimension
    outline : bool
        if True (default) a small outline is drawn around the slices

    Returns
    -------
    vtkAssembly
        Assembly of image-actor slices (plus the outline if requested).

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.viz import fvtk
    >>> x, y, z = np.ogrid[-10:10:80j, -10:10:80j, -10:10:80j]
    >>> s = np.sin(x * y * z) / (x * y * z)
    >>> r = fvtk.ren()
    >>> fvtk.add(r, fvtk.slicer(s, plane_i=[0, 5]))
    >>> #fvtk.show(r)
    """
    if plane_i is None:
        plane_i = []
    if plane_j is None:
        plane_j = []
    if plane_k is None:
        plane_k = []
    if vol.ndim != 3:
        raise ValueError("vol has to be a 3d array")
    # Rescale intensities to 8-bit for the texture pipeline.
    vol = np.interp(vol, xp=[vol.min(), vol.max()], fp=[0, 255])
    vol = vol.astype('uint8')
    im = vtk.vtkImageData()
    # NOTE(review): SetScalarTypeToUnsignedChar/AllocateScalars without
    # arguments are VTK<=5 APIs, yet they are called unconditionally while
    # the rest of this function guards on major_version — confirm on VTK>=6.
    im.SetScalarTypeToUnsignedChar()
    I, J, K = vol.shape[:3]
    im.SetDimensions(I, J, K)
    # im.SetOrigin(0,0,0)
    # NOTE(review): spacing components are passed in the order
    # (voxsz[2], voxsz[0], voxsz[1]) — presumably deliberate axis remapping;
    # verify against callers.
    im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
    im.AllocateScalars()
    # copy data
    for i in range(vol.shape[0]):
        for j in range(vol.shape[1]):
            for k in range(vol.shape[2]):
                im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
    # An outline provides context around the data.
    outlineData = vtk.vtkOutlineFilter()
    if major_version <= 5:
        outlineData.SetInput(im)
    else:
        outlineData.SetInputData(im)
    mapOutline = vtk.vtkPolyDataMapper()
    mapOutline.SetInputConnection(outlineData.GetOutputPort())
    outline_ = vtk.vtkActor()
    outline_.SetMapper(mapOutline)
    outline_.GetProperty().SetColor(1, 0, 0)
    # Now we are creating three orthogonal planes passing through the
    # volume. Each plane uses a different texture map and therefore has
    # diferent coloration.
    # Start by creatin a black/white lookup table.
    lut = vtk.vtkLookupTable()
    lut.SetTableRange(vol.min(), vol.max())
    lut.SetSaturationRange(0, 0)
    lut.SetHueRange(0, 0)
    lut.SetValueRange(0, 1)
    lut.SetRampToLinear()
    lut.Build()
    x1, x2, y1, y2, z1, z2 = im.GetExtent()
    # print x1,x2,y1,y2,z1,z2
    # Create the first of the three planes. The filter vtkImageMapToColors
    # maps the data through the corresponding lookup table created above.
    # The vtkImageActor is a type of vtkProp and conveniently displays an
    # image on a single quadrilateral plane. It does this using texture
    # mapping and as a result is quite fast. (Note: the input image has to
    # be unsigned char values, which the vtkImageMapToColors produces.)
    # Note also that by specifying the DisplayExtent, the pipeline
    # requests data of this extent and the vtkImageMapToColors only
    # processes a slice of data.
    planeColors = vtk.vtkImageMapToColors()
    # saggitalColors.SetInputConnection(im.GetOutputPort())
    if major_version <= 5:
        planeColors.SetInput(im)
    else:
        planeColors.SetInputData(im)
    planeColors.SetLookupTable(lut)
    planeColors.Update()
    # One vtkImageActor per requested slice index along each axis; the
    # DisplayExtent pins each actor to a single plane of the colored volume.
    saggitals = []
    for x in plane_i:
        saggital = vtk.vtkImageActor()
        if major_version <= 5:
            saggital.SetInput(planeColors.GetOutput())
        else:
            saggital.SetInputData(planeColors.GetOutput())
        saggital.SetDisplayExtent(x, x, y1, y2, z1, z2)
        saggitals.append(saggital)
    axials = []
    for z in plane_k:
        axial = vtk.vtkImageActor()
        if major_version <= 5:
            axial.SetInput(planeColors.GetOutput())
        else:
            axial.SetInputData(planeColors.GetOutput())
        axial.SetDisplayExtent(x1, x2, y1, y2, z, z)
        axials.append(axial)
    coronals = []
    for y in plane_j:
        coronal = vtk.vtkImageActor()
        if major_version <= 5:
            coronal.SetInput(planeColors.GetOutput())
        else:
            coronal.SetInputData(planeColors.GetOutput())
        coronal.SetDisplayExtent(x1, x2, y, y, z1, z2)
        coronals.append(coronal)
    # Group every slice (and the optional outline) into a single prop.
    assem = vtk.vtkAssembly()
    for sag in saggitals:
        assem.AddPart(sag)
    for ax in axials:
        assem.AddPart(ax)
    for cor in coronals:
        assem.AddPart(cor)
    if outline:
        assem.AddPart(outline_)
    return assem
def camera(ren, pos=None, focal=None, viewup=None, verbose=True):
    """ Change the active camera

    Any of `pos`, `focal` and `viewup` left as None is not touched.

    Parameters
    ----------
    ren : vtkRenderer
    pos : tuple
        (x, y, z) position of the camera
    focal : tuple
        (x, y, z) focal point
    viewup : tuple
        (x, y, z) viewup vector
    verbose : bool
        show information about the camera

    Returns
    -------
    vtkCamera
    """
    active = ren.GetActiveCamera()
    if verbose:
        print('Camera Position (%.2f,%.2f,%.2f)' % active.GetPosition())
        print('Camera Focal Point (%.2f,%.2f,%.2f)' % active.GetFocalPoint())
        print('Camera View Up (%.2f,%.2f,%.2f)' % active.GetViewUp())
    # Apply only the settings that were supplied.
    if pos is not None:
        ren.GetActiveCamera().SetPosition(*pos)
    if focal is not None:
        ren.GetActiveCamera().SetFocalPoint(*focal)
    if viewup is not None:
        ren.GetActiveCamera().SetViewUp(*viewup)
    cam = ren.GetActiveCamera()
    changed = (pos is not None) or (focal is not None) or (viewup is not None)
    if changed and verbose:
        print('-------------------------------------')
        print('Camera New Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
        print('Camera New Focal Point (%.2f,%.2f,%.2f)' %
              cam.GetFocalPoint())
        print('Camera New View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
    return cam
def show(ren, title='Dipy', size=(300, 300), png_magnify=1):
    """ Show window

    Opens an interactive VTK render window for `ren` and blocks until the
    user quits. Uses the module-level ``picker`` object.

    Notes
    -----
    To save a screenshot press's' and check your current directory
    for ``fvtk.png``.

    Parameters
    ------------
    ren : vtkRenderer() object
        As returned from function ``ren()``.
    title : string
        A string for the window title bar.
    size : (int, int)
        ``(width, height)`` of the window
    png_magnify : int
        Number of times to magnify the screenshot.

    Notes
    -----
    If you want to:
    * navigate in the the 3d world use the left - middle - right mouse buttons
    * reset the screen press 'r'
    * save a screenshot press 's'
    * quit press 'q'

    See also
    ---------
    dipy.viz.fvtk.record

    Examples
    ----------
    >>> import numpy as np
    >>> from dipy.viz import fvtk
    >>> r=fvtk.ren()
    >>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
    >>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
    >>> c=fvtk.line(lines,colors)
    >>> fvtk.add(r,c)
    >>> l=fvtk.label(r)
    >>> fvtk.add(r,l)
    >>> #fvtk.show(r)

    See also
    ----------
    dipy.viz.fvtk.record
    """
    ren.ResetCamera()
    window = vtk.vtkRenderWindow()
    window.AddRenderer(ren)
    # window.SetAAFrames(6)
    window.SetWindowName(title)
    window.SetSize(size[0], size[1])
    style = vtk.vtkInteractorStyleTrackballCamera()
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(window)
    # `picker` is a module-level object defined elsewhere in this file.
    iren.SetPicker(picker)

    def key_press(obj, event):
        # Screenshot handler: 's' renders a (possibly magnified) frame to PNG.
        key = obj.GetKeySym()
        if key == 's' or key == 'S':
            print('Saving image...')
            renderLarge = vtk.vtkRenderLargeImage()
            # VTK 6 replaced SetInput with SetInputData.
            if major_version <= 5:
                renderLarge.SetInput(ren)
            else:
                renderLarge.SetInputData(ren)
            renderLarge.SetMagnification(png_magnify)
            renderLarge.Update()
            writer = vtk.vtkPNGWriter()
            writer.SetInputConnection(renderLarge.GetOutputPort())
            writer.SetFileName('fvtk.png')
            writer.Write()
            print('Look for fvtk.png in your current working directory.')
    iren.AddObserver('KeyPressEvent', key_press)
    iren.SetInteractorStyle(style)
    iren.Initialize()
    picker.Pick(85, 126, 0, ren)
    window.Render()
    # Blocks here until the interactor loop exits.
    iren.Start()
    # window.RemoveAllObservers()
    # ren.SetRenderWindow(None)
    # Detach the renderer so the window can be garbage collected cleanly.
    window.RemoveRenderer(ren)
    ren.SetRenderWindow(None)
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
           out_path=None, path_numbering=False, n_frames=1, az_ang=10,
           magnification=1, size=(300, 300), verbose=False):
    ''' This will record a video of your scene

    Records a video as a series of ``.png`` files of your scene by rotating the
    azimuth angle az_angle in every frame.

    Parameters
    -----------
    ren : vtkRenderer() object
        as returned from function ren()
    cam_pos : None or sequence (3,), optional
        camera position
    cam_focal : None or sequence (3,), optional
        camera focal point
    cam_view : None or sequence (3,), optional
        camera view up
    out_path : str, optional
        output directory for the frames; if None and `path_numbering` is
        False a single frame is written to ``fvtk.png``
    path_numbering : bool
        when recording it changes out_path ot out_path + str(frame number)
    n_frames : int, optional
        number of frames to save, default 1
    az_ang : float, optional
        azimuthal angle of camera rotation.
    magnification : int, optional
        how much to magnify the saved frame
    size : (int, int), optional
        ``(width, height)`` of the render window
    verbose : bool, optional
        print the camera settings before recording

    Examples
    ---------
    >>> from dipy.viz import fvtk
    >>> r=fvtk.ren()
    >>> a=fvtk.axes()
    >>> fvtk.add(r,a)
    >>> #uncomment below to record
    >>> #fvtk.record(r)
    >>> #check for new images in current directory
    '''
    if ren is None:
        ren = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    renWin.SetSize(size[0], size[1])
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # ren.GetActiveCamera().Azimuth(180)
    ren.ResetCamera()
    renderLarge = vtk.vtkRenderLargeImage()
    # VTK 6 replaced SetInput with SetInputData (matches usage elsewhere
    # in this file).
    if major_version <= 5:
        renderLarge.SetInput(ren)
    else:
        renderLarge.SetInputData(ren)
    renderLarge.SetMagnification(magnification)
    renderLarge.Update()
    writer = vtk.vtkPNGWriter()
    ang = 0
    # Apply any explicit camera settings before recording.
    if cam_pos is not None:
        cx, cy, cz = cam_pos
        ren.GetActiveCamera().SetPosition(cx, cy, cz)
    if cam_focal is not None:
        fx, fy, fz = cam_focal
        ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
    if cam_view is not None:
        ux, uy, uz = cam_view
        ren.GetActiveCamera().SetViewUp(ux, uy, uz)
    cam = ren.GetActiveCamera()
    if verbose:
        print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
        print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
        print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
    for i in range(n_frames):
        # Azimuth() is incremental; ang is 0 for the first frame and az_ang
        # afterwards, so the scene rotates by az_ang between saved frames.
        ren.GetActiveCamera().Azimuth(ang)
        renderLarge = vtk.vtkRenderLargeImage()
        # FIX: the original called SetInput unconditionally here, unlike the
        # two other vtkRenderLargeImage usages in this file which guard on
        # the VTK major version.
        if major_version <= 5:
            renderLarge.SetInput(ren)
        else:
            renderLarge.SetInputData(ren)
        renderLarge.SetMagnification(magnification)
        renderLarge.Update()
        writer.SetInputConnection(renderLarge.GetOutputPort())
        # filename='/tmp/'+str(3000000+i)+'.png'
        if path_numbering:
            if out_path is None:
                filename = str(1000000 + i) + '.png'
            else:
                filename = out_path + str(1000000 + i) + '.png'
        else:
            # FIX: fall back to 'fvtk.png' so the documented default call
            # record(r) works; previously SetFileName received None here.
            filename = out_path if out_path is not None else 'fvtk.png'
        writer.SetFileName(filename)
        writer.Write()
        ang = +az_ang
if __name__ == "__main__":
    # No standalone behaviour; this module is meant to be imported.
    pass
| [
"numpy.ndindex",
"dipy.data.get_cmap",
"numpy.asarray",
"dipy.data.get_sphere",
"numpy.zeros",
"numpy.clip",
"dipy.utils.six.moves.xrange",
"dipy.core.ndindex.ndindex",
"numpy.array",
"numpy.interp",
"numpy.dot",
"dipy.reconst.dti.fractional_anisotropy",
"numpy.vstack"
] | [((4708, 4744), 'numpy.asarray', 'np.asarray', (['colors'], {'dtype': 'np.float32'}), '(colors, dtype=np.float32)\n', (4718, 4744), True, 'import numpy as np\n'), ((6770, 6788), 'numpy.asarray', 'np.asarray', (['colors'], {}), '(colors)\n', (6780, 6788), True, 'import numpy as np\n'), ((10094, 10112), 'numpy.asarray', 'np.asarray', (['colors'], {}), '(colors)\n', (10104, 10112), True, 'import numpy as np\n'), ((32062, 32079), 'dipy.data.get_cmap', 'get_cmap', (['newname'], {}), '(newname)\n', (32070, 32079), True, 'import dipy.data.get_cmap as get_cmap\n'), ((33531, 33556), 'numpy.asarray', 'np.asarray', (['sphere_values'], {}), '(sphere_values)\n', (33541, 33556), True, 'import numpy as np\n'), ((33692, 33725), 'numpy.array', 'np.array', (['sphere_values.shape[:3]'], {}), '(sphere_values.shape[:3])\n', (33700, 33725), True, 'import numpy as np\n'), ((33738, 33773), 'numpy.asarray', 'np.asarray', (['sphere.faces'], {'dtype': 'int'}), '(sphere.faces, dtype=int)\n', (33748, 33773), True, 'import numpy as np\n'), ((34096, 34119), 'numpy.ndindex', 'np.ndindex', (['*grid_shape'], {}), '(*grid_shape)\n', (34106, 34119), True, 'import numpy as np\n'), ((36598, 36620), 'numpy.asarray', 'np.asarray', (['peaks_dirs'], {}), '(peaks_dirs)\n', (36608, 36620), True, 'import numpy as np\n'), ((36829, 36859), 'numpy.array', 'np.array', (['peaks_dirs.shape[:3]'], {}), '(peaks_dirs.shape[:3])\n', (36837, 36859), True, 'import numpy as np\n'), ((36896, 36919), 'numpy.ndindex', 'np.ndindex', (['*grid_shape'], {}), '(*grid_shape)\n', (36906, 36919), True, 'import numpy as np\n'), ((38579, 38596), 'numpy.asarray', 'np.asarray', (['evals'], {}), '(evals)\n', (38589, 38596), True, 'import numpy as np\n'), ((38738, 38763), 'numpy.array', 'np.array', (['evals.shape[:3]'], {}), '(evals.shape[:3])\n', (38746, 38763), True, 'import numpy as np\n'), ((38885, 38920), 'numpy.asarray', 'np.asarray', (['sphere.faces'], {'dtype': 'int'}), '(sphere.faces, dtype=int)\n', (38895, 38920), True, 
'import numpy as np\n'), ((39309, 39328), 'dipy.core.ndindex.ndindex', 'ndindex', (['grid_shape'], {}), '(grid_shape)\n', (39316, 39328), False, 'from dipy.core.ndindex import ndindex\n'), ((22192, 22212), 'numpy.zeros', 'np.zeros', (['res2.shape'], {}), '(res2.shape)\n', (22200, 22212), True, 'import numpy as np\n'), ((28454, 28479), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (28462, 28479), True, 'import numpy as np\n'), ((31923, 31939), 'numpy.clip', 'np.clip', (['v', '(0)', '(1)'], {}), '(v, 0, 1)\n', (31930, 31939), True, 'import numpy as np\n'), ((34964, 34984), 'dipy.utils.six.moves.xrange', 'xrange', (['xyz.shape[0]'], {}), '(xyz.shape[0])\n', (34970, 34984), False, 'from dipy.utils.six.moves import xrange\n'), ((35136, 35158), 'dipy.utils.six.moves.xrange', 'xrange', (['faces.shape[0]'], {}), '(faces.shape[0])\n', (35142, 35158), False, 'from dipy.utils.six.moves import xrange\n'), ((38846, 38872), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (38856, 38872), False, 'from dipy.data import get_sphere\n'), ((39636, 39655), 'numpy.zeros', 'np.zeros', (['xyz.shape'], {}), '(xyz.shape)\n', (39644, 39655), True, 'import numpy as np\n'), ((39679, 39716), 'numpy.interp', 'np.interp', (['cfa[ijk]', '[0, 1]', '[0, 255]'], {}), '(cfa[ijk], [0, 1], [0, 255])\n', (39688, 39716), True, 'import numpy as np\n'), ((39941, 39961), 'dipy.utils.six.moves.xrange', 'xrange', (['xyz.shape[0]'], {}), '(xyz.shape[0])\n', (39947, 39961), False, 'from dipy.utils.six.moves import xrange\n'), ((40072, 40094), 'dipy.utils.six.moves.xrange', 'xrange', (['faces.shape[0]'], {}), '(faces.shape[0])\n', (40078, 40094), False, 'from dipy.utils.six.moves import xrange\n'), ((14943, 14959), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (14951, 14959), True, 'import numpy as np\n'), ((21808, 21830), 'numpy.vstack', 'np.vstack', (['(res, res2)'], {}), '((res, res2))\n', (21817, 21830), True, 'import numpy 
as np\n'), ((22232, 22271), 'numpy.vstack', 'np.vstack', (['(res, res2, zer, res2[::-1])'], {}), '((res, res2, zer, res2[::-1]))\n', (22241, 22271), True, 'import numpy as np\n'), ((37194, 37268), 'numpy.vstack', 'np.vstack', (['(-peaks_dirs[ijk][i] * pv + xyz, peaks_dirs[ijk][i] * pv + xyz)'], {}), '((-peaks_dirs[ijk][i] * pv + xyz, peaks_dirs[ijk][i] * pv + xyz))\n', (37203, 37268), True, 'import numpy as np\n'), ((39181, 39209), 'dipy.reconst.dti.fractional_anisotropy', 'fractional_anisotropy', (['evals'], {}), '(evals)\n', (39202, 39209), False, 'from dipy.reconst.dti import color_fa, fractional_anisotropy\n'), ((39487, 39509), 'numpy.dot', 'np.dot', (['ea', 'vertices.T'], {}), '(ea, vertices.T)\n', (39493, 39509), True, 'import numpy as np\n'), ((34523, 34556), 'numpy.interp', 'np.interp', (['cols', '[0, 1]', '[0, 255]'], {}), '(cols, [0, 1], [0, 255])\n', (34532, 34556), True, 'import numpy as np\n')] |
import os
import time
from itertools import product
import argparse
import json
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import strawberryfields as sf
from strawberryfields.ops import *
from learner.circuits import variational_quantum_circuit
from learner.gates import (cubic_phase, DFT, random_unitary, cross_kerr, get_modes,
unitary_state_fidelity, sample_average_fidelity, process_fidelity, average_fidelity)
from learner.plots import (wigner_3D_plot, wavefunction_plot,
two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots)
# Default hyperparameters for the gate-synthesis run. Entries with matching
# command-line flags can be overridden via ``parse_arguments``.
HP = {
    # Simulation name; used to build the run ID and output paths.
    'name': 'random_gif',
    # Base directory where results are written.
    'out_dir': 'sim_results',
    # Callable that produces the target unitary (called with cutoff and
    # **target_params in the __main__ block below).
    'target_unitary_fn': random_unitary,
    # Keyword arguments forwarded to target_unitary_fn.
    'target_params': {'size': 4},
    # Fock-basis truncation of the simulation.
    'cutoff': 10,
    # Truncation of the subspace on which the gate is learnt.
    'gate_cutoff': 4,
    # Number of layers in the variational circuit.
    'depth': 25,
    # Number of optimisation steps.
    'reps': 2000,
    # Weight of the state-norm penalty term (0 disables it).
    'penalty_strength': 0,
    # Initial standard deviations for the circuit's active/passive parameters.
    'active_sd': 0.0001,
    'passive_sd': 0.1,
    # Whether the target maps outside the gate_cutoff subspace (controls the
    # 'square' option in save_plots).
    'maps_outside': False,
}
def parse_arguments(defaults):
    """Parse command-line options, overriding the supplied defaults.

    Parameters
    ----------
    defaults : dict
        Baseline hyperparameter values; each recognised flag falls back to
        the corresponding entry of this dict.

    Returns
    -------
    dict
        Merged hyperparameters, extended with a generated 'ID', the run
        output directory 'out_dir' (created if missing) and the TensorBoard
        directory 'board_name'.
    """
    parser = argparse.ArgumentParser(description='Quantum gate synthesis.')
    parser.add_argument('-n', '--name', type=str,
                        default=defaults["name"], help='Simulation name.')
    parser.add_argument('-o', '--out-dir', type=str,
                        default=defaults["out_dir"], help='Output directory')
    parser.add_argument('-s', '--dump-reps', type=int,
                        default=100, help='Steps at which to save output')
    parser.add_argument('-D', '--debug', action='store_true',
                        help="Debug mode")
    parser.add_argument('-r', '--reps', type=int,
                        default=defaults["reps"], help='Optimization steps')
    parser.add_argument('-p', '--target-params', type=json.loads,
                        default=defaults["target_params"], help='Gate parameters')
    parser.add_argument('-c', '--cutoff', type=int,
                        default=defaults["cutoff"], help='Fock basis truncation')
    parser.add_argument('-g', '--gate-cutoff', type=int,
                        default=defaults["gate_cutoff"], help='Gate/unitary truncation')
    parser.add_argument('-d', '--depth', type=int,
                        default=defaults["depth"], help='Number of layers')
    parser.add_argument('-P', '--penalty-strength', type=int,
                        default=defaults["penalty_strength"],
                        help='Regularisation penalty strength')
    args = parser.parse_args()

    # Command-line values take precedence over the defaults.
    hyperparams = dict(defaults)
    hyperparams.update(vars(args))

    # Debug mode shrinks the run to a quick smoke test.
    if args.debug:
        hyperparams['depth'] = 1
        hyperparams['reps'] = 5
        hyperparams['name'] += "_debug"

    hyperparams['ID'] = "{}_d{}_c{}_g{}_r{}".format(
        hyperparams['name'], hyperparams['depth'], hyperparams['cutoff'],
        hyperparams['gate_cutoff'], hyperparams['reps'])
    hyperparams['out_dir'] = os.path.join(args.out_dir, hyperparams['ID'], '')
    hyperparams['board_name'] = os.path.join('TensorBoard', hyperparams['ID'], '')

    if not os.path.exists(hyperparams['out_dir']):
        os.makedirs(hyperparams['out_dir'])
    return hyperparams
def real_unitary_overlaps(ket, target_unitary, gate_cutoff, cutoff):
    """Build TF ops for the real overlaps between circuit outputs and targets.

    `ket` is a batched TF tensor of circuit output states, one per input
    basis state; the batch index runs over the gate_cutoff Fock inputs
    (one mode) or their cartesian product (two modes). Returns a TF op of
    shape (batch,) with Re<target_i|ket_i>, and registers one TF summary
    scalar per tracked overlap as a side effect.
    """
    # Number of modes: ket is (batch, cutoff) for 1 mode,
    # (batch, cutoff, cutoff) for 2 modes.
    m = len(ket.shape)-1
    if m == 1:
        # Target column i of the unitary is the desired output for Fock
        # input |i>, for the first gate_cutoff inputs.
        in_state = np.arange(gate_cutoff)
        target_kets = np.array([target_unitary[:, i] for i in in_state])
        target_kets = tf.constant(target_kets, dtype=tf.complex64)
        overlaps = tf.real(tf.einsum('bi,bi->b', tf.conj(target_kets), ket))
    elif m == 2:
        # Two modes: inputs are all pairs |i>|j> with i, j < gate_cutoff.
        fock_states = np.arange(gate_cutoff)
        in_state = np.array(list(product(fock_states, fock_states)))
        # Reorder the rank-4 unitary into SF's interleaved mode convention.
        target_unitary_sf = np.einsum('ijkl->ikjl', target_unitary.reshape([cutoff]*4))
        target_kets = np.array([target_unitary_sf[:, i, :, j] for i, j in in_state])
        target_kets = tf.constant(target_kets, dtype=tf.complex64)
        overlaps = tf.real(tf.einsum('bij,bij->b', tf.conj(target_kets), ket))
    # NOTE(review): for m == 2, in_state.T has only 2 rows (it is the
    # transpose of a (g^2, 2) array), so only 2 summaries are registered —
    # looks like this should iterate in_state itself; verify.
    for idx, state in enumerate(in_state.T):
        tf.summary.scalar('overlap_{}'.format(state), tf.abs(overlaps[idx]))
    return overlaps
def optimize(ket, target_unitary, parameters, cutoff, gate_cutoff, reps=1000, penalty_strength=0,
             out_dir='sim_results', ID='gate_synthesis', board_name='TensorBoard',
             dump_reps=100, **kwargs):
    """Train the variational circuit to reproduce `target_unitary`.

    Minimises sum_i |Re<target_i|ket_i> - 1| (plus an optional norm penalty)
    with Adam, saving one matrix-plot PNG per step (for gif assembly),
    periodic TensorBoard summaries / npz dumps, and the best result so far.

    Parameters
    ----------
    ket : tf.Tensor
        Batched circuit output states from ``variational_quantum_circuit``.
    target_unitary : ndarray
        The unitary to synthesise.
    parameters : sequence of tf.Variable
        Circuit parameters, evaluated and stored alongside the best result.
    cutoff, gate_cutoff : int
        Fock truncation of the simulation / of the learnt subspace.
    reps : int
        Number of optimisation steps.
    penalty_strength : float
        Weight of the state-norm penalty term.
    out_dir, ID, board_name : str
        Output locations for PNGs/npz and TensorBoard summaries.
    dump_reps : int
        Interval at which summaries and npz dumps are written.

    Returns
    -------
    dict
        The `sim_results` dictionary of the best step, extended with final
        fidelity estimates and progress arrays; also saved as ``<ID>.npz``.
    """
    d = gate_cutoff
    c = cutoff
    # Number of modes: ket is (batch, c) for 1 mode, (batch, c, c) for 2.
    m = len(ket.shape)-1
    overlaps = real_unitary_overlaps(ket, target_unitary, gate_cutoff, cutoff)
    mean_overlap = tf.reduce_mean(overlaps)
    tf.summary.scalar("mean_overlap", mean_overlap)
    # Perfect synthesis gives every overlap == 1.
    loss = tf.reduce_sum(tf.abs(overlaps - 1))
    tf.summary.scalar('loss', loss)
    if m == 1:
        state_norms = tf.abs(tf.einsum('bi,bi->b', ket, tf.conj(ket)))
    elif m == 2:
        state_norms = tf.abs(tf.einsum('bij,bij->b', ket, tf.conj(ket)))
    # Penalise deviation of the output states from unit norm.
    norm_deviation = tf.reduce_sum((state_norms - 1)**2)/gate_cutoff
    penalty = penalty_strength*norm_deviation
    tf.summary.scalar('penalty', penalty)
    cost = loss + penalty
    tf.summary.scalar('cost', cost)
    optimiser = tf.train.AdamOptimizer()
    min_cost_optimize = optimiser.minimize(cost)
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(board_name)
    merge = tf.summary.merge_all()
    overlap_progress = []
    cost_progress = []
    best_mean_overlap = 0
    best_min_overlap = 0
    best_max_overlap = 0
    # FIX: initialise so the periodic dump and the final prints cannot hit a
    # NameError before the first improving step.
    min_cost = np.inf
    sim_results = {}
    start = time.time()
    for i in range(reps):
        _, cost_val, overlaps_val, ket_val, penalty_val, params_val = session.run(
            [min_cost_optimize, cost, overlaps, ket, penalty, parameters])
        mean_overlap_val = np.mean(overlaps_val)
        min_overlap_val = min(overlaps_val)
        max_overlap_val = max(overlaps_val)
        cost_progress.append(cost_val)
        overlap_progress.append(overlaps_val)
        # Columns of the learnt matrix are the evolved input basis states.
        if m == 1:
            learnt_unitary = ket_val.T
        elif m == 2:
            learnt_unitary = ket_val.reshape(d**2, c**2).T
        # FIX: the original clobbered c and d here, which corrupted the
        # m == 2 reshape above from the second iteration onwards.
        gd = learnt_unitary.shape[1]
        Ur = learnt_unitary[:gd, :gd]
        # Save a frame of the learnt unitary's real/imag parts (gif frames).
        vmax = np.max([Ur.real, Ur.imag])
        vmin = np.min([Ur.real, Ur.imag])
        cmax = max(vmax, vmin)
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].matshow(Ur.real, cmap=plt.get_cmap('Reds'), vmin=-cmax, vmax=cmax)
        ax[1].matshow(Ur.imag, cmap=plt.get_cmap('Greens'), vmin=-cmax, vmax=cmax)
        for a in ax.ravel():
            a.tick_params(bottom=False, labelbottom=False,
                          top=False, labeltop=False,
                          left=False, labelleft=False,
                          right=False, labelright=False)
        ax[0].set_xlabel(r'$\mathrm{Re}(U)$')
        ax[1].set_xlabel(r'$\mathrm{Im}(U)$')
        for a in ax.ravel():
            a.tick_params(color='white', labelcolor='white')
            for spine in a.spines.values():
                spine.set_edgecolor('white')
        fig.tight_layout()
        # FIX: the original applied .zfill(4) to the whole 'N.png' string,
        # which never pads; zero-pad the frame number so frames sort
        # correctly when assembling the gif.
        fig.savefig(os.path.join(out_dir, '{:04d}.png'.format(i)))
        plt.close(fig)
        if i % dump_reps == 0:
            print("Rep: {} Cost: {:.4f} Overlaps: Mean = {:.4f}, Min = {:.4f}, Max = {:.4f}".format(
                i, cost_val, mean_overlap_val, min_overlap_val, max_overlap_val))
            summary = session.run(merge)
            writer.add_summary(summary, i)
            # Only dump once a best result has actually been recorded.
            if i > 0 and sim_results:
                np.savez(os.path.join(out_dir, ID+'.npz'),
                         **sim_results)
        if i > 0 and mean_overlap_val > best_mean_overlap:
            end = time.time()
            best_mean_overlap = mean_overlap_val
            best_min_overlap = min_overlap_val
            best_max_overlap = max_overlap_val
            min_cost = cost_val
            if m == 1:
                learnt_unitary = ket_val.T
            elif m == 2:
                learnt_unitary = ket_val.reshape(d**2, c**2).T
            eq_state_target, eq_state_learnt, state_fid = unitary_state_fidelity(target_unitary, learnt_unitary, cutoff)
            Fe = process_fidelity(target_unitary, learnt_unitary, cutoff)
            avgF = average_fidelity(target_unitary, learnt_unitary, cutoff)
            sim_results = {
                'name': HP['name'],
                'ID': HP['ID'],
                'target_unitary': target_unitary,
                'target_params': HP['target_params'],
                'cutoff': cutoff,
                'gate_cutoff': gate_cutoff,
                'depth': HP['depth'],
                'reps': HP['reps'],
                'penalty_strength': HP['penalty_strength'],
                'best_runtime': end-start,
                'best_rep': i,
                'mean_overlap': mean_overlap_val,
                'min_overlap': min_overlap_val,
                'max_overlap': max_overlap_val,
                'process_fidelity': Fe,
                'avg_fidelity': avgF,
                'min_cost': cost_val,
                'cost_progress': np.array(cost_progress),
                'mean_overlap_progress': np.mean(np.array(overlap_progress), axis=1),
                'min_overlap_progress': np.min(np.array(overlap_progress), axis=1),
                'max_overlap_progress': np.max(np.array(overlap_progress), axis=1),
                'penalty': penalty_val,
                'learnt_unitary': learnt_unitary,
                'params': params_val,
                'r1': params_val[0],
                'sq_r': params_val[1],
                'sq_phi': params_val[2],
                'r2': params_val[3],
                'disp_r': params_val[4],
                'disp_phi': params_val[5],
                'kappa': params_val[6],
                'eq_state_learnt': eq_state_learnt,
                'eq_state_target': eq_state_target,
                'eq_state_fidelity': state_fid
            }
    end = time.time()
    print("\nElapsed time is {} seconds".format(np.round(end - start)))
    print("Final cost = ", cost_val)
    print("Minimum cost = ", min_cost)
    print("\nMean overlap = {}".format(best_mean_overlap))
    print("Min overlap = {}".format(best_min_overlap))
    print("Max overlap = {}".format(best_max_overlap))
    # NOTE(review): everything below assumes at least one improving step
    # occurred (Fe, avgF, state_fid, eq_state_* are set in the best-step
    # branch); `learnt_unitary` here is from the LAST iteration, not the best.
    avgFs = sample_average_fidelity(target_unitary, learnt_unitary, cutoff)
    sim_results['sample_avg_fidelity'] = avgFs
    print("\nProcess fidelity = {}".format(Fe))
    print("Average fidelity = {}".format(avgF))
    print("Sampled average fidelity = {}".format(avgFs))
    print("\nEqual superposition state fidelity = ", state_fid)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    sim_results['runtime'] = end-start
    sim_results['cost_progress'] = np.array(cost_progress)
    sim_results['mean_overlap_progress'] = np.mean(np.array(overlap_progress), axis=1)
    sim_results['min_overlap_progress'] = np.min(np.array(overlap_progress), axis=1)
    sim_results['max_overlap_progress'] = np.max(np.array(overlap_progress), axis=1)
    np.savez(os.path.join(out_dir, ID+'.npz'), **sim_results)
    return sim_results
def save_plots(target_unitary, learnt_unitary, eq_state_learnt, eq_state_target,
               cost_progress, *, modes, offset=-0.11, l=5, out_dir='sim_results',
               ID='gate_synthesis', **kwargs):
    """Generate and save summary plots for a learnt gate.

    Depending on the number of modes, saves Wigner plots (1 mode) or
    wavefunction plots (2 modes) of the target and learnt equal-superposition
    states, a target-vs-learnt unitary comparison plot, and the cost-progress
    curve, all as PNGs under ``out_dir`` with filenames prefixed by ``ID``.

    Args:
        target_unitary (array): the unitary being synthesised.
        learnt_unitary (array): the unitary produced by the optimization.
        eq_state_learnt (array): learnt state from the equal-superposition input.
        eq_state_target (array): target state from the equal-superposition input.
        cost_progress (array): cost value per optimization step.
        modes (int): number of modes (1 or 2) — selects the plot family.
        offset (float): Wigner plot offset passed through to the plotter.
        l (int): plot grid extent passed through to the plotters.
        out_dir (str): output directory for the PNG files.
        ID (str): filename prefix for all saved figures.
        **kwargs: may contain ``maps_outside``; its negation is passed as
            ``square`` to the unitary plotters.
    """
    def _save(fig, suffix):
        # All figures share one naming scheme: <out_dir>/<ID><suffix>.
        fig.savefig(os.path.join(out_dir, ID + suffix))

    square = not kwargs.get('maps_outside', True)

    if modes == 1:
        fig_target, _ = wigner_3D_plot(eq_state_target, offset=offset, l=l)
        _save(fig_target, '_targetWigner.png')
        fig_learnt, _ = wigner_3D_plot(eq_state_learnt, offset=offset, l=l)
        _save(fig_learnt, '_learntWigner.png')
        fig_unitary, _ = one_mode_unitary_plots(target_unitary, learnt_unitary,
                                                square=square)
        _save(fig_unitary, '_unitaryPlot.png')
    elif modes == 2:
        fig_target, _ = two_mode_wavefunction_plot(eq_state_target, l=l)
        _save(fig_target, '_targetWavefunction.png')
        fig_learnt, _ = two_mode_wavefunction_plot(eq_state_learnt, l=l)
        _save(fig_learnt, '_learntWavefunction.png')
        fig_unitary, _ = two_mode_unitary_plots(target_unitary, learnt_unitary,
                                                square=square)
        _save(fig_unitary, '_unitaryPlot.png')

    fig_cost, _ = plot_cost(cost_progress)
    _save(fig_cost, '_cost.png')
if __name__ == "__main__":
    # Merge any command-line overrides into the default hyperparameter dict.
    HP = parse_arguments(HP)
    # Construct the target unitary from the configured factory function.
    target_unitary = HP['target_unitary_fn'](cutoff=HP['cutoff'], **HP['target_params'])
    HP['modes'] = get_modes(target_unitary, HP['cutoff'])
    # One input basis state per gate-cutoff level, per mode.
    HP['batch_size'] = HP['gate_cutoff']**HP['modes']
    print('------------------------------------------------------------------------')
    print('Hyperparameters:')
    print('------------------------------------------------------------------------')
    for key, val in HP.items():
        print("{}: {}".format(key, val))
    print('------------------------------------------------------------------------')
    # Input kets: identity columns, i.e. the first `gate_cutoff` Fock basis
    # states embedded in the full `cutoff`-dimensional space.
    in_ket = np.zeros([HP['gate_cutoff'], HP['cutoff']])
    np.fill_diagonal(in_ket, 1)
    if HP['modes'] == 2:
        # Two-mode case: take the tensor product of the single-mode kets and
        # flatten the batch axes into one.
        in_ket = np.einsum('ij,kl->ikjl', in_ket, in_ket)
        in_ket = in_ket.reshape(HP['gate_cutoff']**2, HP['cutoff'], HP['cutoff'])
    print('Constructing variational quantum circuit...')
    ket, parameters = variational_quantum_circuit(input_state=in_ket, **HP)
    print('Beginning optimization...')
    res = optimize(ket, target_unitary, parameters, **HP)
    print('Generating plots...')
    save_plots(target_unitary, res['learnt_unitary'], res['eq_state_learnt'],
               res['eq_state_target'], res['cost_progress'], **HP) | [
"tensorflow.reduce_sum",
"argparse.ArgumentParser",
"numpy.einsum",
"numpy.mean",
"learner.gates.get_modes",
"numpy.arange",
"learner.gates.average_fidelity",
"learner.gates.sample_average_fidelity",
"os.path.join",
"numpy.round",
"tensorflow.abs",
"learner.gates.unitary_state_fidelity",
"le... | [((971, 1033), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quantum gate synthesis."""'}), "(description='Quantum gate synthesis.')\n", (994, 1033), False, 'import argparse\n'), ((2661, 2710), 'os.path.join', 'os.path.join', (['args.out_dir', "hyperparams['ID']", '""""""'], {}), "(args.out_dir, hyperparams['ID'], '')\n", (2673, 2710), False, 'import os\n'), ((2743, 2793), 'os.path.join', 'os.path.join', (['"""TensorBoard"""', "hyperparams['ID']", '""""""'], {}), "('TensorBoard', hyperparams['ID'], '')\n", (2755, 2793), False, 'import os\n'), ((4242, 4266), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['overlaps'], {}), '(overlaps)\n', (4256, 4266), True, 'import tensorflow as tf\n'), ((4271, 4318), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_overlap"""', 'mean_overlap'], {}), "('mean_overlap', mean_overlap)\n", (4288, 4318), True, 'import tensorflow as tf\n'), ((4370, 4401), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (4387, 4401), True, 'import tensorflow as tf\n'), ((4697, 4734), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""penalty"""', 'penalty'], {}), "('penalty', penalty)\n", (4714, 4734), True, 'import tensorflow as tf\n'), ((4765, 4796), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cost"""', 'cost'], {}), "('cost', cost)\n", (4782, 4796), True, 'import tensorflow as tf\n'), ((4813, 4837), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (4835, 4837), True, 'import tensorflow as tf\n'), ((4901, 4913), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4911, 4913), True, 'import tensorflow as tf\n'), ((4978, 5011), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['board_name'], {}), '(board_name)\n', (4999, 5011), True, 'import tensorflow as tf\n'), ((5024, 5046), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5044, 5046), True, 'import tensorflow as 
tf\n'), ((5184, 5195), 'time.time', 'time.time', ([], {}), '()\n', (5193, 5195), False, 'import time\n'), ((9567, 9578), 'time.time', 'time.time', ([], {}), '()\n', (9576, 9578), False, 'import time\n'), ((9908, 9971), 'learner.gates.sample_average_fidelity', 'sample_average_fidelity', (['target_unitary', 'learnt_unitary', 'cutoff'], {}), '(target_unitary, learnt_unitary, cutoff)\n', (9931, 9971), False, 'from learner.gates import cubic_phase, DFT, random_unitary, cross_kerr, get_modes, unitary_state_fidelity, sample_average_fidelity, process_fidelity, average_fidelity\n'), ((10375, 10398), 'numpy.array', 'np.array', (['cost_progress'], {}), '(cost_progress)\n', (10383, 10398), True, 'import numpy as np\n'), ((11936, 11960), 'learner.plots.plot_cost', 'plot_cost', (['cost_progress'], {}), '(cost_progress)\n', (11945, 11960), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((12185, 12224), 'learner.gates.get_modes', 'get_modes', (['target_unitary', "HP['cutoff']"], {}), "(target_unitary, HP['cutoff'])\n", (12194, 12224), False, 'from learner.gates import cubic_phase, DFT, random_unitary, cross_kerr, get_modes, unitary_state_fidelity, sample_average_fidelity, process_fidelity, average_fidelity\n'), ((12653, 12696), 'numpy.zeros', 'np.zeros', (["[HP['gate_cutoff'], HP['cutoff']]"], {}), "([HP['gate_cutoff'], HP['cutoff']])\n", (12661, 12696), True, 'import numpy as np\n'), ((12701, 12728), 'numpy.fill_diagonal', 'np.fill_diagonal', (['in_ket', '(1)'], {}), '(in_ket, 1)\n', (12717, 12728), True, 'import numpy as np\n'), ((12973, 13026), 'learner.circuits.variational_quantum_circuit', 'variational_quantum_circuit', ([], {'input_state': 'in_ket'}), '(input_state=in_ket, **HP)\n', (13000, 13026), False, 'from learner.circuits import variational_quantum_circuit\n'), ((2805, 2843), 'os.path.exists', 'os.path.exists', (["hyperparams['out_dir']"], {}), 
"(hyperparams['out_dir'])\n", (2819, 2843), False, 'import os\n'), ((2853, 2888), 'os.makedirs', 'os.makedirs', (["hyperparams['out_dir']"], {}), "(hyperparams['out_dir'])\n", (2864, 2888), False, 'import os\n'), ((3041, 3063), 'numpy.arange', 'np.arange', (['gate_cutoff'], {}), '(gate_cutoff)\n', (3050, 3063), True, 'import numpy as np\n'), ((3086, 3136), 'numpy.array', 'np.array', (['[target_unitary[:, i] for i in in_state]'], {}), '([target_unitary[:, i] for i in in_state])\n', (3094, 3136), True, 'import numpy as np\n'), ((3159, 3203), 'tensorflow.constant', 'tf.constant', (['target_kets'], {'dtype': 'tf.complex64'}), '(target_kets, dtype=tf.complex64)\n', (3170, 3203), True, 'import tensorflow as tf\n'), ((4344, 4364), 'tensorflow.abs', 'tf.abs', (['(overlaps - 1)'], {}), '(overlaps - 1)\n', (4350, 4364), True, 'import tensorflow as tf\n'), ((4599, 4636), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((state_norms - 1) ** 2)'], {}), '((state_norms - 1) ** 2)\n', (4612, 4636), True, 'import tensorflow as tf\n'), ((4930, 4963), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4961, 4963), True, 'import tensorflow as tf\n'), ((5407, 5428), 'numpy.mean', 'np.mean', (['overlaps_val'], {}), '(overlaps_val)\n', (5414, 5428), True, 'import numpy as np\n'), ((5863, 5889), 'numpy.max', 'np.max', (['[Ur.real, Ur.imag]'], {}), '([Ur.real, Ur.imag])\n', (5869, 5889), True, 'import numpy as np\n'), ((5905, 5931), 'numpy.min', 'np.min', (['[Ur.real, Ur.imag]'], {}), '([Ur.real, Ur.imag])\n', (5911, 5931), True, 'import numpy as np\n'), ((5981, 6016), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (5993, 6016), True, 'from matplotlib import pyplot as plt\n'), ((6808, 6822), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6817, 6822), True, 'from matplotlib import pyplot as plt\n'), ((10247, 10270), 'os.path.exists', 'os.path.exists', 
(['out_dir'], {}), '(out_dir)\n', (10261, 10270), False, 'import os\n'), ((10280, 10300), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (10291, 10300), False, 'import os\n'), ((10450, 10476), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (10458, 10476), True, 'import numpy as np\n'), ((10535, 10561), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (10543, 10561), True, 'import numpy as np\n'), ((10620, 10646), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (10628, 10646), True, 'import numpy as np\n'), ((10669, 10703), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '.npz')"], {}), "(out_dir, ID + '.npz')\n", (10681, 10703), False, 'import os\n'), ((11027, 11078), 'learner.plots.wigner_3D_plot', 'wigner_3D_plot', (['eq_state_target'], {'offset': 'offset', 'l': 'l'}), '(eq_state_target, offset=offset, l=l)\n', (11041, 11078), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((11167, 11218), 'learner.plots.wigner_3D_plot', 'wigner_3D_plot', (['eq_state_learnt'], {'offset': 'offset', 'l': 'l'}), '(eq_state_learnt, offset=offset, l=l)\n', (11181, 11218), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((11309, 11378), 'learner.plots.one_mode_unitary_plots', 'one_mode_unitary_plots', (['target_unitary', 'learnt_unitary'], {'square': 'square'}), '(target_unitary, learnt_unitary, square=square)\n', (11331, 11378), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((11978, 12017), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_cost.png')"], {}), "(out_dir, ID + '_cost.png')\n", (11990, 12017), False, 'import os\n'), 
((12771, 12811), 'numpy.einsum', 'np.einsum', (['"""ij,kl->ikjl"""', 'in_ket', 'in_ket'], {}), "('ij,kl->ikjl', in_ket, in_ket)\n", (12780, 12811), True, 'import numpy as np\n'), ((3320, 3342), 'numpy.arange', 'np.arange', (['gate_cutoff'], {}), '(gate_cutoff)\n', (3329, 3342), True, 'import numpy as np\n'), ((3522, 3584), 'numpy.array', 'np.array', (['[target_unitary_sf[:, i, :, j] for i, j in in_state]'], {}), '([target_unitary_sf[:, i, :, j] for i, j in in_state])\n', (3530, 3584), True, 'import numpy as np\n'), ((3607, 3651), 'tensorflow.constant', 'tf.constant', (['target_kets'], {'dtype': 'tf.complex64'}), '(target_kets, dtype=tf.complex64)\n', (3618, 3651), True, 'import tensorflow as tf\n'), ((3830, 3851), 'tensorflow.abs', 'tf.abs', (['overlaps[idx]'], {}), '(overlaps[idx])\n', (3836, 3851), True, 'import tensorflow as tf\n'), ((7314, 7325), 'time.time', 'time.time', ([], {}), '()\n', (7323, 7325), False, 'import time\n'), ((7713, 7775), 'learner.gates.unitary_state_fidelity', 'unitary_state_fidelity', (['target_unitary', 'learnt_unitary', 'cutoff'], {}), '(target_unitary, learnt_unitary, cutoff)\n', (7735, 7775), False, 'from learner.gates import cubic_phase, DFT, random_unitary, cross_kerr, get_modes, unitary_state_fidelity, sample_average_fidelity, process_fidelity, average_fidelity\n'), ((7793, 7849), 'learner.gates.process_fidelity', 'process_fidelity', (['target_unitary', 'learnt_unitary', 'cutoff'], {}), '(target_unitary, learnt_unitary, cutoff)\n', (7809, 7849), False, 'from learner.gates import cubic_phase, DFT, random_unitary, cross_kerr, get_modes, unitary_state_fidelity, sample_average_fidelity, process_fidelity, average_fidelity\n'), ((7869, 7925), 'learner.gates.average_fidelity', 'average_fidelity', (['target_unitary', 'learnt_unitary', 'cutoff'], {}), '(target_unitary, learnt_unitary, cutoff)\n', (7885, 7925), False, 'from learner.gates import cubic_phase, DFT, random_unitary, cross_kerr, get_modes, unitary_state_fidelity, 
sample_average_fidelity, process_fidelity, average_fidelity\n'), ((9627, 9648), 'numpy.round', 'np.round', (['(end - start)'], {}), '(end - start)\n', (9635, 9648), True, 'import numpy as np\n'), ((11100, 11147), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_targetWigner.png')"], {}), "(out_dir, ID + '_targetWigner.png')\n", (11112, 11147), False, 'import os\n'), ((11240, 11287), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_learntWigner.png')"], {}), "(out_dir, ID + '_learntWigner.png')\n", (11252, 11287), False, 'import os\n'), ((11401, 11447), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_unitaryPlot.png')"], {}), "(out_dir, ID + '_unitaryPlot.png')\n", (11413, 11447), False, 'import os\n'), ((11490, 11538), 'learner.plots.two_mode_wavefunction_plot', 'two_mode_wavefunction_plot', (['eq_state_target'], {'l': 'l'}), '(eq_state_target, l=l)\n', (11516, 11538), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((11636, 11684), 'learner.plots.two_mode_wavefunction_plot', 'two_mode_wavefunction_plot', (['eq_state_learnt'], {'l': 'l'}), '(eq_state_learnt, l=l)\n', (11662, 11684), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((11782, 11851), 'learner.plots.two_mode_unitary_plots', 'two_mode_unitary_plots', (['target_unitary', 'learnt_unitary'], {'square': 'square'}), '(target_unitary, learnt_unitary, square=square)\n', (11804, 11851), False, 'from learner.plots import wigner_3D_plot, wavefunction_plot, two_mode_wavefunction_plot, plot_cost, one_mode_unitary_plots, two_mode_unitary_plots\n'), ((3253, 3273), 'tensorflow.conj', 'tf.conj', (['target_kets'], {}), '(target_kets)\n', (3260, 3273), True, 'import tensorflow as tf\n'), ((4473, 4485), 'tensorflow.conj', 'tf.conj', (['ket'], {}), '(ket)\n', (4480, 4485), True, 'import 
tensorflow as tf\n'), ((6053, 6073), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Reds"""'], {}), "('Reds')\n", (6065, 6073), True, 'from matplotlib import pyplot as plt\n'), ((6134, 6156), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Greens"""'], {}), "('Greens')\n", (6146, 6156), True, 'from matplotlib import pyplot as plt\n'), ((8707, 8730), 'numpy.array', 'np.array', (['cost_progress'], {}), '(cost_progress)\n', (8715, 8730), True, 'import numpy as np\n'), ((11561, 11614), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_targetWavefunction.png')"], {}), "(out_dir, ID + '_targetWavefunction.png')\n", (11573, 11614), False, 'import os\n'), ((11707, 11760), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_learntWavefunction.png')"], {}), "(out_dir, ID + '_learntWavefunction.png')\n", (11719, 11760), False, 'import os\n'), ((11874, 11920), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '_unitaryPlot.png')"], {}), "(out_dir, ID + '_unitaryPlot.png')\n", (11886, 11920), False, 'import os\n'), ((3376, 3409), 'itertools.product', 'product', (['fock_states', 'fock_states'], {}), '(fock_states, fock_states)\n', (3383, 3409), False, 'from itertools import product\n'), ((3703, 3723), 'tensorflow.conj', 'tf.conj', (['target_kets'], {}), '(target_kets)\n', (3710, 3723), True, 'import tensorflow as tf\n'), ((4563, 4575), 'tensorflow.conj', 'tf.conj', (['ket'], {}), '(ket)\n', (4570, 4575), True, 'import tensorflow as tf\n'), ((7168, 7202), 'os.path.join', 'os.path.join', (['out_dir', "(ID + '.npz')"], {}), "(out_dir, ID + '.npz')\n", (7180, 7202), False, 'import os\n'), ((8781, 8807), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (8789, 8807), True, 'import numpy as np\n'), ((8865, 8891), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (8873, 8891), True, 'import numpy as np\n'), ((8949, 8975), 'numpy.array', 'np.array', (['overlap_progress'], {}), '(overlap_progress)\n', (8957, 
8975), True, 'import numpy as np\n')] |
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based langauge models."""
from flax import nn
import jax.numpy as jnp
import numpy as np
def shift_right(x, train=True):
  """Shift the input one position to the right along axis 1, zero-padding.

  Used for teacher forcing: each output position may only attend to strictly
  earlier input tokens. In predict mode the sequence length is 1, so the
  input is returned unchanged.
  """
  if not train:
    # Single-token decoding: nothing to shift.
    return x
  widths = [(0, 0)] * x.ndim
  widths[1] = (1, 0)  # prepend one zero along the sequence axis
  shifted = jnp.pad(
      x, widths, mode='constant', constant_values=x.dtype.type(0))
  # Drop the final position so the overall length is preserved.
  return shifted[:, :-1]
class Embed(nn.Module):
  """Embedding Module.

  A parameterized function from integers [0, n) to d-dimensional vectors.
  """

  def apply(self,
            inputs,
            num_embeddings,
            features,
            mode='input',
            emb_init=nn.initializers.normal(stddev=1.0)):
    """Applies Embed module.

    Args:
      inputs: input data (integer token ids for 'input' mode, dense
        activations for 'output' mode)
      num_embeddings: number of embeddings (vocabulary size)
      features: size of the embedding dimension
      mode: either 'input' or 'output' -> to share input/output embedding
      emb_init: embedding initializer

    Returns:
      output which is embedded input data
    """
    # A single table shared between the input lookup and the tied output
    # projection.
    embedding = self.param('embedding', (num_embeddings, features), emb_init)
    if mode == 'input':
      integer_dtypes = (jnp.int32, jnp.int64, jnp.uint32, jnp.uint64)
      if inputs.dtype not in integer_dtypes:
        raise ValueError('Input type must be an integer or unsigned integer.')
      # Row lookup: map each token id to its embedding vector.
      return jnp.take(embedding, inputs, axis=0)
    if mode == 'output':
      # Tied output projection: logits over the vocabulary.
      return jnp.einsum('bld,vd->blv', inputs, embedding)
def sinusoidal_init(max_len=2048):
  """1D Sinusoidal Position Embedding Initializer.

  Args:
    max_len: maximum possible length for the input

  Returns:
    output: init function returning `(1, max_len, d_feature)`
  """

  def init(key, shape, dtype=np.float32):
    """Sinusoidal init (key and dtype are ignored; output is float32)."""
    del key, dtype
    d_feature = shape[-1]
    positions = np.arange(0, max_len)[:, np.newaxis]
    # Geometric progression of inverse wavelengths, one per channel pair.
    rates = np.exp(np.arange(0, d_feature, 2) * -(np.log(10000.0) / d_feature))
    table = np.zeros((max_len, d_feature), dtype=np.float32)
    table[:, 0::2] = np.sin(positions * rates)  # even channels
    table[:, 1::2] = np.cos(positions * rates)  # odd channels
    # Add a leading batch axis: [1, max_len, d_feature].
    return jnp.array(table[np.newaxis, :, :])

  return init
class AddPositionEmbs(nn.Module):
  """Adds learned positional embeddings to the inputs."""

  def apply(self,
            inputs,
            max_len=2048,
            posemb_init=nn.initializers.normal(stddev=1.0)):
    """Applies AddPositionEmbs module.

    Args:
      inputs: input data of shape `(bs, timesteps, in_dim)`
      max_len: maximum possible length for the input
      posemb_init: positional embedding initializer

    Returns:
      output: `(bs, timesteps, in_dim)`
    """
    assert inputs.ndim == 3, ('Number of dimention should be 3, but it is: %d' %
                              inputs.ndim)
    seq_len = inputs.shape[1]
    table_shape = (1, max_len, inputs.shape[-1])
    pos_embedding = self.param('pos_embedding', table_shape, posemb_init)
    # Only the first `seq_len` rows of the learned table are used.
    return inputs + pos_embedding[:, :seq_len, :]
class MlpBlock(nn.Module):
  """Transformer MLP block."""

  def apply(self,
            inputs,
            mlp_dim,
            out_dim=None,
            dropout_rate=0.1,
            deterministic=False,
            kernel_init=nn.initializers.xavier_uniform(),
            bias_init=nn.initializers.normal(stddev=1e-6)):
    """Applies Transformer MlpBlock module.

    Expand to `mlp_dim`, apply GELU and dropout, then project back down to
    `out_dim` (defaults to the input feature size) with a final dropout.
    """
    if out_dim is None:
      final_dim = inputs.shape[-1]
    else:
      final_dim = out_dim
    hidden = nn.Dense(inputs, mlp_dim, kernel_init=kernel_init,
                      bias_init=bias_init)
    hidden = nn.dropout(nn.gelu(hidden), rate=dropout_rate,
                        deterministic=deterministic)
    projected = nn.Dense(hidden, final_dim, kernel_init=kernel_init,
                         bias_init=bias_init)
    return nn.dropout(projected, rate=dropout_rate,
                      deterministic=deterministic)
class Transformer1DBlock(nn.Module):
  """Transformer layer (https://openreview.net/forum?id=H1e5GJBtDr)."""
  def apply(self,
            inputs,
            qkv_dim,
            mlp_dim,
            num_heads,
            causal_mask=False,
            padding_mask=None,
            dropout_rate=0.1,
            attention_dropout_rate=0.1,
            deterministic=False):
    """Applies Transformer1DBlock module.

    Args:
      inputs: input data of shape `(batch, length, features)`
      qkv_dim: dimension of the query/key/value
      mlp_dim: dimension of the mlp on top of attention block
      num_heads: number of heads
      causal_mask: bool, mask future or not
      padding_mask: bool, mask padding tokens
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights
      deterministic: bool, deterministic or not (to apply dropout)

    Returns:
      output after transformer block, same shape as `inputs`.
    """
    # Attention block.
    assert inputs.ndim == 3
    # Pre-LayerNorm variant: normalize, attend, dropout, then residual-add.
    x = nn.LayerNorm(inputs)
    x = nn.SelfAttention(
        x,
        num_heads=num_heads,
        qkv_features=qkv_dim,
        attention_axis=(1,),  # attend along the sequence axis
        causal_mask=causal_mask,
        padding_mask=padding_mask,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        bias=False,
        broadcast_dropout=False,
        dropout_rate=attention_dropout_rate,
        deterministic=deterministic)
    x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
    x = x + inputs
    # MLP block, also pre-norm with its own residual connection.
    y = nn.LayerNorm(x)
    y = MlpBlock(
        y,
        mlp_dim=mlp_dim,
        dropout_rate=dropout_rate,
        deterministic=deterministic)
    return x + y
class Transformer(nn.Module):
  """Transformer Model for sequence tagging."""
  def apply(self,
            inputs,
            vocab_size,
            output_vocab_size,
            emb_dim=512,
            num_heads=8,
            num_layers=6,
            qkv_dim=512,
            mlp_dim=2048,
            max_len=2048,
            train=True,
            dropout_rate=0.3,
            attention_dropout_rate=0.3):
    """Applies Transformer model on the inputs.

    Args:
      inputs: input data, integer token ids of shape `(batch, len)`
      vocab_size: size of the input vocabulary
      output_vocab_size: size of the output classes
      emb_dim: dimension of embedding
      num_heads: number of heads
      num_layers: number of layers
      qkv_dim: dimension of the query/key/value
      mlp_dim: dimension of the mlp on top of attention block
      max_len: maximum length.
      train: if it is training,
      dropout_rate: dropout rate
      attention_dropout_rate: dropout rate for attention weights

    Returns:
      output of a transformer decoder: logits `(batch, len, output_vocab_size)`.
    """
    # Token id 0 is treated as padding; mask shape is (batch, len, 1).
    padding_mask = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)[..., None]
    assert inputs.ndim == 2  # (batch, len)
    x = inputs.astype('int32')
    x = Embed(x, num_embeddings=vocab_size, features=emb_dim, name='embed')
    x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
    # Fixed (non-learned) sinusoidal position embeddings.
    x = AddPositionEmbs(
        x, max_len=max_len, posemb_init=sinusoidal_init(max_len=max_len))
    # Stack of non-causal (bidirectional) self-attention blocks.
    for _ in range(num_layers):
      x = Transformer1DBlock(
          x,
          qkv_dim=qkv_dim,
          mlp_dim=mlp_dim,
          num_heads=num_heads,
          causal_mask=False,
          padding_mask=padding_mask,
          dropout_rate=dropout_rate,
          attention_dropout_rate=attention_dropout_rate,
          deterministic=not train,
      )
    # Final norm + linear projection to per-token class logits.
    x = nn.LayerNorm(x)
    logits = nn.Dense(
        x,
        output_vocab_size,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6))
    return logits
| [
"jax.numpy.array",
"numpy.log",
"jax.numpy.where",
"jax.numpy.einsum",
"jax.numpy.take",
"numpy.zeros",
"flax.nn.initializers.normal",
"flax.nn.dropout",
"flax.nn.initializers.xavier_uniform",
"numpy.sin",
"flax.nn.gelu",
"flax.nn.LayerNorm",
"numpy.cos",
"numpy.arange",
"flax.nn.Dense"
... | [((1358, 1392), 'flax.nn.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1.0)'}), '(stddev=1.0)\n', (1380, 1392), False, 'from flax import nn\n'), ((2472, 2520), 'numpy.zeros', 'np.zeros', (['(max_len, d_feature)'], {'dtype': 'np.float32'}), '((max_len, d_feature), dtype=np.float32)\n', (2480, 2520), True, 'import numpy as np\n'), ((2683, 2710), 'numpy.sin', 'np.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (2689, 2710), True, 'import numpy as np\n'), ((2729, 2756), 'numpy.cos', 'np.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (2735, 2756), True, 'import numpy as np\n'), ((2825, 2838), 'jax.numpy.array', 'jnp.array', (['pe'], {}), '(pe)\n', (2834, 2838), True, 'import jax.numpy as jnp\n'), ((3037, 3071), 'flax.nn.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1.0)'}), '(stddev=1.0)\n', (3059, 3071), False, 'from flax import nn\n'), ((3878, 3910), 'flax.nn.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (3908, 3910), False, 'from flax import nn\n'), ((3934, 3970), 'flax.nn.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1e-06)'}), '(stddev=1e-06)\n', (3956, 3970), False, 'from flax import nn\n'), ((4097, 4168), 'flax.nn.Dense', 'nn.Dense', (['inputs', 'mlp_dim'], {'kernel_init': 'kernel_init', 'bias_init': 'bias_init'}), '(inputs, mlp_dim, kernel_init=kernel_init, bias_init=bias_init)\n', (4105, 4168), False, 'from flax import nn\n'), ((4177, 4187), 'flax.nn.gelu', 'nn.gelu', (['x'], {}), '(x)\n', (4184, 4187), False, 'from flax import nn\n'), ((4196, 4257), 'flax.nn.dropout', 'nn.dropout', (['x'], {'rate': 'dropout_rate', 'deterministic': 'deterministic'}), '(x, rate=dropout_rate, deterministic=deterministic)\n', (4206, 4257), False, 'from flax import nn\n'), ((4271, 4344), 'flax.nn.Dense', 'nn.Dense', (['x', 'actual_out_dim'], {'kernel_init': 'kernel_init', 'bias_init': 'bias_init'}), '(x, actual_out_dim, kernel_init=kernel_init, 
bias_init=bias_init)\n', (4279, 4344), False, 'from flax import nn\n'), ((4367, 4433), 'flax.nn.dropout', 'nn.dropout', (['output'], {'rate': 'dropout_rate', 'deterministic': 'deterministic'}), '(output, rate=dropout_rate, deterministic=deterministic)\n', (4377, 4433), False, 'from flax import nn\n'), ((5430, 5450), 'flax.nn.LayerNorm', 'nn.LayerNorm', (['inputs'], {}), '(inputs)\n', (5442, 5450), False, 'from flax import nn\n'), ((5896, 5957), 'flax.nn.dropout', 'nn.dropout', (['x'], {'rate': 'dropout_rate', 'deterministic': 'deterministic'}), '(x, rate=dropout_rate, deterministic=deterministic)\n', (5906, 5957), False, 'from flax import nn\n'), ((6003, 6018), 'flax.nn.LayerNorm', 'nn.LayerNorm', (['x'], {}), '(x)\n', (6015, 6018), False, 'from flax import nn\n'), ((7445, 7502), 'flax.nn.dropout', 'nn.dropout', (['x'], {'rate': 'dropout_rate', 'deterministic': '(not train)'}), '(x, rate=dropout_rate, deterministic=not train)\n', (7455, 7502), False, 'from flax import nn\n'), ((7973, 7988), 'flax.nn.LayerNorm', 'nn.LayerNorm', (['x'], {}), '(x)\n', (7985, 7988), False, 'from flax import nn\n'), ((1997, 2032), 'jax.numpy.take', 'jnp.take', (['embedding', 'inputs'], {'axis': '(0)'}), '(embedding, inputs, axis=0)\n', (2005, 2032), True, 'import jax.numpy as jnp\n'), ((2071, 2115), 'jax.numpy.einsum', 'jnp.einsum', (['"""bld,vd->blv"""', 'inputs', 'embedding'], {}), "('bld,vd->blv', inputs, embedding)\n", (2081, 2115), True, 'import jax.numpy as jnp\n'), ((2536, 2557), 'numpy.arange', 'np.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (2545, 2557), True, 'import numpy as np\n'), ((2604, 2630), 'numpy.arange', 'np.arange', (['(0)', 'd_feature', '(2)'], {}), '(0, d_feature, 2)\n', (2613, 2630), True, 'import numpy as np\n'), ((5664, 5696), 'flax.nn.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (5694, 5696), False, 'from flax import nn\n'), ((5716, 5752), 'flax.nn.initializers.normal', 'nn.initializers.normal', ([], {'stddev': 
'(1e-06)'}), '(stddev=1e-06)\n', (5738, 5752), False, 'from flax import nn\n'), ((8070, 8102), 'flax.nn.initializers.xavier_uniform', 'nn.initializers.xavier_uniform', ([], {}), '()\n', (8100, 8102), False, 'from flax import nn\n'), ((8122, 8158), 'flax.nn.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1e-06)'}), '(stddev=1e-06)\n', (8144, 8158), False, 'from flax import nn\n'), ((7226, 7253), 'jax.numpy.where', 'jnp.where', (['(inputs > 0)', '(1)', '(0)'], {}), '(inputs > 0, 1, 0)\n', (7235, 7253), True, 'import jax.numpy as jnp\n'), ((2635, 2650), 'numpy.log', 'np.log', (['(10000.0)'], {}), '(10000.0)\n', (2641, 2650), True, 'import numpy as np\n')] |
from VESIcal import calibration_checks
from VESIcal import core
from scipy.optimize import root_scalar
from abc import abstractmethod
import numpy as np
class FugacityModel(object):
    """ The fugacity model object is for implementations of fugacity models
    for individual volatile species, though it may depend on the mole
    fraction of other volatile species. It contains all the methods required
    to calculate the fugacity at a given pressure and mole fraction.
    """

    def __init__(self):
        # Start with no calibration restrictions; subclasses install theirs.
        self.set_calibration_ranges([])

    def set_calibration_ranges(self, calibration_ranges):
        """ Store the list of CalibrationRange objects for this model. """
        self.calibration_ranges = calibration_ranges

    @abstractmethod
    def fugacity(self, pressure, **kwargs):
        """ Calculate the fugacity; implemented by subclasses. """

    # @abstractmethod
    def check_calibration_range(self, parameters, report_nonexistance=True):
        """ Concatenate the warning strings for every calibration range the
        given parameters fall outside of; empty string if all checks pass. """
        # Note: compare to False explicitly, matching the original behavior of
        # only reporting ranges whose check() returns False itself.
        failures = [cr.string(parameters, report_nonexistance)
                    for cr in self.calibration_ranges
                    if cr.check(parameters) == False]
        return ''.join(failures)
# ------------- FUGACITY MODELS -------------------------------- #
class fugacity_idealgas(FugacityModel):
    """ An instance of FugacityModel for an ideal gas.
    """

    def fugacity(self, pressure, X_fluid=1.0, **kwargs):
        """ Returns the fugacity of an ideal gas, i.e., the partial pressure.

        Parameters
        ----------
        pressure    float
            Total pressure of the system, in bars.
        X_fluid     float
            The mole fraction of the species in the vapour phase.

        Returns
        -------
        float
            Fugacity (partial pressure) in bars
        """
        # Ideal mixing (Dalton's law): fugacity equals the partial pressure.
        partial_pressure = pressure * X_fluid
        return partial_pressure
class fugacity_KJ81_co2(FugacityModel):
""" Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
will return the properties of the CO2 component of the mixed fluid.
"""
def __init__(self):
self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',20000.0,calibration_checks.crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
fail_msg=calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.crmsg_LessThan_pass,
description_msg=calibration_checks.crmsg_LessThan_description),
calibration_checks.CalibrationRange('temperature',1050,calibration_checks.crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
fail_msg=calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.crmsg_LessThan_pass,
description_msg=calibration_checks.crmsg_LessThan_description)])
def fugacity(self,pressure,temperature,X_fluid,**kwargs):
""" Calculates the fugacity of CO2 in a mixed CO2-H2O fluid. Above 1050C,
it assumes H2O and CO2 do not interact, as the equations are not defined
beyond this point.
Parameters
----------
pressure float
Total pressure of the system in bars.
temperature float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid.
Returns
-------
float
fugacity of CO2 in bars
"""
if X_fluid == 0:
return 0
elif temperature >= 1050.0:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
else:
return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid
def volume(self,P,T,X_fluid):
""" Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
Jacobs (1981) using scipy.root_scalar.
Parameters
----------
P float
Total pressure of the system, in bars.
T float
Temperature in degC
X_fluid float
Mole fraction of CO2 in the fluid
Returns
-------
float
Volume of the mixed fluid.
"""
if X_fluid != 1.0:
# x0 = self.volume(P,T,1.0)*X_fluid + self.volume_h(P,T)*(1-X_fluid)
# print(x0)
if P >= 20000 and T<800-273.15:
x0 = (X_fluid*25+(1-X_fluid)*15)
else:
x0 = (X_fluid*35+(1-X_fluid)*15)
else:
if P >= 20000 and T<800-273.15:
x0 = 25
else:
x0=35
return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root
def root_volume(self, v, P, T, X_fluid):
    """ Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
    For use with a root finder to obtain the volume of the mixed fluid.

    Parameters
    ----------
    v   float
        Guess for the volume
    P   float
        Total system pressure in bars.
    T   float
        Temperature in degC
    X_fluid     float
        Mole fraction of CO2 in the fluid.

    Returns
    -------
    float
        Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
    """
    T = T + 273.15
    # Temperature-dependent coefficients for pure CO2 and pure H2O; the 1e6
    # factor converts the published values to the units used here.
    b_co2 = 58.0
    c_co2 = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
    d_co2 = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
    e_co2 = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
    b_h2o = 29.0
    c_h2o = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6
    d_h2o = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
    e_h2o = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
    if X_fluid == 1:
        # Pure CO2: mixture coefficients collapse to the CO2 values.
        bm = b_co2
        cm = c_co2
        dm = d_co2
        em = e_co2
    else:
        # Mixing rules: linear in b; quadratic with geometric-mean cross
        # terms for c, d and e.
        bm = X_fluid*b_co2 + (1-X_fluid)*b_h2o
        cm = c_co2*X_fluid**2 + c_h2o*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*(c_co2*c_h2o)**0.5
        dm = d_co2*X_fluid**2 + d_h2o*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*(d_co2*d_h2o)**0.5
        em = e_co2*X_fluid**2 + e_h2o*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*(e_co2*e_h2o)**0.5
    am = cm + dm/v + em/v**2
    y = bm/(4*v)
    # Hard-sphere repulsive term and the attractive correction (R = 83.14
    # cm3 bar K-1 mol-1).
    repulsive = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
    attractive = - am / (T**0.5 * v * (v+bm))
    return -(P - repulsive - attractive)
def volume_h(self, P, T):
    """ Calculates the volume of a pure H2O fluid, by solving Eq (14) of
    Kerrick and Jacobs (1981).

    Parameters
    ----------
    P   float
        Total pressure in bars.
    T   float
        Temperature in degC.

    Returns
    -------
    float
        Volume of a pure H2O fluid.
    """
    solution = root_scalar(self.root_volume_h, x0=15, x1=35, args=(P, T))
    return solution.root
def root_volume_h(self, v, P, T):
    """ Returns the difference between the lhs and rhs of Eq (14) of
    Kerrick and Jacobs (1981). For use with a root solver to identify the
    volume of a pure H2O fluid.

    Parameters
    ----------
    v   float
        Guess for the volume
    P   float
        Total pressure in bars.
    T   float
        Temperature in degC.

    Returns
    -------
    float
        The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
        in bars.
    """
    T = T + 273.15
    # Temperature-dependent coefficients for pure H2O (Kerrick and Jacobs,
    # 1981); the 1e6 factor converts the published values.
    b = 29.0
    c = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6
    d = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
    e = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
    a = c + d/v + e/v**2
    y = b/(4*v)
    repulsive = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
    attractive = - a / (T**0.5 * v * (v+b))
    return -(P - repulsive - attractive)
def lnPhi_mix(self,P,T,X_fluid):
    """ Calculates the natural log of the fugacity coefficient for CO2 in a
    mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).

    Parameters
    ----------
    P   float
        Total pressure in bars.
    T   float
        Temperature in degC
    X_fluid     float
        The mole fraction of CO2 in the fluid.

    Returns
    -------
    float
        The natural log of the fugacity coefficient for CO2 in a mixed fluid.
    """
    T = T + 273.15
    # self.volume() expects degC, so convert back for the call.
    v = self.volume(P,T-273.15,X_fluid)
    # Temperature-dependent coefficients for pure CO2 (c) and pure H2O (h);
    # the 1e6 factor converts the published values.
    c = {}
    h = {}
    c['b'] = 58.0
    c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
    c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
    c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
    h['b'] = 29.0
    h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6
    h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
    h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
    if X_fluid == 1:
        # Pure CO2: mixture and cross coefficients collapse to the CO2 values.
        bm = c['b']
        cm = c['c']
        c12= c['c']
        dm = c['d']
        d12= c['d']
        em = c['e']
        e12 =c['e']
    else:
        # Mixing rules: linear in b; quadratic with geometric-mean cross
        # terms (c12, d12, e12) for c, d and e.
        bm = X_fluid*c['b'] + (1-X_fluid)*h['b']
        c12 = (c['c']*h['c'])**0.5
        cm = c['c']*X_fluid**2 + h['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
        d12 = (c['d']*h['d'])**0.5
        dm = c['d']*X_fluid**2 + h['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
        e12 = (c['e']*h['e'])**0.5
        em = c['e']*X_fluid**2 + h['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
    # Reduced-volume parameter of the hard-sphere repulsive term.
    y = bm/(4*v)
    # Compressibility factor from Z = Pv/RT (R = 83.14 cm3 bar K-1 mol-1).
    Z = v*P/(83.14*T)
    # Term-by-term accumulation of Eq (27) of Kerrick and Jacobs (1981).
    # Statement order mirrors the published equation; do not reorder.
    lnPhi = 0
    lnPhi += (4*y-3*y**2)/(1-y)**2 + (c['b']/bm * (4*y-2*y**2)/(1-y)**3)
    lnPhi += - (2*c['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
    lnPhi += - cm*c['b']/(83.14*T**1.5*bm*(v+bm))
    lnPhi += cm*c['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
    lnPhi += - (2*c['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
    lnPhi += (2*c['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
    lnPhi += c['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*c['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
    lnPhi += - 2*c['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
    lnPhi += - (2*c['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
    lnPhi += (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
    lnPhi += - (2*c['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
    lnPhi += em*c['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*c['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
    lnPhi += 3*em*c['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*c['b']/(83.14*T**1.5*bm**3*(v+bm))
    lnPhi += - np.log(Z)
    return lnPhi
class fugacity_KJ81_h2o(FugacityModel):
    """Implementation of the Kerrick and Jacobs (1981) EOS for mixed fluids. This class
    will return the properties of the H2O component of the mixed fluid.
    """
    def __init__(self):
        # Validity limits of the Kerrick and Jacobs (1981) EOS: P < 20 kbar,
        # T < 1050 oC.
        self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',20000.0,calibration_checks.crf_LessThan,'bar','Kerrick and Jacobs (1981) EOS',
                                        fail_msg=calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.crmsg_LessThan_pass,
                                        description_msg=calibration_checks.crmsg_LessThan_description),
                                    calibration_checks.CalibrationRange('temperature',1050,calibration_checks.crf_LessThan,'oC','Kerrick and Jacobs (1981) EOS',
                                        fail_msg=calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.crmsg_LessThan_pass,
                                        description_msg=calibration_checks.crmsg_LessThan_description)])

    def fugacity(self,pressure,temperature,X_fluid,**kwargs):
        """ Calculates the fugacity of H2O in a mixed CO2-H2O fluid. Above 1050C,
        it assumes H2O and CO2 do not interact, as the equations are not defined
        beyond this point.

        Parameters
        ----------
        pressure    float
            Total pressure of the system in bars.
        temperature     float
            Temperature in degC
        X_fluid     float
            Mole fraction of H2O in the fluid.

        Returns
        -------
        float
            fugacity of H2O in bars
        """
        if X_fluid == 0:
            return 0
        elif temperature >= 1050:
            # Beyond calibration: drop the CO2-H2O interaction terms and
            # scale the pure-H2O fugacity by X_fluid (ideal mixing).
            return pressure*np.exp(self.lnPhi_mix(pressure,temperature,1.0))*X_fluid
        else:
            return pressure*np.exp(self.lnPhi_mix(pressure,temperature,X_fluid))*X_fluid

    def volume(self,P,T,X_fluid):
        """ Calculates the volume of the mixed fluid, by solving Eq (28) of Kerrick and
        Jacobs (1981) using scipy.root_scalar.

        Parameters
        ----------
        P   float
            Total pressure of the system, in bars.
        T   float
            Temperature in degC
        X_fluid     float
            Mole fraction of H2O in the fluid

        Returns
        -------
        float
            Volume of the mixed fluid.
        """
        # Initial guesses: denser fluid (smaller molar volume) at high P /
        # low T; ~15 cm3/mol for H2O-rich, ~25-35 for CO2-rich mixtures.
        if X_fluid != 1.0:
            if P >= 20000 and T<800-273.15:
                x0 = ((1-X_fluid)*25+X_fluid*15)
            else:
                x0 = ((1-X_fluid)*35+X_fluid*15)
        else:
            if P >= 20000 and T<800-273.15:
                x0 = 10
            else:
                x0=15
        return root_scalar(self.root_volume,x0=x0,x1=x0*0.9,args=(P,T,X_fluid)).root

    def root_volume(self,v,P,T,X_fluid):
        """ Returns the difference between the lhs and rhs of Eq (28) of Kerrick and Jacobs (1981).
        For use with a root finder to obtain the volume of the mixed fluid.

        Parameters
        ----------
        v   float
            Guess for the volume
        P   float
            Total system pressure in bars.
        T   float
            Temperature in degC
        X_fluid     float
            Mole fraction of H2O in the fluid.

        Returns
        -------
        float
            Difference between lhs and rhs of Eq (28) of Kerrick and Jacobs (1981), in bars.
        """
        T = T + 273.15
        # Temperature-dependent coefficients for pure CO2 (c) and pure H2O
        # (h); the 1e6 factor converts the published values.
        c = {}
        h = {}
        c['b'] = 58.0
        c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
        c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
        c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
        h['b'] = 29.0
        h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6
        h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
        h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
        if X_fluid == 1:
            # Pure H2O: mixture and cross coefficients collapse to the H2O
            # values. Note X_fluid is the H2O fraction in this class.
            bm = h['b']
            cm = h['c']
            dm = h['d']
            em = h['e']
            c12= h['c']
            d12= h['d']
            e12= h['e']
        else:
            # Mixing rules: linear in b; quadratic with geometric-mean cross
            # terms (c12, d12, e12) for c, d and e.
            bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
            c12 = (c['c']*h['c'])**0.5
            cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
            d12 = (c['d']*h['d'])**0.5
            dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
            e12 = (c['e']*h['e'])**0.5
            em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
        am = cm + dm/v + em/v**2
        y = bm/(4*v)
        # Hard-sphere repulsive and attractive terms (R = 83.14 cm3 bar K-1
        # mol-1).
        pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
        pt2 = - am / (T**0.5 * v * (v+bm))
        return -(P - pt1 - pt2)

    def volume_c(self,P,T):
        """ Calculates the volume of a pure CO2 fluid, by solving Eq (14) of
        Kerrick and Jacobs (1981).

        Parameters
        ----------
        P   float
            Total pressure in bars.
        T   float
            Temperature in degC.

        Returns
        -------
        float
            Volume of a pure CO2 fluid.
        """
        return root_scalar(self.root_volume_c,x0=15,x1=35,args=(P,T)).root

    def root_volume_c(self,v,P,T):
        """ Returns the difference between the lhs and rhs of Eq (14) of
        Kerrick and Jacobs (1981). For use with a root solver to identify the
        volume of a pure CO2 fluid.

        Parameters
        ----------
        v   float
            Guess for the volume
        P   float
            Total pressure in bars.
        T   float
            Temperature in degC.

        Returns
        -------
        float
            The difference between the lhs and rhs of Eq (14) of Kerrick and Jacobs (1981),
            in bars.
        """
        T = T + 273.15
        # Temperature-dependent coefficients for pure CO2.
        c = {}
        c['b'] = 58.0
        c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
        c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
        c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
        c['a'] = c['c'] + c['d']/v + c['e']/v**2
        y = c['b']/(4*v)
        pt1 = (83.14 * T * (1 + y + y**2 - y**3)) / (v*(1-y)**3)
        pt2 = - c['a'] / (T**0.5 * v * (v+c['b']))
        return -(P - pt1 - pt2)

    def lnPhi_mix(self,P,T,X_fluid):
        """ Calculates the natural log of the fugacity coefficient for H2O in a
        mixed CO2-H2O fluid. Uses Eq (27) of Kerrick and Jacobs (1981).

        Parameters
        ----------
        P   float
            Total pressure in bars.
        T   float
            Temperature in degC
        X_fluid     float
            The mole fraction of H2O in the fluid.

        Returns
        -------
        float
            The natural log of the fugacity coefficient for H2O in a mixed fluid.
        """
        T = T + 273.15
        # self.volume() expects degC, so convert back for the call.
        v = self.volume(P,T-273.15,X_fluid)
        # Temperature-dependent coefficients for pure CO2 (c) and pure H2O
        # (h); the 1e6 factor converts the published values.
        c = {}
        h = {}
        c['b'] = 58.0
        c['c'] = (28.31 + 0.10721*T - 8.81e-6*T**2)*1e6
        c['d'] = (9380.0 - 8.53*T + 1.189e-3*T**2)*1e6
        c['e'] = (-368654.0 + 715.9*T + 0.1534*T**2)*1e6
        h['b'] = 29.0
        h['c'] = (290.78 - 0.30276*T + 1.4774e-4*T**2)*1e6
        h['d'] = (-8374.0 + 19.437*T - 8.148e-3*T**2)*1e6
        h['e'] = (76600.0 - 133.9*T + 0.1071*T**2)*1e6
        if X_fluid == 1:
            # Pure H2O: mixture and cross coefficients collapse to H2O values.
            bm = h['b']
            cm = h['c']
            dm = h['d']
            em = h['e']
            c12= h['c']
            d12= h['d']
            e12= h['e']
        else:
            # Mixing rules: linear in b; quadratic with geometric-mean cross
            # terms (c12, d12, e12) for c, d and e.
            bm = X_fluid*h['b'] + (1-X_fluid)*c['b']
            c12 = (c['c']*h['c'])**0.5
            cm = h['c']*X_fluid**2 + c['c']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*c12
            d12 = (c['d']*h['d'])**0.5
            dm = h['d']*X_fluid**2 + c['d']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*d12
            e12 = (c['e']*h['e'])**0.5
            em = h['e']*X_fluid**2 + c['e']*(1-X_fluid)**2 + 2*X_fluid*(1-X_fluid)*e12
        # Reduced-volume parameter of the hard-sphere repulsive term.
        y = bm/(4*v)
        # Compressibility factor from Z = Pv/RT (R = 83.14 cm3 bar K-1 mol-1).
        Z = v*P/(83.14*T)
        # Term-by-term accumulation of Eq (27) of Kerrick and Jacobs (1981),
        # the mirror image of the CO2 class with c and h swapped. Do not
        # reorder the statements.
        lnPhi = 0
        lnPhi += (4*y-3*y**2)/(1-y)**2 + (h['b']/bm * (4*y-2*y**2)/(1-y)**3)
        lnPhi += - (2*h['c']*X_fluid+2*(1-X_fluid)*c12)/(83.14*T**1.5*bm)*np.log((v+bm)/v)
        lnPhi += - cm*h['b']/(83.14*T**1.5*bm*(v+bm))
        lnPhi += cm*h['b']/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
        lnPhi += - (2*h['d']*X_fluid+2*d12*(1-X_fluid)+dm)/(83.14*T**1.5*bm*v)
        lnPhi += (2*h['d']*X_fluid+2*(1-X_fluid)*d12+dm)/(83.14*T**1.5*bm**2)*np.log((v+bm)/v)
        lnPhi += h['b']*dm/(83.14*T**1.5*v*bm*(v+bm)) + 2*h['b']*dm/(83.14*T**1.5*bm**2*(v+bm))
        lnPhi += - 2*h['b']*dm/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
        lnPhi += - (2*h['e']*X_fluid + 2*(1-X_fluid)*e12+2*em)/(83.14*T**1.5*2*bm*v**2)
        lnPhi += (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**2*v)
        lnPhi += - (2*h['e']*X_fluid+2*e12*(1-X_fluid)+2*em)/(83.14*T**1.5*bm**3)*np.log((v+bm)/v)
        lnPhi += em*h['b']/(83.14*T**1.5*2*bm*v**2*(v+bm)) - 3*em*h['b']/(83.14*T**1.5*2*bm**2*v*(v+bm))
        lnPhi += 3*em*h['b']/(83.14*T**1.5*bm**4)*np.log((v+bm)/v) - 3*em*h['b']/(83.14*T**1.5*bm**3*(v+bm))
        lnPhi += - np.log(Z)
        return lnPhi
class fugacity_ZD09_co2(FugacityModel):
    """ Implementation of the Zhang and Duan (2009) fugacity model for pure CO2
    fluids."""
    def __init__(self):
        # Validity limits: 1 bar - 100 kbar and 200 - 2300 oC.
        self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',[1,1e5],calibration_checks.crf_Between,'bar','Zhang and Duan (2009) EOS',
                                        fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,
                                        description_msg=calibration_checks.crmsg_Between_description),
                                    calibration_checks.CalibrationRange('temperature',[200,2300],calibration_checks.crf_Between,'oC','Zhang and Duan (2009) EOS',
                                        fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,
                                        description_msg=calibration_checks.crmsg_Between_description)])

    def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
        """ Calculates the fugacity of a pure CO2 fluid, or a mixed fluid assuming
        ideal mixing. Implements eqn (14) of Zhang and Duan (2009).

        Parameters
        ---------
        pressure     float
            Pressure in bars
        temperature     float
            Temperature in degC
        X_fluid     float
            Mole fraction of CO2 in the fluid. Default is 1.0.

        Returns
        -------
        float
            Fugacity of CO2, standard state 1 bar.
        """
        P = pressure/10   # bar -> MPa
        T = temperature + 273.15
        # Coefficients a1-a15 of the Zhang and Duan (2009) EOS; a[0] is a
        # placeholder so indices match the published numbering. The same
        # table is duplicated in Vm() below -- keep the two in sync.
        a = np.array([0.0,
                    2.95177298930e-2,
                    -6.33756452413e3,
                    -2.75265428882e5,
                    1.29128089283e-3,
                    -1.45797416153e2,
                    7.65938947237e4,
                    2.58661493537e-6,
                    0.52126532146,
                    -1.39839523753e2,
                    -2.36335007175e-8,
                    5.35026383543e-3,
                    -0.27110649951,
                    2.50387836486e4,
                    0.73226726041,
                    1.5483335997e-2])
        # e, s: scaling constants for CO2 (presumably Lennard-Jones epsilon
        # in K and sigma in angstrom -- confirm against the paper).
        e = 235.0
        s = 3.79
        # Scaled (dimensionless) pressure and temperature.
        Pm = 3.0636*P*s**3/e
        Tm = 154*T/e
        # Solve eqn (8) for the scaled volume via the secant method.
        Vm = root_scalar(self.Vm,x0=200,x1=100,args=(P,T)).root
        S1 = ((a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
            (a[4]+a[5]/Tm**2+a[6]/Tm**3)/(2*Vm**2)+
            (a[7]+a[8]/Tm**2+a[9]/Tm**3)/(4*Vm**4)+
            (a[10]+a[11]/Tm**2+a[12]/Tm**3)/(5*Vm**5)+
            (a[13]/(2*a[15]*Tm**3)*(a[14]+1-(a[14]+1+a[15]/Vm**2)*
            np.exp(-a[15]/Vm**2)))
            )
        Z = Pm*Vm/(8.314*Tm)
        lnfc = Z - 1 - np.log(Z) + S1
        # Convert back from MPa to bars (factor of 10).
        return P*np.exp(lnfc)*10

    def Vm(self,Vm,P,T):
        """ Function to use for solving for the parameter Vm, defined by eqn (8) of
        Zhang and Duan (2009). Called by scipy.fsolve in the fugacity method.

        Parameters
        ----------
        Vm     float
            Guessed value of Vm
        P     float
            Pressure in MPa
        T     float
            Temperature in K

        Returns
        -------
        float
            Difference between (rearranged) LHS and RHS of eqn (8) of Zhang and Duan (2009).
        """
        Pm = 3.0636*P*3.79**3/235.0
        Tm = 154*T/235.0
        # Same coefficient table as fugacity() above -- keep in sync.
        a = np.array([0.0,
                    2.95177298930e-2,
                    -6.33756452413e3,
                    -2.75265428882e5,
                    1.29128089283e-3,
                    -1.45797416153e2,
                    7.65938947237e4,
                    2.58661493537e-6,
                    0.52126532146,
                    -1.39839523753e2,
                    -2.36335007175e-8,
                    5.35026383543e-3,
                    -0.27110649951,
                    2.50387836486e4,
                    0.73226726041,
                    1.5483335997e-2])
        return ((1+(a[1]+a[2]/Tm**2+a[3]/Tm**3)/Vm+
                (a[4]+a[5]/Tm**2+a[6]/Tm**3)/Vm**2+
                (a[7]+a[8]/Tm**2+a[9]/Tm**3)/Vm**4)*0.08314*Tm/Pm - Vm
                )
class fugacity_MRK_co2(FugacityModel):
    """ Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
    <NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
    Lowenstern.
    """
    def __init__(self):
        # No calibration limits are declared for this model.
        self.set_calibration_ranges([])

    def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
        """ Calculates the fugacity of CO2 in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).

        Parameters
        ----------
        pressure    float
            Total pressure of the system in bars.
        temperature     float
            Temperature in degC
        X_fluid     float
            Mole fraction of CO2 in the fluid.

        Returns
        -------
        float
            fugacity of CO2 in bars
        """
        fug = self.MRK(pressure,temperature+273.15)
        # Ideal mixing: scale the pure-CO2 fugacity by the mole fraction.
        return fug*X_fluid

    def FNA(self,TK):
        # T-dependent MRK 'a' parameter; used for the X_1 (H2O) end-member
        # term in MRK() below.
        return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325

    def FNB(self,TK):
        # T-dependent MRK 'a' parameter; used for the (1 - X_1) (CO2)
        # end-member term in MRK() below.
        return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)

    def FNC(self,TK):
        # T-dependent MRK cross-term 'a' parameter for H2O-CO2 interaction.
        R = 83.14321
        return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)

    def FNF(self,V,TK,A,B,P):
        # MRK equation of state residual: zero when V is the equilibrium
        # molar volume at (P, TK) for parameters A, B.
        R = 83.14321
        return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P

    def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
        R = 83.14321
        # End-member 'b' parameters: B_1 for H2O, B_2 for CO2.
        B_1 = 14.6
        B_2 = 29.7
        # Evaluate both end-members; only the X_1 == 0 (pure CO2) result is
        # kept by this class.
        for X_1 in [0,1]:
            B = X_1 * B_1 + (1 - X_1) * B_2
            A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
            # Damped secant-style iteration on the EOS residual FNF to find
            # the molar volume V; Q is halved whenever the step overshoots
            # (sign change in the residual) to keep the iteration stable.
            Temp2 = B + 5
            Q = 1
            Temp1 = 0
            while abs(Temp2 - Temp1) >= 0.00001:
                Temp1 = Temp2
                F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
                Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
                F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
                if F_2 * F_1 <= 0:
                    Q = Q / 2.
                if abs(Temp2 - Temp1) > 0.00001:
                    F_1 = F_2
            V = Temp2
            # Fugacity coefficients for each end-member (G_1: H2O-like,
            # G_2: CO2-like); G_1 is computed but unused when X_1 == 0.
            G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
            G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
            G_1 = np.exp(G_1)
            G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
            G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
            G_2 = np.exp(G_2)
            if X_1 == 0:
                fCO2o = G_2 * P #The fugacity of CO2
        return fCO2o
class fugacity_MRK_h2o(FugacityModel):
    """ Modified Redlick Kwong fugacity model as used by VolatileCalc. Python implementation by
    <NAME> (github.com/DJRgeoscience/VolatileCalcForPython), based on VB code by Newman &
    Lowenstern.
    """
    def __init__(self):
        # No calibration limits are declared for this model.
        self.set_calibration_ranges([])

    def fugacity(self,pressure,temperature,X_fluid=1.0,**kwargs):
        """ Calculates the fugacity of H2O in a pure or mixed H2O-CO2 fluid (assuming ideal mixing).

        Parameters
        ----------
        pressure    float
            Total pressure of the system in bars.
        temperature     float
            Temperature in degC
        X_fluid     float
            Mole fraction of H2O in the fluid.

        Returns
        -------
        float
            fugacity of H2O in bars
        """
        fug = self.MRK(pressure,temperature+273.15)
        # Ideal mixing: scale the pure-H2O fugacity by the mole fraction.
        return fug*X_fluid

    def FNA(self,TK):
        # T-dependent MRK 'a' parameter; used for the X_1 (H2O) end-member
        # term in MRK() below.
        return (166800000 - 193080 * (TK - 273.15) + 186.4 * (TK - 273.15)**2 - 0.071288 * ((TK - 273.15)**3)) * 1.01325

    def FNB(self,TK):
        # T-dependent MRK 'a' parameter; used for the (1 - X_1) (CO2)
        # end-member term in MRK() below.
        return 1.01325 * (73030000 - 71400 * (TK - 273.15) + 21.57 * (TK - 273.15)**2)

    def FNC(self,TK):
        # T-dependent MRK cross-term 'a' parameter for H2O-CO2 interaction.
        R = 83.14321
        return 1.01325 * (np.exp(-11.071 + 5953 / TK - 2746000 / TK**2 + 464600000 / TK**3) * 0.5 * R * R * TK**2.5 / 1.02668 + 40123800)

    def FNF(self,V,TK,A,B,P):
        # MRK equation of state residual: zero when V is the equilibrium
        # molar volume at (P, TK) for parameters A, B.
        R = 83.14321
        return R * TK / (V - B) - A / ((V * V + B * V) * TK**0.5) - P

    def MRK(self,P,TK): #Redlich-Kwong routine to estimate endmember H2O and CO2 fugacities
        R = 83.14321
        # End-member 'b' parameters: B_1 for H2O, B_2 for CO2.
        B_1 = 14.6
        B_2 = 29.7
        # Evaluate both end-members; only the X_1 == 1 (pure H2O) result is
        # kept by this class.
        for X_1 in [0,1]:
            B = X_1 * B_1 + (1 - X_1) * B_2
            A = X_1**2 * self.FNA(TK) + 2 * X_1 * (1 - X_1) * self.FNC(TK) + (1 - X_1)**2 * self.FNB(TK)
            # Damped secant-style iteration on the EOS residual FNF to find
            # the molar volume V; Q is halved whenever the step overshoots
            # (sign change in the residual) to keep the iteration stable.
            Temp2 = B + 5
            Q = 1
            Temp1 = 0
            while abs(Temp2 - Temp1) >= 0.00001:
                Temp1 = Temp2
                F_1 = (self.FNF(Temp1 + 0.01, TK, A, B, P) - self.FNF(Temp1, TK, A, B, P)) / 0.01
                Temp2 = Temp1 - Q * self.FNF(Temp1, TK, A, B, P) / F_1
                F_2 = (self.FNF(Temp2 + 0.01, TK, A, B, P) - self.FNF(Temp2, TK, A, B, P)) / 0.01
                if F_2 * F_1 <= 0:
                    Q = Q / 2.
                if abs(Temp2 - Temp1) > 0.00001:
                    F_1 = F_2
            V = Temp2
            # Fugacity coefficients for each end-member (G_1: H2O-like,
            # G_2: CO2-like); G_2 is computed but unused when X_1 == 1.
            G_1 = np.log(V / (V - B)) + B_1 / (V - B) - 2 * (X_1 * self.FNA(TK) + (1 - X_1) * self.FNC(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
            G_1 = G_1 + (np.log((V + B) / V) - B / (V + B)) * A * B_1 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
            G_1 = np.exp(G_1)
            G_2 = np.log(V / (V - B)) + B_2 / (V - B) - 2 * (X_1 * self.FNC(TK) + (1 - X_1) * self.FNB(TK)) * np.log((V + B) / V) / (R * TK**1.5 * B)
            G_2 = G_2 + (np.log((V + B) / V) - B / (V + B)) * A * B_2 / (R * TK**1.5 * B**2) - np.log(P * V / (R * TK))
            G_2 = np.exp(G_2)
            if X_1 == 1:
                fH2Oo = G_1 * P #The fugacity of H2O
        return fH2Oo
class fugacity_HB_co2(FugacityModel):
    """
    Holloway and Blank (1994) Modified Redlich Kwong EoS for CO2.

    Thin wrapper that delegates the fugacity evaluation to a
    fugacity_HollowayBlank instance with the species fixed to 'CO2'.
    """
    def __init__(self):
        checks = [
            calibration_checks.CalibrationRange(
                'pressure', [1, 1e5], calibration_checks.crf_Between, 'bar',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_Between_fail,
                pass_msg=calibration_checks.crmsg_Between_pass,
                description_msg=calibration_checks.crmsg_Between_description),
            calibration_checks.CalibrationRange(
                'temperature', 500.0, calibration_checks.crf_GreaterThan, 'oC',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_GreaterThan_fail,
                pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                description_msg=calibration_checks.crmsg_GreaterThan_description),
        ]
        self.set_calibration_ranges(checks)
        self.HBmodel = fugacity_HollowayBlank()

    def fugacity(self, pressure, temperature, X_fluid=1.0, **kwargs):
        """Fugacity of CO2 in bars: pure-fluid value scaled by X_fluid."""
        pure = self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='CO2')
        return pure * X_fluid
class fugacity_HB_h2o(FugacityModel):
    """
    Holloway and Blank (1994) Modified Redlich Kwong EoS for H2O.

    Thin wrapper that delegates the fugacity evaluation to a
    fugacity_HollowayBlank instance with the species fixed to 'H2O'.
    """
    def __init__(self):
        checks = [
            calibration_checks.CalibrationRange(
                'pressure', [1, 1e5], calibration_checks.crf_Between, 'bar',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_Between_fail,
                pass_msg=calibration_checks.crmsg_Between_pass,
                description_msg=calibration_checks.crmsg_Between_description),
            calibration_checks.CalibrationRange(
                'temperature', 500.0, calibration_checks.crf_GreaterThan, 'oC',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_GreaterThan_fail,
                pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                description_msg=calibration_checks.crmsg_GreaterThan_description),
        ]
        self.set_calibration_ranges(checks)
        self.HBmodel = fugacity_HollowayBlank()

    def fugacity(self, pressure, temperature, X_fluid=1.0, **kwargs):
        """Fugacity of H2O in bars: pure-fluid value scaled by X_fluid."""
        pure = self.HBmodel.fugacity(pressure=pressure, temperature=temperature, species='H2O')
        return pure * X_fluid
class fugacity_HollowayBlank(FugacityModel):
    """
    Implementation of the Modified Redlich Kwong presented in Holloway and Blank (1994) Reviews
    in Mineralogy and Geochemistry vol. 30. Originally written in Quickbasic. CO2 calculations
    translated to Matlab by <NAME> and translated to python by K. Iacovino for VESIcal.
    H2O calculations translated to VisualBasic by <NAME> and translated to python by
    K. Iacovino for VESIcal.
    """
    def __init__(self):
        # Validity limits: 1 bar - 100 kbar and T > 500 oC.
        self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',[1,1e5],calibration_checks.crf_Between,'bar','MRK EOS (Holloway and Blank, 1994)',
                                        fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,
                                        description_msg=calibration_checks.crmsg_Between_description),
                                    calibration_checks.CalibrationRange('temperature',500,calibration_checks.crf_GreaterThan,'oC','MRK EOS (Holloway and Blank, 1994)',
                                        fail_msg=calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                                        description_msg=calibration_checks.crmsg_GreaterThan_description)])

    def REDKW(self, BP, A2B):
        """
        The RK routine. A routine to calculate compressibility factor and fugacity coefficient
        with the Redlich-Kwong equation following Edmister (1968). This solution for supercritical
        fluid.

        Parameters
        ----------
        BP: float
            B parameter sum from RKCALC

        A2B: float
            A parameter sum from RKCALC

        Returns
        -------
        float
            XLNFP (fugacity coefficient?)
        """
        if A2B < 1*10**(-10):
            A2B = 0.001
        # Coefficients of the cubic in the compressibility factor Z.
        TH = 0.333333
        RR = -A2B*BP**2
        QQ = BP*(A2B-BP-1)
        XN = QQ*TH+RR-0.074074
        XM = QQ-TH
        XNN = XN*XN*0.25
        XMM = XM**3 / 27.0
        # ARG is the cubic discriminant: > 0 one real root, < 0 three.
        ARG = XNN+XMM
        if ARG > 0:
            # Single real root via Cardano's formula; the F sign flags
            # handle negative cube-root arguments.
            X = np.sqrt(ARG)
            F = 1
            XN2 = -XN*0.5
            iXMM = XN2+X
            if iXMM < 0:
                F = -1
            XMM = F*((F*iXMM)**TH)
            F = 1
            iXNN = XN2 - X
            if iXNN < 0:
                F = -1
            XNN = F*((F*iXNN)**TH)
            Z = XMM+XNN+TH
            ZBP = Z-BP
            # Clamp to keep the logarithms finite.
            if ZBP < 0.000001:
                ZBP = 0.000001
            BPZ = 1+BP/Z
            FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
            # Guard against overflow in the later exp().
            if FP < -37 or FP > 37:
                FP = 0.000001
        elif ARG <0:
            # Three real roots (trigonometric solution); take the largest.
            COSPHI = np.sqrt(-XNN/XMM)
            if XN > 0:
                COSPHI = -COSPHI
            TANPHI = np.sqrt(1-COSPHI**2)/COSPHI
            PHI = np.arctan(TANPHI)*TH
            FAC = 2*np.sqrt(-XM*TH)
            #sort for largest root
            R1 = np.cos(PHI)
            R2 = np.cos(PHI+2.0944)
            R3 = np.cos(PHI+4.18879)
            RH = R2
            if R1 > R2:
                RH = R1
            if R3 > RH:
                RH = R3
            Z = RH*FAC+TH
            ZBP = Z-BP
            if ZBP < 0.000001:
                ZBP = 0.000001
            BPZ = 1+BP/Z
            FP = Z-1-np.log(ZBP)-A2B*np.log(BPZ)
            if FP < -37 or FP > 37:
                FP = 0.000001
        else:
            # Degenerate case (ARG == 0): fall back to the ideal value.
            FP = 1
            Z = 1
        XLNFP = FP
        return XLNFP

    def Saxena(self, TK, pb):
        """
        High pressure corresponding states routines from Saxena and Fei (1987) GCA
        vol. 51, 783-791.

        Parameters
        ----------
        TK: float
            Temperature in K.

        pb: float
            Pressure in bars.

        Returns
        -------
        float
            XLNF, Natural log of the ratio F(P)/F(4000 bar)
        """
        #Define integration limit
        PO = 4000
        #Critical temperatures and pressures for CO2
        TR = TK/304.2
        PR = pb/73.9
        PC = 73.9
        #Virial coeficients
        A = 2.0614-2.2351/TR**2 - 0.39411*np.log(TR)
        B = 0.055125/TR + 0.039344/TR**2
        C = -1.8935*10**(-6)/TR - 1.1092*10**(-5)/TR**2 - 2.1892*10**(-5)/TR**3
        D = 5.0527*10**(-11)/TR - 6.3033*10**(-21)/TR**3
        #Calculate molar volume
        Z = A+B*PR+C*PR**2+D*PR**3
        V = Z*83.0117*TK/pb
        #integrate from PO (4000 bars) to P to calculate ln fugacity
        LNF = A*np.log(pb/PO)+(B/PC)*(pb-PO)+(C/(2*PC**2))*(pb**2-PO**2)
        LNF = LNF+(D/(3*PC**3))*(pb**3-PO**3)
        XLNF = LNF
        return XLNF

    def RKCALC(self, temperature, pressure, species):
        """
        Calculation of pure gas MRK properties following Holloway 1981, 1987

        Parameters
        ----------
        temperature: float
            Temperature in degrees K.

        pressure: float
            Pressure in atmospheres.

        species: str
            Which species to compute; 'CO2' or 'H2O'.

        Returns
        -------
        float
            Natural log of the fugacity of a pure gas.
        """
        #Define constants
        R = 82.05736
        RR = 6732.2
        pb = 1.013*pressure   # atmospheres -> bars
        PBLN = np.log(pb)
        TCEL = temperature-273.15
        RXT = R*temperature
        RT = R*temperature**1.5 * 10**(-6)
        if species == 'CO2':
            #Calculate T-dependent MRK A parameter CO2
            ACO2M = 73.03 - 0.0714*TCEL + 2.157*10**(-5)*TCEL**2
            #Define MRK B parameter for CO2
            BSUM = 29.7
            ASUM = ACO2M / (BSUM*RT)
        elif species == 'H2O':
            #Calculate T-dependent MRK A parameter H2O
            AH2OM = 115.98 - np.double(0.0016295)*temperature - 1.4984*10**(-5)*temperature**2
            #Define MRK B parameter for H2O
            BSUM = 14.5
            ASUM = AH2OM / (BSUM*RT)
        BSUM = pressure*BSUM/RXT
        XLNFP = self.REDKW(BSUM, ASUM)
        #Convert to ln(fugacity)
        PUREG = XLNFP + PBLN
        return PUREG

    def fugacity(self, pressure, temperature, species, **kwargs):
        """
        Calculates fugacity.

        Parameters
        ----------
        temperature: float
            Temperature in degrees C.

        pressure: float
            Pressure in bars.

        species: str
            Choose which species to calculate. Options are 'H2O' and 'CO2'.

        Returns
        -------
        float
            Fugacity coefficient for passed species
        """
        #convert temp and press to atmospheres and Kelvin
        pressureAtmo = pressure/1.013
        temperatureK = temperature + 273.15
        PO = 4000/1.013
        #Use the MRK below 4,000 bars, Saxena above 4,000 bars
        if pressure > 4000 and species=='CO2':
            # Evaluate the MRK at the 4 kbar reference pressure, then apply
            # the Saxena correction ln(F(P)/F(4000 bar)) on top.
            iPUREG = self.RKCALC(temperatureK, PO, species)
            XLNF = self.Saxena(temperatureK, pressure)
            PUREG = iPUREG + XLNF
        else:
            PUREG = self.RKCALC(temperatureK, pressureAtmo, species)
        #Convert from ln(fugacity) to fugacity
        stdf = np.exp(PUREG)
        return stdf
class fugacity_RK_co2(FugacityModel):
    """
    Implementation of the Redlich Kwong EoS for CO2.
    Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.

    Thin wrapper around fugacity_RedlichKwong with the species fixed to 'CO2'.
    """
    def __init__(self):
        # Consistency fix: the temperature bound was previously passed as a
        # one-element list ([500]) to crf_GreaterThan, while every sibling
        # model (fugacity_RK_h2o, fugacity_RedlichKwong, fugacity_HB_co2)
        # passes the scalar 500. Use the scalar form here too.
        self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',[1,1e5],calibration_checks.crf_Between,'bar','Redlich Kwong EOS',
                                        fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,
                                        description_msg=calibration_checks.crmsg_Between_description),
                                    calibration_checks.CalibrationRange('temperature',500,calibration_checks.crf_GreaterThan,'oC','Redlich Kwong EOS',
                                        fail_msg=calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                                        description_msg=calibration_checks.crmsg_GreaterThan_description)])
        # Delegate the actual EOS evaluation to the shared implementation.
        self.RKmodel = fugacity_RedlichKwong()

    def fugacity(self, pressure, temperature, X_fluid, **kwargs):
        """Fugacity of CO2 (bars): pure-fluid RK fugacity scaled by X_fluid.

        Parameters
        ----------
        pressure: float
            Total pressure in bars.
        temperature: float
            Temperature in degC.
        X_fluid: float
            Mole fraction of CO2 in the fluid.
        """
        return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'CO2')
class fugacity_RK_h2o(FugacityModel):
    """
    Implementation of the Redlich Kwong EoS for H2O.
    Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.

    Thin wrapper around fugacity_RedlichKwong with the species fixed to 'H2O'.
    """
    def __init__(self):
        checks = [
            calibration_checks.CalibrationRange(
                'pressure', [1, 1e5], calibration_checks.crf_Between, 'bar',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_Between_fail,
                pass_msg=calibration_checks.crmsg_Between_pass,
                description_msg=calibration_checks.crmsg_Between_description),
            calibration_checks.CalibrationRange(
                'temperature', 500, calibration_checks.crf_GreaterThan, 'oC',
                'Redlich Kwong EOS',
                fail_msg=calibration_checks.crmsg_GreaterThan_fail,
                pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                description_msg=calibration_checks.crmsg_GreaterThan_description),
        ]
        self.set_calibration_ranges(checks)
        self.RKmodel = fugacity_RedlichKwong()

    def fugacity(self, pressure, temperature, X_fluid, **kwargs):
        """Fugacity of H2O (bars): pure-fluid RK fugacity scaled by X_fluid."""
        return self.RKmodel.fugacity(pressure, temperature, X_fluid, 'H2O')
class fugacity_RedlichKwong(FugacityModel):
    """
    Implementation of the Redlich Kwong EoS
    Code derived from http://people.ds.cam.ac.uk/pjb10/thermo/pure.html - <NAME> 30 October 2003.
    """
    def __init__(self):
        # Validity limits: 1 bar - 100 kbar and T > 500 oC.
        self.set_calibration_ranges([calibration_checks.CalibrationRange('pressure',[1,1e5],calibration_checks.crf_Between,'bar','Redlich Kwong EOS',
                                        fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,
                                        description_msg=calibration_checks.crmsg_Between_description),
                                    calibration_checks.CalibrationRange('temperature',500,calibration_checks.crf_GreaterThan,'oC','Redlich Kwong EOS',
                                        fail_msg=calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks.crmsg_GreaterThan_pass,
                                        description_msg=calibration_checks.crmsg_GreaterThan_description)])

    def gamma(self, pressure, temperature, species):
        """
        Calculates fugacity coefficients.

        Parameters
        ----------
        temperature: float
            Temperature in degrees C.

        pressure: float
            Pressure in bars.

        species: str
            Choose which species to calculate. Options are 'H2O' and 'CO2'.

        Returns
        -------
        float
            Fugacity coefficient for passed species.
        """
        temperatureK = temperature + 273.15
        R = 8.3145
        fluid_species_names = ['CO2', 'H2O']
        # Critical temperature (K), critical pressure (bar) and acentric
        # factor for each species.
        critical_params = {'CO2':{  "cT":   304.15,
                                    "cP":   73.8659,
                                    "o":    0.225
                                    },
                            'H2O':{ "cT":   647.25,
                                    "cP":   221.1925,
                                    "o":    0.334
                                    }
                            }
        #Calculate a and b parameters (depend only on critical parameters)...
        a = 0.42748 * R**2.0 * critical_params[species]["cT"]**(2.5) / (critical_params[species]["cP"] * 10.0**5)
        b = 0.08664 * R * critical_params[species]["cT"] / (critical_params[species]["cP"] * 10.0**5)
        kappa = 0.0
        #Calculate coefficients in the cubic equation of state...
        #coeffs: (C0, C1, C2, A, B)
        A = a * pressure * 10.0**5 / (np.sqrt(temperatureK) * (R * temperatureK)**2.0)
        B = b * pressure * 10.0**5 / (R * temperatureK)
        C2 = -1.0
        C1 = A - B - B * B
        C0 = -A * B
        #Solve the cubic equation for Z0 - Z2, D...
        Q1 = C2 * C1 / 6.0 - C0 / 2.0 - C2**3.0 / 27.0
        P1 = C2**2.0 / 9.0 - C1 / 3.0
        D = Q1**2.0 - P1**3.0
        if D >= 0:
            # One real root (Cardano); the abs/ratio dance preserves the
            # sign through the fractional power.
            kOneThird = 1.0 / 3.0
            absQ1PSqrtD = np.fabs(Q1 + np.sqrt(D))
            temp1 = absQ1PSqrtD**kOneThird
            temp1 *= (Q1 + np.sqrt(D)) / absQ1PSqrtD
            absQ1MSqrtD = np.fabs(Q1 - np.sqrt(D))
            temp2 = absQ1MSqrtD**kOneThird
            temp2 *= (Q1 - np.sqrt(D)) / absQ1MSqrtD
            Z0 = temp1 + temp2 - C2 / 3.0
        else:
            # Three real roots (trigonometric solution); the swaps below
            # sort them so Z0 is the largest.
            temp1 = Q1**2.0 / (P1**3.0)
            temp2 = np.sqrt(1.0 - temp1) / np.sqrt(temp1)
            temp2 *= Q1 / np.fabs(Q1)
            gamma = np.arctan(temp2)
            if gamma < 0:
                gamma = gamma + np.pi
            Z0 = 2.0 * np.sqrt(P1) * np.cos(gamma/3.0) - C2 / 3.0
            Z1 = 2.0 * np.sqrt(P1) * np.cos((gamma + 2.0 * np.pi) / 3.0) - C2/3.0
            Z2 = 2.0 * np.sqrt(P1) * np.cos((gamma + 4.0 * np.pi) / 3.0) - C2/3.0
            if Z0 < Z1:
                temp0 = Z0
                Z0 = Z1
                Z1 = temp0
            if Z1 < Z2:
                temp0 = Z1
                Z1 = Z2
                Z2 = temp0
            if Z0 < Z1:
                temp0 = Z0
                Z0 = Z1
                Z1 = temp0
        #Calculate Departure Functions
        # Only gamma is returned; Hdep and Sdep (enthalpy and entropy
        # departures) are computed but unused here.
        gamma = np.exp(Z0 - 1.0 - np.log(Z0-B) - A * np.log(1.0+B/Z0)/B)
        Hdep = R * temperatureK * (Z0 - 1.0 - 1.5*A*np.log(1.0+B/Z0)/B)
        Sdep = R * (np.log(Z0-B) - 0.5*A*np.log(1.0+B/Z0)/B)
        return gamma

    def fugacity(self, pressure, temperature, X_fluid=1.0, species='H2O', **kwargs):
        """
        Calculates the fugacity of H2O in a mixed H2O-CO2 fluid using the universal relationships:
        P_i = f_i/gamma_i = (fpure_i * Xfluid_i) / gamma_i
        See Iacovino (2015) EPSL for further explanation.
        """
        # Both coefficients are evaluated even though only one is used;
        # the unused one is cheap and kept for symmetry.
        gammaH2O = self.gamma(pressure, temperature, 'H2O')
        gammaCO2 = self.gamma(pressure, temperature, 'CO2')
        fugacityH2Opure = pressure * gammaH2O
        fugacityCO2pure = pressure * gammaCO2
        if species == 'H2O':
            return fugacityH2Opure * X_fluid
        elif species == 'CO2':
            return fugacityCO2pure * X_fluid
        else:
            raise core.InputError("Species must be H2O or CO2.")
| [
"numpy.log",
"numpy.double",
"scipy.optimize.root_scalar",
"numpy.fabs",
"numpy.array",
"numpy.exp",
"numpy.cos",
"numpy.arctan",
"VESIcal.core.InputError",
"VESIcal.calibration_checks.CalibrationRange",
"numpy.sqrt"
] | [((22146, 22423), 'numpy.array', 'np.array', (['[0.0, 0.029517729893, -6337.56452413, -275265.428882, 0.00129128089283, -\n 145.797416153, 76593.8947237, 2.58661493537e-06, 0.52126532146, -\n 139.839523753, -2.36335007175e-08, 0.00535026383543, -0.27110649951, \n 25038.7836486, 0.73226726041, 0.015483335997]'], {}), '([0.0, 0.029517729893, -6337.56452413, -275265.428882, \n 0.00129128089283, -145.797416153, 76593.8947237, 2.58661493537e-06, \n 0.52126532146, -139.839523753, -2.36335007175e-08, 0.00535026383543, -\n 0.27110649951, 25038.7836486, 0.73226726041, 0.015483335997])\n', (22154, 22423), True, 'import numpy as np\n'), ((23942, 24219), 'numpy.array', 'np.array', (['[0.0, 0.029517729893, -6337.56452413, -275265.428882, 0.00129128089283, -\n 145.797416153, 76593.8947237, 2.58661493537e-06, 0.52126532146, -\n 139.839523753, -2.36335007175e-08, 0.00535026383543, -0.27110649951, \n 25038.7836486, 0.73226726041, 0.015483335997]'], {}), '([0.0, 0.029517729893, -6337.56452413, -275265.428882, \n 0.00129128089283, -145.797416153, 76593.8947237, 2.58661493537e-06, \n 0.52126532146, -139.839523753, -2.36335007175e-08, 0.00535026383543, -\n 0.27110649951, 25038.7836486, 0.73226726041, 0.015483335997])\n', (23950, 24219), True, 'import numpy as np\n'), ((38707, 38717), 'numpy.log', 'np.log', (['pb'], {}), '(pb)\n', (38713, 38717), True, 'import numpy as np\n'), ((40574, 40587), 'numpy.exp', 'np.exp', (['PUREG'], {}), '(PUREG)\n', (40580, 40587), True, 'import numpy as np\n'), ((4582, 4653), 'scipy.optimize.root_scalar', 'root_scalar', (['self.root_volume'], {'x0': 'x0', 'x1': '(x0 * 0.9)', 'args': '(P, T, X_fluid)'}), '(self.root_volume, x0=x0, x1=x0 * 0.9, args=(P, T, X_fluid))\n', (4593, 4653), False, 'from scipy.optimize import root_scalar\n'), ((7008, 7066), 'scipy.optimize.root_scalar', 'root_scalar', (['self.root_volume_h'], {'x0': '(15)', 'x1': '(35)', 'args': '(P, T)'}), '(self.root_volume_h, x0=15, x1=35, args=(P, T))\n', (7019, 7066), False, 'from 
scipy.optimize import root_scalar\n'), ((10080, 10100), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10086, 10100), True, 'import numpy as np\n'), ((10199, 10219), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10205, 10219), True, 'import numpy as np\n'), ((10373, 10393), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10379, 10393), True, 'import numpy as np\n'), ((10538, 10558), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10544, 10558), True, 'import numpy as np\n'), ((10807, 10827), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10813, 10827), True, 'import numpy as np\n'), ((11057, 11066), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (11063, 11066), True, 'import numpy as np\n'), ((13986, 14057), 'scipy.optimize.root_scalar', 'root_scalar', (['self.root_volume'], {'x0': 'x0', 'x1': '(x0 * 0.9)', 'args': '(P, T, X_fluid)'}), '(self.root_volume, x0=x0, x1=x0 * 0.9, args=(P, T, X_fluid))\n', (13997, 14057), False, 'from scipy.optimize import root_scalar\n'), ((16412, 16470), 'scipy.optimize.root_scalar', 'root_scalar', (['self.root_volume_c'], {'x0': '(15)', 'x1': '(35)', 'args': '(P, T)'}), '(self.root_volume_c, x0=15, x1=35, args=(P, T))\n', (16423, 16470), False, 'from scipy.optimize import root_scalar\n'), ((19473, 19493), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (19479, 19493), True, 'import numpy as np\n'), ((19592, 19612), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (19598, 19612), True, 'import numpy as np\n'), ((19766, 19786), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (19772, 19786), True, 'import numpy as np\n'), ((19931, 19951), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (19937, 19951), True, 'import numpy as np\n'), ((20200, 20220), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (20206, 20220), True, 'import 
numpy as np\n'), ((20450, 20459), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (20456, 20459), True, 'import numpy as np\n'), ((22851, 22900), 'scipy.optimize.root_scalar', 'root_scalar', (['self.Vm'], {'x0': '(200)', 'x1': '(100)', 'args': '(P, T)'}), '(self.Vm, x0=200, x1=100, args=(P, T))\n', (22862, 22900), False, 'from scipy.optimize import root_scalar\n'), ((27403, 27414), 'numpy.exp', 'np.exp', (['G_1'], {}), '(G_1)\n', (27409, 27414), True, 'import numpy as np\n'), ((27703, 27714), 'numpy.exp', 'np.exp', (['G_2'], {}), '(G_2)\n', (27709, 27714), True, 'import numpy as np\n'), ((30514, 30525), 'numpy.exp', 'np.exp', (['G_1'], {}), '(G_1)\n', (30520, 30525), True, 'import numpy as np\n'), ((30814, 30825), 'numpy.exp', 'np.exp', (['G_2'], {}), '(G_2)\n', (30820, 30825), True, 'import numpy as np\n'), ((35623, 35635), 'numpy.sqrt', 'np.sqrt', (['ARG'], {}), '(ARG)\n', (35630, 35635), True, 'import numpy as np\n'), ((46750, 46766), 'numpy.arctan', 'np.arctan', (['temp2'], {}), '(temp2)\n', (46759, 46766), True, 'import numpy as np\n'), ((1955, 2265), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '(20000.0)', 'calibration_checks.crf_LessThan', '"""bar"""', '"""Kerrick and Jacobs (1981) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_LessThan_fail', 'pass_msg': 'calibration_checks.crmsg_LessThan_pass', 'description_msg': 'calibration_checks.crmsg_LessThan_description'}), "('pressure', 20000.0, calibration_checks\n .crf_LessThan, 'bar', 'Kerrick and Jacobs (1981) EOS', fail_msg=\n calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.\n crmsg_LessThan_pass, description_msg=calibration_checks.\n crmsg_LessThan_description)\n", (1990, 2265), False, 'from VESIcal import calibration_checks\n'), ((2388, 2697), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(1050)', 'calibration_checks.crf_LessThan', '"""oC"""', '"""Kerrick and Jacobs 
(1981) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_LessThan_fail', 'pass_msg': 'calibration_checks.crmsg_LessThan_pass', 'description_msg': 'calibration_checks.crmsg_LessThan_description'}), "('temperature', 1050, calibration_checks\n .crf_LessThan, 'oC', 'Kerrick and Jacobs (1981) EOS', fail_msg=\n calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.\n crmsg_LessThan_pass, description_msg=calibration_checks.\n crmsg_LessThan_description)\n", (2423, 2697), False, 'from VESIcal import calibration_checks\n'), ((10979, 10999), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (10985, 10999), True, 'import numpy as np\n'), ((11361, 11671), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '(20000.0)', 'calibration_checks.crf_LessThan', '"""bar"""', '"""Kerrick and Jacobs (1981) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_LessThan_fail', 'pass_msg': 'calibration_checks.crmsg_LessThan_pass', 'description_msg': 'calibration_checks.crmsg_LessThan_description'}), "('pressure', 20000.0, calibration_checks\n .crf_LessThan, 'bar', 'Kerrick and Jacobs (1981) EOS', fail_msg=\n calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.\n crmsg_LessThan_pass, description_msg=calibration_checks.\n crmsg_LessThan_description)\n", (11396, 11671), False, 'from VESIcal import calibration_checks\n'), ((11794, 12103), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(1050)', 'calibration_checks.crf_LessThan', '"""oC"""', '"""Kerrick and Jacobs (1981) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_LessThan_fail', 'pass_msg': 'calibration_checks.crmsg_LessThan_pass', 'description_msg': 'calibration_checks.crmsg_LessThan_description'}), "('temperature', 1050, calibration_checks\n .crf_LessThan, 'oC', 'Kerrick and Jacobs (1981) EOS', fail_msg=\n calibration_checks.crmsg_LessThan_fail, pass_msg=calibration_checks.\n 
crmsg_LessThan_pass, description_msg=calibration_checks.\n crmsg_LessThan_description)\n", (11829, 12103), False, 'from VESIcal import calibration_checks\n'), ((20372, 20392), 'numpy.log', 'np.log', (['((v + bm) / v)'], {}), '((v + bm) / v)\n', (20378, 20392), True, 'import numpy as np\n'), ((20682, 20988), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Zhang and Duan (2009) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Zhang and Duan (2009) EOS',\n fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=\n calibration_checks.crmsg_Between_pass, description_msg=\n calibration_checks.crmsg_Between_description)\n", (20717, 20988), False, 'from VESIcal import calibration_checks\n'), ((21107, 21413), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '[200, 2300]', 'calibration_checks.crf_Between', '"""oC"""', '"""Zhang and Duan (2009) EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('temperature', [200, 2300],\n calibration_checks.crf_Between, 'oC', 'Zhang and Duan (2009) EOS',\n fail_msg=calibration_checks.crmsg_Between_fail, pass_msg=\n calibration_checks.crmsg_Between_pass, description_msg=\n calibration_checks.crmsg_Between_description)\n", (21142, 21413), False, 'from VESIcal import calibration_checks\n'), ((23291, 23300), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (23297, 23300), True, 'import numpy as np\n'), ((23324, 23336), 'numpy.exp', 'np.exp', (['lnfc'], {}), '(lnfc)\n', (23330, 23336), True, 'import numpy as np\n'), ((27360, 
27384), 'numpy.log', 'np.log', (['(P * V / (R * TK))'], {}), '(P * V / (R * TK))\n', (27366, 27384), True, 'import numpy as np\n'), ((27660, 27684), 'numpy.log', 'np.log', (['(P * V / (R * TK))'], {}), '(P * V / (R * TK))\n', (27666, 27684), True, 'import numpy as np\n'), ((30471, 30495), 'numpy.log', 'np.log', (['(P * V / (R * TK))'], {}), '(P * V / (R * TK))\n', (30477, 30495), True, 'import numpy as np\n'), ((30771, 30795), 'numpy.log', 'np.log', (['(P * V / (R * TK))'], {}), '(P * V / (R * TK))\n', (30777, 30795), True, 'import numpy as np\n'), ((31160, 31459), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Redlich Kwong EOS', fail_msg=\n calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.\n crmsg_Between_pass, description_msg=calibration_checks.\n crmsg_Between_description)\n", (31195, 31459), False, 'from VESIcal import calibration_checks\n'), ((31577, 31886), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(500.0)', 'calibration_checks.crf_GreaterThan', '"""oC"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', 500.0,\n calibration_checks.crf_GreaterThan, 'oC', 'Redlich Kwong EOS', fail_msg\n =calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks\n .crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", (31612, 31886), False, 'from VESIcal import 
calibration_checks\n'), ((32398, 32697), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Redlich Kwong EOS', fail_msg=\n calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.\n crmsg_Between_pass, description_msg=calibration_checks.\n crmsg_Between_description)\n", (32433, 32697), False, 'from VESIcal import calibration_checks\n'), ((32815, 33124), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(500.0)', 'calibration_checks.crf_GreaterThan', '"""oC"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', 500.0,\n calibration_checks.crf_GreaterThan, 'oC', 'Redlich Kwong EOS', fail_msg\n =calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks\n .crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", (32850, 33124), False, 'from VESIcal import calibration_checks\n'), ((33950, 34264), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""MRK EOS (Holloway and Blank, 1994)"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar',\n 'MRK EOS (Holloway and Blank, 
1994)', fail_msg=calibration_checks.\n crmsg_Between_fail, pass_msg=calibration_checks.crmsg_Between_pass,\n description_msg=calibration_checks.crmsg_Between_description)\n", (33985, 34264), False, 'from VESIcal import calibration_checks\n'), ((34384, 34709), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(500)', 'calibration_checks.crf_GreaterThan', '"""oC"""', '"""MRK EOS (Holloway and Blank, 1994)"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', 500, calibration_checks.\n crf_GreaterThan, 'oC', 'MRK EOS (Holloway and Blank, 1994)', fail_msg=\n calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks.\n crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", (34419, 34709), False, 'from VESIcal import calibration_checks\n'), ((36215, 36234), 'numpy.sqrt', 'np.sqrt', (['(-XNN / XMM)'], {}), '(-XNN / XMM)\n', (36222, 36234), True, 'import numpy as np\n'), ((36467, 36478), 'numpy.cos', 'np.cos', (['PHI'], {}), '(PHI)\n', (36473, 36478), True, 'import numpy as np\n'), ((36496, 36516), 'numpy.cos', 'np.cos', (['(PHI + 2.0944)'], {}), '(PHI + 2.0944)\n', (36502, 36516), True, 'import numpy as np\n'), ((36532, 36553), 'numpy.cos', 'np.cos', (['(PHI + 4.18879)'], {}), '(PHI + 4.18879)\n', (36538, 36553), True, 'import numpy as np\n'), ((37659, 37669), 'numpy.log', 'np.log', (['TR'], {}), '(TR)\n', (37665, 37669), True, 'import numpy as np\n'), ((40875, 41174), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 
'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Redlich Kwong EOS', fail_msg=\n calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.\n crmsg_Between_pass, description_msg=calibration_checks.\n crmsg_Between_description)\n", (40910, 41174), False, 'from VESIcal import calibration_checks\n'), ((41292, 41601), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '[500]', 'calibration_checks.crf_GreaterThan', '"""oC"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', [500],\n calibration_checks.crf_GreaterThan, 'oC', 'Redlich Kwong EOS', fail_msg\n =calibration_checks.crmsg_GreaterThan_fail, pass_msg=calibration_checks\n .crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", (41327, 41601), False, 'from VESIcal import calibration_checks\n'), ((42342, 42641), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Redlich Kwong EOS', fail_msg=\n calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.\n crmsg_Between_pass, description_msg=calibration_checks.\n crmsg_Between_description)\n", (42377, 42641), False, 'from VESIcal import calibration_checks\n'), ((42759, 43067), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(500)', 
'calibration_checks.crf_GreaterThan', '"""oC"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', 500, calibration_checks.\n crf_GreaterThan, 'oC', 'Redlich Kwong EOS', fail_msg=calibration_checks\n .crmsg_GreaterThan_fail, pass_msg=calibration_checks.\n crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", (42794, 43067), False, 'from VESIcal import calibration_checks\n'), ((43605, 43904), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""pressure"""', '[1, 100000.0]', 'calibration_checks.crf_Between', '"""bar"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_Between_fail', 'pass_msg': 'calibration_checks.crmsg_Between_pass', 'description_msg': 'calibration_checks.crmsg_Between_description'}), "('pressure', [1, 100000.0],\n calibration_checks.crf_Between, 'bar', 'Redlich Kwong EOS', fail_msg=\n calibration_checks.crmsg_Between_fail, pass_msg=calibration_checks.\n crmsg_Between_pass, description_msg=calibration_checks.\n crmsg_Between_description)\n", (43640, 43904), False, 'from VESIcal import calibration_checks\n'), ((44022, 44330), 'VESIcal.calibration_checks.CalibrationRange', 'calibration_checks.CalibrationRange', (['"""temperature"""', '(500)', 'calibration_checks.crf_GreaterThan', '"""oC"""', '"""Redlich Kwong EOS"""'], {'fail_msg': 'calibration_checks.crmsg_GreaterThan_fail', 'pass_msg': 'calibration_checks.crmsg_GreaterThan_pass', 'description_msg': 'calibration_checks.crmsg_GreaterThan_description'}), "('temperature', 500, calibration_checks.\n crf_GreaterThan, 'oC', 'Redlich Kwong EOS', fail_msg=calibration_checks\n .crmsg_GreaterThan_fail, pass_msg=calibration_checks.\n crmsg_GreaterThan_pass, description_msg=calibration_checks.\n crmsg_GreaterThan_description)\n", 
(44057, 44330), False, 'from VESIcal import calibration_checks\n'), ((45840, 45861), 'numpy.sqrt', 'np.sqrt', (['temperatureK'], {}), '(temperatureK)\n', (45847, 45861), True, 'import numpy as np\n'), ((46653, 46673), 'numpy.sqrt', 'np.sqrt', (['(1.0 - temp1)'], {}), '(1.0 - temp1)\n', (46660, 46673), True, 'import numpy as np\n'), ((46676, 46690), 'numpy.sqrt', 'np.sqrt', (['temp1'], {}), '(temp1)\n', (46683, 46690), True, 'import numpy as np\n'), ((46717, 46728), 'numpy.fabs', 'np.fabs', (['Q1'], {}), '(Q1)\n', (46724, 46728), True, 'import numpy as np\n'), ((47577, 47591), 'numpy.log', 'np.log', (['(Z0 - B)'], {}), '(Z0 - B)\n', (47583, 47591), True, 'import numpy as np\n'), ((48363, 48409), 'VESIcal.core.InputError', 'core.InputError', (['"""Species must be H2O or CO2."""'], {}), "('Species must be H2O or CO2.')\n", (48378, 48409), False, 'from VESIcal import core\n'), ((27133, 27152), 'numpy.log', 'np.log', (['(V / (V - B))'], {}), '(V / (V - B))\n', (27139, 27152), True, 'import numpy as np\n'), ((27433, 27452), 'numpy.log', 'np.log', (['(V / (V - B))'], {}), '(V / (V - B))\n', (27439, 27452), True, 'import numpy as np\n'), ((30244, 30263), 'numpy.log', 'np.log', (['(V / (V - B))'], {}), '(V / (V - B))\n', (30250, 30263), True, 'import numpy as np\n'), ((30544, 30563), 'numpy.log', 'np.log', (['(V / (V - B))'], {}), '(V / (V - B))\n', (30550, 30563), True, 'import numpy as np\n'), ((36077, 36088), 'numpy.log', 'np.log', (['ZBP'], {}), '(ZBP)\n', (36083, 36088), True, 'import numpy as np\n'), ((36093, 36104), 'numpy.log', 'np.log', (['BPZ'], {}), '(BPZ)\n', (36099, 36104), True, 'import numpy as np\n'), ((36311, 36335), 'numpy.sqrt', 'np.sqrt', (['(1 - COSPHI ** 2)'], {}), '(1 - COSPHI ** 2)\n', (36318, 36335), True, 'import numpy as np\n'), ((36357, 36374), 'numpy.arctan', 'np.arctan', (['TANPHI'], {}), '(TANPHI)\n', (36366, 36374), True, 'import numpy as np\n'), ((36398, 36415), 'numpy.sqrt', 'np.sqrt', (['(-XM * TH)'], {}), '(-XM * TH)\n', (36405, 36415), 
True, 'import numpy as np\n'), ((38030, 38045), 'numpy.log', 'np.log', (['(pb / PO)'], {}), '(pb / PO)\n', (38036, 38045), True, 'import numpy as np\n'), ((46280, 46290), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (46287, 46290), True, 'import numpy as np\n'), ((46362, 46372), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (46369, 46372), True, 'import numpy as np\n'), ((46428, 46438), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (46435, 46438), True, 'import numpy as np\n'), ((46510, 46520), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (46517, 46520), True, 'import numpy as np\n'), ((46870, 46889), 'numpy.cos', 'np.cos', (['(gamma / 3.0)'], {}), '(gamma / 3.0)\n', (46876, 46889), True, 'import numpy as np\n'), ((46936, 46971), 'numpy.cos', 'np.cos', (['((gamma + 2.0 * np.pi) / 3.0)'], {}), '((gamma + 2.0 * np.pi) / 3.0)\n', (46942, 46971), True, 'import numpy as np\n'), ((47018, 47053), 'numpy.cos', 'np.cos', (['((gamma + 4.0 * np.pi) / 3.0)'], {}), '((gamma + 4.0 * np.pi) / 3.0)\n', (47024, 47053), True, 'import numpy as np\n'), ((47446, 47460), 'numpy.log', 'np.log', (['(Z0 - B)'], {}), '(Z0 - B)\n', (47452, 47460), True, 'import numpy as np\n'), ((23199, 23223), 'numpy.exp', 'np.exp', (['(-a[15] / Vm ** 2)'], {}), '(-a[15] / Vm ** 2)\n', (23205, 23223), True, 'import numpy as np\n'), ((27225, 27244), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (27231, 27244), True, 'import numpy as np\n'), ((27525, 27544), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (27531, 27544), True, 'import numpy as np\n'), ((30336, 30355), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (30342, 30355), True, 'import numpy as np\n'), ((30636, 30655), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (30642, 30655), True, 'import numpy as np\n'), ((36827, 36838), 'numpy.log', 'np.log', (['ZBP'], {}), '(ZBP)\n', (36833, 36838), True, 'import numpy as np\n'), ((36843, 36854), 'numpy.log', 'np.log', 
(['BPZ'], {}), '(BPZ)\n', (36849, 36854), True, 'import numpy as np\n'), ((46856, 46867), 'numpy.sqrt', 'np.sqrt', (['P1'], {}), '(P1)\n', (46863, 46867), True, 'import numpy as np\n'), ((46922, 46933), 'numpy.sqrt', 'np.sqrt', (['P1'], {}), '(P1)\n', (46929, 46933), True, 'import numpy as np\n'), ((47004, 47015), 'numpy.sqrt', 'np.sqrt', (['P1'], {}), '(P1)\n', (47011, 47015), True, 'import numpy as np\n'), ((47465, 47485), 'numpy.log', 'np.log', (['(1.0 + B / Z0)'], {}), '(1.0 + B / Z0)\n', (47471, 47485), True, 'import numpy as np\n'), ((47537, 47557), 'numpy.log', 'np.log', (['(1.0 + B / Z0)'], {}), '(1.0 + B / Z0)\n', (47543, 47557), True, 'import numpy as np\n'), ((47598, 47618), 'numpy.log', 'np.log', (['(1.0 + B / Z0)'], {}), '(1.0 + B / Z0)\n', (47604, 47618), True, 'import numpy as np\n'), ((39196, 39216), 'numpy.double', 'np.double', (['(0.0016295)'], {}), '(0.0016295)\n', (39205, 39216), True, 'import numpy as np\n'), ((25974, 26043), 'numpy.exp', 'np.exp', (['(-11.071 + 5953 / TK - 2746000 / TK ** 2 + 464600000 / TK ** 3)'], {}), '(-11.071 + 5953 / TK - 2746000 / TK ** 2 + 464600000 / TK ** 3)\n', (25980, 26043), True, 'import numpy as np\n'), ((27290, 27309), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (27296, 27309), True, 'import numpy as np\n'), ((27590, 27609), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (27596, 27609), True, 'import numpy as np\n'), ((29067, 29136), 'numpy.exp', 'np.exp', (['(-11.071 + 5953 / TK - 2746000 / TK ** 2 + 464600000 / TK ** 3)'], {}), '(-11.071 + 5953 / TK - 2746000 / TK ** 2 + 464600000 / TK ** 3)\n', (29073, 29136), True, 'import numpy as np\n'), ((30401, 30420), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (30407, 30420), True, 'import numpy as np\n'), ((30701, 30720), 'numpy.log', 'np.log', (['((V + B) / V)'], {}), '((V + B) / V)\n', (30707, 30720), True, 'import numpy as np\n')] |
from numpy import arcsin, cos, exp, angle, pi, sin, tan, array
from ....Functions.Geometry.inter_line_line import inter_line_line
def _comp_point_coordinate(self):
    """Compute the point coordinates needed to plot the Slot.

    Parameters
    ----------
    self : HoleM53
        A HoleM53 object

    Returns
    -------
    point_dict: dict
        A dict of the slot coordinates
    """
    Rext = self.get_Rext()

    # "Tooth" angle (P1',0,P1)
    alpha_T = 2 * arcsin(self.W3 / (2 * (Rext - self.H1)))
    # Magnet pole pitch angle (Z1,0,Z1')
    alpha_S = (2 * pi / self.Zh) - alpha_T
    # Angle (P1,P1',P4') and (P5',P4', )
    alpha = (pi - self.W0) / 2
    # Half slot pitch
    hssp = pi / self.Zh

    Z1 = (Rext - self.H1) * exp(-1j * alpha_S / 2)
    x11 = 2 * sin(alpha_S / 2) * (Rext - self.H1)  # distance from P1 to P1'
    # Right triangle: P4, P1 and the foot of the perpendicular from P4
    # onto the line (P1,P1')
    H = tan(alpha) * (x11 / 2 - self.W1 / 2)
    Z4 = Z1.real - H - 1j * self.W1 / 2
    x45 = self.H2 / cos(alpha)  # distance from P4 to P5
    Z5 = Z4 - x45

    # Rotation angle between the frame "b" (P4 at the origin, P1 on the
    # X+ axis) and the machine frame; computed once and reused below.
    theta = angle(Z1 - Z4)

    # "Random" points on (P5,P8) and (P1,P8) used to intersect the lines
    Z58 = (self.W4 - 1j * self.H2) * exp(1j * theta) + Z4  # from frame "b"
    Z18 = (Rext - self.H1 - self.H2 + 1j * self.W3 / 2) * exp(-1j * hssp)  # tooth frame
    Z8 = inter_line_line(Z5, Z58, Z1, Z18)[0]

    # Express P8 in frame "b", build the remaining points there, then map
    # each one back to the machine frame
    Z8b = (Z8 - Z4) * exp(-1j * theta)
    Z9 = (Z8b + 1j * self.H2) * exp(1j * theta) + Z4
    Z2 = (Z8b + 1j * self.H2 - self.W2) * exp(1j * theta) + Z4
    Z3 = (Z8b + 1j * self.H2 - self.W2 - self.W4) * exp(1j * theta) + Z4
    Z7 = (Z8b - self.W2) * exp(1j * theta) + Z4
    Z6 = (Z8b - self.W2 - self.W4) * exp(1j * theta) + Z4

    point_dict = {
        "Z1": Z1,
        "Z2": Z2,
        "Z3": Z3,
        "Z4": Z4,
        "Z5": Z5,
        "Z6": Z6,
        "Z7": Z7,
        "Z8": Z8,
        "Z9": Z9,
    }
    # Symmetry: mirror every point across the X axis
    for label in list(point_dict.keys()):
        point_dict[label + "s"] = point_dict[label].conjugate()
    point_dict["Zc0"] = inter_line_line(Z3, Z2, point_dict["Z3s"], point_dict["Z2s"])[0]
    return point_dict
| [
"numpy.angle",
"numpy.arcsin",
"numpy.sin",
"numpy.tan",
"numpy.exp",
"numpy.cos"
] | [((473, 513), 'numpy.arcsin', 'arcsin', (['(self.W3 / (2 * (Rext - self.H1)))'], {}), '(self.W3 / (2 * (Rext - self.H1)))\n', (479, 513), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((745, 769), 'numpy.exp', 'exp', (['(-1.0j * alpha_S / 2)'], {}), '(-1.0j * alpha_S / 2)\n', (748, 769), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((906, 916), 'numpy.tan', 'tan', (['alpha'], {}), '(alpha)\n', (909, 916), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1004, 1014), 'numpy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (1007, 1014), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1313, 1330), 'numpy.exp', 'exp', (['(-1.0j * hssp)'], {}), '(-1.0j * hssp)\n', (1316, 1330), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((782, 798), 'numpy.sin', 'sin', (['(alpha_S / 2)'], {}), '(alpha_S / 2)\n', (785, 798), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1453, 1467), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1458, 1467), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1211, 1225), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1216, 1225), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1510, 1524), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1515, 1524), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1582, 1596), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1587, 1596), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1664, 1678), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1669, 1678), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n'), ((1721, 1735), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1726, 1735), False, 'from numpy import arcsin, cos, exp, 
angle, pi, sin, tan, array\n'), ((1788, 1802), 'numpy.angle', 'angle', (['(Z1 - Z4)'], {}), '(Z1 - Z4)\n', (1793, 1802), False, 'from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\n')] |
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : kmeans.py
# Author : YunYang1994
# Created date: 2019-01-25 11:08:15
# Description :
#
#================================================================
import cv2
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
current_palette = list(sns.xkcd_rgb.values())
def iou(box, clusters):
    """Compute the IoU between one box and each of k cluster boxes.

    Every box is a (width, height) pair shifted to the origin, so the
    overlap is simply the element-wise minimum of the two sizes.

    param:
        box: tuple or array of two elements (width, height)
        clusters: numpy array of shape (k, 2)
    return:
        numpy array of shape (k,) with the IoU of ``box`` against every cluster
    raises:
        ValueError: if any overlap has zero width or zero height
    """
    overlap_w = np.minimum(clusters[:, 0], box[0])
    overlap_h = np.minimum(clusters[:, 1], box[1])
    if np.count_nonzero(overlap_w == 0) > 0 or np.count_nonzero(overlap_h == 0) > 0:
        raise ValueError("Box has no area")
    intersection = overlap_w * overlap_h
    union = box[0] * box[1] + clusters[:, 0] * clusters[:, 1] - intersection
    return intersection / union
def kmeans(boxes, k, dist=np.median, seed=1):
    """Run k-means clustering on boxes using the IoU-based distance (1 - IoU).

    :param boxes: numpy array of shape (r, 2), where r is the number of rows
    :param k: number of clusters
    :param dist: aggregation used to recompute a cluster center (median by default)
    :param seed: RNG seed controlling the initial center selection
    :return: tuple of (cluster centers (k, 2), per-box cluster index (r,),
             per-box/per-cluster distance matrix (r, k))
    """
    n_boxes = boxes.shape[0]
    distances = np.empty((n_boxes, k))  # one column of distances per cluster
    last_assignment = np.zeros((n_boxes,))

    np.random.seed(seed)
    # Initialize the centers with k distinct boxes picked at random.
    clusters = boxes[np.random.choice(n_boxes, k, replace=False)]

    while True:
        # Assignment step: distance of every box to every current center.
        for col in range(k):
            distances[:, col] = 1 - iou(clusters[col], boxes)
        assignment = np.argmin(distances, axis=1)

        # Converged once no box changes cluster between iterations.
        if (last_assignment == assignment).all():
            break

        # Update step: recompute each center from its member boxes.
        for col in range(k):
            clusters[col] = dist(boxes[assignment == col], axis=0)

        last_assignment = assignment

    return clusters, assignment, distances
def parse_anno(annotation_path):
    """Read an annotation file and collect normalized (width, height) box sizes.

    Each line starts with an image path followed by 5 tokens per box:
    x_min, y_min, x_max, y_max and one extra field (presumably a class id;
    it is ignored here — TODO confirm against the dataset format).
    Box dimensions are normalized by the image size read from disk.

    :param annotation_path: path to the annotation txt file
    :return: numpy array of shape (n_boxes, 2) with normalized (width, height)
    """
    result = []
    # Context manager guarantees the file handle is closed even on error
    # (the original opened it and never closed it).
    with open(annotation_path, 'r') as anno:
        for line in anno:
            s = line.strip().split(' ')
            image = cv2.imread(s[0])
            image_h, image_w = image.shape[:2]
            s = s[1:]
            box_cnt = len(s) // 5
            for i in range(box_cnt):
                x_min, y_min, x_max, y_max = float(s[i*5+0]), float(s[i*5+1]), float(s[i*5+2]), float(s[i*5+3])
                width = (x_max - x_min) / image_w
                height = (y_max - y_min) / image_h
                result.append([width, height])
    return np.asarray(result)
def plot_cluster_result(clusters, nearest_clusters, WithinClusterSumDist, wh, k):
    """Scatter-plot the (width, height) points colored by cluster, mark each
    cluster center in red, and save the figure to ./kmeans.jpg.

    :param clusters: cluster centers, numpy array of shape (k, 2)
    :param nearest_clusters: cluster index of every point, shape (n,)
    :param WithinClusterSumDist: value shown as "Mean IoU" in the legend title
    :param wh: clustered (width, height) points, shape (n, 2)
    :param k: number of clusters
    """
    for cluster_id in np.unique(nearest_clusters):
        members = nearest_clusters == cluster_id
        color = current_palette[cluster_id]
        plt.rc('font', size=8)
        # One scatter series per cluster, labeled with its member count.
        plt.plot(wh[members, 0], wh[members, 1], "p",
                 color=color,
                 alpha=0.5, label="cluster = {}, N = {:6.0f}".format(cluster_id, np.sum(members)))
        # Annotate the cluster center with its index.
        plt.text(clusters[cluster_id, 0],
                 clusters[cluster_id, 1],
                 "c{}".format(cluster_id),
                 fontsize=20, color="red")
        plt.title("Clusters=%d" % k)
        plt.xlabel("width")
        plt.ylabel("height")
    plt.legend(title="Mean IoU = {:5.4f}".format(WithinClusterSumDist))
    plt.tight_layout()
    plt.savefig("./kmeans.jpg")
    plt.show()
if __name__ == '__main__':
    # CLI: dataset annotation file in, anchor file out, number of anchors.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_txt", type=str, default="./raccoon_dataset/train.txt")
    parser.add_argument("--anchors_txt", type=str, default="./data/raccoon_anchors.txt")
    parser.add_argument("--cluster_num", type=int, default=9)
    args = parser.parse_args()
    # Collect normalized (width, height) pairs from every annotated box.
    anno_result = parse_anno(args.dataset_txt)
    clusters, nearest_clusters, distances = kmeans(anno_result, args.cluster_num)
    # sorted by area
    area = clusters[:, 0] * clusters[:, 1]
    indice = np.argsort(area)
    clusters = clusters[indice]
    # NOTE(review): `nearest_clusters` still holds the pre-sort labels, so the
    # center annotations in the plot below may not match the point colors —
    # confirm whether the labels should be remapped through `indice`.
    # Write the anchors as space-separated "width height" pairs on one line.
    with open(args.anchors_txt, "w") as f:
        for i in range(args.cluster_num):
            width, height = clusters[i]
            f.writelines(str(width) + " " + str(height) + " ")
    # Mean distance of each box to its own (pre-sort) cluster; 1 - it is the mean IoU.
    WithinClusterMeanDist = np.mean(distances[np.arange(distances.shape[0]),nearest_clusters])
    plot_cluster_result(clusters, nearest_clusters, 1-WithinClusterMeanDist, anno_result, args.cluster_num)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.empty",
"numpy.argmin",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"seaborn.xkcd_rgb.values",
"numpy.unique",
"matplotlib.pyplot.rc",
"numpy.random.choice",
"numpy.minimum... | [((485, 506), 'seaborn.xkcd_rgb.values', 'sns.xkcd_rgb.values', ([], {}), '()\n', (504, 506), True, 'import seaborn as sns\n'), ((885, 919), 'numpy.minimum', 'np.minimum', (['clusters[:, 0]', 'box[0]'], {}), '(clusters[:, 0], box[0])\n', (895, 919), True, 'import numpy as np\n'), ((928, 962), 'numpy.minimum', 'np.minimum', (['clusters[:, 1]', 'box[1]'], {}), '(clusters[:, 1], box[1])\n', (938, 962), True, 'import numpy as np\n'), ((1646, 1665), 'numpy.empty', 'np.empty', (['(rows, k)'], {}), '((rows, k))\n', (1654, 1665), True, 'import numpy as np\n'), ((1707, 1724), 'numpy.zeros', 'np.zeros', (['(rows,)'], {}), '((rows,))\n', (1715, 1724), True, 'import numpy as np\n'), ((1730, 1750), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1744, 1750), True, 'import numpy as np\n'), ((3118, 3136), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (3128, 3136), True, 'import numpy as np\n'), ((3255, 3282), 'numpy.unique', 'np.unique', (['nearest_clusters'], {}), '(nearest_clusters)\n', (3264, 3282), True, 'import numpy as np\n'), ((3887, 3905), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3903, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3910, 3937), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./kmeans.jpg"""'], {}), "('./kmeans.jpg')\n", (3921, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3942, 3952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3950, 3952), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4019), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4017, 4019), False, 'import argparse\n'), ((4499, 4515), 'numpy.argsort', 'np.argsort', (['area'], {}), '(area)\n', (4509, 4515), True, 'import numpy as np\n'), ((1824, 1864), 'numpy.random.choice', 'np.random.choice', (['rows', 'k'], {'replace': '(False)'}), '(rows, k, replace=False)\n', (1840, 1864), True, 'import numpy as np\n'), ((2146, 2174), 'numpy.argmin', 
'np.argmin', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (2155, 2174), True, 'import numpy as np\n'), ((2719, 2735), 'cv2.imread', 'cv2.imread', (['s[0]'], {}), '(s[0])\n', (2729, 2735), False, 'import cv2\n'), ((3372, 3394), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(8)'}), "('font', size=8)\n", (3378, 3394), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3754), 'matplotlib.pyplot.title', 'plt.title', (["('Clusters=%d' % k)"], {}), "('Clusters=%d' % k)\n", (3735, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3762, 3781), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""width"""'], {}), "('width')\n", (3772, 3781), True, 'import matplotlib.pyplot as plt\n'), ((3790, 3810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""height"""'], {}), "('height')\n", (3800, 3810), True, 'import matplotlib.pyplot as plt\n'), ((970, 994), 'numpy.count_nonzero', 'np.count_nonzero', (['(x == 0)'], {}), '(x == 0)\n', (986, 994), True, 'import numpy as np\n'), ((1002, 1026), 'numpy.count_nonzero', 'np.count_nonzero', (['(y == 0)'], {}), '(y == 0)\n', (1018, 1026), True, 'import numpy as np\n'), ((4783, 4812), 'numpy.arange', 'np.arange', (['distances.shape[0]'], {}), '(distances.shape[0])\n', (4792, 4812), True, 'import numpy as np\n'), ((3542, 3554), 'numpy.sum', 'np.sum', (['pick'], {}), '(pick)\n', (3548, 3554), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.