code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Author: <NAME>
"""
import numpy as np
def custom_round(x, base=5):
    """Round *x* up to the nearest multiple of *base*."""
    n_multiples = np.ceil(float(x) / base)
    return int(base * n_multiples)
def custom_ravel(a, b):
    """Interleave two sequences of possibly different lengths
    (assumes len(a) > len(b)); leftover elements of *a* are
    appended on their own once *b* is exhausted.
    """
    interleaved = []
    for idx, left in enumerate(a):
        interleaved.append(left)
        if idx < len(b):
            interleaved.append(b[idx])
    return interleaved
def resample_2d(X, resolution):
    """Resample input data for efficient plotting.

    Bins the 2-d samples into a histogram, keeps one jittered point
    per non-empty bin, and returns those points.

    Parameters:
    -----------
    X : array_like
        Input data for clustering; shape (n_samples, 2).
    resolution : float
        Target size of one histogram bin in data units; the number of
        bins per axis is ptp(axis) / resolution (rounded up).

    Returns:
    --------
    xx[mask] : array_like
        Jittered x coordinates of the non-empty bins.
    yy[mask] : array_like
        Jittered y coordinates of the non-empty bins.
    """
    x, y = X[:, 0], X[:, 1]
    # Bins per axis so each bin spans roughly `resolution` data units.
    nbins = np.ptp(X, axis=0) / resolution
    hh, locx, locy = np.histogram2d(x, y, bins=np.ceil(nbins).astype(int))
    xwidth, ywidth = np.diff(locx).mean(), np.diff(locy).mean()
    mask = hh != 0  # keep only occupied bins
    # Drop the last edge and shift so each axis has one point per bin.
    locx = locx[:-1] + xwidth
    locy = locy[:-1] + ywidth
    yy, xx = np.meshgrid(locy, locx)
    # Deterministic jitter so points do not sit on an exact lattice.
    np.random.seed(0)
    # BUG FIX: jitter each axis by its OWN bin width — previously the
    # y coordinates were jittered by xwidth and vice versa, which is
    # wrong whenever the two axes have different bin sizes.
    xx += np.random.uniform(-xwidth/2, xwidth/2, size=hh.shape)
    yy += np.random.uniform(-ywidth/2, ywidth/2, size=hh.shape)
    return xx[mask], yy[mask]
| [
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.random.seed",
"numpy.ceil",
"numpy.ptp",
"numpy.diff"
] | [((1253, 1276), 'numpy.meshgrid', 'np.meshgrid', (['locy', 'locx'], {}), '(locy, locx)\n', (1264, 1276), True, 'import numpy as np\n'), ((1281, 1298), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1295, 1298), True, 'import numpy as np\n'), ((1309, 1366), 'numpy.random.uniform', 'np.random.uniform', (['(-xwidth / 2)', '(xwidth / 2)'], {'size': 'hh.shape'}), '(-xwidth / 2, xwidth / 2, size=hh.shape)\n', (1326, 1366), True, 'import numpy as np\n'), ((1373, 1430), 'numpy.random.uniform', 'np.random.uniform', (['(-ywidth / 2)', '(ywidth / 2)'], {'size': 'hh.shape'}), '(-ywidth / 2, ywidth / 2, size=hh.shape)\n', (1390, 1430), True, 'import numpy as np\n'), ((987, 1004), 'numpy.ptp', 'np.ptp', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (993, 1004), True, 'import numpy as np\n'), ((1117, 1130), 'numpy.diff', 'np.diff', (['locx'], {}), '(locx)\n', (1124, 1130), True, 'import numpy as np\n'), ((1139, 1152), 'numpy.diff', 'np.diff', (['locy'], {}), '(locy)\n', (1146, 1152), True, 'import numpy as np\n'), ((1066, 1080), 'numpy.ceil', 'np.ceil', (['nbins'], {}), '(nbins)\n', (1073, 1080), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
import numpy as np
from modules.modules_spect_mmd import sampler, \
generator, joint_discriminator, spect_kernel
from utils.model_utils import l1_loss, eval_kernel
from utils.tf_forward_tan import lddmm
class VariationalCycleGAN(object):
def __init__(self, dim_pitch=1, dim_mfc=23, n_frames=128,
             joint_discriminator=joint_discriminator,
             spect_kernel=spect_kernel,
             generator=generator, sampler=sampler,
             lddmm=lddmm, mode='train',
             log_file_name='no_name_passed', pre_train=None):
    """Build the graph, create a session and optionally restore weights.

    Parameters
    ----------
    dim_pitch : int
        Feature dimension of the pitch contour.
    dim_mfc : int
        Feature dimension of the MFC (spectral) features.
    n_frames : int
        Number of frames per training segment (sets the size of the
        first-order difference matrix used by the momenta loss).
    joint_discriminator, spect_kernel, generator, sampler, lddmm : callable
        Graph-building functions; injectable so they can be swapped out.
    mode : str
        'train' additionally creates a TensorBoard writer and a
        train-step counter.
    log_file_name : str
        Subdirectory under ./tensorboard_log/ for event files.
    pre_train : str or None
        Checkpoint path to restore; when None all variables are
        freshly initialized.
    """
    self.n_frames = n_frames
    self.pitch_shape = [None, dim_pitch, None] #[batch_size, num_features, num_frames]
    self.mfc_shape = [None, dim_mfc, None]
    # First-order difference matrix: identity with -1 on the
    # superdiagonal, used to penalize non-smooth momenta.
    self.first_order_diff_mat = np.eye(self.n_frames, dtype=np.float32)
    for i in range(1, self.n_frames):
        self.first_order_diff_mat[i-1,i] = -1
    # Create the kernel for lddmm
    self.kernel = tf.expand_dims(tf.constant([6,50],
                                   dtype=tf.float32), axis=0)
    self.sampler = sampler
    self.generator = generator
    self.joint_discriminator = joint_discriminator
    self.spect_kernel = spect_kernel
    self.lddmm = lddmm
    self.mode = mode
    # Graph construction must precede Saver/Session creation.
    self.build_model()
    self.optimizer_initializer()
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    if pre_train is not None:
        self.saver.restore(self.sess, pre_train)
    else:
        self.sess.run(tf.global_variables_initializer())
    if self.mode == 'train':
        self.train_step = 0
        self.writer = tf.summary.FileWriter('./tensorboard_log/'+log_file_name,
                                            tf.get_default_graph())
        self.generator_summaries, self.discriminator_summaries = self.summary()
def build_model(self):
    """Construct the full computation graph.

    Builds, in order: placeholders (real / fake / test samples and loss
    weights), the A->B and B->A pipelines (momenta sampler -> lddmm pitch
    warp -> MFC generator) with their cycle and identity paths, the joint
    pitch+MFC discriminators, the spectral MMD kernels, all generator and
    discriminator losses, the trainable-variable split, and the test-time
    conversion branches.
    """
    # Placeholders for real training samples
    self.pitch_A_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_A_real')
    self.pitch_B_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_B_real')
    self.mfc_A_real = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_A_real')
    self.mfc_B_real = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_B_real')
    # Placeholders for fake generated samples (fed back in the
    # discriminator update step of train()).
    self.pitch_A_fake = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_A_fake')
    self.pitch_B_fake = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_B_fake')
    self.mfc_A_fake = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_A_fake')
    self.mfc_B_fake = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_B_fake')
    # Placeholder for test samples
    self.pitch_A_test = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_A_test')
    self.mfc_A_test = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_A_test')
    self.pitch_B_test = tf.placeholder(tf.float32, shape=self.pitch_shape,
                                       name='pitch_B_test')
    self.mfc_B_test = tf.placeholder(tf.float32, shape=self.mfc_shape,
                                     name='mfc_B_test')
    # Placeholders for the loss weights (fed per training step)
    self.lambda_cycle_pitch = tf.placeholder(tf.float32, None,
                                             name='lambda_cycle_pitch')
    self.lambda_cycle_mfc = tf.placeholder(tf.float32, None,
                                           name='lambda_cycle_mfc')
    self.lambda_momenta = tf.placeholder(tf.float32, None,
                                         name='lambda_momenta')
    self.lambda_identity_mfc = tf.placeholder(tf.float32, None,
                                              name='lambda_identity_mfc')
    '''
    Generator A
    '''
    # Generate pitch from A to B
    self.momenta_generation_A2B = self.sampler(input_pitch=self.pitch_A_real,
        input_mfc=self.mfc_A_real, reuse=False, scope_name='sampler_A2B')
    self.pitch_generation_A2B = self.lddmm(x=self.pitch_A_real,
        p=self.momenta_generation_A2B, kernel=self.kernel, reuse=False, scope_name='lddmm')
    self.mfc_generation_A2B = self.generator(input_pitch=self.pitch_generation_A2B,
        input_mfc=self.mfc_A_real, reuse=False, scope_name='generator_A2B')
    # Cyclic and Identity generation
    self.momenta_cycle_A2A = self.sampler(input_pitch=self.pitch_generation_A2B,
        input_mfc=self.mfc_generation_A2B, reuse=False, scope_name='sampler_B2A')
    self.pitch_cycle_A2A = self.lddmm(x=self.pitch_generation_A2B,
        p=self.momenta_cycle_A2A, kernel=self.kernel, reuse=True, scope_name='lddmm')
    self.mfc_cycle_A2A = self.generator(input_pitch=self.pitch_cycle_A2A,
        input_mfc=self.mfc_generation_A2B, reuse=False, scope_name='generator_B2A')
    self.mfc_identity_A2B = self.generator(input_pitch=self.pitch_B_real,
        input_mfc=self.mfc_B_real, reuse=True, scope_name='generator_A2B')
    '''
    Generator B
    '''
    # Generate pitch from B to A
    self.momenta_generation_B2A = self.sampler(input_pitch=self.pitch_B_real,
        input_mfc=self.mfc_B_real, reuse=True, scope_name='sampler_B2A')
    self.pitch_generation_B2A = self.lddmm(x=self.pitch_B_real,
        p=self.momenta_generation_B2A, kernel=self.kernel, reuse=True, scope_name='lddmm')
    self.mfc_generation_B2A = self.generator(input_pitch=self.pitch_generation_B2A,
        input_mfc=self.mfc_B_real, reuse=True, scope_name='generator_B2A')
    # Cyclic and Identity generation
    self.momenta_cycle_B2B = self.sampler(input_pitch=self.pitch_generation_B2A,
        input_mfc=self.mfc_generation_B2A, reuse=True, scope_name='sampler_A2B')
    self.pitch_cycle_B2B = self.lddmm(x=self.pitch_generation_B2A,
        p=self.momenta_cycle_B2B, kernel=self.kernel, reuse=True, scope_name='lddmm')
    self.mfc_cycle_B2B = self.generator(input_pitch=self.pitch_cycle_B2B,
        input_mfc=self.mfc_generation_B2A, reuse=True, scope_name='generator_A2B')
    self.mfc_identity_B2A = self.generator(input_pitch=self.pitch_A_real,
        input_mfc=self.mfc_A_real, reuse=True, scope_name='generator_B2A')
    '''
    Initialize the joint discriminators
    '''
    # Discriminator initialized to keep parameters in memory
    self.joint_discrimination_B_fake = self.joint_discriminator(
        input_mfc=tf.concat([self.mfc_A_real,
            self.mfc_generation_A2B], axis=1),
        input_pitch=tf.concat([self.pitch_A_real,
            self.pitch_generation_A2B], axis=1),
        reuse=False, scope_name='joint_discriminator_A')
    self.joint_discrimination_A_fake = self.joint_discriminator(
        input_mfc=tf.concat([self.mfc_B_real,
            self.mfc_generation_B2A], axis=1),
        input_pitch=tf.concat([self.pitch_B_real,
            self.pitch_generation_B2A], axis=1),
        reuse=False, scope_name='joint_discriminator_B')
    '''
    Initialize the spect kernel
    '''
    # Kernel initialized to keep parameters in memory
    self.spect_kernel_A_real = self.spect_kernel(input_mfc=self.mfc_A_real,
        reuse=False, scope_name='spect_kernel')
    self.spect_kernel_A_fake = self.spect_kernel(input_mfc=self.mfc_generation_B2A,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_B_real = self.spect_kernel(input_mfc=self.mfc_B_real,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_B_fake = self.spect_kernel(input_mfc=self.mfc_generation_A2B,
        reuse=True, scope_name='spect_kernel')
    self.kernel_AA = eval_kernel(kernel1=self.spect_kernel_A_real,
                                 kernel2=self.spect_kernel_A_fake)
    self.kernel_BB = eval_kernel(kernel1=self.spect_kernel_B_real,
                                 kernel2=self.spect_kernel_B_fake)
    self.kernel_AB = eval_kernel(kernel1=self.spect_kernel_A_real,
                                 kernel2=self.spect_kernel_B_real)
    self.kernel_BA = eval_kernel(kernel1=self.spect_kernel_A_fake,
                                 kernel2=self.spect_kernel_B_fake)
    '''
    Computing loss for generators
    '''
    # Cycle loss
    self.cycle_loss_pitch = (l1_loss(y=self.pitch_A_real, y_hat=self.pitch_cycle_A2A)
        + l1_loss(y=self.pitch_B_real, y_hat=self.pitch_cycle_B2B)) / 2.0
    # BUG FIX: the B-domain term previously compared mfc_B_real against
    # mfc_cycle_A2A (copy-paste) instead of the B cycle output.
    self.cycle_loss_mfc = (l1_loss(y=self.mfc_A_real, y_hat=self.mfc_cycle_A2A)
        + l1_loss(y=self.mfc_B_real, y_hat=self.mfc_cycle_B2B)) / 2.0
    # Identity loss
    self.identity_loss_mfc = (l1_loss(y=self.mfc_identity_A2B, y_hat=self.mfc_B_real)
        + l1_loss(y=self.mfc_identity_B2A, y_hat=self.mfc_A_real)) / 2.0
    # Sampler-Generator loss
    self.generator_loss_A2B = l1_loss(y=tf.ones_like(self.joint_discrimination_B_fake),
                                      y_hat=self.joint_discrimination_B_fake)
    # BUG FIX: y_hat previously pointed at joint_discrimination_B_fake
    # (copy-paste), making the B2A term a duplicate of the A2B term.
    self.generator_loss_B2A = l1_loss(y=tf.ones_like(self.joint_discrimination_A_fake),
                                      y_hat=self.joint_discrimination_A_fake)
    self.gen_disc_loss = (self.generator_loss_A2B + self.generator_loss_B2A) / 2.0
    # MMD loss
    self.gen_kernel_loss = self.kernel_AA + self.kernel_BB \
        - self.kernel_AB - self.kernel_BA
    # Momenta loss: penalize first-order differences of the momenta.
    self.momenta_loss_A2B = tf.reduce_sum(tf.square(tf.matmul(self.first_order_diff_mat,
            tf.reshape(self.momenta_generation_A2B, [-1,1])))) \
        + tf.reduce_sum(tf.square(tf.matmul(self.first_order_diff_mat,
            tf.reshape(self.momenta_cycle_A2A, [-1,1]))))
    self.momenta_loss_B2A = tf.reduce_sum(tf.square(tf.matmul(self.first_order_diff_mat,
            tf.reshape(self.momenta_generation_B2A, [-1,1])))) \
        + tf.reduce_sum(tf.square(tf.matmul(self.first_order_diff_mat,
            tf.reshape(self.momenta_cycle_B2B, [-1,1]))))
    self.momenta_loss = (self.momenta_loss_A2B + self.momenta_loss_B2A) / 2.0
    # Merging all generator losses.
    # BUG FIX: the momenta smoothness term was computed (and
    # lambda_momenta is fed by train()) but was never added here.
    self.generator_loss = (self.gen_disc_loss + self.gen_kernel_loss
        + self.lambda_cycle_pitch*self.cycle_loss_pitch
        + self.lambda_cycle_mfc*self.cycle_loss_mfc
        + self.lambda_momenta*self.momenta_loss
        + self.lambda_identity_mfc*self.identity_loss_mfc)
    # Compute the discriminator probability for pair of inputs.
    # NOTE(review): the label convention below (zeros for the
    # real-A/fake-B pairing, ones for fake-A/real-B) looks inverted vs
    # the usual GAN setup — preserved as-is; confirm against training
    # behavior before changing.
    self.joint_discrimination_input_A_real_B_fake \
        = self.joint_discriminator(input_mfc=tf.concat([self.mfc_A_real, self.mfc_B_fake], axis=1),
            input_pitch=tf.concat([self.pitch_A_real, self.pitch_B_fake], axis=1),
            reuse=True, scope_name='joint_discriminator_A')
    self.joint_discrimination_input_A_fake_B_real \
        = self.joint_discriminator(input_mfc=tf.concat([self.mfc_A_fake, self.mfc_B_real], axis=1),
            input_pitch=tf.concat([self.pitch_A_fake, self.pitch_B_real], axis=1),
            reuse=True, scope_name='joint_discriminator_A')
    self.joint_discrimination_input_B_real_A_fake \
        = self.joint_discriminator(input_mfc=tf.concat([self.mfc_B_real, self.mfc_A_fake], axis=1),
            input_pitch=tf.concat([self.pitch_B_real, self.pitch_A_fake], axis=1),
            reuse=True, scope_name='joint_discriminator_B')
    self.joint_discrimination_input_B_fake_A_real \
        = self.joint_discriminator(input_mfc=tf.concat([self.mfc_B_fake, self.mfc_A_real], axis=1),
            input_pitch=tf.concat([self.pitch_B_fake, self.pitch_A_real], axis=1),
            reuse=True, scope_name='joint_discriminator_B')
    # Compute discriminator loss for backprop
    self.joint_discriminator_loss_input_A_real \
        = l1_loss(y=tf.zeros_like(self.joint_discrimination_input_A_real_B_fake),
                  y_hat=self.joint_discrimination_input_A_real_B_fake)
    self.joint_discriminator_loss_input_A_fake \
        = l1_loss(y=tf.ones_like(self.joint_discrimination_input_A_fake_B_real),
                  y_hat=self.joint_discrimination_input_A_fake_B_real)
    self.joint_discriminator_loss_A = (self.joint_discriminator_loss_input_A_real
        + self.joint_discriminator_loss_input_A_fake) / 2.0
    self.joint_discriminator_loss_input_B_real \
        = l1_loss(y=tf.zeros_like(self.joint_discrimination_input_B_real_A_fake),
                  y_hat=self.joint_discrimination_input_B_real_A_fake)
    self.joint_discriminator_loss_input_B_fake \
        = l1_loss(y=tf.ones_like(self.joint_discrimination_input_B_fake_A_real),
                  y_hat=self.joint_discrimination_input_B_fake_A_real)
    self.joint_discriminator_loss_B = (self.joint_discriminator_loss_input_B_real
        + self.joint_discriminator_loss_input_B_fake) / 2.0
    # Merge the two discriminators into one
    self.joint_discriminator_loss = (self.joint_discriminator_loss_A + self.joint_discriminator_loss_B) / 2.0
    # Evaluate the kernel for mfcc (discriminator-side: fakes are fed
    # through placeholders, not taken from the generator outputs)
    self.spect_kernel_disc_A_real = self.spect_kernel(input_mfc=self.mfc_A_real,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_disc_A_fake = self.spect_kernel(input_mfc=self.mfc_A_fake,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_disc_B_real = self.spect_kernel(input_mfc=self.mfc_B_real,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_disc_B_fake = self.spect_kernel(input_mfc=self.mfc_B_fake,
        reuse=True, scope_name='spect_kernel')
    self.spect_kernel_A_real_B_real = eval_kernel(kernel1=self.spect_kernel_disc_A_real,
        kernel2=self.spect_kernel_disc_B_real)
    self.spect_kernel_A_real_A_fake = eval_kernel(kernel1=self.spect_kernel_disc_A_real,
        kernel2=self.spect_kernel_disc_A_fake)
    self.spect_kernel_B_real_B_fake = eval_kernel(kernel1=self.spect_kernel_disc_B_real,
        kernel2=self.spect_kernel_disc_B_fake)
    self.spect_kernel_A_fake_B_fake = eval_kernel(kernel1=self.spect_kernel_disc_A_fake,
        kernel2=self.spect_kernel_disc_B_fake)
    # Merge the two discriminators into one
    self.disc_kernel_loss = self.spect_kernel_A_real_B_real \
        + self.spect_kernel_A_fake_B_fake \
        - self.spect_kernel_A_real_A_fake \
        - self.spect_kernel_B_real_B_fake
    # Final merging of joint and spect discriminators
    self.discriminator_loss = self.joint_discriminator_loss + self.disc_kernel_loss
    # Categorize variables to optimize the two sets separately
    trainable_variables = tf.trainable_variables()
    self.discriminator_vars = [var for var in trainable_variables
        if 'discriminator' in var.name or 'kernel' in var.name]
    self.generator_vars = [var for var in trainable_variables
        if 'generator' in var.name or 'sampler' in var.name]
    # Reserved for test
    self.momenta_A2B_test = self.sampler(input_pitch=self.pitch_A_test,
        input_mfc=self.mfc_A_test, reuse=True, scope_name='sampler_A2B')
    self.pitch_A2B_test = self.lddmm(x=self.pitch_A_test,
        p=self.momenta_A2B_test, kernel=self.kernel, reuse=True, scope_name='lddmm')
    self.mfc_A2B_test = self.generator(input_pitch=self.pitch_A2B_test,
        input_mfc=self.mfc_A_test, reuse=True, scope_name='generator_A2B')
    self.momenta_B2A_test = self.sampler(input_pitch=self.pitch_B_test,
        input_mfc=self.mfc_B_test, reuse=True, scope_name='sampler_B2A')
    self.pitch_B2A_test = self.lddmm(x=self.pitch_B_test,
        p=self.momenta_B2A_test, kernel=self.kernel, reuse=True, scope_name='lddmm')
    self.mfc_B2A_test = self.generator(input_pitch=self.pitch_B2A_test,
        input_mfc=self.mfc_B_test, reuse=True, scope_name='generator_B2A')
def optimizer_initializer(self):
    """Create the learning-rate placeholders and one Adam optimizer
    per network group (discriminators vs. samplers/generators)."""
    self.generator_learning_rate = tf.placeholder(
        tf.float32, None, name='generator_learning_rate')
    self.discriminator_learning_rate = tf.placeholder(
        tf.float32, None, name='discriminator_learning_rate')
    # Discriminator update op: minimizes only over discriminator/kernel vars.
    disc_adam = tf.train.AdamOptimizer(
        learning_rate=self.discriminator_learning_rate, beta1=0.5)
    self.discriminator_optimizer = disc_adam.minimize(
        self.discriminator_loss, var_list=self.discriminator_vars)
    # Generator update op: minimizes only over generator/sampler vars.
    gen_adam = tf.train.AdamOptimizer(
        learning_rate=self.generator_learning_rate, beta1=0.5)
    self.generator_optimizer = gen_adam.minimize(
        self.generator_loss, var_list=self.generator_vars)
def train(self, pitch_A, mfc_A, pitch_B, mfc_B, lambda_cycle_pitch,
          lambda_cycle_mfc, lambda_momenta, lambda_identity_mfc,
          generator_learning_rate, discriminator_learning_rate):
    """Run one generator update followed by one discriminator update.

    The generator pass also fetches the generated (fake) pitch/MFC
    samples, which are then fed into the fake-sample placeholders for
    the discriminator pass. Both summary protos are written under the
    same global step.

    Returns
    -------
    tuple
        (adversarial generator loss, joint discriminator loss,
         generated_pitch_A, generated_mfc_A, generated_pitch_B,
         generated_mfc_B, generated_momenta_A, generated_momenta_B)
    """
    # Generator step: optimize and fetch the fakes in a single run.
    generated_momenta_B, generated_pitch_B, generated_mfc_B, \
    generated_momenta_A, generated_pitch_A, generated_mfc_A, \
    generator_loss, _, generator_summaries \
    = self.sess.run([self.momenta_generation_A2B, self.pitch_generation_A2B,
                     self.mfc_generation_A2B, self.momenta_generation_B2A,
                     self.pitch_generation_B2A, self.mfc_generation_B2A,
                     self.gen_disc_loss, self.generator_optimizer, self.generator_summaries],
                    feed_dict = {self.lambda_cycle_pitch:lambda_cycle_pitch,
                                 self.lambda_cycle_mfc:lambda_cycle_mfc,
                                 self.lambda_momenta:lambda_momenta,
                                 self.lambda_identity_mfc:lambda_identity_mfc,
                                 self.pitch_A_real:pitch_A, self.mfc_A_real:mfc_A,
                                 self.pitch_B_real:pitch_B, self.mfc_B_real:mfc_B,
                                 self.generator_learning_rate:generator_learning_rate})
    # Discriminator step: real samples plus the just-generated fakes.
    discriminator_loss, _, discriminator_summaries \
    = self.sess.run([self.joint_discriminator_loss, self.discriminator_optimizer,
                     self.discriminator_summaries], feed_dict={self.pitch_A_real:pitch_A,
                     self.pitch_B_real:pitch_B, self.mfc_A_real:mfc_A,
                     self.mfc_B_real:mfc_B, self.discriminator_learning_rate:discriminator_learning_rate,
                     self.pitch_A_fake:generated_pitch_A, self.pitch_B_fake:generated_pitch_B,
                     self.mfc_A_fake:generated_mfc_A, self.mfc_B_fake:generated_mfc_B})
    # Log both summaries at the same step, then advance the counter.
    self.writer.add_summary(generator_summaries, self.train_step)
    self.writer.add_summary(discriminator_summaries, self.train_step)
    self.train_step += 1
    return generator_loss, discriminator_loss, generated_pitch_A, \
    generated_mfc_A, generated_pitch_B, generated_mfc_B, \
    generated_momenta_A, generated_momenta_B
def test_gen(self, mfc_A, pitch_A, mfc_B, pitch_B):
    """Run both conversion directions through the test branches and
    return the momenta, warped pitch and generated MFCs for each."""
    fetches_a2b = [self.momenta_A2B_test, self.pitch_A2B_test, self.mfc_A2B_test]
    feeds_a2b = {self.pitch_A_test: pitch_A, self.mfc_A_test: mfc_A}
    gen_mom_B, gen_pitch_B, gen_mfc_B = self.sess.run(fetches_a2b, feed_dict=feeds_a2b)
    fetches_b2a = [self.momenta_B2A_test, self.pitch_B2A_test, self.mfc_B2A_test]
    feeds_b2a = {self.pitch_B_test: pitch_B, self.mfc_B_test: mfc_B}
    gen_mom_A, gen_pitch_A, gen_mfc_A = self.sess.run(fetches_b2a, feed_dict=feeds_b2a)
    return gen_pitch_A, gen_mfc_A, gen_pitch_B, gen_mfc_B, gen_mom_A, gen_mom_B
def test(self, input_pitch, input_mfc, direction):
    """Convert one utterance in the given direction ('A2B' or 'B2A');
    returns the converted pitch and MFC features."""
    if direction == 'A2B':
        fetches = [self.pitch_A2B_test, self.mfc_A2B_test]
        feeds = {self.pitch_A_test: input_pitch, self.mfc_A_test: input_mfc}
    elif direction == 'B2A':
        fetches = [self.pitch_B2A_test, self.mfc_B2A_test]
        feeds = {self.pitch_B_test: input_pitch, self.mfc_B_test: input_mfc}
    else:
        raise Exception('Conversion direction must be specified.')
    generated_pitch, generated_mfc = self.sess.run(fetches, feed_dict=feeds)
    return generated_pitch, generated_mfc
def save(self, directory, filename):
    """Save the session's variables as a checkpoint at
    directory/filename, creating the directory if needed."""
    # exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(directory, exist_ok=True)
    self.saver.save(self.sess, os.path.join(directory, filename))
def load(self, filepath):
    """Restore all model variables from the checkpoint at *filepath*."""
    self.saver.restore(self.sess, filepath)
def summary(self):
    """Build and merge the TensorBoard scalar summaries.

    Returns
    -------
    tuple
        (generator_summaries, discriminator_summaries): two merged
        summary ops, fetched in train() and written by self.writer.
    """
    # All generator-side scalars grouped under one name scope.
    with tf.name_scope('generator_summaries'):
        cycle_loss_pitch_summary = tf.summary.scalar('cycle_loss_pitch',
            self.cycle_loss_pitch)
        cycle_loss_mfc_summary = tf.summary.scalar('cycle_loss_mfc',
            self.cycle_loss_mfc)
        identity_loss_summary = tf.summary.scalar('identity_loss_mfc',
            self.identity_loss_mfc)
        generator_loss_A2B_summary = tf.summary.scalar('generator_loss_A2B',
            self.generator_loss_A2B)
        generator_loss_B2A_summary = tf.summary.scalar('generator_loss_B2A',
            self.generator_loss_B2A)
        generator_kernel_loss_summary = tf.summary.scalar('generator_kernel_loss',
            self.gen_kernel_loss)
        # Individual MMD kernel terms for diagnosing the kernel loss.
        kernel_AA_summary = tf.summary.scalar('generator_kernel_AA', self.kernel_AA)
        kernel_AB_summary = tf.summary.scalar('generator_kernel_AB', self.kernel_AB)
        kernel_BB_summary = tf.summary.scalar('generator_kernel_BB', self.kernel_BB)
        kernel_BA_summary = tf.summary.scalar('generator_kernel_BA', self.kernel_BA)
        generator_loss_summary = tf.summary.scalar('generator_loss', \
            self.generator_loss)
        generator_summaries = tf.summary.merge([cycle_loss_pitch_summary, \
            cycle_loss_mfc_summary, \
            identity_loss_summary, \
            generator_loss_A2B_summary, \
            generator_loss_B2A_summary, \
            generator_kernel_loss_summary, \
            generator_loss_summary, \
            kernel_AA_summary, \
            kernel_AB_summary, \
            kernel_BB_summary, \
            kernel_BA_summary])
    # All discriminator-side scalars grouped under a second name scope.
    with tf.name_scope('discriminator_summaries'):
        discriminator_loss_A_summary \
        = tf.summary.scalar('discriminator_loss_A', \
            self.joint_discriminator_loss_A)
        discriminator_loss_B_summary \
        = tf.summary.scalar('discriminator_loss_B', \
            self.joint_discriminator_loss_B)
        discriminator_loss_summary \
        = tf.summary.scalar('discriminator_loss', \
            self.discriminator_loss)
        discriminator_kernel_loss_summary \
        = tf.summary.scalar('discriminator_kernel_loss', \
            self.disc_kernel_loss)
        discriminator_summaries \
        = tf.summary.merge([discriminator_loss_A_summary, \
            discriminator_loss_B_summary, \
            discriminator_kernel_loss_summary, \
            discriminator_loss_summary])
    return generator_summaries, discriminator_summaries
if __name__ == '__main__':
    # BUG FIX: the constructor has no `num_features` parameter (that
    # call raised TypeError); the MFC feature dimension is `dim_mfc`.
    model = VariationalCycleGAN(dim_mfc=23)
    print('Graph Compile Successful.')
| [
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.get_default_graph",
"tensorflow.summary.merge",
"os.path.join",
"utils.model_utils.eval_kernel",
"os.path.exists",
"tensorflow.concat",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.tr... | [((811, 850), 'numpy.eye', 'np.eye', (['self.n_frames'], {'dtype': 'np.float32'}), '(self.n_frames, dtype=np.float32)\n', (817, 850), True, 'import numpy as np\n'), ((1381, 1397), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1395, 1397), True, 'import tensorflow as tf\n'), ((1418, 1430), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1428, 1430), True, 'import tensorflow as tf\n'), ((1978, 2049), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_A_real"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_A_real')\n", (1992, 2049), True, 'import tensorflow as tf\n'), ((2095, 2166), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_B_real"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_B_real')\n", (2109, 2166), True, 'import tensorflow as tf\n'), ((2211, 2278), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.mfc_shape', 'name': '"""mfc_A_real"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_A_real')\n", (2225, 2278), True, 'import tensorflow as tf\n'), ((2322, 2389), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.mfc_shape', 'name': '"""mfc_B_real"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_B_real')\n", (2336, 2389), True, 'import tensorflow as tf\n'), ((2494, 2565), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_A_fake"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_A_fake')\n", (2508, 2565), True, 'import tensorflow as tf\n'), ((2611, 2682), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_B_fake"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_B_fake')\n", (2625, 2682), True, 'import tensorflow as tf\n'), ((2727, 2794), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], 
{'shape': 'self.mfc_shape', 'name': '"""mfc_A_fake"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_A_fake')\n", (2741, 2794), True, 'import tensorflow as tf\n'), ((2838, 2905), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.mfc_shape', 'name': '"""mfc_B_fake"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_B_fake')\n", (2852, 2905), True, 'import tensorflow as tf\n'), ((2991, 3062), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_A_test"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_A_test')\n", (3005, 3062), True, 'import tensorflow as tf\n'), ((3106, 3173), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.mfc_shape', 'name': '"""mfc_A_test"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_A_test')\n", (3120, 3173), True, 'import tensorflow as tf\n'), ((3220, 3291), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.pitch_shape', 'name': '"""pitch_B_test"""'}), "(tf.float32, shape=self.pitch_shape, name='pitch_B_test')\n", (3234, 3291), True, 'import tensorflow as tf\n'), ((3335, 3402), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'self.mfc_shape', 'name': '"""mfc_B_test"""'}), "(tf.float32, shape=self.mfc_shape, name='mfc_B_test')\n", (3349, 3402), True, 'import tensorflow as tf\n'), ((3515, 3574), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""lambda_cycle_pitch"""'}), "(tf.float32, None, name='lambda_cycle_pitch')\n", (3529, 3574), True, 'import tensorflow as tf\n'), ((3624, 3681), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""lambda_cycle_mfc"""'}), "(tf.float32, None, name='lambda_cycle_mfc')\n", (3638, 3681), True, 'import tensorflow as tf\n'), ((3729, 3784), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""lambda_momenta"""'}), "(tf.float32, None, 
name='lambda_momenta')\n", (3743, 3784), True, 'import tensorflow as tf\n'), ((3837, 3897), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""lambda_identity_mfc"""'}), "(tf.float32, None, name='lambda_identity_mfc')\n", (3851, 3897), True, 'import tensorflow as tf\n'), ((7963, 8042), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_A_real', 'kernel2': 'self.spect_kernel_A_fake'}), '(kernel1=self.spect_kernel_A_real, kernel2=self.spect_kernel_A_fake)\n', (7974, 8042), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8085, 8164), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_B_real', 'kernel2': 'self.spect_kernel_B_fake'}), '(kernel1=self.spect_kernel_B_real, kernel2=self.spect_kernel_B_fake)\n', (8096, 8164), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8207, 8286), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_A_real', 'kernel2': 'self.spect_kernel_B_real'}), '(kernel1=self.spect_kernel_A_real, kernel2=self.spect_kernel_B_real)\n', (8218, 8286), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8329, 8408), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_A_fake', 'kernel2': 'self.spect_kernel_B_fake'}), '(kernel1=self.spect_kernel_A_fake, kernel2=self.spect_kernel_B_fake)\n', (8340, 8408), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((14064, 14158), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_disc_A_real', 'kernel2': 'self.spect_kernel_disc_B_real'}), '(kernel1=self.spect_kernel_disc_A_real, kernel2=self.\n spect_kernel_disc_B_real)\n', (14075, 14158), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((14213, 14307), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_disc_A_real', 'kernel2': 'self.spect_kernel_disc_A_fake'}), 
'(kernel1=self.spect_kernel_disc_A_real, kernel2=self.\n spect_kernel_disc_A_fake)\n', (14224, 14307), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((14362, 14456), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_disc_B_real', 'kernel2': 'self.spect_kernel_disc_B_fake'}), '(kernel1=self.spect_kernel_disc_B_real, kernel2=self.\n spect_kernel_disc_B_fake)\n', (14373, 14456), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((14511, 14605), 'utils.model_utils.eval_kernel', 'eval_kernel', ([], {'kernel1': 'self.spect_kernel_disc_A_fake', 'kernel2': 'self.spect_kernel_disc_B_fake'}), '(kernel1=self.spect_kernel_disc_A_fake, kernel2=self.\n spect_kernel_disc_B_fake)\n', (14522, 14605), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((15180, 15204), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (15202, 15204), True, 'import tensorflow as tf\n'), ((16655, 16719), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""generator_learning_rate"""'}), "(tf.float32, None, name='generator_learning_rate')\n", (16669, 16719), True, 'import tensorflow as tf\n'), ((16780, 16848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'None'], {'name': '"""discriminator_learning_rate"""'}), "(tf.float32, None, name='discriminator_learning_rate')\n", (16794, 16848), True, 'import tensorflow as tf\n'), ((1019, 1057), 'tensorflow.constant', 'tf.constant', (['[6, 50]'], {'dtype': 'tf.float32'}), '([6, 50], dtype=tf.float32)\n', (1030, 1057), True, 'import tensorflow as tf\n'), ((21059, 21084), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (21073, 21084), False, 'import os\n'), ((21098, 21120), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (21109, 21120), False, 'import os\n'), ((21182, 21215), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', 
(21194, 21215), False, 'import os\n'), ((21336, 21372), 'tensorflow.name_scope', 'tf.name_scope', (['"""generator_summaries"""'], {}), "('generator_summaries')\n", (21349, 21372), True, 'import tensorflow as tf\n'), ((21413, 21473), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cycle_loss_pitch"""', 'self.cycle_loss_pitch'], {}), "('cycle_loss_pitch', self.cycle_loss_pitch)\n", (21430, 21473), True, 'import tensorflow as tf\n'), ((21532, 21588), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cycle_loss_mfc"""', 'self.cycle_loss_mfc'], {}), "('cycle_loss_mfc', self.cycle_loss_mfc)\n", (21549, 21588), True, 'import tensorflow as tf\n'), ((21646, 21708), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""identity_loss_mfc"""', 'self.identity_loss_mfc'], {}), "('identity_loss_mfc', self.identity_loss_mfc)\n", (21663, 21708), True, 'import tensorflow as tf\n'), ((21771, 21835), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_loss_A2B"""', 'self.generator_loss_A2B'], {}), "('generator_loss_A2B', self.generator_loss_A2B)\n", (21788, 21835), True, 'import tensorflow as tf\n'), ((21898, 21962), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_loss_B2A"""', 'self.generator_loss_B2A'], {}), "('generator_loss_B2A', self.generator_loss_B2A)\n", (21915, 21962), True, 'import tensorflow as tf\n'), ((22028, 22092), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_kernel_loss"""', 'self.gen_kernel_loss'], {}), "('generator_kernel_loss', self.gen_kernel_loss)\n", (22045, 22092), True, 'import tensorflow as tf\n'), ((22146, 22202), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_kernel_AA"""', 'self.kernel_AA'], {}), "('generator_kernel_AA', self.kernel_AA)\n", (22163, 22202), True, 'import tensorflow as tf\n'), ((22235, 22291), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_kernel_AB"""', 'self.kernel_AB'], {}), "('generator_kernel_AB', self.kernel_AB)\n", (22252, 22291), 
True, 'import tensorflow as tf\n'), ((22324, 22380), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_kernel_BB"""', 'self.kernel_BB'], {}), "('generator_kernel_BB', self.kernel_BB)\n", (22341, 22380), True, 'import tensorflow as tf\n'), ((22413, 22469), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_kernel_BA"""', 'self.kernel_BA'], {}), "('generator_kernel_BA', self.kernel_BA)\n", (22430, 22469), True, 'import tensorflow as tf\n'), ((22507, 22563), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generator_loss"""', 'self.generator_loss'], {}), "('generator_loss', self.generator_loss)\n", (22524, 22563), True, 'import tensorflow as tf\n'), ((22636, 22930), 'tensorflow.summary.merge', 'tf.summary.merge', (['[cycle_loss_pitch_summary, cycle_loss_mfc_summary, identity_loss_summary,\n generator_loss_A2B_summary, generator_loss_B2A_summary,\n generator_kernel_loss_summary, generator_loss_summary,\n kernel_AA_summary, kernel_AB_summary, kernel_BB_summary, kernel_BA_summary]'], {}), '([cycle_loss_pitch_summary, cycle_loss_mfc_summary,\n identity_loss_summary, generator_loss_A2B_summary,\n generator_loss_B2A_summary, generator_kernel_loss_summary,\n generator_loss_summary, kernel_AA_summary, kernel_AB_summary,\n kernel_BB_summary, kernel_BA_summary])\n', (22652, 22930), True, 'import tensorflow as tf\n'), ((23309, 23349), 'tensorflow.name_scope', 'tf.name_scope', (['"""discriminator_summaries"""'], {}), "('discriminator_summaries')\n", (23322, 23349), True, 'import tensorflow as tf\n'), ((23412, 23486), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator_loss_A"""', 'self.joint_discriminator_loss_A'], {}), "('discriminator_loss_A', self.joint_discriminator_loss_A)\n", (23429, 23486), True, 'import tensorflow as tf\n'), ((23574, 23648), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator_loss_B"""', 'self.joint_discriminator_loss_B'], {}), "('discriminator_loss_B', 
self.joint_discriminator_loss_B)\n", (23591, 23648), True, 'import tensorflow as tf\n'), ((23734, 23798), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator_loss"""', 'self.discriminator_loss'], {}), "('discriminator_loss', self.discriminator_loss)\n", (23751, 23798), True, 'import tensorflow as tf\n'), ((23891, 23960), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""discriminator_kernel_loss"""', 'self.disc_kernel_loss'], {}), "('discriminator_kernel_loss', self.disc_kernel_loss)\n", (23908, 23960), True, 'import tensorflow as tf\n'), ((24043, 24192), 'tensorflow.summary.merge', 'tf.summary.merge', (['[discriminator_loss_A_summary, discriminator_loss_B_summary,\n discriminator_kernel_loss_summary, discriminator_loss_summary]'], {}), '([discriminator_loss_A_summary,\n discriminator_loss_B_summary, discriminator_kernel_loss_summary,\n discriminator_loss_summary])\n', (24059, 24192), True, 'import tensorflow as tf\n'), ((1558, 1591), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1589, 1591), True, 'import tensorflow as tf\n'), ((1764, 1786), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1784, 1786), True, 'import tensorflow as tf\n'), ((6736, 6797), 'tensorflow.concat', 'tf.concat', (['[self.mfc_A_real, self.mfc_generation_A2B]'], {'axis': '(1)'}), '([self.mfc_A_real, self.mfc_generation_A2B], axis=1)\n', (6745, 6797), True, 'import tensorflow as tf\n'), ((6824, 6889), 'tensorflow.concat', 'tf.concat', (['[self.pitch_A_real, self.pitch_generation_A2B]'], {'axis': '(1)'}), '([self.pitch_A_real, self.pitch_generation_A2B], axis=1)\n', (6833, 6889), True, 'import tensorflow as tf\n'), ((7036, 7097), 'tensorflow.concat', 'tf.concat', (['[self.mfc_B_real, self.mfc_generation_B2A]'], {'axis': '(1)'}), '([self.mfc_B_real, self.mfc_generation_B2A], axis=1)\n', (7045, 7097), True, 'import tensorflow as tf\n'), ((7124, 7189), 'tensorflow.concat', 'tf.concat', 
(['[self.pitch_B_real, self.pitch_generation_B2A]'], {'axis': '(1)'}), '([self.pitch_B_real, self.pitch_generation_B2A], axis=1)\n', (7133, 7189), True, 'import tensorflow as tf\n'), ((8543, 8599), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.pitch_A_real', 'y_hat': 'self.pitch_cycle_A2A'}), '(y=self.pitch_A_real, y_hat=self.pitch_cycle_A2A)\n', (8550, 8599), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8620, 8676), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.pitch_B_real', 'y_hat': 'self.pitch_cycle_B2B'}), '(y=self.pitch_B_real, y_hat=self.pitch_cycle_B2B)\n', (8627, 8676), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8716, 8768), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.mfc_A_real', 'y_hat': 'self.mfc_cycle_A2A'}), '(y=self.mfc_A_real, y_hat=self.mfc_cycle_A2A)\n', (8723, 8768), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8789, 8841), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.mfc_B_real', 'y_hat': 'self.mfc_cycle_A2A'}), '(y=self.mfc_B_real, y_hat=self.mfc_cycle_A2A)\n', (8796, 8841), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8908, 8963), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.mfc_identity_A2B', 'y_hat': 'self.mfc_B_real'}), '(y=self.mfc_identity_A2B, y_hat=self.mfc_B_real)\n', (8915, 8963), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((8984, 9039), 'utils.model_utils.l1_loss', 'l1_loss', ([], {'y': 'self.mfc_identity_B2A', 'y_hat': 'self.mfc_A_real'}), '(y=self.mfc_identity_B2A, y_hat=self.mfc_A_real)\n', (8991, 9039), False, 'from utils.model_utils import l1_loss, eval_kernel\n'), ((9125, 9171), 'tensorflow.ones_like', 'tf.ones_like', (['self.joint_discrimination_B_fake'], {}), '(self.joint_discrimination_B_fake)\n', (9137, 9171), True, 'import tensorflow as tf\n'), ((9274, 9320), 'tensorflow.ones_like', 'tf.ones_like', (['self.joint_discrimination_A_fake'], {}), 
'(self.joint_discrimination_A_fake)\n', (9286, 9320), True, 'import tensorflow as tf\n'), ((10808, 10861), 'tensorflow.concat', 'tf.concat', (['[self.mfc_A_real, self.mfc_B_fake]'], {'axis': '(1)'}), '([self.mfc_A_real, self.mfc_B_fake], axis=1)\n', (10817, 10861), True, 'import tensorflow as tf\n'), ((10896, 10953), 'tensorflow.concat', 'tf.concat', (['[self.pitch_A_real, self.pitch_B_fake]'], {'axis': '(1)'}), '([self.pitch_A_real, self.pitch_B_fake], axis=1)\n', (10905, 10953), True, 'import tensorflow as tf\n'), ((11129, 11182), 'tensorflow.concat', 'tf.concat', (['[self.mfc_A_fake, self.mfc_B_real]'], {'axis': '(1)'}), '([self.mfc_A_fake, self.mfc_B_real], axis=1)\n', (11138, 11182), True, 'import tensorflow as tf\n'), ((11217, 11274), 'tensorflow.concat', 'tf.concat', (['[self.pitch_A_fake, self.pitch_B_real]'], {'axis': '(1)'}), '([self.pitch_A_fake, self.pitch_B_real], axis=1)\n', (11226, 11274), True, 'import tensorflow as tf\n'), ((11451, 11504), 'tensorflow.concat', 'tf.concat', (['[self.mfc_B_real, self.mfc_A_fake]'], {'axis': '(1)'}), '([self.mfc_B_real, self.mfc_A_fake], axis=1)\n', (11460, 11504), True, 'import tensorflow as tf\n'), ((11539, 11596), 'tensorflow.concat', 'tf.concat', (['[self.pitch_B_real, self.pitch_A_fake]'], {'axis': '(1)'}), '([self.pitch_B_real, self.pitch_A_fake], axis=1)\n', (11548, 11596), True, 'import tensorflow as tf\n'), ((11772, 11825), 'tensorflow.concat', 'tf.concat', (['[self.mfc_B_fake, self.mfc_A_real]'], {'axis': '(1)'}), '([self.mfc_B_fake, self.mfc_A_real], axis=1)\n', (11781, 11825), True, 'import tensorflow as tf\n'), ((11860, 11917), 'tensorflow.concat', 'tf.concat', (['[self.pitch_B_fake, self.pitch_A_real]'], {'axis': '(1)'}), '([self.pitch_B_fake, self.pitch_A_real], axis=1)\n', (11869, 11917), True, 'import tensorflow as tf\n'), ((12124, 12184), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.joint_discrimination_input_A_real_B_fake'], {}), '(self.joint_discrimination_input_A_real_B_fake)\n', (12137, 
12184), True, 'import tensorflow as tf\n'), ((12337, 12396), 'tensorflow.ones_like', 'tf.ones_like', (['self.joint_discrimination_input_A_fake_B_real'], {}), '(self.joint_discrimination_input_A_fake_B_real)\n', (12349, 12396), True, 'import tensorflow as tf\n'), ((12727, 12787), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.joint_discrimination_input_B_real_A_fake'], {}), '(self.joint_discrimination_input_B_real_A_fake)\n', (12740, 12787), True, 'import tensorflow as tf\n'), ((12940, 12999), 'tensorflow.ones_like', 'tf.ones_like', (['self.joint_discrimination_input_B_fake_A_real'], {}), '(self.joint_discrimination_input_B_fake_A_real)\n', (12952, 12999), True, 'import tensorflow as tf\n'), ((16924, 17009), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.discriminator_learning_rate', 'beta1': '(0.5)'}), '(learning_rate=self.discriminator_learning_rate,\n beta1=0.5)\n', (16946, 17009), True, 'import tensorflow as tf\n'), ((17153, 17230), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.generator_learning_rate', 'beta1': '(0.5)'}), '(learning_rate=self.generator_learning_rate, beta1=0.5)\n', (17175, 17230), True, 'import tensorflow as tf\n'), ((9731, 9779), 'tensorflow.reshape', 'tf.reshape', (['self.momenta_generation_A2B', '[-1, 1]'], {}), '(self.momenta_generation_A2B, [-1, 1])\n', (9741, 9779), True, 'import tensorflow as tf\n'), ((9884, 9927), 'tensorflow.reshape', 'tf.reshape', (['self.momenta_cycle_A2A', '[-1, 1]'], {}), '(self.momenta_cycle_A2A, [-1, 1])\n', (9894, 9927), True, 'import tensorflow as tf\n'), ((10037, 10085), 'tensorflow.reshape', 'tf.reshape', (['self.momenta_generation_B2A', '[-1, 1]'], {}), '(self.momenta_generation_B2A, [-1, 1])\n', (10047, 10085), True, 'import tensorflow as tf\n'), ((10190, 10233), 'tensorflow.reshape', 'tf.reshape', (['self.momenta_cycle_B2B', '[-1, 1]'], {}), '(self.momenta_cycle_B2B, [-1, 1])\n', (10200, 10233), True, 'import tensorflow 
as tf\n')] |
#
# lajollaS
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.99983, 0.99974, 0.79991],
[0.10023, 0.10091, 0.0037913],
[0.86973, 0.45582, 0.31068],
[0.94802, 0.76354, 0.37498],
[0.49809, 0.23315, 0.20576],
[0.98278, 0.91253, 0.57381],
[0.91381, 0.60672, 0.32439],
[0.28496, 0.16607, 0.1061],
[0.72238, 0.30998, 0.27958],
[0.1874, 0.13354, 0.062323],
[0.9931, 0.95921, 0.68789],
[0.96754, 0.84738, 0.4604],
[0.9298, 0.68157, 0.33699],
[0.61029, 0.26633, 0.24848],
[0.389, 0.1997, 0.1555],
[0.89674, 0.53312, 0.31778],
[0.81242, 0.37438, 0.29885],
[0.55391, 0.24948, 0.22849],
[0.14225, 0.11735, 0.03474],
[0.90574, 0.57009, 0.32091],
[0.95797, 0.80656, 0.41183],
[0.44303, 0.2166, 0.18105],
[0.97589, 0.88298, 0.51602],
[0.93853, 0.72144, 0.35096],
[0.88549, 0.4952, 0.31451],
[0.66685, 0.28556, 0.26546],
[0.8463, 0.41502, 0.30567],
[0.99693, 0.97973, 0.74399],
[0.98842, 0.93731, 0.63126],
[0.23526, 0.1497, 0.083951],
[0.33625, 0.18278, 0.13021],
[0.92169, 0.64362, 0.32917],
[0.77392, 0.34151, 0.29098],
[0.83077, 0.3945, 0.30251],
[0.47045, 0.2249, 0.1936],
[0.93407, 0.70122, 0.34297],
[0.89151, 0.51432, 0.3162],
[0.9719, 0.86595, 0.48769],
[0.16445, 0.12548, 0.04964],
[0.87836, 0.47572, 0.31269],
[0.85918, 0.43556, 0.30835],
[0.96286, 0.82747, 0.4349],
[0.99087, 0.94851, 0.65967],
[0.99512, 0.96958, 0.71599],
[0.31043, 0.17441, 0.11793],
[0.41587, 0.20818, 0.16831],
[0.95298, 0.7851, 0.39176],
[0.211, 0.14161, 0.073581],
[0.63859, 0.2755, 0.25734],
[0.69488, 0.29697, 0.27286],
[0.12086, 0.1092, 0.019283],
[0.99851, 0.98977, 0.77195],
[0.90983, 0.58841, 0.32257],
[0.90141, 0.55169, 0.31934],
[0.74891, 0.32481, 0.28559],
[0.98573, 0.92539, 0.60264],
[0.52592, 0.24134, 0.21742],
[0.97951, 0.89846, 0.54486],
[0.58204, 0.25775, 0.23883],
[0.36245, 0.19124, 0.14274],
[0.92569, 0.66242, 0.33255],
[0.94319, 0.74225, 0.36147],
[0.91775, 0.62509, 0.32653],
[0.25991, 0.15783, 0.094764],
[0.79686, 0.35981, 0.2957],
[0.95049, 0.7743, 0.38296],
[0.70873, 0.30323, 0.27631],
[0.86473, 0.44572, 0.30956],
[0.90781, 0.57926, 0.32172],
[0.96523, 0.83758, 0.44738],
[0.29764, 0.17023, 0.11196],
[0.27237, 0.16197, 0.10032],
[0.94083, 0.73178, 0.35587],
[0.7358, 0.31716, 0.28268],
[0.42941, 0.2124, 0.17473],
[0.15323, 0.12139, 0.042482],
[0.22307, 0.14566, 0.078687],
[0.91971, 0.63434, 0.32777],
[0.98119, 0.90566, 0.55934],
[0.4567, 0.22079, 0.18738],
[0.98967, 0.94298, 0.64549],
[0.97775, 0.89091, 0.53041],
[0.91577, 0.61589, 0.32541],
[0.34931, 0.18702, 0.13639],
[0.82195, 0.38436, 0.30075],
[0.78568, 0.35049, 0.29341],
[0.19909, 0.13759, 0.068084],
[0.85306, 0.42531, 0.30705],
[0.99921, 0.99476, 0.78593],
[0.48423, 0.22906, 0.1997],
[0.76164, 0.33295, 0.28837],
[0.89913, 0.54243, 0.31856],
[0.95548, 0.79587, 0.40139],
[0.94558, 0.75284, 0.36782],
[0.88862, 0.50481, 0.31537],
[0.96976, 0.85685, 0.47387],
[0.99606, 0.97467, 0.73],
[0.97394, 0.87466, 0.50176],
[0.93191, 0.69133, 0.33977],
[0.96044, 0.81711, 0.42301]]
lajollaS_map = LinearSegmentedColormap.from_list('lajollaS', cm_data)
# For use of "viscm view"
test_cm = lajollaS_map
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(lajollaS_map)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=lajollaS_map)
plt.show()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.linspace",
"matplotlib.pyplot.show",
"viscm.viscm"
] | [((4739, 4793), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""lajollaS"""', 'cm_data'], {}), "('lajollaS', cm_data)\n", (4772, 4793), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((5290, 5300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5298, 5300), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5059), 'viscm.viscm', 'viscm', (['lajollaS_map'], {}), '(lajollaS_map)\n', (5045, 5059), False, 'from viscm import viscm\n'), ((5186, 5210), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (5197, 5210), True, 'import numpy as np\n')] |
from von_karman_psds import spatial_psds, von_karman_gust_intensity
import numpy as np
import pytest
altitude = 20000
probability_of_exceedance = 1e-5
np.random.seed(0)
x_max = 10000
dx_approx = 0.1
### Scary FFT magic below this line...
fft_exponent = int(np.round(np.log2(x_max / dx_approx))) - 1
k = np.arange(2 ** fft_exponent + 1)
dOmega = 2 * np.pi / x_max
Omega = dOmega * k
spatial_power = np.array(spatial_psds(
Omega=Omega,
altitude=altitude,
probability_of_exceedance=probability_of_exceedance
)).T
spatial_power[0] = 0 # Zero the DC-mode power
rms_from_spatial_power = np.sqrt(np.sum(spatial_power, axis=0) * dOmega)
spatial_amps = np.sqrt(2 * spatial_power * dOmega)
spatial_phase = np.random.random(spatial_amps.shape) * 2 * np.pi
spatial_freq_signal = spatial_amps * np.exp(1j * spatial_phase)
spatial_space_signal = np.fft.irfft(
spatial_freq_signal,
axis=0
)
spatial_space_signal = np.real(spatial_space_signal) * k.max()
rms_from_signal = np.sqrt(np.mean(spatial_space_signal**2, axis=0))
assert rms_from_spatial_power == pytest.approx(rms_from_signal, rel = 0.01), "Something went wrong in the IFFT!"
dx = x_max / len(spatial_space_signal)
x = dx * np.arange(len(spatial_space_signal))
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(palette=sns.color_palette("husl"))
fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
for i, label in enumerate(["u", "v", "w"]):
plt.plot(
x,
spatial_space_signal[:, i],
linewidth=1,
label=f"${label}_g$",
color = sns.husl_palette(1, h=i/3)[0]
)
plt.xlim(0, 100)
plt.xlabel(r"Distance Along Flight Path [m]")
plt.ylabel(r"Local Gusts [m/s]")
plt.title(r"Von Karman Gusts: Sample Generated Gust Profile"
f"\n{altitude} m Altitude, $10^{'{'}{np.log10(probability_of_exceedance):.0f}{'}'}$ Prob. of Exceedance")
plt.tight_layout()
plt.legend()
# plt.savefig("C:/Users/User/Downloads/temp.svg")
plt.show()
| [
"numpy.random.seed",
"numpy.sum",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"von_karman_psds.spatial_psds",
"numpy.fft.irfft",
"seaborn.husl_palette",
"numpy.real",
"numpy.log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.log2",
"ma... | [((153, 170), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (167, 170), True, 'import numpy as np\n'), ((308, 340), 'numpy.arange', 'np.arange', (['(2 ** fft_exponent + 1)'], {}), '(2 ** fft_exponent + 1)\n', (317, 340), True, 'import numpy as np\n'), ((666, 701), 'numpy.sqrt', 'np.sqrt', (['(2 * spatial_power * dOmega)'], {}), '(2 * spatial_power * dOmega)\n', (673, 701), True, 'import numpy as np\n'), ((855, 896), 'numpy.fft.irfft', 'np.fft.irfft', (['spatial_freq_signal'], {'axis': '(0)'}), '(spatial_freq_signal, axis=0)\n', (867, 896), True, 'import numpy as np\n'), ((1347, 1394), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6.4, 4.8)', 'dpi': '(200)'}), '(1, 1, figsize=(6.4, 4.8), dpi=200)\n', (1359, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1619), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (1611, 1619), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance Along Flight Path [m]"""'], {}), "('Distance Along Flight Path [m]')\n", (1630, 1664), True, 'import matplotlib.pyplot as plt\n'), ((1666, 1697), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Local Gusts [m/s]"""'], {}), "('Local Gusts [m/s]')\n", (1676, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1894), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1892, 1894), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1907), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1905, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1968), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1966, 1968), True, 'import matplotlib.pyplot as plt\n'), ((804, 832), 'numpy.exp', 'np.exp', (['(1.0j * spatial_phase)'], {}), '(1.0j * spatial_phase)\n', (810, 832), True, 'import numpy as np\n'), ((930, 959), 'numpy.real', 'np.real', (['spatial_space_signal'], {}), '(spatial_space_signal)\n', 
(937, 959), True, 'import numpy as np\n'), ((997, 1039), 'numpy.mean', 'np.mean', (['(spatial_space_signal ** 2)'], {'axis': '(0)'}), '(spatial_space_signal ** 2, axis=0)\n', (1004, 1039), True, 'import numpy as np\n'), ((1072, 1112), 'pytest.approx', 'pytest.approx', (['rms_from_signal'], {'rel': '(0.01)'}), '(rms_from_signal, rel=0.01)\n', (1085, 1112), False, 'import pytest\n'), ((414, 516), 'von_karman_psds.spatial_psds', 'spatial_psds', ([], {'Omega': 'Omega', 'altitude': 'altitude', 'probability_of_exceedance': 'probability_of_exceedance'}), '(Omega=Omega, altitude=altitude, probability_of_exceedance=\n probability_of_exceedance)\n', (426, 516), False, 'from von_karman_psds import spatial_psds, von_karman_gust_intensity\n'), ((610, 639), 'numpy.sum', 'np.sum', (['spatial_power'], {'axis': '(0)'}), '(spatial_power, axis=0)\n', (616, 639), True, 'import numpy as np\n'), ((718, 754), 'numpy.random.random', 'np.random.random', (['spatial_amps.shape'], {}), '(spatial_amps.shape)\n', (734, 754), True, 'import numpy as np\n'), ((1310, 1335), 'seaborn.color_palette', 'sns.color_palette', (['"""husl"""'], {}), "('husl')\n", (1327, 1335), True, 'import seaborn as sns\n'), ((271, 297), 'numpy.log2', 'np.log2', (['(x_max / dx_approx)'], {}), '(x_max / dx_approx)\n', (278, 297), True, 'import numpy as np\n'), ((1807, 1842), 'numpy.log10', 'np.log10', (['probability_of_exceedance'], {}), '(probability_of_exceedance)\n', (1815, 1842), True, 'import numpy as np\n'), ((1567, 1595), 'seaborn.husl_palette', 'sns.husl_palette', (['(1)'], {'h': '(i / 3)'}), '(1, h=i / 3)\n', (1583, 1595), True, 'import seaborn as sns\n')] |
import numpy as np
# import scanpy.api as sc
import pandas as pd
def norm(x, reverse=False):
if reverse:
y = np.power(10, x) - 1.01
y = np.around(y).astype(np.int32)
return y
else:
return np.log10(x + 1.01)
def minmax_0_to_1(x, reverse=False, minmax=1):
if reverse:
# x -> [0, 1]
minmax_x = x * minmax
# minmax_x -> [0, 6]
return norm(minmax_x, reverse)
else:
norm_x = norm(x, reverse)
minmax_x = norm_x / np.max(norm_x)
return minmax_x
array = [0, 1, 2, 3, 150, 134]
x = np.asarray(array)
print(minmax_0_to_1(x))
print(minmax_0_to_1(minmax_0_to_1(x), True, np.max(norm(x))))
print("-----------------")
# def normalization(express_data):
# adata = sc.AnnData(express_data.T.values)
# sc.pp.normalize_per_cell(adata)
# sc.pp.log1p(adata)
# return pd.DataFrame(adata.X.T, columns=express_data.columns.tolist(), index=express_data.index.tolist())
# a = pd.read_csv("./data/true_counts_simulated_dataset1_dropout0.05.csv")
# print(normalization(a))
# print(normalization(a).describe())
| [
"numpy.power",
"numpy.asarray",
"numpy.around",
"numpy.max",
"numpy.log10"
] | [((603, 620), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (613, 620), True, 'import numpy as np\n'), ((239, 257), 'numpy.log10', 'np.log10', (['(x + 1.01)'], {}), '(x + 1.01)\n', (247, 257), True, 'import numpy as np\n'), ((128, 143), 'numpy.power', 'np.power', (['(10)', 'x'], {}), '(10, x)\n', (136, 143), True, 'import numpy as np\n'), ((524, 538), 'numpy.max', 'np.max', (['norm_x'], {}), '(norm_x)\n', (530, 538), True, 'import numpy as np\n'), ((164, 176), 'numpy.around', 'np.around', (['y'], {}), '(y)\n', (173, 176), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report
def binary_backtests_returns(
backtests: pd.DataFrame,
) -> pd.DataFrame:
"""
Converts a Horizon backtest data frame into a binary backtests of directions
"""
return backtests.diff().apply(np.sign).dropna()
def calculate_metrics(y_pred: pd.Series, y_true: pd.Series):
"""
Takes a Horizon binary backtest data frame and calculate metrics.
"""
return classification_report(
y_true=y_true,
y_pred=y_pred,
output_dict=True,
)
def recommender(last_observed_values: pd.Series, predictions: pd.DataFrame) -> pd.DataFrame:
means = predictions["mean"]
spreads = predictions["bound_high"] - predictions["bound_low"]
spreads.index = predictions["Series"]
means.index = predictions["Series"]
changes = means - last_observed_values
directions = np.sign(changes)
uncertainties = 100 * spreads / means
predicted_movement = 100 * changes / last_observed_values[0]
recommendations = pd.DataFrame(
[directions, uncertainties, predicted_movement],
index=["Recommendations", "Predictive Uncertainty Percent", "Predicted Movement Percent"],
)
return recommendations
| [
"pandas.DataFrame",
"sklearn.metrics.classification_report",
"numpy.sign"
] | [((478, 547), 'sklearn.metrics.classification_report', 'classification_report', ([], {'y_true': 'y_true', 'y_pred': 'y_pred', 'output_dict': '(True)'}), '(y_true=y_true, y_pred=y_pred, output_dict=True)\n', (499, 547), False, 'from sklearn.metrics import classification_report\n'), ((915, 931), 'numpy.sign', 'np.sign', (['changes'], {}), '(changes)\n', (922, 931), True, 'import numpy as np\n'), ((1061, 1222), 'pandas.DataFrame', 'pd.DataFrame', (['[directions, uncertainties, predicted_movement]'], {'index': "['Recommendations', 'Predictive Uncertainty Percent',\n 'Predicted Movement Percent']"}), "([directions, uncertainties, predicted_movement], index=[\n 'Recommendations', 'Predictive Uncertainty Percent',\n 'Predicted Movement Percent'])\n", (1073, 1222), True, 'import pandas as pd\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.ops import boxes_iou3d, nms3d, nms3d_normal
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_boxes_iou3d():
np_boxes1 = np.asarray([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],
[3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0]],
dtype=np.float32)
np_boxes2 = np.asarray([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0],
[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, np.pi / 2],
[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, np.pi / 4]],
dtype=np.float32)
np_expect_ious = np.asarray([[1.0, 1.0, 1.0 / 2**0.5],
[1.0 / 7, 1.0 / 7, 1.0 / 7], [0.0, 0.0, 0.0]],
dtype=np.float32)
boxes1 = torch.from_numpy(np_boxes1).cuda()
boxes2 = torch.from_numpy(np_boxes2).cuda()
ious = boxes_iou3d(boxes1, boxes2)
assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=1e-4)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms3d():
# test for 5 boxes
np_boxes = np.asarray([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],
[3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3],
[3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0],
[3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]],
dtype=np.float32)
np_scores = np.array([0.6, 0.9, 0.1, 0.2, 0.15], dtype=np.float32)
np_inds = np.array([1, 0, 3])
boxes = torch.from_numpy(np_boxes)
scores = torch.from_numpy(np_scores)
inds = nms3d(boxes.cuda(), scores.cuda(), iou_threshold=0.3)
assert np.allclose(inds.cpu().numpy(), np_inds)
# test for many boxes
np.random.seed(42)
np_boxes = np.random.rand(555, 7).astype(np.float32)
np_scores = np.random.rand(555).astype(np.float32)
boxes = torch.from_numpy(np_boxes)
scores = torch.from_numpy(np_scores)
inds = nms3d(boxes.cuda(), scores.cuda(), iou_threshold=0.3)
assert len(inds.cpu().numpy()) == 176
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms3d_normal():
# test for 5 boxes
np_boxes = np.asarray([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0],
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],
[3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3],
[3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0],
[3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]],
dtype=np.float32)
np_scores = np.array([0.6, 0.9, 0.1, 0.2, 0.15], dtype=np.float32)
np_inds = np.array([1, 0, 3])
boxes = torch.from_numpy(np_boxes)
scores = torch.from_numpy(np_scores)
inds = nms3d_normal(boxes.cuda(), scores.cuda(), iou_threshold=0.3)
assert np.allclose(inds.cpu().numpy(), np_inds)
# test for many boxes
np.random.seed(42)
np_boxes = np.random.rand(555, 7).astype(np.float32)
np_scores = np.random.rand(555).astype(np.float32)
boxes = torch.from_numpy(np_boxes)
scores = torch.from_numpy(np_scores)
inds = nms3d_normal(boxes.cuda(), scores.cuda(), iou_threshold=0.3)
assert len(inds.cpu().numpy()) == 148
| [
"numpy.random.seed",
"mmcv.ops.boxes_iou3d",
"numpy.asarray",
"numpy.array",
"torch.cuda.is_available",
"numpy.random.rand",
"torch.from_numpy"
] | [((279, 425), 'numpy.asarray', 'np.asarray', (['[[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, \n 2.0, 0.0], [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0]], dtype=np.float32)\n', (289, 425), True, 'import numpy as np\n'), ((520, 683), 'numpy.asarray', 'np.asarray', (['[[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, np.pi /\n 2], [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, np.pi / 4]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [1.0, 1.0, 1.0, 2.0, 2.0, \n 2.0, np.pi / 2], [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, np.pi / 4]], dtype=np.\n float32)\n', (530, 683), True, 'import numpy as np\n'), ((778, 887), 'numpy.asarray', 'np.asarray', (['[[1.0, 1.0, 1.0 / 2 ** 0.5], [1.0 / 7, 1.0 / 7, 1.0 / 7], [0.0, 0.0, 0.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0 / 2 ** 0.5], [1.0 / 7, 1.0 / 7, 1.0 / 7], [0.0, \n 0.0, 0.0]], dtype=np.float32)\n', (788, 887), True, 'import numpy as np\n'), ((1055, 1082), 'mmcv.ops.boxes_iou3d', 'boxes_iou3d', (['boxes1', 'boxes2'], {}), '(boxes1, boxes2)\n', (1066, 1082), False, 'from mmcv.ops import boxes_iou3d, nms3d, nms3d_normal\n'), ((1299, 1524), 'numpy.asarray', 'np.asarray', (['[[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3], [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0\n ], [3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, \n 2.0, 0.0], [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3], [3.0, 3.0, 3.0, 3.0, \n 2.0, 2.0, 0.0], [3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]], dtype=np.float32)\n', (1309, 1524), True, 'import numpy as np\n'), ((1665, 1719), 'numpy.array', 'np.array', (['[0.6, 0.9, 0.1, 0.2, 0.15]'], {'dtype': 'np.float32'}), '([0.6, 0.9, 0.1, 0.2, 0.15], dtype=np.float32)\n', (1673, 1719), True, 'import numpy as np\n'), ((1734, 
1753), 'numpy.array', 'np.array', (['[1, 0, 3]'], {}), '([1, 0, 3])\n', (1742, 1753), True, 'import numpy as np\n'), ((1766, 1792), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes'], {}), '(np_boxes)\n', (1782, 1792), False, 'import torch\n'), ((1806, 1833), 'torch.from_numpy', 'torch.from_numpy', (['np_scores'], {}), '(np_scores)\n', (1822, 1833), False, 'import torch\n'), ((1983, 2001), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1997, 2001), True, 'import numpy as np\n'), ((2126, 2152), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes'], {}), '(np_boxes)\n', (2142, 2152), False, 'import torch\n'), ((2166, 2193), 'torch.from_numpy', 'torch.from_numpy', (['np_scores'], {}), '(np_scores)\n', (2182, 2193), False, 'import torch\n'), ((2455, 2680), 'numpy.asarray', 'np.asarray', (['[[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],\n [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3], [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.0\n ], [3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0, \n 2.0, 0.0], [3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 0.3], [3.0, 3.0, 3.0, 3.0, \n 2.0, 2.0, 0.0], [3.0, 3.2, 3.2, 3.0, 2.0, 2.0, 0.3]], dtype=np.float32)\n', (2465, 2680), True, 'import numpy as np\n'), ((2821, 2875), 'numpy.array', 'np.array', (['[0.6, 0.9, 0.1, 0.2, 0.15]'], {'dtype': 'np.float32'}), '([0.6, 0.9, 0.1, 0.2, 0.15], dtype=np.float32)\n', (2829, 2875), True, 'import numpy as np\n'), ((2890, 2909), 'numpy.array', 'np.array', (['[1, 0, 3]'], {}), '([1, 0, 3])\n', (2898, 2909), True, 'import numpy as np\n'), ((2922, 2948), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes'], {}), '(np_boxes)\n', (2938, 2948), False, 'import torch\n'), ((2962, 2989), 'torch.from_numpy', 'torch.from_numpy', (['np_scores'], {}), '(np_scores)\n', (2978, 2989), False, 'import torch\n'), ((3146, 3164), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3160, 3164), True, 'import 
numpy as np\n'), ((3289, 3315), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes'], {}), '(np_boxes)\n', (3305, 3315), False, 'import torch\n'), ((3329, 3356), 'torch.from_numpy', 'torch.from_numpy', (['np_scores'], {}), '(np_scores)\n', (3345, 3356), False, 'import torch\n'), ((180, 205), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (203, 205), False, 'import torch\n'), ((1184, 1209), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1207, 1209), False, 'import torch\n'), ((2333, 2358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2356, 2358), False, 'import torch\n'), ((960, 987), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes1'], {}), '(np_boxes1)\n', (976, 987), False, 'import torch\n'), ((1008, 1035), 'torch.from_numpy', 'torch.from_numpy', (['np_boxes2'], {}), '(np_boxes2)\n', (1024, 1035), False, 'import torch\n'), ((2017, 2039), 'numpy.random.rand', 'np.random.rand', (['(555)', '(7)'], {}), '(555, 7)\n', (2031, 2039), True, 'import numpy as np\n'), ((2075, 2094), 'numpy.random.rand', 'np.random.rand', (['(555)'], {}), '(555)\n', (2089, 2094), True, 'import numpy as np\n'), ((3180, 3202), 'numpy.random.rand', 'np.random.rand', (['(555)', '(7)'], {}), '(555, 7)\n', (3194, 3202), True, 'import numpy as np\n'), ((3238, 3257), 'numpy.random.rand', 'np.random.rand', (['(555)'], {}), '(555)\n', (3252, 3257), True, 'import numpy as np\n')] |
import math
from decimal import Decimal
from NIM.benchmarks import Benchmark
from numpy import zeros, pi, cos, exp, sqrt, array
import numpy as np
# double source
class MultiSourceFunction(Benchmark):
    """Concentration field produced by two Gaussian-plume point sources.

    ``eval`` returns the concentration at a sample point, attributed to
    whichever of the two sources is closer in the cross-wind (y)
    direction. Plume widths follow the Pasquill-Gifford stability
    classes 'A'..'F' (anything else falls back to the 'F' coefficients).
    """

    # Pasquill-Gifford cross-wind coefficients; unknown classes use 0.04 ('F').
    _SIGMA_Y_COEF = {'A': 0.22, 'B': 0.16, 'C': 0.11, 'D': 0.08, 'E': 0.06}
    # (coefficient, curvature factor, exponent); exponent None means plain c*x.
    _SIGMA_Z_COEF = {'A': (0.2, None, None),
                     'B': (0.12, None, None),
                     'C': (0.08, 0.0002, -0.5),
                     'D': (0.06, 0.0015, -0.5),
                     'E': (0.03, 0.0003, -1)}

    def __init__(self, lower=(0, 0, 0), upper=(20, 20, 0), dimension=3, pg_stability="F", u=2, q=1500, z=1,
                 x0=3.26, y0=9, x1=8.46, y1=16.5):
        """
        Args:
            lower/upper/dimension: search-space bounds passed to Benchmark.
            pg_stability: Pasquill-Gifford stability class 'A'..'F'.
            u: wind speed, shared by both sources.
            q: emission rate, shared by both sources.
            z: source (and receptor) height, shared by both sources.
            x0, y0 / x1, y1: ground coordinates of the two sources.
        """
        super().__init__(lower, upper, dimension)
        self.pg_stability = pg_stability
        # Both sources share the same emission rate, height and wind speed.
        self.q0 = q
        self.q1 = q
        self.z = z
        self.x0 = x0
        self.y0 = y0
        self.z0 = z
        self.x1 = x1
        self.y1 = y1
        self.z1 = z
        self.u0 = u
        self.u1 = u
        self.d = 1

    @staticmethod
    def sigma_y(pg_stability, x):
        """Cross-wind dispersion coefficient at downwind distance x."""
        c = MultiSourceFunction._SIGMA_Y_COEF.get(pg_stability, 0.04)
        return c * x * (1 + 0.0001 * x) ** (-0.5)

    @staticmethod
    def sigma_z(pg_stability, x):
        """Vertical dispersion coefficient at downwind distance x."""
        c, a, e = MultiSourceFunction._SIGMA_Z_COEF.get(
            pg_stability, (0.016, 0.0003, -1))
        if e is None:
            return c * x
        return c * x * (1 + a * x) ** e

    def calculate_concentration(self, x, y, z, q0, x0, y0, z0, u, pg_stability):
        """Gaussian-plume concentration at (x, y, z) for a single source
        of strength q0 located at (x0, y0, z0) with wind speed u."""
        part1 = (q0 / (2 * pi * self.sigma_y(pg_stability, (x - x0)) * self.sigma_z(pg_stability, (x - x0)) * u)) * exp(
            -0.5 * ((y - y0) ** 2) / (self.sigma_y(pg_stability, (x - x0)) ** 2))
        # Second exponential is the image-source term (plume reflected at
        # the ground, z = 0).
        part2 = exp(-((z - z0) ** 2) / (2 * (self.sigma_z(pg_stability, (x - x0)) ** 2))) + exp(
            -((z + z0) ** 2) / (2 * (self.sigma_z(pg_stability, (x - x0)) ** 2)))
        return part1 * part2

    def eval(self, sol):
        """Concentration at sol = (x, y, ...) from the y-nearest source,
        rounded to 4 decimals. Points upwind of that source (x < source x)
        get concentration 0."""
        # Attribute the sample to whichever source is closer in y.
        if abs(self.y0 - sol[1]) < abs(self.y1 - sol[1]):
            xs, ys, zs, qs, us = self.x0, self.y0, self.z0, self.q0, self.u0
        else:
            xs, ys, zs, qs, us = self.x1, self.y1, self.z1, self.q1, self.u1
        if sol[0] < xs:
            result = 0
        else:
            result = self.calculate_concentration(sol[0], sol[1], self.z0, qs,
                                                  xs, ys, zs, us,
                                                  self.pg_stability)
        return round(result, 4)

    def get_optimum(self):
        """Return the (rounded) source locations and the shared emission rate."""
        return array([[3.3, 9.0],
                      [8.5, 16.5]]), self.q0
if __name__ == '__main__':
    # Smoke test: make sure the benchmark can be instantiated with defaults.
    ssf = MultiSourceFunction()
| [
"numpy.array"
] | [((3012, 3044), 'numpy.array', 'array', (['[[3.3, 9.0], [8.5, 16.5]]'], {}), '([[3.3, 9.0], [8.5, 16.5]])\n', (3017, 3044), False, 'from numpy import zeros, pi, cos, exp, sqrt, array\n')] |
"""
Basic Collaborative Filtering Tutorial (Matrix factorization)
This tutorial is based on
https://heartbeat.fritz.ai/recommender-systems-with-python-part-iii-collaborative-filtering-singular-value-decomposition-5b5dcb3f242b
"""
import os
import pathlib
import pandas as pd
import numpy as np
from scipy.sparse.linalg import svds
# Load data
movies_filename = "movie.csv"
ratings_filename = "rating.csv"
# Data files live relative to this script's location.
data_path = os.path.join(
    pathlib.Path(__file__).parent.absolute(),
    "../../../data/examples"
)
df_movies = pd.read_csv(
    os.path.join(data_path, movies_filename),
    usecols=["movieId", "title"],
    dtype={"movieId": "int32", "title": "str"}
)
df_ratings = pd.read_csv(
    os.path.join(data_path, ratings_filename),
    usecols=["userId", "movieId", "rating"],
    dtype={"userId": "int32", "movieId": "int32", "rating": "float32"}
)
# Only take the first 100000 ratings to make this example faster
df_ratings = df_ratings[:100000]
print("========= MOVIES DF =========")
print("MOVIES DF HEAD\n", df_movies.head(), end="\n\n")
print("MOVIES DF SHAPE", df_movies.shape, end="\n\n")
print("========= RATINGS DF =========")
print("RATINGS DF HEAD\n", df_ratings.head(), end="\n\n")
print("RATINGS DF SHAPE", df_ratings.shape, end="\n\n")
# RATING MATRIX
# Pivot ratings into movie features to get a rating matrix.
# Each user is a row and each movie is a column, values are ratings,
# 0 indicates no rating.
df_movie_features = df_ratings.pivot(
    index='userId',
    columns='movieId',
    values='rating'
).fillna(0)
# NOTE(review): the banner below says "Movies x Users" but the pivot above
# produces users as rows and movies as columns.
print("========= RATINGS MATRIX (Movies x Users) =========")
print("RATINGS MATRIX: HEAD\n", df_movie_features.head(), end="\n\n")
print("RATINGS MATRIX: SHAPE", df_movie_features.shape, end="\n\n")
# Normalize each user's ratings (demeaning) to remove user bias
# If a user tends to rate low, 3 actually means a top rating in his own scale
# and that’s the information we want to extract
R = df_movie_features.to_numpy()
user_ratings_mean = np.mean(R, axis=1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
print("Factorizing matrix...")
# Perform matrix factorization with Singular Value Decomposition (SVD),
# keeping the 50 largest singular values.
U, sigma, Vt = svds(R_demeaned, k=50)
# The returned value in sigma is just the values instead of a diagonal matrix.
# Since I'm going to leverage matrix multiplication to get predictions
# I'll convert it to the diagonal matrix form.
sigma = np.diag(sigma)
# Reconstruct the full rating matrix and add the per-user means back.
all_user_predicted_ratings = np.dot(
    np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
print("========= USER PREDICTED RATINGS =========")
print("USER PREDICTED RATINGS:\n",
      all_user_predicted_ratings, end="\n\n")
print("USER PREDICTED RATINGS: SHAPE",
      all_user_predicted_ratings.shape, end="\n\n")
def recommend_movies(
        preds_df,
        userID,
        movies_df,
        original_ratings_df,
        num_recommendations=5):
    """
    Recommend the top unseen movies for a user from predicted ratings.

    Args:
        preds_df: DataFrame of predicted ratings, one row per user, one
            column per movieId (columns index named 'movieId').
        userID: 1-based user id.
        movies_df: DataFrame with 'movieId' and 'title' columns.
        original_ratings_df: DataFrame with 'userId', 'movieId', 'rating'.
        num_recommendations: how many recommendations to return.

    Returns:
        (user_full, recommendations): the movies the user already rated
        (with titles, best first) and the top unseen movies ordered by
        predicted rating (without the prediction column).
    """
    # UserID starts at 1, not 0
    user_row_number = userID - 1
    sorted_user_predictions = preds_df.iloc[user_row_number].sort_values(
        ascending=False)

    # Movies the user has already rated, enriched with titles, best first.
    user_data = original_ratings_df[original_ratings_df.userId == userID]
    user_full = (user_data
                 .merge(movies_df, how='left',
                        left_on='movieId', right_on='movieId')
                 .sort_values(['rating'], ascending=False))

    # Candidates are all movies the user has not rated yet.
    candidates = movies_df[~movies_df['movieId'].isin(user_full['movieId'])]
    # One prediction column; assumes preds_df has the default RangeIndex so
    # the column comes out named `user_row_number`.
    predictions = pd.DataFrame(sorted_user_predictions).reset_index()
    recommendations = (candidates
                       .merge(predictions, how='left',
                              left_on='movieId', right_on='movieId')
                       .rename(columns={user_row_number: 'Predictions'})
                       .sort_values('Predictions', ascending=False)
                       # Drop the trailing 'Predictions' column.
                       .iloc[:num_recommendations, :-1])

    return user_full, recommendations
if __name__ == "__main__":
preds_df = pd.DataFrame(all_user_predicted_ratings,
columns=df_movie_features.columns)
already_rated, predictions = recommend_movies(
preds_df, 330, df_movies, df_ratings, 10)
print("========= PREDICTIONS =========")
print("USER RATED MOVIES:\n")
print(already_rated.head())
print("PREDICTED MOVIES FOR USER\n")
print(predictions)
| [
"pandas.DataFrame",
"scipy.sparse.linalg.svds",
"pathlib.Path",
"numpy.mean",
"numpy.dot",
"numpy.diag",
"os.path.join"
] | [((1975, 1993), 'numpy.mean', 'np.mean', (['R'], {'axis': '(1)'}), '(R, axis=1)\n', (1982, 1993), True, 'import numpy as np\n'), ((2162, 2184), 'scipy.sparse.linalg.svds', 'svds', (['R_demeaned'], {'k': '(50)'}), '(R_demeaned, k=50)\n', (2166, 2184), False, 'from scipy.sparse.linalg import svds\n'), ((2390, 2404), 'numpy.diag', 'np.diag', (['sigma'], {}), '(sigma)\n', (2397, 2404), True, 'import numpy as np\n'), ((542, 582), 'os.path.join', 'os.path.join', (['data_path', 'movies_filename'], {}), '(data_path, movies_filename)\n', (554, 582), False, 'import os\n'), ((697, 738), 'os.path.join', 'os.path.join', (['data_path', 'ratings_filename'], {}), '(data_path, ratings_filename)\n', (709, 738), False, 'import os\n'), ((3913, 3988), 'pandas.DataFrame', 'pd.DataFrame', (['all_user_predicted_ratings'], {'columns': 'df_movie_features.columns'}), '(all_user_predicted_ratings, columns=df_movie_features.columns)\n', (3925, 3988), True, 'import pandas as pd\n'), ((2446, 2462), 'numpy.dot', 'np.dot', (['U', 'sigma'], {}), '(U, sigma)\n', (2452, 2462), True, 'import numpy as np\n'), ((439, 461), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (451, 461), False, 'import pathlib\n'), ((3510, 3547), 'pandas.DataFrame', 'pd.DataFrame', (['sorted_user_predictions'], {}), '(sorted_user_predictions)\n', (3522, 3547), True, 'import pandas as pd\n')] |
from sklearn.svm import LinearSVC
import numpy as np
import pandas as pd
from bluegraph.backends.stellargraph import StellarGraphNodeEmbedder
from bluegraph.downstream.node_classification import NodeClassifier
from sklearn import model_selection
from bluegraph.downstream import (get_confusion_matrix,
get_classification_scores,
transform_to_2d,
cluster_nodes,
plot_2d)
def test_node_classification(random_pgframe):
    """End-to-end smoke test: embed a random property graph with node2vec,
    attach random entity-type labels, exercise clustering/plotting, then
    train and apply an SVC node classifier."""
    # Convert node ids to strings (presumably required downstream by the
    # stellargraph embedder — TODO confirm).
    random_pgframe.rename_nodes({
        n: str(n)
        for n in random_pgframe.nodes()
    })
    train_nodes, test_nodes = model_selection.train_test_split(
        random_pgframe.nodes(), train_size=0.8)
    node2vec_embedder = StellarGraphNodeEmbedder(
        "node2vec", edge_weight="mi",
        embedding_dimension=10, length=5, number_of_walks=10)
    node2vec_embedding = node2vec_embedder.fit_model(random_pgframe)
    # Store the embedding vectors as a node property called "node2vec".
    random_pgframe.add_node_properties(
        node2vec_embedding.rename(columns={"embedding": "node2vec"}))
    node2vec_classifier = NodeClassifier(
        LinearSVC(), feature_vector_prop="node2vec")
    # Assign random ground-truth labels, weighted towards "Apple".
    types = ["Apple", "Orange", "Carrot"]
    node_types = pd.DataFrame([
        (str(n), np.random.choice(types, p=[0.5, 0.4, 0.1]))
        for n in range(random_pgframe.number_of_nodes())
    ], columns=["@id", "entity_type"])
    random_pgframe.add_node_properties(node_types)
    # Exercise the unsupervised utilities; their outputs are not asserted.
    cluster_nodes(
        random_pgframe.get_node_property_values("node2vec").to_list())
    node2vec_2d = transform_to_2d(
        random_pgframe.get_node_property_values("node2vec").to_list())
    plot_2d(
        random_pgframe, vectors=node2vec_2d, label_prop="entity_type",
        silent=True)
    node2vec_classifier.fit(
        random_pgframe, train_elements=train_nodes,
        label_prop="entity_type")
    node2vec_pred = node2vec_classifier.predict(
        random_pgframe, predict_elements=test_nodes)
    true_labels = random_pgframe._nodes.loc[test_nodes, "entity_type"]
    # NOTE(review): scores and matrix are computed but never asserted on;
    # this test only verifies that the pipeline runs without raising.
    scores = get_classification_scores(true_labels, node2vec_pred, multiclass=True)
    matrix = get_confusion_matrix(true_labels, node2vec_pred)
| [
"numpy.random.choice",
"bluegraph.backends.stellargraph.StellarGraphNodeEmbedder",
"bluegraph.downstream.get_classification_scores",
"bluegraph.downstream.get_confusion_matrix",
"bluegraph.downstream.plot_2d",
"sklearn.svm.LinearSVC"
] | [((793, 906), 'bluegraph.backends.stellargraph.StellarGraphNodeEmbedder', 'StellarGraphNodeEmbedder', (['"""node2vec"""'], {'edge_weight': '"""mi"""', 'embedding_dimension': '(10)', 'length': '(5)', 'number_of_walks': '(10)'}), "('node2vec', edge_weight='mi', embedding_dimension=\n 10, length=5, number_of_walks=10)\n", (817, 906), False, 'from bluegraph.backends.stellargraph import StellarGraphNodeEmbedder\n'), ((1677, 1764), 'bluegraph.downstream.plot_2d', 'plot_2d', (['random_pgframe'], {'vectors': 'node2vec_2d', 'label_prop': '"""entity_type"""', 'silent': '(True)'}), "(random_pgframe, vectors=node2vec_2d, label_prop='entity_type',\n silent=True)\n", (1684, 1764), False, 'from bluegraph.downstream import get_confusion_matrix, get_classification_scores, transform_to_2d, cluster_nodes, plot_2d\n'), ((2081, 2151), 'bluegraph.downstream.get_classification_scores', 'get_classification_scores', (['true_labels', 'node2vec_pred'], {'multiclass': '(True)'}), '(true_labels, node2vec_pred, multiclass=True)\n', (2106, 2151), False, 'from bluegraph.downstream import get_confusion_matrix, get_classification_scores, transform_to_2d, cluster_nodes, plot_2d\n'), ((2165, 2213), 'bluegraph.downstream.get_confusion_matrix', 'get_confusion_matrix', (['true_labels', 'node2vec_pred'], {}), '(true_labels, node2vec_pred)\n', (2185, 2213), False, 'from bluegraph.downstream import get_confusion_matrix, get_classification_scores, transform_to_2d, cluster_nodes, plot_2d\n'), ((1148, 1159), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (1157, 1159), False, 'from sklearn.svm import LinearSVC\n'), ((1285, 1327), 'numpy.random.choice', 'np.random.choice', (['types'], {'p': '[0.5, 0.4, 0.1]'}), '(types, p=[0.5, 0.4, 0.1])\n', (1301, 1327), True, 'import numpy as np\n')] |
import numpy as np
def mag2db(mag):
    """Convert a magnitude ratio (scalar or array) to decibels: 20*log10(mag)."""
    return np.log10(mag) * 20
| [
"numpy.log10"
] | [((52, 65), 'numpy.log10', 'np.log10', (['mag'], {}), '(mag)\n', (60, 65), True, 'import numpy as np\n')] |
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader,Subset
import yaml
import pickle
import os
from operator import attrgetter
from build_models import UltraModel
from dataset import fingerset
from utils import averageScalar,string_for_loss
from utils import dice as Dice_coef
from yacs.config import CfgNode as CN
import sys
with open('./config.yaml','r') as f:
    config = CN(yaml.safe_load(f))
model = UltraModel(1,config.MODEL.NUM_CLASSES,2,config.MODEL.BACKBONE).cuda()
if not os.path.exists("./best.pth.tar"):
    raise Exception("you should put the preprained model in the root directory")
# Load the checkpoint on CPU; the CUDA model then consumes the weights.
best_ckp = torch.load('./best.pth.tar',map_location=lambda storage,_:storage)
model.load_state_dict(best_ckp["model"])
model.eval()
datasetset_pkl = './pkls/test.pkl'
# Infer which split is being evaluated from the pickle file name.
if "test" in datasetset_pkl:
    set_type = "test"
elif "eval" in datasetset_pkl:
    set_type = "eval"
else:
    set_type = "train"
testset = fingerset(datasetset_pkl, "test", [64,64],2, config.DATA.NORMALIZATION)
dataloader = DataLoader(testset,128,False,num_workers=8)
# Running averages of the per-batch metrics, weighted by batch size.
yaw_error_scalar = averageScalar()
pitch_error_scalar = averageScalar()
roll_error_scalar = averageScalar()
finger_acc_scalar = averageScalar()
seg_dice_scalar = averageScalar()
def length_arange(min, max, length):
    """Return a 1-D tensor of `length` evenly spaced values in [min, max)."""
    step = float(max - min) / length
    return torch.arange(min, max, step)
# Bin-center tensors used to decode the classification heads into angles.
idx_tensor_yaw = length_arange(config.MODEL.YAW_MIN,config.MODEL.YAW_MAX,config.MODEL.NUM_CLASSES[0]).cuda()
idx_tensor_pitch = length_arange(config.MODEL.PITCH_MIN,config.MODEL.PITCH_MAX,config.MODEL.NUM_CLASSES[1]).cuda()
idx_tensor_roll = length_arange(config.MODEL.ROLL_MIN,config.MODEL.ROLL_MAX,config.MODEL.NUM_CLASSES[2]).cuda()
# Per-sample (yaw, pitch, roll) predictions and ground truths, collected
# for the summary statistics after the loop.
predicts = []
gts = []
for iterx, item in enumerate(dataloader):
    with torch.no_grad():
        img = item["img"].cuda()
        yaw_gt = item['yaw'].cuda().to(torch.float32)
        pitch_gt = item["pitch"].cuda().to(torch.float32)
        roll_gt = item["roll"].cuda().to(torch.float32)
        gt = torch.stack([yaw_gt,pitch_gt,roll_gt],dim=-1)
        finger = item['finger_type'].cuda()
        path = item['path']
        seg = item['seg'].cuda()
        bb = img.shape[0]
        result = model(img)
        yaw_prob = result["yaw"]
        pitch_prob = result["pitch"]
        roll_prob = result['roll']
        finger_probs = result['finger']
        fcn_out = result['fcn']
        # Binarize the segmentation head at a 0.75 threshold.
        fcn_out = torch.sigmoid(fcn_out)
        fcn_out = (fcn_out>0.75).cpu().numpy().astype(np.int32)
        # Decode angles as a weighted sum over the bin centers; the
        # weighting depends on which loss the model was trained with.
        if config.TRAIN.CLS_LOSS=="bce":
            yaw_predict = torch.sum(torch.sigmoid(yaw_prob)*idx_tensor_yaw[None],dim=1)/config.MODEL.NUM_CLASSES[0]
            pitch_predict = torch.sum(torch.sigmoid(pitch_prob)*idx_tensor_pitch[None],dim=1)/config.MODEL.NUM_CLASSES[1]
            roll_predict = torch.sum(torch.sigmoid(roll_prob)*idx_tensor_roll[None],dim=1)/config.MODEL.NUM_CLASSES[2]
        else:
            yaw_predict = (F.softmax(yaw_prob,dim=1)*idx_tensor_yaw[None]).sum(dim=1)
            pitch_predict = (F.softmax(pitch_prob,dim=1)*idx_tensor_pitch[None]).sum(dim=1)
            roll_predict = (F.softmax(roll_prob,dim=1)*idx_tensor_roll[None]).sum(dim=1)
        # Per-batch mean absolute angle errors and classification accuracy.
        yaw_error = (yaw_gt-yaw_predict).abs().mean(0).item()
        pitch_error = (pitch_gt-pitch_predict).abs().mean(0).item()
        roll_error = (roll_gt-roll_predict).abs().mean(0).item()
        finger_right = (finger_probs.argmax(dim=-1)==finger).sum().item()/bb
        seg_dice = Dice_coef(seg.cpu().numpy(),fcn_out,[1,])[0]
        yaw_error_scalar.update(yaw_error,bb)
        pitch_error_scalar.update(pitch_error,bb)
        roll_error_scalar.update(roll_error,bb)
        finger_acc_scalar.update(finger_right,bb)
        seg_dice_scalar.update(seg_dice,bb)
        print(f"Eval Evil: {iterx}/{len(dataloader)}|| ",string_for_loss(["yaw","pitch","roll","finger","seg"],
                            [yaw_error,pitch_error,roll_error,finger_right,seg_dice]))
        for k in range(bb):
            gts.append([yaw_gt[k].item(),pitch_gt[k].item(),roll_gt[k].item()])
            predicts.append([yaw_predict[k].item(),pitch_predict[k].item(),roll_predict[k].item()])
# print(f"Average eval error: yaw: {yaw_error_scalar.avg:4f},pitch: {pitch_error_scalar.avg:4f},roll: {roll_error_scalar.avg:4f}")
print(f"reporting performance on {set_type}")
print("\t\t yaw\t\t pitch\t\t roll")
predicts = np.array(predicts)
gts = np.array(gts)
# MAE / RMSE / SD per angle (columns) and pooled over all angles.
mae = np.mean(np.abs(predicts-gts),axis = 0)
mae_all = np.mean(np.abs(predicts-gts))
print(f"MAE {mae[0]}\t {mae[1]}\t {mae[2]} {mae_all}(all)")
rmse = np.sqrt(np.mean((predicts-gts)**2,axis=0))
rmse_all = np.sqrt(np.mean((predicts-gts)**2))
print(f"RMSE {rmse[0]}\t {rmse[1]}\t {rmse[2]} {rmse_all}(all)")
sd = np.std(np.abs(predicts-gts),axis=0)
sd_all = np.std(np.abs(predicts-gts))
print(f"SD {sd[0]}\t {sd[1]}\t {sd[2]}\t {sd_all}(all)")
| [
"utils.string_for_loss",
"numpy.abs",
"torch.stack",
"torch.utils.data.DataLoader",
"dataset.fingerset",
"utils.averageScalar",
"torch.load",
"os.path.exists",
"torch.nn.functional.softmax",
"torch.sigmoid",
"numpy.mean",
"numpy.array",
"yaml.safe_load",
"build_models.UltraModel",
"torch... | [((690, 759), 'torch.load', 'torch.load', (['"""./best.pth.tar"""'], {'map_location': '(lambda storage, _: storage)'}), "('./best.pth.tar', map_location=lambda storage, _: storage)\n", (700, 759), False, 'import torch\n'), ((991, 1064), 'dataset.fingerset', 'fingerset', (['datasetset_pkl', '"""test"""', '[64, 64]', '(2)', 'config.DATA.NORMALIZATION'], {}), "(datasetset_pkl, 'test', [64, 64], 2, config.DATA.NORMALIZATION)\n", (1000, 1064), False, 'from dataset import fingerset\n'), ((1076, 1122), 'torch.utils.data.DataLoader', 'DataLoader', (['testset', '(128)', '(False)'], {'num_workers': '(8)'}), '(testset, 128, False, num_workers=8)\n', (1086, 1122), False, 'from torch.utils.data import DataLoader, Subset\n'), ((1140, 1155), 'utils.averageScalar', 'averageScalar', ([], {}), '()\n', (1153, 1155), False, 'from utils import averageScalar, string_for_loss\n'), ((1177, 1192), 'utils.averageScalar', 'averageScalar', ([], {}), '()\n', (1190, 1192), False, 'from utils import averageScalar, string_for_loss\n'), ((1213, 1228), 'utils.averageScalar', 'averageScalar', ([], {}), '()\n', (1226, 1228), False, 'from utils import averageScalar, string_for_loss\n'), ((1249, 1264), 'utils.averageScalar', 'averageScalar', ([], {}), '()\n', (1262, 1264), False, 'from utils import averageScalar, string_for_loss\n'), ((1283, 1298), 'utils.averageScalar', 'averageScalar', ([], {}), '()\n', (1296, 1298), False, 'from utils import averageScalar, string_for_loss\n'), ((4433, 4451), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (4441, 4451), True, 'import numpy as np\n'), ((4458, 4471), 'numpy.array', 'np.array', (['gts'], {}), '(gts)\n', (4466, 4471), True, 'import numpy as np\n'), ((564, 596), 'os.path.exists', 'os.path.exists', (['"""./best.pth.tar"""'], {}), "('./best.pth.tar')\n", (578, 596), False, 'import os\n'), ((4487, 4509), 'numpy.abs', 'np.abs', (['(predicts - gts)'], {}), '(predicts - gts)\n', (4493, 4509), True, 'import numpy as np\n'), ((4536, 
4558), 'numpy.abs', 'np.abs', (['(predicts - gts)'], {}), '(predicts - gts)\n', (4542, 4558), True, 'import numpy as np\n'), ((4634, 4672), 'numpy.mean', 'np.mean', (['((predicts - gts) ** 2)'], {'axis': '(0)'}), '((predicts - gts) ** 2, axis=0)\n', (4641, 4672), True, 'import numpy as np\n'), ((4688, 4718), 'numpy.mean', 'np.mean', (['((predicts - gts) ** 2)'], {}), '((predicts - gts) ** 2)\n', (4695, 4718), True, 'import numpy as np\n'), ((4794, 4816), 'numpy.abs', 'np.abs', (['(predicts - gts)'], {}), '(predicts - gts)\n', (4800, 4816), True, 'import numpy as np\n'), ((4839, 4861), 'numpy.abs', 'np.abs', (['(predicts - gts)'], {}), '(predicts - gts)\n', (4845, 4861), True, 'import numpy as np\n'), ((459, 476), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (473, 476), False, 'import yaml\n'), ((487, 552), 'build_models.UltraModel', 'UltraModel', (['(1)', 'config.MODEL.NUM_CLASSES', '(2)', 'config.MODEL.BACKBONE'], {}), '(1, config.MODEL.NUM_CLASSES, 2, config.MODEL.BACKBONE)\n', (497, 552), False, 'from build_models import UltraModel\n'), ((1802, 1817), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1815, 1817), False, 'import torch\n'), ((2036, 2084), 'torch.stack', 'torch.stack', (['[yaw_gt, pitch_gt, roll_gt]'], {'dim': '(-1)'}), '([yaw_gt, pitch_gt, roll_gt], dim=-1)\n', (2047, 2084), False, 'import torch\n'), ((2437, 2459), 'torch.sigmoid', 'torch.sigmoid', (['fcn_out'], {}), '(fcn_out)\n', (2450, 2459), False, 'import torch\n'), ((3840, 3964), 'utils.string_for_loss', 'string_for_loss', (["['yaw', 'pitch', 'roll', 'finger', 'seg']", '[yaw_error, pitch_error, roll_error, finger_right, seg_dice]'], {}), "(['yaw', 'pitch', 'roll', 'finger', 'seg'], [yaw_error,\n pitch_error, roll_error, finger_right, seg_dice])\n", (3855, 3964), False, 'from utils import averageScalar, string_for_loss\n'), ((2602, 2625), 'torch.sigmoid', 'torch.sigmoid', (['yaw_prob'], {}), '(yaw_prob)\n', (2615, 2625), False, 'import torch\n'), ((2720, 2745), 
'torch.sigmoid', 'torch.sigmoid', (['pitch_prob'], {}), '(pitch_prob)\n', (2733, 2745), False, 'import torch\n'), ((2841, 2865), 'torch.sigmoid', 'torch.sigmoid', (['roll_prob'], {}), '(roll_prob)\n', (2854, 2865), False, 'import torch\n'), ((2964, 2990), 'torch.nn.functional.softmax', 'F.softmax', (['yaw_prob'], {'dim': '(1)'}), '(yaw_prob, dim=1)\n', (2973, 2990), True, 'from torch.nn import functional as F\n'), ((3052, 3080), 'torch.nn.functional.softmax', 'F.softmax', (['pitch_prob'], {'dim': '(1)'}), '(pitch_prob, dim=1)\n', (3061, 3080), True, 'from torch.nn import functional as F\n'), ((3143, 3170), 'torch.nn.functional.softmax', 'F.softmax', (['roll_prob'], {'dim': '(1)'}), '(roll_prob, dim=1)\n', (3152, 3170), True, 'from torch.nn import functional as F\n')] |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionSinTest(unittest.TestCase):
    """Checks that dolphindb_numpy's sin agrees with numpy's sin for
    scalars, lists, arrays, orca Series and orca DataFrames."""
    @classmethod
    def setUpClass(cls):
        # connect to a DolphinDB server
        orca.connect(HOST, PORT, "admin", "123456")

    def test_function_math_sin_scalar(self):
        # Complex, float, integer and NaN scalar inputs.
        self.assertEqual(dnp.sin(1.2 + 1j), np.sin(1.2 + 1j))
        self.assertEqual(dnp.sin(dnp.pi/2.), np.sin(np.pi/2.))
        self.assertEqual(dnp.sin(0.5), np.sin(0.5))
        self.assertEqual(dnp.sin(1), np.sin(1))
        self.assertEqual(dnp.sin(-1), np.sin(-1))
        self.assertEqual(dnp.sin(0), np.sin(0))
        self.assertEqual(dnp.isnan(dnp.sin(dnp.nan)), True)
        self.assertEqual(np.isnan(np.sin(np.nan)), True)

    def test_function_math_sin_list(self):
        # Angles given in degrees, converted to radians before sin.
        npa = np.sin(list(np.array([0., 30., 45., 60., 90., np.nan]) * np.pi / 180.))
        dnpa = dnp.sin(list(dnp.array([0., 30., 45., 60., 90., dnp.nan]) * dnp.pi / 180.))
        assert_array_equal(dnpa, npa)

    def test_function_math_sin_array(self):
        npa = np.sin(np.array([0., 30., 45., 60., 90., np.nan]) * np.pi / 180.)
        dnpa = dnp.sin(dnp.array([0., 30., 45., 60., 90., dnp.nan]) * dnp.pi / 180.)
        assert_array_equal(dnpa, npa)

    def test_function_math_sin_series(self):
        ps = pd.Series(np.array([0., 30., 45., 60., 90., np.nan]) * np.pi / 180.)
        os = orca.Series(ps)
        assert_series_equal(dnp.sin(os).to_pandas(), np.sin(ps))

    def test_function_math_sin_dataframe(self):
        pdf = pd.DataFrame({"cola": np.array([0., 30., np.nan, 45., 60., 90., np.nan]) * np.pi / 180.,
                            "colb": np.array([0., 30., np.nan, 45., 60., np.nan, 90.]) * np.pi / 180.})
        odf = orca.DataFrame(pdf)
        assert_frame_equal(dnp.sin(odf).to_pandas(), np.sin(pdf))
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"dolphindb_numpy.array",
"orca.connect",
"orca.Series",
"numpy.sin",
"numpy.array",
"dolphindb_numpy.sin",
"orca.DataFrame"
] | [((1971, 1986), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1984, 1986), False, 'import unittest\n'), ((323, 366), 'orca.connect', 'orca.connect', (['HOST', 'PORT', '"""admin"""', '"""123456"""'], {}), "(HOST, PORT, 'admin', '123456')\n", (335, 366), False, 'import orca\n'), ((1501, 1516), 'orca.Series', 'orca.Series', (['ps'], {}), '(ps)\n', (1512, 1516), False, 'import orca\n'), ((1852, 1871), 'orca.DataFrame', 'orca.DataFrame', (['pdf'], {}), '(pdf)\n', (1866, 1871), False, 'import orca\n'), ((438, 457), 'dolphindb_numpy.sin', 'dnp.sin', (['(1.2 + 1.0j)'], {}), '(1.2 + 1.0j)\n', (445, 457), True, 'import dolphindb_numpy as dnp\n'), ((457, 475), 'numpy.sin', 'np.sin', (['(1.2 + 1.0j)'], {}), '(1.2 + 1.0j)\n', (463, 475), True, 'import numpy as np\n'), ((500, 521), 'dolphindb_numpy.sin', 'dnp.sin', (['(dnp.pi / 2.0)'], {}), '(dnp.pi / 2.0)\n', (507, 521), True, 'import dolphindb_numpy as dnp\n'), ((520, 539), 'numpy.sin', 'np.sin', (['(np.pi / 2.0)'], {}), '(np.pi / 2.0)\n', (526, 539), True, 'import numpy as np\n'), ((563, 575), 'dolphindb_numpy.sin', 'dnp.sin', (['(0.5)'], {}), '(0.5)\n', (570, 575), True, 'import dolphindb_numpy as dnp\n'), ((577, 588), 'numpy.sin', 'np.sin', (['(0.5)'], {}), '(0.5)\n', (583, 588), True, 'import numpy as np\n'), ((615, 625), 'dolphindb_numpy.sin', 'dnp.sin', (['(1)'], {}), '(1)\n', (622, 625), True, 'import dolphindb_numpy as dnp\n'), ((627, 636), 'numpy.sin', 'np.sin', (['(1)'], {}), '(1)\n', (633, 636), True, 'import numpy as np\n'), ((663, 674), 'dolphindb_numpy.sin', 'dnp.sin', (['(-1)'], {}), '(-1)\n', (670, 674), True, 'import dolphindb_numpy as dnp\n'), ((676, 686), 'numpy.sin', 'np.sin', (['(-1)'], {}), '(-1)\n', (682, 686), True, 'import numpy as np\n'), ((713, 723), 'dolphindb_numpy.sin', 'dnp.sin', (['(0)'], {}), '(0)\n', (720, 723), True, 'import dolphindb_numpy as dnp\n'), ((725, 734), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (731, 734), True, 'import numpy as np\n'), ((1570, 1580), 'numpy.sin', 
'np.sin', (['ps'], {}), '(ps)\n', (1576, 1580), True, 'import numpy as np\n'), ((1925, 1936), 'numpy.sin', 'np.sin', (['pdf'], {}), '(pdf)\n', (1931, 1936), True, 'import numpy as np\n'), ((771, 787), 'dolphindb_numpy.sin', 'dnp.sin', (['dnp.nan'], {}), '(dnp.nan)\n', (778, 787), True, 'import dolphindb_numpy as dnp\n'), ((830, 844), 'numpy.sin', 'np.sin', (['np.nan'], {}), '(np.nan)\n', (836, 844), True, 'import numpy as np\n'), ((1178, 1225), 'numpy.array', 'np.array', (['[0.0, 30.0, 45.0, 60.0, 90.0, np.nan]'], {}), '([0.0, 30.0, 45.0, 60.0, 90.0, np.nan])\n', (1186, 1225), True, 'import numpy as np\n'), ((1260, 1309), 'dolphindb_numpy.array', 'dnp.array', (['[0.0, 30.0, 45.0, 60.0, 90.0, dnp.nan]'], {}), '([0.0, 30.0, 45.0, 60.0, 90.0, dnp.nan])\n', (1269, 1309), True, 'import dolphindb_numpy as dnp\n'), ((1429, 1476), 'numpy.array', 'np.array', (['[0.0, 30.0, 45.0, 60.0, 90.0, np.nan]'], {}), '([0.0, 30.0, 45.0, 60.0, 90.0, np.nan])\n', (1437, 1476), True, 'import numpy as np\n'), ((1545, 1556), 'dolphindb_numpy.sin', 'dnp.sin', (['os'], {}), '(os)\n', (1552, 1556), True, 'import dolphindb_numpy as dnp\n'), ((1899, 1911), 'dolphindb_numpy.sin', 'dnp.sin', (['odf'], {}), '(odf)\n', (1906, 1911), True, 'import dolphindb_numpy as dnp\n'), ((923, 970), 'numpy.array', 'np.array', (['[0.0, 30.0, 45.0, 60.0, 90.0, np.nan]'], {}), '([0.0, 30.0, 45.0, 60.0, 90.0, np.nan])\n', (931, 970), True, 'import numpy as np\n'), ((1011, 1060), 'dolphindb_numpy.array', 'dnp.array', (['[0.0, 30.0, 45.0, 60.0, 90.0, dnp.nan]'], {}), '([0.0, 30.0, 45.0, 60.0, 90.0, dnp.nan])\n', (1020, 1060), True, 'import dolphindb_numpy as dnp\n'), ((1667, 1722), 'numpy.array', 'np.array', (['[0.0, 30.0, np.nan, 45.0, 60.0, 90.0, np.nan]'], {}), '([0.0, 30.0, np.nan, 45.0, 60.0, 90.0, np.nan])\n', (1675, 1722), True, 'import numpy as np\n'), ((1770, 1825), 'numpy.array', 'np.array', (['[0.0, 30.0, np.nan, 45.0, 60.0, np.nan, 90.0]'], {}), '([0.0, 30.0, np.nan, 45.0, 60.0, np.nan, 90.0])\n', (1778, 
1825), True, 'import numpy as np\n')] |
# Originally by <NAME>
# Modified by <NAME> 9/2015
# Modified by <NAME> 5/2017
import numpy as np
import logging
from qcodes.instrument.base import Instrument
from pycqed.instrument_drivers.pq_parameters import InstrumentParameter
import time
from qcodes.instrument_drivers.tektronix.AWG5014 import Tektronix_AWG5014
try:
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.\
UHFQuantumController import UHFQC
# except ModuleNotFoundError:
# exception catching removed because it does not work in python versions before 3.6
except Exception:
UHFQC = type(None)
# some pulses use rounding when determining the correct sample at which to
# insert a particular value. this might require correct rounding -- the pulses
# are typically specified on short time scales, but the time unit we use is
# seconds. therefore we need a suitably chosen digit on which to round. 9 would
# round a pulse to 1 ns precision. 11 is 10 ps, and therefore probably beyond
# the lifetime of this code (no 10ps AWG available yet :))
SIGNIFICANT_DIGITS = 11
class Pulsar(Instrument):
"""
A meta-instrument responsible for all communication with the AWGs.
Contains information about all the available awg-channels in the setup.
Starting, stopping and programming and changing the parameters of the AWGs
should be done through Pulsar. Supports Tektronix AWG5014 and partially
ZI UHFLI.
Args:
default_AWG: Name of the AWG that new channels get defined on if no
AWG is specified
master_AWG: Name of the AWG that triggers all the other AWG-s and
should be started last (after other AWG-s are already
waiting for a trigger.
"""
    def __init__(self, name='Pulsar', default_AWG=None, master_AWG=None):
        super().__init__(name)

        # for compatibility with old code, the default AWG name is stored in
        # self.AWG.name
        if default_AWG is not None:
            self.AWG = self.AWG_obj(AWG=default_AWG)
        else:
            # No default AWG yet: install a stand-in object that only carries
            # a `.name` attribute so old code reading self.AWG.name still
            # works.
            class Object(object):
                pass
            self.AWG = Object()
            self.AWG.name = None

        self.add_parameter('default_AWG',
                           set_cmd=self._set_default_AWG,
                           get_cmd=self._get_default_AWG)
        self.add_parameter('master_AWG', parameter_class=InstrumentParameter,
                           initial_value=master_AWG)
        # channel name -> dict of channel settings (see define_channel).
        self.channels = {}
        # Most recently programmed sequence/elements, kept for inspection
        # and plotting.
        self.last_sequence = None
        self.last_elements = None
        # When True, clock() serves clock rates from the self._clocks cache
        # instead of querying the instruments (see _clock_prequeried).
        self._clock_prequeried_state = False
# channel handling
def define_channel(self, id, name, type, delay, offset,
high, low, active, AWG=None):
"""
The AWG object must be created before creating channels for that AWG
Args:
id: channel id. For the Tektronix 5014 must be of the form
ch#(_marker#) with # a number and the part in () optional.
For UHFQC must be 'ch1' or 'ch2'.
name: This name must be specified in pulses for them to play on
this channel.
type: marker/analog/readout
delay: global delay applied to this channel (positive values move
pulses on this channel forward in time)
offset: a (software implemented) offset voltage that is added to
all of the waveforms (analog channel only)
high: maximal output value
low: minimal output value
active: whether this channel will be programmed
AWG: name of the AWG this channel is on
"""
if AWG is None:
AWG = self.default_AWG()
_doubles = []
for c_name, c_dict in self.channels.items():
if c_dict['id'] == id and c_dict['AWG'] == AWG:
logging.warning("Channel '{}' on '{}' already in use, {} will "
"overwrite {}.".format(id, AWG, name, c_name))
_doubles.append(c_name)
for c in _doubles:
del self.channels[c]
self.channels[name] = {'id': id,
'type': type,
'delay': delay,
'offset': offset,
'high': high,
'low': low,
'active': active,
'AWG': AWG}
def AWG_obj(self, **kw):
"""
Return the AWG object corresponding to a channel or an AWG name.
Args:
AWG: Name of the AWG Instrument.
channel: Name of the channel
Returns: An instance of Instrument class corresponding to the AWG
requested.
"""
AWG = kw.get('AWG', None)
chan = kw.get('channel', None)
if AWG is not None and chan is not None:
raise ValueError('Both `AWG` and `channel` arguments passed to '
'Pulsar.AWG_obj()')
elif AWG is None and chan is not None:
name = self.channels[chan]['AWG']
elif AWG is not None and chan is None:
name = AWG
else:
raise ValueError('Either `AWG` or `channel` argument needs to be '
'passed to Pulsar.AWG_obj()')
return Instrument.find_instrument(name)
def clock(self, channel):
"""
Returns the clock rate of channel `channel`
Args:
channel: name of the channel
Returns: clock rate in samples per second
"""
if self._clock_prequeried_state:
return self._clocks[self.channels[channel]['AWG']]
else:
obj = self.AWG_obj(channel=channel)
return obj.clock_freq()
def channel_opt(self, name, option, value=None):
"""
Convenience function to get or set a channel option.
Args:
name: Name of the channel.
option: Name of the option. Available options:
* 'id'
* 'type'
* 'delay'
* 'offset'
* 'high'
* 'low'
* 'active'
* 'AWG'
value: New value for the option.
"""
if value is not None:
self.channels[name][option] = value
else:
return self.channels[name][option]
def used_AWGs(self):
"""
Returns:
A set of the names of the active AWGs registered
"""
res = set()
for cdict in self.channels.values():
if cdict['active']:
res.add(cdict['AWG'])
return res
def start(self):
"""
Start the active AWGs. If multiple AWGs are used in a setup where the
slave AWGs are triggered by the master AWG, then the slave AWGs must be
running and waiting for trigger when the master AWG is started to
ensure synchronous playback.
"""
if self.master_AWG() is None:
for AWG in self.used_AWGs():
self._start_AWG(AWG)
else:
for AWG in self.used_AWGs():
if AWG != self.master_AWG():
self._start_AWG(AWG)
tstart = time.time()
for AWG in self.used_AWGs():
if AWG != self.master_AWG():
good = False
while time.time() - tstart < 10:
if self._is_AWG_running(AWG):
good = True
break
else:
time.sleep(0.1)
if not good:
raise Exception('AWG {} did not start in 10s'.format(AWG))
self._start_AWG(self.master_AWG())
def stop(self):
"""
Stop all active AWGs.
"""
for AWG in self.used_AWGs():
self._stop_AWG(AWG)
    def program_awgs(self, sequence, *elements, AWGs='all', channels='all',
                     loop=True, allow_first_nonzero=False, verbose=False):
        """
        Args:
            sequence: The `Sequence` object that determines the segment order,
                      repetition and trigger wait.
            *elements: The `Element` objects to program to the AWGs.
            AWGs: List of names of the AWGs to program. Default is 'all'.
            channels: List of names of the channels that should be programmed.
                      Default is `'all'`.
            loop: Boolean flag, whether the segments should be looped over.
                  Default is `True`.
            allow_first_nonzero: Boolean flag, whether to allow the first point
                                 of the element to be nonzero if the segment
                                 waits for a trigger. In Tektronix AWG5014,
                                 the output is set to the first value of the
                                 segment while waiting for the trigger. Default
                                 is `False`.
            verbose: Currently unused.
        """
        # Stores the last uploaded elements for easy access and plotting
        self.last_sequence = sequence
        self.last_elements = elements
        if AWGs == 'all':
            AWGs = self.used_AWGs()
        if channels == 'all':
            channels = self.channels.keys()
        # prequery all AWG clock values
        self._clock_prequeried(True)
        # Group the element waveforms by AWG and channel id:
        # dict(name of AWG ->
        #      dict(element name ->
        #           dict(channel id ->
        #                waveform data)))
        AWG_wfs = {}
        for i, el in enumerate(elements):
            tvals, waveforms = el.normalized_waveforms()
            for cname in waveforms:
                # Skip channels that were not requested or are inactive.
                if cname not in channels:
                    continue
                if not self.channels[cname]['active']:
                    continue
                cAWG = self.channels[cname]['AWG']
                cid = self.channels[cname]['id']
                if cAWG not in AWGs:
                    continue
                if cAWG not in AWG_wfs:
                    AWG_wfs[cAWG] = {}
                if (i, el.name) not in AWG_wfs[cAWG]:
                    AWG_wfs[cAWG][i, el.name] = {}
                AWG_wfs[cAWG][i, el.name][cid] = waveforms[cname]
        self.update_AWG5014_settings()
        # Dispatch the grouped waveforms to the programming routine matching
        # each instrument's type.
        for AWG in AWG_wfs:
            obj = self.AWG_obj(AWG=AWG)
            if isinstance(obj, Tektronix_AWG5014):
                self._program_AWG5014(obj, sequence, AWG_wfs[AWG], loop=loop,
                                      allow_first_nonzero=allow_first_nonzero)
            elif isinstance(obj, UHFQC):
                self._program_UHFQC(obj, sequence, AWG_wfs[AWG], loop=loop,
                                    allow_first_nonzero=allow_first_nonzero)
            else:
                raise TypeError('Unsupported AWG instrument: {} of type {}'
                                .format(AWG, type(obj)))
        self._clock_prequeried(False)
def _program_AWG5014(self, obj, sequence, el_wfs, loop=True,
allow_first_nonzero=False):
"""
Program the AWG with a sequence of segments.
Args:
obj: the instance of the AWG to program
sequence: the `Sequence` object that determines the segment order,
repetition and trigger wait
el_wfs: A dictionary from element name to a dictionary from channel
id to the waveform.
loop: Boolean flag, whether the segments should be looped over.
Default is `True`.
allow_first_nonzero: Boolean flag, whether to allow the first point
of the element to be nonzero if the segment
waits for a trigger. In Tektronix AWG5014,
the output is set to the first value of the
segment while waiting for the trigger. Default
is `False`.
"""
old_timeout = obj.timeout()
obj.timeout(max(180, old_timeout))
# determine which channel groups are involved in the sequence
grps = set()
for cid_wfs in el_wfs.values():
for cid in cid_wfs:
grps.add(cid[:3])
grps = list(grps)
grps.sort()
# create a packed waveform for each element for each channel group
# in the sequence
packed_waveforms = {}
elements_with_non_zero_first_points = set()
for (i, el), cid_wfs in sorted(el_wfs.items()):
maxlen = 0
for wf in cid_wfs.values():
if len(wf) > maxlen:
maxlen = len(wf)
for grp in grps:
grp_wfs = {}
# arrange waveforms from input data and pad with zeros for
# equal length
for cid in self._AWG5014_group_ids(grp):
grp_wfs[cid] = cid_wfs.get(cid, np.zeros(1))
cname = self._AWG5014_id_channel(cid, obj.name)
if cid[4:-1] == 'marker' or cname is None:
cval = 0
else:
cval = self.channels[cname]['offset']
hi = self.channels[cname]['high']
lo = self.channels[cname]['low']
cval = (2*cval - hi - lo)/(hi - lo)
grp_wfs[cid] = np.pad(grp_wfs[cid],
(0, maxlen - len(grp_wfs[cid])),
'constant',
constant_values=cval)
if grp_wfs[cid][0] != 0.:
elements_with_non_zero_first_points.add(el)
wfname = el + '_' + grp
packed_waveforms[wfname] = obj.pack_waveform(
grp_wfs[grp],
grp_wfs[grp + '_marker1'],
grp_wfs[grp + '_marker2'])
# sequence programming
_t0 = time.time()
if sequence.element_count() > 8000:
logging.warning("Error: trying to program '{:s}' ({:d}'".format(
sequence.name, sequence.element_count()) +
" element(s))...\n Sequence contains more than " +
"8000 elements, Aborting", end=' ')
return
print("Programming {} sequence '{}' ({} element(s)) \t".format(
obj.name, sequence.name, sequence.element_count()), end=' ')
# Create lists with sequence information:
# wfname_l = list of waveform names [[wf1_ch1,wf2_ch1..],
# [wf1_ch2,wf2_ch2..], ...]
# nrep_l = list specifying the number of reps for each seq element
# wait_l = idem for wait_trigger_state
# goto_l = idem for goto_state (goto is the element where it hops to in
# case the element is finished)
wfname_l = []
nrep_l = []
wait_l = []
goto_l = []
logic_jump_l = []
for grp in grps:
grp_wfnames = []
# add all wf names of channel
for i, el in sorted(el_wfs):
wfname = el + '_' + grp
grp_wfnames.append(wfname)
wfname_l.append(grp_wfnames)
for el in sequence.elements:
nrep_l.append(el['repetitions'])
if (el['repetitions'] < 1) or (el['repetitions'] > 65536):
raise Exception(
'Pulsar: The number of repetitions of AWG "{}" element "{}"'
' are out of range. Valid range = 1 to 65536 ("{}" received'
')'.format(obj.name, el['wfname'], el['repetitions'])
)
if el['goto_target'] is not None:
goto_l.append(sequence.element_index(el['goto_target']))
else:
goto_l.append(0)
if el['jump_target'] is not None:
logic_jump_l.append(sequence.element_index(el['jump_target']))
else:
logic_jump_l.append(0)
if el['trigger_wait']:
wait_l.append(1)
if el['wfname'] in elements_with_non_zero_first_points and \
not allow_first_nonzero:
raise Exception('Pulsar: Trigger wait set for element {} '
'with a non-zero first point'.format(
el['wfname']))
else:
wait_l.append(0)
if loop and len(goto_l) > 0:
goto_l[-1] = 1
if len(wfname_l) > 0:
filename = sequence.name + '_FILE.AWG'
awg_file = obj.generate_awg_file(packed_waveforms,
np.array(wfname_l), nrep_l, wait_l,
goto_l, logic_jump_l,
self._AWG5014_chan_cfg(obj.name))
obj.send_awg_file(filename, awg_file)
obj.load_awg_file(filename)
else:
awg_file = None
obj.timeout(old_timeout)
time.sleep(.1)
# Waits for AWG to be ready
obj.is_awg_ready()
self._AWG5014_activate_channels(grps, obj.name)
_t = time.time() - _t0
print(" finished in {:.2f} seconds.".format(_t))
return awg_file
    def _program_UHFQC(self, obj, sequence, el_wfs, loop=True,
                       allow_first_nonzero=False):
        """Program the UHFQC by generating a seqc program (with ramp
        placeholders for each waveform) and then uploading the real waveform
        data into the corresponding slots.

        Args:
            obj: the UHFQC instrument instance to program.
            sequence: the `Sequence` object with the element playback order.
            el_wfs: dict mapping (index, element name) to a dict from channel
                    id ('ch1'/'ch2') to the waveform data.
            loop: whether to wrap the playback in an infinite while loop.
            allow_first_nonzero: whether a trigger-waiting element may start
                                 with a nonzero sample.

        Returns:
            The generated seqc program string.
        """
        header = """const TRIGGER1 = 0x000001;
        const WINT_TRIG = 0x000010;
        const IAVG_TRIG = 0x000020;
        const WINT_EN = 0x1f0000;
        setTrigger(WINT_EN);
        var loop_cnt = getUserReg(0);
        var RO_TRIG;
        if (getUserReg(1)) {
          RO_TRIG=IAVG_TRIG;
        }else{
          RO_TRIG=WINT_TRIG;
        }
        \n"""
        if loop:
            main_loop = 'while(1) {\n'
            footer = '}\n'
        else:
            main_loop = ''
            footer = ''
        main_loop += 'repeat (loop_cnt) {\n'
        footer += """}
        wait(1000);
        setTrigger(0);
        """
        # parse elements
        elements_with_non_zero_first_points = []
        wfnames = {'ch1': [], 'ch2': []}
        wfdata = {'ch1': [], 'ch2': []}
        i = 1
        for i, el in el_wfs:
            for cid in ['ch1', 'ch2']:
                if cid in el_wfs[i, el]:
                    wfname = el + '_' + cid
                    cid_wf = el_wfs[i, el][cid]
                    wfnames[cid].append(wfname)
                    wfdata[cid].append(cid_wf)
                    if cid_wf[0] != 0.:
                        elements_with_non_zero_first_points.append(el)
                    # Declare a ramp placeholder of the right length; the real
                    # samples are uploaded via awg_update_waveform below.
                    # NOTE(review): `i` doubles as the (index, name) loop key
                    # and as a counter incremented below, so the 1/i amplitude
                    # of the placeholder looks suspicious — confirm intent.
                    header += 'wave {} = ramp({}, 0, {});\n'.format(
                        wfname, len(cid_wf), 1 / i
                    )
                    i += 1
                else:
                    wfnames[cid].append(None)
                    wfdata[cid].append(None)
        # create waveform playback code
        for i, el in enumerate(sequence.elements):
            if el['goto_target'] is not None:
                raise NotImplementedError(
                    'UHFQC sequencer does not yet support nontrivial goto-s.')
            if el['jump_target'] is not None:
                raise NotImplementedError('UHFQC sequencer does not support'
                                          ' jump events.')
            if el['trigger_wait']:
                if el['wfname'] in elements_with_non_zero_first_points and \
                        not allow_first_nonzero:
                    raise Exception(
                        'Pulsar: Trigger wait set for element {} '
                        'with a non-zero first point'.format(el['wfname']))
            name_ch1 = wfnames['ch1'][i]
            name_ch2 = wfnames['ch2'][i]
            main_loop += self._UHFQC_element_seqc(el['repetitions'],
                                                  el['trigger_wait'],
                                                  name_ch1, name_ch2, True)
        awg_str = header + main_loop + footer
        obj.awg_string(awg_str)
        # populate the waveforms with data
        i = 0
        for data1, data2 in zip(wfdata['ch1'], wfdata['ch2']):
            if data1 is None and data2 is None:
                continue
            elif data1 is None:
                obj.awg_update_waveform(i, data2)
                i += 1
            elif data2 is None:
                obj.awg_update_waveform(i, data1)
                i += 1
            else:
                # interleave ch1/ch2 samples into a single dual-channel wave
                data12 = np.vstack((data1, data2,)).reshape((-1,), order='F')
                obj.awg_update_waveform(i, data12)
                i += 1
        return awg_str
def _start_AWG(self, AWG):
obj = self.AWG_obj(AWG=AWG)
if isinstance(obj, Tektronix_AWG5014):
obj.start()
elif isinstance(obj, UHFQC):
obj.acquisition_arm()
else:
raise ValueError('Unsupported AWG type: {}'.format(type(obj)))
def _stop_AWG(self, AWG):
obj = self.AWG_obj(AWG=AWG)
if isinstance(obj, Tektronix_AWG5014):
obj.stop()
elif isinstance(obj, UHFQC):
obj._daq.syncSetInt('/' + obj._device + '/awgs/0/enable', 0)
else:
raise ValueError('Unsupported AWG type: {}'.format(type(obj)))
def _is_AWG_running(self, AWG):
obj = self.AWG_obj(AWG=AWG)
if isinstance(obj, Tektronix_AWG5014):
return obj.get_state() != 'Idle'
elif isinstance(obj, UHFQC):
raise NotImplementedError()
else:
raise ValueError('Unsupported AWG type: {}'.format(type(obj)))
    def _set_default_AWG(self, AWG):
        # Resolve the AWG name and store the instrument as the default AWG.
        self.AWG = self.AWG_obj(AWG=AWG)
    def _get_default_AWG(self):
        # Return the name of the currently stored default AWG instrument.
        return self.AWG.name
    def _clock_prequeried(self, status):
        # Enable/disable the clock cache. When enabling, the clock of every
        # AWG with a registered channel is queried once and cached so that
        # repeated `clock()` calls during programming avoid hardware queries.
        if status:
            # Deliberately clear the flag first so that the `self.clock(c)`
            # calls below query the instruments instead of the (stale) cache.
            self._clock_prequeried_state = False
            self._clocks = {}
            for c, d in self.channels.items():
                if d['AWG'] not in self._clocks:
                    self._clocks[d['AWG']] = self.clock(c)
            self._clock_prequeried_state = True
        else:
            self._clock_prequeried_state = False
###################################
# AWG5014 specific helper functions
    @staticmethod
    def update_channel_settings():
        # Deprecated stub kept so legacy call sites do not crash; it only
        # logs an error pointing users to the multi-AWG replacement API.
        logging.error('Pulsar.update_channel_settings() is deprecated with the'
                      ' multi-AWG support. Please update your code.')
    def update_AWG5014_settings(self, AWGs='all'):
        """
        Updates the AWG5014 parameters to the values in
        `self.channels`
        Args:
            AWGs: A list of AWG names to update or 'all'. Default 'all'.
        """
        for cname, cdict in self.channels.items():
            if AWGs == 'all' or cdict['AWG'] in AWGs:
                obj = self.AWG_obj(channel=cname)
                if not isinstance(obj, Tektronix_AWG5014):
                    # Only AWG5014 instruments expose these parameters.
                    continue
                if cdict['type'] == 'analog':
                    # Convert the high/low channel bounds into the
                    # amplitude/offset pair the instrument expects.
                    amp = cdict['high'] - cdict['low']
                    offset = (cdict['low'] + cdict['high'])/2
                    obj.set('{}_amp'.format(cdict['id']), amp)
                    obj.set('{}_offset'.format(cdict['id']), offset)
                else:  # c_dict['type'] == 'marker'
                    # Marker levels are set directly as low/high values.
                    cid = cdict['id']
                    low_par = 'ch{}_m{}_low'.format(cid[2], cid[-1])
                    high_par = 'ch{}_m{}_high'.format(cid[2], cid[-1])
                    obj.set(low_par, cdict['low'])
                    obj.set(high_par, cdict['high'])
@staticmethod
def _AWG5014_group_ids(cid):
"""
Returns all id-s corresponding to a single channel group.
For example `Pulsar._AWG5014_group_ids('ch2')` returns `['ch2',
'ch2_marker1', 'ch2_marker2']`.
Args:
cid: An id of one of the AWG5014 channels.
Returns: A list of id-s corresponding to the same group as `cid`.
"""
return [cid[:3], cid[:3] + '_marker1', cid[:3] + '_marker2']
def _AWG5014_id_channel(self, cid, AWG):
"""
Returns the channel name corresponding to the channel with id `cid` on
the AWG `AWG`.
Args:
cid: An id of one of the AWG5014 channels.
AWG: The name of the AWG.
Returns: The corresponding channel name. If the channel is not found,
returns `None`.
"""
for cname, cdict in self.channels.items():
if cdict['AWG'] == AWG and cdict['id'] == cid:
return cname
return None
def _AWG5014_activate_channels(self, grps, AWG):
"""
Turns on AWG5014 channel groups.
Args:
grps: An iterable of channel group id-s to turn on.
AWG: The name of the AWG.
"""
for grp in grps:
self.AWG_obj(AWG=AWG).set('{}_state'.format(grp), 1)
def _AWG5014_chan_cfg(self, AWG):
channel_cfg = {}
for cdict in self.channels.values():
if cdict['AWG'] != AWG:
continue
cid = cdict['id']
if cdict['type'] == 'analog':
channel_cfg['ANALOG_METHOD_' + cid[2]] = 1
amp = cdict['high'] - cdict['low']
off = (cdict['high'] + cdict['low'])/2.
channel_cfg['ANALOG_AMPLITUDE_' + cid[2]] = amp
channel_cfg['ANALOG_OFFSET_' + cid[2]] = off
elif cdict['type'] == 'marker':
channel_cfg['MARKER1_METHOD_' + cid[2]] = 2
channel_cfg['MARKER2_METHOD_' + cid[2]] = 2
channel_cfg['MARKER{}_LOW_{}'.format(cid[-1], cid[2])] = \
cdict['low']
channel_cfg['MARKER{}_HIGH_{}'.format(cid[-1], cid[2])] = \
cdict['high']
channel_cfg['CHANNEL_STATE_' + cid[2]] = 0
# activate only active channels
for cdict in self.channels.values():
if cdict['AWG'] != AWG:
continue
cid = cdict['id']
if cdict['active']:
channel_cfg['CHANNEL_STATE_' + cid[2]] = 1
return channel_cfg
###################################
# UHFQC specific helper functions
@staticmethod
def _UHFQC_element_seqc(reps, wait, name1, name2, readout):
"""
Generates a part of the sequence code responsible for playing back a
single element
Args:
reps: number of repetitions for this code
wait: boolean flag, whether to wait for trigger
name1: name of the wave to be played on channel 1
name2: name of the wave to be played on channel 2
readout: boolean flag, whether to acquire a datapoint after the
element
Returns:
string for playing back an element
"""
repeat_open_str = '\trepeat ({}) {{\n'.format(reps) if reps != 0 else ''
wait_wave_str = '\t\twaitWave();\n' if wait else ''
trigger_str = '\t\twaitDigTrigger(1, 1);\n' if wait else ''
if name1 is None:
play_str = '\t\tplayWave(2, {});\n'.format(name2)
elif name2 is None:
play_str = '\t\tplayWave(1, {});\n'.format(name1)
else:
play_str = '\t\tplayWave({}, {});\n'.format(name1, name2)
readout_str = '\t\tsetTrigger(WINT_EN+RO_TRIG);\n' if readout else ''
readout_str += '\t\tsetTrigger(WINT_EN);\n' if readout else ''
repeat_close_str = '\t}\n' if reps != 0 else ''
return repeat_open_str + trigger_str + play_str + readout_str + \
wait_wave_str + repeat_close_str
| [
"logging.error",
"numpy.zeros",
"time.sleep",
"time.time",
"numpy.array",
"numpy.vstack",
"qcodes.instrument.base.Instrument.find_instrument"
] | [((5398, 5430), 'qcodes.instrument.base.Instrument.find_instrument', 'Instrument.find_instrument', (['name'], {}), '(name)\n', (5424, 5430), False, 'from qcodes.instrument.base import Instrument\n'), ((14275, 14286), 'time.time', 'time.time', ([], {}), '()\n', (14284, 14286), False, 'import time\n'), ((17445, 17460), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (17455, 17460), False, 'import time\n'), ((22603, 22729), 'logging.error', 'logging.error', (['"""Pulsar.update_channel_settings() is deprecated with the multi-AWG support. Please update your code."""'], {}), "(\n 'Pulsar.update_channel_settings() is deprecated with the multi-AWG support. Please update your code.'\n )\n", (22616, 22729), False, 'import logging\n'), ((7356, 7367), 'time.time', 'time.time', ([], {}), '()\n', (7365, 7367), False, 'import time\n'), ((17594, 17605), 'time.time', 'time.time', ([], {}), '()\n', (17603, 17605), False, 'import time\n'), ((17088, 17106), 'numpy.array', 'np.array', (['wfname_l'], {}), '(wfname_l)\n', (17096, 17106), True, 'import numpy as np\n'), ((13195, 13206), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (13203, 13206), True, 'import numpy as np\n'), ((7513, 7524), 'time.time', 'time.time', ([], {}), '()\n', (7522, 7524), False, 'import time\n'), ((7726, 7741), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7736, 7741), False, 'import time\n'), ((20796, 20821), 'numpy.vstack', 'np.vstack', (['(data1, data2)'], {}), '((data1, data2))\n', (20805, 20821), True, 'import numpy as np\n')] |
import argparse
import os
import torch
import torch.nn as nn
from model import VGG16
from vis_flux import vis_flux, vis_flux_v2, label2color
from datasets import FluxSegmentationDataset
from torch.autograd import Variable
import scipy.io as sio
from torch.utils.data import Dataset, DataLoader
import cv2
import glob
import numpy as np
# Default configuration values; each can be overridden via a CLI flag.
DATASET = 'PascalContext'
TEST_VIS_DIR = './test_pred_flux/'
SNAPSHOT_DIR = './snapshots/'
def get_arguments():
    """Build the CLI parser and parse all arguments provided from the CLI.

    Returns:
        A namespace of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Super-BPD Network")
    option_specs = [
        ("--dataset", DATASET, "Dataset for training."),
        ("--test-vis-dir", TEST_VIS_DIR,
         "Directory for saving vis results during testing."),
        ("--snapshot-dir", SNAPSHOT_DIR,
         "Where to save snapshots of the model."),
    ]
    for flag, default, help_text in option_specs:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    return parser.parse_args()
# Parse the CLI arguments once at import time for use throughout the script.
args = get_arguments()
def main():
    """Run flux prediction on every frame image and save visualisations
    and per-image .mat flux files."""
    # Create the output directory for this dataset if needed.
    if not os.path.exists(args.test_vis_dir + args.dataset):
        os.makedirs(args.test_vis_dir + args.dataset)
    model = VGG16()
    # Load pretrained weights on CPU.
    model.load_state_dict(torch.load('PascalContext_400000.pth', map_location=torch.device('cpu')))
    model.eval()
    # model.cuda()
    # dataloader = DataLoader(FluxSegmentationDataset(dataset=args.dataset, mode='test'), batch_size=1, shuffle=False, num_workers=4)
    # for i_iter, batch_data in enumerate(dataloader):
    image_dir = '..\\video frame\*'
    image_files = sorted(glob.glob(image_dir))
    # Per-channel mean subtracted before inference — presumably in BGR order
    # to match cv2.imread; confirm against the training pipeline.
    IMAGE_MEAN = np.array([103.939, 116.779, 123.675], dtype=np.float32)
    for image_path in image_files:
        image_name = image_path.split('\\')[-1].split('.')[0]
        print(image_path, image_name)
        image = cv2.imread(image_path, 1)  # 3-channel color read
        vis_image = image.copy()  # keep an unmodified copy for visualisation
        # print(vis_image.shape)
        image = image.astype(np.float32)
        image -= IMAGE_MEAN
        image = image.transpose(2, 0, 1)  # HWC -> CHW for the network
        # Input_image, vis_image, gt_mask, gt_flux, weight_matrix, dataset_lendth, image_name = batch_data
        # print(i_iter, dataset_lendth)
        # pred_flux = model(Input_image.cuda())
        Input_image = torch.from_numpy(image).unsqueeze(0)  # add batch dim
        with torch.no_grad() as f:
            pred_flux = model(Input_image)
        # print(pred_flux)
        vis_flux_v2(vis_image, pred_flux, image_name, args.test_vis_dir)
        # vis_flux(vis_image, pred_flux, gt_flux, gt_mask, image_name, args.test_vis_dir + args.dataset + '/')
        # pred_flux = pred_flux.data.cpu().numpy()[0, ...]
        pred_flux = pred_flux.numpy()[0, ...]
        sio.savemat(args.test_vis_dir + args.dataset + '/' + image_name + '.mat', {'flux': pred_flux})
# Script entry point.
if __name__ == '__main__':
    main()
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.exists",
"scipy.io.savemat",
"vis_flux.vis_flux_v2",
"model.VGG16",
"cv2.imread",
"numpy.array",
"glob.glob",
"torch.device",
"torch.no_grad",
"torch.from_numpy"
] | [((597, 653), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Super-BPD Network"""'}), "(description='Super-BPD Network')\n", (620, 653), False, 'import argparse\n'), ((1286, 1293), 'model.VGG16', 'VGG16', ([], {}), '()\n', (1291, 1293), False, 'from model import VGG16\n'), ((1735, 1790), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.675]'], {'dtype': 'np.float32'}), '([103.939, 116.779, 123.675], dtype=np.float32)\n', (1743, 1790), True, 'import numpy as np\n'), ((1166, 1214), 'os.path.exists', 'os.path.exists', (['(args.test_vis_dir + args.dataset)'], {}), '(args.test_vis_dir + args.dataset)\n', (1180, 1214), False, 'import os\n'), ((1225, 1270), 'os.makedirs', 'os.makedirs', (['(args.test_vis_dir + args.dataset)'], {}), '(args.test_vis_dir + args.dataset)\n', (1236, 1270), False, 'import os\n'), ((1695, 1715), 'glob.glob', 'glob.glob', (['image_dir'], {}), '(image_dir)\n', (1704, 1715), False, 'import glob\n'), ((1946, 1971), 'cv2.imread', 'cv2.imread', (['image_path', '(1)'], {}), '(image_path, 1)\n', (1956, 1971), False, 'import cv2\n'), ((2536, 2600), 'vis_flux.vis_flux_v2', 'vis_flux_v2', (['vis_image', 'pred_flux', 'image_name', 'args.test_vis_dir'], {}), '(vis_image, pred_flux, image_name, args.test_vis_dir)\n', (2547, 2600), False, 'from vis_flux import vis_flux, vis_flux_v2, label2color\n'), ((2831, 2930), 'scipy.io.savemat', 'sio.savemat', (["(args.test_vis_dir + args.dataset + '/' + image_name + '.mat')", "{'flux': pred_flux}"], {}), "(args.test_vis_dir + args.dataset + '/' + image_name + '.mat', {\n 'flux': pred_flux})\n", (2842, 2930), True, 'import scipy.io as sio\n'), ((2431, 2446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2444, 2446), False, 'import torch\n'), ((1375, 1394), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1387, 1394), False, 'import torch\n'), ((2380, 2403), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2396, 2403), False, 'import 
torch\n')] |
import os
import numpy as np
from PIL import Image, ImageFilter
import cv2
parent_path = './dataset' #menggunakan folder 'dataset' sebagai parent path
all_directories = os.listdir(parent_path+ '/')
#initialisasi in/out training & test
in_train = []
out_train = []
in_test = []
out_test = []
count = 0 #initialisasi count untuk loading image
def imageprepare(argv):
    """Load an image, scale and center it on a 32x32 grayscale canvas and
    return its normalised pixels.

    The longer image side is resized to 30 px (aspect ratio preserved),
    sharpened, and pasted centered on a white 32x32 canvas.

    Args:
        argv: path of the image file to prepare.

    Returns:
        A list of 1024 floats in [0, 1]; 0 is pure white, 1 is pure black.
    """
    source = Image.open(argv).convert('L')  # 8-bit grayscale (mode 'L')
    width = float(source.size[0])
    height = float(source.size[1])
    canvas = Image.new('L', (32, 32), (255))  # white 32x32 canvas
    if width > height:
        # Landscape: fix the width at 30 px and scale the height to match.
        new_height = int(round((30 / width * height), 0))
        resized = source.resize((30, new_height),
                                Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        y_offset = int(round(((32 - new_height) / 2), 0))  # vertical centering
        canvas.paste(resized, (4, y_offset))
    else:
        # Portrait or square: fix the height at 30 px and scale the width.
        new_width = int(round((30 / height * width), 0))
        resized = source.resize((new_width, 30),
                                Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        x_offset = int(round(((32 - new_width) / 2), 0))  # horizontal centering
        canvas.paste(resized, (x_offset, 4))
    # Normalise: PIL yields 0 (black) .. 255 (white); invert so black -> 1.
    return [(255 - value) / 255 for value in canvas.getdata()]
def load_data():
    """Scan the dataset folders and build 90/10 train/test splits with
    one-hot output vectors for the 20 listed character classes."""
    global count
    # scan the folders whose names start with the listed class labels
    for i in ['BA', 'CA', 'DA', 'DHA', 'GA', 'HA', 'JA', 'KA', 'LA', 'MA', 'NA', 'NGA', 'NYA', 'PA', 'RA', 'SA', 'TA', 'THA', 'WA', 'YA']:
        for z in all_directories:
            output_vector = np.zeros((63,1))
            path = parent_path + '/' + z
            if z.startswith(str(i)) and os.path.isdir(path):
                print(path)
                total_files = len(os.listdir(path))
                # 90% of each folder goes to training, the rest to testing.
                training_files = int(90/100 * total_files)
                test_files = total_files - training_files
                lim = 0
                for file_name in os.listdir(path):
                    pixs = imageprepare(path + '/' + file_name)
                    input_image = np.array(pixs)
                    input_image = input_image.reshape(32,32,1)  # reshape the flat pixel list into a 32x32x1 image
                    if lim == 0:  # preview the first image of every folder
                        cv2.imshow('abcd', input_image)
                        cv2.waitKey(1)
                    if lim < training_files:
                        in_train.append(input_image)  # add the image vector to the training inputs
                        output_vector[count] = 1
                        out_train.append(output_vector)  # add the one-hot vector to the training outputs
                    elif lim < training_files + test_files:
                        in_test.append(input_image)
                        output_vector[count] = 1
                        out_test.append(output_vector)
                    else:
                        break
                    lim += 1
                # NOTE(review): `count` advances once per matching directory,
                # not per class label — confirm each class has exactly one
                # directory, otherwise the one-hot indices drift.
                count += 1
    return in_train, out_train, in_test, out_test
if __name__ == "__main__":
in_train, out_train, in_test, out_test = load_data()
np.save('in_train.npy', in_train)
np.save('out_train.npy', out_train)
np.save('in_test.npy', in_test)
np.save('out_test.npy', out_test)
| [
"PIL.Image.new",
"numpy.save",
"os.path.isdir",
"cv2.waitKey",
"numpy.zeros",
"PIL.Image.open",
"numpy.array",
"cv2.imshow",
"os.listdir"
] | [((170, 199), 'os.listdir', 'os.listdir', (["(parent_path + '/')"], {}), "(parent_path + '/')\n", (180, 199), False, 'import os\n'), ((666, 695), 'PIL.Image.new', 'Image.new', (['"""L"""', '(32, 32)', '(255)'], {}), "('L', (32, 32), 255)\n", (675, 695), False, 'from PIL import Image, ImageFilter\n'), ((3581, 3614), 'numpy.save', 'np.save', (['"""in_train.npy"""', 'in_train'], {}), "('in_train.npy', in_train)\n", (3588, 3614), True, 'import numpy as np\n'), ((3619, 3654), 'numpy.save', 'np.save', (['"""out_train.npy"""', 'out_train'], {}), "('out_train.npy', out_train)\n", (3626, 3654), True, 'import numpy as np\n'), ((3659, 3690), 'numpy.save', 'np.save', (['"""in_test.npy"""', 'in_test'], {}), "('in_test.npy', in_test)\n", (3666, 3690), True, 'import numpy as np\n'), ((3695, 3728), 'numpy.save', 'np.save', (['"""out_test.npy"""', 'out_test'], {}), "('out_test.npy', out_test)\n", (3702, 3728), True, 'import numpy as np\n'), ((382, 398), 'PIL.Image.open', 'Image.open', (['argv'], {}), '(argv)\n', (392, 398), False, 'from PIL import Image, ImageFilter\n'), ((2029, 2046), 'numpy.zeros', 'np.zeros', (['(63, 1)'], {}), '((63, 1))\n', (2037, 2046), True, 'import numpy as np\n'), ((2128, 2147), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2141, 2147), False, 'import os\n'), ((2405, 2421), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2415, 2421), False, 'import os\n'), ((2212, 2228), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2222, 2228), False, 'import os\n'), ((2521, 2535), 'numpy.array', 'np.array', (['pixs'], {}), '(pixs)\n', (2529, 2535), True, 'import numpy as np\n'), ((2746, 2777), 'cv2.imshow', 'cv2.imshow', (['"""abcd"""', 'input_image'], {}), "('abcd', input_image)\n", (2756, 2777), False, 'import cv2\n'), ((2802, 2816), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2813, 2816), False, 'import cv2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 18:55:31 2019
@author: shreya
"""
import numpy as np
from scipy.io import loadmat
from scipy.stats import mode
from PCA import PCA
from LDA import LDA
# Interactive face-recognition experiment: pick a dataset and an optional
# dimensionality reduction, then classify with k-nearest-neighbours.
data = int(input("Select 1.data.mat 2.pose.mat 3.illumination.mat: "))
opt = int(input("Select 1.KNN 2.KNN+PCA 3.KNN+LDA: "))
''' selecting the dataset'''
''' dividing into train and test'''
if data ==1:
    x = loadmat('data.mat')
    t = x['face'] # access the images
    t = np.matrix(t.reshape((504,600)))
    c = 200 # number of classes
    d = 504
    ntrain = 400
    ntest = 200
    n = 2 # no.of train samples per class
    x_train = np.matrix(np.zeros((d,ntrain)),dtype = complex)
    x_test = np.matrix(np.zeros((d,ntest)),dtype= complex)
    label_train = np.zeros((ntrain,1))
    label_test = np.zeros((ntest,1))
    # First two images per class go to training, the third to testing.
    for i in range(0,c):
        count = 0
        for j in range(0,3):
            if j==0 or j==1:
                x_train[:,2*i+count] = t[:,3*i+j]
                label_train[2*i+count] = i
                count = count+1
            else:
                x_test[:,i] = t[:,3*i+j]
                label_test[i] = i
elif data==2:
    x = loadmat('pose.mat')
    t = x['pose']
    d = 1920
    c = 68
    percent = 0.6 #percent of data for training
    n = round(percent*13) # no.of train samples per class
    ntrain = n*68
    ntest = (13-n)*68
    x_train = np.matrix(np.zeros((d,ntrain)),dtype = complex)
    x_test = np.matrix(np.zeros((d,ntest)),dtype= complex)
    label_train = np.zeros((ntrain,1))
    label_test = np.zeros((ntest,1))
    # First n poses per subject for training, the remaining 13-n for testing.
    for i in range(0,c):
        for j in range(0,n):
            x_train[:,n*i+j] = np.reshape(t[:,:,j,i],(d,1))
            label_train[n*i+j] = i
        for j in range(0,(13-n)):
            x_test[:,(13-n)*i+j] = np.reshape(t[:,:,n+j,i],(d,1))
            label_test[(13-n)*i+j]= i
elif data==3:
    x = loadmat('illumination.mat')
    t = x['illum']
    c = 68
    d =1920
    percent = 0.6
    n = round(percent*21)
    ntrain = n*68
    ntest = (21-n)*68
    x_train = np.matrix(np.zeros((d,ntrain)),dtype = complex)
    x_test = np.matrix(np.zeros((d,ntest)),dtype= complex)
    label_train = np.zeros((ntrain,1))
    label_test = np.zeros((ntest,1))
    # First n illuminations per subject for training, the rest for testing.
    for i in range(0,c):
        for j in range(0,n):
            x_train[:,n*i+j] = np.reshape(t[:,j,i],(d,1))
            label_train[n*i+j] = i
        for j in range(0,(21-n)):
            x_test[:,(21-n)*i+j] = np.reshape(t[:,n+j,i],(d,1))
            label_test[(21-n)*i+j]= i
''' dimensionality reduction or original data'''
if opt==1:
    x_train,x_test = x_train,x_test
elif opt==2:
    x_train,x_test = PCA(x_train,x_test)
    d,_ = x_train.shape
elif opt ==3:
    x_train,x_test = LDA(x_train,x_test,c,n)
    d,_ = x_train.shape
k = 1 # k nearest neighbours
''' getting k nearest neighbours'''
solution = np.zeros((ntest,1))
for i in range(0,ntest):
    # Keep the k smallest distances seen so far (initialised to infinity)
    # together with the labels of the corresponding training samples.
    knn = np.zeros((k,1))
    knn_label = np.zeros((k,1))
    knn[:] = np.inf
    knn_label[:] = np.inf
    knn[0] = np.linalg.norm(x_test[:,i]-x_train[:,0])
    knn_label[0]=label_train[0]
    for j in range(1,ntrain):
        norm = np.linalg.norm(x_test[:,i]-x_train[:,j])
        val_max = np.max(knn)
        argmax = np.argmax(knn)
        # Replace the current worst neighbour if this sample is closer.
        if norm<val_max:
            knn[argmax] = norm
            knn_label[argmax] = label_train[j]
    solution[i],_ = mode(knn_label[:]) # choosing the label that is most frequent, majority voting
accuracy = 0.0
for z in range(0,ntest):
    if solution[z]== label_test[z]:
        accuracy = accuracy + 1
accuracy = accuracy / ntest;
print("The accuracy is: ")
print(accuracy)
"scipy.stats.mode",
"scipy.io.loadmat",
"numpy.argmax",
"numpy.zeros",
"LDA.LDA",
"PCA.PCA",
"numpy.max",
"numpy.linalg.norm",
"numpy.reshape"
] | [((2896, 2916), 'numpy.zeros', 'np.zeros', (['(ntest, 1)'], {}), '((ntest, 1))\n', (2904, 2916), True, 'import numpy as np\n'), ((441, 460), 'scipy.io.loadmat', 'loadmat', (['"""data.mat"""'], {}), "('data.mat')\n", (448, 460), False, 'from scipy.io import loadmat\n'), ((795, 816), 'numpy.zeros', 'np.zeros', (['(ntrain, 1)'], {}), '((ntrain, 1))\n', (803, 816), True, 'import numpy as np\n'), ((833, 853), 'numpy.zeros', 'np.zeros', (['(ntest, 1)'], {}), '((ntest, 1))\n', (841, 853), True, 'import numpy as np\n'), ((2952, 2968), 'numpy.zeros', 'np.zeros', (['(k, 1)'], {}), '((k, 1))\n', (2960, 2968), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.zeros', 'np.zeros', (['(k, 1)'], {}), '((k, 1))\n', (2992, 3000), True, 'import numpy as np\n'), ((3069, 3113), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_test[:, i] - x_train[:, 0])'], {}), '(x_test[:, i] - x_train[:, 0])\n', (3083, 3113), True, 'import numpy as np\n'), ((3418, 3436), 'scipy.stats.mode', 'mode', (['knn_label[:]'], {}), '(knn_label[:])\n', (3422, 3436), False, 'from scipy.stats import mode\n'), ((680, 701), 'numpy.zeros', 'np.zeros', (['(d, ntrain)'], {}), '((d, ntrain))\n', (688, 701), True, 'import numpy as np\n'), ((741, 761), 'numpy.zeros', 'np.zeros', (['(d, ntest)'], {}), '((d, ntest))\n', (749, 761), True, 'import numpy as np\n'), ((1196, 1215), 'scipy.io.loadmat', 'loadmat', (['"""pose.mat"""'], {}), "('pose.mat')\n", (1203, 1215), False, 'from scipy.io import loadmat\n'), ((1543, 1564), 'numpy.zeros', 'np.zeros', (['(ntrain, 1)'], {}), '((ntrain, 1))\n', (1551, 1564), True, 'import numpy as np\n'), ((1581, 1601), 'numpy.zeros', 'np.zeros', (['(ntest, 1)'], {}), '((ntest, 1))\n', (1589, 1601), True, 'import numpy as np\n'), ((2679, 2699), 'PCA.PCA', 'PCA', (['x_train', 'x_test'], {}), '(x_train, x_test)\n', (2682, 2699), False, 'from PCA import PCA\n'), ((3192, 3236), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_test[:, i] - x_train[:, j])'], {}), '(x_test[:, i] - x_train[:, j])\n', 
(3206, 3236), True, 'import numpy as np\n'), ((3251, 3262), 'numpy.max', 'np.max', (['knn'], {}), '(knn)\n', (3257, 3262), True, 'import numpy as np\n'), ((3280, 3294), 'numpy.argmax', 'np.argmax', (['knn'], {}), '(knn)\n', (3289, 3294), True, 'import numpy as np\n'), ((1428, 1449), 'numpy.zeros', 'np.zeros', (['(d, ntrain)'], {}), '((d, ntrain))\n', (1436, 1449), True, 'import numpy as np\n'), ((1489, 1509), 'numpy.zeros', 'np.zeros', (['(d, ntest)'], {}), '((d, ntest))\n', (1497, 1509), True, 'import numpy as np\n'), ((1911, 1938), 'scipy.io.loadmat', 'loadmat', (['"""illumination.mat"""'], {}), "('illumination.mat')\n", (1918, 1938), False, 'from scipy.io import loadmat\n'), ((2205, 2226), 'numpy.zeros', 'np.zeros', (['(ntrain, 1)'], {}), '((ntrain, 1))\n', (2213, 2226), True, 'import numpy as np\n'), ((2243, 2263), 'numpy.zeros', 'np.zeros', (['(ntest, 1)'], {}), '((ntest, 1))\n', (2251, 2263), True, 'import numpy as np\n'), ((2758, 2784), 'LDA.LDA', 'LDA', (['x_train', 'x_test', 'c', 'n'], {}), '(x_train, x_test, c, n)\n', (2761, 2784), False, 'from LDA import LDA\n'), ((1686, 1719), 'numpy.reshape', 'np.reshape', (['t[:, :, j, i]', '(d, 1)'], {}), '(t[:, :, j, i], (d, 1))\n', (1696, 1719), True, 'import numpy as np\n'), ((1819, 1856), 'numpy.reshape', 'np.reshape', (['t[:, :, n + j, i]', '(d, 1)'], {}), '(t[:, :, n + j, i], (d, 1))\n', (1829, 1856), True, 'import numpy as np\n'), ((2090, 2111), 'numpy.zeros', 'np.zeros', (['(d, ntrain)'], {}), '((d, ntrain))\n', (2098, 2111), True, 'import numpy as np\n'), ((2151, 2171), 'numpy.zeros', 'np.zeros', (['(d, ntest)'], {}), '((d, ntest))\n', (2159, 2171), True, 'import numpy as np\n'), ((2348, 2378), 'numpy.reshape', 'np.reshape', (['t[:, j, i]', '(d, 1)'], {}), '(t[:, j, i], (d, 1))\n', (2358, 2378), True, 'import numpy as np\n'), ((2479, 2513), 'numpy.reshape', 'np.reshape', (['t[:, n + j, i]', '(d, 1)'], {}), '(t[:, n + j, i], (d, 1))\n', (2489, 2513), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
import os
from types import SimpleNamespace
sys.path.append(os.getcwd())
import os.path as osp
import argparse
import json
import random
from collections import OrderedDict, defaultdict
import socket
import getpass
import torch
import numpy as np
from config import CLASS_NUM, IMAGE_DATA_ROOT, MODELS_TEST_STANDARD
from dataset.target_class_dataset import CIFAR10Dataset, CIFAR100Dataset, ImageNetDataset,TinyImageNetDataset
from dataset.dataset_loader_maker import DataLoaderMaker
from models.standard_model import StandardModel
from models.defensive_model import DefensiveModel
import glog as log
from torch.nn import functional as F
import boundary_attack.foolbox as foolbox
from boundary_attack.foolbox.attacks.boundary_attack import BoundaryAttack
def parse_args():
    """Build and parse the command-line options of the boundary-attack script.

    When the script is launched without any command-line argument, the help
    text is printed and the process exits with status 1.

    Returns:
        argparse.Namespace carrying every parsed option.
    """
    parser = argparse.ArgumentParser(description='')
    add = parser.add_argument
    # general experiment options
    add("--gpu", type=int, required=True)
    add('--exp-dir', default='logs', type=str, help='directory to save results and logs')
    add('--dataset', required=True, type=str, choices=['CIFAR-10', 'CIFAR-100', 'ImageNet', 'TinyImageNet'],
        help='which dataset to use')
    add('--phase', default='test', type=str, choices=['train', 'val', 'valv2', 'test'],
        help='train, val, test')
    add('--arch', default=None, type=str,
        help='victim network architecture')
    add('--all_archs', action="store_true")
    add('--targeted', action="store_true")
    add('--target_type', type=str, default='increment', choices=['random', 'least_likely', "increment"])
    add('--norm', default='l2', type=str, choices=['l2'],
        help='l2 attack or linf attack')
    add('--attack-method', default='ba', choices=['ba', 'cw', 'bapp'],
        help='attack method')
    add('--save-all-steps', action='store_true',
        help='save all intermediate adversarial images')
    add('--seed', default=0, type=int,
        help='random seed')
    add('--ssh', action='store_true',
        help='whether or not we are executing command via ssh. '
             'If set to True, we will not print anything to screen and only redirect them to log file')
    add('--json-config', type=str, default='./configures/boundary_attack.json',
        help='a configures file to be passed in instead of arguments')
    # bapp (a.k.a., hsja) parameters
    add('--bapp-iteration', default=132, type=int,
        help='boundary attack++: number of iterations')
    add('--bapp-initial-num-eval', default=100, type=int,
        help='boundary attack++: initial number of evaluations for gradient estimation')
    add('--bapp-max-num-eval', default=10000, type=int,
        help='boundary attack++: max number of evaluations for gradient estimation')
    add('--bapp-stepsize-search', default='geometric_progression', type=str,
        choices=['geometric_progression', 'grid_search'],
        help='boundary attack++: step size search method')
    add('--bapp-gamma', default=0.01, type=float,
        help='boundary attack++: to decide binary search threshold')
    add('--bapp-batch-size', default=256, type=int,
        help='boundary attack++: batch size for model prediction')
    add('--bapp-internal-dtype', default='float32', type=str,
        help='boundary attack++: internal dtype. foolbox default value is float64')
    # boundary attack parameters
    add('--ba-iteration', default=1200, type=int,
        help='boundary attack: number of iterations')
    add('--ba-max-directions', default=25, type=int,
        help='boundary attack: batch size')
    add('--ba-spherical-step', default=1e-2, type=float,
        help='boundary attack: spherical step size')
    add('--ba-source-step', default=1e-2, type=float,
        help='boundary attack: source step size')
    add('--ba-step-adaptation', default=1.5, type=float,
        help='boundary attack: step size adaptation multiplier')
    add('--ba-batch-size', default=1, type=int,
        help='boundary attack: batch size')
    add('--ba-no-tune-batch-size', action='store_true',
        help='boundary attack: disable automatic batch size tuning')
    add('--ba-no-threaded', action='store_true',
        help='boundary attack: do not use multi thread to generate candidate and random numbers')
    add('--ba-internal-dtype', default='float32', type=str,
        help='boundary attack: internal dtype. foolbox default value is float64')
    # cw attack (white-box) parameters
    add('--cw-binary-search-step', default=5, type=int,
        help='cw attack: number of binary search steps of constant')
    add('--cw-max-iteration', default=1000, type=int,
        help='cw attack: maximum number of iterations')
    add('--cw-confidence', default=0.0, type=float,
        help='cw attack: confidence threshold')
    add('--cw-learning-rate', default=0.005, type=float,
        help='cw learning: initial learning rate')
    add('--cw-initial-const', default=0.01, type=float,
        help='cw attack: initial constant')
    # defense / budget options
    add('--attack_defense', action="store_true")
    add('--defense_model', type=str, default=None)
    add('--max_queries', type=int, default=10000)
    add('--defense_norm', type=str, choices=["l2", "linf"], default='linf')
    add('--defense_eps', type=str, default="")
    add('--epsilon', type=float, help='the lp perturbation bound')
    if len(sys.argv) == 1:
        # invoked with no options at all: show usage and bail out
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def get_image_of_target_class(dataset_name, target_labels, target_model):
    """Sample, for each requested label, one validation image that the victim
    model actually classifies as that label.

    The original implementation duplicated the sample/resize/predict logic
    before and inside the retry loop, and left ``dataset`` unbound for an
    unknown dataset name; both issues are fixed here by a dispatch table and a
    single sampling loop.

    Args:
        dataset_name: one of "ImageNet", "CIFAR-10", "CIFAR-100", "TinyImageNet".
        target_labels: iterable of 0-dim label tensors (typically length 1).
        target_model: victim model used to verify each sampled image.

    Returns:
        torch.Tensor of shape (B, C, H, W) stacking one verified image per label.

    Raises:
        ValueError: if dataset_name is not one of the supported datasets.
    """
    dataset_classes = {
        "ImageNet": ImageNetDataset,
        "CIFAR-10": CIFAR10Dataset,
        "CIFAR-100": CIFAR100Dataset,
        "TinyImageNet": TinyImageNetDataset,
    }
    if dataset_name not in dataset_classes:
        raise ValueError("Unsupported dataset: {}".format(dataset_name))
    images = []
    for label in target_labels:
        dataset = dataset_classes[dataset_name](IMAGE_DATA_ROOT[dataset_name], label.item(), "validation")
        # resample until the victim model predicts the desired label
        while True:
            index = np.random.randint(0, len(dataset))
            image, true_label = dataset[index]
            image = image.unsqueeze(0)
            if dataset_name == "ImageNet" and target_model.input_size[-1] != 299:
                # resize to the victim model's expected input resolution
                image = F.interpolate(image,
                                      size=(target_model.input_size[-2], target_model.input_size[-1]),
                                      mode='bilinear', align_corners=False)
            with torch.no_grad():
                logits = target_model(image.cuda())
            if logits.max(1)[1].item() == label.item():
                break
        assert true_label == label.item()
        images.append(torch.squeeze(image))
    return torch.stack(images)  # B,C,H,W
def main(args, result_dump_path):
    """Run the configured decision-based attack on every test image and dump
    per-image statistics to ``result_dump_path``.

    Bug fix vs. the original: ``result_json["success_all"]`` was assigned with
    a trailing comma, which wrapped the list in a 1-element tuple before JSON
    serialization (inconsistent with ``correct_all`` right below it).

    Args:
        args: parsed experiment configuration (see parse_args / json-config).
        result_dump_path: path of the JSON file the results are written to.
    """
    # make model (optionally wrapped by a defense)
    log.info('Initializing model {} on {}'.format(args.arch, args.dataset))
    if args.attack_defense:
        model = DefensiveModel(args.dataset, args.arch, no_grad=True, defense_model=args.defense_model,
                               norm=args.defense_norm, eps=args.defense_eps)
    else:
        model = StandardModel(args.dataset, args.arch, no_grad=True, load_pretrained=True)
    model.cuda()
    model.eval()
    fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1), num_classes=CLASS_NUM[args.dataset], device=str(args.device))
    log.info('Foolbox model created')
    distortion_all = defaultdict(OrderedDict)  # key is image index, value is {query: distortion}
    result_json = {"statistical_details": {}}
    success_all = []
    # make loader
    loader = DataLoaderMaker.get_test_attacked_data(args.dataset, args.ba_batch_size)
    # is_ignore marks images to skip; we use LongTensor instead of ByteTensor
    # because a ByteTensor would overflow on .sum() over many images
    correct_all = []
    is_ignore = torch.LongTensor(0)
    # attack
    for batch_index, (image, label) in enumerate(loader):
        if args.dataset == "ImageNet" and model.input_size[-1] != 299:
            # resize to the victim model's expected input resolution
            image = F.interpolate(image,
                                  size=(model.input_size[-2], model.input_size[-1]), mode='bilinear',
                                  align_corners=False)
        # extract inputs
        assert image.dim() == 4
        assert image.shape[0] == 1
        image = image.numpy()[0]
        # targeted attacks start from an image of the target class
        init_adv_image = None
        if args.targeted:
            target_label = torch.fmod(label + 1, CLASS_NUM[args.dataset])
            init_adv_image = get_image_of_target_class(args.dataset, target_label, model).detach().cpu().numpy()[0]
        true_label = label.clone()
        label = label.item()
        pred = int(np.argmax(fmodel.forward_one(image)))
        # append 0, and we will modify them later
        is_ignore = torch.cat((is_ignore, torch.LongTensor([0])))
        # fill in is_correct, we will use is_correct to check is_ignore later
        correct_all.append(int(pred == label))
        # ignore image
        if is_ignore[-1].item():
            continue
        if int(correct_all[-1]) == 0:
            log.info("{}-th image is already incorrect classified, skip".format(batch_index))
            continue
        # start attack
        log.info('Begin attacking {}-th image'.format(batch_index))
        # initialize attack object and perform attack
        if not args.targeted:
            criterion = foolbox.criteria.Misclassification()
        else:
            criterion = foolbox.criteria.TargetClass((label + 1) % CLASS_NUM[args.dataset])
        if args.attack_method == 'ba':
            attack = BoundaryAttack(fmodel, criterion=criterion)
            with torch.no_grad():
                result = attack(input_or_adv=image,
                                label=label,
                                unpack=False,
                                iterations=args.ba_iteration,
                                max_directions=args.ba_max_directions,
                                max_queries=args.max_queries,
                                starting_point=init_adv_image,
                                initialization_attack=None,  # foolbox default
                                log_every_n_steps=100,
                                spherical_step=args.ba_spherical_step,
                                source_step=args.ba_source_step,
                                step_adaptation=args.ba_step_adaptation,
                                batch_size=args.ba_batch_size,
                                tune_batch_size=not args.ba_no_tune_batch_size,
                                threaded_rnd=not args.ba_no_threaded,
                                threaded_gen=not args.ba_no_threaded,
                                alternative_generator=False,  # foolbox default
                                internal_dtype=eval('np.{}'.format(args.ba_internal_dtype)),
                                save_all_steps=args.save_all_steps,
                                verbose=False)
        elif args.attack_method == 'cw':
            attack = foolbox.attacks.CarliniWagnerL2Attack(fmodel, criterion=criterion)
            # cw attack does not required a starting point, since it starts from the clean image
            result = attack(input_or_adv=image,
                            label=label,
                            unpack=False,
                            binary_search_steps=args.cw_binary_search_step,
                            max_iterations=args.cw_max_iteration,
                            confidence=args.cw_confidence,
                            learning_rate=args.cw_learning_rate,
                            initial_const=args.cw_initial_const,
                            save_all_steps=args.save_all_steps,
                            abort_early=True)
        elif args.attack_method == 'bapp':
            attack = foolbox.attacks.BoundaryAttackPlusPlus(fmodel, criterion=criterion)
            with torch.no_grad():
                result = attack(input_or_adv=image,
                                label=label,
                                unpack=False,
                                iterations=args.bapp_iteration,
                                initial_num_evals=args.bapp_initial_num_eval,
                                max_num_evals=args.bapp_max_num_eval,
                                stepsize_search=args.bapp_stepsize_search,
                                gamma=args.bapp_gamma,
                                starting_point=init_adv_image,
                                batch_size=args.bapp_batch_size,
                                internal_dtype=eval('np.{}'.format(args.bapp_internal_dtype)),
                                log_every_n_steps=1,
                                save_all_steps=args.save_all_steps,
                                verbose=False)
        else:
            raise NotImplementedError('Unknown attack_method: {}'.format(args.attack_method))
        # attack current image done, print summary for current image
        if result.distance.value <= 0:
            log.info('Failed to attack {}-th image'.format(batch_index))
        log.info('Attack {}-th image done'.format(batch_index))
        log.info('  final query count: {}'.format(result._total_prediction_calls))
        log.info('  final distance: {:.4g} ({})'.format(result.distance.value, result.distance.name()))
        log.info('  final distance: {:.4f}'.format(np.sqrt(result.distance.value * image.size)))
        log.info('  label: {}'.format(label))
        log.info('  pred: {}'.format(pred))
        log.info('  adv label: {}'.format(result.adversarial_class))
        # save results
        log.info('Final result for {}-th image: query: {:d}, dist: {:.4f}'.format(
            batch_index, result._total_prediction_calls, np.sqrt(result.distance.value * image.size)))
        if not hasattr(attack, "stats_distances"):
            log.info("Blend random noise failed! skip this {}-th image".format(batch_index))
            continue
        # re-classify the final adversarial image to decide success under the
        # epsilon perturbation budget
        with torch.no_grad():
            adv_images = torch.from_numpy(result.perturbed)
            if adv_images.dim() == 3:
                adv_images = adv_images.unsqueeze(0)
            adv_logit = model(adv_images.cuda())
            adv_pred = adv_logit.argmax(dim=1)
        if args.targeted:
            not_done = 1 - adv_pred.eq(target_label.cuda()).float()
        else:
            not_done = adv_pred.eq(true_label.cuda()).float()
        success = (1 - not_done.detach().cpu()) * bool(float(np.sqrt(result.distance.value * image.size)) < args.epsilon)
        success_all.append(int(success[0].item()))
        result = {
            'original_class': int(result.original_class),
            'adversarial_class': int(result.adversarial_class),
            'final_distance': np.sqrt(result.distance.value * image.size).item(),
            'final_query_count': int(result._total_prediction_calls)}
        stats_distance = np.sqrt(attack.stats_distances * image.size)
        stats_query_count = attack.stats_query_counts
        for iteration, query_each_iteration in enumerate(stats_query_count):
            distortion_all[batch_index][int(query_each_iteration)] = stats_distance[iteration].item()
        result_json["statistical_details"][batch_index] = result
    result_json["distortion"] = distortion_all
    result_json["args"] = vars(args)
    # fixed: no trailing comma here (it used to serialize a 1-element tuple)
    result_json["success_all"] = success_all
    result_json["correct_all"] = correct_all
    # print finish information
    log.info('Attack finished.')
    with open(result_dump_path, "w") as result_file_obj:
        json.dump(result_json, result_file_obj, sort_keys=True)
    log.info("done, write stats info to {}".format(result_dump_path))
def set_log_file(fname, file_only=False):
    """Duplicate or redirect stdout/stderr into *fname*.

    A plain ``logging.FileHandler`` would not capture Python tracebacks that
    bypass the logging module, so this simulates the shell ``tee`` command
    instead (solution adapted from https://stackoverflow.com/questions/616645).

    Args:
        fname: path of the log file to write.
        file_only: when True, stdout/stderr are replaced by the file and
            nothing is printed to the terminal. This is intended for execution
            over ssh: ssh applies windowed flow control, so if nobody drains
            the channel its buffer fills up and the remote process blocks in
            sleeping (S) state. Writing only to a file avoids that entirely.
            Note: import glog/logging *after* calling
            set_log_file(*, file_only=True), since sys.stdout/stderr change.
    """
    if file_only:
        # replace both streams with a line-buffered file object
        stream = open(fname, 'w', buffering=1)
        sys.stdout = stream
        sys.stderr = stream
        return
    # mirror output to both the terminal and the file via a `tee` child process
    import subprocess
    tee_proc = subprocess.Popen(['tee', fname], stdin=subprocess.PIPE)
    tee_fd = tee_proc.stdin.fileno()
    os.dup2(tee_fd, sys.stdout.fileno())
    os.dup2(tee_fd, sys.stderr.fileno())
def print_args():
    """Log every attribute of the module-level ``args`` namespace, one per
    line, with the key right-aligned so the values line up."""
    arg_map = vars(args)
    width = max(len(name) for name in arg_map)
    for name in sorted(arg_map):
        log.info('{:s}: {}'.format(name.rjust(width + 1), getattr(args, name)))
def get_exp_dir_name(dataset, norm, targeted, target_type, args):
    """Compose the experiment directory name from the attack settings.

    Args:
        dataset: dataset name, e.g. "CIFAR-10".
        norm: perturbation norm, e.g. "l2".
        targeted: whether the attack is targeted.
        target_type: target-selection strategy (only used when targeted).
        args: namespace providing the attack_defense flag.

    Returns:
        Directory name such as "boundary_attack-CIFAR-10-l2-untargeted".
    """
    if targeted:
        target_str = "targeted_{}".format(target_type)
    else:
        target_str = "untargeted"
    if args.attack_defense:
        prefix = 'boundary_attack_on_defensive_model'
    else:
        prefix = 'boundary_attack'
    return '{}-{}-{}-{}'.format(prefix, dataset, norm, target_str)
# The foolbox library is bundled in foolbox.zip; the code that invokes it lives in foolbox_attacks.py
if __name__ == '__main__':
    # before going to the main function, we do following things:
    # 1. setup output directory
    # 2. make global variables: args, model (on cpu), loaders and device
    # 1. setup output directory
    args = parse_args()
    if args.json_config:
        # If a json file is given, use the JSON file as the base, and then update it with args
        defaults = json.load(open(args.json_config))[args.dataset][args.norm]
        arg_vars = vars(args)
        arg_vars = {k: arg_vars[k] for k in arg_vars if arg_vars[k] is not None}
        defaults.update(arg_vars)
        args = SimpleNamespace(**defaults)
    # if args.num_part > 1, then this experiment is just a part and we should use the same token for all parts
    # to guarantee that, we use sha256sum of config in string format to generate unique token
    args.exp_dir = osp.join(args.exp_dir, get_exp_dir_name(args.dataset, args.norm, args.targeted, args.target_type, args))  # create the directory used for this experiment's outputs
    os.makedirs(args.exp_dir, exist_ok=True)
    # pick the log file name depending on whether all architectures are attacked
    # and whether a defended model is targeted
    if args.all_archs:
        if args.attack_defense:
            log_file_path = osp.join(args.exp_dir, 'run_defense_{}.log'.format(args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir, 'run.log')
    elif args.arch is not None:
        if args.attack_defense:
            if args.defense_model == "adv_train_on_ImageNet":
                log_file_path = osp.join(args.exp_dir,
                                         "run_defense_{}_{}_{}_{}.log".format(args.arch, args.defense_model,
                                                                              args.defense_norm,
                                                                              args.defense_eps))
            else:
                log_file_path = osp.join(args.exp_dir, 'run_defense_{}_{}.log'.format(args.arch, args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir, 'run_{}.log'.format(args.arch))
    set_log_file(log_file_path)  # set log file; import glog only after this, since set_log_file() may replace sys.stdout/stderr
    if args.attack_defense:
        assert args.defense_model is not None
    # targeted attacks on ImageNet (and attacks on adversarially trained
    # ImageNet models) get a doubled query budget
    if args.targeted:
        if args.dataset == "ImageNet":
            args.max_queries = 20000
    if args.attack_defense and args.defense_model == "adv_train_on_ImageNet":
        args.max_queries = 20000
    log.info('Foolbox package (version {}) imported from: {}'.format(foolbox.__version__, foolbox.__file__))
    log.info('Host: {}, user: {}, CUDA_VISIBLE_DEVICES: {}, cwd: {}'.format(
        socket.gethostname(), getpass.getuser(), os.environ.get('CUDA_VISIBLE_DEVICES', ''), os.getcwd()))
    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info("Log file is written in {}".format(log_file_path))
    log.info('Called with args:')
    print_args()
    # 2. make global variables
    # check device
    device = torch.device('cuda')
    args.device = str(device)
    # set random seed before init model
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # NOTE(review): the three seeding calls below repeat the ones just above —
    # harmless, but presumably redundant
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.all_archs:
        archs = MODELS_TEST_STANDARD[args.dataset]
    else:
        assert args.arch is not None
        archs = [args.arch]
    # attack every selected architecture in turn, dumping one JSON per arch
    for arch in archs:
        args.arch = arch
        if args.attack_defense:
            if args.defense_model == "adv_train_on_ImageNet":
                save_result_path = args.exp_dir + "/{}_{}_{}_{}_result.json".format(arch, args.defense_model,
                                                                                    args.defense_norm, args.defense_eps)
            else:
                save_result_path = args.exp_dir + "/{}_{}_result.json".format(arch, args.defense_model)
        else:
            save_result_path = args.exp_dir + "/{}_result.json".format(arch)
        log.info("After attack finished, the result json file will be dumped to {}".format(save_result_path))
        # do the business
        main(args, save_result_path)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"getpass.getuser",
"boundary_attack.foolbox.attacks.CarliniWagnerL2Attack",
"collections.defaultdict",
"sys.stderr.fileno",
"dataset.dataset_loader_maker.DataLoaderMaker.get_test_attacked_data",
"torch.device",
"boundary_attack.foolbox.criteria.TargetC... | [((95, 106), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (104, 106), False, 'import os\n'), ((865, 904), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (888, 904), False, 'import argparse\n'), ((8503, 8522), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (8514, 8522), False, 'import torch\n'), ((9134, 9167), 'glog.info', 'log.info', (['"""Foolbox model created"""'], {}), "('Foolbox model created')\n", (9142, 9167), True, 'import glog as log\n'), ((9189, 9213), 'collections.defaultdict', 'defaultdict', (['OrderedDict'], {}), '(OrderedDict)\n', (9200, 9213), False, 'from collections import OrderedDict, defaultdict\n'), ((9363, 9435), 'dataset.dataset_loader_maker.DataLoaderMaker.get_test_attacked_data', 'DataLoaderMaker.get_test_attacked_data', (['args.dataset', 'args.ba_batch_size'], {}), '(args.dataset, args.ba_batch_size)\n', (9401, 9435), False, 'from dataset.dataset_loader_maker import DataLoaderMaker\n'), ((9831, 9850), 'torch.LongTensor', 'torch.LongTensor', (['(0)'], {}), '(0)\n', (9847, 9850), False, 'import torch\n'), ((17879, 17907), 'glog.info', 'log.info', (['"""Attack finished."""'], {}), "('Attack finished.')\n", (17887, 17907), True, 'import glog as log\n'), ((21502, 21542), 'os.makedirs', 'os.makedirs', (['args.exp_dir'], {'exist_ok': '(True)'}), '(args.exp_dir, exist_ok=True)\n', (21513, 21542), False, 'import os\n'), ((23337, 23366), 'glog.info', 'log.info', (['"""Called with args:"""'], {}), "('Called with args:')\n", (23345, 23366), True, 'import glog as log\n'), ((23448, 23468), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (23460, 23468), False, 'import torch\n'), ((23788, 23810), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (23799, 23810), False, 'import random\n'), ((23815, 23840), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (23829, 23840), 
True, 'import numpy as np\n'), ((23845, 23873), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (23862, 23873), False, 'import torch\n'), ((23878, 23911), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (23900, 23911), False, 'import torch\n'), ((23916, 23953), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (23942, 23953), False, 'import torch\n'), ((23958, 23980), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (23969, 23980), False, 'import random\n'), ((23985, 24010), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (23999, 24010), True, 'import numpy as np\n'), ((24015, 24043), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (24032, 24043), False, 'import torch\n'), ((6555, 6566), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6563, 6566), False, 'import sys\n'), ((8705, 8843), 'models.defensive_model.DefensiveModel', 'DefensiveModel', (['args.dataset', 'args.arch'], {'no_grad': '(True)', 'defense_model': 'args.defense_model', 'norm': 'args.defense_norm', 'eps': 'args.defense_eps'}), '(args.dataset, args.arch, no_grad=True, defense_model=args.\n defense_model, norm=args.defense_norm, eps=args.defense_eps)\n', (8719, 8843), False, 'from models.defensive_model import DefensiveModel\n'), ((8896, 8970), 'models.standard_model.StandardModel', 'StandardModel', (['args.dataset', 'args.arch'], {'no_grad': '(True)', 'load_pretrained': '(True)'}), '(args.dataset, args.arch, no_grad=True, load_pretrained=True)\n', (8909, 8970), False, 'from models.standard_model import StandardModel\n'), ((17260, 17304), 'numpy.sqrt', 'np.sqrt', (['(attack.stats_distances * image.size)'], {}), '(attack.stats_distances * image.size)\n', (17267, 17304), True, 'import numpy as np\n'), ((17973, 18028), 'json.dump', 'json.dump', (['result_json', 'result_file_obj'], {'sort_keys': '(True)'}), 
'(result_json, result_file_obj, sort_keys=True)\n', (17982, 18028), False, 'import json\n'), ((19643, 19698), 'subprocess.Popen', 'subprocess.Popen', (["['tee', fname]"], {'stdin': 'subprocess.PIPE'}), "(['tee', fname], stdin=subprocess.PIPE)\n", (19659, 19698), False, 'import subprocess\n'), ((21125, 21152), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**defaults)\n', (21140, 21152), False, 'from types import SimpleNamespace\n'), ((7552, 7680), 'torch.nn.functional.interpolate', 'F.interpolate', (['image'], {'size': '(target_model.input_size[-2], target_model.input_size[-1])', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(image, size=(target_model.input_size[-2], target_model.\n input_size[-1]), mode='bilinear', align_corners=False)\n", (7565, 7680), True, 'from torch.nn import functional as F\n'), ((7759, 7774), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7772, 7774), False, 'import torch\n'), ((8470, 8490), 'torch.squeeze', 'torch.squeeze', (['image'], {}), '(image)\n', (8483, 8490), False, 'import torch\n'), ((10222, 10335), 'torch.nn.functional.interpolate', 'F.interpolate', (['image'], {'size': '(model.input_size[-2], model.input_size[-1])', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(image, size=(model.input_size[-2], model.input_size[-1]),\n mode='bilinear', align_corners=False)\n", (10235, 10335), True, 'from torch.nn import functional as F\n'), ((10667, 10713), 'torch.fmod', 'torch.fmod', (['(label + 1)', 'CLASS_NUM[args.dataset]'], {}), '(label + 1, CLASS_NUM[args.dataset])\n', (10677, 10713), False, 'import torch\n'), ((11625, 11661), 'boundary_attack.foolbox.criteria.Misclassification', 'foolbox.criteria.Misclassification', ([], {}), '()\n', (11659, 11661), True, 'import boundary_attack.foolbox as foolbox\n'), ((11700, 11767), 'boundary_attack.foolbox.criteria.TargetClass', 'foolbox.criteria.TargetClass', (['((label + 1) % CLASS_NUM[args.dataset])'], {}), '((label + 1) % CLASS_NUM[args.dataset])\n', (11728, 
11767), True, 'import boundary_attack.foolbox as foolbox\n'), ((11828, 11871), 'boundary_attack.foolbox.attacks.boundary_attack.BoundaryAttack', 'BoundaryAttack', (['fmodel'], {'criterion': 'criterion'}), '(fmodel, criterion=criterion)\n', (11842, 11871), False, 'from boundary_attack.foolbox.attacks.boundary_attack import BoundaryAttack\n'), ((16286, 16301), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16299, 16301), False, 'import torch\n'), ((16328, 16362), 'torch.from_numpy', 'torch.from_numpy', (['result.perturbed'], {}), '(result.perturbed)\n', (16344, 16362), False, 'import torch\n'), ((19735, 19754), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (19752, 19754), False, 'import sys\n'), ((19792, 19811), 'sys.stderr.fileno', 'sys.stderr.fileno', ([], {}), '()\n', (19809, 19811), False, 'import sys\n'), ((21740, 21773), 'os.path.join', 'osp.join', (['args.exp_dir', '"""run.log"""'], {}), "(args.exp_dir, 'run.log')\n", (21748, 21773), True, 'import os.path as osp\n'), ((23106, 23126), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (23124, 23126), False, 'import socket\n'), ((23128, 23145), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (23143, 23145), False, 'import getpass\n'), ((23147, 23189), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""', '""""""'], {}), "('CUDA_VISIBLE_DEVICES', '')\n", (23161, 23189), False, 'import os\n'), ((23191, 23202), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (23200, 23202), False, 'import os\n'), ((8126, 8254), 'torch.nn.functional.interpolate', 'F.interpolate', (['image'], {'size': '(target_model.input_size[-2], target_model.input_size[-1])', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(image, size=(target_model.input_size[-2], target_model.\n input_size[-1]), mode='bilinear', align_corners=False)\n", (8139, 8254), True, 'from torch.nn import functional as F\n'), ((8337, 8352), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8350, 8352), False, 
'import torch\n'), ((9908, 9927), 'torch.LongTensor', 'torch.LongTensor', (['(0)'], {}), '(0)\n', (9924, 9927), False, 'import torch\n'), ((9955, 9974), 'torch.LongTensor', 'torch.LongTensor', (['(0)'], {}), '(0)\n', (9971, 9974), False, 'import torch\n'), ((9993, 10012), 'torch.LongTensor', 'torch.LongTensor', (['(0)'], {}), '(0)\n', (10009, 10012), False, 'import torch\n'), ((10032, 10051), 'torch.LongTensor', 'torch.LongTensor', (['(0)'], {}), '(0)\n', (10048, 10051), False, 'import torch\n'), ((11044, 11065), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (11060, 11065), False, 'import torch\n'), ((11889, 11904), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11902, 11904), False, 'import torch\n'), ((13283, 13349), 'boundary_attack.foolbox.attacks.CarliniWagnerL2Attack', 'foolbox.attacks.CarliniWagnerL2Attack', (['fmodel'], {'criterion': 'criterion'}), '(fmodel, criterion=criterion)\n', (13320, 13349), True, 'import boundary_attack.foolbox as foolbox\n'), ((15659, 15702), 'numpy.sqrt', 'np.sqrt', (['(result.distance.value * image.size)'], {}), '(result.distance.value * image.size)\n', (15666, 15702), True, 'import numpy as np\n'), ((16062, 16105), 'numpy.sqrt', 'np.sqrt', (['(result.distance.value * image.size)'], {}), '(result.distance.value * image.size)\n', (16069, 16105), True, 'import numpy as np\n'), ((14083, 14150), 'boundary_attack.foolbox.attacks.BoundaryAttackPlusPlus', 'foolbox.attacks.BoundaryAttackPlusPlus', (['fmodel'], {'criterion': 'criterion'}), '(fmodel, criterion=criterion)\n', (14121, 14150), True, 'import boundary_attack.foolbox as foolbox\n'), ((17107, 17150), 'numpy.sqrt', 'np.sqrt', (['(result.distance.value * image.size)'], {}), '(result.distance.value * image.size)\n', (17114, 17150), True, 'import numpy as np\n'), ((14168, 14183), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14181, 14183), False, 'import torch\n'), ((16802, 16845), 'numpy.sqrt', 'np.sqrt', (['(result.distance.value * image.size)'], 
{}), '(result.distance.value * image.size)\n', (16809, 16845), True, 'import numpy as np\n')] |
from typing import Any, Tuple
from Bridges.bridge import Bridge
from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, \
IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
import numpy as np
import cv2
from typing import Optional
from ROAR_Jetson.vive.models import ViveTrackerMessage
from ROAR_Jetson.jetson_vehicle import Vehicle as JetsonVehicle
from ROAR_Jetson.camera_d_t import RealsenseD435iAndT265
from typing import Union
class JetsonBridge(Bridge):
    """Bridge translating raw Jetson / Vive sensor output into ROAR agent types.

    Each ``convert_*_from_source_to_agent`` method maps one raw payload onto
    the corresponding ROAR data structure; the ``*_from_agent_to_source``
    methods perform the reverse mapping back to hardware-facing values.
    """

    def convert_location_from_source_to_agent(self, source: np.ndarray) -> Location:
        """Turn a raw ``[x, y, z]`` array into a Location."""
        x, y, z = source[0], source[1], source[2]
        return Location(x=x, y=y, z=z)

    def convert_rotation_from_source_to_agent(self, source: np.ndarray) -> Rotation:
        """Turn a raw ``[roll, pitch, yaw]`` array into a Rotation."""
        roll, pitch, yaw = source[0], source[1], source[2]
        return Rotation(roll=roll, pitch=pitch, yaw=yaw)

    def convert_transform_from_source_to_agent(self, source) -> Transform:
        """Combine a raw pose (``.location`` / ``.rotation``) into a Transform."""
        loc = self.convert_location_from_source_to_agent(source=source.location)
        rot = self.convert_rotation_from_source_to_agent(source=source.rotation)
        return Transform(location=loc, rotation=rot)

    def convert_control_from_source_to_agent(self, source: JetsonVehicle) -> VehicleControl:
        """Wrap the vehicle's current throttle / steering into a VehicleControl."""
        return VehicleControl(throttle=source.throttle, steering=source.steering)

    def convert_rgb_from_source_to_agent(self, source) -> Optional[RGBData]:
        """Wrap a raw RGB image array in RGBData; ``None`` passes through."""
        if source is None:
            return None
        return RGBData(data=source)

    def convert_depth_from_source_to_agent(self, source) -> Optional[DepthData]:
        """Normalize a raw depth image and wrap it in DepthData.

        ``None`` passes through unchanged.
        """
        if source is None:
            return None
        # Scale by the frame's own maximum so depth values land in [0, 1].
        return DepthData(data=source / np.amax(source))

    def convert_vector3d_from_source_to_agent(self, source) -> Vector3D:
        """Copy an object exposing ``.x`` / ``.y`` / ``.z`` into a Vector3D."""
        return Vector3D(x=source.x, y=source.y, z=source.z)

    def convert_imu_from_source_to_agent(self, source) -> IMUData:
        """Produce an IMUData placeholder.

        TODO: populate accelerometer / gyroscope from the raw IMU payload;
        both are currently hard-coded to zero vectors.
        """
        zero = dict(x=0, y=0, z=0)
        return IMUData(
            accelerometer=Vector3D(**zero),
            gyroscope=Vector3D(**zero),
        )

    def convert_sensor_data_from_source_to_agent(self, source) -> SensorsData:
        """Bundle a dict of raw sensor payloads into a SensorsData object.

        Expected keys: front_rgb, rear_rgb, front_depth, imu, location,
        rotation, velocity (missing keys are passed through as ``None``).
        """
        front_rgb = self.convert_rgb_from_source_to_agent(source=source.get("front_rgb", None))
        rear_rgb = self.convert_rgb_from_source_to_agent(source=source.get("rear_rgb", None))
        front_depth = self.convert_depth_from_source_to_agent(source=source.get("front_depth", None))
        imu_data = self.convert_imu_from_source_to_agent(source=source.get("imu", None))
        # NOTE(review): the location/rotation/velocity converters index into
        # their argument, so a missing key (None) would raise here — confirm
        # callers always supply these three entries.
        location = self.convert_location_from_source_to_agent(source=source.get("location", None))
        rotation = self.convert_rotation_from_source_to_agent(source=source.get("rotation", None))
        velocity = self.convert_location_from_source_to_agent(source=source.get("velocity", None))
        return SensorsData(
            front_rgb=front_rgb,
            rear_rgb=rear_rgb,
            front_depth=front_depth,
            imu_data=imu_data,
            location=location,
            rotation=rotation,
            velocity=velocity,
        )

    def convert_vive_tracker_data_from_source_to_agent(
            self, source: Optional[ViveTrackerMessage]) -> Optional[ViveTrackerData]:
        """Map a ViveTrackerMessage into ViveTrackerData.

        The x and z components (and roll/yaw) are negated; ``None`` passes
        through unchanged.
        """
        if source is None:
            return None
        return ViveTrackerData(
            location=Location(x=-source.x, y=source.y, z=-source.z),
            rotation=Rotation(
                roll=-source.roll,
                # Original author's note: unexplained ~60 degree error here.
                pitch=source.pitch - 90,
                yaw=-source.yaw,
            ),
            velocity=Vector3D(x=source.vel_x, y=source.vel_y, z=source.vel_z),
        )

    def convert_vehicle_from_source_to_agent(self, source: JetsonVehicle) -> Vehicle:
        """Build a Vehicle model (fixed wheel_base of 0.26) around the current control."""
        control = self.convert_control_from_source_to_agent(source=source)
        return Vehicle(wheel_base=0.26, control=control)

    def convert_control_from_agent_to_source(self, control: VehicleControl) -> Tuple:
        """Clamp throttle and steering into [-1, 1] and return them as a tuple."""
        throttle = np.clip(control.throttle, a_min=-1, a_max=1)
        steering = np.clip(control.steering, a_min=-1, a_max=1)
        return throttle, steering

    def convert_vector3d_from_agent_to_source(self, vector3d: Vector3D) -> Any:
        """Flatten a Vector3D into a plain ``[x, y, z]`` list."""
        return [vector3d.x, vector3d.y, vector3d.z]
| [
"ROAR.utilities_module.data_structures_models.Rotation",
"ROAR.utilities_module.data_structures_models.Location",
"numpy.clip",
"numpy.amax",
"ROAR.utilities_module.vehicle_models.VehicleControl",
"ROAR.utilities_module.data_structures_models.RGBData",
"ROAR.utilities_module.data_structures_models.Vecto... | [((892, 939), 'ROAR.utilities_module.data_structures_models.Location', 'Location', ([], {'x': 'source[0]', 'y': 'source[1]', 'z': 'source[2]'}), '(x=source[0], y=source[1], z=source[2])\n', (900, 939), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((1244, 1300), 'ROAR.utilities_module.data_structures_models.Rotation', 'Rotation', ([], {'roll': 'source[0]', 'pitch': 'source[1]', 'yaw': 'source[2]'}), '(roll=source[0], pitch=source[1], yaw=source[2])\n', (1252, 1300), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((2110, 2176), 'ROAR.utilities_module.vehicle_models.VehicleControl', 'VehicleControl', ([], {'throttle': 'source.throttle', 'steering': 'source.steering'}), '(throttle=source.throttle, steering=source.steering)\n', (2124, 2176), False, 'from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle\n'), ((3178, 3222), 'ROAR.utilities_module.data_structures_models.Vector3D', 'Vector3D', ([], {'x': 'source.x', 'y': 'source.y', 'z': 'source.z'}), '(x=source.x, y=source.y, z=source.z)\n', (3186, 3222), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((2474, 2494), 'ROAR.utilities_module.data_structures_models.RGBData', 'RGBData', ([], {'data': 'source'}), '(data=source)\n', (2481, 2494), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((6636, 6680), 'numpy.clip', 'np.clip', (['control.throttle'], {'a_min': '(-1)', 'a_max': '(1)'}), '(control.throttle, a_min=-1, 
a_max=1)\n', (6643, 6680), True, 'import numpy as np\n'), ((6682, 6726), 'numpy.clip', 'np.clip', (['control.steering'], {'a_min': '(-1)', 'a_max': '(1)'}), '(control.steering, a_min=-1, a_max=1)\n', (6689, 6726), True, 'import numpy as np\n'), ((3572, 3595), 'ROAR.utilities_module.data_structures_models.Vector3D', 'Vector3D', ([], {'x': '(0)', 'y': '(0)', 'z': '(0)'}), '(x=0, y=0, z=0)\n', (3580, 3595), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((3619, 3642), 'ROAR.utilities_module.data_structures_models.Vector3D', 'Vector3D', ([], {'x': '(0)', 'y': '(0)', 'z': '(0)'}), '(x=0, y=0, z=0)\n', (3627, 3642), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((5355, 5401), 'ROAR.utilities_module.data_structures_models.Location', 'Location', ([], {'x': '(-source.x)', 'y': 'source.y', 'z': '(-source.z)'}), '(x=-source.x, y=source.y, z=-source.z)\n', (5363, 5401), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((5506, 5575), 'ROAR.utilities_module.data_structures_models.Rotation', 'Rotation', ([], {'roll': '(-source.roll)', 'pitch': '(source.pitch - 90)', 'yaw': '(-source.yaw)'}), '(roll=-source.roll, pitch=source.pitch - 90, yaw=-source.yaw)\n', (5514, 5575), False, 'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((5697, 5753), 'ROAR.utilities_module.data_structures_models.Vector3D', 'Vector3D', ([], {'x': 'source.vel_x', 'y': 'source.vel_y', 'z': 'source.vel_z'}), '(x=source.vel_x, y=source.vel_y, z=source.vel_z)\n', (5705, 5753), False, 
'from ROAR.utilities_module.data_structures_models import Vector3D, SensorsData, IMUData, DepthData, RGBData, Transform, Rotation, Location, ViveTrackerData, TrackingData\n'), ((2850, 2865), 'numpy.amax', 'np.amax', (['source'], {}), '(source)\n', (2857, 2865), True, 'import numpy as np\n')] |
"""Tests for the attribute .X"""
import numpy as np
import pandas as pd
from scipy import sparse
import anndata as ad
from anndata import AnnData
from anndata.utils import asarray
import pytest
from anndata.tests.helpers import gen_adata, assert_equal
# Array container types that carry no axis labels of their own; used to
# parametrize the tests below over sparse (CSR/CSC) and dense (ndarray)
# storage for X.
UNLABELLED_ARRAY_TYPES = [
    pytest.param(sparse.csr_matrix, id="csr"),
    pytest.param(sparse.csc_matrix, id="csc"),
    pytest.param(asarray, id="ndarray"),
]
# Shapes where at least one dimension equals 1 (edge cases for X handling).
SINGULAR_SHAPES = [
    pytest.param(shape, id=str(shape)) for shape in [(1, 10), (10, 1), (1, 1)]
]
@pytest.fixture(params=["h5ad", "zarr"])
def diskfmt(request):
    """On-disk format name to test against ("h5ad" or "zarr")."""
    fmt = request.param
    return fmt
@pytest.mark.parametrize("shape", SINGULAR_SHAPES)
@pytest.mark.parametrize("orig_array_type", UNLABELLED_ARRAY_TYPES)
@pytest.mark.parametrize("new_array_type", UNLABELLED_ARRAY_TYPES)
def test_setter_singular_dim(shape, orig_array_type, new_array_type):
    """Assigning X of a different container type works for singular shapes.

    Regression test for https://github.com/theislab/anndata/issues/500
    """
    adata = gen_adata(shape, X_type=orig_array_type)
    replacement = new_array_type(np.ones(shape))
    adata.X = replacement
    np.testing.assert_equal(asarray(adata.X), 1)
###############################
# Tests for `adata.X is None` #
###############################
def test_set_x_is_none():
    """The X setter accepts None, and the getter then returns None."""
    x = np.arange(1, 7).reshape(2, 3)
    adata = AnnData(x, {"o1": [1, 2], "o2": [3, 4]})
    adata.X = None
    assert adata.X is None
def test_del_set_equiv_X():
    """``del adata.X`` and ``adata.X = None`` leave equal objects behind."""
    deleted = gen_adata((10, 10))
    nulled = deleted.copy()

    del deleted.X
    nulled.X = None

    assert deleted.X is None
    assert_equal(deleted, nulled)

    # Deleting an already-absent X must not raise.
    del deleted.X
    assert deleted.X is None
def test_init_X_as_none():
    """Constructing with X=None stores no X while honouring ``shape``."""
    dims = (3, 5)
    adata = AnnData(None, uns={"test": np.array((3, 3))}, shape=dims)
    assert adata.X is None
    assert adata.shape == dims
@pytest.mark.parametrize("shape", SINGULAR_SHAPES + [pytest.param((5, 3), id="(5, 3)")])
def test_transpose_with_X_as_none(shape):
    """Transposing an X-less AnnData flips shape and swaps obsp/varp keys."""
    adata = gen_adata(shape, X_type=lambda x: None)
    transposed = adata.transpose()
    assert_equal(transposed.shape, shape[::-1])
    assert_equal(transposed.obsp.keys(), adata.varp.keys())
    assert_equal(transposed.T, adata)
def test_copy():
    """A copy of an X-less AnnData compares equal to the original."""
    obs_names = [f"cell{i:03}" for i in range(100)]
    var_names = [f"gene{i:03}" for i in range(200)]
    adata = AnnData(
        None,
        obs=pd.DataFrame(index=obs_names),
        var=pd.DataFrame(index=var_names),
    )
    assert_equal(adata.copy(), adata)
def test_copy_view():
    """Copying a view of an X-less AnnData preserves equality with the view."""
    obs_names = [f"cell{i:03}" for i in range(100)]
    var_names = [f"gene{i:03}" for i in range(200)]
    adata = AnnData(
        None,
        obs=pd.DataFrame(index=obs_names),
        var=pd.DataFrame(index=var_names),
    )
    view = adata[::-2, ::-2]
    assert_equal(view.copy(), view)
############
# IO tests #
############
def test_io_missing_X(tmp_path, diskfmt):
    """An AnnData without X round-trips through h5ad/zarr unchanged."""
    file_pth = tmp_path / f"x_none_adata.{diskfmt}"

    def write(obj, pth):
        getattr(obj, f"write_{diskfmt}")(pth)

    def read(pth):
        return getattr(ad, f"read_{diskfmt}")(pth)

    adata = gen_adata((20, 30))
    del adata.X

    write(adata, file_pth)
    roundtripped = read(file_pth)
    assert_equal(roundtripped, adata)
| [
"pytest.fixture",
"anndata.tests.helpers.gen_adata",
"numpy.ones",
"pytest.param",
"anndata.tests.helpers.assert_equal",
"anndata.utils.asarray",
"numpy.array",
"pytest.mark.parametrize"
] | [((524, 563), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['h5ad', 'zarr']"}), "(params=['h5ad', 'zarr'])\n", (538, 563), False, 'import pytest\n'), ((614, 663), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'SINGULAR_SHAPES'], {}), "('shape', SINGULAR_SHAPES)\n", (637, 663), False, 'import pytest\n'), ((665, 731), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""orig_array_type"""', 'UNLABELLED_ARRAY_TYPES'], {}), "('orig_array_type', UNLABELLED_ARRAY_TYPES)\n", (688, 731), False, 'import pytest\n'), ((733, 798), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""new_array_type"""', 'UNLABELLED_ARRAY_TYPES'], {}), "('new_array_type', UNLABELLED_ARRAY_TYPES)\n", (756, 798), False, 'import pytest\n'), ((287, 328), 'pytest.param', 'pytest.param', (['sparse.csr_matrix'], {'id': '"""csr"""'}), "(sparse.csr_matrix, id='csr')\n", (299, 328), False, 'import pytest\n'), ((334, 375), 'pytest.param', 'pytest.param', (['sparse.csc_matrix'], {'id': '"""csc"""'}), "(sparse.csc_matrix, id='csc')\n", (346, 375), False, 'import pytest\n'), ((381, 416), 'pytest.param', 'pytest.param', (['asarray'], {'id': '"""ndarray"""'}), "(asarray, id='ndarray')\n", (393, 416), False, 'import pytest\n'), ((934, 974), 'anndata.tests.helpers.gen_adata', 'gen_adata', (['shape'], {'X_type': 'orig_array_type'}), '(shape, X_type=orig_array_type)\n', (943, 974), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((1492, 1511), 'anndata.tests.helpers.gen_adata', 'gen_adata', (['(10, 10)'], {}), '((10, 10))\n', (1501, 1511), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((1600, 1624), 'anndata.tests.helpers.assert_equal', 'assert_equal', (['orig', 'copy'], {}), '(orig, copy)\n', (1612, 1624), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((2060, 2099), 'anndata.tests.helpers.gen_adata', 'gen_adata', (['shape'], {'X_type': '(lambda x: None)'}), '(shape, X_type=lambda x: None)\n', 
(2069, 2099), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((2135, 2174), 'anndata.tests.helpers.assert_equal', 'assert_equal', (['adataT.shape', 'shape[::-1]'], {}), '(adataT.shape, shape[::-1])\n', (2147, 2174), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((2235, 2264), 'anndata.tests.helpers.assert_equal', 'assert_equal', (['adataT.T', 'adata'], {}), '(adataT.T, adata)\n', (2247, 2264), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((3036, 3055), 'anndata.tests.helpers.gen_adata', 'gen_adata', (['(20, 30)'], {}), '((20, 30))\n', (3045, 3055), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((3136, 3166), 'anndata.tests.helpers.assert_equal', 'assert_equal', (['from_disk', 'adata'], {}), '(from_disk, adata)\n', (3148, 3166), False, 'from anndata.tests.helpers import gen_adata, assert_equal\n'), ((1004, 1018), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1011, 1018), True, 'import numpy as np\n'), ((1048, 1064), 'anndata.utils.asarray', 'asarray', (['adata.X'], {}), '(adata.X)\n', (1055, 1064), False, 'from anndata.utils import asarray\n'), ((1244, 1276), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1252, 1276), True, 'import numpy as np\n'), ((1970, 2003), 'pytest.param', 'pytest.param', (['(5, 3)'], {'id': '"""(5, 3)"""'}), "((5, 3), id='(5, 3)')\n", (1982, 2003), False, 'import pytest\n'), ((1824, 1840), 'numpy.array', 'np.array', (['(3, 3)'], {}), '((3, 3))\n', (1832, 1840), True, 'import numpy as np\n')] |
from .query import QueryManager
import pandas as pd
import numpy as np
import os
import dateutil.parser
import datetime
import csv
import pytz
import tempfile
from functools import reduce
import sys
class StatusTypes:
    """Flow-step categories that a JIRA workflow status can map onto.

    Referenced by the ``type`` key of each entry in the cycle configuration.
    """
    open = 'open'
    backlog = 'backlog'
    accepted = 'committed'  # note: the "accepted" category is spelled 'committed'
    complete = 'complete'
    abandoned = 'abandoned'
class CycleTimeQueries(QueryManager):
"""Analysis for cycle time data, producing cumulative flow diagrams,
scatter plots and histograms.
Initialise with a `cycle`, a list of dicts representing the steps in
a cycle. Each dict describes that step with keys `name`, `type` (one of
"backlog", "accepted" or "complete" as per the `StatusTypes` enum) and
`statuses` (a list of equivalent JIRA workflow statuses that map onto
this step).
"""
settings = dict(
cycle=[ # flow steps, types, and mapped JIRA statuses
{
"name": 'todo',
"type": StatusTypes.backlog,
"statuses": ["Open", "To Do"],
},
{
"name": 'analysis',
"type": StatusTypes.accepted,
"statuses": ["Analysis"],
},
{
"name": 'analysis-done',
"type": StatusTypes.accepted,
"statuses": ["Analysis Done"],
},
{
"name": 'development',
"type": StatusTypes.accepted,
"statuses": ["In Progress"],
},
{
"name": 'done',
"type": StatusTypes.complete,
"statuses": ["Done", "Closed"],
},
]
)
    def __init__(self, jira, **kwargs):
        """Merge base settings, class defaults and ``kwargs``, then build
        the status lookup tables.

        Populates:
          * ``sized_statuses`` / ``none_sized_statuses`` -- which cycle steps
            are measured via Story Points (currently every step is "sized";
            the commented-out branch below used to exclude "open" steps).
          * ``cycle_lookup`` -- case-insensitive JIRA status name ->
            {index, name, type} of the cycle step it maps onto.
        """
        settings = super(CycleTimeQueries, self).settings.copy()
        settings.update(self.settings.copy())
        settings.update(kwargs)
        settings['sized_statuses'] = [] # All columns/statuses other than "open" could be sized via Story Points
        settings['none_sized_statuses'] = [] # Only columns/statuses that are of type "open"
        for s in settings['cycle']:
            # Make all states "sized"
            #if (s['type'] == StatusTypes.open):
            #    settings['none_sized_statuses'].append(s['name'])
            #else:
            settings['sized_statuses'].append(s['name'])
        settings['cycle_lookup'] = {}
        for idx, cycle_step in enumerate(settings['cycle']):
            for status in cycle_step['statuses']:
                # Keyed on the lower-cased JIRA status name for robust matching.
                settings['cycle_lookup'][status.lower()] = dict(
                    index=idx,
                    name=cycle_step['name'],
                    type=cycle_step['type'],
                )
        super(CycleTimeQueries, self).__init__(jira, **settings)
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None,changelog=True):
"""Get data from JIRA for cycle/flow times and story points size change.
Build a numerically indexed data frame with the following 'fixed'
columns: `key`, 'url', 'issue_type', `summary`, `status`, and
`resolution` from JIRA, as well as the value of any fields set in
the `fields` dict in `settings`. If `known_values` is set (a dict of
lists, with field names as keys and a list of known values for each
field as values) and a field in `fields` contains a list of values,
only the first value in the list of known values will be used.
If 'query_attribute' is set in `settings`, a column with this name
will be added, and populated with the `value` key, if any, from each
criteria block under `queries` in settings.
In addition, `cycle_time` will be set to the time delta between the
first `accepted`-type column and the first `complete` column, or None.
The remaining columns are the names of the items in the configured
cycle, in order.
Each cell contains the last date/time stamp when the relevant status
was set.
If an item moves backwards through the cycle, subsequent date/time
stamps in the cycle are erased.
"""
cycle_names = [s['name'] for s in self.settings['cycle']]
accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted)
completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete)
series = {
'key': {'data': [], 'dtype': str},
'url': {'data': [], 'dtype': str},
'issue_type': {'data': [], 'dtype': str},
'summary': {'data': [], 'dtype': str},
'status': {'data': [], 'dtype': str},
'resolution': {'data': [], 'dtype': str},
'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'},
'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'},
'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}
}
if sys.platform.startswith('win'):
buffer = open("cycledata.tmp", "w+",1)
# Opens a file for writing only in binary format. Overwrites the file if the file exists.
# buffering value is 1
# Windows users seem to have a problem with spooled file
else:
buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t')
#issuelinks = open("issuelinks.csv", "w+", 1)
#df_edges = pd.DataFrame()
#df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'])
#df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'], header=True, index=None, sep='\t',encoding='utf-8')
df_size_history = pd.DataFrame( columns=['key','fromDate','toDate','size'])
df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=True, index=None, sep='\t',encoding='utf-8')
for cycle_name in cycle_names:
series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'}
for name in self.fields.keys():
series[name] = {'data': [], 'dtype': 'object'}
if self.settings['query_attribute']:
series[self.settings['query_attribute']] = {'data': [], 'dtype': str}
for criteria in self.settings['queries']:
for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog):
# Deal with the differences in strings between Python 2 & 3
if (sys.version_info > (3, 0)):
# Python 3 code in this block
item = {
'key': issue.key,
'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
'issue_type': issue.fields.issuetype.name,
'summary': issue.fields.summary, # .encode('utf-8'),
'status': issue.fields.status.name,
'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
'cycle_time': None,
'completed_timestamp': None,
'created_timestamp': issue.fields.created[:19]
}
else:
# Python 2 code in this block
item = {
'key': issue.key,
'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
'issue_type': issue.fields.issuetype.name,
'summary': issue.fields.summary.encode('utf-8'),
'status': issue.fields.status.name,
'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
'cycle_time': None,
'completed_timestamp': None,
'created_timestamp': issue.fields.created[:19]
}
for name, field_name in self.fields.items():
item[name] = self.resolve_field_value(issue, name, field_name)
if self.settings['query_attribute']:
item[self.settings['query_attribute']] = criteria.get('value', None)
for cycle_name in cycle_names:
item[cycle_name] = None
# Get the relationships for this issue
edges = [] # Source, Target, Inward Link, Outward Link, Type
issuelinks = issue.fields.issuelinks
# It is seems that having an Epic Parent does not record an Epic Link, just the name "Epic Name"
# Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data
# Remove this code.
#issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None # Epic Link
#if issueEpic is not None:
# data = {'Source':issueEpic, 'Target':issue.key, 'InwardLink':'Belongs to Epic', 'OutwardLink':'Issue in Epic', 'LinkType':'EpicIssue'}
# edges.append(data)
for link in issuelinks:
inwardissue = None
outwardissue = None
try:
inwardissue = link.inwardIssue.key
except:
outwardissue = link.outwardIssue.key
if inwardissue is not None:
data = {'LinkID':link.id,'Source':inwardissue, 'Target':issue.key, 'InwardLink':link.type.inward, 'OutwardLink': link.type.outward, 'LinkType':link.type.name}
else:
data = {'LinkID':link.id,'Source':issue.key, 'Target': outwardissue, 'InwardLink':link.type.inward, 'OutwardLink':link.type.outward, 'LinkType':link.type.name}
edges.append(data)
if len(edges)>0:
try:
df_edges
except NameError:
#print('Not found')
df_edges = pd.DataFrame(edges)
else:
df_links = pd.DataFrame(edges)
df_edges=df_edges.append(df_links) # = pd.DataFrame(edges)
# Got all the relationships for this issue
rows = []
try:
for snapshot in self.iter_size_changes(issue):
data= {'key':snapshot.key,'fromDate':snapshot.date,'size':snapshot.size}
rows.append(data)
df = pd.DataFrame(rows)
# Create the toDate column
df_toDate=df['fromDate'].shift(-1)
df_toDate.loc[len(df_toDate)-1] = datetime.datetime.now(pytz.utc)
df['toDate'] = df_toDate
except:
df = pd.DataFrame(columns = ['key', 'fromDate', 'toDate', 'size'])
# Round Down datetimes to full dates
df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
# If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation.
# This size will not be recorded in the size_change record.
# Hence update the single row we have with the current issue size.
# Get Story Points size changes history
#If condition is met update the size cell
if getattr(item, 'StoryPoints', None) is not None and (df.shape[0]==1):
#if (item['StoryPoints'] is not None ) and (len(df)==1):
df.loc[df.index[0], 'size'] = item['StoryPoints']
# Append to csv file
df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None,
mode='a', sep='\t', date_format='%Y-%m-%d',encoding='utf-8')
#print(rows)
# If the first column in item lifecycle was scipted put the created data in it.
if item[cycle_names[0]] is None:
item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp']) #item['created_timestamp']
# Figure out why the first Column does not have created date
#print(dateutil.parser.parse(item['created_timestamp']))
# Record date of status changes
for snapshot in self.iter_changes(issue, True):
snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None)
if snapshot_cycle_step is None:
if verbose:
print(issue.key, "transitioned to unknown JIRA status", snapshot.status)
continue
snapshot_cycle_step_name = snapshot_cycle_step['name']
# Keep the first time we entered a step
if item[snapshot_cycle_step_name] is None:
item[snapshot_cycle_step_name] = snapshot.date
# Wipe any subsequent dates, in case this was a move backwards
found_cycle_name = False
for cycle_name in cycle_names:
if not found_cycle_name and cycle_name == snapshot_cycle_step_name:
found_cycle_name = True
continue
elif found_cycle_name and item[cycle_name] is not None:
if verbose:
print(issue.key, "moved backwards to", snapshot_cycle_step_name, "wiping date for subsequent step", cycle_name)
item[cycle_name] = None
# Wipe timestamps if items have moved backwards; calculate cycle time
previous_timestamp = None
accepted_timestamp = None
completed_timestamp = None
for cycle_name in cycle_names:
if item[cycle_name] is not None:
previous_timestamp = item[cycle_name]
if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps:
accepted_timestamp = previous_timestamp
if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps:
completed_timestamp = previous_timestamp
if accepted_timestamp is not None and completed_timestamp is not None:
item['cycle_time'] = completed_timestamp - accepted_timestamp
item['completed_timestamp'] = completed_timestamp
for k, v in item.items():
series[k]['data'].append(v)
data = {}
for k, v in series.items():
data[k] = pd.Series(v['data'], dtype=v['dtype'])
result_cycle = pd.DataFrame(data,
columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] +
sorted(self.fields.keys()) +
([self.settings['query_attribute']] if self.settings['query_attribute'] else []) +
['cycle_time', 'completed_timestamp'] +
cycle_names
)
result_size = pd.DataFrame()
buffer.seek(0)
result_size = result_size.from_csv(buffer, sep='\t')
buffer.close()
try:
df_edges
except NameError:
# print('Not found')
df_edges = pd.DataFrame()
try:
df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink','LinkType','LinkID']] # Specify dataframe sort order
#df_edges.to_csv("myedges.csv", sep='\t', index=False,encoding='utf-8')
except KeyError:
print('Info: No issue edges found.')
result_edges=df_edges
# There maybe no result_size data is we might not have any change history
try:
result_size.set_index('key')
except KeyError:
result_size = pd.DataFrame(index= ['key'],columns = ['fromDate', 'toDate', 'size'])
result_size['toDate'] = pd.to_datetime(result_size['toDate'], format=('%Y-%m-%d'))
result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format=('%Y-%m-%d'))
return result_cycle, result_size, result_edges
    def size_history(self,size_data):
        """Return a DataFrame, indexed by day, with one column of story-point
        sizes per issue.

        Columns are sorted by Jira issue key: first by project code, then
        numerically by issue number.

        Args:
            size_data: DataFrame with columns key, fromDate, toDate, size
                (one row per size-change interval, as produced by cycle_data).
        """
        def my_merge(df1, df2):
            # Outer-merge on the date index, then collapse the _x/_y suffix
            # pairs pandas creates for clashing column names.
            # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes
            res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True)
            cols = sorted(res.columns)
            pairs = []
            for col1, col2 in zip(cols[:-1], cols[1:]):
                if col1.endswith('_x') and col2.endswith('_y'):
                    pairs.append((col1, col2))
            for col1, col2 in pairs:
                res[col1[:-2]] = res[col1].combine_first(res[col2])
                res = res.drop([col1, col2], axis=1)
            return res
        dfs_key = []
        # Build one daily size series per issue, then merge them all.
        for name, group in size_data.groupby('key'):
            dfs = []
            for row in group.itertuples():
                # print(row.Index, row.fromDate,row.toDate, row.size)
                # Expand each [fromDate, toDate] interval into one row per day.
                dates = pd.date_range(start=row.fromDate, end=row.toDate)
                sizes = [row.size] * len(dates)
                data = {'date': dates, 'size': sizes}
                df2 = pd.DataFrame(data, columns=['date', 'size'])
                pd.to_datetime(df2['date'], format=('%Y-%m-%d'))
                df2.set_index(['date'], inplace=True)
                dfs.append(df2)
            # df_final = reduce(lambda left,right: pd.merge(left,right), dfs)
            df_key = (reduce(my_merge, dfs))
            # Rename the merged 'size' column to this issue's key.
            df_key.columns = [name if x == 'size' else x for x in df_key.columns]
            dfs_key.append(df_key)
        df_all = (reduce(my_merge, dfs_key))
        # Sort the columns based on Jira Project code and issue number
        mykeys = df_all.columns.values.tolist()
        mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6))
        df_all = df_all[mykeys]
        # Reindex to make sure we have all dates
        start, end = df_all.index.min(), df_all.index.max()
        df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill')
        return df_all
def cfd(self, cycle_data,size_history= None, pointscolumn= None, stacked = True ):
"""Return the data to build a cumulative flow diagram: a DataFrame,
indexed by day, with columns containing cumulative counts for each
of the items in the configured cycle.
In addition, a column called `cycle_time` contains the approximate
average cycle time of that day based on the first "accepted" status
and the first "complete" status.
If stacked = True then return dataframe suitable for plotting as stacked area chart
else return for platting as non-staked or line chart.
"""
# Define helper function
def cumulativeColumnStates(df,stacked):
"""
Calculate the column sums, were the incoming matrix columns represents items in workflow states
States progress from left to right.
We what to zero out items, other than right most value to avoid counting items in prior states.
:param df:
:return: pandas dataframe row with sum of column items
"""
# Helper functions to return the right most cells in 2D array
def last_number(lst):
if all(map(lambda x: x == 0, lst)):
return 0
elif lst[-1] != 0:
return len(lst) - 1
else:
return last_number(lst[:-1])
def fill_others(lst):
new_lst = [0] * len(lst)
new_lst[last_number(lst)] = lst[last_number(lst)]
return new_lst
df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy
if stacked:
df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1)
else:
df_result = df_zeroed
sum_row = df_result[df.columns].sum() # Sum Columns
return pd.DataFrame(data=sum_row).T # Transpose into row dataframe and return
# Helper function to return the right most cells in 2D array
def keeprightmoststate(df):
"""
Incoming matrix columns represents items in workflow states
States progress from left to right.
We what to zero out items, other than right most value.
:param df:
:return: pandas dataframe row with sum of column items
"""
def last_number(lst):
if all(map(lambda x: x == 0, lst)):
return 0
elif lst[-1] != 0:
return len(lst) - 1
else:
return last_number(lst[:-1])
def fill_others(lst):
new_lst = [0] * len(lst)
new_lst[last_number(lst)] = lst[last_number(lst)]
return new_lst
df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy
df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1)
return df_result
# Define helper function
def hide_greater_than_date(cell, adate):
""" Helper function to compare date values in cells
"""
result = False
try:
celldatetime = datetime.date(cell.year, cell.month, cell.day)
except:
return True
if celldatetime > adate:
return True
return False # We have a date value in cell and it is less than or equal to input date
# Helper function
def appendDFToCSV(df, csvFilePath, sep="\t",date_format='%Y-%m-%d', encoding='utf-8'):
import os
if not os.path.isfile(csvFilePath):
df.to_csv(csvFilePath, mode='a', index=False, sep=sep, date_format=date_format, encoding=encoding)
elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns):
raise Exception(
"Columns do not match!! Dataframe has " + str(len(df.columns)) + " columns. CSV file has " + str(
len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.")
elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all():
raise Exception("Columns and column order of dataframe and csv file do not match!!")
else:
df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False, date_format=date_format, encoding=encoding)
#print(pointscolumn)
# List of all state change columns that may have date value in them
cycle_names = [s['name'] for s in self.settings['cycle']]
# Create list of columns that we want to return in our results dataFrame
slice_columns = list(self.settings['none_sized_statuses']) # Make a COPY of the list so that we dont modify the reference.
if pointscolumn:
for size_state in self.settings['sized_statuses']: # states_to_size:
sizedStateName = size_state + 'Sized'
slice_columns.append(sizedStateName)
# Check that it works if we use all columns as sized.
slice_columns = []
for size_state in cycle_names:
sizedStateName = size_state + 'Sized'
slice_columns.append(sizedStateName)
else:
slice_columns = cycle_names
# Build a dataframe of just the "date" columns
df = cycle_data[cycle_names].copy()
# Strip out times from all dates
df = pd.DataFrame(
np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'),
columns=df.columns,
index=df.index
)
# No history provided this thus we return dataframe with just column headers.
if size_history is None:
return df
# Get a list of dates that a issue changed state
state_changes_on_dates_set = set()
for state in cycle_names:
state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state]))
# How many unique days did a issue stage state
# Remove non timestamp vlaues and sort the list
state_changes_on_dates = filter(lambda x: type(x.date()) == datetime.date,
sorted(list(state_changes_on_dates_set)))
# Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp
df = df.fillna(method='bfill', axis=1)
if pointscolumn:
storypoints = cycle_data[pointscolumn] # As at today
ids = cycle_data['key']
# create blank results dataframe
df_results = pd.DataFrame()
# For each date on which we had a issue state change we want to count and sum the totals for each of the given states
# 'Open','Analysis','Backlog','In Process','Done','Withdrawn'
timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S')
for date_index,statechangedate in enumerate(state_changes_on_dates):
if date_index%10 == 0: # Print out Progress every tenth
pass #print("CFD state change {} of {} ".format(date_index,len(state_changes_on_dates)))
if type(statechangedate.date()) == datetime.date:
# filterdate.year,filterdate.month,filterdate.day
filterdate = datetime.date(statechangedate.year, statechangedate.month,
statechangedate.day) # statechangedate.datetime()
# Apply function to each cell and only make it visible if issue was in state on or after the filter date
df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1)
if stacked:
df_filtered=keeprightmoststate(df_filtered)
if pointscolumn and (size_history is not None):
# For debug
#if filterdate.isoformat() == '2016-11-22':
# size_history.loc[filterdate.isoformat()].to_csv("debug-size-history.csv")
storypoints_series_on = size_history.loc[filterdate.isoformat()].T
df_size_on_day = pd.Series.to_frame(storypoints_series_on)
df_size_on_day.columns = [pointscolumn]
# Make sure get size data in the same sequence as ids.
left = pd.Series.to_frame(ids)
right = df_size_on_day
result = left.join(right, on=['key']) # http://pandas.pydata.org/pandas-docs/stable/merging.html\
df_countable = pd.concat([result, df_filtered], axis=1)
# for debuging and analytics append the days state to file
df_countable['date'] = filterdate.isoformat()
if stacked:
file_name = "daily-cfd-stacked-run-at"+ timenowstr + ".csv"
else:
file_name = "daily-cfd-run-at" + timenowstr + ".csv"
appendDFToCSV(df_countable, file_name )
else:
df_countable = df_filtered
# Because we size issues with Story Points we need to add some additional columns
# for each state based on size not just count
if pointscolumn:
for size_state in self.settings['sized_statuses']: #states_to_size:
sizedStateName = size_state + 'Sized'
df_countable[sizedStateName] = df_countable.apply( lambda row: (row[pointscolumn] * row[size_state] ), axis=1)
# For debugging write dataframe to sheet for current day.
#file_name="countable-cfd-for-day-"+ filterdate.isoformat()+timenowstr+".csv"
#df_countable.to_csv(file_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_ALL)
df_slice = df_countable.loc[:,slice_columns].copy()
df_sub_sum = cumulativeColumnStates(df_slice,stacked)
final_table = df_sub_sum.rename(index={0: filterdate})
# append to results
df_results = df_results.append(final_table)
df_results.sort_index(inplace=True)
df= df_results
# Count number of times each date occurs, preserving column order
#df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names]
# Fill missing dates with 0 and run a cumulative sum
#df = df.fillna(0).cumsum(axis=0)
# Reindex to make sure we have all dates
start, end = df.index.min(), df.index.max()
try: # If we have no change history we will not have any data in the df and will get a ValueError on reindex
df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill')
except ValueError:
pass
return df
def histogram(self, cycle_data, bins=10):
"""Return histogram data for the cycle times in `cycle_data`. Returns
a dictionary with keys `bin_values` and `bin_edges` of numpy arrays
"""
values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins)
index = []
for i, v in enumerate(edges):
if i == 0:
continue
index.append("%.01f to %.01f" % (edges[i - 1], edges[i],))
return pd.Series(values, name="Items", index=index)
def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None):
"""Return a data frame with columns `completed_timestamp` of the
given frequency, either
`count`, where count is the number of items
'sum', where sum is the sum of value specified by pointscolumn. Expected to be 'StoryPoints'
completed at that timestamp (e.g. daily).
"""
if len(cycle_data)<1:
return None # Note completed items yet, return None
if pointscolumn:
return cycle_data[['completed_timestamp', pointscolumn]] \
.rename(columns={pointscolumn: 'sum'}) \
.groupby('completed_timestamp').sum() \
.resample(frequency).sum() \
.fillna(0)
else:
return cycle_data[['completed_timestamp', 'key']] \
.rename(columns={'key': 'count'}) \
.groupby('completed_timestamp').count() \
.resample(frequency).sum() \
.fillna(0)
def scatterplot(self, cycle_data):
"""Return scatterplot data for the cycle times in `cycle_data`. Returns
a data frame containing only those items in `cycle_data` where values
are set for `completed_timestamp` and `cycle_time`, and with those two
columns as the first two, both normalised to whole days, and with
`completed_timestamp` renamed to `completed_date`.
"""
columns = list(cycle_data.columns)
columns.remove('cycle_time')
columns.remove('completed_timestamp')
columns = ['completed_timestamp', 'cycle_time'] + columns
data = (
cycle_data[columns]
.dropna(subset=['cycle_time', 'completed_timestamp'])
.rename(columns={'completed_timestamp': 'completed_date'})
)
data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]')
data['completed_date'] = data['completed_date'].map(pd.Timestamp.date)
return data
def percentiles(self, cycle_data, percentiles=(0.3, 0.5, 0.7, 0.85, 0.95,)):
"""Return percentiles for `cycle_time` in cycle data as a DataFrame
"""
return cycle_data['cycle_time'].dropna().quantile(percentiles)
@staticmethod
def burnup_monte_carlo(start_value, target_value, start_date, throughput_data, trials=100):
frequency = throughput_data.index.freq
if 'count' in throughput_data.columns:
data_column_name = 'count'
else:
data_column_name = 'sum'
# degenerate case - no steps, abort
if throughput_data[data_column_name].sum() <= 0:
return None
# guess how far away we are; drawing samples one at a time is slow
sample_buffer_size = int(2 * (target_value - start_value) / throughput_data[data_column_name].mean())
sample_buffer = dict(idx=0, buffer=None)
def get_sample():
if sample_buffer['buffer'] is None or sample_buffer['idx'] >= len(sample_buffer['buffer'].index):
sample_buffer['buffer'] = throughput_data[data_column_name].sample(sample_buffer_size, replace=True)
sample_buffer['idx'] = 0
sample_buffer['idx'] += 1
return sample_buffer['buffer'].iloc[sample_buffer['idx'] - 1]
series = {}
for t in range(trials):
current_date = start_date
current_value = start_value
dates = [current_date]
steps = [current_value]
while current_value < target_value:
current_date += frequency
sample_data = get_sample()
current_value += sample_data
dates.append(current_date)
steps.append(current_value)
series["Trial %d" % t] = pd.Series(steps, index=dates, name="Trial %d" % t)
return pd.DataFrame(series)
def burnup_forecast(self,
cfd_data,
throughput_data,
trials=100,
target=None,
backlog_column=None,
done_column=None,
percentiles=[0.5, 0.75, 0.85, 0.95],
sized = ''
):
try:
if len(cfd_data.index) == 0:
raise Exception("Cannot calculate burnup forecast with no data")
if len(throughput_data.index) == 0:
raise Exception("Cannot calculate burnup forecast with no completed items")
except AttributeError:
# Method does not exist. What now?
raise Exception("Cannot calculate burnup forecast with data or no completed items")
# Debug - what are the column names
#print("backlog_column --> {} done_column --> {}".format(backlog_column, done_column))
#print(cfd_data.info())
if backlog_column is None:
backlog_column = cfd_data.columns[0]
else:
backlog_column = backlog_column + sized
if done_column is None:
done_column = cfd_data.columns[-1]
else:
done_column = done_column + sized
if target is None:
target = cfd_data[backlog_column].max()
mc_trials = CycleTimeQueries.burnup_monte_carlo(
start_value=cfd_data[done_column].max(),
target_value=target,
start_date=cfd_data.index.max(),
throughput_data=throughput_data,
trials=trials
)
if mc_trials is not None:
for col in mc_trials:
mc_trials[col][mc_trials[col] > target] = target
# percentiles at finish line
finish_dates = mc_trials.apply(pd.Series.last_valid_index)
finish_date_percentiles = finish_dates.quantile(percentiles).dt.normalize()
#Convert burnup_forecast series into a dataframe with column headings so that can be saved to file with column headers
result = pd.DataFrame({'Percentile': finish_date_percentiles.index, 'Date': finish_date_percentiles.values})
else:
result = pd.DataFrame(columns=['Date','Percentile'])
return result
| [
"pandas.DataFrame",
"sys.platform.startswith",
"pandas.date_range",
"pandas.Series.to_frame",
"pandas.read_csv",
"pandas.merge",
"datetime.date",
"tempfile.SpooledTemporaryFile",
"datetime.datetime",
"os.path.isfile",
"pandas.to_datetime",
"pandas.Series",
"numpy.array",
"functools.reduce"... | [((5020, 5050), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (5043, 5050), False, 'import sys\n'), ((5781, 5840), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['key', 'fromDate', 'toDate', 'size']"}), "(columns=['key', 'fromDate', 'toDate', 'size'])\n", (5793, 5840), True, 'import pandas as pd\n'), ((15746, 15760), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15758, 15760), True, 'import pandas as pd\n'), ((16627, 16683), 'pandas.to_datetime', 'pd.to_datetime', (["result_size['toDate']"], {'format': '"""%Y-%m-%d"""'}), "(result_size['toDate'], format='%Y-%m-%d')\n", (16641, 16683), True, 'import pandas as pd\n'), ((16720, 16778), 'pandas.to_datetime', 'pd.to_datetime', (["result_size['fromDate']"], {'format': '"""%Y-%m-%d"""'}), "(result_size['fromDate'], format='%Y-%m-%d')\n", (16734, 16778), True, 'import pandas as pd\n'), ((18642, 18667), 'functools.reduce', 'reduce', (['my_merge', 'dfs_key'], {}), '(my_merge, dfs_key)\n', (18648, 18667), False, 'from functools import reduce\n'), ((25988, 26002), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (26000, 26002), True, 'import pandas as pd\n'), ((30770, 30814), 'pandas.Series', 'pd.Series', (['values'], {'name': '"""Items"""', 'index': 'index'}), "(values, name='Items', index=index)\n", (30779, 30814), True, 'import pandas as pd\n'), ((34723, 34743), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (34735, 34743), True, 'import pandas as pd\n'), ((5344, 5401), 'tempfile.SpooledTemporaryFile', 'tempfile.SpooledTemporaryFile', ([], {'max_size': '(50000)', 'mode': '"""w+t"""'}), "(max_size=50000, mode='w+t')\n", (5373, 5401), False, 'import tempfile\n'), ((15293, 15331), 'pandas.Series', 'pd.Series', (["v['data']"], {'dtype': "v['dtype']"}), "(v['data'], dtype=v['dtype'])\n", (15302, 15331), True, 'import pandas as pd\n'), ((17238, 17304), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'how': '"""outer"""', 
'left_index': '(True)', 'right_index': '(True)'}), "(df1, df2, how='outer', left_index=True, right_index=True)\n", (17246, 17304), True, 'import pandas as pd\n'), ((18484, 18505), 'functools.reduce', 'reduce', (['my_merge', 'dfs'], {}), '(my_merge, dfs)\n', (18490, 18505), False, 'from functools import reduce\n'), ((19057, 19092), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""D"""'}), "(start, end, freq='D')\n", (19070, 19092), True, 'import pandas as pd\n'), ((34656, 34706), 'pandas.Series', 'pd.Series', (['steps'], {'index': 'dates', 'name': "('Trial %d' % t)"}), "(steps, index=dates, name='Trial %d' % t)\n", (34665, 34706), True, 'import pandas as pd\n'), ((36739, 36842), 'pandas.DataFrame', 'pd.DataFrame', (["{'Percentile': finish_date_percentiles.index, 'Date':\n finish_date_percentiles.values}"], {}), "({'Percentile': finish_date_percentiles.index, 'Date':\n finish_date_percentiles.values})\n", (36751, 36842), True, 'import pandas as pd\n'), ((36874, 36918), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Date', 'Percentile']"}), "(columns=['Date', 'Percentile'])\n", (36886, 36918), True, 'import pandas as pd\n'), ((15985, 15999), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15997, 15999), True, 'import pandas as pd\n'), ((16524, 16591), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['key']", 'columns': "['fromDate', 'toDate', 'size']"}), "(index=['key'], columns=['fromDate', 'toDate', 'size'])\n", (16536, 16591), True, 'import pandas as pd\n'), ((18014, 18063), 'pandas.date_range', 'pd.date_range', ([], {'start': 'row.fromDate', 'end': 'row.toDate'}), '(start=row.fromDate, end=row.toDate)\n', (18027, 18063), True, 'import pandas as pd\n'), ((18188, 18232), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['date', 'size']"}), "(data, columns=['date', 'size'])\n", (18200, 18232), True, 'import pandas as pd\n'), ((18249, 18295), 'pandas.to_datetime', 'pd.to_datetime', (["df2['date']"], {'format': 
'"""%Y-%m-%d"""'}), "(df2['date'], format='%Y-%m-%d')\n", (18263, 18295), True, 'import pandas as pd\n'), ((21129, 21155), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'sum_row'}), '(data=sum_row)\n', (21141, 21155), True, 'import pandas as pd\n'), ((22523, 22569), 'datetime.date', 'datetime.date', (['cell.year', 'cell.month', 'cell.day'], {}), '(cell.year, cell.month, cell.day)\n', (22536, 22569), False, 'import datetime\n'), ((22946, 22973), 'os.path.isfile', 'os.path.isfile', (['csvFilePath'], {}), '(csvFilePath)\n', (22960, 22973), False, 'import os\n'), ((26220, 26243), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26241, 26243), False, 'import datetime\n'), ((26686, 26765), 'datetime.date', 'datetime.date', (['statechangedate.year', 'statechangedate.month', 'statechangedate.day'], {}), '(statechangedate.year, statechangedate.month, statechangedate.day)\n', (26699, 26765), False, 'import datetime\n'), ((30139, 30174), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""D"""'}), "(start, end, freq='D')\n", (30152, 30174), True, 'import pandas as pd\n'), ((10771, 10789), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (10783, 10789), True, 'import pandas as pd\n'), ((10946, 10977), 'datetime.datetime.now', 'datetime.datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (10967, 10977), False, 'import datetime\n'), ((27542, 27583), 'pandas.Series.to_frame', 'pd.Series.to_frame', (['storypoints_series_on'], {}), '(storypoints_series_on)\n', (27560, 27583), True, 'import pandas as pd\n'), ((27747, 27770), 'pandas.Series.to_frame', 'pd.Series.to_frame', (['ids'], {}), '(ids)\n', (27765, 27770), True, 'import pandas as pd\n'), ((27968, 28008), 'pandas.concat', 'pd.concat', (['[result, df_filtered]'], {'axis': '(1)'}), '([result, df_filtered], axis=1)\n', (27977, 28008), True, 'import pandas as pd\n'), ((10328, 10347), 'pandas.DataFrame', 'pd.DataFrame', (['edges'], {}), '(edges)\n', (10340, 10347), True, 
'import pandas as pd\n'), ((11072, 11131), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['key', 'fromDate', 'toDate', 'size']"}), "(columns=['key', 'fromDate', 'toDate', 'size'])\n", (11084, 11131), True, 'import pandas as pd\n'), ((11253, 11297), 'datetime.datetime', 'datetime.datetime', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (11270, 11297), False, 'import datetime\n'), ((11360, 11404), 'datetime.datetime', 'datetime.datetime', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (11377, 11404), False, 'import datetime\n'), ((10247, 10266), 'pandas.DataFrame', 'pd.DataFrame', (['edges'], {}), '(edges)\n', (10259, 10266), True, 'import pandas as pd\n'), ((23130, 23172), 'pandas.read_csv', 'pd.read_csv', (['csvFilePath'], {'nrows': '(1)', 'sep': 'sep'}), '(csvFilePath, nrows=1, sep=sep)\n', (23141, 23172), True, 'import pandas as pd\n'), ((24857, 24893), 'numpy.array', 'np.array', (['df.values'], {'dtype': '"""<M8[ns]"""'}), "(df.values, dtype='<M8[ns]')\n", (24865, 24893), True, 'import numpy as np\n'), ((23466, 23508), 'pandas.read_csv', 'pd.read_csv', (['csvFilePath'], {'nrows': '(1)', 'sep': 'sep'}), '(csvFilePath, nrows=1, sep=sep)\n', (23477, 23508), True, 'import pandas as pd\n'), ((23362, 23404), 'pandas.read_csv', 'pd.read_csv', (['csvFilePath'], {'nrows': '(1)', 'sep': 'sep'}), '(csvFilePath, nrows=1, sep=sep)\n', (23373, 23404), True, 'import pandas as pd\n')] |
from rlkit.samplers.rollout_functions import rollout
import rlkit.torch.pytorch_util as ptu
import argparse
import pickle
import uuid
import roboverse
import torch
import matplotlib
import matplotlib.pyplot as plt
filename = str(uuid.uuid4())
import numpy as np
from rlkit.torch.sac.policies import TanhGaussianPolicy, GaussianPolicy, MakeDeterministic
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.load_buffer import load_data_from_npy_chaining,load_data_from_npy_chaining_mult
from rlkit.samplers.data_collector import MdpPathCollector, \
CustomMDPPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, TwoHeadCNN, VQVAEEncoderConcatCNN, \
ConcatBottleneckVQVAECNN, VQVAEEncoderCNN
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from rlkit.util.video import VideoSaveFunction
from rlkit.launchers.launcher_util import setup_logger
import gym
import argparse, os
import roboverse
import numpy as np
import os
from os.path import expanduser
def simulate_policy(args):
env = roboverse.make(args.env, transpose_image=True)
action_dim = env.action_space.low.size
cnn_params=dict(
kernel_sizes=[3, 3, 3],
n_channels=[16, 16, 16],
strides=[1, 1, 1],
hidden_sizes=[1024, 512, 256],
paddings=[1, 1, 1],
pool_type='max2d',
pool_sizes=[2, 2, 1], # the one at the end means no pool
pool_strides=[2, 2, 1],
pool_paddings=[0, 0, 0],
image_augmentation=True,
image_augmentation_padding=4)
if args.deeper_net:
print('deeper conv net')
cnn_params.update(
kernel_sizes=[3, 3, 3, 3, 3],
n_channels=[32, 32, 32, 32, 32],
strides=[1, 1, 1, 1, 1],
paddings=[1, 1, 1, 1, 1],
pool_sizes=[2, 2, 1, 1, 1],
pool_strides=[2, 2, 1, 1, 1],
pool_paddings=[0, 0, 0, 0, 0]
)
cnn_params.update(
input_width=48,
input_height=48,
input_channels=9 if args.history else 3,
output_size=1,
added_fc_input_size=action_dim,
)
cnn_params.update(
output_size=256,
added_fc_input_size=args.statedim if args.imgstate else 0,
hidden_sizes=[1024, 512],
)
print(cnn_params)
if args.vqvae_enc:
policy_obs_processor = VQVAEEncoderCNN(**cnn_params, num_res=6 if args.deeper_net else 3)
else:
policy_obs_processor = CNN(**cnn_params)
policy_class = GaussianPolicy if args.gaussian_policy else TanhGaussianPolicy
policy = policy_class(
obs_dim=cnn_params['output_size'],
action_dim=action_dim,
hidden_sizes=[256, 256, 256],
obs_processor=policy_obs_processor,
)
parameters = torch.load(args.policy_path)
policy.load_state_dict(parameters['policy_state_dict'])
print("Policy loaded")
if args.enable_render:
# some environments need to be reconfigured for visualization
env.enable_render()
if args.gpu:
ptu.set_gpu_mode(True)
if hasattr(policy, "to"):
policy.to(ptu.device)
if hasattr(env, "vae"):
env.vae.to(ptu.device)
if args.deterministic:
policy = MakeDeterministic(policy)
if args.pause:
import ipdb; ipdb.set_trace()
policy.train(False)
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
import torchvision.transforms.functional as F
from PIL import Image
import time
def plot_img(obs_img):
matplotlib.use('TkAgg')
plt.figure()
if type(obs_img) == torch.Tensor:
from torchvision import transforms
im_new = transforms.ToPILImage()(obs_img)
else:
im_new = obs_img
plt.imshow(im_new)
plt.show()
def plot_img_mult(obs_img, num=3, channels=3):
matplotlib.use('TkAgg')
plt.figure()
for i in range(num):
plt.subplot(1,num,i+1)
curr_img = obs_img[channels*i:channels*(i+1)]
if type(curr_img) == torch.Tensor:
from torchvision import transforms
im_new = transforms.ToPILImage()(curr_img)
else:
im_new = curr_img
plt.imshow(im_new)
plt.show()
paths = []
for i in range(args.N):
print('traj', i)
next_observations = []
observations = []
cropped_images = []
actions = []
rewards = []
dones = []
infos = []
observation = env.reset()
prev_context = ptu.zeros(6).float()
prev_imgs = [np.zeros_like(observation['image']) for _ in range(args.num_prev)]
if args.history:
delayed_imgs = []
for x in range(args.num_prev):
delayed_imgs.append(observation['image'])
action = np.concatenate((np.random.uniform(0, 0, (6,)),np.zeros((2,))))
observation, reward, done, info = env.step(action)
all_imgs = prev_imgs + delayed_imgs
for j in range(args.H):
print('trans', j)
obs = observation['image']
if args.history:
curr_imgs = all_imgs[:args.num_prev+1]
all_imgs.append(obs)
all_imgs.pop(0)
curr_imgs = [torch.from_numpy(x.reshape(3,48,48)) for x in curr_imgs]
obs_img = torch.cat(tuple(curr_imgs))
else:
obs_img = torch.from_numpy(obs).reshape(3,48,48)
if args.save_img:
if args.history:
plot_img_mult(obs_img,args.num_prev+1)
else:
plot_img(obs_img)
if args.debug:
action = np.random.rand(8)
else:
if args.scale:
if args.ptype == 1:
context = prev_context
elif args.ptype == 2:
context = ptu.from_numpy(np.random.uniform(-0.05, 0.05, (6,)))
else:
assert False
action = policy.forward(obs_img.flatten()[None],extra_fc_input=context if args.context else None)[0].squeeze().detach().cpu().numpy()
if args.ptype == 1:
prev_context = ptu.from_numpy(action[:6])*(1-1/1.1)
action = (1/1.1)* action
elif args.ptype == 2:
action = action - context #not right
else:
assert False
else:
action = policy.forward(obs_img.flatten()[None],extra_fc_input=torch.from_numpy(observation['state_observation'])[None].float() if args.context else None)[0].squeeze().detach().cpu().numpy()
print('action', action)
old_obs = observation
observation, reward, done, info = env.step(action)
observations.append(old_obs)
next_observations.append(observation)
cropped_images.append(obs_img)
actions.append(action)
rewards.append(reward)
dones.append(done)
infos.append(info)
paths.append(dict(observations=observations,next_observations=next_observations,cropped_images=cropped_images,actions=actions, rewards = rewards, dones=dones, infos=infos))
print('saved', args.out_path)
np.save(args.out_path, paths)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, default='Widow250DoubleDrawerOpenGraspNeutral-v0')
parser.add_argument('--N', type=int, default=10, help='Number of Trajectories')
parser.add_argument('--H', type=int, default=50, help='Max length of rollout')
parser.add_argument('--num_prev', type=int, default=2)
# parser.add_argument()
# parser.add_argument('--policy_path', type=str, default='/nfs/kun1/users/asap7772/cog/data/updatedbuffer-rebuttal-v1-drawer-minq2/updatedbuffer_rebuttal_v1_drawer_minq2_2021_08_22_10_39_45_0000--s-0/model_pkl/190.pt')
parser.add_argument('--policy_path', type=str, default='/nfs/kun1/users/asap7772/cog/data/shifted-relaunchedv2-brac-drawer-beta5/shifted_relaunchedv2_brac_drawer_beta5_2021_08_20_00_41_51_0000--s-0/model_pkl/620.pt')
parser.add_argument('--out_path', type=str, default='evaluation')
parser.add_argument('--env_type', type=str, default='evaluation')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--save_img', action='store_true')
parser.add_argument('--pause', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--scale', action='store_true')
parser.add_argument('--context', action='store_true')
parser.add_argument('--ptype', type=int, default=1)
parser.add_argument('--deterministic', action='store_true')
parser.add_argument('--gaussian_policy', action='store_true')
parser.add_argument('--hide', action='store_true')
parser.add_argument('--enable_render', action='store_true')
parser.add_argument('--log_diagnostics', action='store_true')
parser.add_argument('--smdim', action='store_true')
parser.add_argument('--vqvae_enc', action='store_true')
parser.add_argument('--deeper_net', action='store_true')
parser.add_argument('--imgstate', action='store_true')
parser.add_argument('--pickle', action='store_true')
parser.add_argument('--history', action='store_true')
parser.add_argument('--statedim', type=int, default=3)
args = parser.parse_args()
simulate_policy(args)
| [
"argparse.ArgumentParser",
"ipdb.set_trace",
"rlkit.torch.pytorch_util.from_numpy",
"matplotlib.pyplot.figure",
"rlkit.torch.conv_networks.CNN",
"rlkit.torch.sac.policies.MakeDeterministic",
"numpy.zeros_like",
"matplotlib.pyplot.imshow",
"torch.load",
"torchvision.transforms.ToPILImage",
"rlkit... | [((229, 241), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (239, 241), False, 'import uuid\n'), ((1138, 1184), 'roboverse.make', 'roboverse.make', (['args.env'], {'transpose_image': '(True)'}), '(args.env, transpose_image=True)\n', (1152, 1184), False, 'import roboverse\n'), ((2870, 2898), 'torch.load', 'torch.load', (['args.policy_path'], {}), '(args.policy_path)\n', (2880, 2898), False, 'import torch\n'), ((7697, 7722), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7720, 7722), False, 'import argparse, os\n'), ((2454, 2520), 'rlkit.torch.conv_networks.VQVAEEncoderCNN', 'VQVAEEncoderCNN', ([], {'num_res': '(6 if args.deeper_net else 3)'}), '(**cnn_params, num_res=6 if args.deeper_net else 3)\n', (2469, 2520), False, 'from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, TwoHeadCNN, VQVAEEncoderConcatCNN, ConcatBottleneckVQVAECNN, VQVAEEncoderCNN\n'), ((2562, 2579), 'rlkit.torch.conv_networks.CNN', 'CNN', ([], {}), '(**cnn_params)\n', (2565, 2579), False, 'from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, TwoHeadCNN, VQVAEEncoderConcatCNN, ConcatBottleneckVQVAECNN, VQVAEEncoderCNN\n'), ((3142, 3164), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (['(True)'], {}), '(True)\n', (3158, 3164), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3329, 3354), 'rlkit.torch.sac.policies.MakeDeterministic', 'MakeDeterministic', (['policy'], {}), '(policy)\n', (3346, 3354), False, 'from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic\n'), ((3396, 3412), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (3410, 3412), False, 'import ipdb\n'), ((3678, 3701), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (3692, 3701), False, 'import matplotlib\n'), ((3710, 3722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3720, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3917, 3935), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['im_new'], {}), '(im_new)\n', (3927, 3935), True, 'import matplotlib.pyplot as plt\n'), ((3944, 3954), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3952, 3954), True, 'import matplotlib.pyplot as plt\n'), ((4019, 4042), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (4033, 4042), False, 'import matplotlib\n'), ((4051, 4063), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4061, 4063), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4444), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4442, 4444), True, 'import matplotlib.pyplot as plt\n'), ((7625, 7654), 'numpy.save', 'np.save', (['args.out_path', 'paths'], {}), '(args.out_path, paths)\n', (7632, 7654), True, 'import numpy as np\n'), ((4105, 4131), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'num', '(i + 1)'], {}), '(1, num, i + 1)\n', (4116, 4131), True, 'import matplotlib.pyplot as plt\n'), ((4407, 4425), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_new'], {}), '(im_new)\n', (4417, 4425), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4823), 'numpy.zeros_like', 'np.zeros_like', (["observation['image']"], {}), "(observation['image'])\n", (4801, 4823), True, 'import numpy as np\n'), ((3833, 3856), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3854, 3856), False, 'from torchvision import transforms\n'), ((4745, 4757), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(6)'], {}), '(6)\n', (4754, 4757), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5932, 5949), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (5946, 5949), True, 'import numpy as np\n'), ((4309, 4332), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4330, 4332), False, 'from torchvision import transforms\n'), ((5054, 5083), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0)', '(6,)'], {}), '(0, 0, (6,))\n', (5071, 5083), True, 'import numpy as np\n'), ((5084, 
5098), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (5092, 5098), True, 'import numpy as np\n'), ((5657, 5678), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (5673, 5678), False, 'import torch\n'), ((6511, 6537), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['action[:6]'], {}), '(action[:6])\n', (6525, 6537), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6177, 6213), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.05)', '(6,)'], {}), '(-0.05, 0.05, (6,))\n', (6194, 6213), True, 'import numpy as np\n'), ((6868, 6918), 'torch.from_numpy', 'torch.from_numpy', (["observation['state_observation']"], {}), "(observation['state_observation'])\n", (6884, 6918), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dataset_utils as du
from skmultilearn.adapt import MLkNN
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import os
import sys
MAX_NB_WORDS = 20000  # cap the vocabulary at the 20k most frequent tokens


def tokenize_data(X):
    """Fit a Keras Tokenizer (vocabulary-capped) on the given texts and return it."""
    fitted_tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    fitted_tokenizer.fit_on_texts(X)
    return fitted_tokenizer
def get_cado_predictions():
    """Train an MLkNN classifier on the CADO multi-label dataset and return
    (binary predictions, probability scores) for the test split.

    Train and test CSVs are tokenized together so both splits share one
    vocabulary, then split back apart before fitting.
    """
    data_path = '../../datasets/cado/train.csv'
    test_path = '../../datasets/cado/test.csv'
    data = du.load_data(data_path)
    test = du.load_data(test_path)
    # CSV row layout: document text at column 6, 12 label columns from column 7.
    text_index = 6
    label_start_index = 7
    X = [d[text_index] for d in data]
    labels = [d[label_start_index:label_start_index+12] for d in data ]
    X_test = [d[text_index] for d in test]
    labels_test = [d[label_start_index:label_start_index+12] for d in test]
    Y = np.array(labels, dtype='int')
    y_test = np.array(labels_test, dtype='int')
    #Y = np.array(binary_labels, dtype='int')
    # Concatenate train + test so the tokenizer sees the full vocabulary;
    # test_index remembers where the test rows start.
    test_index = len(X)
    X = X + X_test
    Y = np.vstack([Y , y_test])
    tokenizer = tokenize_data(X)
    word_index = tokenizer.word_index
    sequences = tokenizer.texts_to_sequences(X)
    # Pad/truncate every document to exactly 700 token ids.
    X = pad_sequences(sequences, maxlen=700,
                     padding="post", truncating="post", value=0)
    num_words = min(MAX_NB_WORDS, len(word_index)+1)
    # NOTE(review): embedding_matrix is built but never used below -- it looks
    # like leftover scaffolding from an embedding-based model.
    embedding_matrix = np.zeros((num_words, 1))
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_matrix[i] = 1
    # Split the combined matrices back into the train/test partitions.
    X_train = X[0:test_index , :]
    Y_train = Y[0:test_index , :]
    x_test = X[test_index:len(X), :]
    y_test = Y[test_index:len(Y), :]
    classifier = MLkNN()
    classifier.fit(X_train,Y_train)
    predictions = classifier.predict(x_test)
    scores = classifier.predict_proba(x_test)
    # MLkNN returns sparse matrices; densify them for the caller.
    y_pred= predictions.toarray()
    y_score= scores.toarray()
    return y_pred, y_score
if __name__ == "__main__":
    # Run the full train/predict pipeline when executed as a script.
    predicted_labels, predicted_scores = get_cado_predictions()
| [
"skmultilearn.adapt.MLkNN",
"keras.preprocessing.sequence.pad_sequences",
"numpy.zeros",
"keras.preprocessing.text.Tokenizer",
"numpy.array",
"dataset_utils.load_data",
"numpy.vstack"
] | [((315, 348), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'MAX_NB_WORDS'}), '(num_words=MAX_NB_WORDS)\n', (324, 348), False, 'from keras.preprocessing.text import Tokenizer\n'), ((547, 570), 'dataset_utils.load_data', 'du.load_data', (['data_path'], {}), '(data_path)\n', (559, 570), True, 'import dataset_utils as du\n'), ((582, 605), 'dataset_utils.load_data', 'du.load_data', (['test_path'], {}), '(test_path)\n', (594, 605), True, 'import dataset_utils as du\n'), ((923, 952), 'numpy.array', 'np.array', (['labels'], {'dtype': '"""int"""'}), "(labels, dtype='int')\n", (931, 952), True, 'import numpy as np\n'), ((966, 1000), 'numpy.array', 'np.array', (['labels_test'], {'dtype': '"""int"""'}), "(labels_test, dtype='int')\n", (974, 1000), True, 'import numpy as np\n'), ((1108, 1130), 'numpy.vstack', 'np.vstack', (['[Y, y_test]'], {}), '([Y, y_test])\n', (1117, 1130), True, 'import numpy as np\n'), ((1274, 1359), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': '(700)', 'padding': '"""post"""', 'truncating': '"""post"""', 'value': '(0)'}), "(sequences, maxlen=700, padding='post', truncating='post', value=0\n )\n", (1287, 1359), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1458, 1482), 'numpy.zeros', 'np.zeros', (['(num_words, 1)'], {}), '((num_words, 1))\n', (1466, 1482), True, 'import numpy as np\n'), ((1779, 1786), 'skmultilearn.adapt.MLkNN', 'MLkNN', ([], {}), '()\n', (1784, 1786), False, 'from skmultilearn.adapt import MLkNN\n')] |
# -*- coding: utf-8 -*-
"""Fast test."""
import numpy
import ndd

# Sample multiplicities; the alphabet size k is set to the sample length.
counts = [7, 3, 5, 8, 9, 1, 3, 3, 1, 0, 2, 5, 2, 11, 4, 23, 5, 0, 8, 0]
h = ndd.entropy(counts, k=len(counts))
# Reference values observed on earlier runs:
# href = 2.623634344888532
# href = 2.623634344902917
href = 2.6192535776467056
absolute_error = numpy.abs(h - href)
relative_error = absolute_error / href
# Tolerance: machine epsilon of single precision floats (the gap between
# 1.0 and the next representable float32).
eps = numpy.finfo(numpy.float32).eps
if absolute_error < eps:
    print('%r. Abs. error is %r. Test ok!' % (h, absolute_error))
else:
    raise AssertionError('estimate %r /= %r' % (h, href))
| [
"numpy.finfo",
"numpy.abs"
] | [((260, 279), 'numpy.abs', 'numpy.abs', (['(h - href)'], {}), '(h - href)\n', (269, 279), False, 'import numpy\n'), ((372, 398), 'numpy.finfo', 'numpy.finfo', (['numpy.float32'], {}), '(numpy.float32)\n', (383, 398), False, 'import numpy\n')] |
import numpy as np
import matplotlib
import glob
import re
from collections import Counter
from string import punctuation
# if you get the error: "TypeError: 'figure' is an unknown keyword argument"
# uncomment the line below:
# matplotlib.use('Qt4Agg')
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
except ImportError as e:
print(e)
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
exit()
def plot_with_labels(low_dim_embs, labels, filename='tsne_embeddings.png'):
  """Scatter-plot 2-D embeddings, annotate each point with its label, and save."""
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # figure size in inches
  for idx, word in enumerate(labels):
    px, py = low_dim_embs[idx, :]
    plt.scatter(px, py)
    plt.annotate(word, xy=(px, py), xytext=(5, 2),
                 textcoords='offset points', ha='right', va='bottom')
  plt.savefig(filename)
  print("plots saved in {0}".format(filename))
vocabulary_size = 1000

def read_and_clean_data(path):
    """Read a text file, strip punctuation, and return its lowercased words.

    Args:
        path: Path to a plain-text file.

    Returns:
        list of str: whitespace-separated tokens with punctuation characters
        removed and all letters lowercased.
    """
    with open(path, "r") as handle:
        text = handle.read()
    # Drop every character that is neither a word character nor whitespace.
    cleaned = re.sub(r'[^\w\s]', '', text)
    # Return a concrete list: the original returned a one-shot map iterator,
    # which can only be consumed once and has no len().
    return [token.lower() for token in cleaned.split()]
if __name__ == "__main__":
    # Step 6: Visualize the embeddings.
    corpus = []
    count = []
    words = []
    folders = ["neg", "pos"]
    # Accumulate words from review files until the vocabulary cap is exceeded.
    # NOTE(review): the unconditional `break` after the inner loop means only
    # the first folder ("neg") is ever processed -- confirm this is intended.
    for folder in folders:
        for path in glob.glob(folder + "/*.txt"):
            words += read_and_clean_data(path)
            if len(set(words)) > vocabulary_size :
                break
            else:
                continue
        break
    count = Counter(words)
    unique_words = sorted(count.keys())
    idxs = range(len(count.keys()))
    data = "the first that is the first the and do not bad and not good to the".split()
    # word -> index and index -> word lookup tables.
    reverse_dictionary = dict(zip(unique_words, idxs))
    dictionary = dict(zip(idxs, unique_words))
    # reverse_dictionary = np.load("Idx2Word.npy").item()
    embeddings = np.load("CBOW_Embeddings.npy")
    # Project the first `plot_only` embeddings down to 2-D for plotting.
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
    plot_only = 500
    low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :])
    labels = [dictionary[i] for i in range(plot_only)]
    plot_with_labels(low_dim_embs, labels)
    plt.show()
"numpy.load",
"matplotlib.pyplot.show",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"glob.glob",
"collections.Counter",
"re.sub",
"matplotlib.pyplot.savefig"
] | [((665, 693), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 18)'}), '(figsize=(18, 18))\n', (675, 693), True, 'import matplotlib.pyplot as plt\n'), ((1023, 1044), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1034, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1811), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (1804, 1811), False, 'from collections import Counter\n'), ((2153, 2183), 'numpy.load', 'np.load', (['"""CBOW_Embeddings.npy"""'], {}), "('CBOW_Embeddings.npy')\n", (2160, 2183), True, 'import numpy as np\n'), ((2195, 2271), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(30)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(5000)', 'method': '"""exact"""'}), "(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n", (2199, 2271), False, 'from sklearn.manifold import TSNE\n'), ((2459, 2469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2467, 2469), True, 'import matplotlib.pyplot as plt\n'), ((788, 805), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (799, 805), True, 'import matplotlib.pyplot as plt\n'), ((814, 916), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x, y)', 'xytext': '(5, 2)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n ha='right', va='bottom')\n", (826, 916), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1253), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'text'], {}), "('[^\\\\w\\\\s]', '', text)\n", (1230, 1253), False, 'import re\n'), ((1584, 1612), 'glob.glob', 'glob.glob', (["(folder + '/*.txt')"], {}), "(folder + '/*.txt')\n", (1593, 1612), False, 'import glob\n')] |
import argparse
import os
import numpy as np
import math
import itertools
import yaml
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from .datasets import WCDataset
from test_tube import Experiment
import torchvision.utils as vutils
import torch.nn as nn
import torch.nn.functional as F
import torch
# Run on GPU when available; pick the matching tensor constructors.
# (`True if x else False` was redundant -- is_available() already returns bool.)
cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
def parse_args():
    """Build the InfoGAN training CLI parser, parse argv, and echo the result."""
    parser = argparse.ArgumentParser()
    # (flag, type, default, help)
    option_table = [
        ("--n_epochs", int, 1000, "number of epochs of training"),
        ("--batch_size", int, 1024, "size of the batches"),
        ("--lr", float, 0.0002, "adam: learning rate"),
        ("--b1", float, 0.5, "adam: decay of first order momentum of gradient"),
        ("--b2", float, 0.999, "adam: decay of first order momentum of gradient"),
        ("--n_cpu", int, 32, "number of cpu threads to use during batch generation"),
        ("--latent_dim", int, 64, "dimensionality of the latent space"),
        ("--code_dim", int, 2, "latent code"),
        ("--n_classes", int, 10, "number of classes for dataset"),
        ("--img_size", int, 64, "size of each image dimension"),
        ("--channels", int, 1, "number of image channels"),
        ("--sample_interval", int, 10, "interval between image sampling"),
    ]
    for flag, arg_type, default, help_text in option_table:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    opt = parser.parse_args()
    print(opt)
    return opt
def weights_init_normal(m):
    """DCGAN-style init: Conv weights ~ N(0, 0.02); BatchNorm weights ~ N(1, 0.02), bias 0."""
    layer_type = m.__class__.__name__
    if "Conv" in layer_type:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_type:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
def to_categorical(y, num_columns):
    """Convert integer class labels to a one-hot encoded FloatTensor Variable."""
    one_hot = np.zeros((y.shape[0], num_columns))
    one_hot[np.arange(y.shape[0]), y.astype(int)] = 1.0
    return Variable(FloatTensor(one_hot))
class Generator(nn.Module):
    """InfoGAN generator: maps (noise, one-hot label, latent code) to an image."""

    def __init__(self, opt):
        super(Generator, self).__init__()
        # Generator input is the concatenation of noise, label and code vectors.
        in_features = opt['latent_dim'] + opt['n_classes'] + opt['code_dim']
        # Spatial size of the feature map before the two 2x upsampling stages.
        self.init_size = opt['img_size'] // 4
        self.l1 = nn.Sequential(nn.Linear(in_features, 128 * self.init_size ** 2))
        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt['channels'], 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, noise, labels, code):
        """Generate a batch of images from concatenated latent inputs."""
        latent = torch.cat((noise, labels, code), -1)
        hidden = self.l1(latent)
        hidden = hidden.view(hidden.shape[0], 128, self.init_size, self.init_size)
        return self.conv_blocks(hidden)
class Discriminator(nn.Module):
    """InfoGAN discriminator with auxiliary class and latent-code heads."""

    def __init__(self, opt):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """Returns layers of each discriminator block (conv halves H and W)."""
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block

        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt['channels'], 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )

        # The height and width of downsampled image (each block halves it).
        ds_size = opt['img_size'] // 2 ** 4

        # Output layers: adversarial validity, class prediction, latent code.
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1))
        # FIX: dim=1 made explicit -- nn.Softmax() without `dim` is deprecated
        # in PyTorch; the implicit dim resolves to 1 for this 2-D input anyway,
        # so behavior is unchanged.
        self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt['n_classes']), nn.Softmax(dim=1))
        self.latent_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt['code_dim']))

    def forward(self, img):
        """Return (validity, class probabilities, predicted latent code)."""
        out = self.conv_blocks(img)
        out = out.view(out.shape[0], -1)
        validity = self.adv_layer(out)
        label = self.aux_layer(out)
        latent_code = self.latent_layer(out)
        return validity, label, latent_code
if __name__ == '__main__':
    opt = parse_args()
    # Loss functions: MSE adversarial loss (LSGAN-style) plus the InfoGAN
    # categorical / continuous mutual-information terms.
    adversarial_loss = torch.nn.MSELoss()
    categorical_loss = torch.nn.CrossEntropyLoss()
    continuous_loss = torch.nn.MSELoss()
    # Loss weights
    lambda_cat = 1
    lambda_con = 0.1
    # Initialize generator and discriminator
    # NOTE(review): Generator.__init__ and Discriminator.__init__ both require
    # an `opt` argument -- these zero-argument calls will raise TypeError.
    # They should presumably be Generator(opt) and Discriminator(opt).
    generator = Generator()
    discriminator = Discriminator()
    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()
        categorical_loss.cuda()
        continuous_loss.cuda()
    # Initialize weights
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
    # Configure data loader
    log_save_dir = os.path.expanduser('~/Research/FMEphys/GAN_Logs')
    exp_log = Experiment(name='InfoGan',
                            debug=False,
                            save_dir=log_save_dir,
                            autosave=True)
    # Make sure the checkpoint directory for this run/version exists.
    os.makedirs(os.path.join(log_save_dir,exp_log.name,f'version_{exp_log.version}','checkpoints'), exist_ok=True)
    configfile = os.path.join(log_save_dir,exp_log.name,f'version_{exp_log.version}','config.yaml')
    # Persist the CLI configuration next to the logs for reproducibility.
    with open(configfile,'w') as file:
        try:
            yaml.dump(opt.__dict__, file)
        except yaml.YAMLError as exc:
            print(exc)
    train_path = os.path.expanduser("~/Research/FMEphys/WC3d_Train_Data.csv")
    val_path = os.path.expanduser("~/Research/FMEphys/WC3d_Val_Data.csv")
    # Rescale image tensors from [0, 1] to [-1, 1] to match the generator's Tanh output.
    SetRange = transforms.Lambda(lambda X: 2 * X - 1.)
    transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                        transforms.Resize((64,64)),
                                        transforms.ToTensor(),
                                        SetRange])
    dataset = WCDataset(root_dir = os.path.expanduser("~/Research/FMEphys/data/"),
                        csv_file = train_path,
                        transform=transform
                        )
    dataloader = DataLoader(dataset,
                            batch_size= opt['batch_size'],
                            shuffle = True,
                            # drop_last=True,
                            num_workers=opt['n_cpu'],
                            persistent_workers=True,
                            pin_memory=True,
                            prefetch_factor=10)
    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt['lr'], betas=(opt['b1'], opt['b2']))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt['lr'], betas=(opt['b1'], opt['b2']))
    # The info head is trained through both networks jointly.
    optimizer_info = torch.optim.Adam(
        itertools.chain(generator.parameters(), discriminator.parameters()), lr=opt['lr'], betas=(opt['b1'], opt['b2'])
    )
    # Static generator inputs for sampling
    static_z = Variable(FloatTensor(np.zeros((opt['n_classes'] ** 2, opt['latent_dim']))))
    # One row per class: labels 0..n_classes-1 repeated n_classes times.
    static_label = to_categorical(
        np.array([num for _ in range(opt['n_classes']) for num in range(opt['n_classes'])]), num_columns=opt['n_classes']
    )
    static_code = Variable(FloatTensor(np.zeros((opt['n_classes'] ** 2, opt['code_dim']))))
    n_row=10 # N-rows for sampling images
    def sample_image(n_row, batches_done):
        """Saves a grid of generated digits ranging from 0 to n_classes"""
        # Static sample
        z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt['latent_dim']))))
        static_sample = generator(z, static_label, static_code)
        # save_image(static_sample.data, "images/static/%d.png" % batches_done, nrow=n_row, normalize=True)
        exp_log.add_image('static',vutils.make_grid(static_sample.data,nrow=n_row, normalize=True), batches_done)
        # Get varied c1 and c2: sweep one code dimension from -1 to 1 while
        # holding the other at zero.
        zeros = np.zeros((n_row ** 2, 1))
        c_varied = np.repeat(np.linspace(-1, 1, n_row)[:, np.newaxis], n_row, 0)
        c1 = Variable(FloatTensor(np.concatenate((c_varied, zeros), -1)))
        c2 = Variable(FloatTensor(np.concatenate((zeros, c_varied), -1)))
        sample1 = generator(static_z, static_label, c1)
        sample2 = generator(static_z, static_label, c2)
        # save_image(sample1.data, "images/varying_c1/%d.png" % batches_done, nrow=n_row, normalize=True)
        # save_image(sample2.data, "images/varying_c2/%d.png" % batches_done, nrow=n_row, normalize=True)
        exp_log.add_image('varying_c1',vutils.make_grid(sample1.data,nrow=n_row, normalize=True), batches_done)
        exp_log.add_image('varying_c2',vutils.make_grid(sample2.data,nrow=n_row, normalize=True), batches_done)
    # ----------
    # Training
    # ----------
    # Log the untrained generator's samples as a baseline.
    sample_image(n_row=n_row, batches_done=0)
    for epoch in range(1,opt['n_epochs']+1):
        for i, (imgs,labels) in enumerate(dataloader):
            batch_size = imgs.shape[0]
            # Adversarial ground truths
            valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
            fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
            # Configure input
            real_imgs = Variable(imgs.type(FloatTensor))
            labels = to_categorical(labels.numpy(), num_columns=opt['n_classes'])
            # -----------------
            # Train Generator
            # -----------------
            optimizer_G.zero_grad()
            # Sample noise and labels as generator input
            z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt['latent_dim']))))
            label_input = to_categorical(np.random.randint(0, opt['n_classes'], batch_size), num_columns=opt['n_classes'])
            code_input = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt['code_dim']))))
            # Generate a batch of images
            gen_imgs = generator(z, label_input, code_input)
            # Loss measures generator's ability to fool the discriminator
            validity, _, _ = discriminator(gen_imgs)
            g_loss = adversarial_loss(validity, valid)
            g_loss.backward()
            optimizer_G.step()
            # ---------------------
            # Train Discriminator
            # ---------------------
            optimizer_D.zero_grad()
            # Loss for real images
            real_pred, _, _ = discriminator(real_imgs)
            d_real_loss = adversarial_loss(real_pred, valid)
            # Loss for fake images (detach() keeps generator gradients out)
            fake_pred, _, _ = discriminator(gen_imgs.detach())
            d_fake_loss = adversarial_loss(fake_pred, fake)
            # Total discriminator loss
            d_loss = (d_real_loss + d_fake_loss) / 2
            d_loss.backward()
            optimizer_D.step()
            # ------------------
            # Information Loss
            # ------------------
            optimizer_info.zero_grad()
            # Sample labels
            sampled_labels = np.random.randint(0, opt['n_classes'], batch_size)
            # Ground truth labels
            gt_labels = Variable(LongTensor(sampled_labels), requires_grad=False)
            # Sample noise, labels and code as generator input
            z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt['latent_dim']))))
            label_input = to_categorical(sampled_labels, num_columns=opt['n_classes'])
            code_input = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt['code_dim']))))
            gen_imgs = generator(z, label_input, code_input)
            _, pred_label, pred_code = discriminator(gen_imgs)
            info_loss = lambda_cat * categorical_loss(pred_label, gt_labels) + lambda_con * continuous_loss(
                pred_code, code_input
            )
            info_loss.backward()
            optimizer_info.step()
            # --------------
            # Log Progress
            # --------------
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [info loss: %f]"
                % (epoch, opt['n_epochs'], i, len(dataloader), d_loss.item(), g_loss.item(), info_loss.item())
            )
            exp_log.log({'d_loss': d_loss.item(),'g_loss': g_loss.item(), 'info_loss': info_loss.item()})
            # ------------
            # Save Model
            # ------------
        # Log sample grids once per epoch; checkpoint every sample_interval epochs.
        sample_image(n_row=n_row, batches_done=epoch)
        if epoch % opt['sample_interval'] == 0:
            ##### Save Model #####
            savefile = os.path.join(log_save_dir,exp_log.name,f'version_{exp_log.version}','checkpoints','checkpoint_{:d}.pt'.format(epoch))
            torch.save({'generator': generator.state_dict(),
                        'discriminator':discriminator.state_dict(),
                        'optimizer_G':optimizer_G.state_dict(),
                        'optimizer_D':optimizer_D.state_dict(),
                        'optimizer_info':optimizer_info.state_dict(),
                        'Epoch': epoch,
                        'Loss': {'d_loss': d_loss.item(),'g_loss': g_loss.item(), 'info_loss': info_loss.item()}},
                        savefile)
            print('Saved New Model')
| [
"test_tube.Experiment",
"argparse.ArgumentParser",
"yaml.dump",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"numpy.random.randint",
"numpy.random.normal",
"os.path.join",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"torchvision.transforms.Lambda",
"torch.nn.Upsample",
... | [((466, 491), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (489, 491), False, 'import torch\n'), ((669, 694), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (692, 694), False, 'import argparse\n'), ((2362, 2397), 'numpy.zeros', 'np.zeros', (['(y.shape[0], num_columns)'], {}), '((y.shape[0], num_columns))\n', (2370, 2397), True, 'import numpy as np\n'), ((5089, 5107), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5105, 5107), False, 'import torch\n'), ((5131, 5158), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (5156, 5158), False, 'import torch\n'), ((5181, 5199), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (5197, 5199), False, 'import torch\n'), ((5693, 5742), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Research/FMEphys/GAN_Logs"""'], {}), "('~/Research/FMEphys/GAN_Logs')\n", (5711, 5742), False, 'import os\n'), ((5757, 5834), 'test_tube.Experiment', 'Experiment', ([], {'name': '"""InfoGan"""', 'debug': '(False)', 'save_dir': 'log_save_dir', 'autosave': '(True)'}), "(name='InfoGan', debug=False, save_dir=log_save_dir, autosave=True)\n", (5767, 5834), False, 'from test_tube import Experiment\n'), ((6027, 6116), 'os.path.join', 'os.path.join', (['log_save_dir', 'exp_log.name', 'f"""version_{exp_log.version}"""', '"""config.yaml"""'], {}), "(log_save_dir, exp_log.name, f'version_{exp_log.version}',\n 'config.yaml')\n", (6039, 6116), False, 'import os\n'), ((6284, 6344), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Research/FMEphys/WC3d_Train_Data.csv"""'], {}), "('~/Research/FMEphys/WC3d_Train_Data.csv')\n", (6302, 6344), False, 'import os\n'), ((6360, 6418), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Research/FMEphys/WC3d_Val_Data.csv"""'], {}), "('~/Research/FMEphys/WC3d_Val_Data.csv')\n", (6378, 6418), False, 'import os\n'), ((6434, 6474), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['(lambda X: 2 * X - 1.0)'], {}), 
'(lambda X: 2 * X - 1.0)\n', (6451, 6474), True, 'import torchvision.transforms as transforms\n'), ((6980, 7141), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': "opt['batch_size']", 'shuffle': '(True)', 'num_workers': "opt['n_cpu']", 'persistent_workers': '(True)', 'pin_memory': '(True)', 'prefetch_factor': '(10)'}), "(dataset, batch_size=opt['batch_size'], shuffle=True, num_workers\n =opt['n_cpu'], persistent_workers=True, pin_memory=True, prefetch_factor=10\n )\n", (6990, 7141), False, 'from torch.utils.data import DataLoader\n'), ((2071, 2118), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (2092, 2118), False, 'import torch\n'), ((3428, 3464), 'torch.cat', 'torch.cat', (['(noise, labels, code)', '(-1)'], {}), '((noise, labels, code), -1)\n', (3437, 3464), False, 'import torch\n'), ((5911, 6000), 'os.path.join', 'os.path.join', (['log_save_dir', 'exp_log.name', 'f"""version_{exp_log.version}"""', '"""checkpoints"""'], {}), "(log_save_dir, exp_log.name, f'version_{exp_log.version}',\n 'checkpoints')\n", (5923, 6000), False, 'import os\n'), ((8743, 8768), 'numpy.zeros', 'np.zeros', (['(n_row ** 2, 1)'], {}), '((n_row ** 2, 1))\n', (8751, 8768), True, 'import numpy as np\n'), ((2171, 2218), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (2192, 2218), False, 'import torch\n'), ((2227, 2268), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (2250, 2268), False, 'import torch\n'), ((2777, 2824), 'torch.nn.Linear', 'nn.Linear', (['input_dim', '(128 * self.init_size ** 2)'], {}), '(input_dim, 128 * self.init_size ** 2)\n', (2786, 2824), True, 'import torch.nn as nn\n'), ((2881, 2900), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2895, 2900), True, 'import torch.nn as nn\n'), ((2914, 2941), 
'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (2925, 2941), True, 'import torch.nn as nn\n'), ((2955, 2998), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (2964, 2998), True, 'import torch.nn as nn\n'), ((3012, 3036), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)', '(0.8)'], {}), '(128, 0.8)\n', (3026, 3036), True, 'import torch.nn as nn\n'), ((3050, 3081), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3062, 3081), True, 'import torch.nn as nn\n'), ((3095, 3122), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (3106, 3122), True, 'import torch.nn as nn\n'), ((3136, 3178), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, padding=1)\n', (3145, 3178), True, 'import torch.nn as nn\n'), ((3192, 3215), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)', '(0.8)'], {}), '(64, 0.8)\n', (3206, 3215), True, 'import torch.nn as nn\n'), ((3229, 3260), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3241, 3260), True, 'import torch.nn as nn\n'), ((3274, 3328), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', "opt['channels']", '(3)'], {'stride': '(1)', 'padding': '(1)'}), "(64, opt['channels'], 3, stride=1, padding=1)\n", (3283, 3328), True, 'import torch.nn as nn\n'), ((3342, 3351), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3349, 3351), True, 'import torch.nn as nn\n'), ((4496, 4528), 'torch.nn.Linear', 'nn.Linear', (['(128 * ds_size ** 2)', '(1)'], {}), '(128 * ds_size ** 2, 1)\n', (4505, 4528), True, 'import torch.nn as nn\n'), ((4569, 4616), 'torch.nn.Linear', 'nn.Linear', (['(128 * ds_size ** 2)', "opt['n_classes']"], {}), "(128 * ds_size ** 2, opt['n_classes'])\n", (4578, 4616), True, 'import torch.nn as nn\n'), ((4618, 
4630), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (4628, 4630), True, 'import torch.nn as nn\n'), ((4674, 4720), 'torch.nn.Linear', 'nn.Linear', (['(128 * ds_size ** 2)', "opt['code_dim']"], {}), "(128 * ds_size ** 2, opt['code_dim'])\n", (4683, 4720), True, 'import torch.nn as nn\n'), ((6175, 6204), 'yaml.dump', 'yaml.dump', (['opt.__dict__', 'file'], {}), '(opt.__dict__, file)\n', (6184, 6204), False, 'import yaml\n'), ((6510, 6553), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (6530, 6553), True, 'import torchvision.transforms as transforms\n'), ((6591, 6618), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64, 64)'], {}), '((64, 64))\n', (6608, 6618), True, 'import torchvision.transforms as transforms\n'), ((6655, 6676), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6674, 6676), True, 'import torchvision.transforms as transforms\n'), ((6761, 6807), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Research/FMEphys/data/"""'], {}), "('~/Research/FMEphys/data/')\n", (6779, 6807), False, 'import os\n'), ((7821, 7873), 'numpy.zeros', 'np.zeros', (["(opt['n_classes'] ** 2, opt['latent_dim'])"], {}), "((opt['n_classes'] ** 2, opt['latent_dim']))\n", (7829, 7873), True, 'import numpy as np\n'), ((8078, 8128), 'numpy.zeros', 'np.zeros', (["(opt['n_classes'] ** 2, opt['code_dim'])"], {}), "((opt['n_classes'] ** 2, opt['code_dim']))\n", (8086, 8128), True, 'import numpy as np\n'), ((8616, 8680), 'torchvision.utils.make_grid', 'vutils.make_grid', (['static_sample.data'], {'nrow': 'n_row', 'normalize': '(True)'}), '(static_sample.data, nrow=n_row, normalize=True)\n', (8632, 8680), True, 'import torchvision.utils as vutils\n'), ((9361, 9419), 'torchvision.utils.make_grid', 'vutils.make_grid', (['sample1.data'], {'nrow': 'n_row', 'normalize': '(True)'}), '(sample1.data, nrow=n_row, normalize=True)\n', (9377, 9419), True, 'import 
torchvision.utils as vutils\n'), ((9473, 9531), 'torchvision.utils.make_grid', 'vutils.make_grid', (['sample2.data'], {'nrow': 'n_row', 'normalize': '(True)'}), '(sample2.data, nrow=n_row, normalize=True)\n', (9489, 9531), True, 'import torchvision.utils as vutils\n'), ((11840, 11890), 'numpy.random.randint', 'np.random.randint', (['(0)', "opt['n_classes']", 'batch_size'], {}), "(0, opt['n_classes'], batch_size)\n", (11857, 11890), True, 'import numpy as np\n'), ((3885, 3928), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_filters', 'out_filters', '(3)', '(2)', '(1)'], {}), '(in_filters, out_filters, 3, 2, 1)\n', (3894, 3928), True, 'import torch.nn as nn\n'), ((3930, 3961), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3942, 3961), True, 'import torch.nn as nn\n'), ((3963, 3981), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (3975, 3981), True, 'import torch.nn as nn\n'), ((8350, 8405), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', "(n_row ** 2, opt['latent_dim'])"], {}), "(0, 1, (n_row ** 2, opt['latent_dim']))\n", (8366, 8405), True, 'import numpy as np\n'), ((8798, 8823), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n_row'], {}), '(-1, 1, n_row)\n', (8809, 8823), True, 'import numpy as np\n'), ((8884, 8921), 'numpy.concatenate', 'np.concatenate', (['(c_varied, zeros)', '(-1)'], {}), '((c_varied, zeros), -1)\n', (8898, 8921), True, 'import numpy as np\n'), ((8958, 8995), 'numpy.concatenate', 'np.concatenate', (['(zeros, c_varied)', '(-1)'], {}), '((zeros, c_varied), -1)\n', (8972, 8995), True, 'import numpy as np\n'), ((10499, 10549), 'numpy.random.randint', 'np.random.randint', (['(0)', "opt['n_classes']", 'batch_size'], {}), "(0, opt['n_classes'], batch_size)\n", (10516, 10549), True, 'import numpy as np\n'), ((4031, 4063), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_filters', '(0.8)'], {}), '(out_filters, 0.8)\n', (4045, 4063), True, 'import torch.nn as nn\n'), ((10400, 
10455), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', "(batch_size, opt['latent_dim'])"], {}), "(0, 1, (batch_size, opt['latent_dim']))\n", (10416, 10455), True, 'import numpy as np\n'), ((10627, 10682), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', "(batch_size, opt['code_dim'])"], {}), "(-1, 1, (batch_size, opt['code_dim']))\n", (10644, 10682), True, 'import numpy as np\n'), ((12109, 12164), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', "(batch_size, opt['latent_dim'])"], {}), "(0, 1, (batch_size, opt['latent_dim']))\n", (12125, 12164), True, 'import numpy as np\n'), ((12300, 12355), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', "(batch_size, opt['code_dim'])"], {}), "(-1, 1, (batch_size, opt['code_dim']))\n", (12317, 12355), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MNIST CNN Demo (LeNet5)
"""
import sys
import os
import errno
import numpy as np
import time
import logging
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle_fl.mpc as pfl_mpc
from paddle_fl.mpc.data_utils.data_utils import get_datautils
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
# 'aby3' selects the three-party secure-computation protocol used throughout.
mpc_protocol_name = 'aby3'
mpc_du = get_datautils(mpc_protocol_name)
# Command-line arguments: this party's role id, plus the address and port
# handed to pfl_mpc.init (presumably the coordination server — confirm).
role, server, port = sys.argv[1], sys.argv[2], sys.argv[3]
# modify host(localhost).
pfl_mpc.init(mpc_protocol_name, int(role), "localhost", server, int(port))
role = int(role)
# data preprocessing
BATCH_SIZE = 128
epoch_num = 1
# Model: encrypted inputs -> one fully-connected layer -> softmax + cross entropy.
x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 1, 28, 28], dtype='int64')
y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 10], dtype='int64')
fc_out = pfl_mpc.layers.fc(input=x, size=10)
cost, softmax = pfl_mpc.layers.softmax_with_cross_entropy(logits=fc_out,
label=y,
soft_label=True,
return_softmax=True)
# Clone the forward program before minimize() adds backward/optimizer ops,
# so it can be reused for inference and model saving.
infer_program = fluid.default_main_program().clone(for_test=False)
avg_loss = pfl_mpc.layers.mean(cost)
optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(avg_loss)
# prepare train and test reader
mpc_data_dir = "./mpc_data/"
if not os.path.exists(mpc_data_dir):
    raise ValueError("mpc_data_dir is not found. Please prepare encrypted data.")
# train_reader: each party loads its own secret shares of features/labels.
feature_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_feature", id=role, shape=(1, 28, 28))
label_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_label", id=role, shape=(10,))
batch_feature = mpc_du.batch(feature_reader, BATCH_SIZE, drop_last=True)
batch_label = mpc_du.batch(label_reader, BATCH_SIZE, drop_last=True)
# test_reader
test_feature_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_test_feature", id=role, shape=(1, 28, 28))
test_label_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_test_label", id=role, shape=(10,))
test_batch_feature = mpc_du.batch(test_feature_reader, BATCH_SIZE, drop_last=True)
test_batch_label = mpc_du.batch(test_label_reader, BATCH_SIZE, drop_last=True)
place = fluid.CPUPlace()
# async data loader
loader = fluid.io.DataLoader.from_generator(feed_list=[x, y], capacity=BATCH_SIZE)
batch_sample = paddle.reader.compose(batch_feature, batch_label)
loader.set_batch_generator(batch_sample, places=place)
test_loader = fluid.io.DataLoader.from_generator(feed_list=[x, y], capacity=BATCH_SIZE)
test_batch_sample = paddle.reader.compose(test_batch_feature, test_batch_label)
test_loader.set_batch_generator(test_batch_sample, places=place)
# infer
def infer():
    """Run one pass over the encrypted test set and append every batch's
    fetched softmax output to a per-party prediction file.

    Relies on the module-level ``test_loader``, ``exe``, ``infer_program``,
    ``softmax``, ``role`` and ``logger`` objects defined by this script.
    """
    out_dir = "./mpc_infer_data/"
    # Create the output directory, tolerating a concurrent creation.
    try:
        os.mkdir(out_dir)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
    part_file = out_dir + "mnist_debug_prediction" + ".part{}".format(role)
    # Start from a clean file so fresh predictions are not appended to stale ones.
    if os.path.exists(part_file):
        os.remove(part_file)
    t_start = time.time()
    step = 0
    for sample in test_loader():
        step += 1
        prediction = exe.run(program=infer_program, feed=sample, fetch_list=[softmax])
        with open(part_file, 'ab') as sink:
            sink.write(np.array(prediction).tostring())
        if step % 10 == 0:
            logger.info('MPC infer of step={}, cost time in seconds:{}'.format(step, (time.time() - t_start)))
    logger.info('MPC infer time in seconds:{}'.format((time.time() - t_start)))
# train
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
mpc_model_basedir = "./mpc_model/"
logger.info('MPC training start...')
for epoch_id in range(epoch_num):
    step = 0
    epoch_start_time = time.time()
    for sample in loader():
        step += 1
        step_start_time = time.time()
        results = exe.run(feed=sample, fetch_list=[softmax])
        step_end_time = time.time()
        if step % 100 == 0:
            logger.info('MPC training of epoch_id={} step={}, cost time in seconds:{}'
                        .format(epoch_id, step, (step_end_time - step_start_time)))
    # For each epoch: infer or save infer program
    #infer()
    # One model directory per (epoch, party); every party saves its own share.
    mpc_model_dir = mpc_model_basedir + "epoch{}/party{}".format(epoch_id, role)
    fluid.io.save_inference_model(dirname=mpc_model_dir,
feeded_var_names=["x", "y"],
target_vars=[softmax],
executor=exe,
main_program=infer_program,
model_filename="__model__")
# NOTE(review): the message below reports epoch_num, not the last epoch_id,
# and epoch_start_time is the start of the *last* epoch only — confirm intent.
epoch_end_time = time.time()
logger.info('MPC training of epoch_id={} batch_size={}, cost time in seconds:{}'
            .format(epoch_num, BATCH_SIZE, (epoch_end_time - epoch_start_time)))
# infer
infer()
| [
"paddle.fluid.io.save_inference_model",
"os.remove",
"os.mkdir",
"paddle_fl.mpc.layers.fc",
"paddle.fluid.Executor",
"paddle_fl.mpc.data",
"os.path.exists",
"paddle.reader.compose",
"paddle_fl.mpc.layers.softmax_with_cross_entropy",
"paddle_fl.mpc.data_utils.data_utils.get_datautils",
"paddle.fl... | [((917, 988), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(format='%(asctime)s - %(levelname)s - %(message)s')\n", (936, 988), False, 'import logging\n'), ((998, 1024), 'logging.getLogger', 'logging.getLogger', (['"""fluid"""'], {}), "('fluid')\n", (1015, 1024), False, 'import logging\n'), ((1092, 1124), 'paddle_fl.mpc.data_utils.data_utils.get_datautils', 'get_datautils', (['mpc_protocol_name'], {}), '(mpc_protocol_name)\n', (1105, 1124), False, 'from paddle_fl.mpc.data_utils.data_utils import get_datautils\n'), ((1361, 1429), 'paddle_fl.mpc.data', 'pfl_mpc.data', ([], {'name': '"""x"""', 'shape': '[BATCH_SIZE, 1, 28, 28]', 'dtype': '"""int64"""'}), "(name='x', shape=[BATCH_SIZE, 1, 28, 28], dtype='int64')\n", (1373, 1429), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1434, 1495), 'paddle_fl.mpc.data', 'pfl_mpc.data', ([], {'name': '"""y"""', 'shape': '[BATCH_SIZE, 10]', 'dtype': '"""int64"""'}), "(name='y', shape=[BATCH_SIZE, 10], dtype='int64')\n", (1446, 1495), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1506, 1541), 'paddle_fl.mpc.layers.fc', 'pfl_mpc.layers.fc', ([], {'input': 'x', 'size': '(10)'}), '(input=x, size=10)\n', (1523, 1541), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1558, 1665), 'paddle_fl.mpc.layers.softmax_with_cross_entropy', 'pfl_mpc.layers.softmax_with_cross_entropy', ([], {'logits': 'fc_out', 'label': 'y', 'soft_label': '(True)', 'return_softmax': '(True)'}), '(logits=fc_out, label=y,\n soft_label=True, return_softmax=True)\n', (1599, 1665), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1916, 1941), 'paddle_fl.mpc.layers.mean', 'pfl_mpc.layers.mean', (['cost'], {}), '(cost)\n', (1935, 1941), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1954, 1994), 'paddle_fl.mpc.optimizer.SGD', 'pfl_mpc.optimizer.SGD', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1975, 1994), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((2941, 2957), 
'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2955, 2957), True, 'import paddle.fluid as fluid\n'), ((2988, 3061), 'paddle.fluid.io.DataLoader.from_generator', 'fluid.io.DataLoader.from_generator', ([], {'feed_list': '[x, y]', 'capacity': 'BATCH_SIZE'}), '(feed_list=[x, y], capacity=BATCH_SIZE)\n', (3022, 3061), True, 'import paddle.fluid as fluid\n'), ((3077, 3126), 'paddle.reader.compose', 'paddle.reader.compose', (['batch_feature', 'batch_label'], {}), '(batch_feature, batch_label)\n', (3098, 3126), False, 'import paddle\n'), ((3197, 3270), 'paddle.fluid.io.DataLoader.from_generator', 'fluid.io.DataLoader.from_generator', ([], {'feed_list': '[x, y]', 'capacity': 'BATCH_SIZE'}), '(feed_list=[x, y], capacity=BATCH_SIZE)\n', (3231, 3270), True, 'import paddle.fluid as fluid\n'), ((3291, 3350), 'paddle.reader.compose', 'paddle.reader.compose', (['test_batch_feature', 'test_batch_label'], {}), '(test_batch_feature, test_batch_label)\n', (3312, 3350), False, 'import paddle\n'), ((4510, 4531), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (4524, 4531), True, 'import paddle.fluid as fluid\n'), ((2093, 2121), 'os.path.exists', 'os.path.exists', (['mpc_data_dir'], {}), '(mpc_data_dir)\n', (2107, 2121), False, 'import os\n'), ((3850, 3886), 'os.path.exists', 'os.path.exists', (['prediction_file_part'], {}), '(prediction_file_part)\n', (3864, 3886), False, 'import os\n'), ((3959, 3970), 'time.time', 'time.time', ([], {}), '()\n', (3968, 3970), False, 'import time\n'), ((4403, 4414), 'time.time', 'time.time', ([], {}), '()\n', (4412, 4414), False, 'import time\n'), ((4540, 4571), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (4569, 4571), True, 'import paddle.fluid as fluid\n'), ((4717, 4728), 'time.time', 'time.time', ([], {}), '()\n', (4726, 4728), False, 'import time\n'), ((5263, 5445), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', ([], {'dirname': 
'mpc_model_dir', 'feeded_var_names': "['x', 'y']", 'target_vars': '[softmax]', 'executor': 'exe', 'main_program': 'infer_program', 'model_filename': '"""__model__"""'}), "(dirname=mpc_model_dir, feeded_var_names=['x',\n 'y'], target_vars=[softmax], executor=exe, main_program=infer_program,\n model_filename='__model__')\n", (5292, 5445), True, 'import paddle.fluid as fluid\n'), ((5500, 5511), 'time.time', 'time.time', ([], {}), '()\n', (5509, 5511), False, 'import time\n'), ((1853, 1881), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1879, 1881), True, 'import paddle.fluid as fluid\n'), ((3524, 3558), 'os.path.exists', 'os.path.exists', (['mpc_infer_data_dir'], {}), '(mpc_infer_data_dir)\n', (3538, 3558), False, 'import os\n'), ((3896, 3927), 'os.remove', 'os.remove', (['prediction_file_part'], {}), '(prediction_file_part)\n', (3905, 3927), False, 'import os\n'), ((4801, 4812), 'time.time', 'time.time', ([], {}), '()\n', (4810, 4812), False, 'import time\n'), ((4898, 4909), 'time.time', 'time.time', ([], {}), '()\n', (4907, 4909), False, 'import time\n'), ((3585, 3613), 'os.mkdir', 'os.mkdir', (['mpc_infer_data_dir'], {}), '(mpc_infer_data_dir)\n', (3593, 3613), False, 'import os\n'), ((4264, 4275), 'time.time', 'time.time', ([], {}), '()\n', (4273, 4275), False, 'import time\n'), ((4181, 4201), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (4189, 4201), True, 'import numpy as np\n')] |
from functions import *
from global_variables import init_global
import simpy
import matplotlib.pyplot as plt
import random as rd
import numpy as np
import os
from scipy.optimize import curve_fit
from scipy.special import factorial
# --- Simulation configuration -------------------------------------------
diff_serverns = [1, 2, 4]        # number of servers per scenario
mu = 0.80                        # service rate
l = 0.64                         # base arrival rate (scaled per server count below)
end_n_actions = 12000            # number of actions per simulation run
batch_size = 8000                # batch size for the batch-means statistics
initialisation_period = 10000    # warm-up samples discarded from the statistics
n_simulations = 30               # independent repetitions per scenario
LT_value = 5                     # parameter of the long-tail service-time option
n_batches = (end_n_actions-initialisation_period)/batch_size/2.
sjf = False  # use shortest job first
db_helptime = "LT"  # choice between M, D, LT
list_average_queuelength = []
list_average_queuingtimes = []
# Mean queue length over time, one row per server-count scenario.
all_queue_lengths_overtime = np.zeros((len(diff_serverns), end_n_actions+1))
# run the simulation multiple times
# (dead pre-loop init of queuelengthforrepetitios and the unused LT_values
# list were removed; the manual scenario counter is now enumerate().)
for i, n_server in enumerate(diff_serverns):
    # Queue-length trace of every repetition of the current scenario.
    queuelengthforrepetitios = np.zeros((n_simulations, end_n_actions + 1))
    for j in range(0, n_simulations):
        # Scale the arrival rate with the server count so the per-server load stays fixed.
        l = 0.64*n_server
        # initialize the global lists
        init_global(end_n_actions)
        # create a simpy environment
        env = simpy.Environment()
        # set up the system
        env.process(setup(env, n_server, mu, l, sjf, end_n_actions, db_helptime, LT_value))
        # run the program
        env.run()
        average_queuelength = np.average(global_variables.queue_length_list)
        list_average_queuelength.append(average_queuelength)
        list_batch_averages = batch_averages(batch_size, initialisation_period)
        average_queuingtimes = np.average(global_variables.time_spend_in_queue_list)
        list_average_queuingtimes.append(average_queuingtimes)
        queuelengthforrepetitios[j] = global_variables.queue_length_list
    all_queue_lengths_overtime[i] = queuelengthforrepetitios.mean(0)
    print("Now at simulation {}".format(i + 1))
    # calculate the variance (over the batch averages of the last repetition)
    standard_deviation, confidence_interval = calc_varci(list_batch_averages, n_batches)
########################################################################################################
print("The average queueing time is {} +- {}".format(np.average(list_batch_averages), confidence_interval))
# plot the queue length over time
plt.figure()
plt.plot(global_variables.queue_time_list, np.transpose(all_queue_lengths_overtime))
plt.legend(diff_serverns)
ax = plt.gca()
plt.xlabel("Time (a.u.)", fontsize=16, fontweight='bold')
plt.ylabel("Queue length (# of tasks)", fontsize=16, fontweight='bold')
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.transpose",
"matplotlib.pyplot.figure",
"global_variables.init_global",
"simpy.Environment",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((725, 769), 'numpy.zeros', 'np.zeros', (['(n_simulations, end_n_actions + 1)'], {}), '((n_simulations, end_n_actions + 1))\n', (733, 769), True, 'import numpy as np\n'), ((2243, 2255), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2253, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2366), 'matplotlib.pyplot.legend', 'plt.legend', (['diff_serverns'], {}), '(diff_serverns)\n', (2351, 2366), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2379, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2382, 2439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (a.u.)"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('Time (a.u.)', fontsize=16, fontweight='bold')\n", (2392, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2440, 2511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Queue length (# of tasks)"""'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "('Queue length (# of tasks)', fontsize=16, fontweight='bold')\n", (2450, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2598, 2600), True, 'import matplotlib.pyplot as plt\n'), ((873, 917), 'numpy.zeros', 'np.zeros', (['(n_simulations, end_n_actions + 1)'], {}), '((n_simulations, end_n_actions + 1))\n', (881, 917), True, 'import numpy as np\n'), ((2299, 2339), 'numpy.transpose', 'np.transpose', (['all_queue_lengths_overtime'], {}), '(all_queue_lengths_overtime)\n', (2311, 2339), True, 'import numpy as np\n'), ((1049, 1075), 'global_variables.init_global', 'init_global', (['end_n_actions'], {}), '(end_n_actions)\n', (1060, 1075), False, 'from global_variables import init_global\n'), ((1128, 1147), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (1145, 1147), False, 'import simpy\n'), ((1345, 1391), 'numpy.average', 'np.average', (['global_variables.queue_length_list'], {}), 
'(global_variables.queue_length_list)\n', (1355, 1391), True, 'import numpy as np\n'), ((1565, 1618), 'numpy.average', 'np.average', (['global_variables.time_spend_in_queue_list'], {}), '(global_variables.time_spend_in_queue_list)\n', (1575, 1618), True, 'import numpy as np\n'), ((2153, 2184), 'numpy.average', 'np.average', (['list_batch_averages'], {}), '(list_batch_averages)\n', (2163, 2184), True, 'import numpy as np\n')] |
import os
from operator import itemgetter
from numpy import random, zeros
import numpy as np
from scipy.spatial.distance import cosine
from scipy.linalg import norm
from sklearn.linear_model import LinearRegression, Lasso
from math import log
import matplotlib.pyplot as plt
import pandas as pd
def compute_user_rank(user_item_ratings):
    """Count how many items each user has rated.

    Returns a dict mapping user id -> rating count (as a float).  Entries
    are inserted in ascending order of count (stable for ties), and users
    with no ratings are omitted entirely.
    """
    counts = {uid: float(len(items))
              for uid, items in user_item_ratings.items()
              if len(items) > 0}
    ordered = sorted(counts.items(), key=itemgetter(1))
    return dict(ordered)
def compute_item_rank(user_item_ratings):
    """Count how many users have rated each item.

    Returns a dict mapping item id -> rating count (as a float).  Entries
    are inserted in ascending order of count (stable for ties).
    """
    counts = {}
    for ratings in user_item_ratings.values():
        for item_id in ratings:
            counts[item_id] = counts.get(item_id, 0.0) + 1.0
    pairs = sorted(counts.items(), key=itemgetter(1))
    return {item_id: count for item_id, count in pairs}
def optimize_function_err_fun(train_data_dict, test_data_dict, user_rank_dict, item_rank_dict, iter_no, eta, beta):
    """Train Zipf-penalized matrix factorization with sampled SGD.

    Samples 100 users per outer iteration and 10 items per sampled user,
    and applies cosine-similarity gradient steps with an extra Zipf rank
    penalty (weighted by ``beta``) on the item vectors.

    Returns (user_features, item_features): dicts mapping the visited
    user/item ids to their latest latent vectors.

    NOTE(review): ``u`` and ``v`` are re-initialized at the start of every
    outer iteration, so the features learned in one iteration are discarded
    by the next — this looks unintended; confirm.  ``user_rank_dict`` is
    accepted but never used.
    """
    user_features = {}
    item_features = {}
    # Per-user regression coefficients and predicted ranks from the Lasso fit.
    (user_coef, rank_list) = compute_alpha(train_data_dict, test_data_dict, item_rank_dict)
    for x in range(0, iter_no):
        print('Iteration # %s' % x)
        # 100 uniform floats in [0, 1) scaled to the user count; int() below
        # truncates them into random indices of user_repo.
        user_id_list = random.random_sample(100) * train_data_dict.keys().__len__()
        user_len = max(train_data_dict.keys()) + 1
        item_len = max(item_rank_dict.keys()) + 1
        # 30-dimensional latent factors (re-created each outer iteration; see NOTE above).
        u = random.random([user_len, 30])
        v = random.random([item_len, 30])
        user_repo = list(train_data_dict.keys())
        for uid_id in user_id_list:
            user_id = user_repo[int(uid_id)]
            if train_data_dict[user_id].__len__() < 1:
                continue
            item_id_list = list(train_data_dict[user_id].keys())
            # Same truncation trick: 10 random item indices for this user.
            sampled_item_id_list = random.random_sample(10) * item_id_list.__len__()
            item_repo = train_data_dict[user_id].keys()
            item_counter = 0
            for iid in sampled_item_id_list:
                #item_counter += 1
                #print('Item Counter %s' % item_counter)
                item_id = item_id_list[int(iid)]
                # Rating normalized to [0, 1] (assumes a 5-point rating scale).
                R_v = train_data_dict[user_id][item_id_list[int(iid)]]*1.0/5
                t_0 = np.dot(u[user_id], v[item_id])
                t_1 = norm(u[user_id])
                t_2 = norm(v[item_id])
                t_3 = t_1 * t_2
                t_4 = t_0
                # Predicted popularity rank, clamped to at least 1 so the
                # log term below stays finite.
                rank = user_coef[user_id] * t_0
                if rank < 1.0:
                    rank = 1.0
                # Gradient steps on the user vector for the cosine-similarity error.
                u[user_id] -= -1.0 * eta *(2*(R_v - t_0/t_3))/t_3 * v[item_id]
                u[user_id] -= eta*2.0*t_4*(R_v - t_4/t_3)/(t_1**3*t_2) * u[user_id]
                # Recompute the intermediates after the user update.
                t_0 = np.dot(v[item_id], u[user_id])
                t_1 = norm(u[user_id])
                t_2 = norm(v[item_id])
                t_3 = t_1 * t_2
                t_4 = 2*(R_v-t_0/t_3)
                # Item update: Zipf rank penalty (beta term) plus the error gradient.
                v[item_id] -= eta*beta*item_len/(log(rank/item_len)**2*t_0)*u[user_id] - eta*(t_4/t_3*u[user_id] - (t_0*t_4)/(t_1*t_2**3)*v[item_id])
                user_features[user_id] = u[user_id]
                item_features[item_id] = v[item_id]
    return user_features, item_features
def compute_mf(train_data_dict, test_data_dict, item_rank_dict, eta):
    """Train and evaluate vanilla matrix factorization (the baseline).

    Runs one sampled-SGD sweep (100 random users, 10 random items each)
    with learning rate ``eta``, then evaluates on ``test_data_dict``.

    Returns (MAE, DMF), where MAE is the mean absolute error of the
    cosine-similarity rating estimate scaled to a 1-5 range, and DMF is
    the degree-of-Matthew-effect statistic computed from the popularity
    ranks of the predicted items.
    """
    user_len = max(train_data_dict.keys()) + 1
    item_len = max(item_rank_dict.keys()) + 1
    # 30-dimensional latent factors for all users/items.
    u = random.random([user_len, 30])
    v = random.random([item_len, 30])
    # 100 uniform floats scaled to the user count; int() below truncates
    # them into random indices of user_repo.
    user_id_list = random.random_sample(100) * train_data_dict.keys().__len__()
    user_repo = list(train_data_dict.keys())
    user_features = {}
    item_features = {}
    for uid_id in user_id_list:
        user_id = user_repo[int(uid_id)]
        if train_data_dict[user_id].__len__() < 1:
            continue
        item_id_list = list(train_data_dict[user_id].keys())
        sampled_item_id_list = random.random_sample(10) * item_id_list.__len__()
        item_repo = train_data_dict[user_id].keys()
        for iid in sampled_item_id_list:
            item_id = item_id_list[int(iid)]
            R_v = train_data_dict[user_id][item_id_list[int(iid)]]
            # Plain squared-error gradient steps on the dot-product estimate.
            u[user_id] += eta*2*(R_v - np.dot(u[user_id], v[item_id])) * v[item_id]
            v[item_id] += eta*2*(R_v - np.dot(u[user_id], v[item_id])) * u[user_id]
            user_features[user_id] = u[user_id]
            item_features[item_id] = v[item_id]
    # --- Evaluation over the test set (only pairs visited during training).
    pr_dict = {}
    pr_list = []
    mae = 0.0
    total_no = 0.0
    for user_id in test_data_dict.keys():
        for item_id in test_data_dict[user_id]:
            if user_id in user_features and item_id in item_features:
                # Cosine similarity scaled to the 5-point rating range.
                R_v = 5.0 * (np.dot(u[user_id], v[item_id])/(norm(u[user_id])*norm(v[item_id])))
                pr_dict[item_id] = pr_dict.get(item_id, 0)+1
                mae += abs(R_v - test_data_dict[user_id][item_id])
                total_no += 1
    for item_id in pr_dict.keys():
        pr_list.append((item_id, pr_dict[item_id]))
    pr_list_s = sorted(pr_list, key=itemgetter(1), reverse=True)
    # Assign ranks by descending prediction count; equal adjacent pairs share a rank.
    rank_list = []
    iter_id = 0
    rank_id = 1
    while iter_id < pr_list_s.__len__():
        rank_list.append(rank_id)
        while iter_id+1 < pr_list_s.__len__() and pr_list_s[iter_id] == pr_list_s[iter_id+1]:
            rank_list.append(rank_id)
            iter_id += 1
        rank_id += 1
        iter_id += 1
    # Degree of Matthew effect; raises ZeroDivisionError if all ranks are 1.
    DMF = 0.0
    for rank_val in rank_list:
        DMF += log(rank_val*1.0/rank_list[-1])
    DMF = 1 + rank_list.__len__()/DMF
    print('DMF:%s' % DMF)
    return (mae/total_no, DMF)
def compute_alpha(train_data_dict, test_data_dict, item_rank_dict):
    """Fit per-user coefficients mapping predicted scores to item ids.

    Runs a quick sampled-SGD factorization, builds a matrix X of user-item
    dot products, then fits a Lasso regression of the item-id vector Y on X.
    The coefficients are written to ``USER_COEF.txt`` (side effect).

    Returns (user_coef, rank_list): the Lasso coefficient vector and a dict
    mapping item id -> predicted rank value (dot of X row with user_coef).

    NOTE(review): regressing raw item *ids* on scores is unusual — confirm
    that Y is meant to be the id rather than a popularity rank.
    ``test_data_dict`` is accepted but never used.
    """
    eta = 1e-4
    user_len = max(train_data_dict.keys()) + 1
    item_idx_list = list(item_rank_dict.keys())
    item_len = item_idx_list.__len__()
    # 30-dimensional latent factors; v is indexed by sample position here,
    # not by item id (see the v[int(iid)] updates below).
    u = random.random([user_len, 30])
    v = random.random([item_len, 30])
    # 100 random user indices via truncation of scaled uniform floats.
    user_id_list = random.random_sample(100) * train_data_dict.keys().__len__()
    user_repo = list(train_data_dict.keys())
    for uid_id in user_id_list:
        user_id = user_repo[int(uid_id)]
        if train_data_dict[user_id].__len__() < 1:
            continue
        item_id_list = list(train_data_dict[user_id].keys())
        sampled_item_id_list = random.random_sample(10) * item_id_list.__len__()
        item_repo = train_data_dict[user_id].keys()
        for iid in sampled_item_id_list:
            item_id = item_id_list[int(iid)]
            R_v = train_data_dict[user_id][item_id_list[int(iid)]]
            u[user_id] += eta*2*(R_v - np.dot(u[user_id], v[int(iid)])) * v[int(iid)]
            v[int(iid)] += eta*2*(R_v - np.dot(u[user_id], v[int(iid)])) * u[user_id]
    # X[i][j] = score of user j for the i-th item; Y[i] = that item's id.
    X = zeros([item_len, user_len])
    Y = zeros([item_len, 1])
    for id_idx in range(0, item_len):
        Y[id_idx] = item_idx_list[id_idx]
        for user_id in range(0, user_len):
            X[id_idx][user_id] = np.dot(u[user_id], v[id_idx])
    print('Computing Lasso ...')
    LR = Lasso(alpha=1e-7).fit(X, Y)
    user_coef = LR.coef_
    with open('USER_COEF.txt', 'w') as FILE:
        for coef_val in user_coef:
            FILE.write(str(coef_val)+'\n')
    rank_list = {}
    for item_id in range(0, item_len):
        rank_list[item_idx_list[item_id]] = np.dot(X[item_id][:], user_coef)
    print('Completing compute_alpha ...')
    return (user_coef, rank_list)
def predict_mf(test_data_dict, total_item_list, u, v):
    """Evaluate factorization features on the test set.

    Parameters
    ----------
    test_data_dict : dict
        user id -> {item id -> true rating}.
    total_item_list : iterable
        Unused; kept for backward compatibility with existing callers.
    u, v : dict
        user id -> latent vector and item id -> latent vector.

    Returns
    -------
    (mae, DME) : tuple
        Mean absolute error of the cosine-similarity rating estimate
        (scaled to the 5-point range) and the degree-of-Matthew-effect
        statistic over the popularity ranks of the predicted items.

    Raises ZeroDivisionError when no test pair is covered by ``u``/``v``,
    or when every predicted item shares the same rank.
    """
    pr_dict = {}
    mae = 0.0
    total_no = 0.0
    for user_id in test_data_dict.keys():
        for item_id in test_data_dict[user_id]:
            # BUG FIX: membership used to be tested against the module-level
            # globals ``user_features``/``item_features`` instead of the
            # ``u``/``v`` parameters, raising NameError outside __main__.
            if user_id in u and item_id in v:
                # Cosine similarity scaled to the 5-point rating range.
                R_v = 5.0 * (np.dot(u[user_id], v[item_id])/(norm(u[user_id])*norm(v[item_id])))
                pr_dict[item_id] = pr_dict.get(item_id, 0)+1
                mae += abs(R_v - test_data_dict[user_id][item_id])
                total_no += 1
    pr_list = [(item_id, count) for item_id, count in pr_dict.items()]
    pr_list_s = sorted(pr_list, key=itemgetter(1), reverse=True)
    # Assign ranks by descending prediction count; equal adjacent pairs share a rank.
    rank_list = []
    iter_id = 0
    rank_id = 1
    while iter_id < len(pr_list_s):
        rank_list.append(rank_id)
        while iter_id+1 < len(pr_list_s) and pr_list_s[iter_id] == pr_list_s[iter_id+1]:
            rank_list.append(rank_id)
            iter_id += 1
        rank_id += 1
        iter_id += 1
    DME = 0.0
    for rank_val in rank_list:
        DME += log(rank_val*1.0/rank_list[-1])
    DME = 1 + len(rank_list)/DME
    print('DME: %s' % DME)
    return (mae*1.0/total_no, DME)
if __name__ == '__main__':
    # Driver: load MovieLens ratings, build train/test splits, then sweep
    # the Zipf penalty coefficient and compare Zipf MF against vanilla MF.
    input_file = 'ml-latest-small/ratings.csv'
    user_item_ratings = {}
    with open(input_file, 'r') as FILE:
        for line in FILE:
            data_rec = line.strip().split(',')
            user_id = int(data_rec[0])
            item_id = int(data_rec[1])
            if user_id not in user_item_ratings:
                user_item_ratings[user_id] = {}
            user_item_ratings[user_id][item_id] = float(data_rec[2])
    train_set = {}
    test_set = {}
    train_set_list = []
    test_set_list = []
    train_set_dict = {}
    test_set_dict = {}
    for user_id in user_item_ratings.keys():
        item_list = [item_id for item_id in user_item_ratings[user_id].keys()]
        train_set.setdefault(user_id, [])
        train_set_dict.setdefault(user_id, {})
        test_set_dict.setdefault(user_id, {})
        # Hold out the last items of each sufficiently active user as test data.
        if item_list.__len__() > 8:
            train_set[user_id] = item_list[:-4]
            for item_id in item_list[:-4]:
                train_set_list.append((user_id, item_id, user_item_ratings[user_id][item_id]))
                train_set_dict[user_id][item_id] = user_item_ratings[user_id][item_id]
            for x in range(-4, 0):
                test_set_list.append((user_id, item_list[x], user_item_ratings[user_id][item_list[x]]))
                # NOTE(review): uses the stale loop variable ``item_id`` as the
                # key; the parallel branch below uses item_list[x] — likely a bug.
                test_set_dict[user_id][item_id] = user_item_ratings[user_id][item_list[x]]
        # NOTE(review): not ``elif`` — users with more than 8 items fall through
        # here too, so this 2-item split overwrites the 4-item split above and
        # both test slices end up in test_set_list; confirm intent.
        if item_list.__len__() > 4:
            train_set[user_id] = item_list[:-2]
            for item_id in item_list[:-2]:
                train_set_list.append((user_id, item_id, user_item_ratings[user_id][item_id]))
                train_set_dict[user_id][item_id] = user_item_ratings[user_id][item_id]
            for x in range(-2, 0):
                test_set_list.append((user_id, item_list[x], user_item_ratings[user_id][item_list[x]]))
                test_set_dict[user_id][item_list[x]] = user_item_ratings[user_id][item_list[x]]
    # Persist the splits as tab-separated (user, item, rating) triples.
    with open('train_set.txt', 'w') as FILE:
        for data_rec in train_set_list:
            FILE.write('%s\t%s\t%s\n'%(data_rec[0], data_rec[1], data_rec[2]))
    with open('test_set.txt', 'w') as FILE:
        for data_rec in test_set_list:
            FILE.write('%s\t%s\t%s\n'%(data_rec[0], data_rec[1], data_rec[2]))
    user_rank_dict = compute_user_rank(user_item_ratings)
    item_rank_dict = compute_item_rank(user_item_ratings)
    # Sweep of the Zipf penalty coefficient (beta for the Zipf model, eta for vanilla MF).
    eta_list = [1e-12, 1e-10, 1e-9, 1e-8, 1e-7, 1e-5, 2e-5, 3e-5, 4e-5, 7e-5, 1e-4, 2e-4, 3e-4, 4e-4, 5e-4, 6e-4, 7e-4, 8e-4, 9e-4, 0.001, 0.002, 0.003, 0.004,0.005]
    mae_list_0 = []
    mae_list_1 = []
    DME_list_0 = []
    DME_list_1 = []
    for eta in eta_list :
        user_features, item_features = optimize_function_err_fun(train_set_dict, test_set_dict, user_rank_dict, item_rank_dict, 100, 1e-4, eta)
        (mae_0, DME_0) = predict_mf(test_set_dict, item_rank_dict.keys(), user_features, item_features)
        (mae_1, DME_1) = compute_mf(train_set_dict, test_set_dict, item_rank_dict, eta)
        mae_list_0.append(mae_0)
        mae_list_1.append(mae_1)
        DME_list_0.append(DME_0)
        DME_list_1.append(DME_1)
    # Plot MAE and the degree of Matthew effect against the penalty coefficient.
    plt_0, = plt.plot(eta_list, mae_list_0)
    plt_1, = plt.plot(eta_list, mae_list_1)
    plt.legend([plt_0, plt_1], ['Zipf Matrix Factorization', 'Vanila Matrix Factorization'], loc='best')
    plt.xlabel('Zipf Penalty Coefficient')
    plt.ylabel('MAE')
    plt.show()
    plt_0, = plt.plot(eta_list, DME_list_0)
    plt_1, = plt.plot(eta_list, DME_list_1)
    plt.legend([plt_0, plt_1], ['Zipf Matrix Factorization', 'Vanila Matrix Factorization'], loc='best')
    plt.xlabel('Zipf Penalty Coefficient')
    plt.ylabel('Degree of Matthew Effect')
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.random.random_sample",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"sklearn.linear_model.Lasso",
"numpy.random.random",
"scipy.linalg.norm",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"math.log",
"matplotlib.pyplot.xlabel",
"operator.it... | [((3904, 3933), 'numpy.random.random', 'random.random', (['[user_len, 30]'], {}), '([user_len, 30])\n', (3917, 3933), False, 'from numpy import random, zeros\n'), ((3942, 3971), 'numpy.random.random', 'random.random', (['[item_len, 30]'], {}), '([item_len, 30])\n', (3955, 3971), False, 'from numpy import random, zeros\n'), ((6311, 6340), 'numpy.random.random', 'random.random', (['[user_len, 30]'], {}), '([user_len, 30])\n', (6324, 6340), False, 'from numpy import random, zeros\n'), ((6349, 6378), 'numpy.random.random', 'random.random', (['[item_len, 30]'], {}), '([item_len, 30])\n', (6362, 6378), False, 'from numpy import random, zeros\n'), ((7199, 7226), 'numpy.zeros', 'zeros', (['[item_len, user_len]'], {}), '([item_len, user_len])\n', (7204, 7226), False, 'from numpy import random, zeros\n'), ((7236, 7256), 'numpy.zeros', 'zeros', (['[item_len, 1]'], {}), '([item_len, 1])\n', (7241, 7256), False, 'from numpy import random, zeros\n'), ((12257, 12287), 'matplotlib.pyplot.plot', 'plt.plot', (['eta_list', 'mae_list_0'], {}), '(eta_list, mae_list_0)\n', (12265, 12287), True, 'import matplotlib.pyplot as plt\n'), ((12301, 12331), 'matplotlib.pyplot.plot', 'plt.plot', (['eta_list', 'mae_list_1'], {}), '(eta_list, mae_list_1)\n', (12309, 12331), True, 'import matplotlib.pyplot as plt\n'), ((12336, 12440), 'matplotlib.pyplot.legend', 'plt.legend', (['[plt_0, plt_1]', "['Zipf Matrix Factorization', 'Vanila Matrix Factorization']"], {'loc': '"""best"""'}), "([plt_0, plt_1], ['Zipf Matrix Factorization',\n 'Vanila Matrix Factorization'], loc='best')\n", (12346, 12440), True, 'import matplotlib.pyplot as plt\n'), ((12441, 12479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Zipf Penalty Coefficient"""'], {}), "('Zipf Penalty Coefficient')\n", (12451, 12479), True, 'import matplotlib.pyplot as plt\n'), ((12484, 12501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MAE"""'], {}), "('MAE')\n", (12494, 12501), True, 'import matplotlib.pyplot as plt\n'), 
((12506, 12516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12514, 12516), True, 'import matplotlib.pyplot as plt\n'), ((12535, 12565), 'matplotlib.pyplot.plot', 'plt.plot', (['eta_list', 'DME_list_0'], {}), '(eta_list, DME_list_0)\n', (12543, 12565), True, 'import matplotlib.pyplot as plt\n'), ((12579, 12609), 'matplotlib.pyplot.plot', 'plt.plot', (['eta_list', 'DME_list_1'], {}), '(eta_list, DME_list_1)\n', (12587, 12609), True, 'import matplotlib.pyplot as plt\n'), ((12614, 12718), 'matplotlib.pyplot.legend', 'plt.legend', (['[plt_0, plt_1]', "['Zipf Matrix Factorization', 'Vanila Matrix Factorization']"], {'loc': '"""best"""'}), "([plt_0, plt_1], ['Zipf Matrix Factorization',\n 'Vanila Matrix Factorization'], loc='best')\n", (12624, 12718), True, 'import matplotlib.pyplot as plt\n'), ((12719, 12757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Zipf Penalty Coefficient"""'], {}), "('Zipf Penalty Coefficient')\n", (12729, 12757), True, 'import matplotlib.pyplot as plt\n'), ((12762, 12800), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Degree of Matthew Effect"""'], {}), "('Degree of Matthew Effect')\n", (12772, 12800), True, 'import matplotlib.pyplot as plt\n'), ((12805, 12815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12813, 12815), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1961), 'numpy.random.random', 'random.random', (['[user_len, 30]'], {}), '([user_len, 30])\n', (1945, 1961), False, 'from numpy import random, zeros\n'), ((1974, 2003), 'numpy.random.random', 'random.random', (['[item_len, 30]'], {}), '([item_len, 30])\n', (1987, 2003), False, 'from numpy import random, zeros\n'), ((3992, 4017), 'numpy.random.random_sample', 'random.random_sample', (['(100)'], {}), '(100)\n', (4012, 4017), False, 'from numpy import random, zeros\n'), ((5953, 5988), 'math.log', 'log', (['(rank_val * 1.0 / rank_list[-1])'], {}), '(rank_val * 1.0 / rank_list[-1])\n', (5956, 5988), False, 'from math import log\n'), ((6399, 6424), 
'numpy.random.random_sample', 'random.random_sample', (['(100)'], {}), '(100)\n', (6419, 6424), False, 'from numpy import random, zeros\n'), ((7767, 7799), 'numpy.dot', 'np.dot', (['X[item_id][:]', 'user_coef'], {}), '(X[item_id][:], user_coef)\n', (7773, 7799), True, 'import numpy as np\n'), ((8980, 9015), 'math.log', 'log', (['(rank_val * 1.0 / rank_list[-1])'], {}), '(rank_val * 1.0 / rank_list[-1])\n', (8983, 9015), False, 'from math import log\n'), ((695, 708), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (705, 708), False, 'from operator import itemgetter\n'), ((1249, 1262), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1259, 1262), False, 'from operator import itemgetter\n'), ((1756, 1781), 'numpy.random.random_sample', 'random.random_sample', (['(100)'], {}), '(100)\n', (1776, 1781), False, 'from numpy import random, zeros\n'), ((4398, 4422), 'numpy.random.random_sample', 'random.random_sample', (['(10)'], {}), '(10)\n', (4418, 4422), False, 'from numpy import random, zeros\n'), ((5537, 5550), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5547, 5550), False, 'from operator import itemgetter\n'), ((6758, 6782), 'numpy.random.random_sample', 'random.random_sample', (['(10)'], {}), '(10)\n', (6778, 6782), False, 'from numpy import random, zeros\n'), ((7413, 7442), 'numpy.dot', 'np.dot', (['u[user_id]', 'v[id_idx]'], {}), '(u[user_id], v[id_idx])\n', (7419, 7442), True, 'import numpy as np\n'), ((7486, 7504), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(1e-07)'}), '(alpha=1e-07)\n', (7491, 7504), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((8564, 8577), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (8574, 8577), False, 'from operator import itemgetter\n'), ((2331, 2355), 'numpy.random.random_sample', 'random.random_sample', (['(10)'], {}), '(10)\n', (2351, 2355), False, 'from numpy import random, zeros\n'), ((2774, 2804), 'numpy.dot', 'np.dot', (['u[user_id]', 
'v[item_id]'], {}), '(u[user_id], v[item_id])\n', (2780, 2804), True, 'import numpy as np\n'), ((2827, 2843), 'scipy.linalg.norm', 'norm', (['u[user_id]'], {}), '(u[user_id])\n', (2831, 2843), False, 'from scipy.linalg import norm\n'), ((2866, 2882), 'scipy.linalg.norm', 'norm', (['v[item_id]'], {}), '(v[item_id])\n', (2870, 2882), False, 'from scipy.linalg import norm\n'), ((3254, 3284), 'numpy.dot', 'np.dot', (['v[item_id]', 'u[user_id]'], {}), '(v[item_id], u[user_id])\n', (3260, 3284), True, 'import numpy as np\n'), ((3307, 3323), 'scipy.linalg.norm', 'norm', (['u[user_id]'], {}), '(u[user_id])\n', (3311, 3323), False, 'from scipy.linalg import norm\n'), ((3346, 3362), 'scipy.linalg.norm', 'norm', (['v[item_id]'], {}), '(v[item_id])\n', (3350, 3362), False, 'from scipy.linalg import norm\n'), ((4701, 4731), 'numpy.dot', 'np.dot', (['u[user_id]', 'v[item_id]'], {}), '(u[user_id], v[item_id])\n', (4707, 4731), True, 'import numpy as np\n'), ((4785, 4815), 'numpy.dot', 'np.dot', (['u[user_id]', 'v[item_id]'], {}), '(u[user_id], v[item_id])\n', (4791, 4815), True, 'import numpy as np\n'), ((5186, 5216), 'numpy.dot', 'np.dot', (['u[user_id]', 'v[item_id]'], {}), '(u[user_id], v[item_id])\n', (5192, 5216), True, 'import numpy as np\n'), ((8213, 8243), 'numpy.dot', 'np.dot', (['u[user_id]', 'v[item_id]'], {}), '(u[user_id], v[item_id])\n', (8219, 8243), True, 'import numpy as np\n'), ((5218, 5234), 'scipy.linalg.norm', 'norm', (['u[user_id]'], {}), '(u[user_id])\n', (5222, 5234), False, 'from scipy.linalg import norm\n'), ((5235, 5251), 'scipy.linalg.norm', 'norm', (['v[item_id]'], {}), '(v[item_id])\n', (5239, 5251), False, 'from scipy.linalg import norm\n'), ((8245, 8261), 'scipy.linalg.norm', 'norm', (['u[user_id]'], {}), '(u[user_id])\n', (8249, 8261), False, 'from scipy.linalg import norm\n'), ((8262, 8278), 'scipy.linalg.norm', 'norm', (['v[item_id]'], {}), '(v[item_id])\n', (8266, 8278), False, 'from scipy.linalg import norm\n'), ((3483, 3503), 'math.log', 
'log', (['(rank / item_len)'], {}), '(rank / item_len)\n', (3486, 3503), False, 'from math import log\n')] |
import pickle
from datetime import timedelta
from typing import Callable
import numpy as np
import pandas as pd
from prometheus import get_series
parameters = [
"jobs",
"cpu",
"memory",
"io_read",
"io_readReal",
"io_readSyscalls",
"io_write",
# "io_writeCancelled",
"io_writeReal",
"io_writeSyscalls",
"network_rxBytes",
# "network_rxCompressed",
# "network_rxDrop",
# "network_rxErrors",
# "network_rxFifo",
# "network_rxFrame",
# "network_rxMulticast",
"network_rxPackets",
"network_txBytes",
# "network_txCarrier",
# "network_txColls",
# "network_txCompressed",
# "network_txDrop",
# "network_txErrors",
# "network_txFifo",
"network_txPackets",
]
def get_hf_data(fname="../data/hf-data.pkl.gzip") -> pd.DataFrame:
return pd.read_pickle(fname, compression="gzip")
def get_hf_max_values(fname="../data/hf-max_values.pkl.gzip") -> pd.DataFrame:
return pd.read_pickle(fname, compression="gzip")
def get_hf_static_data(df: pd.DataFrame) -> pd.DataFrame:
col = ['size', 'jobs', 'nodes', 'cpu_speed', 'cpu_cores', 'cpu_physical_cores', 'cpu_processors', 'memory', 'workflowName']
hf_static = pd.get_dummies(df[col], columns=["workflowName"])
hf_static["size"] = pd.to_numeric(hf_static["size"], downcast="float")
return hf_static
def get_max_value_in_series(df: pd.DataFrame, get_metrics: Callable[[str, dict], pd.DataFrame]) -> pd.DataFrame:
rows = {}
for index, flow in df.iterrows():
hyperflowId = flow["hyperflowId"]
params = {
"start_time": flow["start"] - timedelta(seconds=5),
"end_time": flow["end"] + timedelta(seconds=5),
"step": "5s",
}
try:
metrics_df = get_metrics(hyperflowId, params)
rows[index] = metrics_df.max()[1:]
except Exception as e:
print(f"Getting time series failed for {index} - {flow['workflowName']} {flow['size']}")
return pd.DataFrame.from_dict(rows, orient='index')
def train_validate_test_split(df, train_percent=.6, validate_percent=.2, seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
validate_end = int(validate_percent * m) + train_end
train = df.loc[perm[:train_end]]
validate = df.loc[perm[train_end:validate_end]]
test = df.loc[perm[validate_end:]]
return train, validate, test
def split_many_2_many(series, n_past, n_future, split_stride=1):
X, y = list(), list()
for window_start in range(0, len(series), split_stride):
past_end = window_start + n_past
future_end = past_end + n_future
if future_end > len(series):
break
# slicing the past and future parts of the window
past, future = series[window_start:past_end, :], series[past_end:future_end, :]
X.append(past)
y.append(future)
return np.array(X), np.array(y)
def dataset_split_many_2_many(static, series, n_past, n_future, features, features_pred, split_stride=1):
X = []
y = []
for i in range(static.shape[0]):
X_series, y_series = split_many_2_many(series[i], n_past, n_future, split_stride)
X_static = np.repeat([static[i]], X_series.shape[0], 0)
if y_series.size > 0:
X.append((X_series[:, :, features], X_static))
y.append(y_series[:, :, features_pred])
return X, y
def create_dataset(hf_data: pd.DataFrame, dataset: str, df: pd.DataFrame, steps: list, n_pasts: list, n_futures: list,
train_p: float = 0.75, validate_p: float = 0):
test_p = 1 - train_p - validate_p
train, validate, test = train_validate_test_split(df.select_dtypes(include=np.number), train_percent=train_p, validate_percent=0)
with open(f"dataset/{dataset}-split_{int(train_p * 100)}_{int(validate_p * 100)}_{int(test_p * 100)}.static", "wb") as f:
pickle.dump([train, validate, test], f)
for step in steps:
for n_past, n_future in list(zip(n_pasts, n_futures)):
train_series = get_series(hf_data, train, parameters, step, n_past, n_future)
validate_series = get_series(hf_data, validate, parameters, step, n_past, n_future)
test_series = get_series(hf_data, test, parameters, step, n_past, n_future)
with open(
f"dataset/{dataset}-split_{int(train_p * 100)}_{int(validate_p * 100)}_{int(test_p * 100)}-step_{step}s-past_{n_past}s-future_{n_future}s.dynamic",
"wb") as f:
pickle.dump([train_series, validate_series, test_series], f)
| [
"pickle.dump",
"numpy.random.seed",
"pandas.DataFrame.from_dict",
"prometheus.get_series",
"pandas.get_dummies",
"numpy.array",
"datetime.timedelta",
"numpy.random.permutation",
"pandas.read_pickle",
"pandas.to_numeric",
"numpy.repeat"
] | [((837, 878), 'pandas.read_pickle', 'pd.read_pickle', (['fname'], {'compression': '"""gzip"""'}), "(fname, compression='gzip')\n", (851, 878), True, 'import pandas as pd\n'), ((971, 1012), 'pandas.read_pickle', 'pd.read_pickle', (['fname'], {'compression': '"""gzip"""'}), "(fname, compression='gzip')\n", (985, 1012), True, 'import pandas as pd\n'), ((1217, 1266), 'pandas.get_dummies', 'pd.get_dummies', (['df[col]'], {'columns': "['workflowName']"}), "(df[col], columns=['workflowName'])\n", (1231, 1266), True, 'import pandas as pd\n'), ((1291, 1341), 'pandas.to_numeric', 'pd.to_numeric', (["hf_static['size']"], {'downcast': '"""float"""'}), "(hf_static['size'], downcast='float')\n", (1304, 1341), True, 'import pandas as pd\n'), ((2017, 2061), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rows'], {'orient': '"""index"""'}), "(rows, orient='index')\n", (2039, 2061), True, 'import pandas as pd\n'), ((2153, 2173), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2167, 2173), True, 'import numpy as np\n'), ((2185, 2216), 'numpy.random.permutation', 'np.random.permutation', (['df.index'], {}), '(df.index)\n', (2206, 2216), True, 'import numpy as np\n'), ((2994, 3005), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3002, 3005), True, 'import numpy as np\n'), ((3007, 3018), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3015, 3018), True, 'import numpy as np\n'), ((3295, 3339), 'numpy.repeat', 'np.repeat', (['[static[i]]', 'X_series.shape[0]', '(0)'], {}), '([static[i]], X_series.shape[0], 0)\n', (3304, 3339), True, 'import numpy as np\n'), ((3991, 4030), 'pickle.dump', 'pickle.dump', (['[train, validate, test]', 'f'], {}), '([train, validate, test], f)\n', (4002, 4030), False, 'import pickle\n'), ((4145, 4207), 'prometheus.get_series', 'get_series', (['hf_data', 'train', 'parameters', 'step', 'n_past', 'n_future'], {}), '(hf_data, train, parameters, step, n_past, n_future)\n', (4155, 4207), False, 'from prometheus import 
get_series\n'), ((4238, 4303), 'prometheus.get_series', 'get_series', (['hf_data', 'validate', 'parameters', 'step', 'n_past', 'n_future'], {}), '(hf_data, validate, parameters, step, n_past, n_future)\n', (4248, 4303), False, 'from prometheus import get_series\n'), ((4330, 4391), 'prometheus.get_series', 'get_series', (['hf_data', 'test', 'parameters', 'step', 'n_past', 'n_future'], {}), '(hf_data, test, parameters, step, n_past, n_future)\n', (4340, 4391), False, 'from prometheus import get_series\n'), ((1635, 1655), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (1644, 1655), False, 'from datetime import timedelta\n'), ((1695, 1715), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (1704, 1715), False, 'from datetime import timedelta\n'), ((4623, 4683), 'pickle.dump', 'pickle.dump', (['[train_series, validate_series, test_series]', 'f'], {}), '([train_series, validate_series, test_series], f)\n', (4634, 4683), False, 'import pickle\n')] |
"""Test regions mouse bindings."""
import collections
import numpy as np
import pytest
from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks
from napari_plot.layers import Region
from napari_plot.layers.region.region import Mode, Orientation
from napari_plot.utils.vendored.interactions import ReadOnlyWrapper
@pytest.fixture
def Event():
    """Fixture producing a namedtuple type for faking vispy mouse events.

    The returned type has the fields ``type``, ``is_dragging``,
    ``modifiers``, ``position`` and ``pos`` so tests can build minimal
    event objects without a real vispy canvas.
    """
    fields = ["type", "is_dragging", "modifiers", "position", "pos"]
    return collections.namedtuple("Event", field_names=fields)
def _get_position(pos):
return 50, pos[1] - (pos[1] - pos[0]) / 2
@pytest.fixture
def create_known_region_layer():
    """Fixture building a Region layer with known coordinates.

    Returns
    -------
    data : list
        Region data the layer was built from.
    layer : napari_plot.layers.Region
        The constructed Region layer.
    n_regions : int
        Number of regions in the layer.
    known_non_region : list
        A coordinate guaranteed to lie outside every region, useful when a
        test must click on empty space.
    """
    data = [
        ([25, 50], "vertical"),
        ([500, 750], "horizontal"),
        ([80, 90], "vertical"),
    ]
    known_non_region = [0, 0]
    n_regions = len(data)

    layer = Region(data)
    # Sanity-check the freshly built layer before handing it to a test.
    assert layer.ndim == 2
    assert len(layer.data) == n_regions
    assert len(layer.selected_data) == 0
    return data, layer, n_regions, known_non_region
def test_add_region_vertical(create_known_region_layer, Event):
    """Clicking and dragging in add mode creates a new vertical region."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "add"
    drag_end = [100, 0]

    def emit(kind, dragging, where, arr):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=dragging, modifiers=[], pos=arr, position=where)
        )

    # Press on empty space, drag, then release to finish the region.
    mouse_press_callbacks(layer, emit("mouse_press", False, known_non_region, np.asarray(known_non_region)))
    mouse_move_callbacks(layer, emit("mouse_move", True, drag_end, np.asarray(drag_end)))
    mouse_release_callbacks(layer, emit("mouse_release", False, drag_end, ()))

    # Exactly one region was appended and it is vertical.
    assert len(layer.data) == n_regions + 1
    assert layer.orientation[-1] == Orientation.VERTICAL
def test_add_region_horizontal(create_known_region_layer, Event):
    """Clicking and dragging in add mode creates a new horizontal region."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "add"
    drag_end = [0, 100]

    def emit(kind, dragging, where, arr):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=dragging, modifiers=[], pos=arr, position=where)
        )

    # Press on empty space, drag, then release to finish the region.
    mouse_press_callbacks(layer, emit("mouse_press", False, known_non_region, np.asarray(known_non_region)))
    mouse_move_callbacks(layer, emit("mouse_move", True, drag_end, np.asarray(drag_end)))
    mouse_release_callbacks(layer, emit("mouse_release", False, drag_end, ()))

    # Exactly one region was appended and it is horizontal.
    assert len(layer.data) == n_regions + 1
    assert layer.orientation[-1] == Orientation.HORIZONTAL
def test_not_adding_or_selecting_region(create_known_region_layer, Event):
    """Clicking in pan_zoom mode must neither add nor select a region."""
    data, layer, n_regions, _ = create_known_region_layer
    layer.mode = "pan_zoom"

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=(0, 0))
        )

    # Simulate a click and release at the origin.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # Nothing was added and nothing is selected.
    assert len(layer.data) == n_regions
    assert len(layer.selected_data) == 0
def test_select_region(create_known_region_layer, Event):
    """Clicking on a region in select mode selects it."""
    data, layer, n_regions, _ = create_known_region_layer
    layer.mode = "select"
    position = _get_position(data[0][0])

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=position)
        )

    # Click and release on the first region.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # The clicked region (index 0) is now the only selection.
    assert len(layer.selected_data) == 1
    assert layer.selected_data == {0}
@pytest.mark.parametrize(
    "mode",
    [
        "select",
        "move",
        "add",
        "select",
    ],
)
def test_after_in_add_mode_region(mode, create_known_region_layer, Event):
    """Clicking in pan_zoom after visiting another mode neither adds nor selects."""
    # NOTE(review): "select" appears twice in the parametrization — presumably a
    # typo for some other mode; confirm intent before changing it.
    data, layer, n_regions, _ = create_known_region_layer
    layer.mode = mode
    layer.mode = "pan_zoom"
    position = _get_position(data[0][0])

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=position)
        )

    # Click and release on top of the first region while in pan_zoom.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # Nothing was added and nothing is selected.
    assert len(layer.data) == n_regions
    assert len(layer.selected_data) == 0
def test_unselect_select_region(create_known_region_layer, Event):
    """Clicking on a region replaces the previous selection with it."""
    data, layer, n_regions, _ = create_known_region_layer
    layer.mode = "select"
    position = _get_position(data[0][0])
    # Pre-select a different region so the click has to swap the selection.
    layer.selected_data = {1}

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=position)
        )

    # Click and release on region 0.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # Region 0 replaced region 1 as the sole selection.
    assert len(layer.selected_data) == 1
    assert layer.selected_data == {0}
def test_not_selecting_region(create_known_region_layer, Event):
    """Clicking on empty space in select mode selects nothing."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "select"

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=known_non_region)
        )

    # Click and release away from every region.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # The selection stays empty.
    assert len(layer.selected_data) == 0
def test_unselecting_regions(create_known_region_layer, Event):
    """Clicking on empty space in select mode clears an existing selection."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "select"
    layer.selected_data = {0, 1}
    assert len(layer.selected_data) == 2

    def emit(kind):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=False, modifiers=[], pos=(), position=known_non_region)
        )

    # Click and release away from every region.
    mouse_press_callbacks(layer, emit("mouse_press"))
    mouse_release_callbacks(layer, emit("mouse_release"))

    # The previous two-region selection was dropped.
    assert len(layer.selected_data) == 0
def test_selecting_regions_with_drag(create_known_region_layer, Event):
    """Dragging a box over every region selects all of them."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "select"

    def emit(kind, dragging, where):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=dragging, modifiers=[], pos=(), position=where)
        )

    # Press at an empty spot, start dragging, sweep a box out to (1000, 1000).
    mouse_press_callbacks(layer, emit("mouse_press", False, known_non_region))
    mouse_move_callbacks(layer, emit("mouse_move", True, known_non_region))
    mouse_move_callbacks(layer, emit("mouse_move", True, (1000, 1000)))
    mouse_release_callbacks(layer, emit("mouse_release", True, (1000, 1000)))

    # The drag box encloses every region, so all become selected.
    assert len(layer.selected_data) == n_regions
def test_selecting_no_regions_with_drag(create_known_region_layer, Event):
    """Dragging a box that misses every region selects nothing."""
    data, layer, n_regions, known_non_region = create_known_region_layer
    layer.mode = "select"

    def emit(kind, dragging, where):
        return ReadOnlyWrapper(
            Event(type=kind, is_dragging=dragging, modifiers=[], pos=(), position=where)
        )

    # Press at an empty spot and drag a small box out to (200, 20).
    mouse_press_callbacks(layer, emit("mouse_press", False, known_non_region))
    mouse_move_callbacks(layer, emit("mouse_move", True, known_non_region))
    mouse_move_callbacks(layer, emit("mouse_move", True, (200, 20)))
    mouse_release_callbacks(layer, emit("mouse_release", True, (200, 20)))

    # The drag box contains no region, so the selection stays empty.
    assert len(layer.selected_data) == 0
@pytest.mark.parametrize("attr", ("_move_modes", "_drag_modes", "_cursor_modes"))
def test_all_modes_covered(attr):
"""
Test that all dictionaries modes have all the keys, this simplify the handling logic
As we do not need to test whether a key is in a dict or not.
"""
mode_dict = getattr(Region, attr)
assert {k.value for k in mode_dict.keys()} == set(Mode.keys())
| [
"napari.utils.interactions.mouse_press_callbacks",
"napari.utils.interactions.mouse_release_callbacks",
"napari_plot.layers.Region",
"napari_plot.layers.region.region.Mode.keys",
"numpy.asarray",
"collections.namedtuple",
"pytest.mark.parametrize",
"napari.utils.interactions.mouse_move_callbacks"
] | [((6040, 6108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['select', 'move', 'add', 'select']"], {}), "('mode', ['select', 'move', 'add', 'select'])\n", (6063, 6108), False, 'import pytest\n'), ((12600, 12685), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attr"""', "('_move_modes', '_drag_modes', '_cursor_modes')"], {}), "('attr', ('_move_modes', '_drag_modes', '_cursor_modes')\n )\n", (12623, 12685), False, 'import pytest\n'), ((665, 769), 'collections.namedtuple', 'collections.namedtuple', (['"""Event"""'], {'field_names': "['type', 'is_dragging', 'modifiers', 'position', 'pos']"}), "('Event', field_names=['type', 'is_dragging',\n 'modifiers', 'position', 'pos'])\n", (687, 769), False, 'import collections\n'), ((1528, 1540), 'napari_plot.layers.Region', 'Region', (['data'], {}), '(data)\n', (1534, 1540), False, 'from napari_plot.layers import Region\n'), ((2174, 2209), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (2195, 2209), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((2515, 2549), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (2535, 2549), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((2792, 2829), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (2815, 2829), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((3449, 3484), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (3470, 3484), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, 
mouse_release_callbacks\n'), ((3790, 3824), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (3810, 3824), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((4067, 4104), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (4090, 4104), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((4714, 4749), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (4735, 4749), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((4978, 5015), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (5001, 5015), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((5618, 5653), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (5639, 5653), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((5884, 5921), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (5907, 5921), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((6685, 6720), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (6706, 6720), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((6951, 6988), 
'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (6974, 6988), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((7630, 7665), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (7651, 7665), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((7896, 7933), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (7919, 7933), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((8520, 8555), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (8541, 8555), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((8794, 8831), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (8817, 8831), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((9448, 9483), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (9469, 9483), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((9722, 9759), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (9745, 9759), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((10309, 10344), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', 
(['layer', 'event'], {}), '(layer, event)\n', (10330, 10344), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((10582, 10616), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (10602, 10616), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((10763, 10797), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (10783, 10797), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((11031, 11068), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (11054, 11068), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((11652, 11687), 'napari.utils.interactions.mouse_press_callbacks', 'mouse_press_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (11673, 11687), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((11925, 11959), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (11945, 11959), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((12188, 12222), 'napari.utils.interactions.mouse_move_callbacks', 'mouse_move_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (12208, 12222), False, 'from napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((12453, 12490), 'napari.utils.interactions.mouse_release_callbacks', 'mouse_release_callbacks', (['layer', 'event'], {}), '(layer, event)\n', (12476, 12490), False, 'from 
napari.utils.interactions import mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks\n'), ((12977, 12988), 'napari_plot.layers.region.region.Mode.keys', 'Mode.keys', ([], {}), '()\n', (12986, 12988), False, 'from napari_plot.layers.region.region import Mode, Orientation\n'), ((2085, 2113), 'numpy.asarray', 'np.asarray', (['known_non_region'], {}), '(known_non_region)\n', (2095, 2113), True, 'import numpy as np\n'), ((2418, 2450), 'numpy.asarray', 'np.asarray', (['known_non_region_end'], {}), '(known_non_region_end)\n', (2428, 2450), True, 'import numpy as np\n'), ((3360, 3388), 'numpy.asarray', 'np.asarray', (['known_non_region'], {}), '(known_non_region)\n', (3370, 3388), True, 'import numpy as np\n'), ((3693, 3725), 'numpy.asarray', 'np.asarray', (['known_non_region_end'], {}), '(known_non_region_end)\n', (3703, 3725), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
__author__ = 'marco.muetze <at> tu-dresden.de'
from .helper import check_bounds_and_get_dimension
from .prototypes import Mesh
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def calculate_bounds(axes):
    """Return the (lower, upper) coordinate bounds of the given axes.

    Each axis contributes its two endpoint values; whichever endpoint is
    smaller goes into the lower bound, the other into the upper bound,
    so axes may be ascending or descending.

    :param axes: Sequence of per-axis node arrays (endpoints are inspected).
    :return: Tuple ``(lower, upper)`` of coordinate tuples.
    """
    lower = []
    upper = []
    for axis in axes:
        first, last = axis[0], axis[-1]
        lower.append(min(first, last))
        upper.append(max(first, last))
    return tuple(lower), tuple(upper)
def calculate_shape(axes):
    """Return the number of nodes along each axis.

    :param axes: Either a single 1D ``np.ndarray`` (one axis) or a
        tuple/list of 1D ``np.ndarray`` objects (one per axis).
    :return: Tuple with the node count of every axis.
    :raises TypeError: If *axes* (or one of its elements) is not of the
        expected type.
    """
    # Note: deliberately checks exact types (as the original did) rather
    # than isinstance, so subclasses are still rejected.
    if type(axes) in (tuple, list):
        shape = []
        for dim, axis in enumerate(axes):
            if type(axis) is not np.ndarray:
                raise TypeError('axes[%d] must be a numpy array, got %s' % (dim, type(axis)))
            shape.append(len(axis))
        return tuple(shape)
    if type(axes) is np.ndarray:
        return (len(axes),)
    raise TypeError('axes must be a numpy array or a tuple/list of numpy arrays, got %s' % type(axes))
def calculate_center(bounds):
    """Return the geometric center of the given (lower, upper) bounds.

    :param bounds: Pair of coordinate tuples ``(lower, upper)``; validated
        by :func:`check_bounds_and_get_dimension`.
    :return: Tuple with the midpoint coordinate in every dimension.
    """
    dimension = check_bounds_and_get_dimension(bounds)
    return tuple((bounds[1][i] + bounds[0][i]) * 0.5 for i in range(dimension))
class RectilinearMesh(Mesh):
    """Axis-aligned mesh whose node coordinates are given per axis.

    The mesh is defined by one 1D array of node positions per dimension;
    spacing may vary along each axis (rectilinear, not necessarily uniform).
    """

    # Interpolator class appropriate for rectilinear grids.
    interpolator = RegularGridInterpolator

    def __init__(self, axes, axes_names=('x', 'y', 'z'), unit='m'):
        """ RectilinearMesh

        :param axes: Values of axis nodes as tuple of 1D np.arrays.
        :param axes_names: Coordinate system axes names.
        :param unit: Unit of mesh values.
        """
        bounds = calculate_bounds(axes)
        center = calculate_center(bounds)
        shape = calculate_shape(axes)

        self.__axes = axes
        self.__shape = shape

        Mesh.__init__(self, bounds, axes_names=axes_names, unit=unit)
        # Index tuple of the node closest to the geometric center of the bounds.
        self.__center_index = self.nearest_node(center)[0]

    def __getitem__(self, item):
        # Re-slice each axis with the corresponding entry of *item* so a
        # tuple of slices selects a sub-mesh.
        new_axes = []
        # This only works when len(item) equals the dimension of the mesh and will not work for None!
        for i, x in enumerate(item):
            new_axes.append(self.axes[i][x])
        return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)

    def copy(self):
        """Return a deep copy of this mesh (axis arrays are copied)."""
        new_axes = []
        for axe in self.axes:
            new_axes.append(axe.copy())
        return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)

    def shift(self, offset):
        """Translate the mesh in place by *offset* (one value per axis); returns self."""
        # Update bounds!
        low = np.array(self.bounds[0])
        high = np.array(self.bounds[1])
        tmp = np.array(offset)
        self._bounds = (tuple(low+tmp), tuple(high+tmp))
        # NOTE(review): the length check runs after the bounds were already
        # updated — a wrong-length offset leaves bounds shifted; confirm intent.
        assert len(offset) == len(self.axes)
        new_axes = []
        for axe in self.axes:
            new_axes.append(axe.copy())
        for i, d in enumerate(offset):
            new_axes[i] += d
        self.__axes = tuple(new_axes)
        return self

    @property
    def pitch(self):
        """Per-axis arrays of spacings between neighboring nodes."""
        dimension = self._dimension  # len(self._axes)
        pitch = [0.] * dimension
        for dim in range(dimension):
            axis_len = len(self.__axes[dim])
            # Spacing array: one entry per pair of adjacent nodes.
            coordinates = np.zeros(axis_len-1)
            for idx in range(axis_len-1):
                coordinates[idx] = (self.__axes[dim][idx+1]-self.__axes[dim][idx])
            pitch[dim] = coordinates.copy()
        return tuple(pitch)

    @property
    def axes(self):
        """Tuple of per-axis node-position arrays."""
        return self.__axes

    @property
    def shape(self):
        """Number of nodes along each axis."""
        return self.__shape

    @property
    def center_index(self):
        """Index tuple of the node nearest to the mesh center."""
        return self.__center_index

    @property
    def minimum_pitch(self):
        """ Returns the overall minimal pitch between two neighboring nodes, taken over all directions.

        :return: Smallest node spacing found in any direction.
        """
        pitch = self.pitch
        minimal_pitch = []
        for p in pitch:
            minimal_pitch.append(min(p))
        return min(minimal_pitch)

    def nearest_node(self, position):
        """Return (index tuple, node position tuple, distance) of the node nearest to *position*.

        :param position: Coordinates inside the mesh bounds.
        :raises ValueError: If *position* lies outside the mesh bounds.
        """
        idx = []
        point = []
        for i in range(len(self.axes)):
            if position[i] < self.bounds[0][i] or position[i] > self.bounds[1][i]:
                raise ValueError('The given position is outside the mesh bounds!')
            # Nearest node index along axis i by absolute distance.
            tmp = (np.abs(self.axes[i]-position[i])).argmin()
            idx.append(int(tmp))
            point.append(self.axes[i][tmp])
        return tuple(idx), tuple(point), np.linalg.norm(np.asarray(position)-np.asarray(point))

    def surrounding_nodes(self, position):
        """ Returns nearest node indices and direction of opposite node.

        :param position: Position inside the mesh to search nearest node for as (x,y,z)
        :return: Nearest node indices and direction of opposite node.
        """
        n_node_index, n_node_position, n_node_error = self.nearest_node(position)
        if n_node_error == 0.0:
            # Position coincides with a node: probe a slightly perturbed point
            # per axis to decide which side is still inside the mesh.
            index_mod = []
            for i in range(len(n_node_index)):
                new_point = np.asarray(n_node_position)
                # NOTE(review): relative perturbation is zero when the
                # coordinate itself is 0 — the probe then does not move.
                new_point[i] += 1.e-5*np.abs(new_point[i])
                try:
                    self.nearest_node(tuple(new_point))
                    index_mod.append(-1)
                except ValueError:
                    index_mod.append(1)
        else:
            # Check if node_position is larger or smaller in resp. axes than position
            index_mod = []
            for i in range(len(n_node_index)):
                if n_node_position[i] > position[i]:
                    index_mod.append(-1)
                else:
                    index_mod.append(1)
        return tuple(n_node_index), tuple(index_mod)
| [
"numpy.zeros",
"numpy.abs",
"numpy.asarray",
"numpy.array"
] | [((2576, 2600), 'numpy.array', 'np.array', (['self.bounds[0]'], {}), '(self.bounds[0])\n', (2584, 2600), True, 'import numpy as np\n'), ((2616, 2640), 'numpy.array', 'np.array', (['self.bounds[1]'], {}), '(self.bounds[1])\n', (2624, 2640), True, 'import numpy as np\n'), ((2655, 2671), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (2663, 2671), True, 'import numpy as np\n'), ((3271, 3293), 'numpy.zeros', 'np.zeros', (['(axis_len - 1)'], {}), '(axis_len - 1)\n', (3279, 3293), True, 'import numpy as np\n'), ((5084, 5111), 'numpy.asarray', 'np.asarray', (['n_node_position'], {}), '(n_node_position)\n', (5094, 5111), True, 'import numpy as np\n'), ((4358, 4392), 'numpy.abs', 'np.abs', (['(self.axes[i] - position[i])'], {}), '(self.axes[i] - position[i])\n', (4364, 4392), True, 'import numpy as np\n'), ((4535, 4555), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (4545, 4555), True, 'import numpy as np\n'), ((4556, 4573), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (4566, 4573), True, 'import numpy as np\n'), ((5150, 5170), 'numpy.abs', 'np.abs', (['new_point[i]'], {}), '(new_point[i])\n', (5156, 5170), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019/4/16 11:31
# @Author : Anyue
# @FileName: preprocess.py
# @Software: PyCharm
import os
import glob
from tqdm import tqdm
import librosa
import numpy as np
from hparams import hparam as hp
audio_path = glob.glob(os.path.dirname(hp.unprocessed_data))
def save_spectrogram_tisv():
    """Extract text-independent speaker-verification (TISV) features.

    For every speaker folder in ``audio_path``: load each ``.wav``, run
    voice-activity detection, and for every voiced interval that is long
    enough, save the first and last ``hp.data.tisv_frame`` frames of its
    log-mel spectrogram.  The first 90% of speakers go to the train split,
    the rest to test; one ``.npy`` file is written per speaker.
    """
    print('start text independent feature extraction')
    os.makedirs(hp.data.train_path, exist_ok=True)
    os.makedirs(hp.data.test_path, exist_ok=True)
    # Lower bound (in samples) for an interval to yield tisv_frame frames.
    utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr
    total_speaker_num = len(audio_path)
    train_speaker_num = total_speaker_num // 10 * 9  # 90% train & 10% test
    print('total speaker number: {}'.format(total_speaker_num))
    print('train: {}, test: {}'.format(train_speaker_num, total_speaker_num - train_speaker_num))
    # The mel filterbank depends only on fixed hyperparameters, so build it
    # once instead of once per voiced interval (hoisted out of the loops).
    mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)
    for index, folder in tqdm(enumerate(audio_path)):
        utterances_spec = []
        for utter_name in os.listdir(folder):
            if utter_name[-4:].lower() == '.wav':
                utter_path = os.path.join(folder, utter_name)            # path of each utterance
                utter, sr = librosa.core.load(utter_path, hp.data.sr)    # load utterance audio
                intervals = librosa.effects.split(utter, top_db=30)      # voice activity detection
                for interval in intervals:
                    if interval[1] - interval[0] > utter_min_len:
                        utter_part = utter[interval[0]:interval[1]]
                        S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,
                                              win_length=int(hp.data.window * sr),
                                              hop_length=int(hp.data.hop * sr))
                        S = np.abs(S) ** 2
                        S = np.log10(np.dot(mel_basis, S) + 1e-6)        # log mel spectrogram of utterances
                        utterances_spec.append(S[:, :hp.data.tisv_frame])   # first 180 frames of partial utterance
                        utterances_spec.append(S[:, -hp.data.tisv_frame:])  # last 180 frames of partial utterance
        utterances_spec = np.asarray(utterances_spec)
        if index < train_speaker_num:
            np.save(os.path.join(hp.data.train_path, 'speaker{}'.format(index)), utterances_spec)
        else:
            np.save(os.path.join(hp.data.test_path, 'speaker{}'.format(index - train_speaker_num)), utterances_spec)
if __name__ == '__main__':
save_spectrogram_tisv()
| [
"numpy.abs",
"os.makedirs",
"numpy.asarray",
"os.path.dirname",
"librosa.filters.mel",
"librosa.core.load",
"numpy.dot",
"librosa.effects.split",
"os.path.join",
"os.listdir"
] | [((258, 294), 'os.path.dirname', 'os.path.dirname', (['hp.unprocessed_data'], {}), '(hp.unprocessed_data)\n', (273, 294), False, 'import os\n'), ((386, 432), 'os.makedirs', 'os.makedirs', (['hp.data.train_path'], {'exist_ok': '(True)'}), '(hp.data.train_path, exist_ok=True)\n', (397, 432), False, 'import os\n'), ((437, 482), 'os.makedirs', 'os.makedirs', (['hp.data.test_path'], {'exist_ok': '(True)'}), '(hp.data.test_path, exist_ok=True)\n', (448, 482), False, 'import os\n'), ((989, 1007), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (999, 1007), False, 'import os\n'), ((2227, 2254), 'numpy.asarray', 'np.asarray', (['utterances_spec'], {}), '(utterances_spec)\n', (2237, 2254), True, 'import numpy as np\n'), ((1088, 1120), 'os.path.join', 'os.path.join', (['folder', 'utter_name'], {}), '(folder, utter_name)\n', (1100, 1120), False, 'import os\n'), ((1175, 1216), 'librosa.core.load', 'librosa.core.load', (['utter_path', 'hp.data.sr'], {}), '(utter_path, hp.data.sr)\n', (1192, 1216), False, 'import librosa\n'), ((1269, 1308), 'librosa.effects.split', 'librosa.effects.split', (['utter'], {'top_db': '(30)'}), '(utter, top_db=30)\n', (1290, 1308), False, 'import librosa\n'), ((1790, 1866), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': 'hp.data.sr', 'n_fft': 'hp.data.nfft', 'n_mels': 'hp.data.nmels'}), '(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n', (1809, 1866), False, 'import librosa\n'), ((1739, 1748), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (1745, 1748), True, 'import numpy as np\n'), ((1904, 1924), 'numpy.dot', 'np.dot', (['mel_basis', 'S'], {}), '(mel_basis, S)\n', (1910, 1924), True, 'import numpy as np\n')] |
# Let's play some tones!
import pyaudio
import numpy
def noteToFrequency(note):
    # Map a piano key number (1 = A0 at 27.5 Hz) to its frequency in Hz.
    # Equal temperament: each key up multiplies by the twelfth root of two.
    semitone_ratio = 2.0 ** (1.0 / 12.0)
    return 27.5 * semitone_ratio ** float(note - 1)
def playNotes(notes):
    """Synthesize and play a sequence of sine-wave notes through PyAudio.

    Each element of ``notes`` is indexed as ``note[0]`` (piano key number,
    passed to noteToFrequency) and ``note[1]`` (duration in seconds).
    Blocks until every note has finished playing.
    """
    p = pyaudio.PyAudio()
    volume = 0.5        # output amplitude scale (0.0 - 1.0)
    sampleRate = 44100  # samples per second
    for note in notes:
        frequency = noteToFrequency(note[0])
        print(frequency)
        # Sine wave of `note[1]` seconds at `frequency`, as 32-bit floats
        # (the format the stream below is opened with).
        samples = (numpy.sin(2*numpy.pi*numpy.arange(sampleRate*note[1])*frequency/sampleRate)).astype(numpy.float32)
        # A fresh mono output stream is opened per note and closed after it.
        stream = p.open(format=pyaudio.paFloat32,
                        channels=1,
                        rate=sampleRate,
                        output=True)
        stream.write(volume*samples)
        stream.stop_stream()
        stream.close()
    p.terminate()
# Melody data: each entry is [piano key number, duration in seconds]
# (key 1 is A0; see noteToFrequency).
notes = [
    [56, 1],
    [55, 1],
    [56, 1],
    [55, 1],
    [56, 1],
    [51, 1],
    [54, 1],
    [52, 1],
    [49, 3]
]
playNotes(notes)
"numpy.arange",
"pyaudio.PyAudio"
] | [((174, 191), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (189, 191), False, 'import pyaudio\n'), ((366, 400), 'numpy.arange', 'numpy.arange', (['(sampleRate * note[1])'], {}), '(sampleRate * note[1])\n', (378, 400), False, 'import numpy\n')] |
from flask import Flask,render_template,request
import pickle
from sklearn.preprocessing import StandardScaler
import numpy as np
app = Flask(__name__)
model = pickle.load(open("model.pkl","rb"))
vehicle_class = {'COMPACT': 0.0,
'SUV - SMALL': 11.0,
'MID-SIZE': 2.0,
'TWO-SEATER': 13.0,
'MINICOMPACT': 3.0,
'SUBCOMPACT': 10.0,
'FULL-SIZE': 1.0,
'STATION WAGON - SMALL': 9.0,
'SUV - STANDARD': 12.0,
'VAN - CARGO': 14.0,
'VAN - PASSENGER': 15.0,
'PICKUP TRUCK - STANDARD': 6.0,
'MINIVAN': 4.0,
'SPECIAL PURPOSE VEHICLE': 7.0,
'STATION WAGON - MID-SIZE': 8.0,
'PICKUP TRUCK - SMALL': 5.0}
fuel_type = {'Z': 4.0, 'D': 0.0, 'X': 3.0, 'E': 1.0, 'N': 2.0}
transmission = {'AS5': 14.0,
'M6': 25.0,
'AV7': 22.0,
'AS6': 15.0,
'AM6': 8.0,
'A6': 3.0,
'AM7': 9.0,
'AV8': 23.0,
'AS8': 17.0,
'A7': 4.0,
'A8': 5.0,
'M7': 26.0,
'A4': 1.0,
'M5': 24.0,
'AV': 19.0,
'A5': 2.0,
'AS7': 16.0,
'A9': 6.0,
'AS9': 18.0,
'AV6': 21.0,
'AS4': 13.0,
'AM5': 7.0,
'AM8': 10.0,
'AM9': 11.0,
'AS10': 12.0,
'A10': 0.0,
'AV10': 20.0}
@app.route("/")
def home():
    """Render the landing page, passing the category-encoding lookup
    tables so the template can build its dropdowns."""
    return render_template('index.html',fuel_type=fuel_type,vehicle_class=vehicle_class,
                           transmission=transmission)
sc = StandardScaler()
@app.route("/predict",methods=["POST"])
def predict():
    """Predict from the submitted form and re-render the page with the result.

    Reads the posted form fields, orders them as the model expects, and
    passes the prediction to the template as ``predicted_value``.
    """
    features = dict(request.form.items())
    order_of_features = ["vehicle_class", "transmission", "fuel_type",
                         "engine_size", "cylinders", "fuel_consumption_c"]
    # Form values arrive as strings; the model expects a (1, 6) numeric
    # array, so cast to float explicitly (the original passed raw strings).
    X = np.array([features[name] for name in order_of_features],
                 dtype=float).reshape(1, -1)
    return render_template('index.html', fuel_type=fuel_type,
                           vehicle_class=vehicle_class,
                           transmission=transmission,
                           predicted_value=model.predict(X)[0])
if __name__ == "__main__":
app.run()
| [
"sklearn.preprocessing.StandardScaler",
"flask.Flask",
"numpy.array",
"flask.render_template",
"flask.request.form.items"
] | [((138, 153), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (143, 153), False, 'from flask import Flask, render_template, request\n'), ((1209, 1225), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1223, 1225), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1086, 1197), 'flask.render_template', 'render_template', (['"""index.html"""'], {'fuel_type': 'fuel_type', 'vehicle_class': 'vehicle_class', 'transmission': 'transmission'}), "('index.html', fuel_type=fuel_type, vehicle_class=\n vehicle_class, transmission=transmission)\n", (1101, 1197), False, 'from flask import Flask, render_template, request\n'), ((1308, 1328), 'flask.request.form.items', 'request.form.items', ([], {}), '()\n', (1326, 1328), False, 'from flask import Flask, render_template, request\n'), ((1557, 1568), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1565, 1568), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Date: 11/13/2017
Inputs:
valid file location
number of clusters
attributes to ignore
Outputs:
the centroids of the clusters
the number of instances in each class
the total square error
the number iterations that were used to get this result
Notes:
Runs in python 2.x
"""
import os
import csv
import random
import numpy as np
import math
import copy
def input():
    """Interactively gather clustering parameters from the user.

    Prompts for the CSV path, the desired cluster count, and a
    comma-separated list of attribute indices to ignore.
    Returns (data, headers, nclusters, ignore).

    NOTE(review): uses raw_input, so this module targets Python 2 (the
    file docstring says as much); `ignore` is a map object of ints there.
    """
    f = raw_input("Enter file location: ")
    data, headers = processFile(f)
    nclusters = int(raw_input('Enter the number of desired clusters: '))
    # Show each attribute with its index so the user can pick what to drop.
    for x in range(0,len(headers)):
        string = str(x) + " ,"  # unused; presumably leftover — TODO confirm
        print(str(x) + " - " + headers[x])
    ignoreString = raw_input('Enter the numbers (comma seperated) of the attributes you would like to ignore: ')
    ignore = [x.strip() for x in ignoreString.split(',')]
    ignore = map(int, ignore)
    return data, headers, nclusters, ignore
def processFile(f):
    """Read a CSV file and return its rows and header.

    :param f: path to the CSV file
    :return: (data, headers) where headers is the first row and data is
             a list of the remaining rows (each a list of strings)
    """
    # Text mode works with csv.reader on both Python 2 and 3 (the original
    # 'rb' mode raises under Python 3); `with` guarantees the handle closes.
    with open(f, 'r') as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)
        data = list(reader)
    return data, headers
def kmeans(data, headers, nclusters, ignore):
    """Run Lloyd's k-means on the rows of `data`.

    Mutates `data` in place by deleting the ignored columns, assigns each
    row a random initial cluster, then alternates centroid update
    (calcCentroid) and reassignment (calcClusters) until no point moves.

    :return: (instances, centroids, tse, iterations, unique, clusterIDs)
             where `instances` counts rows per cluster and `unique` lists
             the cluster ids actually present.
    """
    isStillMoving = True
    # Delete ignored columns highest-index first so earlier deletions
    # don't shift the positions of later ones.
    ignore = sorted(ignore, key=int, reverse=True)
    for row in data:
        for x in ignore:
            del row[x]
    data = np.array(data,dtype=np.float32)
    # NOTE(review): this np.empty result is immediately overwritten below
    # (dead code), and np.int was removed in NumPy 1.24.
    clusterIDs = np.empty(int(len(data)),dtype=np.int)
    clusterIDs = np.random.randint(low=0, high=nclusters, size=len(data))
    # initCentroids returns its sample wrapped in a one-element list,
    # hence the [0] unwrap after the dtype conversion.
    centroids = initCentroids(data, nclusters)
    centroids = np.array(centroids,dtype=np.float32)
    centroids = centroids[0]
    iterations = 1
    instances = []
    while(isStillMoving):
        centroids = calcCentroid(centroids,data,nclusters,clusterIDs)
        clusterIDs,isStillMoving = calcClusters(centroids,data,nclusters,clusterIDs)
        iterations += 1
    unique, instances = np.unique(clusterIDs, return_counts=True)
    tse = calcTSE(data,centroids,clusterIDs,unique)
    return instances, centroids, tse, iterations, unique, clusterIDs
def calcTSE(data, centroids, clusterIDs, unique):
    """Total squared error of the clustering.

    Sums, over every point, the squared Euclidean distance to the centroid
    of the cluster it is assigned to.

    Fix: the original kept a running `sqError` that was never reset and was
    re-added to `tse` after every cluster, so earlier clusters' errors were
    counted multiple times.  Each squared distance now contributes once.
    """
    tse = 0.0
    for x in range(len(centroids)):
        for y in range(len(clusterIDs)):
            if clusterIDs[y] == unique[x]:
                # Squared Euclidean distance, added exactly once per point.
                dist = float(np.linalg.norm(data[y] - centroids[x]))
                tse += dist ** 2
    return tse
def initCentroids(data, nclusters):
    """Choose `nclusters` distinct random rows of `data` as the starting
    centroids.  Returns the sample wrapped in a one-element list (callers
    unwrap it with [0])."""
    return [random.sample(data, nclusters)]
def calcCentroid(centroids, data, nclusters, clusterIDs):
    """Recompute each centroid as the mean of its assigned points.

    A cluster with no members keeps its previous centroid.  Mutates and
    returns `centroids`.

    Fix: the original declared `totals` / `totalInCluster` once before the
    cluster loop and never reset them, so each centroid was the average of
    its own points PLUS every earlier cluster's points.
    """
    for j in range(nclusters):
        # Fresh accumulators for every cluster.
        totals = np.zeros(len(data[0]), dtype=np.float32)
        totalInCluster = 0
        for k in range(len(data)):
            if clusterIDs[k] == j:
                totals += data[k]
                totalInCluster += 1
        if totalInCluster > 0:
            centroids[j] = totals / totalInCluster
    return centroids
def calcClusters(centroids, data, nclusters, clusterIDs):
    """Reassign every point to its nearest centroid (Euclidean distance).

    Mutates `clusterIDs` in place and returns (clusterIDs, isStillMoving),
    where isStillMoving is True iff any assignment changed this pass.

    Cleanups: removed the unused `originalClusterIDs` alias (it aliased,
    not copied, the list, so it never held the original values anyway) and
    replaced the math.pow(10, 10) sentinel with float('inf').
    """
    isStillMoving = False
    for i in range(len(clusterIDs)):
        bestMinimum = float('inf')
        currentCluster = 0
        for j in range(nclusters):
            distance = float(np.linalg.norm(centroids[j] - data[i]))
            if distance < bestMinimum:
                bestMinimum = distance
                currentCluster = j
        if clusterIDs[i] != currentCluster:
            clusterIDs[i] = currentCluster
            isStillMoving = True
    return clusterIDs, isStillMoving
def calcDistance(array1, array2):
    """Euclidean distance between two equal-length numeric arrays."""
    difference = array1 - array2
    return float(np.linalg.norm(difference))
def main():
    """Drive the interactive k-means run: gather inputs, cluster, and
    print each row's assignment plus per-cluster summaries."""
    data, headers, nclusters, ignore = input()
    # kmeans deletes ignored columns from `data` in place, so keep a deep
    # copy for printing the untouched rows afterwards.
    originalData = copy.deepcopy(data)
    instances, centroids, tse, iterations, unique, clusterIDs = kmeans(data, headers, nclusters, ignore)
    print("Here is the associated raw data:")
    for x in range(0,len(originalData)):
        print(str(clusterIDs[x]) + " - " + str(originalData[x]))
    print("")
    for x in range(0,nclusters):
        print("For cluster " + str(x) + ":")
        print("It had " + str(instances[x]) + " instances")
        print("and a final centroid at" + str(centroids[x]) + "\n")
    print("The algorithm took " + str(iterations) + " iterations and had a total square error of " + str(tse))
main()
| [
"copy.deepcopy",
"csv.reader",
"math.pow",
"random.sample",
"numpy.array",
"numpy.linalg.norm",
"numpy.unique"
] | [((1380, 1412), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1388, 1412), True, 'import numpy as np\n'), ((1622, 1659), 'numpy.array', 'np.array', (['centroids'], {'dtype': 'np.float32'}), '(centroids, dtype=np.float32)\n', (1630, 1659), True, 'import numpy as np\n'), ((1990, 2031), 'numpy.unique', 'np.unique', (['clusterIDs'], {'return_counts': '(True)'}), '(clusterIDs, return_counts=True)\n', (1999, 2031), True, 'import numpy as np\n'), ((3984, 4003), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3997, 4003), False, 'import copy\n'), ((1041, 1054), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1051, 1054), False, 'import csv\n'), ((2601, 2631), 'random.sample', 'random.sample', (['data', 'nclusters'], {}), '(data, nclusters)\n', (2614, 2631), False, 'import random\n'), ((3346, 3362), 'math.pow', 'math.pow', (['(10)', '(10)'], {}), '(10, 10)\n', (3354, 3362), False, 'import math\n'), ((3853, 3884), 'numpy.linalg.norm', 'np.linalg.norm', (['(array1 - array2)'], {}), '(array1 - array2)\n', (3867, 3884), True, 'import numpy as np\n'), ((2469, 2486), 'math.pow', 'math.pow', (['dist', '(2)'], {}), '(dist, 2)\n', (2477, 2486), False, 'import math\n')] |
"""
TRAINER
Module for training networks
<NAME> 2018
"""
import importlib
import torch
from torch.utils import tensorboard
from torch import nn
import numpy as np
from lernomatic.train import schedule
from lernomatic.models import common
# timing stuff
import time
from datetime import timedelta
# TODO : Update accuracy so that there is val_acc AND train_acc AND test_acc
class Trainer(object):
"""
Trainer
Base class for model trainers in lernomatic. Note that this is not
an abstract class and can be instantiated.
"""
def __init__(self, model:common.LernomaticModel=None, **kwargs) -> None:
self.model = model
# Training loop options
self.num_epochs :int = kwargs.pop('num_epochs', 10)
self.learning_rate :float = kwargs.pop('learning_rate', 1e-4)
self.momentum :float = kwargs.pop('momentum', 0.5)
self.weight_decay :float = kwargs.pop('weight_decay', 1e-5)
self.loss_function :str = kwargs.pop('loss_function', 'CrossEntropyLoss')
self.optim_function :str = kwargs.pop('optim_function', 'Adam')
self.cur_epoch :int = 0
# validation options
# checkpoint options
self.checkpoint_dir :str = kwargs.pop('checkpoint_dir', 'checkpoint')
self.checkpoint_name :str = kwargs.pop('checkpoint_name', 'ck')
self.save_hist :bool = kwargs.pop('save_hist', True)
# Internal options
self.verbose :float = kwargs.pop('verbose', True)
self.print_every :int = kwargs.pop('print_every', 10)
self.save_every :float = kwargs.pop('save_every', -1) # unit is iterations, -1 = save every epoch
self.save_best :float = kwargs.pop('save_best', False)
# Device options
self.device_id :int = kwargs.pop('device_id', -1)
self.device_map :float = kwargs.pop('device_map', None)
# dataset/loader options
self.batch_size :int = kwargs.pop('batch_size', 64)
self.val_batch_size :int = kwargs.pop('val_batch_size', 0)
self.train_dataset = kwargs.pop('train_dataset', None)
self.test_dataset = kwargs.pop('test_dataset', None)
self.val_dataset = kwargs.pop('val_dataset', None)
self.shuffle :float = kwargs.pop('shuffle', True)
self.num_workers :int = kwargs.pop('num_workers' , 1)
self.drop_last :bool = kwargs.pop('drop_last', True)
# parameter scheduling
self.lr_scheduler = kwargs.pop('lr_scheduler', None)
self.mtm_scheduler = kwargs.pop('mtm_scheduler', None)
self.stop_when_acc :float = kwargs.pop('stop_when_acc', 0.0)
self.early_stop :dict = kwargs.pop('early_stop', None)
# Tensorboard writer
self.tb_writer:tensorboard.SummaryWriter = kwargs.pop('tb_writer', None)
self.start_epoch = 0
if self.val_batch_size == 0:
self.val_batch_size = self.batch_size
# set up device
self._init_device()
# Setup optimizer. If we have no model then assume it will be
self._init_optimizer()
# Init the internal dataloader options. If nothing provided assume that
# we will load options in later (eg: from checkpoint)
self._init_dataloaders()
# Init the loss and accuracy history. If no train_loader is provided
# then we assume that one will be loaded later (eg: in some checkpoint
# data)
self._init_history()
self._send_to_device()
self.best_acc = 0.0
if (self.train_loader is not None) and (self.save_every < 0):
self.save_every = len(self.train_loader)-1
if self.save_every > 0:
self.save_best = True
def __repr__(self) -> str:
return 'Trainer (%d epochs)' % self.num_epochs
def __str__(self) -> str:
s = []
s.append('Trainer :\n')
param = self.get_trainer_params()
for k, v in param.items():
s.append('\t [%s] : %s\n' % (str(k), str(v)))
return ''.join(s)
def _init_optimizer(self) -> None:
if self.model is not None:
if hasattr(torch.optim, self.optim_function):
self.optimizer = getattr(torch.optim, self.optim_function)(
self.model.get_model_parameters(),
lr = self.learning_rate,
weight_decay = self.weight_decay
)
else:
raise ValueError('Cannot find optim function %s' % str(self.optim_function))
else:
self.optimizer = None
# Get a loss function
if hasattr(nn, self.loss_function):
loss_obj = getattr(nn, self.loss_function)
self.criterion = loss_obj()
else:
raise ValueError('Cannot find loss function [%s]' % str(self.loss_function))
def _init_history(self) -> None:
self.loss_iter = 0
self.val_loss_iter = 0
self.acc_iter = 0
if self.train_loader is not None:
self.loss_history = np.zeros(len(self.train_loader) * self.num_epochs)
self.iter_per_epoch = int(len(self.train_loader) / self.num_epochs)
else:
self.loss_history = None
self.iter_per_epoch = 0
if self.val_loader is not None:
self.val_loss_history = np.zeros(len(self.val_loader) * self.num_epochs)
self.acc_history = np.zeros(len(self.val_loader) * self.num_epochs)
else:
self.val_loss_history = None
self.acc_history = None
def _init_dataloaders(self) -> None:
if self.train_dataset is None:
self.train_loader = None
else:
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset,
batch_size = self.batch_size,
drop_last = self.drop_last,
shuffle = self.shuffle
)
if self.test_dataset is None:
self.test_loader = None
else:
self.test_loader = torch.utils.data.Dataloader(
self.test_dataset,
batch_size = self.val_batch_size,
drop_last = self.drop_last,
shuffle = self.shuffle
)
if self.val_dataset is None:
self.val_loader = None
else:
self.val_loader = torch.utils.data.DataLoader(
self.val_dataset,
batch_size = self.val_batch_size,
drop_last = self.drop_last,
shuffle = False
)
def _init_device(self) -> None:
if self.device_id < 0:
self.device = torch.device('cpu')
else:
self.device = torch.device('cuda:%d' % self.device_id)
def _send_to_device(self) -> None:
self.model.send_to(self.device)
def set_num_epochs(self, num_epochs:int) -> None:
if num_epochs > self.num_epochs:
# resize history
temp_loss_history = np.copy(self.loss_history)
if self.val_loss_history is not None:
temp_val_loss_history = np.copy(self.val_loss_history)
if self.acc_history is not None:
temp_acc_history = np.copy(self.acc_history)
temp_loss_iter = self.loss_iter
temp_val_loss_iter = self.val_loss_iter
temp_acc_iter = self.acc_iter
self.num_epochs = num_epochs
self._init_history()
# restore old history
self.loss_history[:len(temp_loss_history)] = temp_loss_history
if self.val_loss_history is not None:
self.val_loss_history[:len(temp_val_loss_history)] = temp_val_loss_history
if self.acc_history is not None:
self.acc_history[:len(temp_acc_history)] = temp_acc_history
self.loss_iter = temp_loss_iter
self.val_loss_iter = temp_val_loss_iter
self.acc_iter = temp_acc_iter
else:
self.num_epochs = num_epochs
def get_num_epochs(self) -> int:
return self.num_epochs
def get_cur_epoch(self) -> int:
return self.cur_epoch
# ======== getters, setters
def get_model(self) -> common.LernomaticModel:
return self.model
def get_model_params(self) -> dict:
if self.model is None:
return None
return self.model.get_net_state_dict()
# common getters/setters
def get_learning_rate(self) -> float:
return self.optimizer.param_groups[0]['lr']
def set_learning_rate(self, lr: float, param_zero:bool=True) -> None:
if param_zero:
self.optimizer.param_groups[0]['lr'] = lr
else:
for g in self.optimizer.param_groups:
g['lr'] = lr
def get_momentum(self) -> float:
optim_state = self.optimizer.state_dict()
if 'momentum' in optim_state:
return optim_state['momentum']
return None
def set_momentum(self, momentum: float) -> None:
optim_state = self.optimizer.state_dict()
if 'momentum' in optim_state:
for g in self.optimizer.param_groups:
g['momentum'] = momentum
# Update batch size
def set_batch_size(self, batch_size:int) -> None:
self.batch_size = batch_size
self._init_dataloaders()
def set_lr_scheduler(self, lr_scheduler: schedule.LRScheduler) -> None:
self.lr_scheduler = lr_scheduler
def get_lr_scheduler(self) -> schedule.LRScheduler:
return self.lr_scheduler
def set_mtm_scheduler(self, mtm_scheduler) -> None:
self.mtm_scheduler = mtm_scheduler
def get_mtm_scheduler(self) -> schedule.LRScheduler:
return self.mtm_scheduler
def set_train_dataset(self, train_dataset) -> None:
self.train_dataset = train_dataset
self._init_dataloaders()
def set_val_dataset(self, val_dataset) -> None:
self.val_dataset = val_dataset
self._init_dataloaders()
def set_test_dataset(self, test_dataset) -> None:
self.test_dataset = test_dataset
self._init_dataloaders()
def set_tb_writer(self, writer:tensorboard.SummaryWriter) -> None:
self.tb_writer = writer
def apply_lr_schedule(self) -> None:
if self.lr_scheduler is not None:
if isinstance(self.lr_scheduler, schedule.TriangularDecayWhenAcc):
new_lr = self.lr_scheduler.get_lr(self.loss_iter, self.acc_history[self.acc_iter])
elif isinstance(self.lr_scheduler, schedule.EpochSetScheduler) or \
isinstance(self.lr_scheduler, schedule.DecayWhenEpoch) or \
isinstance(self.lr_scheduler, schedule.DecayToEpoch):
new_lr = self.lr_scheduler.get_lr(self.cur_epoch)
elif isinstance(self.lr_scheduler, schedule.DecayWhenAcc):
new_lr = self.lr_scheduler.get_lr(self.acc_history[self.acc_iter])
else:
new_lr = self.lr_scheduler.get_lr(self.loss_iter)
self.set_learning_rate(new_lr)
if self.tb_writer is not None:
scalar_tag = 'schedule/lr_%s' % repr(self.lr_scheduler)
self.tb_writer.add_scalar(scalar_tag, new_lr, self.loss_iter)
def apply_mtm_schedule(self) -> None:
if self.mtm_scheduler is not None:
if isinstance(self.mtm_scheduler, schedule.TriangularDecayWhenAcc):
new_mtm = self.mtm_scheduler.get_lr(self.loss_iter, self.acc_history[self.acc_iter])
elif isinstance(self.mtm_scheduler, schedule.EpochSetScheduler) or isinstance(self.mtm_scheduler, schedule.DecayWhenEpoch):
new_mtm = self.mtm_scheduler.get_lr(self.cur_epoch)
elif isinstance(self.mtm_scheduler, schedule.DecayWhenAcc):
new_mtm = self.mtm_scheduler.get_lr(self.acc_history[self.acc_iter])
else:
new_mtm = self.mtm_scheduler.get_lr(self.loss_iter)
self.set_momentum(new_mtm)
if self.tb_writer is not None:
scalar_tag = 'schedule/mtm_%s' % repr(self.mtm_scheduler)
self.tb_writer.add_scalar(scalar_tag, new_mtm, self.loss_iter)
# Layer freeze / unfreeze
def freeze_to(self, layer_num: int) -> None:
"""
Freeze layers in model from the start of the network forwards
"""
for n, param in enumerate(self.model.parameters()):
param.requires_grad = False
if n >= layer_num:
break
def unfreeze_to(self, layer_num: int) -> None:
"""
Unfreeze layers in model from the start of the network forwards
"""
for n, param in enumerate(self.model.parameters()):
param.requires_grad = True
if n >= layer_num:
break
# Basic training/test routines. Specialize these when needed
def train_epoch(self) -> None:
"""
TRAIN_EPOCH
Perform training on the model for a single epoch of the dataset
"""
self.model.set_train()
# training loop
for batch_idx, (data, target) in enumerate(self.train_loader):
# move data
data = data.to(self.device)
target = target.to(self.device)
# optimization
output = self.model.forward(data)
loss = self.criterion(output, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (batch_idx > 0) and (batch_idx % self.print_every) == 0:
print('[TRAIN] : Epoch iteration Loss')
print(' [%3d/%3d] [%6d/%6d] %.6f' %\
(self.cur_epoch+1, self.num_epochs, batch_idx, len(self.train_loader), loss.item()))
# if we have a tensorboard writer, update that as well
if self.tb_writer is not None:
self.tb_writer.add_scalar('loss/train', loss.item(), self.loss_iter)
self.loss_history[self.loss_iter] = loss.item()
self.loss_iter += 1
# save checkpoints
if self.save_every > 0 and (self.loss_iter % self.save_every) == 0:
ck_name = self.checkpoint_dir + '/' + self.checkpoint_name +\
'_epoch_' + str(self.cur_epoch) + '_iter_' + str(self.loss_iter) + '.pkl'
if self.verbose:
print('\t Saving checkpoint to file [%s] ' % str(ck_name))
self.save_checkpoint(ck_name)
# apply scheduling
self.apply_lr_schedule()
self.apply_mtm_schedule()
def val_epoch(self) -> None:
"""
VAL_EPOCH
Run a single epoch on the test dataset
"""
self.model.set_eval()
val_loss = 0.0
correct = 0
for batch_idx, (data, labels) in enumerate(self.val_loader):
data = data.to(self.device)
labels = labels.to(self.device)
output = self.model.forward(data)
loss = self.criterion(output, labels)
val_loss += loss.item()
# accuracy
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(labels.data.view_as(pred)).sum().item()
if (batch_idx % self.print_every) == 0:
print('[VAL ] : Epoch iteration Val Loss')
print(' [%3d/%3d] [%6d/%6d] %.6f' %\
(self.cur_epoch+1, self.num_epochs, batch_idx, len(self.val_loader), loss.item()))
if self.tb_writer is not None:
self.tb_writer.add_scalar('loss/val', loss.item(), self.val_loss_iter)
self.val_loss_history[self.val_loss_iter] = loss.item()
self.val_loss_iter += 1
avg_val_loss = val_loss / len(self.val_loader)
acc = correct / len(self.val_loader.dataset)
self.acc_history[self.acc_iter] = acc
self.acc_iter += 1
print('[VAL ] : Avg. Val Loss : %.4f, Accuracy : %d / %d (%.4f%%)' %\
(avg_val_loss, correct, len(self.val_loader.dataset),
100.0 * acc)
)
if self.tb_writer is not None:
self.tb_writer.add_scalar('acc/val', acc, self.acc_iter)
# save the best weights
if acc > self.best_acc:
self.best_acc = acc
if self.save_best is True:
ck_name = self.checkpoint_dir + '/' + 'best_' + self.checkpoint_name + '.pkl'
if self.verbose:
print('\t Saving checkpoint to file [%s] ' % str(ck_name))
self.save_checkpoint(ck_name)
def test_epoch(self) -> None:
self.model.set_eval()
test_loss = 0.0
correct = 0
for batch_idx, (data, labels) in enumerate(self.test_loader):
data = data.to(self.device)
labels = labels.to(self.device)
output = self.model.forward(data)
loss = self.criterion(output, labels)
test_loss += loss.item()
# accuracy
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(labels.data.view_as(pred)).sum().item()
if (batch_idx % self.print_every) == 0:
print('[VAL ] : Epoch iteration Val Loss')
print(' [%3d/%3d] [%6d/%6d] %.6f' %\
(self.cur_epoch+1, self.num_epochs, batch_idx, len(self.val_loader), loss.item()))
if self.tb_writer is not None:
self.tb_writer.add_scalar('loss/test', loss.item(), self.test_loss_iter)
self.val_loss_history[self.val_loss_iter] = loss.item()
self.val_loss_iter += 1
if self.tb_writer is not None:
self.tb_writer.add_scalar('acc/test', acc, self.test_loss_iter)
def train(self) -> None:
"""
TRAIN
Standard training routine
"""
if self.save_every == -1:
self.save_every = len(self.train_loader)-1
for epoch in range(self.cur_epoch, self.num_epochs):
epoch_start_time = time.time()
self.train_epoch()
epoch_end_time = time.time()
epoch_total_time = epoch_end_time - epoch_start_time
print('Epoch %d [%s] took %s' %\
(epoch+1, repr(self), str(timedelta(seconds = epoch_total_time)))
)
if self.val_loader is not None:
val_epoch_start_time = time.time()
self.val_epoch()
val_epoch_end_time = time.time()
val_epoch_total_time = val_epoch_end_time - val_epoch_start_time
print('Epoch %d (validation) [%s] took %s' %\
(epoch+1, repr(self), str(timedelta(seconds = val_epoch_total_time)))
)
if self.test_loader is not None:
self.test_epoch()
# save history at the end of each epoch
if self.save_hist:
hist_name = self.checkpoint_dir + '/' + self.checkpoint_name + '_history.pkl'
if self.verbose:
print('\t Saving history to file [%s] ' % str(hist_name))
self.save_history(hist_name)
# check we have reached the required accuracy and can stop early
if self.stop_when_acc > 0.0 and self.val_loader is not None:
if self.acc_history[self.acc_iter] >= self.stop_when_acc:
return
# check if we need to perform early stopping
if self.early_stop is not None:
if self.cur_epoch > self.early_stop['num_epochs']:
acc_then = self.acc_history[self.acc_iter - self.early_stop['num_epochs']]
acc_now = self.acc_history[self.acc_iter]
acc_delta = acc_now - acc_then
if acc_delta < self.early_stop['improv']:
if self.verbose:
print('[%s] Stopping early at epoch %d' % (repr(self), self.cur_epoch))
return
self.cur_epoch += 1
# history getters - these provide the history up to the current iteration
def get_loss_history(self) -> np.ndarray:
if self.loss_iter == 0:
return None
return self.loss_history[0 : self.loss_iter]
def get_cur_loss(self) -> float:
if self.loss_iter == 0:
return 0.0
return self.loss_history[self.loss_iter]
def get_cur_epoch_loss(self) -> np.ndarray:
if self.loss_iter == 0:
return None
return self.loss_history[self.loss_iter - len(self.train_loader) : self.loss_iter]
def get_val_loss_history(self) -> np.ndarray:
if self.val_loss_iter == 0:
return None
return self.val_loss_history[0 : self.val_loss_iter]
def get_cur_val_loss(self) -> float:
if self.val_loss_iter == 0:
return 0.0
return self.val_loss_history[self.val_loss_iter]
def get_cur_epoch_val_loss(self) -> np.ndarray:
if self.val_loss_iter == 0:
return None
return self.val_loss_history[self.val_loss_iter - len(self.val_loader) : self.val_loss_iter]
def get_acc_history(self) -> np.ndarray:
if self.acc_iter == 0:
return None
return self.acc_history[0 : self.acc_iter]
def get_cur_acc(self) -> float:
if self.acc_iter == 0:
return 0.0
return self.acc_history[self.acc_iter]
# model checkpoints
def save_checkpoint(self, fname : str) -> None:
if self.verbose:
print('\t Saving checkpoint (epoch %d) to [%s]' % (self.cur_epoch, fname))
checkpoint_data = {
'model' : self.model.get_params(),
'optim' : self.optimizer.state_dict(),
'trainer_params' : self.get_trainer_params(),
}
torch.save(checkpoint_data, fname)
def load_checkpoint(self, fname: str) -> None:
"""
Load all data from a checkpoint
"""
checkpoint_data = torch.load(fname)
self.set_trainer_params(checkpoint_data['trainer_params'])
# here we just load the object that derives from LernomaticModel. That
# object will in turn load the actual nn.Module data from the
# checkpoint data with the 'model' key
model_import_path = checkpoint_data['model']['model_import_path']
imp = importlib.import_module(model_import_path)
mod = getattr(imp, checkpoint_data['model']['model_name'])
self.model = mod()
self.model.set_params(checkpoint_data['model'])
# Load optimizer
self._init_optimizer()
self.optimizer.load_state_dict(checkpoint_data['optim'])
# Transfer all the tensors to the current device
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(self.device)
# restore trainer object info
self._send_to_device()
def load_model_checkpoint(self, fname:str) -> None:
"""
Load only the model component of a checkpoint. Trainer parameters
are not affected
"""
checkpoint_data = torch.load(fname)
model_import_path = checkpoint_data['model']['model_import_path']
imp = importlib.import_module(model_import_path)
mod = getattr(imp, checkpoint_data['model']['model_name'])
self.model = mod()
self.model.set_params(checkpoint_data['model'])
self._send_to_device()
# Trainer parameters
def get_trainer_params(self) -> dict:
params = dict()
params['num_epochs'] = self.num_epochs
params['learning_rate'] = self.learning_rate
params['momentum'] = self.momentum
params['weight_decay'] = self.weight_decay
params['loss_function'] = self.loss_function
params['optim_function'] = self.optim_function
params['cur_epoch'] = self.cur_epoch
params['iter_per_epoch'] = self.iter_per_epoch
# also get print, save params
params['save_every'] = self.save_every
params['print_every'] = self.print_every
# dataloader params (to regenerate data loader)
params['batch_size'] = self.batch_size
params['val_batch_size'] = self.val_batch_size
params['shuffle'] = self.shuffle
return params
def set_trainer_params(self, params: dict) -> None:
self.num_epochs = params['num_epochs']
self.learning_rate = params['learning_rate']
self.momentum = params['momentum']
self.weight_decay = params['weight_decay']
self.loss_function = params['loss_function']
self.optim_function = params['optim_function']
self.cur_epoch = params['cur_epoch']
self.iter_per_epoch = params['iter_per_epoch']
self.save_every = params['save_every']
self.print_every = params['print_every']
# dataloader params
self.batch_size = params['batch_size']
self.val_batch_size = params['val_batch_size']
self.shuffle = params['shuffle']
self._init_device()
self._init_dataloaders()
| [
"torch.utils.data.DataLoader",
"importlib.import_module",
"torch.utils.data.Dataloader",
"numpy.copy",
"torch.load",
"time.time",
"torch.save",
"datetime.timedelta",
"torch.device"
] | [((22263, 22297), 'torch.save', 'torch.save', (['checkpoint_data', 'fname'], {}), '(checkpoint_data, fname)\n', (22273, 22297), False, 'import torch\n'), ((22440, 22457), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (22450, 22457), False, 'import torch\n'), ((22809, 22851), 'importlib.import_module', 'importlib.import_module', (['model_import_path'], {}), '(model_import_path)\n', (22832, 22851), False, 'import importlib\n'), ((23645, 23662), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (23655, 23662), False, 'import torch\n'), ((23751, 23793), 'importlib.import_module', 'importlib.import_module', (['model_import_path'], {}), '(model_import_path)\n', (23774, 23793), False, 'import importlib\n'), ((5889, 6016), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.train_dataset'], {'batch_size': 'self.batch_size', 'drop_last': 'self.drop_last', 'shuffle': 'self.shuffle'}), '(self.train_dataset, batch_size=self.batch_size,\n drop_last=self.drop_last, shuffle=self.shuffle)\n', (5916, 6016), False, 'import torch\n'), ((6217, 6348), 'torch.utils.data.Dataloader', 'torch.utils.data.Dataloader', (['self.test_dataset'], {'batch_size': 'self.val_batch_size', 'drop_last': 'self.drop_last', 'shuffle': 'self.shuffle'}), '(self.test_dataset, batch_size=self.\n val_batch_size, drop_last=self.drop_last, shuffle=self.shuffle)\n', (6244, 6348), False, 'import torch\n'), ((6548, 6671), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.val_dataset'], {'batch_size': 'self.val_batch_size', 'drop_last': 'self.drop_last', 'shuffle': '(False)'}), '(self.val_dataset, batch_size=self.\n val_batch_size, drop_last=self.drop_last, shuffle=False)\n', (6575, 6671), False, 'import torch\n'), ((6848, 6867), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6860, 6867), False, 'import torch\n'), ((6908, 6948), 'torch.device', 'torch.device', (["('cuda:%d' % self.device_id)"], {}), "('cuda:%d' % self.device_id)\n", 
(6920, 6948), False, 'import torch\n'), ((7186, 7212), 'numpy.copy', 'np.copy', (['self.loss_history'], {}), '(self.loss_history)\n', (7193, 7212), True, 'import numpy as np\n'), ((18428, 18439), 'time.time', 'time.time', ([], {}), '()\n', (18437, 18439), False, 'import time\n'), ((18500, 18511), 'time.time', 'time.time', ([], {}), '()\n', (18509, 18511), False, 'import time\n'), ((7303, 7333), 'numpy.copy', 'np.copy', (['self.val_loss_history'], {}), '(self.val_loss_history)\n', (7310, 7333), True, 'import numpy as np\n'), ((7414, 7439), 'numpy.copy', 'np.copy', (['self.acc_history'], {}), '(self.acc_history)\n', (7421, 7439), True, 'import numpy as np\n'), ((18806, 18817), 'time.time', 'time.time', ([], {}), '()\n', (18815, 18817), False, 'import time\n'), ((18888, 18899), 'time.time', 'time.time', ([], {}), '()\n', (18897, 18899), False, 'import time\n'), ((18668, 18703), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'epoch_total_time'}), '(seconds=epoch_total_time)\n', (18677, 18703), False, 'from datetime import timedelta\n'), ((19093, 19132), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'val_epoch_total_time'}), '(seconds=val_epoch_total_time)\n', (19102, 19132), False, 'from datetime import timedelta\n')] |
''' Code used to generate paired data by introducing perturbations
to the images in the Celebahq dataset
An image translation model learns to generate the reconstructed
image from the imperfectly blended face image
'''
import sys
import os
import random
from glob import glob
from tqdm import tqdm
import multiprocessing as mp
from multiprocessing import Process
from concurrent.futures import ThreadPoolExecutor, as_completed
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# from face_segmentation import generate_seg_mask
import torch
import torchvision.transforms as transforms
from model import BiSeNet
import os.path as osp
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
perturbed_image_dir = '/ssd_scratch/cvit/aditya1/CelebAPerturbed/'
os.makedirs(perturbed_image_dir, exist_ok=True)
PERTURBATIONS_PER_IDENTITY = 5 # indicates the number of perturbations per identity
ngpus = torch.cuda.device_count()
n_classes = 19
nets = [BiSeNet(n_classes=n_classes).to(device='cuda:{}'.format(id)) for id in range(ngpus)]
# load the pretrained checkpoint
cp_path = '../res/cp/79999_iter.pth'
[net.load_state_dict(torch.load(cp_path)) for net in nets]
# set the model into evaluation mode
[net.eval() for net in nets]
to_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# Perturbation functions -
# 1. Translation along the horizontal direction
# 2. Translation along the vertical direction
# 3. Clockwise and anti-clockwise rotation
# 4. Resize (zoom-in and zoom-out)
# Translates the image in the horizontal direction
def translate_horizontal(x, image):
M = np.float32([
[1, 0, x],
[0, 1, 0]
])
# Apply the translation on the image
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
return shifted
# Translates the image in the vertical direction
def translate_vertical(y, image):
M = np.float32([
[1, 0, 0],
[0, 1, y]
])
# Apply the translation to the image
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
return shifted
# Rotate the image in the clockwise or anti-clockwise direction by the specified degrees of rotation
def rotate_image(rotation, image):
# Rotate the image about the center point
h, w = image.shape[:2]
cX, cY = (w//2, h//2)
M = cv2.getRotationMatrix2D((cX, cY), rotation, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
# Resize the image
# If the image is zoomed out, then add padding to match the dimension of the image
# If the image is zoomed in, then crop the image to match dimension of the image
def resize_image(magnification, image):
res = cv2.resize(image, None, fx=magnification, fy=magnification, interpolation=cv2.INTER_CUBIC)
h, w = image.shape[:2]
if magnification >= 1:
cX, cY = res.shape[1] // 2, res.shape[0] // 2
left_index = cX - w // 2
upper_index = cY - h // 2
modified_image = res[upper_index : upper_index + h, left_index : left_index + w]
else:
modified_image = np.zeros((image.shape), dtype=np.uint8)
hs, ws = res.shape[:2]
difference_h = h - hs
difference_w = w - ws
left_index = difference_w // 2
upper_index = difference_h // 2
modified_image[upper_index : upper_index + hs, left_index : left_index + ws] = res
return modified_image
# Applies shear transformation to the image - applies the same share on both the axes
def shear_image(shear, image):
shear_x, shear_y = shear, shear
M = np.float32([
[1, shear_x, 0],
[shear_y, 1, 0]
])
sheared = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
return sheared
# Method used to blend the perturbed_image and the face_masked image
# generate_mask flag can be used to mask the region that will be occupied by the perturbation
def combine_images(face_mask, perturbed_image, generate_mask=True):
image_masked = face_mask.copy()
if generate_mask:
mask = perturbed_image[..., 0] != 0
image_masked[mask] = 0
combined_image = image_masked + perturbed_image
return combined_image
# Applies a composite perturbation to a single image
# Generate composite perturbations - generate the number of perturbations to apply randomly
def perturb_image_composite(face_image, face_mask):
perturbation_functions = [
translate_horizontal,
translate_vertical,
rotate_image,
resize_image,
shear_image
]
perturbation_function_map = {
translate_horizontal : [-20, 20, 1],
translate_vertical : [-20, 20, 1],
rotate_image : [-25, 25, 1],
resize_image : [90, 110, 100],
shear_image : [-10, 10, 100]
}
# indicates the number of perturbations required in the composite perturbation
# applies multiple distinct perturbations to the same image
# composite_perturbations = random.randint(0, len(perturbation_functions)-1)
composite_perturbations = list()
# ensures atleast one perturbation is produced
while len(composite_perturbations) == 0:
for i, perturbation_function in enumerate(perturbation_functions):
if random.randint(0, 1):
composite_perturbations.append(perturbation_function)
print(f'Perturbations applied : {composite_perturbations}', flush=True)
for perturbation_function in composite_perturbations:
perturbation_map = perturbation_function_map[perturbation_function]
perturbation_value = random.randint(perturbation_map[0], perturbation_map[1])/perturbation_map[2]
face_image = perturbation_function(perturbation_value, face_image)
perturbed_image = combine_images(face_mask, face_image)
return perturbed_image
# The perturb image function randomly selects a perturbation and the amount to perturb the face_image
# The perturbed image is then combined with the face_mask to produce the final image
# Potentially multiple perturbation functions can be combined to generate more complex perturbations
def perturb_image(face_image, face_mask):
perturbation_functions = [
translate_horizontal,
translate_vertical,
rotate_image,
resize_image
]
perturbation_function_map = {
translate_horizontal : [-20, 20, 1],
translate_vertical : [-20, 20, 1],
rotate_image : [-25, 25, 1],
resize_image : [90, 110, 100]
}
random_perturbation_index = random.randint(0, len(perturbation_functions)-1)
# random_perturbation_index = 0 # used for debugging
perturbation_function = perturbation_functions[random_perturbation_index]
perturbation_map = perturbation_function_map[perturbation_function]
perturbation_value = random.randint(perturbation_map[0], perturbation_map[1])/perturbation_map[2]
print(f'Using perturbation : {random_perturbation_index}, with value : {perturbation_value}', flush=True)
intermediate_perturbed_image = perturbation_function(perturbation_value, face_image)
perturbed_image = combine_images(face_mask, intermediate_perturbed_image)
return perturbed_image
# This function segments the face using the face segmentation information
def generate_segmented_face(segmented_image, original_image):
original_copy = np.asarray(original_image.copy())
original_copy = np.transpose(original_copy, (2, 0, 1))
# 3D mask needed for masking face (segmented background)
field3d_face_mask = np.broadcast_to((segmented_image == 1) | (segmented_image == 2) | (segmented_image == 3) |
(segmented_image == 4) | (segmented_image == 5) | (segmented_image == 6) |
(segmented_image == 7) | (segmented_image == 8) | (segmented_image == 9) |
(segmented_image == 10) | (segmented_image == 11) | (segmented_image == 12) |
(segmented_image == 13), original_copy.shape)
# 3D mask needed for masking background (segmenting face)
field3d_background_mask = np.broadcast_to((segmented_image == 0) | (segmented_image > 13), original_copy.shape)
background_image = original_copy.copy()
face_image = original_copy.copy()
background_image[field3d_face_mask] = 0
face_image[field3d_background_mask] = 0
background_image = np.transpose(background_image, (1, 2, 0))
face_image = np.transpose(face_image, (1, 2, 0))
# plt.imsave('/home2/aditya1/cvit/content_sync/face-parsing.PyTorch/extras/background_image.png', background_image)
# plt.imsave('/home2/aditya1/cvit/content_sync/face-parsing.PyTorch/extras/segmented_face.png', face_image)
return face_image, background_image
def get_random_name(random_chars='abcdefghijklmnopqrstuvwxyz01234566789', random_len=5):
random_list = list()
for i in range(random_len):
random_list.append(random_chars[random.randint(0, len(random_chars)-1)])
return ''.join(random_list)
def save_image(image_path, image):
plt.imsave(image_path, image)
def generate_segmentation(file, gpu_id):
with torch.no_grad():
img = Image.open(file)
image = img.resize((512, 512), Image.BILINEAR)
img = to_tensor(image)
img = torch.unsqueeze(img, 0)
device = torch.device('cuda:{}'.format(gpu_id))
img = img.to(device)
out = nets[gpu_id](img)[0]
parsing = out.squeeze(0).cpu().numpy().argmax(0)
return parsing, image
def test_sample():
file = '/ssd_scratch/cvit/aditya1/CelebA-HQ-img/13842.jpg'
gpu_id = 0
parsing, image = generate_segmentation(file, gpu_id)
for i in range(PERTURBATIONS_PER_IDENTITY):
face_image, background_image = generate_segmented_face(parsing, image)
perturbed_image = perturb_image_composite(face_image, background_image)
perturbed_filename, extension = osp.basename(file).split('.')
perturbed_image_path = osp.join(perturbed_image_dir, perturbed_filename + '_' + str(i) + '.' + extension)
save_image(perturbed_image_path, perturbed_image)
def data_gen(split_gpu):
split, gpu_id = split_gpu
for file in split:
print(f'Processing {file} with GPU {gpu_id}', flush=True)
parsing, image = generate_segmentation(file, gpu_id)
for i in range(PERTURBATIONS_PER_IDENTITY):
face_image, background_image = generate_segmented_face(parsing, image)
perturbed_image = perturb_image_composite(face_image, background_image)
perturbed_filename, extension = osp.basename(file).split('.')
perturbed_image_path = osp.join(perturbed_image_dir, perturbed_filename + '_' + str(i) + '.' + extension)
save_image(perturbed_image_path, perturbed_image)
if __name__ == '__main__':
# input_image = '114.jpg'
# cp_path = '../res/cp/79999_iter.pth'
# parsing, image = generate_seg_mask(input_image, cp_path)
# face_image, background_image = generate_segmented_face(parsing, image)
# perturbed_image = perturb_image(face_image, background_image)
# perturbed_image_name = get_random_name()
# print(f'Perturbed image name : {perturbed_image_name}')
# dir_path = '/home2/aditya1/cvit/content_sync/face-parsing.PyTorch/extras'
# perturbed_image_path = osp.join(dir_path, perturbed_image_name + '.png')
# plt.imsave(perturbed_image_path, perturbed_image)
# Generate the perturbations for all the images in the dataset
IMAGE_DIR = '/ssd_scratch/cvit/aditya1/CelebA-HQ-img'
images = glob(IMAGE_DIR + '/*.jpg')
print(f'Total number of images to process : {len(images)}', flush=True)
p = ThreadPoolExecutor(ngpus)
# Split the files into the number of GPUs available
splits = np.array_split(images, ngpus)
jobs = [(split, gpu_id) for gpu_id, split in enumerate(splits)]
futures = [p.submit(data_gen, job) for job in jobs]
_ = [r.result() for r in tqdm(as_completed(futures), total=len(futures))]
# test_sample()
# for i, split in enumerate(splits):
# generate_segmentation(i, split, perturbed_image_dir) | [
"torch.cuda.device_count",
"cv2.warpAffine",
"matplotlib.pyplot.imsave",
"glob.glob",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"cv2.getRotationMatrix2D",
"random.randint",
"torch.load",
"numpy.transpose",
"concurrent.futures.ThreadPoolExecutor",
"cv2.resize",
"o... | [((811, 858), 'os.makedirs', 'os.makedirs', (['perturbed_image_dir'], {'exist_ok': '(True)'}), '(perturbed_image_dir, exist_ok=True)\n', (822, 858), False, 'import os\n'), ((952, 977), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (975, 977), False, 'import torch\n'), ((449, 480), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (461, 480), False, 'import os\n'), ((1730, 1764), 'numpy.float32', 'np.float32', (['[[1, 0, x], [0, 1, 0]]'], {}), '([[1, 0, x], [0, 1, 0]])\n', (1740, 1764), True, 'import numpy as np\n'), ((1847, 1905), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(image.shape[1], image.shape[0])'], {}), '(image, M, (image.shape[1], image.shape[0]))\n', (1861, 1905), False, 'import cv2\n'), ((2017, 2051), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, y]]'], {}), '([[1, 0, 0], [0, 1, y]])\n', (2027, 2051), True, 'import numpy as np\n'), ((2135, 2193), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(image.shape[1], image.shape[0])'], {}), '(image, M, (image.shape[1], image.shape[0]))\n', (2149, 2193), False, 'import cv2\n'), ((2468, 2516), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', 'rotation', '(1.0)'], {}), '((cX, cY), rotation, 1.0)\n', (2491, 2516), False, 'import cv2\n'), ((2531, 2563), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (2545, 2563), False, 'import cv2\n'), ((2823, 2918), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': 'magnification', 'fy': 'magnification', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, None, fx=magnification, fy=magnification, interpolation=\n cv2.INTER_CUBIC)\n', (2833, 2918), False, 'import cv2\n'), ((3716, 3762), 'numpy.float32', 'np.float32', (['[[1, shear_x, 0], [shear_y, 1, 0]]'], {}), '([[1, shear_x, 0], [shear_y, 1, 0]])\n', (3726, 3762), True, 'import numpy as np\n'), ((3800, 3858), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 
'(image.shape[1], image.shape[0])'], {}), '(image, M, (image.shape[1], image.shape[0]))\n', (3814, 3858), False, 'import cv2\n'), ((7541, 7579), 'numpy.transpose', 'np.transpose', (['original_copy', '(2, 0, 1)'], {}), '(original_copy, (2, 0, 1))\n', (7553, 7579), True, 'import numpy as np\n'), ((7666, 8053), 'numpy.broadcast_to', 'np.broadcast_to', (['((segmented_image == 1) | (segmented_image == 2) | (segmented_image == 3) |\n (segmented_image == 4) | (segmented_image == 5) | (segmented_image == 6\n ) | (segmented_image == 7) | (segmented_image == 8) | (segmented_image ==\n 9) | (segmented_image == 10) | (segmented_image == 11) | (\n segmented_image == 12) | (segmented_image == 13))', 'original_copy.shape'], {}), '((segmented_image == 1) | (segmented_image == 2) | (\n segmented_image == 3) | (segmented_image == 4) | (segmented_image == 5) |\n (segmented_image == 6) | (segmented_image == 7) | (segmented_image == 8\n ) | (segmented_image == 9) | (segmented_image == 10) | (segmented_image ==\n 11) | (segmented_image == 12) | (segmented_image == 13), original_copy.\n shape)\n', (7681, 8053), True, 'import numpy as np\n'), ((8288, 8377), 'numpy.broadcast_to', 'np.broadcast_to', (['((segmented_image == 0) | (segmented_image > 13))', 'original_copy.shape'], {}), '((segmented_image == 0) | (segmented_image > 13),\n original_copy.shape)\n', (8303, 8377), True, 'import numpy as np\n'), ((8570, 8611), 'numpy.transpose', 'np.transpose', (['background_image', '(1, 2, 0)'], {}), '(background_image, (1, 2, 0))\n', (8582, 8611), True, 'import numpy as np\n'), ((8629, 8664), 'numpy.transpose', 'np.transpose', (['face_image', '(1, 2, 0)'], {}), '(face_image, (1, 2, 0))\n', (8641, 8664), True, 'import numpy as np\n'), ((9244, 9273), 'matplotlib.pyplot.imsave', 'plt.imsave', (['image_path', 'image'], {}), '(image_path, image)\n', (9254, 9273), True, 'import matplotlib.pyplot as plt\n'), ((11761, 11787), 'glob.glob', 'glob', (["(IMAGE_DIR + '/*.jpg')"], {}), "(IMAGE_DIR + 
'/*.jpg')\n", (11765, 11787), False, 'from glob import glob\n'), ((11873, 11898), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['ngpus'], {}), '(ngpus)\n', (11891, 11898), False, 'from concurrent.futures import ThreadPoolExecutor, as_completed\n'), ((11974, 12003), 'numpy.array_split', 'np.array_split', (['images', 'ngpus'], {}), '(images, ngpus)\n', (11988, 12003), True, 'import numpy as np\n'), ((1177, 1196), 'torch.load', 'torch.load', (['cp_path'], {}), '(cp_path)\n', (1187, 1196), False, 'import torch\n'), ((1323, 1344), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1342, 1344), True, 'import torchvision.transforms as transforms\n'), ((1354, 1420), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1374, 1420), True, 'import torchvision.transforms as transforms\n'), ((3218, 3255), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.uint8'}), '(image.shape, dtype=np.uint8)\n', (3226, 3255), True, 'import numpy as np\n'), ((6948, 7004), 'random.randint', 'random.randint', (['perturbation_map[0]', 'perturbation_map[1]'], {}), '(perturbation_map[0], perturbation_map[1])\n', (6962, 7004), False, 'import random\n'), ((9326, 9341), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9339, 9341), False, 'import torch\n'), ((9357, 9373), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (9367, 9373), False, 'from PIL import Image\n'), ((9474, 9497), 'torch.unsqueeze', 'torch.unsqueeze', (['img', '(0)'], {}), '(img, 0)\n', (9489, 9497), False, 'import torch\n'), ((1001, 1029), 'model.BiSeNet', 'BiSeNet', ([], {'n_classes': 'n_classes'}), '(n_classes=n_classes)\n', (1008, 1029), False, 'from model import BiSeNet\n'), ((5386, 5406), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (5400, 5406), False, 'import random\n'), ((5719, 5775), 'random.randint', 
'random.randint', (['perturbation_map[0]', 'perturbation_map[1]'], {}), '(perturbation_map[0], perturbation_map[1])\n', (5733, 5775), False, 'import random\n'), ((10105, 10123), 'os.path.basename', 'osp.basename', (['file'], {}), '(file)\n', (10117, 10123), True, 'import os.path as osp\n'), ((12163, 12184), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (12175, 12184), False, 'from concurrent.futures import ThreadPoolExecutor, as_completed\n'), ((10777, 10795), 'os.path.basename', 'osp.basename', (['file'], {}), '(file)\n', (10789, 10795), True, 'import os.path as osp\n')] |
# %% [markdown]
'''
# STEP2 - Embed Samples into Latent Space as Points
- Objective: Create LS with good similarity dispersion, both globally & locally
- Hyperparams: hparams = {'epochs': [40], 'image_size': [64,128,256], 'latent_dim': [64,128,256]}
- Metrics: Reconstruction error (MSE loss comparing before/after images),
Derived from... RASBT-STAT453 Spring2021 L17 4_VAE_celeba-inspect-latent.ipynb
https://github.com/rasbt/stat453-deep-learning-ss21/blob/main/L17/2_VAE_celeba-sigmoid_mse.ipynb
'''
# %%
# Import packages
import enum
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
import time, os, random
import torch
import torchvision
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, sampler, SubsetRandomSampler
from torchvision import datasets, transforms
import torch.nn.functional as F
import umap
from utils.experiment import save_dataframe
# from RASBT_helper_utils import set_deterministic, set_all_seeds
# from RASBT_helper_plotting import plot_training_loss
# from RASBT_helper_plotting import plot_generated_images
# from RASBT_helper_plotting import plot_latent_space_with_labels
# %%
# from RASBT-STAT453 Spring2021 L17 helper_utils
def set_deterministic():
    """Configure torch for reproducible (deterministic) execution.

    Disables the cuDNN autotuner and forces deterministic cuDNN kernels when
    CUDA is available, then enables torch's global deterministic mode.
    """
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    # FIX: torch.set_deterministic() was deprecated in torch 1.8 and removed
    # in 1.11; prefer the replacement API when it exists.
    if hasattr(torch, 'use_deterministic_algorithms'):
        torch.use_deterministic_algorithms(True)
    else:
        torch.set_deterministic(True)
def set_all_seeds(seed):
    """Seed every random-number source (Python, NumPy, torch CPU and CUDA)
    and export PL_GLOBAL_SEED so child processes can pick the seed up."""
    os.environ["PL_GLOBAL_SEED"] = str(seed)
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
# %%
# set training non-random-ness
RANDOM_SEED = 42
# BUG FIX: the original line was the bare name `set_deterministic` (a no-op
# expression); the function must actually be called.
set_deterministic()  # from RASBT_helper_utils
set_all_seeds(RANDOM_SEED)  # from RASBT_helper_utils
# setup CPU/GPU Device # >>>> TODO make DEVICE a hyperparam
CUDA_DEVICE_NUM = 0
DEVICE = torch.device(f'cuda:{CUDA_DEVICE_NUM}' if torch.cuda.is_available() else 'cpu')
# Local Hyperparameters
LEARNING_RATE = 0.0005
# NUM_EPOCHS = 400
NUM_CHANNELS = 1
IMAGE_SIZE = 128
# NUM_LATENT_DIMS = 200
# %%
# define VAE training dataset from samples_df
class VAE_Train_Dataset(Dataset):
    """Dataset over preprocessed sample images stored in a DataFrame.

    Each row of ``samples_df`` must provide:
      * 'img_array' -- 2-D numpy array holding the preprocessed image
      * 'img_label' -- scalar class label

    Images are stacked once at construction time into a single (N, H, W)
    array for fast per-item indexing.
    """

    def __init__(self, samples_df, transform=None):
        """Initialize the dataset.

        Args:
            samples_df (pd.DataFrame): sample info with 'img_array' and
                'img_label' columns (preprocessed in sample.py).
            transform (callable, optional): applied to each image before it
                is returned. Defaults to torchvision's ToTensor().
        """
        self.samples_df = samples_df
        self.data_len = len(samples_df.index)
        # stack the per-row arrays into one contiguous (N, H, W) block
        self.samples = np.stack(samples_df['img_array'])
        self.label_list = samples_df['img_label'].tolist()
        # FIX: resolve the default transform here instead of lazily mutating
        # self.transform inside __getitem__ (safer with DataLoader workers)
        self.transform = transform if transform is not None else transforms.ToTensor()

    def __len__(self):
        return self.data_len

    def __getitem__(self, idx):
        # allow tensor indices (e.g. from samplers)
        if torch.is_tensor(idx):
            idx = idx.item()
        train_data = self.samples[idx, :].squeeze()
        label_data = self.label_list[idx]
        train_data = self.transform(train_data)
        return train_data, label_data
# %%
# MODEL derived from RASBT-STAT453 Spring2021 L17 4_VAE_celeba-inspect-latent.ipynb
# assumes image size 128x128, extended to image sizes 32, 64, 96, 128
class Reshape(nn.Module):
    """Reshape incoming tensors to a fixed target shape.

    Usable inside an nn.Sequential; the target shape is captured at
    construction time (e.g. Reshape(-1, 64, 8, 8)).
    """

    def __init__(self, *args):
        super().__init__()
        self.shape = args

    def forward(self, x):
        target_shape = self.shape
        return x.view(target_shape)
class Trim(nn.Module):
    """Crop the two trailing spatial dims of an NCHW tensor.

    Keeps the top-left (img_height x img_width) region; used to trim the
    slightly-oversized decoder output back to the input image size.
    """

    def __init__(self, img_height, img_width, *args):
        super().__init__()
        self.img_height = img_height
        self.img_width = img_width

    def forward(self, x):
        h, w = self.img_height, self.img_width
        return x[:, :, :h, :w]
# dict table for shape of encoder output that depends on image size
# After the encoder's four stride-2 convolutions the spatial extent is
# size/16 with 64 channels, so the flattened size is (size/16)**2 * 64:
#   {32: 256, 64: 1024, 96: 2304, 128: 4096}
# was designed for 128x128
# assumes square images with size = 32, 64, 96, 128 but not >128! Why? >>>> TODO flex convnet params
CONV_SHAPE_TABLE = {32*i: 256*i**2 for i in range(1,5)}
class VAE(nn.Module):
    """Convolutional Variational Autoencoder for square images.

    The encoder downsamples by 16x via four stride-2 convolutions into a flat
    feature vector (size looked up in CONV_SHAPE_TABLE), which is projected to
    a latent mean and log-variance.  The decoder mirrors the encoder with
    transposed convolutions and trims its slightly-oversized output back to
    img_size x img_size.

    Args:
        num_channels (int): channels of the input images (1 for grayscale).
        img_size (int): square image size; must be one of 32, 64, 96, 128.
        num_latent_dims (int): dimensionality of the latent space.
    """
    def __init__(self, num_channels, img_size, num_latent_dims):
        super().__init__()
        self.encoder_out_shape = 0
        if img_size in CONV_SHAPE_TABLE:
            self.encoder_out_shape = CONV_SHAPE_TABLE[img_size]
        else:
            # NOTE(review): this falls through with encoder_out_shape == 0 and
            # only fails later inside the Linear layers -- consider raising.
            print(f' ERROR: img_size not multiple of 32x32 <= 128x128 square')
        self.no_channels = self.encoder_out_shape // 64 # get no_channels for decoder input
        self.encoder = nn.Sequential(
            nn.Conv2d(num_channels, 32, stride=2, kernel_size=3, bias=False, padding=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.Conv2d(32, 64, stride=2, kernel_size=3, bias=False, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.Conv2d(64, 64, stride=2, kernel_size=3, bias=False, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.Conv2d(64, 64, stride=2, kernel_size=3, bias=False, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.Flatten(),
        )
        self.z_mean = torch.nn.Linear(self.encoder_out_shape, num_latent_dims)
        self.z_log_var = torch.nn.Linear(self.encoder_out_shape, num_latent_dims)
        self.decoder = nn.Sequential(
            torch.nn.Linear(num_latent_dims, self.encoder_out_shape),
            # Reshape(-1, 64, 8, 8), # for img_size = 128 and encode_out = 4096
            Reshape(-1, self.no_channels, 8, 8),
            #
            nn.ConvTranspose2d(self.no_channels, 64, stride=2, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.ConvTranspose2d(64, 64, stride=2, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.ConvTranspose2d(64, 32, stride=2, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Dropout2d(0.25),
            #
            nn.ConvTranspose2d(32, num_channels, stride=2, kernel_size=3, padding=1),
            #
            Trim(img_size, img_size), # 3x129x129 -> 3x128x128 >>>> TODO Needed???? YES! WHY?
            nn.Sigmoid()
        )
    def encoding_fn(self, x):
        """Encode x and return one sampled latent vector per input."""
        x = self.encoder(x)
        z_mean, z_log_var = self.z_mean(x), self.z_log_var(x)
        encoded = self.reparameterize(z_mean, z_log_var)
        return encoded
    def reparameterize(self, z_mu, z_log_var):
        """Sample z ~ N(z_mu, exp(z_log_var)) via the reparameterization trick."""
        # FIX: torch.randn_like keeps eps on z_mu's device/dtype.  The original
        # used z_mu.get_device(), which returns -1 for CPU tensors and made
        # .to(...) fail when training on CPU.
        eps = torch.randn_like(z_mu)
        z = z_mu + eps * torch.exp(z_log_var/2.)
        return z
    def forward(self, x):
        """Return (encoded, z_mean, z_log_var, decoded) for input batch x."""
        x = self.encoder(x)
        z_mean, z_log_var = self.z_mean(x), self.z_log_var(x)
        encoded = self.reparameterize(z_mean, z_log_var)
        decoded = self.decoder(encoded)
        return encoded, z_mean, z_log_var, decoded
# %%
################# compute epoch loss for VAE
def compute_epoch_loss_autoencoder(model, data_loader, loss_fn, device):
    """Average per-sample reconstruction loss of `model` over `data_loader`.

    The model's forward must return a 4-tuple whose last element is the
    reconstruction; `loss_fn` is evaluated with reduction='sum' and the total
    is divided by the number of samples seen.
    """
    model.eval()
    total_loss = 0.
    total_examples = 0
    with torch.no_grad():
        for batch, _ in data_loader:
            batch = batch.to(device)
            reconstruction = model(batch)[3]  # decoded output of the VAE forward
            total_loss += loss_fn(reconstruction, batch, reduction='sum')
            total_examples += batch.size(0)
    return total_loss / total_examples
# %% -------------------------------------------------------------------------------
################# train VAE model
# derived from... RASBT-STAT453 Spring2021 L17 4_VAE_celeba-inspect-latent.ipynb
import wandb
def train_VAE(config,
            model, criterion, optimizer,
            train_loader,
            device,
            ):
    """Train a VAE with an MSE-reconstruction + beta-weighted KL loss.

    Per-batch and per-epoch losses are appended to the returned dict and also
    logged to Weights & Biases (wandb.watch / wandb.log).

    Args:
        config: object exposing at least .epochs (int) and .beta (KL weight).
        model: VAE whose forward returns (encoded, z_mean, z_log_var, decoded).
        criterion: only handed to wandb.watch for logging; the actual training
            loss is F.mse_loss (see TODO below).
        optimizer: torch optimizer over model.parameters().
        train_loader: DataLoader yielding (features, label) batches.
        device: device the batches are moved to.

    Returns:
        dict: 'train_combined_loss_per_batch', 'train_combined_loss_per_epoch',
        'train_reconstruction_loss_per_batch', 'train_kl_loss_per_batch'.
    """
    logging_interval = 1
    skip_epoch_stats = False  # NOTE(review): currently unused
    example_ct = 0 # number of samples seen
    wandb.watch(model, criterion, log="all", log_freq=10)
    log_dict = {'train_combined_loss_per_batch': [],
                'train_combined_loss_per_epoch': [],
                'train_reconstruction_loss_per_batch': [],
                'train_kl_loss_per_batch': []}
    loss_fn = F.mse_loss # set loss function >>>> TODO reconcile with W&B 'criterion'
    training_start = time.time()
    for epoch in range(config.epochs):
        epoch_start = time.time()
        model.train()
        for batch_idx, (features, _) in enumerate(train_loader):
            example_ct += len(features)
            features = features.to(device)
            # forward propagation
            encoded, z_mean, z_log_var, decoded = model(features)
            if features.shape != decoded.shape:
                print('>>> ERROR: features+decoded shapes NOT EQUAL ', features.shape, decoded.shape)
            # total loss = reconstruction loss + KL divergence
            #kl_divergence = (0.5 * (z_mean**2 +
            #                        torch.exp(z_log_var) - z_log_var - 1)).sum()
            kl_div = -0.5 * torch.sum(1 + z_log_var
                                      - z_mean**2
                                      - torch.exp(z_log_var),
                                      axis=1) # sum over latent dimension
            batchsize = kl_div.size(0)
            kl_div = kl_div.mean() # average over batch dimension
            pixelwise = loss_fn(decoded, features, reduction='none')
            pixelwise = pixelwise.view(batchsize, -1).sum(axis=1) # sum over pixels
            pixelwise = pixelwise.mean() # average over batch dimension
            # TODO https://medium.com/@chengjing/a-must-have-training-trick-for-vae-variational-autoencoder-d28ff53b0023#
            # beta weights the KL term against the reconstruction term
            loss = pixelwise + (config.beta * kl_div)
            # back-propagation
            optimizer.zero_grad() # zero previous gradients
            loss.backward() # calculate new ones
            optimizer.step() # step backward updating weights & biases
            # LOGGING BATCH
            log_dict['train_combined_loss_per_batch'].append(loss.item())
            log_dict['train_reconstruction_loss_per_batch'].append(pixelwise.item())
            log_dict['train_kl_loss_per_batch'].append(kl_div.item())
            if not batch_idx % logging_interval:
                epoch_duration = time.time() - epoch_start
                print(' Epoch: %03d/%03d | Batch %04d/%04d | Loss: %4.4f | Duration: %.3f sec'
                      % (epoch+1, config.epochs, batch_idx, len(train_loader), loss, epoch_duration))
        # LOGGING EPOCH: re-evaluate reconstruction loss over the full loader
        model.eval()
        with torch.set_grad_enabled(False): # save memory during inference
            train_loss = compute_epoch_loss_autoencoder(
                model, train_loader, loss_fn, device)
            log_dict['train_combined_loss_per_epoch'].append(train_loss.item())
            wandb.log({'recon_loss': train_loss.item()})
        epoch_duration = time.time() - epoch_start
        wandb.log({"epoch": config.epochs, "loss": train_loss}, step=example_ct)
        print('Epoch: %03d/%03d | Loss: %4.3f | Duration: %.3f sec' %
              (epoch+1, config.epochs, train_loss, epoch_duration))
    print('Training Time: %.1f min' % ((time.time() - training_start)/60))
    # if save_model is not None:
    #     torch.save(model.state_dict(), save_model)
    return log_dict
# %%
#################################################################
# plot learning curve
def plot_learning_curve(run_folder, log_dict, epochs, num_samples):
    """Plot per-batch training losses and save the figure as a PNG.

    Parameters
    ----------
    run_folder : str
        Output directory; its 'RUN...' suffix is used as the figure title tag.
    log_dict : dict
        Training log containing 'train_combined_loss_per_batch' and
        'train_reconstruction_loss_per_batch' lists.
    epochs : int
        Number of training epochs (shown in the x-axis label only).
    num_samples : int
        Number of training samples; batch losses are normalized by it.
    """
    # normalize summed batch losses to a per-sample scale
    t_loss = np.array(log_dict['train_combined_loss_per_batch']) / num_samples
    v_loss = np.array(log_dict['train_reconstruction_loss_per_batch']) / num_samples
    title_str = run_folder[run_folder.find('RUN') :] # Get run# & hparam from run_folder
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    # NOTE: the original called fig.suptitle and plt.suptitle; only the last
    # call survives in the rendered figure, so a single suptitle is kept.
    plt.suptitle(f'Learning Curve -- {title_str}', fontsize=12, fontweight='bold')
    ax = fig.add_subplot()
    ax.set_title(f'Loss mean={t_loss.mean():1.4f} min={t_loss.min():1.4f} max={t_loss.max():1.4}')
    plt.xlabel(f'Batch Iterations over {num_samples} Samples with {epochs} Epochs')
    plt.ylabel("MSE Loss")
    plt.plot(t_loss, label="Combined with KL Loss", marker='.', linestyle = 'None')
    plt.plot(v_loss, label="Reconstruction Loss", marker='.', linestyle = 'None')
    plt.legend(loc='upper right', shadow=True)
    plt.grid(axis = 'y')
    plt.savefig(run_folder+'/Point_Learning_Curve.png', bbox_inches='tight')
    plt.close()
# %%
#################################################################
# plot mse distribtion for all images
def plot_MSE_distribution(run_folder, points_df):
    """Save a histogram of per-image reconstruction MSE with mean +/- std markers.

    Reads the 'pt_mse_loss' column of *points_df* and writes
    'Point_MSE_Loss.png' into *run_folder*.
    """
    mse = np.vstack(points_df.pt_mse_loss)
    mu = mse.mean()
    sigma = mse.std()
    run_tag = run_folder[run_folder.find('RUN'):]  # run# & hparams from folder name
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    fig.suptitle('Point MSE Loss - ' + run_tag, fontsize=12, fontweight="bold")
    ax = fig.add_subplot()
    ax.set_title(f'MSE mean={mse.mean():1.4f} std={mse.std():1.4f} min={mse.min():1.4f} max={mse.max():1.4f} for {mse.shape[0]} samples')
    plt.hist(mse, bins=50, color='c', edgecolor='k', alpha=0.65)
    # vertical guides at mean and one standard deviation on either side
    for x_mark, colour, line in ((mu - sigma, 'g', 'dotted'),
                                 (mu, 'k', 'solid'),
                                 (mu + sigma, 'r', 'dotted')):
        plt.axvline(x_mark, color=colour, linestyle=line, linewidth=1)
    plt.xlabel("MSE with Mean +/- Std")
    plt.ylabel("Number of Images")
    plt.savefig(run_folder+'/Point_MSE_Loss.png')
    plt.close()
# %%
#################################################################
# plot std distribtion for all images
def plot_STD_distribution(run_folder, points_df):
    """Save a histogram of latent per-dimension std values ("fuzziness").

    Flattens the 'pt_std' column across all samples and dimensions and
    writes 'Point_STD_Fuzziness.png' into *run_folder*.
    """
    # >>>> TODO boxplot instead of hist
    # https://matplotlib.org/stable/gallery/statistics/boxplot_color.html#sphx-glr-gallery-statistics-boxplot-color-py
    std = np.vstack(points_df.pt_std)
    n_samples, n_dims = std.shape
    std = std.flatten()
    mu = std.mean()
    sigma = std.std()
    run_tag = run_folder[run_folder.find('RUN'):]  # run# & hparams from folder name
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    fig.suptitle('Point Fuzziness - ' + run_tag, fontsize=12, fontweight="bold")
    ax = fig.add_subplot()
    ax.set_title(f'STD mean={std.mean():1.4f} std={std.std():1.4f} min={std.min():1.4f} max={std.max():1.4f} for {n_samples} samples')
    plt.hist(std, bins=50, color='c', edgecolor='k', alpha=0.65)
    # vertical guides at mean and one standard deviation on either side
    for x_mark, colour, line in ((mu - sigma, 'g', 'dotted'),
                                 (mu, 'k', 'solid'),
                                 (mu + sigma, 'r', 'dotted')):
        plt.axvline(x_mark, color=colour, linestyle=line, linewidth=1)
    plt.xlabel(f"Point STD across all {n_dims} dims with Mean +/- one Std")
    plt.ylabel(f"Number of Point Positions ({n_samples} samples * {n_dims} dims)")
    plt.savefig(run_folder+'/Point_STD_Fuzziness.png')
    plt.close()
# %%
#################################################################
# Plot distribution of each latent dim
def plot_latent_space_density(run_folder, points_df):
    """Save a heatmap of per-dimension latent-position density.

    Positions from 'pt_encoded' are globally min/max normalized into
    nBins+1 integer bins, counted per latent dimension, and rendered as a
    2D image (rows = dimensions, columns = bins) in 'L_Space_Density.png'.
    """
    title_str = run_folder[run_folder.find('RUN') :] # Get run# & hparm from run_folder
    nBins = 100     # bin indices run 0..nBins inclusive after rounding
    pos = np.vstack(points_df.pt_encoded)
    nDim = pos.shape[1]
    # normalize all positions into [0, nBins] using the global min/max,
    # then round to the nearest integer bin index
    pos_norm = nBins * (pos - pos.min()) / (pos.max() - pos.min())
    pos_bins = (pos_norm + 0.5).astype(int)
    # count occurrences per (dimension, bin); np.bincount replaces the
    # original O(n*d) Python double loop with identical counts
    density = np.stack([np.bincount(pos_bins[:, j], minlength=nBins + 1)
                        for j in range(nDim)]).astype(np.int32)
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    fig.suptitle(f'L-Space Density -- {title_str}', fontsize=12, fontweight="bold")
    ax = fig.add_subplot()
    ax.set_title(f'Density mean={density.mean():.1f} min={density.min():d} max={density.max():d}')
    plt.imshow(density, cmap ='Greens', aspect='auto')
    plt.colorbar()
    plt.grid()
    plt.xlabel("Point Latent Position")
    plt.ylabel("Point Dimensions")
    # label x-axis ticks with the original (un-normalized) position range
    x_pos = np.arange(0,101,20)
    x_labels = [str(round(x, 1)) for x in np.linspace(pos.min(), pos.max(), num=6)]
    plt.xticks(x_pos,x_labels)
    plt.savefig(run_folder+'/L_Space_Density.png')
    plt.close()
# %%
#################################################################
# Plot class entanglement across all latent dims
def plot_latent_space_entangle(run_folder, points_df):
    """Plot per-class mean latent positions across all dimensions; return them.

    For each unique class label, computes the mean encoded position along
    every latent dimension, plots one line per class (dimension index on the
    y-axis), saves 'L_Space_Entanglement.png', and returns the means as a
    DataFrame with columns ['label', 'dim0', 'dim1', ...].
    """
    title_str = run_folder[run_folder.find('RUN') :] # Get run# & hparm from run_folder
    pos = np.vstack(points_df.pt_encoded)
    lab = np.vstack(points_df.pt_label)
    lab_unique = np.unique(lab)
    n_dim = pos.shape[1]
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    fig.suptitle(f'L-Space Entanglement -- {title_str}', fontsize=12, fontweight="bold")
    ax = fig.add_subplot()
    dat = []
    y = [n_dim - i - 1 for i in range(n_dim)]   # dimension 0 drawn at the top
    for c in lab_unique:
        # mean latent position of class c along each dimension
        x = [np.mean(pos[lab[:, 0] == c, i], axis=0) for i in range(n_dim)]
        dat.append([c] + x)
        # BUG FIX: plotting was outside the loop, so only the last class
        # was ever drawn; now every class gets its own line
        plt.plot(x, y, label=str(c))
    ax.legend(loc="upper left", title="Classes")
    plt.grid()
    plt.xlabel("Point Latent Position (normalized)")
    plt.ylabel("Point Dimensions")
    plt.savefig(run_folder+'/L_Space_Entanglement.png')
    plt.close()
    # create LS_entangle dataframe of per-class means
    col = ['label'] + ['dim'+str(d) for d in range(n_dim)]
    LS_entangle_df = pd.DataFrame(data=dat, columns=col)
    return LS_entangle_df
# %%
#################################################################
# plot first N images with their reconstruction
import math, random
def plot_reconstructed_images( run_folder,
                               points_df,
                               id_key,          # if list, plot keyed images; else tuple (plot_type, n_samples)
                                                # where plot_type = 'first', 'last', 'random', 'lo-mse', 'hi-mse'
                               tag='',
                               binarize=True,   # convert decoded_data to 0.0 or 1.0 at 0.5 cutoff
                               save_plot=True
                               ):
    """Plot original and reconstructed images side by side for selected samples.

    Parameters
    ----------
    run_folder : str
        Output directory; its 'RUN...' suffix is used as the figure title tag.
    points_df : pandas.DataFrame
        Must provide 'pt_feature', 'pt_decoded', 'pt_labstr', 'pt_mse_loss'.
    id_key : list | tuple
        Either an explicit list of row indices, or (plot_type, n_samples)
        with plot_type in {'first', 'last', 'random', 'lo-mse', 'hi-mse'}.
    tag : str
        Suffix for the saved filename; overwritten when id_key is a tuple.
    binarize : bool
        Threshold decoded pixels at 0.5 to {0, 1} before display.
    save_plot : bool
        Save a PNG when True, otherwise show the figure interactively.

    Raises
    ------
    ValueError
        If id_key is neither a list nor a recognized (plot_type, n) tuple.
    """
    N_MAX = 36          # hard cap on number of images in one figure
    N_PER_ROW = 6
    n_total = len(points_df.index)
    # resolve id_key into the list of sample ids to plot
    if type(id_key) is list:
        id_lst = id_key
        n_img = len(id_lst)
    elif type(id_key) is tuple:
        plot_type, n_img = id_key
        tag = plot_type.title() + ' ' + str(n_img)
        if plot_type.lower() == 'first':
            id_lst = range(n_img)
        elif plot_type.lower() == 'last':
            # BUG FIX: range(0, n_img, -1) was always empty; take the
            # last n_img row indices instead
            id_lst = range(max(n_total - n_img, 0), n_total)
        elif plot_type.lower() == 'random':
            id_lst = random.sample(range(n_total), k=n_img)
        elif plot_type.lower() == 'lo-mse':
            id_lst = np.argsort(points_df['pt_mse_loss'].to_numpy())[:N_MAX]
        elif plot_type.lower() == 'hi-mse':
            id_lst = np.argsort(points_df['pt_mse_loss'].to_numpy())[::-1][:N_MAX]
        else:
            # previously only printed, then crashed later with NameError
            raise ValueError(f'Bad id_key param "{id_key}" with unknown plot_type')
    else:
        raise ValueError(f'Bad id_key param "{id_key}" as not list or tuple')
    n_img = min(n_img, N_MAX)
    n_rows = int(math.ceil(n_img / N_PER_ROW))
    # get image data from points_df
    feat_data = np.array(points_df['pt_feature'].tolist())
    decoded_data = np.array(points_df['pt_decoded'].tolist())
    if binarize:
        decoded_data = np.where(decoded_data > 0.5, 1, 0)
    s_img = int(math.sqrt(feat_data[0,:].shape[0]))   # get square height/width size
    title_str = run_folder[run_folder.find('RUN') :] # Get run# & hparm from run_folder
    fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
    fig.suptitle(f'Reconstructed Points ({tag}) -- {title_str}', fontsize=12, fontweight="bold")
    ax = fig.add_subplot()
    ax.set_title(f'First image original, second recontructed, title with sample id + pre-label')
    for i, id in enumerate(id_lst):
        # stack original and reconstruction side by side into one image
        img1 = feat_data[id, :].reshape((s_img,s_img))
        img2 = decoded_data[id, :].reshape((s_img,s_img))
        img = np.hstack((img1, img2))
        ax = plt.subplot(n_rows, N_PER_ROW, i + 1)
        plt.imshow(img, cmap='binary_r')
        labstr = points_df['pt_labstr'].iloc[id]
        ax_title = f'#{str(id)} - {labstr.upper()}'
        ax.set_title(ax_title, fontsize=8, pad=0)
        ax.axvline(s_img, color='k', linestyle='solid', linewidth=1)  # divider line
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        if i+1 >= n_img: break
    if save_plot:
        plt.savefig(f'{run_folder}/Point_Reconstructed_Samples{"_" + tag}.png')
    else:
        plt.show()
    plt.close()
# %%
######### load a previous points_df to avoid retraining the same latent space
def load_previous_points(points_path):
    """Reload a previously saved points_df (JSON) from *points_path*.

    Lets a run reuse an already-trained latent space instead of retraining.
    """
    json_file = points_path + "/points_df.json"
    return pd.read_json(json_file)
# %%
######### Preprocess dataset images into NN data vectors
def create_points( samples_df,
                    image_size=32, # 32, 64, 96, 128
                    # preprocess=True, # whether to preprocess images with smart-crop >>> TODO remove! samples_df is responsible
                    nDim=16, # latent dims 2...
                    nEpochs=10, # num of epochs in training cycle
                    # loss_reduction='mean', # 'mean' vs 'sum' TODO future... add param
                    run_folder='./', # where to save df and png files
                    verbose=True, # print log to file
                    ):
    '''
    Train a VAE on the images in samples_df and build the points_df table.

    Trains for nEpochs, then re-runs every sample through the model
    (batch_size=1, unshuffled, so rows stay index-aligned with samples_df)
    to collect features, encodings, decodings, latent mean/std, and MSE loss.
    Also saves a set of diagnostic PNGs into run_folder.

    Parameters
    ----------
    samples_df : pandas.DataFrame
        Must provide 'img_label', 'img_labstr', and the image data consumed
        by VAE_Train_Dataset.
    image_size : int
        Expected square image side (32, 64, 96, 128); the actual size is
        re-derived from the first batch and validated.
    nDim : int
        Number of latent dimensions for the VAE.
    nEpochs : int
        Number of training epochs.
    run_folder : str
        Output directory for the saved model and diagnostic plots.
    verbose : bool
        When True, print progress and summary statistics.

    Returns
    -------
    pandas.DataFrame
        points_df with one row per sample: id, flags, pt_label, pt_labstr,
        pt_feature, pt_encoded, pt_decoded, pt_pos, pt_std, pt_mse_loss.
    '''
    ##### train model using latent_dim hparam
    if verbose:
        print(f'>>> CREATING POINTS with epochs={nEpochs}, latent_dim={nDim}, ')
    # custom transforms for creating torch DataSet >>> TODO where did this coming from? Use 'None' for now.
    # my_transforms = torchvision.transforms.Compose([
    #             torchvision.transforms.ToTensor(),
    #             torchvision.transforms.Resize(image_size),
    #             torchvision.transforms.CenterCrop(image_size),
    #             ])
    # instantiate torch custom DataSet
    # train_dataset = VAE_Train_Dataset(samples_df, preprocess=preprocess, transform=None) >>> TODO remove!!! samples_df responsible
    train_dataset = VAE_Train_Dataset(samples_df, transform=None)
    # instantiate training DataLoader
    batch_size = len(samples_df.index) // 100 # set # of batch to roughly 100
    train_loader = DataLoader( dataset=train_dataset,
                                batch_size=batch_size,
                                shuffle=True,
                                num_workers=2,
                                # drop_last=True,
                                # sampler=train_sampler,
                                )
    # check sample shape from dataloader
    img, _ = next(iter(train_loader))
    if verbose:
        print(f'    Feature batch shape = {img.shape} with DEVICE = {DEVICE}')
    num_channels, img_height, img_width = img.shape[1:4]
    if img_height != img_width: # must be square
        print(f'    ERROR: image is NOT square')
    img_size = img_height
    # sanity-check the derived image size: multiple of 32, within 32..256
    if not (img_size % 32 == 0) & (img_size >= 32) & (img_size <= 256):
        print(f'    ERROR: image size is NOT mutiple of 32 and in 32..256')
    # instantiate model and print structure
    model = VAE(num_channels, img_size, nDim)
    model.to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
    log_dict = train_VAE( num_epochs=nEpochs, model=model,
                            optimizer=optimizer, device=DEVICE,
                            train_loader=train_loader,
                            skip_epoch_stats=True, # TODO False => only log epoch stats???
                            beta=1, # TODO try higher values for beta 2...10 affect on reconstruction
                            logging_interval=50,
                            save_model=run_folder + '/VAE_MNIST_Roman.pt'
                            )
    # instantiate encode-decode DataLoader, same order as original dataset TODO check whether images are sync with img_id !!!
    encode_decode_loader = DataLoader( dataset=train_dataset,
                                       batch_size=1,
                                       shuffle=False,
                                       )
    feat_lst=[]; encoded_lst=[]; z_mean_lst=[]; z_log_var_lst=[]; decoded_lst=[]; mse_lst=[]
    with torch.no_grad(): # turn off gradient
        model.eval()
        for idx, (features, _) in enumerate(encode_decode_loader):
            # forward prop
            features = features.to(DEVICE)
            encoded, z_mean, z_log_var, decoded = model(features)
            # decoded2 = model.decoder(model.z_mean(model.encoder(features))) # TODO does decoded == decoded2?
            feat_lst.append(features.cpu().detach().numpy().flatten())
            encoded_lst.append(encoded.cpu().detach().numpy().flatten())
            z_mean_lst.append(z_mean.cpu().detach().numpy().flatten())
            z_log_var_lst.append(z_log_var.cpu().detach().numpy().flatten())
            decoded_lst.append(decoded.cpu().detach().numpy().flatten())
            loss = F.mse_loss(decoded, features, reduction='mean') # >>> TODO Why 'sum' versus 'mean'?
            mse_lst.append(loss.cpu().detach().numpy().flatten())
    num_samples = len(encoded_lst)
    assert num_samples == len(train_dataset) # should be equal to len(train_dataset)
    feat_data = np.array(feat_lst)
    encoded_data = np.array(encoded_lst)
    z_mean_data = np.array(z_mean_lst)
    z_log_var_data = np.array(z_log_var_lst)
    z_std_data = np.exp(0.5 * z_log_var_data) # convert log of var to std deviation
    decoded_data = np.array(decoded_lst)
    mse_loss = np.array(mse_lst)
    mse_mean = float(mse_loss.mean())
    mse_std = float(mse_loss.std())
    # ##### create/save points_df table keyed to samples_df
    points_df = pd.DataFrame({'id': samples_df.index})
    points_df['flags'] = ''
    points_df['pt_label'] = [l for l in samples_df['img_label']] # just copy from samples
    points_df['pt_labstr'] = [c for c in samples_df['img_labstr']] # just copy from samples
    points_df['pt_feature'] = [pix for pix in feat_data]
    points_df['pt_encoded'] = [pos for pos in encoded_data]
    points_df['pt_decoded'] = [pix for pix in decoded_data]
    points_df['pt_pos'] = [pos for pos in z_mean_data] # >>>>> TODO investigate pt_encoded == pt_pos ???
    points_df['pt_std'] = [std for std in z_std_data]
    points_df['pt_mse_loss'] = mse_loss
    # print/plot results
    if verbose:
        print(f'>>> Feature/Encode/Decode shapes = {feat_data.shape} {encoded_data.shape} {decoded_data.shape}')
        print(f'>>> Z_mean/Z_logvar/Z_std shapes = {z_mean_data.shape} {z_log_var_data.shape} {z_std_data.shape}')
        print(f'>>> MSE shape = {mse_loss.shape} mean={mse_mean:0.4f} std={mse_std:0.4f} ' +
              f'min={mse_loss.min():0.4f} max={mse_loss.max():0.4f}')
        # flag samples whose reconstruction error is > 3 standard deviations out
        mse_z = (mse_loss - mse_loss.mean()) / mse_loss.std()
        mse_out = mse_loss[mse_z > 3]
        pct = len(mse_out) / len(mse_loss)
        print(f'>>> MSE outliers: {len(mse_out):,d} or {pct:.1%} with Z > 3')
    # check that plt.imshow images of samples same as points >>>> TODO
    # check_sample_to_point_images(samples.img_array, points.pt_feature)
    # plot learning curves
    plot_learning_curve(run_folder, log_dict, nEpochs, num_samples)
    # plot MSE distribution
    plot_MSE_distribution(run_folder, points_df)
    # plot pos_std distribution
    plot_STD_distribution(run_folder, points_df)
    # plot density of latent space
    plot_latent_space_density(run_folder, points_df)
    # plot entanglement of latent space
    LS_entangle_df = plot_latent_space_entangle(run_folder, points_df)
    # save_dataframe(run_folder, 'LS_entangle_df', LS_entangle_df)
    # plot original vs reconstruction images for First, Lo-MSE, Hi-MSE, etc
    plot_reconstructed_images(run_folder, points_df, ('First', 36), binarize=True)
    plot_reconstructed_images(run_folder, points_df, ('Lo-MSE', 36), binarize=True)
    plot_reconstructed_images(run_folder, points_df, ('Hi-MSE', 36), binarize=True)
    return points_df
# %%
# Use UMAP to fit 8D LS samples to 2D and 3D space
def add_posLowD_to_points(points_df, nNeighbers, minDist):
    """Project latent encodings to 2D and 3D with UMAP and store in points_df.

    Adds columns 'pt_pos2D' and 'pt_pos3D' (one row vector per sample) and
    returns the mutated points_df.
    """
    latent = np.stack(points_df['pt_encoded'])  # >>>> TODO use pt_pos instead? Make a difference?
    for n_components in (2, 3):
        reducer = umap.UMAP(n_neighbors=nNeighbers,     # from hyperparams
                           min_dist=minDist,             # from hyperparams
                           n_components=n_components,
                           random_state=42)
        reducer.fit(latent)
        projected = reducer.transform(latent)
        points_df[f'pt_pos{n_components}D'] = list(projected)
    return points_df
# >>>>> TODO random note! where does this go?
# Use UMAP to create graph object for transforming lS and edge creation
# ...for enhanced clustering re https://umap-learn.readthedocs.io/en/latest/clustering.html#umap-enhanced-clustering
# %%
# Use UMAP to fit 8D LS samples to 3D space (which is used in LS_workshop)
def plot_pos2D_points(points_df, nNeighbers, minDist, run_folder):
    """Scatter-plot the precomputed 2D latent positions, colored by class.

    Reads 'pt_pos2D' and 'pt_label' from points_df, saves
    '2D_Latent_Space.png' into run_folder, and returns points_df unchanged.
    NOTE(review): nNeighbers and minDist are accepted but unused here — the
    projection is read from the precomputed column, not recomputed.
    """
    coords = np.stack(points_df['pt_pos2D'])
    # use a smaller marker when there are many points
    marker = '.' if coords.shape[0] > 10_000 else 'o'
    labels = np.stack(points_df['pt_label'])
    run_tag = run_folder[run_folder.find('RUN'):]  # run# & hparams from folder name
    fig, ax = plt.subplots(figsize=(9.6, 5.4), dpi=100)
    plt.title(f'2D Latent Space --' + run_tag, fontsize=12, fontweight='bold')
    scat = ax.scatter(coords[:, 0], coords[:, 1], c=labels, marker=marker,
                      s=3, alpha=1.0, cmap='tab10', edgecolors='none')
    # legend with one entry per unique scatter color (class)
    handles_and_labels = scat.legend_elements()
    legend1 = ax.legend(*handles_and_labels, loc="upper left", title="Classes")
    ax.add_artist(legend1)
    plt.savefig(run_folder+'/2D_Latent_Space.png')
    plt.close()
    return points_df
| [
"matplotlib.pyplot.title",
"wandb.log",
"numpy.random.seed",
"wandb.watch",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"torch.no_grad",
"numpy.unique",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"torch.set_deterministic",
"t... | [((1301, 1326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1324, 1326), False, 'import torch\n'), ((1429, 1458), 'torch.set_deterministic', 'torch.set_deterministic', (['(True)'], {}), '(True)\n', (1452, 1458), False, 'import torch\n'), ((1538, 1555), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1549, 1555), False, 'import math, random\n'), ((1560, 1580), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1574, 1580), True, 'import numpy as np\n'), ((1585, 1608), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1602, 1608), False, 'import torch\n'), ((1613, 1645), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1639, 1645), False, 'import torch\n'), ((9538, 9591), 'wandb.watch', 'wandb.watch', (['model', 'criterion'], {'log': '"""all"""', 'log_freq': '(10)'}), "(model, criterion, log='all', log_freq=10)\n", (9549, 9591), False, 'import wandb\n'), ((9920, 9931), 'time.time', 'time.time', ([], {}), '()\n', (9929, 9931), False, 'import time, os, random\n'), ((13646, 13685), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (13656, 13685), True, 'import matplotlib.pyplot as plt\n'), ((13907, 13985), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Learning Curve -- {title_str}"""'], {'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(f'Learning Curve -- {title_str}', fontsize=12, fontweight='bold')\n", (13919, 13985), True, 'import matplotlib.pyplot as plt\n'), ((13990, 14069), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Batch Iterations over {num_samples} Samples with {epochs} Epochs"""'], {}), "(f'Batch Iterations over {num_samples} Samples with {epochs} Epochs')\n", (14000, 14069), True, 'import matplotlib.pyplot as plt\n'), ((14074, 14096), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE Loss"""'], {}), "('MSE Loss')\n", (14084, 14096), True, 
'import matplotlib.pyplot as plt\n'), ((14101, 14178), 'matplotlib.pyplot.plot', 'plt.plot', (['t_loss'], {'label': '"""Combined with KL Loss"""', 'marker': '"""."""', 'linestyle': '"""None"""'}), "(t_loss, label='Combined with KL Loss', marker='.', linestyle='None')\n", (14109, 14178), True, 'import matplotlib.pyplot as plt\n'), ((14185, 14260), 'matplotlib.pyplot.plot', 'plt.plot', (['v_loss'], {'label': '"""Reconstruction Loss"""', 'marker': '"""."""', 'linestyle': '"""None"""'}), "(v_loss, label='Reconstruction Loss', marker='.', linestyle='None')\n", (14193, 14260), True, 'import matplotlib.pyplot as plt\n'), ((14267, 14309), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'shadow': '(True)'}), "(loc='upper right', shadow=True)\n", (14277, 14309), True, 'import matplotlib.pyplot as plt\n'), ((14397, 14415), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""'}), "(axis='y')\n", (14405, 14415), True, 'import matplotlib.pyplot as plt\n'), ((14472, 14546), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/Point_Learning_Curve.png')"], {'bbox_inches': '"""tight"""'}), "(run_folder + '/Point_Learning_Curve.png', bbox_inches='tight')\n", (14483, 14546), True, 'import matplotlib.pyplot as plt\n'), ((14549, 14560), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14558, 14560), True, 'import matplotlib.pyplot as plt\n'), ((14736, 14768), 'numpy.vstack', 'np.vstack', (['points_df.pt_mse_loss'], {}), '(points_df.pt_mse_loss)\n', (14745, 14768), True, 'import numpy as np\n'), ((14963, 15002), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (14973, 15002), True, 'import matplotlib.pyplot as plt\n'), ((15258, 15318), 'matplotlib.pyplot.hist', 'plt.hist', (['mse'], {'bins': '(50)', 'color': '"""c"""', 'edgecolor': '"""k"""', 'alpha': '(0.65)'}), "(mse, bins=50, color='c', edgecolor='k', alpha=0.65)\n", (15266, 15318), True, 'import 
matplotlib.pyplot as plt\n'), ((15323, 15385), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_low'], {'color': '"""g"""', 'linestyle': '"""dotted"""', 'linewidth': '(1)'}), "(x_low, color='g', linestyle='dotted', linewidth=1)\n", (15334, 15385), True, 'import matplotlib.pyplot as plt\n'), ((15391, 15453), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_mean'], {'color': '"""k"""', 'linestyle': '"""solid"""', 'linewidth': '(1)'}), "(x_mean, color='k', linestyle='solid', linewidth=1)\n", (15402, 15453), True, 'import matplotlib.pyplot as plt\n'), ((15459, 15520), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_hi'], {'color': '"""r"""', 'linestyle': '"""dotted"""', 'linewidth': '(1)'}), "(x_hi, color='r', linestyle='dotted', linewidth=1)\n", (15470, 15520), True, 'import matplotlib.pyplot as plt\n'), ((15591, 15626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MSE with Mean +/- Std"""'], {}), "('MSE with Mean +/- Std')\n", (15601, 15626), True, 'import matplotlib.pyplot as plt\n'), ((15631, 15661), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Images"""'], {}), "('Number of Images')\n", (15641, 15661), True, 'import matplotlib.pyplot as plt\n'), ((15684, 15731), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/Point_MSE_Loss.png')"], {}), "(run_folder + '/Point_MSE_Loss.png')\n", (15695, 15731), True, 'import matplotlib.pyplot as plt\n'), ((15734, 15745), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15743, 15745), True, 'import matplotlib.pyplot as plt\n'), ((16084, 16111), 'numpy.vstack', 'np.vstack', (['points_df.pt_std'], {}), '(points_df.pt_std)\n', (16093, 16111), True, 'import numpy as np\n'), ((16385, 16424), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (16395, 16424), True, 'import matplotlib.pyplot as plt\n'), ((16678, 16738), 'matplotlib.pyplot.hist', 'plt.hist', (['std'], {'bins': '(50)', 'color': '"""c"""', 'edgecolor': 
'"""k"""', 'alpha': '(0.65)'}), "(std, bins=50, color='c', edgecolor='k', alpha=0.65)\n", (16686, 16738), True, 'import matplotlib.pyplot as plt\n'), ((16743, 16805), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_low'], {'color': '"""g"""', 'linestyle': '"""dotted"""', 'linewidth': '(1)'}), "(x_low, color='g', linestyle='dotted', linewidth=1)\n", (16754, 16805), True, 'import matplotlib.pyplot as plt\n'), ((16811, 16873), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_mean'], {'color': '"""k"""', 'linestyle': '"""solid"""', 'linewidth': '(1)'}), "(x_mean, color='k', linestyle='solid', linewidth=1)\n", (16822, 16873), True, 'import matplotlib.pyplot as plt\n'), ((16879, 16940), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x_hi'], {'color': '"""r"""', 'linestyle': '"""dotted"""', 'linewidth': '(1)'}), "(x_hi, color='r', linestyle='dotted', linewidth=1)\n", (16890, 16940), True, 'import matplotlib.pyplot as plt\n'), ((17011, 17082), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Point STD across all {n_dims} dims with Mean +/- one Std"""'], {}), "(f'Point STD across all {n_dims} dims with Mean +/- one Std')\n", (17021, 17082), True, 'import matplotlib.pyplot as plt\n'), ((17087, 17165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Number of Point Positions ({n_samples} samples * {n_dims} dims)"""'], {}), "(f'Number of Point Positions ({n_samples} samples * {n_dims} dims)')\n", (17097, 17165), True, 'import matplotlib.pyplot as plt\n'), ((17188, 17240), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/Point_STD_Fuzziness.png')"], {}), "(run_folder + '/Point_STD_Fuzziness.png')\n", (17199, 17240), True, 'import matplotlib.pyplot as plt\n'), ((17243, 17254), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17252, 17254), True, 'import matplotlib.pyplot as plt\n'), ((17548, 17579), 'numpy.vstack', 'np.vstack', (['points_df.pt_encoded'], {}), '(points_df.pt_encoded)\n', (17557, 17579), True, 'import numpy as np\n'), ((17590, 17619), 
'numpy.vstack', 'np.vstack', (['points_df.pt_label'], {}), '(points_df.pt_label)\n', (17599, 17619), True, 'import numpy as np\n'), ((17798, 17841), 'numpy.zeros', 'np.zeros', (['(nDim, nBins + 1)'], {'dtype': 'np.int32'}), '((nDim, nBins + 1), dtype=np.int32)\n', (17806, 17841), True, 'import numpy as np\n'), ((17971, 18010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (17981, 18010), True, 'import matplotlib.pyplot as plt\n'), ((18228, 18277), 'matplotlib.pyplot.imshow', 'plt.imshow', (['density'], {'cmap': '"""Greens"""', 'aspect': '"""auto"""'}), "(density, cmap='Greens', aspect='auto')\n", (18238, 18277), True, 'import matplotlib.pyplot as plt\n'), ((18283, 18297), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (18295, 18297), True, 'import matplotlib.pyplot as plt\n'), ((18302, 18312), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (18310, 18312), True, 'import matplotlib.pyplot as plt\n'), ((18317, 18352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Point Latent Position"""'], {}), "('Point Latent Position')\n", (18327, 18352), True, 'import matplotlib.pyplot as plt\n'), ((18357, 18387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point Dimensions"""'], {}), "('Point Dimensions')\n", (18367, 18387), True, 'import matplotlib.pyplot as plt\n'), ((18452, 18473), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(20)'], {}), '(0, 101, 20)\n', (18461, 18473), True, 'import numpy as np\n'), ((18560, 18587), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_pos', 'x_labels'], {}), '(x_pos, x_labels)\n', (18570, 18587), True, 'import matplotlib.pyplot as plt\n'), ((18609, 18657), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/L_Space_Density.png')"], {}), "(run_folder + '/L_Space_Density.png')\n", (18620, 18657), True, 'import matplotlib.pyplot as plt\n'), ((18660, 18671), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18669, 
18671), True, 'import matplotlib.pyplot as plt\n'), ((18948, 18979), 'numpy.vstack', 'np.vstack', (['points_df.pt_encoded'], {}), '(points_df.pt_encoded)\n', (18957, 18979), True, 'import numpy as np\n'), ((18990, 19019), 'numpy.vstack', 'np.vstack', (['points_df.pt_label'], {}), '(points_df.pt_label)\n', (18999, 19019), True, 'import numpy as np\n'), ((19037, 19051), 'numpy.unique', 'np.unique', (['lab'], {}), '(lab)\n', (19046, 19051), True, 'import numpy as np\n'), ((19229, 19268), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (19239, 19268), True, 'import matplotlib.pyplot as plt\n'), ((19720, 19730), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (19728, 19730), True, 'import matplotlib.pyplot as plt\n'), ((19735, 19783), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Point Latent Position (normalized)"""'], {}), "('Point Latent Position (normalized)')\n", (19745, 19783), True, 'import matplotlib.pyplot as plt\n'), ((19788, 19818), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point Dimensions"""'], {}), "('Point Dimensions')\n", (19798, 19818), True, 'import matplotlib.pyplot as plt\n'), ((20255, 20308), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/L_Space_Entanglement.png')"], {}), "(run_folder + '/L_Space_Entanglement.png')\n", (20266, 20308), True, 'import matplotlib.pyplot as plt\n'), ((20311, 20322), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20320, 20322), True, 'import matplotlib.pyplot as plt\n'), ((20443, 20478), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dat', 'columns': 'col'}), '(data=dat, columns=col)\n', (20455, 20478), True, 'import pandas as pd\n'), ((22669, 22708), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (22679, 22708), True, 'import matplotlib.pyplot as plt\n'), ((23767, 23778), 'matplotlib.pyplot.close', 'plt.close', ([], 
{}), '()\n', (23776, 23778), True, 'import matplotlib.pyplot as plt\n'), ((23984, 24029), 'pandas.read_json', 'pd.read_json', (["(points_path + '/points_df.json')"], {}), "(points_path + '/points_df.json')\n", (23996, 24029), True, 'import pandas as pd\n'), ((25680, 25769), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=True,\n num_workers=2)\n', (25690, 25769), False, 'from torch.utils.data import Dataset, DataLoader, sampler, SubsetRandomSampler\n'), ((27408, 27470), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=train_dataset, batch_size=1, shuffle=False)\n', (27418, 27470), False, 'from torch.utils.data import Dataset, DataLoader, sampler, SubsetRandomSampler\n'), ((28763, 28781), 'numpy.array', 'np.array', (['feat_lst'], {}), '(feat_lst)\n', (28771, 28781), True, 'import numpy as np\n'), ((28801, 28822), 'numpy.array', 'np.array', (['encoded_lst'], {}), '(encoded_lst)\n', (28809, 28822), True, 'import numpy as np\n'), ((28841, 28861), 'numpy.array', 'np.array', (['z_mean_lst'], {}), '(z_mean_lst)\n', (28849, 28861), True, 'import numpy as np\n'), ((28883, 28906), 'numpy.array', 'np.array', (['z_log_var_lst'], {}), '(z_log_var_lst)\n', (28891, 28906), True, 'import numpy as np\n'), ((28924, 28952), 'numpy.exp', 'np.exp', (['(0.5 * z_log_var_data)'], {}), '(0.5 * z_log_var_data)\n', (28930, 28952), True, 'import numpy as np\n'), ((29013, 29034), 'numpy.array', 'np.array', (['decoded_lst'], {}), '(decoded_lst)\n', (29021, 29034), True, 'import numpy as np\n'), ((29051, 29068), 'numpy.array', 'np.array', (['mse_lst'], {}), '(mse_lst)\n', (29059, 29068), True, 'import numpy as np\n'), ((29220, 29258), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': samples_df.index}"], {}), "({'id': samples_df.index})\n", (29232, 29258), 
True, 'import pandas as pd\n'), ((31803, 31836), 'numpy.stack', 'np.stack', (["points_df['pt_encoded']"], {}), "(points_df['pt_encoded'])\n", (31811, 31836), True, 'import numpy as np\n'), ((32699, 32730), 'numpy.stack', 'np.stack', (["points_df['pt_pos2D']"], {}), "(points_df['pt_pos2D'])\n", (32707, 32730), True, 'import numpy as np\n'), ((32838, 32869), 'numpy.stack', 'np.stack', (["points_df['pt_label']"], {}), "(points_df['pt_label'])\n", (32846, 32869), True, 'import numpy as np\n'), ((33449, 33490), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9.6, 5.4)', 'dpi': '(100)'}), '(figsize=(9.6, 5.4), dpi=100)\n', (33461, 33490), True, 'import matplotlib.pyplot as plt\n'), ((33495, 33571), 'matplotlib.pyplot.title', 'plt.title', (["(f'2D Latent Space --' + title_str)"], {'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(f'2D Latent Space --' + title_str, fontsize=12, fontweight='bold')\n", (33504, 33571), True, 'import matplotlib.pyplot as plt\n'), ((33957, 34005), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_folder + '/2D_Latent_Space.png')"], {}), "(run_folder + '/2D_Latent_Space.png')\n", (33968, 34005), True, 'import matplotlib.pyplot as plt\n'), ((34008, 34019), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (34017, 34019), True, 'import matplotlib.pyplot as plt\n'), ((1949, 1974), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1972, 1974), False, 'import torch\n'), ((3341, 3374), 'numpy.stack', 'np.stack', (["samples_df['img_array']"], {}), "(samples_df['img_array'])\n", (3349, 3374), True, 'import numpy as np\n'), ((3766, 3786), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (3781, 3786), False, 'import torch\n'), ((6588, 6644), 'torch.nn.Linear', 'torch.nn.Linear', (['self.encoder_out_shape', 'num_latent_dims'], {}), '(self.encoder_out_shape, num_latent_dims)\n', (6603, 6644), False, 'import torch\n'), ((6670, 6726), 'torch.nn.Linear', 'torch.nn.Linear', 
(['self.encoder_out_shape', 'num_latent_dims'], {}), '(self.encoder_out_shape, num_latent_dims)\n', (6685, 6726), False, 'import torch\n'), ((8674, 8689), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8687, 8689), False, 'import torch\n'), ((9994, 10005), 'time.time', 'time.time', ([], {}), '()\n', (10003, 10005), False, 'import time, os, random\n'), ((12689, 12761), 'wandb.log', 'wandb.log', (["{'epoch': config.epochs, 'loss': train_loss}"], {'step': 'example_ct'}), "({'epoch': config.epochs, 'loss': train_loss}, step=example_ct)\n", (12698, 12761), False, 'import wandb\n'), ((13277, 13328), 'numpy.array', 'np.array', (["log_dict['train_combined_loss_per_batch']"], {}), "(log_dict['train_combined_loss_per_batch'])\n", (13285, 13328), True, 'import numpy as np\n'), ((13356, 13401), 'numpy.array', 'np.array', (["log_dict['train_kl_loss_per_batch']"], {}), "(log_dict['train_kl_loss_per_batch'])\n", (13364, 13401), True, 'import numpy as np\n'), ((13429, 13486), 'numpy.array', 'np.array', (["log_dict['train_reconstruction_loss_per_batch']"], {}), "(log_dict['train_reconstruction_loss_per_batch'])\n", (13437, 13486), True, 'import numpy as np\n'), ((19696, 19710), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (19704, 19710), True, 'import matplotlib.pyplot as plt\n'), ((22184, 22212), 'math.ceil', 'math.ceil', (['(n_img / N_PER_ROW)'], {}), '(n_img / N_PER_ROW)\n', (22193, 22212), False, 'import math, random\n'), ((22406, 22440), 'numpy.where', 'np.where', (['(decoded_data > 0.5)', '(1)', '(0)'], {}), '(decoded_data > 0.5, 1, 0)\n', (22414, 22440), True, 'import numpy as np\n'), ((22503, 22538), 'math.sqrt', 'math.sqrt', (['feat_data[0, :].shape[0]'], {}), '(feat_data[0, :].shape[0])\n', (22512, 22538), False, 'import math, random\n'), ((23100, 23123), 'numpy.hstack', 'np.hstack', (['(img1, img2)'], {}), '((img1, img2))\n', (23109, 23123), True, 'import numpy as np\n'), ((23138, 23175), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['n_rows', 'N_PER_ROW', '(i + 1)'], {}), '(n_rows, N_PER_ROW, i + 1)\n', (23149, 23175), True, 'import matplotlib.pyplot as plt\n'), ((23243, 23275), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""binary_r"""'}), "(img, cmap='binary_r')\n", (23253, 23275), True, 'import matplotlib.pyplot as plt\n'), ((23662, 23733), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{run_folder}/Point_Reconstructed_Samples{\'_\' + tag}.png"""'], {}), '(f"{run_folder}/Point_Reconstructed_Samples{\'_\' + tag}.png")\n', (23673, 23733), True, 'import matplotlib.pyplot as plt\n'), ((23752, 23762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23760, 23762), True, 'import matplotlib.pyplot as plt\n'), ((27694, 27709), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27707, 27709), False, 'import torch\n'), ((31940, 32028), 'umap.UMAP', 'umap.UMAP', ([], {'n_neighbors': 'nNeighbers', 'min_dist': 'minDist', 'n_components': 'd', 'random_state': '(42)'}), '(n_neighbors=nNeighbers, min_dist=minDist, n_components=d,\n random_state=42)\n', (31949, 32028), False, 'import umap\n'), ((4240, 4261), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4259, 4261), False, 'from torchvision import datasets, transforms\n'), ((5711, 5786), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_channels', '(32)'], {'stride': '(2)', 'kernel_size': '(3)', 'bias': '(False)', 'padding': '(1)'}), '(num_channels, 32, stride=2, kernel_size=3, bias=False, padding=1)\n', (5720, 5786), True, 'import torch.nn as nn\n'), ((5800, 5818), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (5814, 5818), True, 'import torch.nn as nn\n'), ((5832, 5863), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (5844, 5863), True, 'import torch.nn as nn\n'), ((5877, 5895), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (5889, 5895), True, 'import torch.nn as nn\n'), ((5923, 5988), 'torch.nn.Conv2d', 
'nn.Conv2d', (['(32)', '(64)'], {'stride': '(2)', 'kernel_size': '(3)', 'bias': '(False)', 'padding': '(1)'}), '(32, 64, stride=2, kernel_size=3, bias=False, padding=1)\n', (5932, 5988), True, 'import torch.nn as nn\n'), ((6002, 6020), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6016, 6020), True, 'import torch.nn as nn\n'), ((6034, 6065), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (6046, 6065), True, 'import torch.nn as nn\n'), ((6079, 6097), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (6091, 6097), True, 'import torch.nn as nn\n'), ((6125, 6190), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'stride': '(2)', 'kernel_size': '(3)', 'bias': '(False)', 'padding': '(1)'}), '(64, 64, stride=2, kernel_size=3, bias=False, padding=1)\n', (6134, 6190), True, 'import torch.nn as nn\n'), ((6204, 6222), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6218, 6222), True, 'import torch.nn as nn\n'), ((6236, 6267), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (6248, 6267), True, 'import torch.nn as nn\n'), ((6281, 6299), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (6293, 6299), True, 'import torch.nn as nn\n'), ((6327, 6392), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'stride': '(2)', 'kernel_size': '(3)', 'bias': '(False)', 'padding': '(1)'}), '(64, 64, stride=2, kernel_size=3, bias=False, padding=1)\n', (6336, 6392), True, 'import torch.nn as nn\n'), ((6406, 6424), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6420, 6424), True, 'import torch.nn as nn\n'), ((6438, 6469), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (6450, 6469), True, 'import torch.nn as nn\n'), ((6483, 6501), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (6495, 6501), True, 'import torch.nn as 
nn\n'), ((6529, 6541), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (6539, 6541), True, 'import torch.nn as nn\n'), ((6786, 6842), 'torch.nn.Linear', 'torch.nn.Linear', (['num_latent_dims', 'self.encoder_out_shape'], {}), '(num_latent_dims, self.encoder_out_shape)\n', (6801, 6842), False, 'import torch\n'), ((7002, 7067), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['self.no_channels', '(64)'], {'stride': '(2)', 'kernel_size': '(3)'}), '(self.no_channels, 64, stride=2, kernel_size=3)\n', (7020, 7067), True, 'import torch.nn as nn\n'), ((7081, 7099), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (7095, 7099), True, 'import torch.nn as nn\n'), ((7113, 7144), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7125, 7144), True, 'import torch.nn as nn\n'), ((7158, 7176), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (7170, 7176), True, 'import torch.nn as nn\n'), ((7204, 7266), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(64)'], {'stride': '(2)', 'kernel_size': '(3)', 'padding': '(1)'}), '(64, 64, stride=2, kernel_size=3, padding=1)\n', (7222, 7266), True, 'import torch.nn as nn\n'), ((7280, 7298), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (7294, 7298), True, 'import torch.nn as nn\n'), ((7312, 7343), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7324, 7343), True, 'import torch.nn as nn\n'), ((7357, 7375), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (7369, 7375), True, 'import torch.nn as nn\n'), ((7403, 7465), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)'], {'stride': '(2)', 'kernel_size': '(3)', 'padding': '(1)'}), '(64, 32, stride=2, kernel_size=3, padding=1)\n', (7421, 7465), True, 'import torch.nn as nn\n'), ((7479, 7497), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (7493, 7497), True, 'import 
torch.nn as nn\n'), ((7511, 7542), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7523, 7542), True, 'import torch.nn as nn\n'), ((7556, 7574), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (7568, 7574), True, 'import torch.nn as nn\n'), ((7602, 7674), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', 'num_channels'], {'stride': '(2)', 'kernel_size': '(3)', 'padding': '(1)'}), '(32, num_channels, stride=2, kernel_size=3, padding=1)\n', (7620, 7674), True, 'import torch.nn as nn\n'), ((7799, 7811), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (7809, 7811), True, 'import torch.nn as nn\n'), ((12293, 12322), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (12315, 12322), False, 'import torch\n'), ((12655, 12666), 'time.time', 'time.time', ([], {}), '()\n', (12664, 12666), False, 'import time, os, random\n'), ((19540, 19579), 'numpy.mean', 'np.mean', (['pos[lab[:, 0] == c, i]'], {'axis': '(0)'}), '(pos[lab[:, 0] == c, i], axis=0)\n', (19547, 19579), True, 'import numpy as np\n'), ((28470, 28517), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['decoded', 'features'], {'reduction': '"""mean"""'}), "(decoded, features, reduction='mean')\n", (28480, 28517), True, 'import torch.nn.functional as F\n'), ((8172, 8198), 'torch.exp', 'torch.exp', (['(z_log_var / 2.0)'], {}), '(z_log_var / 2.0)\n', (8181, 8198), False, 'import torch\n'), ((12002, 12013), 'time.time', 'time.time', ([], {}), '()\n', (12011, 12013), False, 'import time, os, random\n'), ((12956, 12967), 'time.time', 'time.time', ([], {}), '()\n', (12965, 12967), False, 'import time, os, random\n'), ((10783, 10803), 'torch.exp', 'torch.exp', (['z_log_var'], {}), '(z_log_var)\n', (10792, 10803), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import projection_funcs as pf
import policy_tools as pt
import inspect
from pprint import pprint
# Lookup of named spend profiles -> CSV file paths (loaded with np.genfromtxt
# by the r_* functions below when a profile is passed by name).
# NOTE(review): hard-coded, user-specific Windows paths - consider making these
# configurable rather than baked into the module.
profs_table={'prof1':'c:/Users/groberta/Work/data_accelerator/profiles/prof1.csv',
             'prof_old': "c:/Users/groberta/Work/data_accelerator/profiles/prof_old.csv"}

##_________________________________________________________________________##

'''Suite of functions with common API which can be passed to a RuleSet object.
Actual functions for passing all prefixed `r_`. Others (top of file) are auxiliary.
TODO: fully document the API. Basically need to be able to pass in a df with rows
for spend on products, and return a df with forecasts for each product (using original
index, so can be appended / joined to input).
Will be a bunch of other parameters, but these are automatically detected in the RuleSet object,
and passed appropriately to front end.
'''

##_________________________________________________________________________##
def infer_launch(in_arr, max_spend, streak_len_threshold=12, delta_threshold = 0.2,
                 _ma_interval=12, verbose_return=False, _debug=False):
    '''Infers a launch date, given a trajectory of spend, by identifying an uptake phase -
    the most recent streak of consistent growth - and returning a dict describing it.

    PARAMETERS:

    in_arr                  input array of spend (numpy-compatible, 1-d)
    max_spend               the individual product's max spend value (normalises deltas)
    streak_len_threshold    the number of periods of successive growth required
    delta_threshold         the change (proportionate to max spend) required for a streak to qualify
    _ma_interval            interval for calculation of moving averages
    verbose_return          [bool] return an object with info on all streaks, rather than the one detected
    _debug                  [bool] print trace output while scanning

    RETURN:

    dictionary with:
        uptake_detected:    [bool] if a qualifying streak was found
        start:              start of the detected qualifying streak
        delta:              the increase over the uptake streak (proportion of max spend)
        inferred_launch:    expressed relative to period zero of in_arr
    '''
    # first get the moving ave. and ma_diffs
    # (ma_diff[p] is the change in the moving average vs _ma_interval periods earlier)
    ma = pf.mov_ave(in_arr, _ma_interval)
    ma_diff = ma - np.insert(ma, 0, np.zeros(_ma_interval))[:-_ma_interval]

    # now look for streaks - runs of consecutive positive ma_diff values
    streak = 0
    streaks = {'uptake_detected':False, 'streaks':{}}

    for p in range(_ma_interval, len(ma_diff)):
        if _debug: print(p, end="")

        # still growing (and not at the final index): extend the current streak.
        # The final index always falls through to the else, so an open streak is
        # closed and assessed at the end of the array.
        if ma_diff[p] > 0 and p < len(ma_diff)-1:
            streak += 1
            if _debug: print("+", end="")

        else:
            if _debug: print('-', end="")
            if streak>0 and _debug:
                print('\nstreak ends, length is: ', streak, end=" ")

            # a streak has just ended - assess it against the two thresholds
            if streak > streak_len_threshold:
                if _debug: print(', OVER length threshold, ', end=" ")
                # calculate the delta, based on the mov ave
                # NOTE(review): assumes max_spend != 0; a zero max_spend would
                # raise ZeroDivisionError here - confirm callers guarantee this
                if _debug: print(', START: ', p-streak, ma[p-streak], end=" ")
                if _debug: print(', END: ', p, ma[p], end=" ")
                delta = (ma[p] - ma[p-streak]) / max_spend
                if _debug: print(', delta is, ', delta, end=" ")

                # only add to dict if is over threshold
                if delta > delta_threshold:
                    if _debug: print('streak is OVER delta threshold, delta is:'.ljust(20), delta)
                    s_key = "s_"+str(p)
                    streaks['streaks'][s_key]={}
                    streaks['streaks'][s_key]['end_per'] = p
                    streaks['streaks'][s_key]['length'] = streak
                    streaks['streaks'][s_key]['end_val'] = ma[p]
                    streaks['streaks'][s_key]['start_val'] = ma[p-streak]
                    streaks['streaks'][s_key]['raw_delta'] = ma[p] - ma[p-streak]
                    streaks['streaks'][s_key]['prop_delta'] = delta

                    # only going to return the last qualifying streak (others available for `verbose_return=True`)
                    streaks['uptake_detected'] = True
                    streaks['last_per'] = p
                    streaks['last_per_len'] = streak
                    streaks['last_delta'] = delta

                else:
                    if _debug: print("delta of ", delta, " is BELOW threshold of ", delta_threshold, "\n")

            # terminate the streak even if it doesn't qualify
            streak = 0

    if _debug: pprint(streaks)

    # infer start, using y=mx equation of straight line to get x1 - the offset of the first point from origin
    if streaks['uptake_detected']:
        streak_start = streaks['last_per'] - streaks['last_per_len']
        window = min(streaks['last_per_len'], _ma_interval)
        # set lower point at start of streak
        y1 = ma[streak_start]
        if _debug: print("y1 - low val - is", y1)
        y2 = ma[streak_start + window]
        if _debug: print("y2 - high val - is", y2)
        # NOTE(review): dy == 0 (flat moving average across the window) would
        # raise ZeroDivisionError in the x1 calculation below - confirm whether
        # a qualifying streak can ever be flat over this window
        dy = y2 - y1
        dx = window
        x1 = (dx/dy)*y1
        # move the offset back to account for ma window
        inferred_launch = streak_start -int(x1) - (_ma_interval//2)
        if _debug: print('inferred_launch is ', inferred_launch)

    if verbose_return:
        return streaks

    elif streaks['uptake_detected']:
        return dict(uptake_detected = True,
                    start=streaks['last_per'] - streaks['last_per_len'],
                    end=streaks['last_per'],
                    delta=streaks['last_delta'],
                    inferred_launch=inferred_launch)
    else:
        return dict(uptake_detected = False)
##_________________________________________________________________________##
def trend(prod, interval=24, *, launch_cat=None, life_cycle_per=0,
         shed=None, loe_delay=0, term_gr_pa=None,
         term_gr = 0, threshold_rate=0.001, n_pers=12,
         _out_type='array', start_m=None, _debug=False, name=None):
    '''Takes input array, with parameters, and returns a trend-based projection.

    Key parameters:

    prod            An input array of spend (pd.Series, tuple or np.ndarray)
    interval        The number of periods (back from last observation) that are used
                        to calculate the trend
    life_cycle_per  Where the current period (i.e. last obs) lies in the lifecycle
    shed            Lifecycle shape object with .uptake_dur, .plat_dur, .gen_mult
    loe_delay       The periods by which actual fall in revenue is delayed beyond
                        patent expiry (added to plat_dur - see notes below)
    term_gr_pa      Annual terminal growth rate; if passed, overrides term_gr
    term_gr         Monthly terminal growth rate
    n_pers          Number of periods to project
    _out_type       Pass 'df' to return a df with raw past, mov ave past and projection
                        (also pass a start_m to add a PeriodIndex to the df)
    launch_cat, threshold_rate, name
                    Currently unused; retained for interface compatibility

    Notes on use of loe_delay
    -------------------------
    The loe_delay is used to extend plat_dur.  This has two effects:

    1. It may change the classification of the product's lifecycle phase
       (a product just past loe may still be treated as plateau if the actual
       spend drop is delayed).  This puts a lot of faith in loe_delay when loe
       is close to the current period: too low and the product is classified
       terminal before any real drop (large over-estimate of the drop); too
       high and an already-eroded product gets eroded again (smaller error).
    2. It extends the plateau duration projection (if there is one).
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n")

    pad, lpad, rpad = 45, 35, 20

    # -- coerce the input to a flat ndarray ----------------------------------
    # np.array() copies, so the nan-zeroing further down never mutates the
    # caller's data
    if _debug: print('\nPROCESSING ARRAY INPUT')
    if isinstance(prod, pd.Series):
        prod = np.array(prod)
        if _debug: print('found a series, len'.ljust(pad), str(len(prod)).rjust(rpad))
    elif isinstance(prod, tuple):
        prod = np.array(prod)
        if _debug: print('found a tuple, len'.ljust(pad), str(len(prod)).rjust(rpad))
    elif isinstance(prod, np.ndarray):
        prod = np.array(prod)
        if _debug: print('found an ndarray, len'.ljust(pad), str(len(prod)).rjust(rpad))
        if len(prod) ==1:
            if _debug: print('unpacking array of unit length (i.e. actual array nested in a list with len==1)')
            prod = prod[0]
            if _debug: print('array len now'.ljust(pad), str(len(prod)).rjust(rpad))
    else:
        print("DON'T KNOW WHAT HAS BEEN PASSED - make sure its not a dataframe")
        return

    # -- lifecycle inputs ----------------------------------------------------
    if _debug: print('\nPROCESSING LIFECYCLE INPUTS')
    uptake_dur = shed.uptake_dur
    # NB this is the critical use of loe_delay
    plat_dur = shed.plat_dur + loe_delay
    gen_mult = shed.gen_mult
    # an annual rate, if passed, takes precedence over the monthly rate
    if term_gr_pa is not None:
        term_gr = term_gr_pa/12

    if _debug:
        print(" - uptake_dur".ljust(pad), str(uptake_dur).rjust(rpad))
        print(" - plat_dur".ljust(pad), str(plat_dur).rjust(rpad))
        print("(after adding loe_delay of)".ljust(pad), str(loe_delay).rjust(rpad))
        print(" - gen_mult".ljust(pad), str(gen_mult).rjust(rpad))
        print("lifecycle period".ljust(pad), str(life_cycle_per).rjust(rpad))

    # make an annual moving average array (nans treated as zero spend)
    prod[np.isnan(prod)]=0
    prod_ma = pf.mov_ave(prod, 12)

    # -- analyse past spend --------------------------------------------------
    if _debug: print('\nANALYSING PAST SPEND')
    max_spend = np.nanmax(prod)
    max_spend_per = np.nanargmax(prod)
    last_spend = (prod[-1])
    max_spend_ma = np.nanmax(prod_ma)
    max_spend_ma_per = np.nanargmax(prod_ma)
    last_spend_ma = (prod_ma[-1])
    total_drop_ma = max(0, max_spend_ma - last_spend_ma)
    # default to zero so the variable is always bound (it was previously left
    # unassigned when max_spend_ma == 0, making the _debug report below raise
    # NameError)
    total_drop_ma_pct = 0.0
    if not max_spend_ma == 0:
        total_drop_ma_pct = total_drop_ma/max_spend_ma

    # get linear change per period over interval of periods
    # TODO calculate this on recent averages
    interval = min(interval, len(prod))
    interval_delta = prod[-1] - prod[-(1 + interval)]
    interval_rate = interval_delta / interval
    interval_rate_pct = None
    if not prod[-(1 + interval)] == 0:
        interval_rate_pct = interval_rate / prod[-(1 + interval)]

    if _debug:
        print("max spend in a single period".ljust(pad), "{:0,.0f}".format(max_spend).rjust(20))
        print("period of max spend".ljust(pad), "{}".format(max_spend_per).rjust(rpad))
        print("spend in last period".ljust(pad), "{:0,.0f}".format(last_spend).rjust(rpad))
        print("max of mov ave spend".ljust(pad), "{:0,.0f}".format(max_spend_ma).rjust(rpad))
        print("period of max mov ave spend".ljust(pad), "{}".format(max_spend_ma_per).rjust(rpad))
        print("last obs mov ave spend".ljust(pad), "{:0,.0f}".format(last_spend_ma).rjust(rpad))
        print("drop in mov ave".ljust(pad), "{:0,.0f}".format(total_drop_ma).rjust(rpad))
        print("drop in mov ave pct".ljust(pad), "{:0,.0f}%".format(total_drop_ma_pct*100).rjust(rpad))
        print("interval for calculating linear trend".ljust(pad), "{}".format(interval).rjust(rpad))
        print("change over that interval".ljust(pad), "{:0,.0f}".format(interval_delta).rjust(rpad))
        print("change per period over interval".ljust(pad), "{:0,.0f}".format(interval_rate).rjust(rpad))
        # interval_rate_pct is None when the baseline spend was zero (this line
        # previously raised TypeError in that case)
        _irp = "n/a" if interval_rate_pct is None else "{:0,.0f}%".format(interval_rate_pct*100)
        print("change per period over interval pct".ljust(pad), _irp.rjust(rpad))

    # -- classify to lifecycle phase -----------------------------------------
    if _debug: print('\nCLASSIFYING TO PHASE')
    if life_cycle_per <= uptake_dur:
        phase = 'uptake'
    # note that plat_dur has been extended by the loe delay
    elif life_cycle_per <= uptake_dur + plat_dur:
        phase = 'plateau'
    else: phase = 'terminal'
    if _debug: print('Classified as'.ljust(pad), phase.rjust(rpad))

    # -- construct the projection array --------------------------------------
    if _debug: print('\nCONSTRUCTING PROJECTION ARRAY')
    out = np.array([last_spend_ma]) # this period overlaps with past, will be snipped later
    if _debug: print('initial stub of proj. array'.ljust(pad), out)

    if phase == 'terminal':
        # shortcut where we already know it's in terminal: decay from last obs
        if _debug: print('\nIn terminal phase, so creating a terminal array')
        out = out[-1] * ((1 + term_gr) ** np.arange(1, n_pers+1))
        if _debug:
            print('First 10 periods of terminal array:')
            print(out[:10], end="\n")

    else:
        # Main work.  For each remaining phase make an array, and append to out.
        if _debug: print('\nGenerating pre-terminal phases')

        # compute remaining UPTAKE periods (linear extrapolation at interval_rate)
        # NOTE(review): np.arange(1, uptake_pers) yields uptake_pers-1 values,
        # one fewer than the other phases produce for their period counts -
        # looks like an off-by-one, but kept as-is to preserve existing output
        uptake_pers = min(max(uptake_dur - life_cycle_per, 0),n_pers - (len(out)-1))
        uptake_out = out[-1] + (interval_rate * np.arange(1,uptake_pers))
        # move the lifecycle period along to the end of uptake phase
        life_cycle_per += uptake_pers
        if _debug:
            print("\nRemaining UPTAKE periods".ljust(pad), str(uptake_pers).rjust(rpad))
            print("--> lifecycle period moved to".ljust(pad), str(life_cycle_per).rjust(rpad))
        out = np.append(out, uptake_out)

        # compute remaining PLATEAU periods (held flat at the last level)
        # Note that plat_dur has been extended by loe_delay
        plat_pers = min(max((uptake_dur + plat_dur) - life_cycle_per, 0), n_pers - (len(out)-1))
        plat_out = out[-1] * np.ones(plat_pers)
        life_cycle_per += plat_pers
        if _debug:
            print("\nRemaining PLATEAU periods".ljust(pad), str(plat_pers).rjust(rpad))
            print("--> lifecycle period moved to".ljust(pad), str(life_cycle_per).rjust(rpad))
        out = np.append(out, plat_out)

        # compute remaining TERMINAL periods (generic erosion then terminal growth)
        term_pers = max(n_pers - (len(out)-1), 0)
        term_out = out[-1] * gen_mult * ((1 + term_gr) ** np.arange(1, term_pers+1))
        if _debug:
            print("\nRemaining TERMINAL periods".ljust(pad), str(term_pers).rjust(rpad))
        out = np.append(out, term_out)

    # eliminate any negatives (linear uptake extrapolation can go below zero)
    out[out<0] = 0

    if _out_type == 'df':
        if _debug: print('\nGenerating df output')
        # pad the projection with nans so it lines up after the observed past
        spacer = np.empty(len(prod))
        spacer[:] = np.nan
        out=np.insert(out, 0, spacer)
        df=pd.DataFrame([prod, prod_ma, out], index=['raw', 'mov_ave', 'projected']).T
        # add an index if a start month was passed
        if start_m is not None:
            df.index = pd.PeriodIndex(start=pd.Period(start_m, freq='M'), periods=len(df))
        # get rid of the ugly trajectory of mov_ave from zero.  Positional
        # assignment rather than chained df['mov_ave'][:interval] = ..., which
        # is unreliable under pandas copy-on-write
        df.iloc[:interval, df.columns.get_loc('mov_ave')] = np.nan
        if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
        return df

    else:
        if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
        return out[1:]
##_________________________________________________________________________##
def r_trend(df, n_pers, *, shed=None, uptake_dur=None, plat_dur=None, gen_mult=None, term_gr=0,
            loe_delay=None, threshold_rate=0.001, _interval=24, _out_type='array', _debug=False):
    '''Iterates through an input df, calling trend(), returning a df of projections.

    Key logic is the calculation of each product's lifecycle period, which is
    passed to trend() to orient the projection.  It is anchored on the loe date:
    loe occurs at lifecycle period uptake_dur + plat_dur, so the current period
    is that figure plus the (signed) number of periods between the last
    observation and the product's loe date.  Any lag between loe and the actual
    erosion of spend is handled via loe_delay, which trend() adds to plat_dur.

    PARAMETERS:
        df           input df of spend; the index must carry 'molecule',
                     'setting' and 'loe_date' levels
        n_pers       number of periods to project
        shed         lifecycle shape (.uptake_dur, .plat_dur, .gen_mult);
                     if passed, overrides the individual duration arguments
        _out_type    'array' (default) returns a single df of projections;
                     'df' returns the list of per-product diagnostic dfs from
                     trend() (e.g. for visualisation)

    RETURN: df of projections (same index as df), or a list of dfs
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")

    pad = 35
    out=[]

    # housekeeping - assign lifecycle variables depending on what was passed
    if shed is not None:
        if _debug: print('using passed shed:')
        uptake_dur = shed.uptake_dur
        plat_dur = shed.plat_dur
        gen_mult = shed.gen_mult

    # define the lifecycle period in which loe will occur, according to input
    # shed or shape data - this is used to fix the actual lifecycle period
    loe_lcycle_per = uptake_dur + plat_dur

    # the last observed month is a loop invariant - hoist it (also makes the
    # empty-df case fail fast here rather than at the cols line below)
    last_month = df.columns[-1]
    if _debug: print('last_month'.ljust(pad), last_month)

    # enter the key loop through rows in the input df
    for row in df.itertuples():
        # TODO make params a dict, rather than have to look up by index number
        params = row[0]
        data = row[1:]
        if _debug: print('\nMolecule'.ljust(pad), params[df.index.names.index('molecule')])
        if _debug: print('Setting'.ljust(pad), params[df.index.names.index('setting')])

        # get loe month from the index metadata
        loe_month = pd.Period(params[df.index.names.index('loe_date')], freq='M')
        if _debug: print('taking raw loe date'.ljust(pad), loe_month)

        # get time after / before loe (negative if before loe).  Period
        # subtraction returns an int in old pandas but a DateOffset from
        # pandas 0.24 - normalise to an int either way via .n
        pers_post_loe = last_month - loe_month
        pers_post_loe = getattr(pers_post_loe, 'n', pers_post_loe)
        if _debug: print('pers_post_loe'.ljust(pad), pers_post_loe)

        # infer the lifecycle period from this
        life_cycle_per = loe_lcycle_per + pers_post_loe
        if _debug: print('life_cycle_per'.ljust(pad), life_cycle_per)

        # call trend
        out_array = trend(data, _interval, n_pers=n_pers, life_cycle_per=life_cycle_per, shed=shed,
                          loe_delay=loe_delay, name=params[0], term_gr=term_gr,
                          _out_type=_out_type)
        out.append(out_array)

    # just return this out list of dataframes if passing 'df' as _out_type
    if _out_type == 'df':
        # (was print("\LEAVING", ...) - typo'd escape sequence)
        if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
        return out

    # Otherwise build the df index and columns and return a single df.
    # period_range rather than the PeriodIndex(start=...) constructor, which
    # was removed in pandas 2.0
    cols = pd.period_range(start=last_month+1, periods=n_pers, freq='M')
    if _debug: print("\nLEAVING: ", inspect.stack()[0][3])
    return pd.DataFrame(out, index=df.index, columns=cols)
##_________________________________________________________________________##
def r_trend_old(df, n_pers, *, streak_len_thresh=12, delta_thresh = 0.2,
             uptake_dur=90, plat_dur=24, gen_mult=0.9, term_gr_pa=0,
             threshold_rate=0.001, ma_interval=12, _debug=False):
    '''Legacy version of r_trend: infers each product's launch date from its
    spend trajectory (via infer_launch), rather than anchoring on the loe date,
    then calls trend() per product and returns a df of projections.

    NOTE(review): this passes uptake_dur / plat_dur / gen_mult / term_gr_pa
    keywords to trend(), but the current trend() signature takes a `shed`
    object instead of separate uptake_dur / plat_dur / gen_mult keywords -
    calling this function as-is will raise TypeError.  Kept for reference;
    superseded by r_trend().
    '''
    out=[]
    for row in df.itertuples():
        # index metadata carries the product's max spend (normalises deltas
        # inside infer_launch)
        max_spend = row[0][df.index.names.index('max_spend')]
        inferred_launch_output = infer_launch(row[1:], max_spend=max_spend, delta_threshold = delta_thresh,
                        _ma_interval=ma_interval, _debug=_debug)
       # print(inferred_launch_output)
        # NOTE(review): last_date is only assigned inside the loop; an empty df
        # would raise NameError at the cols line below
        last_date = df.columns[-1]
        if inferred_launch_output['uptake_detected']:
            # position the product in its lifecycle using the inferred launch
            launch_date = pd.Period(df.columns[0], freq='M') + inferred_launch_output['inferred_launch']
            life_cycle_per = last_date - launch_date
            if _debug: print("for ", row[0][0], " inferred launch is ", launch_date, ", life_cycle_per is ", life_cycle_per)
            out_array = trend(row[1:], ma_interval, n_pers=n_pers, life_cycle_per=life_cycle_per,
                                uptake_dur=uptake_dur, plat_dur=plat_dur, gen_mult=gen_mult,
                                name=row[0][0], term_gr_pa=term_gr_pa,
                                _out_type='array', _debug=_debug)
        else:
            # no qualifying uptake streak - project from lifecycle period zero
            print("for ", row[0][0], " NO inferred launch")
            out_array = trend(row[1:], ma_interval, n_pers=n_pers, life_cycle_per=0,
                    uptake_dur=0, plat_dur=0, gen_mult=gen_mult,
                    name=row[0][0], term_gr_pa=term_gr_pa,
                    _out_type='array', _debug=_debug)

        if _debug: print(out_array[-10:])
        out.append(out_array)
        # call trend on row[1:]
        # append to out
    cols = pd.PeriodIndex(start=last_date+1, periods=n_pers, freq='M')
    return pd.DataFrame(out, index=df.index, columns=cols)
##_________________________________________________________________________##
def r_profile(df, n_pers,
            *, profile, gen_mult, _debug=True):
    '''Applies a spend profile to each product, scaled to its last observed spend.

    PARAMETERS:
        df        input df of spend; the index must carry a 'start_month' level
                  (the product's launch month)
        n_pers    number of periods to project
        profile   a 1-d profile array, or the name of a profile in profs_table
        gen_mult  accepted for interface compatibility but not currently applied
        _debug    [bool] print trace output

    RETURN: df of projections (same index as df, PeriodIndex columns)
    '''
    # first, if the passed profile is a string, retrieve the array from disk
    if isinstance(profile, str):
        print("it's a string, so retrieving array")
        profile = np.genfromtxt(profs_table[profile])
        print(profile)

    # the last observed month is a loop invariant - hoisted out of the loop
    last_date = df.columns[-1]

    out=[]
    for row in df.itertuples():
        # note the structure of each row: [0] is the index (a tuple), [1:] is the data
        launch_date = row[0][df.index.names.index('start_month')]
        launch_date = pd.Period(launch_date, freq='M')
        last_spend = row[-1]

        # offset of the projection within the profile = months since launch.
        # Period subtraction returns an int in old pandas but a DateOffset from
        # pandas 0.24 - normalise to an int either way via .n
        start_x = last_date - launch_date
        start_x = getattr(start_x, 'n', start_x)

        # slice the profile and scale it so the profile value for the period
        # just before the projection matches the last observed spend
        basic = profile[start_x: start_x+n_pers]
        scaling_f = last_spend / profile[start_x-1]
        y = basic * scaling_f
        out.append(y)

        if _debug:
            pad1 = 20
            print("launch_date".ljust(pad1), launch_date,
                  "\nlast_date".ljust(pad1), last_date,
                  "\nn_pers".ljust(pad1), n_pers,
                  "\nlast_spend".ljust(pad1), last_spend,
                  "\nstart_x".ljust(pad1), start_x,
                  "\nscaling_f".ljust(pad1), scaling_f)

    # Build the output columns.  period_range rather than the
    # PeriodIndex(start=...) constructor, which was removed in pandas 2.0
    cols = pd.period_range(start=last_date+1, periods=n_pers, freq='M')
    return pd.DataFrame(out, index=df.index, columns=cols)
##_________________________________________________________________________##
def r_tprofile(df, n_pers, *, profile):
    '''
    Simple extrapolation using a fixed profile (no cohort logic).

    Takes the average of the last 6 observed periods as the basis (the previous
    docstring said 3, but the calculation divides the last 6 columns by 6),
    then projects each future period as profile[t] * basis.  If projecting past
    the end of the profile, the final profile value is held flat.

    PARAMETERS:
        df       input df of spend (products x periods, PeriodIndex columns)
        n_pers   number of periods to project
        profile  a 1-d multiplier array, or the name of a profile in profs_table

    RETURN: df of projections (same index as df, PeriodIndex columns)
    '''
    # basis: mean of the last 6 observed periods, per product
    ave_last = np.array(df.iloc[:,-6:].sum(axis=1)/6)
    print('ave last ', ave_last.shape)

    # if the passed profile is a string, retrieve the array from disk
    if isinstance(profile, str):
        print("it's a string, so retrieving array")
        profile = np.genfromtxt(profs_table[profile])
    print('profile input ', profile.shape)

    # pad (holding the last value flat) or truncate the profile to n_pers
    if len(profile) < n_pers:
        profile1 = np.append(profile,[profile[-1]]*(n_pers-len(profile)))
    else:
        profile1 = profile[:n_pers]
    print('new profile shape ', profile1.shape)

    # one profile column per product, scaled by each product's basis
    profile_arr = np.array([profile1] * len(df)).T
    print('profile array ', profile_arr.shape)
    out = np.multiply(profile_arr, ave_last)

    # Build the output columns.  period_range rather than the
    # PeriodIndex(start=...) constructor, which was removed in pandas 2.0
    last_date = df.columns[-1]
    cols = pd.period_range(start=last_date+1, periods=n_pers, freq='M')
    return pd.DataFrame(out.T, index=df.index, columns=cols)
##_________________________________________________________________________##
def r_terminal(df, n_pers, *, term_gr_pa, _debug=False):
    '''Extrapolates each product at a terminal growth rate.

    Takes the average of the last 3 monthly values as the basis and compounds
    it at the monthly equivalent of term_gr_pa for n_pers periods.

    PARAMETERS:
        df          input df of spend (products x periods, PeriodIndex columns)
        n_pers      number of periods to project
        term_gr_pa  terminal growth rate, annual (divided by 12 for monthly)
        _debug      unused; retained for interface compatibility

    RETURN: df of projections (same index as df, PeriodIndex columns)
    '''
    # monthly growth factors, one row per product: (1+g)^1 .. (1+g)^n_pers
    term_gr_mo = float(term_gr_pa) / 12
    x = np.array([(1+term_gr_mo)**np.arange(1,n_pers+1)]*len(df))

    # basis: mean of the last 3 observed periods, per product
    ave_last = df.iloc[:,-3:].sum(axis=1)/3
    # use .values - multi-dimensional indexing on a Series (ave_last[:,None])
    # was removed in pandas 1.0
    x = x*ave_last.values[:,None]

    # Build the output columns.  period_range rather than the
    # PeriodIndex(start=...) constructor, which was removed in pandas 2.0
    last_date = df.columns[-1]
    cols = pd.period_range(start=last_date+1, periods=n_pers, freq='M')
    return pd.DataFrame(x, index=df.index, columns=cols)
##_________________________________________________________________________##
def r_fut(df, n_pers, *, profile, cutoff_date,
          coh_gr_pa, term_gr_pa, name='future', _debug=True):
    '''Project spend on future launches by cumulating a lifecycle profile.

    ``df`` is summed across products; a forecast is generated with
    ``pf.get_forecast`` over the window from ``cutoff_date`` and scaled so the
    period just before the projection matches the last observation.

    Parameters
    ----------
    df : pd.DataFrame
        Past observations; columns assumed to be a monthly PeriodIndex.
    n_pers : int
        Number of future periods to return.
    profile : array_like or str
        Lifecycle profile; a str is looked up in ``profs_table``.
    cutoff_date : str or pd.Period
        Launch cut-off; observations after this overlap the projection.
    coh_gr_pa, term_gr_pa : float
        Annual cohort / terminal growth rates (divided by 12 internally).

    Returns
    -------
    pd.DataFrame of the future projection only (n_pers monthly rows).
    '''
    coh_gr = coh_gr_pa / 12
    term_gr = term_gr_pa / 12
    if isinstance(profile, str):
        print("it's a string, so retrieving array")
        profile = np.genfromtxt(profs_table[profile])
        print(profile)
    cutoff_date = pd.Period(cutoff_date)
    # sum the df, as we don't care about individual products -- now a Series
    df = df.sum()
    last_date = df.index[-1]
    # Period - Period yields a DateOffset in modern pandas; .n gives the
    # integer number of months between the two dates
    overlap = (last_date - cutoff_date).n
    l_start = 0
    l_stop = overlap + n_pers
    proj_start = overlap
    proj_stop = overlap + n_pers
    if _debug:
        pad = 20
        print("n_pers".ljust(pad), n_pers, "\nlast_date".ljust(pad), last_date,
              "\ncutoff_date".ljust(pad), cutoff_date, "\nl_start".ljust(pad), l_start,
              "\nl_stop".ljust(pad), l_stop, "\nproj_start".ljust(pad), proj_start,
              "\nproj_stop".ljust(pad), proj_stop)
    # project one period earlier than required so the forecast can be scaled
    # against the last actual observation; that period is sliced off below
    fut = pf.get_forecast(profile, l_start, l_stop, coh_gr, term_gr, 1,
                          proj_start - 1, proj_stop, name=name)
    if _debug: print(fut)
    # scale so the overlap period equals the last actual observation
    last_sum = df.iloc[-1]  # positional: ``series[-1]`` is no longer valid pandas
    scaler = last_sum / fut.iloc[0]
    if _debug: print(last_sum, scaler)
    out = (fut * scaler)[1:]  # snip off the scaling period
    # period_range replaces the removed PeriodIndex(start=...) constructor
    out.index = pd.period_range(start=last_date + 1, periods=n_pers, freq='M')
    return pd.DataFrame(out)
##_________________________________________________________________________##
def r_fut_tr(df, n_pers, *, cut_off, shed=None, loe_delay=None,
             coh_gr=None, term_gr=None, name='future', _debug=False):
    '''Generates a projection of spend on future launches, based on cumulation
    of a lifecycle profile (itself imputed from observations), and scaled
    using observations.  Only the future projection is returned, not the
    input past observations.

        1. Make a shape corresponding to the passed shed
        2. Use this to project a forecast (unscaled)
        3. Scale the forecast to the last period of actual observations
        4. Slice the forecast to give the future only

    Parameters
    ----------
    df : pd.DataFrame
        Past observations (summed across products internally); columns are
        assumed to be a monthly PeriodIndex.
    n_pers : int
        Number of future periods to return.
    cut_off : str or pd.Period
        Launch cut-off date; observations after this overlap the projection.
    shed : pt.Shed
        Lifecycle shape parameters (shed_name, uptake_dur, plat_dur, gen_mult).
    loe_delay : int, optional
        Extra months added to the shed's plateau duration.
    coh_gr, term_gr : float, optional
        Per-period cohort / terminal growth rates (passed to get_forecast1).
    '''
    pad = 25
    # The non-future trend functions use loe_delay differently (they derive an
    # loe_month from an observed loe date); here the delay is simply folded
    # into the shed's plateau duration before the shape is built.
    if loe_delay is not None:
        if _debug: print('remaking shed to add loe delay\n')
        shed = pt.Shed(shed.shed_name + '_1',
                      shed.uptake_dur,
                      shed.plat_dur + loe_delay,
                      shed.gen_mult)
        if _debug:
            print("shed now")
            print(shed)
    # will be working with the sum of the input df
    df = df.sum()
    cut_off = pd.Period(cut_off)
    last_date = df.index[-1]
    # 1. Make a shape from the passed shed
    shape = pt.make_shape1(shed=shed)
    # 2. Project for n_pers plus the overlap with actuals.
    # Period - Period yields a DateOffset in modern pandas; .n gives the
    # integer number of months.
    overlap = (last_date - cut_off).n
    if _debug:
        print('cut off:'.ljust(pad), cut_off)
        print('last_date:'.ljust(pad), last_date)
        print('overlapping periods:'.ljust(pad), overlap)
    fut = pf.get_forecast1(shape, term_gr=term_gr, coh_gr=coh_gr,
                           n_pers=n_pers + overlap, name=name)
    # 3. Scale the forecast so the last overlapping period matches the last
    # actual observation
    last_sum = df.iloc[-1]  # positional: ``series[-1]`` is no longer valid pandas
    if _debug: print('spend at last period'.ljust(pad), last_sum)
    if _debug: print('spend at overlap period'.ljust(pad), fut.iloc[overlap])
    scaler = last_sum / fut.iloc[overlap - 1]
    if _debug: print('scaler to apply'.ljust(pad), scaler)
    fut = fut * scaler
    if _debug: print("\ntail of actual:\n", df.tail(), "\n")
    if _debug: print("\nscaled fut at overlap:\n", fut[overlap - 5:overlap + 5].head(), "\n")
    # 4. Slice off the overlap to leave the future only
    out = fut[overlap:]
    # period_range replaces the removed PeriodIndex(start=...) constructor
    out.index = pd.period_range(start=last_date + 1, periods=len(out), freq='M')
    return pd.DataFrame(out)
| [
"projection_funcs.get_forecast",
"numpy.ones",
"numpy.isnan",
"policy_tools.Shed",
"numpy.arange",
"pprint.pprint",
"pandas.DataFrame",
"numpy.multiply",
"numpy.genfromtxt",
"numpy.insert",
"projection_funcs.get_forecast1",
"numpy.append",
"pandas.Period",
"policy_tools.make_shape1",
"nu... | [((2303, 2335), 'projection_funcs.mov_ave', 'pf.mov_ave', (['in_arr', '_ma_interval'], {}), '(in_arr, _ma_interval)\n', (2313, 2335), True, 'import projection_funcs as pf\n'), ((10545, 10565), 'projection_funcs.mov_ave', 'pf.mov_ave', (['prod', '(12)'], {}), '(prod, 12)\n', (10555, 10565), True, 'import projection_funcs as pf\n'), ((10639, 10654), 'numpy.nanmax', 'np.nanmax', (['prod'], {}), '(prod)\n', (10648, 10654), True, 'import numpy as np\n'), ((10678, 10696), 'numpy.nanargmax', 'np.nanargmax', (['prod'], {}), '(prod)\n', (10690, 10696), True, 'import numpy as np\n'), ((10755, 10773), 'numpy.nanmax', 'np.nanmax', (['prod_ma'], {}), '(prod_ma)\n', (10764, 10773), True, 'import numpy as np\n'), ((10797, 10818), 'numpy.nanargmax', 'np.nanargmax', (['prod_ma'], {}), '(prod_ma)\n', (10809, 10818), True, 'import numpy as np\n'), ((13012, 13037), 'numpy.array', 'np.array', (['[last_spend_ma]'], {}), '([last_spend_ma])\n', (13020, 13037), True, 'import numpy as np\n'), ((19988, 20050), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_month + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), "(start=last_month + 1, periods=n_pers, freq='M')\n", (20002, 20050), True, 'import pandas as pd\n'), ((20121, 20168), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {'index': 'df.index', 'columns': 'cols'}), '(out, index=df.index, columns=cols)\n', (20133, 20168), True, 'import pandas as pd\n'), ((22072, 22133), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_date + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), "(start=last_date + 1, periods=n_pers, freq='M')\n", (22086, 22133), True, 'import pandas as pd\n'), ((22148, 22195), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {'index': 'df.index', 'columns': 'cols'}), '(out, index=df.index, columns=cols)\n', (22160, 22195), True, 'import pandas as pd\n'), ((23931, 23992), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_date + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), 
"(start=last_date + 1, periods=n_pers, freq='M')\n", (23945, 23992), True, 'import pandas as pd\n'), ((24007, 24054), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {'index': 'df.index', 'columns': 'cols'}), '(out, index=df.index, columns=cols)\n', (24019, 24054), True, 'import pandas as pd\n'), ((25077, 25111), 'numpy.multiply', 'np.multiply', (['profile_arr', 'ave_last'], {}), '(profile_arr, ave_last)\n', (25088, 25111), True, 'import numpy as np\n'), ((25182, 25243), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_date + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), "(start=last_date + 1, periods=n_pers, freq='M')\n", (25196, 25243), True, 'import pandas as pd\n'), ((25254, 25298), 'pandas.DataFrame', 'pd.DataFrame', (['out.T'], {'index': 'ind', 'columns': 'cols'}), '(out.T, index=ind, columns=cols)\n', (25266, 25298), True, 'import pandas as pd\n'), ((26024, 26085), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_date + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), "(start=last_date + 1, periods=n_pers, freq='M')\n", (26038, 26085), True, 'import pandas as pd\n'), ((26100, 26140), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'index': 'ind', 'columns': 'cols'}), '(x, index=ind, columns=cols)\n', (26112, 26140), True, 'import pandas as pd\n'), ((26787, 26809), 'pandas.Period', 'pd.Period', (['cutoff_date'], {}), '(cutoff_date)\n', (26796, 26809), True, 'import pandas as pd\n'), ((27428, 27532), 'projection_funcs.get_forecast', 'pf.get_forecast', (['profile', 'l_start', 'l_stop', 'coh_gr', 'term_gr', '(1)', '(proj_start - 1)', 'proj_stop'], {'name': 'name'}), '(profile, l_start, l_stop, coh_gr, term_gr, 1, proj_start - \n 1, proj_stop, name=name)\n', (27443, 27532), True, 'import projection_funcs as pf\n'), ((28005, 28066), 'pandas.PeriodIndex', 'pd.PeriodIndex', ([], {'start': '(last_date + 1)', 'periods': 'n_pers', 'freq': '"""M"""'}), "(start=last_date + 1, periods=n_pers, freq='M')\n", (28019, 28066), True, 'import pandas as 
pd\n'), ((28077, 28094), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (28089, 28094), True, 'import pandas as pd\n'), ((29805, 29823), 'pandas.Period', 'pd.Period', (['cut_off'], {}), '(cut_off)\n', (29814, 29823), True, 'import pandas as pd\n'), ((29910, 29935), 'policy_tools.make_shape1', 'pt.make_shape1', ([], {'shed': 'shed'}), '(shed=shed)\n', (29924, 29935), True, 'import policy_tools as pt\n'), ((30257, 30352), 'projection_funcs.get_forecast1', 'pf.get_forecast1', (['shape'], {'term_gr': 'term_gr', 'coh_gr': 'coh_gr', 'n_pers': '(n_pers + overlap)', 'name': 'name'}), '(shape, term_gr=term_gr, coh_gr=coh_gr, n_pers=n_pers +\n overlap, name=name)\n', (30273, 30352), True, 'import projection_funcs as pf\n'), ((31128, 31145), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {}), '(out)\n', (31140, 31145), True, 'import pandas as pd\n'), ((4673, 4688), 'pprint.pprint', 'pprint', (['streaks'], {}), '(streaks)\n', (4679, 4688), False, 'from pprint import pprint\n'), ((9035, 9049), 'numpy.array', 'np.array', (['prod'], {}), '(prod)\n', (9043, 9049), True, 'import numpy as np\n'), ((10513, 10527), 'numpy.isnan', 'np.isnan', (['prod'], {}), '(prod)\n', (10521, 10527), True, 'import numpy as np\n'), ((14297, 14323), 'numpy.append', 'np.append', (['out', 'uptake_out'], {}), '(out, uptake_out)\n', (14306, 14323), True, 'import numpy as np\n'), ((14904, 14928), 'numpy.append', 'np.append', (['out', 'plat_out'], {}), '(out, plat_out)\n', (14913, 14928), True, 'import numpy as np\n'), ((15309, 15333), 'numpy.append', 'np.append', (['out', 'term_out'], {}), '(out, term_out)\n', (15318, 15333), True, 'import numpy as np\n'), ((15547, 15572), 'numpy.insert', 'np.insert', (['out', '(0)', 'spacer'], {}), '(out, 0, spacer)\n', (15556, 15572), True, 'import numpy as np\n'), ((22829, 22864), 'numpy.genfromtxt', 'np.genfromtxt', (['profs_table[profile]'], {}), '(profs_table[profile])\n', (22842, 22864), True, 'import numpy as np\n'), ((23169, 23201), 'pandas.Period', 
'pd.Period', (['launch_date'], {'freq': '"""M"""'}), "(launch_date, freq='M')\n", (23178, 23201), True, 'import pandas as pd\n'), ((24678, 24713), 'numpy.genfromtxt', 'np.genfromtxt', (['profs_table[profile]'], {}), '(profs_table[profile])\n', (24691, 24713), True, 'import numpy as np\n'), ((26535, 26570), 'numpy.genfromtxt', 'np.genfromtxt', (['profs_table[profile]'], {}), '(profs_table[profile])\n', (26548, 26570), True, 'import numpy as np\n'), ((29461, 29554), 'policy_tools.Shed', 'pt.Shed', (["(shed.shed_name + '_1')", 'shed.uptake_dur', '(shed.plat_dur + loe_delay)', 'shed.gen_mult'], {}), "(shed.shed_name + '_1', shed.uptake_dur, shed.plat_dur + loe_delay,\n shed.gen_mult)\n", (29468, 29554), True, 'import policy_tools as pt\n'), ((9187, 9201), 'numpy.array', 'np.array', (['prod'], {}), '(prod)\n', (9195, 9201), True, 'import numpy as np\n'), ((14579, 14597), 'numpy.ones', 'np.ones', (['plat_pers'], {}), '(plat_pers)\n', (14586, 14597), True, 'import numpy as np\n'), ((15584, 15657), 'pandas.DataFrame', 'pd.DataFrame', (['[prod, prod_ma, out]'], {'index': "['raw', 'mov_ave', 'projected']"}), "([prod, prod_ma, out], index=['raw', 'mov_ave', 'projected'])\n", (15596, 15657), True, 'import pandas as pd\n'), ((2372, 2394), 'numpy.zeros', 'np.zeros', (['_ma_interval'], {}), '(_ma_interval)\n', (2380, 2394), True, 'import numpy as np\n'), ((9343, 9357), 'numpy.array', 'np.array', (['prod'], {}), '(prod)\n', (9351, 9357), True, 'import numpy as np\n'), ((13382, 13406), 'numpy.arange', 'np.arange', (['(1)', '(n_pers + 1)'], {}), '(1, n_pers + 1)\n', (13391, 13406), True, 'import numpy as np\n'), ((13885, 13910), 'numpy.arange', 'np.arange', (['(1)', 'uptake_pers'], {}), '(1, uptake_pers)\n', (13894, 13910), True, 'import numpy as np\n'), ((15105, 15132), 'numpy.arange', 'np.arange', (['(1)', '(term_pers + 1)'], {}), '(1, term_pers + 1)\n', (15114, 15132), True, 'import numpy as np\n'), ((21010, 21044), 'pandas.Period', 'pd.Period', (['df.columns[0]'], {'freq': 
'"""M"""'}), "(df.columns[0], freq='M')\n", (21019, 21044), True, 'import pandas as pd\n'), ((8794, 8809), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (8807, 8809), False, 'import inspect\n'), ((8866, 8881), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (8879, 8881), False, 'import inspect\n'), ((15788, 15816), 'pandas.Period', 'pd.Period', (['start_m'], {'freq': '"""M"""'}), "(start_m, freq='M')\n", (15797, 15816), True, 'import pandas as pd\n'), ((17764, 17779), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (17777, 17779), False, 'import inspect\n'), ((17836, 17851), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (17849, 17851), False, 'import inspect\n'), ((20086, 20101), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (20099, 20101), False, 'import inspect\n'), ((25820, 25844), 'numpy.arange', 'np.arange', (['(1)', '(n_pers + 1)'], {}), '(1, n_pers + 1)\n', (25829, 25844), True, 'import numpy as np\n'), ((15981, 15996), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (15994, 15996), False, 'import inspect\n'), ((16074, 16089), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (16087, 16089), False, 'import inspect\n'), ((19863, 19878), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (19876, 19878), False, 'import inspect\n')] |
# a data wrapper to iterate over large matrices in numpy
import numpy as np
# random, unlimited number of access to the corresponding data
class RandomAccessData(object):
    """Wraps an indexable dataset for unlimited uniform random minibatch draws."""

    def __init__(self, data):
        self.data = data
        self.lens = len(self.data)

    def next(self, rng, batchsize):
        """Return ``batchsize`` rows picked (with replacement) by ``rng.choice``."""
        picks = rng.choice(self.lens, batchsize)
        return self.data[picks]
class DataStream(object):
    """Single-pass minibatch iterator over one dataset.

    Trailing rows that do not fill a whole batch are dropped.
    """

    def __init__(self, data):
        self.data = data

    def iterate(self, batchsize, shuffle=True):
        """Yield consecutive minibatches, optionally in a shuffled order."""
        n = len(self.data)
        if shuffle:
            # one global-RNG shuffle of the row order, drawn up front
            order = np.arange(n)
            np.random.shuffle(order)
        for lo in range(0, n - batchsize + 1, batchsize):
            if shuffle:
                yield self.data[order[lo:lo + batchsize]]
            else:
                yield self.data[lo:lo + batchsize]
class MultiDataStream(object):
    """Minibatch iterator over several parallel datasets.

    Every dataset is sampled with the same index order, so row correspondence
    across datasets is preserved; iteration length is governed by the shortest
    dataset.  Trailing rows that do not fill a whole batch are dropped.
    """

    def __init__(self, datas):
        self.datas = list(datas)
        self.lens = [len(data) for data in self.datas]
        self.data_len = min(self.lens)
        self.init_data_len = self.data_len

    def iterate(self, batchsize, seed, shuffle=True):
        """Yield tuples of aligned minibatches, reproducibly shuffled by ``seed``."""
        if shuffle:
            np.random.seed(seed)
            order = np.arange(self.init_data_len)
            np.random.shuffle(order)
        limit = self.data_len - batchsize + 1
        for lo in range(0, limit, batchsize):
            sel = order[lo:lo + batchsize] if shuffle else slice(lo, lo + batchsize)
            yield tuple(d[sel] for d in self.datas)
class MultiDataSemiStream(object):
    """Minibatch iterator over aligned datasets plus unaligned semi datasets.

    The ``datas`` are sampled with one shared index order (row correspondence
    preserved across them); the ``semi_datas`` have no strict correspondence
    to ``datas`` and are sampled with an independent index order.
    """

    def __init__(self, datas, semi_datas):
        # semi_datas: no strict correspondence to datas, any time can randomly
        # sample
        self.datas = list(datas)
        self.lens = [len(data) for data in self.datas]
        self.data_len = min(self.lens)
        self.semi_datas = list(semi_datas)
        self.semi_data_len = min([len(d) for d in self.semi_datas])

    def iterate(self, batchsize, shuffle=True):
        """Yield tuples: one batch per dataset in ``datas`` followed by one per
        dataset in ``semi_datas``.  Trailing partial batches are dropped.
        """
        # BUG FIX: ``semi_indices`` used to be created only under ``shuffle``,
        # so iterate(..., shuffle=False) raised NameError in the loop body.
        # Both index orders now always exist (unshuffled when shuffle=False).
        indices = np.arange(self.data_len)
        semi_indices = np.arange(self.semi_data_len)
        if shuffle:
            np.random.shuffle(indices)
            np.random.shuffle(semi_indices)
        for start_idx in range(0, self.data_len - batchsize + 1, batchsize):
            excerpt = indices[start_idx:start_idx + batchsize]
            semi_excerpt = semi_indices[start_idx:start_idx + batchsize]
            yield tuple([data[excerpt] for data in self.datas] +
                        [data[semi_excerpt] for data in self.semi_datas])
| [
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle"
] | [((2236, 2265), 'numpy.arange', 'np.arange', (['self.semi_data_len'], {}), '(self.semi_data_len)\n', (2245, 2265), True, 'import numpy as np\n'), ((2274, 2305), 'numpy.random.shuffle', 'np.random.shuffle', (['semi_indices'], {}), '(semi_indices)\n', (2291, 2305), True, 'import numpy as np\n'), ((586, 612), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (603, 612), True, 'import numpy as np\n'), ((1220, 1240), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1234, 1240), True, 'import numpy as np\n'), ((1263, 1292), 'numpy.arange', 'np.arange', (['self.init_data_len'], {}), '(self.init_data_len)\n', (1272, 1292), True, 'import numpy as np\n'), ((1305, 1331), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1322, 1331), True, 'import numpy as np\n'), ((2148, 2172), 'numpy.arange', 'np.arange', (['self.data_len'], {}), '(self.data_len)\n', (2157, 2172), True, 'import numpy as np\n'), ((2185, 2211), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2202, 2211), True, 'import numpy as np\n')] |
import sys, os
import Swing
import pandas as pd
import numpy as np
from Swing.util.Evaluator import Evaluator
import warnings
import Swing.util.utility_module as Rutil
import pdb
class Analyzer:
    """Analyzes groups of Rollers: figures out the completeness of an
    experiment, checks for errored Rollers, helps open and sort pickle files,
    and ranks Rollers/windows by their model and cragging scores."""

    def __init__(self, my_arg):
        """Accept either a folder of pickled Roller objects (str path) or a
        single Roller object.  A folder is unpickled file-by-file and
        aggregated into ``self.overall_df``; a single Roller is summarized into
        ``self.result_df``."""
        self.error_list = []
        self.all_ranked_lists = []
        self.max_width_results = None
        self.min_width_results = None
        self.overall_df = pd.DataFrame()
        self.pickle_folder = None
        # py3 fix: ``basestring`` no longer exists -- test against str
        if isinstance(my_arg, str):
            # the argument is a folder path: process every pickle inside it
            pickle_path_folder = my_arg
            self.sorted_edge_lists = []
            self.total_files_unpickled = []
            self.pickle_paths = os.listdir(pickle_path_folder)
            self.pickle_folder = pickle_path_folder
            for pickle_path in self.pickle_paths:
                self.current_pickle_path = pickle_path
                try:
                    self.current_roller = pd.read_pickle(pickle_path_folder + "/" + pickle_path)
                    df = self.aggregate_ranked_list(self.current_roller)
                    # DataFrame.append was removed in pandas 2.0 -> concat
                    self.overall_df = pd.concat([self.overall_df, df])
                except KeyError:
                    continue
                self.total_files_unpickled.append(pickle_path)
        else:
            self.current_pickle_path = "none"
            self.current_roller = my_arg
            self.result_df = self.aggregate_ranked_list(self.current_roller)

    def get_result_df(self):
        """Return the summary dataframe built for a single-Roller Analyzer."""
        return self.result_df

    def get_ranked_list(self, target_window_index, pickle_path=None, roller_obj=None):
        """Return the edge list of the window at ``target_window_index``,
        sorted by descending importance, from either a pickled Roller
        (``pickle_path``) or a live one (``roller_obj``)."""
        if pickle_path:
            roller_obj = pd.read_pickle(pickle_path)
        for index, window in enumerate(roller_obj.window_list):
            if index == target_window_index:
                if not roller_obj.file_path.startswith("/"):
                    # pickles may hold relative paths from another machine;
                    # rewrite to the cluster's absolute data location
                    roller_obj.file_path = roller_obj.file_path.replace("data/", "/projects/p20519/Swing/data/")
                sorted_edge_list = window.results_table
                if 'stability' in sorted_edge_list:
                    sorted_edge_list.rename(columns={'stability': 'importance'}, inplace=True)
                # DataFrame.sort was removed in pandas 0.20 -> sort_values
                sorted_edge_list.sort_values(['importance'], ascending=[False], inplace=True)
                return sorted_edge_list

    def aggregate_best_windows(self, my_df=None, roller_obj=None, target_width=None, top_percentile=10):
        """Identify the windows with the best (lowest) average cragging MSE,
        aggregate their ranked edge lists by mean rank, and score the
        aggregate against the gold standard.

        :param my_df: summary dataframe (defaults to ``self.overall_df``)
        :param roller_obj: Roller to pull windows from (defaults to current)
        :param target_width: restrict correlation diagnostics to one width
        :param top_percentile: fraction of windows to aggregate, default 10
        :return: one-row pd.DataFrame with the aggregate auroc/aupr
        """
        corr_list = []
        width_list = []
        agg_auroc = []
        agg_auroc_table = []
        # dual-mode: works with a folder of pickles or a single roller object
        if my_df is None:
            my_df = self.overall_df
        if roller_obj is None:
            roller_obj = self.current_roller
        if target_width is not None:
            target_df = my_df[my_df['window_width'] == target_width]
        else:
            target_df = my_df
        # numeric_only: the frame also carries string path columns
        corr_list.append(target_df.corr(numeric_only=True)['auroc'])
        # sort dataframe based on mean squared error
        sorted_df = my_df.sort_values('crag_mse_average', ascending=True)
        # first percentile of rows = windows with the least mean squared error
        n_rows = round(top_percentile * .01 * len(sorted_df))
        print(n_rows, " windows incorporated")
        top_windows = sorted_df.head(n_rows)
        # top_windows holds scores only; query and aggregate the ranked lists
        top_ranked_lists = []
        for index, row in top_windows.iterrows():
            if self.pickle_folder:
                full_path = self.pickle_folder + row['pickle_paths']
            else:
                full_path = None
            window_ranked_list = self.get_ranked_list(row['window_index'], pickle_path=full_path, roller_obj=roller_obj)
            top_ranked_lists.append(window_ranked_list)
        agg_auroc_table.append(top_ranked_lists)
        # change the importance value into a rank
        for ranked_list in top_ranked_lists:
            ranked_list['importance_rank'] = ranked_list.rank(axis=0, ascending=False)['importance']
        averaged_lists = Rutil.average_rank(top_ranked_lists, col_string='importance_rank')
        gold_standard = self.current_roller.file_path.replace("timeseries.tsv", "goldstandard.tsv")
        averaged_lists.sort_values('mean-rank', ascending=True, inplace=True)
        self.aggregated_edge_list = averaged_lists
        evaluator = Evaluator(gold_standard, sep="\t")
        tpr, fpr, auroc = evaluator.calc_roc(averaged_lists)
        precision, recall, aupr = evaluator.calc_pr(averaged_lists)
        agg_auroc.append(auroc.tolist()[-1])
        # build the one-row aggregated result (crag columns are placeholders)
        my_result = {'aupr': aupr.tolist()[-1],
                     'auroc': auroc.tolist()[-1],
                     'crag_ev_average': 0,
                     'crag_ev_max': 0,
                     'crag_ev_median': 0,
                     'crag_mse_average': 0,
                     'crag_mse_max': 0,
                     'crag_mse_median': 0,
                     'crag_r2_average': 0,
                     'crag_r2_max': 0,
                     'crag_r2_median': 0,
                     'network_paths': my_df['network_paths'][0],
                     'pickle_paths': my_df['pickle_paths'][0],
                     'window_index': 'agg',
                     'window_width': my_df['window_width'][0],
                     }
        my_result = pd.DataFrame(my_result, index=[0])
        return my_result

    def aggregate_best_windows_scan(self, top_percentile=10):
        """Scan window widths 4..21, aggregating the best-cragging windows in
        a sliding width band, then score an aggregate edge list against the
        gold standard (diagnostic method; returns nothing)."""
        corr_list = []
        width_list = []
        agg_auroc = []
        agg_auroc_table = []
        for target_width in range(4, 22):
            width_list.append(target_width)
            target_df = self.overall_df[self.overall_df['window_width'] == target_width]
            # numeric_only: the frame also carries string path columns
            corr_list.append(target_df.corr(numeric_only=True)['auroc'])
            # renamed from ``sorted`` (shadowed the builtin); .sort removed
            sorted_df = self.overall_df.sort_values('crag_mse_average', ascending=True)
            target_sorted = sorted_df[(sorted_df['window_width'] < target_width) & (sorted_df['window_width'] > target_width - 3)]
            n_rows = round(top_percentile * .01 * len(target_sorted))
            print(n_rows, " windows incorporated")
            top_windows = target_sorted.head(n_rows)
            # aggregate ranked lists from a list of pickle paths and indices
            top_ranked_lists = []
            for index, row in top_windows.iterrows():
                full_path = self.pickle_folder + row['pickle_paths']
                window_ranked_list = self.get_ranked_list(row['window_index'], full_path)
                top_ranked_lists.append(window_ranked_list)
            agg_auroc_table.append(top_ranked_lists)
            # change the importance value into a rank
            for ranked_list in top_ranked_lists:
                ranked_list['importance_rank'] = ranked_list.rank(axis=0, ascending=False)['importance']
        top_auroc = self.overall_df.sort_values('auroc', ascending=False).head()
        top_ranked_lists2 = []
        for index, row in top_auroc.iterrows():
            full_path = self.pickle_folder + row['pickle_paths']
            window_ranked_list = self.get_ranked_list(row['window_index'], pickle_path=full_path)
            top_ranked_lists2.append(window_ranked_list)
        # change the importance value into a rank
        for ranked_list in top_ranked_lists2:
            ranked_list['importance_rank'] = ranked_list.rank(axis=0, ascending=False)['importance']
        averaged_lists = Rutil.average_rank(top_ranked_lists, col_string='importance_rank')
        gold_standard = self.current_roller.file_path.replace("timeseries.tsv", "goldstandard.tsv")
        averaged_lists.sort_values('mean-rank', ascending=True, inplace=True)
        evaluator = Evaluator(gold_standard, sep="\t")
        tpr, fpr, auroc = evaluator.calc_roc(averaged_lists)
        precision, recall, aupr = evaluator.calc_pr(averaged_lists)
        agg_auroc.append(auroc.tolist()[-1])
        my_r = zip(width_list, agg_auroc)

    def predict_best_window(self):
        """Return the summary row with the smallest nonzero average cragging
        MSE (zeros are placeholders for whole-width windows)."""
        max_value = 0
        counter = 1
        while max_value == 0:
            max_value = self.overall_df['crag_mse_average'].nsmallest(counter).values[-1]
            counter += 1
        best_row = self.overall_df[self.overall_df['crag_mse_average'] == max_value]
        return best_row

    def get_correlation(self):
        """Correlation matrix of the numeric summary columns."""
        # numeric_only: overall_df also carries string path columns
        return self.overall_df.corr(numeric_only=True)

    def load_list(self, csv_file_path):
        """Load a previously saved summary CSV into ``overall_df`` and return it."""
        self.overall_df = pd.read_csv(csv_file_path)
        # bug fix: previously returned the undefined name ``df``
        return self.overall_df

    def get_best_window(self):
        """Row of the summary frame at the widest window (same as get_max_window)."""
        best_row = self.overall_df.loc[self.overall_df['window_width'].idxmax()]
        return best_row

    def get_max_window(self):
        """Return the summary row of the maximum-width window (the status quo
        comparison point)."""
        max_row = self.overall_df.loc[self.overall_df['window_width'].idxmax()]
        max_width = self.current_roller.overall_width
        # disabled check kept for reference:
        # if max_row['window_width'] != max_width:
        #     max_width = max_row['window_width']
        #     warnings.warn("Swing with all timepoints is not present. Using Swing "
        #                   "with a maximum width of %s as comparison window" % (max_width))
        return max_row

    def get_window_tag(self):
        """Short tag identifying the current pickle and its window width
        (used when logging errored windows)."""
        window_size = self.current_roller.window_width
        tag = self.current_pickle_path + "Width: " + str(window_size)
        return tag

    def aggregate_ranked_list(self, roller_obj):
        """Build one summary row per window of ``roller_obj``: AUROC/AUPR of
        its ranked edge list plus average/median/max cragging scores."""
        df = pd.DataFrame()
        pickle_paths = []
        network_paths = []
        auroc_list = []
        aupr_list = []
        window_index_list = []
        crag_mse_average_list = []
        crag_r2_average_list = []
        crag_ev_average_list = []
        crag_mse_median_list = []
        crag_r2_median_list = []
        crag_ev_median_list = []
        crag_mse_max_list = []
        crag_r2_max_list = []
        crag_ev_max_list = []
        window_width_list = []
        for index, window in enumerate(roller_obj.window_list):
            if not self.current_roller.file_path.startswith("/"):
                # replace relative file paths with absolute file paths
                self.current_roller.file_path = self.current_roller.file_path.replace("data/", "/projects/p20519/Swing/data/")
            pickle_paths.append(self.current_pickle_path)
            network_paths.append(self.current_roller.file_path)
            window_width_list.append(roller_obj.window_width)
            try:
                sorted_edge_list = window.results_table
                # a results table without importance/ranking values is unusable
                if len(sorted_edge_list.columns) < 2:
                    raise AttributeError
                gold_standard = self.current_roller.file_path.replace("timeseries.tsv", "goldstandard.tsv")
                evaluator = Evaluator(gold_standard, sep="\t")
                if 'stability' in sorted_edge_list:
                    sorted_edge_list.rename(columns={'stability': 'importance'}, inplace=True)
                # DataFrame.sort was removed -> sort_values
                sorted_edge_list.sort_values(['importance'], ascending=[False], inplace=True)
                self.all_ranked_lists.append(sorted_edge_list)
                tpr, fpr, auroc = evaluator.calc_roc(sorted_edge_list)
                precision, recall, aupr = evaluator.calc_pr(sorted_edge_list)
                print(aupr.values[-1])
                print(auroc.values[-1])
                auroc_list.append(auroc.values[-1])
                aupr_list.append(aupr.values[-1])
                # placeholder cragging scores, overwritten for partial windows
                model_crag = [{'ev': 0,
                               'mse': 0,
                               'r2': 0
                               }]
                if roller_obj.window_width != roller_obj.overall_width:
                    if self.max_width_results:
                        if auroc.values[-1] > self.max_width_results['auroc'].values[-1]:
                            self.min_width_results = {'tpr': tpr, 'fpr': fpr, 'auroc': auroc, 'precision': precision, 'recall': recall, 'aupr': aupr}
                    # py3 fix: floor division -- a float upper bound breaks range()
                    crag_iterations = len(window.test_scores) // window.n_genes
                    cragging_scores = []
                    for i in range(0, crag_iterations):
                        cragging_scores.append(window.test_scores[i * window.n_genes:(i + 1) * window.n_genes])
                    # get_coeffs is also called by the null model, so the
                    # cragging function also evaluates null models and appends
                    # them to the scores; the first chunk holds the cragging
                    # scores for the real model.
                    model_crag = cragging_scores[0]
                else:
                    self.max_width_results = {'tpr': tpr, 'fpr': fpr, 'auroc': auroc, 'precision': precision, 'recall': recall, 'aupr': aupr}
                crag_ev_average_list.append(self.average_dict(model_crag, 'ev'))
                crag_mse_average_list.append(self.average_dict(model_crag, 'mse'))
                crag_r2_average_list.append(self.average_dict(model_crag, 'r2'))
                crag_ev_median_list.append(self.median_dict(model_crag, 'ev'))
                crag_mse_median_list.append(self.median_dict(model_crag, 'mse'))
                crag_r2_median_list.append(self.median_dict(model_crag, 'r2'))
                crag_ev_max_list.append(self.max_dict(model_crag, 'ev'))
                crag_mse_max_list.append(self.max_dict(model_crag, 'mse'))
                crag_r2_max_list.append(self.max_dict(model_crag, 'r2'))
                window_index_list.append(index)
            except (AttributeError, IndexError):
                window_tag = self.get_window_tag()
                self.error_list.append(window_tag + "Window Index " + str(index) + " : No results table")
        if auroc_list:
            if roller_obj.window_width == roller_obj.overall_width:
                # whole-width rollers have no cragging scores; store placeholders
                window_index_list = [0]
                crag_mse_average_list = [0]
                crag_r2_average_list = [0]
                crag_ev_average_list = [0]
                crag_mse_median_list = [0]
                crag_r2_median_list = [0]
                crag_ev_median_list = [0]
                crag_mse_max_list = [0]
                crag_r2_max_list = [0]
                crag_ev_max_list = [0]
            df = pd.DataFrame({'pickle_paths': pickle_paths,
                               'network_paths': network_paths,
                               'auroc': auroc_list,
                               'aupr': aupr_list,
                               'window_index': window_index_list,
                               'crag_mse_average': crag_mse_average_list,
                               'crag_ev_average': crag_ev_average_list,
                               'crag_r2_average': crag_r2_average_list,
                               'crag_mse_median': crag_mse_median_list,
                               'crag_ev_median': crag_ev_median_list,
                               'crag_r2_median': crag_r2_median_list,
                               'crag_ev_max': crag_ev_max_list,
                               'crag_mse_max': crag_mse_max_list,
                               'crag_r2_max': crag_r2_max_list,
                               'window_width': window_width_list})
        return df

    def average_dict(self, total, key):
        """Mean of ``key`` across a list of dicts."""
        return sum(d[key] for d in total) / len(total)

    def median_dict(self, total, key):
        """Median of ``key`` across a list of dicts."""
        aggr = [x[key] for x in total]
        return np.median(aggr)

    def max_dict(self, total, key):
        """Maximum of ``key`` across a list of dicts."""
        aggr = [x[key] for x in total]
        return np.max(aggr)
| [
"pandas.DataFrame",
"numpy.median",
"pandas.read_csv",
"Swing.util.utility_module.average_rank",
"numpy.max",
"Swing.util.Evaluator.Evaluator",
"pandas.read_pickle",
"os.listdir"
] | [((865, 879), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (877, 879), True, 'import pandas as pd\n'), ((5319, 5385), 'Swing.util.utility_module.average_rank', 'Rutil.average_rank', (['top_ranked_lists'], {'col_string': '"""importance_rank"""'}), "(top_ranked_lists, col_string='importance_rank')\n", (5337, 5385), True, 'import Swing.util.utility_module as Rutil\n'), ((5624, 5658), 'Swing.util.Evaluator.Evaluator', 'Evaluator', (['gold_standard'], {'sep': '"""\t"""'}), "(gold_standard, sep='\\t')\n", (5633, 5658), False, 'from Swing.util.Evaluator import Evaluator\n'), ((6734, 6768), 'pandas.DataFrame', 'pd.DataFrame', (['my_result'], {'index': '[0]'}), '(my_result, index=[0])\n', (6746, 6768), True, 'import pandas as pd\n'), ((9998, 10024), 'pandas.read_csv', 'pd.read_csv', (['csv_file_path'], {}), '(csv_file_path)\n', (10009, 10024), True, 'import pandas as pd\n'), ((11035, 11049), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11047, 11049), True, 'import pandas as pd\n'), ((17189, 17204), 'numpy.median', 'np.median', (['aggr'], {}), '(aggr)\n', (17198, 17204), True, 'import numpy as np\n'), ((17293, 17305), 'numpy.max', 'np.max', (['aggr'], {}), '(aggr)\n', (17299, 17305), True, 'import numpy as np\n'), ((1220, 1250), 'os.listdir', 'os.listdir', (['pickle_path_folder'], {}), '(pickle_path_folder)\n', (1230, 1250), False, 'import sys, os\n'), ((2231, 2258), 'pandas.read_pickle', 'pd.read_pickle', (['pickle_path'], {}), '(pickle_path)\n', (2245, 2258), True, 'import pandas as pd\n'), ((8923, 8989), 'Swing.util.utility_module.average_rank', 'Rutil.average_rank', (['top_ranked_lists'], {'col_string': '"""importance_rank"""'}), "(top_ranked_lists, col_string='importance_rank')\n", (8941, 8989), True, 'import Swing.util.utility_module as Rutil\n'), ((9189, 9223), 'Swing.util.Evaluator.Evaluator', 'Evaluator', (['gold_standard'], {'sep': '"""\t"""'}), "(gold_standard, sep='\\t')\n", (9198, 9223), False, 'from Swing.util.Evaluator import 
Evaluator\n'), ((16040, 16603), 'pandas.DataFrame', 'pd.DataFrame', (["{'pickle_paths': pickle_paths, 'network_paths': network_paths, 'auroc':\n auroc_list, 'aupr': aupr_list, 'window_index': window_index_list,\n 'crag_mse_average': crag_mse_average_list, 'crag_ev_average':\n crag_ev_average_list, 'crag_r2_average': crag_r2_average_list,\n 'crag_mse_median': crag_mse_median_list, 'crag_ev_median':\n crag_ev_median_list, 'crag_r2_median': crag_r2_median_list,\n 'crag_ev_max': crag_ev_max_list, 'crag_mse_max': crag_mse_max_list,\n 'crag_r2_max': crag_r2_max_list, 'window_width': window_width_list}"], {}), "({'pickle_paths': pickle_paths, 'network_paths': network_paths,\n 'auroc': auroc_list, 'aupr': aupr_list, 'window_index':\n window_index_list, 'crag_mse_average': crag_mse_average_list,\n 'crag_ev_average': crag_ev_average_list, 'crag_r2_average':\n crag_r2_average_list, 'crag_mse_median': crag_mse_median_list,\n 'crag_ev_median': crag_ev_median_list, 'crag_r2_median':\n crag_r2_median_list, 'crag_ev_max': crag_ev_max_list, 'crag_mse_max':\n crag_mse_max_list, 'crag_r2_max': crag_r2_max_list, 'window_width':\n window_width_list})\n", (16052, 16603), True, 'import pandas as pd\n'), ((12486, 12520), 'Swing.util.Evaluator.Evaluator', 'Evaluator', (['gold_standard'], {'sep': '"""\t"""'}), "(gold_standard, sep='\\t')\n", (12495, 12520), False, 'from Swing.util.Evaluator import Evaluator\n'), ((1485, 1539), 'pandas.read_pickle', 'pd.read_pickle', (["(pickle_path_folder + '/' + pickle_path)"], {}), "(pickle_path_folder + '/' + pickle_path)\n", (1499, 1539), True, 'import pandas as pd\n')] |
import tensorflow as tf
import numpy as np
import random
from model.transformer_utils import create_mel_padding_mask, create_mel_random_padding_mask, create_encoder_padding_mask
from utils.losses import weighted_sum_losses, masked_mean_absolute_error, ctc_loss, amsoftmax_loss
from data.text import TextToTokens
from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN
class ASREncoder(tf.keras.models.Model):
def __init__(self,
english_lexicon_path,
pinyin_lexicon_path,
mel_channels: int,
spk_count: int,
encoder_model_dimension: int,
encoder_num_heads: list,
encoder_maximum_position_encoding: int,
encoder_prenet_dimension: int,
dropout_rate: float,
encoder_dense_blocks: int,
encoder_attention_conv_filters: int = None,
encoder_attention_conv_kernel: int = None,
encoder_feed_forward_dimension: int = None,
debug=False,
**kwargs):
super(ASREncoder, self).__init__(**kwargs)
self.spk_count = spk_count
self.drop_n_heads = 0
self.text_pipeline = TextToTokens.default(english_lexicon_path,
pinyin_lexicon_path,
add_start_end=False)
self.vocab_size = self.text_pipeline.tokenizer.vocab_size
self.encoder_prenet = tf.keras.layers.Dense(encoder_prenet_dimension,
name='encoder_prenet')
self.encoder = SelfAttentionBlocksWithIN(model_dim=encoder_model_dimension,
dropout_rate=dropout_rate,
num_heads=encoder_num_heads,
feed_forward_dimension=encoder_feed_forward_dimension,
maximum_position_encoding=encoder_maximum_position_encoding,
dense_blocks=encoder_dense_blocks,
conv_filters=encoder_attention_conv_filters,
kernel_size=encoder_attention_conv_kernel,
conv_activation='relu',
name='Encoder')
self.classifier = tf.keras.layers.Dense(self.vocab_size)
self.amsoftmax_weights = tf.Variable(name='amsoftmax_weights',
dtype=tf.float32,
validate_shape=True,
initial_value=np.random.normal(size=[encoder_model_dimension, spk_count]),
trainable=True)
self.training_input_signature = [
tf.TensorSpec(shape=(None), dtype=tf.int32),
tf.TensorSpec(shape=(None, None, mel_channels), dtype=tf.float32),
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(None), dtype=tf.int32),
tf.TensorSpec(shape=(None), dtype=tf.int32)
]
self.forward_input_signature = [
tf.TensorSpec(shape=(None, None, mel_channels), dtype=tf.float32)
]
self.encoder_signature = [
tf.TensorSpec(shape=(None, None, mel_channels), dtype=tf.float32)
]
self.debug = debug
self._apply_all_signatures()
@property
def step(self):
return int(self.optimizer.iterations)
def _apply_signature(self, function, signature):
if self.debug:
return function
else:
return tf.function(input_signature=signature)(function)
def _apply_all_signatures(self):
self.forward = self._apply_signature(self._forward, self.forward_input_signature)
self.train_step = self._apply_signature(self._train_step, self.training_input_signature)
self.val_step = self._apply_signature(self._val_step, self.training_input_signature)
self.forward_encoder = self._apply_signature(self._forward_encoder, self.encoder_signature)
def _call_encoder(self, inputs, training):
min_index, padding_mask, random_padding_mask = create_mel_random_padding_mask(inputs)
fb_switch = tf.random.uniform(shape=[], maxval=1, seed=random.randint(0, 2147483647), dtype=tf.float32)
enc_input = self.encoder_prenet(inputs)
spk_output, enc_output, attn_weights = self.encoder(enc_input,
training=training,
fb_switch=fb_switch,
padding_mask=padding_mask,
min_index=min_index,
random_padding_mask=random_padding_mask,
drop_n_heads=self.drop_n_heads)
enc_output = self.classifier(enc_output,
training=training)
return spk_output, enc_output, padding_mask, attn_weights
def _forward(self, inp):
model_out = self.__call__(inputs=inp,
training=False)
return model_out
def _forward_encoder(self, inputs):
return self._call_encoder(inputs, training=False)
def _gta_forward(self, spk, mel_inp, phon_tar, mel_inp_len, phon_tar_len, training):
with tf.GradientTape() as tape:
model_out = self.__call__(inputs=mel_inp,
training=training)
phon_loss = tf.reduce_mean(self.loss[0](phon_tar, model_out['encoder_output'], phon_tar_len, mel_inp_len))
spk_loss = self.loss[1](spk, model_out['spk_output'], self.amsoftmax_weights, self.spk_count)
loss = self.loss_weights[0] * phon_loss + self.loss_weights[1] * spk_loss
model_out.update({'loss': loss})
model_out.update({'losses': {'spk_loss': spk_loss, 'phon_loss': phon_loss}})
return model_out, tape
def _train_step(self, spk, mel_inp, phon_tar, mel_inp_len, phon_tar_len):
model_out, tape = self._gta_forward(spk, mel_inp, phon_tar, mel_inp_len, phon_tar_len, training=True)
gradients = tape.gradient(model_out['loss'], self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
return model_out
def _val_step(self, spk, mel_inp, phon_tar, mel_inp_len, phon_tar_len):
model_out, _ = self._gta_forward(spk, mel_inp, phon_tar, mel_inp_len, phon_tar_len, training=False)
return model_out
def _compile(self, optimizer):
self.loss_weights = [1., 1.]
self.compile(loss=[ctc_loss,
amsoftmax_loss],
loss_weights=self.loss_weights,
optimizer=optimizer)
def call(self, inputs, training):
spk_output, encoder_output, padding_mask, encoder_attention = self._call_encoder(inputs, training)
model_out = {}
model_out.update({'encoder_attention': encoder_attention, 'encoder_output': encoder_output, 'text_mask': padding_mask})
model_out.update({'spk_output': spk_output})
return model_out
def predict(self, mel_inp):
out_dict = {}
spk_output, encoder_output, padding_mask, encoder_attention = self.forward_encoder(mel_inp)
out_dict.update({'encoder_attention': encoder_attention, 'encoder_output': encoder_output, 'text_mask': padding_mask})
out_dict.update({'spk_output': spk_output})
return out_dict
def set_constants(self,
learning_rate: float = None):
if learning_rate is not None:
self.optimizer.lr.assign(learning_rate)
def decode_phoneme(self, phoneme):
return self.text_pipeline.tokenizer.decode(phoneme)
class ForwardTransformer(tf.keras.models.Model):
def __init__(self,
english_lexicon_path,
pinyin_lexicon_path,
encoder_model_dimension: int,
decoder_model_dimension: int,
dropout_rate: float,
decoder_num_heads: list,
encoder_num_heads: list,
encoder_maximum_position_encoding: int,
decoder_maximum_position_encoding: int,
encoder_dense_blocks: int,
decoder_dense_blocks: int,
duration_conv_filters: list,
duration_kernel_size: int,
predictors_dropout: float,
mel_channels: int,
encoder_attention_conv_filters: list = None,
decoder_attention_conv_filters: list = None,
encoder_attention_conv_kernel: int = None,
decoder_attention_conv_kernel: int = None,
encoder_feed_forward_dimension: int = None,
decoder_feed_forward_dimension: int = None,
debug=False,
**kwargs):
super(ForwardTransformer, self).__init__(**kwargs)
self.text_pipeline = TextToTokens.default(english_lexicon_path,
pinyin_lexicon_path,
add_start_end=False)
self.mel_channels = mel_channels
self.encoder_prenet = tf.keras.layers.Embedding(self.text_pipeline.tokenizer.vocab_size,
encoder_model_dimension,
name='Embedding')
self.encoder = SelfAttentionBlocks(model_dim=encoder_model_dimension,
dropout_rate=dropout_rate,
num_heads=encoder_num_heads,
feed_forward_dimension=encoder_feed_forward_dimension,
maximum_position_encoding=encoder_maximum_position_encoding,
dense_blocks=encoder_dense_blocks,
conv_filters=encoder_attention_conv_filters,
kernel_size=encoder_attention_conv_kernel,
conv_activation='relu',
name='Encoder')
self.dur_pred = StatPredictor(conv_filters=duration_conv_filters,
kernel_size=duration_kernel_size,
conv_padding='same',
conv_activation='relu',
dense_activation='relu',
dropout_rate=predictors_dropout,
name='dur_pred')
self.expand = Expand(name='expand', model_dim=encoder_model_dimension)
self.speaker_fc = tf.keras.layers.Dense(encoder_model_dimension, name="speaker_fc")
self.decoder = SelfAttentionBlocks(model_dim=decoder_model_dimension,
dropout_rate=dropout_rate,
num_heads=decoder_num_heads,
feed_forward_dimension=decoder_feed_forward_dimension,
maximum_position_encoding=decoder_maximum_position_encoding,
dense_blocks=decoder_dense_blocks,
conv_filters=decoder_attention_conv_filters,
kernel_size=decoder_attention_conv_kernel,
conv_activation='relu',
name='Decoder')
self.out = tf.keras.layers.Dense(mel_channels)
self.training_input_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(None, None, mel_channels), dtype=tf.float32),
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(None, 512), dtype=tf.float32)
]
self.forward_input_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(), dtype=tf.float32),
]
self.forward_masked_input_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(), dtype=tf.float32),
tf.TensorSpec(shape=(None, None), dtype=tf.float32),
tf.TensorSpec(shape=(None, None), dtype=tf.float32),
]
self.debug = debug
self._apply_all_signatures()
def _apply_signature(self, function, signature):
if self.debug:
return function
else:
return tf.function(input_signature=signature)(function)
def _apply_all_signatures(self):
self.forward = self._apply_signature(self._forward, self.forward_input_signature)
self.train_step = self._apply_signature(self._train_step, self.training_input_signature)
self.val_step = self._apply_signature(self._val_step, self.training_input_signature)
def _train_step(self, input_sequence, target_sequence, target_durations, spk_emb):
target_durations = tf.expand_dims(target_durations, -1)
mel_len = int(tf.shape(target_sequence)[1])
with tf.GradientTape() as tape:
model_out = self.__call__(input_sequence, target_durations=target_durations, spk_emb=spk_emb, training=True)
loss, loss_vals = weighted_sum_losses((target_sequence,
target_durations),
(model_out['mel'][:, :mel_len, :],
model_out['duration']),
self.loss,
self.loss_weights)
model_out.update({'loss': loss})
model_out.update({'losses': {'mel': loss_vals[0], 'duration': loss_vals[1]}})
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
return model_out
def _compile(self, optimizer):
self.loss_weights = [1., 1.]
self.compile(loss=[masked_mean_absolute_error,
masked_mean_absolute_error],
loss_weights=self.loss_weights,
optimizer=optimizer)
def _val_step(self, input_sequence, target_sequence, target_durations, spk_emb):
target_durations = tf.expand_dims(target_durations, -1)
mel_len = int(tf.shape(target_sequence)[1])
model_out = self.__call__(input_sequence, target_durations=target_durations, spk_emb=spk_emb, training=False)
loss, loss_vals = weighted_sum_losses((target_sequence,
target_durations),
(model_out['mel'][:, :mel_len, :],
model_out['duration']),
self.loss,
self.loss_weights)
model_out.update({'loss': loss})
model_out.update({'losses': {'mel': loss_vals[0], 'duration': loss_vals[1]}})
return model_out
def _forward(self, input_sequence, spk_emb, durations_scalar):
return self.__call__(input_sequence, target_durations=None, spk_emb=spk_emb, training=False,
durations_scalar=durations_scalar, max_durations_mask=None,
min_durations_mask=None)
@property
def step(self):
return int(self.optimizer.iterations)
def call(self, x, target_durations, spk_emb, training, durations_scalar=1., max_durations_mask=None,
min_durations_mask=None):
encoder_padding_mask = create_encoder_padding_mask(x)
x = self.encoder_prenet(x)
x, encoder_attention = self.encoder(x, training=training, padding_mask=encoder_padding_mask, drop_n_heads=0)
padding_mask = 1. - tf.squeeze(encoder_padding_mask, axis=(1, 2))[:, :, None]
spk_emb = tf.math.softplus(self.speaker_fc(spk_emb))
spk_emb = tf.expand_dims(spk_emb, 1)
x = x + spk_emb #tf.tile(pitch_embed, [1, tf.shape(x)[1], 1])
durations = self.dur_pred(x, training=training, mask=padding_mask)
if target_durations is not None:
use_durations = target_durations
else:
use_durations = durations * durations_scalar
if max_durations_mask is not None:
use_durations = tf.math.minimum(use_durations, tf.expand_dims(max_durations_mask, -1))
if min_durations_mask is not None:
use_durations = tf.math.maximum(use_durations, tf.expand_dims(min_durations_mask, -1))
mels = self.expand(x, use_durations)
expanded_mask = create_mel_padding_mask(mels)
mels, decoder_attention = self.decoder(mels, training=training, padding_mask=expanded_mask, drop_n_heads=0)
mels = self.out(mels)
model_out = {'mel': mels,
'duration': durations,
'expanded_mask': expanded_mask,
'encoder_attention': encoder_attention,
'decoder_attention': decoder_attention}
return model_out
def set_constants(self, learning_rate: float = None, **kwargs):
if learning_rate is not None:
self.optimizer.lr.assign(learning_rate)
def encode_text(self, text):
return self.text_pipeline(text)
def predict(self, inp, spk_emb, encode=True, speed_regulator=1., phoneme_max_duration=None, phoneme_min_duration=None,
max_durations_mask=None, min_durations_mask=None, phoneme_durations=None):
if encode:
inp = self.encode_text(inp)
if len(tf.shape(inp)) < 2:
inp = tf.expand_dims(inp, 0)
inp = tf.cast(inp, tf.int32)
duration_scalar = tf.cast(1. / speed_regulator, tf.float32)
max_durations_mask = self._make_max_duration_mask(inp, phoneme_max_duration)
min_durations_mask = self._make_min_duration_mask(inp, phoneme_min_duration)
out = self.call(inp,
target_durations=phoneme_durations,
spk_emb=spk_emb,
training=False,
durations_scalar=duration_scalar,
max_durations_mask=max_durations_mask,
min_durations_mask=min_durations_mask)
out['mel'] = tf.squeeze(out['mel'])
return out
def _make_max_duration_mask(self, encoded_text, phoneme_max_duration):
np_text = np.array(encoded_text)
new_mask = np.ones(tf.shape(encoded_text)) * float('inf')
if phoneme_max_duration is not None:
for item in phoneme_max_duration.items():
phon_idx = self.text_pipeline.tokenizer(item[0])[0]
new_mask[np_text == phon_idx] = item[1]
return tf.cast(tf.convert_to_tensor(new_mask), tf.float32)
def _make_min_duration_mask(self, encoded_text, phoneme_min_duration):
np_text = np.array(encoded_text)
new_mask = np.zeros(tf.shape(encoded_text))
if phoneme_min_duration is not None:
for item in phoneme_min_duration.items():
phon_idx = self.text_pipeline.tokenizer(item[0])[0]
new_mask[np_text == phon_idx] = item[1]
return tf.cast(tf.convert_to_tensor(new_mask), tf.float32)
| [
"tensorflow.keras.layers.Dense",
"data.text.TextToTokens.default",
"numpy.random.normal",
"model.layers.Expand",
"random.randint",
"model.layers.SelfAttentionBlocksWithIN",
"model.layers.SelfAttentionBlocks",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.keras.layers.Embedding",
"model.tr... | [((1286, 1374), 'data.text.TextToTokens.default', 'TextToTokens.default', (['english_lexicon_path', 'pinyin_lexicon_path'], {'add_start_end': '(False)'}), '(english_lexicon_path, pinyin_lexicon_path,\n add_start_end=False)\n', (1306, 1374), False, 'from data.text import TextToTokens\n'), ((1569, 1639), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['encoder_prenet_dimension'], {'name': '"""encoder_prenet"""'}), "(encoder_prenet_dimension, name='encoder_prenet')\n", (1590, 1639), True, 'import tensorflow as tf\n'), ((1716, 2135), 'model.layers.SelfAttentionBlocksWithIN', 'SelfAttentionBlocksWithIN', ([], {'model_dim': 'encoder_model_dimension', 'dropout_rate': 'dropout_rate', 'num_heads': 'encoder_num_heads', 'feed_forward_dimension': 'encoder_feed_forward_dimension', 'maximum_position_encoding': 'encoder_maximum_position_encoding', 'dense_blocks': 'encoder_dense_blocks', 'conv_filters': 'encoder_attention_conv_filters', 'kernel_size': 'encoder_attention_conv_kernel', 'conv_activation': '"""relu"""', 'name': '"""Encoder"""'}), "(model_dim=encoder_model_dimension, dropout_rate=\n dropout_rate, num_heads=encoder_num_heads, feed_forward_dimension=\n encoder_feed_forward_dimension, maximum_position_encoding=\n encoder_maximum_position_encoding, dense_blocks=encoder_dense_blocks,\n conv_filters=encoder_attention_conv_filters, kernel_size=\n encoder_attention_conv_kernel, conv_activation='relu', name='Encoder')\n", (1741, 2135), False, 'from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN\n'), ((2570, 2608), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.vocab_size'], {}), '(self.vocab_size)\n', (2591, 2608), True, 'import tensorflow as tf\n'), ((4472, 4510), 'model.transformer_utils.create_mel_random_padding_mask', 'create_mel_random_padding_mask', (['inputs'], {}), '(inputs)\n', (4502, 4510), False, 'from model.transformer_utils import create_mel_padding_mask, 
create_mel_random_padding_mask, create_encoder_padding_mask\n'), ((9522, 9610), 'data.text.TextToTokens.default', 'TextToTokens.default', (['english_lexicon_path', 'pinyin_lexicon_path'], {'add_start_end': '(False)'}), '(english_lexicon_path, pinyin_lexicon_path,\n add_start_end=False)\n', (9542, 9610), False, 'from data.text import TextToTokens\n'), ((9780, 9893), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['self.text_pipeline.tokenizer.vocab_size', 'encoder_model_dimension'], {'name': '"""Embedding"""'}), "(self.text_pipeline.tokenizer.vocab_size,\n encoder_model_dimension, name='Embedding')\n", (9805, 9893), True, 'import tensorflow as tf\n'), ((10025, 10438), 'model.layers.SelfAttentionBlocks', 'SelfAttentionBlocks', ([], {'model_dim': 'encoder_model_dimension', 'dropout_rate': 'dropout_rate', 'num_heads': 'encoder_num_heads', 'feed_forward_dimension': 'encoder_feed_forward_dimension', 'maximum_position_encoding': 'encoder_maximum_position_encoding', 'dense_blocks': 'encoder_dense_blocks', 'conv_filters': 'encoder_attention_conv_filters', 'kernel_size': 'encoder_attention_conv_kernel', 'conv_activation': '"""relu"""', 'name': '"""Encoder"""'}), "(model_dim=encoder_model_dimension, dropout_rate=\n dropout_rate, num_heads=encoder_num_heads, feed_forward_dimension=\n encoder_feed_forward_dimension, maximum_position_encoding=\n encoder_maximum_position_encoding, dense_blocks=encoder_dense_blocks,\n conv_filters=encoder_attention_conv_filters, kernel_size=\n encoder_attention_conv_kernel, conv_activation='relu', name='Encoder')\n", (10044, 10438), False, 'from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN\n'), ((10826, 11038), 'model.layers.StatPredictor', 'StatPredictor', ([], {'conv_filters': 'duration_conv_filters', 'kernel_size': 'duration_kernel_size', 'conv_padding': '"""same"""', 'conv_activation': '"""relu"""', 'dense_activation': '"""relu"""', 'dropout_rate': 'predictors_dropout', 'name': 
'"""dur_pred"""'}), "(conv_filters=duration_conv_filters, kernel_size=\n duration_kernel_size, conv_padding='same', conv_activation='relu',\n dense_activation='relu', dropout_rate=predictors_dropout, name='dur_pred')\n", (10839, 11038), False, 'from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN\n'), ((11280, 11336), 'model.layers.Expand', 'Expand', ([], {'name': '"""expand"""', 'model_dim': 'encoder_model_dimension'}), "(name='expand', model_dim=encoder_model_dimension)\n", (11286, 11336), False, 'from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN\n'), ((11363, 11428), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['encoder_model_dimension'], {'name': '"""speaker_fc"""'}), "(encoder_model_dimension, name='speaker_fc')\n", (11384, 11428), True, 'import tensorflow as tf\n'), ((11452, 11865), 'model.layers.SelfAttentionBlocks', 'SelfAttentionBlocks', ([], {'model_dim': 'decoder_model_dimension', 'dropout_rate': 'dropout_rate', 'num_heads': 'decoder_num_heads', 'feed_forward_dimension': 'decoder_feed_forward_dimension', 'maximum_position_encoding': 'decoder_maximum_position_encoding', 'dense_blocks': 'decoder_dense_blocks', 'conv_filters': 'decoder_attention_conv_filters', 'kernel_size': 'decoder_attention_conv_kernel', 'conv_activation': '"""relu"""', 'name': '"""Decoder"""'}), "(model_dim=decoder_model_dimension, dropout_rate=\n dropout_rate, num_heads=decoder_num_heads, feed_forward_dimension=\n decoder_feed_forward_dimension, maximum_position_encoding=\n decoder_maximum_position_encoding, dense_blocks=decoder_dense_blocks,\n conv_filters=decoder_attention_conv_filters, kernel_size=\n decoder_attention_conv_kernel, conv_activation='relu', name='Decoder')\n", (11471, 11865), False, 'from model.layers import StatPredictor, Expand, SelfAttentionBlocks, SelfAttentionBlocksWithIN\n'), ((12248, 12283), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['mel_channels'], {}), '(mel_channels)\n', (12269, 12283), True, 'import tensorflow as tf\n'), ((13775, 13811), 'tensorflow.expand_dims', 'tf.expand_dims', (['target_durations', '(-1)'], {}), '(target_durations, -1)\n', (13789, 13811), True, 'import tensorflow as tf\n'), ((15152, 15188), 'tensorflow.expand_dims', 'tf.expand_dims', (['target_durations', '(-1)'], {}), '(target_durations, -1)\n', (15166, 15188), True, 'import tensorflow as tf\n'), ((15385, 15535), 'utils.losses.weighted_sum_losses', 'weighted_sum_losses', (['(target_sequence, target_durations)', "(model_out['mel'][:, :mel_len, :], model_out['duration'])", 'self.loss', 'self.loss_weights'], {}), "((target_sequence, target_durations), (model_out['mel'][\n :, :mel_len, :], model_out['duration']), self.loss, self.loss_weights)\n", (15404, 15535), False, 'from utils.losses import weighted_sum_losses, masked_mean_absolute_error, ctc_loss, amsoftmax_loss\n'), ((16496, 16526), 'model.transformer_utils.create_encoder_padding_mask', 'create_encoder_padding_mask', (['x'], {}), '(x)\n', (16523, 16526), False, 'from model.transformer_utils import create_mel_padding_mask, create_mel_random_padding_mask, create_encoder_padding_mask\n'), ((16844, 16870), 'tensorflow.expand_dims', 'tf.expand_dims', (['spk_emb', '(1)'], {}), '(spk_emb, 1)\n', (16858, 16870), True, 'import tensorflow as tf\n'), ((17528, 17557), 'model.transformer_utils.create_mel_padding_mask', 'create_mel_padding_mask', (['mels'], {}), '(mels)\n', (17551, 17557), False, 'from model.transformer_utils import create_mel_padding_mask, create_mel_random_padding_mask, create_encoder_padding_mask\n'), ((18591, 18613), 'tensorflow.cast', 'tf.cast', (['inp', 'tf.int32'], {}), '(inp, tf.int32)\n', (18598, 18613), True, 'import tensorflow as tf\n'), ((18640, 18682), 'tensorflow.cast', 'tf.cast', (['(1.0 / speed_regulator)', 'tf.float32'], {}), '(1.0 / speed_regulator, tf.float32)\n', (18647, 18682), True, 'import tensorflow as tf\n'), ((19227, 19249), 
'tensorflow.squeeze', 'tf.squeeze', (["out['mel']"], {}), "(out['mel'])\n", (19237, 19249), True, 'import tensorflow as tf\n'), ((19367, 19389), 'numpy.array', 'np.array', (['encoded_text'], {}), '(encoded_text)\n', (19375, 19389), True, 'import numpy as np\n'), ((19844, 19866), 'numpy.array', 'np.array', (['encoded_text'], {}), '(encoded_text)\n', (19852, 19866), True, 'import numpy as np\n'), ((3041, 3082), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'None', 'dtype': 'tf.int32'}), '(shape=None, dtype=tf.int32)\n', (3054, 3082), True, 'import tensorflow as tf\n'), ((3098, 3163), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None, mel_channels)', 'dtype': 'tf.float32'}), '(shape=(None, None, mel_channels), dtype=tf.float32)\n', (3111, 3163), True, 'import tensorflow as tf\n'), ((3177, 3226), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int32'}), '(shape=(None, None), dtype=tf.int32)\n', (3190, 3226), True, 'import tensorflow as tf\n'), ((3240, 3281), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'None', 'dtype': 'tf.int32'}), '(shape=None, dtype=tf.int32)\n', (3253, 3281), True, 'import tensorflow as tf\n'), ((3297, 3338), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'None', 'dtype': 'tf.int32'}), '(shape=None, dtype=tf.int32)\n', (3310, 3338), True, 'import tensorflow as tf\n'), ((3404, 3469), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None, mel_channels)', 'dtype': 'tf.float32'}), '(shape=(None, None, mel_channels), dtype=tf.float32)\n', (3417, 3469), True, 'import tensorflow as tf\n'), ((3527, 3592), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None, mel_channels)', 'dtype': 'tf.float32'}), '(shape=(None, None, mel_channels), dtype=tf.float32)\n', (3540, 3592), True, 'import tensorflow as tf\n'), ((5799, 5816), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5814, 5816), True, 'import tensorflow as tf\n'), 
((12338, 12387), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int32'}), '(shape=(None, None), dtype=tf.int32)\n', (12351, 12387), True, 'import tensorflow as tf\n'), ((12401, 12466), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None, mel_channels)', 'dtype': 'tf.float32'}), '(shape=(None, None, mel_channels), dtype=tf.float32)\n', (12414, 12466), True, 'import tensorflow as tf\n'), ((12480, 12529), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int32'}), '(shape=(None, None), dtype=tf.int32)\n', (12493, 12529), True, 'import tensorflow as tf\n'), ((12543, 12593), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, 512)', 'dtype': 'tf.float32'}), '(shape=(None, 512), dtype=tf.float32)\n', (12556, 12593), True, 'import tensorflow as tf\n'), ((12657, 12706), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int32'}), '(shape=(None, None), dtype=tf.int32)\n', (12670, 12706), True, 'import tensorflow as tf\n'), ((12720, 12761), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (12733, 12761), True, 'import tensorflow as tf\n'), ((12833, 12882), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.int32'}), '(shape=(None, None), dtype=tf.int32)\n', (12846, 12882), True, 'import tensorflow as tf\n'), ((12896, 12937), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (12909, 12937), True, 'import tensorflow as tf\n'), ((12951, 13002), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.float32'}), '(shape=(None, None), dtype=tf.float32)\n', (12964, 13002), True, 'import tensorflow as tf\n'), ((13016, 13067), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, None)', 'dtype': 'tf.float32'}), '(shape=(None, 
None), dtype=tf.float32)\n', (13029, 13067), True, 'import tensorflow as tf\n'), ((13877, 13894), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (13892, 13894), True, 'import tensorflow as tf\n'), ((14055, 14205), 'utils.losses.weighted_sum_losses', 'weighted_sum_losses', (['(target_sequence, target_durations)', "(model_out['mel'][:, :mel_len, :], model_out['duration'])", 'self.loss', 'self.loss_weights'], {}), "((target_sequence, target_durations), (model_out['mel'][\n :, :mel_len, :], model_out['duration']), self.loss, self.loss_weights)\n", (14074, 14205), False, 'from utils.losses import weighted_sum_losses, masked_mean_absolute_error, ctc_loss, amsoftmax_loss\n'), ((18554, 18576), 'tensorflow.expand_dims', 'tf.expand_dims', (['inp', '(0)'], {}), '(inp, 0)\n', (18568, 18576), True, 'import tensorflow as tf\n'), ((19702, 19732), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['new_mask'], {}), '(new_mask)\n', (19722, 19732), True, 'import tensorflow as tf\n'), ((19895, 19917), 'tensorflow.shape', 'tf.shape', (['encoded_text'], {}), '(encoded_text)\n', (19903, 19917), True, 'import tensorflow as tf\n'), ((20165, 20195), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['new_mask'], {}), '(new_mask)\n', (20185, 20195), True, 'import tensorflow as tf\n'), ((2866, 2925), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[encoder_model_dimension, spk_count]'}), '(size=[encoder_model_dimension, spk_count])\n', (2882, 2925), True, 'import numpy as np\n'), ((3894, 3932), 'tensorflow.function', 'tf.function', ([], {'input_signature': 'signature'}), '(input_signature=signature)\n', (3905, 3932), True, 'import tensorflow as tf\n'), ((4574, 4603), 'random.randint', 'random.randint', (['(0)', '(2147483647)'], {}), '(0, 2147483647)\n', (4588, 4603), False, 'import random\n'), ((13285, 13323), 'tensorflow.function', 'tf.function', ([], {'input_signature': 'signature'}), '(input_signature=signature)\n', (13296, 13323), True, 'import 
tensorflow as tf\n'), ((13834, 13859), 'tensorflow.shape', 'tf.shape', (['target_sequence'], {}), '(target_sequence)\n', (13842, 13859), True, 'import tensorflow as tf\n'), ((15211, 15236), 'tensorflow.shape', 'tf.shape', (['target_sequence'], {}), '(target_sequence)\n', (15219, 15236), True, 'import tensorflow as tf\n'), ((16707, 16752), 'tensorflow.squeeze', 'tf.squeeze', (['encoder_padding_mask'], {'axis': '(1, 2)'}), '(encoder_padding_mask, axis=(1, 2))\n', (16717, 16752), True, 'import tensorflow as tf\n'), ((17277, 17315), 'tensorflow.expand_dims', 'tf.expand_dims', (['max_durations_mask', '(-1)'], {}), '(max_durations_mask, -1)\n', (17291, 17315), True, 'import tensorflow as tf\n'), ((17419, 17457), 'tensorflow.expand_dims', 'tf.expand_dims', (['min_durations_mask', '(-1)'], {}), '(min_durations_mask, -1)\n', (17433, 17457), True, 'import tensorflow as tf\n'), ((18516, 18529), 'tensorflow.shape', 'tf.shape', (['inp'], {}), '(inp)\n', (18524, 18529), True, 'import tensorflow as tf\n'), ((19417, 19439), 'tensorflow.shape', 'tf.shape', (['encoded_text'], {}), '(encoded_text)\n', (19425, 19439), True, 'import tensorflow as tf\n')] |
import numpy as np
from datetime import datetime
from math import factorial
import os
import glob
import logging
import seaborn as sns
import pandas as pd
import warnings
# Module-wide logger shared with other modules via the common name
# "ssa_routine" - TODO: create a dedicated logger for this module
# (e.g. logging.getLogger(__name__)).
## Import this module only after creating/configuring the logger in
## `ssa_routine`, otherwise log records go to an unconfigured logger.
logger = logging.getLogger("ssa_routine")
## Helper functions
def get_transient_idx(data):
    """Naive method to find some index beyond which stationarity is assumed.

    Parameters
    ----------
    data : ndarray
        Trajectory samples; only ``data.shape[0]`` (number of samples)
        is used.

    Returns
    -------
    int
        Index after which the signal is treated as stationary: one fifth
        of the trajectory length, but never less than 10.

    TODO: Fit parabola to noisy data and find elbow.
    """
    # ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
    # documented replacement and truncates the float the same way.
    min_idx = int(max([10, data.shape[0] / 5]))
    return min_idx
def downsample_dataFrame(df, freq = 1000):
    """Return a DataFrame whose array-valued cells are randomly downsampled.

    Each cell of `df` is assumed to be an array addressable as
    ``df[column][index]``, all with the same shape as ``df.values[0][0]``.
    Every cell is thinned with one shared random boolean mask so that, on
    average, one out of `freq` samples survives and corresponding samples
    stay aligned across cells.

    Parameters
    ----------
    df : pd.DataFrame
        Frame whose cells are 1D arrays of identical shape.
    freq : int or float
        Average downsampling factor (each sample kept with probability
        ``1/freq``).

    Returns
    -------
    pd.DataFrame
        New frame with the same columns/index and masked cell arrays.
    """
    # One shared mask keeps samples aligned across all cells.
    # NOTE: the original code assigned this mask to the `freq` parameter
    # itself, which made its integer-stride branch (`type(freq) == int`)
    # unreachable; random masking is the effective behaviour and is kept.
    mask = np.random.choice([0, 1], df.values[0][0].shape,
                            p = [1-1/freq, 1/freq]).astype(bool)
    df_out = pd.DataFrame(columns = df.columns, index = df.index)
    for col in df.columns:
        for row in df.index:
            # `.at` avoids chained assignment, which can silently fail
            # under pandas copy-on-write.
            df_out.at[row, col] = df[col][row][mask]
    return df_out
def downsample_results(in_list, freq = None):
    """Downsample arrays and/or DataFrames in `in_list` by factor `freq`.

    All 2D arrays in the list are thinned along axis 0 with one shared
    random boolean mask (keep probability ``1/freq``); DataFrames are
    delegated to :func:`downsample_dataFrame` with the factor scaled by
    the row width, since their cells presumably hold whole trajectories
    -- TODO confirm against callers.

    Parameters
    ----------
    in_list : list
        2D ndarrays (rows x evolutions) and/or DataFrames with
        array-valued cells.
    freq : int, optional
        Downsampling factor. Falsy values (``None``/0) select a default
        that aims at roughly 2500 surviving rows.

    Returns
    -------
    list
        Downsampled counterparts of `in_list`, in the same order.
    """
    if not freq:
        # ``np.int`` was removed in NumPy 1.24; builtin ``int`` is equivalent.
        freq = max(int(in_list[0].shape[0]/2500), 1)
    # Shared row mask so all arrays stay aligned.  (The old
    # ``type(idx) == int`` stride branch was unreachable -- `idx` is
    # always a boolean array -- and has been dropped.)
    idx = np.random.choice([0, 1], in_list[0].shape[0],
                          p = [1-1/freq, 1/freq]).astype(bool)
    out_list = []
    freq_df = freq*in_list[0].shape[-1]
    for el in in_list:
        if type(el) == pd.DataFrame:
            out_list.append(downsample_dataFrame(el, freq_df))
        else:
            out_list.append(el[idx,:])
    assert len(in_list) == len(out_list)
    return out_list
def get_num_bins(data):
    """ Compute optimal number of bins according to Freedman-Diaconis rule

    The rule is applied for at least moderate copy numbers (more than 20
    unique values); otherwise the number of unique values is used as the
    number of bins.  The FD result is capped at 60 bins.

    Parameters
    --------------
    data: array-like, samples to histogram

    Returns
    --------------
    int, number of bins

    Notes
    ------------
    https://stats.stackexchange.com/a/862/194926
    """
    # Freedman-Diaconis bin width: 2 * IQR / cbrt(n).
    IQR = np.subtract.reduce(np.percentile(data, [75, 25]))
    num_points = len(data)
    bwidth = 2 * IQR * (num_points**(-1/3))
    unique_vals = np.unique(data)
    if unique_vals.shape[0] > 20:
        # For moderate and high copy numbers.  Promote divide warnings to
        # errors so a degenerate bwidth (e.g. IQR == 0) is caught below.
        # ``np.errstate`` restores the global numpy error state on exit,
        # unlike the previous un-restored ``np.seterr`` call.
        with warnings.catch_warnings(), np.errstate(divide="warn"):
            warnings.filterwarnings('error')
            try:
                num_bins = min(60,
                               np.arange(data.min(), data.max(), bwidth).shape[0])
            except (ValueError, Warning) as e:
                logger.warning(e)
                num_bins = min(60, unique_vals.shape[0])
                logger.info("Setting number of bins to {} (default).".format(num_bins))
    else:
        # For low copy numbers.
        num_bins = unique_vals.shape[0]
    # ``np.int`` was removed in NumPy 1.24; builtin ``int`` is equivalent.
    return int(num_bins)
def moving_average(data, win_size = None):
""" Calculates rolling mean on tuple/list of two arrays: values and times
Parameters
--------------
data: array, values to compute moving average on
win_size: size of window for moving average, default = len(array)/500
Returns
--------------
arrays_tuple: 2 ndarrays, rolling mean of flattened and masked inputs
"""
if win_size is None:
win_size = max(np.int(len(data)/500), 1)
ret = np.cumsum(data, dtype=np.float)
ret[win_size:] = ret[win_size:] - ret[:-win_size]
return ret[win_size - 1:] / win_size
def calculate_rolling_mean(arrays_tuple, win_size = None):
""" Calculates rolling mean on tuple/list of two arrays: values and times
Parameters
--------------
arrays_tuple: tuple or list of exactly two arrays
Qs: 2D array, values x evolutions
times: 2D array, values x evolutions
win_size: size of window for moving average, default = len(array)/500
Returns
--------------
arrays_tuple: 2 ndarrays, rolling mean of flattened, masked and sorted inputs
Notes:
---------
Thanks to Stan for tip.
"""
try:
Qs, times = arrays_tuple
except ValueError as e:
logger.error("Calculate mean expects exactly two arrays, values and times")
logger.error(e)
return None
# Flatten, remove nans, sort in time, calculate moving average
times = times.flatten()
mask_good = np.isfinite(times)
times = times[mask_good]
idxs = np.argsort(times)
times = np.sort(times)
times_out = moving_average(times, win_size)
# Flatten, remove nans, sort by time, calculate moving average
Qs = Qs.flatten()[mask_good]
Qs = Qs[idxs]
Qs_out = moving_average(Qs, win_size)
assert Qs_out.shape == times_out.shape
return Qs_out, times_out
def empirical_cdf(data, yscale = 1.):
"""Calculates scaled emprical cumulative distribution
The function is optionally scaled down to maximum of corresponding pdf/histogram.
"""
x = np.sort(data)
y = 1. * np.arange(len(data)) / (len(data) - 1) * yscale
return x, y
def add_empirical_cdf(data, ax, yscale = 1.):
"""Adds empirical CDF to existing axes.
Horizontal orientation of axes is assumed
"""
x, y = empirical_cdf(data, yscale)
bx = ax.twiny()
bx.plot(y, x, 'g--', alpha = 0.5, lw = 1,
label = "emp. CDF")
bx.tick_params('x', colors='g')
bx.set_xlabel('CDF', color='g')
bx.grid(False)
def add_poisson(ax, lmbd, Qs):
"""Adds Poisson PDF to existing axes.
Horizontal orientation of axes is assumed
@param lambda: int, Event rate parameter
@param ax: plt axis to plot on (assume horzontal)
@Qs: ndarray, data that is supposed to obey Poisson distr.
"""
# Naively trim off transient evolution at the beginning of the simulation
min_idx = get_transient_idx(Qs)
xx = np.arange(0, np.nanmax(Qs[min_idx:,:].flatten())+1)
poiss = [(np.exp(-lmbd) * lmbd ** x) / factorial(x) for x in xx]
ax.plot(poiss, xx, 'bo-', alpha = 0.5, lw = 2,
label = "Poiss. PDF")
ax.legend()
def add_gaussian(ax, mn, vr, Qs, orientation = "horizontal"):
"""Adds Gaussian PDF to existing axes.
Horizontal orientation of axes is assumed
Parameters
-----------
ax: axis, horizontal orientation assumed
mn: float, mean
vr: float, variance
Qs: ndarray: 2d array of species numbers over time
"""
# Naively trim off transient evolution at the beginning of the simulation
min_idx = get_transient_idx(Qs)
max_val = np.nanmax(Qs[min_idx:,:])+1
x = np.linspace(0, max_val, num = 100)
gauss = np.exp(-(x-mn)**2/(2*vr))/np.sqrt(2*np.pi*vr)
if orientation == "horizontal":
ax.plot(gauss, x, 'b-',alpha=0.5, lw = 2, label='Gauss. PDF')
else:
ax.plot(x, gauss, 'b-',alpha=0.5, lw = 2, label='Gauss. PDF')
ax.legend()
def add_kde_estimate(ax, data):
"""Estimates distribution of the data using kernel density estimate
Gaussian kernel with `scott` method of bandwith determination is used.
Parameters
----------
ax: axis, axis object to plot on
data: array, array of copy numbers for single species over all times
Notes
------------
# TODO: make it computationally cheaper
# TODO: call directly statsmodels.(...).fit and use `adjust` to change bw
"""
min_idx = get_transient_idx(data)
data = data[min_idx:, :].flatten()
data = data[np.isfinite(data)]
kde_pars = {"kernel": "gau", "bw": "scott",
"cumulative": False}
nbins = min(get_num_bins(data), data.max()-data.min())
kde_pars.update({"gridsize": nbins})
sns.distplot( data, ax = ax,
hist = False, kde = True, rug = False,
vertical = True, kde_kws = kde_pars, label = "KDE")
ax.legend()
def filter_constant(vars):
"""Create T/F index for data with variance vars, F if variance constant.
"""
return vars != 0
def save_figure(fig, name = "ssa"):
""" Save figure to PDF
"""
fname = "results~/plots/{}_{}.pdf".format(name,
datetime.now().strftime('%Y%m%d-%H%M%S'))
kwargs = { "dpi": 400,
"orientation": "portrait",
"papertype": "a2",
"format": "pdf",
#"frameon": False,
#"bbox_inches": "tight"
}
try:
fig.savefig(fname, **kwargs)
except Exception as e:
try:
logger.error(e)
except NameError: # if logger not defined
print(e)
raise e
def save_data(kwds, suffix=""):
"""Saves data to numpy .npz format
Parameters
--------
kwds: dict, data as key-value pairs ('name': [values])
suffix: string, text to append to filename
"""
time_str = datetime.now().strftime('%Y%m%d-%H%M%S')
parent_dir = "results~/data/"
fname = parent_dir + "out_" + time_str + suffix
try:
np.savez_compressed(fname, **kwds)
logger.info("Data saved to file {}.".format(fname))
except Exception as e:
logger.error(e)
raise e
def load_data( files = ["Qs", "times", "tot_props"],
dir = "results~/data/*.npz"):
"""Loads the newest *.npz file in directory
Parameters
-----------
files: list of char vectors, names of variables we expect to load
dir: string, s`./path/to/files/.*npz`
"""
file_list = glob.glob(dir)
latest_file = max(file_list, key=os.path.getctime)
try:
retObj = np.load(latest_file)
logger.info("Loaded data from {}.".format(latest_file))
except Exception as e:
logger.error(e)
raise e
assert retObj.files == files
retVals = tuple([retObj[f] for f in files])
return retVals
def create_result_folders(parent = "./results~",
dirs = ["data", "logs", "plots"]):
"""Creates folder structure to store simulation results (data, plots, logs)
"""
if not os.path.isdir(parent):
os.mkdir(parent)
for di in dirs:
dir_path = os.path.join(parent, di)
if os.path.isdir(dir_path):
continue
else:
logger.info("Creating {} in {}".format(di, parent))
os.mkdir(dir_path)
def pad_paths(Qs, times, props):
"""Pad paths to common maxima; length and reshape
Pad paths to same length given by the longest path. Fill other with nan.
Additionally, stack to a 3D array of shape (time x n_species x n_paths). Acts only along first dimension.
Parameters
-----------
Qs: list of arrays (ticks x species x 1), sample paths
times: list of arrays (ticks x 1), tick times
how: str, one of "index" or "time" - how to find common length for later concatenation
Currently only "index" implemented
Returns
-----------
Qs: ndarray, shape (min_ticks x species x paths)
times: ndarray, shape (min_ticks x paths)
"""
if type(Qs) == list:
n_species = Qs[0].shape[1]
n_paths = len(Qs)
else:
logger.error( "Can trim paths only on list of arrays."+
"The provided is {}.".format(type(Qs)))
return Qs, times, props
max_length = max([q.shape[0] for q in Qs])
out_list = []
for i, var in enumerate([Qs, times, props]):
ax = var[0].ndim - 1
var=[np.pad(v.astype(np.float64),((0, l-v.shape[0]),)+((0, 0),)*(v.ndim-1),
mode="constant", constant_values = np.nan)
for v,l in zip(var, [max_length]*len(var))]
out_list.append(np.concatenate(var, axis = ax))
return tuple(out_list)
def trim_paths(Qs, times, props):
"""Trim paths to common minimal length and reshape
Trims paths to same length given by the shortest path. Discards values beyond.
Additionally, stack to a 3D array of shape (time x n_species x n_paths). Acts only along first dimension.
Parameters
-----------
Qs: list of arrays (ticks x species x 1), sample paths
times: list of arrays (ticks x 1), tick times
Returns
-----------
Qs: ndarray, shape (min_ticks x species x paths)
times: ndarray, shape (min_ticks x paths)
Notes
----------
By trimming paths in length in indices, you obtain paths in different
length on time. This may impact your statistics afterwards (e.g. \tau
distribution) because you effectively discard periods with different
behaviour. Padding with nans is prefered.
"""
if type(Qs) == list:
n_species = Qs[0].shape[1]
n_paths = len(Qs)
else:
logger.error( "Can trim paths only on list of arrays."+
"The provided is {}.".format(type(Qs)))
return Qs, times, props
min_length = min([q.shape[0] for q in Qs])
#min_length = np.int(min_length / 20)
Qs = [q[-min_length:, :, :] for q in Qs]
times = [t[-min_length:, :] for t in times]
props = [p[-min_length:, :] for p in props]
Qs = np.dstack(Qs)
times = np.squeeze(np.dstack(times))
props = np.squeeze(np.dstack(props))
return Qs, times, props
| [
"os.mkdir",
"numpy.load",
"numpy.argsort",
"numpy.savez_compressed",
"numpy.exp",
"glob.glob",
"os.path.join",
"numpy.unique",
"pandas.DataFrame",
"numpy.isfinite",
"numpy.cumsum",
"numpy.int",
"warnings.catch_warnings",
"numpy.linspace",
"numpy.random.choice",
"datetime.datetime.now",... | [((310, 342), 'logging.getLogger', 'logging.getLogger', (['"""ssa_routine"""'], {}), "('ssa_routine')\n", (327, 342), False, 'import logging\n'), ((1020, 1068), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns', 'index': 'df.index'}), '(columns=df.columns, index=df.index)\n', (1032, 1068), True, 'import pandas as pd\n'), ((2512, 2527), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (2521, 2527), True, 'import numpy as np\n'), ((3191, 3207), 'numpy.int', 'np.int', (['num_bins'], {}), '(num_bins)\n', (3197, 3207), True, 'import numpy as np\n'), ((3707, 3738), 'numpy.cumsum', 'np.cumsum', (['data'], {'dtype': 'np.float'}), '(data, dtype=np.float)\n', (3716, 3738), True, 'import numpy as np\n'), ((4732, 4750), 'numpy.isfinite', 'np.isfinite', (['times'], {}), '(times)\n', (4743, 4750), True, 'import numpy as np\n'), ((4791, 4808), 'numpy.argsort', 'np.argsort', (['times'], {}), '(times)\n', (4801, 4808), True, 'import numpy as np\n'), ((4821, 4835), 'numpy.sort', 'np.sort', (['times'], {}), '(times)\n', (4828, 4835), True, 'import numpy as np\n'), ((5347, 5360), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (5354, 5360), True, 'import numpy as np\n'), ((6991, 7023), 'numpy.linspace', 'np.linspace', (['(0)', 'max_val'], {'num': '(100)'}), '(0, max_val, num=100)\n', (7002, 7023), True, 'import numpy as np\n'), ((8093, 8201), 'seaborn.distplot', 'sns.distplot', (['data'], {'ax': 'ax', 'hist': '(False)', 'kde': '(True)', 'rug': '(False)', 'vertical': '(True)', 'kde_kws': 'kde_pars', 'label': '"""KDE"""'}), "(data, ax=ax, hist=False, kde=True, rug=False, vertical=True,\n kde_kws=kde_pars, label='KDE')\n", (8105, 8201), True, 'import seaborn as sns\n'), ((9932, 9946), 'glob.glob', 'glob.glob', (['dir'], {}), '(dir)\n', (9941, 9946), False, 'import glob\n'), ((13570, 13583), 'numpy.dstack', 'np.dstack', (['Qs'], {}), '(Qs)\n', (13579, 13583), True, 'import numpy as np\n'), ((2392, 2421), 'numpy.percentile', 
'np.percentile', (['data', '[75, 25]'], {}), '(data, [75, 25])\n', (2405, 2421), True, 'import numpy as np\n'), ((6955, 6981), 'numpy.nanmax', 'np.nanmax', (['Qs[min_idx:, :]'], {}), '(Qs[min_idx:, :])\n', (6964, 6981), True, 'import numpy as np\n'), ((7038, 7071), 'numpy.exp', 'np.exp', (['(-(x - mn) ** 2 / (2 * vr))'], {}), '(-(x - mn) ** 2 / (2 * vr))\n', (7044, 7071), True, 'import numpy as np\n'), ((7064, 7087), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * vr)'], {}), '(2 * np.pi * vr)\n', (7071, 7087), True, 'import numpy as np\n'), ((7876, 7893), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (7887, 7893), True, 'import numpy as np\n'), ((9440, 9474), 'numpy.savez_compressed', 'np.savez_compressed', (['fname'], {}), '(fname, **kwds)\n', (9459, 9474), True, 'import numpy as np\n'), ((10028, 10048), 'numpy.load', 'np.load', (['latest_file'], {}), '(latest_file)\n', (10035, 10048), True, 'import numpy as np\n'), ((10501, 10522), 'os.path.isdir', 'os.path.isdir', (['parent'], {}), '(parent)\n', (10514, 10522), False, 'import os\n'), ((10532, 10548), 'os.mkdir', 'os.mkdir', (['parent'], {}), '(parent)\n', (10540, 10548), False, 'import os\n'), ((10588, 10612), 'os.path.join', 'os.path.join', (['parent', 'di'], {}), '(parent, di)\n', (10600, 10612), False, 'import os\n'), ((10624, 10647), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (10637, 10647), False, 'import os\n'), ((13607, 13623), 'numpy.dstack', 'np.dstack', (['times'], {}), '(times)\n', (13616, 13623), True, 'import numpy as np\n'), ((13648, 13664), 'numpy.dstack', 'np.dstack', (['props'], {}), '(props)\n', (13657, 13664), True, 'import numpy as np\n'), ((865, 940), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', 'df.values[0][0].shape'], {'p': '[1 - 1 / freq, 1 / freq]'}), '([0, 1], df.values[0][0].shape, p=[1 - 1 / freq, 1 / freq])\n', (881, 940), True, 'import numpy as np\n'), ((1455, 1489), 'numpy.int', 'np.int', (['(in_list[0].shape[0] / 2500)'], {}), 
'(in_list[0].shape[0] / 2500)\n', (1461, 1489), True, 'import numpy as np\n'), ((1506, 1579), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', 'in_list[0].shape[0]'], {'p': '[1 - 1 / freq, 1 / freq]'}), '([0, 1], in_list[0].shape[0], p=[1 - 1 / freq, 1 / freq])\n', (1522, 1579), True, 'import numpy as np\n'), ((2620, 2645), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2643, 2645), False, 'import warnings\n'), ((2659, 2691), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (2682, 2691), False, 'import warnings\n'), ((2704, 2728), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""warn"""'}), "(divide='warn')\n", (2713, 2728), True, 'import numpy as np\n'), ((6345, 6357), 'math.factorial', 'factorial', (['x'], {}), '(x)\n', (6354, 6357), False, 'from math import factorial\n'), ((9296, 9310), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9308, 9310), False, 'from datetime import datetime\n'), ((10760, 10778), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (10768, 10778), False, 'import os\n'), ((12122, 12150), 'numpy.concatenate', 'np.concatenate', (['var'], {'axis': 'ax'}), '(var, axis=ax)\n', (12136, 12150), True, 'import numpy as np\n'), ((6316, 6329), 'numpy.exp', 'np.exp', (['(-lmbd)'], {}), '(-lmbd)\n', (6322, 6329), True, 'import numpy as np\n'), ((8578, 8592), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8590, 8592), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""This file contains functions which can be used to model the behavior of
flocs based on the chemical interactions of clay, coagulant, and humic acid.
"""
######################### Imports #########################
import numpy as np
from aide_design import utility as ut
from aide_design.units import unit_registry as u
from aide_design import physchem as pc
u.enable_contexts('chem')
##################### Class Definition #####################
class Material:
    """A particulate material characterized by size, density, and
    molecular weight.

    Attributes are stored exactly as passed; units follow the module
    convention (diameter in m, density in kg/m^3, molecular weight in
    kg/mole).
    """
    def __init__(self, name, diameter, density, molecWeight):
        # Record the defining physical properties verbatim.
        self.name = name
        self.Diameter = diameter
        self.Density = density
        self.MolecWeight = molecWeight
class Chemical(Material):
    """A Material that additionally describes its precipitate.

    If the chemical precipitates as itself (``Precipitate == name``) the
    precipitate properties mirror the chemical's own; otherwise only the
    precipitate name is recorded and ``define_Precip`` must be called to
    fill in the remaining precipitate attributes.
    """
    def __init__(self, name, diameter, density, molecWeight, Precipitate,
                 AluminumMPM=None):
        Material.__init__(self, name, diameter, density, molecWeight)
        self.AluminumMPM = AluminumMPM
        self.Precip = Precipitate
        if self.Precip != self.name:
            # Precipitate is a different species; its properties are
            # supplied later via define_Precip.
            self.PrecipName = Precipitate
        else:
            # The chemical is its own precipitate: share its properties.
            self.PrecipName = name
            self.PrecipDiameter = diameter
            self.PrecipDensity = density
            self.PrecipMolecWeight = molecWeight
            self.PrecipAluminumMPM = AluminumMPM
    def define_Precip(self, diameter, density, molecweight, alumMPM):
        """Set the precipitate's physical properties after construction."""
        self.PrecipDiameter = diameter
        self.PrecipDensity = density
        self.PrecipMolecWeight = molecweight
        self.PrecipAluminumMPM = alumMPM
################## Material Definitions ##################
# Material instances used throughout the module.
# Arguments: name, diameter in m, density in kg/m^3, molecular weight in
# kg/mole (None where the molecular weight is not used downstream).
Clay = Material('Clay', 7 * 10**-6, 2650, None)
PACl = Chemical('PACl', (90 * u.nm).to(u.m).magnitude, 1138, 1.039,
                'PACl', AluminumMPM=13)
Alum = Chemical('Alum', (70 * u.nm).to(u.m).magnitude, 2420, 0.59921,
                'AlOH3', AluminumMPM=2)
# Alum precipitates as AlOH3, whose properties are set separately here.
Alum.define_Precip((70 * u.nm).to(u.m).magnitude, 2420, 0.078, 1)
HumicAcid = Chemical('Humic Acid', 72 * 10**-9, 1780, None, 'Humic Acid')
################### Necessary Constants ###################
# Fractal dimension, based on data from published in Environmental Engineering
# Science, "Fractal Models for Floc Density, Sedimentation Velocity, and Floc
# Volume Fraction for High Peclet Number Reactors" by <NAME> and
# <NAME> (2015).
DIM_FRACTAL = 2.3
# Ratio of clay platelet height to diameter (clay modeled as a cylinder).
RATIO_HEIGHT_DIAM = 0.1
# Ratio between inner viscous length scale and Kolmogorov length scale.
RATIO_KOLMOGOROV = 50
# Shape factor for drag on flocs used in terminal velocity equation.
PHI_FLOC = 45/24
# The Avogadro constant (particles per mole).
NUM_AVOGADRO = 6.0221415 * 10**23
# Molecular weight of aluminum in kg/mole.
MOLEC_WEIGHT_ALUMINUM = 0.027
######################## Functions ########################
@u.wraps(u.kg/u.m**3, None, False)
def dens_alum_nanocluster(coag):
    """Return the density of aluminum within a coagulant nanocluster.

    Useful for converting a dissolved aluminum concentration into an
    equivalent nanocluster volume.
    """
    # Precipitate density scaled by the aluminum mass fraction of the
    # precipitate molecule.
    return (coag.PrecipDensity * MOLEC_WEIGHT_ALUMINUM
            * coag.PrecipAluminumMPM / coag.PrecipMolecWeight)
@u.wraps(u.kg/u.m**3, [u.kg/u.m**3, u.degK], False)
def dens_pacl_solution(ConcAluminum, temp):
    """Return the density of a PACl solution at the given temperature.

    Empirical relation from the Fall 2013 Stock Tank Mixing report:
    https://confluence.cornell.edu/download/attachments/137953883/20131213_Research_Report.pdf
    """
    pacl_contribution = (0.492 * ConcAluminum * PACl.MolecWeight
                         / (PACl.AluminumMPM * MOLEC_WEIGHT_ALUMINUM))
    return pacl_contribution + pc.density_water(temp).magnitude
@u.wraps(u.kg/u.m**3, [u.kg/u.m**3, None], False)
def conc_precipitate(ConcAluminum, coag):
    """Return the coagulant precipitate concentration for an aluminum dose.

    NOTE: this value differs from the equivalent MathCAD function
    beginning at the third decimal place; most downstream functions call
    this one and inherit that discrepancy. This is known and accepted.
    """
    moles_al = ConcAluminum / MOLEC_WEIGHT_ALUMINUM
    mass_per_mole_al = coag.PrecipMolecWeight / coag.PrecipAluminumMPM
    return moles_al * mass_per_mole_al
@u.wraps(u.kg/u.m**3, [u.kg/u.m**3, u.kg/u.m**3, None], False)
def conc_floc(ConcAluminum, concClay, coag):
    """Return total floc concentration: precipitate plus clay."""
    precip = conc_precipitate(ConcAluminum, coag).magnitude
    return precip + concClay
@u.wraps(u.mol/u.m**3, u.kg/u.m**3, False)
def moles_aluminum(ConcAluminum):
    """Return the molar concentration of aluminum for a mass concentration."""
    return ConcAluminum / MOLEC_WEIGHT_ALUMINUM
@u.wraps(u.m, u.kg/u.m**3, False)
def sep_dist_aluminum(ConcAluminum):
    """Return the mean separation distance between aluminum molecules."""
    # Cube root of the volume per molecule.
    number_density = NUM_AVOGADRO * moles_aluminum(ConcAluminum).magnitude
    return (1 / number_density) ** (1/3)
@u.wraps(1/u.m**3, [u.kg/u.m**3, u.m], False)
def num_clay(ConcClay, material):
    """Return the number concentration of clay particles in suspension."""
    # Mass of one (spherical) clay particle: density * pi*d^3/6.
    particle_mass = (material.Density * np.pi * material.Diameter**3) / 6
    return ConcClay / particle_mass
@u.wraps(u.m, [u.kg/u.m**3, u.m], False)
def sep_dist_clay(ConcClay, material):
    """Return the mean separation distance between clay particles."""
    sphere_volume = (np.pi * material.Diameter ** 3) / 6
    return ((material.Density / ConcClay) * sphere_volume) ** (1/3)
@u.wraps(1/u.m**3, [u.kg/u.m**3, None], False)
def num_nanoclusters(ConcAluminum, coag):
    """Return the number of Aluminum nanoclusters.

    NOTE(review): the denominator uses pi * d**3 without the /6 factor of
    a sphere's volume (compare num_clay) -- confirm whether this is
    intentional before relying on the absolute value.
    """
    return (ConcAluminum / (dens_alum_nanocluster(coag).magnitude
                            * np.pi * coag.Diameter**3))
@u.wraps(None, [u.kg/u.m**3, u.kg/u.m**3, None, None], False)
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
    """Return the initial floc volume fraction (precipitate + clay)."""
    precip_vol = (conc_precipitate(ConcAluminum, coag).magnitude
                  / coag.PrecipDensity)
    clay_vol = ConcClay / material.Density
    return precip_vol + clay_vol
####################### p functions #######################
def p(C, Cprime):
    """Return the negative base-10 log of the ratio C/Cprime (a "pC")."""
    ratio = C / Cprime
    return -np.log10(ratio)
def invp(pC, Cprime):
    """Invert p(): recover C from a pC value and the reference Cprime."""
    return Cprime * 10**-pC
#################### Fractal functions ####################
@u.wraps(u.m, [u.dimensionless, u.m, u.dimensionless], False)
def diam_fractal(DIM_FRACTAL, DiamInitial, NumCol):
    """Return the fractal floc diameter after NumCol doubling collisions."""
    growth_factor = 2**(NumCol / DIM_FRACTAL)
    return DiamInitial * growth_factor
@u.wraps(None, [u.dimensionless, None, u.m], False)
def num_coll_reqd(DIM_FRACTAL, material, DiamTarget):
    """Return the number of doubling collisions needed to grow a primary
    particle into a floc of diameter DiamTarget.
    """
    size_ratio = DiamTarget / material.Diameter
    return DIM_FRACTAL * np.log2(size_ratio)
@u.wraps(u.m, [u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.dimensionless, u.m], False)
def sep_dist_floc(ConcAluminum, ConcClay, coag, material,
                  DIM_FRACTAL, DiamTarget):
    """Return the separation distance between flocs of size DiamTarget."""
    # Packing term from the initial floc volume fraction.
    packing = (np.pi
               / (6 * frac_vol_floc_initial(ConcAluminum, ConcClay,
                                            coag, material)))**(1/3)
    # Fractal growth term relative to the primary particle size.
    growth = (DiamTarget / material.Diameter)**(DIM_FRACTAL / 3)
    return material.Diameter * packing * growth
@u.wraps(u.m, [u.kg/u.m**3, u.kg/u.m**3, None, u.dimensionless,
               None, u.m], False)
def frac_vol_floc(ConcAluminum, ConcClay, coag, DIM_FRACTAL,
                  material, DiamTarget):
    """Return the floc volume fraction for flocs of size DiamTarget."""
    initial = frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material)
    return initial * (DiamTarget / material.Diameter)**(3-DIM_FRACTAL)
@u.wraps(u.kg/u.m**3, [u.kg/u.m**3, u.kg/u.m**3, None, None], False)
def dens_floc_init(ConcAluminum, ConcClay, coag, material):
    """Return the density of the initial floc.

    The initial floc is made primarily of the primary colloid and
    nanoglobs.
    """
    total_conc = conc_floc(ConcAluminum, ConcClay, coag).magnitude
    vol_fraction = frac_vol_floc_initial(ConcAluminum, ConcClay,
                                         coag, material)
    return total_conc / vol_fraction
#################### Flocculation Model ####################
@u.wraps(None, u.m, False)
def ratio_clay_sphere(RatioHeightDiameter):
    """Return the clay surface-area-to-volume ratio, normalized by that of
    a sphere of equal volume.
    """
    return ((1/2 + RatioHeightDiameter)
            * (2 / (3*RatioHeightDiameter))**(2/3))
@u.wraps(None, [u.kg/u.m**3, None, u.m, u.dimensionless], False)
def ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter):
    """Return the fraction of total surface area contributed by clay.

    Total surface area combines the clay and the reactor wall; this
    estimates how much coagulant actually attaches to the clay.
    """
    # Relative contribution of the tube wall to the total surface area.
    wall_term = (2 * material.Diameter
                 / (3 * DiamTube * ratio_clay_sphere(RatioHeightDiameter)
                    * (ConcClay / material.Density)))
    return 1 / (1 + wall_term)
@u.wraps(None, [u.kg/u.m**3, u.kg/u.m**3, None, None,
                u.m, u.dimensionless], False)
def gamma_coag(ConcClay, ConcAluminum, coag, material,
               DiamTube, RatioHeightDiameter):
    """Return the fractional coverage of clay by coagulant nanoglobs.

    Accounts for coagulant lost to the flocculator walls and assumes a
    Poisson distribution of nanoglob hits on the clay, so coverage
    approaches 1 only gradually as the coagulant dose increases.
    """
    coag_vol_term = (-frac_vol_floc_initial(ConcAluminum, 0, coag, material)
                     * material.Diameter)
    clay_vol_term = (frac_vol_floc_initial(0, ConcClay, coag, material)
                     * coag.Diameter)
    area_ratio = (ratio_area_clay_total(ConcClay, material,
                                        DiamTube, RatioHeightDiameter)
                  / ratio_clay_sphere(RatioHeightDiameter))
    exponent = (coag_vol_term / clay_vol_term) * (1 / np.pi) * area_ratio
    return 1 - np.exp(exponent)
@u.wraps(None, [u.kg/u.m**3, u.kg/u.m**3, None, None], False)
@ut.list_handler
def gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag):
    """Return the fraction of the coagulant coated with humic acid.

    Parameters
    ----------
    ConcAl : float
        Concentration of aluminum in solution
    ConcNatOrgMat : float
        Concentration of natural organic matter in solution
    NatOrgMat : ?
    coag : ?

    Returns
    -------
    float
        Fraction of the coagulant coated with humic acid, capped at 1.
    """
    mass_ratio = ConcNatOrgMat / conc_precipitate(ConcAl, coag).magnitude
    density_ratio = coag.Density / NatOrgMat.Density
    diam_ratio = coag.Diameter / (4 * NatOrgMat.Diameter)
    return min(mass_ratio * density_ratio * diam_ratio, 1)
@u.wraps(None, [u.m, u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3, None,
                None, None, u.dimensionless], False)
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
              coag, material, RatioHeightDiameter):
    """Return the surface-area fraction covered with coagulant that is NOT
    coated with humic acid.

    Parameters
    ----------
    DiamTube : float
        Diameter of the dosing tube
    ConcClay : float
        Concentration of clay in solution
    ConcAl : float
        Concentration of aluminum in solution
    ConcNatOrgMat : float
        Concentration of natural organic matter in solution
    NatOrgMat : ?
    coag : ?
    material : ?
    RatioHeightDiameter : float
        Ratio between inner viscous length scale and Kolmogorov length scale

    Returns
    -------
    float
        Fraction of the surface area covered with humic-acid-free coagulant.
    """
    coated = gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
                        RatioHeightDiameter)
    humic_free = 1 - gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat,
                                              NatOrgMat, coag)
    return coated * humic_free
@u.wraps(None, [u.m, u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3,
                None, None, None, u.dimensionless], False)
def alpha_pacl_clay(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                    NatOrgMat, coag, material, RatioHeightDiameter):
    """Return the attachment efficiency for PACl-to-bare-clay collisions."""
    pacl_fraction = pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                              NatOrgMat, coag, material, RatioHeightDiameter)
    uncoated = 1 - gamma_coag(ConcClay, ConcAl, coag, material,
                              DiamTube, RatioHeightDiameter)
    return 2 * (pacl_fraction * uncoated)
@u.wraps(None, [u.m, u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3,
                None, None, None, u.dimensionless], False)
def alpha_pacl_pacl(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                    NatOrgMat, coag, material, RatioHeightDiameter):
    """Return the attachment efficiency for PACl-to-PACl collisions."""
    pacl_fraction = pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                              NatOrgMat, coag, material, RatioHeightDiameter)
    return pacl_fraction ** 2
@u.wraps(None, [u.m, u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3,
                None, None, None, u.dimensionless], False)
def alpha_pacl_nat_org_mat(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                           NatOrgMat, coag, material, RatioHeightDiameter):
    """Return the attachment efficiency for PACl-to-humic-coated-coagulant
    collisions.
    """
    pacl_fraction = pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                              NatOrgMat, coag, material, RatioHeightDiameter)
    coated = gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
                        RatioHeightDiameter)
    humic = gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag)
    return 2 * pacl_fraction * coated * humic
@u.wraps(None, [u.m, u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3,
                None, None, None, u.dimensionless], False)
def alpha(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
          NatOrgMat, coag, material, RatioHeightDiameter):
    """Return the overall attachment efficiency: the sum of the three
    pairwise collision efficiencies.
    """
    nom_term = alpha_pacl_nat_org_mat(DiamTube, ConcClay, ConcAl,
                                      ConcNatOrgMat, NatOrgMat, coag,
                                      material, RatioHeightDiameter)
    pacl_term_ = alpha_pacl_pacl(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                                 NatOrgMat, coag, material,
                                 RatioHeightDiameter)
    clay_term = alpha_pacl_clay(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                                NatOrgMat, coag, material,
                                RatioHeightDiameter)
    return nom_term + pacl_term_ + clay_term
@u.wraps(None, [u.W/u.kg, u.degK, u.s, u.m,
                u.kg/u.m**3, u.kg/u.m**3, u.kg/u.m**3, None,
                None, None, u.dimensionless, u.dimensionless], False)
def pc_viscous(EnergyDis, Temp, Time, DiamTube,
               ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
               coag, material, FittingParam, RatioHeightDiameter):
    """Return the predicted pC* (log particle removal) for viscous-range
    flocculation.
    """
    # Characteristic velocity gradient sqrt(epsilon / nu).
    velocity_grad = np.sqrt(EnergyDis
                            / (pc.viscosity_kinematic(Temp).magnitude))
    attach = alpha(DiamTube, ConcClay, ConcAl, ConcNatOrgMat,
                   NatOrgMat, coag, material, RatioHeightDiameter)
    diam_ratio_sq = (material.Diameter
                     / sep_dist_clay(ConcClay, material).magnitude) ** 2
    collisions = ((2/3) * np.pi * FittingParam * Time
                  * velocity_grad
                  * attach
                  * (np.pi/6)**(2/3)
                  * diam_ratio_sq)
    return (3/2) * np.log10(collisions + 1)
@u.wraps(u.kg/u.m**3, [u.kg/u.m**3, u.kg/u.m**3, u.dimensionless, u.m,
                       None, None, u.degK], False)
def dens_floc(ConcAl, ConcClay, DIM_FRACTAL, DiamTarget, coag, material, Temp):
    """Return floc density as a function of floc size.

    Fractal flocs become less dense as they grow; density approaches that
    of water for large flocs.
    """
    rho_water = pc.density_water(Temp).magnitude
    excess_density = (dens_floc_init(ConcAl, ConcClay, coag,
                                     material).magnitude
                      - rho_water)
    shrinkage = (material.Diameter / DiamTarget)**(3 - DIM_FRACTAL)
    return excess_density * shrinkage + rho_water
@u.wraps(u.m/u.s, [u.kg/u.m**3, u.kg/u.m**3, None, None, u.dimensionless,
                    u.m, u.degK], False)
def vel_term_floc(ConcAl, ConcClay, coag, material, DIM_FRACTAL,
                  DiamTarget, Temp):
    """Return the terminal (settling) velocity of a floc of size DiamTarget."""
    rho_water = pc.density_water(Temp).magnitude
    # Stokes-like prefactor with the floc shape factor PHI_FLOC.
    stokes = ((pc.gravity.magnitude * material.Diameter**2)
              / (18 * PHI_FLOC * pc.viscosity_kinematic(Temp).magnitude))
    rel_density = ((dens_floc_init(ConcAl, ConcClay, coag,
                                   material).magnitude
                    - rho_water)
                   / rho_water)
    growth = (DiamTarget / material.Diameter) ** (DIM_FRACTAL - 1)
    return stokes * rel_density * growth
@u.wraps(u.m, [u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.dimensionless, u.m/u.s, u.degK], False)
def diam_floc_vel_term(ConcAl, ConcClay, coag, material,
                       DIM_FRACTAL, VelTerm, Temp):
    """Return the floc diameter that settles at terminal velocity VelTerm.

    Inverse of vel_term_floc with respect to floc diameter.
    """
    rho_water = pc.density_water(Temp).magnitude
    drag = ((18 * VelTerm * PHI_FLOC
             * pc.viscosity_kinematic(Temp).magnitude)
            / (pc.gravity.magnitude * material.Diameter**2))
    buoyancy = (rho_water
                / (dens_floc_init(ConcAl, ConcClay, coag,
                                  material).magnitude
                   - rho_water))
    return material.Diameter * (drag * buoyancy) ** (1 / (DIM_FRACTAL - 1))
@u.wraps(u.s, [u.W/u.kg, u.degK, u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.m, u.m, u.dimensionless, u.dimensionless],
         False)
def time_col_laminar(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
                     DiamTarget, DiamTube, DIM_FRACTAL, RatioHeightDiameter):
    """Calculate single collision time for laminar flow mediated collisions.

    Calculated as a function of floc size (DiamTarget). The collision
    time scales with the fluid shear time scale sqrt(nu/epsilon), the
    inverse 2/3 power of the initial floc volume fraction, and a fractal
    growth factor; it is divided by the clay coverage fraction
    (gamma_coag) since only coated collisions are successful.
    """
    return (((1/6) * ((6/np.pi)**(1/3))
             * frac_vol_floc_initial(ConcAl, ConcClay, coag, material)**(-2/3)
             * (pc.viscosity_kinematic(Temp).magnitude / EnergyDis)**(1/2)
             * (DiamTarget / material.Diameter)**(2*DIM_FRACTAL/3 - 2)
             )  # End of the numerator
            / (gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
                          RatioHeightDiameter)
               )  # End of the denominator
            )
@u.wraps(u.s, [u.W/u.kg, u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.m, u.dimensionless], False)
def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material,
                       DiamTarget, DIM_FRACTAL):
    """Return the single collision time for turbulent-flow mediated
    collisions, as a function of floc size.
    """
    phi0 = frac_vol_floc_initial(ConcAl, ConcClay, coag, material)
    growth = (DiamTarget / material.Diameter)**((8*(DIM_FRACTAL-3)) / 9)
    return ((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3)
            * DiamTarget**(2/3) * phi0**(-8/9) * growth)
########### Kolmogorov and viscous length scales ###########
@u.wraps(u.m, [u.W/u.kg, u.degK], False)
def eta_kolmogorov(EnergyDis, Temp):
    """Return the Kolmogorov length scale (nu^3 / epsilon)^(1/4)."""
    nu = pc.viscosity_kinematic(Temp).magnitude
    return ((nu**3) / EnergyDis) ** (1/4)
@u.wraps(u.m, [u.W/u.kg, u.degK], False)
def lambda_vel(EnergyDis, Temp):
    """Return the inner viscous length scale (a fixed multiple of the
    Kolmogorov length scale).
    """
    eta = eta_kolmogorov(EnergyDis, Temp).magnitude
    return RATIO_KOLMOGOROV * eta
@u.wraps(u.m, [u.W/u.kg, u.degK, u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.dimensionless], False)
def diam_kolmogorov(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
                    DIM_FRACTAL):
    """Return the floc size whose separation distance equals the
    Kolmogorov length scale.
    """
    length_ratio = (eta_kolmogorov(EnergyDis, Temp).magnitude
                    / material.Diameter)
    packing = ((6 * frac_vol_floc_initial(ConcAl, ConcClay, coag, material))
               / np.pi)**(1/3)
    return material.Diameter * (length_ratio * packing)**(3 / DIM_FRACTAL)
@u.wraps(u.m, [u.W/u.kg, u.degK, u.kg/u.m**3, u.kg/u.m**3, None, None,
               u.dimensionless], False)
def diam_vel(EnergyDis, Temp, ConcAl, ConcClay, coag, material, DIM_FRACTAL):
    """Return the floc size whose separation distance equals the inner
    viscous length scale.
    """
    length_ratio = (lambda_vel(EnergyDis, Temp).magnitude
                    / material.Diameter)
    packing = ((6 * frac_vol_floc_initial(ConcAl, ConcClay, coag, material))
               / np.pi)**(1/3)
    return material.Diameter * (length_ratio * packing)**(3/DIM_FRACTAL)
@u.wraps(u.m, u.W/u.kg, False)
def diam_floc_max(epsMax):
    """Return the maximum stable floc size for an energy dissipation rate.

    Based on <NAME>'s tube-flocculator work relating floc size to the
    average energy dissipation rate. Extension to turbulent flow is not
    settled: flocs may break up primarily under viscous shear, in which
    case these results apply, but temporal and spatial variability in the
    turbulent energy dissipation rate would still need to be accounted
    for. The 95 micrometer factor assumes the ratio of max to average
    energy dissipation rate in laminar flow is roughly 2.
    """
    inv_cube_root = 1 / (epsMax)**(1/3)
    return 9.5 * 10**-5 * inv_cube_root
@u.wraps(u.W/u.kg, u.m, False)
def ener_dis_diam_floc(Diam):
"""Return max energy dissipation rate as a function of max floc diameter.
This equation is under suspicion.
"""
return (9.5 * 10**-5 / Diam) ** 3
##### Velocity gradient in tubing for lab scale laminar flow flocculators #####
@u.wraps(1/u.s, [u.m**3/u.s, u.m], False)
def g_straight(PlantFlow, IDTube):
return 64 * PlantFlow / (3 * np.pi * IDTube**3)
@u.wraps(None, [u.m**3/u.s, u.m, u.degK], False)
def reynolds_rapid_mix(PlantFlow, IDTube, Temp):
return (4 * PlantFlow / (np.pi * IDTube
* pc.viscosity_kinematic(Temp).magnitude))
@u.wraps(None, [u.m**3/u.s, u.m, u.m, u.degK], False)
def dean_number(PlantFlow, IDTube, RadiusCoil, Temp):
"""Return the Dean Number.
The Dean Number is a dimensionless parameter that is the unfortunate
combination of Reynolds and tube curvature. It would have been better
to keep the Reynolds number and define a simple dimensionless geometric
parameter.
"""
return (reynolds_rapid_mix(PlantFlow, IDTube, Temp)
* (IDTube / (2 * RadiusCoil))**(1/2)
)
@u.wraps(1/u.s, [u.m**3/u.s, u.m, u.m, u.degK], False)
def g_coil(FlowPlant, IDTube, RadiusCoil, Temp):
"""We need a reference for this.
Karen's thesis likely has this equation and the reference.
"""
return (g_straight(FlowPlant, IDTube).magnitude
* (1 + 0.033 *
np.log10(dean_number(FlowPlant, IDTube, RadiusCoil, Temp)) ** 4
) ** (1/2)
)
@u.wraps(u.s, [u.m, u.m, u.m**3/u.s], False)
def time_res_tube(IDTube, LengthTube, FlowPlant):
"""Calculate residence time in the flocculator."""
return LengthTube * np.pi * (IDTube**2 / 4) / FlowPlant
@u.wraps(None, [u.m**3/u.s, u.m, u.m, u.m, u.degK], False)
def g_time_res(FlowPlant, IDTube, RadiusCoil, LengthTube, Temp):
"""G Residence Time calculated for a coiled tube flocculator."""
return (g_coil(FlowPlant, IDTube, RadiusCoil, Temp).magnitude
* time_res_tube(IDTube, LengthTube, FlowPlant).magnitude
)
| [
"aide_design.units.unit_registry.wraps",
"numpy.log2",
"numpy.log10",
"aide_design.physchem.density_water",
"aide_design.units.unit_registry.enable_contexts",
"aide_design.physchem.viscosity_kinematic"
] | [((388, 413), 'aide_design.units.unit_registry.enable_contexts', 'u.enable_contexts', (['"""chem"""'], {}), "('chem')\n", (405, 413), True, 'from aide_design.units import unit_registry as u\n'), ((2811, 2848), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', 'None', '(False)'], {}), '(u.kg / u.m ** 3, None, False)\n', (2818, 2848), True, 'from aide_design.units import unit_registry as u\n'), ((3196, 3254), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', '[u.kg / u.m ** 3, u.degK]', '(False)'], {}), '(u.kg / u.m ** 3, [u.kg / u.m ** 3, u.degK], False)\n', (3203, 3254), True, 'from aide_design.units import unit_registry as u\n'), ((3666, 3722), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', '[u.kg / u.m ** 3, None]', '(False)'], {}), '(u.kg / u.m ** 3, [u.kg / u.m ** 3, None], False)\n', (3673, 3722), True, 'from aide_design.units import unit_registry as u\n'), ((4316, 4389), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None]', '(False)'], {}), '(u.kg / u.m ** 3, [u.kg / u.m ** 3, u.kg / u.m ** 3, None], False)\n', (4323, 4389), True, 'from aide_design.units import unit_registry as u\n'), ((4571, 4620), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.mol / u.m ** 3)', '(u.kg / u.m ** 3)', '(False)'], {}), '(u.mol / u.m ** 3, u.kg / u.m ** 3, False)\n', (4578, 4620), True, 'from aide_design.units import unit_registry as u\n'), ((4771, 4807), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '(u.kg / u.m ** 3)', '(False)'], {}), '(u.m, u.kg / u.m ** 3, False)\n', (4778, 4807), True, 'from aide_design.units import unit_registry as u\n'), ((4993, 5045), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(1 / u.m ** 3)', '[u.kg / u.m ** 3, u.m]', '(False)'], {}), '(1 / u.m ** 3, [u.kg / u.m ** 3, u.m], False)\n', (5000, 5045), True, 'from aide_design.units import unit_registry as u\n'), ((5214, 
5257), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.kg / u.m ** 3, u.m]', '(False)'], {}), '(u.m, [u.kg / u.m ** 3, u.m], False)\n', (5221, 5257), True, 'from aide_design.units import unit_registry as u\n'), ((5474, 5527), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(1 / u.m ** 3)', '[u.kg / u.m ** 3, None]', '(False)'], {}), '(1 / u.m ** 3, [u.kg / u.m ** 3, None], False)\n', (5481, 5527), True, 'from aide_design.units import unit_registry as u\n'), ((5742, 5810), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None]', '(False)'], {}), '(None, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None], False)\n', (5749, 5810), True, 'from aide_design.units import unit_registry as u\n'), ((6279, 6339), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.dimensionless, u.m, u.dimensionless]', '(False)'], {}), '(u.m, [u.dimensionless, u.m, u.dimensionless], False)\n', (6286, 6339), True, 'from aide_design.units import unit_registry as u\n'), ((6520, 6570), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.dimensionless, None, u.m]', '(False)'], {}), '(None, [u.dimensionless, None, u.m], False)\n', (6527, 6570), True, 'from aide_design.units import unit_registry as u\n'), ((6862, 6955), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.dimensionless, u.m]', '(False)'], {}), '(u.m, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.dimensionless,\n u.m], False)\n', (6869, 6955), True, 'from aide_design.units import unit_registry as u\n'), ((7427, 7520), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, u.dimensionless, None, u.m]', '(False)'], {}), '(u.m, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, u.dimensionless, None,\n u.m], False)\n', (7434, 7520), True, 'from aide_design.units import unit_registry as u\n'), ((7824, 7903), 
'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None]', '(False)'], {}), '(u.kg / u.m ** 3, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None], False)\n', (7831, 7903), True, 'from aide_design.units import unit_registry as u\n'), ((8297, 8322), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', 'u.m', '(False)'], {}), '(None, u.m, False)\n', (8304, 8322), True, 'from aide_design.units import unit_registry as u\n'), ((8575, 8642), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.kg / u.m ** 3, None, u.m, u.dimensionless]', '(False)'], {}), '(None, [u.kg / u.m ** 3, None, u.m, u.dimensionless], False)\n', (8582, 8642), True, 'from aide_design.units import unit_registry as u\n'), ((9243, 9338), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.m, u.dimensionless]', '(False)'], {}), '(None, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.m, u.\n dimensionless], False)\n', (9250, 9338), True, 'from aide_design.units import unit_registry as u\n'), ((10331, 10399), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None]', '(False)'], {}), '(None, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None], False)\n', (10338, 10399), True, 'from aide_design.units import unit_registry as u\n'), ((11107, 11224), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, None,\n u.dimensionless]', '(False)'], {}), '(None, [u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, None, u.dimensionless], False)\n', (11114, 11224), True, 'from aide_design.units import unit_registry as u\n'), ((12338, 12455), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, None,\n u.dimensionless]', '(False)'], {}), '(None, [u.m, u.kg / 
u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, None, u.dimensionless], False)\n', (12345, 12455), True, 'from aide_design.units import unit_registry as u\n'), ((12897, 13014), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, None,\n u.dimensionless]', '(False)'], {}), '(None, [u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, None, u.dimensionless], False)\n', (12904, 13014), True, 'from aide_design.units import unit_registry as u\n'), ((13328, 13445), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, None,\n u.dimensionless]', '(False)'], {}), '(None, [u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, None, u.dimensionless], False)\n', (13335, 13445), True, 'from aide_design.units import unit_registry as u\n'), ((13968, 14085), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, None,\n u.dimensionless]', '(False)'], {}), '(None, [u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, None, u.dimensionless], False)\n', (13975, 14085), True, 'from aide_design.units import unit_registry as u\n'), ((14724, 14887), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.W / u.kg, u.degK, u.s, u.m, u.kg / u.m ** 3, u.kg / u.m ** 3, u.kg / u.m **\n 3, None, None, None, u.dimensionless, u.dimensionless]', '(False)'], {}), '(None, [u.W / u.kg, u.degK, u.s, u.m, u.kg / u.m ** 3, u.kg / u.m **\n 3, u.kg / u.m ** 3, None, None, None, u.dimensionless, u.dimensionless],\n False)\n', (14731, 14887), True, 'from aide_design.units import unit_registry as u\n'), ((15668, 15781), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.kg / u.m ** 3)', '[u.kg / u.m ** 3, u.kg / u.m ** 3, u.dimensionless, u.m, None, None, u.degK]', '(False)'], {}), '(u.kg / u.m ** 3, 
[u.kg / u.m ** 3, u.kg / u.m ** 3, u.dimensionless,\n u.m, None, None, u.degK], False)\n', (15675, 15781), True, 'from aide_design.units import unit_registry as u\n'), ((16202, 16310), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.m / u.s)', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.dimensionless, u.m, u.degK]', '(False)'], {}), '(u.m / u.s, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.\n dimensionless, u.m, u.degK], False)\n', (16209, 16310), True, 'from aide_design.units import unit_registry as u\n'), ((16915, 17022), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.dimensionless, u.m / u.s,\n u.degK]', '(False)'], {}), '(u.m, [u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.dimensionless,\n u.m / u.s, u.degK], False)\n', (16922, 17022), True, 'from aide_design.units import unit_registry as u\n'), ((17864, 17999), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.s', '[u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.m, u.m,\n u.dimensionless, u.dimensionless]', '(False)'], {}), '(u.s, [u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, u.m, u.m, u.dimensionless, u.dimensionless], False)\n', (17871, 17999), True, 'from aide_design.units import unit_registry as u\n'), ((18770, 18875), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.s', '[u.W / u.kg, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.m, u.dimensionless\n ]', '(False)'], {}), '(u.s, [u.W / u.kg, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.m,\n u.dimensionless], False)\n', (18777, 18875), True, 'from aide_design.units import unit_registry as u\n'), ((19428, 19469), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.W / u.kg, u.degK]', '(False)'], {}), '(u.m, [u.W / u.kg, u.degK], False)\n', (19435, 19469), True, 'from aide_design.units import unit_registry as u\n'), ((19586, 19627), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.W / 
u.kg, u.degK]', '(False)'], {}), '(u.m, [u.W / u.kg, u.degK], False)\n', (19593, 19627), True, 'from aide_design.units import unit_registry as u\n'), ((19734, 19842), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.\n dimensionless]', '(False)'], {}), '(u.m, [u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, u.dimensionless], False)\n', (19741, 19842), True, 'from aide_design.units import unit_registry as u\n'), ((20384, 20492), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '[u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None, None, u.\n dimensionless]', '(False)'], {}), '(u.m, [u.W / u.kg, u.degK, u.kg / u.m ** 3, u.kg / u.m ** 3, None,\n None, u.dimensionless], False)\n', (20391, 20492), True, 'from aide_design.units import unit_registry as u\n'), ((20861, 20892), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.m', '(u.W / u.kg)', '(False)'], {}), '(u.m, u.W / u.kg, False)\n', (20868, 20892), True, 'from aide_design.units import unit_registry as u\n'), ((21816, 21847), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(u.W / u.kg)', 'u.m', '(False)'], {}), '(u.W / u.kg, u.m, False)\n', (21823, 21847), True, 'from aide_design.units import unit_registry as u\n'), ((22122, 22168), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(1 / u.s)', '[u.m ** 3 / u.s, u.m]', '(False)'], {}), '(1 / u.s, [u.m ** 3 / u.s, u.m], False)\n', (22129, 22168), True, 'from aide_design.units import unit_registry as u\n'), ((22253, 22304), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m ** 3 / u.s, u.m, u.degK]', '(False)'], {}), '(None, [u.m ** 3 / u.s, u.m, u.degK], False)\n', (22260, 22304), True, 'from aide_design.units import unit_registry as u\n'), ((22469, 22525), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m ** 3 / u.s, u.m, u.m, u.degK]', '(False)'], {}), '(None, [u.m ** 3 / u.s, u.m, u.m, 
u.degK], False)\n', (22476, 22525), True, 'from aide_design.units import unit_registry as u\n'), ((22976, 23035), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['(1 / u.s)', '[u.m ** 3 / u.s, u.m, u.m, u.degK]', '(False)'], {}), '(1 / u.s, [u.m ** 3 / u.s, u.m, u.m, u.degK], False)\n', (22983, 23035), True, 'from aide_design.units import unit_registry as u\n'), ((23390, 23437), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['u.s', '[u.m, u.m, u.m ** 3 / u.s]', '(False)'], {}), '(u.s, [u.m, u.m, u.m ** 3 / u.s], False)\n', (23397, 23437), True, 'from aide_design.units import unit_registry as u\n'), ((23602, 23663), 'aide_design.units.unit_registry.wraps', 'u.wraps', (['None', '[u.m ** 3 / u.s, u.m, u.m, u.m, u.degK]', '(False)'], {}), '(None, [u.m ** 3 / u.s, u.m, u.m, u.m, u.degK], False)\n', (23609, 23663), True, 'from aide_design.units import unit_registry as u\n'), ((6145, 6165), 'numpy.log10', 'np.log10', (['(C / Cprime)'], {}), '(C / Cprime)\n', (6153, 6165), True, 'import numpy as np\n'), ((6821, 6860), 'numpy.log2', 'np.log2', (['(DiamTarget / material.Diameter)'], {}), '(DiamTarget / material.Diameter)\n', (6828, 6860), True, 'import numpy as np\n'), ((15944, 15966), 'aide_design.physchem.density_water', 'pc.density_water', (['Temp'], {}), '(Temp)\n', (15960, 15966), True, 'from aide_design import physchem as pc\n'), ((16480, 16502), 'aide_design.physchem.density_water', 'pc.density_water', (['Temp'], {}), '(Temp)\n', (16496, 16502), True, 'from aide_design import physchem as pc\n'), ((17221, 17243), 'aide_design.physchem.density_water', 'pc.density_water', (['Temp'], {}), '(Temp)\n', (17237, 17243), True, 'from aide_design import physchem as pc\n'), ((3616, 3638), 'aide_design.physchem.density_water', 'pc.density_water', (['temp'], {}), '(temp)\n', (3632, 3638), True, 'from aide_design import physchem as pc\n'), ((22425, 22453), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (22447, 22453), 
True, 'from aide_design import physchem as pc\n'), ((19518, 19546), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (19540, 19546), True, 'from aide_design import physchem as pc\n'), ((16604, 16632), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (16626, 16632), True, 'from aide_design import physchem as pc\n'), ((17341, 17369), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (17363, 17369), True, 'from aide_design import physchem as pc\n'), ((18424, 18452), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (18446, 18452), True, 'from aide_design import physchem as pc\n'), ((15206, 15234), 'aide_design.physchem.viscosity_kinematic', 'pc.viscosity_kinematic', (['Temp'], {}), '(Temp)\n', (15228, 15234), True, 'from aide_design import physchem as pc\n')] |
import time
from threading import Thread
import numpy as np
import gym
from gym import spaces, logger
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use(['fivethirtyeight', 'seaborn-whitegrid', 'seaborn-ticks'])
from matplotlib import rcParams
from matplotlib import gridspec
import matplotlib.ticker as plticker
rcParams['axes.facecolor'] = 'FFFFFF'
rcParams['savefig.facecolor'] = 'FFFFFF'
rcParams['figure.facecolor'] = 'FFFFFF'
rcParams['xtick.direction'] = 'in'
rcParams['ytick.direction'] = 'in'
rcParams.update({'figure.autolayout': True})
class KerbalLanderSimpleEnvironment(gym.Env):
def __init__(self):
super(KerbalLanderSimpleEnvironment, self).__init__()
self.thrust = 60000.
self.vesselMass = 2355.
self.fuelMass = 2041.
self.fuelBurnRate = 17.70
self.vel = []
self.alt = []
self.acc = []
self.throt = []
self.totalRewards = []
self.reward_range = (-200, 1000)
# Observations are:
# Current throttle (float)
# Altitude above surface (float)
# Velocity vector (3 floats)
self.maxVelocity = 1.5E3
self.maxAltitude = 1E5 # training
# self.maxAltitude = 1E5 # Testing
self.lowObs = np.array([
0., # Acc,
-10., # Altitude,
-self.maxVelocity # vz
])
self.highObs = np.array([
# self.thrust / self.vesselMass, # Acc,
self.fuelMass, # FuelMass
self.maxAltitude, # Altitude,
self.maxVelocity # vz
])
self.observation_space = spaces.Box(self.lowObs, self.highObs, dtype = np.float32)
self.lowAct = np.array([
0., # throttle
])
self.highAct = np.array([
1., # throttle
])
self.action_space = spaces.Box(self.lowAct, self.highAct, dtype = np.float32)
self.init()
@property
def mass(self):
return self.vesselMass + self.fuelMass
def init(self):
self.stepCounter = 0
# Correlated random initial conditions?
rnd = np.random.uniform(0, 1)
self.altitude = rnd * 50000
self.velocity = -1000 if np.random.uniform(0, 1) < 0.1 else - max(4 * rnd * 500, 50)
# self.altitude = 36000 # Test
# self.velocity = -650 # Test
self.throttle = 0.0
self.acceleration = self.g(self.altitude)
self.fuelMass = 2041.
self.episodeReward = 0
def g(self, altitude):
m = 9.7599066E20
r = 200000
G = 6.67430E-11
return -G * m / (r + altitude) ** 2
def thrustAcc(self, throttle):
maxAcc = self.thrust / self.mass
return throttle * maxAcc
def exploded(self):
if self.altitude < 1.0 and self.velocity < -10: # Was -20
return True
else:
return False
def terminate(self):
termDown = self.altitude < 1.0
termHigh = self.altitude > self.maxAltitude
termFuel = self.fuelMass < 1E-4
self.terminated = termDown or termHigh or termFuel
return self.terminated
def reset(self):
self.init()
return self._nextObservation()
def mapRange(self, inLow, inHigh, outLow, outHigh, val):
# From https://math.stackexchange.com/questions/914823/shift-numbers-into-a-different-range
return outLow + ((outHigh - outLow) / (inHigh - inLow)) * (val - inLow)
def forward(self):
dt = 0.1 # seconds
itr = 5
for i in range(itr):
# g acts in -ve direction
newAcceleration = self.thrustAcc(self.throttle) + self.g(self.altitude)
newAltitude = self.altitude + self.velocity * dt + 0.5 * self.acceleration * dt * dt
newVelocity = self.velocity + 0.5 * (self.acceleration + newAcceleration) * dt
self.altitude = newAltitude
self.velocity = newVelocity
self.acceleration = newAcceleration
self.fuelMass = self.fuelMass - dt * self.fuelBurnRate * self.throttle
def _nextObservation(self):
thrustAcc = self.throttle * (self.thrust / self.mass)
obs = np.array([
# self.mapRange(self.lowObs[0], self.highObs[0], -1.0, 1.0, thrustAcc),
self.mapRange(self.lowObs[0], self.highObs[0], -1.0, 1.0, self.fuelMass),
self.mapRange(self.lowObs[1], self.highObs[1], -1.0, 1.0, self.altitude),
self.mapRange(self.lowObs[2], self.highObs[2], -1.0, 1.0, self.velocity),
])
# Sometimes (1, 3), sometimes (,3) - not sure why
return obs.flatten()
def _takeAction(self, action):
# Output actions are sigmoid + OU noise, so clip then scale
# Clipping should be okay, assuming that variance of OU noise is small compared to action range
throttle = np.clip(action, 0, 1)
self.throttle = throttle[0] # Single element vector
def calculateReward(self):
reward = 0
if not self.exploded() and self.altitude < 1.0: # landed
print('LANDED!')
reward += 2.
if self.altitude < 1.0:
reward += 1 * np.exp(-0.01 * np.abs(self.velocity))
print('Hit @', self.velocity, reward)
# New
reward -= 0.1 * np.exp(-0.001 * np.abs(self.fuelMass))
print('Fuel @', self.fuelMass, reward)
if self.fuelMass < 1E-4:
print('No fuel')
reward -= 1.
if self.altitude > self.maxAltitude:
print('Max alt')
reward -= 1.
return np.array(reward)
def makeEpisodePlot(self):
fig, axs = plt.subplots(2, 2, figsize=(26, 26))
plt.subplots_adjust(wspace = 0.25)
axs[0][0].plot(self.alt, linewidth = 2.0)
axs[0][0].set_xlabel('Steps', fontsize = 32)
axs[0][0].set_ylabel('Altitude', fontsize = 32)
axs[0][0].tick_params(labelsize = 24)
axs[0][1].plot(self.vel, linewidth = 2.0)
axs[0][1].set_xlabel('Steps', fontsize = 32)
axs[0][1].set_ylabel('Velocity', fontsize = 32)
axs[0][1].tick_params(labelsize = 24)
axs[1][0].plot(self.acc, linewidth = 2.0)
axs[1][0].set_xlabel('Steps', fontsize = 32)
axs[1][0].set_ylabel('Acceleration', fontsize = 32)
axs[1][0].tick_params(labelsize = 24)
axs[1][1].plot(self.throt, linewidth = 2.0)
axs[1][1].set_xlabel('Steps', fontsize = 32)
axs[1][1].set_ylabel('Throttle', fontsize = 32)
axs[1][1].tick_params(labelsize = 24)
plt.savefig('episode.pdf')
plt.clf()
def makeRewardPlot(self):
plt.plot(self.totalRewards, lw = 0.25, alpha = 1.0)
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.savefig('rewards.pdf')
plt.clf()
def step(self, action):
# self.vel.append( self.velocity )
# self.alt.append( self.altitude )
# self.acc.append( self.acceleration )
# self.throt.append( action[0] )
self.stepCounter += 1
self._takeAction(action)
self.forward()
done = self.terminate()
obs = self._nextObservation()
reward = self.calculateReward()
self.episodeReward += reward
# print('Action', action)
# print('Obs:', obs)
# print('Reward:', reward)
# print('Done:', done)
if done:
self.totalRewards.append( self.episodeReward )
# self.makeEpisodePlot()
if len(self.totalRewards) > 0 and len(self.totalRewards) % 1000 == 0:
self.makeRewardPlot()
return obs, reward, done, {}
if __name__ == '__main__':
lander = KerbalLanderSimpleEnvironment()
action = [0.0]
alt = []
vel = []
acc = []
for i in range(10000):
obs, reward, done, _ = lander.step(action)
if done:
lander.reset()
break
alt.append( lander.altitude )
vel.append( lander.velocity )
acc.append( lander.acceleration )
plt.plot(alt, linewidth = 1.0)
plt.xlabel('steps')
plt.ylabel('Altitude')
plt.savefig('alt.pdf')
plt.clf()
plt.plot(vel, linewidth = 1.0)
plt.xlabel('steps')
plt.ylabel('Velocity')
plt.savefig('vel.pdf')
plt.clf()
plt.plot(acc, linewidth = 1.0)
plt.xlabel('steps')
plt.ylabel('Acceleration')
plt.savefig('acc.pdf')
plt.clf()
| [
"numpy.random.uniform",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.rcParams.update",
"numpy.clip",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"numpy.array",
"gym.spaces.Box",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.p... | [((130, 144), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (137, 144), True, 'import matplotlib as mpl\n'), ((177, 249), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['fivethirtyeight', 'seaborn-whitegrid', 'seaborn-ticks']"], {}), "(['fivethirtyeight', 'seaborn-whitegrid', 'seaborn-ticks'])\n", (190, 249), True, 'import matplotlib.pyplot as plt\n'), ((542, 586), 'matplotlib.rcParams.update', 'rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (557, 586), False, 'from matplotlib import rcParams\n'), ((8165, 8193), 'matplotlib.pyplot.plot', 'plt.plot', (['alt'], {'linewidth': '(1.0)'}), '(alt, linewidth=1.0)\n', (8173, 8193), True, 'import matplotlib.pyplot as plt\n'), ((8200, 8219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8210, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8224, 8246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (8234, 8246), True, 'import matplotlib.pyplot as plt\n'), ((8251, 8273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""alt.pdf"""'], {}), "('alt.pdf')\n", (8262, 8273), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8287), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8285, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8293, 8321), 'matplotlib.pyplot.plot', 'plt.plot', (['vel'], {'linewidth': '(1.0)'}), '(vel, linewidth=1.0)\n', (8301, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8328, 8347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8338, 8347), True, 'import matplotlib.pyplot as plt\n'), ((8352, 8374), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity"""'], {}), "('Velocity')\n", (8362, 8374), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vel.pdf"""'], {}), "('vel.pdf')\n", (8390, 8401), True, 'import matplotlib.pyplot as 
plt\n'), ((8406, 8415), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8413, 8415), True, 'import matplotlib.pyplot as plt\n'), ((8421, 8449), 'matplotlib.pyplot.plot', 'plt.plot', (['acc'], {'linewidth': '(1.0)'}), '(acc, linewidth=1.0)\n', (8429, 8449), True, 'import matplotlib.pyplot as plt\n'), ((8456, 8475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (8466, 8475), True, 'import matplotlib.pyplot as plt\n'), ((8480, 8506), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Acceleration"""'], {}), "('Acceleration')\n", (8490, 8506), True, 'import matplotlib.pyplot as plt\n'), ((8511, 8533), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""acc.pdf"""'], {}), "('acc.pdf')\n", (8522, 8533), True, 'import matplotlib.pyplot as plt\n'), ((8538, 8547), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8545, 8547), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1339), 'numpy.array', 'np.array', (['[0.0, -10.0, -self.maxVelocity]'], {}), '([0.0, -10.0, -self.maxVelocity])\n', (1306, 1339), True, 'import numpy as np\n'), ((1432, 1493), 'numpy.array', 'np.array', (['[self.fuelMass, self.maxAltitude, self.maxVelocity]'], {}), '([self.fuelMass, self.maxAltitude, self.maxVelocity])\n', (1440, 1493), True, 'import numpy as np\n'), ((1654, 1709), 'gym.spaces.Box', 'spaces.Box', (['self.lowObs', 'self.highObs'], {'dtype': 'np.float32'}), '(self.lowObs, self.highObs, dtype=np.float32)\n', (1664, 1709), False, 'from gym import spaces, logger\n'), ((1735, 1750), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1743, 1750), True, 'import numpy as np\n'), ((1808, 1823), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1816, 1823), True, 'import numpy as np\n'), ((1886, 1941), 'gym.spaces.Box', 'spaces.Box', (['self.lowAct', 'self.highAct'], {'dtype': 'np.float32'}), '(self.lowAct, self.highAct, dtype=np.float32)\n', (1896, 1941), False, 'from gym import spaces, logger\n'), ((2162, 2185), 'numpy.random.uniform', 
'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2179, 2185), True, 'import numpy as np\n'), ((4939, 4960), 'numpy.clip', 'np.clip', (['action', '(0)', '(1)'], {}), '(action, 0, 1)\n', (4946, 4960), True, 'import numpy as np\n'), ((5682, 5698), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (5690, 5698), True, 'import numpy as np\n'), ((5751, 5787), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(26, 26)'}), '(2, 2, figsize=(26, 26))\n', (5763, 5787), True, 'import matplotlib.pyplot as plt\n'), ((5796, 5828), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.25)'}), '(wspace=0.25)\n', (5815, 5828), True, 'import matplotlib.pyplot as plt\n'), ((6670, 6696), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""episode.pdf"""'], {}), "('episode.pdf')\n", (6681, 6696), True, 'import matplotlib.pyplot as plt\n'), ((6705, 6714), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6712, 6714), True, 'import matplotlib.pyplot as plt\n'), ((6755, 6802), 'matplotlib.pyplot.plot', 'plt.plot', (['self.totalRewards'], {'lw': '(0.25)', 'alpha': '(1.0)'}), '(self.totalRewards, lw=0.25, alpha=1.0)\n', (6763, 6802), True, 'import matplotlib.pyplot as plt\n'), ((6815, 6836), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (6825, 6836), True, 'import matplotlib.pyplot as plt\n'), ((6845, 6865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reward"""'], {}), "('Reward')\n", (6855, 6865), True, 'import matplotlib.pyplot as plt\n'), ((6874, 6900), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rewards.pdf"""'], {}), "('rewards.pdf')\n", (6885, 6900), True, 'import matplotlib.pyplot as plt\n'), ((6909, 6918), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6916, 6918), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2279), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2273, 2279), True, 'import numpy as np\n'), ((5268, 5289), 
'numpy.abs', 'np.abs', (['self.velocity'], {}), '(self.velocity)\n', (5274, 5289), True, 'import numpy as np\n'), ((5404, 5425), 'numpy.abs', 'np.abs', (['self.fuelMass'], {}), '(self.fuelMass)\n', (5410, 5425), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import cvxopt
allNodes, edges, equalityPairs, layer, name2ind = [], [], [], dict(), dict()
Cu, Cl = None, None
'''
This is all Python 2.7 (don't judge me, please).
I had to unpickle your pickles and pickle them again in a way
that Python 2.7 can read.
You could probably run this as-is under Python 3,
but I don't know whether cvxopt will accept it.
'''
def isDummy(node):
    """Check whether a node name denotes a dummy node.

    Dummy nodes are identified by a leading 'D' in their name.
    """
    first_char = node[0]
    return 'D' == first_char
'''
!!!!!!!!!!!!!!!!!!!!!!!!!!!! INPUT 1 !!!!!!!!!!!!!!!!!!!!!!!!!!!!
A list of lists: each inner list corresponds to one layer and contains
that layer's node names, ordered from left to right.
'''
'''
!!!!!!!!!!!!!!!!!!!!!!!!!!!! INPUT 2 !!!!!!!!!!!!!!!!!!!!!!!!!!!!
The links file exactly as provided: a set of tuples.
The edge type is ignored; only the node names are used.
'''
def minimize_positions(nodesPerLayer=None, links=None):
    """
    Assign horizontal coordinates to the nodes of a layered graph by solving
    a quadratic program with cvxopt.

    Parameters
    ----------
    nodesPerLayer : list of lists, optional
        Layer ``i`` is a list of node names ordered left-to-right.
        Loaded from ``'layers.pkl'`` when omitted.
    links : iterable of tuples, optional
        Edges; only the first two elements (node names) of each tuple are
        used.  Loaded from ``'links.pkl'`` when omitted.

    Returns
    -------
    sol : dict
        cvxopt solver result; ``sol['x']`` holds the x-coordinates in the
        order of ``allNodes``.
    allNodes : list
        Flattened list of node names, layer by layer.
    """
    global allNodes, edges, equalityPairs, layer, name2ind, Cu, Cl
    # Reset module-level scratch state so repeated calls start clean.
    allNodes, edges, equalityPairs, layer, name2ind = [], [], [], dict(), dict()
    Cu, Cl = None, None
    if not nodesPerLayer:
        with open('layers.pkl', 'rb') as f:
            nodesPerLayer = pickle.load(f)
    if not links:
        with open('links.pkl', 'rb') as f:
            links = pickle.load(f)
    print("layers: {}".format(len(nodesPerLayer)))
    # Flatten the layers; record each node's index and its layer number.
    cnt = 0
    for i, l in enumerate(nodesPerLayer):
        for t in l:
            allNodes.append(t)
            name2ind[t] = cnt
            layer[cnt] = i
            cnt += 1
    # Node names must be globally unique.
    assert len(allNodes) == len(set(allNodes))
    nrNodes = len(allNodes)
    nrLayers = max([layer[k] for k in layer]) + 1  # NOTE: currently unused
    # Orient every edge from the lower layer to the higher layer.
    for e in links:
        if layer[name2ind[e[0]]] < layer[name2ind[e[1]]]:
            edges.append((name2ind[e[0]], name2ind[e[1]]))
        else:
            edges.append((name2ind[e[1]], name2ind[e[0]]))
        # Dummy-dummy edges are forced to be vertical (equality constraints).
        if isDummy(e[0]) and isDummy(e[1]):
            equalityPairs.append((e[0], e[1]))
    #calculate upper and lower connectivity
    Cu = np.zeros((nrNodes,))
    Cl = np.zeros((nrNodes,))
    for e in edges:
        assert layer[e[0]] < layer[e[1]]
        Cl[e[0]] += 1
        Cu[e[1]] += 1
    # One ordering constraint per adjacent pair of nodes within a layer.
    nrIneqCons = 0
    for i in nodesPerLayer:
        nrIneqCons += len(i) - 1
    a = 1     # minimum horizontal separation between in-layer neighbours
    x0 = 100  # arbitrary anchor coordinate for the first node
    #equality constraints: coordinate of first node set to arbitrary number (x0)
    # and edges connected dummy nodes should be straight
    A = np.zeros((len(equalityPairs) + 1, nrNodes))
    A[0,0] = 1
    for i, pair in enumerate(equalityPairs):
        A[i+1, name2ind[pair[0]]] = 1
        A[i+1, name2ind[pair[1]]] = -1
    A = cvxopt.matrix(A)
    b = np.zeros((len(equalityPairs) + 1, ))
    b[0] = x0
    b = cvxopt.matrix(b)
    #inequality constraints: each node should be on the right of its preceding
    # nodes in the same layer
    G = np.zeros((nrIneqCons, nrNodes), float)
    counter = 0
    for ll in nodesPerLayer:
        for i in range(len(ll)-1):
            G[counter, name2ind[ll[i+1]]] = -1
            G[counter, name2ind[ll[i]]] = 1
            counter += 1
    G = cvxopt.matrix(G)
    # G x <= h encodes x[left] - x[right] <= -a, i.e. a minimum gap of `a`.
    h = -a * np.ones((G.size[0],), float)
    h = cvxopt.matrix(h)
    #c parameter as in paper
    c = 0.5
    #objective function
    #P represents the f1 part of the objective
    # (sum of squared horizontal distances along edges, in quadratic form)
    P = np.zeros((nrNodes, nrNodes))
    for i, e in enumerate(edges):
        #A[e[0], e[1]] = 1
        #A[e[1], e[0]] = 1
        P[e[0], e[0]] += 1
        P[e[1], e[1]] += 1
        P[e[0], e[1]] -= 1
        P[e[1], e[0]] -= 1
    #P2 represents the f2 part of the objective
    # (distance of each node from the barycenter of its children/parents)
    P2 = np.zeros((nrNodes, nrNodes))
    children = dict()
    parents = dict()
    for e in edges:
        if e[0] not in children:
            children[e[0]] = set([e[1]])
        else:
            children[e[0]].add(e[1])
        if e[1] not in parents:
            parents[e[1]] = set([e[0]])
        else:
            parents[e[1]].add(e[0])
    for i in range(nrNodes):
        if i in children and Cl[i] > 1:
            #term has children
            P2[i,i] += 1
            for c1 in children[i]:
                P2[i, c1] -= 1.0 / Cl[i]
                P2[c1, i] -= 1.0 / Cl[i]
                P2[c1,c1] += 1.0 / (Cl[i] ** 2.0)
                for c2 in children[i]:
                    if c1 != c2:
                        P2[c1,c2] += 0.5 / (Cl[i] ** 2.0)
                        P2[c2,c1] += 0.5 / (Cl[i] ** 2.0)
        if i in parents and Cu[i] > 1:
            #term has children
            P2[i,i] += 1
            for c1 in parents[i]:
                P2[i, c1] -= 1.0 / Cu[i]
                P2[c1, i] -= 1.0 / Cu[i]
                P2[c1,c1] += 1.0 / (Cu[i] ** 2.0)
                for c2 in parents[i]:
                    if c1 != c2:
                        P2[c1,c2] += 0.5 / (Cu[i] ** 2.0)
                        P2[c2,c1] += 0.5 / (Cu[i] ** 2.0)
    # Blend the two objectives; cvxopt's qp minimizes (1/2) x'Px, hence 2.0*P.
    P = c * P + (1-c) * P2
    P = cvxopt.matrix(2.0 * P)
    q = cvxopt.matrix(np.zeros(nrNodes,))
    # inital_values = np.zeros((nrNodes))
    # cnt = 0
    # for i, l in enumerate(nodesPerLayer):
    #     for t in l:
    #         inital_values[cnt] = i + 100
    #         cnt += 1
    # print('solving...')
    # initvals = {'x': cvxopt.matrix(inital_values)}
    sol = cvxopt.solvers.qp(P,q,G,h,A,b)
    xcoord = np.array(sol['x'])  # NOTE: computed but unused; sol is returned
    # assert sol['status'] == 'optimal'
    return sol, allNodes
if __name__ == '__main__':
minimize_positions() | [
"cvxopt.matrix",
"numpy.zeros",
"numpy.ones",
"pickle.load",
"numpy.array",
"cvxopt.solvers.qp"
] | [((1960, 1980), 'numpy.zeros', 'np.zeros', (['(nrNodes,)'], {}), '((nrNodes,))\n', (1968, 1980), True, 'import numpy as np\n'), ((1990, 2010), 'numpy.zeros', 'np.zeros', (['(nrNodes,)'], {}), '((nrNodes,))\n', (1998, 2010), True, 'import numpy as np\n'), ((2582, 2598), 'cvxopt.matrix', 'cvxopt.matrix', (['A'], {}), '(A)\n', (2595, 2598), False, 'import cvxopt\n'), ((2669, 2685), 'cvxopt.matrix', 'cvxopt.matrix', (['b'], {}), '(b)\n', (2682, 2685), False, 'import cvxopt\n'), ((2812, 2850), 'numpy.zeros', 'np.zeros', (['(nrIneqCons, nrNodes)', 'float'], {}), '((nrIneqCons, nrNodes), float)\n', (2820, 2850), True, 'import numpy as np\n'), ((3057, 3073), 'cvxopt.matrix', 'cvxopt.matrix', (['G'], {}), '(G)\n', (3070, 3073), False, 'import cvxopt\n'), ((3125, 3141), 'cvxopt.matrix', 'cvxopt.matrix', (['h'], {}), '(h)\n', (3138, 3141), False, 'import cvxopt\n'), ((3270, 3298), 'numpy.zeros', 'np.zeros', (['(nrNodes, nrNodes)'], {}), '((nrNodes, nrNodes))\n', (3278, 3298), True, 'import numpy as np\n'), ((3558, 3586), 'numpy.zeros', 'np.zeros', (['(nrNodes, nrNodes)'], {}), '((nrNodes, nrNodes))\n', (3566, 3586), True, 'import numpy as np\n'), ((4876, 4898), 'cvxopt.matrix', 'cvxopt.matrix', (['(2.0 * P)'], {}), '(2.0 * P)\n', (4889, 4898), False, 'import cvxopt\n'), ((5220, 5255), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (5237, 5255), False, 'import cvxopt\n'), ((5266, 5284), 'numpy.array', 'np.array', (["sol['x']"], {}), "(sol['x'])\n", (5274, 5284), True, 'import numpy as np\n'), ((3088, 3116), 'numpy.ones', 'np.ones', (['(G.size[0],)', 'float'], {}), '((G.size[0],), float)\n', (3095, 3116), True, 'import numpy as np\n'), ((4922, 4939), 'numpy.zeros', 'np.zeros', (['nrNodes'], {}), '(nrNodes)\n', (4930, 4939), True, 'import numpy as np\n'), ((1134, 1148), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1145, 1148), False, 'import pickle\n'), ((1230, 1244), 'pickle.load', 'pickle.load', (['f'], {}), 
'(f)\n', (1241, 1244), False, 'import pickle\n')] |
import numpy as np
from numpy import pi as pi64
from jax import numpy as jnp
from jax.config import config
config.update('jax_enable_x64', True)
__all__ = [
'thermal_phase_curve',
'reflected_phase_curve',
'reflected_phase_curve_inhomogeneous'
]
# Cast helper: ``np.cast`` was removed in NumPy 2.0; ``np.dtype(...).type``
# is the portable equivalent (identical behavior on older NumPy too).
floatX = 'float32'
_cast = np.dtype(floatX).type
# Physical constants (mks units), stored as float32 scalars.
pi = _cast(pi64)
h = _cast(6.62607015e-34)  # Planck constant [J s]
c = _cast(299792458.0)  # speed of light [m/s]
k_B = _cast(1.380649e-23)  # Boltzmann constant [J/K]
hc2 = _cast(6.62607015e-34 * 299792458.0 ** 2)  # h * c**2
zero = _cast(0)
one = _cast(1)
two = _cast(2)
half = _cast(0.5)
def linspace(start, stop, n):
    """
    Return ``n`` evenly spaced values from ``start`` to ``stop`` (inclusive),
    with dtype ``floatX``.

    Uses ``jnp.linspace`` instead of the previous
    ``jnp.arange(start, stop + dx, dx)``: with a floating-point step,
    rounding error can make ``arange`` emit ``n + 1`` (or ``n - 1``) points,
    whereas ``linspace`` always returns exactly ``n``.

    Parameters
    ----------
    start, stop : float
        Interval endpoints (both included).
    n : int
        Number of samples.
    """
    return jnp.linspace(start, stop, n, dtype=floatX)
def mu(theta):
    r"""
    Cosine of the colatitude, :math:`\mu = \cos(\theta)`.

    Parameters
    ----------
    theta : `~numpy.ndarray`
        Angle :math:`\theta`
    """
    cosine = jnp.cos(theta)
    return cosine
def tilda_mu(theta, alpha):
    r"""
    The normalized quantity :math:`\tilde{\mu} = \alpha \mu(\theta)`.

    Parameters
    ----------
    theta : `~numpy.ndarray`
        Angle :math:`\theta`
    alpha : float
        Dimensionless fluid number :math:`\alpha`
    """
    # mu(theta) is just cos(theta); inlined here.
    return jnp.cos(theta) * alpha
def H(l, theta, alpha):
    r"""
    Hermite Polynomials in :math:`\tilde{\mu}(\theta)`.

    Parameters
    ----------
    l : int
        Polynomial order; only :math:`\ell \leq 2` is implemented here
        (the original docstring claimed :math:`\ell \leq 7`).
    theta : float
        Angle :math:`\theta`
    alpha : float
        Dimensionless fluid number :math:`\alpha`

    Returns
    -------
    result : `~numpy.ndarray`
        Hermite Polynomial evaluated at angles :math:`\theta`.

    Raises
    ------
    NotImplementedError
        If ``l`` is greater than 2.
    """
    # H_0 = 1, H_1 = 2x, H_2 = 4x^2 - 2, with x = tilda_mu(theta, alpha)
    if l == 0:
        return 1
    elif l == 1:
        return two * tilda_mu(theta, alpha)
    elif l == 2:
        return (two + two) * tilda_mu(theta, alpha) ** 2 - two
    else:
        raise NotImplementedError()
def h_ml(omega_drag, alpha, theta, phi, C_11, m=one, l=one):
    r"""
    The :math:`h_{m\ell}` basis function.

    Parameters
    ----------
    omega_drag : float
        Dimensionless drag
    alpha : float
        Dimensionless fluid number
    m : int
        Spherical harmonic ``m`` index
    l : int
        Spherical harmonic ``l`` index
    theta : `~numpy.ndarray`
        Latitudinal coordinate
    phi : `~numpy.ndarray`
        Longitudinal coordinate
    C_11 : float
        Spherical harmonic coefficient

    Returns
    -------
    hml : `~numpy.ndarray`
        :math:`h_{m\ell}` basis function.
    """
    # Common amplitude: C_11 / (omega_drag^2 alpha^4 + m^2), damped by a
    # Gaussian envelope exp(-tilda_mu^2 / 2) in latitude.
    prefactor = (C_11 /
                 (jnp.power(omega_drag, two) *
                  jnp.power(alpha, two * two) +
                  jnp.power(m, two)) *
                 jnp.exp(-jnp.power(tilda_mu(theta, alpha), two) * half))
    # Sum of a cos(m phi) term and an omega_drag-weighted sin(m phi) term
    # built from Hermite polynomials of orders l and l+1.
    result = prefactor * (mu(theta) * m * H(l, theta, alpha) * jnp.cos(m * phi) +
                          alpha * omega_drag * (tilda_mu(theta, alpha) *
                                                H(l, theta, alpha) -
                                                H(l + one, theta, alpha)) *
                          jnp.sin(m * phi))
    return result
def h_ml_sum_theano(hotspot_offset, omega_drag, alpha,
                    theta2d, phi2d, C_11):
    """
    Evaluate the temperature-perturbation basis over the (theta, phi) grid.

    Despite the historical name, this is a single call of ``h_ml`` for the
    (m=1, l=1) mode — not a Cythonized quadruple loop.  The longitude grid
    is shifted by pi/2 plus the hotspot offset before evaluation.
    """
    phase_offset = half * pi
    hml_sum = h_ml(omega_drag, alpha,
                   theta2d,
                   phi2d +
                   phase_offset +
                   hotspot_offset,
                   C_11)
    return hml_sum
def blackbody_lambda(lam, temperature):
    """
    Planck spectral radiance at wavelength ``lam`` [m] and ``temperature``
    [K] (mks units): 2 h c^2 / lam^5 / (exp(h c / (lam k_B T)) - 1).
    ``expm1`` keeps the denominator accurate at long wavelengths.
    """
    return (two * hc2 / jnp.power(lam, 5) /
            jnp.expm1(h * c / (lam * k_B * temperature)))
def blackbody2d(wavelengths, temperature):
    """
    Planck function evaluated for a vector of wavelengths (meters) at the
    given temperature map (Kelvin).

    Parameters
    ----------
    wavelengths : `~numpy.ndarray`
        Wavelength array in units of meters
    temperature : `~numpy.ndarray`
        Temperature in units of Kelvin

    Returns
    -------
    pl : `~numpy.ndarray`
        Planck function evaluated at each wavelength
    """
    planck = blackbody_lambda(wavelengths, temperature)
    return planck
def trapz3d(y_3d, x):
    """
    Trapezoid-rule integration along the last axis of ``y_3d`` over the
    (broadcastable) grid ``x``.
    """
    dx = x[..., 1:] - x[..., :-1]
    pair_sums = y_3d[..., 1:] + y_3d[..., :-1]
    return jnp.sum(half * dx * pair_sums, axis=-1)
def integrate_planck(filt_wavelength, filt_trans,
                     temperature):
    """
    Band-integrate the Planck function over wavelength for the temperature
    map ``temperature``, weighted by the filter transmittance
    ``filt_trans``, using trapezoidal integration.
    """
    planck_map = blackbody2d(filt_wavelength, temperature)
    band_integral = trapz3d(planck_map * filt_trans, filt_wavelength)
    return band_integral
# broadcaster = jnp.TensorType(floatX, 4 * [True, ])
def thermal_phase_curve(xi, hotspot_offset, omega_drag,
                        alpha, C_11, T_s, a_rs, rp_a, A_B,
                        theta2d, phi2d, filt_wavelength,
                        filt_transmittance, f):
    r"""
    Compute the phase curve evaluated at phases ``xi``.

    .. warning::
        Assumes ``xi`` is sorted, and that ``theta2d`` and ``phi2d`` are
        linearly spaced and increasing.

    Parameters
    ----------
    xi : array-like
        Orbital phase angle, must be sorted
    hotspot_offset : float
        Angle of hotspot offset [radians]
    omega_drag : float
        Dimensionless drag frequency
    alpha : float
        Dimensionless fluid number
    C_11 : float
        Spherical harmonic power in the :math:`m=1\,\ell=1` mode
    T_s : float [K]
        Stellar effective temperature
    a_rs : float
        Semimajor axis in units of stellar radii
    rp_a : float
        Planet radius normalized by the semimajor axis
    A_B : float
        Bond albedo
    theta2d : array-like
        Grid of latitude values evaluated over the surface of the sphere
    phi2d : array-like
        Grid of longitude values evaluated over the surface of the sphere
    filt_wavelength : array-like
        Filter transmittance curve wavelengths [m]
    filt_transmittance : array-like
        Filter transmittance
    f : float
        Greenhouse parameter (typically ~1/sqrt(2)).

    Returns
    -------
    fluxes : tensor-like
        System fluxes as a function of phase angle :math:`\xi`.
    T : tensor-like
        Temperature map

    Examples
    --------
    Users will typically create the ``theta2d`` and ``phi2d`` grids like so:

    >>> # Set resolution of grid points on sphere:
    >>> n_phi = 100
    >>> n_theta = 10
    >>> phi = np.linspace(-2 * np.pi, 2 * np.pi, n_phi, dtype=floatX)
    >>> theta = np.linspace(0, np.pi, n_theta, dtype=floatX)
    >>> theta2d, phi2d = np.meshgrid(theta, phi)
    """
    # Handle broadcasting for 4D tensors: axes are
    # (phi, theta, xi, wavelength).
    xi_tt = xi[None, None, :, None]
    theta2d_tt = theta2d[..., None, None]
    phi2d_tt = phi2d[..., None, None]
    filt_wavelength_tt = filt_wavelength[None, None, None, :]
    filt_transmittance_tt = filt_transmittance[None, None, None, :]
    h_ml_sum = h_ml_sum_theano(hotspot_offset, omega_drag,
                               alpha, theta2d_tt, phi2d_tt, C_11)
    # Equilibrium temperature scaled by the greenhouse factor f,
    # reduced by the Bond albedo, perturbed by the h_ml basis map.
    T_eq = f * T_s * jnp.power(a_rs, -half)
    T = T_eq * jnp.power(one - A_B, half * half) * (one + h_ml_sum)
    rp_rs = rp_a * a_rs
    int_bb = integrate_planck(filt_wavelength_tt,
                              filt_transmittance_tt, T)
    phi = phi2d_tt[..., 0]
    # Mask of longitudes on the hemisphere facing the observer at phase xi.
    visible = ((phi > - xi_tt[..., 0] - pi * half) &
               (phi < - xi_tt[..., 0] + pi * half))
    # Disk-integration weights: sin^2(theta) cos(phi + xi).
    integrand = (int_bb *
                 sinsq_2d(theta2d_tt[..., 0]) *
                 cos_2d(phi + xi_tt[..., 0]))
    # Band-integrated stellar Planck spectrum (normalization).
    planck_star = trapz3d(filt_transmittance *
                          blackbody_lambda(filt_wavelength, T_s),
                          filt_wavelength)
    integral = trapz2d(integrand * visible,
                       phi2d_tt[:, 0, 0, 0],
                       theta2d_tt[0, :, 0, 0])
    # Planet-to-star flux ratio.
    fluxes = integral * jnp.power(rp_rs, 2) / pi / planck_star
    return fluxes, T
def sum2d(z):
    """
    Total of all elements of the 2d array ``z``.
    """
    total = jnp.sum(z)
    return total
def sum1d(z):
    """
    Total of all elements of the 1d array ``z``.
    """
    total = jnp.sum(z)
    return total
def sinsq_2d(z):
    """
    Elementwise sin^2 of the 2d array ``z``.
    """
    s = jnp.sin(z)
    return jnp.square(s)
def cos_2d(z):
    """
    Elementwise cosine of the 2d array ``z``.
    """
    result = jnp.cos(z)
    return result
def trapz2d(z, x, y):
    """
    Integrates a regularly spaced 2D grid using the composite trapezium rule.

    Note: ``z`` is actually 3D — the first two axes are the (x, y) grid and
    the trailing axis (wavelength/phase) is carried through elementwise.
    Requires uniformly spaced ``x`` and ``y`` (only the first spacing is
    used).

    Source: https://github.com/tiagopereira/python_tips/blob/master/code/trapz2d.py

    Parameters
    ----------
    z : `~numpy.ndarray`
        Array of shape (len(x), len(y), ...)
    x : `~numpy.ndarray`
        grid values for x (1D array)
    y : `~numpy.ndarray`
        grid values for y (1D array)

    Returns
    -------
    t : `~numpy.ndarray`
        Trapezoidal approximation to the integral under z
    """
    m = z.shape[0] - 1
    n = z.shape[1] - 1
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    # s1: corners, s2: edges, s3: interior — standard composite weights
    # (1, 2, 4) / 4.
    s1 = z[0, 0, :] + z[m, 0, :] + z[0, n, :] + z[m, n, :]
    s2 = (jnp.sum(z[1:m, 0, :], axis=0) + jnp.sum(z[1:m, n, :], axis=0) +
          jnp.sum(z[0, 1:n, :], axis=0) + jnp.sum(z[m, 1:n, :], axis=0))
    s3 = jnp.sum(jnp.sum(z[1:m, 1:n, :], axis=0), axis=0)
    return dx * dy * (s1 + two * s2 + (two + two) * s3) / (two + two)
def reflected_phase_curve(phases, omega, g, a_rp):
    """
    Reflected light phase curve for a homogeneous sphere by
    Heng, Morris & Kitzmann (2021).

    Parameters
    ----------
    phases : `~np.ndarray`
        Orbital phases of each observation defined on (0, 1)
    omega : tensor-like
        Single-scattering albedo as defined in Heng, Morris & Kitzmann
        (2021), on (0, 1)
    g : tensor-like
        Scattering asymmetry factor, ranges from (-1, 1).
    a_rp : float, tensor-like
        Semimajor axis scaled by the planetary radius

    Returns
    -------
    flux_ratio_ppm : tensor-like
        Flux ratio between the reflected planetary flux and the stellar flux in
        units of ppm.
    A_g : tensor-like
        Geometric albedo derived for the planet given {omega, g}.
    q : tensor-like
        Integral phase function
    """
    # Convert orbital phase on (0, 1) to "alpha" on (0, np.pi)
    alpha = jnp.asarray(2 * np.pi * phases - np.pi)
    abs_alpha = jnp.abs(alpha)
    alpha_sort_order = jnp.argsort(alpha)
    sin_abs_sort_alpha = jnp.sin(abs_alpha[alpha_sort_order])
    sort_alpha = alpha[alpha_sort_order]
    gamma = jnp.sqrt(1 - omega)
    eps = (1 - gamma) / (1 + gamma)
    # Equation 34 for Henyey-Greenstein
    P_star = (1 - g ** 2) / (1 + g ** 2 +
                             2 * g * jnp.cos(alpha)) ** 1.5
    # Equation 36
    P_0 = (1 - g) / (1 + g) ** 2
    # Equation 10:
    Rho_S = P_star - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
    Rho_S_0 = P_0 - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
    Rho_L = 0.5 * eps * (2 - eps) * (1 + eps) ** 2
    Rho_C = eps ** 2 * (1 + eps) ** 2
    alpha_plus = jnp.sin(abs_alpha / 2) + jnp.cos(abs_alpha / 2)
    alpha_minus = jnp.sin(abs_alpha / 2) - jnp.cos(abs_alpha / 2)
    # Equation 11 (the where() guard avoids log of a non-positive argument):
    Psi_0 = jnp.where(
        (alpha_plus > -1) & (alpha_minus < 1),
        jnp.log((1 + alpha_minus) * (alpha_plus - 1) /
                (1 + alpha_plus) / (1 - alpha_minus)),
        0
    )
    Psi_S = 1 - 0.5 * (jnp.cos(abs_alpha / 2) -
                       1.0 / jnp.cos(abs_alpha / 2)) * Psi_0
    Psi_L = (jnp.sin(abs_alpha) + (np.pi - abs_alpha) *
             jnp.cos(abs_alpha)) / np.pi
    Psi_C = (-1 + 5 / 3 * jnp.cos(abs_alpha / 2) ** 2 - 0.5 *
             jnp.tan(abs_alpha / 2) * jnp.sin(abs_alpha / 2) ** 3 * Psi_0)
    # Equation 8:
    A_g = omega / 8 * (P_0 - 1) + eps / 2 + eps ** 2 / 6 + eps ** 3 / 24
    # Equation 9:
    Psi = ((12 * Rho_S * Psi_S + 16 * Rho_L *
            Psi_L + 9 * Rho_C * Psi_C) /
           (12 * Rho_S_0 + 16 * Rho_L + 6 * Rho_C))
    flux_ratio_ppm = 1e6 * (a_rp ** -2 * A_g * Psi)
    q = _integral_phase_function(
        Psi, sin_abs_sort_alpha, sort_alpha, alpha_sort_order
    )
    return flux_ratio_ppm, A_g, q
def rho(omega, P_0, P_star):
    """
    Scattering amplitudes from Equation 10 of Heng, Morris & Kitzmann
    (2021), for single-scattering albedo ``omega`` and phase-function
    values ``P_0`` (at alpha=0) and ``P_star``.

    Returns the tuple ``(Rho_S, Rho_S_0, Rho_L, Rho_C)``.
    """
    gamma = jnp.sqrt(1 - omega)
    eps = (1 - gamma) / (1 + gamma)
    shared = 0.25 * ((1 + eps) * (2 - eps)) ** 2
    Rho_S = P_star - 1 + shared
    Rho_S_0 = P_0 - 1 + shared
    Rho_L = 0.5 * eps * (2 - eps) * (1 + eps) ** 2
    Rho_C = eps ** 2 * (1 + eps) ** 2
    return Rho_S, Rho_S_0, Rho_L, Rho_C
def I(alpha, Phi):
    """
    Antiderivatives from Equation 39 of Heng, Morris & Kitzmann (2021),
    evaluated at phase angle ``alpha`` and integration limit ``Phi``.

    Returns the tuple ``(I_S, I_L, I_C)`` (isotropic / Lambertian /
    second-order terms).
    """
    cos_alpha = jnp.cos(alpha)
    cos_alpha_2 = jnp.cos(alpha / 2)
    z = jnp.sin(alpha / 2 - Phi / 2) / jnp.cos(Phi / 2)
    # The following expression has the same behavior
    # as I_0 = jnp.arctanh(z), but it doesn't blow up at alpha=0
    I_0 = jnp.where(jnp.abs(z) < 1.0, 0.5 * (jnp.log1p(z) - jnp.log1p(-z)), 0)
    I_S = (-1 / (2 * cos_alpha_2) *
           (jnp.sin(alpha / 2 - Phi) +
            (cos_alpha - 1) * I_0))
    I_L = 1 / np.pi * (Phi * cos_alpha -
                       0.5 * jnp.sin(alpha - 2 * Phi))
    I_C = -1 / (24 * cos_alpha_2) * (
        -3 * jnp.sin(alpha / 2 - Phi) +
        jnp.sin(3 * alpha / 2 - 3 * Phi) +
        6 * jnp.sin(3 * alpha / 2 - Phi) -
        6 * jnp.sin(alpha / 2 + Phi) +
        24 * jnp.sin(alpha / 2) ** 4 * I_0
    )
    return I_S, I_L, I_C
def trapz1d(y_1d, x):
    """
    One-dimensional trapezoid rule over the grid ``x``; only valid when
    ``x`` is increasing.
    """
    widths = x[1:] - x[:-1]
    pair_sums = y_1d[1:] + y_1d[:-1]
    return jnp.sum(0.5 * widths * pair_sums, axis=-1)
def _integral_phase_function(Psi, sin_abs_sort_alpha, sort_alpha, sort):
    """
    Integral phase function ``q`` for a generic (possibly asymmetric)
    reflectivity map: trapezoidal integral of Psi * sin|alpha| over the
    sorted phase angles.
    """
    sorted_psi = Psi[sort]
    return trapz1d(sorted_psi * sin_abs_sort_alpha, sort_alpha)
def _g_from_ag(A_g, omega_0, omega_prime, x1, x2):
    """
    Compute the scattering asymmetry factor g for a given geometric albedo,
    and possibly asymmetric single scattering albedos.

    Parameters
    ----------
    A_g : tensor-like
        Geometric albedo on (0, 1)
    omega_0 : tensor-like
        Single-scattering albedo of the less reflective region.
        Defined on (0, 1).
    omega_prime : tensor-like
        Additional single-scattering albedo of the more reflective region,
        such that the single-scattering albedo of the reflective region is
        ``omega_0 + omega_prime``. Defined on (0, ``1-omega_0``).
    x1 : tensor-like
        Start longitude of the darker region [radians] on (-pi/2, pi/2)
    x2 : tensor-like
        Stop longitude of the darker region [radians] on (-pi/2, pi/2)

    Returns
    -------
    g : tensor-like
        Scattering asymmetry factor
    """
    gamma = jnp.sqrt(1 - omega_0)
    eps = (1 - gamma) / (1 + gamma)
    gamma_prime = jnp.sqrt(1 - omega_prime)
    eps_prime = (1 - gamma_prime) / (1 + gamma_prime)
    # Scattering amplitudes for the darker (unprimed) and brighter (primed)
    # regions.
    Rho_L = eps / 2 * (1 + eps) ** 2 * (2 - eps)
    Rho_L_prime = eps_prime / 2 * (1 + eps_prime) ** 2 * (2 - eps_prime)
    Rho_C = eps ** 2 * (1 + eps) ** 2
    Rho_C_prime = eps_prime ** 2 * (1 + eps_prime) ** 2
    C = -1 + 0.25 * (1 + eps) ** 2 * (2 - eps) ** 2
    C_prime = -1 + 0.25 * (1 + eps_prime) ** 2 * (2 - eps_prime) ** 2
    C_2 = 2 + jnp.sin(x1) - jnp.sin(x2)
    C_1 = (omega_0 * Rho_L * np.pi / 12 + omega_prime * Rho_L_prime / 12 *
           (x1 - x2 + np.pi + 0.5 * (jnp.sin(2 * x1) - jnp.sin(
               2 * x2))) +
           np.pi * omega_0 * Rho_C / 32 + 3 * np.pi * omega_prime *
           Rho_C_prime / 64 *
           (2 / 3 + 3 / 8 * (jnp.sin(x1) - jnp.sin(x2)) +
            1 / 24 * (jnp.sin(3 * x1) - jnp.sin(3 * x2))))
    C_3 = (16 * np.pi * A_g - 32 * C_1 - 2 * np.pi * omega_0 * C -
           np.pi * omega_prime * C_2 * C_prime
           ) / (2 * np.pi * omega_0 + np.pi * omega_prime * C_2)
    # Closed-form root of the quadratic in g implied by the A_g relation.
    return - ((2 * C_3 + 1) - jnp.sqrt(1 + 8 * C_3)) / (2 * C_3)
def reflected_phase_curve_inhomogeneous(phases, omega_0, omega_prime, x1, x2,
                                        A_g, a_rp):
    """
    Reflected light phase curve for an inhomogeneous sphere by
    Heng, Morris & Kitzmann (2021), with inspiration from Hu et al. (2015).

    Parameters
    ----------
    phases : `~np.ndarray`
        Orbital phases of each observation defined on (0, 1)
    omega_0 : tensor-like
        Single-scattering albedo of the less reflective region.
        Defined on (0, 1).
    omega_prime : tensor-like
        Additional single-scattering albedo of the more reflective region,
        such that the single-scattering albedo of the reflective region is
        ``omega_0 + omega_prime``. Defined on (0, ``1-omega_0``).
    x1 : tensor-like
        Start longitude of the darker region [radians] on (-pi/2, pi/2)
    x2 : tensor-like
        Stop longitude of the darker region [radians] on (-pi/2, pi/2)
    A_g : tensor-like
        Geometric albedo of the planet on (0, 1)
    a_rp : float, tensor-like
        Semimajor axis scaled by the planetary radius

    Returns
    -------
    flux_ratio_ppm : tensor-like
        Flux ratio between the reflected planetary flux and the stellar flux
        in units of ppm.
    g : tensor-like
        Scattering asymmetry factor on (-1, 1)
    q : tensor-like
        Integral phase function
    """
    # Asymmetry factor consistent with the requested geometric albedo.
    g = _g_from_ag(A_g, omega_0, omega_prime, x1, x2)
    # Redefine alpha to be on (-pi, pi)
    alpha = (2 * np.pi * phases - np.pi).astype(floatX)
    abs_alpha = np.abs(alpha).astype(floatX)
    alpha_sort_order = np.argsort(alpha)
    sin_abs_sort_alpha = np.sin(abs_alpha[alpha_sort_order]).astype(floatX)
    sort_alpha = alpha[alpha_sort_order].astype(floatX)
    # Equation 34 for Henyey-Greestein
    P_star = (1 - g ** 2) / (1 + g ** 2 +
                             2 * g * jnp.cos(abs_alpha)) ** 1.5
    # Equation 36
    P_0 = (1 - g) / (1 + g) ** 2
    Rho_S, Rho_S_0, Rho_L, Rho_C = rho(omega_0, P_0, P_star)
    Rho_S_prime, Rho_S_0_prime, Rho_L_prime, Rho_C_prime = rho(
        omega_prime, P_0, P_star
    )
    alpha_plus = jnp.sin(abs_alpha / 2) + jnp.cos(abs_alpha / 2)
    alpha_minus = jnp.sin(abs_alpha / 2) - jnp.cos(abs_alpha / 2)
    # Equation 11:
    Psi_0 = jnp.where(
        (alpha_plus > -1) & (alpha_minus < 1),
        jnp.log((1 + alpha_minus) * (alpha_plus - 1) /
                (1 + alpha_plus) / (1 - alpha_minus)),
        0
    )
    Psi_S = 1 - 0.5 * (jnp.cos(abs_alpha / 2) -
                       1.0 / jnp.cos(abs_alpha / 2)) * Psi_0
    Psi_L = (jnp.sin(abs_alpha) + (np.pi - abs_alpha) *
             jnp.cos(abs_alpha)) / np.pi
    Psi_C = (-1 + 5 / 3 * jnp.cos(abs_alpha / 2) ** 2 -
             0.5 * jnp.tan(abs_alpha / 2) *
             jnp.sin(abs_alpha / 2) ** 3 * Psi_0)
    # Table 1: piecewise integration limits depending on where the bright
    # region's boundaries fall relative to the visible hemisphere.
    condition_a = (-np.pi / 2 <= alpha - np.pi / 2)
    condition_0 = ((alpha - np.pi / 2 <= np.pi / 2) &
                   (np.pi / 2 <= alpha + x1) &
                   (alpha + x1 <= alpha + x2))
    condition_1 = ((alpha - np.pi / 2 <= alpha + x1) &
                   (alpha + x1 <= np.pi / 2) &
                   (np.pi / 2 <= alpha + x2))
    condition_2 = ((alpha - np.pi / 2 <= alpha + x1) &
                   (alpha + x1 <= alpha + x2) &
                   (alpha + x2 <= np.pi / 2))
    condition_b = (alpha + np.pi / 2 <= np.pi / 2)
    condition_3 = ((alpha + x1 <= alpha + x2) &
                   (alpha + x2 <= -np.pi / 2) &
                   (-np.pi / 2 <= alpha + np.pi / 2))
    condition_4 = ((alpha + x1 <= -np.pi / 2) &
                   (-np.pi / 2 <= alpha + x2) &
                   (alpha + x2 <= alpha + np.pi / 2))
    condition_5 = ((-np.pi / 2 <= alpha + x1) &
                   (alpha + x1 <= alpha + x2) &
                   (alpha + x2 <= alpha + np.pi / 2))
    integration_angles = [
        [alpha - np.pi / 2, np.pi / 2],
        [alpha - np.pi / 2, alpha + x1],
        [alpha - np.pi / 2, alpha + x1, alpha + x2, np.pi / 2],
        [-np.pi / 2, alpha + np.pi / 2],
        [alpha + x2, alpha + np.pi / 2],
        [-np.pi / 2, alpha + x1, alpha + x2, alpha + np.pi / 2]
    ]
    conditions = [
        condition_a & condition_0,
        condition_a & condition_1,
        condition_a & condition_2,
        condition_b & condition_3,
        condition_b & condition_4,
        condition_b & condition_5,
    ]
    # Accumulate the primed Psi terms by evaluating the antiderivative I at
    # each integration limit with alternating sign.
    Psi_S_prime = 0
    Psi_L_prime = 0
    Psi_C_prime = 0
    for condition_i, angle_i in zip(conditions, integration_angles):
        for i, phi_i in enumerate(angle_i):
            sign = (-1) ** (i + 1)
            I_phi_S, I_phi_L, I_phi_C = I(alpha, phi_i)
            Psi_S_prime += jnp.where(condition_i, sign * I_phi_S, 0)
            Psi_L_prime += jnp.where(condition_i, sign * I_phi_L, 0)
            Psi_C_prime += jnp.where(condition_i, sign * I_phi_C, 0)
    # Compute everything for alpha=0
    # NOTE(review): these alpha=0 accumulators are computed but unused below
    # (see the commented-out F_0 line); presumably kept for normalization.
    angles_alpha0 = [-np.pi / 2, x1, x2, np.pi / 2]
    Psi_S_prime_alpha0 = 0
    Psi_L_prime_alpha0 = 0
    Psi_C_prime_alpha0 = 0
    for i, phi_i in enumerate(angles_alpha0):
        sign = (-1) ** (i + 1)
        I_phi_S_alpha0, I_phi_L_alpha0, I_phi_C_alpha0 = I(0, phi_i)
        Psi_S_prime_alpha0 += sign * I_phi_S_alpha0
        Psi_L_prime_alpha0 += sign * I_phi_L_alpha0
        Psi_C_prime_alpha0 += sign * I_phi_C_alpha0
    # Equation 37
    F_S = np.pi / 16 * (omega_0 * Rho_S * Psi_S +
                        omega_prime * Rho_S_prime * Psi_S_prime)
    F_L = np.pi / 12 * (omega_0 * Rho_L * Psi_L +
                        omega_prime * Rho_L_prime * Psi_L_prime)
    F_C = 3 * np.pi / 64 * (omega_0 * Rho_C * Psi_C +
                            omega_prime * Rho_C_prime * Psi_C_prime)
    sobolev_fluxes = F_S + F_L + F_C
    # Normalize by the peak so Psi is a unitless phase function.
    F_max = jnp.max(sobolev_fluxes)
    Psi = sobolev_fluxes / F_max
    flux_ratio_ppm = 1e6 * a_rp**-2 * Psi * A_g
    q = _integral_phase_function(Psi, sin_abs_sort_alpha, sort_alpha,
                                 alpha_sort_order)
    # F_0 = F_S_alpha0 + F_L_alpha0 + F_C_alpha0
    return flux_ratio_ppm, g, q
| [
"numpy.abs",
"jax.numpy.expm1",
"numpy.argsort",
"numpy.sin",
"jax.numpy.where",
"jax.numpy.abs",
"jax.numpy.sin",
"jax.config.config.update",
"jax.numpy.sum",
"jax.numpy.asarray",
"jax.numpy.cos",
"jax.numpy.tan",
"jax.numpy.argsort",
"jax.numpy.log",
"jax.numpy.arange",
"jax.numpy.ma... | [((107, 144), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (120, 144), False, 'from jax.config import config\n'), ((671, 717), 'jax.numpy.arange', 'jnp.arange', (['start', '(stop + dx)', 'dx'], {'dtype': 'floatX'}), '(start, stop + dx, dx, dtype=floatX)\n', (681, 717), True, 'from jax import numpy as jnp\n'), ((889, 903), 'jax.numpy.cos', 'jnp.cos', (['theta'], {}), '(theta)\n', (896, 903), True, 'from jax import numpy as jnp\n'), ((4475, 4494), 'jax.numpy.sum', 'jnp.sum', (['s'], {'axis': '(-1)'}), '(s, axis=-1)\n', (4482, 4494), True, 'from jax import numpy as jnp\n'), ((8497, 8507), 'jax.numpy.sum', 'jnp.sum', (['z'], {}), '(z)\n', (8504, 8507), True, 'from jax import numpy as jnp\n'), ((8590, 8600), 'jax.numpy.sum', 'jnp.sum', (['z'], {}), '(z)\n', (8597, 8600), True, 'from jax import numpy as jnp\n'), ((8786, 8796), 'jax.numpy.cos', 'jnp.cos', (['z'], {}), '(z)\n', (8793, 8796), True, 'from jax import numpy as jnp\n'), ((10621, 10660), 'jax.numpy.asarray', 'jnp.asarray', (['(2 * np.pi * phases - np.pi)'], {}), '(2 * np.pi * phases - np.pi)\n', (10632, 10660), True, 'from jax import numpy as jnp\n'), ((10677, 10691), 'jax.numpy.abs', 'jnp.abs', (['alpha'], {}), '(alpha)\n', (10684, 10691), True, 'from jax import numpy as jnp\n'), ((10715, 10733), 'jax.numpy.argsort', 'jnp.argsort', (['alpha'], {}), '(alpha)\n', (10726, 10733), True, 'from jax import numpy as jnp\n'), ((10759, 10795), 'jax.numpy.sin', 'jnp.sin', (['abs_alpha[alpha_sort_order]'], {}), '(abs_alpha[alpha_sort_order])\n', (10766, 10795), True, 'from jax import numpy as jnp\n'), ((10850, 10869), 'jax.numpy.sqrt', 'jnp.sqrt', (['(1 - omega)'], {}), '(1 - omega)\n', (10858, 10869), True, 'from jax import numpy as jnp\n'), ((12537, 12556), 'jax.numpy.sqrt', 'jnp.sqrt', (['(1 - omega)'], {}), '(1 - omega)\n', (12545, 12556), True, 'from jax import numpy as jnp\n'), ((12914, 12928), 'jax.numpy.cos', 'jnp.cos', 
(['alpha'], {}), '(alpha)\n', (12921, 12928), True, 'from jax import numpy as jnp\n'), ((12947, 12965), 'jax.numpy.cos', 'jnp.cos', (['(alpha / 2)'], {}), '(alpha / 2)\n', (12954, 12965), True, 'from jax import numpy as jnp\n'), ((13889, 13908), 'jax.numpy.sum', 'jnp.sum', (['s'], {'axis': '(-1)'}), '(s, axis=-1)\n', (13896, 13908), True, 'from jax import numpy as jnp\n'), ((15078, 15099), 'jax.numpy.sqrt', 'jnp.sqrt', (['(1 - omega_0)'], {}), '(1 - omega_0)\n', (15086, 15099), True, 'from jax import numpy as jnp\n'), ((15155, 15180), 'jax.numpy.sqrt', 'jnp.sqrt', (['(1 - omega_prime)'], {}), '(1 - omega_prime)\n', (15163, 15180), True, 'from jax import numpy as jnp\n'), ((17778, 17795), 'numpy.argsort', 'np.argsort', (['alpha'], {}), '(alpha)\n', (17788, 17795), True, 'import numpy as np\n'), ((21940, 21963), 'jax.numpy.max', 'jnp.max', (['sobolev_fluxes'], {}), '(sobolev_fluxes)\n', (21947, 21963), True, 'from jax import numpy as jnp\n'), ((3739, 3783), 'jax.numpy.expm1', 'jnp.expm1', (['(h * c / (lam * k_B * temperature))'], {}), '(h * c / (lam * k_B * temperature))\n', (3748, 3783), True, 'from jax import numpy as jnp\n'), ((7566, 7588), 'jax.numpy.power', 'jnp.power', (['a_rs', '(-half)'], {}), '(a_rs, -half)\n', (7575, 7588), True, 'from jax import numpy as jnp\n'), ((8698, 8708), 'jax.numpy.sin', 'jnp.sin', (['z'], {}), '(z)\n', (8705, 8708), True, 'from jax import numpy as jnp\n'), ((9569, 9598), 'jax.numpy.sum', 'jnp.sum', (['z[m, 1:n, :]'], {'axis': '(0)'}), '(z[m, 1:n, :], axis=0)\n', (9576, 9598), True, 'from jax import numpy as jnp\n'), ((9617, 9648), 'jax.numpy.sum', 'jnp.sum', (['z[1:m, 1:n, :]'], {'axis': '(0)'}), '(z[1:m, 1:n, :], axis=0)\n', (9624, 9648), True, 'from jax import numpy as jnp\n'), ((11347, 11369), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11354, 11369), True, 'from jax import numpy as jnp\n'), ((11372, 11394), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11379, 
11394), True, 'from jax import numpy as jnp\n'), ((11413, 11435), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11420, 11435), True, 'from jax import numpy as jnp\n'), ((11438, 11460), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11445, 11460), True, 'from jax import numpy as jnp\n'), ((11559, 11647), 'jax.numpy.log', 'jnp.log', (['((1 + alpha_minus) * (alpha_plus - 1) / (1 + alpha_plus) / (1 - alpha_minus))'], {}), '((1 + alpha_minus) * (alpha_plus - 1) / (1 + alpha_plus) / (1 -\n alpha_minus))\n', (11566, 11647), True, 'from jax import numpy as jnp\n'), ((12975, 13003), 'jax.numpy.sin', 'jnp.sin', (['(alpha / 2 - Phi / 2)'], {}), '(alpha / 2 - Phi / 2)\n', (12982, 13003), True, 'from jax import numpy as jnp\n'), ((13006, 13022), 'jax.numpy.cos', 'jnp.cos', (['(Phi / 2)'], {}), '(Phi / 2)\n', (13013, 13022), True, 'from jax import numpy as jnp\n'), ((15603, 15614), 'jax.numpy.sin', 'jnp.sin', (['x2'], {}), '(x2)\n', (15610, 15614), True, 'from jax import numpy as jnp\n'), ((18309, 18331), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18316, 18331), True, 'from jax import numpy as jnp\n'), ((18334, 18356), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18341, 18356), True, 'from jax import numpy as jnp\n'), ((18375, 18397), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18382, 18397), True, 'from jax import numpy as jnp\n'), ((18400, 18422), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18407, 18422), True, 'from jax import numpy as jnp\n'), ((18521, 18609), 'jax.numpy.log', 'jnp.log', (['((1 + alpha_minus) * (alpha_plus - 1) / (1 + alpha_plus) / (1 - alpha_minus))'], {}), '((1 + alpha_minus) * (alpha_plus - 1) / (1 + alpha_plus) / (1 -\n alpha_minus))\n', (18528, 18609), True, 'from jax import numpy as jnp\n'), ((3707, 3724), 'jax.numpy.power', 'jnp.power', (['lam', '(5)'], {}), 
'(lam, 5)\n', (3716, 3724), True, 'from jax import numpy as jnp\n'), ((7605, 7638), 'jax.numpy.power', 'jnp.power', (['(one - A_B)', '(half * half)'], {}), '(one - A_B, half * half)\n', (7614, 7638), True, 'from jax import numpy as jnp\n'), ((9537, 9566), 'jax.numpy.sum', 'jnp.sum', (['z[0, 1:n, :]'], {'axis': '(0)'}), '(z[0, 1:n, :], axis=0)\n', (9544, 9566), True, 'from jax import numpy as jnp\n'), ((11800, 11818), 'jax.numpy.sin', 'jnp.sin', (['abs_alpha'], {}), '(abs_alpha)\n', (11807, 11818), True, 'from jax import numpy as jnp\n'), ((13162, 13172), 'jax.numpy.abs', 'jnp.abs', (['z'], {}), '(z)\n', (13169, 13172), True, 'from jax import numpy as jnp\n'), ((13270, 13294), 'jax.numpy.sin', 'jnp.sin', (['(alpha / 2 - Phi)'], {}), '(alpha / 2 - Phi)\n', (13277, 13294), True, 'from jax import numpy as jnp\n'), ((15589, 15600), 'jax.numpy.sin', 'jnp.sin', (['x1'], {}), '(x1)\n', (15596, 15600), True, 'from jax import numpy as jnp\n'), ((17726, 17739), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (17732, 17739), True, 'import numpy as np\n'), ((17821, 17856), 'numpy.sin', 'np.sin', (['abs_alpha[alpha_sort_order]'], {}), '(abs_alpha[alpha_sort_order])\n', (17827, 17856), True, 'import numpy as np\n'), ((18761, 18779), 'jax.numpy.sin', 'jnp.sin', (['abs_alpha'], {}), '(abs_alpha)\n', (18768, 18779), True, 'from jax import numpy as jnp\n'), ((20864, 20905), 'jax.numpy.where', 'jnp.where', (['condition_i', '(sign * I_phi_S)', '(0)'], {}), '(condition_i, sign * I_phi_S, 0)\n', (20873, 20905), True, 'from jax import numpy as jnp\n'), ((20933, 20974), 'jax.numpy.where', 'jnp.where', (['condition_i', '(sign * I_phi_L)', '(0)'], {}), '(condition_i, sign * I_phi_L, 0)\n', (20942, 20974), True, 'from jax import numpy as jnp\n'), ((21002, 21043), 'jax.numpy.where', 'jnp.where', (['condition_i', '(sign * I_phi_C)', '(0)'], {}), '(condition_i, sign * I_phi_C, 0)\n', (21011, 21043), True, 'from jax import numpy as jnp\n'), ((2617, 2634), 'jax.numpy.power', 'jnp.power', 
(['m', 'two'], {}), '(m, two)\n', (2626, 2634), True, 'from jax import numpy as jnp\n'), ((2776, 2792), 'jax.numpy.cos', 'jnp.cos', (['(m * phi)'], {}), '(m * phi)\n', (2783, 2792), True, 'from jax import numpy as jnp\n'), ((3039, 3055), 'jax.numpy.sin', 'jnp.sin', (['(m * phi)'], {}), '(m * phi)\n', (3046, 3055), True, 'from jax import numpy as jnp\n'), ((8361, 8380), 'jax.numpy.power', 'jnp.power', (['rp_rs', '(2)'], {}), '(rp_rs, 2)\n', (8370, 8380), True, 'from jax import numpy as jnp\n'), ((9463, 9492), 'jax.numpy.sum', 'jnp.sum', (['z[1:m, 0, :]'], {'axis': '(0)'}), '(z[1:m, 0, :], axis=0)\n', (9470, 9492), True, 'from jax import numpy as jnp\n'), ((9495, 9524), 'jax.numpy.sum', 'jnp.sum', (['z[1:m, n, :]'], {'axis': '(0)'}), '(z[1:m, n, :], axis=0)\n', (9502, 9524), True, 'from jax import numpy as jnp\n'), ((11856, 11874), 'jax.numpy.cos', 'jnp.cos', (['abs_alpha'], {}), '(abs_alpha)\n', (11863, 11874), True, 'from jax import numpy as jnp\n'), ((13187, 13199), 'jax.numpy.log1p', 'jnp.log1p', (['z'], {}), '(z)\n', (13196, 13199), True, 'from jax import numpy as jnp\n'), ((13202, 13215), 'jax.numpy.log1p', 'jnp.log1p', (['(-z)'], {}), '(-z)\n', (13211, 13215), True, 'from jax import numpy as jnp\n'), ((13403, 13427), 'jax.numpy.sin', 'jnp.sin', (['(alpha - 2 * Phi)'], {}), '(alpha - 2 * Phi)\n', (13410, 13427), True, 'from jax import numpy as jnp\n'), ((16206, 16227), 'jax.numpy.sqrt', 'jnp.sqrt', (['(1 + 8 * C_3)'], {}), '(1 + 8 * C_3)\n', (16214, 16227), True, 'from jax import numpy as jnp\n'), ((18817, 18835), 'jax.numpy.cos', 'jnp.cos', (['abs_alpha'], {}), '(abs_alpha)\n', (18824, 18835), True, 'from jax import numpy as jnp\n'), ((2522, 2548), 'jax.numpy.power', 'jnp.power', (['omega_drag', 'two'], {}), '(omega_drag, two)\n', (2531, 2548), True, 'from jax import numpy as jnp\n'), ((2569, 2596), 'jax.numpy.power', 'jnp.power', (['alpha', '(two * two)'], {}), '(alpha, two * two)\n', (2578, 2596), True, 'from jax import numpy as jnp\n'), ((11025, 11039), 
'jax.numpy.cos', 'jnp.cos', (['alpha'], {}), '(alpha)\n', (11032, 11039), True, 'from jax import numpy as jnp\n'), ((11701, 11723), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11708, 11723), True, 'from jax import numpy as jnp\n'), ((11910, 11932), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11917, 11932), True, 'from jax import numpy as jnp\n'), ((11959, 11981), 'jax.numpy.tan', 'jnp.tan', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11966, 11981), True, 'from jax import numpy as jnp\n'), ((11984, 12006), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11991, 12006), True, 'from jax import numpy as jnp\n'), ((13605, 13629), 'jax.numpy.sin', 'jnp.sin', (['(alpha / 2 + Phi)'], {}), '(alpha / 2 + Phi)\n', (13612, 13629), True, 'from jax import numpy as jnp\n'), ((18047, 18065), 'jax.numpy.cos', 'jnp.cos', (['abs_alpha'], {}), '(abs_alpha)\n', (18054, 18065), True, 'from jax import numpy as jnp\n'), ((18662, 18684), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18669, 18684), True, 'from jax import numpy as jnp\n'), ((18871, 18893), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18878, 18893), True, 'from jax import numpy as jnp\n'), ((18920, 18942), 'jax.numpy.tan', 'jnp.tan', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18927, 18942), True, 'from jax import numpy as jnp\n'), ((18958, 18980), 'jax.numpy.sin', 'jnp.sin', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18965, 18980), True, 'from jax import numpy as jnp\n'), ((11755, 11777), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (11762, 11777), True, 'from jax import numpy as jnp\n'), ((13515, 13547), 'jax.numpy.sin', 'jnp.sin', (['(3 * alpha / 2 - 3 * Phi)'], {}), '(3 * alpha / 2 - 3 * Phi)\n', (13522, 13547), True, 'from jax import numpy as jnp\n'), ((13562, 13590), 'jax.numpy.sin', 'jnp.sin', (['(3 * alpha / 2 - 
Phi)'], {}), '(3 * alpha / 2 - Phi)\n', (13569, 13590), True, 'from jax import numpy as jnp\n'), ((13645, 13663), 'jax.numpy.sin', 'jnp.sin', (['(alpha / 2)'], {}), '(alpha / 2)\n', (13652, 13663), True, 'from jax import numpy as jnp\n'), ((15959, 15974), 'jax.numpy.sin', 'jnp.sin', (['(3 * x1)'], {}), '(3 * x1)\n', (15966, 15974), True, 'from jax import numpy as jnp\n'), ((15977, 15992), 'jax.numpy.sin', 'jnp.sin', (['(3 * x2)'], {}), '(3 * x2)\n', (15984, 15992), True, 'from jax import numpy as jnp\n'), ((18716, 18738), 'jax.numpy.cos', 'jnp.cos', (['(abs_alpha / 2)'], {}), '(abs_alpha / 2)\n', (18723, 18738), True, 'from jax import numpy as jnp\n'), ((13480, 13504), 'jax.numpy.sin', 'jnp.sin', (['(alpha / 2 - Phi)'], {}), '(alpha / 2 - Phi)\n', (13487, 13504), True, 'from jax import numpy as jnp\n'), ((15908, 15919), 'jax.numpy.sin', 'jnp.sin', (['x1'], {}), '(x1)\n', (15915, 15919), True, 'from jax import numpy as jnp\n'), ((15922, 15933), 'jax.numpy.sin', 'jnp.sin', (['x2'], {}), '(x2)\n', (15929, 15933), True, 'from jax import numpy as jnp\n'), ((15727, 15742), 'jax.numpy.sin', 'jnp.sin', (['(2 * x1)'], {}), '(2 * x1)\n', (15734, 15742), True, 'from jax import numpy as jnp\n'), ((15745, 15760), 'jax.numpy.sin', 'jnp.sin', (['(2 * x2)'], {}), '(2 * x2)\n', (15752, 15760), True, 'from jax import numpy as jnp\n')] |
# Cazoo - 2021-06-12
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
from typing import List
import re
import os
import json
import copy ## copy.deepcopy
import kkpmx_core as kklib
import kkpmx_utils as util
try:
import nuthouse01_core as core
import nuthouse01_pmx_parser as pmxlib
import nuthouse01_pmx_struct as pmxstruct
import _prune_unused_bones as bonelib
import _translation_tools as tlTools
import morph_scale
except ImportError as eee:
print(eee.__class__.__name__, eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = pmxstruct = morph_scale = bonelib = None
#############
### :: (\w+)( +):= "_(\w+).png" --> \1\2= "\3"
## --- Texture-slot key constants ---------------------------------------------
## Each value names a texture exported by KK's MaterialEditor; per the regex
## hint above, key <X> corresponds to a file suffix "_<X>.png".
t__Alpha = "AlphaMask"
t__Another = "AnotherRamp"
t__Color = "ColorMask"
t__Detail = "DetailMask"
t__Glass = "GlassRamp"
t__HairGloss = "HairGloss"
t__Line = "LineMask"
t__Liquid = "liquid"
t__Main = "MainTex"
t__MainCol = "MainC"
t__NorMap = "NormalMap"
t__NorMapDet = "NormalMapDetail"
t__NorMask = "NormalMask"
t__overtex1 = "overtex1"
t__overtex2 = "overtex2"
t__overtex3 = "overtex3"
## --- Color attribute keys as they appear in the generated JSON --------------
Color_1 = "Color"
Color_2 = "Color2"
Color_3 = "Color3"
Color_Tex1 = "overcolor1"
Color_Tex2 = "overcolor2"
Color_Tex3 = "overcolor3"
Color_Line = "LineColor"
Color_Shadow = "ShadowColor"
Color_Specular = "SpecularColor"
##########
## meta attributes -- top-level / per-material keys of the JSON file
NAME = "name"
BASE = "base"
OPTIONS = "options"
#---
OPT_ENG = "use_english"
OPT_HAIR = "process_hair"
#---
GROUP = "group" ### Determines the texture type to use. Will fallback to [shader] if missing.
TEXTURES = "textures" ### Maybe call it "used_textures"
INHERIT = "inherit" ### Shallow copy all attributes from the target block
META = "meta" ### Opens sub tree for meta attributes of the asset
TEMPLATE = "template" ### If true, the block will be considered used when determining unused blocks.
MODE = "mode" ### Secret option to override the "blend_mode" for DetailMask
#--- runtime only (never read from the JSON file itself)
IDX = "index"
NO_FILES = "no_files"
PARENT = "parentMat"
PARSED = "parsed"
ROOT = "root"
#---- added by plugin
SHADER = "shader"
AVAILABLE = "available"
MT_ENABLED = "enabled"
MT_RECEIVE = "receive"
MT_SHADOWS = "shadows"
MT_RENDER = "render"
MT_PARENT = "parent"
MT_SLOT = "slot"
#---- Extras
ARGSTR = "argStr"   ## cache key: argument string of an already-generated texture
t__Reuse = "Reuse"  ## marks an attr dict as reusing a previously generated texture
## User-facing help text; also assigned to parseMatComments.__doc__ below.
helptext = """
This is the second of two main methods to make KK-Models look better.
- Note: Current/Working directory == same folder as the *.pmx file.
- Within the *.json file, this is abbreviated as "%PMX%".
It will parse a processed plugin file and apply colors and shaders, as well as combining textures.
It also sets the visibility on customized multi-part assets and adds the accessory slots into the comment.
-- Remarks: All texture adjustments work based on "Garbage in, Garbage out": The results are only as good as the sum of the sources.
This does require some preparations before it can be used:
[1] A *.json file generated by [(7) GenerateJsonFile]
-- Default looks for "#generateJSON.json" in the working directory
-- Otherwise asks for the path if not found
[2] A folder filled with extra textures exported from KK (requires the [MaterialEditor] mod)
-- The path will be read from the above *.json in "options" > "base" and defaults to "./extra".
-- Currently working / supported are:
-- -- MainTex (already exported by [PMXExport], so can be ignored)
-- -- DetailMask
-- -- ColorMask
-- -- LineMask
-- -- overtex1 (on body and eye)
-- -- overtex2 (on eye)
-- Not (yet) supported are:
-- -- on body: overtex2
-- -- on face: overtex1, overtex2, overtex3
-- -- NormalMap, NormalMask
Additional notes:
-- After generation, the *.json file can be edited to change some aspects (some colors, visibility, textures) without KK. Re-run this to apply changes.
-- Due to pre-generation, sometimes textures are used that do not exist (which will print a warning). Remove the item from the faulty material's template to clear the warning.
Options (for Automization):
- apply: bool -- True to not wait for results and write directly to model. Prompts if [None]
"""
DEBUG = util.DEBUG or False
## Todos: Root Path \\ Figure out how to apply fixes for ColorMask (Additive Merge)
#############
## Paths of the external image-composition scripts run via call_img_scripts().
pathDetail = r'.\extra\#Apply_DetailMap.py'
pathColor = r'.\extra\#Apply_ColorMap.py'
pathBOver1 = r'.\extra\#Apply_Body_overtex1.py'
pathOT_eye = r'.\extra\#Apply_overtex1__eyes.py'
pathLine = r'.\extra\#Apply_LineMap.py'
## Filename suffixes appended to generated output textures.
suffix_Col = "_pyCol"
suffix_Det = "_pyDet"
suffix_Line = "_pyLin"
suffix_HL = "_pyHL"
suffix_OT1 = "_pyOT1"
suffix_OTEye = "_pyOTHL"
######
## State -- maybe change to { mat_name: state[of "miss", "no_action", "no_files", "ok" ]}
msgs = { 'miss': [], 'no_action': [], 'no_files': [] }  ## collected warnings for the final summary
msgsPre = "-- "  ## prefix put before each collected message line
global_state = { }  ## shared runtime flags/caches across one parser invocation
state_info = "showinfo"  ## key in global_state: verbose-output flag
################
def parseMatComments(pmx, input_file_name: str, write_model = True, moreinfo = False, opt = {}):
	## Entry point: load "#generateJSON.json" from the model folder, apply all
	## material / texture adjustments, then (optionally) write the model back.
	## :param pmx: parsed PMX model object
	## :param input_file_name: path of the *.pmx file (its folder becomes the working dir)
	## :param write_model: if True, save (after confirmation); else always write "_props" copy
	## :param moreinfo: verbose console output
	## :param opt: automation options; opt["apply"] (bool) skips the interactive prompt
	## NOTE: the mutable default `opt={}` is only read via .get(), so it is harmless here.
	paths = os.path.split(input_file_name)
	root = paths[0]
	print("==== Start Parsing ====")
	print("-- [Working Directory]: " + root);
	path = os.path.join(root, "#generateJSON.json") #@todo_note "<< Default file name >>"
	global_state[state_info] = moreinfo or DEBUG
	global_state[ARGSTR] = {}  ## reset the per-run texture-argument cache
	#### Load JSON
	def callback(raw_data):
		## Pre-process the raw JSON text before it is parsed:
		# replace Shorthand
		raw_data = raw_data.replace("%PMX%", re.escape(root)) #@todo_note "<< explain %PMX% >>"
		#raw_data = raw_data.replace("%PMX%", root.replace(r'\\', r'\\\\'))
		## normalize known typos / legacy key names
		raw_data = raw_data.replace("SpeclarHeight", "SpecularHeight")
		raw_data = raw_data.replace("shadowcolor", Color_Shadow)
		return raw_data
	data = util.load_json_file(path, callback)
	if not data: return ## User already knows the reason
	__parse_json_file(pmx, data, root)
	### Trim all textures -- store texture paths relative to the model folder
	for (idx,tex) in enumerate(pmx.textures):
		if os.path.isabs(tex):
			pmx.textures[idx] = os.path.relpath(tex, root)
	if write_model:
		## Confirm interactively unless opt["apply"] pre-decides it.
		flag = opt.get("apply", None)
		if flag is None: flag = (core.MY_GENERAL_INPUT_FUNC(lambda x: x in ['y','n'], "Apply changes ?") == 'y')
		if flag:
			return kklib.end(pmx, input_file_name, "_props")
		return input_file_name
	else: return kklib.end(None, input_file_name, "_props")
parseMatComments.__doc__ = helptext
def __parse_json_file(pmx, data: dict, root: str):
	"""
	Walk every material entry in the generated JSON, resolve inheritance and
	default textures, then dispatch to the per-group parser. Collects warnings
	into the module-level `msgs` dict and prints a summary at the end.
	@param pmx [Pmx]
	@param data [dict] The parsed JSON object
	@param root [str] The root path of the pmx file
	"""
	verbose = global_state[state_info]
	##### == Processing JSON Dict
	attrDict = { }  ## NOTE(review): never used below -- candidate for removal
	base = data.get(BASE, None) #@todo_note "BASE can be used to provide a relative base path for texture references"
	data.setdefault(OPTIONS, {})
	#options = defaults | data[OPTIONS] ## Python 3.9
	options = {
		BASE: base, # @todo_add "BASE can be defined alone or inside OPTIONS"
		OPT_ENG: False, #@todo_note: "Using OPT_ENG, the display name (name_jp) in the editor must not match the filename segment"
		OPT_HAIR: False,
	}
	options.update(data[OPTIONS])
	if base is None: base = options[BASE] #@todo_add "Outside BASE has priority over the one in OPTIONS"
	#--- NAME: only overwrite the model name if it is still the default
	name = data.get(NAME, None)
	if name and pmx.header.name_jp in ["Koikatu","koikatu","Koikatsu","koikatsu","",None]:
		pmx.header.name_jp = name
		pmx.header.name_en = name
	ask_to_rename_extra(base)
	fix_material_names(pmx, data)
	hair_tabu = []  ## INHERIT sources whose hair texture was already generated
	global_state[OPT_HAIR] = {}
	for mat_name in data.keys():
		if mat_name in [NAME, BASE, OPTIONS]: continue  ## skip the meta entries
		mat_idx = util.find_mat(pmx, mat_name, False)
		if mat_idx == -1:
			if type(data[mat_name]) == dict:
				if data[mat_name].get("template",False): continue # @todo_note "if TEMPLATE is true, it does not count as 'no match'"
			msgs['miss'].append(msgsPre + mat_name)
			print("\nCould not find {}, skipping".format(mat_name)); continue
		mat = pmx.materials[mat_idx]
		attr = data[mat_name]
		if len(attr) == 0: continue ## Catches {},[],"" #@todo_add "<< silent ignore of empty elements >>"
		print("\n==== Processing " + mat.name_jp)
		# Copy Type: a str or [str] entry clones another material's block wholesale
		if type(attr) in [str, list]:
			isStr = type(attr) == type("")
			org = attr if isStr else attr[0] ## [0] bc list
			if org not in data:
				print("Did not find '{}' to inherit attributes from".format(org))
				continue
			print("> Copy attributes from " + org)
			attr = data[org]
			if isStr: attr[NO_FILES] = True ## @todo_note "Inherit[A]: Reuse the files of [inherit]"
			elif NO_FILES in attr: del attr[NO_FILES] ## @todo_note "Inherit[B]: Reuse all but files"
			attr[PARENT] = org
		#if True:#PARSED not in attr:
		# Field: Inherit -- deep-copy the source block, then layer own fields on top
		if INHERIT in attr: ## @todo_note "Inherit[C]: Extend [inherit] with own fields"
			org = attr[INHERIT]
			if org not in data:
				print("Did not find '{}' to inherit attributes from".format(org))
				continue
			if verbose: print("> Extends " + org)
			#attr = data[org] | attr ### New in py 3.9
			tmp = copy.deepcopy(data[org])
			tmp.update(attr)
			attr = tmp
		# Field: Render Type
		if GROUP not in attr: #@todo_note "GROUP determines action"
			if SHADER in attr: #@todo_note "SHADER is fallback for GROUP"
				attr[GROUP] = shader_dict.get(attr[SHADER], attr[SHADER])
			else:
				print(f"Material has no '{GROUP}' attribute, using 'item' instead") #@todo_add[GROUP] "'item' is default for GROUP"
				attr[GROUP] = "item"
		# Field: Do not reprocess if cloning
		if TEXTURES in attr: #@todo_note "TEXTURES can be used to use default names based on KK & BASE"
			if base is None:
				print("[err] Cannot process default textures without base path") #@todo_add[BASE] "BASE is required when using TEXTURES"
				return
			base_mat = mat
			if NO_FILES in attr: base_mat = pmx.materials[util.find_mat(pmx, attr[PARENT])] #@todo_ref "<< Inherit[A] >>"
			name = base_mat.name_en if options[OPT_ENG] else base_mat.name_jp ## @todo_ref "<< OPT_ENG >>"
			name = re.sub(r"\*\d","",name) #@todo_note "<< smt about '* not allowed in filename' >>"
			for tex in attr[TEXTURES]:
				if tex not in texSuffix:
					print("[NotImpl] Could not find texture key '{}'".format(tex))
					continue
				if texDict[tex] in attr: continue ## @todo_add "All TEXTURES can be hardcoded with explicit paths (+ << %PMX% >>) and override defaults"
				## @todo_note: "if 'MainTex' is used but does not exist in BASE, use the one registered in the PmxMaterial, if any"
				attr[texDict[tex]] = os.path.join(base, name + texSuffix[tex])
				if tex == t__Main and base_mat.tex_idx > -1:
					if not os.path.exists(attr[texDict[tex]]):
						attr[texDict[tex]] = os.path.join(root, pmx.textures[base_mat.tex_idx])
		# Field: Add hair flag and keep track of duplicates
		if (attr[GROUP] == "hair"):
			attr[OPT_HAIR] = options[OPT_HAIR] and (attr[INHERIT] not in hair_tabu)
			hair_tabu.append(attr[INHERIT])
		#---
		if verbose: print(">--> Found {} attributes to process".format(len(attr)))
		attr[ROOT] = root
		attr[PARSED] = True
		## Dispatch table: GROUP value -> parser.
		## NOTE(review): 'alpha' dispatches to parse_acc here; the separate
		## parse_alpha() defined below is currently unreferenced -- confirm intent.
		parseDict = {
			'cloth': parse_acc, 'acc': parse_acc, 'item': parse_acc,
			'body': parse_body, 'face': parse_face,
			'eye': parse_eye, 'hair': parse_hair, 'color': parse_color,
			'alpha': parse_acc, 'ignore': parse_pass, 'glass': parse_glass,
		}
		if attr[GROUP] in parseDict: parseDict[attr[GROUP]](pmx, mat, attr); #exit()
		else:
			msgs['no_action'].append(msgsPre + mat.name_jp)
			parseDict["item"](pmx, mat, attr)#; exit()
	#######
	### Display summary for quick glance that something did not work
	arr = []
	## Remove meta items -- NAME/BASE/OPTIONS may have been reported as missing materials
	for a in [msgsPre + s for s in [NAME,BASE,OPTIONS]]:
		try:
			msgs['miss'].remove(a)
		except: pass  ## best-effort removal; entry may simply not be present
	if len(msgs['miss']) > 0: arr.append('\n'.join(["The following elements were not found:", '\n'.join(msgs['miss'])]))
	if len(msgs['no_action']) > 0: arr.append('\n'.join(["The following materials had no valid action:", "\n".join(msgs['no_action'])]))
	if len(msgs['no_files']) > 0:
		tmp = [msgsPre + os.path.relpath(tex, base) for tex in msgs['no_files']]
		arr.append('\n'.join(["Additionally, these textures failed to generate:", "\n".join(tmp)]))
		arr.append("- To fix the above, provide the missing base file or remove it from the 'textures' element of the material")
	if len(arr) > 0: print('\n'.join(["==========",'\n\n'.join(arr),"=========="]))
#################
#### Parsers ####
#################
## name_jp: str, name_en: str,
## diffRGB: List[float],
## specRGB: List[float],
## ambRGB: List[float],
## alpha: float, specpower: float,
## edgeRGB: List[float], edgealpha: float, edgesize: float,
## tex_idx: int,
## sph_idx: int, sph_mode: int, --> Disabled, Add, Multi, Sub Texture
## toon_idx: int, toon_mode: int, --> 0 or 1
## comment: str,
## faces_ct: int,
## flaglist: List[bool], --> 2-SIDE, G-SHAD, S-MAP, S-SHAD, use_edge, V-COLOR, [Point], [Line]
def parse_color(pmx, mat, attr):
	"""Plain-color material: zero out diffuse and move the JSON color into ambient."""
	print(":: Running 'color' parser")
	if Color_1 not in attr:
		return
	had_texture = mat.tex_idx != -1
	if had_texture and mat.diffRGB != [0, 0, 0]:
		## Keep a record of the value being discarded inside the material comment.
		mat.comment += "\r\n Old Diffuse: " + str(mat.diffRGB) + " (by parser)"
	mat.diffRGB = [0, 0, 0]
	mat.ambRGB = attr.get(Color_1, [0, 0, 0])[:3]
def parse_body(pmx, mat, attr):
	"""Process a 'body' material: common colors, detail / line masks and overtex1."""
	print(":: Running 'body' parser")
	## Normalize color entries, then guarantee both overtex keys exist.
	extend_colors(attr, [Color_Tex1, Color_Tex2, Color_Tex3, Color_Shadow, Color_Specular])
	for key in (t__overtex1, t__overtex2):
		attr.setdefault(key, None)
	## NOTE: custom Shadows make skin look weird, hence no special handling here.
	process_common_attrs(pmx, mat, attr)
	if t__Detail in attr:
		process_color_and_detail(pmx, mat, attr)
	if t__Line in attr:
		handle_body_line(pmx, attr)
		set_new_texture(pmx, mat, attr, [get_working_texture(attr), suffix_Line + ".png"])
	if attr[t__overtex1]:
		handle_body_overtex1(pmx, attr)
		set_new_texture(pmx, mat, attr, [get_working_texture(attr), suffix_OT1 + ".png"])
	if attr[t__overtex2]:
		pass  ## overtex2 on body is not implemented yet
def parse_face(pmx, mat, attr):
	"""Process a 'face' material; shares color/detail/line handling with the body."""
	print(":: Running 'face' parser")
	extend_colors(attr, [Color_Tex1, Color_Tex2, Color_Tex3, Color_Shadow, Color_Specular])
	process_common_attrs(pmx, mat, attr)
	if t__Detail in attr:
		process_color_and_detail(pmx, mat, attr)
	if t__Line in attr:
		handle_body_line(pmx, attr)
		set_new_texture(pmx, mat, attr, [get_working_texture(attr), suffix_Line + ".png"])
	## NormalMap / overtex layers on the face are not handled yet.
def parse_acc(pmx, mat, attr):
	"""Process 'cloth' / 'acc' / 'item' materials (also the fallback parser)."""
	print(":: Running 'acc' parser")
	extend_colors(attr, [Color_1, Color_2, Color_3, Color_Shadow, Color_Specular])
	process_common_attrs(pmx, mat, attr)
	process_color_and_detail(pmx, mat, attr)
	if t__Line in attr:
		process_line_mask(pmx, mat, attr)
def parse_eye(pmx, mat, attr):
	"""Process an 'eye' material: bake highlight overlays onto the iris texture."""
	print(":: Running 'eye' parser")
	extend_colors(attr, [Color_Tex1, Color_Tex2, Color_Shadow], True)
	has_overlay = (t__overtex1 in attr) or (t__overtex2 in attr)
	if has_overlay:
		handle_eye_highlight(pmx, attr)
		set_new_texture(pmx, mat, attr, [attr[t__Main], suffix_HL + ".png"])
def parse_hair(pmx, mat, attr):
	"""Process a 'hair' material; sibling strips may reuse an earlier texture."""
	print(":: Running 'hair' parser")
	extend_colors(attr, [Color_1, Color_2, Color_3, Color_Line, Color_Shadow])
	process_common_attrs(pmx, mat, attr)
	if attr[OPT_HAIR]:
		process_color_and_detail(pmx, mat, attr)
	elif attr[INHERIT] in global_state[OPT_HAIR]:
		## A sibling hair strip already generated this texture -- just point at it.
		reused = global_state[OPT_HAIR][attr[INHERIT]]
		attr[t__Reuse] = reused
		print(f"> Reusing texture {reused}")
		set_new_texture(pmx, mat, attr, [reused, ".png"])
	if Color_1 in attr:
		mat.diffRGB = attr[Color_1][:3]
def parse_alpha(pmx, mat, attr): ## @todo
	"""Process an 'alpha' material (currently the same pipeline as accessories)."""
	print(":: Running 'alpha' parser")
	extend_colors(attr, [Color_1, Color_2, Color_3, Color_Line, Color_Shadow])
	process_common_attrs(pmx, mat, attr)
	process_color_and_detail(pmx, mat, attr)
	if Color_1 in attr:
		mat.diffRGB = attr[Color_1][:3]
def parse_glass(pmx, mat, attr):
	"""Glass: force specular power to 1.0, then defer to the color parser."""
	mat.specpower = 1.0
	parse_color(pmx, mat, attr)

def parse_pass(pmx, mat, attr):
	"""No-op parser for materials whose group is 'ignore'."""
	return None
####################
#### Processing ####
####################
def script__draw_toon_shader(args):
	"""Render a 64x64 toon-shadow gradient texture and write it to disk.

	:param args: sequence [unused, color, out_path] --
	             args[1] is an (R, G, B) tuple of 0..255 ints,
	             args[2] the destination *.png path.
	"""
	import numpy as np
	import cv2
	### https://github.com/jerryhouuu/Draw-Gradient-Alpha-Rectangle-using-openCV
	def draw_gradient_alpha_rectangle(frame, BGR_Channel, rect_pos=None, rotate=None):
		"""
		frame: The image to draw in :: np.zeros((300, 300, 3), np.uint8) ++ [:,:,:] = 255
		BGR_Channel: The target color, as tuple(B, G, R) ##(R, G, B) ?
		rect_pos: The area to draw a gradient into.
		rotate (white to color): 0 (L-R), 1(Upwards), 2(R-L), 3(Downwards)
		"""
		## NOTE(review): default rect_pos uses (shape[0], shape[1]) as (x, y) even
		## though numpy shape is (rows, cols); harmless here since the frame is square.
		if rect_pos is None: rect_pos = ((0,0), (frame.shape[0], frame.shape[1]))
		if rotate is None: rotate = 3
		(xMin, yMin), (xMax, yMax) = rect_pos
		color = np.array(BGR_Channel, np.uint8)[np.newaxis, :]
		## Build a 0..1 alpha mask of the rectangle and blend white->color along it.
		mask1 = np.rot90(np.repeat(np.tile(np.linspace(1, 0, (rect_pos[1][1]-rect_pos[0][1])), ((rect_pos[1][0]-rect_pos[0][0]), 1))[:, :, np.newaxis], 3, axis=2), rotate)
		frame[yMin:yMax, xMin:xMax, :] = mask1 * frame[yMin:yMax, xMin:xMax, :] + (1-mask1) * color
		return frame#42, 175, 121
	frame = np.zeros((64, 64, 3), np.uint8)
	## options
	BGR = True      ## cv2 stores images in BGR channel order
	gradient = 0    ## 0 = white base + gradient, 1 = flat fill, 2 = hard split
	####
	if BGR: color = list(reversed(args[1]))
	## FIX: original read "else: args[1]" (a no-op), leaving `color` undefined
	## whenever BGR is False. Wrap in list() so adjust() below can mutate it.
	else: color = list(args[1])
	if gradient == 0: frame[:,:,:] = 255
	else:
		frame[:,:,0] = color[0]
		frame[:,:,1] = color[1]
		frame[:,:,2] = color[2]
	## Darken the target color for the shadow half of the ramp.
	def adjust(c,i): c[i]= min(c[i] * 0.75,255)
	adjust(color,0); adjust(color,1); adjust(color,2)
	if gradient == 2: ### [:,32,*] = vertical
		frame[32:,:,0] = color[0]
		frame[32:,:,1] = color[1]
		frame[32:,:,2] = color[2]
		#frame[:2,:,:] = 255
	else: frame = draw_gradient_alpha_rectangle(frame, tuple(color), ((0,32), (64,64)))
	cv2.imwrite(args[2], frame)
def add_toon_shader(pmx, mat, attr):
	"""Create (or reuse) a cached toon-gradient texture for the shadow color."""
	shadow = tuple(attr[Color_Shadow][:3])
	root = attr[ROOT]
	## Scale the 0..1 floats to 0..255 channel values for the filename / painter.
	rgb = tuple(int(ch * 255) for ch in shadow)
	pathDir = os.path.join(root, "toon")
	pathFile = os.path.join(pathDir, f"toon__{rgb[0]}_{rgb[1]}_{rgb[2]}.png")
	if not os.path.exists(pathFile):
		if not os.path.exists(pathDir):
			os.mkdir(pathDir)
		script__draw_toon_shader(["", rgb, pathFile])
	set_new_texture(pmx, mat, attr, [pathFile], tex_mode_toon)
def process_common_attrs(pmx, mat, attr): ## @open: rimpower, rimV, Color_Shadow
	###
	# Applies attributes shared by all material groups:
	# Color_Line, Color_Shadow, Color_Specular (figure out smt new)
	# Set: specRGB, specpower, ambRGB, edgesize, edgeRGB, edgealpha, alpha, flaglist
	###
	## --- Edge / outline color
	if Color_Line in attr:
		if attr[Color_Line] == [0,0,0,0]:
			## Fully transparent outline -> disable edge rendering entirely.
			mat.flaglist[4] = False
			mat.edgesize = 0
		mat.edgeRGB = attr[Color_Line][:3]
		mat.edgealpha = attr[Color_Line][3]
	#if Color_Specular in attr: mat.specRGB = attr[Color_Specular][:3] :: Individual per group
	if "SpecularPower" in attr: mat.specpower = attr["SpecularPower"]
	if "LineWidthS" in attr: mat.edgesize = attr["LineWidthS"]
	## --- Shadow color: dim plain-white diffuse and generate a toon gradient.
	if Color_Shadow in attr:# and attr[GROUP] not in ["hair"]:
		# smt smt only if bnot already set
		#if mat.ambRGB == [1,1,1]: mat.ambRGB = attr[Color_Shadow][:3]
		#mat.diffRGB = attr[Color_Shadow][:3]
		if mat.diffRGB == [1,1,1]:
			mat.diffRGB = [0.5, 0.5, 0.5]
		#mat.diffRGB = [0,0,0]#attr[Color_Shadow][:3] smt only for black types
		add_toon_shader(pmx, mat, attr)
	#	mat.comment += "Make toon_shader: " + str(attr[Color_Shadow]) ## Multiply each by 255 before
	## --- Meta block: render flags, visibility, accessory-slot comment
	if META in attr:
		meta = attr[META]
		#### Shadows
		## "On": == "one-sided", but 2-SIDE is default in PMX for all objects
		## "Two-sided": Makes single-sided objects two-sided
		## Maybe add "one-sided" instead #@todo:: find a reliable fix for that, or add a note
		shadow = meta.get(MT_SHADOWS, "On")
		if shadow == "Shadows Only":
			mat.flaglist[4] = False #[use_edge]
			mat.flaglist[5] = True #[V-COLOR]
		elif shadow == "Off": # no 2-side, G-SHAD, S-MAP, S-SHAD
			fl = mat.flaglist
			mat.flaglist = [ False, False, False, False, fl[4], fl[5], fl[6], fl[7] ]
		mat.flaglist[3] = 1 if meta.get(MT_RECEIVE, False) else 0
		#### If not enabled, make 100% transparent + remove edge
		if not meta.get(MT_ENABLED, False):
			mat.alpha = 0
			mat.flaglist[4] = False
		#### Add Slot into Comment
		if MT_SLOT in meta:
			comment = meta[MT_SLOT]
			## Generic parents are replaced by the actual accessory parent bone.
			if comment in ["BodyTop","p_cf_head_bone"]: comment = meta[MT_PARENT]
			comment = "[:Slot:] " + comment
			par = meta[MT_PARENT]
			if re.match("ca_slot\d+", par):
				comment += "\r\n[:AccId:] " + re.match("ca_slot(\d+)", par)[1]
			if not mat.comment or len(mat.comment) == 0:
				mat.comment = comment
			else: mat.comment += "\r\n" + comment
def process_color_and_detail(pmx, mat, attr):
	## Bake ColorMask and/or DetailMask into a new working texture and register
	## it on the material. Handles the special "mf_m_primmaterial" placeholder.
	def replFN(elem, name): return os.path.join(os.path.split(elem)[0], name)  ## swap filename, keep folder
	def getFN(elem): return os.path.splitext(os.path.split(elem)[1])[0]        ## bare filename, no ext
	if t__Color in attr:
		attr.setdefault(t__Main, None)
		if attr[t__Main] and not os.path.exists(attr[t__Main]): attr[t__Main] = None
		## Check if there is no MainTex
		noMain = attr[t__Main] in [None,""] and META in attr
		if noMain:
			ff = getFN(attr[t__Color])
			if (ff.startswith("mf_m_primmaterial")):
				attr[t__Color] = replFN(attr[t__Color], "mf_m_primmaterial_ColorMask.png")
				attr["altName"] = re.sub("_ColorMask","",ff) + "@" + attr[META][MT_PARENT]
			## NOTE(review): if noMain is true but the ColorMask is NOT a
			## "mf_m_primmaterial*" file, "altName" is never set and the lookup
			## below raises KeyError -- confirm whether that case can occur.
		else:
			### Ignore mf_m_primmaterial if they have a MainTex
			if (getFN(attr[t__Main]).startswith("mf_m_primmaterial")): return
		handle_acc_color(pmx, attr)
		## ff: the file whose name the generated "_pyCol" texture is derived from
		ff = attr[t__Color if attr[t__Main] is None else t__Main]
		if noMain: ff = replFN(attr[t__Color], attr["altName"])
		attr[t__MainCol] = os.path.splitext(ff)[0] + suffix_Col + ".png"
	if t__Detail in attr:
		handle_acc_detail(pmx, attr)
	#if Color_Specular not in attr:
	#	mat.specRGB = mat.dif
	######## Register whichever output was produced on the material.
	if t__Reuse in attr:
		print(f"> Reusing texture {attr[t__Reuse]}")
		set_new_texture(pmx, mat, attr, [attr[t__Reuse], ".png"])
	# if no t__Main, but used t__Color and t__Detail
	elif t__MainCol in attr: ## @todo_note
		saved = False
		if t__Detail in attr:
			saved = set_new_texture(pmx, mat, attr, [ff, suffix_Col + suffix_Det + ".png"])
		if (not saved) or (t__Detail not in attr):
			set_new_texture(pmx, mat, attr, [ff, suffix_Col + ".png"])
	elif t__Detail in attr:
		set_new_texture(pmx, mat, attr, [attr[t__Main], suffix_Det + ".png"])
	## Remember the result so duplicate hair strips can reuse it.
	if attr.get(OPT_HAIR, False):
		global_state[OPT_HAIR][attr[INHERIT]] = get_working_texture(attr)
def process_line_mask(pmx, mat, attr):
	"""Apply the LineMask texture onto the material's current working texture.

	:param mat: unused; kept so the signature matches the other parser helpers.
	"""
	## FIX: handle_body_line() takes only (pmx, attr); the original forwarded
	## (pmx, mat, attr), raising TypeError whenever an accessory had a LineMask.
	handle_body_line(pmx, attr)
##############
#### Body ####
##############
def handle_body_detail(pmx, attr): ## @todo
	## Stub: enumerates the DetailMask-related attributes a full implementation
	## would consume; nothing is applied yet.
	## NOTE(review): every line raises KeyError if the key is absent, and
	## "shadowExtend" differs in casing from "ShadowExtend" used in
	## handle_face_effects -- confirm the intended key before wiring this up.
	#attr[DetailMap_Scale] ## Mainly affects [Green]
	XXXX = attr["DetailNormalMapScale"] ## Render on top of ["nip"]
	XXXX = attr["notusetexspecular"] ## Render on top of ["nip"]
	XXXX = attr["rimpower"] ## 100% reduce "rimV" to Line
	XXXX = attr["rimV"] ## 0..100% of Specularity / Metal t__NorMask on Texture
	XXXX = attr["shadowExtend"] ## Render on top of ["nip"]
	XXXX = attr["SpecularPower"] ## Render on top of ["nip"]
	XXXX = attr["SpecularPowerNail"] ## Render on top of ["nip"]
	XXXX = attr[Color_Specular] ##
def handle_body_overtex1(pmx, attr):
	"""Blend the body's overtex1 (nipple) overlay onto the current working texture."""
	if NotFound(attr, t__overtex1):
		return
	base_tex = quote(get_working_texture(attr))
	over_tex = quote(attr[t__overtex1])
	## Parameters forwarded to the external overlay script.
	payload = {
		"color": attr[Color_Tex1],
		"nip": attr["nip"],
		"size": attr["nipsize"],
		"spec": attr["nip_specular"],
	}
	payload[state_info] = global_state[state_info]
	call_img_scripts((pathBOver1, base_tex, over_tex, quoteJson(payload)), "body1", [3])
def handle_body_overtex2(pmx, attr):
	"""Placeholder: the second body overlay texture is not implemented yet."""
	return None
def handle_body_line(pmx, attr):
	"""Overlay the LineMask texture onto the current working texture."""
	####
	# linetexon -- [Flag]: "body".t__Line.Green
	if NO_FILES in attr:
		return
	if NotFound(attr, t__Line):
		return
	main_arg = quote(get_working_texture(attr))
	mask_arg = quote(attr[t__Line])
	opts = {}
	opts["mode"] = "overlay"
	opts["linetexon"] = attr.get("linetexon", 1) < 0 ## "body".tex__Line.Green
	opts[state_info] = global_state[state_info]
	call_img_scripts((pathLine, main_arg, mask_arg, quoteJson(opts)), "line", [3])
##############
#### Face ####
##############
def handle_face_detail(pmx, attr):
	"""Placeholder: face DetailMask handling is not implemented yet."""
	return None
def handle_face_effects(pmx, attr): ## @todo
	"""Stub: computes the combined effect strength but applies nothing yet."""
	if NO_FILES in attr:
		return
	## Intended blend strength: DetailNormalMapScale * ShadowExtend (defaults 1.0).
	strength = float(attr.get("DetailNormalMapScale", 1.0))
	strength *= float(attr.get("ShadowExtend", 1.0))
	## apply [t__Line.Blue] * strength onto [t__Main] -- not implemented
def handle_eye_highlight(pmx, attr): ## Actually uses all three colors, so color in like body.overtex1
	## Compose up to two highlight overlays (overtex1 / overtex2) onto the iris
	## texture via the external eye script.
	if NO_FILES in attr: return
	arg1 = quote(attr[t__Main])
	js = { "highlight": attr["isHighLight"] }
	js["offset"] = attr.get("offset", "(0, 0)")
	js["scale"] = attr.get("scale", "(1, 1)")
	js[state_info] = global_state[state_info]
	arg2 = quoteJson(js)
	#### offset(\d, \d), scale(\d,\d) overcolor1, overcolor2
	if t__overtex1 in attr:
		arg3 = quote(attr[t__overtex1])
		## NOTE(review): "* 255" assumes Color_Tex1 supports element-wise scaling
		## (presumably ensured by extend_colors(..., True) in parse_eye); with a
		## plain Python list it would repeat the list instead -- confirm.
		col = (attr[Color_Tex1] * 255)
		js = { "color": col[:3], "alpha": col[3] }
		arg4 = quoteJson(js)
	else: arg3,arg4 = "","0"  ## empty marker args: script skips this overlay
	if t__overtex2 in attr:
		arg5 = quote(attr[t__overtex2])
		col = (attr[Color_Tex2] * 255)
		js = { "color": col[:3], "alpha": col[3] }
		arg6 = quoteJson(js)
	else: arg5,arg6 = "","0"
	call_img_scripts((pathOT_eye, arg1, arg2, arg3, arg4, arg5, arg6), "OT_eye", [2,4,6])
def handle_face_overtex2(pmx, attr):
	"""Placeholder: face overtex2 should become its own extra material, not a merge."""
	return None
##############
#### Hair ####
##############
##############
#### Accs ####
##############
def handle_acc_color(pmx, attr):
if NO_FILES in attr: return
if NotFound(attr, t__Color): return
main = attr.get(t__Main, None)
if main and not os.path.exists(main): main = None
attr[t__Main] = main
attr.setdefault(Color_2, [0,0,0,1])
attr.setdefault(Color_3, None)
arg1 = '""' if attr[t__Main] == None else quote(attr[t__Main])
arg2 = quote(attr[t__Color])
arg3 = quoteColor(attr[Color_1])
arg4 = quoteColor(attr[Color_2])
arg5 = '"[]"' if attr[Color_3] == None else quoteColor(attr[Color_3])
data = {"mode": "Additive", "altName" : attr.get("altName","")}
if attr[GROUP] == "hair": data["hair"] = True
data[state_info] = global_state[state_info]
arg6 = quoteJson(data)
argStr = "color"+arg1+arg2+arg3+arg4+arg5
attr[ARGSTR] = argStr
tmp = global_state[ARGSTR].get(argStr, None)
if (tmp): attr[t__Reuse] = tmp
else: call_img_scripts((pathColor, arg1, arg2, arg3, arg4, arg5, arg6), "color", [6])
def handle_acc_detail(pmx, attr): ## Has @todo_add \\ @open: All the props affecting t__Detail
if NO_FILES in attr: return
if t__Reuse in attr: return
if NotFound(attr, t__Detail): return
### Determine main texture & blend mode
attr.setdefault(t__Main, None)
mode = "overlay"
if t__MainCol in attr:
mode = "darken" ## Try: "mul" "diff" "nor"
main = attr[t__MainCol]
elif attr[GROUP] == 'cloth':
mode = "darken"
main = attr[t__Main]
else: main = attr[t__Main]
if MODE in attr: mode = attr[MODE]
####
if not os.path.exists(main):
print(f">--> [MissingFile(Detail)]: {main}")
return
args = {}
arg1 = quote(main)
arg2 = quote(attr[t__Detail])
arg3 = quote(global_state[state_info])
arg4 = quote(attr[t__Main] is None)
arg5 = quote(mode)
arg6 = quote(attr[SHADER] == "body")
if ARGSTR not in attr:
argStr = "detail"+arg1+arg2+arg4+arg5
attr[ARGSTR] = argStr
tmp = global_state[ARGSTR].get(argStr, None)
if (tmp): attr[t__Reuse] = tmp; return
call_img_scripts((pathDetail, arg1, arg2, arg3, arg4, arg5, arg6), "detail")
############
# X = attr[t__liquid] ### X = attr[t__Texture2] ### X = attr[t__Texture3] ### X = attr[Color_Liquid] ##
#################
#### Utility ####
#################
def call_img_scripts(args, target, isJson = []):
if DEBUG: os.system(' '.join(args))
else:
from unittest.mock import patch
import runpy, sys
try:
target_dict = {
"color": "extra.#Apply_ColorMap",
"detail": "extra.#Apply_DetailMap",
"body1": "extra.#Apply_Body_overtex1",
"OT_eye": "extra.#Apply_overtex1__eyes",
"line": "extra.#Apply_LineMap",
}
if not target_dict.get(target): return
with patch.object(sys, 'argv', [x.strip('"') for x in args]):
runpy.run_module(target_dict.get(target))
except Exception as eee: print(eee)
def ask_to_rename_extra(base):
if not os.path.exists(base):
print(f"{base} does not exist")
return
files = os.listdir(base)
if not any(filter(lambda x: x.startswith("_Export"), files)): return
if not util.ask_yes_no(f"Rename textures in [{base}]"): return
re_cut = re.compile(r" ?\(Instance\)_?(\([-0-9]*\))?|_Export_[\d\-]+_")
for fname in files:
basename = os.path.join(base,fname)
if not os.path.isfile(basename): continue
newname = os.path.join(base, re_cut.sub("", fname))
if basename == newname: continue
if os.path.exists(newname):
print(f"-- {os.path.relpath(newname, base)} already exists!")
continue
os.renames(basename, newname)
def fix_material_names(pmx, data):
matJsn = []
for mat_name in data.keys():
if mat_name in [NAME, BASE, OPTIONS]: continue
if re.search("(@ca_slot\d+|#-\d+)$", mat_name): continue
matJsn.append(mat_name)
matSkip = ["Bonelyfans", "c_m_shadowcast", "cf_m_tooth", "cf_m_noseline"]
for mat in pmx.materials:
name = re.sub("(@ca_slot\d+|#-\d+|\*\d+)?$", "", mat.name_jp)
arr = [x for x in matJsn if x.startswith(name)]
if len(arr) == 0:
if any([x for x in matSkip if name.startswith(x)]): continue
print(f"[*] {name} has no match in matJsn"); continue
elem = arr[0]
mat.name_jp = elem
mat.name_en = elem
del matJsn[matJsn.index(elem)]
if len(matJsn) != 0: print("[**] matJsn is not empty: "); print(matJsn)
##############
pass
#def quote(value): return '"' + re.sub("\\", "\\\\", str(value)) + '"'
def quote(value): ## return '"' + str(value).strip('"').strip("'") + '"'
tmp = '"' + str(value).strip('"').strip("'") + '"'
return tmp
def quoteColor(value):
tmp = []
for (i,c) in enumerate(value): tmp.append(int(c*255))
return '"' + str(tmp).strip('"').strip("'") + '"'
def quoteJson(value):
tmp = json.dumps(value)
if global_state.get(state_info, True): tmp = re.sub(r'"', r'\\"', tmp)
return '"' + tmp + '"'
def extend_colors(attr, col_arr, dodefault = False):
for col in col_arr:
if col in attr:
if dodefault: attr[col] = attr.get(col, [0,0,0])
if attr[col] is None: continue
if len(attr[col]) < 3: raise Exception("Warning: {} is too short!".format(col))
if len(attr[col]) == 3: attr[col].append(1)
tex_mode_toon = "toon"
tex_mode_main = "main"
tex_mode_sphr = "sphere"
def set_new_texture(pmx, mat, attr, tex_names: list, tex_mode=tex_mode_main):
"""
:param mat [PmxMaterial]
:param attr [Dict]
:param tex_names [Dict]
:param tex_mode [enum] "toon", "main", or "sphere"
"""
isToonMode = tex_mode == tex_mode_toon
isSphereMode = tex_mode == tex_mode_sphr
# [attr]: So that one can use "get_working_texture" but must not
if tex_mode != tex_mode_main: tex_name = tex_names[0]
else: tex_name = os.path.splitext(tex_names[0])[0] + tex_names[1]
if not os.path.exists(tex_name):
msgs["no_files"].append(tex_name)
return False
if tex_name in pmx.textures:
tex_idx = pmx.textures.index(tex_name)
else:
tex_idx = len(pmx.textures)
pmx.textures.append(tex_name)
if isToonMode:
mat.toon_idx = tex_idx
mat.toon_mode = 0
elif isSphereMode:
mat.sph_idx = tex_idx
mat.sph_mode = 1
else:
mat.tex_idx = tex_idx
attr[t__MainCol] = tex_name
if ARGSTR in attr:
tmp = attr[ARGSTR]
if tmp not in global_state[ARGSTR]: global_state[ARGSTR][tmp] = tex_name
return True
def get_working_texture(attr):
if t__MainCol in attr:
if attr[t__MainCol] is not None:
return attr[t__MainCol]
if t__Main in attr:
if attr[t__Main] is not None:
return attr[t__Main]
raise Exception("Could not find working main texture")
def NotFound(attr, name, optional=False): ## Returns true if not found
if name not in attr: return True
if not os.path.exists(attr[name]):
if optional: return True
print(f">--> [MissingFile({name})]: {attr[name]}")
return True
return False
##############
## Mappings ##
##############
texDict = {
"t__Alpha" : "AlphaMask",
"t__Another" : "AnotherRamp",
"t__Color" : "ColorMask",
"t__Detail" : "DetailMask",
"t__Glass" : "GlassRamp",
"t__HairGloss" : "HairGloss",
"t__Line" : "LineMask",
"t__Liquid" : "liquid",
"t__Main" : "MainTex",
"t__NorMap" : "NormalMap",
"t__NorMapDet" : "NormalMapDetail",
"t__NorMask" : "NormalMask",
"t__Overtex1" : "overtex1",
"t__overtex1" : "overtex1",
"t__Overtex2" : "overtex2",
"t__overtex2" : "overtex2",
"t__Overtex3" : "overtex3",
"t__overtex3" : "overtex3",
}
texSuffix = { ### :: (\w+)( +):= ("_(\w+).png") --> ' "\1":\2\3,'
"t__Alpha": "_AlphaMask.png",
"t__Another": "_AnotherRamp.png",
"t__Color": "_ColorMask.png",
"t__Detail": "_DetailMask.png",
"t__Glass": "_GlassRamp.png",
"t__HairGloss": "_HairGloss.png",
"t__Line": "_LineMask.png",
"t__Main": "_MainTex.png",
"t__NorMap": "_NormalMap.png",
"t__NorMapDet": "_NormalMapDetail.png",
"t__NorMask": "_NormalMask.png",
"t__Overtex1": "_overtex1.png",
"t__overtex1": "_overtex1.png",
"t__Overtex2": "_overtex2.png",
"t__Overtex3": "_overtex3.png",
}
for (k,v) in copy.deepcopy(texDict).items():
texDict[v] = texDict[k]
if k in texSuffix:
texSuffix[v] = texSuffix[k]
shader_dict = {
### Ignore
"toon_textureanimation": "ignore", ## gageye
"shadowcast": "ignore",
"Bonelyfans": "ignore",
"mnpb": "ignore", ## cf_m_mnpb
# t__Glass, t__Main, t__NorMap ++ Color, Color4, *Sort,*Inverse,RimPower
"toon_glasses_lod0": "glass", ## cf_m_namida_00, c_m_gomu
### Small
"toon_eyew_lod0": "color", ## t__Main ++ Color, shadowcolor \\ mayuge, sirome
"toon_eye_lod0": "eye", ## t__Main, t__expression, t__overtex1, t__overtex2 ++ **
"toon_nose_lod0": "color", ## t__Main ++ Color
"main_emblem": "color", ## t__Main ++ shadowcolor
#########
## "main_skin": --> "face","body" are special case
# "main_skin": "body", ## cm_m_dankon, cm_m_dan_f
# t__Another, t__Color, t__Detail, t__Line, t__Main, t__NorMap
"main_item": "item", ## AnotherRampFull, DetailBLineG, DetailRLineR, LineWidthS, ...
"main_item_studio": "item", ## ++ PatternMask 1,2,3 \\ several uncommon attr
"main_item_emission": "item", ## ++ z__AnimationMask \\ z__EmissionPower
# t__Alpha, t__Another, t__Detail, t__Line, t__liquid, t__Main, t__NorMap,
"main_opaque": "cloth",
"main_opaque2": "cloth", #-- z__AlphaMaskuv
# t__Alpha, t__Another, t__Color, t__Detail, t__HairGloss, t__Main, t__NorMap
"main_hair": "hair",
"main_hair_front": "hair",
"main_alpha": "alpha",
# t__Color, t__Detail, t__Line, t__Main, t__NorMap
"main_texture": "acc", ## C+2+3+S, AnotherRampFull, DetailBLineG+RR, ShadowExtend(Another), SpeclarHeight
####
}
## shorthands: __typePrinter_Dict
if __name__ == '__main__': util.main_starter(parseMatComments) | [
"os.mkdir",
"kkpmx_utils.ask_yes_no",
"json.dumps",
"os.path.isfile",
"kkpmx_utils.main_starter",
"os.path.join",
"kkpmx_core.end",
"nuthouse01_core.MY_GENERAL_INPUT_FUNC",
"cv2.imwrite",
"os.path.exists",
"re.escape",
"kkpmx_utils.load_json_file",
"numpy.linspace",
"re.search",
"re.sub"... | [((5328, 5358), 'os.path.split', 'os.path.split', (['input_file_name'], {}), '(input_file_name)\n', (5341, 5358), False, 'import os\n'), ((5468, 5508), 'os.path.join', 'os.path.join', (['root', '"""#generateJSON.json"""'], {}), "(root, '#generateJSON.json')\n", (5480, 5508), False, 'import os\n'), ((6004, 6039), 'kkpmx_utils.load_json_file', 'util.load_json_file', (['path', 'callback'], {}), '(path, callback)\n', (6023, 6039), True, 'import kkpmx_utils as util\n'), ((19515, 19546), 'numpy.zeros', 'np.zeros', (['(64, 64, 3)', 'np.uint8'], {}), '((64, 64, 3), np.uint8)\n', (19523, 19546), True, 'import numpy as np\n'), ((20124, 20151), 'cv2.imwrite', 'cv2.imwrite', (['args[2]', 'frame'], {}), '(args[2], frame)\n', (20135, 20151), False, 'import cv2\n'), ((20379, 20405), 'os.path.join', 'os.path.join', (['root', '"""toon"""'], {}), "(root, 'toon')\n", (20391, 20405), False, 'import os\n'), ((20419, 20446), 'os.path.join', 'os.path.join', (['pathDir', 'name'], {}), '(pathDir, name)\n', (20431, 20446), False, 'import os\n'), ((31252, 31268), 'os.listdir', 'os.listdir', (['base'], {}), '(base)\n', (31262, 31268), False, 'import os\n'), ((31416, 31483), 're.compile', 're.compile', (['""" ?\\\\(Instance\\\\)_?(\\\\([-0-9]*\\\\))?|_Export_[\\\\d\\\\-]+_"""'], {}), "(' ?\\\\(Instance\\\\)_?(\\\\([-0-9]*\\\\))?|_Export_[\\\\d\\\\-]+_')\n", (31426, 31483), False, 'import re\n'), ((32989, 33006), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (32999, 33006), False, 'import json\n'), ((38098, 38133), 'kkpmx_utils.main_starter', 'util.main_starter', (['parseMatComments'], {}), '(parseMatComments)\n', (38115, 38133), True, 'import kkpmx_utils as util\n'), ((6212, 6230), 'os.path.isabs', 'os.path.isabs', (['tex'], {}), '(tex)\n', (6225, 6230), False, 'import os\n'), ((6551, 6593), 'kkpmx_core.end', 'kklib.end', (['None', 'input_file_name', '"""_props"""'], {}), "(None, input_file_name, '_props')\n", (6560, 6593), True, 'import kkpmx_core as kklib\n'), 
((7871, 7906), 'kkpmx_utils.find_mat', 'util.find_mat', (['pmx', 'mat_name', '(False)'], {}), '(pmx, mat_name, False)\n', (7884, 7906), True, 'import kkpmx_utils as util\n'), ((20456, 20480), 'os.path.exists', 'os.path.exists', (['pathFile'], {}), '(pathFile)\n', (20470, 20480), False, 'import os\n'), ((29796, 29816), 'os.path.exists', 'os.path.exists', (['main'], {}), '(main)\n', (29810, 29816), False, 'import os\n'), ((31175, 31195), 'os.path.exists', 'os.path.exists', (['base'], {}), '(base)\n', (31189, 31195), False, 'import os\n'), ((31349, 31396), 'kkpmx_utils.ask_yes_no', 'util.ask_yes_no', (['f"""Rename textures in [{base}]"""'], {}), "(f'Rename textures in [{base}]')\n", (31364, 31396), True, 'import kkpmx_utils as util\n'), ((31515, 31540), 'os.path.join', 'os.path.join', (['base', 'fname'], {}), '(base, fname)\n', (31527, 31540), False, 'import os\n'), ((31682, 31705), 'os.path.exists', 'os.path.exists', (['newname'], {}), '(newname)\n', (31696, 31705), False, 'import os\n'), ((31789, 31818), 'os.renames', 'os.renames', (['basename', 'newname'], {}), '(basename, newname)\n', (31799, 31818), False, 'import os\n'), ((31958, 32003), 're.search', 're.search', (['"""(@ca_slot\\\\d+|#-\\\\d+)$"""', 'mat_name'], {}), "('(@ca_slot\\\\d+|#-\\\\d+)$', mat_name)\n", (31967, 32003), False, 'import re\n'), ((32153, 32211), 're.sub', 're.sub', (['"""(@ca_slot\\\\d+|#-\\\\d+|\\\\*\\\\d+)?$"""', '""""""', 'mat.name_jp'], {}), "('(@ca_slot\\\\d+|#-\\\\d+|\\\\*\\\\d+)?$', '', mat.name_jp)\n", (32159, 32211), False, 'import re\n'), ((33054, 33079), 're.sub', 're.sub', (['"""\\""""', '"""\\\\\\\\\\""""', 'tmp'], {}), '(\'"\', \'\\\\\\\\"\', tmp)\n', (33060, 33079), False, 'import re\n'), ((34024, 34048), 'os.path.exists', 'os.path.exists', (['tex_name'], {}), '(tex_name)\n', (34038, 34048), False, 'import os\n'), ((34956, 34982), 'os.path.exists', 'os.path.exists', (['attr[name]'], {}), '(attr[name])\n', (34970, 34982), False, 'import os\n'), ((36413, 36435), 
'copy.deepcopy', 'copy.deepcopy', (['texDict'], {}), '(texDict)\n', (36426, 36435), False, 'import copy\n'), ((5728, 5743), 're.escape', 're.escape', (['root'], {}), '(root)\n', (5737, 5743), False, 'import re\n'), ((6256, 6282), 'os.path.relpath', 'os.path.relpath', (['tex', 'root'], {}), '(tex, root)\n', (6271, 6282), False, 'import os\n'), ((6468, 6509), 'kkpmx_core.end', 'kklib.end', (['pmx', 'input_file_name', '"""_props"""'], {}), "(pmx, input_file_name, '_props')\n", (6477, 6509), True, 'import kkpmx_core as kklib\n'), ((9293, 9317), 'copy.deepcopy', 'copy.deepcopy', (['data[org]'], {}), '(data[org])\n', (9306, 9317), False, 'import copy\n'), ((10269, 10295), 're.sub', 're.sub', (['"""\\\\*\\\\d"""', '""""""', 'name'], {}), "('\\\\*\\\\d', '', name)\n", (10275, 10295), False, 'import re\n'), ((19166, 19197), 'numpy.array', 'np.array', (['BGR_Channel', 'np.uint8'], {}), '(BGR_Channel, np.uint8)\n', (19174, 19197), True, 'import numpy as np\n'), ((20492, 20515), 'os.path.exists', 'os.path.exists', (['pathDir'], {}), '(pathDir)\n', (20506, 20515), False, 'import os\n'), ((20517, 20534), 'os.mkdir', 'os.mkdir', (['pathDir'], {}), '(pathDir)\n', (20525, 20534), False, 'import os\n'), ((22821, 22849), 're.match', 're.match', (['"""ca_slot\\\\d+"""', 'par'], {}), "('ca_slot\\\\d+', par)\n", (22829, 22849), False, 'import re\n'), ((28455, 28475), 'os.path.exists', 'os.path.exists', (['main'], {}), '(main)\n', (28469, 28475), False, 'import os\n'), ((31550, 31574), 'os.path.isfile', 'os.path.isfile', (['basename'], {}), '(basename)\n', (31564, 31574), False, 'import os\n'), ((6365, 6437), 'nuthouse01_core.MY_GENERAL_INPUT_FUNC', 'core.MY_GENERAL_INPUT_FUNC', (["(lambda x: x in ['y', 'n'])", '"""Apply changes ?"""'], {}), "(lambda x: x in ['y', 'n'], 'Apply changes ?')\n", (6391, 6437), True, 'import nuthouse01_core as core\n'), ((10785, 10826), 'os.path.join', 'os.path.join', (['base', '(name + texSuffix[tex])'], {}), '(base, name + texSuffix[tex])\n', (10797, 
10826), False, 'import os\n'), ((12316, 12342), 'os.path.relpath', 'os.path.relpath', (['tex', 'base'], {}), '(tex, base)\n', (12331, 12342), False, 'import os\n'), ((23131, 23150), 'os.path.split', 'os.path.split', (['elem'], {}), '(elem)\n', (23144, 23150), False, 'import os\n'), ((23316, 23345), 'os.path.exists', 'os.path.exists', (['attr[t__Main]'], {}), '(attr[t__Main])\n', (23330, 23345), False, 'import os\n'), ((33966, 33996), 'os.path.splitext', 'os.path.splitext', (['tex_names[0]'], {}), '(tex_names[0])\n', (33982, 33996), False, 'import os\n'), ((10095, 10127), 'kkpmx_utils.find_mat', 'util.find_mat', (['pmx', 'attr[PARENT]'], {}), '(pmx, attr[PARENT])\n', (10108, 10127), True, 'import kkpmx_utils as util\n'), ((23204, 23223), 'os.path.split', 'os.path.split', (['elem'], {}), '(elem)\n', (23217, 23223), False, 'import os\n'), ((23651, 23679), 're.sub', 're.sub', (['"""_ColorMask"""', '""""""', 'ff'], {}), "('_ColorMask', '', ff)\n", (23657, 23679), False, 'import re\n'), ((24016, 24036), 'os.path.splitext', 'os.path.splitext', (['ff'], {}), '(ff)\n', (24032, 24036), False, 'import os\n'), ((10890, 10924), 'os.path.exists', 'os.path.exists', (['attr[texDict[tex]]'], {}), '(attr[texDict[tex]])\n', (10904, 10924), False, 'import os\n'), ((10954, 11004), 'os.path.join', 'os.path.join', (['root', 'pmx.textures[base_mat.tex_idx]'], {}), '(root, pmx.textures[base_mat.tex_idx])\n', (10966, 11004), False, 'import os\n'), ((19251, 19301), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(rect_pos[1][1] - rect_pos[0][1])'], {}), '(1, 0, rect_pos[1][1] - rect_pos[0][1])\n', (19262, 19301), True, 'import numpy as np\n'), ((22885, 22915), 're.match', 're.match', (['"""ca_slot(\\\\d+)"""', 'par'], {}), "('ca_slot(\\\\d+)', par)\n", (22893, 22915), False, 'import re\n'), ((31723, 31753), 'os.path.relpath', 'os.path.relpath', (['newname', 'base'], {}), '(newname, base)\n', (31738, 31753), False, 'import os\n')] |
import numpy.random as npr
RANDOM_SEED = 5
class Person:
def __init__(self, name):
self.name = name # 自分の名前
# 到着するまでの時間を指数分布で与える。
expected_1 = 30.0
lam_1 = 1.0 / expected_1 # 期待値=1/lam
self.arrive_time = npr.exponential(1. / lam_1)
# 用を足すのにかかる時間をアーラン分布で与える。
# アーラン分布はガンマ分布の関数で表現できる。
k = 3.0
expected_2 = 5.0
lam_2 = k / expected_2 # 期待値E=k/lamよりlam=k/E
self.relieve_time = npr.gamma(k, 1. / lam_2)
self.status = 'initial' # 自分の状態を表す。Noneは存在しないことを表す。
def __repr__(self): # print(self)をした時の出力を決めておく。
return 'name: %s, status: %s' % (self.name, self.status)
def behave(self): # 1ステップで行う,一連の行動。
if self.status == 'initial':
self.arrive_time -= 1 # カウントダウンする
if self.arrive_time <= 0:
self.status = 'relieving'
elif self.status == 'relieving':
self.relieve_time -= 1 # カウントダウンする
if self.relieve_time <= 0: # もし用を足し終えたら,退出する。
self.status = 'leaving' # 退出中。
print(self)
def simulation():
# シミュレーション準備
person = Person('Yasuda') # 人を設定
time = 0
# シミュレーション開始
while time < 1000:
time += 1
print('time:%d' % time)
person.behave()
if person.status == 'leaving':
break # 退出したのでループを終わる
# シミュレーション終了後まとめ
print('report\nsimulation time: %d' % (time))
if __name__ == '__main__': # このスクリプト自体が実行されたときにのみ以下を実行
npr.seed(RANDOM_SEED)
simulation()
| [
"numpy.random.gamma",
"numpy.random.exponential",
"numpy.random.seed"
] | [((1508, 1529), 'numpy.random.seed', 'npr.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (1516, 1529), True, 'import numpy.random as npr\n'), ((253, 281), 'numpy.random.exponential', 'npr.exponential', (['(1.0 / lam_1)'], {}), '(1.0 / lam_1)\n', (268, 281), True, 'import numpy.random as npr\n'), ((471, 496), 'numpy.random.gamma', 'npr.gamma', (['k', '(1.0 / lam_2)'], {}), '(k, 1.0 / lam_2)\n', (480, 496), True, 'import numpy.random as npr\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from mobilenetv2 import backbone
from utils import apply_bilateral_grid
from tensorflow.python.framework import graph_util
def get_tensor_shape(x):
a = x.get_shape().as_list()
b = [tf.shape(x)[i] for i in range(len(a))]
return [aa if type(aa) is int else bb for aa, bb in zip(a, b)]
def coef_mobilenetv2(inputs, width=0.75, luma_bins=8, is_training='False', name='coefficients'):
with tf.variable_scope(name):
with slim.arg_scope([slim.separable_convolution2d, slim.fully_connected],
normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6):
with slim.arg_scope([slim.batch_norm],
is_training=is_training, center=True, scale=True):
x = backbone(inputs, width=width, is_training=is_training)
for _ in range(2):
x = slim.convolution2d(x, 48, [3, 3], stride=1)
pool = tf.reduce_mean(x, axis=[1, 2], keepdims=False)
fc1 = slim.fully_connected(pool, 192)
fc2 = slim.fully_connected(fc1, 96)
fc3 = slim.fully_connected(fc2, 48)
feat1 = slim.convolution2d(x, 48, [3, 3], stride=1)
feat2 = slim.convolution2d(feat1, 48, [3, 3], stride=1,
normalizer_fn=None, activation_fn=None)
bs, ch = tf.shape(fc3)[0], tf.shape(fc3)[1]
fc_reshape = tf.reshape(fc3, [bs, 1, 1, ch])
fusion = tf.nn.relu6(feat2 + fc_reshape)
conv7 = slim.convolution2d(fusion, 24*luma_bins, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None)
stack1 = tf.stack(tf.split(conv7, 24, axis=3), axis=4)
stack2 = tf.stack(tf.split(stack1, 4, axis=4), axis=5)
#print(stack2.get_shape().as_list())
# [1, 16, 16, 8, 9, 4]
b, h, w, ch1, ch2, ch3 = get_tensor_shape(stack2)
stack2 = tf.reshape(stack2, [b, h, w, ch1*ch2*ch3])
return stack2
'''
def coefficients(inputs, luma_bins=8, is_training='False', name='coefficients'):
with tf.variable_scope(name):
with slim.arg_scope([slim.separable_convolution2d, slim.fully_connected],
normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6):
with slim.arg_scope([slim.batch_norm],
is_training=is_training, center=True, scale=True):
x = slim.convolution2d(inputs, 32, [3, 3], stride=2)
x = slim.convolution2d(x, 64, [3, 3], stride=2)
x = slim.convolution2d(x, 96, [3, 3], stride=2)
x = slim.convolution2d(x, 128, [3, 3], stride=2)
conv4 = x
for _ in range(2):
x = slim.convolution2d(x, 48, [3, 3], stride=2)
pool = tf.reduce_mean(x, axis=[1, 2], keepdims=False)
fc1 = slim.fully_connected(pool, 192)
fc2 = slim.fully_connected(fc1, 96)
fc3 = slim.fully_connected(fc2, 48)
conv5 = slim.convolution2d(conv4, 48, [3, 3], stride=1)
conv6 = slim.convolution2d(conv5, 48, [3, 3], stride=1,
normalizer_fn=None, activation_fn=None)
bs, ch = tf.shape(fc3)[0], tf.shape(fc3)[1]
fc_reshape = tf.reshape(fc3, [bs, 1, 1, ch])
fusion = tf.nn.relu6(conv6 + fc_reshape)
conv7 = slim.convolution2d(fusion, 24*luma_bins, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None)
stack1 = tf.stack(tf.split(conv7, 24, axis=3), axis=4)
stack2 = tf.stack(tf.split(stack1, 4, axis=4), axis=5)
#print(stack2.get_shape().as_list())
# [1, 16, 16, 8, 9, 4]
b, h, w, ch1, ch2, ch3 = get_tensor_shape(stack2)
stack2 = tf.reshape(stack2, [b, h, w, ch1*ch2*ch3])
return stack2
'''
def guide(inputs, is_training=False, name='guide'):
with tf.variable_scope(name):
in_ch = inputs.get_shape().as_list()[-1]
idtity = np.identity(in_ch, dtype=np.float32)\
+ np.random.randn(1).astype(np.float32)*1e-4
ccm = tf.get_variable('ccm', dtype=tf.float32, initializer=idtity)
ccm_bias = tf.get_variable('ccm_bias', shape=[in_ch,], dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
guidemap = tf.matmul(tf.reshape(inputs, [-1, in_ch]), ccm)
guidemap = tf.nn.bias_add(guidemap, ccm_bias, name='ccm_bias_add')
guidemap = tf.reshape(guidemap, tf.shape(inputs))
shifts_ = np.linspace(0, 1, 16, endpoint=False, dtype=np.float32)
shifts_ = shifts_[np.newaxis, np.newaxis, np.newaxis, :]
shifts_ = np.tile(shifts_, (1, 1, in_ch, 1))
guidemap = tf.expand_dims(guidemap, 4)
shifts = tf.get_variable('shifts', dtype=tf.float32, initializer=shifts_)
slopes_ = np.zeros([1, 1, 1, in_ch, 16], dtype=np.float32)
slopes_[:, :, :, :, 0] = 1.0
slopes = tf.get_variable('slopes', dtype=tf.float32, initializer=slopes_)
guidemap = tf.reduce_sum(slopes*tf.nn.relu6(guidemap-shifts), reduction_indices=[4])
guidemap = slim.convolution2d(guidemap, 1, [1, 1], activation_fn=None,
weights_initializer=tf.constant_initializer(1.0/in_ch))
guidemap = tf.clip_by_value(guidemap, 0, 1)
guidemap = tf.squeeze(guidemap, squeeze_dims=[3,])
return guidemap
def inference(hr_input, width=0.75, lr_size=(256, 256), is_training=False, name='inference'):
with tf.variable_scope(name):
lr_input = tf.image.resize_images(hr_input, lr_size,
tf.image.ResizeMethod.BILINEAR)
coeffs = coef_mobilenetv2(lr_input, width=width, is_training=is_training)
#coeffs = coeffient(lr_input, is_training=is_training)
guidemap = guide(hr_input, is_training=is_training)
output = apply_bilateral_grid(coeffs, guidemap, hr_input)
return output
if __name__ == '__main__':
hr_input = tf.placeholder(tf.float32, [1, 1024, 1024, 3])
outputs = inference(hr_input, is_training=False)
print(outputs.get_shape().as_list())
| [
"tensorflow.clip_by_value",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"numpy.tile",
"tensorflow.split",
"utils.apply_bilateral_grid",
"tensorflow.get_variable",
"tensorflow.nn.relu6",
"numpy.random.randn",
"tensorflow.variable_scope",
"numpy.identity",
"tensorflow.placeholder",
... | [((7022, 7068), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, 1024, 1024, 3]'], {}), '(tf.float32, [1, 1024, 1024, 3])\n', (7036, 7068), True, 'import tensorflow as tf\n'), ((488, 511), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (505, 511), True, 'import tensorflow as tf\n'), ((4803, 4826), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (4820, 4826), True, 'import tensorflow as tf\n'), ((5011, 5071), 'tensorflow.get_variable', 'tf.get_variable', (['"""ccm"""'], {'dtype': 'tf.float32', 'initializer': 'idtity'}), "('ccm', dtype=tf.float32, initializer=idtity)\n", (5026, 5071), True, 'import tensorflow as tf\n'), ((5321, 5376), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['guidemap', 'ccm_bias'], {'name': '"""ccm_bias_add"""'}), "(guidemap, ccm_bias, name='ccm_bias_add')\n", (5335, 5376), True, 'import tensorflow as tf\n'), ((5475, 5530), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(16)'], {'endpoint': '(False)', 'dtype': 'np.float32'}), '(0, 1, 16, endpoint=False, dtype=np.float32)\n', (5486, 5530), True, 'import numpy as np\n'), ((5614, 5648), 'numpy.tile', 'np.tile', (['shifts_', '(1, 1, in_ch, 1)'], {}), '(shifts_, (1, 1, in_ch, 1))\n', (5621, 5648), True, 'import numpy as np\n'), ((5669, 5696), 'tensorflow.expand_dims', 'tf.expand_dims', (['guidemap', '(4)'], {}), '(guidemap, 4)\n', (5683, 5696), True, 'import tensorflow as tf\n'), ((5714, 5778), 'tensorflow.get_variable', 'tf.get_variable', (['"""shifts"""'], {'dtype': 'tf.float32', 'initializer': 'shifts_'}), "('shifts', dtype=tf.float32, initializer=shifts_)\n", (5729, 5778), True, 'import tensorflow as tf\n'), ((5798, 5846), 'numpy.zeros', 'np.zeros', (['[1, 1, 1, in_ch, 16]'], {'dtype': 'np.float32'}), '([1, 1, 1, in_ch, 16], dtype=np.float32)\n', (5806, 5846), True, 'import numpy as np\n'), ((5901, 5965), 'tensorflow.get_variable', 'tf.get_variable', (['"""slopes"""'], {'dtype': 'tf.float32', 'initializer': 
'slopes_'}), "('slopes', dtype=tf.float32, initializer=slopes_)\n", (5916, 5965), True, 'import tensorflow as tf\n'), ((6246, 6278), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['guidemap', '(0)', '(1)'], {}), '(guidemap, 0, 1)\n', (6262, 6278), True, 'import tensorflow as tf\n'), ((6298, 6336), 'tensorflow.squeeze', 'tf.squeeze', (['guidemap'], {'squeeze_dims': '[3]'}), '(guidemap, squeeze_dims=[3])\n', (6308, 6336), True, 'import tensorflow as tf\n'), ((6485, 6508), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (6502, 6508), True, 'import tensorflow as tf\n'), ((6538, 6611), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['hr_input', 'lr_size', 'tf.image.ResizeMethod.BILINEAR'], {}), '(hr_input, lr_size, tf.image.ResizeMethod.BILINEAR)\n', (6560, 6611), True, 'import tensorflow as tf\n'), ((6876, 6924), 'utils.apply_bilateral_grid', 'apply_bilateral_grid', (['coeffs', 'guidemap', 'hr_input'], {}), '(coeffs, guidemap, hr_input)\n', (6896, 6924), False, 'from utils import apply_bilateral_grid\n'), ((273, 284), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (281, 284), True, 'import tensorflow as tf\n'), ((535, 665), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.separable_convolution2d, slim.fully_connected]'], {'normalizer_fn': 'slim.batch_norm', 'activation_fn': 'tf.nn.relu6'}), '([slim.separable_convolution2d, slim.fully_connected],\n normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6)\n', (549, 665), True, 'import tensorflow.contrib.slim as slim\n'), ((4894, 4930), 'numpy.identity', 'np.identity', (['in_ch'], {'dtype': 'np.float32'}), '(in_ch, dtype=np.float32)\n', (4905, 4930), True, 'import numpy as np\n'), ((5264, 5295), 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, in_ch]'], {}), '(inputs, [-1, in_ch])\n', (5274, 5295), True, 'import tensorflow as tf\n'), ((5417, 5433), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (5425, 5433), True, 'import 
tensorflow as tf\n'), ((708, 795), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.batch_norm]'], {'is_training': 'is_training', 'center': '(True)', 'scale': '(True)'}), '([slim.batch_norm], is_training=is_training, center=True,\n scale=True)\n', (722, 795), True, 'import tensorflow.contrib.slim as slim\n'), ((855, 909), 'mobilenetv2.backbone', 'backbone', (['inputs'], {'width': 'width', 'is_training': 'is_training'}), '(inputs, width=width, is_training=is_training)\n', (863, 909), False, 'from mobilenetv2 import backbone\n'), ((1074, 1120), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '[1, 2]', 'keepdims': '(False)'}), '(x, axis=[1, 2], keepdims=False)\n', (1088, 1120), True, 'import tensorflow as tf\n'), ((1160, 1191), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['pool', '(192)'], {}), '(pool, 192)\n', (1180, 1191), True, 'import tensorflow.contrib.slim as slim\n'), ((1231, 1260), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['fc1', '(96)'], {}), '(fc1, 96)\n', (1251, 1260), True, 'import tensorflow.contrib.slim as slim\n'), ((1300, 1329), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['fc2', '(48)'], {}), '(fc2, 48)\n', (1320, 1329), True, 'import tensorflow.contrib.slim as slim\n'), ((1371, 1414), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['x', '(48)', '[3, 3]'], {'stride': '(1)'}), '(x, 48, [3, 3], stride=1)\n', (1389, 1414), True, 'import tensorflow.contrib.slim as slim\n'), ((1456, 1547), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['feat1', '(48)', '[3, 3]'], {'stride': '(1)', 'normalizer_fn': 'None', 'activation_fn': 'None'}), '(feat1, 48, [3, 3], stride=1, normalizer_fn=None,\n activation_fn=None)\n', (1474, 1547), True, 'import tensorflow.contrib.slim as slim\n'), ((1693, 1724), 'tensorflow.reshape', 'tf.reshape', (['fc3', '[bs, 1, 1, ch]'], {}), '(fc3, [bs, 1, 1, ch])\n', (1703, 1724), True, 'import tensorflow as 
tf\n'), ((1750, 1781), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['(feat2 + fc_reshape)'], {}), '(feat2 + fc_reshape)\n', (1761, 1781), True, 'import tensorflow as tf\n'), ((1823, 1928), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['fusion', '(24 * luma_bins)', '[1, 1]'], {'stride': '(1)', 'normalizer_fn': 'None', 'activation_fn': 'None'}), '(fusion, 24 * luma_bins, [1, 1], stride=1, normalizer_fn=\n None, activation_fn=None)\n', (1841, 1928), True, 'import tensorflow.contrib.slim as slim\n'), ((2307, 2353), 'tensorflow.reshape', 'tf.reshape', (['stack2', '[b, h, w, ch1 * ch2 * ch3]'], {}), '(stack2, [b, h, w, ch1 * ch2 * ch3])\n', (2317, 2353), True, 'import tensorflow as tf\n'), ((5204, 5232), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (5227, 5232), True, 'import tensorflow as tf\n'), ((6007, 6037), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['(guidemap - shifts)'], {}), '(guidemap - shifts)\n', (6018, 6037), True, 'import tensorflow as tf\n'), ((6191, 6227), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0 / in_ch)'], {}), '(1.0 / in_ch)\n', (6214, 6227), True, 'import tensorflow as tf\n'), ((986, 1029), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', (['x', '(48)', '[3, 3]'], {'stride': '(1)'}), '(x, 48, [3, 3], stride=1)\n', (1004, 1029), True, 'import tensorflow.contrib.slim as slim\n'), ((2016, 2043), 'tensorflow.split', 'tf.split', (['conv7', '(24)'], {'axis': '(3)'}), '(conv7, 24, axis=3)\n', (2024, 2043), True, 'import tensorflow as tf\n'), ((2087, 2114), 'tensorflow.split', 'tf.split', (['stack1', '(4)'], {'axis': '(4)'}), '(stack1, 4, axis=4)\n', (2095, 2114), True, 'import tensorflow as tf\n'), ((1629, 1642), 'tensorflow.shape', 'tf.shape', (['fc3'], {}), '(fc3)\n', (1637, 1642), True, 'import tensorflow as tf\n'), ((1647, 1660), 'tensorflow.shape', 'tf.shape', (['fc3'], {}), '(fc3)\n', (1655, 1660), True, 'import tensorflow as tf\n'), ((4954, 4972), 
'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (4969, 4972), True, 'import numpy as np\n')] |
import os
import sys
import unittest
import numpy as np
from scipy.io import mmread
from scipy.linalg import det
sys.path.append("../src")
from qr import QR
import utility as ut
from variables import *
class TestQR(unittest.TestCase):
@unittest.skip("Skipping complex random matrix tests because of floating point precision problems.")
def test_wilkinson_shift_random(self):
matrix_sizes = [10]
a = -10
b = 10
for n in matrix_sizes:
m = ut.complex_matrix(n, a, b, np.complex128)
qr_alg = QR(m)
u, r = qr_alg.qr_wilkinson_shift(1e-128, 500)
eigs = qr_alg.extract_eigs(r)
# Check the sum of the eigenvalues against the trace of H.
np.testing.assert_almost_equal(np.sum(eigs), np.trace(m), decimal = 2)
# Check the sum of the squares of the qigenvalues against the trace of H**2.
np.testing.assert_almost_equal(np.sum(eigs ** 2), np.trace(np.linalg.matrix_power(m, 2)), decimal = 2)
# Check the products of the eigenvalues against the determinant of H.
determinant = np.linalg.det(m)
# print(np.prod(eigs))
# print(determinant)
# print(np.prod(eigs) - determinant)
np.testing.assert_almost_equal(np.prod(eigs), determinant, decimal = 0)
def test_wilkinson_shift_market(self):
matrix_filenames = ["gre__115", "jgl011"]
err_msg = "The eigenvalues compute did not pass the test."
for file in matrix_filenames:
mat = mmread(os.path.join(MATRIX_MARKET_PATH, ".".join((file, MATRIX_MARKET_FILE_EXT))))
m = mat.toarray()
qr_alg = QR(m)
u, r = qr_alg.qr_wilkinson_shift(1e-128, 400)
eigs = qr_alg.extract_eigs(r)
# Check the sum of the eigenvalues against the trace of H.
np.testing.assert_almost_equal(np.sum(eigs), np.trace(m), decimal = 2)
# Check the sum of the squares of the qigenvalues against the trace of H**2.
np.testing.assert_almost_equal(np.sum(eigs ** 2), np.trace(np.linalg.matrix_power(m, 2)), decimal = 2)
# Check the products of the eigenvalues against the determinant of H.
determinant = np.linalg.det(m)
np.testing.assert_almost_equal(np.prod(eigs), determinant, decimal = 0)
if __name__ == "__main__":
unittest.main() | [
"sys.path.append",
"unittest.main",
"numpy.trace",
"numpy.sum",
"utility.complex_matrix",
"unittest.skip",
"qr.QR",
"numpy.linalg.matrix_power",
"numpy.linalg.det",
"numpy.prod"
] | [((115, 140), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (130, 140), False, 'import sys\n'), ((242, 351), 'unittest.skip', 'unittest.skip', (['"""Skipping complex random matrix tests because of floating point precision problems."""'], {}), "(\n 'Skipping complex random matrix tests because of floating point precision problems.'\n )\n", (255, 351), False, 'import unittest\n'), ((2135, 2150), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2148, 2150), False, 'import unittest\n'), ((458, 499), 'utility.complex_matrix', 'ut.complex_matrix', (['n', 'a', 'b', 'np.complex128'], {}), '(n, a, b, np.complex128)\n', (475, 499), True, 'import utility as ut\n'), ((512, 517), 'qr.QR', 'QR', (['m'], {}), '(m)\n', (514, 517), False, 'from qr import QR\n'), ((1016, 1032), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (1029, 1032), True, 'import numpy as np\n'), ((1505, 1510), 'qr.QR', 'QR', (['m'], {}), '(m)\n', (1507, 1510), False, 'from qr import QR\n'), ((2009, 2025), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (2022, 2025), True, 'import numpy as np\n'), ((700, 712), 'numpy.sum', 'np.sum', (['eigs'], {}), '(eigs)\n', (706, 712), True, 'import numpy as np\n'), ((714, 725), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (722, 725), True, 'import numpy as np\n'), ((854, 871), 'numpy.sum', 'np.sum', (['(eigs ** 2)'], {}), '(eigs ** 2)\n', (860, 871), True, 'import numpy as np\n'), ((1157, 1170), 'numpy.prod', 'np.prod', (['eigs'], {}), '(eigs)\n', (1164, 1170), True, 'import numpy as np\n'), ((1693, 1705), 'numpy.sum', 'np.sum', (['eigs'], {}), '(eigs)\n', (1699, 1705), True, 'import numpy as np\n'), ((1707, 1718), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (1715, 1718), True, 'import numpy as np\n'), ((1847, 1864), 'numpy.sum', 'np.sum', (['(eigs ** 2)'], {}), '(eigs ** 2)\n', (1853, 1864), True, 'import numpy as np\n'), ((2064, 2077), 'numpy.prod', 'np.prod', (['eigs'], {}), '(eigs)\n', (2071, 2077), 
True, 'import numpy as np\n'), ((882, 910), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['m', '(2)'], {}), '(m, 2)\n', (904, 910), True, 'import numpy as np\n'), ((1875, 1903), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['m', '(2)'], {}), '(m, 2)\n', (1897, 1903), True, 'import numpy as np\n')] |
#The script uses the neurons selected for the test dataset (datasets_to_test)
#to evaluate the performance of the model 'model'.
#
#The evaluation is based on the correlation coefficient of ground truth and
#prediction, both of them downsampled to 25 Hz, as for the evaluation via
#spikefinder.codeneuro.org.
#
#performance_metrics gives a 10-element vector with the median of the correlation
#coefficient, taken over all neurons of the respective dataset.
import numpy as np
# NOTE(review): `metrics` is imported but unused in this cell -- presumably
# needed by the surrounding notebook/script; confirm before removing.
from keras import metrics
# Result container indexed as alldata3[dataset_index][neuron_index].
# `neurons` and `datasets` are assumed to be parallel sequences of indices
# defined by an earlier cell -- TODO confirm.
alldata3 = {}
for i in range(len(neurons)):
    dataset_index = int(datasets[i])
    neuron_index = int(neurons[i])
    if not dataset_index in alldata3:
        alldata3[ dataset_index ] = {}
    if not neuron_index in alldata3[ dataset_index ]:
        alldata3[ dataset_index ][ neuron_index ] = {"prediction": None, "performance": None}
# One summary value (median correlation) per dataset; 10 datasets are assumed.
performance_metrics = np.zeros([10,1])
for ds_idx, neuron_list in datasets_to_test.items():
    # for a single dataset, gather all training data
    print("process dataset", ds_idx)
    Performance = np.zeros([len(neuron_list),1])
    for neuron_idx in neuron_list:
        d = alldata2[ds_idx][neuron_idx]
        calcium_data = d[ calcium_trace ]
        spike_gt = d["spikes"]
        number_of_points = len(calcium_data)
        # One sliding window of raw calcium per time point: the window spans
        # before_frac/after_frac fractions of `windowsize` around idx.
        Xtrain = np.zeros( (number_of_points, windowsize, 1), dtype=np.float32 )
        for idx in range(int(windowsize*before_frac), number_of_points - int(windowsize*after_frac)):
            start_idx = idx - int(windowsize*before_frac)
            end_idx = idx + int(windowsize*after_frac)
            Xtrain[idx,:,0] = calcium_data[start_idx:end_idx]
        Ypredict = model.predict( Xtrain )
        # Pad the borders (where no full window exists) with the mean prediction.
        Ypredict[0:int(windowsize*before_frac)] = np.mean(Ypredict)
        Ypredict[ (number_of_points - int(windowsize*after_frac))::] = np.mean(Ypredict)
        # Downsample both traces by a factor of 4 (to 25 Hz, per the header
        # comment) using a length-4 moving sum.
        Prediction = np.convolve(Ypredict.ravel(),np.ones(4), 'valid')[::4]
        GroundTruth = np.convolve(spike_gt.ravel(),np.ones(4), 'valid')[::4]
        # NOTE(review): indexing Performance by `neuron_idx` assumes neuron_list
        # enumerates 0..len(neuron_list)-1 -- confirm, otherwise this can go
        # out of bounds or leave unfilled zeros.
        Performance[neuron_idx] = np.corrcoef(Prediction,GroundTruth)[0,1]
        print("process neuron", neuron_idx,"Performance", int(100*Performance[neuron_idx]))
        alldata3[ ds_idx ][ neuron_idx ][ "prediction" ] = Ypredict
        alldata3[ ds_idx ][ neuron_idx ][ "performance" ] = Performance[neuron_idx]
    print("dataset", ds_idx," processed, Performance", int(100*np.median(Performance)))
    # Datasets are assumed to be numbered 1..10 (hence the ds_idx-1 offset).
    performance_metrics[ds_idx-1] = np.median(Performance)
| [
"numpy.median",
"numpy.corrcoef",
"numpy.zeros",
"numpy.ones",
"numpy.mean"
] | [((870, 887), 'numpy.zeros', 'np.zeros', (['[10, 1]'], {}), '([10, 1])\n', (878, 887), True, 'import numpy as np\n'), ((2294, 2316), 'numpy.median', 'np.median', (['Performance'], {}), '(Performance)\n', (2303, 2316), True, 'import numpy as np\n'), ((1248, 1309), 'numpy.zeros', 'np.zeros', (['(number_of_points, windowsize, 1)'], {'dtype': 'np.float32'}), '((number_of_points, windowsize, 1), dtype=np.float32)\n', (1256, 1309), True, 'import numpy as np\n'), ((1639, 1656), 'numpy.mean', 'np.mean', (['Ypredict'], {}), '(Ypredict)\n', (1646, 1656), True, 'import numpy as np\n'), ((1722, 1739), 'numpy.mean', 'np.mean', (['Ypredict'], {}), '(Ypredict)\n', (1729, 1739), True, 'import numpy as np\n'), ((1909, 1945), 'numpy.corrcoef', 'np.corrcoef', (['Prediction', 'GroundTruth'], {}), '(Prediction, GroundTruth)\n', (1920, 1945), True, 'import numpy as np\n'), ((1784, 1794), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1791, 1794), True, 'import numpy as np\n'), ((1855, 1865), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1862, 1865), True, 'import numpy as np\n'), ((2236, 2258), 'numpy.median', 'np.median', (['Performance'], {}), '(Performance)\n', (2245, 2258), True, 'import numpy as np\n')] |
import copy
import torch
import logging
import numpy as np
from sacred import Experiment
from noge.data_loaders import get_datasets, get_test_loader, get_train_generator
from noge.factory import make_env, make_memory
from noge.network import make_network
from noge.neural import DFPRegressionLoss
from noge.agent import Actor, main_loop, loop_ing
from noge.trainers import DFPTrainer, DFPReplay
from noge.policies import LinearSchedule, GraphDFPPolicy
from noge.preprocessors import Preprocessor
from noge.evaluation import Evaluator, eval_ing
from noge.constants import CONFIGS_DIR, EVAL_DIR
from xlog.utils import get_logger
from xlog.mlflow_observer import MlflowObserver
# Sacred experiment: bundles config injection, logging and result tracking.
ex = Experiment(name='NOGE_DFP', ingredients=[eval_ing, loop_ing])
ex.add_config(str(CONFIGS_DIR / 'dfp.yaml'))  # configuration is in ./configs/dfp.yaml
ex.logger = get_logger(__name__, level=logging.INFO)
# Runs are recorded under the evaluation directory via the MLflow observer.
ex.observers = [MlflowObserver(tracking_uri=str(EVAL_DIR.absolute()))]
@ex.automain
def train(dataset, test_size, max_episode_steps, input_meas_type, output_meas_type, meas_transform,
          target_transform, meas_coeffs, future_steps, temporal_coeffs, sample_goals, goal_space, node_history,
          cat_features, feature_range, replay_capacity, min_horizon, epsilon_start, epsilon_end,
          exploration_frac, n_train_steps, train_freq, loss, batch_size, lr, n_test_episodes, init_eval,
          n_eval_artifacts, test_freq, log_freq, device, seed, data_seed, save_model, _log, _run, _config):
    """Train a Direct Future Prediction (DFP) agent on graph exploration tasks.

    All arguments are injected by Sacred from the YAML config registered
    above; ``_log``, ``_run`` and ``_config`` are Sacred's special objects.
    Pipeline: load graph datasets, build train/test environments and the
    measurement preprocessor, construct the network, policies, replay buffer
    and trainer, pre-fill the replay buffer, then run the interleaved
    act/train/evaluate main loop.

    Returns the best evaluation performance reached, as reported by main_loop.
    """
    np.set_printoptions(precision=2, suppress=True)
    if device.startswith('cuda'):
        assert torch.cuda.is_available()
    logger = _log
    device = torch.device(device)
    # convert lists to numpy arrays
    assert len(temporal_coeffs) == len(future_steps)
    temporal_coeffs = np.asarray(temporal_coeffs, dtype=np.float32)
    future_steps = np.asarray(future_steps, dtype=np.int64)
    # target coeffs
    assert len(meas_coeffs) == len(output_meas_type)
    meas_coeffs = np.asarray(meas_coeffs, dtype=np.float32)
    logger.info(f"Output meas coeffs ({output_meas_type}): {meas_coeffs}")
    # make sure measurement coefficients are valid
    if 'L' in output_meas_type:
        assert meas_coeffs[0] < 0, "PLOT should be minimized"
    elif 'R' in output_meas_type:
        assert meas_coeffs[0] > 0, "exploration rate should be maximized"
    # load graph data set
    train_set, test_set = get_datasets(dataset, seed=data_seed, test_size=test_size)
    max_nodes = max(train_set.max_nodes, test_set.max_nodes)
    max_edges = 2 * max(train_set.max_edges, test_set.max_edges)  # for undirected graphs, consider both directions
    test_loader = get_test_loader(test_set, seed=seed, num_samples=n_test_episodes)
    train_gen = get_train_generator(train_set, seed=seed)
    # create preprocessor/postprocessor for inputs and outputs of neural network
    preprocessor = Preprocessor(input_meas_type=input_meas_type,
                                output_meas_type=output_meas_type,
                                feature_range=feature_range,
                                meas_transform=meas_transform,
                                target_transform=target_transform,
                                temporal_offsets=future_steps,
                                max_nodes=max_nodes,
                                device=device)
    # environment configuration
    train_env_config = dict(
        max_episode_steps=max_episode_steps,
        temporal_coeffs=temporal_coeffs,
        meas_coeffs=meas_coeffs,
        goal_space=goal_space,
        sample_goals=sample_goals,
        max_nodes=max_nodes,
        max_edges=max_edges,
        nn_feat='N' in cat_features,
    )
    # create training and testing environments
    train_env = make_env(**train_env_config, data_generator=train_gen, seed=seed)
    # test env shares the train config but uses fixed goals and no generator
    test_env_config = copy.deepcopy(train_env_config)
    test_env_config.update(sample_goals=False, data_generator=None)
    test_env = make_env(**test_env_config, seed=seed)
    # graph memory configuration
    neg_label, pos_label = feature_range
    mem_features = dict(cat=cat_features)
    graph_mem_config = dict(
        max_episode_steps=max_episode_steps,
        max_nodes=max_nodes,
        max_edges=max_edges,
        history=node_history,
        memory_type='cat',
        features=mem_features,
        neg_label=neg_label,
        pos_label=pos_label
    )
    # create acting memory (during training) and evaluation memory
    acting_memory = make_memory(online=True, **graph_mem_config)
    eval_memory = make_memory(online=True, **graph_mem_config)
    # neural net configuration
    model_config = dict(
        dim_node=eval_memory.dim_node,
        dim_meas=preprocessor.dim_input_meas,
        dim_goal=len(meas_coeffs) * len(temporal_coeffs),
        max_edges=max_edges,
        **_config['model']
    )
    # create neural net
    network = make_network(**model_config).to(device)
    # evaluation policy
    eval_policy = GraphDFPPolicy(network, eval_memory, preprocessor=preprocessor, device=device)
    evaluator = Evaluator(test_loader, test_env, eval_policy)
    # experience collecting policy (epsilon decays linearly over the first
    # exploration_frac fraction of the training steps)
    exploration_steps = int(exploration_frac * n_train_steps)
    exploration_schedule = LinearSchedule(epsilon_start, epsilon_end, exploration_steps)
    acting_policy = GraphDFPPolicy(network,
                                   graph_memory=acting_memory,
                                   preprocessor=preprocessor,
                                   exploration_schedule=exploration_schedule,
                                   device=device)
    # replay buffer
    replay_buffer = DFPReplay(capacity=replay_capacity,
                              ob_space=train_env.observation_space,
                              graph_mem_config=graph_mem_config,
                              future_steps=future_steps,
                              min_horizon=min_horizon)
    # actor: runs the simulation loop and stores transitions to the replay buffer
    actor = Actor(train_env, acting_policy, replay_buffer)
    # trainer
    optimizer = torch.optim.Adam(network.parameters(), lr=lr)
    if loss == 'mse':
        criterion = DFPRegressionLoss()
    else:
        raise ValueError(f"Unsupported loss: {loss}")
    # Direct Future Prediction Trainer: samples from replay buffer and updates neural net params
    trainer = DFPTrainer(replay_buffer=replay_buffer,
                         batch_size=batch_size,
                         network=network,
                         preprocessor=preprocessor,
                         criterion=criterion,
                         optimizer=optimizer,
                         device=device)
    # fill up the replay buffer
    # (the net stays in eval mode while the initial experience is collected)
    network.eval()
    logger.info(f"Filling up the replay buffer...")
    actor.step(n=replay_capacity, use_tqdm=True)
    logger.info(f"Replay buffer filled: [{len(replay_buffer)} / {replay_capacity}]")
    # fit the preprocessor with first buffer data
    preprocessor.fit(replay_buffer._measurements)
    # run the main loop
    best_perf = main_loop(actor, trainer, evaluator, network, exploration_schedule,
                          init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq, test_freq, save_model)
    # clean up
    train_env.close()
    evaluator.close()
    return best_perf
| [
"noge.preprocessors.Preprocessor",
"noge.constants.EVAL_DIR.absolute",
"torch.device",
"noge.data_loaders.get_test_loader",
"numpy.set_printoptions",
"xlog.utils.get_logger",
"noge.data_loaders.get_datasets",
"sacred.Experiment",
"noge.evaluation.Evaluator",
"noge.policies.GraphDFPPolicy",
"copy... | [((682, 743), 'sacred.Experiment', 'Experiment', ([], {'name': '"""NOGE_DFP"""', 'ingredients': '[eval_ing, loop_ing]'}), "(name='NOGE_DFP', ingredients=[eval_ing, loop_ing])\n", (692, 743), False, 'from sacred import Experiment\n'), ((843, 883), 'xlog.utils.get_logger', 'get_logger', (['__name__'], {'level': 'logging.INFO'}), '(__name__, level=logging.INFO)\n', (853, 883), False, 'from xlog.utils import get_logger\n'), ((1497, 1544), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (1516, 1544), True, 'import numpy as np\n'), ((1653, 1673), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1665, 1673), False, 'import torch\n'), ((1786, 1831), 'numpy.asarray', 'np.asarray', (['temporal_coeffs'], {'dtype': 'np.float32'}), '(temporal_coeffs, dtype=np.float32)\n', (1796, 1831), True, 'import numpy as np\n'), ((1851, 1891), 'numpy.asarray', 'np.asarray', (['future_steps'], {'dtype': 'np.int64'}), '(future_steps, dtype=np.int64)\n', (1861, 1891), True, 'import numpy as np\n'), ((1984, 2025), 'numpy.asarray', 'np.asarray', (['meas_coeffs'], {'dtype': 'np.float32'}), '(meas_coeffs, dtype=np.float32)\n', (1994, 2025), True, 'import numpy as np\n'), ((2408, 2466), 'noge.data_loaders.get_datasets', 'get_datasets', (['dataset'], {'seed': 'data_seed', 'test_size': 'test_size'}), '(dataset, seed=data_seed, test_size=test_size)\n', (2420, 2466), False, 'from noge.data_loaders import get_datasets, get_test_loader, get_train_generator\n'), ((2663, 2728), 'noge.data_loaders.get_test_loader', 'get_test_loader', (['test_set'], {'seed': 'seed', 'num_samples': 'n_test_episodes'}), '(test_set, seed=seed, num_samples=n_test_episodes)\n', (2678, 2728), False, 'from noge.data_loaders import get_datasets, get_test_loader, get_train_generator\n'), ((2745, 2786), 'noge.data_loaders.get_train_generator', 'get_train_generator', (['train_set'], {'seed': 'seed'}), '(train_set, 
seed=seed)\n', (2764, 2786), False, 'from noge.data_loaders import get_datasets, get_test_loader, get_train_generator\n'), ((2888, 3145), 'noge.preprocessors.Preprocessor', 'Preprocessor', ([], {'input_meas_type': 'input_meas_type', 'output_meas_type': 'output_meas_type', 'feature_range': 'feature_range', 'meas_transform': 'meas_transform', 'target_transform': 'target_transform', 'temporal_offsets': 'future_steps', 'max_nodes': 'max_nodes', 'device': 'device'}), '(input_meas_type=input_meas_type, output_meas_type=\n output_meas_type, feature_range=feature_range, meas_transform=\n meas_transform, target_transform=target_transform, temporal_offsets=\n future_steps, max_nodes=max_nodes, device=device)\n', (2900, 3145), False, 'from noge.preprocessors import Preprocessor\n'), ((3767, 3832), 'noge.factory.make_env', 'make_env', ([], {'data_generator': 'train_gen', 'seed': 'seed'}), '(**train_env_config, data_generator=train_gen, seed=seed)\n', (3775, 3832), False, 'from noge.factory import make_env, make_memory\n'), ((3855, 3886), 'copy.deepcopy', 'copy.deepcopy', (['train_env_config'], {}), '(train_env_config)\n', (3868, 3886), False, 'import copy\n'), ((3970, 4008), 'noge.factory.make_env', 'make_env', ([], {'seed': 'seed'}), '(**test_env_config, seed=seed)\n', (3978, 4008), False, 'from noge.factory import make_env, make_memory\n'), ((4497, 4541), 'noge.factory.make_memory', 'make_memory', ([], {'online': '(True)'}), '(online=True, **graph_mem_config)\n', (4508, 4541), False, 'from noge.factory import make_env, make_memory\n'), ((4560, 4604), 'noge.factory.make_memory', 'make_memory', ([], {'online': '(True)'}), '(online=True, **graph_mem_config)\n', (4571, 4604), False, 'from noge.factory import make_env, make_memory\n'), ((4989, 5067), 'noge.policies.GraphDFPPolicy', 'GraphDFPPolicy', (['network', 'eval_memory'], {'preprocessor': 'preprocessor', 'device': 'device'}), '(network, eval_memory, preprocessor=preprocessor, device=device)\n', (5003, 5067), False, 'from 
noge.policies import LinearSchedule, GraphDFPPolicy\n'), ((5084, 5129), 'noge.evaluation.Evaluator', 'Evaluator', (['test_loader', 'test_env', 'eval_policy'], {}), '(test_loader, test_env, eval_policy)\n', (5093, 5129), False, 'from noge.evaluation import Evaluator, eval_ing\n'), ((5255, 5316), 'noge.policies.LinearSchedule', 'LinearSchedule', (['epsilon_start', 'epsilon_end', 'exploration_steps'], {}), '(epsilon_start, epsilon_end, exploration_steps)\n', (5269, 5316), False, 'from noge.policies import LinearSchedule, GraphDFPPolicy\n'), ((5337, 5478), 'noge.policies.GraphDFPPolicy', 'GraphDFPPolicy', (['network'], {'graph_memory': 'acting_memory', 'preprocessor': 'preprocessor', 'exploration_schedule': 'exploration_schedule', 'device': 'device'}), '(network, graph_memory=acting_memory, preprocessor=\n preprocessor, exploration_schedule=exploration_schedule, device=device)\n', (5351, 5478), False, 'from noge.policies import LinearSchedule, GraphDFPPolicy\n'), ((5655, 5823), 'noge.trainers.DFPReplay', 'DFPReplay', ([], {'capacity': 'replay_capacity', 'ob_space': 'train_env.observation_space', 'graph_mem_config': 'graph_mem_config', 'future_steps': 'future_steps', 'min_horizon': 'min_horizon'}), '(capacity=replay_capacity, ob_space=train_env.observation_space,\n graph_mem_config=graph_mem_config, future_steps=future_steps,\n min_horizon=min_horizon)\n', (5664, 5823), False, 'from noge.trainers import DFPTrainer, DFPReplay\n'), ((6031, 6077), 'noge.agent.Actor', 'Actor', (['train_env', 'acting_policy', 'replay_buffer'], {}), '(train_env, acting_policy, replay_buffer)\n', (6036, 6077), False, 'from noge.agent import Actor, main_loop, loop_ing\n'), ((6394, 6567), 'noge.trainers.DFPTrainer', 'DFPTrainer', ([], {'replay_buffer': 'replay_buffer', 'batch_size': 'batch_size', 'network': 'network', 'preprocessor': 'preprocessor', 'criterion': 'criterion', 'optimizer': 'optimizer', 'device': 'device'}), '(replay_buffer=replay_buffer, batch_size=batch_size, network=\n network, 
preprocessor=preprocessor, criterion=criterion, optimizer=\n optimizer, device=device)\n', (6404, 6567), False, 'from noge.trainers import DFPTrainer, DFPReplay\n'), ((7088, 7252), 'noge.agent.main_loop', 'main_loop', (['actor', 'trainer', 'evaluator', 'network', 'exploration_schedule', 'init_eval', 'n_eval_artifacts', 'n_train_steps', 'train_freq', 'log_freq', 'test_freq', 'save_model'], {}), '(actor, trainer, evaluator, network, exploration_schedule,\n init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq,\n test_freq, save_model)\n', (7097, 7252), False, 'from noge.agent import Actor, main_loop, loop_ing\n'), ((1595, 1620), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1618, 1620), False, 'import torch\n'), ((6198, 6217), 'noge.neural.DFPRegressionLoss', 'DFPRegressionLoss', ([], {}), '()\n', (6215, 6217), False, 'from noge.neural import DFPRegressionLoss\n'), ((4906, 4934), 'noge.network.make_network', 'make_network', ([], {}), '(**model_config)\n', (4918, 4934), False, 'from noge.network import make_network\n'), ((932, 951), 'noge.constants.EVAL_DIR.absolute', 'EVAL_DIR.absolute', ([], {}), '()\n', (949, 951), False, 'from noge.constants import CONFIGS_DIR, EVAL_DIR\n')] |
import pandas as pd
import pandas_datareader as pdr
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
## Data import and cleaning
# a. Import of environmental data from the OECD statistics bank API
df = pdr.DataReader("AIR_GHG","oecd")
df.head()
# b. Import of locally placed OECD data on wages
df_wages= pd.read_csv("oecdwages.csv")
print("This is the first 5 rows of the wages dataset")
df_wages.head()
# a. The environmental data is multi-indexed. Therefore we rearrange the index of the dataset
# NOTE(review): reset_index() is not in-place and its result is discarded,
# so this line has no effect on df -- confirm whether it was meant to rebind df.
df.reset_index()
# NOTE(review): leftover introspection call; its return value is unused.
dir(df.index)
# Flatten the MultiIndex column labels into single "<country> <indicator>" strings.
df.columns = [' '.join(col).strip() for col in df.columns.values]
# b. In this section, we define a dictionary with all the countries and the countrycodes for the OECD countries
countries = ["Australia","Austria","Belgium","Canada","Chile","Czech Republic","Denmark","Estonia","Finland","France",
             "Germany","Greece","Hungary","Iceland","Ireland","Israel","Italy","Japan","Korea","Latvia","Lithuania",
             "Luxembourg","Mexico","Netherlands","New Zealand","Norway","Poland","Portugal","Slovak Republic",
             "Slovenia","Spain","Sweden","Switzerland","United Kingdom","United States"]
countrycode = ["AUS", "AUT", "BEL", "CAN", "CHL", "CZE", "DNK", "EST", "FIN", "FRA", "DEU","GRC", "HUN", "ISL",
               "IRL", "ISR", "ITA", "JPN", "KOR", "LVA", "LTU", "LUX", "MEX", "NLD", "NZL", "NOR", "POL", "PRT",
               "SVK", "SVN", "SWE", "ESP", "CHE", "GBR", "USA"]
ccc = dict(zip(countries,countrycode))
# c. We make a tidy dataset, by making a row for each country in each year, and add the variable "Greenhouse gases..."
#to this new data set
# c.i. We initiate an empty list for the dataset and set a counter to 0
x = []
i = 0
#c.ii. Initiate the for-loop that adds the rows to the dataset
# The counter i cycles over the values 0..6, i.e. the seven yearly rows per country.
for c in countries:
    for y in df.index.values :
        x.append({"country" : c, "countrycode" : ccc[c],"emissions_GHG" : df[c+" "+"Greenhouse gases Total emissions excluding LULUCF"][i]})
        i = i + 1
        if i > 6 :
            i = 0
# c.iii. Examining the new dataset
df_env = pd.DataFrame(x)
df_env.head()
# a. We define the variables that we do not need in our analysis, and drop them
drop_these= ["INDICATOR","FREQUENCY","MEASURE","SUBJECT", "Flag Codes"]
df_wages = df_wages.drop(drop_these, axis=1, inplace=False)
# b. We change the name of some of the variables
df_wages.rename(columns = {'LOCATION':'countrycode', 'Value' : 'average wage', 'TIME' : 'year'}, inplace=True)
## Merging data sets
# a. We define the two datasets, and reset their index
left = df_wages.sort_values("countrycode")
left = left.reset_index()
rigth = df_env.sort_values("countrycode")
rigth = rigth.reset_index()
# b. We merge the two datasets on their positional (reset) row indexes.
# NOTE(review): this is an index-on-index merge (pandas default how='inner'),
# not a left merge; it assumes both frames have the same length and row order
# after sorting by countrycode -- the verification loop below checks this.
data_all = left.merge(rigth,left_index=True,right_index=True)
data_all.head()
# a. This for-loop tests whether or not the data has been merged correctly
for i in data_all.index.values :
    # a.i. This statement checks if all the rows have the same countrycode in the two countrycode variables
    if data_all["countrycode_x"][i]==data_all["countrycode_y"][i] :
        # The success message is only printed once the last row has been reached.
        if i == data_all.index.values[-1] :
            print("No mistakes in the mergin process")
    else :
        print("mistake in "+data_all["country"][i])
# a. This section drops the extra countrycode variable
drop_these= ["index_x","index_y","countrycode_y"]
data_all = data_all.drop(drop_these, axis=1, inplace=False)
data_all.rename(columns ={"countrycode_x":"countrycode"},inplace=True)
# b. Then we sort the dataset by countrycode and year, afterwards we reset the index
data_all = data_all.sort_values(by=["countrycode","year"])
data_all = data_all.reset_index(drop=True)
print(data_all.head())
print(data_all.tail())
## Creating new variables
# a. We use the apply method and pct_change to make two new percentage-change
# variables (computed within each country; relies on the year sort above)
data_all['d_GHG'] = data_all.groupby('countrycode')['emissions_GHG'].apply(lambda x: x.pct_change())*100
data_all['d_aw'] = data_all.groupby('countrycode')['average wage'].apply(lambda x: x.pct_change())*100
# b. inspect the new data - it should contain a NaN value for each country in the year 2010
data_all.head()
# a. We inspect both the mean and the median of the OECD countries' change in greenhouse gas emissions
GHG_change = data_all.groupby("year").d_GHG.mean()
print("The mean of change in greenhouse gas emission, in the OECD each",GHG_change)
print("The median change in greenhouse gas emissions in the OECD each", data_all.groupby("year").d_GHG.median())
# a. We inspect both the mean and the median of the OECD countries' change in average wages
AW_change = data_all.groupby("year").d_aw.mean()
print("The mean of change in average wages, in the OECD each", AW_change)
print("The median of change in average wages, in the OECD each",data_all.groupby("year").d_aw.median())
## New functions
def information(a, b=0, variable=True):
    """Return rows of ``data_all`` for one country, optionally filtered.

    Parameters
    ----------
    a : str
        Country code (first column of ``data_all``), e.g. "USA".
    b : int, optional
        Year to filter on; 0 (the default) returns all years.
    variable : optional
        'co2' returns the emissions column, 'wage' the average-wage column;
        any other value (the default) returns both.

    Returns
    -------
    pandas.DataFrame with the selected year/country/value columns.
    """
    x = data_all[data_all["countrycode"] == a]
    # Fix: filter with a mask built from the subset itself. The original
    # indexed x with a boolean Series built from the full data_all frame,
    # which triggers pandas' "will be reindexed" warning, and it also left
    # dead assignments in several branches.
    if b != 0:
        x = x[x["year"] == b]
    if variable == 'co2':
        return x.loc[:, ["year", "country", "emissions_GHG"]]
    if variable == 'wage':
        return x.loc[:, ["year", "country", "average wage"]]
    return x.loc[:, ["year", "country", "average wage", "emissions_GHG"]]
def translate(code=True, countrycode=True):
    """Translate between a country code and a country name.

    With ``countrycode=True`` (default) ``code`` is treated as a country
    code and the matching country name is returned; with
    ``countrycode=False`` ``code`` is treated as a country name and the
    matching code is returned.  Any other value yields a spelling hint.
    """
    if countrycode == True :
        matches = data_all[data_all["countrycode"]==code]["country"].unique()
    elif countrycode == False :
        matches = data_all[data_all["country"]==code]["countrycode"].unique()
    else :
        return("check you'r spelling")
    # unique() gives an array; strip the bracket/quote wrapper from its repr.
    return str(matches).replace("['","").replace("']","")
## Visual analysis
# Yearly OECD-average percentage change in emissions vs. wages over time.
plt.plot(GHG_change,color="g")
plt.plot(AW_change,color="b")
plt.xlabel("Year")
plt.ylabel("Percentage change")
plt.legend(["Greenhouse gas emissions","Average wage"])
plt.axhline(y=0,color="r",linestyle="dashed")
plt.show()
plt.clf()  # fix: call clf() -- the bare attribute access was a no-op
# Mean wage per country, sorted ascending.
av_w_c= data_all.groupby("countrycode")["average wage"].mean().sort_values()
av_w_c.plot.bar()
plt.xlabel("countrycode")
plt.ylabel("average wage in USD")
plt.show()
plt.clf()  # fix: was a no-op (missing call parentheses)
# Mean greenhouse gas emissions per country.
av_e_c= data_all.groupby("country")["emissions_GHG"].mean()
plt.ylabel("greenhouse gas emissions (thousands of metric tons)")
plt.xlabel("countrycode")
av_e_c.plot.bar()
plt.show()
plt.clf()  # fix: was a no-op (missing call parentheses)
#chart plot
# Share of total 2016 OECD greenhouse gas emissions: the three largest
# emitters vs. the rest.
united_state_d = information('USA', 2016)
country1 = float(united_state_d.loc[:,"emissions_GHG"])
japan_d = information('JPN', 2016)
country2 = float(japan_d.loc[:,"emissions_GHG"])
germany_d = information("DEU", 2016)
country3 = float(germany_d.loc[:,"emissions_GHG"])
# fix: Japan's country code is "JPN", not "JAP" (see the ccc mapping above);
# with "JAP" Japan was double-counted inside the "Others" slice.
others_d = data_all[(~data_all["countrycode"].isin(["USA","JPN","DEU"]))]
others_d = others_d[others_d['year'] == 2016]["emissions_GHG"]
others = float(np.nansum(others_d))
chart = [country1, country2, country3, others]
labels = 'United State', 'Japan', 'Germany', 'Others'
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
exp = [0.15 , 0, 0, 0]  # pull the first slice (United States) slightly out
fig1, chartg = plt.subplots()
chartg.pie(chart, explode=exp , labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
chartg.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
plt.show()
plt.clf()  # fix: call clf() -- the bare attribute access was a no-op
def get_con(Country = "Australia"):
    """Print summary statistics and plot yearly percentage changes for one country.

    Prints the mean greenhouse gas emissions and the mean average wage of
    ``Country`` and plots the year-on-year percentage change of both series.
    Designed to be driven interactively via ``widgets.interact``.
    """
    print("Country: "+ Country)
    # translate(..., countrycode=False) maps the country name to its code,
    # which information() uses to select that country's rows.
    print("Mean of Greenhouse gas emissions:" , round(information(translate(Country,countrycode=False))["emissions_GHG"].mean(),2))
    print("Mean of average wages:" , round(information(translate(Country,countrycode=False))["average wage"].mean(),2))
    fig, ax = plt.subplots()
    fig.canvas.draw()
    plt.plot(data_all[data_all["country"]==Country]["d_GHG"],color="g")
    plt.plot(data_all[data_all["country"]==Country]["d_aw"],color="b")
    plt.xlabel("Year")
    plt.ylabel("Percentage change")
    # NOTE(review): tick labels are hard-coded; this assumes exactly the
    # years 2010-2016 are plotted -- confirm if the data range ever changes.
    labels= ["2010","2011","2012","2013","2014","2015","2016"]
    ax.set_xticklabels(labels)
    plt.legend(["Greenhouse gas emissions","Average wage"])
    plt.axhline(y=0,color="r",linestyle="dashed")
    plt.show()
    return
widgets.interact(get_con,Country=data_all["country"].unique()) | [
"pandas_datareader.DataReader",
"pandas.DataFrame",
"matplotlib.pyplot.axhline",
"numpy.nansum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((233, 266), 'pandas_datareader.DataReader', 'pdr.DataReader', (['"""AIR_GHG"""', '"""oecd"""'], {}), "('AIR_GHG', 'oecd')\n", (247, 266), True, 'import pandas_datareader as pdr\n'), ((336, 364), 'pandas.read_csv', 'pd.read_csv', (['"""oecdwages.csv"""'], {}), "('oecdwages.csv')\n", (347, 364), True, 'import pandas as pd\n'), ((2107, 2122), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (2119, 2122), True, 'import pandas as pd\n'), ((7112, 7143), 'matplotlib.pyplot.plot', 'plt.plot', (['GHG_change'], {'color': '"""g"""'}), "(GHG_change, color='g')\n", (7120, 7143), True, 'import matplotlib.pyplot as plt\n'), ((7143, 7173), 'matplotlib.pyplot.plot', 'plt.plot', (['AW_change'], {'color': '"""b"""'}), "(AW_change, color='b')\n", (7151, 7173), True, 'import matplotlib.pyplot as plt\n'), ((7173, 7191), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (7183, 7191), True, 'import matplotlib.pyplot as plt\n'), ((7192, 7223), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage change"""'], {}), "('Percentage change')\n", (7202, 7223), True, 'import matplotlib.pyplot as plt\n'), ((7224, 7280), 'matplotlib.pyplot.legend', 'plt.legend', (["['Greenhouse gas emissions', 'Average wage']"], {}), "(['Greenhouse gas emissions', 'Average wage'])\n", (7234, 7280), True, 'import matplotlib.pyplot as plt\n'), ((7280, 7327), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""r"""', 'linestyle': '"""dashed"""'}), "(y=0, color='r', linestyle='dashed')\n", (7291, 7327), True, 'import matplotlib.pyplot as plt\n'), ((7326, 7336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7334, 7336), True, 'import matplotlib.pyplot as plt\n'), ((7441, 7466), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""countrycode"""'], {}), "('countrycode')\n", (7451, 7466), True, 'import matplotlib.pyplot as plt\n'), ((7467, 7500), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average wage in USD"""'], {}), "('average wage in 
USD')\n", (7477, 7500), True, 'import matplotlib.pyplot as plt\n'), ((7501, 7511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7509, 7511), True, 'import matplotlib.pyplot as plt\n'), ((7582, 7647), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""greenhouse gas emissions (thousands of metric tons)"""'], {}), "('greenhouse gas emissions (thousands of metric tons)')\n", (7592, 7647), True, 'import matplotlib.pyplot as plt\n'), ((7648, 7673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""countrycode"""'], {}), "('countrycode')\n", (7658, 7673), True, 'import matplotlib.pyplot as plt\n'), ((7692, 7702), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7700, 7702), True, 'import matplotlib.pyplot as plt\n'), ((8372, 8386), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8384, 8386), True, 'import matplotlib.pyplot as plt\n'), ((8519, 8529), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8527, 8529), True, 'import matplotlib.pyplot as plt\n'), ((8147, 8166), 'numpy.nansum', 'np.nansum', (['others_d'], {}), '(others_d)\n', (8156, 8166), True, 'import numpy as np\n'), ((8877, 8891), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((8918, 8988), 'matplotlib.pyplot.plot', 'plt.plot', (["data_all[data_all['country'] == Country]['d_GHG']"], {'color': '"""g"""'}), "(data_all[data_all['country'] == Country]['d_GHG'], color='g')\n", (8926, 8988), True, 'import matplotlib.pyplot as plt\n'), ((8990, 9059), 'matplotlib.pyplot.plot', 'plt.plot', (["data_all[data_all['country'] == Country]['d_aw']"], {'color': '"""b"""'}), "(data_all[data_all['country'] == Country]['d_aw'], color='b')\n", (8998, 9059), True, 'import matplotlib.pyplot as plt\n'), ((9061, 9079), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (9071, 9079), True, 'import matplotlib.pyplot as plt\n'), ((9084, 9115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Percentage change"""'], {}), "('Percentage change')\n", (9094, 9115), True, 'import matplotlib.pyplot as plt\n'), ((9215, 9271), 'matplotlib.pyplot.legend', 'plt.legend', (["['Greenhouse gas emissions', 'Average wage']"], {}), "(['Greenhouse gas emissions', 'Average wage'])\n", (9225, 9271), True, 'import matplotlib.pyplot as plt\n'), ((9275, 9322), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""r"""', 'linestyle': '"""dashed"""'}), "(y=0, color='r', linestyle='dashed')\n", (9286, 9322), True, 'import matplotlib.pyplot as plt\n'), ((9325, 9335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9333, 9335), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pandas as pd
import statsmodels.api as sm
import asym_io
def pred_fold(X, coefs):
    """Evaluate the linear model ``coefs[0] + coefs[1] * X``.

    :param X: predictor value(s); scalars and numpy arrays both work
    :param coefs: sequence (intercept, slope)
    :return: the predicted value(s)
    """
    intercept, slope = coefs[0], coefs[1]
    return intercept + slope * X
def linear_fit(X, Y):
    """Ordinary least squares fit of Y against X with an intercept term.

    :param X: predictor values
    :param Y: response values
    :return: the fitted statsmodels regression results object
    """
    X = sm.add_constant(X)
    mod = sm.OLS(Y, X)
    return mod.fit()
def linear_fit_with_ci(X, Y, ci=0.05):
    """OLS fit of Y against X, returning parameters plus confidence bounds.

    :param X: predictor values
    :param Y: response values
    :param ci: significance level passed to ``conf_int``
    :return: (params, lo, hi); ``lo`` pairs the intercept's lower bound with
        the slope's upper bound and ``hi`` the opposite pairing -- presumably
        the two extreme fit lines; TODO confirm intended pairing
    """
    res = linear_fit(X, Y)
    fit_ci = np.array(res.conf_int(ci))
    # Rows of fit_ci are (intercept, slope); columns are (lower, upper).
    lo = fit_ci[[0,1],[0,1]]
    hi = fit_ci[[0,1],[1,0]]
    return res.params, lo, hi
def get_folding_kinetics(pfdb, ci=0, X='L', Y='log_kf'):
    """Fit the folding-rate column against log10 of the length column.

    :param pfdb: folding-kinetics DataFrame
    :param ci: when nonzero, also return confidence-interval bounds
        (NOTE(review): the value of ``ci`` is not forwarded --
        linear_fit_with_ci always uses its default 0.05; confirm intended)
    :param X: column used as predictor (log10-transformed before fitting)
    :param Y: column used as response
    :return: fit parameters, or (params, lo, hi) when ``ci`` is nonzero
    """
    Xf = np.log10(pfdb[X])
    Yf = pfdb[Y]
    if ci != 0:
        return linear_fit_with_ci(Xf, Yf)
    else:
        return linear_fit(Xf, Yf).params
def get_folding_translation_rates(df, which='best', acpro=False, reduce_pfdb=True, only2s=False):
    """Attach predicted folding vs. translation timing columns to *df*.

    Adds 'ln_kf' (predicted log folding rate from chain length), 'T_TRANS'
    (log10 of chain length over translation rate) and 'REL_RATE'
    (their negated sum).

    :param df: DataFrame with AA_PDB (chain length) and k_trans columns
    :param which: 'best' for the point-estimate fit, 'lo'/'hi' for the
        confidence-interval bounds of the fit
    :param acpro: use the ACPro database instead of PFDB (disables reduce_pfdb)
    :param reduce_pfdb: restrict PFDB to rows flagged in its ``use`` column
    :param only2s: restrict PFDB to rows up to index 88
        # NOTE(review): presumably the two-state folder subset -- confirm
    :return: *df* with the three columns added (modified in place)
    """
    if acpro:
        pfdb = asym_io.load_acpro()
        reduce_pfdb = False
    else:
        pfdb = asym_io.load_pfdb()
    if only2s:
        pfdb = pfdb.loc[:88]
    if reduce_pfdb:
        pfdb = pfdb.loc[pfdb.use]
    if which == 'best':
        coef = get_folding_kinetics(pfdb)
    else:
        idx = {'lo':1, 'hi':2}[which]
        coef = get_folding_kinetics(pfdb, ci=0.05)[idx]
    df['ln_kf'] = df.AA_PDB.apply(lambda x: pred_fold(np.log10(x), coef))
    df['T_TRANS'] = np.log10(df.AA_PDB / df.k_trans)
    df['REL_RATE'] = - df['ln_kf'] - df['T_TRANS']
    return df
def kdb_results(pdb, dom, kdb):
    """Fit folding kinetics on the KDB dataset and rerun downstream analyses.

    :param pdb: per-protein DataFrame (needs an AA_PDB length column)
    :param dom: per-domain DataFrame (needs an AA_PDB length column)
    :param kdb: kinetics DataFrame with 'Protein Length' and 'ln kf' columns
    """
    pdb = pdb.copy()
    dom = dom.copy()
    # Regress ln(kf) on log10(length) over the KDB kinetics data set.
    coef = linear_fit(np.log10(kdb['Protein Length']), kdb['ln kf']).params
    pdb['ln_kf'] = pred_fold(np.log10(pdb.AA_PDB), coef)
    # NOTE(review): get_rel_rate, boot_R_and_save and boot_enrich_and_save
    # are expected to be defined elsewhere in this module -- confirm.
    pdb = get_rel_rate(pdb)
    dom['ln_kf'] = pred_fold(np.log10(dom.AA_PDB), coef)
    dom = get_rel_rate(dom)
    boot_R_and_save(pdb, kdb='_kdb')
    boot_enrich_and_save(pdb, dom, kdb='_kdb')
| [
"statsmodels.api.OLS",
"asym_io.load_acpro",
"asym_io.load_pfdb",
"statsmodels.api.add_constant",
"numpy.log10"
] | [((180, 198), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (195, 198), True, 'import statsmodels.api as sm\n'), ((209, 221), 'statsmodels.api.OLS', 'sm.OLS', (['Y', 'X'], {}), '(Y, X)\n', (215, 221), True, 'import statsmodels.api as sm\n'), ((507, 524), 'numpy.log10', 'np.log10', (['pfdb[X]'], {}), '(pfdb[X])\n', (515, 524), True, 'import numpy as np\n'), ((1245, 1277), 'numpy.log10', 'np.log10', (['(df.AA_PDB / df.k_trans)'], {}), '(df.AA_PDB / df.k_trans)\n', (1253, 1277), True, 'import numpy as np\n'), ((780, 800), 'asym_io.load_acpro', 'asym_io.load_acpro', ([], {}), '()\n', (798, 800), False, 'import asym_io\n'), ((854, 873), 'asym_io.load_pfdb', 'asym_io.load_pfdb', ([], {}), '()\n', (871, 873), False, 'import asym_io\n'), ((1524, 1544), 'numpy.log10', 'np.log10', (['pdb.AA_PDB'], {}), '(pdb.AA_PDB)\n', (1532, 1544), True, 'import numpy as np\n'), ((1610, 1630), 'numpy.log10', 'np.log10', (['dom.AA_PDB'], {}), '(dom.AA_PDB)\n', (1618, 1630), True, 'import numpy as np\n'), ((1441, 1472), 'numpy.log10', 'np.log10', (["kdb['Protein Length']"], {}), "(kdb['Protein Length'])\n", (1449, 1472), True, 'import numpy as np\n'), ((1205, 1216), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (1213, 1216), True, 'import numpy as np\n')] |
"""
Advanced Lane Detection Term 1
Self Driving Car NanoDegree
"""
# Pipeline for Advance Lane Detection
# Importing Dependencies
import sys
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
from moviepy.editor import VideoFileClip
from IPython.display import HTML
#---------------------------------------
# Importing Modules
from perspective_transform import PerspectiveTransform
from line import Line
from line_detector import LineDetector
def GetCalibrationParam(image_url):
    """Compute camera calibration parameters from chessboard images.

    :param image_url: glob pattern matching the 9x6 chessboard calibration images
    :return: (mtx, dist) -- the camera calibration matrix and the
             distortion coefficients
    :raises IOError: if the glob pattern matches no images
    """
    images = glob.glob(image_url)  # store images
    if not images:
        # Without this guard, img_size below would be referenced unbound
        # (NameError) when the pattern matches nothing.
        raise IOError("No calibration images found for pattern: %s" % image_url)
    objp = np.zeros((6*9,3), np.float32)
    objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
    object_points = [] # 3d Points in real world space
    image_points = [] # 2d Points in image plane.
    corner = (9, 6) # Chessboard size to 9x6
    # Iterate over the stored images
    for image in images:
        img = mpimg.imread(image)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, corner, None)
        if ret:
            object_points.append(objp)
            image_points.append(corners)
        img_size = (img.shape[1], img.shape[0])
    # cv2.calibrateCamera finds the camera intrinsic and extrinsic parameters
    # from several views of a calibration pattern.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(object_points, image_points, img_size,None,None)
    # Return Calibration matrix and Distortion Matrix
    return mtx, dist
def InputCalibrationFile(path="./cal.npz"):
    """Load the camera calibration matrix and distortion coefficients.

    :param path: path to an ``np.savez`` archive containing 'mtx' and 'dist'
    :return: (mtx, dist) tuple of calibration matrix and distortion coefficients
    :raises IOError: if the file cannot be read or lacks the expected arrays
    """
    try:
        calibration_param = np.load(path)
        return calibration_param['mtx'], calibration_param['dist']
    # KeyError covers an archive that loads fine but is missing 'mtx'/'dist';
    # previously it escaped uncaught, breaking the IOError contract.
    except (IOError, KeyError) as e:
        print(e)  # surface the underlying cause
        # Chain the original exception so the root cause stays visible.
        raise IOError("Please Set Correct Calibration File") from e
def DirectThreshold(img_ch, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Apply a gradient-direction threshold to a single image channel.

    :param img_ch: single-channel image (e.g. grayscale)
    :param sobel_kernel: Sobel aperture size
    :param thresh: (low, high) direction bounds in radians, within [0, pi/2]
    :return: binary mask, 1 where the gradient direction falls inside thresh
    """
    sobelx = np.absolute(cv2.Sobel(img_ch, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    sobely = np.absolute(cv2.Sobel(img_ch, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    # arctan2 handles sobelx == 0 cleanly; the previous
    # np.arctan(sobely/sobelx) produced NaN for 0/0 pixels (plus runtime
    # warnings), and NaN pixels silently failed the threshold comparison.
    abs_grad_dir = np.arctan2(sobely, sobelx)
    dir_binary = np.zeros_like(abs_grad_dir)
    dir_binary[(abs_grad_dir > thresh[0]) & (abs_grad_dir < thresh[1])] = 1
    return dir_binary
def main():
    """Calibrate the camera, save the parameters, then run lane detection
    over the project video and write the annotated result.
    """
    camera_matrix, dist_coeff = GetCalibrationParam('./camera_cal/calibration*.jpg')
    # Persist calibration so later runs can load it via InputCalibrationFile().
    np.savez("./cal.npz",mtx=camera_matrix, dist=dist_coeff)
    output = './output_video.mp4'
    clip1 = VideoFileClip('./project_video.mp4')
    # Initiate the LineDetector Object
    ld = LineDetector()
    # Process Images
    white_clip = clip1.fl_image(ld.ProcessImage)
    white_clip.write_videofile(output, audio=False)
    print("Success, process Finished. Please see %s" % output)


if __name__ == "__main__":
    main()
"matplotlib.image.imread",
"numpy.zeros_like",
"cv2.findChessboardCorners",
"numpy.load",
"moviepy.editor.VideoFileClip",
"cv2.cvtColor",
"numpy.zeros",
"line_detector.LineDetector",
"cv2.calibrateCamera",
"glob.glob",
"numpy.arctan",
"numpy.savez",
"cv2.Sobel"
] | [((683, 703), 'glob.glob', 'glob.glob', (['image_url'], {}), '(image_url)\n', (692, 703), False, 'import glob\n'), ((732, 764), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (740, 764), True, 'import numpy as np\n'), ((1558, 1628), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['object_points', 'image_points', 'img_size', 'None', 'None'], {}), '(object_points, image_points, img_size, None, None)\n', (1577, 1628), False, 'import cv2\n'), ((2452, 2479), 'numpy.zeros_like', 'np.zeros_like', (['abs_grad_dir'], {}), '(abs_grad_dir)\n', (2465, 2479), True, 'import numpy as np\n'), ((2681, 2738), 'numpy.savez', 'np.savez', (['"""./cal.npz"""'], {'mtx': 'camera_matrix', 'dist': 'dist_coeff'}), "('./cal.npz', mtx=camera_matrix, dist=dist_coeff)\n", (2689, 2738), True, 'import numpy as np\n'), ((2784, 2820), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""./project_video.mp4"""'], {}), "('./project_video.mp4')\n", (2797, 2820), False, 'from moviepy.editor import VideoFileClip\n'), ((2869, 2883), 'line_detector.LineDetector', 'LineDetector', ([], {}), '()\n', (2881, 2883), False, 'from line_detector import LineDetector\n'), ((1042, 1061), 'matplotlib.image.imread', 'mpimg.imread', (['image'], {}), '(image)\n', (1054, 1061), True, 'import matplotlib.image as mpimg\n'), ((1077, 1114), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1089, 1114), False, 'import cv2\n'), ((1138, 1183), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', 'corner', 'None'], {}), '(gray, corner, None)\n', (1163, 1183), False, 'import cv2\n'), ((1896, 1909), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1903, 1909), True, 'import numpy as np\n'), ((2238, 2293), 'cv2.Sobel', 'cv2.Sobel', (['img_ch', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(img_ch, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (2247, 2293), False, 'import cv2\n'), ((2320, 2375), 
'cv2.Sobel', 'cv2.Sobel', (['img_ch', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(img_ch, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (2329, 2375), False, 'import cv2\n'), ((2408, 2434), 'numpy.arctan', 'np.arctan', (['(sobely / sobelx)'], {}), '(sobely / sobelx)\n', (2417, 2434), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" RLBook.Chapter8.TicTacToe
Tic Tac Toe game implementation
"""
from itertools import cycle
import numpy as np
from RLBook.Chapter8.DefaultPlayers import DEFAULT_PLAYERS
class Game:
    """ TicTacToe game implementation to be used by Monte Carlo Tree Search
        https://en.wikipedia.org/wiki/Tic-tac-toe
    """
    # Maps a (row, col) board coordinate to its flat 0-8 cell index.
    DICTIONARY = {(0, 0): 0,
                  (0, 1): 1,
                  (0, 2): 2,
                  (1, 0): 3,
                  (1, 1): 4,
                  (1, 2): 5,
                  (2, 0): 6,
                  (2, 1): 7,
                  (2, 2): 8}
    # Maps a player slot to the index of the *other* player in self.players.
    INDEX = {1: 0,
             0: 1}
    # Class-level default; may be overridden per instance via __init__.
    players = DEFAULT_PLAYERS

    def __init__(self, board_size=3, players=None, using_nn=None, nn_player=0):
        """
        :param board_size: 3x3 by Default will be the Board size used
        :param players: Optional: Pass
        :param using_nn: Flag to indicate if a Neural Network is being utilised
        :param nn_player: Identification of which agent is the Neural Network
        """
        # Game attributes
        self.board_size = board_size
        self.state = np.zeros((board_size, board_size), dtype=int)
        self.last_play = None
        self.sums = np.array([])
        # players attributes
        if players is not None:
            self.players = players
        self.players_values = list([p.value for p in self.players])
        # Endless alternation of turns between the two players.
        self.players_gen = cycle(self.players)
        self.current_player = next(self.players_gen)
        # History entries are (board copy, flat move index, player value, action prob).
        self.history = [(self.state.copy(), None, self.current_player.value, None)]
        # Using a Neural Network
        self.nn_player = nn_player
        self.using_nn = using_nn

    def __repr__(self):
        return "< TicTacToe > "

    def __str__(self):
        return "< TicTacToe > "

    def legal_plays(self):
        """ Takes a sequence of game states representing the full game history
        :return: the list of moves tuples that are legal to play for the current player
        """
        legal_plays = []
        if self.winner is None:
            # Cells not holding any player's value are still free.
            free_spaces = np.isin(self.state, self.players_values, invert=True)
            legal_plays = np.argwhere(free_spaces)
            # convert numpy array to list of tuples
            legal_plays = list(map(tuple, legal_plays))
        return legal_plays

    @property
    def winner(self):
        """ Return the winner player. If game is tied, return None
        :return: Player or None
        """
        # NOTE(review): assumes player values are such that only a full line of
        # one player can sum to board_size * value -- confirm for the
        # configured player values.
        for player in self.players:
            # one axis is full of this player plays (= win)
            if self.board_size * player.value in self.sums:
                return player
        # no winner found
        return None

    def show_board(self, state_number=-1, return_string=False):
        """ Display the game board
        :param state_number: the state to show
        :param return_string: whether to return a string or to print it
        :return: board representation as a string or nothing
        """
        # creates the string representation of the game
        lines = []
        no_player_display = '.'
        for line in self.history[state_number][0]:
            elements = []
            for element in line:
                if element in self.players_values:
                    for player in self.players:
                        if element == player.value:
                            elements.append(player.display)
                else:
                    elements.append(no_player_display)
            lines.append('|'.join(elements))
        board_representation = '\n'.join(lines)
        if return_string:
            return board_representation
        else:
            print(board_representation)

    def play(self, move=None, action_prob=1):
        """ Play a move
        :param move: selected move to play. If None it is chosen randomly from legal plays
        :param action_prob: Action probabilities from the Agent
        """
        legal_plays = self.legal_plays()
        # If input move is provided check that it is legal
        if move is not None:
            if move in legal_plays:
                selected_move = move
            else:
                raise ValueError('Selected move is illegal')
        # Select a move randomly
        else:
            selected_move = legal_plays[np.random.choice(len(legal_plays), 1)[0]]
        # Updates states and players info
        self.state[selected_move] = self.current_player.value
        # Copy() needed to avoid appending a reference
        # noinspection PyTypeChecker
        self.history.append((self.state.copy(), self.translate(selected_move), self.current_player.value, action_prob))
        self.current_player = next(self.players_gen)
        self.last_play = selected_move
        # Updates sums that are used to check for winner
        # (column sums, row sums, then both diagonals).
        self.sums = np.concatenate(
            (np.sum(self.state, axis=0),  # vertical
             np.sum(self.state, axis=1),  # horizontal
             np.array([np.sum(np.diag(self.state)),  # diagonal
                       np.sum(np.diag(self.state[::-1]))])))

    def translate(self, position):
        """ Translate tuple to Index
        :param position: Tuple of the Position
        :return: Index in the Array (0-8)
        """
        return self.DICTIONARY.get(position)

    def reset(self):
        """Clear the board and history for a fresh game."""
        # Game attributes
        # NOTE(review): unlike __init__, reset() stores bare board states (not
        # 4-tuples) in history and does not reset current_player or the turn
        # generator -- confirm this asymmetry is intended.
        self.state = np.zeros((self.board_size, self.board_size), dtype=int)
        self.history = [self.state.copy()]  # copy() needed to avoid appending a reference
        self.last_play = None
        self.sums = np.array([])

    @property
    def nn_index(self):
        """ Get the Neural Network Players Coin choice and index in the Player
        :return: NN Player, Index in List of Players, Player
        """
        if not self.using_nn:
            return None
        else:
            return self.players[self.nn_player], self.nn_player

    @property
    def player(self):
        # The player whose turn it currently is.
        return self.current_player

    @property
    def competing_player(self):
        # The opponent of the neural-network player.
        return self.players[self.INDEX[self.nn_player]]
| [
"numpy.isin",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.argwhere",
"itertools.cycle",
"numpy.diag"
] | [((1196, 1241), 'numpy.zeros', 'np.zeros', (['(board_size, board_size)'], {'dtype': 'int'}), '((board_size, board_size), dtype=int)\n', (1204, 1241), True, 'import numpy as np\n'), ((1292, 1304), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1300, 1304), True, 'import numpy as np\n'), ((1498, 1517), 'itertools.cycle', 'cycle', (['self.players'], {}), '(self.players)\n', (1503, 1517), False, 'from itertools import cycle\n'), ((5590, 5645), 'numpy.zeros', 'np.zeros', (['(self.board_size, self.board_size)'], {'dtype': 'int'}), '((self.board_size, self.board_size), dtype=int)\n', (5598, 5645), True, 'import numpy as np\n'), ((5787, 5799), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5795, 5799), True, 'import numpy as np\n'), ((2173, 2226), 'numpy.isin', 'np.isin', (['self.state', 'self.players_values'], {'invert': '(True)'}), '(self.state, self.players_values, invert=True)\n', (2180, 2226), True, 'import numpy as np\n'), ((2253, 2277), 'numpy.argwhere', 'np.argwhere', (['free_spaces'], {}), '(free_spaces)\n', (2264, 2277), True, 'import numpy as np\n'), ((5058, 5084), 'numpy.sum', 'np.sum', (['self.state'], {'axis': '(0)'}), '(self.state, axis=0)\n', (5064, 5084), True, 'import numpy as np\n'), ((5111, 5137), 'numpy.sum', 'np.sum', (['self.state'], {'axis': '(1)'}), '(self.state, axis=1)\n', (5117, 5137), True, 'import numpy as np\n'), ((5183, 5202), 'numpy.diag', 'np.diag', (['self.state'], {}), '(self.state)\n', (5190, 5202), True, 'import numpy as np\n'), ((5247, 5272), 'numpy.diag', 'np.diag', (['self.state[::-1]'], {}), '(self.state[::-1])\n', (5254, 5272), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
import logging
import math
import os
import re
import subprocess
import tempfile
import time
from typing import List
import numpy as np
# Module-level logger; classes below derive child loggers from it.
log = logging.getLogger(__name__)
class CostScaler:
    """
    Scales floats into integers (and back) while preserving at least a
    requested number of significant decimal digits.
    """

    def __init__(self, costValues: List[float], significantDigits: int):
        """
        Parameters:
            costValues: the sequence of cost values whose precision should be maintained in the int realm
            significantDigits: the number of significant digits that shall at least be maintained
        """
        # Exponent of the smallest value's leading digit (capped at 0), so
        # that scaling shifts every value far enough left of the decimal point.
        smallestExp = min([0] + [np.floor(np.log10(v)) for v in costValues])
        self.scalingFactor = math.pow(10, significantDigits - 1 - smallestExp)

    def scaledInt(self, originalValue: float) -> int:
        """Returns the scaled value as an integer"""
        return int(round(self.scalingFactor * originalValue))

    def scaledFloat(self, originalValue: float) -> float:
        """Returns the scaled value, without rounding, as a float"""
        return self.scalingFactor * originalValue

    def originalValue(self, scaledValue: float) -> float:
        """Returns the original unscaled value from a scaled value"""
        return scaledValue / self.scalingFactor

    def __str__(self):
        return "CostScaler[factor=%d]" % self.scalingFactor
class MiniZincProblem(ABC):

    def createMiniZincFile(self, f):
        """
        Writes this problem's MiniZinc code to an open file.

        :param f: an OS-level handle to an open file
        """
        encoded = self.getMiniZincCode().encode('utf-8')
        os.write(f, encoded)

    @abstractmethod
    def getMiniZincCode(self):
        """Return the MiniZinc source code for this problem as a string."""
        pass
class MiniZincSolver(object):
    """Thin wrapper around the ``minizinc`` command-line tool."""
    log = log.getChild(__qualname__)

    def __init__(self, name='OSICBC', solverTimeSeconds=None, fznOutputPath=None):
        """
        :param name: name of solver compatible with miniZinc
        :param solverTimeSeconds: upper time limit for solver in seconds
        :param fznOutputPath: flatZinc output path
        """
        self.solverName = name
        self.solverTimeLimitSecs = solverTimeSeconds
        self.fznOutputPath = fznOutputPath
        self.lastSolverTimeSecs = None
        self.lastSolverOutput = None
        self.lastSolverErrOutput = None

    def __str__(self):
        # Fixed: this was named '__str' (a typo), so str(solver) never used it.
        return f"MiniZincSolver[{self.solverName}]"

    def solvePath(self, mznPath: str, logInfo=True) -> str:
        """
        Solves the MiniZinc problem stored at the given file path

        :param mznPath: path to file containing MiniZinc problem code
        :param logInfo: whether to log solver output at INFO level rather than DEBUG level
        :return: the solver output
        """
        self.lastSolverTimeSecs = None
        logSolver = self.log.info if logInfo else self.log.debug
        args = ["--statistics", "--solver", self.solverName]
        if self.solverTimeLimitSecs is not None:
            args.append("--time-limit")
            # minizinc expects the limit in milliseconds
            args.append(str(self.solverTimeLimitSecs * 1000))
        if self.fznOutputPath is not None:
            args.append("--output-fzn-to-file")
            args.append(self.fznOutputPath)
        args.append(mznPath)
        command = "minizinc " + " ".join(args)
        self.log.info("Running %s" % command)
        start_time = time.time()
        proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Stream the solver output line by line so progress is logged live.
        output = []
        while True:
            line = proc.stdout.readline().decode("utf-8")
            if not line:
                break
            output.append(line)
            logSolver("Solver output: %s" % line.rstrip())
        output = "".join(output)
        proc.wait()
        if proc.returncode != 0:
            raise Exception(f"MiniZinc call failed with return code {proc.returncode}; output: {output}")
        self.lastSolverTimeSecs = time.time() - start_time
        self.lastSolverOutput = output
        self.log.info("Solver time: %.1fs" % self.lastSolverTimeSecs)
        return output

    def solveProblem(self, problem: MiniZincProblem, keepTempFile=False, logInfo=True) -> str:
        """
        Solves the given MiniZinc problem

        :param problem: the problem to solve
        :param keepTempFile: whether to keep the temporary .mzn file
        :param logInfo: whether to log solver output at INFO level rather than DEBUG level
        :return: the solver output
        """
        f, path = tempfile.mkstemp(".mzn")
        try:
            try:
                problem.createMiniZincFile(f)
            finally:
                os.close(f)
            return self.solvePath(path, logInfo=logInfo)
        finally:
            if not keepTempFile:
                os.unlink(path)

    def getLastSolverTimeSecs(self):
        """Wall-clock seconds taken by the last solver run, or None if no run completed."""
        return self.lastSolverTimeSecs
def extract1DArrayFromOutput(stringIdentifier: str, output: str) -> List:
    """Parse a MiniZinc ``array1d`` assignment for *stringIdentifier* from solver output.

    :param stringIdentifier: name of the assigned variable in the solver output
    :param output: the full text emitted by the solver
    :return: the array contents as a Python list
    """
    pattern = r'%s = array1d\(\d+\.\.\d+, (\[.*?\])' % stringIdentifier
    match = re.search(pattern, output)
    # The bracketed literal is valid Python; output comes from our own solver run.
    return eval(match.group(1))
def extractMultiDimArrayFromOutput(stringIdentifier: str, dim: int, output: str, boolean=False) -> np.array:
    """Parse a MiniZinc ``array<dim>d`` assignment from solver output into a numpy array.

    :param stringIdentifier: name of the assigned variable in the solver output
    :param dim: number of array dimensions to expect
    :param output: the full text emitted by the solver
    :param boolean: translate MiniZinc true/false literals to 1/0 before parsing
    :return: numpy array reshaped to the dimensions declared in the output
    :raises Exception: if the expected assignment is not found
    """
    # Dots escaped: the previous r"1..(\d+), " let '.' match any character,
    # which could mis-parse malformed or unexpected dimension specs.
    dimRegex = r"1\.\.(\d+), "
    regex = r'{stringIdentifier} = array{dim}d\({dimsRegex}(\[.*?\])'.format(stringIdentifier=stringIdentifier, dim=dim, dimsRegex=dimRegex*dim)
    match = re.search(regex, output)
    if match is None:
        raise Exception("No match found for regex: %s" % regex)
    # Groups 1..dim carry the upper bound of each dimension.
    shape = [int(match.group(i)) for i in range(1, dim+1)]
    flatList = match.group(dim+1)
    if boolean:
        flatList = flatList.replace("false", "0").replace("true", "1")
    # The bracketed literal is valid Python; output comes from our own solver run.
    flatList = eval(flatList)
    array1d = np.array(flatList)
    arraymd = array1d.reshape(shape)
    return arraymd
def array2MiniZinc(a: np.array, elementCast):
    """Render numpy array *a* as a MiniZinc ``arrayNd`` literal.

    :param a: the numpy array to render
    :param elementCast: callable applied to each element (e.g. ``int``)
    :return: the MiniZinc literal as a string
    """
    dimCount = len(a.shape)
    dims = ", ".join(f"1..{n}" for n in a.shape)
    values = [elementCast(v) for v in a.flatten()]
    return f"array{dimCount}d({dims}, {values})"
| [
"subprocess.Popen",
"os.unlink",
"math.pow",
"tempfile.mkstemp",
"time.time",
"numpy.array",
"os.close",
"numpy.log10",
"re.search",
"logging.getLogger"
] | [((181, 208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (198, 208), False, 'import logging\n'), ((5356, 5380), 're.search', 're.search', (['regex', 'output'], {}), '(regex, output)\n', (5365, 5380), False, 'import re\n'), ((5691, 5709), 'numpy.array', 'np.array', (['flatList'], {}), '(flatList)\n', (5699, 5709), True, 'import numpy as np\n'), ((808, 827), 'math.pow', 'math.pow', (['(10)', 'exp10'], {}), '(10, exp10)\n', (816, 827), False, 'import math\n'), ((3291, 3302), 'time.time', 'time.time', ([], {}), '()\n', (3300, 3302), False, 'import time\n'), ((3318, 3410), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (3334, 3410), False, 'import subprocess\n'), ((4445, 4469), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".mzn"""'], {}), "('.mzn')\n", (4461, 4469), False, 'import tempfile\n'), ((3868, 3879), 'time.time', 'time.time', ([], {}), '()\n', (3877, 3879), False, 'import time\n'), ((4583, 4594), 'os.close', 'os.close', (['f'], {}), '(f)\n', (4591, 4594), False, 'import os\n'), ((4718, 4733), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (4727, 4733), False, 'import os\n'), ((744, 755), 'numpy.log10', 'np.log10', (['v'], {}), '(v)\n', (752, 755), True, 'import numpy as np\n')] |
import pandas as pd, os
import seaborn as sns, matplotlib.pyplot as plt, numpy as np
sns.set_style('darkgrid')

# Load every CSV from the coded-truth data folder into a dict keyed by
# file stem; files with 'coded' in their name are excluded.
base = '../data/codedtruth'
dfdict = {x[:-4]:pd.read_csv(f'{base}/{x}') for x in os.listdir(base)
          if x.endswith('.csv') and 'coded' not in x}

plt.rcParams['figure.figsize'] = 4, 2
plt.rcParams.update({'font.size': 14})

# this might be of use elsewhere, too
scale = ['Essential', 'Worthwhile', 'Unimportant', 'Unwise']
# Ordered categorical so ratings sort from most to least favourable.
dfdict['truth_ratings']['rating'] = pd.Categorical(dfdict['truth_ratings']['rating'], scale, ordered=True)
def count_sample(df, var):
    """Number of respondents ('lfdn' rows) per category of *var*.

    :param df: survey DataFrame with an 'lfdn' respondent-id column
    :param var: name of the column to group by
    :return: DataFrame indexed by the categories of *var*
    """
    grouped = df[['lfdn', var]].groupby(var)
    return grouped.count()
def plot_sample(df, var, varname):
    """Horizontal bar chart of respondent counts per category of *var*.

    Saves the figure to ../plots/<varname>.png.

    :param df: survey DataFrame with an 'lfdn' respondent-id column
    :param var: name of the column to group by
    :param varname: axis label, also used as the output file name stem
    """
    count_sample(df, var).sort_index(ascending=False).plot.barh(color='k', alpha=0.7, figsize=(10,6), legend=[])
    plt.xlabel('Number of Respondents', fontsize = 16)
    plt.ylabel(varname, fontsize = 16)
    plt.tight_layout()
    plt.savefig("../plots/"+varname+".png")
# Ordered company-size categories; v_14 presumably holds the respondent's
# company size -- confirm against the questionnaire codebook.
sizes = ['Small (1-4)', 'Medium (5-10)', 'Larger (10-49)', 'Very large (50+)']
# Join each rating with its respondent's metadata.
ratings_with_respondent_meta = dfdict['truth_ratings'].merge(dfdict['truth_metadata'], how='inner')
ratings_with_respondent_meta['v_14'] = pd.Categorical(ratings_with_respondent_meta['v_14'], sizes, ordered=True)
def plot_df(df, var, varname, absolute=True):
    """Stacked bar chart of rating counts (or shares) per category of *var*.

    Also exports the tabulated counts to ../data/dataexports/<varname>.xlsx
    and saves the figure to ../plots/<varname>.png.

    :param df: ratings joined with respondent metadata
    :param var: grouping column name, or a list of such names
    :param varname: axis label, also used for the export file names
    :param absolute: plot absolute counts if True, else row-normalised shares
    """
    if type(var) == str:
        data = df[['lfdn', var, 'rating']
                 ].groupby([var, 'rating']).count(
                 ).fillna(0).astype(int).unstack().sort_index(ascending=False)
    else:
        data = df[['lfdn'] + var + ['rating']
                 ].groupby(var + ['rating']).count(
                 ).fillna(0).astype(int).unstack().sort_index(ascending=False)
    data.columns = data.columns.droplevel()
    data.to_excel(r"../data/dataexports/"+varname+".xlsx")
    if absolute:
        data.plot.barh(stacked=True, cmap='bwr', alpha=0.5, figsize=(10,6))
    else:
        data.div(data.sum(axis=1).values, axis=0).plot.barh(stacked=True, cmap='bwr', alpha=0.5, figsize=(10,6))
        plt.xlim((0,1))
        plt.xticks(np.arange(0,1.01,0.1))
    # make labels
    # Annotate each bar with its row total ("n = ...").
    bars = plt.gca().patches
    labels = data.sum(axis=1)
    labels = labels.apply(lambda x: "n = " + str(x))
    for bar, label in zip(bars, labels):
        plt.gca().text(1.05, bar.get_y(), label,
                 ha='center', va='bottom')
    plt.subplots_adjust(left=0.3)
    plt.xlabel('Number of Ratings' if absolute else 'Percentage of Ratings', fontsize=16)
    plt.ylabel(varname,fontsize=16)
    plt.legend(loc='upper center',bbox_to_anchor=(0.5, -0.1), ncol=4)
    # NOTE(review): savefig runs before tight_layout, so the layout
    # adjustment does not appear in the saved file -- confirm intended.
    plt.savefig("../plots/"+varname+".png")
    plt.tight_layout()
def split_tags(old_df, level_4=False):
    """Return a copy of *old_df* with its 'Tag' column split into hierarchy columns.

    Tags have the form 'l1:l2:l3' or 'l1:l2:l3_l4'; level_1..level_3 columns
    are always added, level_4 only on request.

    :param old_df: DataFrame with a 'Tag' column
    :param level_4: also extract the underscore-separated fourth level
    :return: a new DataFrame; *old_df* is left unmodified
    """
    df = pd.DataFrame(old_df, copy=True)
    l1, l2, l3, l4 = [], [], [], []
    for tag in df.Tag:
        parts = tag.split(':')
        l1.append(parts[0])
        l2.append(parts[1])
        sub = parts[2].split('_')
        l3.append(sub[0])
        l4.append(sub[1] if len(sub) > 1 else '')
    df['level_1'] = l1
    df['level_2'] = l2
    df['level_3'] = l3
    if level_4:
        df['level_4'] = l4
    return df
# Column names for all four tag-hierarchy levels and the ordered rating scale.
all_levels = [f'level_{x}' for x in range(1,5)]
all_ratings = ['Essential', 'Worthwhile', 'Unimportant', 'Unwise']
# Split the 'how/with whom' and 'what' tag tables into their hierarchy levels.
papertags_method = split_tags(dfdict['papertags_how_withwhom_final'], level_4=False)
papertags_content = split_tags(dfdict['papertags_what_final'], level_4=True)
def tag_stats(tagdf, all_levels, totals=True, rel=False):
    """Cross-tabulate tag counts per hierarchy level and rating.

    :param tagdf: DataFrame with the level_* columns, a 'Tag' column and a
        'rating' column
    :param all_levels: list of level column names to group by
    :param totals: add Positive/Negative/Total summary columns
        (Positive = Essential + Worthwhile, Negative = Unimportant + Unwise)
    :param rel: normalise each row to shares before (optionally) totalling
    :return: wide DataFrame with one ('Tag', rating) column per rating
    """
    df = tagdf.groupby(all_levels + ['rating']).count()[['Tag']
            ].dropna().unstack().fillna(0).rename(columns=str).astype(int)
    if rel:
        rowsums = df.sum(axis=1)
        for col in df.columns.values:
            df[col] = df[col] / rowsums
    if totals:
        df['Positive'] = df[('Tag','Essential')] + df[('Tag','Worthwhile')]
        df['Negative'] = df[('Tag','Unimportant')] + df[('Tag','Unwise')]
        df['Total'] = df['Positive'] + df['Negative']
    else:
        df['Total'] = df.sum(axis=1)
    return df
def plot_tag_ratings(df, name, sort_values=True, rel=False, step=25):
    """Stacked horizontal bar chart of tag rating counts (or shares).

    Exports the plotted table to ../data/dataexports/<name>.xlsx and saves
    the figure.

    :param df: output of tag_stats (four rating columns plus totals)
    :param name: file name stem for the Excel export
    :param sort_values: order rows by Positive count, then Essential count
    :param rel: True when *df* holds shares rather than absolute counts
    :param step: x-axis tick spacing for absolute counts
    """
    if sort_values:
        data= df.sort_values(['Positive', ('Tag', 'Essential')])[df.columns.values[:4]]
    else:
        data = df[df.columns.values[:4]]
    data.columns = data.columns.droplevel()
    print(data)
    data.to_excel(r"../data/dataexports/"+name+".xlsx")
    data.plot.barh(stacked=True, cmap='bwr', figsize=(9,6), alpha=0.5)
    plt.legend(loc='upper center',bbox_to_anchor=(0.4, -0.1), ncol=4)
    plt.ylabel('')
    if not rel:
        # Round the axis limit up to the next full hundred above the maximum total.
        maxval = df['Total'].max()
        upperbound = (divmod(maxval,100)[0]+1)*100
        plt.xlim((0,upperbound))
        plt.xticks(np.arange(0,upperbound+1,step))
    else:
        plt.xlim((0,1))
        plt.xticks(np.arange(0,1.01,0.1))
    plt.tight_layout()
    # NOTE(review): the figure is always saved as tag.png, not <name>.png,
    # so successive calls overwrite each other -- confirm intended.
    plt.savefig("../plots/"+"tag"+".png")
| [
"pandas.DataFrame",
"seaborn.set_style",
"matplotlib.pyplot.xlim",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"pandas.Categorical",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyp... | [((85, 110), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (98, 110), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((304, 342), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (323, 342), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((479, 549), 'pandas.Categorical', 'pd.Categorical', (["dfdict['truth_ratings']['rating']", 'scale'], {'ordered': '(True)'}), "(dfdict['truth_ratings']['rating'], scale, ordered=True)\n", (493, 549), True, 'import pandas as pd, os\n'), ((1158, 1231), 'pandas.Categorical', 'pd.Categorical', (["ratings_with_respondent_meta['v_14']", 'sizes'], {'ordered': '(True)'}), "(ratings_with_respondent_meta['v_14'], sizes, ordered=True)\n", (1172, 1231), True, 'import pandas as pd, os\n'), ((157, 183), 'pandas.read_csv', 'pd.read_csv', (['f"""{base}/{x}"""'], {}), "(f'{base}/{x}')\n", (168, 183), True, 'import pandas as pd, os\n'), ((781, 829), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Respondents"""'], {'fontsize': '(16)'}), "('Number of Respondents', fontsize=16)\n", (791, 829), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((836, 868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['varname'], {'fontsize': '(16)'}), '(varname, fontsize=16)\n', (846, 868), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((875, 893), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (891, 893), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((898, 941), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/' + varname + '.png')"], {}), "('../plots/' + varname + '.png')\n", (909, 941), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2348, 2377), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.3)'}), '(left=0.3)\n', 
(2367, 2377), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2382, 2471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Number of Ratings' if absolute else 'Percentage of Ratings')"], {'fontsize': '(16)'}), "('Number of Ratings' if absolute else 'Percentage of Ratings',\n fontsize=16)\n", (2392, 2471), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2472, 2504), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['varname'], {'fontsize': '(16)'}), '(varname, fontsize=16)\n', (2482, 2504), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2508, 2574), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.1)', 'ncol': '(4)'}), "(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=4)\n", (2518, 2574), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2583, 2626), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/' + varname + '.png')"], {}), "('../plots/' + varname + '.png')\n", (2594, 2626), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2627, 2645), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2643, 2645), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2699, 2730), 'pandas.DataFrame', 'pd.DataFrame', (['old_df'], {'copy': '(True)'}), '(old_df, copy=True)\n', (2711, 2730), True, 'import pandas as pd, os\n'), ((4392, 4458), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.4, -0.1)', 'ncol': '(4)'}), "(loc='upper center', bbox_to_anchor=(0.4, -0.1), ncol=4)\n", (4402, 4458), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4462, 4476), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (4472, 4476), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4743, 4761), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', 
([], {}), '()\n', (4759, 4761), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4766, 4807), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../plots/' + 'tag' + '.png')"], {}), "('../plots/' + 'tag' + '.png')\n", (4777, 4807), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((193, 209), 'os.listdir', 'os.listdir', (['base'], {}), '(base)\n', (203, 209), False, 'import pandas as pd, os\n'), ((2004, 2020), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 1)'], {}), '((0, 1))\n', (2012, 2020), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4587, 4612), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, upperbound)'], {}), '((0, upperbound))\n', (4595, 4612), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4681, 4697), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 1)'], {}), '((0, 1))\n', (4689, 4697), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2039, 2062), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.1)'], {}), '(0, 1.01, 0.1)\n', (2048, 2062), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2099, 2108), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2106, 2108), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4631, 4665), 'numpy.arange', 'np.arange', (['(0)', '(upperbound + 1)', 'step'], {}), '(0, upperbound + 1, step)\n', (4640, 4665), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((4716, 4739), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.1)'], {}), '(0, 1.01, 0.1)\n', (4725, 4739), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n'), ((2265, 2274), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2272, 2274), True, 'import seaborn as sns, matplotlib.pyplot as plt, numpy as np\n')] |
import numpy as np
from scipy.linalg import kron
from IPython.display import Markdown as md
spin_up = np.array([[1, 0]]).T
spin_down = np.array([[0, 1]]).T

# bit[0] = |0>, bit[1] = |1>
bit = [spin_up, spin_down]


def basis(string='00010'):
    """Build the computational-basis column vector for a qubit string.

    :param string: the qubits sequence; its leftmost character is the most
        significant qubit
    :return: a (2**len(string), 1) numpy matrix
    """
    res = np.array([[1]])
    # Walk the string from the last qubit to the first, accumulating
    # Kronecker products so the first character becomes the outermost factor.
    for ch in string[::-1]:
        res = kron(bit[int(ch)], res)
    return np.matrix(res)
| [
"numpy.matrix",
"numpy.array"
] | [((109, 127), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (117, 127), True, 'import numpy as np\n'), ((143, 161), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (151, 161), True, 'import numpy as np\n'), ((302, 317), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (310, 317), True, 'import numpy as np\n'), ((422, 436), 'numpy.matrix', 'np.matrix', (['res'], {}), '(res)\n', (431, 436), True, 'import numpy as np\n')] |
import numpy as np
from sk_dsp_comm import digitalcom as dc
from . caf_verilog_base import CafVerilogBase
from . xcorr import XCorr
from . reference_buffer import ReferenceBuffer
from . capture_buffer import CaptureBuffer
from . freq_shift import FreqShift
from . __version__ import __version__
from jinja2 import Environment, FileSystemLoader, Template
import os
from shutil import copy
from . quantizer import quantize
from . io_helper import write_buffer_values
from . quantizer import bin_num
from . sig_gen import phase_increment
from math import log2, ceil
class CAF(CafVerilogBase):
def __init__(self, reference, received, foas,
ref_i_bits=12, ref_q_bits=0,
rec_i_bits=12, rec_q_bits=0,
fs=625e3, n_bits=8,
pipeline=True, output_dir='.'):
"""
:param reference:
:param received:
:param ref_i_bits:
:param ref_q_bits:
:param output_dir:
"""
self.reference = reference
self.received = received
if not len(self.reference) == (len(self.received) / 2):
raise ValueError("Received signal must be twice the length of the reference signal")
self.foas = foas
self.fs = fs
self.n_bits = n_bits
self.ref_i_bits = ref_i_bits
self.ref_q_bits = ref_q_bits if ref_q_bits else self.ref_i_bits
self.rec_i_bits = rec_i_bits
self.rec_q_bits = rec_q_bits if rec_q_bits else self.rec_i_bits
self.ref_quant = quantize(self.reference, self.ref_i_bits, self.ref_q_bits)
self.rec_quant = quantize(self.received, self.rec_i_bits, self.rec_q_bits)
self.test_value_filename = '%s_input_values.txt' % (self.module_name())
self.test_output_filename = '%s_output_values.txt' % (self.module_name())
self.phase_increment_filename = '%s_phase_increment_values.txt' % self.module_name()
self.neg_shift_filename = '%s_neg_shift_values.txt' % self.module_name()
self.pip = pipeline
if not self.pip:
raise NotImplementedError("A non-pipelined dot-product has not been implemented as of %s" % __version__)
self.output_dir = output_dir
self.submodules = self.gen_submodules()
self.write_module()
def gen_submodules(self):
submodules = dict()
submodules['reference_buffer'] = ReferenceBuffer(self.reference, self.ref_i_bits, self.ref_q_bits,
self.output_dir, 'ref')
submodules['capture_buffer'] = CaptureBuffer(len(self.received), self.rec_i_bits, self.rec_q_bits,
self.output_dir, 'cap')
submodules['freq_shift'] = FreqShift(self.received, self.freq_res(), self.fs, self.n_bits,
i_bits=self.rec_i_bits, q_bits=self.rec_q_bits,
output_dir=self.output_dir)
submodules['x_corr'] = XCorr(self.reference, self.received, self.ref_i_bits, self.ref_q_bits,
self.rec_i_bits, self.rec_q_bits, pipeline=self.pip, output_dir=self.output_dir)
return submodules
def freq_res(self):
freqs = list()
for ff in self.foas:
if ff:
freqs.append(abs(ff))
freqs = np.floor(np.log10(freqs))
freqs = 10 ** freqs
min_res = min(freqs)
return min_res
def template_dict(self, inst_name=None):
t_dict = {**self.submodules['reference_buffer'].template_dict(),
**self.submodules['capture_buffer'].template_dict(),
**self.submodules['freq_shift'].template_dict(),
**self.submodules['x_corr'].template_dict()}
t_dict['%s_foa_len' % self.module_name()] = len(self.foas)
t_dict['%s_foa_len_bits' % self.module_name()] = int(ceil(log2(len(self.foas))))
t_dict['%s_phase_increment_filename' % self.module_name()] = os.path.abspath(os.path.join(self.output_dir,
self.phase_increment_filename))
t_dict['%s_neg_shift_filename' % self.module_name()] = os.path.abspath(os.path.join(self.output_dir,
self.neg_shift_filename))
t_dict['%s_input' % self.module_name()] = os.path.abspath(os.path.join(self.output_dir,
self.test_value_filename))
t_dict['%s_name' % self.module_name()] = inst_name if inst_name else '%s_tb' % self.module_name()
return t_dict
def write_module(self):
super(CAF, self).write_module()
params_path = os.path.abspath(os.path.join(self.tb_module_path(), 'caf_state_params.v'))
self.write_phase_increment_values()
copy(params_path, self.output_dir)
def gen_tb(self):
write_buffer_values(self.output_dir, self.test_value_filename, self.rec_quant, self.rec_i_bits, self.rec_q_bits)
self.write_tb_module()
def write_tb_module(self):
t_dict = self.template_dict()
template_loader = FileSystemLoader(searchpath=self.tb_module_path())
env = Environment(loader=template_loader)
template = env.get_template('%s_tb.v' % self.module_name())
out_tb = template.render(**t_dict)
with open(os.path.join(self.output_dir, '%s_tb.v' % self.module_name()), 'w+') as tb_file:
tb_file.write(out_tb)
def write_phase_increment_values(self):
phase_bits = self.template_dict()['freq_shift_phase_bits']
with open(os.path.join(self.output_dir, self.phase_increment_filename), 'w+') as pi_file:
for freq in self.foas:
incr = phase_increment(abs(freq), phase_bits, self.fs)
pi_file.write(bin_num(incr, phase_bits) + '\n')
with open(os.path.join(self.output_dir, self.neg_shift_filename), 'w+') as nn_file:
for freq in self.foas:
nn_file.write(str(int(freq < 0)) + '\n')
def simple_caf(x, y, foas, fs):
"""
Produce values for a surface plot of the Complex Ambiguity Function.
This function primarily supports testing values produced via sim_helper, so y is expected to be twice the length of
x.
The return is the CAF surface and a time delay range normalized by the sampling frequency.
:param x: Use x as a reference signal.
:param y: Use y as a captured signal.
:param foas: Frequency offsets, provided as a list/iterable object.
:param fs: Sampling frequency
:return: caf_res, dt
"""
nlags = len(x)
ztup = (nlags, len(foas))
caf_res = np.zeros(ztup)
nlen = len(y)
nrange = np.arange(0, nlen)
dt_lags = nlags // 2
dt = np.arange(-dt_lags, dt_lags) / float(fs)
for k, Df in enumerate(foas):
theta = np.exp(1j*2*np.pi*nrange*Df/float(fs))
y_shift = y * theta
rxy, lags = dc.xcorr(x, y_shift, nlags)
caf_res[:, k] = np.abs(rxy)
return caf_res, dt
| [
"numpy.abs",
"sk_dsp_comm.digitalcom.xcorr",
"numpy.zeros",
"jinja2.Environment",
"numpy.arange",
"numpy.log10",
"os.path.join",
"shutil.copy"
] | [((6772, 6786), 'numpy.zeros', 'np.zeros', (['ztup'], {}), '(ztup)\n', (6780, 6786), True, 'import numpy as np\n'), ((6818, 6836), 'numpy.arange', 'np.arange', (['(0)', 'nlen'], {}), '(0, nlen)\n', (6827, 6836), True, 'import numpy as np\n'), ((4932, 4966), 'shutil.copy', 'copy', (['params_path', 'self.output_dir'], {}), '(params_path, self.output_dir)\n', (4936, 4966), False, 'from shutil import copy\n'), ((5303, 5338), 'jinja2.Environment', 'Environment', ([], {'loader': 'template_loader'}), '(loader=template_loader)\n', (5314, 5338), False, 'from jinja2 import Environment, FileSystemLoader, Template\n'), ((6871, 6899), 'numpy.arange', 'np.arange', (['(-dt_lags)', 'dt_lags'], {}), '(-dt_lags, dt_lags)\n', (6880, 6899), True, 'import numpy as np\n'), ((7049, 7076), 'sk_dsp_comm.digitalcom.xcorr', 'dc.xcorr', (['x', 'y_shift', 'nlags'], {}), '(x, y_shift, nlags)\n', (7057, 7076), True, 'from sk_dsp_comm import digitalcom as dc\n'), ((7101, 7112), 'numpy.abs', 'np.abs', (['rxy'], {}), '(rxy)\n', (7107, 7112), True, 'import numpy as np\n'), ((3373, 3388), 'numpy.log10', 'np.log10', (['freqs'], {}), '(freqs)\n', (3381, 3388), True, 'import numpy as np\n'), ((4031, 4091), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.phase_increment_filename'], {}), '(self.output_dir, self.phase_increment_filename)\n', (4043, 4091), False, 'import os\n'), ((4249, 4303), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.neg_shift_filename'], {}), '(self.output_dir, self.neg_shift_filename)\n', (4261, 4303), False, 'import os\n'), ((4450, 4505), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.test_value_filename'], {}), '(self.output_dir, self.test_value_filename)\n', (4462, 4505), False, 'import os\n'), ((5713, 5773), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.phase_increment_filename'], {}), '(self.output_dir, self.phase_increment_filename)\n', (5725, 5773), False, 'import os\n'), ((5981, 6035), 'os.path.join', 'os.path.join', 
(['self.output_dir', 'self.neg_shift_filename'], {}), '(self.output_dir, self.neg_shift_filename)\n', (5993, 6035), False, 'import os\n')] |
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
import numpy as np
from torch import nn
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)
# get models.py from InferSent repo
from models import InferSent
# Set PATHs
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# PATH_TO_W2V = 'glove/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2
PATH_TO_W2V = 'glove/glove.json' # for small dataset
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
'Set MODEL and GloVe PATHs'
# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
from senteval import utils
def prepare(params, samples):
params.infersent.build_vocab([' '.join(s) for s in samples], tokenize=False, is_small=True)
def batcher(params, batch):
sentences = [' '.join(s) for s in batch]
embeddings = params.infersent.encode(sentences, bsize=params.batch_size, tokenize=False)
return embeddings
def convert_str2lst(s1):
return [s.split() for s in s1]
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
def apply_logician(s1, s2 , is_list=False, sick_model = False):
# is_list : If you are directly sending sentences then keep is_list = False
# If you are sending list of list of words then keep is_list = True
# sick_model: if True, will use sick model for prediction
# : if False, will use snli model for prediction
# Load InferSent model
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
model.set_w2v_path(PATH_TO_W2V)
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
params_senteval['infersent'] = model.cuda()
if not is_list:
s1 = convert_str2lst(s1)
s2 = convert_str2lst(s2)
samples = s1+s2
params_senteval['batch_size'] = min(128,len(s1))
params_senteval = utils.dotdict(params_senteval)
params_senteval.usepytorch = True
prepare(params_senteval, samples)
emb_s1 = batcher(params_senteval, s1)
emb_s2 = batcher(params_senteval, s2)
if sick_model:
testF = np.c_[ np.abs(emb_s1 - emb_s2),emb_s1 * emb_s2]
cp = torch.load('./saved_sick.pth')
print('[Contradiction Neutral Entailment]')
else:
testF = np.c_[emb_s1, emb_s2, emb_s1 * emb_s2, np.abs(emb_s1 - emb_s2)]
cp = torch.load('./saved_snli_augment_ordered.pth')
print('[ Entailment Neutral Contradiction ]')
inputdim = testF.shape[1]
nclasses = 3
clf = nn.Sequential(nn.Linear(inputdim, nclasses),).cuda()
clf.load_state_dict(cp)
testF = torch.FloatTensor(testF).cuda()
out = clf(testF)
sf = nn.Softmax(1)
probs = sf(out)
return probs
| [
"numpy.set_printoptions",
"numpy.abs",
"models.InferSent",
"torch.load",
"torch.FloatTensor",
"sys.path.insert",
"os.path.isfile",
"senteval.utils.dotdict",
"torch.nn.Softmax",
"torch.nn.Linear"
] | [((158, 190), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (177, 190), True, 'import numpy as np\n'), ((191, 225), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (210, 225), True, 'import numpy as np\n'), ((657, 690), 'sys.path.insert', 'sys.path.insert', (['(0)', 'PATH_SENTEVAL'], {}), '(0, PATH_SENTEVAL)\n', (672, 690), False, 'import sys\n'), ((547, 573), 'os.path.isfile', 'os.path.isfile', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (561, 573), False, 'import os\n'), ((578, 605), 'os.path.isfile', 'os.path.isfile', (['PATH_TO_W2V'], {}), '(PATH_TO_W2V)\n', (592, 605), False, 'import os\n'), ((1884, 1907), 'models.InferSent', 'InferSent', (['params_model'], {}), '(params_model)\n', (1893, 1907), False, 'from models import InferSent\n'), ((2398, 2428), 'senteval.utils.dotdict', 'utils.dotdict', (['params_senteval'], {}), '(params_senteval)\n', (2411, 2428), False, 'from senteval import utils\n'), ((3116, 3129), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (3126, 3129), False, 'from torch import nn\n'), ((1931, 1953), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1941, 1953), False, 'import torch\n'), ((2661, 2691), 'torch.load', 'torch.load', (['"""./saved_sick.pth"""'], {}), "('./saved_sick.pth')\n", (2671, 2691), False, 'import torch\n'), ((2828, 2874), 'torch.load', 'torch.load', (['"""./saved_snli_augment_ordered.pth"""'], {}), "('./saved_snli_augment_ordered.pth')\n", (2838, 2874), False, 'import torch\n'), ((3060, 3084), 'torch.FloatTensor', 'torch.FloatTensor', (['testF'], {}), '(testF)\n', (3077, 3084), False, 'import torch\n'), ((2613, 2636), 'numpy.abs', 'np.abs', (['(emb_s1 - emb_s2)'], {}), '(emb_s1 - emb_s2)\n', (2619, 2636), True, 'import numpy as np\n'), ((2796, 2819), 'numpy.abs', 'np.abs', (['(emb_s1 - emb_s2)'], {}), '(emb_s1 - emb_s2)\n', (2802, 2819), True, 'import numpy as np\n'), ((2986, 3015), 
'torch.nn.Linear', 'nn.Linear', (['inputdim', 'nclasses'], {}), '(inputdim, nclasses)\n', (2995, 3015), False, 'from torch import nn\n')] |
""" filter_var_in_file.py
load variable in a given netCDF file containing time series on sigma level
(variable shape 1, Ny, Nx, Nt), compute and store filtered and averaged fields
create a netCDF and copy most of the dimensions and dimension variables from the original
will store a subsampled version of the filtered field, along the dimension as the filtering procedure (relevant for low-pass filtering)
adapted from filter_var_in_file.py, NJAL February 2018 """
from __future__ import print_function, division
import numpy as np
from netCDF4 import Dataset
from def_chunk import get_indchk
import scipy.signal as sig
import time, datetime
doverb = True
#### User-defined parameters section
simul = 'luckyt'
varname = 'buoy'
pathbase = '/net/krypton/data0/project/vortex/lahaye/{}_tseries/'.format(simul)
pathread = pathbase+'{0}_tseries_{1}_iz{2}.nc'.format(simul,varname,"{:02d}")
pathstor = "/net/ruchba/local/tmp/2/lahaye/{0}_tseries_lf/{0}_lowf_{1}.nc".format(simul,varname)
transperr = True # data are horizontally transposed (error on time series)
# this is the case on LUCKYT from 2705 to 2945
if varname == 'bvf':
Nz = 79
else:
Nz = 80
Nmax = 2e9 # max size available in ram
nst = 16 # temporal subsampling for storing values
fcut = 1/48. # good practice au pif: fcut = fsubsampout/2 (1/nst/dt/2)
fmod = 'low'
#### Load netCDF to read from, create netCDF to write in, copy stuffs
ncr = Dataset(pathread.format(0),'r')
ncw = Dataset(pathstor,'w')
for dim in ncr.dimensions:
if dim == 'time':
Nt = ncr.dimensions[dim].size
(its,), (nt,) = get_indchk(Nt,1,nst) # center time-subsample
nt = nt[0]
ncw.createDimension(dim,nt)
ncw.createVariable(dim,'i',(dim,))[:] = ncr.variables[dim][its[0]::nst]
elif dim in ["s_w","s_rho"]:
ncw.createDimension(dim,Nz)
ncw.createVariable(dim,'i',(dim,))[:] = np.arange(Nz)
else:
ncw.createDimension(dim,ncr.dimensions[dim].__len__())
ncw.createVariable(dim,'i',(dim,))[:] = ncr.variables[dim][:]
# special case: add vertical
for var in ['scrum_time','lon_rho','lat_rho']:
if var in ncr.variables:
ncwar = ncw.createVariable(var,'f',ncr.variables[var].dimensions)
if 'time' in ncwar.dimensions:
ncwar[:] = ncr.variables[var][its[0]::nst]
else:
ncwar[:] = ncr.variables[var][:]
for attr in ncr.ncattrs():
if attr not in ['generated_on','generating_script']:
ncw.setncattr(attr,ncr.getncattr(attr))
lavar = ncr.variables[varname]
ncw.generated_on = datetime.date.today().isoformat()
ncw.generating_script = 'filter_var_in_file.py'
ncw.from_ncfile = pathread.split()[-1]
dims = lavar.dimensions
ncwar = ncw.createVariable(varname+'_{}f'.format(fmod),'f',dims)
for attr in lavar.ncattrs():
ncwar.setncattr(attr,lavar.getncattr(attr))
ncwar.description = fmod+'-pass filtered at frequency {0:.2e} hour^-1'.format(fcut)
ncwag = ncw.createVariable(varname+'_avg','f',dims[:-1])
for attr in lavar.ncattrs():
ncwag.setncattr(attr,lavar.getncattr(attr))
ncwag.description = 'time averaged over the entire time series'
dt = np.diff(ncr.variables['scrum_time'][:2])[0]/3600. # /h
Ni,Nj = lavar.shape[1:-1]
ncr.close()
if doverb:
print("done with creating netCDF file",pathstor)
##### Define chunking (memory purpose)
nchunks = int(8*Ni*Nj*Nt/Nmax*4) # *4 is ad hoc
(inds,),(schk,) = get_indchk(Ni,nchunks)
if doverb:
print('will use {} chunks'.format(nchunks))
tmes, tmeb = time.clock(), time.time()
bb, aa = sig.butter(4,fcut*2*dt,btype=fmod)
##### Start loop over chunks: filter and store
for iz in range(Nz):
ncr = Dataset(pathread.format(iz),'r')
lavar = ncr.variables[varname][:]
ncr.close()
if doverb: print("done reading")
for ich in range(nchunks):
i1,i2 = inds[ich:ich+2]
prov = lavar[0,i1:i2,...]
print('start filtering',prov.shape)
prov = sig.filtfilt(bb,aa,prov,axis=-1,method='gust')[...,its[0]::nst]
print('done filtering, start storing', prov.shape)
if transperr:
ncwar[iz,:,i1:i2,:] = np.rollaxis(prov,1)
ncwag[iz,:,i1:i2] = np.nanmean(prov,axis=-1).T
else:
ncwar[iz,i1:i2,:,:] = prov
ncwag[iz,i1:i2,:] = np.nanmean(prov,axis=-1)
print('stored lowpass and mean')
if doverb:
print('iz = {0}, iy = {1} -- timing:'.format(iz,ich),time.clock()-tmes,time.time()-tmeb)
tmes, tmeb = time.clock(), time.time()
ncw.close()
| [
"netCDF4.Dataset",
"scipy.signal.filtfilt",
"datetime.date.today",
"time.clock",
"time.time",
"def_chunk.get_indchk",
"numpy.diff",
"numpy.arange",
"numpy.rollaxis",
"scipy.signal.butter",
"numpy.nanmean"
] | [((1496, 1518), 'netCDF4.Dataset', 'Dataset', (['pathstor', '"""w"""'], {}), "(pathstor, 'w')\n", (1503, 1518), False, 'from netCDF4 import Dataset\n'), ((3457, 3480), 'def_chunk.get_indchk', 'get_indchk', (['Ni', 'nchunks'], {}), '(Ni, nchunks)\n', (3467, 3480), False, 'from def_chunk import get_indchk\n'), ((3592, 3632), 'scipy.signal.butter', 'sig.butter', (['(4)', '(fcut * 2 * dt)'], {'btype': 'fmod'}), '(4, fcut * 2 * dt, btype=fmod)\n', (3602, 3632), True, 'import scipy.signal as sig\n'), ((1629, 1651), 'def_chunk.get_indchk', 'get_indchk', (['Nt', '(1)', 'nst'], {}), '(Nt, 1, nst)\n', (1639, 1651), False, 'from def_chunk import get_indchk\n'), ((2606, 2627), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2625, 2627), False, 'import time, datetime\n'), ((3188, 3228), 'numpy.diff', 'np.diff', (["ncr.variables['scrum_time'][:2]"], {}), "(ncr.variables['scrum_time'][:2])\n", (3195, 3228), True, 'import numpy as np\n'), ((3556, 3568), 'time.clock', 'time.clock', ([], {}), '()\n', (3566, 3568), False, 'import time, datetime\n'), ((3570, 3581), 'time.time', 'time.time', ([], {}), '()\n', (3579, 3581), False, 'import time, datetime\n'), ((1931, 1944), 'numpy.arange', 'np.arange', (['Nz'], {}), '(Nz)\n', (1940, 1944), True, 'import numpy as np\n'), ((3988, 4038), 'scipy.signal.filtfilt', 'sig.filtfilt', (['bb', 'aa', 'prov'], {'axis': '(-1)', 'method': '"""gust"""'}), "(bb, aa, prov, axis=-1, method='gust')\n", (4000, 4038), True, 'import scipy.signal as sig\n'), ((4167, 4187), 'numpy.rollaxis', 'np.rollaxis', (['prov', '(1)'], {}), '(prov, 1)\n', (4178, 4187), True, 'import numpy as np\n'), ((4331, 4356), 'numpy.nanmean', 'np.nanmean', (['prov'], {'axis': '(-1)'}), '(prov, axis=-1)\n', (4341, 4356), True, 'import numpy as np\n'), ((4538, 4550), 'time.clock', 'time.clock', ([], {}), '()\n', (4548, 4550), False, 'import time, datetime\n'), ((4552, 4563), 'time.time', 'time.time', ([], {}), '()\n', (4561, 4563), False, 'import time, datetime\n'), 
((4219, 4244), 'numpy.nanmean', 'np.nanmean', (['prov'], {'axis': '(-1)'}), '(prov, axis=-1)\n', (4229, 4244), True, 'import numpy as np\n'), ((4481, 4493), 'time.clock', 'time.clock', ([], {}), '()\n', (4491, 4493), False, 'import time, datetime\n'), ((4499, 4510), 'time.time', 'time.time', ([], {}), '()\n', (4508, 4510), False, 'import time, datetime\n')] |
# -*- coding: utf-8 -*-
"""This module contains the class Network."""
import logging
from typing import Dict, List, Optional
import numpy as np
from igraph import Graph, Vertex, VertexSeq
from .gene import Gene
__all__ = [
'Network',
]
logger = logging.getLogger(__name__)
class Network:
"""Encapsulate a PPI network with differential gene expression and disease association annotation."""
def __init__(
self,
ppi_graph: Graph,
max_adj_p: Optional[float] = None,
max_l2fc: Optional[float] = None,
min_l2fc: Optional[float] = None,
) -> None:
"""Initialize the network object.
:param ppi_graph: A graph of protein interactions.
:param max_adj_p: Maximum value for adjusted p-value, used for calculating differential expression
:param max_l2fc: Maximum value for log2 fold change, used for calculating down regulation
:param min_l2fc: Minimum value for log2 fold change, used for calculating up regulation
"""
logger.info("Initializing Network")
self.max_adj_p = max_adj_p or 0.05
self.max_l2fc = max_l2fc or -1.0
self.min_l2fc = min_l2fc or +1.0
self.graph = ppi_graph.copy() # create deep copy of the graph
def set_up_network(
self,
genes: List[Gene],
gene_filter: bool = False,
disease_associations: Optional[Dict] = None,
) -> None:
"""Set up the network.
Filter genes out if requested and add attributes to the vertices.
:param genes: A list of Gene objects.
:param gene_filter: Removes all genes that are not in list <genes> if True.
:param disease_associations: Diseases associated with genes.
"""
if gene_filter:
self.filter_genes([gene.entrez_id for gene in genes])
self._add_vertex_attributes(genes, disease_associations)
self.print_summary("Graph of all genes")
def filter_genes(self, relevant_entrez: List[str]) -> None:
"""Filter out the genes that are not in list relevant_entrez.
:param relevant_entrez: Entrez IDs of genes which are to be kept.
"""
logger.info("In filter_genes()")
irrelevant_genes = self.graph.vs.select(name_notin=relevant_entrez)
self.graph.delete_vertices(irrelevant_genes)
def _add_vertex_attributes(
self,
genes: List[Gene],
disease_associations: Optional[dict] = None,
) -> None:
"""Add attributes to vertices.
:param genes: A list of genes containing attribute information.
"""
self._set_default_vertex_attributes()
self._add_vertex_attributes_by_genes(genes)
# compute up-regulated and down-regulated genes
up_regulated = self.get_upregulated_genes()
down_regulated = self.get_downregulated_genes()
# set the attributes for up-regulated and down-regulated genes
self.graph.vs(up_regulated.indices)["diff_expressed"] = True
self.graph.vs(up_regulated.indices)["up_regulated"] = True
self.graph.vs(down_regulated.indices)["diff_expressed"] = True
self.graph.vs(down_regulated.indices)["down_regulated"] = True
# add disease associations
self._add_disease_associations(disease_associations)
logger.info("Number of all differentially expressed genes is: {}".
format(len(up_regulated) + len(down_regulated)))
def _set_default_vertex_attributes(self) -> None:
"""Assign default values on attributes to all vertices."""
self.graph.vs["l2fc"] = 0
self.graph.vs["padj"] = 0.5
self.graph.vs["symbol"] = self.graph.vs["name"]
self.graph.vs["diff_expressed"] = False
self.graph.vs["up_regulated"] = False
self.graph.vs["down_regulated"] = False
def _add_vertex_attributes_by_genes(self, genes: List[Gene]) -> None:
"""Assign values to attributes on vertices.
:param genes: A list of Gene objects from which values will be extracted.
"""
for gene in genes:
try:
vertex = self.graph.vs.find(name=str(gene.entrez_id)).index
self.graph.vs[vertex]['l2fc'] = gene.log2_fold_change
self.graph.vs[vertex]['symbol'] = gene.symbol
self.graph.vs[vertex]['padj'] = gene.padj
except ValueError:
pass
def _add_disease_associations(self, disease_associations: dict) -> None:
"""Add disease association annotation to the network.
:param disease_associations: Dictionary of disease-gene associations.
"""
if disease_associations is not None:
for target_id, disease_id_list in disease_associations.items():
if target_id in self.graph.vs["name"]:
self.graph.vs.find(name=target_id)["associated_diseases"] = disease_id_list
def get_upregulated_genes(self) -> VertexSeq:
"""Get genes that are up-regulated.
:return: Up-regulated genes.
"""
up_regulated = self.graph.vs.select(self._is_upregulated_gene)
logger.info(f"No. of up-regulated genes after laying on network: {len(up_regulated)}")
return up_regulated
def get_downregulated_genes(self) -> VertexSeq:
"""Get genes that are down-regulated.
:return: Down-regulated genes.
"""
down_regulated = self.graph.vs.select(self._is_downregulated_gene)
logger.info(f"No. of down-regulated genes after laying on network: {len(down_regulated)}")
return down_regulated
def _is_significantly_differentiated(self, v: Vertex) -> bool:
return v.attributes()['padj'] < self.max_adj_p
def _is_upregulated_gene(self, v: Vertex) -> bool:
return self._is_significantly_differentiated(v) and v.attributes()['l2fc'] > self.min_l2fc
def _is_downregulated_gene(self, v: Vertex) -> bool:
return self._is_significantly_differentiated(v) and v.attributes()['l2fc'] < self.max_l2fc
def print_summary(self, heading: str) -> None:
"""Print the summary of a graph.
:param str heading: Title of the graph.
"""
logger.info(heading)
logger.info("Number of nodes: {}".format(len(self.graph.vs)))
logger.info("Number of edges: {}".format(len(self.graph.es)))
def get_differentially_expressed_genes(self, diff_type: str) -> VertexSeq:
"""Get the differentially expressed genes based on diff_type.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return list: A list of differentially expressed genes.
"""
if diff_type == "up":
diff_expr = self.graph.vs.select(up_regulated_eq=True)
elif diff_type == "down":
diff_expr = self.graph.vs.select(down_regulated_eq=True)
else:
diff_expr = self.graph.vs.select(diff_expressed_eq=True)
return diff_expr
def write_adj_list(self, path: str) -> None:
"""Write the network as an adjacency list to a file.
:param path: File path to write the adjacency list.
"""
adj_list = self.get_adjlist()
with open(path, mode="w") as file:
for i, line in enumerate(adj_list):
print(i, *line, file=file)
def get_adjlist(self) -> List[List[int]]:
"""Get the adjacency list of the network.
:return: Adjacency list of the network.
"""
return self.graph.get_adjlist()
def get_attribute_from_indices(self, indices: list, attribute_name: str):
"""Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices.
"""
return list(np.array(self.graph.vs[attribute_name])[indices])
| [
"numpy.array",
"logging.getLogger"
] | [((255, 282), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'import logging\n'), ((7974, 8013), 'numpy.array', 'np.array', (['self.graph.vs[attribute_name]'], {}), '(self.graph.vs[attribute_name])\n', (7982, 8013), True, 'import numpy as np\n')] |
import numpy as np
from Bio import Phylo
import networkx as nx
def tree_to_newick_fun_test(tree_set):
tree_set_type = [nx.bfs_tree(G, 0) for G in tree_set]
Phylo.write(tree_set_type, "tree_set.xml", "phyloxml")
Phylo.convert("tree_set.xml", "phyloxml", "tree_set_newick.nhx", "newick")
return "tree_set_newick.nhx"
def sub_tree_to_newick(G, root=None):
subgs = []
for child in G[root]:
try:
length = np.round(G.edges[(root, child)]["length"], 3)
except KeyError:
length = np.round(G.edges[(root, child)]["lenght"], 3)
if len(G[child]) > 0:
subgs.append(sub_tree_to_newick(G, root=child) + f":{length}")
else:
subgs.append(str(child) + f":{length}")
return "(" + ','.join(subgs) + ")"
def tree_to_newick_fun(tree_set, net_num, network_gen="LGT", train_data=True, partial=False, tree_size=None,
forest_size=None, unique_set=True):
if tree_size is None:
tree_size = ""
else:
tree_size += "_"
if train_data:
if partial:
file_name = f"Train/TreeSetsNewick/tree_set_newick_{tree_size}part_{net_num}_{network_gen}.txt"
else:
file_name = f"Train/TreeSetsNewick/tree_set_newick_{tree_size}{net_num}_{network_gen}.txt"
else:
if partial:
file_name = f"Test/TreeSetsNewick/tree_set_newick_{tree_size}T{forest_size}_part_{net_num}_{network_gen}.txt"
else:
file_name = f"Test/TreeSetsNewick/tree_set_newick_{tree_size}T{forest_size}_{net_num}_{network_gen}.txt"
if unique_set:
file = open("LGT/UniqueResults/" + file_name, "w+")
else:
file = open("LGT/NonUniqueResults/" + file_name, "w+")
for tree in tree_set.values():
tree_line = sub_tree_to_newick(tree, 0)
file.write(tree_line)
file.write("\n")
file.close()
| [
"Bio.Phylo.convert",
"Bio.Phylo.write",
"numpy.round",
"networkx.bfs_tree"
] | [((166, 220), 'Bio.Phylo.write', 'Phylo.write', (['tree_set_type', '"""tree_set.xml"""', '"""phyloxml"""'], {}), "(tree_set_type, 'tree_set.xml', 'phyloxml')\n", (177, 220), False, 'from Bio import Phylo\n'), ((225, 299), 'Bio.Phylo.convert', 'Phylo.convert', (['"""tree_set.xml"""', '"""phyloxml"""', '"""tree_set_newick.nhx"""', '"""newick"""'], {}), "('tree_set.xml', 'phyloxml', 'tree_set_newick.nhx', 'newick')\n", (238, 299), False, 'from Bio import Phylo\n'), ((125, 142), 'networkx.bfs_tree', 'nx.bfs_tree', (['G', '(0)'], {}), '(G, 0)\n', (136, 142), True, 'import networkx as nx\n'), ((448, 491), 'numpy.round', 'np.round', (["G.edges[root, child]['length']", '(3)'], {}), "(G.edges[root, child]['length'], 3)\n", (456, 491), True, 'import numpy as np\n'), ((540, 583), 'numpy.round', 'np.round', (["G.edges[root, child]['lenght']", '(3)'], {}), "(G.edges[root, child]['lenght'], 3)\n", (548, 583), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
@Author: <NAME>
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import numpy as np
import tensorflow as tf
from pyclue.tf1.open_sources.configs import pretrained_names, pretrained_types
from pyclue.tf1.open_sources.download import get_pretrained_model
from pyclue.tf1.tasks.sentence_pair.siamese.inputs import Processor
from pyclue.tf1.tokenizers.bert_tokenizer import FullTokenizer # Add more tokenizers
class Predictor(object):
    """Inference wrapper around an exported siamese sentence-pair model.

    Restores the SavedModel, label map and tokenizer written under
    ``model_file`` at export time and exposes cosine-similarity based
    predictions for pairs of sentences.
    """

    def __init__(self, model_file):
        """Load label map, model config, tokenizer and the serving graph.

        Args:
            model_file: directory holding the exported SavedModel together
                with ``label_map_reverse.json`` and ``model_config.json``.
        """
        self.model_file = os.path.abspath(model_file)

        # label: restore the index -> label-name mapping saved at export time
        label_map_reverse_file = os.path.join(
            self.model_file, 'label_map_reverse.json')
        with tf.gfile.GFile(label_map_reverse_file, 'r') as f:
            self.label_map_reverse = json.load(f)
        self.labels = [item[1] for item in sorted(
            self.label_map_reverse.items(), key=lambda i: i[0])]

        # model configuration
        model_config_file = os.path.join(
            self.model_file, 'model_config.json')
        with tf.gfile.GFile(model_config_file, 'r') as f:
            self.model_config = json.load(f)
        self.model_name = self.model_config.get('model_name') or None
        self.model_type = self.model_config.get('model_type') or None
        self.vocab_file = self.model_config.get('vocab_file') or None
        self.max_seq_len = self.model_config.get('max_seq_len') or 512
        if not self.model_name:
            assert all([self.vocab_file, self.model_type]), \
                'If not given model_name provided by open_sources, ' \
                'you should specify the model_type and vocab_file.'
        else:
            assert self.model_name in pretrained_names, \
                '%s not provided by open_sources' % self.model_name
            self.model_type = pretrained_types.get(self.model_name).split('_')[0]
            pretrained_dir = get_pretrained_model(pretrained_name=self.model_name)
            self.vocab_file = os.path.join(pretrained_dir, 'vocab.txt')

        # tokenizer: bert and albert both use the same WordPiece tokenizer here
        if self.model_type == 'bert':
            self.tokenizer = FullTokenizer(self.vocab_file)
        elif self.model_type == 'albert':
            self.tokenizer = FullTokenizer(self.vocab_file)
        else:
            raise ValueError('model_type %s unknown.' % self.model_type)

        # processor
        self._load_processor()
        # build graph
        self._build()

    def _load_processor(self):
        """Create the feature processor that turns raw text into model inputs."""
        self.processor = Processor(
            max_seq_len=self.max_seq_len, tokenizer=self.tokenizer, labels=self.labels)

    def _build(self):
        """Load the SavedModel into a session and cache serving tensor names."""
        self.graph = tf.Graph()
        self.sess = tf.Session()
        self.meta_graph_def = tf.saved_model.loader.load(
            self.sess, tags=['serve'], export_dir=self.model_file)
        self.signature = self.meta_graph_def.signature_def
        # input tensors of the 'serving_default' signature
        self.input_ids_1 = self.signature['serving_default'].inputs['input_ids_1'].name
        self.input_mask_1 = self.signature['serving_default'].inputs['input_mask_1'].name
        self.segment_ids_1 = self.signature['serving_default'].inputs['segment_ids_1'].name
        self.input_ids_2 = self.signature['serving_default'].inputs['input_ids_2'].name
        self.input_mask_2 = self.signature['serving_default'].inputs['input_mask_2'].name
        self.segment_ids_2 = self.signature['serving_default'].inputs['segment_ids_2'].name
        self.label_ids = self.signature['serving_default'].inputs['label_ids'].name
        # output tensors
        self.text_a_embedding = self.signature['serving_default'].outputs['text_a_embedding'].name
        self.text_b_embedding = self.signature['serving_default'].outputs['text_b_embedding'].name
        self.cos_sims = self.signature['serving_default'].outputs['cos_sims'].name
        self.predictions = self.signature['serving_default'].outputs['predictions'].name
        self.probabilities = self.signature['serving_default'].outputs['probabilities'].name

    def _predict_for_single_example(self, feature):
        """Run the graph on one feature; return (cos_sim, prediction, probability)."""
        cos_sim, prediction, probability = self.sess.run(
            [self.cos_sims, self.predictions, self.probabilities],
            feed_dict={
                self.input_ids_1: [feature.input_ids_1],
                self.input_mask_1: [feature.input_mask_1],
                self.segment_ids_1: [feature.segment_ids_1],
                self.input_ids_2: [feature.input_ids_2],
                self.input_mask_2: [feature.input_mask_2],
                self.segment_ids_2: [feature.segment_ids_2],
                self.label_ids: [feature.label_id]})
        return cos_sim, prediction, probability

    def predict(self, texts):
        """Predict similarity for a batch of sentence pairs.

        Args:
            texts: list of ``[text_a, text_b]`` or ``[label, text_a, text_b]``
                items.

        Returns:
            A list of dicts with ``text_a``, ``text_b``, ``cos_sim``,
            ``prediction`` and ``probability`` entries, one per input pair.

        Raises:
            ValueError: if an item does not contain 2 or 3 elements.
        """
        assert isinstance(texts, list), 'texts format should be `list`'
        assert all([isinstance(item, list) for item in texts]), 'texts item format should be `list`'
        new_texts = []
        for item in texts:
            if len(item) == 2 or len(item) == 3:
                # Prepend a placeholder label when only the two texts are given.
                new_texts.append([self.labels[0], item[-2], item[-1]])
            else:
                raise ValueError('text item should contain 2 or 3 elements')
        assert all([len(item) == 3 for item in new_texts]), \
            'texts item should contain 3 elements'
        features = self.processor.get_features_for_inputs(new_texts)
        results = []
        for text, feature in zip(new_texts, features):
            cos_sim, prediction, probability = self._predict_for_single_example(feature)
            results.append({
                'text_a': text[1],
                'text_b': text[2],
                'cos_sim': np.squeeze(cos_sim).tolist() / 100,
                'prediction': self.label_map_reverse[str(np.squeeze(prediction).tolist())],
                'probability': np.squeeze(probability).tolist()})
        return results

    def predict_from_file(self, input_file):
        """Read sentence pairs from ``input_file`` and return predictions."""
        texts = self.processor.read_file(input_file)
        texts = np.squeeze(texts).tolist()
        return self.predict(texts)

    def quality_inspection(self, input_file, save_path):
        """Write mis-classified pairs from ``input_file`` under ``save_path``.

        Each report line contains the sentence pair, the true and predicted
        labels, the class probabilities and the cosine similarity.
        """
        texts = self.processor.read_file(input_file)
        if np.array(texts).ndim == 1:
            texts = [texts]
        texts = [item for item in texts if len(item) == 3]
        features = self.processor.get_features_for_inputs(texts)
        cos_sims, predictions, probabilities = [], [], []
        for feature in features:
            cos_sim, prediction, probability = self._predict_for_single_example(feature)
            cos_sims.append(cos_sim)
            predictions.append(prediction)
            probabilities.append(probability.tolist())
        if not tf.gfile.Exists(save_path):
            tf.gfile.MakeDirs(save_path)
        with tf.gfile.GFile(os.path.join(save_path, input_file.split('/')[-1]), 'w') as writer:
            # Fix: iterate cos_sims in lockstep with the other lists; the
            # original left ``cos_sim`` bound to the last accumulation-loop
            # value, so every written line showed the same similarity.
            for text, cos_sim, prediction, probability in zip(
                    texts, cos_sims, predictions, probabilities):
                prediction = self.label_map_reverse[str(np.squeeze(prediction).tolist())]
                if text[0] != prediction:
                    writer.write(
                        'text_a = %s, text_b = %s, '
                        'true = %s, pred = %s, '
                        'probability = %s, cos_sim = %s\n'
                        % (text[1], text[2], text[0], prediction, probability, cos_sim / 100))

    def close(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()

    def restart(self):
        """Re-load the SavedModel into a fresh session."""
        self._build()
| [
"pyclue.tf1.open_sources.download.get_pretrained_model",
"os.path.abspath",
"json.load",
"tensorflow.gfile.Exists",
"pyclue.tf1.tokenizers.bert_tokenizer.FullTokenizer",
"tensorflow.gfile.MakeDirs",
"tensorflow.Session",
"pyclue.tf1.tasks.sentence_pair.siamese.inputs.Processor",
"tensorflow.gfile.GF... | [((651, 678), 'os.path.abspath', 'os.path.abspath', (['model_file'], {}), '(model_file)\n', (666, 678), False, 'import os\n'), ((729, 784), 'os.path.join', 'os.path.join', (['self.model_file', '"""label_map_reverse.json"""'], {}), "(self.model_file, 'label_map_reverse.json')\n", (741, 784), False, 'import os\n'), ((1072, 1122), 'os.path.join', 'os.path.join', (['self.model_file', '"""model_config.json"""'], {}), "(self.model_file, 'model_config.json')\n", (1084, 1122), False, 'import os\n'), ((2592, 2682), 'pyclue.tf1.tasks.sentence_pair.siamese.inputs.Processor', 'Processor', ([], {'max_seq_len': 'self.max_seq_len', 'tokenizer': 'self.tokenizer', 'labels': 'self.labels'}), '(max_seq_len=self.max_seq_len, tokenizer=self.tokenizer, labels=\n self.labels)\n', (2601, 2682), False, 'from pyclue.tf1.tasks.sentence_pair.siamese.inputs import Processor\n'), ((2735, 2745), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2743, 2745), True, 'import tensorflow as tf\n'), ((2766, 2778), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2776, 2778), True, 'import tensorflow as tf\n'), ((2809, 2895), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['self.sess'], {'tags': "['serve']", 'export_dir': 'self.model_file'}), "(self.sess, tags=['serve'], export_dir=self.\n model_file)\n", (2835, 2895), True, 'import tensorflow as tf\n'), ((811, 854), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_map_reverse_file', '"""r"""'], {}), "(label_map_reverse_file, 'r')\n", (825, 854), True, 'import tensorflow as tf\n'), ((898, 910), 'json.load', 'json.load', (['f'], {}), '(f)\n', (907, 910), False, 'import json\n'), ((1149, 1187), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model_config_file', '"""r"""'], {}), "(model_config_file, 'r')\n", (1163, 1187), True, 'import tensorflow as tf\n'), ((1226, 1238), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1235, 1238), False, 'import json\n'), ((2004, 2057), 
'pyclue.tf1.open_sources.download.get_pretrained_model', 'get_pretrained_model', ([], {'pretrained_name': 'self.model_name'}), '(pretrained_name=self.model_name)\n', (2024, 2057), False, 'from pyclue.tf1.open_sources.download import get_pretrained_model\n'), ((2088, 2129), 'os.path.join', 'os.path.join', (['pretrained_dir', '"""vocab.txt"""'], {}), "(pretrained_dir, 'vocab.txt')\n", (2100, 2129), False, 'import os\n'), ((2218, 2248), 'pyclue.tf1.tokenizers.bert_tokenizer.FullTokenizer', 'FullTokenizer', (['self.vocab_file'], {}), '(self.vocab_file)\n', (2231, 2248), False, 'from pyclue.tf1.tokenizers.bert_tokenizer import FullTokenizer\n'), ((6677, 6703), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['save_path'], {}), '(save_path)\n', (6692, 6703), True, 'import tensorflow as tf\n'), ((6717, 6745), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['save_path'], {}), '(save_path)\n', (6734, 6745), True, 'import tensorflow as tf\n'), ((2320, 2350), 'pyclue.tf1.tokenizers.bert_tokenizer.FullTokenizer', 'FullTokenizer', (['self.vocab_file'], {}), '(self.vocab_file)\n', (2333, 2350), False, 'from pyclue.tf1.tokenizers.bert_tokenizer import FullTokenizer\n'), ((5982, 5999), 'numpy.squeeze', 'np.squeeze', (['texts'], {}), '(texts)\n', (5992, 5999), True, 'import numpy as np\n'), ((6166, 6181), 'numpy.array', 'np.array', (['texts'], {}), '(texts)\n', (6174, 6181), True, 'import numpy as np\n'), ((1923, 1960), 'pyclue.tf1.open_sources.configs.pretrained_types.get', 'pretrained_types.get', (['self.model_name'], {}), '(self.model_name)\n', (1943, 1960), False, 'from pyclue.tf1.open_sources.configs import pretrained_names, pretrained_types\n'), ((5809, 5832), 'numpy.squeeze', 'np.squeeze', (['probability'], {}), '(probability)\n', (5819, 5832), True, 'import numpy as np\n'), ((5650, 5669), 'numpy.squeeze', 'np.squeeze', (['cos_sim'], {}), '(cos_sim)\n', (5660, 5669), True, 'import numpy as np\n'), ((6987, 7009), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), 
'(prediction)\n', (6997, 7009), True, 'import numpy as np\n'), ((5743, 5765), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (5753, 5765), True, 'import numpy as np\n')] |
''''''
'''
@Author: <NAME> (<EMAIL>)
@Date: 2020-03-01
@Copyright: Copyright (C) <NAME> 2020. All rights reserved. Please refer to the license file.
@LastEditTime: 2020-05-27
@LastEditors: <NAME>
@Description: This file contains functions related to GRIC computation
'''
import numpy as np
def compute_fundamental_residual(F, kp1, kp2):
    """Sampson-style residual of a fundamental matrix for matched keypoints.

    Args:
        F (array, [3x3]): Fundamental matrix (from view-1 to view-2)
        kp1 (array, [Nx2]): keypoint 1
        kp2 (array, [Nx2]): keypoint 2
    Returns:
        res (array, [N]): residual
    """
    num_pts = kp1.shape[0]
    # Homogeneous coordinates as 3xN arrays (float via np.ones).
    pts1_h = np.ones((3, num_pts))
    pts1_h[:2] = kp1.T
    pts2_h = np.ones((3, num_pts))
    pts2_h[:2] = kp2.T

    epi_lines2 = F @ pts1_h      # epipolar lines in view 2, one per column
    epi_lines1 = F.T @ pts2_h    # epipolar lines in view 1
    # Algebraic error m1^T F m0, evaluated column-wise.
    algebraic = np.sum(pts2_h * epi_lines2, axis=0)

    denom = (np.sum(epi_lines2[:2] ** 2, axis=0)
             + np.sum(epi_lines1[:2] ** 2, axis=0))
    return algebraic ** 2 / denom
def compute_homography_residual(H_in, kp1, kp2):
    """Geometric residual of a homography for matched keypoints.

    Args:
        H_in (array, [3x3]): homography matrix (view-1 to view-2)
        kp1 (array, [Nx2]): keypoint 1
        kp2 (array, [Nx2]): keypoint 2
    Returns:
        res (array, [N]): residual
    """
    h = H_in.flatten()
    # Work in float64 regardless of the input keypoint dtype.
    x0 = np.asarray(kp1[:, 0], dtype=np.float64)
    y0 = np.asarray(kp1[:, 1], dtype=np.float64)
    x1 = np.asarray(kp2[:, 0], dtype=np.float64)
    y1 = np.asarray(kp2[:, 1], dtype=np.float64)

    # Gradient directions of the two transfer-error components.
    third = -x0 * h[6] - y0 * h[7] - h[8]
    g0 = np.stack([h[0] - x1 * h[6], h[1] - x1 * h[7], third])
    g1 = np.stack([h[3] - y1 * h[6], h[4] - y1 * h[7], third])

    norm_g0 = np.sqrt(np.sum(g0 ** 2, axis=0))
    norm_g1 = np.sqrt(np.sum(g1 ** 2, axis=0))
    # Angle between gradients, measured from their first two components only.
    dot_xy = g0[0] * g1[0] + g0[1] * g1[1]
    alpha = np.arccos(dot_xy / (norm_g0 * norm_g1))

    # Algebraic transfer errors of both coordinates.
    w = x0 * h[6] + y0 * h[7] + h[8]
    alg0 = x0 * h[0] + y0 * h[1] + h[2] - x1 * w
    alg1 = x0 * h[3] + y0 * h[4] + h[5] - y1 * w

    d0 = alg0 / norm_g0
    d1 = alg1 / norm_g1
    return (d0 * d0 + d1 * d1 - 2.0 * d0 * d1 * np.cos(alpha)) / np.sin(alpha)
def calc_GRIC(res, sigma, n, model):
    """Calculate the Geometric Robust Information Criterion (GRIC).

    Args:
        res (array, [N]): residual
        sigma (float): assumed variance of the error
        n (int): number of residuals
        model (str): model type
            - FMat
            - EMat
            - HMat
    Returns:
        GRIC score (lower is better).
    """
    R = 4
    inv_sigma_sq = 1. / sigma ** 2
    # Model degrees of freedom and structure dimension per model type.
    K = {"FMat": 7, "EMat": 5, "HMat": 8}[model]
    D = {"FMat": 3, "EMat": 3, "HMat": 2}[model]
    rho_cap = 2.0 * (R - D)

    # Robustified data term: squared residuals clipped at rho_cap.
    score = sum(min(res[idx] * inv_sigma_sq, rho_cap) for idx in range(n))
    # Model-complexity penalties.
    score += n * D * np.log(R) + K * np.log(R * n)
    return score
| [
"numpy.sum",
"numpy.log",
"numpy.zeros",
"numpy.transpose",
"numpy.ones",
"numpy.sin",
"numpy.cos",
"numpy.arccos",
"numpy.sqrt"
] | [((661, 687), 'numpy.ones', 'np.ones', (['(3, kp1.shape[0])'], {}), '((3, kp1.shape[0]))\n', (668, 687), True, 'import numpy as np\n'), ((701, 726), 'numpy.transpose', 'np.transpose', (['kp1', '(1, 0)'], {}), '(kp1, (1, 0))\n', (713, 726), True, 'import numpy as np\n'), ((735, 761), 'numpy.ones', 'np.ones', (['(3, kp2.shape[0])'], {}), '((3, kp2.shape[0]))\n', (742, 761), True, 'import numpy as np\n'), ((775, 800), 'numpy.transpose', 'np.transpose', (['kp2', '(1, 0)'], {}), '(kp2, (1, 0))\n', (787, 800), True, 'import numpy as np\n'), ((1429, 1455), 'numpy.ones', 'np.ones', (['(3, kp1.shape[0])'], {}), '((3, kp1.shape[0]))\n', (1436, 1455), True, 'import numpy as np\n'), ((1469, 1494), 'numpy.transpose', 'np.transpose', (['kp1', '(1, 0)'], {}), '(kp1, (1, 0))\n', (1481, 1494), True, 'import numpy as np\n'), ((1503, 1529), 'numpy.ones', 'np.ones', (['(3, kp2.shape[0])'], {}), '((3, kp2.shape[0]))\n', (1510, 1529), True, 'import numpy as np\n'), ((1543, 1568), 'numpy.transpose', 'np.transpose', (['kp2', '(1, 0)'], {}), '(kp2, (1, 0))\n', (1555, 1568), True, 'import numpy as np\n'), ((1579, 1595), 'numpy.zeros', 'np.zeros', (['(3, n)'], {}), '((3, n))\n', (1587, 1595), True, 'import numpy as np\n'), ((1605, 1621), 'numpy.zeros', 'np.zeros', (['(3, n)'], {}), '((3, n))\n', (1613, 1621), True, 'import numpy as np\n'), ((1851, 1905), 'numpy.sqrt', 'np.sqrt', (['(G0[0] * G0[0] + G0[1] * G0[1] + G0[2] * G0[2])'], {}), '(G0[0] * G0[0] + G0[1] * G0[1] + G0[2] * G0[2])\n', (1858, 1905), True, 'import numpy as np\n'), ((1910, 1964), 'numpy.sqrt', 'np.sqrt', (['(G1[0] * G1[0] + G1[1] * G1[1] + G1[2] * G1[2])'], {}), '(G1[0] * G1[0] + G1[1] * G1[1] + G1[2] * G1[2])\n', (1917, 1964), True, 'import numpy as np\n'), ((2008, 2044), 'numpy.arccos', 'np.arccos', (['(magG0G1 / (magG0 * magG1))'], {}), '(magG0G1 / (magG0 * magG1))\n', (2017, 2044), True, 'import numpy as np\n'), ((2053, 2069), 'numpy.zeros', 'np.zeros', (['(2, n)'], {}), '((2, n))\n', (2061, 2069), True, 'import 
numpy as np\n'), ((2363, 2376), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (2369, 2376), True, 'import numpy as np\n'), ((926, 954), 'numpy.sum', 'np.sum', (['(Fm0[:2] ** 2)'], {'axis': '(0)'}), '(Fm0[:2] ** 2, axis=0)\n', (932, 954), True, 'import numpy as np\n'), ((955, 984), 'numpy.sum', 'np.sum', (['(Ftm1[:2] ** 2)'], {'axis': '(0)'}), '(Ftm1[:2] ** 2, axis=0)\n', (961, 984), True, 'import numpy as np\n'), ((3099, 3108), 'numpy.log', 'np.log', (['R'], {}), '(R)\n', (3105, 3108), True, 'import numpy as np\n'), ((3115, 3128), 'numpy.log', 'np.log', (['(R * n)'], {}), '(R * n)\n', (3121, 3128), True, 'import numpy as np\n'), ((862, 887), 'numpy.transpose', 'np.transpose', (['Fm0', '(1, 0)'], {}), '(Fm0, (1, 0))\n', (874, 887), True, 'import numpy as np\n'), ((2348, 2361), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (2354, 2361), True, 'import numpy as np\n')] |
import numpy as np
from models.k_means import KMeans
class GMM:
    """Implements a Gaussian Mixture Model fitted with the EM algorithm.

    Similar to K-means, iteratively adjusts the clusters to better match
    the points belonging to them and then recomputes which points belong
    to which cluster.  Here the assignment is soft: each point gets a
    posterior probability for every cluster, and clusters are modeled as
    multivariate gaussians.

    Parameters
    ----------
    k : `int`, optional
        Number of clusters. Defaults to 2.
    max_iter : `int`, optional
        Max iterations to perform before the algorithm stops training if
        convergence is not achieved.
    th : `float`, optional
        Minimum change in the posterior probabilities `h` to continue
        training. When the change is smaller the training is stopped as
        the algorithm is considered to have converged. Defaults to 1e-7.
    """

    def __init__(self, k=2, max_iter=100, th=1e-7):
        self.k = k
        self.max_iter = max_iter
        self.weights = None
        # Fix: honor the caller-supplied threshold; the original always
        # assigned the hard-coded 1e-7 and silently ignored ``th``.
        self.th = th

    def fit(self, X, re_init=True, verbose=False):
        """Fits `self.k` multivariate gaussians to `X` using EM algorithm.

        Parameters
        ----------
        X : `numpy.ndarray` (n_examples, n_features)
            Training data.
        re_init : `boolean`, optional
            Whether to reinitialize centroids. Defaults to True. If this is
            the first call to fit, centroids will be initialized eitherway.
        verbose : `boolean`, optional
            Unused; kept for interface compatibility.
        """
        # Initialize gaussians
        if re_init or (self.weights is None):
            self._init_gaussians(X)
        prev_h = None
        for _ in range(self.max_iter):
            # Expectation: per-sample cluster responsibilities.
            h = self._expectation_step(X)
            # Maximization: refit weights, means and covariances from h.
            self._maximization_step(X, h)
            if (prev_h is not None
                    and np.linalg.norm(h - prev_h) < self.th):
                # Converged
                break
            prev_h = h

    def predict(self, X):
        """Return the (n_samples, k) posterior cluster probabilities."""
        return self._expectation_step(X)

    def _expectation_step(self, X):
        """Compute posterior probability of each cluster for every sample."""
        (n_samples, n_features) = X.shape
        # Weighted likelihood of each sample under each gaussian.
        h = np.zeros((n_samples, self.k))
        for k in range(self.k):
            h[:, k] = self._multinomial_gaussian(
                X,
                self.means[k],
                self.covariances[k])
            h[:, k] *= self.weights[k]
        # Normalize rows so each sample's responsibilities sum to one.
        h = h / np.sum(h, axis=1, keepdims=True)
        return h

    def _maximization_step(self, X, h):
        """Update mixture weights, means and covariances from posteriors h."""
        (n_samples, n_features) = X.shape
        # Update weights
        self.weights = (1. / n_samples) * np.sum(h, axis=0)
        # Update gaussians
        for k in range(self.k):
            h_sum = np.sum(h[:, k])
            self.means[k] = np.sum(h[:, k, None] * X, axis=0) / h_sum
            diff = (X - self.means[k])
            self.covariances[k] = np.matmul(diff.T, h[:, k, None] * diff) / h_sum

    def _multinomial_gaussian(self, x, mean, variance):
        """Density of a multivariate gaussian evaluated at each row of x.

        NOTE(review): the normalization constant uses ``(2*pi) ** self.k``
        (the number of clusters) where the data dimensionality is the
        convention.  Since every component shares the same constant, it
        cancels in the E-step row normalization and the posteriors are
        unaffected -- confirm before relying on the raw density values.
        """
        factor = 1. / np.sqrt(np.linalg.det(variance) * (2 * np.pi) ** self.k)
        diff = x - mean
        inv_var = np.linalg.pinv(variance)
        exp = np.exp(-0.5 * np.sum((diff @ inv_var) * diff, axis=1))
        return factor * exp

    def _init_gaussians(self, X):
        """Seed means/covariances from a K-means run; start with uniform weights."""
        self.means, self.covariances = KMeans(k=self.k)._init_gmm(X)
        self.weights = [1. / self.k] * self.k
| [
"numpy.sum",
"numpy.zeros",
"numpy.linalg.norm",
"models.k_means.KMeans",
"numpy.matmul",
"numpy.linalg.det",
"numpy.linalg.pinv"
] | [((2290, 2319), 'numpy.zeros', 'np.zeros', (['(n_samples, self.k)'], {}), '((n_samples, self.k))\n', (2298, 2319), True, 'import numpy as np\n'), ((3223, 3247), 'numpy.linalg.pinv', 'np.linalg.pinv', (['variance'], {}), '(variance)\n', (3237, 3247), True, 'import numpy as np\n'), ((2556, 2588), 'numpy.sum', 'np.sum', (['h'], {'axis': '(1)', 'keepdims': '(True)'}), '(h, axis=1, keepdims=True)\n', (2562, 2588), True, 'import numpy as np\n'), ((2753, 2770), 'numpy.sum', 'np.sum', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (2759, 2770), True, 'import numpy as np\n'), ((2851, 2866), 'numpy.sum', 'np.sum', (['h[:, k]'], {}), '(h[:, k])\n', (2857, 2866), True, 'import numpy as np\n'), ((2895, 2928), 'numpy.sum', 'np.sum', (['(h[:, k, None] * X)'], {'axis': '(0)'}), '(h[:, k, None] * X, axis=0)\n', (2901, 2928), True, 'import numpy as np\n'), ((3005, 3044), 'numpy.matmul', 'np.matmul', (['diff.T', '(h[:, k, None] * diff)'], {}), '(diff.T, h[:, k, None] * diff)\n', (3014, 3044), True, 'import numpy as np\n'), ((3276, 3313), 'numpy.sum', 'np.sum', (['(diff @ inv_var * diff)'], {'axis': '(1)'}), '(diff @ inv_var * diff, axis=1)\n', (3282, 3313), True, 'import numpy as np\n'), ((3481, 3497), 'models.k_means.KMeans', 'KMeans', ([], {'k': 'self.k'}), '(k=self.k)\n', (3487, 3497), False, 'from models.k_means import KMeans\n'), ((1958, 1984), 'numpy.linalg.norm', 'np.linalg.norm', (['(h - prev_h)'], {}), '(h - prev_h)\n', (1972, 1984), True, 'import numpy as np\n'), ((3134, 3157), 'numpy.linalg.det', 'np.linalg.det', (['variance'], {}), '(variance)\n', (3147, 3157), True, 'import numpy as np\n')] |
''' 1. Module Import '''
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets
''' 2. 딥러닝 모델을 설계할 때 활용하는 장비 확인 '''
# (Section 2: pick the compute device -- use the GPU when CUDA is available.)
if torch.cuda.is_available():
    DEVICE = torch.device('cuda')
else:
    DEVICE = torch.device('cpu')
print('Using PyTorch version:', torch.__version__, ' Device:', DEVICE)
BATCH_SIZE = 32  # samples per mini-batch
EPOCHS = 10  # full passes over the training set
''' 3. FashionMNIST 데이터 다운로드 (Train set, Test set 분리하기) '''
# (Section 3: download FashionMNIST and build the train/test DataLoaders.
# Images arrive as [0, 1] float tensors via ToTensor.)
train_dataset = datasets.FashionMNIST(root="./data/FashionMNIST",
                                      train=True,
                                      download=True,
                                      transform=transforms.ToTensor())
test_dataset = datasets.FashionMNIST(root="./data/FashionMNIST",
                                     train=False,
                                     transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False)
''' 4. 데이터 확인하기 (1) '''
# (Section 4: print the shape and dtype of one training batch.)
for (X_train, y_train) in train_loader:
    print('X_train:', X_train.size(), 'type:', X_train.type())
    print('y_train:', y_train.size(), 'type:', y_train.type())
    break
''' 5. 데이터 확인하기 (2) '''
# (Section 5: show the first ten images of the batch with their labels.)
pltsize = 1
plt.figure(figsize=(10 * pltsize, pltsize))
for i in range(10):
    plt.subplot(1, 10, i + 1)
    plt.axis('off')
    plt.imshow(X_train[i, :, :, :].numpy().reshape(28, 28), cmap="gray_r")
    plt.title('Class: ' + str(y_train[i].item()))
''' 6. AutoEncoder (AE) 모델 설계하기 '''
class AE(nn.Module):
    """Fully-connected autoencoder for flattened 28x28 images.

    The encoder compresses a 784-d input through 512 -> 256 hidden layers
    down to a 32-d latent code; the decoder mirrors the path back to 784.
    """

    def __init__(self):
        super(AE, self).__init__()
        # Layer widths; the decoder is the exact mirror of the encoder.
        self.encoder = self._make_mlp([28 * 28, 512, 256, 32])
        self.decoder = self._make_mlp([32, 256, 512, 28 * 28])

    @staticmethod
    def _make_mlp(dims):
        """Linear layers joined by ReLU, with no activation after the last."""
        layers = []
        last = len(dims) - 2
        for idx, (d_in, d_out) in enumerate(zip(dims[:-1], dims[1:])):
            layers.append(nn.Linear(d_in, d_out))
            if idx < last:
                layers.append(nn.ReLU())
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(latent_code, reconstruction)`` for a (N, 784) batch."""
        code = self.encoder(x)
        return code, self.decoder(code)
''' 7. Optimizer, Objective Function 설정하기 '''
# (Section 7: instantiate the autoencoder on DEVICE, optimize with Adam,
# and score reconstructions with pixel-wise mean-squared error.)
model = AE().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()
print(model)
''' 8. AE 모델 학습을 진행하며 학습 데이터에 대한 모델 성능을 확인하는 함수 정의 '''
def train(model, train_loader, optimizer, log_interval):
    """Run one epoch of reconstruction training over ``train_loader``.

    Relies on the module-level ``criterion``, ``DEVICE`` and current
    ``epoch`` globals defined by the surrounding script.
    """
    model.train()
    for batch_idx, (image, _) in enumerate(train_loader):
        inputs = image.view(-1, 28 * 28).to(DEVICE)
        # An autoencoder reconstructs its own (flattened) input.
        target = inputs.view(-1, 28 * 28).to(DEVICE)
        optimizer.zero_grad()
        _, reconstruction = model(inputs)
        loss = criterion(reconstruction, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            seen = batch_idx * len(inputs)
            total = len(train_loader.dataset)
            pct = 100. * batch_idx / len(train_loader)
            print("Train Epoch: {} [{}/{} ({:.0f}%)]\tTrain Loss: {:.6f}".format(
                epoch, seen, total, pct, loss.item()))
''' 9. 학습되는 과정 속에서 검증 데이터에 대한 모델 성능을 확인하는 함수 정의 '''
def evaluate(model, test_loader):
    """Compute the mean reconstruction loss over the test set.

    Uses the module-level ``criterion`` and ``DEVICE`` globals.

    Returns a tuple ``(test_loss, real_image, gen_image)`` where the last
    two are lists of per-batch CPU tensors (inputs and reconstructions).
    """
    model.eval()
    total_loss = 0
    originals = []
    reconstructions = []
    with torch.no_grad():
        for image, _ in test_loader:
            flat = image.view(-1, 28 * 28).to(DEVICE)
            _, decoded = model(flat)
            total_loss += criterion(decoded, flat).item()
            originals.append(flat.to("cpu"))
            reconstructions.append(decoded.to("cpu"))
    total_loss /= len(test_loader.dataset)
    return total_loss, originals, reconstructions
''' 10. AutoEncoder 학습 실행하며 Test set의 Reconstruction Error 확인하기 '''
# (Section 10: train for EPOCHS epochs; after each epoch report the test
# reconstruction loss and plot originals (top row) against their
# reconstructions (bottom row) for the first test batch.)
for epoch in range(1, EPOCHS + 1):
    train(model, train_loader, optimizer, log_interval=200)
    test_loss, real_image, gen_image = evaluate(model, test_loader)
    print("\n[EPOCH: {}], \tTest Loss: {:.4f}".format(epoch, test_loss))
    f, a = plt.subplots(2, 10, figsize=(10, 4))
    for i in range(10):
        img = np.reshape(real_image[0][i], (28, 28))
        a[0][i].imshow(img, cmap="gray_r")
        a[0][i].set_xticks(())
        a[0][i].set_yticks(())
    for i in range(10):
        img = np.reshape(gen_image[0][i], (28, 28))
        a[1][i].imshow(img, cmap="gray_r")
        a[1][i].set_xticks(())
        a[1][i].set_yticks(())
    plt.show()
| [
"matplotlib.pyplot.subplot",
"torch.nn.MSELoss",
"matplotlib.pyplot.show",
"torch.nn.ReLU",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.axis",
"torch.nn.Linear",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"numpy.reshape",
"torch.device",
"torch.no_grad",
"matplotlib.pyplot.... | [((230, 255), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (253, 255), False, 'import torch\n'), ((933, 1024), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=BATCH_SIZE,\n shuffle=True)\n', (960, 1024), False, 'import torch\n'), ((1122, 1213), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'BATCH_SIZE', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=BATCH_SIZE,\n shuffle=False)\n', (1149, 1213), False, 'import torch\n'), ((1532, 1575), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10 * pltsize, pltsize)'}), '(figsize=(10 * pltsize, pltsize))\n', (1542, 1575), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2556), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2554, 2556), True, 'import torch.nn as nn\n'), ((270, 290), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (282, 290), False, 'import torch\n'), ((310, 329), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (322, 329), False, 'import torch\n'), ((1600, 1625), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(10)', '(i + 1)'], {}), '(1, 10, i + 1)\n', (1611, 1625), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1645), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1638, 1645), True, 'import matplotlib.pyplot as plt\n'), ((4266, 4302), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(10)'], {'figsize': '(10, 4)'}), '(2, 10, figsize=(10, 4))\n', (4278, 4302), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4679, 4681), True, 'import matplotlib.pyplot as plt\n'), ((708, 729), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (727, 729), False, 'from 
torchvision import transforms, datasets\n'), ((894, 915), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (913, 915), False, 'from torchvision import transforms, datasets\n'), ((3498, 3513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3511, 3513), False, 'import torch\n'), ((4341, 4379), 'numpy.reshape', 'np.reshape', (['real_image[0][i]', '(28, 28)'], {}), '(real_image[0][i], (28, 28))\n', (4351, 4379), True, 'import numpy as np\n'), ((4524, 4561), 'numpy.reshape', 'np.reshape', (['gen_image[0][i]', '(28, 28)'], {}), '(gen_image[0][i], (28, 28))\n', (4534, 4561), True, 'import numpy as np\n'), ((1942, 1965), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', '(512)'], {}), '(28 * 28, 512)\n', (1951, 1965), True, 'import torch.nn as nn\n'), ((1979, 1988), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1986, 1988), True, 'import torch.nn as nn\n'), ((2002, 2021), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (2011, 2021), True, 'import torch.nn as nn\n'), ((2035, 2044), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2042, 2044), True, 'import torch.nn as nn\n'), ((2058, 2076), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(32)'], {}), '(256, 32)\n', (2067, 2076), True, 'import torch.nn as nn\n'), ((2130, 2148), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(256)'], {}), '(32, 256)\n', (2139, 2148), True, 'import torch.nn as nn\n'), ((2162, 2171), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2169, 2171), True, 'import torch.nn as nn\n'), ((2185, 2204), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (2194, 2204), True, 'import torch.nn as nn\n'), ((2218, 2227), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2225, 2227), True, 'import torch.nn as nn\n'), ((2241, 2264), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(28 * 28)'], {}), '(512, 28 * 28)\n', (2250, 2264), True, 'import torch.nn as nn\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
from stable_baselines.common.vec_env import VecEnv
def traj_segment_generator(policy, env, horizon, reward_giver=None, gail=False, callback=None):
    """
    Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
    :param policy: (MLPPolicy) the policy
    :param env: (Gym Environment) the environment (multi-robot: exposes ``num_robot``)
    :param horizon: (int) the number of timesteps to run per batch
        (rounded up to a multiple of ``env.num_robot``)
    :param reward_giver: (TransitionClassifier) the reward predicter from obsevation and action
    :param gail: (bool) Whether we are using this generator for standard trpo or with gail
    :param callback: (BaseCallback) optional; all callback hooks are skipped when None
    :return: (dict) generator that returns a dict with the following keys:
        - observations: (np.ndarray) observations
        - rewards: (numpy float) rewards (if gail is used it is the predicted reward)
        - true_rewards: (numpy float) if gail is used it is the original reward
        - vpred: (numpy float) action logits
        - dones: (numpy bool) dones (is end of episode, used for logging)
        - episode_starts: (numpy bool)
            True if first timestep of an episode, used for GAE
        - actions: (np.ndarray) actions
        - ep_rets: (float) cumulated current episode reward
        - ep_lens: (int) the length of the current episode
        - ep_true_rets: (float) the real environment reward
        - continue_training: (bool) Whether to continue training
            or stop early (triggered by the callback)
    """
    # Check when using GAIL
    assert not (
        gail and reward_giver is None), "You must pass a reward giver when using GAIL"
    # NOTE(review): even with gail=True the rewards below come straight from
    # env.step(); the reward_giver is never applied in this multi-robot
    # variant -- confirm whether GAIL rewards should be wired in here.

    num_robot = env.num_robot
    # Round horizon up to a multiple of num_robot so each env step fills
    # exactly num_robot consecutive slots of the batch arrays.
    if horizon % num_robot != 0:
        horizon += (num_robot - horizon % num_robot)

    # Initialize state variables
    step = 0
    observation = env.reset()
    # not used, just so we have the datatype
    action = [env.action_space.sample() for _ in range(num_robot)]
    vpred = [0 for _ in range(num_robot)]
    info = [0 for _ in range(num_robot)]
    reward = [0 for _ in range(num_robot)]
    done = [False for _ in range(num_robot)]
    state = [policy.initial_state for _ in range(num_robot)]

    cur_ep_ret = 0  # return in current episode
    current_it_len = 0  # len of current iteration
    current_ep_len = 0  # len of current episode
    cur_ep_true_ret = 0
    ep_true_rets = []
    ep_rets = []  # returns of completed episodes in this segment
    ep_lens = []  # Episode lengths

    # Initialize history arrays
    observations = np.array([observation[0] for _ in range(horizon)])
    true_rewards = np.zeros(horizon, 'float32')
    rewards = np.zeros(horizon, 'float32')
    vpreds = np.zeros(horizon, 'float32')
    nextvpreds = np.zeros(horizon, 'float32')
    episode_starts = np.zeros(horizon, 'bool')
    dones = np.zeros(horizon, 'bool')
    actions = np.array([action[0] for _ in range(horizon)])
    # marks if we're on first timestep of an episode
    episode_start = [True for _ in range(num_robot)]

    # Fix: guard all callback hooks; the original called on_rollout_start /
    # on_rollout_end unconditionally but guarded on_step, so callback=None
    # crashed despite being accepted by the signature.
    if callback is not None:
        callback.on_rollout_start()

    while True:
        for j in range(num_robot):
            observation[j] = observation[j].reshape(-1, *observation[j].shape)
            act, vpred[j], state[j], info[j] = policy.step(
                observation[j], state[j], done[j])
            action[j] = act
        # Slight weirdness here because we need value function at time T
        # before returning segment [0, T-1] so we get the correct
        # terminal value
        if step > 0 and step % horizon == 0:
            # ``i`` still points at the first slot written on the previous
            # loop iteration, i.e. the last num_robot entries of the batch.
            last_vpred = 0.0
            for j in range(num_robot):
                nextvpreds[i+j] = last_vpred
            if callback is not None:
                callback.on_rollout_end()
            yield {
                "observations": observations,
                "rewards": rewards,
                "dones": dones,
                "episode_starts": episode_starts,
                "true_rewards": true_rewards,
                "vpred": vpreds,
                "nextvpreds": nextvpreds,
                "actions": actions,
                "ep_rets": ep_rets,
                "ep_lens": ep_lens,
                "ep_true_rets": ep_true_rets,
                "total_timestep": current_it_len,
                'continue_training': True
            }
            for j in range(num_robot):
                _, vpred[j], _, info[j] = policy.step(observation[j])
            # Be careful!!! if you change the downstream algorithm to aggregate
            # several of these batches, then be sure to do a deepcopy
            ep_rets = []
            ep_true_rets = []
            ep_lens = []
            # Reset current iteration length
            current_it_len = 0
            if callback is not None:
                callback.on_rollout_start()

        i = step % horizon
        for j in range(num_robot):
            observations[i+j] = observation[j]
            vpreds[i+j] = vpred[j][0]
            actions[i+j] = action[j][0]
            episode_starts[i+j] = episode_start[j]
            # Back-fill next-state values for the previous slots once we know
            # the episode did not restart.
            if (not episode_start[j]) and (i > 0):
                nextvpreds[i-num_robot+j] = vpred[j][0]

        # Fix: default to the raw (batch-stripped) actions so that non-Box
        # action spaces are forwarded to the env; the original initialized
        # clipped_action to zeros and only overwrote it for Box spaces,
        # silently discarding the policy's actions otherwise.
        clipped_action = [act_j[0] for act_j in action]
        # Clip the actions to avoid out of bound error
        if isinstance(env.action_space, gym.spaces.Box):
            for j in range(num_robot):
                clipped_action[j] = np.clip(
                    action[j], env.action_space.low, env.action_space.high)[0]

        observation, reward, done, info = env.step(clipped_action)
        true_reward = reward

        if callback is not None:
            if callback.on_step() is False:
                # We have to return everything so pytype does not complain
                yield {
                    "observations": observations,
                    "rewards": rewards,
                    "dones": dones,
                    "episode_starts": episode_starts,
                    "true_rewards": true_rewards,
                    "vpred": vpreds,
                    "nextvpreds": nextvpreds,
                    "actions": actions,
                    "ep_rets": ep_rets,
                    "ep_lens": ep_lens,
                    "ep_true_rets": ep_true_rets,
                    "total_timestep": current_it_len,
                    'continue_training': False
                }
                return

        for j in range(num_robot):
            rewards[i+j] = reward[j]
            true_rewards[i+j] = true_reward[j]
            dones[i+j] = done[j]
            episode_start[j] = done[j]
            cur_ep_ret += reward[j]
            cur_ep_true_ret += true_reward[j]
        current_it_len += 1
        current_ep_len += 1

        if np.sum(done) > 0:  # at least one True, then reset
            last_vpred = 0.0
            for j in range(num_robot):
                nextvpreds[i+j] = last_vpred
                # Retrieve unnormalized reward if using Monitor wrapper
                maybe_ep_info = info[j].get('episode')
                if maybe_ep_info is not None:
                    if not gail:
                        cur_ep_ret = maybe_ep_info['r']
                    cur_ep_true_ret = maybe_ep_info['r']
            ep_rets.append(cur_ep_ret / num_robot)
            ep_true_rets.append(cur_ep_true_ret / num_robot)
            ep_lens.append(current_ep_len)
            cur_ep_ret = 0
            cur_ep_true_ret = 0
            current_ep_len = 0
            if not isinstance(env, VecEnv):
                observation = env.reset()
        step += num_robot
| [
"numpy.zeros",
"numpy.sum",
"numpy.clip"
] | [((3209, 3237), 'numpy.zeros', 'np.zeros', (['horizon', '"""float32"""'], {}), "(horizon, 'float32')\n", (3217, 3237), True, 'import numpy as np\n'), ((3252, 3280), 'numpy.zeros', 'np.zeros', (['horizon', '"""float32"""'], {}), "(horizon, 'float32')\n", (3260, 3280), True, 'import numpy as np\n'), ((3294, 3322), 'numpy.zeros', 'np.zeros', (['horizon', '"""float32"""'], {}), "(horizon, 'float32')\n", (3302, 3322), True, 'import numpy as np\n'), ((3340, 3368), 'numpy.zeros', 'np.zeros', (['horizon', '"""float32"""'], {}), "(horizon, 'float32')\n", (3348, 3368), True, 'import numpy as np\n'), ((3390, 3415), 'numpy.zeros', 'np.zeros', (['horizon', '"""bool"""'], {}), "(horizon, 'bool')\n", (3398, 3415), True, 'import numpy as np\n'), ((3428, 3453), 'numpy.zeros', 'np.zeros', (['horizon', '"""bool"""'], {}), "(horizon, 'bool')\n", (3436, 3453), True, 'import numpy as np\n'), ((7538, 7550), 'numpy.sum', 'np.sum', (['done'], {}), '(done)\n', (7544, 7550), True, 'import numpy as np\n'), ((5894, 5957), 'numpy.clip', 'np.clip', (['action[j]', 'env.action_space.low', 'env.action_space.high'], {}), '(action[j], env.action_space.low, env.action_space.high)\n', (5901, 5957), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# ## 卷积神经网络(Convolutional Neural Network, CNN)
#
# ## 项目:实现一个狗品种识别算法App
#
# 在这个notebook文件中,有些模板代码已经提供给你,但你还需要实现更多的功能来完成这个项目。除非有明确要求,你无须修改任何已给出的代码。以**'(练习)'**开始的标题表示接下来的代码部分中有你需要实现的功能。这些部分都配有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示。
#
# 除了实现代码外,你还**需要**回答一些与项目及代码相关的问题。每个需要回答的问题都会以 **'问题 X'** 标记。请仔细阅读每个问题,并且在问题后的 **'回答'** 部分写出完整的答案。我们将根据 你对问题的回答 和 撰写代码实现的功能 来对你提交的项目进行评分。
#
# >**提示:**Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。
#
# 项目中显示为_选做_的部分可以帮助你的项目脱颖而出,而不是仅仅达到通过的最低要求。如果你决定追求更高的挑战,请在此 notebook 中完成_选做_部分的代码。
#
# ---
#
# ### 让我们开始吧
# 在这个notebook中,你将迈出第一步,来开发可以作为移动端或 Web应用程序一部分的算法。在这个项目的最后,你的程序将能够把用户提供的任何一个图像作为输入。如果可以从图像中检测到一只狗,它会输出对狗品种的预测。如果图像中是一个人脸,它会预测一个与其最相似的狗的种类。下面这张图展示了完成项目后可能的输出结果。(……实际上我们希望每个学生的输出结果不相同!)
#
# 
#
# 在现实世界中,你需要拼凑一系列的模型来完成不同的任务;举个例子,用来预测狗种类的算法会与预测人类的算法不同。在做项目的过程中,你可能会遇到不少失败的预测,因为并不存在完美的算法和模型。你最终提交的不完美的解决方案也一定会给你带来一个有趣的学习经验!
#
# ### 项目内容
#
# 我们将这个notebook分为不同的步骤,你可以使用下面的链接来浏览此notebook。
#
# * [Step 0](#step0): 导入数据集
# * [Step 1](#step1): 检测人脸
# * [Step 2](#step2): 检测狗狗
# * [Step 3](#step3): 从头创建一个CNN来分类狗品种
# * [Step 4](#step4): 使用一个CNN来区分狗的品种(使用迁移学习)
# * [Step 5](#step5): 建立一个CNN来分类狗的品种(使用迁移学习)
# * [Step 6](#step6): 完成你的算法
# * [Step 7](#step7): 测试你的算法
#
# 在该项目中包含了如下的问题:
#
# * [问题 1](#question1)
# * [问题 2](#question2)
# * [问题 3](#question3)
# * [问题 4](#question4)
# * [问题 5](#question5)
# * [问题 6](#question6)
# * [问题 7](#question7)
# * [问题 8](#question8)
# * [问题 9](#question9)
# * [问题 10](#question10)
# * [问题 11](#question11)
#
#
# ---
# <a id='step0'></a>
# ## 步骤 0: 导入数据集
#
# ### 导入狗数据集
# 在下方的代码单元(cell)中,我们导入了一个狗图像的数据集。我们使用 scikit-learn 库中的 `load_files` 函数来获取一些变量:
# - `train_files`, `valid_files`, `test_files` - 包含图像的文件路径的numpy数组
# - `train_targets`, `valid_targets`, `test_targets` - 包含独热编码分类标签的numpy数组
# - `dog_names` - 由字符串构成的与标签相对应的狗的种类
# In[1]:
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path, num_classes=133):
    """Load an image-folder dataset and return file paths with one-hot labels.

    Parameters
    ----------
    path : str
        Directory whose sub-folders are the class names (the layout expected
        by scikit-learn's ``load_files``).
    num_classes : int, optional
        Total number of classes for the one-hot encoding.  Defaults to 133
        (the number of dog breeds in this project) so existing callers are
        unaffected.

    Returns
    -------
    tuple of (numpy.ndarray, numpy.ndarray)
        Image file paths and the matching one-hot target matrix.
    """
    data = load_files(path)
    dog_files = np.array(data['filenames'])
    # One-hot encode the integer class indices so they match the width of the
    # softmax output layer.
    dog_targets = np_utils.to_categorical(np.array(data['target']), num_classes)
    return dog_files, dog_targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('/data/dog_images/train')
valid_files, valid_targets = load_dataset('/data/dog_images/valid')
test_files, test_targets = load_dataset('/data/dog_images/test')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("/data/dog_images/train/*/"))]
# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
# ### 导入人脸数据集
#
# 在下方的代码单元中,我们导入人脸图像数据集,文件所在路径存储在名为 `human_files` 的 numpy 数组。
# In[2]:
import random
random.seed(8675309)
# 加载打乱后的人脸数据集的文件名
human_files = np.array(glob("/data/lfw/*/*"))
random.shuffle(human_files)
# 打印数据集的数据量
print('There are %d total human images.' % len(human_files))
# ---
# <a id='step1'></a>
# ## 步骤1:检测人脸
#
# 我们将使用 OpenCV 中的 [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) 来检测图像中的人脸。OpenCV 提供了很多预训练的人脸检测模型,它们以XML文件保存在 [github](https://github.com/opencv/opencv/tree/master/data/haarcascades)。我们已经下载了其中一个检测模型,并且把它存储在 `haarcascades` 的目录中。
#
# 在如下代码单元中,我们将演示如何使用这个检测模型在样本图像中找到人脸。
# In[3]:
import cv2
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# 提取预训练的人脸检测模型
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
print("提取预训练的人脸检测模型\n",face_cascade)
# 加载彩色(通道顺序为BGR)图像
img = cv2.imread(human_files[3])
print("加载彩色\n",img)
# 将BGR图像进行灰度处理
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print("进行灰度处理\n",gray)
# 在图像中找出脸
faces = face_cascade.detectMultiScale(gray)
print("在图像中找出脸\n",faces)
# 打印图像中检测到的脸的个数
print('Number of faces detected:', len(faces))
# 获取每一个所检测到的脸的识别框
for (x,y,w,h) in faces:
# 在人脸图像中绘制出识别框
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# 将BGR图像转变为RGB图像以打印
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# 展示含有识别框的图像
plt.imshow(cv_rgb)
plt.show()
# 在使用任何一个检测模型之前,将图像转换为灰度图是常用过程。`detectMultiScale` 函数使用储存在 `face_cascade` 中的的数据,对输入的灰度图像进行分类。
#
# 在上方的代码中,`faces` 以 numpy 数组的形式,保存了识别到的面部信息。它其中每一行表示一个被检测到的脸,该数据包括如下四个信息:前两个元素 `x`、`y` 代表识别框左上角的 x 和 y 坐标(参照上图,注意 y 坐标的方向和我们默认的方向不同);后两个元素代表识别框在 x 和 y 轴两个方向延伸的长度 `w` 和 `d`。
#
# ### 写一个人脸识别器
#
# 我们可以将这个程序封装为一个函数。该函数的输入为人脸图像的**路径**,当图像中包含人脸时,该函数返回 `True`,反之返回 `False`。该函数定义如下所示。
# In[4]:
# 如果img_path路径表示的图像检测到了脸,返回"True"
def face_detector(img_path):
    """Return True if at least one human face is detected in the image.

    Parameters
    ----------
    img_path : str
        Path of the image file to inspect.

    Returns
    -------
    bool
        True when the Haar cascade finds one or more faces, False otherwise —
        including when the file cannot be read as an image.
    """
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread returns None for a missing or corrupt file; treat that as
        # "no face" instead of crashing inside cvtColor.
        return False
    # The cascade classifier operates on single-channel (grayscale) images.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
# ### **【练习】** 评估人脸检测模型
#
# ---
#
# <a id='question1'></a>
# ### __问题 1:__
#
# 在下方的代码块中,使用 `face_detector` 函数,计算:
#
# - `human_files` 的前100张图像中,能够检测到**人脸**的图像占比多少?
# - `dog_files` 的前100张图像中,能够检测到**人脸**的图像占比多少?
#
# 理想情况下,人图像中检测到人脸的概率应当为100%,而狗图像中检测到人脸的概率应该为0%。你会发现我们的算法并非完美,但结果仍然是可以接受的。我们从每个数据集中提取前100个图像的文件路径,并将它们存储在`human_files_short`和`dog_files_short`中。
# In[5]:
human_files_short = human_files[:100]
dog_files_short = train_files[:100]
## Do not modify the code above.

# Collect the images in which the Haar cascade reports a face.  The detector's
# boolean result is used directly (no "== True") and comprehensions replace
# the manual append loops.
humans_files_human = [human for human in human_files_short if face_detector(human)]
dogs_files_human = [dog for dog in dog_files_short if face_detector(dog)]

print("在100张人脸图像中检测到人脸的百分比:{:.2%}".format((len(humans_files_human)/len(human_files_short))))
print("在100张狗图像中检测到人的百分比:{:.2%}".format((len(dogs_files_human)/len(dog_files_short))))
# In[6]:
def detect(detector, files):
    """Return the fraction of *files* for which *detector* is truthy."""
    results = [detector(path) for path in files]
    # Booleans average to the hit rate (True == 1, False == 0).
    return np.mean(results)
print("human:{:.2%}".format(detect(face_detector,human_files_short)))
print("dog:{:.2%}".format(detect(face_detector,dog_files_short)))
# ---
#
# <a id='question2'></a>
#
# ### __问题 2:__
#
# 就算法而言,该算法成功与否的关键在于,用户能否提供含有清晰面部特征的人脸图像。
# 那么你认为,这样的要求在实际使用中对用户合理吗?如果你觉得不合理,你能否想到一个方法,即使图像中并没有清晰的面部特征,也能够检测到人脸?
#
# __回答:__不合理,例如用手机拍照和相机拍照就会造成图片脸部的模糊
# 我们可以用图片增强的方法,把清晰的照片像素调小,然后对不清晰的图片进行增强
#
#
# ---
#
# <a id='Selection1'></a>
# ### 选做:
#
# 我们建议在你的算法中使用opencv的人脸检测模型去检测人类图像,不过你可以自由地探索其他的方法,尤其是尝试使用深度学习来解决它:)。请用下方的代码单元来设计和测试你的面部监测算法。如果你决定完成这个_选做_任务,你需要报告算法在每一个数据集上的表现。
# ## (选做) TODO: 报告另一个面部检测算法在LFW数据集上的表现
# ### 你可以随意使用所需的代码单元数
# # 提取预训练的人脸检测模型
# image = cv2.imread('_MACOS/lfw')
# print("提取预训练的人脸检测模型\n",face_cascade)
# winSize = (20,20)
# blockSize = (10,10)
# blockStride = (5,5)
# cellSize = (10,10)
# nbins = 9
# derivAperture = 1
# winSigma = -1.
# histogramNormType = 0
# L2HysThreshold = 0.2
# gammaCorrection = 1
# nlevels = 64
# signedGradients =
# hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,
# winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels)
#
#
# hist = hog.compute(image)
# ---
# <a id='step2'></a>
#
# ## 步骤 2: 检测狗狗
#
# 在这个部分中,我们使用预训练的 [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) 模型去检测图像中的狗。下方的第一行代码就是下载了 ResNet-50 模型的网络结构参数,以及基于 [ImageNet](http://www.image-net.org/) 数据集的预训练权重。
#
# ImageNet 这目前一个非常流行的数据集,常被用来测试图像分类等计算机视觉任务相关的算法。它包含超过一千万个 URL,每一个都链接到 [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a) 中所对应的一个物体的图像。任给输入一个图像,该 ResNet-50 模型会返回一个对图像中物体的预测结果。
# In[7]:
from keras.applications.resnet50 import ResNet50
# 定义ResNet50模型
ResNet50_model = ResNet50(weights='imagenet')
# ### 数据预处理
#
# - 在使用 TensorFlow 作为后端的时候,在 Keras 中,CNN 的输入是一个4维数组(也被称作4维张量),它的各维度尺寸为 `(nb_samples, rows, columns, channels)`。其中 `nb_samples` 表示图像(或者样本)的总数,`rows`, `columns`, 和 `channels` 分别表示图像的行数、列数和通道数。
#
#
# - 下方的 `path_to_tensor` 函数实现如下将彩色图像的字符串型的文件路径作为输入,返回一个4维张量,作为 Keras CNN 输入。因为我们的输入图像是彩色图像,因此它们具有三个通道( `channels` 为 `3`)。
# 1. 该函数首先读取一张图像,然后将其缩放为 224×224 的图像。
# 2. 随后,该图像被调整为具有4个维度的张量。
# 3. 对于任一输入图像,最后返回的张量的维度是:`(1, 224, 224, 3)`。
#
#
# - `paths_to_tensor` 函数将图像路径的字符串组成的 numpy 数组作为输入,并返回一个4维张量,各维度尺寸为 `(nb_samples, 224, 224, 3)`。 在这里,`nb_samples`是提供的图像路径的数据中的样本数量或图像数量。你也可以将 `nb_samples` 理解为数据集中3维张量的个数(每个3维张量表示一个不同的图像。
# In[8]:
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image file as a (1, 224, 224, 3) tensor for Keras CNNs."""
    # Read the image with PIL, resized to the 224x224 input the models expect.
    loaded = image.load_img(img_path, target_size=(224, 224))
    # Convert to a (224, 224, 3) array, then prepend the batch dimension.
    array_3d = image.img_to_array(loaded)
    return np.expand_dims(array_3d, axis=0)
def paths_to_tensor(img_paths):
    """Stack per-image tensors into one (n_samples, 224, 224, 3) array."""
    tensors = []
    # tqdm renders a progress bar while every image is loaded and converted.
    for img_path in tqdm(img_paths):
        tensors.append(path_to_tensor(img_path))
    return np.vstack(tensors)
# ### 基于 ResNet-50 架构进行预测
#
# 对于通过上述步骤得到的四维张量,在把它们输入到 ResNet-50 网络、或 Keras 中其他类似的预训练模型之前,还需要进行一些额外的处理:
# 1. 首先,这些图像的通道顺序为 RGB,我们需要重排他们的通道顺序为 BGR。
# 2. 其次,预训练模型的输入都进行了额外的归一化过程。因此我们在这里也要对这些张量进行归一化,即对所有图像所有像素都减去像素均值 `[103.939, 116.779, 123.68]`(以 RGB 模式表示,根据所有的 ImageNet 图像算出)。
#
# 导入的 `preprocess_input` 函数实现了这些功能。如果你对此很感兴趣,可以在 [这里](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py) 查看 `preprocess_input`的代码。
#
#
# 在实现了图像处理的部分之后,我们就可以使用模型来进行预测。这一步通过 `predict` 方法来实现,它返回一个向量,向量的第 i 个元素表示该图像属于第 i 个 ImageNet 类别的概率。这通过如下的 `ResNet50_predict_labels` 函数实现。
#
# 通过对预测出的向量取用 argmax 函数(找到有最大概率值的下标序号),我们可以得到一个整数,即模型预测到的物体的类别。进而根据这个 [清单](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a),我们能够知道这具体是哪个品种的狗狗。
#
# In[9]:
from keras.applications.resnet50 import preprocess_input, decode_predictions
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index ResNet-50 predicts for the image."""
    # Convert the file to a 4-D tensor and apply ResNet-50's input
    # normalisation (BGR reordering and mean subtraction).
    batch = preprocess_input(path_to_tensor(img_path))
    # predict() yields one probability vector; argmax picks the top class.
    scores = ResNet50_model.predict(batch)
    return np.argmax(scores)
# ### 完成狗检测模型
#
#
# 在研究该 [清单](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a) 的时候,你会注意到,狗类别对应的序号为151-268。因此,在检查预训练模型判断图像是否包含狗的时候,我们只需要检查如上的 `ResNet50_predict_labels` 函数是否返回一个介于151和268之间(包含区间端点)的值。
#
# 我们通过这些想法来完成下方的 `dog_detector` 函数,如果从图像中检测到狗就返回 `True`,否则返回 `False`。
# In[10]:
def dog_detector(img_path):
    """Return True if ResNet-50 classifies the image as a dog.

    ImageNet class indices 151-268 (inclusive) are all dog breeds, so the
    prediction only needs to fall inside that range.
    """
    prediction = ResNet50_predict_labels(img_path)
    # A chained comparison reads more naturally than the bitwise '&' of two
    # comparison results used previously.
    return 151 <= prediction <= 268
# ### 【作业】评估狗狗检测模型
#
# ---
#
# <a id='question3'></a>
# ### __问题 3:__
#
# 在下方的代码块中,使用 `dog_detector` 函数,计算:
#
# - `human_files_short`中图像检测到狗狗的百分比?
# - `dog_files_short`中图像检测到狗狗的百分比?
# In[11]:
### TODO: test dog_detector's performance on human_files_short and dog_files_short
# Collect the images in which ResNet-50 reports a dog.  The detector's boolean
# result is used directly (no "== True") and comprehensions replace the
# manual append loops.
human_files_dog = [img for img in human_files_short if dog_detector(img)]
dog_files_dog = [img for img in dog_files_short if dog_detector(img)]

print("在100张人脸图像中检测到狗狗的百分比:{:.2%}".format((len(human_files_dog)/len(human_files_short))))
print("在100张狗图像中检测到狗狗的百分比:{:.2%}".format((len(dog_files_dog)/len(dog_files_short))))
# __问题3回答__:
# 1.在100张人脸图像中检测到狗狗的百分比:0%
# 2.在100张狗图像中检测到狗狗的百分比:100%
# ---
#
# <a id='step3'></a>
#
# ## 步骤 3: 从头开始创建一个CNN来分类狗品种
#
#
# 现在我们已经实现了一个函数,能够在图像中识别人类及狗狗。但我们需要更进一步的方法,来对狗的类别进行识别。在这一步中,你需要实现一个卷积神经网络来对狗的品种进行分类。你需要__从头实现__你的卷积神经网络(在这一阶段,你还不能使用迁移学习),并且你需要达到超过1%的测试集准确率。在本项目的步骤五种,你还有机会使用迁移学习来实现一个准确率大大提高的模型。
#
# 在添加卷积层的时候,注意不要加上太多的(可训练的)层。更多的参数意味着更长的训练时间,也就是说你更可能需要一个 GPU 来加速训练过程。万幸的是,Keras 提供了能够轻松预测每次迭代(epoch)花费时间所需的函数。你可以据此推断你算法所需的训练时间。
#
# 值得注意的是,对狗的图像进行分类是一项极具挑战性的任务。因为即便是一个正常人,也很难区分布列塔尼犬和威尔士史宾格犬。
#
#
# 布列塔尼犬(Brittany) | 威尔士史宾格犬(Welsh Springer Spaniel)
# - | -
# <img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
#
# 不难发现其他的狗品种会有很小的类间差别(比如金毛寻回犬和美国水猎犬)。
#
#
# 金毛寻回犬(Curly-Coated Retriever) | 美国水猎犬(American Water Spaniel)
# - | -
# <img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
#
# 同样,拉布拉多犬(labradors)有黄色、棕色和黑色这三种。那么你设计的基于视觉的算法将不得不克服这种较高的类间差别,以达到能够将这些不同颜色的同类狗分到同一个品种中。
#
# 黄色拉布拉多犬(Yellow Labrador) | 棕色拉布拉多犬(Chocolate Labrador) | 黑色拉布拉多犬(Black Labrador)
# - | -
# <img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
#
# 我们也提到了随机分类将得到一个非常低的结果:不考虑品种略有失衡的影响,随机猜测到正确品种的概率是1/133,相对应的准确率是低于1%的。
#
# 请记住,在深度学习领域,实践远远高于理论。大量尝试不同的框架吧,相信你的直觉!当然,玩得开心!
#
#
# ### 数据预处理
#
#
# 通过对每张图像的像素值除以255,我们对图像实现了归一化处理。
# In[12]:
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Keras中的数据预处理过程
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
# In[13]:
print(train_tensors.shape)
# ### 【练习】模型架构
#
#
# 创建一个卷积神经网络来对狗品种进行分类。在你代码块的最后,执行 `model.summary()` 来输出你模型的总结信息。
#
# 我们已经帮你导入了一些所需的 Python 库,如有需要你可以自行导入。如果你在过程中遇到了困难,如下是给你的一点小提示——该模型能够在5个 epoch 内取得超过1%的测试准确率,并且能在CPU上很快地训练。
#
# 
# ---
#
# <a id='question4'></a>
#
# ### __问题 4:__
#
# 在下方的代码块中尝试使用 Keras 搭建卷积网络的架构,并回答相关的问题。
#
# 1. 你可以尝试自己搭建一个卷积网络的模型,那么你需要回答你搭建卷积网络的具体步骤(用了哪些层)以及为什么这样搭建。
# 2. 你也可以根据上图提示的步骤搭建卷积网络,那么请说明为何如上的架构能够在该问题上取得很好的表现。
#
# 1.卷积层和池化层的作用分别是什么?
#
# 2.为何在卷积层使用relu激活函数?
#
# 3.为何在输出层使用softmax激活函数?
#
# 4.为何设置了三层卷积层而不是更少或更多?
#
# 5.为何卷积核的数量是递增的?
#
#
# __回答:__
# 1.卷积层的作用是全连接层的简化,用局部视野来提取特征,把弱影响直接抹到零影响,同时还保留了空间信息,大大减少参数。
# 
# http://www.cnblogs.com/ymjyqsx/p/9451739.html
# 池化层的作用是将宽和高减少一半,同时将深度乘以2倍
# 目的是减少参数
# __回答:__
# 2.卷积层使用relu激活函数
# (1)relu激活函数计算量小,而sigmoid函数计算量大
# (2)relu激活函数保持着网络稀疏性,减少参数的相关性
# (3)sigmoid激活函数会造成梯度消失,从而造成信息丢失。
# 
# https://blog.csdn.net/qq_34638161/article/details/81902989
# __回答:__
# 3.softmax激活函数是转成概率问题,
# 假如是个二分类问题,但是a和b数值相近,用softmax进行处理,a和b转换成概率问题,那么就好区分了
# https://www.cnblogs.com/zhuhou/p/7395752.html
# __回答:__
# 4. 通过过滤器提取每部分的特征,从而到第三层提取更大的特征
# 
# https://www.learnopencv.com/image-classification-using-convolutional-neural-networks-in-keras/
# __回答:__
# 卷积核的数量是递增的,三个通道里最后一个通道是深度,也就是过滤器的数量,通过池化层的影响,从而使卷积核的数量递增。
# In[14]:
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,Activation
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.layers import BatchNormalization
model = Sequential()
### TODO: define your network architecture
# Three Conv2D/MaxPooling2D stages double the filter count (16 -> 32 -> 64)
# while halving the spatial resolution, followed by global average pooling
# and a 133-way softmax head (one unit per dog breed).
model.add(Conv2D(filters=16, kernel_size=(2,2),data_format="channels_last", padding='valid',input_shape=(224,224,3),activation='relu'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=32, kernel_size=(2,2),data_format="channels_last",strides=1, padding='valid'))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=64, kernel_size=(2,2),data_format="channels_last",strides=1, padding='valid'))
model.add(MaxPooling2D(pool_size=2, strides=2))
# Global average pooling collapses each feature map to one value, avoiding a
# large Flatten + Dense parameter count.
model.add(GlobalAveragePooling2D())
model.add(Dense(133))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.summary()
# In[15]:
##编译模型
model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])
# ---
# ## 【练习】训练模型
#
#
# ---
#
# <a id='question5'></a>
#
# ### __问题 5:__
#
# 在下方代码单元训练模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
#
# 可选题:你也可以对训练集进行 [数据增强](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html),来优化模型的表现。
#
#
# In[16]:
from keras.preprocessing.image import ImageDataGenerator
#创造数据增强
datagen_train = ImageDataGenerator(width_shift_range = 0.1,height_shift_range = 0.1, horizontal_flip=True)
#编译
datagen_train.fit(train_tensors)
datagen_train.flow(train_tensors,batch_size = 20)
# In[17]:
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
### TODO: 设置训练模型的epochs的数量
epochs = 10
### 不要修改下方代码
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=30, callbacks=[checkpointer], verbose=1)
early_stopping=EarlyStopping(monitor='val_loss',patience=5, verbose=2)
# In[18]:
## 加载具有最好验证loss的模型
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
# ### 测试模型
#
# 在狗图像的测试数据集上试用你的模型。确保测试准确率大于1%。
# In[19]:
# 获取测试数据集中每一个图像所预测的狗品种的index
dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
# 报告测试准确率
test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ---
# <a id='step4'></a>
# ## 步骤 4: 使用一个CNN来区分狗的品种
#
#
# 使用 迁移学习(Transfer Learning)的方法,能帮助我们在不损失准确率的情况下大大减少训练时间。在以下步骤中,你可以尝试使用迁移学习来训练你自己的CNN。
#
# ### 得到从图像中提取的特征向量(Bottleneck Features)
# In[20]:
bottleneck_features = np.load('/data/bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
# ### 模型架构
#
# 该模型使用预训练的 VGG-16 模型作为固定的图像特征提取器,其中 VGG-16 最后一层卷积层的输出被直接输入到我们的模型。我们只需要添加一个全局平均池化层以及一个全连接层,其中全连接层使用 softmax 激活函数,对每一个狗的种类都包含一个节点。
# In[21]:
# Transfer-learning head: global average pooling over the frozen VGG16
# bottleneck features, then a 133-way softmax (one unit per dog breed).
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))
VGG16_model.summary()
# In[22]:
## 编译模型
VGG16_model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# In[23]:
## 训练模型
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_targets,
validation_data=(valid_VGG16, valid_targets),
epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
# In[24]:
## 加载具有最好验证loss的模型
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
# ### 测试模型
# 现在,我们可以测试此CNN在狗图像测试数据集中识别品种的效果如何。我们在下方打印出测试准确率。
# In[25]:
# 获取测试数据集中每一个图像所预测的狗品种的index
VGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]
# 报告测试准确率
test_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ### 使用模型预测狗的品种
# In[26]:
from extract_bottleneck_features import *
def VGG16_predict_breed(img_path):
    """Return the dog-breed name the VGG16 transfer model predicts for the image."""
    # Run the image through the frozen VGG16 base to obtain bottleneck features.
    features = extract_VGG16(path_to_tensor(img_path))
    # The custom head turns the features into one probability per breed.
    probabilities = VGG16_model.predict(features)
    best_index = np.argmax(probabilities)
    return dog_names[best_index]
# ---
# <a id='step5'></a>
# ## 步骤 5: 建立一个CNN来分类狗的品种(使用迁移学习)
#
# 现在你将使用迁移学习来建立一个CNN,从而可以从图像中识别狗的品种。你的 CNN 在测试集上的准确率必须至少达到60%。
#
# 在步骤4中,我们使用了迁移学习来创建一个使用基于 VGG-16 提取的特征向量来搭建一个 CNN。在本部分内容中,你必须使用另一个预训练模型来搭建一个 CNN。为了让这个任务更易实现,我们已经预先对目前 keras 中可用的几种网络进行了预训练:
#
# - [VGG-19](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG19Data.npz) bottleneck features
# - [ResNet-50](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz) bottleneck features
# - [Inception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz) bottleneck features
# - [Xception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogXceptionData.npz) bottleneck features
#
# 这些文件被命名为为:
#
# Dog{network}Data.npz
#
# 其中 `{network}` 可以是 `VGG19`、`Resnet50`、`InceptionV3` 或 `Xception` 中的一个。选择上方网络架构中的一个,他们已经保存在目录 `/data/bottleneck_features/` 中。
#
#
# ### 【练习】获取模型的特征向量
#
# 在下方代码块中,通过运行下方代码提取训练、测试与验证集相对应的bottleneck特征。
#
# bottleneck_features = np.load('/data/bottleneck_features/Dog{network}Data.npz')
# train_{network} = bottleneck_features['train']
# valid_{network} = bottleneck_features['valid']
# test_{network} = bottleneck_features['test']
# In[27]:
### TODO: 从另一个预训练的CNN获取bottleneck特征
bottleneck_features = np.load('/data/bottleneck_features/DogXceptionData.npz')
train_Xception = bottleneck_features['train']
valid_Xception = bottleneck_features['valid']
test_Xception = bottleneck_features['test']
# In[28]:
print(train_Xception.shape)
# ### 【练习】模型架构
#
# 建立一个CNN来分类狗品种。在你的代码单元块的最后,通过运行如下代码输出网络的结构:
#
# <your model's name>.summary()
#
# ---
#
# <a id='question6'></a>
#
# ### __问题 6:__
#
#
# 在下方的代码块中尝试使用 Keras 搭建最终的网络架构,并回答你实现最终 CNN 架构的步骤与每一步的作用,并描述你在迁移学习过程中,使用该网络架构的原因。
#
# 为什么相比普通的CNN,迁移学习可以取得更好的效果?
#
# 为什么第三步中的尝试没有迁移学习的效果好?
#
#
#
#
#
# __回答:__
# 1.相比普通的cnn,迁移学习能用模型有的初始权重训练模型,再用剩余层来训练模型,从而预测品种。
# https://blog.csdn.net/u010159842/article/details/79202107
# 2.第三步是因为自己建的网络,从而得到的权重并不是数据集训练过的权重,且Xception模型很好
# 
# https://cv-tricks.com/cnn/understand-resnet-alexnet-vgg-inception/
# In[29]:
### TODO: define your architecture
# Transfer-learning head: global average pooling over the frozen Xception
# bottleneck features, then a 133-way softmax (one unit per dog breed).
Xception_model = Sequential()
Xception_model.add(GlobalAveragePooling2D(input_shape=train_Xception.shape[1:]))
Xception_model.add(Dense(133,activation = 'softmax'))
Xception_model.summary()
# In[30]:
### TODO: 编译模型
Xception_model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# ---
#
# ### 【练习】训练模型
#
# <a id='question7'></a>
#
# ### __问题 7:__
#
# 在下方代码单元中训练你的模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
#
# 当然,你也可以对训练集进行 [数据增强](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html) 以优化模型的表现,不过这不是必须的步骤。
#
# In[31]:
#创造数据增强
datagen_train_1 = ImageDataGenerator(width_shift_range = 0.1,height_shift_range = 0.1, horizontal_flip=True)
#编译
datagen_train_1.fit(train_Xception)
datagen_train_1.flow(train_Xception,batch_size = 20)
# In[32]:
### TODO: 训练模型
checkpointer = ModelCheckpoint(filepath ='models_re/weights.best.train_Xception.hdf5',
verbose = 2,save_best_only=True)
Xception_model.fit(train_Xception,train_targets,
validation_data=(valid_Xception,valid_targets),
epochs = 20 ,batch_size = 10,callbacks =[checkpointer],verbose = 1)
early_stopping=EarlyStopping(monitor='val_loss',patience=5, verbose=2)
# In[35]:
### TODO: 加载具有最佳验证loss的模型权重
Xception_model.load_weights('models_re/weights.best.train_Xception.hdf5')
# ---
#
# ### 【练习】测试模型
#
# <a id='question8'></a>
#
# ### __问题 8:__
#
# 在狗图像的测试数据集上试用你的模型。确保测试准确率大于60%。
# In[41]:
### TODO: 在测试集上计算分类准确率
# 获取测试数据集中每一个图像所预测的狗品种的index
Xception_predictions = [np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0))) for feature in test_Xception]
# 报告测试准确率
test_accuracy = 100*np.sum(np.array(Xception_predictions)==np.argmax(test_targets, axis=1))/len(Xception_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ---
#
# ### 【练习】使用模型测试狗的品种
#
#
# 实现一个函数,它的输入为图像路径,功能为预测对应图像的类别,输出为你模型预测出的狗类别(`Affenpinscher`, `Afghan_hound` 等)。
#
# 与步骤5中的模拟函数类似,你的函数应当包含如下三个步骤:
#
# 1. 根据选定的模型载入图像特征(bottleneck features)
# 2. 将图像特征输输入到你的模型中,并返回预测向量。注意,在该向量上使用 argmax 函数可以返回狗种类的序号。
# 3. 使用在步骤0中定义的 `dog_names` 数组来返回对应的狗种类名称。
#
# 提取图像特征过程中使用到的函数可以在 `extract_bottleneck_features.py` 中找到。同时,他们应已在之前的代码块中被导入。根据你选定的 CNN 网络,你可以使用 `extract_{network}` 函数来获得对应的图像特征,其中 `{network}` 代表 `VGG19`, `Resnet50`, `InceptionV3`, 或 `Xception` 中的一个。
#
# ---
#
# <a id='question9'></a>
#
# ### __问题 9:__
# In[42]:
### TODO: 写一个函数,该函数将图像的路径作为输入
### 然后返回此模型所预测的狗的品种
def Xception_predict_bred(img_path):
    """Return the dog-breed name predicted by the Xception transfer model."""
    # Bottleneck features from the pre-trained Xception base.
    features = extract_Xception(path_to_tensor(img_path))
    # One softmax probability per breed from the custom classification head.
    probabilities = Xception_model.predict(features)
    return dog_names[np.argmax(probabilities)]
# ---
#
# <a id='step6'></a>
# ## 步骤 6: 完成你的算法
#
#
#
# 实现一个算法,它的输入为图像的路径,它能够区分图像是否包含一个人、狗或两者都不包含,然后:
#
# - 如果从图像中检测到一只__狗__,返回被预测的品种。
# - 如果从图像中检测到__人__,返回最相像的狗品种。
# - 如果两者都不能在图像中检测到,输出错误提示。
#
# 我们非常欢迎你来自己编写检测图像中人类与狗的函数,你可以随意地使用上方完成的 `face_detector` 和 `dog_detector` 函数。你__需要__在步骤5使用你的CNN来预测狗品种。
#
# 下面提供了算法的示例输出,但你可以自由地设计自己的模型!
#
# 
#
#
#
#
# <a id='question10'></a>
#
# ### __问题 10:__
#
# 在下方代码块中完成你的代码。
#
# ---
#
# In[52]:
### TODO: 设计你的算法
### 自由地使用所需的代码单元数吧
def predict_dog_or_not(img_path):
    """Classify an image as dog, human, or neither and print the result.

    - Dog detected: prints a greeting and the predicted breed.
    - Human face detected: prints a greeting and the most resembling breed.
    - Neither: prints an error message.

    An image containing both a dog and a face triggers both messages,
    matching the original behaviour.
    """
    # Each detector runs a full model inference; evaluate each exactly once
    # instead of up to twice.
    is_dog = dog_detector(img_path)
    is_human = face_detector(img_path)
    if is_dog:
        print("hello,dog!")
        print(Xception_predict_bred(img_path))
        plt.show()
    if is_human:
        print("hello,human!")
        print("You look like a ...", Xception_predict_bred(img_path))
        plt.show()
    if not is_dog and not is_human:
        print("error!,no human and no dog!!")
        plt.show()
# ---
# <a id='step7'></a>
# ## 步骤 7: 测试你的算法
#
# 在这个部分中,你将尝试一下你的新算法!算法认为__你__看起来像什么类型的狗?如果你有一只狗,它可以准确地预测你的狗的品种吗?如果你有一只猫,它会将你的猫误判为一只狗吗?
#
# **上传方式:点击左上角的Jupyter回到上级菜单,你可以看到Jupyter Notebook的右上方会有Upload按钮。**
#
# <a id='question11'></a>
#
# ### __问题 11:__
#
# 在下方编写代码,用至少6张现实中的图片来测试你的算法。你可以使用任意照片,不过请至少使用两张人类图片(要征得当事人同意哦)和两张狗的图片。
# 同时请回答如下问题:
#
# 1. 输出结果比你预想的要好吗 :) ?或者更糟 :( ?
# 2. 提出至少三点改进你的模型的想法。
# __回答__:
# 1.输出结果比我预想的好,因为他正确识别出来不是狗也不是人的图片。
# 2.
# (1)更换人脸检测算法,opencv必须用正脸照片,我用了一张远距离的人脸照片就不能识别了。
# (2)找到更多的数据,这样可以防止模型过拟合
# (3)交叉验证法,我们将数据集切分为训练集及测试集,为了能够更好的优化模型,我们从训练集再切分出一部分作为验证集,最后把结果加权平均。
# In[53]:
## TODO: 在你的电脑上,在步骤6中,至少在6张图片上运行你的算法。
## 自由地使用所需的代码单元数吧
def _show_and_predict(img_path):
    """Display the image at *img_path* and run the dog/human classifier on it."""
    # Convert BGR (OpenCV) to RGB (matplotlib) before displaying.
    rgb = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.show()
    predict_dog_or_not(img_path)

# Run the algorithm on two cat, two human, and two dog pictures; the six
# previously copy-pasted cells are replaced by one helper and a loop.
for _test_image in ["picture/cat1.jpg", "picture/cat2.jpg",
                    "picture/human1.jpg", "picture/human2.jpg",
                    "picture/dog1.jpg", "picture/dog2.jpg"]:
    _show_and_predict(_test_image)
# **注意: 当你写完了所有的代码,并且回答了所有的问题。你就可以把你的 iPython Notebook 导出成 HTML 文件。你可以在菜单栏,这样导出File -> Download as -> HTML (.html)把这个 HTML 和这个 iPython notebook 一起做为你的作业提交。**
| [
"keras.preprocessing.image.ImageDataGenerator",
"numpy.load",
"numpy.argmax",
"random.shuffle",
"keras.preprocessing.image.img_to_array",
"cv2.rectangle",
"glob.glob",
"sklearn.datasets.load_files",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"keras.layers.GlobalAveragePooling2D",
"keras.prepr... | [((3154, 3174), 'random.seed', 'random.seed', (['(8675309)'], {}), '(8675309)\n', (3165, 3174), False, 'import random\n'), ((3240, 3267), 'random.shuffle', 'random.shuffle', (['human_files'], {}), '(human_files)\n', (3254, 3267), False, 'import random\n'), ((3900, 3969), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascades/haarcascade_frontalface_alt.xml"""'], {}), "('haarcascades/haarcascade_frontalface_alt.xml')\n", (3921, 3969), False, 'import cv2\n'), ((4032, 4058), 'cv2.imread', 'cv2.imread', (['human_files[3]'], {}), '(human_files[3])\n', (4042, 4058), False, 'import cv2\n'), ((4101, 4138), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4113, 4138), False, 'import cv2\n'), ((4451, 4487), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4463, 4487), False, 'import cv2\n'), ((4502, 4520), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cv_rgb'], {}), '(cv_rgb)\n', (4512, 4520), True, 'import matplotlib.pyplot as plt\n'), ((4521, 4531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4529, 4531), True, 'import matplotlib.pyplot as plt\n'), ((7962, 7990), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (7970, 7990), False, 'from keras.applications.resnet50 import ResNet50\n'), ((14866, 14878), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14876, 14878), False, 'from keras.models import Sequential\n'), ((16044, 16135), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'horizontal_flip': '(True)'}), '(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n', (16062, 16135), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((16406, 16513), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 
'"""saved_models/weights.best.from_scratch.hdf5"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath='saved_models/weights.best.from_scratch.hdf5',\n verbose=1, save_best_only=True)\n", (16421, 16513), False, 'from keras.callbacks import ModelCheckpoint\n'), ((16734, 16790), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)', 'verbose': '(2)'}), "(monitor='val_loss', patience=5, verbose=2)\n", (16747, 16790), False, 'from keras.callbacks import EarlyStopping\n'), ((17497, 17550), 'numpy.load', 'np.load', (['"""/data/bottleneck_features/DogVGG16Data.npz"""'], {}), "('/data/bottleneck_features/DogVGG16Data.npz')\n", (17504, 17550), True, 'import numpy as np\n'), ((17851, 17863), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17861, 17863), False, 'from keras.models import Sequential\n'), ((18166, 18266), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""saved_models/weights.best.VGG16.hdf5"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath='saved_models/weights.best.VGG16.hdf5', verbose=1,\n save_best_only=True)\n", (18181, 18266), False, 'from keras.callbacks import ModelCheckpoint\n'), ((20593, 20649), 'numpy.load', 'np.load', (['"""/data/bottleneck_features/DogXceptionData.npz"""'], {}), "('/data/bottleneck_features/DogXceptionData.npz')\n", (20600, 20649), True, 'import numpy as np\n'), ((21514, 21526), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (21524, 21526), False, 'from keras.models import Sequential\n'), ((22151, 22242), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'horizontal_flip': '(True)'}), '(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n', (22169, 22242), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((22390, 22496), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], 
{'filepath': '"""models_re/weights.best.train_Xception.hdf5"""', 'verbose': '(2)', 'save_best_only': '(True)'}), "(filepath='models_re/weights.best.train_Xception.hdf5',\n verbose=2, save_best_only=True)\n", (22405, 22496), False, 'from keras.callbacks import ModelCheckpoint\n'), ((22748, 22804), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)', 'verbose': '(2)'}), "(monitor='val_loss', patience=5, verbose=2)\n", (22761, 22804), False, 'from keras.callbacks import EarlyStopping\n'), ((26095, 26112), 'cv2.imread', 'cv2.imread', (['cat_1'], {}), '(cat_1)\n', (26105, 26112), False, 'import cv2\n'), ((26121, 26159), 'cv2.cvtColor', 'cv2.cvtColor', (['cat_1', 'cv2.COLOR_BGR2RGB'], {}), '(cat_1, cv2.COLOR_BGR2RGB)\n', (26133, 26159), False, 'import cv2\n'), ((26162, 26179), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cat_1'], {}), '(cat_1)\n', (26172, 26179), True, 'import matplotlib.pyplot as plt\n'), ((26182, 26192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26190, 26192), True, 'import matplotlib.pyplot as plt\n'), ((26281, 26298), 'cv2.imread', 'cv2.imread', (['cat_2'], {}), '(cat_2)\n', (26291, 26298), False, 'import cv2\n'), ((26307, 26345), 'cv2.cvtColor', 'cv2.cvtColor', (['cat_2', 'cv2.COLOR_BGR2RGB'], {}), '(cat_2, cv2.COLOR_BGR2RGB)\n', (26319, 26345), False, 'import cv2\n'), ((26348, 26365), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cat_2'], {}), '(cat_2)\n', (26358, 26365), True, 'import matplotlib.pyplot as plt\n'), ((26368, 26378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26376, 26378), True, 'import matplotlib.pyplot as plt\n'), ((26478, 26497), 'cv2.imread', 'cv2.imread', (['human_1'], {}), '(human_1)\n', (26488, 26497), False, 'import cv2\n'), ((26507, 26547), 'cv2.cvtColor', 'cv2.cvtColor', (['human_1', 'cv2.COLOR_BGR2RGB'], {}), '(human_1, cv2.COLOR_BGR2RGB)\n', (26519, 26547), False, 'import cv2\n'), ((26550, 26569), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['human_1'], {}), '(human_1)\n', (26560, 26569), True, 'import matplotlib.pyplot as plt\n'), ((26572, 26582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26580, 26582), True, 'import matplotlib.pyplot as plt\n'), ((26686, 26705), 'cv2.imread', 'cv2.imread', (['human_2'], {}), '(human_2)\n', (26696, 26705), False, 'import cv2\n'), ((26715, 26755), 'cv2.cvtColor', 'cv2.cvtColor', (['human_2', 'cv2.COLOR_BGR2RGB'], {}), '(human_2, cv2.COLOR_BGR2RGB)\n', (26727, 26755), False, 'import cv2\n'), ((26758, 26777), 'matplotlib.pyplot.imshow', 'plt.imshow', (['human_2'], {}), '(human_2)\n', (26768, 26777), True, 'import matplotlib.pyplot as plt\n'), ((26780, 26790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26788, 26790), True, 'import matplotlib.pyplot as plt\n'), ((26885, 26902), 'cv2.imread', 'cv2.imread', (['dog_1'], {}), '(dog_1)\n', (26895, 26902), False, 'import cv2\n'), ((26910, 26948), 'cv2.cvtColor', 'cv2.cvtColor', (['dog_1', 'cv2.COLOR_BGR2RGB'], {}), '(dog_1, cv2.COLOR_BGR2RGB)\n', (26922, 26948), False, 'import cv2\n'), ((26951, 26968), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dog_1'], {}), '(dog_1)\n', (26961, 26968), True, 'import matplotlib.pyplot as plt\n'), ((26971, 26981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26979, 26981), True, 'import matplotlib.pyplot as plt\n'), ((27075, 27092), 'cv2.imread', 'cv2.imread', (['dog_2'], {}), '(dog_2)\n', (27085, 27092), False, 'import cv2\n'), ((27100, 27138), 'cv2.cvtColor', 'cv2.cvtColor', (['dog_2', 'cv2.COLOR_BGR2RGB'], {}), '(dog_2, cv2.COLOR_BGR2RGB)\n', (27112, 27138), False, 'import cv2\n'), ((27141, 27158), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dog_2'], {}), '(dog_2)\n', (27151, 27158), True, 'import matplotlib.pyplot as plt\n'), ((27161, 27171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27169, 27171), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2163), 'sklearn.datasets.load_files', 'load_files', (['path'], {}), '(path)\n', 
(2157, 2163), False, 'from sklearn.datasets import load_files\n'), ((2180, 2207), 'numpy.array', 'np.array', (["data['filenames']"], {}), "(data['filenames'])\n", (2188, 2207), True, 'import numpy as np\n'), ((3217, 3238), 'glob.glob', 'glob', (['"""/data/lfw/*/*"""'], {}), "('/data/lfw/*/*')\n", (3221, 3238), False, 'from glob import glob\n'), ((4370, 4428), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (4383, 4428), False, 'import cv2\n'), ((4997, 5017), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (5007, 5017), False, 'import cv2\n'), ((5029, 5066), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (5041, 5066), False, 'import cv2\n'), ((8808, 8856), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (8822, 8856), False, 'from keras.preprocessing import image\n'), ((8914, 8937), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (8932, 8937), False, 'from keras.preprocessing import image\n'), ((8991, 9016), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (9005, 9016), True, 'import numpy as np\n'), ((9142, 9168), 'numpy.vstack', 'np.vstack', (['list_of_tensors'], {}), '(list_of_tensors)\n', (9151, 9168), True, 'import numpy as np\n'), ((14910, 15045), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(2, 2)', 'data_format': '"""channels_last"""', 'padding': '"""valid"""', 'input_shape': '(224, 224, 3)', 'activation': '"""relu"""'}), "(filters=16, kernel_size=(2, 2), data_format='channels_last', padding\n ='valid', input_shape=(224, 224, 3), activation='relu')\n", (14916, 15045), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15046, 15082), 
'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (15058, 15082), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15100, 15200), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2, 2)', 'data_format': '"""channels_last"""', 'strides': '(1)', 'padding': '"""valid"""'}), "(filters=32, kernel_size=(2, 2), data_format='channels_last', strides\n =1, padding='valid')\n", (15106, 15200), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15205, 15241), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (15217, 15241), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15256, 15356), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(2, 2)', 'data_format': '"""channels_last"""', 'strides': '(1)', 'padding': '"""valid"""'}), "(filters=64, kernel_size=(2, 2), data_format='channels_last', strides\n =1, padding='valid')\n", (15262, 15356), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15363, 15399), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (15375, 15399), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15413, 15437), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (15435, 15437), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((15453, 15463), 'keras.layers.Dense', 'Dense', (['(133)'], {}), '(133)\n', (15458, 15463), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((15476, 15496), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (15494, 15496), False, 
'from keras.layers import BatchNormalization\n'), ((15509, 15530), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (15519, 15530), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((17880, 17937), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'input_shape': 'train_VGG16.shape[1:]'}), '(input_shape=train_VGG16.shape[1:])\n', (17902, 17937), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((17955, 17987), 'keras.layers.Dense', 'Dense', (['(133)'], {'activation': '"""softmax"""'}), "(133, activation='softmax')\n", (17960, 17987), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((21547, 21607), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'input_shape': 'train_Xception.shape[1:]'}), '(input_shape=train_Xception.shape[1:])\n', (21569, 21607), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\n'), ((21629, 21661), 'keras.layers.Dense', 'Dense', (['(133)'], {'activation': '"""softmax"""'}), "(133, activation='softmax')\n", (21634, 21661), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((2250, 2274), 'numpy.array', 'np.array', (["data['target']"], {}), "(data['target'])\n", (2258, 2274), True, 'import numpy as np\n'), ((19268, 19295), 'numpy.argmax', 'np.argmax', (['predicted_vector'], {}), '(predicted_vector)\n', (19277, 19295), True, 'import numpy as np\n'), ((24281, 24308), 'numpy.argmax', 'np.argmax', (['predicted_vector'], {}), '(predicted_vector)\n', (24290, 24308), True, 'import numpy as np\n'), ((25016, 25026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25024, 25026), True, 'import matplotlib.pyplot as plt\n'), ((25181, 25191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25189, 25191), True, 'import matplotlib.pyplot as plt\n'), ((25326, 25336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (25334, 25336), True, 'import matplotlib.pyplot as plt\n'), ((2631, 2664), 'glob.glob', 'glob', (['"""/data/dog_images/train/*/"""'], {}), "('/data/dog_images/train/*/')\n", (2635, 2664), False, 'from glob import glob\n'), ((2813, 2862), 'numpy.hstack', 'np.hstack', (['[train_files, valid_files, test_files]'], {}), '([train_files, valid_files, test_files])\n', (2822, 2862), True, 'import numpy as np\n'), ((9114, 9129), 'tqdm.tqdm', 'tqdm', (['img_paths'], {}), '(img_paths)\n', (9118, 9129), False, 'from tqdm import tqdm\n'), ((17030, 17060), 'numpy.expand_dims', 'np.expand_dims', (['tensor'], {'axis': '(0)'}), '(tensor, axis=0)\n', (17044, 17060), True, 'import numpy as np\n'), ((18725, 18756), 'numpy.expand_dims', 'np.expand_dims', (['feature'], {'axis': '(0)'}), '(feature, axis=0)\n', (18739, 18756), True, 'import numpy as np\n'), ((23164, 23195), 'numpy.expand_dims', 'np.expand_dims', (['feature'], {'axis': '(0)'}), '(feature, axis=0)\n', (23178, 23195), True, 'import numpy as np\n'), ((17129, 17160), 'numpy.array', 'np.array', (['dog_breed_predictions'], {}), '(dog_breed_predictions)\n', (17137, 17160), True, 'import numpy as np\n'), ((17162, 17193), 'numpy.argmax', 'np.argmax', (['test_targets'], {'axis': '(1)'}), '(test_targets, axis=1)\n', (17171, 17193), True, 'import numpy as np\n'), ((18824, 18851), 'numpy.array', 'np.array', (['VGG16_predictions'], {}), '(VGG16_predictions)\n', (18832, 18851), True, 'import numpy as np\n'), ((18853, 18884), 'numpy.argmax', 'np.argmax', (['test_targets'], {'axis': '(1)'}), '(test_targets, axis=1)\n', (18862, 18884), True, 'import numpy as np\n'), ((23266, 23296), 'numpy.array', 'np.array', (['Xception_predictions'], {}), '(Xception_predictions)\n', (23274, 23296), True, 'import numpy as np\n'), ((23298, 23329), 'numpy.argmax', 'np.argmax', (['test_targets'], {'axis': '(1)'}), '(test_targets, axis=1)\n', (23307, 23329), True, 'import numpy as np\n')] |
# LICENSE
#
# _This file is Copyright 2018 by the Image Processing and Analysis Group (BioImage Suite Team). Dept. of Radiology & Biomedical Imaging, Yale School of Medicine._
#
# BioImage Suite Web is licensed under the Apache License, Version 2.0 (the "License");
#
# - you may not use this software except in compliance with the License.
# - You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
#
# __Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.__
#
# ENDLICENSE
# imgSegm.py
#
# Created on: Oct 6, 2020
# Authors: <NAME>, <NAME>
#
import os
import sys
import json
import numpy as np
import pandas as pd
import nibabel as nib
from pathlib import Path
from copy import deepcopy as dp
import monai
import torch
import pytorch_lightning
import monai.data as monaiData
import monai.metrics as monaiMetrics
import monai.transforms as monaiTrans
from torch.utils.data import DataLoader as torchDataloader
import bisImgSeg.utilities.modelObjects as model_objects
from monai.inferers import sliding_window_inference
class TRANSFORMATION:
    """Builder for a MONAI dictionary-transform pipeline.

    Holds the default parameters of each transform, then assembles
    ``self.comfuncs`` — the ordered list of transforms handed to
    ``monaiTrans.Compose`` — for a given stage ('train', 'validate' or
    'test'). User overrides from the JSON parameter file are merged via
    :meth:`parsingtransformations`.
    """
    def __init__(self):
        # index: position in ``comfuncs`` where augmentation transforms are
        # inserted (after the four deterministic preprocessing transforms).
        self.index = 4
        # Assembled transform list (filled by init_comfuncs).
        self.comfuncs = []
        # Dictionary keys the transforms operate on, e.g. ['IMAGE', 'SEGM'].
        self.keys = []
        # Target anatomical orientation code for Orientationd.
        self.orientation = 'LPS'
        # Spatial size of training patches (training mode only).
        self.patch_size = None
        # Target voxel spacing for Spacingd (must be set from the JSON file).
        self.spacing = None
        # Prefixes used by the DataStatsd debug transform.
        self.prefix = None
        # Per-transform default argument dictionaries; attribute names match
        # the MONAI transform names so that parsingtransformations can update
        # them generically via vars(self)[name][arg].
        self.Spacingd = {
            'mode': ('bilinear', 'nearest'),
            'padding_mode': ('reflection','reflection')
        }
        self.ScaleIntensityRangePercentilesd = {
            'keys': ['IMAGE'],
            'lower': 25,
            'upper': 75,
            'b_min': -0.5,
            'b_max': 0.5,
            'clip': False
        }
        self.RandCropByPosNegLabeld = {
            'label_key': 'SEGM',
            'pos': 3,
            'neg': 1,
            'num_samples': 4
        }
    def init_comfuncs(self, status):
        """Build the base transform chain: load -> add channel -> reorient ->
        respace -> intensity-normalize -> (train only: random crop) -> tensor.
        """
        self.comfuncs = [
            monaiTrans.LoadNiftid(keys=self.keys),
            monaiTrans.AddChanneld(keys=self.keys),
            monaiTrans.Orientationd(keys=self.keys, axcodes=self.orientation),
            monaiTrans.Spacingd(keys=self.keys, pixdim=self.spacing, mode=self.Spacingd['mode'], padding_mode=self.Spacingd['padding_mode']),
            monaiTrans.ScaleIntensityRangePercentilesd(keys=self.ScaleIntensityRangePercentilesd['keys'], \
                lower=self.ScaleIntensityRangePercentilesd['lower'], upper=self.ScaleIntensityRangePercentilesd['upper'], \
                b_min=self.ScaleIntensityRangePercentilesd['b_min'], b_max=self.ScaleIntensityRangePercentilesd['b_max'], \
                clip=self.ScaleIntensityRangePercentilesd['clip']),
            monaiTrans.ToTensord(keys=self.keys)
        ]
        if status == 'train':
            # randomly crop out patch samples from big image based on pos / neg ratio
            # the image centers of negative samples must be in valid image area
            self.comfuncs.insert(-1, monaiTrans.RandCropByPosNegLabeld(keys=self.keys, label_key=self.RandCropByPosNegLabeld['label_key'], \
                spatial_size=self.patch_size, num_samples=self.RandCropByPosNegLabeld['num_samples'], \
                pos=self.RandCropByPosNegLabeld['pos'], neg=self.RandCropByPosNegLabeld['neg']),)
    def parsingtransformations(self, status, debug, dt_dict=None, aug_dict=None):
        """Merge user transform settings and build ``self.comfuncs``.

        Args:
            status: pipeline stage ('train', 'validate' or 'test').
            debug: when True, insert a DataStatsd transform for diagnostics.
            dt_dict: default-transformation overrides from the JSON file
                (orientation, spacing, patch_size, plus per-transform args).
            aug_dict: augmentation transforms, keyed by MONAI transform name;
                values are the keyword arguments to instantiate them with.

        Raises:
            ValueError: when required settings (patch_size in training,
                spacing always) are missing, or an augmentation cannot be
                instantiated.
        """
        if dt_dict:
            if 'orientation' in dt_dict.keys():
                self.orientation = dt_dict['orientation'].upper()
            if status == 'train':
                try:
                    # TODO: 2D framework---------------------------------------------------------------------------------------------------------
                    self.patch_size = tuple(dt_dict['patch_size'])
                    # TODO: 2D framework---------------------------------------------------------------------------------------------------------
                except:
                    print("In training mode, patch_size must be defined in the json file. Please double check." )
                    raise ValueError
            try:
                self.spacing = tuple(dt_dict['spacing'])
                if len(self.keys) == 1:
                    # Single-key (image-only) pipeline: drop the segmentation
                    # entries of the Spacingd mode/padding tuples.
                    self.Spacingd['mode'] = [self.Spacingd['mode'][0]]
                    self.Spacingd['padding_mode'] = [self.Spacingd['padding_mode'][0]]
            except:
                print("Spacing must be defined in the json file. Please double check." )
                raise ValueError
            fix_keys = ['orientation', 'spacing', 'patch_size']
            # Any remaining top-level key is assumed to name a transform whose
            # default-argument dict lives as an attribute of self.
            for eledf in dt_dict.keys():
                if eledf not in fix_keys:
                    for argd in dt_dict[eledf].keys():
                        vars(self)[eledf][argd] = dt_dict[eledf][argd]
            # NOTE(review): this call sits inside ``if dt_dict:``, so when no
            # default-transformation dict is given, comfuncs stays empty —
            # confirm this is intended.
            self.init_comfuncs(status)
        if aug_dict:
            for elef in aug_dict.keys():
                try:
                    new_dict = dp(aug_dict[elef])
                    new_dict['functionname'] = elef
                    # Build e.g. "monaiTrans.RandFlipd(prob=0.5, ...)" as a
                    # string, then eval() it; eval on user JSON is a security
                    # risk if the file is untrusted.
                    sgfunc = model_objects.concatFunctionArguments(new_dict, prefix='monaiTrans.')
                    if 'keys' not in aug_dict[elef].keys():
                        # Inject keys=self.keys as the first argument.
                        sgfunc = sgfunc[:sgfunc.index('(')+1] + 'keys=self.keys, ' +sgfunc[sgfunc.index('(')+1:]
                    self.comfuncs.insert(self.index, eval(sgfunc))
                    self.index += 1
                except:
                    print("Cannot apply the transformation, "+ elef +", on the datasest. Please double check." )
                    raise ValueError
        if debug:
            # DataStatsd prints shape/intensity statistics; keep it before the
            # final ToTensord (and before RandCrop in training) so the printed
            # data is still a plain array.
            statics = 'monaiTrans.DataStatsd(keys=' + str(self.keys) +', prefix=' + str(self.prefix) + ')'
            if status == 'train':
                self.comfuncs.insert(-2, eval(statics))
            else:
                self.comfuncs.insert(-1, eval(statics))
def convertInputsToDictionaies(PATH, debug):
    """Load the dataset manifest CSV and split it into per-stage record lists.

    Args:
        PATH: path to a CSV with at least IMAGE, SEGM and DATA_SPLIT columns,
            where DATA_SPLIT is one of 'Training', 'Validation', 'Testing'.
        debug: when True, print the size of each split.

    Returns:
        Three lists of {'IMAGE', 'SEGM', 'DATA_SPLIT'} record dicts:
        (training, validation, testing).
    """
    manifest = pd.read_csv(PATH, index_col=False).drop_duplicates()
    manifest = manifest[['IMAGE', 'SEGM', 'DATA_SPLIT']]
    splits = {
        label: manifest[manifest['DATA_SPLIT'] == label].to_dict('records')
        for label in ('Training', 'Validation', 'Testing')
    }
    if debug:
        print('Dataset contains %d entries' % len(manifest))
        print('Number of training files: ', len(splits['Training']))
        print('Number of validation files: ', len(splits['Validation']))
        print('Number of testing files: ', len(splits['Testing']))
    return splits['Training'], splits['Validation'], splits['Testing']
def initParams():
    """Return the dictionary of default pipeline parameters.

    Covers data loading (batch size, workers, collate), training schedule,
    the model architecture defaults under the 'model' key, and the
    post-processing / evaluation switches. Callers overlay user settings on
    top of this dict via updateParams.
    """
    # Data-loader and trainer defaults (top-level keys).
    params = {
        'model': {},
        'batch_size': 16,
        'shuffle': True,
        'num_workers': 4,
        'collate_fn': monaiData.list_data_collate,
        'max_epochs': 2000,
        'num_sanity_val_steps': 0,
        'check_val_every_n_epoch': 10,
    }
    # Model architecture, inference and objective defaults.
    params['model'].update({
        'modelname': 'UNet',
        'dimensions': 3,
        'in_channels': 1,
        'out_channels': 2,
        'channels': (16, 32, 64, 128),
        'stride_size': 2,
        'strides': None,
        'num_res_units': 2,
        'roi_size': None,
        'sw_batch_size': 4,
        'lossfunction': {
            'functionname': 'DiceLoss',
            'to_onehot_y': True,
            'softmax': True
        },
        'metricfunction': {
            'functionname': 'DiceMetric',
            'include_background': True,
            'to_onehot_y': True,
            'sigmoid': True,
            'reduction': 'mean'
        },
        'optimfunction': {
            'functionname': 'Adam',
            'lr': 1e-4
        },
    })
    # Evaluation / post-processing switches.
    params['testSegm'] = True
    params['postprocessing'] = True
    params['postprocessing_func'] = {
        'functionname': 'KeepLargestConnectedComponent',
        'applied_labels': [1]
    }
    return params
def updateParams(inputs, defaults, status):
    """Overlay user-supplied settings onto the defaults dict, in place.

    Args:
        inputs: user settings for the given section/stage.
        defaults: the parameter dict produced by initParams (mutated).
        status: 'model' to merge model settings; otherwise a stage name
            ('train', 'validate' or 'test') whose keys are merged at the top
            level. Validation/testing force batch_size=1 and shuffle=False so
            full images are evaluated one at a time in order.
    """
    if status == 'model':
        for model_arg in inputs.keys():
            if model_arg == 'name':
                # BUG FIX: the original used '==' (comparison) instead of '='
                # here, so 'name': 'unet3d'/'unet2d' silently had no effect.
                if inputs['name'].lower() == 'unet3d':
                    defaults['model']['modelname'] = 'UNet'
                    defaults['model']['dimensions'] = 3
                elif inputs['name'].lower() == 'unet2d':
                    defaults['model']['modelname'] = 'UNet'
                    defaults['model']['dimensions'] = 2
            else:
                defaults['model'][model_arg] = inputs[model_arg]
        if not defaults['model']['strides']:
            # One stride per down-sampling step: len(channels) - 1 entries.
            defaults['model']['strides'] = tuple(
                defaults['model']['stride_size']
                for _ in range(len(defaults['model']['channels']) - 1)
            )
    else:
        if status == 'validate' or status == 'test':
            defaults['batch_size'] = 1
            defaults['shuffle'] = False
        for sarg in inputs.keys():
            defaults[sarg] = inputs[sarg]
def parsingInputs(inps, debug):
    """Parse the JSON parameter dict and build per-stage MONAI data loaders.

    Args:
        inps: parsed parameter file; must contain 'inputpath' (manifest CSV)
            and 'model'; may contain 'train', 'test', 'cachepath',
            'defaulttransformation' and 'augmentation' sections.
        debug: verbose/debug flag, threaded through to the transforms.

    Returns:
        (params, res_loader): the merged parameter dict and a dict of
        DataLoaders keyed 'train_loader' / 'validate_loader' / 'test_loader'.

    NOTE(review): this function creates and reads dynamic locals through
    ``vars()[...]``. That relies on CPython's locals() returning a live dict
    in a function scope and is not guaranteed — PEP 667 (Python 3.13) makes
    locals() a snapshot, which would break this; confirm the supported
    Python version.
    """
    CACHE_PATH = None
    INPUT_PATH = inps['inputpath']
    if 'cachepath' in inps.keys():
        CACHE_PATH = inps['cachepath']
    # Load dataset and convert to dictionary
    traind, validated, testd = convertInputsToDictionaies(INPUT_PATH, debug)
    res_loader = {}
    params = initParams()
    params['debug'] = debug
    updateParams(inps['model'], params, 'model')
    # Decide which stages to build loaders for.
    statuslist = []
    if 'train' in inps.keys():
        statuslist.append('train')
        if validated == []:
            # No validation rows in the manifest: disable the validation loop.
            params['val_percent_check']=0
        else:
            statuslist.append('validate')
    if 'test' in inps.keys():
        statuslist.append('test')
    for status in statuslist:
        input_defaultT = None
        input_augmentation = None
        # Parsing transformation parameters
        vars()[status+'_transformation'] = TRANSFORMATION()
        # A float (NaN from pandas) in the SEGM column means no ground-truth
        # segmentation is available for testing.
        if status == 'test' and type(testd[0]['SEGM']) == float:
            params['testSegm'] = False
            vars()[status+'_transformation'].keys = ['IMAGE']
            vars()[status+'_transformation'].prefix = (status + '_image',)
        else:
            vars()[status+'_transformation'].keys = ['IMAGE', 'SEGM']
            vars()[status+'_transformation'].prefix = (status + '_image', status + '_segm')
        # Stage-specific settings take precedence over file-level ones.
        if status in inps.keys() and 'defaulttransformation' in inps[status].keys():
            input_defaultT = inps[status].pop('defaulttransformation')
        elif 'defaulttransformation' in inps.keys():
            input_defaultT = inps['defaulttransformation']
        if status in inps.keys() and 'augmentation' in inps[status].keys():
            input_augmentation = inps[status].pop('augmentation')
        elif 'augmentation' in inps.keys():
            input_augmentation = inps['augmentation']
        vars()[status+'_transformation'].parsingtransformations(status, debug, input_defaultT, input_augmentation)
        trans_lists = getattr(vars()[status+'_transformation'], 'comfuncs')
        # Parsing status specific arguments
        if status in inps.keys():
            updateParams(inps[status], params, status)
        else:
            updateParams({}, params, status)
        if CACHE_PATH is None:
            vars()[status+'_ds'] = monaiData.Dataset(
                data = vars()[status+'d'], \
                transform = monaiTrans.Compose(trans_lists)
                )
        else:
            # Maintain a consistent CACHE_PATH if you want multiple programs to use this
            vars()[status+'_ds'] = monaiData.PersistentDataset(
                data = vars()[status+'d'], \
                transform = monaiTrans.Compose(trans_lists), \
                cache_dir=CACHE_PATH\
                )
        vars()[status+'_loader'] = torchDataloader(
                vars()[status+'_ds'], \
                batch_size = params['batch_size'], \
                shuffle =params['shuffle'], \
                num_workers =params['num_workers'], \
                collate_fn = params['collate_fn']\
                )
        res_loader[status+'_loader'] = vars()[status+'_loader']
    # TODO: user should keep the parameters below consistent in training / testing
    # Propagate the effective geometry settings back into params; test-time
    # settings (second try) override train-time ones when both exist.
    try:
        params['model']['patch_size'] = vars()['train_transformation'].patch_size
        params['orientation'] = vars()['train_transformation'].orientation
        params['spacing'] = vars()['train_transformation'].spacing
    except:
        pass
    try:
        params['orientation'] = vars()['test_transformation'].orientation
        params['spacing'] = vars()['test_transformation'].spacing
    except:
        pass
    return params, res_loader
def findMaxDim(loaders):
    """Return the element-wise maximum spatial dimensions over all
    non-training loaders.

    Training batches are cropped patches, so only validation/test loaders
    (full-size images) are scanned. The first two axes of each batch
    (batch, channel) are skipped; the remaining spatial axes are compared.

    Args:
        loaders: dict of DataLoaders keyed by stage name (e.g. 'test_loader');
            each yields dicts with an 'IMAGE' tensor.

    Returns:
        list of per-axis maxima over every image in every scanned loader.
    """
    max_dims = []
    with torch.no_grad():
        for key, loader in loaders.items():
            if key == 'train_loader':
                continue
            dims = [np.array(batch['IMAGE'].shape[2:]) for batch in loader]
            # BUG FIX: the original left `temp_max` unbound (first loader
            # empty) or stale (later loader empty); skip empty loaders.
            if dims:
                max_dims.append(np.amax(dims, axis=0))
    return list(np.amax(max_dims, axis=0))
def initRoisize(loaders, min_num):
    """Compute a sliding-window ROI size from the dataset's maximal image
    dimensions, rounding every axis up to the nearest multiple of ``min_num``
    (the product of the network strides) so the window divides cleanly.

    Args:
        loaders: dict of DataLoaders, as accepted by findMaxDim.
        min_num: the multiple each ROI dimension must be rounded up to.

    Returns:
        tuple of rounded per-axis ROI sizes.
    """
    rounded = []
    for dim in findMaxDim(loaders):
        remainder = dim % min_num
        if remainder:
            rounded.append(dim + (min_num - remainder))
        else:
            rounded.append(dim)
    return tuple(rounded)
def setupLoggers(MODEL_ROOT_PATH):
    """Create the training bookkeeping objects under MODEL_ROOT_PATH.

    Returns a 3-element list: [TensorBoard logger, checkpoint callback,
    path of the last checkpoint (or None when none exists) for resuming].
    """
    saved_model_dir = os.path.join(MODEL_ROOT_PATH, 'saved_model')
    logger = pytorch_lightning.loggers.TensorBoardLogger(
        save_dir=os.path.join(saved_model_dir, 'logs')
    )
    # Keep the 3 best checkpoints by validation dice, plus the last one.
    callback = pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint(
        filepath=os.path.join(saved_model_dir, "{epoch}-{val_dice:.2f}"),
        save_last=True,
        save_top_k=3,
    )
    # Resume from 'last.ckpt' when a previous run left one behind.
    last_path = os.path.join(saved_model_dir, 'last.ckpt')
    lastCheckpoint = last_path if os.path.exists(last_path) else None
    return [logger, callback, lastCheckpoint]
def initTrainer(logL, gpuidx, dattrs):
    """Construct a pytorch_lightning Trainer and apply user attributes.

    Args:
        logL: [logger, checkpoint callback, last-checkpoint path or None],
            as returned by setupLoggers.
        gpuidx: index of the single GPU to train on.
        dattrs: parameter dict; any key that matches a Trainer default
            attribute is copied onto the trainer instance.

    Returns:
        The configured Trainer.
    """
    trainer = pytorch_lightning.Trainer(
        gpus=[gpuidx],
        logger=logL[0],
        checkpoint_callback=logL[1],
        resume_from_checkpoint=logL[2]
    )
    # NOTE(review): writing through vars(trainer) bypasses any property
    # setters Lightning defines — confirm this works for the attributes used.
    for key in dattrs.keys():
        # if hasattr(trainer, key):
        if key in trainer.default_attributes():
            vars(trainer)[key] = dattrs[key]
        # TODO: val_percent_check cannot be assigned with vars()--------------------------------------
        # TODO: fix with the following if statement--------------------------------------
        if key == 'val_percent_check':
            trainer.val_percent_check = 0.0
    return trainer
def getAffineMatrix(orientation, spacing, dimension):
    """Build a homogeneous (dimension+1)x(dimension+1) affine matrix whose
    diagonal carries the voxel spacing, with axis signs set from the
    orientation code (3D only).

    Args:
        orientation: axis code string such as 'LPS' or 'RAS'.
        spacing: per-axis voxel spacing.
        dimension: spatial dimensionality (3 supported; others pass through).

    Returns:
        numpy array of shape (dimension+1, dimension+1).
    """
    affine = np.eye(dimension + 1)
    for axis, step in enumerate(spacing):
        affine[axis, axis] = step
    if dimension == 3:
        # Flip the sign of an axis when the code requests the direction
        # opposite to RAS: L (vs R), P (vs A), I (vs S).
        for axis, flipped_code in enumerate('LPI'):
            if orientation[axis] == flipped_code:
                affine[axis, axis] = -affine[axis, axis]
    else:
        # TODO: xenos please take a look at it---------------------------------------------
        pass
    return affine
def predictionAndEvaluation(params, test_loader, device, model):
    """Run sliding-window inference on the test set, write NIfTI predictions
    and (when ground truth exists) an evaluation CSV.

    Args:
        params: merged parameter dict; uses 'orientation', 'spacing',
            'model' (roi_size, sw_batch_size, dimensions, metricfunction),
            'postprocessing', 'postprocessing_func', 'testSegm', 'debug'
            and 'output_path'.
        test_loader: DataLoader over the test dicts (batch_size 1 assumed —
            predictions are indexed as [0, 0, ...]).
        device: torch device to run inference on.
        model: trained segmentation network.

    Side effects:
        Writes one prediction NIfTI per test image under
        params['output_path'], and 'evaluation_results.csv' with Dice/HD95/MAD
        scores when params['testSegm'] is True.
    """
    # TODO: affine matrices with different orientation -------------------------------------------------------------------------------
    # TODO: 2D affine 'ra' or 'rs'
    # TODO: respacing ---------------------------------
    test_affine = getAffineMatrix(params['orientation'], params['spacing'], params['model']['dimensions'])
    model_metrics = params['model']['metricfunction']
    # Per-image scores are needed, so override any 'mean' reduction.
    model_metrics['reduction'] = 'none'
    # NOTE: eval() on a string built from the parameter file — only safe with
    # trusted configuration input.
    eval_metric = eval(model_objects.concatFunctionArguments(model_metrics, prefix='monaiMetrics.'))
    original_dice_results = list()
    postprocess_dice_results = list()
    output_results = list()
    original_hd_results = list()
    postprocess_hd_results = list()
    original_mad_results = list()
    postprocess_mad_results = list()
    with torch.no_grad():
        for i, test_data in enumerate(test_loader):
            roi_size = params['model']['roi_size']
            sw_batch_size = params['model']['sw_batch_size']
            test_outputs = sliding_window_inference(test_data['IMAGE'].to(device), roi_size, sw_batch_size, model)
            # Collapse per-class logits to a label map (channel kept for MONAI).
            argmax = torch.argmax(test_outputs, dim=1, keepdim=True)
            # Post-processing
            if params['postprocessing']:
                post_func = eval(model_objects.concatFunctionArguments(params['postprocessing_func'], prefix='monaiTrans.'))
                largest = post_func(argmax)
            else:
                largest = argmax
            # Write data out, reusing the input filename.
            output_file_name = os.path.split(test_loader.dataset.data[i]['IMAGE'])[1]
            output_path = os.path.join(params['output_path'], output_file_name)
            output_results.append(output_path)
            test_data_nii = nib.load(test_loader.dataset.data[i]['IMAGE'])
            target_affine = test_data_nii.affine
            # Remove the translation component
            # TODO: 2D-----------------------------------------------------------------------------------------
            # TODO: xenos please take a look at it
            target_affine[0:params['model']['dimensions'], params['model']['dimensions']] = 0
            # TODO: xenos please take a look at it: resampling ------------------------------------------------------------------
            monaiData.write_nifti(largest.detach().cpu()[0, 0, ...].numpy(), output_path,
                                  mode='nearest',
                                  affine=test_affine,
                                  target_affine=target_affine,
                                  output_spatial_shape=test_data_nii.shape,
                                  dtype=np.float32
                                  )
            if params['debug']:
                print(test_data['IMAGE'].shape)
                print(output_path)
            # evaluation scores (only when ground-truth segmentations exist)
            if params['testSegm']:
                test_labels = test_data['SEGM'].to(device)
                value = eval_metric(y_pred=argmax, y=test_labels)
                print('Dice: {:.5f}'.format(value.item()))
                original_dice_results.append(value.item())
                hd_value = monaiMetrics.compute_hausdorff_distance(argmax, test_labels, label_idx=1, percentile=95)
                print('HD95: {:.5f}'.format(hd_value.item()))
                original_hd_results.append(hd_value)
                mad_value = monaiMetrics.compute_average_surface_distance(argmax, test_labels, label_idx=1)
                print('MAD: {:.5f}'.format(mad_value.item()))
                original_mad_results.append(mad_value)
                if params['postprocessing']:
                    value = eval_metric(y_pred=largest, y=test_labels)
                    postprocess_dice_results.append(value.item())
                    print('Post-processed Dice: {:.5f}'.format(value.item()))
                    hd_value = monaiMetrics.compute_hausdorff_distance(largest, test_labels, label_idx=1, percentile=95)
                    print('Post-processed HD95: {:.5f}'.format(hd_value.item()))
                    postprocess_hd_results.append(hd_value)
                    mad_value = monaiMetrics.compute_average_surface_distance(largest, test_labels, label_idx=1)
                    # BUG FIX: this message previously said 'Post-processed
                    # HD95' although it prints the MAD value.
                    print('Post-processed MAD: {:.5f}'.format(mad_value.item()))
                    postprocess_mad_results.append(mad_value)
    if params['testSegm']:
        eval_results = pd.DataFrame()
        eval_results['IMAGE_DATA'] = [i['IMAGE'] for i in test_loader.dataset.data]
        eval_results['SEGM_DATA'] = [i['SEGM'] for i in test_loader.dataset.data]
        eval_results['SEGM_RESULTS'] = output_results
        eval_results['DICE'] = original_dice_results
        if params['postprocessing']:
            eval_results['POST_DICE'] = postprocess_dice_results
        eval_results['HD95'] = original_hd_results
        if params['postprocessing']:
            eval_results['POST_HD95'] = postprocess_hd_results
        eval_results['MAD'] = original_mad_results
        if params['postprocessing']:
            eval_results['POST_MAD'] = postprocess_mad_results
        eval_results.to_csv(os.path.join(params['output_path'], 'evaluation_results.csv'), index=False)
def imageSegmentation(paramfile, debug, recon=False):
    """Top-level entry point: build loaders, then train and/or test a model.

    Args:
        paramfile: parsed JSON parameter dict. The presence of 'train' /
            'test' keys selects the phases to run; also uses 'inputpath',
            'outputmodelpath' and 'gpu_device'.
        debug: when True, print environment info, model structure and
            per-sample diagnostics.
        recon: when True, write test predictions next to the input manifest
            (in 'reconResult') instead of under the model directory.
    """
    # ## Verify System Setup
    # Check torch and CUDA on the system.
    if debug:
        monai.config.print_config()
        print('CUDA available: ', torch.cuda.is_available())
        n_gpus = torch.cuda.device_count()
        for i in range(n_gpus):
            print('GPU %d: %s' % (i, torch.cuda.get_device_name(i)))
    # ## Parsing input parameters
    _defaults, loaders = parsingInputs(paramfile, debug)
    # Derive the inference ROI size from the data when not set explicitly;
    # np.prod(strides) is the total downsampling factor each axis must divide.
    if _defaults['model']['roi_size'] == None:
        _defaults['model']['roi_size'] = initRoisize(loaders, np.prod(_defaults['model']['strides']))
    # ## initialize training model in training mode
    # Look the model class up by name in the modelObjects module.
    MODEL = vars(model_objects)[_defaults['model']['modelname']]( _defaults['model'])
    if debug:
        print('******************************************************************')
        print('-----------------------MODEl CONFIGURATIONS-----------------------')
        print('roi_size for testing/validation: ', _defaults['model']['roi_size'])
        print(_defaults['model']['modelname'], " MODEL STRUCTURE:")
        print(MODEL)
        print('******************************************************************')
    if 'train' in paramfile.keys():
        # set up loggers and checkpoints
        loggers_list = setupLoggers(paramfile['outputmodelpath'])
        # # initialise Lightning's trainer.
        TRAINER = initTrainer(loggers_list, paramfile['gpu_device'], _defaults)
        # train
        if 'validate_loader' in loaders.keys():
            TRAINER.fit(MODEL, train_dataloader=loaders['train_loader'], val_dataloaders=loaders['validate_loader'])
        else:
            TRAINER.fit(MODEL, train_dataloader=loaders['train_loader'])
    if 'test' in paramfile.keys():
        # load the last checkpoint of the trained model
        # (fall back to a flat layout when 'saved_model/' is absent)
        try:
            lastcheckpoint = torch.load(os.path.join(paramfile['outputmodelpath'],'saved_model','last.ckpt'))
        except:
            lastcheckpoint = torch.load(os.path.join(paramfile['outputmodelpath'], 'last.ckpt'))
        MODEL.load_state_dict(lastcheckpoint['state_dict'])
        if recon:
            OUTPUT_PATH = os.path.join(os.path.dirname(paramfile['inputpath']), 'reconResult')
        else:
            OUTPUT_PATH = os.path.join(paramfile['outputmodelpath'],'results')
        Path(OUTPUT_PATH).mkdir(parents=True, exist_ok=True)
        device = torch.device("cuda:"+str(paramfile['gpu_device']))
        MODEL.to(device)
        _defaults['output_path'] = OUTPUT_PATH
        predictionAndEvaluation(_defaults, loaders['test_loader'], device, MODEL)
| [
"pytorch_lightning.Trainer",
"monai.transforms.AddChanneld",
"pandas.read_csv",
"torch.argmax",
"bisImgSeg.utilities.modelObjects.concatFunctionArguments",
"torch.cuda.device_count",
"pathlib.Path",
"torch.no_grad",
"os.path.join",
"monai.transforms.Orientationd",
"numpy.prod",
"pandas.DataFra... | [((6344, 6378), 'pandas.read_csv', 'pd.read_csv', (['PATH'], {'index_col': '(False)'}), '(PATH, index_col=False)\n', (6355, 6378), True, 'import pandas as pd\n'), ((15228, 15349), 'pytorch_lightning.Trainer', 'pytorch_lightning.Trainer', ([], {'gpus': '[gpuidx]', 'logger': 'logL[0]', 'checkpoint_callback': 'logL[1]', 'resume_from_checkpoint': 'logL[2]'}), '(gpus=[gpuidx], logger=logL[0],\n checkpoint_callback=logL[1], resume_from_checkpoint=logL[2])\n', (15253, 15349), False, 'import pytorch_lightning\n'), ((15930, 15951), 'numpy.eye', 'np.eye', (['(dimension + 1)'], {}), '(dimension + 1)\n', (15936, 15951), True, 'import numpy as np\n'), ((13602, 13617), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13615, 13617), False, 'import torch\n'), ((14131, 14156), 'numpy.amax', 'np.amax', (['max_dims'], {'axis': '(0)'}), '(max_dims, axis=0)\n', (14138, 14156), True, 'import numpy as np\n'), ((14984, 15041), 'os.path.join', 'os.path.join', (['MODEL_ROOT_PATH', '"""saved_model"""', '"""last.ckpt"""'], {}), "(MODEL_ROOT_PATH, 'saved_model', 'last.ckpt')\n", (14996, 15041), False, 'import os\n'), ((15067, 15124), 'os.path.join', 'os.path.join', (['MODEL_ROOT_PATH', '"""saved_model"""', '"""last.ckpt"""'], {}), "(MODEL_ROOT_PATH, 'saved_model', 'last.ckpt')\n", (15079, 15124), False, 'import os\n'), ((16898, 16974), 'bisImgSeg.utilities.modelObjects.concatFunctionArguments', 'model_objects.concatFunctionArguments', (['model_metrics'], {'prefix': '"""monaiMetrics."""'}), "(model_metrics, prefix='monaiMetrics.')\n", (16935, 16974), True, 'import bisImgSeg.utilities.modelObjects as model_objects\n'), ((17231, 17246), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17244, 17246), False, 'import torch\n'), ((20774, 20788), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (20786, 20788), True, 'import pandas as pd\n'), ((21733, 21760), 'monai.config.print_config', 'monai.config.print_config', ([], {}), '()\n', (21758, 21760), False, 'import 
monai\n'), ((21840, 21865), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (21863, 21865), False, 'import torch\n'), ((2206, 2243), 'monai.transforms.LoadNiftid', 'monaiTrans.LoadNiftid', ([], {'keys': 'self.keys'}), '(keys=self.keys)\n', (2227, 2243), True, 'import monai.transforms as monaiTrans\n'), ((2257, 2295), 'monai.transforms.AddChanneld', 'monaiTrans.AddChanneld', ([], {'keys': 'self.keys'}), '(keys=self.keys)\n', (2279, 2295), True, 'import monai.transforms as monaiTrans\n'), ((2309, 2374), 'monai.transforms.Orientationd', 'monaiTrans.Orientationd', ([], {'keys': 'self.keys', 'axcodes': 'self.orientation'}), '(keys=self.keys, axcodes=self.orientation)\n', (2332, 2374), True, 'import monai.transforms as monaiTrans\n'), ((2388, 2521), 'monai.transforms.Spacingd', 'monaiTrans.Spacingd', ([], {'keys': 'self.keys', 'pixdim': 'self.spacing', 'mode': "self.Spacingd['mode']", 'padding_mode': "self.Spacingd['padding_mode']"}), "(keys=self.keys, pixdim=self.spacing, mode=self.Spacingd\n ['mode'], padding_mode=self.Spacingd['padding_mode'])\n", (2407, 2521), True, 'import monai.transforms as monaiTrans\n'), ((2530, 2916), 'monai.transforms.ScaleIntensityRangePercentilesd', 'monaiTrans.ScaleIntensityRangePercentilesd', ([], {'keys': "self.ScaleIntensityRangePercentilesd['keys']", 'lower': "self.ScaleIntensityRangePercentilesd['lower']", 'upper': "self.ScaleIntensityRangePercentilesd['upper']", 'b_min': "self.ScaleIntensityRangePercentilesd['b_min']", 'b_max': "self.ScaleIntensityRangePercentilesd['b_max']", 'clip': "self.ScaleIntensityRangePercentilesd['clip']"}), "(keys=self.\n ScaleIntensityRangePercentilesd['keys'], lower=self.\n ScaleIntensityRangePercentilesd['lower'], upper=self.\n ScaleIntensityRangePercentilesd['upper'], b_min=self.\n ScaleIntensityRangePercentilesd['b_min'], b_max=self.\n ScaleIntensityRangePercentilesd['b_max'], clip=self.\n ScaleIntensityRangePercentilesd['clip'])\n", (2572, 2916), True, 'import monai.transforms as 
monaiTrans\n'), ((3071, 3107), 'monai.transforms.ToTensord', 'monaiTrans.ToTensord', ([], {'keys': 'self.keys'}), '(keys=self.keys)\n', (3091, 3107), True, 'import monai.transforms as monaiTrans\n'), ((14628, 14680), 'os.path.join', 'os.path.join', (['MODEL_ROOT_PATH', '"""saved_model"""', '"""logs"""'], {}), "(MODEL_ROOT_PATH, 'saved_model', 'logs')\n", (14640, 14680), False, 'import os\n'), ((14780, 14850), 'os.path.join', 'os.path.join', (['MODEL_ROOT_PATH', '"""saved_model"""', '"""{epoch}-{val_dice:.2f}"""'], {}), "(MODEL_ROOT_PATH, 'saved_model', '{epoch}-{val_dice:.2f}')\n", (14792, 14850), False, 'import os\n'), ((17549, 17596), 'torch.argmax', 'torch.argmax', (['test_outputs'], {'dim': '(1)', 'keepdim': '(True)'}), '(test_outputs, dim=1, keepdim=True)\n', (17561, 17596), False, 'import torch\n'), ((18033, 18086), 'os.path.join', 'os.path.join', (["params['output_path']", 'output_file_name'], {}), "(params['output_path'], output_file_name)\n", (18045, 18086), False, 'import os\n'), ((18162, 18208), 'nibabel.load', 'nib.load', (["test_loader.dataset.data[i]['IMAGE']"], {}), "(test_loader.dataset.data[i]['IMAGE'])\n", (18170, 18208), True, 'import nibabel as nib\n'), ((21503, 21564), 'os.path.join', 'os.path.join', (["params['output_path']", '"""evaluation_results.csv"""'], {}), "(params['output_path'], 'evaluation_results.csv')\n", (21515, 21564), False, 'import os\n'), ((21796, 21821), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21819, 21821), False, 'import torch\n'), ((22169, 22207), 'numpy.prod', 'np.prod', (["_defaults['model']['strides']"], {}), "(_defaults['model']['strides'])\n", (22176, 22207), True, 'import numpy as np\n'), ((23868, 23921), 'os.path.join', 'os.path.join', (["paramfile['outputmodelpath']", '"""results"""'], {}), "(paramfile['outputmodelpath'], 'results')\n", (23880, 23921), False, 'import os\n'), ((3352, 3633), 'monai.transforms.RandCropByPosNegLabeld', 'monaiTrans.RandCropByPosNegLabeld', ([], {'keys': 
'self.keys', 'label_key': "self.RandCropByPosNegLabeld['label_key']", 'spatial_size': 'self.patch_size', 'num_samples': "self.RandCropByPosNegLabeld['num_samples']", 'pos': "self.RandCropByPosNegLabeld['pos']", 'neg': "self.RandCropByPosNegLabeld['neg']"}), "(keys=self.keys, label_key=self.\n RandCropByPosNegLabeld['label_key'], spatial_size=self.patch_size,\n num_samples=self.RandCropByPosNegLabeld['num_samples'], pos=self.\n RandCropByPosNegLabeld['pos'], neg=self.RandCropByPosNegLabeld['neg'])\n", (3385, 3633), True, 'import monai.transforms as monaiTrans\n'), ((17952, 18003), 'os.path.split', 'os.path.split', (["test_loader.dataset.data[i]['IMAGE']"], {}), "(test_loader.dataset.data[i]['IMAGE'])\n", (17965, 18003), False, 'import os\n'), ((19509, 19601), 'monai.metrics.compute_hausdorff_distance', 'monaiMetrics.compute_hausdorff_distance', (['argmax', 'test_labels'], {'label_idx': '(1)', 'percentile': '(95)'}), '(argmax, test_labels, label_idx=1,\n percentile=95)\n', (19548, 19601), True, 'import monai.metrics as monaiMetrics\n'), ((19742, 19821), 'monai.metrics.compute_average_surface_distance', 'monaiMetrics.compute_average_surface_distance', (['argmax', 'test_labels'], {'label_idx': '(1)'}), '(argmax, test_labels, label_idx=1)\n', (19787, 19821), True, 'import monai.metrics as monaiMetrics\n'), ((23471, 23541), 'os.path.join', 'os.path.join', (["paramfile['outputmodelpath']", '"""saved_model"""', '"""last.ckpt"""'], {}), "(paramfile['outputmodelpath'], 'saved_model', 'last.ckpt')\n", (23483, 23541), False, 'import os\n'), ((23772, 23811), 'os.path.dirname', 'os.path.dirname', (["paramfile['inputpath']"], {}), "(paramfile['inputpath'])\n", (23787, 23811), False, 'import os\n'), ((23929, 23946), 'pathlib.Path', 'Path', (['OUTPUT_PATH'], {}), '(OUTPUT_PATH)\n', (23933, 23946), False, 'from pathlib import Path\n'), ((5370, 5388), 'copy.deepcopy', 'dp', (['aug_dict[elef]'], {}), '(aug_dict[elef])\n', (5372, 5388), True, 'from copy import deepcopy as dp\n'), 
((5470, 5539), 'bisImgSeg.utilities.modelObjects.concatFunctionArguments', 'model_objects.concatFunctionArguments', (['new_dict'], {'prefix': '"""monaiTrans."""'}), "(new_dict, prefix='monaiTrans.')\n", (5507, 5539), True, 'import bisImgSeg.utilities.modelObjects as model_objects\n'), ((12268, 12299), 'monai.transforms.Compose', 'monaiTrans.Compose', (['trans_lists'], {}), '(trans_lists)\n', (12286, 12299), True, 'import monai.transforms as monaiTrans\n'), ((12554, 12585), 'monai.transforms.Compose', 'monaiTrans.Compose', (['trans_lists'], {}), '(trans_lists)\n', (12572, 12585), True, 'import monai.transforms as monaiTrans\n'), ((13857, 13890), 'numpy.array', 'np.array', (["data['IMAGE'].shape[2:]"], {}), "(data['IMAGE'].shape[2:])\n", (13865, 13890), True, 'import numpy as np\n'), ((17703, 17798), 'bisImgSeg.utilities.modelObjects.concatFunctionArguments', 'model_objects.concatFunctionArguments', (["params['postprocessing_func']"], {'prefix': '"""monaiTrans."""'}), "(params['postprocessing_func'], prefix\n ='monaiTrans.')\n", (17740, 17798), True, 'import bisImgSeg.utilities.modelObjects as model_objects\n'), ((20232, 20325), 'monai.metrics.compute_hausdorff_distance', 'monaiMetrics.compute_hausdorff_distance', (['largest', 'test_labels'], {'label_idx': '(1)', 'percentile': '(95)'}), '(largest, test_labels, label_idx=1,\n percentile=95)\n', (20271, 20325), True, 'import monai.metrics as monaiMetrics\n'), ((20496, 20581), 'monai.metrics.compute_average_surface_distance', 'monaiMetrics.compute_average_surface_distance', (['largest', 'test_labels'], {'label_idx': '(1)'}), '(largest, test_labels, label_idx=1\n )\n', (20541, 20581), True, 'import monai.metrics as monaiMetrics\n'), ((23597, 23652), 'os.path.join', 'os.path.join', (["paramfile['outputmodelpath']", '"""last.ckpt"""'], {}), "(paramfile['outputmodelpath'], 'last.ckpt')\n", (23609, 23652), False, 'import os\n'), ((14030, 14069), 'numpy.amax', 'np.amax', (['[temp_max, image_dims]'], {'axis': '(0)'}), 
'([temp_max, image_dims], axis=0)\n', (14037, 14069), True, 'import numpy as np\n'), ((21935, 21964), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['i'], {}), '(i)\n', (21961, 21964), False, 'import torch\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from numpy.testing import assert_array_equal
import numpy as np
from calour._testing import Tests, assert_experiment_equal
from calour.filtering import _balanced_subsample
import calour as ca
class FilteringTests(Tests):
    """Unit tests for calour's Experiment filtering and downsampling methods."""
    def setUp(self):
        """Load the two small test experiments used by every test."""
        super().setUp()
        self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)
        self.test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
    def test_balanced_subsample(self):
        """_balanced_subsample keeps exactly n occurrences of each class value."""
        rand = np.random.RandomState(None)
        d = rand.choice([0, 1, 2], 9)
        # n may exceed a class size (9, 10) -- counts must still equal n
        for n in (1, 3, 6, 9, 10):
            keep = _balanced_subsample(d, n, None)
            d2 = d[keep]
            uniq, counts = np.unique(d2, return_counts=True)
            self.assertTrue(np.all(counts == n))
    def test_downsample_sample(self):
        """Downsampling by a sample field returns a reordered subset of samples."""
        obs = self.test2.downsample('group')
        # should be down to 4 samples; feature number is the same
        self.assertEqual(obs.shape, (4, 8))
        sid = obs.sample_metadata.index.tolist()
        all_sid = self.test2.sample_metadata.index.tolist()
        exp = self.test2.reorder([all_sid.index(i) for i in sid])
        assert_experiment_equal(obs, exp)
    def test_downsample_feature(self):
        """Downsampling over axis=1 subsets features instead of samples."""
        obs = self.test2.downsample('oxygen', axis=1)
        sid = obs.feature_metadata.index.tolist()
        self.assertEqual(obs.shape, (9, 4))
        all_sid = self.test2.feature_metadata.index.tolist()
        exp = self.test2.reorder([all_sid.index(i) for i in sid], axis=1)
        self.assertEqual(obs, exp)
    def test_downsample_num_keep(self):
        # test keeping num_keep samples, and inplace
        obs = self.test1.downsample('group', num_keep=9, inplace=True)
        # should be down to 2 groups (18 samples); feature number is the same
        self.assertEqual(obs.shape, (18, 12))
        self.assertEqual(set(obs.sample_metadata['group']), set(['1', '2']))
        # inplace=True must return the same object
        self.assertIs(obs, self.test1)
    def test_filter_by_metadata_sample_edge_cases(self):
        """Filtering on values absent from / covering all of the data."""
        # no group 3 - none filtered
        obs = self.test2.filter_by_metadata('group', [3])
        self.assertEqual(obs.shape, (0, 8))
        obs = self.test2.filter_by_metadata('group', [3], negate=True)
        assert_experiment_equal(obs, self.test2)
        # all samples are kept
        obs = self.test2.filter_by_metadata('group', [1, 2])
        assert_experiment_equal(obs, self.test2)
        obs = self.test2.filter_by_metadata('group', [1, 2], negate=True)
        self.assertEqual(obs.shape, (0, 8))
    def test_filter_by_metadata_sample(self):
        """Callable select works for both sparse/dense data, inplace or not."""
        for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
            test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat,
                            sparse=sparse, normalize=None)
            # only keep samples with ori.order between 3 and 7 (exclusive)
            obs = test2.filter_by_metadata(
                'ori.order', lambda l: [7 > i > 3 for i in l], inplace=inplace)
            self.assertEqual(obs.shape, (3, 8))
            self.assertEqual(obs.sample_metadata.index.tolist(), ['S5', 'S6', 'S7'])
            if inplace:
                self.assertIs(obs, test2)
            else:
                self.assertIsNot(obs, test2)
    def test_filter_by_metadata_feature_edge_cases(self):
        # none filtered
        obs = self.test2.filter_by_metadata('oxygen', ['facultative'], axis=1)
        self.assertEqual(obs.shape, (9, 0))
        obs = self.test2.filter_by_metadata('oxygen', ['facultative'], axis=1, negate=True)
        assert_experiment_equal(obs, self.test2)
    def test_filter_by_metadata_feature(self):
        """Feature-axis metadata filtering across sparse/inplace combinations."""
        for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
            test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse, normalize=None)
            # only keep the anaerobic features
            obs = test2.filter_by_metadata('oxygen', ['anaerobic'], axis=1, inplace=inplace)
            self.assertEqual(obs.shape, (9, 2))
            self.assertListEqual(obs.feature_metadata.index.tolist(), ['TG', 'TC'])
            if inplace:
                self.assertIs(obs, test2)
            else:
                self.assertIsNot(obs, test2)
    def test_filter_by_metadata_na(self):
        """select=None drops features whose metadata value is NA."""
        test = self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat,
                                    normalize=None, feature_metadata_kwargs={'na_values': 'B'})
        test_drop = test.filter_by_metadata('level1', select=None, axis='f')
        self.assertEqual(self.test2.sample_metadata.index.tolist(),
                         test_drop.sample_metadata.index.tolist())
        self.assertEqual(['AT', 'AG', 'AC', 'TA', 'TT', 'TC'],
                         test_drop.feature_metadata.index.tolist())
    def test_filter_by_data_sample_edge_cases(self):
        # all samples are filtered out
        obs = self.test2.filter_by_data('abundance', axis=0, cutoff=100000, mean_or_sum='sum')
        self.assertEqual(obs.shape, (0, 8))
        # none is filtered out
        obs = self.test2.filter_by_data('abundance', axis=0, cutoff=1, mean_or_sum='sum')
        assert_experiment_equal(obs, self.test2)
        self.assertIsNot(obs, self.test2)
    def test_filter_by_data_sample(self):
        """Abundance cutoff over samples keeps data rows unchanged."""
        for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
            test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse, normalize=None)
            # filter out samples with abundance < 1200. only the last sample is filtered out.
            obs = test2.filter_by_data('abundance', axis=0, inplace=inplace, cutoff=1200, mean_or_sum='sum')
            self.assertEqual(obs.shape, (8, 8))
            self.assertNotIn('S9', obs.sample_metadata)
            for sid in obs.sample_metadata.index:
                assert_array_equal(obs[sid, :], self.test2[sid, :])
            if inplace:
                self.assertIs(obs, test2)
            else:
                self.assertIsNot(obs, test2)
    def test_filter_by_data_feature_edge_cases(self):
        # all features are filtered out
        obs = self.test2.filter_by_data('abundance', axis=1, cutoff=10000, mean_or_sum='sum')
        self.assertEqual(obs.shape, (9, 0))
        # none is filtered out
        obs = self.test2.filter_by_data('abundance', axis=1, cutoff=1, mean_or_sum='sum')
        assert_experiment_equal(obs, self.test2)
        self.assertIsNot(obs, self.test2)
    def test_filter_by_data_feature(self):
        # one feature is filtered out when cutoff is set to 25
        for inplace in [True, False]:
            obs = self.test2.filter_by_data('abundance', axis=1, inplace=inplace, cutoff=25, mean_or_sum='sum')
            self.assertEqual(obs.shape, (9, 7))
            self.assertNotIn('TA', obs.feature_metadata)
            for fid in obs.feature_metadata.index:
                assert_array_equal(obs[:, fid], self.test2[:, fid])
            if inplace:
                self.assertIs(obs, self.test2)
            else:
                self.assertIsNot(obs, self.test2)
    def test_filter_prevalence(self):
        # keep only features present in at least half of the samples
        exp = self.test1.filter_prevalence(fraction=0.5)
        fids = ['AA', 'AT', 'AG', 'TA', 'TT', 'TG', 'TC', 'GG']
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
        self.assertEqual(exp.shape[0], self.test1.shape[0])
    def test_filter_prevalence_zero(self):
        # fraction > 1 is unsatisfiable, so every feature is filtered out
        exp = self.test1.filter_prevalence(fraction=1.01)
        self.assertListEqual(exp.feature_metadata.index.tolist(), [])
        self.assertEqual(exp.shape[0], self.test1.shape[0])
    def test_filter_prevalence_check(self):
        # filter over all samples always filters more or equal features than
        # filter over sample groups
        frac = 0.001
        exp = self.test1.filter_prevalence(fraction=frac)
        n = exp.shape[1]
        for i in self.test1.sample_metadata.columns:
            x = self.test1.filter_prevalence(fraction=frac, field=i)
            self.assertLessEqual(x.shape[1], n)
    def test_filter_abundance(self):
        """Only the two most abundant features survive the 17008 cutoff."""
        exp = self.test1.filter_abundance(17008)
        self.assertEqual(exp.shape[1], 2)
        fids = ['TC', 'GG']
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
    def test_filter_mean_abundance(self):
        # default is 0.01 - keep features with mean abundance >= 1%
        test1 = self.test1.normalize()
        exp = test1.filter_mean_abundance()
        fids = ['AT', 'TG', 'TC', 'GG']
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
        self.assertEqual(exp.shape[0], self.test1.shape[0])
        exp = test1.filter_mean_abundance(0.4, field=None)
        fids = ['TC', 'GG']
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
        exp = test1.filter_mean_abundance(0.6, field=None)
        self.assertListEqual(exp.feature_metadata.index.tolist(), [])
        # grouping by field can rescue a feature abundant in one group
        exp = test1.filter_mean_abundance(0.6, field='group')
        fids = ['GG']
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
    def test_filter_mean_abundance_check(self):
        # filter over all samples always filters more or equal features than
        # filter over sample groups
        abund = 0.001
        exp = self.test1.filter_mean_abundance(abund)
        n = exp.shape[1]
        for i in self.test1.sample_metadata.columns:
            x = self.test1.filter_mean_abundance(abund, field=i)
            self.assertLessEqual(x.shape[1], n)
    def test_filter_ids_not_in_list(self):
        """Unknown ids ('pita') are silently dropped from the keep list."""
        fids = ['GG', 'pita']
        exp = self.test1.filter_ids(fids)
        self.assertListEqual(exp.feature_metadata.index.tolist(), ['GG'])
    def test_filter_ids_default(self):
        """filter_ids keeps features in the requested order, not inplace."""
        fids = ['GG', 'AA', 'TT']
        exp = self.test1.filter_ids(fids)
        self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
        self.assertIsNot(exp, self.test1)
    def test_filter_ids_samples_inplace_negate(self):
        """negate=True over the sample axis drops the listed samples inplace."""
        badsamples = ['S1', 'S3', 'S5', 'S7', 'S9', 'S11', 'S13', 'S15', 'S17', 'S19']
        oksamples = list(set(self.test1.sample_metadata.index.values).difference(set(badsamples)))
        exp = self.test1.filter_ids(badsamples, axis=0, negate=True, inplace=True)
        self.assertCountEqual(list(exp.sample_metadata.index.values), oksamples)
        self.assertIs(exp, self.test1)
    def test_filter_sample_categories(self):
        """Categories smaller than min_samples are removed entirely."""
        test = self.test1.filter_ids(['badsample'], axis=0, negate=True)
        # does not filter anything
        assert_experiment_equal(test.filter_sample_categories('group', 9), test)
        # filter group of 2
        assert_experiment_equal(test.filter_sample_categories('group', 10),
                                test.filter_samples('group', '1'))
if __name__ == '__main__':
    # run the unittest suite when this module is executed directly
    main()
| [
"unittest.main",
"numpy.testing.assert_array_equal",
"calour._testing.assert_experiment_equal",
"calour.read",
"numpy.all",
"numpy.random.RandomState",
"calour.filtering._balanced_subsample",
"numpy.unique"
] | [((11414, 11420), 'unittest.main', 'main', ([], {}), '()\n', (11418, 11420), False, 'from unittest import main\n'), ((668, 742), 'calour.read', 'ca.read', (['self.test2_biom', 'self.test2_samp', 'self.test2_feat'], {'normalize': 'None'}), '(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)\n', (675, 742), True, 'import calour as ca\n'), ((764, 838), 'calour.read', 'ca.read', (['self.test1_biom', 'self.test1_samp', 'self.test1_feat'], {'normalize': 'None'}), '(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)\n', (771, 838), True, 'import calour as ca\n'), ((894, 921), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (915, 921), True, 'import numpy as np\n'), ((1559, 1592), 'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'exp'], {}), '(obs, exp)\n', (1582, 1592), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((2633, 2673), 'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'self.test2'], {}), '(obs, self.test2)\n', (2656, 2673), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((2779, 2819), 'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'self.test2'], {}), '(obs, self.test2)\n', (2802, 2819), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((3960, 4000), 'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'self.test2'], {}), '(obs, self.test2)\n', (3983, 4000), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((4737, 4859), 'calour.read', 'ca.read', (['self.test2_biom', 'self.test2_samp', 'self.test2_feat'], {'normalize': 'None', 'feature_metadata_kwargs': "{'na_values': 'B'}"}), "(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None,\n feature_metadata_kwargs={'na_values': 'B'})\n", (4744, 4859), True, 'import calour as ca\n'), ((5596, 5636), 
'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'self.test2'], {}), '(obs, self.test2)\n', (5619, 5636), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((6843, 6883), 'calour._testing.assert_experiment_equal', 'assert_experiment_equal', (['obs', 'self.test2'], {}), '(obs, self.test2)\n', (6866, 6883), False, 'from calour._testing import Tests, assert_experiment_equal\n'), ((1014, 1045), 'calour.filtering._balanced_subsample', '_balanced_subsample', (['d', 'n', 'None'], {}), '(d, n, None)\n', (1033, 1045), False, 'from calour.filtering import _balanced_subsample\n'), ((1098, 1131), 'numpy.unique', 'np.unique', (['d2'], {'return_counts': '(True)'}), '(d2, return_counts=True)\n', (1107, 1131), True, 'import numpy as np\n'), ((3098, 3191), 'calour.read', 'ca.read', (['self.test2_biom', 'self.test2_samp', 'self.test2_feat'], {'sparse': 'sparse', 'normalize': 'None'}), '(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse,\n normalize=None)\n', (3105, 3191), True, 'import calour as ca\n'), ((4162, 4255), 'calour.read', 'ca.read', (['self.test2_biom', 'self.test2_samp', 'self.test2_feat'], {'sparse': 'sparse', 'normalize': 'None'}), '(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse,\n normalize=None)\n', (4169, 4255), True, 'import calour as ca\n'), ((5835, 5928), 'calour.read', 'ca.read', (['self.test2_biom', 'self.test2_samp', 'self.test2_feat'], {'sparse': 'sparse', 'normalize': 'None'}), '(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse,\n normalize=None)\n', (5842, 5928), True, 'import calour as ca\n'), ((1160, 1179), 'numpy.all', 'np.all', (['(counts == n)'], {}), '(counts == n)\n', (1166, 1179), True, 'import numpy as np\n'), ((6298, 6349), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['obs[sid, :]', 'self.test2[sid, :]'], {}), '(obs[sid, :], self.test2[sid, :])\n', (6316, 6349), False, 'from numpy.testing import assert_array_equal\n'), ((7355, 
7406), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['obs[:, fid]', 'self.test2[:, fid]'], {}), '(obs[:, fid], self.test2[:, fid])\n', (7373, 7406), False, 'from numpy.testing import assert_array_equal\n')] |
import sys
import numpy as np
def readfile(Basedir, allquery):
    """Collect per-method suspiciousness values for a list of features.

    For each feature name in ``allquery``, reads ``Basedir + feature + '.txt'``
    where every line is '<method> <value>'.

    Returns a dict mapping each method name to a numpy vector of length
    ``len(allquery)`` whose i-th entry is that method's value for
    ``allquery[i]`` (0.0 when the file provides no value for the method).
    """
    featuresize = len(allquery)
    featureDic = {}
    # enumerate gives the feature's fixed position directly, instead of the
    # original per-line allquery.index(feature) list search (O(lines*features))
    for index, feature in enumerate(allquery):
        Susfile = Basedir + feature + '.txt'
        with open(Susfile) as myfile:
            for line in myfile:
                if not line.strip():
                    continue  # tolerate blank/trailing lines
                items = line.split(" ")
                Method = items[0]
                value = items[1]
                if Method not in featureDic:
                    featureDic[Method] = np.zeros(featuresize)  # initialize
                # numpy casts the string value to float on assignment
                featureDic[Method][index] = value
    return featureDic
def writefeatures(Dic, Wfile):
    """Append one line per method to `Wfile`: '<method> v1,v2,...,\\n'.

    Each value is rendered with str() and followed by a comma (including a
    trailing comma after the last value), matching the downstream parser.
    """
    with open(Wfile, "a") as out:
        for method_name, values in Dic.items():
            rendered = ''.join(str(v) + ',' for v in values)
            out.write(method_name + ' ' + rendered + '\n')
def main():
    """Command-line entry point: <project> <ID> <root_path>.

    Merges the per-feature IR suspiciousness files found under
    .../<project>/<ID>/tempResults/ into one feature-vector file per method.
    """
    project, bug_id, root_path = sys.argv[1], sys.argv[2], sys.argv[3]
    base = root_path + "FinalFeatures/Textual/" + project + "/" + bug_id
    susdir = base + "/tempResults/"
    writefile = base + '.txt'
    # the 15 information-retrieval query flavours, one feature column each
    ir_query = ['MtoT', 'MtoTc', 'MtoTFS', 'McltoT', 'McltoTc', 'McltoTFS',
                'MmtoT', 'MmtoTc', 'MmtoTFS', 'MvtoT', 'MvtoTc', 'MvtoTFS',
                'McomtoT', 'McomtoTc', 'McomtoTFS']
    feature_dic = readfile(susdir, ir_query)
    writefeatures(feature_dic, writefile)
main()
| [
"numpy.zeros"
] | [((474, 495), 'numpy.zeros', 'np.zeros', (['featuresize'], {}), '(featuresize)\n', (482, 495), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import pytest
from pytest import approx
from math import radians, nan
import numpy as np
import pymap3d as pm
# Reference observer location: geodetic (lat deg, lon deg, alt m) and radians.
lla0 = (42, -82, 200)
rlla0 = (radians(lla0[0]), radians(lla0[1]), lla0[2])
# Geodetic point expected when looking along aer0 from lla0 (deg and rad forms).
lla1 = (42.002582, -81.997752, 1.1397018e3)
rlla1 = (np.radians(lla1[0]), np.radians(lla1[1]), lla1[2])
# ECEF (x, y, z) in metres corresponding to lla0.
xyz0 = (660675.2518247,
        -4700948.68316,
        4245737.66222)
# Test look direction: azimuth deg, elevation deg, slant range m (and radians).
aer0 = (33, 70, 1000)
raer0 = (np.radians(aer0[0]), np.radians(aer0[1]), aer0[2])
# Default reference ellipsoid and its semi-major/semi-minor axes.
ELL = pm.Ellipsoid()
A = ELL.semimajor_axis
B = ELL.semiminor_axis
atol_dist = 1e-6  # 1 micrometer
@pytest.mark.parametrize(
    'lla',
    [(42, -82, 200),
     ([42], [-82], [200]),
     (np.array(42), np.array(-82), np.array(200)),
     (np.array([42]), np.array([-82]), np.array([200])),
     (np.atleast_3d(42), np.atleast_3d(-82), np.atleast_3d(200))],
    ids=('scalar', 'list', '0d', '1d', '3d'))
def test_scalar_geodetic2ecef(lla):
    """geodetic2ecef accepts scalars, lists and arrays of any rank."""
    ecef = pm.geodetic2ecef(*lla)
    assert ecef == approx(xyz0)
@pytest.mark.parametrize(
    'xyz',
    [(xyz0[0], xyz0[1], xyz0[2]),
     ([xyz0[0]], [xyz0[1]], [xyz0[2]]),
     (np.array(xyz0[0]), np.array(xyz0[1]), np.array(xyz0[2])),
     (np.array([xyz0[0]]), np.array([xyz0[1]]), np.array([xyz0[2]])),
     (np.atleast_3d(xyz0[0]), np.atleast_3d(xyz0[1]), np.atleast_3d(xyz0[2]))],
    ids=('scalar', 'list', '0d', '1d', '3d'))
def test_scalar_ecef2geodetic(xyz):
    """ecef2geodetic accepts scalars, lists and arrays of any rank."""
    geodetic = pm.ecef2geodetic(*xyz)
    assert list(geodetic) == approx(lla0, rel=1e-4)
@pytest.mark.parametrize(
    'xyz',
    [(0, A, 50),
     ([0], [A], [50]),
     (np.array(0), np.array(A), np.array(50)),
     (np.array([0]), np.array([A]), np.array([50])),
     (np.atleast_3d(0), np.atleast_3d(A), np.atleast_3d(50))],
    ids=('scalar', 'list', '0d', '1d', '3d'))
def test_scalar_aer_enu(xyz):
    """ECEF -> ENU -> ECEF round-trips for inputs of any rank."""
    round_trip = pm.enu2ecef(*pm.ecef2enu(*xyz, 0, 90, -100), 0, 90, -100)
    assert round_trip == approx([0, A, 50])
def test_xarray():
    """Round-trip geodetic<->ECEF with xarray.DataArray inputs."""
    xarray = pytest.importorskip('xarray')
    lla_da = xarray.DataArray(list(lla0))
    ecef = pm.geodetic2ecef(*lla_da)
    assert ecef == approx(xyz0)
    assert isinstance(ecef[0], xarray.DataArray)
    # %%
    xyz_da = xarray.DataArray(list(xyz0))
    geodetic = pm.ecef2geodetic(*xyz_da)
    assert geodetic == approx(lla0)
    # xarrayness is lost, possibly expensive to keep due to isinstance()
    assert isinstance(geodetic[0], float)
def test_pandas():
    """geodetic2ecef accepts pandas Series and DataFrame columns."""
    pandas = pytest.importorskip('pandas')
    lla_series = pandas.Series(lla0)
    ecef = pm.geodetic2ecef(*lla_series)
    assert ecef == approx(xyz0)
    # a Series input degenerates to scalar floats by pandas itself
    assert isinstance(ecef[0], float)
    # %% DataFrame columns stay Series
    frame = pandas.DataFrame([[*lla0], [*lla0]], columns=['lat', 'lon', 'alt_m'])
    ecef = pm.geodetic2ecef(frame['lat'], frame['lon'], frame['alt_m'])
    for got, want in zip(ecef, xyz0):
        assert got.values == approx(want)
    assert isinstance(ecef[0], pandas.Series)
def test_ecef():
    """Geodetic<->ECEF conversions in degrees and radians, plus validation."""
    ecef = pm.geodetic2ecef(*lla0)
    assert ecef == approx(xyz0)
    assert pm.geodetic2ecef(*rlla0, deg=False) == approx(ecef)
    # a latitude outside [-90, 90] must be rejected
    with pytest.raises(ValueError):
        pm.geodetic2ecef(-100, lla0[1], lla0[2])
    assert pm.ecef2geodetic(*ecef) == approx(lla0)
    assert pm.ecef2geodetic(*ecef, deg=False) == approx(rlla0)
    # 1 m below the surface at 45 deg east on the equator
    surf = (A - 1) / np.sqrt(2)
    assert pm.ecef2geodetic(surf, surf, 0) == approx([0, 45, -1])
@pytest.mark.parametrize(
    'lla, xyz',
    [((0, 0, -1), (A - 1, 0, 0)),
     ((0, 90, -1), (0, A - 1, 0)),
     ((0, -90, -1), (0, -A + 1, 0)),
     ((90, 0, -1), (0, 0, B - 1)),
     ((90, 15, -1), (0, 0, B - 1)),
     ((-90, 0, -1), (0, 0, -B + 1))])
def test_geodetic2ecef(lla, xyz):
    """Points 1 m below the surface at poles/equator map onto the ECEF axes."""
    got = pm.geodetic2ecef(*lla)
    assert got == approx(xyz, abs=atol_dist)
@pytest.mark.parametrize(
    'xyz, lla',
    [((A - 1, 0, 0), (0, 0, -1)),
     ((0, A - 1, 0), (0, 90, -1)),
     ((0, 0, B - 1), (90, 0, -1)),
     ((0, 0, -B + 1), (-90, 0, -1)),
     ((-A + 1, 0, 0), (0, 180, -1))])
def test_ecef2geodetic(xyz, lla):
    """Axis-aligned ECEF points recover the expected geodetic coordinates."""
    got = pm.ecef2geodetic(*xyz)
    assert got == approx(lla)
def test_aer():
    """AER<->geodetic round trips in degrees and radians; bad range rejected."""
    target = pm.aer2geodetic(*aer0, *lla0)
    rtarget = pm.aer2geodetic(*raer0, *rlla0, deg=False)
    # a negative slant range is invalid
    with pytest.raises(ValueError):
        pm.aer2geodetic(aer0[0], aer0[1], -1, *lla0)
    assert target == approx(lla1)
    assert rtarget == approx(rlla1)
    assert pm.geodetic2aer(*target, *lla0) == approx(aer0)
    assert pm.geodetic2aer(*rtarget, *rlla0, deg=False) == approx(raer0)
def test_allnan():
    """All-NaN inputs propagate NaN through geodetic2aer and aer2geodetic."""
    nans = np.full((10, 10), nan)
    assert np.isnan(pm.geodetic2aer(nans, nans, nans, *lla0)).all()
    assert np.isnan(pm.aer2geodetic(nans, nans, nans, *lla0)).all()
def test_somenan():
    """A NaN row must not corrupt the valid rows of a vectorized call."""
    pts = np.stack((xyz0, (nan, nan, nan)))
    lat, lon, alt = pm.ecef2geodetic(pts[:, 0], pts[:, 1], pts[:, 2])
    assert (lat[0], lon[0], alt[0]) == approx(lla0)
if __name__ == '__main__':
    # -x: stop at the first failing test when this module is run as a script
    pytest.main(['-x', __file__])
| [
"numpy.empty",
"pytest.main",
"pymap3d.aer2geodetic",
"pytest.mark.parametrize",
"pymap3d.ecef2geodetic",
"pymap3d.ecef2enu",
"pymap3d.Ellipsoid",
"math.radians",
"pytest.raises",
"pymap3d.enu2ecef",
"numpy.stack",
"numpy.radians",
"pymap3d.geodetic2aer",
"pytest.approx",
"pytest.importo... | [((476, 490), 'pymap3d.Ellipsoid', 'pm.Ellipsoid', ([], {}), '()\n', (488, 490), True, 'import pymap3d as pm\n'), ((4115, 4346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lla, xyz"""', '[((0, 0, -1), (A - 1, 0, 0)), ((0, 90, -1), (0, A - 1, 0)), ((0, -90, -1),\n (0, -A + 1, 0)), ((90, 0, -1), (0, 0, B - 1)), ((90, 15, -1), (0, 0, B -\n 1)), ((-90, 0, -1), (0, 0, -B + 1))]'], {}), "('lla, xyz', [((0, 0, -1), (A - 1, 0, 0)), ((0, 90, \n -1), (0, A - 1, 0)), ((0, -90, -1), (0, -A + 1, 0)), ((90, 0, -1), (0, \n 0, B - 1)), ((90, 15, -1), (0, 0, B - 1)), ((-90, 0, -1), (0, 0, -B + 1))])\n", (4138, 4346), False, 'import pytest\n'), ((4667, 4867), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""xyz, lla"""', '[((A - 1, 0, 0), (0, 0, -1)), ((0, A - 1, 0), (0, 90, -1)), ((0, 0, B - 1),\n (90, 0, -1)), ((0, 0, -B + 1), (-90, 0, -1)), ((-A + 1, 0, 0), (0, 180,\n -1))]'], {}), "('xyz, lla', [((A - 1, 0, 0), (0, 0, -1)), ((0, A - \n 1, 0), (0, 90, -1)), ((0, 0, B - 1), (90, 0, -1)), ((0, 0, -B + 1), (-\n 90, 0, -1)), ((-A + 1, 0, 0), (0, 180, -1))])\n", (4690, 4867), False, 'import pytest\n'), ((165, 181), 'math.radians', 'radians', (['lla0[0]'], {}), '(lla0[0])\n', (172, 181), False, 'from math import radians, nan\n'), ((183, 199), 'math.radians', 'radians', (['lla0[1]'], {}), '(lla0[1])\n', (190, 199), False, 'from math import radians, nan\n'), ((263, 282), 'numpy.radians', 'np.radians', (['lla1[0]'], {}), '(lla1[0])\n', (273, 282), True, 'import numpy as np\n'), ((284, 303), 'numpy.radians', 'np.radians', (['lla1[1]'], {}), '(lla1[1])\n', (294, 303), True, 'import numpy as np\n'), ((418, 437), 'numpy.radians', 'np.radians', (['aer0[0]'], {}), '(aer0[0])\n', (428, 437), True, 'import numpy as np\n'), ((439, 458), 'numpy.radians', 'np.radians', (['aer0[1]'], {}), '(aer0[1])\n', (449, 458), True, 'import numpy as np\n'), ((1146, 1168), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*lla'], {}), '(*lla)\n', (1162, 1168), True, 
'import pymap3d as pm\n'), ((1853, 1875), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['*xyz'], {}), '(*xyz)\n', (1869, 1875), True, 'import pymap3d as pm\n'), ((2472, 2502), 'pymap3d.ecef2enu', 'pm.ecef2enu', (['*xyz', '(0)', '(90)', '(-100)'], {}), '(*xyz, 0, 90, -100)\n', (2483, 2502), True, 'import pymap3d as pm\n'), ((2602, 2631), 'pytest.importorskip', 'pytest.importorskip', (['"""xarray"""'], {}), "('xarray')\n", (2621, 2631), False, 'import pytest\n'), ((2685, 2710), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*xr_lla'], {}), '(*xr_lla)\n', (2701, 2710), True, 'import pymap3d as pm\n'), ((2849, 2874), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['*xr_xyz'], {}), '(*xr_xyz)\n', (2865, 2874), True, 'import pymap3d as pm\n'), ((3048, 3077), 'pytest.importorskip', 'pytest.importorskip', (['"""pandas"""'], {}), "('pandas')\n", (3067, 3077), False, 'import pytest\n'), ((3122, 3147), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*pd_lla'], {}), '(*pd_lla)\n', (3138, 3147), True, 'import pymap3d as pm\n'), ((3397, 3460), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (["pd_lla['lat']", "pd_lla['lon']", "pd_lla['alt_m']"], {}), "(pd_lla['lat'], pd_lla['lon'], pd_lla['alt_m'])\n", (3413, 3460), True, 'import pymap3d as pm\n'), ((3668, 3691), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*lla0'], {}), '(*lla0)\n', (3684, 3691), True, 'import pymap3d as pm\n'), ((5162, 5191), 'pymap3d.aer2geodetic', 'pm.aer2geodetic', (['*aer0', '*lla0'], {}), '(*aer0, *lla0)\n', (5177, 5191), True, 'import pymap3d as pm\n'), ((5204, 5246), 'pymap3d.aer2geodetic', 'pm.aer2geodetic', (['*raer0', '*rlla0'], {'deg': '(False)'}), '(*raer0, *rlla0, deg=False)\n', (5219, 5246), True, 'import pymap3d as pm\n'), ((5566, 5584), 'numpy.empty', 'np.empty', (['(10, 10)'], {}), '((10, 10))\n', (5574, 5584), True, 'import numpy as np\n'), ((5772, 5805), 'numpy.stack', 'np.stack', (['(xyz0, (nan, nan, nan))'], {}), '((xyz0, (nan, nan, nan)))\n', (5780, 5805), True, 'import numpy as np\n'), 
((5827, 5876), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['xyz[:, 0]', 'xyz[:, 1]', 'xyz[:, 2]'], {}), '(xyz[:, 0], xyz[:, 1], xyz[:, 2])\n', (5843, 5876), True, 'import pymap3d as pm\n'), ((5962, 5991), 'pytest.main', 'pytest.main', (["['-x', __file__]"], {}), "(['-x', __file__])\n", (5973, 5991), False, 'import pytest\n'), ((1197, 1209), 'pytest.approx', 'approx', (['xyz0'], {}), '(xyz0)\n', (1203, 1209), False, 'from pytest import approx\n'), ((1907, 1931), 'pytest.approx', 'approx', (['lla0'], {'rel': '(0.0001)'}), '(lla0, rel=0.0001)\n', (1913, 1931), False, 'from pytest import approx\n'), ((2515, 2545), 'pymap3d.enu2ecef', 'pm.enu2ecef', (['*enu', '(0)', '(90)', '(-100)'], {}), '(*enu, 0, 90, -100)\n', (2526, 2545), True, 'import pymap3d as pm\n'), ((2549, 2567), 'pytest.approx', 'approx', (['[0, A, 50]'], {}), '([0, A, 50])\n', (2555, 2567), False, 'from pytest import approx\n'), ((2730, 2742), 'pytest.approx', 'approx', (['xyz0'], {}), '(xyz0)\n', (2736, 2742), False, 'from pytest import approx\n'), ((2894, 2906), 'pytest.approx', 'approx', (['lla0'], {}), '(lla0)\n', (2900, 2906), False, 'from pytest import approx\n'), ((3167, 3179), 'pytest.approx', 'approx', (['xyz0'], {}), '(xyz0)\n', (3173, 3179), False, 'from pytest import approx\n'), ((3490, 3505), 'pytest.approx', 'approx', (['xyz0[0]'], {}), '(xyz0[0])\n', (3496, 3505), False, 'from pytest import approx\n'), ((3534, 3549), 'pytest.approx', 'approx', (['xyz0[1]'], {}), '(xyz0[1])\n', (3540, 3549), False, 'from pytest import approx\n'), ((3578, 3593), 'pytest.approx', 'approx', (['xyz0[2]'], {}), '(xyz0[2])\n', (3584, 3593), False, 'from pytest import approx\n'), ((3711, 3723), 'pytest.approx', 'approx', (['xyz0'], {}), '(xyz0)\n', (3717, 3723), False, 'from pytest import approx\n'), ((3735, 3770), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*rlla0'], {'deg': '(False)'}), '(*rlla0, deg=False)\n', (3751, 3770), True, 'import pymap3d as pm\n'), ((3774, 3785), 'pytest.approx', 'approx', 
(['xyz'], {}), '(xyz)\n', (3780, 3785), False, 'from pytest import approx\n'), ((3796, 3821), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3809, 3821), False, 'import pytest\n'), ((3831, 3871), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['(-100)', 'lla0[1]', 'lla0[2]'], {}), '(-100, lla0[1], lla0[2])\n', (3847, 3871), True, 'import pymap3d as pm\n'), ((3884, 3906), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['*xyz'], {}), '(*xyz)\n', (3900, 3906), True, 'import pymap3d as pm\n'), ((3910, 3922), 'pytest.approx', 'approx', (['lla0'], {}), '(lla0)\n', (3916, 3922), False, 'from pytest import approx\n'), ((3934, 3967), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['*xyz'], {'deg': '(False)'}), '(*xyz, deg=False)\n', (3950, 3967), True, 'import pymap3d as pm\n'), ((3971, 3984), 'pytest.approx', 'approx', (['rlla0'], {}), '(rlla0)\n', (3977, 3984), False, 'from pytest import approx\n'), ((4092, 4111), 'pytest.approx', 'approx', (['[0, 45, -1]'], {}), '([0, 45, -1])\n', (4098, 4111), False, 'from pytest import approx\n'), ((4611, 4633), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['*lla'], {}), '(*lla)\n', (4627, 4633), True, 'import pymap3d as pm\n'), ((4637, 4663), 'pytest.approx', 'approx', (['xyz'], {'abs': 'atol_dist'}), '(xyz, abs=atol_dist)\n', (4643, 4663), False, 'from pytest import approx\n'), ((5095, 5117), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['*xyz'], {}), '(*xyz)\n', (5111, 5117), True, 'import pymap3d as pm\n'), ((5121, 5132), 'pytest.approx', 'approx', (['lla'], {}), '(lla)\n', (5127, 5132), False, 'from pytest import approx\n'), ((5257, 5282), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5270, 5282), False, 'import pytest\n'), ((5292, 5336), 'pymap3d.aer2geodetic', 'pm.aer2geodetic', (['aer0[0]', 'aer0[1]', '(-1)', '*lla0'], {}), '(aer0[0], aer0[1], -1, *lla0)\n', (5307, 5336), True, 'import pymap3d as pm\n'), ((5357, 5369), 'pytest.approx', 'approx', (['lla1'], {}), '(lla1)\n', 
(5363, 5369), False, 'from pytest import approx\n'), ((5390, 5403), 'pytest.approx', 'approx', (['rlla1'], {}), '(rlla1)\n', (5396, 5403), False, 'from pytest import approx\n'), ((5416, 5445), 'pymap3d.geodetic2aer', 'pm.geodetic2aer', (['*lla2', '*lla0'], {}), '(*lla2, *lla0)\n', (5431, 5445), True, 'import pymap3d as pm\n'), ((5449, 5461), 'pytest.approx', 'approx', (['aer0'], {}), '(aer0)\n', (5455, 5461), False, 'from pytest import approx\n'), ((5473, 5515), 'pymap3d.geodetic2aer', 'pm.geodetic2aer', (['*rlla2', '*rlla0'], {'deg': '(False)'}), '(*rlla2, *rlla0, deg=False)\n', (5488, 5515), True, 'import pymap3d as pm\n'), ((5519, 5532), 'pytest.approx', 'approx', (['raer0'], {}), '(raer0)\n', (5525, 5532), False, 'from pytest import approx\n'), ((5916, 5928), 'pytest.approx', 'approx', (['lla0'], {}), '(lla0)\n', (5922, 5928), False, 'from pytest import approx\n'), ((722, 734), 'numpy.array', 'np.array', (['(42)'], {}), '(42)\n', (730, 734), True, 'import numpy as np\n'), ((736, 749), 'numpy.array', 'np.array', (['(-82)'], {}), '(-82)\n', (744, 749), True, 'import numpy as np\n'), ((751, 764), 'numpy.array', 'np.array', (['(200)'], {}), '(200)\n', (759, 764), True, 'import numpy as np\n'), ((794, 808), 'numpy.array', 'np.array', (['[42]'], {}), '([42])\n', (802, 808), True, 'import numpy as np\n'), ((810, 825), 'numpy.array', 'np.array', (['[-82]'], {}), '([-82])\n', (818, 825), True, 'import numpy as np\n'), ((827, 842), 'numpy.array', 'np.array', (['[200]'], {}), '([200])\n', (835, 842), True, 'import numpy as np\n'), ((872, 889), 'numpy.atleast_3d', 'np.atleast_3d', (['(42)'], {}), '(42)\n', (885, 889), True, 'import numpy as np\n'), ((891, 909), 'numpy.atleast_3d', 'np.atleast_3d', (['(-82)'], {}), '(-82)\n', (904, 909), True, 'import numpy as np\n'), ((911, 929), 'numpy.atleast_3d', 'np.atleast_3d', (['(200)'], {}), '(200)\n', (924, 929), True, 'import numpy as np\n'), ((1387, 1404), 'numpy.array', 'np.array', (['xyz0[0]'], {}), '(xyz0[0])\n', (1395, 
1404), True, 'import numpy as np\n'), ((1406, 1423), 'numpy.array', 'np.array', (['xyz0[1]'], {}), '(xyz0[1])\n', (1414, 1423), True, 'import numpy as np\n'), ((1425, 1442), 'numpy.array', 'np.array', (['xyz0[2]'], {}), '(xyz0[2])\n', (1433, 1442), True, 'import numpy as np\n'), ((1472, 1491), 'numpy.array', 'np.array', (['[xyz0[0]]'], {}), '([xyz0[0]])\n', (1480, 1491), True, 'import numpy as np\n'), ((1493, 1512), 'numpy.array', 'np.array', (['[xyz0[1]]'], {}), '([xyz0[1]])\n', (1501, 1512), True, 'import numpy as np\n'), ((1514, 1533), 'numpy.array', 'np.array', (['[xyz0[2]]'], {}), '([xyz0[2]])\n', (1522, 1533), True, 'import numpy as np\n'), ((1563, 1585), 'numpy.atleast_3d', 'np.atleast_3d', (['xyz0[0]'], {}), '(xyz0[0])\n', (1576, 1585), True, 'import numpy as np\n'), ((1587, 1609), 'numpy.atleast_3d', 'np.atleast_3d', (['xyz0[1]'], {}), '(xyz0[1])\n', (1600, 1609), True, 'import numpy as np\n'), ((1611, 1633), 'numpy.atleast_3d', 'np.atleast_3d', (['xyz0[2]'], {}), '(xyz0[2])\n', (1624, 1633), True, 'import numpy as np\n'), ((2073, 2084), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (2081, 2084), True, 'import numpy as np\n'), ((2086, 2097), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (2094, 2097), True, 'import numpy as np\n'), ((2099, 2111), 'numpy.array', 'np.array', (['(50)'], {}), '(50)\n', (2107, 2111), True, 'import numpy as np\n'), ((2141, 2154), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2149, 2154), True, 'import numpy as np\n'), ((2156, 2169), 'numpy.array', 'np.array', (['[A]'], {}), '([A])\n', (2164, 2169), True, 'import numpy as np\n'), ((2171, 2185), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (2179, 2185), True, 'import numpy as np\n'), ((2215, 2231), 'numpy.atleast_3d', 'np.atleast_3d', (['(0)'], {}), '(0)\n', (2228, 2231), True, 'import numpy as np\n'), ((2233, 2249), 'numpy.atleast_3d', 'np.atleast_3d', (['A'], {}), '(A)\n', (2246, 2249), True, 'import numpy as np\n'), ((2251, 2268), 'numpy.atleast_3d', 
'np.atleast_3d', (['(50)'], {}), '(50)\n', (2264, 2268), True, 'import numpy as np\n'), ((4024, 4034), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4031, 4034), True, 'import numpy as np\n'), ((4074, 4084), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4081, 4084), True, 'import numpy as np\n'), ((5624, 5664), 'pymap3d.geodetic2aer', 'pm.geodetic2aer', (['anan', 'anan', 'anan', '*lla0'], {}), '(anan, anan, anan, *lla0)\n', (5639, 5664), True, 'import pymap3d as pm\n'), ((5692, 5732), 'pymap3d.aer2geodetic', 'pm.aer2geodetic', (['anan', 'anan', 'anan', '*lla0'], {}), '(anan, anan, anan, *lla0)\n', (5707, 5732), True, 'import pymap3d as pm\n')] |
from __future__ import print_function, division, absolute_import
import numpy as np
def valueFormat(value_format, value, unc_format, unc, std_name, multi=None, deg=False, callable_val=None, \
    callable_ci=None):
    """ Format a value, optionally appending its 1-sigma uncertainty and confidence interval.

    Arguments:
        value_format: [str] String format for the value.
        value: [float] Value to put in the format.
        unc_format: [str] String format for the uncertainty.
        unc: [MCUncertainties] Uncertainty container, or None to format the value alone.
        std_name: [str] Name of the uncertainty attribute, e.g. if it is 'x', then the uncertainty
            is stored in unc.x (and the optional confidence interval in unc.x_ci).

    Keyword arguments:
        multi: [float] Uncertainty multiplier. None by default. Used to scale values to different
            units (e.g. from m/s to km/s). Applied after the callable functions.
        deg: [bool] Convert radians to degrees if True. False by default.
        callable_val: [function] Call this function on the provided value. None by default.
        callable_ci: [function] Call this function on the provided confidence interval bounds.
            None by default.

    Return:
        [str] Formatted string, e.g. "2.0 +/- 0.50, 95% CI [1.0, 3.0]".
    """

    # Fold the radians -> degrees conversion into the unit multiplier
    if deg:
        if multi is None:
            multi = 1.0

        multi *= np.degrees(1.0)

    # Optionally post-process the value before scaling
    if callable_val is not None:
        value = callable_val(value)

    # Scale to the requested units
    if multi is not None:
        value *= multi

    out = value_format.format(value)

    # Append uncertainty information, if available
    if unc is not None:

        # Fetch and scale the 1 sigma uncertainty
        sigma = getattr(unc, std_name)
        if multi is not None:
            sigma *= multi

        # Symmetrical 1 sigma uncertainty
        out += " +/- " + unc_format.format(sigma)

        # Append the confidence interval, if available
        ci_attr = std_name + "_ci"
        if hasattr(unc, ci_attr):

            ci_lower, ci_upper = np.array(getattr(unc, ci_attr))

            if callable_ci is not None:
                ci_lower = callable_ci(ci_lower)
                ci_upper = callable_ci(ci_upper)

            if multi is not None:
                ci_lower *= multi
                ci_upper *= multi

            out += ", {:d}% CI [{:s}, {:s}]".format(int(unc.ci), \
                value_format.format(ci_lower), value_format.format(ci_upper))

    return out
"numpy.degrees"
] | [((1327, 1342), 'numpy.degrees', 'np.degrees', (['(1.0)'], {}), '(1.0)\n', (1337, 1342), True, 'import numpy as np\n')] |
import re
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
LIMIT = 2
def cut(x, voc):
    """Mask out-of-vocabulary tokens: every space-separated token of *x* that
    is not present in *voc* is replaced with the literal 'rareword'."""
    masked = []
    for token in x.split(" "):
        masked.append(token if token in voc else "rareword")
    return " ".join(masked)
def number_preprocess(x):
    """Replace every run of two or more consecutive digits in *x* with the
    token 'numericword'. Single digits are left untouched.

    BUG FIX: the previous implementation iterated re.findall matches and
    called str.replace for each, which corrupted longer numbers containing a
    shorter match as a substring (e.g. "12 and 123" became
    "numericword and numericword3"). A single re.sub pass replaces each
    matched run exactly once, in place.
    """
    return re.sub(r"\d\d+", "numericword", x)
class NNPreprocessor(object):
    """Text and categorical preprocessor for a neural-network model.

    fit_transform() learns vocabularies, label encoders, a Keras tokenizer
    and duplicate-count statistics from the training frame; transform()
    applies the same fitted state to new data. Both methods mutate the
    passed DataFrame's columns in place and also return it (transform/merge
    reassigns `df`, so callers must use the returned frame).
    """
    def __init__(self):
        # Keras tokenizer fitted on the combined name + item_description text.
        self.tok_raw = Tokenizer()
        # One LabelEncoder per categorical column.
        self.le = {}
        self.cat_cols = ["brand_name", "subcat_0", "subcat_1", "subcat_2"]
        # Per-column set of category values seen at least LIMIT times;
        # rarer values are collapsed to "rarecategory".
        self.cat_vocab = {}
        for cat in self.cat_cols:
            self.le[cat] = LabelEncoder()
        # Per-column frequency tables (counts of duplicated original text)
        # and their maxima, used to build normalized frequency features.
        self.freqs = {}
        self.max_freqs = {}
        # Combined word vocabulary (words with document frequency >= LIMIT
        # in either name or item_description).
        self.voc = None

    def fit_transform(self, df):
        # Collapse long digit runs to a shared token before any vocab work.
        df["name"] = df["name"].apply(number_preprocess)
        df["item_description"] = df["item_description"].apply(number_preprocess)
        # Learn which category values are frequent enough to keep.
        for cat in self.cat_cols:
            voc = df[cat].value_counts()
            voc = set(voc[voc >= LIMIT].index)
            self.cat_vocab[cat] = voc
        # Collapse rare categories, then integer-encode each column.
        for cat in self.cat_cols:
            df[cat] = df[cat].apply(lambda x: x if x in self.cat_vocab[cat] else "rarecategory")
            df[cat] = self.le[cat].fit_transform(df[cat])
        # Build the word vocabulary as the union of words frequent in names
        # and words frequent in descriptions (each thresholded separately).
        cv = CountVectorizer(token_pattern="\w+", min_df=LIMIT)
        cv.fit(df["name"])
        name_voc = cv.vocabulary_
        cv = CountVectorizer(token_pattern="\w+", min_df=LIMIT)
        cv.fit(df["item_description"])
        desc_voc = cv.vocabulary_
        self.voc = set(name_voc).union(set(desc_voc))
        # Replace out-of-vocabulary words with the "rareword" token.
        df["name"] = df["name"].apply(lambda x: cut(x, self.voc))
        df["item_description"] = df["item_description"].apply(lambda x: cut(x, self.voc))
        print("Transforming text data to sequences...")
        raw_text = np.hstack([df["name"].values, df["item_description"].values])
        print("Fitting tokenizer...")
        self.tok_raw.fit_on_texts(raw_text)
        print("Transforming text to sequences...")
        df['seq_item_description'] = self.tok_raw.texts_to_sequences(df["item_description"].values)
        df['seq_name'] = self.tok_raw.texts_to_sequences(df["name"].values)
        # WC: largest tokenizer word index, i.e. the vocabulary size bound
        # needed for an embedding layer.
        WC = max(self.tok_raw.word_index.values())
        # Duplicate-count features: how many OTHER rows share the same
        # original text, normalized by the training maximum.
        for col in ["name_ori", "item_description_ori"]:
            f_col = col + "_freq"
            self.freqs[col] = df.groupby(col)["train_id"].count().reset_index()
            self.freqs[col].columns = [col, f_col]
            df = pd.merge(df, self.freqs[col], how="left", on=col)
            df[f_col] = df[f_col] - 1
            self.max_freqs[col] = df[f_col].max()
            df[f_col] = df[f_col] / self.max_freqs[col]
        return df, WC

    def transform(self, df):
        # Same text normalization as in fit_transform.
        df["name"] = df["name"].apply(number_preprocess)
        df["item_description"] = df["item_description"].apply(number_preprocess)
        # Apply the fitted category vocabularies and encoders (transform only).
        for cat in self.cat_cols:
            df[cat] = df[cat].apply(lambda x: x if x in self.cat_vocab[cat] else "rarecategory")
            df[cat] = self.le[cat].transform(df[cat])
        df["name"] = df["name"].apply(lambda x: cut(x, self.voc))
        df["item_description"] = df["item_description"].apply(lambda x: cut(x, self.voc))
        df['seq_item_description'] = self.tok_raw.texts_to_sequences(df["item_description"].values)
        df['seq_name'] = self.tok_raw.texts_to_sequences(df["name"].values)
        # Duplicate-count features from the TRAINING frequency tables; texts
        # unseen at fit time get 0. Note the raw count here is not reduced by
        # 1 and the divisor is max+1 (slightly different scaling from fit).
        for col in ["name_ori", "item_description_ori"]:
            f_col = col + "_freq"
            df = pd.merge(df, self.freqs[col], how="left", on=col)
            df[f_col] = df[f_col].fillna(0)
            df[f_col] = df[f_col] / (self.max_freqs[col] + 1)
        return df
| [
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.merge",
"sklearn.preprocessing.LabelEncoder",
"numpy.hstack",
"re.findall",
"keras.preprocessing.text.Tokenizer"
] | [((350, 374), 're.findall', 're.findall', (['"""\\\\d\\\\d+"""', 'x'], {}), "('\\\\d\\\\d+', x)\n", (360, 374), False, 'import re\n'), ((507, 518), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (516, 518), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1331, 1382), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'token_pattern': '"""\\\\w+"""', 'min_df': 'LIMIT'}), "(token_pattern='\\\\w+', min_df=LIMIT)\n", (1346, 1382), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1456, 1507), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'token_pattern': '"""\\\\w+"""', 'min_df': 'LIMIT'}), "(token_pattern='\\\\w+', min_df=LIMIT)\n", (1471, 1507), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1867, 1928), 'numpy.hstack', 'np.hstack', (["[df['name'].values, df['item_description'].values]"], {}), "([df['name'].values, df['item_description'].values])\n", (1876, 1928), True, 'import numpy as np\n'), ((704, 718), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (716, 718), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2532, 2581), 'pandas.merge', 'pd.merge', (['df', 'self.freqs[col]'], {'how': '"""left"""', 'on': 'col'}), "(df, self.freqs[col], how='left', on=col)\n", (2540, 2581), True, 'import pandas as pd\n'), ((3546, 3595), 'pandas.merge', 'pd.merge', (['df', 'self.freqs[col]'], {'how': '"""left"""', 'on': 'col'}), "(df, self.freqs[col], how='left', on=col)\n", (3554, 3595), True, 'import pandas as pd\n')] |
import numpy as np
def convert_time_to_timebin(time_in_sec, binsize_in_sec):
    """Map a time in seconds onto a bin index by rounding time/binsize to the
    nearest integer (numpy round-half-to-even; works elementwise on arrays
    as well as on scalars)."""
    return np.round(time_in_sec / binsize_in_sec)
def realign_data(align_time_in_bins, length_time_in_bins, data):
    """Extract a fixed-length time window from each trial, starting at a
    per-trial alignment bin.

    Arguments:
        align_time_in_bins: per-trial window start bins (may be float; the
            integer part is used, matching the slice actually taken).
        length_time_in_bins: [int] window length in bins.
        data: array whose axes are assumed to be (units, trials, time) --
            trials on axis 1 and time on the last axis, as used below.

    Return:
        (newdata, validtrials): newdata contains only the trials whose full
        window fits inside the recording; validtrials is a boolean mask over
        the original trial axis.
    """
    maxtime = data.shape[-1]
    newshape = data.shape[:-1] + (length_time_in_bins,)
    newdata = np.empty(newshape)
    validtrials = np.zeros(data.shape[1], dtype=bool)
    for count, align_time_curr_trial in enumerate(align_time_in_bins):
        start = int(align_time_curr_trial)
        # BUG FIX (off-by-one): a window [start, start+L) fits as long as
        # start + L <= maxtime; the old strict `<` test dropped trials whose
        # window ended exactly at the last bin. Also compare on the truncated
        # integer start (matching the slice below) instead of the raw,
        # possibly fractional, alignment time.
        if start + length_time_in_bins <= maxtime:
            newdata[:, count, :] = data[:, count, start:start + length_time_in_bins]
            validtrials[count] = True
    return newdata[:, validtrials, :], validtrials
# NOTE(review): `dat` is not defined in this file -- presumably a session
# data dict loaded by surrounding notebook code; confirm before running.
# Convert each trial's response time (in seconds) into a bin index.
align_time= convert_time_to_timebin(dat['response_time'],dat['bin_size'])
# Window length: 0.5 s expressed in bins.
length_time_in_bins = int(0.5/dat['bin_size'])
# Re-align the spike data to response time, keeping only trials whose
# full window fits inside the recording.
newdata,validtrials = realign_data(align_time,length_time_in_bins,dat['spks'])
| [
"numpy.zeros",
"numpy.empty",
"numpy.round"
] | [((88, 126), 'numpy.round', 'np.round', (['(time_in_sec / binsize_in_sec)'], {}), '(time_in_sec / binsize_in_sec)\n', (96, 126), True, 'import numpy as np\n'), ((308, 326), 'numpy.empty', 'np.empty', (['newshape'], {}), '(newshape)\n', (316, 326), True, 'import numpy as np\n'), ((343, 378), 'numpy.zeros', 'np.zeros', (['data.shape[1]'], {'dtype': 'bool'}), '(data.shape[1], dtype=bool)\n', (351, 378), True, 'import numpy as np\n')] |
import os
import json
from collections import OrderedDict, Counter
from operator import itemgetter
import logging
from pathlib import Path
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 100000)
import re
import json
import numpy as np
from tqdm import tqdm
from datetime import datetime
from corpus.tokenization import get_tokenizer
#from config.constants import *
from corpus.document import Document
from corpus.corpus_xray import CorpusXray
from corpus.corpus_emerge_xray import CorpusEmergeXray, DATE_FORMAT
from config.constants_pulmonary import NONE, PRESENT, UNILATERAL, BILATERAL
# ---- spreadsheet column/sheet names and sentinel values ---------------------
STUDY_ID = "study_id"
ACCESSION = 'accession'
REPORT = 'report'
# Sentinel "day" value used for discharge-day columns (suffix 'disch').
DISCH_DAY = 999
# Quadrant consolidation-score column names (left: Q1/Q2, right: Q3/Q4,
# per LEFT_QUADRANTS/RIGHT_QUADRANTS below).
Q1 = 'Q1_consolidation'
Q2 = 'Q2_consolidation'
Q3 = 'Q3_consolidation'
Q4 = 'Q4_consolidation'
LEFT_QUADRANTS = [Q1, Q2]
RIGHT_QUADRANTS = [Q3, Q4]
# Minimum quadrant score counted as consolidation.
CONSOLIDATION_THRESHOLD = 1.0
ALL_ENCOUNTERS_WORKSHEET = "All encounters"
COLUMN_CORRECTION = ' \(extent\)'
DAY_PREFIX = "Day "
# Output column holding the derived infiltrate label (none/unilateral/...).
INFILTRATES_IMAGE = 'infiltrates_image'
# map scores worksheet names (day, rep)
#SCORES_DAY_MAP = OrderedDict()
#SCORES_DAY_MAP["Day 0"] = (0, 1)
#SCORES_DAY_MAP["Day 1"] = (1, 1)
#SCORES_DAY_MAP["Day 2"] = (2, 1)
#SCORES_DAY_MAP["Day 3"] = (3, 1)
#ACCESSION_COLUMNS = OrderedDict()
#ACCESSION_COLUMNS["Day 0"] = 'radiograph1_accession_d0'
#ACCESSION_COLUMNS["Day 1"] = 'radiograph1_accession_d1'
#ACCESSION_COLUMNS["Day 2"] = 'radiograph1_accession_d2'
#ACCESSION_COLUMNS["Day 3"] = 'radiograph1_accession_d3'
# Regex matching the literal marker *CRLF* used as a linebreak token in
# report text (see load_corpus, where it is substituted with '\n').
LF = r'\*CRLF\*'
# Map and counter columns to and day and rep (day, rep)
ALL_ENCOUNTERS_DAY_MAP = OrderedDict()
ALL_ENCOUNTERS_DAY_MAP["radiograph1_accession_d0"] = (0, 1)
ALL_ENCOUNTERS_DAY_MAP["radiograph1_accession_d1"] = (1, 1)
ALL_ENCOUNTERS_DAY_MAP["radiograph1_accession_d2"] = (2, 1)
ALL_ENCOUNTERS_DAY_MAP["radiograph1_accession_d3"] = (3, 1)
ALL_ENCOUNTERS_DAY_MAP["radiograph1_accession_d4"] = (4, 1)
#def save_columns(df, destination):
#
#    columns = df.columns.tolist()
#    fn = os.path.join(destination, 'spreadsheet_columns.json')
#    with open(fn,'w') as f:
#        json.dump(columns, f, indent=4)
#
#    return columns
def column_name_dissect(name):
    '''
    Split a spreadsheet column name into its (repetition, type, day) parts.

    Examples:
        "radiograph1_accession_d1"    -> (1, 'accession', 1)
        "radiograph1_accession_disch" -> (1, 'accession', DISCH_DAY)
        "radiograph1_report_d0"       -> (1, 'report', 0)

    The 'disch' suffix maps to the DISCH_DAY sentinel; any other suffix must
    look like 'd' followed by one or two digits, otherwise ValueError is raised.
    '''
    rep_part, field_type, day_part = name.split('_')

    rep_num = int(re.sub('radiograph', '', rep_part))

    if day_part == 'disch':
        day_num = DISCH_DAY
    elif re.match('d[0-9]{1,2}', day_part):
        day_num = int(day_part[1:])
    else:
        raise ValueError("invalid day:\t{}".format(day_part))

    return (rep_num, field_type, day_num)
def load_corpus(source, linebreak_bound=True):
    '''
    Load the COVID-19 x-ray report corpus from an Excel spreadsheet.

    Each spreadsheet row is a patient; columns named like
    "radiograph<rep>_<accession|report>_<day>" carry either an accession
    number or report text (see column_name_dissect). Report text is turned
    into a tokenized Document keyed by (study_id, day, rep); the matching
    accession number is attached to each Document afterwards.

    Arguments:
        source: path to the Excel spreadsheet.
        linebreak_bound: passed through to the tokenizer (treat linebreaks
            as sentence boundaries).

    Returns:
        CorpusXray with one Document per report cell.

    Raises:
        AssertionError if accession/report cells do not pair up one-to-one;
        ValueError for an unrecognized column type.
    '''
    logging.info('-'*72)
    logging.info('COVID-19 x-rays')
    logging.info('-'*72)
    logging.info('\tSpreadsheet location:\t{}'.format(source))
    # Initialize tokenizer
    tokenizer = get_tokenizer(linebreak_bound=linebreak_bound)
    # Read spreadsheet
    df = pd.read_excel(source)
    # Save columns
    #logging.info('\tColumns:\n{}'.format(df.columns.tolist()))
    # Instantiate corpus
    corpus = CorpusXray()
    accessions = OrderedDict()
    logging.info('Importing docs...')
    pbar = tqdm(total=len(df))
    # Iterate over rows (patients)
    for d in df.to_dict('records'):
        study_id = None
        # Loop on notes for current patient. Relies on STUDY_ID appearing
        # before the note columns so study_id is set when they are reached.
        for k, v in d.items():
            if k == STUDY_ID:
                study_id = int(v)
            # Skip empty cells: non-strings that are NaN are missing values.
            elif isinstance(v, str) or (not np.isnan(v)):
                rep, typ_, day = column_name_dissect(k)
                rep = int(rep)
                day = int(day)
                if typ_ == ACCESSION:
                    assert (study_id, day, rep) not in accessions
                    accessions[(study_id, day, rep)] = v
                elif typ_ == REPORT:
                    # Restore linebreaks from the *CRLF* marker (LF regex).
                    text_ = re.sub(LF, '\n', v)
                    doc = Document( \
                        id = (study_id, day, rep),
                        text_ = text_,
                        tokenizer = tokenizer)
                    corpus.add_doc(doc)
                else:
                    raise ValueError("could not resolve field")
        pbar.update(1)
    # Every report must have exactly one matching accession cell.
    assert len(accessions) == len(corpus)
    for (study_id, day, rep), accession in accessions.items():
        corpus[(study_id, day, rep)].accession = accession
    return corpus
def load_emerge_corpus(source, linebreak_bound=True, \
            key_study_id='study_id',
            key_report_date='report_date',
            key_covid_test='before_covid_test',
            key_full_text='text_full',
            source_date_format="%Y-%m-%d %H:%M:%S"):
    '''
    Load the eMERGE x-ray report corpus from a JSON file.

    The JSON file is a list of note dicts. Each note contributes one
    tokenized Document keyed by (study_id, report_date); notes whose key is
    already present are skipped with a warning.

    Arguments:
        source: path to the JSON file (list of note dicts).
        linebreak_bound: passed through to the tokenizer.
        key_*: dict keys for the study id, report date, covid-test flag and
            full report text within each note.
        source_date_format: strptime format of the incoming report dates
            (fractional seconds, if any, are stripped before parsing).

    Returns:
        CorpusEmergeXray with one Document per unique (study_id, date), each
        tagged with 'covid_test_<flag>'.
    '''
    logging.info('-'*72)
    logging.info('eMERGE x-rays')
    logging.info('-'*72)
    logging.info('\tsource:\t{}'.format(source))
    # Initialize tokenizer
    tokenizer = get_tokenizer(linebreak_bound=linebreak_bound)
    # Load data
    data = json.load(open(source, 'r'))
    # Instantiate corpus
    corpus = CorpusEmergeXray()
    logging.info('Importing docs...')
    pbar = tqdm(total=len(data))
    # Iterate over rows (patients); `keys` tracks ids already imported.
    keys = set([])
    for note in data:
        study_id = note[key_study_id]
        # Parse the report date, dropping fractional seconds, and re-format
        # it into the corpus-wide DATE_FORMAT string.
        date = note[key_report_date]
        date = date.split('.')[0]
        date = datetime.strptime(date, source_date_format)
        date = datetime.strftime(date, DATE_FORMAT)
        # define document id
        id = (study_id, date)
        # get covid information
        covid_test = note[key_covid_test]
        tags = set(['covid_test_{}'.format(covid_test)])
        text = note[key_full_text]
        if '\n' not in text:
            logging.warn(f"no linebreaks in text: {id}")
        doc = Document( \
            id = id,
            text_ = text,
            tokenizer = tokenizer,
            tags = tags)
        # Skip duplicate (study_id, date) keys instead of overwriting.
        if id in keys:
            logging.warn(f"ID in corpus: {id}")
        else:
            corpus.add_doc(doc)
            keys.add(id)
        pbar.update(1)
    pbar.close()
    return corpus
def has_ards(x, threshold=1, q1=Q1, q2=Q2, q3=Q3, q4=Q4):
    """Return 1 if the quadrant scores indicate bilateral involvement (at
    least one quadrant at or above *threshold* on EACH side), else 0.

    Arguments:
        x: mapping of quadrant column name -> score (e.g. a DataFrame row).
        threshold: minimum score counted as involvement.
        q1, q2: left-side quadrant keys; q3, q4: right-side quadrant keys.
    """
    left = (x[q1] >= threshold) or (x[q2] >= threshold)
    # BUG FIX: the original tested x[q3] twice and never read x[q4], so
    # right-side involvement present only in Q4 was missed.
    right = (x[q3] >= threshold) or (x[q4] >= threshold)
    bilateral = left and right
    return int(bilateral)
# NOTE(review): dead code -- get_accession_map was disabled by wrapping it in
# a bare string literal. Kept for reference; delete once confirmed unused.
'''
def get_accession_map(workbook, \
        all_encounters_worksheet=ALL_ENCOUNTERS_WORKSHEET,
        all_encounters_day_map=ALL_ENCOUNTERS_DAY_MAP):
    #Get a map for accession values
    df = workbook[all_encounters_worksheet]
    map_ = OrderedDict()
    for d in df.to_dict(orient="records"):
        study_id = d[STUDY_ID]
        for column, (day, rep) in all_encounters_day_map.items():
            accession = d[column]
            k = (study_id, day, rep)
            assert k not in map_
            if np.isnan(accession):
                accession = None
            else:
                assert accession == float(int(accession))
                accession = int(accession)
            map_[k] = accession
    return map_
'''
def get_label(row, labels=[BILATERAL, UNILATERAL, PRESENT, NONE]):
    """Return the first label in *labels* whose column in *row* exists and
    equals 1; return None when no label matches.

    NOTE: the mutable default list is never modified, so it is safe here.
    """
    for candidate in labels:
        if candidate in row and row[candidate] == 1:
            return candidate
    return None
def get_bilateral_infiltrate(df, \
                        accession_key = ACCESSION,
                        left_quadrants = LEFT_QUADRANTS,
                        right_quadrants = RIGHT_QUADRANTS,
                        threshold = CONSOLIDATION_THRESHOLD):
    """Annotate *df* (in place, also returned) with per-side consolidation
    scores and the derived infiltrate label.

    Adds columns: left/right_score (worst quadrant per side),
    left/right_positive (side at/above threshold), one indicator column per
    label (BILATERAL/UNILATERAL/NONE), and INFILTRATES_IMAGE (the label).
    """
    # Per-side severity = worst (max) quadrant score on that side.
    df['left_score'] = df[left_quadrants].max(axis=1)
    df['right_score'] = df[right_quadrants].max(axis=1)
    # Binarize each side against the consolidation threshold.
    df['left_positive'] = df['left_score'].ge(threshold).astype(int)
    df['right_positive'] = df['right_score'].ge(threshold).astype(int)
    # Number of positive sides determines the label indicators.
    positive_sides = df['left_positive'] + df['right_positive']
    df[BILATERAL] = (positive_sides == 2).astype(int)
    df[UNILATERAL] = (positive_sides == 1).astype(int)
    df[NONE] = (positive_sides == 0).astype(int)
    # Collapse the indicator columns into a single label column.
    df[INFILTRATES_IMAGE] = df.apply(get_label, axis=1)
    return df
def load_xray_images(source, \
                    day_prefix=DAY_PREFIX,
                    q1=Q1, q2=Q2, q3=Q3, q4=Q4):
    """Read the quadrant-score CSV at *source* and return it annotated with
    bilateral-infiltrate labels (see get_bilateral_infiltrate).

    NOTE(review): day_prefix and q1..q4 are currently unused -- confirm
    before removing them from the signature.
    """
    return get_bilateral_infiltrate(pd.read_csv(source))
| [
"datetime.datetime.strftime",
"pandas.read_csv",
"corpus.corpus_emerge_xray.CorpusEmergeXray",
"logging.warn",
"re.match",
"corpus.document.Document",
"numpy.isnan",
"pandas.read_excel",
"logging.info",
"datetime.datetime.strptime",
"re.sub",
"collections.OrderedDict",
"pandas.set_option",
... | [((165, 203), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (178, 203), True, 'import pandas as pd\n'), ((204, 246), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(1000)'], {}), "('display.max_columns', 1000)\n", (217, 246), True, 'import pandas as pd\n'), ((247, 285), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(100000)'], {}), "('display.width', 100000)\n", (260, 285), True, 'import pandas as pd\n'), ((1662, 1675), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1673, 1675), False, 'from collections import OrderedDict, Counter\n'), ((2734, 2756), 'logging.info', 'logging.info', (["('-' * 72)"], {}), "('-' * 72)\n", (2746, 2756), False, 'import logging\n'), ((2759, 2790), 'logging.info', 'logging.info', (['"""COVID-19 x-rays"""'], {}), "('COVID-19 x-rays')\n", (2771, 2790), False, 'import logging\n'), ((2795, 2817), 'logging.info', 'logging.info', (["('-' * 72)"], {}), "('-' * 72)\n", (2807, 2817), False, 'import logging\n'), ((2923, 2969), 'corpus.tokenization.get_tokenizer', 'get_tokenizer', ([], {'linebreak_bound': 'linebreak_bound'}), '(linebreak_bound=linebreak_bound)\n', (2936, 2969), False, 'from corpus.tokenization import get_tokenizer\n'), ((3003, 3024), 'pandas.read_excel', 'pd.read_excel', (['source'], {}), '(source)\n', (3016, 3024), True, 'import pandas as pd\n'), ((3148, 3160), 'corpus.corpus_xray.CorpusXray', 'CorpusXray', ([], {}), '()\n', (3158, 3160), False, 'from corpus.corpus_xray import CorpusXray\n'), ((3178, 3191), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3189, 3191), False, 'from collections import OrderedDict, Counter\n'), ((3196, 3229), 'logging.info', 'logging.info', (['"""Importing docs..."""'], {}), "('Importing docs...')\n", (3208, 3229), False, 'import logging\n'), ((4752, 4774), 'logging.info', 'logging.info', (["('-' * 72)"], {}), "('-' * 72)\n", (4764, 4774), False, 'import 
logging\n'), ((4777, 4806), 'logging.info', 'logging.info', (['"""eMERGE x-rays"""'], {}), "('eMERGE x-rays')\n", (4789, 4806), False, 'import logging\n'), ((4811, 4833), 'logging.info', 'logging.info', (["('-' * 72)"], {}), "('-' * 72)\n", (4823, 4833), False, 'import logging\n'), ((4925, 4971), 'corpus.tokenization.get_tokenizer', 'get_tokenizer', ([], {'linebreak_bound': 'linebreak_bound'}), '(linebreak_bound=linebreak_bound)\n', (4938, 4971), False, 'from corpus.tokenization import get_tokenizer\n'), ((5067, 5085), 'corpus.corpus_emerge_xray.CorpusEmergeXray', 'CorpusEmergeXray', ([], {}), '()\n', (5083, 5085), False, 'from corpus.corpus_emerge_xray import CorpusEmergeXray, DATE_FORMAT\n'), ((5090, 5123), 'logging.info', 'logging.info', (['"""Importing docs..."""'], {}), "('Importing docs...')\n", (5102, 5123), False, 'import logging\n'), ((8273, 8292), 'pandas.read_csv', 'pd.read_csv', (['source'], {}), '(source)\n', (8284, 8292), True, 'import pandas as pd\n'), ((2406, 2435), 're.sub', 're.sub', (['"""radiograph"""', '""""""', 'rep'], {}), "('radiograph', '', rep)\n", (2412, 2435), False, 'import re\n'), ((2494, 2522), 're.match', 're.match', (['"""d[0-9]{1,2}"""', 'day'], {}), "('d[0-9]{1,2}', day)\n", (2502, 2522), False, 'import re\n'), ((5418, 5461), 'datetime.datetime.strptime', 'datetime.strptime', (['date', 'source_date_format'], {}), '(date, source_date_format)\n', (5435, 5461), False, 'from datetime import datetime\n'), ((5477, 5513), 'datetime.datetime.strftime', 'datetime.strftime', (['date', 'DATE_FORMAT'], {}), '(date, DATE_FORMAT)\n', (5494, 5513), False, 'from datetime import datetime\n'), ((5844, 5903), 'corpus.document.Document', 'Document', ([], {'id': 'id', 'text_': 'text', 'tokenizer': 'tokenizer', 'tags': 'tags'}), '(id=id, text_=text, tokenizer=tokenizer, tags=tags)\n', (5852, 5903), False, 'from corpus.document import Document\n'), ((5784, 5828), 'logging.warn', 'logging.warn', (['f"""no linebreaks in text: {id}"""'], {}), "(f'no 
linebreaks in text: {id}')\n", (5796, 5828), False, 'import logging\n'), ((6015, 6050), 'logging.warn', 'logging.warn', (['f"""ID in corpus: {id}"""'], {}), "(f'ID in corpus: {id}')\n", (6027, 6050), False, 'import logging\n'), ((3546, 3557), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (3554, 3557), True, 'import numpy as np\n'), ((3910, 3929), 're.sub', 're.sub', (['LF', '"""\n"""', 'v'], {}), "(LF, '\\n', v)\n", (3916, 3929), False, 'import re\n'), ((3957, 4024), 'corpus.document.Document', 'Document', ([], {'id': '(study_id, day, rep)', 'text_': 'text_', 'tokenizer': 'tokenizer'}), '(id=(study_id, day, rep), text_=text_, tokenizer=tokenizer)\n', (3965, 4024), False, 'from corpus.document import Document\n')] |
import numpy as np
class CliqueVector(dict):
    """ Convenience container for the concatenated vector of marginals or
    potentials used in marginal-based inference.

    Behaves as a dict mapping cliques (subsets of attributes) to
    marginals/potentials (Factor objects), with elementwise arithmetic
    defined across matching cliques.
    """
    def __init__(self, dictionary):
        self.dictionary = dictionary
        dict.__init__(self, dictionary)

    @staticmethod
    def zeros(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.zeros(domain.project(cl)) for cl in cliques})

    @staticmethod
    def ones(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.ones(domain.project(cl)) for cl in cliques})

    @staticmethod
    def uniform(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.uniform(domain.project(cl)) for cl in cliques})

    @staticmethod
    def random(domain, cliques, prng=np.random):
        from mbi import Factor
        return CliqueVector({cl: Factor.random(domain.project(cl), prng) for cl in cliques})

    @staticmethod
    def normal(domain, cliques, prng=np.random):
        from mbi import Factor
        return CliqueVector({cl: Factor.normal(domain.project(cl), prng) for cl in cliques})

    @staticmethod
    def from_data(data, cliques):
        from mbi import Factor
        factors = {}
        for cl in cliques:
            mu = data.project(cl)
            factors[cl] = Factor(mu.domain, mu.datavector())
        return CliqueVector(factors)

    def combine(self, other):
        """Fold `other` into this vector even when the clique sets differ
        (used for warm-starting optimization). Each factor of `other` is
        added to the first clique here that contains it; factors with no
        containing clique are silently ignored."""
        for small in other:
            for big in self:
                if set(small) <= set(big):
                    self[big] += other[small]
                    break

    def __mul__(self, const):
        return CliqueVector({cl: const*self[cl] for cl in self})

    def __rmul__(self, const):
        return self.__mul__(const)

    def __add__(self, other):
        # Scalars broadcast over every clique; CliqueVectors add clique-wise.
        if np.isscalar(other):
            return CliqueVector({cl: self[cl] + other for cl in self})
        return CliqueVector({cl: self[cl] + other[cl] for cl in self})

    def __sub__(self, other):
        return self + -1*other

    def exp(self):
        return CliqueVector({cl: self[cl].exp() for cl in self})

    def log(self):
        return CliqueVector({cl: self[cl].log() for cl in self})

    def dot(self, other):
        return sum((self[cl]*other[cl]).sum() for cl in self)

    def size(self):
        return sum(self[cl].domain.size() for cl in self)
| [
"numpy.isscalar"
] | [((2331, 2349), 'numpy.isscalar', 'np.isscalar', (['other'], {}), '(other)\n', (2342, 2349), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
class Windy_Grid_World(object):
    """Windy grid-world (Sutton & Barto, Example 6.5) solved with Sarsa."""

    def __init__(self, epsilon=0.1, step_size=0.5, discount=1):
        # Grid layout: (rows, columns).
        self.world_size = (7, 10)
        self.num_action = 4
        # Action encoding: 0 = up, 1 = down, 2 = left, 3 = right.
        self.actions = np.arange(self.num_action)
        self.epsilon = epsilon      # exploration rate for epsilon-greedy
        self.step_size = step_size  # Sarsa learning rate
        self.discount = discount    # return discount factor
        self.start = np.array([3, 0])
        self.end = np.array([3, 7])
        self.reward = -1            # constant per-step reward
        # Upward wind strength for each column of the grid.
        self.wind = np.array([0, 0, 0, 1, 1, 1, 2, 2, 1, 0])

    def epsilon_greedy(self, q_value, state):
        """Pick an action for *state*: explore with prob. epsilon, else act greedily."""
        if np.random.rand() < self.epsilon:
            return np.random.choice(self.actions)
        values = q_value[:, state[0], state[1]]
        # Break ties between equally-valued greedy actions uniformly at random.
        return np.random.choice([a for a, v in enumerate(values) if v == np.max(values)])

    def find_next_state(self, current_state, current_action):
        """Apply *current_action* from *current_state*, shifted up by the column's wind."""
        row, col = current_state
        lift = self.wind[col]
        if current_action == self.actions[0]:  # up
            return np.array([max(row - 1 - lift, 0), col])
        if current_action == self.actions[1]:  # down
            new_row = min(row + 1 - lift, self.world_size[0] - 1)
            return np.array([max(new_row, 0), col])
        if current_action == self.actions[2]:  # left
            return np.array([max(row - lift, 0), max(col - 1, 0)])
        # right
        return np.array([max(row - lift, 0), min(col + 1, self.world_size[1] - 1)])

    def sarsa_algorithm(self, q_value):
        """Run one Sarsa episode, updating *q_value* in place; return its length."""
        state = self.start
        steps = 0
        action = self.epsilon_greedy(q_value, state)
        while any(state != self.end):
            next_state = self.find_next_state(state, action)
            next_action = self.epsilon_greedy(q_value, next_state)
            # Sarsa update: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a)).
            td_target = self.reward + self.discount * q_value[next_action, next_state[0], next_state[1]]
            q_value[action, state[0], state[1]] += self.step_size * (td_target - q_value[action, state[0], state[1]])
            state, action = next_state, next_action
            steps += 1
        return steps
if __name__ == "__main__":
    windy_grid_word = Windy_Grid_World()
    # Action-value table Q[action, row, col], initialised to zero.
    q_value_fun = np.zeros((windy_grid_word.num_action, windy_grid_word.world_size[0],
                            windy_grid_word.world_size[1]))
    '''
    stat = np.array([3, 0])
    time = 200
    actions = np.zeros(time)
    for idx in range(time):
        actions[idx] += windy_grid_word.epsilon_greedy(q_value_fun, stat)
    print(actions)
    '''
    episode = 170
    # Record the length (in time steps) of every Sarsa episode.
    time_steps = np.zeros(episode)
    for idx in range(episode):
        time_steps[idx] += windy_grid_word.sarsa_algorithm(q_value_fun)
    # Cumulative time steps, so the plot shows episodes completed vs. total steps.
    time_steps = np.add.accumulate(time_steps)
    average_episode = time_steps[-1]/episode
    print("the average episode length at about %.f steps" % average_episode)
    plt.figure(1)
    plt.plot(time_steps, np.arange(episode))
    plt.xlabel("Time Steps")
    plt.ylabel("Episodes")
    plt.savefig("./images/Example6-5.png")
    plt.show()
    plt.close()
    print("Completed!!! You can check it in the 'images' directory")
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.arange",
"numpy.array",
"numpy.random.choice",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.add.accumulate",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
... | [((2897, 3001), 'numpy.zeros', 'np.zeros', (['(windy_grid_word.num_action, windy_grid_word.world_size[0], windy_grid_word\n .world_size[1])'], {}), '((windy_grid_word.num_action, windy_grid_word.world_size[0],\n windy_grid_word.world_size[1]))\n', (2905, 3001), True, 'import numpy as np\n'), ((3271, 3288), 'numpy.zeros', 'np.zeros', (['episode'], {}), '(episode)\n', (3279, 3288), True, 'import numpy as np\n'), ((3409, 3438), 'numpy.add.accumulate', 'np.add.accumulate', (['time_steps'], {}), '(time_steps)\n', (3426, 3438), True, 'import numpy as np\n'), ((3567, 3580), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3577, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3654), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Steps"""'], {}), "('Time Steps')\n", (3640, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3681), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Episodes"""'], {}), "('Episodes')\n", (3669, 3681), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3724), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/Example6-5.png"""'], {}), "('./images/Example6-5.png')\n", (3697, 3724), True, 'import matplotlib.pyplot as plt\n'), ((3729, 3739), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3737, 3739), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3755), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3753, 3755), True, 'import matplotlib.pyplot as plt\n'), ((335, 361), 'numpy.arange', 'np.arange', (['self.num_action'], {}), '(self.num_action)\n', (344, 361), True, 'import numpy as np\n'), ((563, 579), 'numpy.array', 'np.array', (['[3, 0]'], {}), '([3, 0])\n', (571, 579), True, 'import numpy as np\n'), ((599, 615), 'numpy.array', 'np.array', (['[3, 7]'], {}), '([3, 7])\n', (607, 615), True, 'import numpy as np\n'), ((661, 701), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 1, 0]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 1, 0])\n', (669, 701), True, 'import numpy as 
np\n'), ((811, 827), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (825, 827), True, 'import numpy as np\n'), ((3606, 3624), 'numpy.arange', 'np.arange', (['episode'], {}), '(episode)\n', (3615, 3624), True, 'import numpy as np\n'), ((883, 913), 'numpy.random.choice', 'np.random.choice', (['self.actions'], {}), '(self.actions)\n', (899, 913), True, 'import numpy as np\n'), ((1339, 1360), 'numpy.array', 'np.array', (['[temp, col]'], {}), '([temp, col])\n', (1347, 1360), True, 'import numpy as np\n'), ((1562, 1583), 'numpy.array', 'np.array', (['[temp, col]'], {}), '([temp, col])\n', (1570, 1583), True, 'import numpy as np\n'), ((1075, 1094), 'numpy.max', 'np.max', (['state_value'], {}), '(state_value)\n', (1081, 1094), True, 'import numpy as np\n')] |
import numpy as np
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, MultipleSpeciesCatalog, transform
from nbodykit.source.mesh.field import FieldMesh
from nbodykit.lab import SimulationBox2PCF, FFTCorr
from nbodykit import setup_logging
import os
import sys
sys.path.append('./utils')
import tools, dohod #
from time import time
import HImodels
# enable logging, we have some clue what's going on.
setup_logging('info')
#
#
#Global, fixed things
# Input/output roots on the shared file systems.
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
# Fiducial cosmology parameters.
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
# Snapshot scale factors; keep only the last two (lowest redshifts).
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]
alist = alist[-2:]
#Parameters, box size, number of mesh cells, simulation, ...
bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
#bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'
# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm
#Which model & configuration to use
HImodel = HImodels.ModelA
modelname = 'ModelA'
mode = 'galaxies'  # which catalogs enter the measurement: 'halos', 'galaxies' or 'all'
ofolder = '../data/outputs/'
def measurexi(N, edges):
    '''Measure and save the HI auto-correlation and HI x matter cross-correlation.

    For each scale factor in the global ``alist``: load matter, halo, central
    and satellite catalogs, assign HI masses with the configured HI model,
    weight the catalogs by HI mass, subsample the matter particles, and write
    the binned correlation functions to the output folder.

    :param N: target particle count within a cube of side ``edges.max()``
    :param edges: radial bin edges for the correlation function
    '''
    suff='-m1_00p3mh-alpha-0p8-subvol'
    outfolder = ofolder + suff[1:]
    if bs == 1024: outfolder = outfolder + "-big"
    outfolder += "/%s/"%modelname
    for i, aa in enumerate(alist):
        # Matter particles and FOF halos of this snapshot.
        dm = BigFileCatalog(scratchyf + sim + '/fastpm_%0.4f/'%aa , dataset='1')
        rng = np.random.RandomState(dm.comm.rank)
        rank = dm.comm.rank
        halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
        # Halo mass = particle count * particle mass (the 1e10 factor presumably
        # converts MassTable units to solar masses — confirm against the sim spec).
        mp = halocat.attrs['MassTable'][1]*1e10##
        halocat['Mass'] = halocat['Length'].compute() * mp
        cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
        satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
        #
        # Paint HI mass onto halos, centrals and satellites; use it as the pair weight.
        HImodelz = HImodel(aa)
        los = [0,0,1]
        halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
        for cat in [halocat, cencat, satcat]:
            cat['Weight'] = cat['HImass']
        dm['Weight'] = np.ones(dm.size)
        for cat in [dm, halocat, cencat, satcat]: # nbodykit bug in SimBox2PCF that asserts boxsize
            cat.attrs['BoxSize'] = np.broadcast_to(cat.attrs['BoxSize'], 3)
        #
        #Combine galaxies to halos
        comm = halocat.comm
        if mode == 'halos': catalogs = [halocat]
        elif mode == 'galaxies': catalogs = [cencat, satcat]
        elif mode == 'all': catalogs = [halocat, cencat, satcat]
        else: print('Mode not recognized')
        # Normalise the weights so they sum to nc^3 across all MPI ranks.
        rankweight = sum([cat['Weight'].sum().compute() for cat in catalogs])
        totweight = comm.allreduce(rankweight)
        for cat in catalogs: cat['Weight'] /= totweight/float(nc)**3
        allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
        #Subsample
        if rank == 0 :
            print('redshift = ', 1/aa-1)
            print('Number of dm particles = ', dm.csize)
            print('Number of halos particles = ', halocat.csize)
        def subsampler(cat, rng, N, rmax):
            # subsample such that we have at most N particles to rmax
            nbar = (cat.csize / cat.attrs['BoxSize'].prod())
            ratio = (N / rmax ** 3) / nbar
            mask = rng.uniform(size=cat.size) < ratio
            cat1 = cat[mask]
            if rank == 0:
                print('truncating catalog from %d to %d' % (cat.csize, cat1.csize))
            return cat1
        if rank == 0 : print('Create weight array')
        #halocat = subsampler(halocat, rng, N, edges.max())
        dm = subsampler(dm, rng, N, edges.max())
        if rank == 0 : print("Correlation function for edges :\n", edges)
        start=time()
        #xim = SimulationBox2PCF('1d', data1=dm, edges=edges)
        end=time()
        if rank == 0 : print('Time for matter = ', end-start)
        #Others mass weighted
        start = time()
        # HI-mass-weighted halo auto-correlation.
        xih1mass = SimulationBox2PCF('1d', data1=halocat, weight='Weight', edges=edges)
        end=time()
        if rank == 0 : print('Time for HI = ', end-start)
        start = time()
        # HI x matter cross-correlation.
        ximxh1mass = SimulationBox2PCF('1d', data1=halocat, data2=dm, weight='Weight', edges=edges)
        end=time()
        if rank == 0 : print('Time for Cross = ', end-start)
        def savebinned(path, binstat, header):
            # Write (r, xi) columns to *path*; rank 0 creates the directory and writes.
            r, xi = binstat.corr['r'].real, binstat.corr['corr'].real
            if rank == 0:
                try:
                    os.makedirs(os.path.dirname(path))
                except IOError:
                    pass
                np.savetxt(path, np.stack((r, xi), axis=1), header=header)
        #savebinned(ofolder+'ximatter.txt', xim, header='r, xi(r)')
        savebinned(outfolder+"xih1mass_{:6.4f}.txt".format(aa), xih1mass, header='r, xi(r)')
        savebinned(outfolder+"ximxh1mass_{:6.4f}.txt".format(aa), ximxh1mass, header='r, xi(r)')
##
##def measurexigal(N, edges):
## '''plot the power spectrum of halos and H1 with subsampling'''
##
## for i, aa in enumerate(aafiles):
##
## dm = BigFileCatalog(scratch + sim + '/fastpm_%0.4f/'%aa , dataset='1')
## cencat = BigFileCatalog(scratch2+sim+'/fastpm_%0.4f/cencat'%aa)
## satcat = BigFileCatalog(scratch2+sim+'/fastpm_%0.4f/satcat'%aa+satsuff)
## cencat['HImass'] = HI_hod(cencat['Mass'],aa)
## satcat['HImass'] = HI_hod(satcat['Mass'],aa)
## cencat['Weight'] = cencat['HImass']
## satcat['Weight'] = satcat['HImass']
## dm['Weight'] = np.ones(dm.size)
##
## for cat in [dm, cencat, satcat]: # nbodykit bug in SimBox2PCF that asserts boxsize
## cat.attrs['BoxSize'] = np.broadcast_to(cat.attrs['BoxSize'], 3)
##
## rng = np.random.RandomState(dm.comm.rank)
## rank = dm.comm.rank
##
## zz = zzfiles[i]
## if rank == 0 :
## print('redshift = ', zz)
## print('Number of dm particles = ', dm.csize)
## print('Number of halos particles = ', cencat.csize+satcat.csize)
##
## def subsampler(cat, rng, N, rmax):
## # subsample such that we have at most N particles to rmax
## nbar = (cat.csize / cat.attrs['BoxSize'].prod())
## ratio = (N / rmax ** 3) / nbar
## mask = rng.uniform(size=cat.size) < ratio
## cat1 = cat[mask]
##
## if rank == 0:
## print('truncating catalog from %d to %d' % (cat.csize, cat1.csize))
## return cat1
##
## if rank == 0 : print('Create weight array')
##
## #h1 = subsampler(allcat, rng, N, edges.max())
## dm = subsampler(dm, rng, N, edges.max())
## #cencat = subsampler(cencat, rng, N, edges.max())
## #satcat = subsampler(satcat, rng, N, edges.max())
## h1 = transform.ConcatenateSources(cencat, satcat)
##
##
## if rank == 0 : print("Correlation function for edges :\n", edges)
## start=time()
## xim = SimulationBox2PCF('1d', data1=dm, edges=edges)
## end=time()
## if rank == 0 : print('Time for matter = ', end-start)
## start=end
## xigal_h1 = SimulationBox2PCF('1d', data1=h1, edges=edges)
## end=time()
## if rank == 0 : print('Time for halos = ', end-start)
## start=end
## xigal_mxh1 = SimulationBox2PCF('1d', data1=h1, data2=dm, edges=edges)
## end=time()
## if rank == 0 : print('Time for matter x halos = ', end-start)
##
##
## def savebinned(path, binstat, header):
## r, xi = binstat.corr['r'].real, binstat.corr['corr'].real
## if rank == 0:
## try:
## os.makedirs(os.path.dirname(path))
## except IOError:
## pass
## np.savetxt(path, np.stack((r, xi), axis=1), header=header)
##
## ofolder = project + '/%s/fastpm_%0.4f/ss_cm-%d/' % (sim, aa, N)
##
## savebinned(ofolder+'ximatter.txt', xim, header='r, xi(r)')
## savebinned(ofolder+'xigal_h1.txt', xigal_h1, header='r, xi(r)')
## savebinned(ofolder+'xigal_mxh1.txt', xigal_mxh1, header='r, xi(r)')
##
##
if __name__=="__main__":
    #edges = np.logspace(np.log10(0.5), np.log10(20), 10)
    # 30 logarithmically spaced radial bin edges from 1 to 30 (box length units).
    edges = np.logspace(np.log10(1), np.log10(30), 30)
    # use 1000 particles up to (20 Mpc/h) ** 3 volume;
    # looks good enough?
    #measurexigal(N=10000, edges=edges)
    measurexi(N=10000, edges=edges)
| [
"sys.path.append",
"numpy.stack",
"nbodykit.lab.BigFileCatalog",
"nbodykit.setup_logging",
"nbodykit.lab.SimulationBox2PCF",
"os.path.dirname",
"pmesh.pm.ParticleMesh",
"numpy.ones",
"numpy.random.RandomState",
"time.time",
"numpy.broadcast_to",
"numpy.log10"
] | [((313, 339), 'sys.path.append', 'sys.path.append', (['"""./utils"""'], {}), "('./utils')\n", (328, 339), False, 'import sys\n'), ((468, 489), 'nbodykit.setup_logging', 'setup_logging', (['"""info"""'], {}), "('info')\n", (481, 489), False, 'from nbodykit import setup_logging\n'), ((1116, 1160), 'pmesh.pm.ParticleMesh', 'ParticleMesh', ([], {'BoxSize': 'bs', 'Nmesh': '[nc, nc, nc]'}), '(BoxSize=bs, Nmesh=[nc, nc, nc])\n', (1128, 1160), False, 'from pmesh.pm import ParticleMesh\n'), ((1634, 1702), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchyf + sim + '/fastpm_%0.4f/' % aa)"], {'dataset': '"""1"""'}), "(scratchyf + sim + '/fastpm_%0.4f/' % aa, dataset='1')\n", (1648, 1702), False, 'from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, MultipleSpeciesCatalog, transform\n'), ((1717, 1752), 'numpy.random.RandomState', 'np.random.RandomState', (['dm.comm.rank'], {}), '(dm.comm.rank)\n', (1738, 1752), True, 'import numpy as np\n'), ((1800, 1876), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchyf + sim + '/fastpm_%0.4f//' % aa)"], {'dataset': '"""LL-0.200"""'}), "(scratchyf + sim + '/fastpm_%0.4f//' % aa, dataset='LL-0.200')\n", (1814, 1876), False, 'from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, MultipleSpeciesCatalog, transform\n'), ((2000, 2068), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchcm + sim + '/fastpm_%0.4f/cencat' % aa + suff)"], {}), "(scratchcm + sim + '/fastpm_%0.4f/cencat' % aa + suff)\n", (2014, 2068), False, 'from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, MultipleSpeciesCatalog, transform\n'), ((2080, 2148), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchcm + sim + '/fastpm_%0.4f/satcat' % aa + suff)"], {}), "(scratchcm + sim + '/fastpm_%0.4f/satcat' % aa + suff)\n", (2094, 2148), False, 'from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, MultipleSpeciesCatalog, transform\n'), ((2428, 2444), 'numpy.ones', 'np.ones', (['dm.size'], {}), 
'(dm.size)\n', (2435, 2444), True, 'import numpy as np\n'), ((4146, 4152), 'time.time', 'time', ([], {}), '()\n', (4150, 4152), False, 'from time import time\n'), ((4228, 4234), 'time.time', 'time', ([], {}), '()\n', (4232, 4234), False, 'from time import time\n'), ((4353, 4359), 'time.time', 'time', ([], {}), '()\n', (4357, 4359), False, 'from time import time\n'), ((4379, 4447), 'nbodykit.lab.SimulationBox2PCF', 'SimulationBox2PCF', (['"""1d"""'], {'data1': 'halocat', 'weight': '"""Weight"""', 'edges': 'edges'}), "('1d', data1=halocat, weight='Weight', edges=edges)\n", (4396, 4447), False, 'from nbodykit.lab import SimulationBox2PCF, FFTCorr\n'), ((4461, 4467), 'time.time', 'time', ([], {}), '()\n', (4465, 4467), False, 'from time import time\n'), ((4542, 4548), 'time.time', 'time', ([], {}), '()\n', (4546, 4548), False, 'from time import time\n'), ((4570, 4648), 'nbodykit.lab.SimulationBox2PCF', 'SimulationBox2PCF', (['"""1d"""'], {'data1': 'halocat', 'data2': 'dm', 'weight': '"""Weight"""', 'edges': 'edges'}), "('1d', data1=halocat, data2=dm, weight='Weight', edges=edges)\n", (4587, 4648), False, 'from nbodykit.lab import SimulationBox2PCF, FFTCorr\n'), ((4662, 4668), 'time.time', 'time', ([], {}), '()\n', (4666, 4668), False, 'from time import time\n'), ((8739, 8750), 'numpy.log10', 'np.log10', (['(1)'], {}), '(1)\n', (8747, 8750), True, 'import numpy as np\n'), ((8752, 8764), 'numpy.log10', 'np.log10', (['(30)'], {}), '(30)\n', (8760, 8764), True, 'import numpy as np\n'), ((2585, 2625), 'numpy.broadcast_to', 'np.broadcast_to', (["cat.attrs['BoxSize']", '(3)'], {}), "(cat.attrs['BoxSize'], 3)\n", (2600, 2625), True, 'import numpy as np\n'), ((5049, 5074), 'numpy.stack', 'np.stack', (['(r, xi)'], {'axis': '(1)'}), '((r, xi), axis=1)\n', (5057, 5074), True, 'import numpy as np\n'), ((4936, 4957), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4951, 4957), False, 'import os\n')] |
from sentence_transformers import SentenceTransformer, util
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def sorulariDosyadanOkuveListeOlarakDondur():
gecici = []
for i in range(100):
gecici.append([])
df = pd.read_csv('KVKK_100_SORU_CEVAP.txt',sep="\t")
for i in range(100):
gecici[i].append(df.to_numpy()[i][1]) #Soru
gecici[i].append(df.to_numpy()[i][2]) #Cevabı
return(gecici)
testSorulari = ["Rızası Açık Nedir","Bizim rıza kimdir","Açık Rıza Nedir","Veri Sorumlusunun Meşru Menfaatini Tespit Etmek İçin Göz Önünde Bulundurulması Gereken Hususlar Nelerdir?","Kanun Kapsamındaki Kısmi İstisna Halleri Nelerdir"]
modelListesi = [\
"sentence-transformers/paraphrase-xlm-r-multilingual-v1",\
"sentence-transformers/paraphrase-MiniLM-L12-v2",\
"sentence-transformers/paraphrase-MiniLM-L3-v2",\
"sentence-transformers/clip-ViT-B-32-multilingual-v1",\
"sentence-transformers/distiluse-base-multilingual-cased-v2",\
"sentence-transformers/quora-distilbert-multilingual",\
"sentence-transformers/msmarco-distilbert-base-v4",\
"flax-sentence-embeddings/multi-qa_v1-distilbert-cls_dot",\
"flax-sentence-embeddings/all_datasets_v3_distilroberta-base",\
"flax-sentence-embeddings/multi-qa_v1-MiniLM-L6-mean_cos",\
"sentence-transformers/stsb-xlm-r-multilingual"]
for m in modelListesi:
model = SentenceTransformer(m)
encodedSoru = []
for soru in testSorulari:
encodedSoru.append(model.encode(soru))
benzerlikListesi = []
soruListesi = sorulariDosyadanOkuveListeOlarakDondur()
sayac=0
for soru in encodedSoru:
for i in range(100):
benzerlikListesi.append(util.cos_sim(soru,model.encode(soruListesi[i][0])))
sayac+=1
benzerlikListesi2 = []
for oge in benzerlikListesi:
benzerlikListesi2.append(float(oge))
del(benzerlikListesi)
grafik, eksen = plt.subplots(sayac)
grafik.suptitle("Modellerin Sorulara Göre Performansları")
renkKatari = "rgbmk"
for s in range(sayac):
bul = benzerlikListesi2.index(max(benzerlikListesi2[0+s*100:99+s*100]))
print("Model: ",m)
print("Test sorusu: ",testSorulari[s])
print("En yakın soru: ",soruListesi[bul%100][0])
print("En yakın sorunun benzerlik puanı: ",max(benzerlikListesi2[0+s*100:99+s*100]))
print("100 soru için standart sapma: ",np.std(benzerlikListesi2[0+s*100:99+s*100]))
print("100 soru için varyans: ",np.var(benzerlikListesi2[0+s*100:99+s*100]))
print("100 soru için en az benzerlik: ",np.min(benzerlikListesi2[0+s*100:99+s*100]))
print("100 soru için en çok benzerlik: ",np.max(benzerlikListesi2[0+s*100:99+s*100]))
eksen[s].hist(benzerlikListesi2[0+s*100:99+s*100], 100, color = renkKatari[s])
eksen[s].set_title(m+" - "+testSorulari[s])
eksen[s].label_outer()
print("\n")
# plt.legend(m)
# plt.gcf().set_dpi(300) Grafik çözünürlüğünü ayarlamak için kullanılabilir.
# dosyaIsmi = m.replace("/"," ") dosya isminde "/" olmasın
# plt.savefig(dosyaIsmi+".png") dosya olarak grafiğimizi kaydedebiliriz.
plt.show() | [
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.std",
"numpy.var",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.subplots",
"sentence_transformers.SentenceTransformer"
] | [((263, 311), 'pandas.read_csv', 'pd.read_csv', (['"""KVKK_100_SORU_CEVAP.txt"""'], {'sep': '"""\t"""'}), "('KVKK_100_SORU_CEVAP.txt', sep='\\t')\n", (274, 311), True, 'import pandas as pd\n'), ((1451, 1473), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['m'], {}), '(m)\n', (1470, 1473), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((2006, 2025), 'matplotlib.pyplot.subplots', 'plt.subplots', (['sayac'], {}), '(sayac)\n', (2018, 2025), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3276, 3278), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2553), 'numpy.std', 'np.std', (['benzerlikListesi2[0 + s * 100:99 + s * 100]'], {}), '(benzerlikListesi2[0 + s * 100:99 + s * 100])\n', (2508, 2553), True, 'import numpy as np\n'), ((2588, 2639), 'numpy.var', 'np.var', (['benzerlikListesi2[0 + s * 100:99 + s * 100]'], {}), '(benzerlikListesi2[0 + s * 100:99 + s * 100])\n', (2594, 2639), True, 'import numpy as np\n'), ((2682, 2733), 'numpy.min', 'np.min', (['benzerlikListesi2[0 + s * 100:99 + s * 100]'], {}), '(benzerlikListesi2[0 + s * 100:99 + s * 100])\n', (2688, 2733), True, 'import numpy as np\n'), ((2778, 2829), 'numpy.max', 'np.max', (['benzerlikListesi2[0 + s * 100:99 + s * 100]'], {}), '(benzerlikListesi2[0 + s * 100:99 + s * 100])\n', (2784, 2829), True, 'import numpy as np\n')] |
import os
import numpy as np
from h5py import File
import matplotlib.pyplot as plt
import seaborn as sns
import json
from collections import defaultdict
if __name__ == "__main__":
    # Directory holding one JSON result file per experiment run.
    dir = "pcritical_ntidigits_output"  # NOTE(review): shadows the builtin dir()
    files = os.listdir(dir)
    results = [json.load(open(os.path.join(dir, i), "r")) for i in files]
    sns.set_style("whitegrid")
    plt.rcParams.update({"font.size": 22})
    fig, ax = plt.subplots(figsize=(16, 10))
    # Group the per-epoch test accuracies of every run by experiment variant.
    accuracies = defaultdict(list)
    for result in results:
        plasticity, spectral_norm = result["plasticity"], result["spectral_radius_norm"]
        # The flags are stored as the strings "True"/"False" in the JSON files.
        if plasticity == str(True):
            exp_name = "P-CRITICAL"
        elif spectral_norm == str(True):
            exp_name = "$\\rho$ normalization"
        else:
            exp_name = "Reservoir"
        accuracies[exp_name].append(result["test_accuracies"])
    # Plot the mean accuracy per epoch with the std. dev. across runs as error bars.
    for exp_name, accuracy in accuracies.items():
        accuracy = np.array(accuracy)
        mean = np.mean(accuracy, axis=0)
        std = np.std(accuracy, axis=0)
        ax.errorbar(
            np.arange(1, 1 + accuracy.shape[1]),
            mean,
            yerr=std,
            label=exp_name,
            capsize=5,
            elinewidth=1,
            fmt="-o",
        )
    ax.set_xlabel("Epoch")
    ax.set_xticks(np.arange(1, 21))
    ax.set_ylabel("Accuracy")
    # Sort the legend
    handles, labels = ax.get_legend_handles_labels()
    order = {
        "P-CRITICAL": 0,
        "$\\rho$ normalization": 1,
        "Reservoir": 2,
    }
    handles, labels = list(
        zip(*sorted(zip(handles, labels), key=lambda i: order[i[1]]))
    )
    ax.legend(
        handles,
        labels,
        loc="upper center",
        bbox_to_anchor=(0.5, -0.1),
        ncol=3,
        fancybox=True,
    )
    plt.tight_layout()
    filename = "ntidigits-results.png"
    fig.savefig(filename)
    plt.show()
| [
"seaborn.set_style",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.std",
"collections.defaultdict",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.subplots",
"os.listdir"
] | [((233, 248), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (243, 248), False, 'import os\n'), ((329, 355), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (342, 355), True, 'import seaborn as sns\n'), ((360, 398), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (379, 398), True, 'import matplotlib.pyplot as plt\n'), ((413, 443), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (425, 443), True, 'import matplotlib.pyplot as plt\n'), ((461, 478), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (472, 478), False, 'from collections import defaultdict\n'), ((1797, 1815), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1813, 1815), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1894, 1896), True, 'import matplotlib.pyplot as plt\n'), ((938, 956), 'numpy.array', 'np.array', (['accuracy'], {}), '(accuracy)\n', (946, 956), True, 'import numpy as np\n'), ((973, 998), 'numpy.mean', 'np.mean', (['accuracy'], {'axis': '(0)'}), '(accuracy, axis=0)\n', (980, 998), True, 'import numpy as np\n'), ((1013, 1037), 'numpy.std', 'np.std', (['accuracy'], {'axis': '(0)'}), '(accuracy, axis=0)\n', (1019, 1037), True, 'import numpy as np\n'), ((1303, 1319), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (1312, 1319), True, 'import numpy as np\n'), ((1071, 1106), 'numpy.arange', 'np.arange', (['(1)', '(1 + accuracy.shape[1])'], {}), '(1, 1 + accuracy.shape[1])\n', (1080, 1106), True, 'import numpy as np\n'), ((280, 300), 'os.path.join', 'os.path.join', (['dir', 'i'], {}), '(dir, i)\n', (292, 300), False, 'import os\n')] |
# coding:utf-8
"""
Filename: app.py
Author: @DvdNss
Created on 12/10/2021
"""
import csv
import os.path
import time
import cv2
import gdown
import numpy as np
import streamlit as st
import torch
def load_classes(csv_reader):
    """
    Parse (class_name, class_id) rows into a name -> id mapping.

    :param csv_reader: iterable of rows, each a (class_name, class_id) pair
    :return: dict mapping class name to integer class id
    :raises ValueError: on a malformed row or a duplicate class name
    """
    result = {}
    # enumerate(..., 1) replaces the original manual line counter.
    for line, row in enumerate(csv_reader, 1):
        try:
            class_name, class_id = row
        except ValueError:
            raise ValueError('line {}: format should be \'class_name,class_id\''.format(line))
        class_id = int(class_id)
        if class_name in result:
            raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
        result[class_name] = class_id
    return result
def draw_caption(image, box, caption):
    """
    Draw *caption* just above the bounding box, mutating *image* in place.

    The former ``@st.cache`` decorator was removed: this function works purely
    by side effect and returns None, so on a cache hit the drawing was
    silently skipped instead of being performed.

    :param image: image array, modified in place
    :param box: bounding box as (x1, y1, x2, y2)
    :param caption: text to draw
    """
    b = np.array(box).astype(int)
    # Thick black text under thin white text keeps the caption readable on any background.
    cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
    cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
@st.cache
def load_labels():
    """
    Read dataset/labels.csv and return an id -> class-name mapping.

    :return: dict mapping integer class id to class name
    """
    with open("dataset/labels.csv", 'r') as f:
        name_to_id = load_classes(csv.reader(f, delimiter=','))
    # Invert the mapping: the model predicts ids, we want names.
    return {class_id: class_name for class_name, class_id in name_to_id.items()}
def download_models(ids):
    """
    Fetch every model listed in *ids* from Google Drive into model/.

    Models already present on disk are left untouched.

    :param ids: mapping of model name -> Google Drive file id
    """
    with st.spinner('Downloading models, this may take a minute...'):
        for name, drive_id in ids.items():
            destination = f"model/{name}.pt"
            if os.path.isfile(destination):
                continue  # already cached locally
            gdown.download(url=f"https://drive.google.com/uc?id={drive_id}", output=destination)
@st.cache(suppress_st_warning=True)
def load_model(model_path, prefix: str = 'model/'):
    """
    Load a serialized detection model from ``{prefix}{model_path}.pt``.

    :param model_path: model file name without the .pt extension
    :param prefix: directory prefix where model files live
    :return: the unwrapped model, on CPU, switched to eval mode
    """
    # Load model
    if torch.cuda.is_available():
        model = torch.load(f"{prefix}{model_path}.pt").to('cuda')
    else:
        model = torch.load(f"{prefix}{model_path}.pt", map_location=torch.device('cpu'))
    # NOTE(review): this unwraps a .module attribute (presumably a
    # DataParallel-style wrapper) and moves the model to CPU unconditionally,
    # which also undoes the .to('cuda') above — confirm whether the CUDA branch
    # is supposed to keep the model on the GPU.
    model = model.module.cpu()
    model.training = False
    model.eval()
    return model
def process_img(model, image, labels, caption: bool = True):
    """
    Run the detector on one image and draw the detections on a copy of it.

    :param model: inference model returning (scores, classifications, boxes)
    :param image: image array of shape (rows, cols, channels)
    :param labels: mapping of class id -> class name
    :param caption: whether to draw the class name above each box
    :return: copy of the input image annotated with boxes and an FPS counter
    """
    image_orig = image.copy()
    rows, cols, cns = image.shape
    smallest_side = min(rows, cols)
    # Rescale the image
    min_side = 608
    max_side = 1024
    scale = min_side / smallest_side
    # Check if the largest side is now greater than max_side
    largest_side = max(rows, cols)
    if largest_side * scale > max_side:
        scale = max_side / largest_side
    # Resize the image with the computed scale
    image = cv2.resize(image, (int(round(cols * scale)), int(round((rows * scale)))))
    rows, cols, cns = image.shape
    # Zero-pad so both spatial sides become multiples of 32.
    pad_w = 32 - rows % 32
    pad_h = 32 - cols % 32
    new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
    new_image[:rows, :cols, :] = image.astype(np.float32)
    image = new_image.astype(np.float32)
    # Scale to [0, 1] then normalise with the standard ImageNet mean/std constants.
    image /= 255
    image -= [0.485, 0.456, 0.406]
    image /= [0.229, 0.224, 0.225]
    # HWC -> NCHW with a batch dimension of 1.
    image = np.expand_dims(image, 0)
    image = np.transpose(image, (0, 3, 1, 2))
    with torch.no_grad():
        image = torch.from_numpy(image)
        if torch.cuda.is_available():
            image = image.cuda()
        # NOTE(review): the local name `st` shadows the streamlit alias inside
        # this function; here it is only the timer start value.
        st = time.time()
        scores, classification, transformed_anchors = model(image.float())
        elapsed_time = time.time() - st
    # Keep detections whose confidence exceeds 0.5.
    idxs = np.where(scores.cpu() > 0.5)
    for j in range(idxs[0].shape[0]):
        bbox = transformed_anchors[idxs[0][j], :]
        # Map box coordinates back to the original image scale.
        x1 = int(bbox[0] / scale)
        y1 = int(bbox[1] / scale)
        x2 = int(bbox[2] / scale)
        y2 = int(bbox[3] / scale)
        label_name = labels[int(classification[idxs[0][j]])]
        # One colour per label.
        colors = {
            'with_mask': (0, 255, 0),
            'without_mask': (255, 0, 0),
            'mask_weared_incorrect': (190, 100, 20)
        }
        cap = '{}'.format(label_name) if caption else ''
        draw_caption(image_orig, (x1, y1, x2, y2), cap)
        cv2.rectangle(image_orig, (x1, y1), (x2, y2), color=colors[label_name], thickness=2)
    # Overlay the measured frame rate and whether CUDA was available.
    cv2.putText(image_orig,
                f"{'{:.1f}'.format(1 / float(elapsed_time))}{' cuda:' + str(torch.cuda.is_available()).lower()}",
                fontScale=1, fontFace=cv2.FONT_HERSHEY_PLAIN, org=(10, 20), color=(0, 255, 0))
    return image_orig
# Page config
st.set_page_config(layout="centered")
st.sidebar.title("Face Mask Detection")
# Models drive ids
ids = {
    'resnet50_20': '17c2kseAC3y62IwaRQW4m1Vc-7o3WjPdh',
    # 'resnet50_29': '1E_IOIuE5OpO4tQgTbXjdAmXR-9BCxxmT',
    'resnet152_20': '1oUHqE_BgXehopdicuvPCGOxnwAdlDkEY',
}
# Download all models from drive
download_models(ids)
page = st.sidebar.selectbox('', options=('Description', 'Inference', 'Webcam'), index=0, help='Choose where to go. ')
# Model selection
labels = load_labels()
model_path = st.sidebar.selectbox('Choose a model', options=[k for k in ids], index=0)
model = load_model(model_path=model_path) if model_path != '' else None
if page == 'Inference':
    # Display example selection
    index = st.number_input('', min_value=0, max_value=852, value=495, help='Choose an image. ')
    # Whether to use precomputed img or not
    cached = st.checkbox('Use cached image (precomputed with gpu)', value=True)
    left, right = st.columns([3, 1])
    if not cached:
        # Get corresponding image and transform it
        image = cv2.imread(f'dataset/validation/image/maksssksksss{str(index)}.jpg')
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Process img
        with st.spinner('Please wait while the image is being processed... This may take a while. '):
            image = process_img(model, image, labels, caption=False)
    else:
        # Precomputed output image for the selected model family.
        image = cv2.imread(f"dataset/validation/{model_path.split('_')[0]}/maksssksksss{str(index)}.jpg")
    left.image(image)
    # Write labels dict and device on right
    right.write({
        'green': 'with_mask',
        'orange': 'mask_weared_incorrect',
        'red': 'without_mask'
    })
    device = 'CPU' if not torch.cuda.is_available() else 'GPU'
    right.write(f"CUDA: {torch.cuda.is_available()} ({device})")
elif page == "Webcam":
    try:
        # Get webcam feed
        camera = cv2.VideoCapture(0)
        # Prepare video container
        video = st.image([])
        # Re-run frames until the user navigates away (streamlit reruns the script).
        while page == "Webcam":
            _, frame = camera.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            video.image(process_img(model, frame, labels, caption=True))
    # NOTE(review): bare except also swallows KeyboardInterrupt and real bugs —
    # consider narrowing to Exception.
    except:
        st.warning(
            'Unable to detect corresponding device. Note that this feature isn\'t available on Streamlit Cloud. ')
elif page == 'Description':
    st.title('Face Mask Detection')
    st.image('resources/ex.jpg', caption="[GitHub](https://github.com/DvdNss/FaceMaskDetection)")
    st.markdown(
        "This project aims to create a Face Mask Detection model to visually detect facemasks on images and videos. "
        "We operate with 3 labels: \n\n * _with_mask_ \n * _without_mask_\n * _mask_weared_incorrect_ \n\nThe dataset "
        "contains approximately 2500 hand-collected and hand-labelled images.")
| [
"csv.reader",
"streamlit.image",
"streamlit.title",
"streamlit.sidebar.title",
"streamlit.sidebar.selectbox",
"cv2.rectangle",
"torch.device",
"torch.no_grad",
"streamlit.set_page_config",
"streamlit.spinner",
"streamlit.cache",
"cv2.cvtColor",
"torch.load",
"numpy.transpose",
"streamlit... | [((1964, 1998), 'streamlit.cache', 'st.cache', ([], {'suppress_st_warning': '(True)'}), '(suppress_st_warning=True)\n', (1972, 1998), True, 'import streamlit as st\n'), ((5066, 5103), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""centered"""'}), "(layout='centered')\n", (5084, 5103), True, 'import streamlit as st\n'), ((5104, 5143), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Face Mask Detection"""'], {}), "('Face Mask Detection')\n", (5120, 5143), True, 'import streamlit as st\n'), ((5407, 5521), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['""""""'], {'options': "('Description', 'Inference', 'Webcam')", 'index': '(0)', 'help': '"""Choose where to go. """'}), "('', options=('Description', 'Inference', 'Webcam'),\n index=0, help='Choose where to go. ')\n", (5427, 5521), True, 'import streamlit as st\n'), ((5573, 5646), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choose a model"""'], {'options': '[k for k in ids]', 'index': '(0)'}), "('Choose a model', options=[k for k in ids], index=0)\n", (5593, 5646), True, 'import streamlit as st\n'), ((1024, 1116), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)', '(2)'], {}), '(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (\n 0, 0, 0), 2)\n', (1035, 1116), False, 'import cv2\n'), ((1116, 1214), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 255, 255)', '(1)'], {}), '(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (\n 255, 255, 255), 1)\n', (1127, 1214), False, 'import cv2\n'), ((2211, 2236), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2234, 2236), False, 'import torch\n'), ((3658, 3682), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (3672, 3682), True, 'import numpy as np\n'), ((3695, 3728), 'numpy.transpose', 
'np.transpose', (['image', '(0, 3, 1, 2)'], {}), '(image, (0, 3, 1, 2))\n', (3707, 3728), True, 'import numpy as np\n'), ((5788, 5877), 'streamlit.number_input', 'st.number_input', (['""""""'], {'min_value': '(0)', 'max_value': '(852)', 'value': '(495)', 'help': '"""Choose an image. """'}), "('', min_value=0, max_value=852, value=495, help=\n 'Choose an image. ')\n", (5803, 5877), True, 'import streamlit as st\n'), ((5931, 5997), 'streamlit.checkbox', 'st.checkbox', (['"""Use cached image (precomputed with gpu)"""'], {'value': '(True)'}), "('Use cached image (precomputed with gpu)', value=True)\n", (5942, 5997), True, 'import streamlit as st\n'), ((6017, 6035), 'streamlit.columns', 'st.columns', (['[3, 1]'], {}), '([3, 1])\n', (6027, 6035), True, 'import streamlit as st\n'), ((1689, 1748), 'streamlit.spinner', 'st.spinner', (['"""Downloading models, this may take a minute..."""'], {}), "('Downloading models, this may take a minute...')\n", (1699, 1748), True, 'import streamlit as st\n'), ((3739, 3754), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3752, 3754), False, 'import torch\n'), ((3773, 3796), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (3789, 3796), False, 'import torch\n'), ((3808, 3833), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3831, 3833), False, 'import torch\n'), ((3882, 3893), 'time.time', 'time.time', ([], {}), '()\n', (3891, 3893), False, 'import time\n'), ((6208, 6246), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (6220, 6246), False, 'import cv2\n'), ((994, 1007), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (1002, 1007), True, 'import numpy as np\n'), ((1368, 1396), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1378, 1396), False, 'import csv\n'), ((3397, 3440), 'numpy.zeros', 'np.zeros', (['(rows + pad_w, cols + pad_h, cns)'], {}), '((rows + pad_w, cols + pad_h, cns))\n', 
(3405, 3440), True, 'import numpy as np\n'), ((3992, 4003), 'time.time', 'time.time', ([], {}), '()\n', (4001, 4003), False, 'import time\n'), ((4681, 4769), 'cv2.rectangle', 'cv2.rectangle', (['image_orig', '(x1, y1)', '(x2, y2)'], {'color': 'colors[label_name]', 'thickness': '(2)'}), '(image_orig, (x1, y1), (x2, y2), color=colors[label_name],\n thickness=2)\n', (4694, 4769), False, 'import cv2\n'), ((6283, 6380), 'streamlit.spinner', 'st.spinner', (['"""Please wait while the image is being processed... This may take a while. """'], {}), "(\n 'Please wait while the image is being processed... This may take a while. '\n )\n", (6293, 6380), True, 'import streamlit as st\n'), ((6779, 6804), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6802, 6804), False, 'import torch\n'), ((6957, 6976), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (6973, 6976), False, 'import cv2\n'), ((7028, 7040), 'streamlit.image', 'st.image', (['[]'], {}), '([])\n', (7036, 7040), True, 'import streamlit as st\n'), ((7423, 7454), 'streamlit.title', 'st.title', (['"""Face Mask Detection"""'], {}), "('Face Mask Detection')\n", (7431, 7454), True, 'import streamlit as st\n'), ((7459, 7557), 'streamlit.image', 'st.image', (['"""resources/ex.jpg"""'], {'caption': '"""[GitHub](https://github.com/DvdNss/FaceMaskDetection)"""'}), "('resources/ex.jpg', caption=\n '[GitHub](https://github.com/DvdNss/FaceMaskDetection)')\n", (7467, 7557), True, 'import streamlit as st\n'), ((7557, 7864), 'streamlit.markdown', 'st.markdown', (['"""This project aims to create a Face Mask Detection model to visually detect facemasks on images and videos. We operate with 3 labels: \n\n * _with_mask_ \n * _without_mask_\n * _mask_weared_incorrect_ \n\nThe dataset contains approximately 2500 hand-collected and hand-labelled images."""'], {}), '(\n """This project aims to create a Face Mask Detection model to visually detect facemasks on images and videos. 
We operate with 3 labels: \n\n * _with_mask_ \n * _without_mask_\n * _mask_weared_incorrect_ \n\nThe dataset contains approximately 2500 hand-collected and hand-labelled images."""\n )\n', (7568, 7864), True, 'import streamlit as st\n'), ((1911, 1960), 'gdown.download', 'gdown.download', ([], {'url': 'url', 'output': 'f"""model/{key}.pt"""'}), "(url=url, output=f'model/{key}.pt')\n", (1925, 1960), False, 'import gdown\n'), ((2254, 2292), 'torch.load', 'torch.load', (['f"""{prefix}{model_path}.pt"""'], {}), "(f'{prefix}{model_path}.pt')\n", (2264, 2292), False, 'import torch\n'), ((2382, 2401), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2394, 2401), False, 'import torch\n'), ((6841, 6866), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6864, 6866), False, 'import torch\n'), ((7131, 7169), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (7143, 7169), False, 'import cv2\n'), ((7263, 7385), 'streamlit.warning', 'st.warning', (['"""Unable to detect corresponding device. Note that this feature isn\'t available on Streamlit Cloud. """'], {}), '(\n "Unable to detect corresponding device. Note that this feature isn\'t available on Streamlit Cloud. "\n )\n', (7273, 7385), True, 'import streamlit as st\n'), ((4887, 4912), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4910, 4912), False, 'import torch\n')] |
import numpy as np
import torch
import torch.nn as nn
from enum import Enum
from collections import namedtuple
import torch.nn.functional as F
import math
def get_keys(d, name):
    """Extract the sub-dict of checkpoint entries living under the ``name`` prefix.

    If *d* wraps the weights in a ``state_dict`` entry, that inner dict is
    used.  Keys whose first ``len(name)`` characters equal ``name`` are kept,
    with the prefix plus the following separator character stripped.
    """
    state = d['state_dict'] if 'state_dict' in d else d
    prefix_len = len(name)
    filtered = {}
    for key, value in state.items():
        if key[:prefix_len] == name:
            filtered[key[prefix_len + 1:]] = value
    return filtered
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
def l2_norm(input, axis=1):
    """Scale *input* to unit L2 norm along *axis* (norm kept as a broadcast dim)."""
    denom = torch.norm(input, 2, axis, True)
    return input / denom
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """A named tuple describing one ResNet bottleneck unit:
    input channel count, output depth, and convolution stride."""
def get_block(in_channel, depth, num_units, stride=2):
    """Spec for one ResNet stage: a strided unit followed by stride-1 units."""
    units = [Bottleneck(in_channel, depth, stride)]
    for _ in range(num_units - 1):
        units.append(Bottleneck(depth, depth, 1))
    return units
def get_blocks(num_layers):
    """Return the per-stage block configuration for an IR(-SE) backbone.

    Args:
        num_layers: backbone depth; one of 50, 100 or 152.

    Returns:
        A list of four stages, each a list of ``Bottleneck`` specs.

    Raises:
        ValueError: if ``num_layers`` is not a supported depth.
    """
    # Units per stage for each supported depth.
    stage_units = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in stage_units:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    u1, u2, u3, u4 = stage_units[num_layers]
    return [
        get_block(in_channel=64, depth=64, num_units=u1),
        get_block(in_channel=64, depth=128, num_units=u2),
        get_block(in_channel=128, depth=256, num_units=u3),
        get_block(in_channel=256, depth=512, num_units=u4),
    ]
class ProgressiveStage(Enum):
    """Progressive-training stage of the e4e encoder.

    Stage k enables the first k per-layer delta branches on top of the base
    W code (see ``Encoder4Editing.forward``); ``Inference`` enables all of
    them.
    """
    WTraining = 0
    Delta1Training = 1
    Delta2Training = 2
    Delta3Training = 3
    Delta4Training = 4
    Delta5Training = 5
    Delta6Training = 6
    Delta7Training = 7
    Delta8Training = 8
    Delta9Training = 9
    Delta10Training = 10
    Delta11Training = 11
    Delta12Training = 12
    Delta13Training = 13
    Delta14Training = 14
    Delta15Training = 15
    Delta16Training = 16
    Delta17Training = 17
    Inference = 18
class EqualLinear(nn.Module):
    """Linear layer with equalized learning rate (StyleGAN2-style).

    The weight is stored at (roughly) unit variance and rescaled by
    ``1 / sqrt(in_dim) * lr_mul`` on every forward pass, equalizing the
    effective learning rate across layers of different fan-in.

    Args:
        in_dim: number of input features.
        out_dim: number of output features.
        bias: whether to learn an additive bias.
        bias_init: initial value for the bias.
        lr_mul: learning-rate multiplier applied to the weight scale and bias.
        activation: if truthy, apply the fused leaky-ReLU activation
            (bias add, leaky ReLU with slope 0.2, then a sqrt(2) gain)
            instead of a plain bias add.
    """

    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            # BUG FIX: this branch previously only printed a debug message
            # ("do not come here") and never applied bias or activation.
            # Implement the standard StyleGAN2 fused leaky ReLU: bias add,
            # leaky ReLU (negative slope 0.2), and a sqrt(2) gain.
            if self.bias is not None:
                out = out + self.bias * self.lr_mul
            out = F.leaky_relu(out, negative_slope=0.2) * math.sqrt(2)
        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )
        return out
class GradualStyleBlock(nn.Module):
    """Map a spatial feature map to a single ``out_c``-dimensional style code.

    ``log2(spatial)`` stride-2 convolutions reduce the input to 1x1, after
    which an equalized linear layer produces the final code.
    """

    def __init__(self, in_c, out_c, spatial):
        super(GradualStyleBlock, self).__init__()
        self.out_c = out_c
        self.spatial = spatial
        num_pools = int(np.log2(spatial))
        layers = [
            nn.Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(),
        ]
        for _ in range(num_pools - 1):
            layers.append(nn.Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1))
            layers.append(nn.LeakyReLU())
        self.convs = nn.Sequential(*layers)
        self.linear = EqualLinear(out_c, out_c, lr_mul=1)

    def forward(self, x):
        features = self.convs(x)
        features = features.view(-1, self.out_c)
        return self.linear(features)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR_SE(nn.Module):
    """IR-SE residual unit: BN -> 3x3 conv -> PReLU -> strided 3x3 conv -> BN
    -> SE attention, added to an identity (or 1x1-projected) shortcut."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        # Identity shortcut when channel counts match (MaxPool2d(1, stride)
        # only applies the stride); 1x1 projection otherwise.
        if in_channel == depth:
            self.shortcut_layer = nn.MaxPool2d(1, stride)
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                nn.BatchNorm2d(depth),
            )
        self.res_layer = nn.Sequential(
            nn.BatchNorm2d(in_channel),
            nn.Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            nn.PReLU(depth),
            nn.Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            nn.BatchNorm2d(depth),
            SEModule(depth, 16),
        )

    def forward(self, x):
        identity = self.shortcut_layer(x)
        residual = self.res_layer(x)
        return identity + residual
class Encoder4Editing(nn.Module):
    """e4e encoder: maps an image to ``style_count`` W+ latent codes.

    An IR-SE backbone produces feature maps at three depths, which feed an
    FPN-style pyramid.  The first style branch predicts a base W code; every
    other branch predicts a per-layer offset ("delta") that is added on top,
    with the number of active deltas controlled by ``progressive_stage``.
    """

    def __init__(self, num_layers, mode='ir_se', size=512):
        super(Encoder4Editing, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        unit_module = bottleneck_IR_SE
        self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                         nn.BatchNorm2d(64),
                                         nn.PReLU(64))
        # Flatten the per-stage specs into one sequential backbone.
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = nn.Sequential(*modules)
        self.styles = nn.ModuleList()
        # One style branch per StyleGAN layer: 2 * log2(size) - 2.
        log_size = int(math.log(size, 2))
        self.style_count = 2 * log_size - 2
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            # Coarse codes read 16x16 features, middle 32x32, fine 64x64.
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        # Lateral 1x1 convs projecting mid/shallow features to 512 channels.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
        self.progressive_stage = ProgressiveStage.Inference

    def get_deltas_starting_dimensions(self):
        ''' Get a list of the initial dimension of every delta from which it is applied '''
        return list(range(self.style_count))  # Each dimension has a delta applied to it

    def set_progressive_stage(self, new_stage: ProgressiveStage):
        # Controls how many delta branches are active in forward().
        self.progressive_stage = new_stage
        print('Changed progressive stage to: ', new_stage)

    def forward(self, x):
        x = self.input_layer(x)
        # Tap intermediate features while running the backbone.  The indices
        # 6/20/23 are the last units of stages 2/3/4 for the 50-layer config
        # (3+4, 3+4+14, 3+4+14+3 units); presumably tuned for num_layers=50 —
        # TODO confirm for the 100/152-layer configs.
        modulelist = list(self.body._modules.values())
        for i, l in enumerate(modulelist):
            x = l(x)
            if i == 6:
                c1 = x
            elif i == 20:
                c2 = x
            elif i == 23:
                c3 = x
        # Infer main W and duplicate it
        w0 = self.styles[0](c3)
        w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
        stage = self.progressive_stage.value
        features = c3
        for i in range(1, min(stage + 1, self.style_count)):  # Infer additional deltas
            if i == self.coarse_ind:
                p2 = _upsample_add(c3, self.latlayer1(c2))  # FPN's middle features
                features = p2
            elif i == self.middle_ind:
                p1 = _upsample_add(p2, self.latlayer2(c1))  # FPN's fine features
                features = p1
            delta_i = self.styles[i](features)
            w[:, i] += delta_i
        return w
def _upsample_add(x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
| [
"torch.randn",
"torch.zeros",
"math.log",
"math.sqrt",
"torch.nn.ModuleList",
"torch.norm",
"numpy.log2",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.PReLU",
"torch.nn.ReLU",
"torch.n... | [((587, 641), 'collections.namedtuple', 'namedtuple', (['"""Block"""', "['in_channel', 'depth', 'stride']"], {}), "('Block', ['in_channel', 'depth', 'stride'])\n", (597, 641), False, 'from collections import namedtuple\n'), ((481, 513), 'torch.norm', 'torch.norm', (['input', '(2)', 'axis', '(True)'], {}), '(input, 2, axis, True)\n', (491, 513), False, 'import torch\n'), ((527, 549), 'torch.div', 'torch.div', (['input', 'norm'], {}), '(input, norm)\n', (536, 549), False, 'import torch\n'), ((3887, 3910), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (3900, 3910), True, 'import torch.nn as nn\n'), ((4240, 4263), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (4260, 4263), True, 'import torch.nn as nn\n'), ((4283, 4368), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', '(channels // reduction)'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(channels, channels // reduction, kernel_size=1, padding=0, bias=False\n )\n', (4292, 4368), True, 'import torch.nn as nn\n'), ((4384, 4405), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4391, 4405), True, 'import torch.nn as nn\n'), ((4425, 4510), 'torch.nn.Conv2d', 'nn.Conv2d', (['(channels // reduction)', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(channels // reduction, channels, kernel_size=1, padding=0, bias=False\n )\n', (4434, 4510), True, 'import torch.nn as nn\n'), ((4529, 4541), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4539, 4541), True, 'import torch.nn as nn\n'), ((6497, 6520), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (6510, 6520), True, 'import torch.nn as nn\n'), ((6544, 6559), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6557, 6559), True, 'import torch.nn as nn\n'), ((7069, 7124), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(256, 
512, kernel_size=1, stride=1, padding=0)\n', (7078, 7124), True, 'import torch.nn as nn\n'), ((7150, 7205), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(512)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(128, 512, kernel_size=1, stride=1, padding=0)\n', (7159, 7205), True, 'import torch.nn as nn\n'), ((9334, 9400), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(H, W)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, size=(H, W), mode='bilinear', align_corners=True)\n", (9347, 9400), True, 'import torch.nn.functional as F\n'), ((3007, 3048), 'torch.nn.functional.linear', 'F.linear', (['input', '(self.weight * self.scale)'], {}), '(input, self.weight * self.scale)\n', (3015, 3048), True, 'import torch.nn.functional as F\n'), ((3187, 3258), 'torch.nn.functional.linear', 'F.linear', (['input', '(self.weight * self.scale)'], {'bias': '(self.bias * self.lr_mul)'}), '(input, self.weight * self.scale, bias=self.bias * self.lr_mul)\n', (3195, 3258), True, 'import torch.nn.functional as F\n'), ((3525, 3541), 'numpy.log2', 'np.log2', (['spatial'], {}), '(spatial)\n', (3532, 3541), True, 'import numpy as np\n'), ((3584, 3642), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_c', 'out_c'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(in_c, out_c, kernel_size=3, stride=2, padding=1)\n', (3593, 3642), True, 'import torch.nn as nn\n'), ((3664, 3678), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3676, 3678), True, 'import torch.nn as nn\n'), ((4959, 4982), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(1)', 'stride'], {}), '(1, stride)\n', (4971, 4982), True, 'import torch.nn as nn\n'), ((5224, 5250), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channel'], {}), '(in_channel)\n', (5238, 5250), True, 'import torch.nn as nn\n'), ((5264, 5323), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(3, 3)', '(1, 1)', '(1)'], {'bias': '(False)'}), '(in_channel, depth, (3, 3), (1, 1), 1, bias=False)\n', (5273, 
5323), True, 'import torch.nn as nn\n'), ((5337, 5352), 'torch.nn.PReLU', 'nn.PReLU', (['depth'], {}), '(depth)\n', (5345, 5352), True, 'import torch.nn as nn\n'), ((5366, 5420), 'torch.nn.Conv2d', 'nn.Conv2d', (['depth', 'depth', '(3, 3)', 'stride', '(1)'], {'bias': '(False)'}), '(depth, depth, (3, 3), stride, 1, bias=False)\n', (5375, 5420), True, 'import torch.nn as nn\n'), ((5434, 5455), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['depth'], {}), '(depth)\n', (5448, 5455), True, 'import torch.nn as nn\n'), ((6046, 6088), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(3, 64, (3, 3), 1, 1, bias=False)\n', (6055, 6088), True, 'import torch.nn as nn\n'), ((6128, 6146), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6142, 6146), True, 'import torch.nn as nn\n'), ((6186, 6198), 'torch.nn.PReLU', 'nn.PReLU', (['(64)'], {}), '(64)\n', (6194, 6198), True, 'import torch.nn as nn\n'), ((6583, 6600), 'math.log', 'math.log', (['size', '(2)'], {}), '(size, 2)\n', (6591, 6600), False, 'import math\n'), ((2873, 2890), 'math.sqrt', 'math.sqrt', (['in_dim'], {}), '(in_dim)\n', (2882, 2890), False, 'import math\n'), ((3760, 3819), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_c', 'out_c'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(out_c, out_c, kernel_size=3, stride=2, padding=1)\n', (3769, 3819), True, 'import torch.nn as nn\n'), ((3837, 3851), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3849, 3851), True, 'import torch.nn as nn\n'), ((5062, 5118), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(1, 1)', 'stride'], {'bias': '(False)'}), '(in_channel, depth, (1, 1), stride, bias=False)\n', (5071, 5118), True, 'import torch.nn as nn\n'), ((5136, 5157), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['depth'], {}), '(depth)\n', (5150, 5157), True, 'import torch.nn as nn\n'), ((2627, 2655), 'torch.randn', 'torch.randn', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (2638, 
2655), False, 'import torch\n'), ((2725, 2745), 'torch.zeros', 'torch.zeros', (['out_dim'], {}), '(out_dim)\n', (2736, 2745), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import glob
import json
import os
import cv2
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from tensorpack.utils import logger
from tensorpack.utils.timer import timed_operation
from config import config as cfg
from dataset import DatasetRegistry, DatasetSplit
def _category_id_to_class_id(category_id):
if category_id in [1, 2, 3, 4, 5, 6]:
return 1
elif category_id in [7, 8, 9]:
return 2
elif category_id in [10, 11, 12, 13]:
return 3
def _get_bb(img_path, anno_path, heavy_check=False):
    """Parse one DeepFashion2 annotation file into boxes and class ids.

    Args:
        img_path: path to the image (only decoded when ``heavy_check``).
        anno_path: path to the JSON annotation file.
        heavy_check: if True, read the image to clip box coordinates to the
            image extent.

    Returns:
        (boxes, classes): an Nx4 float32 array of box coordinates and an
        N-array of coarse class ids.
    """
    # BUG FIX: the file handle was previously leaked
    # (`open(anno_path).read()` with no close); use a context manager.
    with open(anno_path, 'r') as f:
        raw_json = json.load(f)
    if heavy_check:
        raw_img = cv2.imread(img_path)
        height, width, _ = raw_img.shape
    boxes = []
    classes = []
    for obj in [raw_json[key] for key in raw_json.keys() if 'item' in key]:
        bb = obj['bounding_box']
        # Note that DeepFashion2 format is bottom left and top right lul
        # NOTE(review): this deliberately swaps bb[1]/bb[3] — verify against
        # the dataset version in use before changing.
        if heavy_check:
            x_top = np.clip(float(bb[0]), 0, width)
            y_top = np.clip(float(bb[3]), 0, height)
            x_bot = np.clip(float(bb[2]), 0, width)
            y_bot = np.clip(float(bb[1]), 0, height)
        else:
            x_top = float(bb[0])
            y_top = float(bb[3])
            x_bot = float(bb[2])
            y_bot = float(bb[1])
        boxes.append([x_top, y_top, x_bot, y_bot])
        classes.append(_category_id_to_class_id(obj['category_id']))
    return np.array(boxes, dtype=np.float32), np.array(classes)
class DeepFashion2Detection(DatasetSplit):
    """
    A class to load datasets, evaluate results for a datast split (e.g., "coco_train_2017")
    To use your own dataset that's not in COCO format, write a subclass that
    implements the interfaces.

    DeepFashion2 categories are collapsed into three coarse classes.
    """
    class_names = ['top', 'bottom', 'long']
    # Side effect kept for backward compatibility: registers the class names
    # with the global config at class-definition time.
    cfg.DATA.CLASS_NAMES = ["BG"] + class_names

    def __init__(self, root_path, split):
        """
        Args:
            root_path: dataset root containing one sub-directory per split.
            split: split name, e.g. 'train' or 'val'.
        """
        self.split = split
        self.data_path = os.path.join(root_path, split)

    def _load(self, load_anno=True, heavy_check=False):
        """Collect (image, annotation) pairs and build the roidb list.

        Args:
            load_anno: also parse boxes/classes into each roidb entry.
            heavy_check: decode every image with cv2 to verify readability
                (slow) instead of only checking file existence.
        """
        all_imgs = glob.glob(os.path.join(self.data_path, 'image', '*.jpg'))
        images_list = []
        annos_list = []
        for each_img in all_imgs:
            if heavy_check:
                img = cv2.imread(each_img)
                condition = img is not None
            else:
                condition = os.path.isfile(each_img)
            if not condition:
                print('[WARNING]:', each_img, 'not existed or empty. Skipped')
                continue
            file_name = each_img.split('/')[-1]
            each_anno = os.path.join(self.data_path, 'annos', file_name.replace('.jpg', '.json'))
            if not os.path.isfile(each_anno):
                print("[WARNING]: Can't find annotation for", file_name, "at", each_anno, ". Skipped")
                continue
            images_list.append(each_img)
            annos_list.append(each_anno)
        print('Found', len(images_list), 'images for', self.split)
        roidbs = []
        for img, anno in tqdm(zip(images_list, annos_list), desc=self.split, total=len(images_list)):
            # BUG FIX: the id previously interpolated the *builtin* `type`
            # (yielding "..._<class 'type'>"); use the split name instead.
            img_id = '{}_{}'.format(img.split('/')[-1].replace('.jpg', ''), self.split)
            sample = {
                'file_name': img,
                'img_id': img_id,
            }
            if load_anno:
                boxes, classes = _get_bb(img, anno)
                sample['boxes'] = boxes
                sample['class'] = classes
                sample['is_crowd'] = False
            roidbs.append(sample)
        return roidbs

    def training_roidbs(self):
        """
        Returns:
            roidbs (list[dict]):
            Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances.
            and the following keys are expected for training:
            file_name: str, full path to the image
            boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2]
            class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories)
            is_crowd: k booleans. Use k False if you don't know what it means.
            segmentation: k lists of numpy arrays (one for each instance).
            Each list of numpy arrays corresponds to the mask for one instance.
            Each numpy array in the list is a polygon of shape Nx2,
            because one mask can be represented by N polygons.
            If your segmentation annotations are originally masks rather than polygons,
            either convert it, or the augmentation will need to be changed or skipped accordingly.
            Include this field only if training Mask R-CNN.
        """
        return self._load()

    def inference_roidbs(self):
        """
        Returns:
            roidbs (list[dict]):
            Each dict corresponds to one image to run inference on. The
            following keys in the dict are expected:
            file_name (str): full path to the image
            image_id (str): an id for the image. The inference results will be stored with this id.
        """
        return self._load(load_anno=False)

    def eval_inference_results(self, results, output=None):
        """
        Args:
            results (list[dict]): the inference results as dicts.
            Each dict corresponds to one __instance__. It contains the following keys:
            image_id (str): the id that matches `inference_roidbs`.
            category_id (int): the category prediction, in range [1, #category]
            bbox (list[float]): x1, y1, x2, y2
            score (float):
            segmentation: the segmentation mask in COCO's rle format.
            output (str): the output file or directory to optionally save the results to.
        Returns:
            dict: the evaluation results.
        """
        raise NotImplementedError
def register_deep_fashion_2(basedir):
    """
    Add COCO datasets like "coco_train201x" to the registry,
    so you can refer to them with names in `cfg.DATA.TRAIN/VAL`.
    Registers the 'train' and 'val' DeepFashion2 splits.
    """
    for split_name in ['train', 'val']:
        # Bind split_name as a default argument so each lambda captures
        # its own split (late-binding closure pitfall).
        DatasetRegistry.register(
            split_name, lambda x=split_name: DeepFashion2Detection(basedir, x))
if __name__ == '__main__':
    # Smoke test: load the training split from a hard-coded local path and
    # report how many usable images were found.
    basedir = '/Users/linus/techainer/DeepFashion/DeepFashion2'
    c = DeepFashion2Detection(basedir, 'train')
    roidb = c.training_roidbs()
    print("#Images:", len(roidb))
| [
"cv2.imread",
"os.path.isfile",
"numpy.array",
"os.path.join"
] | [((698, 718), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (708, 718), False, 'import cv2\n'), ((1485, 1518), 'numpy.array', 'np.array', (['boxes'], {'dtype': 'np.float32'}), '(boxes, dtype=np.float32)\n', (1493, 1518), True, 'import numpy as np\n'), ((1520, 1537), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (1528, 1537), True, 'import numpy as np\n'), ((1987, 2017), 'os.path.join', 'os.path.join', (['root_path', 'split'], {}), '(root_path, split)\n', (1999, 2017), False, 'import os\n'), ((2104, 2150), 'os.path.join', 'os.path.join', (['self.data_path', '"""image"""', '"""*.jpg"""'], {}), "(self.data_path, 'image', '*.jpg')\n", (2116, 2150), False, 'import os\n'), ((2285, 2305), 'cv2.imread', 'cv2.imread', (['each_img'], {}), '(each_img)\n', (2295, 2305), False, 'import cv2\n'), ((2396, 2420), 'os.path.isfile', 'os.path.isfile', (['each_img'], {}), '(each_img)\n', (2410, 2420), False, 'import os\n'), ((2720, 2745), 'os.path.isfile', 'os.path.isfile', (['each_anno'], {}), '(each_anno)\n', (2734, 2745), False, 'import os\n')] |
"""Training Utilities for ViViT."""
import functools
from typing import Callable, Dict, List, Optional, Tuple, Union
from absl import logging
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental.optimizers import clip_grads
import jax.numpy as jnp
import jax.profiler
import matplotlib.pyplot as plt
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.model_lib.base_models import model_utils
from scenic.train_lib import optimizers
from scenic.train_lib import train_utils
import seaborn as sns
# Aliases for custom types:
# A host or device array, depending on where it currently lives.
Array = Union[jnp.ndarray, np.ndarray]
# A batch of data keyed by field name (e.g. 'inputs', 'label').
Batch = Dict[str, jnp.ndarray]
# Maps (logits, batch) to named (value, normalizer) metric pairs.
MetricFn = Callable[[jnp.ndarray, Dict[str, jnp.ndarray]],
                    Dict[str, Tuple[float, int]]]
# Maps (logits, batch, optional params) to a scalar loss.
LossFn = Callable[[jnp.ndarray, Batch, Optional[jnp.ndarray]], float]
def to_cpu(array: jnp.ndarray):
  """Transfers array (replicated on multiple hosts) to a single host.

  Args:
    array: Replicated array of shape
      [num_hosts, num_devices, local_batch_size, ...]

  Returns:
    array of shape [global_batch_size, ...] where
      global_batch_size = num_devices * local_batch_size
  """
  # Drop the leading replica axis, merge device and batch axes, then pull
  # the result onto the host.
  unreplicated = jax_utils.unreplicate(array)
  unsharded = dataset_utils.unshard(unreplicated)
  return jax.device_get(unsharded)
def train_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    learning_rate_fn: Callable[[int], float],
    loss_fn: LossFn,
    metrics_fn: MetricFn,
    config: ml_collections.ConfigDict,
    debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
  """Runs a single step of training.

  Given the state of the training and a batch of data, computes
  the loss and updates the parameters of the model.

  Note that in this code, the buffers of the first (train_state) and second
  (batch) arguments are donated to the computation.

  Args:
    train_state: The state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated to
      the computation.
    flax_model: A Flax model.
    learning_rate_fn: learning rate scheduler which give the global_step
      generates the learning rate.
    loss_fn: A loss function that given logits, a batch, and parameters of the
      model calculates the loss.
    metrics_fn: A metrics function that given logits and batch of data,
      calculates the metrics as well as the loss.
    config: Configuration of the experiment.
    debug: Whether the debug mode is enabled during training. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Updated state of training, computed metrics, and learning rate for logging.
  """
  new_rng, rng = jax.random.split(train_state.rng)

  # Optionally mix inputs/labels across the batch (mixup regularization).
  if config.get('mixup') and config.mixup.alpha:
    mixup_rng, rng = jax.random.split(rng, 2)
    mixup_rng = train_utils.bind_rng_to_host_device(
        mixup_rng,
        axis_name='batch',
        bind_to=config.mixup.get('bind_to', 'device'))
    batch = dataset_utils.mixup(
        batch,
        config.mixup.alpha,
        config.mixup.get('image_format', 'NTHWC'),
        rng=mixup_rng)

  # Bind the rng to the host/device we are on for dropout.
  dropout_rng = train_utils.bind_rng_to_host_device(
      rng, axis_name='batch', bind_to='device')

  def training_loss_fn(params):
    # Run the model with mutable batch_stats so BN statistics update.
    variables = {'params': params, **train_state.model_state}
    logits, new_model_state = flax_model.apply(
        variables,
        batch['inputs'],
        mutable=['batch_stats'],
        train=True,
        rngs={'dropout': dropout_rng},
        debug=debug)
    loss = loss_fn(logits, batch, variables['params'])
    return loss, (new_model_state, logits)

  compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
  step = train_state.global_step
  lr = learning_rate_fn(step)

  if config.get('sam_rho', None) is None:
    # Normal training
    (train_cost,
     (new_model_state,
      logits)), grad = compute_gradient_fn(train_state.optimizer.target)
  else:
    # SAM training, taken from cl/373487774
    # Sharpness-Aware Minimization: take the gradient at parameters
    # perturbed by rho along the normalized gradient direction.
    def dual_vector(y: jnp.ndarray) -> jnp.ndarray:
      """Returns the solution of max_x y^T x s.t. ||x||_2 <= 1."""
      gradient_norm = jnp.sqrt(sum(
          [jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)]))
      normalized_gradient = jax.tree_map(
          lambda x: x / (gradient_norm + 1e-7), y)
      return normalized_gradient

    g_sam, _ = jax.grad(training_loss_fn, has_aux=True)(
        train_state.optimizer.target)
    g_sam = dual_vector(g_sam)
    target_sam = jax.tree_multimap(lambda a, b: a + config.get('sam_rho') * b,
                                  train_state.optimizer.target, g_sam)
    (train_cost,
     (new_model_state,
      logits)), grad = compute_gradient_fn(target_sam)

  # TODO(dehghani,aarnab): Check how to move this after the pmeam.
  if config.get('max_grad_norm', None) is not None:
    grad = clip_grads(grad, config.max_grad_norm)

  del train_cost

  # Re-use same axis_name as in the call to `pmap(...train_step...)` below.
  grad = jax.lax.pmean(grad, axis_name='batch')
  new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)

  # Explicit weight decay, if necessary.
  # Applied only to 'kernel' parameters (weights, not biases/norms).
  if config.get('explicit_weight_decay', None) is not None:
    new_optimizer = new_optimizer.replace(
        target=optimizers.tree_map_with_names(
            functools.partial(
                optimizers.decay_weight_fn,
                lr=lr,
                decay=config.explicit_weight_decay),
            new_optimizer.target,
            match_name_fn=lambda name: 'kernel' in name))

  metrics = metrics_fn(logits, batch)
  new_train_state = train_state.replace(  # pytype: disable=attribute-error
      global_step=step + 1,
      optimizer=new_optimizer,
      model_state=new_model_state,
      rng=new_rng)
  return new_train_state, metrics, lr
def eval_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    metrics_fn: MetricFn,
    return_logits_and_labels: bool = False,
    return_confusion_matrix: bool = False,
    debug: Optional[bool] = False
) -> Union[Tuple[Dict[str, Tuple[float, int]], jnp.ndarray, jnp.array],
           Tuple[Dict[str, Tuple[float, int]], jnp.ndarray],
           Dict[str, Tuple[float, int]]]:
  """Runs a single step of evaluation.

  Note that in this code, the buffer of the second argument (batch) is donated
  to the computation.

  Assumed API of metrics_fn is:
  ```metrics = metrics_fn(logits, batch)
  where batch is yielded by the batch iterator, and metrics is a dictionary
  mapping metric name to a vector of per example measurements. eval_step will
  aggregate (by summing) all per example measurements and divide by the
  aggregated normalizers. For each given metric we compute:
  1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
  over all batches.

  Args:
    train_state: TrainState; the state of training, whose parameters and
      model state are used for the forward pass.
    batch: A single batch of data.
    flax_model: A Flax model.
    metrics_fn: A metrics function that, given logits and a batch of data,
      calculates the metrics as well as the loss.
    return_logits_and_labels: If true, also returns gathered logits and
      labels (e.g. for computing mean average precision on multi-label
      problems). Only one of `return_logits_and_labels` and
      `return_confusion_matrix` should be true, with the latter taking
      precedence if both are set as true.
    return_confusion_matrix: If true, also returns the gathered confusion
      matrix, which can be used to calculate additional metrics for k-way
      classification problems.
    debug: Whether the debug mode is enabled during evaluation.
      `debug=True` enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics [and optionally logits or confusion matrix].
  """
  model_variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  eval_logits = flax_model.apply(
      model_variables, batch['inputs'], train=False, mutable=False, debug=debug)
  computed_metrics = metrics_fn(eval_logits, batch)
  if return_confusion_matrix:
    # Gather across devices so the caller sees the full-eval-batch matrix.
    cm = get_confusion_matrix(
        labels=batch['label'], logits=eval_logits, batch_mask=batch['batch_mask'])
    cm = jax.lax.all_gather(cm, 'batch')
    return computed_metrics, cm
  if return_logits_and_labels:
    gathered_logits = jax.lax.all_gather(eval_logits, 'batch')
    gathered_labels = jax.lax.all_gather(batch['label'], 'batch')
    return computed_metrics, gathered_logits, gathered_labels
  return computed_metrics
def test_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    metrics_fn: MetricFn,
    n_clips: int = 2,
    return_logits_and_labels: bool = False,
    softmax_logits: bool = False,
    debug: bool = False
) -> Union[Dict[str, Tuple[float, int]], Tuple[Dict[str, Tuple[float, int]],
                                               jnp.array, jnp.array]]:
  """Runs a single step of testing.

  For multi-crop testing, we assume that the `num_crops` consecutive entries
  in the batch are crops of the same original example, so their logits can be
  averaged into a single prediction. This assumption is true when
  local_batch_size = num_local_devices.

  Args:
    train_state: The state of training including the current
      global_step, model_state, rng, and optimizer, and other metadata.
    batch: Dictionary with keys 'inputs', 'labels', 'batch_mask'. We assume
      that all the inputs correspond to the same original example in the test
      set: batch['inputs'] is [num_crops, t, h, w, c] and batch['label'] is
      [num_crops, num_classes] (for classification the rows are identical),
      batch['batch_mask'] is [num_crops].
    flax_model: A Flax model.
    metrics_fn: Metrics function for the model.
    n_clips: The number of clips to process at a time by each device, bounded
      by device memory.
    return_logits_and_labels: Whether to also return the averaged logits.
    softmax_logits: Whether to softmax-normalise the logits before averaging.
    debug: Whether the debug mode is enabled during evaluation.
      `debug=True` enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics [and optionally averaged logits that are of
    shape `[1, num_classes]`, plus the label].
  """
  summed_logits = jnp.zeros(batch['label'].shape[1])
  assert len(batch['batch_mask'].shape) == 1, (
      'Spatial padding is not supported in multi-crop evaluation.')
  num_crops = batch['inputs'].shape[0]
  model_variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  # Process the crops in chunks of n_clips to bound per-device memory use.
  for start in range(0, num_crops, n_clips):
    clip_inputs = batch['inputs'][start:start + n_clips]
    clip_logits = flax_model.apply(
        model_variables, clip_inputs, train=False, mutable=False, debug=debug)
    if softmax_logits:
      clip_logits = nn.softmax(clip_logits, axis=-1)
    summed_logits = summed_logits + jnp.sum(clip_logits, axis=0)
  mean_logits = summed_logits / num_crops
  # Collapse all crops into a single example with a leading batch dim of 1.
  mean_logits = jnp.expand_dims(mean_logits, axis=0)
  batch['label'] = jnp.expand_dims(batch['label'][0], axis=0)
  batch['batch_mask'] = jnp.expand_dims(batch['batch_mask'][0], axis=0)
  metrics = metrics_fn(mean_logits, batch)
  if return_logits_and_labels:
    return metrics, mean_logits, batch['label']
  return metrics
def get_confusion_matrix(labels: Array, logits: Array,
                         batch_mask: Array) -> Array:
  """Computes a confusion matrix from model predictions.

  Args:
    labels: [n_batch] integer labels or [n_batch, n_classes] one-hot labels.
      In the latter case labels are assumed to be one-hot, since the
      confusion matrix is only defined when each example has one label.
    logits: [n_batch, n_classes] array of model predictions.
    batch_mask: [n_batch] array with entries 1 or 0 indicating whether each
      example is valid.

  Returns:
    Confusion matrix of shape [1, n_classes, n_classes].
  """
  predictions = jnp.argmax(logits, axis=-1)
  if labels.ndim == logits.ndim:
    # One-hot targets: recover the class indices.
    targets = jnp.argmax(labels, axis=-1)
  else:
    targets = labels
  # Invalid (masked-out) examples contribute zero weight.
  example_weights = batch_mask.astype(jnp.float32)
  cm = model_utils.confusion_matrix(
      y_true=targets,
      y_pred=predictions,
      num_classes=logits.shape[-1],
      weights=example_weights)
  return cm[jnp.newaxis, ...]  # Add a dummy leading batch dimension.
def render_confusion_matrices(confusion_matrices: List[Array],
                              normalization_method: str = 'cols',
                              figsize: Tuple[int, int] = (12, 12),
                              dpi: int = 100,
                              font_scale: int = 3) -> Array:
  """Render confusion matrix so that it can be logged to Tensorboard.

  Args:
    confusion_matrices: List of [n_batch, n_class, n_class] confusion matrices.
      The first two dimensions will be summed over to get an [n_class, n_class]
      matrix for rendering.
    normalization_method: Method of normalizing the confusion matrix before
      plotting. Supported values are one of "cols", "rows" and "none".
      If any other value, no normalization is performed.
    figsize: The figure size used by matplotlib and seaborn.
    dpi: The dpi used by matplotlib and seaborn.
    font_scale: The font scale used by seaborn.

  Returns:
    image: Rendered image of the confusion matrix for plotting. Data type is
      uint8 and values are in range [0, 255]. Shape is
      [1, figsize * dpi, figsize * dpi, 3]
  """
  conf_matrix = np.sum(confusion_matrices, axis=0)  # Sum over eval batches.
  if conf_matrix.ndim != 3:
    raise AssertionError(
        'Expecting confusion matrix to have shape '
        f'[batch_size, num_classes, num_classes], got {conf_matrix.shape}.')
  conf_matrix = np.sum(conf_matrix, axis=0)  # Sum over batch dimension.
  if normalization_method not in {'rows', 'cols', 'none'}:
    logging.warning('Normalizer must be one of {rows, cols, none}.'
                    'Defaulting to none.')
  sns.set(font_scale=font_scale)
  fig = plt.figure(figsize=figsize, dpi=dpi)
  try:
    # Normalize entries of the confusion matrix.
    if normalization_method == 'rows':
      normalizer = conf_matrix.sum(axis=1)[:, np.newaxis]
    elif normalization_method == 'cols':
      normalizer = conf_matrix.sum(axis=0)[np.newaxis, :]
    else:
      normalizer = 1
    normalized_matrix = np.nan_to_num(conf_matrix / normalizer)
    if np.sum(normalized_matrix) > 0:
      sns.heatmap(
          normalized_matrix,
          annot=True,
          linewidths=0.5,
          square=True,
          cbar=False,
          cmap='jet',
          annot_kws={'size': 18})
      fig.tight_layout(pad=0.0)
    fig.canvas.draw()
    ncols, nrows = fig.canvas.get_width_height()
    # np.fromstring is deprecated for binary data; np.frombuffer is the
    # supported equivalent for reading the rendered RGB buffer.
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    image = image.reshape(nrows, ncols, 3)
  finally:
    # Close the figure: pyplot keeps figures alive globally, so without this
    # every call leaks a (figsize * dpi)^2 RGB canvas.
    plt.close(fig)
  return np.expand_dims(image, axis=0)
| [
"numpy.sum",
"numpy.nan_to_num",
"seaborn.heatmap",
"matplotlib.pyplot.figure",
"jax.experimental.optimizers.clip_grads",
"scenic.train_lib.train_utils.bind_rng_to_host_device",
"flax.linen.softmax",
"jax.tree_util.tree_leaves",
"jax.numpy.expand_dims",
"jax.numpy.argmax",
"scenic.model_lib.base... | [((2882, 2915), 'jax.random.split', 'jax.random.split', (['train_state.rng'], {}), '(train_state.rng)\n', (2898, 2915), False, 'import jax\n'), ((3392, 3469), 'scenic.train_lib.train_utils.bind_rng_to_host_device', 'train_utils.bind_rng_to_host_device', (['rng'], {'axis_name': '"""batch"""', 'bind_to': '"""device"""'}), "(rng, axis_name='batch', bind_to='device')\n", (3427, 3469), False, 'from scenic.train_lib import train_utils\n'), ((3900, 3950), 'jax.value_and_grad', 'jax.value_and_grad', (['training_loss_fn'], {'has_aux': '(True)'}), '(training_loss_fn, has_aux=True)\n', (3918, 3950), False, 'import jax\n'), ((5245, 5283), 'jax.lax.pmean', 'jax.lax.pmean', (['grad'], {'axis_name': '"""batch"""'}), "(grad, axis_name='batch')\n", (5258, 5283), False, 'import jax\n'), ((10983, 11017), 'jax.numpy.zeros', 'jnp.zeros', (["batch['label'].shape[1]"], {}), "(batch['label'].shape[1])\n", (10992, 11017), True, 'import jax.numpy as jnp\n'), ((11665, 11700), 'jax.numpy.expand_dims', 'jnp.expand_dims', (['all_logits'], {'axis': '(0)'}), '(all_logits, axis=0)\n', (11680, 11700), True, 'import jax.numpy as jnp\n'), ((11720, 11762), 'jax.numpy.expand_dims', 'jnp.expand_dims', (["batch['label'][0]"], {'axis': '(0)'}), "(batch['label'][0], axis=0)\n", (11735, 11762), True, 'import jax.numpy as jnp\n'), ((11787, 11834), 'jax.numpy.expand_dims', 'jnp.expand_dims', (["batch['batch_mask'][0]"], {'axis': '(0)'}), "(batch['batch_mask'][0], axis=0)\n", (11802, 11834), True, 'import jax.numpy as jnp\n'), ((12732, 12759), 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (12742, 12759), True, 'import jax.numpy as jnp\n'), ((12875, 12985), 'scenic.model_lib.base_models.model_utils.confusion_matrix', 'model_utils.confusion_matrix', ([], {'y_true': 'y_true', 'y_pred': 'y_pred', 'num_classes': 'logits.shape[-1]', 'weights': 'weights'}), '(y_true=y_true, y_pred=y_pred, num_classes=\n logits.shape[-1], weights=weights)\n', 
(12903, 12985), False, 'from scenic.model_lib.base_models import model_utils\n'), ((14251, 14285), 'numpy.sum', 'np.sum', (['confusion_matrices'], {'axis': '(0)'}), '(confusion_matrices, axis=0)\n', (14257, 14285), True, 'import numpy as np\n'), ((14511, 14538), 'numpy.sum', 'np.sum', (['conf_matrix'], {'axis': '(0)'}), '(conf_matrix, axis=0)\n', (14517, 14538), True, 'import numpy as np\n'), ((14742, 14772), 'seaborn.set', 'sns.set', ([], {'font_scale': 'font_scale'}), '(font_scale=font_scale)\n', (14749, 14772), True, 'import seaborn as sns\n'), ((14781, 14817), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (14791, 14817), True, 'import matplotlib.pyplot as plt\n'), ((15103, 15142), 'numpy.nan_to_num', 'np.nan_to_num', (['(conf_matrix / normalizer)'], {}), '(conf_matrix / normalizer)\n', (15116, 15142), True, 'import numpy as np\n'), ((15576, 15605), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (15590, 15605), True, 'import numpy as np\n'), ((2987, 3011), 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), '(rng, 2)\n', (3003, 3011), False, 'import jax\n'), ((5103, 5141), 'jax.experimental.optimizers.clip_grads', 'clip_grads', (['grad', 'config.max_grad_norm'], {}), '(grad, config.max_grad_norm)\n', (5113, 5141), False, 'from jax.experimental.optimizers import clip_grads\n'), ((8712, 8757), 'jax.lax.all_gather', 'jax.lax.all_gather', (['confusion_matrix', '"""batch"""'], {}), "(confusion_matrix, 'batch')\n", (8730, 8757), False, 'import jax\n'), ((8840, 8875), 'jax.lax.all_gather', 'jax.lax.all_gather', (['logits', '"""batch"""'], {}), "(logits, 'batch')\n", (8858, 8875), False, 'import jax\n'), ((8889, 8932), 'jax.lax.all_gather', 'jax.lax.all_gather', (["batch['label']", '"""batch"""'], {}), "(batch['label'], 'batch')\n", (8907, 8932), False, 'import jax\n'), ((11550, 11573), 'jax.numpy.sum', 'jnp.sum', (['logits'], {'axis': '(0)'}), 
'(logits, axis=0)\n', (11557, 11573), True, 'import jax.numpy as jnp\n'), ((12665, 12692), 'jax.numpy.argmax', 'jnp.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (12675, 12692), True, 'import jax.numpy as jnp\n'), ((14632, 14720), 'absl.logging.warning', 'logging.warning', (['"""Normalizer must be one of {rows, cols, none}.Defaulting to none."""'], {}), "(\n 'Normalizer must be one of {rows, cols, none}.Defaulting to none.')\n", (14647, 14720), False, 'from absl import logging\n'), ((15149, 15174), 'numpy.sum', 'np.sum', (['normalized_matrix'], {}), '(normalized_matrix)\n', (15155, 15174), True, 'import numpy as np\n'), ((15184, 15307), 'seaborn.heatmap', 'sns.heatmap', (['normalized_matrix'], {'annot': '(True)', 'linewidths': '(0.5)', 'square': '(True)', 'cbar': '(False)', 'cmap': '"""jet"""', 'annot_kws': "{'size': 18}"}), "(normalized_matrix, annot=True, linewidths=0.5, square=True,\n cbar=False, cmap='jet', annot_kws={'size': 18})\n", (15195, 15307), True, 'import seaborn as sns\n'), ((1228, 1256), 'flax.jax_utils.unreplicate', 'jax_utils.unreplicate', (['array'], {}), '(array)\n', (1249, 1256), False, 'from flax import jax_utils\n'), ((4501, 4555), 'jax.tree_map', 'jax.tree_map', (['(lambda x: x / (gradient_norm + 1e-07))', 'y'], {}), '(lambda x: x / (gradient_norm + 1e-07), y)\n', (4513, 4555), False, 'import jax\n'), ((4615, 4655), 'jax.grad', 'jax.grad', (['training_loss_fn'], {'has_aux': '(True)'}), '(training_loss_fn, has_aux=True)\n', (4623, 4655), False, 'import jax\n'), ((11509, 11536), 'flax.linen.softmax', 'nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (11519, 11536), True, 'import flax.linen as nn\n'), ((5567, 5660), 'functools.partial', 'functools.partial', (['optimizers.decay_weight_fn'], {'lr': 'lr', 'decay': 'config.explicit_weight_decay'}), '(optimizers.decay_weight_fn, lr=lr, decay=config.\n explicit_weight_decay)\n', (5584, 5660), False, 'import functools\n'), ((4417, 4430), 'jax.numpy.square', 
'jnp.square', (['e'], {}), '(e)\n', (4427, 4430), True, 'import jax.numpy as jnp\n'), ((4441, 4469), 'jax.tree_util.tree_leaves', 'jax.tree_util.tree_leaves', (['y'], {}), '(y)\n', (4466, 4469), False, 'import jax\n')] |
import unittest
import json
import pandas as pd
import numpy as np
from assistant_dialog_skill_analysis.data_analysis import similarity_analyzer
from assistant_dialog_skill_analysis.utils import skills_util, lang_utils
TOLERANCE = 0.0000001
class TestSimilarityAnalzyer(unittest.TestCase):
    """Test for Similarity Analyzer module"""

    def setUp(self):
        self.lang_util = lang_utils.LanguageUtility("en")
        workspace_path = (
            "tests/resources/test_workspaces/skill-Customer-Care-Sample.json"
        )
        with open(workspace_path, "r") as skill_file:
            skill_json = json.load(skill_file)
        workspace_data, _workspace_vocabulary = skills_util.extract_workspace_data(
            skill_json, self.lang_util
        )
        self.workspace_df = pd.DataFrame(workspace_data)

    def test_calculate_cosine_similarity(self):
        features = np.array([[1, 2, 0], [0, 0, 1], [1, 2, 0]])
        scores = similarity_analyzer._calculate_cosine_similarity(features)
        # Diagonal entries are self-similarities, so they must all be 1.
        diag_is_ones = (
            np.abs(np.sum(np.diag(scores) - np.array([1, 1, 1]))) < TOLERANCE
        )
        self.assertEqual(diag_is_ones, True, "Similarity Analyzer Test fail")
        # Rows 0 and 1 are orthogonal, so their similarity must be 0.
        self.assertEqual(
            np.abs(scores[0, 1]) < TOLERANCE,
            True,
            "Similarity Analyzer Test fail",
        )
        # Rows 0 and 2 are identical, so their similarity must be 1.
        self.assertEqual(
            np.abs(scores[0, 2] - 1) < TOLERANCE,
            True,
            "Similarity Analyzer Test fail",
        )

    def test_ambiguous_examples_analysis(self):
        ambiguous_df = similarity_analyzer.ambiguous_examples_analysis(
            self.workspace_df, threshold=0.85, lang_util=self.lang_util
        )
        # Every reported pair must be at or above the requested threshold.
        below_threshold = ambiguous_df[ambiguous_df["similarity score"] < 0.85]
        self.assertEqual(
            len(below_threshold),
            0,
            "Similarity Analyzer Test fail",
        )
        # An intent must never be reported as ambiguous with itself.
        shared_intents = np.intersect1d(
            ambiguous_df["Intent1"], ambiguous_df["Intent2"]
        )
        self.assertEqual(
            len(shared_intents),
            0,
            "Similarity Analyzer Test fail",
        )

    def tearDown(self):
        unittest.TestCase.tearDown(self)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"pandas.DataFrame",
"json.load",
"numpy.abs",
"assistant_dialog_skill_analysis.data_analysis.similarity_analyzer._calculate_cosine_similarity",
"assistant_dialog_skill_analysis.utils.lang_utils.LanguageUtility",
"unittest.TestCase.tearDown",
"numpy.array",
"numpy.diag",
"numpy.int... | [((2246, 2261), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2259, 2261), False, 'import unittest\n'), ((387, 419), 'assistant_dialog_skill_analysis.utils.lang_utils.LanguageUtility', 'lang_utils.LanguageUtility', (['"""en"""'], {}), "('en')\n", (413, 419), False, 'from assistant_dialog_skill_analysis.utils import skills_util, lang_utils\n'), ((838, 881), 'numpy.array', 'np.array', (['[[1, 2, 0], [0, 0, 1], [1, 2, 0]]'], {}), '([[1, 2, 0], [0, 0, 1], [1, 2, 0]])\n', (846, 881), True, 'import numpy as np\n'), ((907, 972), 'assistant_dialog_skill_analysis.data_analysis.similarity_analyzer._calculate_cosine_similarity', 'similarity_analyzer._calculate_cosine_similarity', (['feature_matrix1'], {}), '(feature_matrix1)\n', (955, 972), False, 'from assistant_dialog_skill_analysis.data_analysis import similarity_analyzer\n'), ((1572, 1684), 'assistant_dialog_skill_analysis.data_analysis.similarity_analyzer.ambiguous_examples_analysis', 'similarity_analyzer.ambiguous_examples_analysis', (['self.workspace_df'], {'threshold': '(0.85)', 'lang_util': 'self.lang_util'}), '(self.workspace_df,\n threshold=0.85, lang_util=self.lang_util)\n', (1619, 1684), False, 'from assistant_dialog_skill_analysis.data_analysis import similarity_analyzer\n'), ((2180, 2212), 'unittest.TestCase.tearDown', 'unittest.TestCase.tearDown', (['self'], {}), '(self)\n', (2206, 2212), False, 'import unittest\n'), ((734, 762), 'pandas.DataFrame', 'pd.DataFrame', (['workspace_data'], {}), '(workspace_data)\n', (746, 762), True, 'import pandas as pd\n'), ((650, 671), 'json.load', 'json.load', (['skill_file'], {}), '(skill_file)\n', (659, 671), False, 'import json\n'), ((1220, 1248), 'numpy.abs', 'np.abs', (['cos_sim_score1[0, 1]'], {}), '(cos_sim_score1[0, 1])\n', (1226, 1248), True, 'import numpy as np\n'), ((1374, 1406), 'numpy.abs', 'np.abs', (['(cos_sim_score1[0, 2] - 1)'], {}), '(cos_sim_score1[0, 2] - 1)\n', (1380, 1406), True, 'import numpy as np\n'), ((1945, 2023), 
'numpy.intersect1d', 'np.intersect1d', (["ambiguous_dataframe['Intent1']", "ambiguous_dataframe['Intent2']"], {}), "(ambiguous_dataframe['Intent1'], ambiguous_dataframe['Intent2'])\n", (1959, 2023), True, 'import numpy as np\n'), ((1047, 1070), 'numpy.diag', 'np.diag', (['cos_sim_score1'], {}), '(cos_sim_score1)\n', (1054, 1070), True, 'import numpy as np\n'), ((1073, 1092), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1081, 1092), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Render a three-month text calendar (previous, current, next month) with
Conky colour markup, highlighting today's date; output goes to stdout."""
import time, calendar, re, sys
import numpy as np
# -- User-tunable appearance settings --------------------------------------
firstweekday = calendar.SUNDAY  # Weekday shown in the first grid column.
width = 20
offset0 = 0
offset1 = 0
alignment = 'alignc'
monthNameSep = ' '
weekdaySep = ' '  # Separator between weekday-name labels.
labels = {0:'Mo', 1:'Tu', 2:'We', 3:'Th', 4:'Fr', 5:'Sa', 6:'Su'}
todayColor = '${color5}'  # Conky colour used to highlight today's date.
weekdayNamesColor = '${color #bbbbbb}'
todayStringColor = '${color5}'
separator = ' '  # Separator between day-cell columns.
# Header like 'Mon Jan 5, 2021'; int() strips the leading zero from %d.
todayString = time.strftime('%a %b ') + str(int(time.strftime('%d'))) + \
        time.strftime(', %Y')
todayString2 = time.strftime('%Y.%m.%d %H:%M')
calendar.setfirstweekday(firstweekday)
calGen = calendar.Calendar()
calGen.setfirstweekday(firstweekday)
weekendDays = [calendar.SATURDAY, calendar.SUNDAY]
weekdays = [calendar.MONDAY, calendar.TUESDAY, calendar.WEDNESDAY,
            calendar.THURSDAY, calendar.FRIDAY]
# Weekday numbers in display order, starting from firstweekday (mod 7).
weekdayOrder = [ np.mod(n,7) for n in range(firstweekday,firstweekday+7) ]
weekdayNames = [ labels[n] for n in weekdayOrder ]
weekdayNames = weekdaySep.join(weekdayNames)
# Optional command-line overrides for the three colour names.
if len(sys.argv) >= 2:
    color1 = sys.argv[1]
else:
    color1 = 'color6'
if len(sys.argv) >= 3:
    color2 = sys.argv[2]
else:
    color2 = 'color2'
if len(sys.argv) >= 4:
    color3 = sys.argv[3]
else:
    color3 = 'color4'
# Today's date, plus (year, month) pairs for the previous, current and next
# calendar months, wrapping correctly across the January/December boundary.
localtime = time.localtime(time.time())
today = int(localtime[2])
thisMonth = (localtime[0], localtime[1])
lastMonth = ((thisMonth[0] - 1, 12) if thisMonth[1] == 1
             else (thisMonth[0], thisMonth[1] - 1))
nextMonth = ((thisMonth[0] + 1, 1) if thisMonth[1] == 12
             else (thisMonth[0], thisMonth[1] + 1))
#-- User-defined colour scheme for the three displayed months.
months = [{'year': lastMonth[0], 'month': lastMonth[1],
           'weekdayColor': '${color #686868}',
           'weekendColor': '${color #888888}',
           'abbrevColor': '${color #bbbbbb}' },
          {'year': thisMonth[0], 'month': thisMonth[1],
           'weekdayColor': '${color #5b6dad}',
           'weekendColor': '${color #7f8ed3}',
           'abbrevColor': '${color #bbbbbb}' },
          {'year': nextMonth[0], 'month': nextMonth[1],
           'weekdayColor': '${color #686868}',
           'weekendColor': '${color #888888}',
           'abbrevColor': '${color #bbbbbb}' }]
# Attach per-month calendar data: the week grid, the day count and the
# month's abbreviated and full names.
for seqNum, month in enumerate(months):
    month['seqNum'] = seqNum
    month['genericCal'] = calGen.monthdays2calendar(month['year'], month['month'])
    month['daysInMonth'] = np.max(
        calGen.monthdayscalendar(month['year'], month['month'])[-1])
    stamp = (month['year'], month['month'], 0, 0, 0, 0, 0, 0, 0)
    month['abbrev'] = time.strftime('%b', stamp)
    month['name'] = time.strftime('%B', stamp)
#-- Initialize calendar matrix
# calMatrix is a list of display rows; each row is a list of cell dicts of
# the form {'str': <day text>, 'color': <conky colour string>}.
calMatrix = [[]]
#-- Initialize row and column pointers
row = 0
column = len(calMatrix[row])-1
#-- Fill initial blank days (only need first week of first month displayed)
# A day number of 0 marks a slot that belongs to a neighbouring month.
for day in months[0]['genericCal'][0]:
    if day[0] == 0:
        calMatrix[0].append({'str':' ', 'color': months[0]['weekdayColor']})
        #-- Incr. column ptr (ALWAYS on first row, so no need to incr. row ptr)
        column += 1
    else:
        break
#-- Loop through each month, recording the days & appropriate colors
# day is a (monthday, weekday) pair as produced by monthdays2calendar.
for month in months:
    #-- Loop through each day of the month
    for week in month['genericCal']:
        for day in week:
            if day[0] == 0:
                # Padding slot from an adjacent month: skip it, so months
                # flow into each other without blank cells between them.
                pass
            else:
                #-- Increment row and column pointers
                column += 1
                if column > 6:
                    # Past the last (7th) column: wrap to a new display row.
                    calMatrix.append([])
                    row += 1
                    column = 0
                #-- Record row and column pointers for FIRST day of the month
                if day[0] == 1:
                    month['fdotmPointer'] = (row, column)
                    month['fdotmDayOfTheWeek'] = (day[0], day[1])
                #-- Record row and column pointers for LAST day of the month
                if day[0] == month['daysInMonth']:
                    month['ldotmPointer'] = (row, column)
                    month['ldotmDayOfTheWeek'] = (day[0], day[1])
                #-- Set color if date is today
                if month['month'] == thisMonth[1] and day[0] == today:
                    color = todayColor
                #-- Set color if date is not today but on a weekend
                elif day[1] in weekendDays:
                    color = month['weekendColor']
                #-- Set color if date is not today but on a weekday
                else:
                    color = month['weekdayColor']
                #-- Append date to calendar matrix
                calMatrix[row].append(
                    {'str': str(day[0]).rjust(2, ' '), 'color': color} )
#-- Fill final blank days (only need final week of final month displayed)
for day in months[-1]['genericCal'][-1][::-1]:
    if day[0] == 0:
        calMatrix[-1].append({'str':' ', 'color': months[-1]['weekdayColor']})
    else:
        break
#-- Create month abbreviation matrix
# One single-character cell per display row; each month's upper-cased
# abbreviation is written vertically, centred beside the rows it occupies.
monthAbbrevMat = []
for r in range(len(calMatrix)):
    monthAbbrevMat.append([{'str': ' ', 'color': None}])
for month in months:
    # First row of the vertical label: centre the abbreviation within the
    # span of rows between the month's first and last day.
    month['labelRow0'] = int(np.floor(float(
        month['ldotmPointer'][0] - month['fdotmPointer'][0] + 1 -
        len(month['abbrev']) ) / 2.0) + month['fdotmPointer'][0])
    for n in range(len(month['abbrev'])):
        row = month['labelRow0'] + n
        monthAbbrevMat[row][0]['str'] = str.upper(month['abbrev'])[n]
        monthAbbrevMat[row][0]['color'] = month['abbrevColor']
#-- Concatenate month abbreviations with calendar and add spaces
# Emit a colour escape only when the colour changes between cells.
color = None
rowStrings = []
for row in range(len(calMatrix)):
    rowString = ''
    for entry in monthAbbrevMat[row] + calMatrix[row]:
        if color != entry['color'] and entry['color'] != None:
            color = entry['color']
            rowString += color
        rowString += entry['str'] + separator
    rowString = rowString[0:-len(separator)]  # Drop the trailing separator.
    rowStrings.append(rowString)
#sys.stdout.write('${alignc}' + ' ' + separator + todayStringColor + todayString2 + '\n\n')
sys.stdout.write('${alignc}' + ' ' + separator + todayStringColor + todayString + '\n')
sys.stdout.write('${alignc}' + ' ' + separator + weekdayNamesColor + weekdayNames + '\n')
sys.stdout.write('${alignc}' + '\n${alignc}'.join(rowStrings) + '\n')
"sys.stdout.write",
"calendar.setfirstweekday",
"time.strftime",
"numpy.mod",
"time.time",
"calendar.Calendar"
] | [((494, 525), 'time.strftime', 'time.strftime', (['"""%Y.%m.%d %H:%M"""'], {}), "('%Y.%m.%d %H:%M')\n", (507, 525), False, 'import time, calendar, re, sys\n'), ((527, 565), 'calendar.setfirstweekday', 'calendar.setfirstweekday', (['firstweekday'], {}), '(firstweekday)\n', (551, 565), False, 'import time, calendar, re, sys\n'), ((575, 594), 'calendar.Calendar', 'calendar.Calendar', ([], {}), '()\n', (592, 594), False, 'import time, calendar, re, sys\n'), ((6107, 6198), 'sys.stdout.write', 'sys.stdout.write', (["('${alignc}' + ' ' + separator + todayStringColor + todayString + '\\n')"], {}), "('${alignc}' + ' ' + separator + todayStringColor +\n todayString + '\\n')\n", (6123, 6198), False, 'import time, calendar, re, sys\n'), ((6195, 6288), 'sys.stdout.write', 'sys.stdout.write', (["('${alignc}' + ' ' + separator + weekdayNamesColor + weekdayNames + '\\n')"], {}), "('${alignc}' + ' ' + separator + weekdayNamesColor +\n weekdayNames + '\\n')\n", (6211, 6288), False, 'import time, calendar, re, sys\n'), ((457, 478), 'time.strftime', 'time.strftime', (['""", %Y"""'], {}), "(', %Y')\n", (470, 478), False, 'import time, calendar, re, sys\n'), ((817, 829), 'numpy.mod', 'np.mod', (['n', '(7)'], {}), '(n, 7)\n', (823, 829), True, 'import numpy as np\n'), ((1230, 1241), 'time.time', 'time.time', ([], {}), '()\n', (1239, 1241), False, 'import time, calendar, re, sys\n'), ((2513, 2578), 'time.strftime', 'time.strftime', (['"""%b"""', "(m['year'], m['month'], 0, 0, 0, 0, 0, 0, 0)"], {}), "('%b', (m['year'], m['month'], 0, 0, 0, 0, 0, 0, 0))\n", (2526, 2578), False, 'import time, calendar, re, sys\n'), ((2616, 2681), 'time.strftime', 'time.strftime', (['"""%B"""', "(m['year'], m['month'], 0, 0, 0, 0, 0, 0, 0)"], {}), "('%B', (m['year'], m['month'], 0, 0, 0, 0, 0, 0, 0))\n", (2629, 2681), False, 'import time, calendar, re, sys\n'), ((389, 412), 'time.strftime', 'time.strftime', (['"""%a %b """'], {}), "('%a %b ')\n", (402, 412), False, 'import time, calendar, re, sys\n'), 
((423, 442), 'time.strftime', 'time.strftime', (['"""%d"""'], {}), "('%d')\n", (436, 442), False, 'import time, calendar, re, sys\n')] |
#
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy
import nlcpy
from nlcpy import core
from nlcpy.request import request
def where(condition, x=None, y=None):
    """Selects elements from *x* or *y* depending on *condition*.

    Note
    ----
    When only *condition* is provided, this function is a shorthand for
    ``nlcpy.asarray(condition).nonzero()``. Using nonzero directly should be
    preferred in that case. The rest of this documentation covers only the
    case where all three arguments are provided.

    Parameters
    ----------
    condition : array_like, bool
        Where True, yield *x*, otherwise yield *y*.
    x, y : array_like
        Values from which to choose. *x*, *y* and *condition* need to be
        broadcastable to some shape.

    Returns
    -------
    out : ndarray
        An array with elements from *x* where *condition* is True, and
        elements from *y* elsewhere.

    See Also
    --------
    nonzero : Returns the indices of the elements that are non-zero.

    Examples
    --------
    >>> import nlcpy as vp
    >>> a = vp.arange(10)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> vp.where(a < 5, a, 10*a)
    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])
    >>> x = vp.arange(3).reshape([3,1])
    >>> y = vp.arange(4).reshape([1,4])
    >>> vp.where(x < y, x, 10 + y)  # both x and 10+y are broadcast
    array([[10,  0,  0,  0],
           [10, 11,  1,  1],
           [10, 11, 12,  2]])
    """
    if condition is None:
        condition = False
    cond = nlcpy.asarray(condition)
    if x is None and y is None:
        # Single-argument form behaves like nonzero().
        return nlcpy.nonzero(cond)
    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")
    x_src = x if isinstance(x, nlcpy.ndarray) else numpy.asarray(x)
    y_src = y if isinstance(y, nlcpy.ndarray) else numpy.asarray(y)
    # The output dtype follows NumPy's type-promotion rules for x and y.
    out_dtype = numpy.result_type(x_src, y_src)
    x_arr = nlcpy.asarray(x_src, dtype=out_dtype)
    y_arr = nlcpy.asarray(y_src, dtype=out_dtype)
    if cond.dtype != bool:
        cond = cond != 0
    operands, out_shape = core._broadcast_core((cond, x_arr, y_arr))
    out = nlcpy.ndarray(shape=out_shape, dtype=out_dtype)
    request._push_request(
        "nlcpy_where",
        "indexing_op",
        (out, operands[0], operands[1], operands[2]),
    )
    return out
def diag_indices(n, ndim=2):
"""Returns the indices to access the main diagonal of an array.
This returns a tuple of indices that can be used to access the main diagonal of an
array *a* with ``a.ndim >= 2`` dimensions and shape (n, n, ..., n).
For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set of
indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``.
Parameters
----------
n : int
The size, along each dimension, of the arrays for which the returned indices can
be used.
ndim : int, optional
The number of dimensions.
Examples
--------
Create a set of indices to access the diagonal of a (4, 4) array:
>>> import nlcpy as vp
>>> di = vp.diag_indices(4)
>>> di
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a = vp.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> a[di] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
Now, we create indices to manipulate a 3-D array:
>>> d3 = vp.diag_indices(2, 3)
>>> d3
(array([0, 1]), array([0, 1]), array([0, 1]))
And use it to set the diagonal of an array of zeros to 1:
>>> a = vp.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
[0, 0]],
<BLANKLINE>
[[0, 0],
[0, 1]]])
"""
idx = nlcpy.arange(n)
return (idx,) * ndim
| [
"numpy.result_type",
"nlcpy.nonzero",
"nlcpy.ndarray",
"numpy.asarray",
"nlcpy.asarray",
"nlcpy.arange",
"nlcpy.request.request._push_request",
"nlcpy.core._broadcast_core"
] | [((3915, 3939), 'nlcpy.asarray', 'nlcpy.asarray', (['condition'], {}), '(condition)\n', (3928, 3939), False, 'import nlcpy\n'), ((4272, 4295), 'numpy.result_type', 'numpy.result_type', (['x', 'y'], {}), '(x, y)\n', (4289, 4295), False, 'import numpy\n'), ((4309, 4341), 'nlcpy.asarray', 'nlcpy.asarray', (['x'], {'dtype': 'ret_type'}), '(x, dtype=ret_type)\n', (4322, 4341), False, 'import nlcpy\n'), ((4354, 4386), 'nlcpy.asarray', 'nlcpy.asarray', (['y'], {'dtype': 'ret_type'}), '(y, dtype=ret_type)\n', (4367, 4386), False, 'import nlcpy\n'), ((4460, 4501), 'nlcpy.core._broadcast_core', 'core._broadcast_core', (['(arr, arr_x, arr_y)'], {}), '((arr, arr_x, arr_y))\n', (4480, 4501), False, 'from nlcpy import core\n'), ((4512, 4554), 'nlcpy.ndarray', 'nlcpy.ndarray', ([], {'shape': 'shape', 'dtype': 'ret_type'}), '(shape=shape, dtype=ret_type)\n', (4525, 4554), False, 'import nlcpy\n'), ((4559, 4655), 'nlcpy.request.request._push_request', 'request._push_request', (['"""nlcpy_where"""', '"""indexing_op"""', '(ret, values[0], values[1], values[2])'], {}), "('nlcpy_where', 'indexing_op', (ret, values[0], values\n [1], values[2]))\n", (4580, 4655), False, 'from nlcpy.request import request\n'), ((6262, 6277), 'nlcpy.arange', 'nlcpy.arange', (['n'], {}), '(n)\n', (6274, 6277), False, 'import nlcpy\n'), ((3987, 4005), 'nlcpy.nonzero', 'nlcpy.nonzero', (['arr'], {}), '(arr)\n', (4000, 4005), False, 'import nlcpy\n'), ((4170, 4186), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (4183, 4186), False, 'import numpy\n'), ((4240, 4256), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (4253, 4256), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 9 09:07:29 2022
@author: alex
"""
import numpy as np
###############################################################################
class MarkovSystem:
"""
Mother class for discrete stochastic dynamical systems
----------------------------------------------
n : number of finite states
m : number of control action
p : number of outputs
k : time index
---------------------------------------
x_k+1 = fk( x_k , u_k , k )
y_k = hk( x_k , u_k , k )
optionnal:
u_k = policy( y_k , k ) : action autonomous decision
"""
###########################################################################
# The two following functions needs to be implemented by child classes
###########################################################################
############################
def __init__(self, n = 1, m = 1):
"""
The __init__ method of the Mother class can be used to fill-in default
labels, units, bounds, and base values.
"""
#############################
# Parameters
#############################
p = n
# Dimensions
self.n = n
self.m = m
self.p = p
# Labels
self.name = 'ContinuousDynamicSystem'
self.state_label = []
self.input_label = []
self.output_label = []
# Default Label and units
for i in range(n):
self.state_label.append('State '+str(i))
for i in range(m):
self.input_label.append('Action '+str(i))
for i in range(p):
self.output_label.append('Output '+str(i))
# Default state and inputs values
self.xbar = 0
self.ubar = 0
################################
# Transition Probabilities
################################
self.T_jia = np.zeros((n,n,m))
for a in range(m):
self.T_jia[:,:,a] = np.diag(np.ones((n)))
################################
# Transition cost
################################
self.a_jia = np.zeros((n,n,m))
################################
# Final cost
################################
self.gN_i = np.zeros((n))
################################
# Variables
################################
# Initial value for simulations
self.x0 = np.zeros( n )
self.x0[0] = 1
# Result of last simulation
self.traj = None
# Cost function for evaluation
# TODO
#############################
def fk( self , x , u , k = 0 ):
"""
compute the evolution of the probability distribution
"""
T_ji = self.T_jia[:,:,u] # transition matrix of given action
x_k1 = np.dot( T_ji , x )
return x_k1
#############################
def check_probability_matrix( self ):
"""
check if transition prob sums to 1
"""
print( self.T_jia.sum( axis = 0 ) )
return self.T_jia.sum( axis = 0 ) # should be all ones
###########################################################################
# The following functions can be overloaded when necessary by child classes
###########################################################################
#############################
def h( self , x , k = 0 ):
"""
Output fonction y = h(x,u,t)
"""
y = x # default output is state
return y
#############################
def policy( self , y , k ):
"""
"""
# Default action
u = self.ubar
return u
#############################
def simulation_of_density_probability( self , N = 10 , plot = True ):
"""
N = number of steps
"""
x_k = self.x0
for k in range(N):
y_k = self.h( x_k , k )
u_k = self.policy( y_k , k )
x_k1 = self.fk( x_k, u_k , k )
x_k = x_k1
if plot:
print(x_k1)
return x_k1 # return probability distribution at k = N
#############################
def get_valueiteration_algo(self):
vi = ValueIterationForMarkovProcess(self.T_jia, self.a_jia, self.gN_i)
return vi
###############################################################################
class ValueIterationForMarkovProcess:
"""
"""
############################
def __init__(self, T_jia , a_jia, gN_i ):
self.alpha = 1.0 # discount factor
self.T = T_jia
self.a = a_jia
self.g = gN_i
self.n = T_jia.shape[0] # Number of states
self.m = T_jia.shape[2] # Number of actions
# Initialise cost-to-go with final cost
self.J = self.g.copy()
# Initialise policy map
self.c = np.zeros((self.n))
# Initialise Q-values
self.Q = np.zeros((self.n,self.m))
self.print = True
###############################
def compute_backward_step(self):
# For all states
for i in range(self.n):
# For all actions
for a in range(self.m):
Q_j = self.a[:,i,a] + self.alpha * self.J # array of possible cost
self.Q[i,a] = np.dot( self.T[:,i,a] , Q_j ) # expected value
self.J = self.Q.min(1) # Minimum over all possible actions
self.c = self.Q.argmin(1) # Action that minimise Q for all i
###############################
def compute_n_backward_steps(self, n):
for k in range(n):
self.compute_backward_step()
if self.print:
print('Backward step N-',k)
print('J = ',self.J)
print('c = ',self.c)
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
""" MAIN TEST """
#
m = MarkovSystem(5,3)
m.check_probability_matrix()
vi = m.get_valueiteration_algo()
vi.alpha = 0.9
vi.compute_n_backward_steps(100)
| [
"numpy.dot",
"numpy.zeros",
"numpy.ones"
] | [((2044, 2063), 'numpy.zeros', 'np.zeros', (['(n, n, m)'], {}), '((n, n, m))\n', (2052, 2063), True, 'import numpy as np\n'), ((2320, 2339), 'numpy.zeros', 'np.zeros', (['(n, n, m)'], {}), '((n, n, m))\n', (2328, 2339), True, 'import numpy as np\n'), ((2483, 2494), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2491, 2494), True, 'import numpy as np\n'), ((2687, 2698), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2695, 2698), True, 'import numpy as np\n'), ((3131, 3146), 'numpy.dot', 'np.dot', (['T_ji', 'x'], {}), '(T_ji, x)\n', (3137, 3146), True, 'import numpy as np\n'), ((5546, 5562), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (5554, 5562), True, 'import numpy as np\n'), ((5621, 5647), 'numpy.zeros', 'np.zeros', (['(self.n, self.m)'], {}), '((self.n, self.m))\n', (5629, 5647), True, 'import numpy as np\n'), ((2151, 2161), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2158, 2161), True, 'import numpy as np\n'), ((6079, 6107), 'numpy.dot', 'np.dot', (['self.T[:, i, a]', 'Q_j'], {}), '(self.T[:, i, a], Q_j)\n', (6085, 6107), True, 'import numpy as np\n')] |
import numpy as np
def pair2sum_1(arr, num):
length = len(arr)
for i in range(length - 1):
for j in range(i + 1, length):
if arr[i] + arr[j] == num:
print(arr[i], arr[j])
print('Done')
def pair2sum_2(arr, num):
length = len(arr)
for i in range(length - 1):
for j in range(i + 1, length):
if arr[i] + arr[j] == num:
print(arr[i], arr[j])
print('Done')
# def sum_array(arr, num, length):
# temp = num - arr[0]
# if arr[1] == temp:
# return temp, arr[1]
# else:
# sum_array(arr[])
arr = np.random.randint(10, size = 10)
num = 10
print(arr)
pair2sum_1(arr, num) | [
"numpy.random.randint"
] | [((616, 646), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(10)'}), '(10, size=10)\n', (633, 646), True, 'import numpy as np\n')] |
import numpy as np
class Perturb:
"""
A simple perturb mechanism as specified in (Jaderberg et al., 2017).
"""
def __init__(self, cs_space=None, boundaries={}):
self.boundaries = boundaries
self.cs_space = cs_space
def __call__(self, hyperparameters: dict) -> dict:
"""
Perturb the nodes in the input.
:param hyperparameters: A dict with nodes.
:return: The perturbed nodes.
"""
result = hyperparameters.copy()
for key in hyperparameters:
temp_value = self.cs_space[key]._inverse_transform(result[key])
temp_value += np.random.choice([-1, 1]) * 0.2 * temp_value
result[key] = self.cs_space[key]._transform(temp_value)
self.ensure_boundaries(result)
return result
def ensure_boundaries(self, result):
for key in result:
if key not in self.boundaries:
continue
if result[key] < self.boundaries[key][0]:
result[key] = self.boundaries[key][0]
elif result[key] > self.boundaries[key][1]:
result[key] = self.boundaries[key][1]
| [
"numpy.random.choice"
] | [((641, 666), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (657, 666), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
class EntropyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, training_data, cache_file, image_size, mean=[0, 0, 0], std = [1, 1, 1], batch_size=2):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8EntropyCalibrator2.__init__(self)
self.cache_file = cache_file
# Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
self.data = self._load_data(training_data, image_size, mean, std)
self.batch_size = batch_size
self.current_index = 0
print("Calibration data shape: ", self.data.shape)
# Allocate enough memory for a whole batch.
self.device_input = cuda.mem_alloc(self.data[0].nbytes * self.batch_size)
def _load_data(self, training_data, image_size, mean, std):
data = []
mean = np.array(mean)
std = np.array(std)
with open(training_data, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if os.path.exists(line):
img = cv2.imread(line)
img = cv2.resize(img, image_size)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255.0
img = img - mean
img = img / std
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
data.append(img)
else:
print("file {} not exist, skip it!".format(line))
continue
return np.concatenate(data, axis=0)
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
if self.current_index + self.batch_size > self.data.shape[0]:
return None
current_batch = int(self.current_index / self.batch_size)
if current_batch % 10 == 0:
print("Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))
batch = self.data[self.current_index:self.current_index + self.batch_size].ravel()
cuda.memcpy_htod(self.device_input, batch)
self.current_index += self.batch_size
return [self.device_input]
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
print("read cache file: {}".format(self.cache_file))
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
f.write(cache)
| [
"cv2.resize",
"tensorrt.IInt8EntropyCalibrator2.__init__",
"cv2.cvtColor",
"os.path.exists",
"pycuda.driver.mem_alloc",
"numpy.transpose",
"numpy.expand_dims",
"cv2.imread",
"numpy.array",
"pycuda.driver.memcpy_htod",
"numpy.concatenate"
] | [((430, 472), 'tensorrt.IInt8EntropyCalibrator2.__init__', 'trt.IInt8EntropyCalibrator2.__init__', (['self'], {}), '(self)\n', (466, 472), True, 'import tensorrt as trt\n'), ((914, 967), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['(self.data[0].nbytes * self.batch_size)'], {}), '(self.data[0].nbytes * self.batch_size)\n', (928, 967), True, 'import pycuda.driver as cuda\n'), ((1066, 1080), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (1074, 1080), True, 'import numpy as np\n'), ((1095, 1108), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (1103, 1108), True, 'import numpy as np\n'), ((1794, 1822), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1808, 1822), True, 'import numpy as np\n'), ((2590, 2632), 'pycuda.driver.memcpy_htod', 'cuda.memcpy_htod', (['self.device_input', 'batch'], {}), '(self.device_input, batch)\n', (2606, 2632), True, 'import pycuda.driver as cuda\n'), ((2867, 2898), 'os.path.exists', 'os.path.exists', (['self.cache_file'], {}), '(self.cache_file)\n', (2881, 2898), False, 'import os\n'), ((1264, 1284), 'os.path.exists', 'os.path.exists', (['line'], {}), '(line)\n', (1278, 1284), False, 'import os\n'), ((1308, 1324), 'cv2.imread', 'cv2.imread', (['line'], {}), '(line)\n', (1318, 1324), False, 'import cv2\n'), ((1348, 1375), 'cv2.resize', 'cv2.resize', (['img', 'image_size'], {}), '(img, image_size)\n', (1358, 1375), False, 'import cv2\n'), ((1398, 1434), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1410, 1434), False, 'import cv2\n'), ((1556, 1584), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (1568, 1584), True, 'import numpy as np\n'), ((1607, 1634), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1621, 1634), True, 'import numpy as np\n')] |
"""Functions for evaluating prediction performance."""
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import pandas as pd
import sklearn.metrics as skm
from scipy.stats import kendalltau
from six.moves import range
from .data import CPG_NAN, OUTPUT_SEP
from .utils import get_from_module
def cor(y, z):
"""Compute Pearson's correlation coefficient."""
return np.corrcoef(y, z)[0, 1]
def kendall(y, z, nb_sample=100000):
"""Compute Kendall's correlation coefficient."""
if len(y) > nb_sample:
idx = np.arange(len(y))
np.random.shuffle(idx)
idx = idx[:nb_sample]
y = y[idx]
z = z[idx]
return kendalltau(y, z)[0]
def mad(y, z):
"""Compute mean absolute deviation."""
return np.mean(np.abs(y - z))
def mse(y, z):
"""Compute mean squared error."""
return np.mean((y - z)**2)
def rmse(y, z):
"""Compute root mean squared error."""
return np.sqrt(mse(y, z))
def auc(y, z, round=True):
"""Compute area under the ROC curve."""
if round:
y = y.round()
if len(y) == 0 or len(np.unique(y)) < 2:
return np.nan
return skm.roc_auc_score(y, z)
def acc(y, z, round=True):
"""Compute accuracy."""
if round:
y = np.round(y)
z = np.round(z)
return skm.accuracy_score(y, z)
def tpr(y, z, round=True):
"""Compute true positive rate."""
if round:
y = np.round(y)
z = np.round(z)
return skm.recall_score(y, z)
def tnr(y, z, round=True):
"""Compute true negative rate."""
if round:
y = np.round(y)
z = np.round(z)
c = skm.confusion_matrix(y, z)
return c[0, 0] / c[0].sum()
def mcc(y, z, round=True):
"""Compute Matthew's correlation coefficient."""
if round:
y = np.round(y)
z = np.round(z)
return skm.matthews_corrcoef(y, z)
def f1(y, z, round=True):
"""Compute F1 score."""
if round:
y = np.round(y)
z = np.round(z)
return skm.f1_score(y, z)
def cat_acc(y, z):
"""Compute categorical accuracy given one-hot matrices."""
return np.mean(y.argmax(axis=1) == z.argmax(axis=1))
# Classification metrics.
CLA_METRICS = [auc, acc, tpr, tnr, f1, mcc]
# Regression metrics.
REG_METRICS = [mse, mad, cor]
# Categorical metrics.
CAT_METRICS = [cat_acc]
def evaluate(y, z, mask=CPG_NAN, metrics=CLA_METRICS):
"""Compute multiple performance metrics.
Computes evaluation metrics using functions in `metrics`.
Parameters
----------
y: :class:`numpy.ndarray`
:class:`numpy.ndarray` vector with labels.
z: :class:`numpy.ndarray`
:class:`numpy.ndarray` vector with predictions.
mask: scalar
Value to mask unobserved labels in `y`.
metrics: list
List of evaluation functions to be used.
Returns
-------
Ordered dict
Ordered dict with name of evaluation functions as keys and evaluation
metrics as values.
"""
z = z.ravel()
if mask is not None:
t = y != mask
y = y[t]
z = z[t]
p = OrderedDict()
for metric in metrics:
if len(y):
p[metric.__name__] = metric(y, z)
else:
p[metric.__name__] = np.nan
p['n'] = len(y)
return p
def evaluate_cat(y, z, metrics=CAT_METRICS,
binary_metrics=None):
"""Compute multiple performance metrics for categorical outputs.
Computes evaluation metrics for categorical (one-hot encoded labels) using
functions in `metrics`.
Parameters
----------
y: :class:`numpy.ndarray`
:class:`numpy.ndarray` matrix with one-hot encoded labels.
z: :class:`numpy.ndarray`
:class:`numpy.ndarray` matrix with class probabilities in rows.
metrics: list
List of evaluation functions to be used.
binary_metrics: list
List of binary evaluation metrics to be computed for each category, e.g.
class, separately. Will be encoded as `name_i` in the output dictionary,
where `name` is the name of the evaluation metrics and `i` the index of
the category.
Returns
-------
Ordered dict
Ordered dict with name of evaluation functions as keys and evaluation
metrics as values.
"""
idx = y.sum(axis=1) > 0
y = y[idx]
z = z[idx]
p = OrderedDict()
for metric in metrics:
p[metric.__name__] = metric(y, z)
if binary_metrics:
for i in range(y.shape[1]):
for metric in binary_metrics:
p['%s_%d' % (metric.__name__, i)] = metric(y[:, i], z[:, i])
p['n'] = len(y)
return p
def get_output_metrics(output_name):
"""Return list of evaluation metrics for model output name."""
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
metrics = CLA_METRICS
elif _output_name[0] == 'bulk':
metrics = REG_METRICS + CLA_METRICS
elif _output_name[-1] in ['diff', 'mode', 'cat2_var']:
metrics = CLA_METRICS
elif _output_name[-1] == 'mean':
metrics = REG_METRICS + CLA_METRICS + [kendall]
elif _output_name[-1] == 'var':
metrics = REG_METRICS + [kendall]
else:
raise ValueError('Invalid output name "%s"!' % output_name)
return metrics
def evaluate_outputs(outputs, preds):
"""Evaluate performance metrics of multiple outputs.
Given the labels and predictions of multiple outputs, chooses and computes
performance metrics of each output depending on its name.
Parameters
----------
outputs: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with labels as value.
preds: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with predictions as value.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with columns `metric`, `output`, `value`.
"""
perf = []
for output_name in outputs:
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[-1] in ['cat_var']:
tmp = evaluate_cat(outputs[output_name],
preds[output_name],
binary_metrics=[auc])
else:
metrics = get_output_metrics(output_name)
tmp = evaluate(outputs[output_name],
preds[output_name],
metrics=metrics)
tmp = pd.DataFrame({'output': output_name,
'metric': list(tmp.keys()),
'value': list(tmp.values())})
perf.append(tmp)
perf = pd.concat(perf)
perf = perf[['metric', 'output', 'value']]
perf.sort_values(['metric', 'value'], inplace=True)
return perf
def is_binary_output(output_name):
"""Return `True` if `output_name` is binary."""
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
return True
elif _output_name[-1] in ['diff', 'mode', 'cat2_var']:
return True
else:
return False
def evaluate_curve(outputs, preds, fun=skm.roc_curve, mask=CPG_NAN,
nb_point=None):
"""Evaluate performance curves of multiple outputs.
Given the labels and predictions of multiple outputs, computes a performance
a curve, e.g. ROC or PR curve, for each output.
Parameters
----------
outputs: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with labels as value.
preds: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with predictions as value.
fun: function
Function to compute the performance curves.
mask: scalar
Value to mask unobserved labels in `y`.
nb_point: int
Maximum number of points to curve to reduce memory.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with columns `output`, `x`, `y`, `thr`.
"""
curves = []
for output_name in outputs.keys():
if not is_binary_output(output_name):
continue
output = outputs[output_name].round().squeeze()
pred = preds[output_name].squeeze()
idx = output != CPG_NAN
output = output[idx]
pred = pred[idx]
x, y, thr = fun(output, pred)
length = min(len(x), len(y), len(thr))
if nb_point and length > nb_point:
idx = np.linspace(0, length - 1, nb_point).astype(np.int32)
else:
idx = slice(0, length)
x = x[idx]
y = y[idx]
thr = thr[idx]
curve = OrderedDict()
curve['output'] = output_name
curve['x'] = x
curve['y'] = y
curve['thr'] = thr
curve = pd.DataFrame(curve)
curves.append(curve)
if not curves:
return None
else:
curves = pd.concat(curves)
return curves
def unstack_report(report):
"""Unstack performance report.
Reshapes a :class:`pandas.DataFrame` of :func:`evaluate_outputs` such that
performance metrics are listed as columns.
Parameters
----------
report: :class:`pandas.DataFrame`
:class:`pandas.DataFrame` from :func:`evaluate_outputs`.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with performance metrics as columns.
"""
index = list(report.columns[~report.columns.isin(['metric', 'value'])])
report = pd.pivot_table(report, index=index, columns='metric',
values='value')
report.reset_index(index, inplace=True)
report.columns.name = None
# Sort columns
columns = list(report.columns)
sorted_columns = []
for fun in CAT_METRICS + CLA_METRICS + REG_METRICS:
for i, column in enumerate(columns):
if column.startswith(fun.__name__):
sorted_columns.append(column)
sorted_columns = index + sorted_columns
sorted_columns += [col for col in columns if col not in sorted_columns]
report = report[sorted_columns]
order = []
if 'auc' in report.columns:
order.append(('auc', False))
elif 'mse' in report.columns:
order.append(('mse', True))
elif 'acc' in report.columns:
order.append(('acc', False))
report.sort_values([x[0] for x in order],
ascending=[x[1] for x in order],
inplace=True)
return report
def get(name):
"""Return object from module by its name."""
return get_from_module(name, globals())
| [
"numpy.abs",
"pandas.pivot_table",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"numpy.mean",
"scipy.stats.kendalltau",
"numpy.round",
"numpy.unique",
"pandas.DataFrame",
"six.moves.range",
"numpy.linspace",
"pandas.concat",
"numpy.random.shuffle",
"numpy.corrcoef",
"skl... | [((916, 937), 'numpy.mean', 'np.mean', (['((y - z) ** 2)'], {}), '((y - z) ** 2)\n', (923, 937), True, 'import numpy as np\n'), ((1214, 1237), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['y', 'z'], {}), '(y, z)\n', (1231, 1237), True, 'import sklearn.metrics as skm\n'), ((1368, 1392), 'sklearn.metrics.accuracy_score', 'skm.accuracy_score', (['y', 'z'], {}), '(y, z)\n', (1386, 1392), True, 'import sklearn.metrics as skm\n'), ((1533, 1555), 'sklearn.metrics.recall_score', 'skm.recall_score', (['y', 'z'], {}), '(y, z)\n', (1549, 1555), True, 'import sklearn.metrics as skm\n'), ((1693, 1719), 'sklearn.metrics.confusion_matrix', 'skm.confusion_matrix', (['y', 'z'], {}), '(y, z)\n', (1713, 1719), True, 'import sklearn.metrics as skm\n'), ((1907, 1934), 'sklearn.metrics.matthews_corrcoef', 'skm.matthews_corrcoef', (['y', 'z'], {}), '(y, z)\n', (1928, 1934), True, 'import sklearn.metrics as skm\n'), ((2064, 2082), 'sklearn.metrics.f1_score', 'skm.f1_score', (['y', 'z'], {}), '(y, z)\n', (2076, 2082), True, 'import sklearn.metrics as skm\n'), ((3154, 3167), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3165, 3167), False, 'from collections import OrderedDict\n'), ((4416, 4429), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4427, 4429), False, 'from collections import OrderedDict\n'), ((6750, 6765), 'pandas.concat', 'pd.concat', (['perf'], {}), '(perf)\n', (6759, 6765), True, 'import pandas as pd\n'), ((9612, 9681), 'pandas.pivot_table', 'pd.pivot_table', (['report'], {'index': 'index', 'columns': '"""metric"""', 'values': '"""value"""'}), "(report, index=index, columns='metric', values='value')\n", (9626, 9681), True, 'import pandas as pd\n'), ((451, 468), 'numpy.corrcoef', 'np.corrcoef', (['y', 'z'], {}), '(y, z)\n', (462, 468), True, 'import numpy as np\n'), ((634, 656), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (651, 656), True, 'import numpy as np\n'), ((736, 752), 'scipy.stats.kendalltau', 
'kendalltau', (['y', 'z'], {}), '(y, z)\n', (746, 752), False, 'from scipy.stats import kendalltau\n'), ((835, 848), 'numpy.abs', 'np.abs', (['(y - z)'], {}), '(y - z)\n', (841, 848), True, 'import numpy as np\n'), ((1321, 1332), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (1329, 1332), True, 'import numpy as np\n'), ((1345, 1356), 'numpy.round', 'np.round', (['z'], {}), '(z)\n', (1353, 1356), True, 'import numpy as np\n'), ((1486, 1497), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (1494, 1497), True, 'import numpy as np\n'), ((1510, 1521), 'numpy.round', 'np.round', (['z'], {}), '(z)\n', (1518, 1521), True, 'import numpy as np\n'), ((1649, 1660), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (1657, 1660), True, 'import numpy as np\n'), ((1673, 1684), 'numpy.round', 'np.round', (['z'], {}), '(z)\n', (1681, 1684), True, 'import numpy as np\n'), ((1860, 1871), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (1868, 1871), True, 'import numpy as np\n'), ((1884, 1895), 'numpy.round', 'np.round', (['z'], {}), '(z)\n', (1892, 1895), True, 'import numpy as np\n'), ((2017, 2028), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (2025, 2028), True, 'import numpy as np\n'), ((2041, 2052), 'numpy.round', 'np.round', (['z'], {}), '(z)\n', (2049, 2052), True, 'import numpy as np\n'), ((4539, 4556), 'six.moves.range', 'range', (['y.shape[1]'], {}), '(y.shape[1])\n', (4544, 4556), False, 'from six.moves import range\n'), ((8766, 8779), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8777, 8779), False, 'from collections import OrderedDict\n'), ((8907, 8926), 'pandas.DataFrame', 'pd.DataFrame', (['curve'], {}), '(curve)\n', (8919, 8926), True, 'import pandas as pd\n'), ((9023, 9040), 'pandas.concat', 'pd.concat', (['curves'], {}), '(curves)\n', (9032, 9040), True, 'import pandas as pd\n'), ((1162, 1174), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1171, 1174), True, 'import numpy as np\n'), ((8585, 8621), 'numpy.linspace', 'np.linspace', (['(0)', 
'(length - 1)', 'nb_point'], {}), '(0, length - 1, nb_point)\n', (8596, 8621), True, 'import numpy as np\n')] |
"""Experiment Helper Functions."""
import random as r
import json
import sys
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Add the architecture path for the networkarch and RelMSE
sys.path.append("../architecture/")
from NetworkArch import NetworkArch
def construct_network(**config):
    """Build and return a NetworkArch model from keyword configuration.

    All keyword arguments are forwarded unchanged to the NetworkArch
    constructor.
    """
    model = NetworkArch(**config)
    return model
def getdatasize(data_file_prefix):
    """Return the shape of the first training data file.

    Loads ``<data_file_prefix>_train1_x.npy`` and reports its array shape.
    """
    first_file = "{}_train1_x.npy".format(data_file_prefix)
    return np.load(first_file).shape
def get_data(data_file_prefix, data_train_len, num_shifts):
    """Load and organize training/validation data.

    Args:
        data_file_prefix: Path prefix of the .npy data files
            (``<prefix>_train<k>_x.npy`` and ``<prefix>_val_x.npy``).
        data_train_len: Number of training files to load and stack.
        num_shifts: Number of time shifts passed to stack_predictions.

    Returns:
        Tuple of (data_train, data_val, train_zeros, val_zeros,
        train_pred, val_pred), where the zeros arrays match the shapes
        of the stacked prediction targets.
    """
    # Collect all training files first, then stack once: calling
    # np.vstack inside the loop re-copies the accumulated array each
    # iteration (accidental O(n^2) in total data size).
    parts = [np.load("{}_train1_x.npy".format(data_file_prefix))]
    for k in range(2, data_train_len + 1):
        parts.append(np.load("{}_train{}_x.npy".format(data_file_prefix, k)))
    data_train = np.vstack(parts)

    data_val = np.load("{}_val_x.npy".format(data_file_prefix))

    # Stack data for the prediction loss terms.
    train_pred = stack_predictions(data_train, num_shifts)
    val_pred = stack_predictions(data_val, num_shifts)

    # Tensors of zeros used as targets when training the autoencoder only.
    train_zeros = np.zeros(train_pred.shape)
    val_zeros = np.zeros(val_pred.shape)

    return (data_train, data_val, train_zeros,
            val_zeros, train_pred, val_pred)
def evaluate_initial_models(save_prefix, all_data,
train_opts, network_config):
"""Train 20 models and choose best one."""
# Extract the training/validation data
(data_train, data_val, train_zeros,
val_zeros, train_pred, val_pred) = all_data
# Gather the relevant training options
aec_only_epochs = train_opts['aec_only_epochs']
init_full_epochs = train_opts['init_full_epochs']
num_init_models = train_opts['num_init_models']
loss_fn = train_opts['loss_fn']
opt = train_opts['optimizer']
optimizer_opts = train_opts['optimizer_opts']
batch_size = train_opts['batch_size']
loss_weights = train_opts['loss_weights']
inner_loss_weights = [loss_weights[i] for i in [2, 4]]
outer_loss_weights = [loss_weights[i] for i in [0, 1, 3]]
# Set up results dictionary
results = {'full_hist': [],
'aec_hist': [],
'lr': [],
'best_loss': [],
'model_path': []}
# For loop for generating, training, and evaluating the initial models
for i in range(num_init_models):
# Randomly selected learning rate
lr = 10**(-r.uniform(3, 6))
# Create a model, initially only train autoencoders
model = construct_network(train_autoencoder_only=True,
inner_loss_weights=inner_loss_weights,
**network_config)
# Compile the model
model.compile(loss=3 * [loss_fn],
optimizer=opt(lr=lr, **optimizer_opts),
loss_weights=outer_loss_weights)
# Use checkpointing
checkpoint_path_aec = save_prefix + 'checkpoint_aec_{}'.format(i)
cbs_aec = [keras.callbacks.ModelCheckpoint(checkpoint_path_aec,
save_weights_only=True,
monitor='val_loss',
save_best_only=True)]
# Fit autoencoder-only model
aec_hist = model.fit(x=data_train,
y=[data_train, data_train, train_zeros],
validation_data=(data_val,
[data_val, data_val, val_zeros]),
callbacks=cbs_aec, batch_size=batch_size,
epochs=aec_only_epochs, verbose=True)
# Re-load weights with best validation loss
model.load_weights(checkpoint_path_aec)
# Now set the model to train with prediction losses
model.train_autoencoder_only = False
# Re-compile the model
model.compile(loss=3 * [loss_fn],
optimizer=opt(lr=lr, **optimizer_opts),
loss_weights=outer_loss_weights)
# Train full model
checkpoint_path_full = save_prefix + 'checkpoint_{}'.format(i)
cbs = [keras.callbacks.ModelCheckpoint(checkpoint_path_full,
save_weights_only=True,
monitor='val_loss',
save_best_only=True)]
# Fit the full model
full_hist = model.fit(x=data_train,
y=[data_train, data_train, train_pred],
validation_data=(data_val,
[data_val, data_val, val_pred]),
callbacks=cbs, batch_size=batch_size,
epochs=init_full_epochs)
# Re-load weights with best validation loss
model.load_weights(checkpoint_path_full)
# Evaluate the model to get final validation loss
best_loss = model.evaluate(x=data_val,
y=[data_val, data_val, val_pred],
verbose=False)
# Save the model
model_path = save_prefix + "model_{}".format(i)
model.save(model_path)
# Append the results to the results list
results['full_hist'].append(full_hist.history.copy())
results['aec_hist'].append(aec_hist.history.copy())
results['lr'].append(lr)
results['best_loss'].append(best_loss[0])
results['model_path'].append(model_path)
# Delete the model variable and clear_session to remove any graph
del model
tf.keras.backend.clear_session()
# Select the best model from the loop
best_model_idc = np.argmin(results['best_loss'])
best_model_path = results['model_path'][best_model_idc]
# Return the best model's path
return results, best_model_path
def train_final_model(model_path,
save_prefix,
all_data,
train_opts,
custom_objects):
"""Load best initial model and train until convergence."""
# Gather the relevant training options
best_model_epochs = train_opts['best_model_epochs']
batch_size = train_opts['batch_size']
# Extract the training/validation data
(data_train, data_val, train_zeros,
val_zeros, train_pred, val_pred) = all_data
# Load the model
model = tf.keras.models.load_model(model_path,
custom_objects=custom_objects)
# Set the place to save the checkpoint model weights
checkpoint_model_path = save_prefix + 'checkpoint_final'
# Use checkpointing
cbs = [keras.callbacks.ModelCheckpoint(checkpoint_model_path,
save_weights_only=True,
monitor='val_loss',
save_best_only=True)]
# Train the model
hist = model.fit(x=data_train,
y=[data_train, data_train, train_pred],
validation_data=(data_val,
[data_val, data_val, val_pred]),
callbacks=cbs, batch_size=batch_size,
epochs=best_model_epochs)
# Re-load the best model weights
model.load_weights(checkpoint_model_path)
# Save the model
model_path = save_prefix + 'final_model'
model.save(model_path)
return hist.history, model_path
def save_results(results_path, random_seed,
model_path, custom_objects,
final_hist, init_hist):
"""Save the results."""
# Load and save the model
model = tf.keras.models.load_model(model_path,
custom_objects=custom_objects)
model.save(results_path + 'final_model')
print("Best model saved to:", model_path)
# Export the initial model training dictionary
hist_filepath = results_path + "initial_pool_results.json"
json.dump(init_hist, open(hist_filepath, 'w'))
# Export the final model training dictionary
final_hist['random_seed'] = random_seed
hist_filepath = results_path + "final_model_history.json"
json.dump(final_hist, open(hist_filepath, 'w'))
print("Exported training dictionaries to: ", results_path)
def check_for_directories(expt_name):
"""Create necessary directories if they do not exist."""
pardir = os.path.abspath(os.pardir)
dirs = ['logs',
'model_weights',
'model_weights' + os.sep + expt_name,
'results',
'results' + os.sep + expt_name,
]
for dirname in dirs:
os.makedirs(pardir + os.sep + dirname, exist_ok=True)
def stack_predictions(data, num_shifts):
"""Create tensors to be used as inputs for prediction/linearity losses."""
len_pred = data.shape[1] - num_shifts
prediction_list = []
for j in range(num_shifts):
prediction_list.append(data[:, j + 1:j + 1 + len_pred, :])
prediction_tensor = np.concatenate(prediction_list, axis=1)
return prediction_tensor
def run_experiment(random_seed, expt_name, data_file_prefix,
training_options, network_config, custom_objects):
"""Run experiment for Koopman autoencoder."""
# Assign a random number generator seed for learning rates
r.seed(random_seed)
# Create necessary directories
check_for_directories(expt_name)
# Get the training data
all_data = get_data(data_file_prefix,
training_options['data_train_len'],
network_config['num_shifts'])
# Set the prefix for where to save the results/checkpointed models
save_prefix = '../model_weights/{}/'.format(expt_name)
# Step 1 -- Train a collection of initial models
# Autoencoders-only, then full model
# This method returns the file path to the best model:
init_hist, model_path = evaluate_initial_models(save_prefix,
all_data,
training_options,
network_config)
# Step 2 -- Load the best model, and train for the full time
# Load the best model
final_hist, model_path = train_final_model(model_path, save_prefix,
all_data,
training_options,
custom_objects)
# Step 3 -- Save the results
results_path = '../results/{}/'.format(expt_name)
save_results(results_path, random_seed,
model_path, custom_objects,
final_hist, init_hist)
| [
"sys.path.append",
"os.path.abspath",
"tensorflow.keras.models.load_model",
"os.makedirs",
"random.uniform",
"tensorflow.keras.backend.clear_session",
"numpy.zeros",
"NetworkArch.NetworkArch",
"numpy.argmin",
"tensorflow.keras.callbacks.ModelCheckpoint",
"random.seed",
"numpy.vstack",
"numpy... | [((220, 255), 'sys.path.append', 'sys.path.append', (['"""../architecture/"""'], {}), "('../architecture/')\n", (235, 255), False, 'import sys\n'), ((400, 434), 'NetworkArch.NetworkArch', 'NetworkArch', ([], {}), '(**architecture_config)\n', (411, 434), False, 'from NetworkArch import NetworkArch\n'), ((1224, 1250), 'numpy.zeros', 'np.zeros', (['train_pred.shape'], {}), '(train_pred.shape)\n', (1232, 1250), True, 'import numpy as np\n'), ((1267, 1291), 'numpy.zeros', 'np.zeros', (['val_pred.shape'], {}), '(val_pred.shape)\n', (1275, 1291), True, 'import numpy as np\n'), ((5955, 5986), 'numpy.argmin', 'np.argmin', (["results['best_loss']"], {}), "(results['best_loss'])\n", (5964, 5986), True, 'import numpy as np\n'), ((6666, 6735), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {'custom_objects': 'custom_objects'}), '(model_path, custom_objects=custom_objects)\n', (6692, 6735), True, 'import tensorflow as tf\n'), ((7941, 8010), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {'custom_objects': 'custom_objects'}), '(model_path, custom_objects=custom_objects)\n', (7967, 8010), True, 'import tensorflow as tf\n'), ((8693, 8719), 'os.path.abspath', 'os.path.abspath', (['os.pardir'], {}), '(os.pardir)\n', (8708, 8719), False, 'import os\n'), ((9299, 9338), 'numpy.concatenate', 'np.concatenate', (['prediction_list'], {'axis': '(1)'}), '(prediction_list, axis=1)\n', (9313, 9338), True, 'import numpy as np\n'), ((9619, 9638), 'random.seed', 'r.seed', (['random_seed'], {}), '(random_seed)\n', (9625, 9638), True, 'import random as r\n'), ((902, 935), 'numpy.vstack', 'np.vstack', (['[data_train, new_data]'], {}), '([data_train, new_data])\n', (911, 935), True, 'import numpy as np\n'), ((5858, 5890), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (5888, 5890), True, 'import tensorflow as tf\n'), ((6930, 7054), 
'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['checkpoint_model_path'], {'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(checkpoint_model_path, save_weights_only=\n True, monitor='val_loss', save_best_only=True)\n", (6961, 7054), False, 'from tensorflow import keras\n'), ((8933, 8986), 'os.makedirs', 'os.makedirs', (['(pardir + os.sep + dirname)'], {'exist_ok': '(True)'}), '(pardir + os.sep + dirname, exist_ok=True)\n', (8944, 8986), False, 'import os\n'), ((3140, 3261), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['checkpoint_path_aec'], {'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(checkpoint_path_aec, save_weights_only=True,\n monitor='val_loss', save_best_only=True)\n", (3171, 3261), False, 'from tensorflow import keras\n'), ((4349, 4472), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['checkpoint_path_full'], {'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(checkpoint_path_full, save_weights_only=\n True, monitor='val_loss', save_best_only=True)\n", (4380, 4472), False, 'from tensorflow import keras\n'), ((2565, 2580), 'random.uniform', 'r.uniform', (['(3)', '(6)'], {}), '(3, 6)\n', (2574, 2580), True, 'import random as r\n')] |
import numpy as py
import sys
sys.path.append('../ISCE')
import insarhelpers
import geopandas as gpd
import matplotlib.pyplot as plt
import glob
import os
from PIL import Image, ImageEnhance
import rasterio
import rasterio.merge
import rasterio.plot
import numpy as np
from shapely.geometry import Polygon
import geopandas as gpd
datapath = '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/data/ChrisLarsen'
imgs16 = glob.glob(os.path.join(datapath,'WhiteRiver_June1_2016_ortho*.tif'))
imgs19 = glob.glob(os.path.join(datapath,'Aug30_2019_Sulzer*.tif')) #flat creek and east site
eastsite_imgs = glob.glob(os.path.join(datapath,'Aug30_2019_Sulzer_east_ortho*.tif'))
def make_img_list(fps):
newlist = []
for fp in fps:
src = rasterio.open(fp)
newlist.append(src)
return newlist
mosaic_list16 = make_img_list(imgs16)
mosaic_list19 = make_img_list(imgs19)
eastsite = make_img_list(eastsite_imgs)
def make_bbox_poly(bounds):
bbox = Polygon([(bounds[0],bounds[1]),
(bounds[2],bounds[1]),
(bounds[2],bounds[3]),
(bounds[0],bounds[3]),
(bounds[0],bounds[1])])
return bbox
band_order = [0,1,2]
res = 5
crs = 'EPSG:32607'
bounds16 = (469000,6833500,479500,6844000)
hummocks_bounds = (474791, 6840032, 475066, 6840214)
tree_bounds= (472420, 6842385, 472630, 6842555)
zoom_bounds = (473400,6839400,474400, 6840200)
deflation_bounds = (473944,6839191, 474202, 6839371)
flowlines_bounds = (474074, 6838819, 474297, 6839060)
stripes_bounds = (471765,6836367,472040,6836549)
water_bounds = (473905,6839729,474204,6839968)
scratches_bounds = (471588,6835718,471695,6835812)
hummocks_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(hummocks_bounds)], crs = crs)
tree_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(tree_bounds)], crs = crs)
zoom_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(zoom_bounds)], crs = crs)
deflation_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(deflation_bounds)], crs = crs)
flowlines_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(flowlines_bounds)], crs = crs)
stripes_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(stripes_bounds)], crs = crs)
water_box = gpd.GeoDataFrame([1], geometry = [make_bbox_poly(water_bounds)], crs = crs)
ortho2016, extent = insarhelpers.make_MS_img(mosaic_list16, band_order, res = res, bounds = bounds16)
ortho2019, extent = insarhelpers.make_MS_img(mosaic_list19, band_order, res = res)
ortho_east, east_extent = insarhelpers.make_MS_img(eastsite, band_order, res = res)
hummocks, hummocks_extent = insarhelpers.make_MS_img(mosaic_list19, band_order, bounds = hummocks_bounds)
trees, trees_extent = insarhelpers.make_MS_img(mosaic_list19, band_order, bounds = tree_bounds)
deflation16, deflation_extent = insarhelpers.make_MS_img(mosaic_list16, band_order, bounds = deflation_bounds)
deflation19, deflation_extent = insarhelpers.make_MS_img(mosaic_list19, band_order, bounds = deflation_bounds)
flowlines, fl_extent = insarhelpers.make_MS_img(mosaic_list16, band_order, bounds = flowlines_bounds)
stripes, stripes_extent = insarhelpers.make_MS_img(mosaic_list19, band_order, bounds = stripes_bounds)
waterbodies, water_extent = insarhelpers.make_MS_img(mosaic_list16, band_order, bounds = water_bounds)
scratches, scratches_extent = insarhelpers.make_MS_img(mosaic_list19, band_order, bounds = scratches_bounds)
#load polygons
ertlines = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/ERT_Lines.geojson')
ertlines = ertlines.to_crs('EPSG:32607')
sedsamples = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/SedSamplesFlatCreek.geojson')
sedsamples = sedsamples.to_crs('EPSG:32607')
glaciers = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/spatial_base_data/01_rgi60_Alaska/01_rgi60_Alaska.shp')
glaciers = glaciers.to_crs('EPSG:32607')
levees = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/levees.geojson')
water = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/water.geojson')
flowbands = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/pressure_ridges.geojson')
runout13 = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2013_runout.geojson')
runout15 = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2015_runout.geojson')
molards = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/molards.geojson')
eastdposit = gpd.read_file('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/eastdeposit_outline.geojson')
#create mask for ortho image
mask = ((np.array(ortho2019)[..., 0] != 0)*255).astype('uint8')
ortho2019.putalpha(Image.fromarray(mask, mode="L"))
#overview plot
f, ax = plt.subplots(figsize = (7.5, 9))
ax.imshow(ortho2019, extent = extent)
hummocks_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
tree_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
deflation_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
flowlines_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
stripes_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
water_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
runout13.exterior.plot(ax = ax, color = 'yellow', linewidth = 0.8)
runout15.exterior.plot(ax = ax, color = 'red', linewidth = 0.8)
ertlines.plot(ax = ax, color = 'w', linewidth = 1)
sedsamples.plot(ax = ax, color = 'k', markersize = 8)
eastdposit.plot(ax = ax, color = 'w', linewidth = 0.8)
#levees.plot(ax = ax, color = 'gold', linewidth = 0.5)
flowbands.plot(ax = ax, color = 'red', linewidth = 0.5)
water.plot(ax = ax, facecolor = 'deepskyblue', edgecolor = 'deepskyblue', linewidth = 0.5)
#molards.plot(ax = ax, color = 'lime', markersize = 1)
runout13.exterior.plot(ax = ax, color = 'w', linewidth = 0.3)
ax.set_xlim([469000,477000])
ax.set_ylim([6833500,6844000])
ax.ticklabel_format(useOffset=None, style = 'plain')
ax.set_xlabel('Easting (m)', fontsize = 16)
ax.set_ylabel('Northing (m)', fontsize = 16)
f.tight_layout()
#f.show()
f.savefig('ortho3.pdf')
#get m-extents:
def offset_extent(bounds):
o_e = [bounds[0]-bounds[0],
bounds[2]-bounds[0],
bounds[1]-bounds[1],
bounds[3]-bounds[1]]
return o_e
#hummocks plot
f,ax = plt.subplots()
ax.imshow(hummocks, extent = offset_extent(hummocks_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
#f.show()
f.savefig('hummocks.pdf')
#trees plot
f,ax = plt.subplots()
ax.imshow(trees, extent = offset_extent(tree_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
f.show()
#f.savefig('trees.pdf')
#deflation plot
f, (ax1, ax2) = plt.subplots(1,2, sharey = True, figsize = (10,5))
ax1.imshow(deflation16, extent = offset_extent(deflation_bounds))
ax2.imshow(deflation19, extent = offset_extent(deflation_bounds))
ax1.set_xlabel('X (m)')
ax2.set_xlabel('X (m)')
ax1.set_ylabel('Y (m)')
f.tight_layout()
f.show()
#f.savefig('deflation.pdf')
#flowbands plot
f,ax = plt.subplots()
ax.imshow(flowlines, extent = offset_extent(flowlines_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
f.show()
#f.savefig('flowbands.pdf')
#stripes plot
f,ax = plt.subplots()
ax.imshow(stripes, extent = offset_extent(stripes_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
#f.show()
f.savefig('stripes.pdf')
#waterbodies plot
f,ax = plt.subplots()
ax.imshow(waterbodies, extent = offset_extent(water_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
f.show()
#f.savefig('waterbodies.pdf')
#scratches plot
f,ax = plt.subplots()
ax.imshow(scratches, extent = offset_extent(scratches_bounds))
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
#f.show()
f.savefig('scratches.pdf')
# DEM differences
#load DEM difference
dem_src = rasterio.open('/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2019minus2016_clipped.tif')
dem_diff = dem_src.read(1)
dem_extent = [dem_src.bounds.left, dem_src.bounds.right, dem_src.bounds.bottom, dem_src.bounds.top]
dem_diff = dem_diff - 0.3 #correct by median value over runout zone
dem_diff[dem_diff < -1000] = np.nan
#boxes and close up views
dem_defl = (473900,6839100, 474200, 6839375)
dem_defl_box = gpd.GeoDataFrame([1], geometry = [make_bbbox_poly(dem_defl)], crs = crs)
ert_line = (471650, 6835950, 472000, 6836250)
ert_line_box = gpd.GeoDataFrame([1], geometry = [make_bbbox_poly(ert_line)], crs = crs)
glacier_view = (469700, 6833900, 471875, 6835925)
glacier_view_box = gpd.GeoDataFrame([1], geometry = [make_bbbox_poly(glacier_view)], crs = crs)
f.clf()
plt.close()
f, ax = plt.subplots(figsize = (7.5, 9)) #replot for publication with correct boxes
ax.imshow(ortho2019, extent = extent)
dem = ax.imshow(
dem_diff,
vmin = -10, vmax = 10,
cmap = 'seismic_r',
extent = dem_extent
)
#ertlines.plot(ax = ax, color = 'k', linewidth = 1)
dem_defl_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
ert_line_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
glacier_view_box.exterior.plot(ax = ax, color = 'k', linewidth = 0.8)
ax.set_xlim([dem_src.bounds.left,dem_src.bounds.right])
ax.set_ylim([dem_src.bounds.bottom, dem_src.bounds.top])
ax.ticklabel_format(useOffset=None, style = 'plain')
ax.set_xlabel('Easting (m)', fontsize = 14)
ax.set_ylabel('Northing (m)', fontsize = 14)
cb = f.colorbar(dem)
cb.ax.minorticks_off()
cb.set_label('Elevation change')
f.tight_layout()
#f.show()
f.savefig('DemDiff.pdf')
#dem deflation plot
f,ax = plt.subplots()
dem = ax.imshow(
dem_diff,
vmin = -4, vmax = 4,
cmap = 'seismic_r',
extent = dem_extent
)
cb = f.colorbar(dem, orientation = 'horizontal')
cb.ax.minorticks_off()
cb.set_label('Elevation change')
ax.set_xlim([dem_defl[0],dem_defl[2]])
ax.set_ylim([dem_defl[1],dem_defl[3]])
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
f.tight_layout()
#f.show()
f.savefig('DemDiff_deflation.pdf')
#ert line plot
f,ax = plt.subplots()
ax.imshow(ortho2019, extent = extent)
dem = ax.imshow(
dem_diff,
vmin = -8, vmax = 8,
cmap = 'seismic_r',
extent = dem_extent
)
ax.set_xlim([ert_line[0],ert_line[2]])
ax.set_ylim([ert_line[1],ert_line[3]])
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
cb = f.colorbar(dem, orientation = 'horizontal')
cb.ax.minorticks_off()
cb.set_label('Elevation change')
f.tight_layout()
#f.show()
f.savefig('DemDiff_ertline.pdf')
#glacier view plot
f,ax = plt.subplots()
ax.imshow(ortho2019, extent = extent)
dem = ax.imshow(
dem_diff,
vmin = -20, vmax = 20,
cmap = 'seismic_r',
extent = dem_extent
)
glaciers.exterior.plot(ax = ax, color = 'k')
ax.set_xlim([glacier_view[0],glacier_view[2]])
ax.set_ylim([glacier_view[1],glacier_view[3]])
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
cb = f.colorbar(dem)
cb.ax.minorticks_off()
cb.set_label('Elevation change')
f.tight_layout()
#f.show()
f.savefig('DemDiff_glacierview.pdf')
| [
"sys.path.append",
"rasterio.open",
"os.path.join",
"shapely.geometry.Polygon",
"matplotlib.pyplot.close",
"insarhelpers.make_MS_img",
"numpy.array",
"PIL.Image.fromarray",
"matplotlib.pyplot.subplots",
"geopandas.read_file"
] | [((30, 56), 'sys.path.append', 'sys.path.append', (['"""../ISCE"""'], {}), "('../ISCE')\n", (45, 56), False, 'import sys\n'), ((2312, 2389), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list16', 'band_order'], {'res': 'res', 'bounds': 'bounds16'}), '(mosaic_list16, band_order, res=res, bounds=bounds16)\n', (2336, 2389), False, 'import insarhelpers\n'), ((2414, 2474), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'res': 'res'}), '(mosaic_list19, band_order, res=res)\n', (2438, 2474), False, 'import insarhelpers\n'), ((2503, 2558), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['eastsite', 'band_order'], {'res': 'res'}), '(eastsite, band_order, res=res)\n', (2527, 2558), False, 'import insarhelpers\n'), ((2589, 2664), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'bounds': 'hummocks_bounds'}), '(mosaic_list19, band_order, bounds=hummocks_bounds)\n', (2613, 2664), False, 'import insarhelpers\n'), ((2689, 2760), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'bounds': 'tree_bounds'}), '(mosaic_list19, band_order, bounds=tree_bounds)\n', (2713, 2760), False, 'import insarhelpers\n'), ((2795, 2871), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list16', 'band_order'], {'bounds': 'deflation_bounds'}), '(mosaic_list16, band_order, bounds=deflation_bounds)\n', (2819, 2871), False, 'import insarhelpers\n'), ((2906, 2982), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'bounds': 'deflation_bounds'}), '(mosaic_list19, band_order, bounds=deflation_bounds)\n', (2930, 2982), False, 'import insarhelpers\n'), ((3008, 3084), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list16', 'band_order'], {'bounds': 'flowlines_bounds'}), '(mosaic_list16, band_order, bounds=flowlines_bounds)\n', (3032, 3084), False, 'import insarhelpers\n'), 
((3113, 3187), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'bounds': 'stripes_bounds'}), '(mosaic_list19, band_order, bounds=stripes_bounds)\n', (3137, 3187), False, 'import insarhelpers\n'), ((3218, 3290), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list16', 'band_order'], {'bounds': 'water_bounds'}), '(mosaic_list16, band_order, bounds=water_bounds)\n', (3242, 3290), False, 'import insarhelpers\n'), ((3323, 3399), 'insarhelpers.make_MS_img', 'insarhelpers.make_MS_img', (['mosaic_list19', 'band_order'], {'bounds': 'scratches_bounds'}), '(mosaic_list19, band_order, bounds=scratches_bounds)\n', (3347, 3399), False, 'import insarhelpers\n'), ((3429, 3538), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/ERT_Lines.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/ERT_Lines.geojson'\n )\n", (3442, 3538), True, 'import geopandas as gpd\n'), ((3584, 3703), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/SedSamplesFlatCreek.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/SedSamplesFlatCreek.geojson'\n )\n", (3597, 3703), True, 'import geopandas as gpd\n'), ((3751, 3874), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/spatial_base_data/01_rgi60_Alaska/01_rgi60_Alaska.shp"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/spatial_base_data/01_rgi60_Alaska/01_rgi60_Alaska.shp'\n )\n", (3764, 3874), True, 'import geopandas as gpd\n'), ((3917, 4023), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/levees.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/levees.geojson'\n )\n", (3930, 4023), True, 'import geopandas as gpd\n'), ((4022, 4127), 
'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/water.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/water.geojson'\n )\n", (4035, 4127), True, 'import geopandas as gpd\n'), ((4130, 4245), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/pressure_ridges.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/pressure_ridges.geojson'\n )\n", (4143, 4245), True, 'import geopandas as gpd\n'), ((4247, 4358), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2013_runout.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2013_runout.geojson'\n )\n", (4260, 4358), True, 'import geopandas as gpd\n'), ((4360, 4471), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2015_runout.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2015_runout.geojson'\n )\n", (4373, 4471), True, 'import geopandas as gpd\n'), ((4472, 4579), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/molards.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/molards.geojson'\n )\n", (4485, 4579), True, 'import geopandas as gpd\n'), ((4583, 4702), 'geopandas.read_file', 'gpd.read_file', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/eastdeposit_outline.geojson"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/eastdeposit_outline.geojson'\n )\n", (4596, 4702), True, 'import geopandas as gpd\n'), ((4864, 4894), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 9)'}), '(figsize=(7.5, 9))\n', (4876, 4894), True, 'import matplotlib.pyplot as 
plt\n'), ((6389, 6403), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6401, 6403), True, 'import matplotlib.pyplot as plt\n'), ((6584, 6598), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6596, 6598), True, 'import matplotlib.pyplot as plt\n'), ((6782, 6830), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'figsize': '(10, 5)'}), '(1, 2, sharey=True, figsize=(10, 5))\n', (6794, 6830), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7129), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7127, 7129), True, 'import matplotlib.pyplot as plt\n'), ((7315, 7329), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7327, 7329), True, 'import matplotlib.pyplot as plt\n'), ((7513, 7527), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7525, 7527), True, 'import matplotlib.pyplot as plt\n'), ((7715, 7729), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7727, 7729), True, 'import matplotlib.pyplot as plt\n'), ((7944, 8061), 'rasterio.open', 'rasterio.open', (['"""/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2019minus2016_clipped.tif"""'], {}), "(\n '/Users/mistral/Documents/CUBoulder/Science/Sulzer+/QGIS_Analysis/2019minus2016_clipped.tif'\n )\n", (7957, 8061), False, 'import rasterio\n'), ((8735, 8746), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8744, 8746), True, 'import matplotlib.pyplot as plt\n'), ((8756, 8786), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 9)'}), '(figsize=(7.5, 9))\n', (8768, 8786), True, 'import matplotlib.pyplot as plt\n'), ((9676, 9690), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9688, 9690), True, 'import matplotlib.pyplot as plt\n'), ((10127, 10141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10139, 10141), True, 'import matplotlib.pyplot as plt\n'), ((10618, 10632), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {}), '()\n', (10630, 10632), True, 'import matplotlib.pyplot as plt\n'), ((432, 490), 'os.path.join', 'os.path.join', (['datapath', '"""WhiteRiver_June1_2016_ortho*.tif"""'], {}), "(datapath, 'WhiteRiver_June1_2016_ortho*.tif')\n", (444, 490), False, 'import os\n'), ((510, 558), 'os.path.join', 'os.path.join', (['datapath', '"""Aug30_2019_Sulzer*.tif"""'], {}), "(datapath, 'Aug30_2019_Sulzer*.tif')\n", (522, 558), False, 'import os\n'), ((611, 670), 'os.path.join', 'os.path.join', (['datapath', '"""Aug30_2019_Sulzer_east_ortho*.tif"""'], {}), "(datapath, 'Aug30_2019_Sulzer_east_ortho*.tif')\n", (623, 670), False, 'import os\n'), ((968, 1102), 'shapely.geometry.Polygon', 'Polygon', (['[(bounds[0], bounds[1]), (bounds[2], bounds[1]), (bounds[2], bounds[3]), (\n bounds[0], bounds[3]), (bounds[0], bounds[1])]'], {}), '([(bounds[0], bounds[1]), (bounds[2], bounds[1]), (bounds[2], bounds\n [3]), (bounds[0], bounds[3]), (bounds[0], bounds[1])])\n', (975, 1102), False, 'from shapely.geometry import Polygon\n'), ((4807, 4838), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {'mode': '"""L"""'}), "(mask, mode='L')\n", (4822, 4838), False, 'from PIL import Image, ImageEnhance\n'), ((746, 763), 'rasterio.open', 'rasterio.open', (['fp'], {}), '(fp)\n', (759, 763), False, 'import rasterio\n'), ((4733, 4752), 'numpy.array', 'np.array', (['ortho2019'], {}), '(ortho2019)\n', (4741, 4752), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 3 21:38:54 2014
@author: spatchcock
"""
import numpy as np
import matplotlib.pyplot as plt
# %% Import data
high_file = './data/hippensteel_total_high_marsh.csv'
intm_file = './data/hippensteel_total_intermediate_marsh.csv'
low_file = './data/hippensteel_total_low_marsh.csv'
high_data = np.genfromtxt(high_file, dtype=float, delimiter=',', names=True)
intm_data = np.genfromtxt(intm_file, dtype=float, delimiter=',', names=True)
low_data = np.genfromtxt(low_file, dtype=float, delimiter=',', names=True)
# %% Function for building subplots
def add_subplot(master_fig, csv, species, plot_index, degree=4):
    """Add one depth-profile panel for a species to a 3x5 subplot grid.

    Scatter-plots dead and live counts against depth, then overlays a
    polynomial fit to the live counts.

    Parameters
    ----------
    master_fig : matplotlib.figure.Figure
        Figure to draw into.
    csv : numpy structured array
        Must contain fields 'Depth', '<species>_Live' and '<species>_Dead'.
    species : str
        Field-name prefix, also used as the panel title (e.g. 'AMEX').
    plot_index : int
        1-based position within the 3x5 grid.
    degree : int, optional
        Degree of the polynomial fitted to the live counts (default 4,
        matching the original hard-coded fit).

    Returns
    -------
    matplotlib.axes.Axes
        The axes that were added (handy for further customisation).
    """
    # ylim is (60, 0) so depth increases downward, as in a core profile.
    ax = master_fig.add_subplot(3, 5, plot_index,
                                xlim=(0.0, max(csv[species + '_Dead'])),
                                ylim=(60.0, 0))
    ax.plot(csv[species + '_Dead'], csv['Depth'], marker='o', linestyle='None')
    ax.plot(csv[species + '_Live'], csv['Depth'], marker='o', linestyle='None')
    ax.set_title(species)
    ax.grid()

    # Fit live counts as a function of depth; polyval replaces the previous
    # hand-expanded a[0] + a[1]*x + ... evaluation (coefficients are
    # ordered lowest degree first).
    coeffs = np.polynomial.polynomial.polyfit(csv['Depth'],
                                              csv[species + '_Live'],
                                              degree)
    depths = np.linspace(0, 60, 500)
    ax.plot(np.polynomial.polynomial.polyval(depths, coeffs), depths)
    return ax
# %% Create plots
fig = plt.figure()

# Panels run left-to-right, top-to-bottom: one row per marsh zone
# (high, intermediate, low), one column per species.
_panel = 1
for _dataset in (high_data, intm_data, low_data):
    for _species in ('AMEX', 'MFUS', 'PLIM', 'TINF', 'JMAC'):
        add_subplot(fig, _dataset, _species, _panel)
        _panel += 1

fig.tight_layout()
plt.show()
"matplotlib.pyplot.show",
"numpy.polynomial.polynomial.polyfit",
"numpy.genfromtxt",
"matplotlib.pyplot.figure",
"numpy.linspace"
] | [((341, 405), 'numpy.genfromtxt', 'np.genfromtxt', (['high_file'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(high_file, dtype=float, delimiter=',', names=True)\n", (354, 405), True, 'import numpy as np\n'), ((419, 483), 'numpy.genfromtxt', 'np.genfromtxt', (['intm_file'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(intm_file, dtype=float, delimiter=',', names=True)\n", (432, 483), True, 'import numpy as np\n'), ((497, 560), 'numpy.genfromtxt', 'np.genfromtxt', (['low_file'], {'dtype': 'float', 'delimiter': '""","""', 'names': '(True)'}), "(low_file, dtype=float, delimiter=',', names=True)\n", (510, 560), True, 'import numpy as np\n'), ((1231, 1243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1241, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1848, 1850), True, 'import matplotlib.pyplot as plt\n'), ((981, 1069), 'numpy.polynomial.polynomial.polyfit', 'np.polynomial.polynomial.polyfit', (["csv['Depth']", "csv[species + '_Live']", '(4)'], {'full': '(True)'}), "(csv['Depth'], csv[species + '_Live'], 4,\n full=True)\n", (1013, 1069), True, 'import numpy as np\n'), ((1069, 1092), 'numpy.linspace', 'np.linspace', (['(0)', '(60)', '(500)'], {}), '(0, 60, 500)\n', (1080, 1092), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.