code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import abc,os,pickle,sys
import scipy.stats
import numpy as np
from datetime import datetime
import tensorflow as tf
from BatchIterator import PaddedDataIterator
from generation import *
from Plotter import get_intensity,get_integral,get_integral_empirical
import statsmodels.api as sm
import scipy.stats as stats
from Utils import sequence_filter,lambda_estimation,file2sequence,sequence2file
##############################################################################
# parameters
DATA = 'hawkes' # hawkes, gaussian, rnn, polynimial (default; overridden by argv below)
BATCH_SIZE = 256 # Batch size
MAX_STEPS = 300  # maximum padded sequence length used for the masking matrix
ITERS = 20000#100000 # how many generator iterations to train for
SEED = 1234 # set graph-level seed to make the random sequences generated by all ops be repeatable across sessions
D_DIFF = False
MARK = False
ITERATION = 1
DATA = sys.argv[1]          # dataset name from the command line
#T = 15.0 # end time of simulation
T = float(sys.argv[3])      # end time of the observation window
SEQ_NUM = 2000 # number of sequences (default; overridden by argv below)
DIM_SIZE = 1
SEQ_NUM = int(float(sys.argv[2]))
# Real-world datasets have no known ground-truth intensity, so the QQ-plot
# diagnostics below are skipped for them.
if DATA in ['911calls','hawkes_gaussian','hawkes_poly','mimic','meme','citation','stock',"mixture1","mixture2","mixture3","mixture4"]:
    REAL_DATA = True
else:
    REAL_DATA = False
tf.set_random_seed(SEED)
np.random.seed(SEED)
##############################################################################
# prepare data: cache the (real, fake) sequence pair on disk so repeated runs
# for the same dataset reuse identical data.
FILE_NAME = 'pickled_data_ppgan_{}'.format(DATA)
if not os.path.isfile(FILE_NAME):
    if DATA=='gaussian': #QQ_plot for gaussian is not good as hawkes,selfcorrecting, perhaps that simulating is not good.
        intensityGaussian = IntensitySumGaussianKernel(3,[3,7,11], [1,1,1], [2,3,2])
        real_sequences = generate_sample(intensityGaussian, T, 20000)
        sequence2file(real_sequences,'gaussian')
    else:
        real_sequences = file2sequence(DATA)
    # Baseline "fake" data: a homogeneous Poisson process whose rate matches
    # the empirical event rate of the real sequences.
    lambda0 = np.mean([len(item) for item in real_sequences])/T
    intensityPoisson = IntensityHomogenuosPoisson(lambda0)
    fake_sequences = generate_sample(intensityPoisson, T, 2000)
    pickle.dump([real_sequences,fake_sequences],open(FILE_NAME,'wb'))
else:
    real_sequences,fake_sequences = pickle.load(open(FILE_NAME,'rb'))
# Re-load so both branches end up with the pickled copy of the sequences.
real_sequences,_ = pickle.load(open(FILE_NAME,'rb'))
print ((np.mean([len(item) for item in real_sequences])/T),((np.mean([len(item) for item in fake_sequences])/T)))
if not REAL_DATA:
    real_sequences = real_sequences[:SEQ_NUM]
real_iterator = PaddedDataIterator(real_sequences,T,MARK,D_DIFF)
K= 3
#should add more modal
# Mixture-of-Gaussians intensity model: only the K mixture weights `coef` are
# trained; the kernel centers and (unit) standard deviations stay fixed.
coef = tf.Variable(tf.random_uniform([K], 0, 1, tf.float32),name='coef')
center = tf.constant([2.7,7.3,11.5], tf.float32) #[1.0,3.0,5.0,7.0,9.0,11.0,14.0] np.arange(1,14.1,13.0/(K-1))
std = tf.constant(np.ones([K]), tf.float32)
data = tf.placeholder(tf.float32, [BATCH_SIZE,None])
seqlen = tf.placeholder(tf.int32, [BATCH_SIZE])
tend = tf.constant(np.ones([BATCH_SIZE])*T, tf.float32)
tstart = tf.constant(np.zeros([BATCH_SIZE]), tf.float32)
# Row i of the lower-triangular matrix has (i+1) leading ones; gathering row
# (seqlen-1) yields a mask that zeroes out the padded tail of each sequence.
lower_triangular_ones = tf.constant(np.tril(np.ones([MAX_STEPS,MAX_STEPS])),dtype=tf.float32)
seqlen_mask = tf.slice(tf.gather(lower_triangular_ones, seqlen - 1),[0, 0], tf.shape(data))
dist_list = []
for i_ in range(K):
    dist_list.append( tf.distributions.Normal(center[i_], std[i_]) )
mul_intens = 0
int_intens = 0
for i_ in range(K):
    mul_intens += coef[i_]*dist_list[i_].prob(data)
    int_intens += coef[i_]*(dist_list[i_].cdf(tend)-dist_list[i_].cdf(tstart))
# Point-process log-likelihood: sum of log-intensities at the observed events
# minus the integrated intensity over [tstart, tend].
loglikeylihood = tf.reduce_sum( (tf.log( mul_intens ))*seqlen_mask, axis=1)
loglikeylihood -= int_intens
# Minimise the negative mean log-likelihood.
loglikeylihood = - tf.reduce_mean(loglikeylihood)
train_variables = tf.trainable_variables()
# BUGFIX: in Python 3, map() is lazy — materialise it so the variable names
# are actually printed instead of a bare map object.
print(list(map(lambda x: x.op.name, train_variables)))
trainable_variable = [v for v in train_variables if v.name.startswith("coef")]
train_op = tf.train.RMSPropOptimizer(learning_rate=2e-3).minimize(loglikeylihood, var_list=trainable_variable)
saved_file = "gaussian_{}_{}_{}_{}_{}_{}".format(DATA,SEQ_NUM,ITERATION,datetime.now().day,datetime.now().hour,datetime.now().minute)
if not os.path.exists('out/%s'%saved_file):
    os.makedirs('out/%s'%saved_file)
n_t = 30
ts_real, intensity_real = get_intensity(real_sequences, T, n_t)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0, allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
stop_indicator = False
last_value = 0#np.zeros([7])
for it in range(ITERS):
    real_batch = real_iterator.next_batch(BATCH_SIZE)
    loss,_,coef_ = sess.run([loglikeylihood,train_op,coef], feed_dict={ data:np.reshape(real_batch[0],real_batch[0].shape[:2]), seqlen:real_batch[1]})
    if it%1000==0:
        print ('Iter: {}; loss: {}; {} coef;{}'.format(it, loss, DATA, coef_))
    # Early stopping once the mixture weights stop moving between iterations.
    if np.max(np.abs(last_value-coef_))<1e-3:
        stop_indicator = True
    last_value = coef_
    if it%1000==0:
        # Periodically sample from the fitted model and compare its empirical
        # intensity against the real data.
        intensityGaussian = IntensitySumGaussianKernel(K,[2.7,7.3,11.5], np.ones([K]), coef_)
        generated_sequences = generate_sample(intensityGaussian, T, 256)
        ts_gen, intensity_gen = get_intensity(generated_sequences, T, n_t)
        deviation = np.linalg.norm(intensity_gen-intensity_real)/np.linalg.norm(intensity_real)
        # can use correlation or other metric
        print ('Iter: {}; deviation: {}'.format(it,deviation))
        plt.plot(ts_real,intensity_real, label='real')
        plt.plot(ts_gen, intensity_gen, label='generated')
        plt.legend(loc=1)
        plt.xlabel('time')
        plt.ylabel('intensity')
        plt.savefig('out/{}/{}_{}.png'
                    .format(saved_file,str(it).zfill(3),deviation), bbox_inches='tight')
        plt.close()
        if not REAL_DATA and DATA!="rmtpp":
            # Goodness-of-fit: QQ plot of the integrated-intensity values
            # against the exponential distribution (presumably the
            # time-rescaling check — confirm against get_integral).
            integral_intensity = get_integral(generated_sequences, DATA)
            integral_intensity = np.asarray(integral_intensity)
            fig = plt.figure()
            left = -1.8 #x coordinate for text insert
            ax1 = fig.add_subplot(1,2,1)
            fig = sm.qqplot(integral_intensity, stats.expon, distargs=(), loc=0, scale=1,line='45',ax=ax1)
            plt.grid()
            ax2 = fig.add_subplot(1,2,2)
            top = ax2.get_ylim()[1] * 0.75
            res,slope_intercept = stats.probplot(integral_intensity, dist=stats.expon, plot=ax2)
            txt = ax2.text(left, top, "{}_{}".format(slope_intercept[0],slope_intercept[1]),verticalalignment='top')
            plt.grid()
            fig.savefig('out/{}/{}.png'.format(saved_file,it))
            plt.close()
    if it==ITERS-1 or stop_indicator:
        # Final export: draw a large sample from the fitted model and persist it.
        intensityGaussian = IntensitySumGaussianKernel(K,[2.7,7.3,11.5], np.ones([K]), coef_)
        generated_sequences = generate_sample(intensityGaussian, T, 2000)
        sequence2file(generated_sequences, 'gaussian_solver_{}_{}_{}'.format(DATA,SEQ_NUM,ITERATION))
        break
| [
"Utils.file2sequence",
"tensorflow.shape",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.linalg.norm",
"tensorflow.reduce_mean",
"tensorflow.set_random_seed",
"tensorflow.GPUOptions",
"tensorflow.log",
"BatchIterator.PaddedDataIterator",
"os.path.exists",
"Utils.sequence2file",
... | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (33, 40), False, 'import matplotlib\n'), ((1230, 1254), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['SEED'], {}), '(SEED)\n', (1248, 1254), True, 'import tensorflow as tf\n'), ((1255, 1275), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1269, 1275), True, 'import numpy as np\n'), ((2428, 2479), 'BatchIterator.PaddedDataIterator', 'PaddedDataIterator', (['real_sequences', 'T', 'MARK', 'D_DIFF'], {}), '(real_sequences, T, MARK, D_DIFF)\n', (2446, 2479), False, 'from BatchIterator import PaddedDataIterator\n'), ((2588, 2629), 'tensorflow.constant', 'tf.constant', (['[2.7, 7.3, 11.5]', 'tf.float32'], {}), '([2.7, 7.3, 11.5], tf.float32)\n', (2599, 2629), True, 'import tensorflow as tf\n'), ((2743, 2789), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[BATCH_SIZE, None]'], {}), '(tf.float32, [BATCH_SIZE, None])\n', (2757, 2789), True, 'import tensorflow as tf\n'), ((2798, 2836), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[BATCH_SIZE]'], {}), '(tf.int32, [BATCH_SIZE])\n', (2812, 2836), True, 'import tensorflow as tf\n'), ((4181, 4205), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4203, 4205), True, 'import tensorflow as tf\n'), ((4698, 4735), 'Plotter.get_intensity', 'get_intensity', (['real_sequences', 'T', 'n_t'], {}), '(real_sequences, T, n_t)\n', (4711, 4735), False, 'from Plotter import get_intensity, get_integral, get_integral_empirical\n'), ((4751, 4820), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(1.0)', 'allow_growth': '(True)'}), '(per_process_gpu_memory_fraction=1.0, allow_growth=True)\n', (4764, 4820), True, 'import tensorflow as tf\n'), ((1429, 1454), 'os.path.isfile', 'os.path.isfile', (['FILE_NAME'], {}), '(FILE_NAME)\n', (1443, 1454), False, 'import abc, os, pickle, sys\n'), ((2525, 2565), 'tensorflow.random_uniform', 
'tf.random_uniform', (['[K]', '(0)', '(1)', 'tf.float32'], {}), '([K], 0, 1, tf.float32)\n', (2542, 2565), True, 'import tensorflow as tf\n'), ((2708, 2720), 'numpy.ones', 'np.ones', (['[K]'], {}), '([K])\n', (2715, 2720), True, 'import numpy as np\n'), ((2914, 2936), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE]'], {}), '([BATCH_SIZE])\n', (2922, 2936), True, 'import numpy as np\n'), ((3068, 3112), 'tensorflow.gather', 'tf.gather', (['lower_triangular_ones', '(seqlen - 1)'], {}), '(lower_triangular_ones, seqlen - 1)\n', (3077, 3112), True, 'import tensorflow as tf\n'), ((3121, 3135), 'tensorflow.shape', 'tf.shape', (['data'], {}), '(data)\n', (3129, 3135), True, 'import tensorflow as tf\n'), ((4131, 4161), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loglikeylihood'], {}), '(loglikeylihood)\n', (4145, 4161), True, 'import tensorflow as tf\n'), ((4587, 4624), 'os.path.exists', 'os.path.exists', (["('out/%s' % saved_file)"], {}), "('out/%s' % saved_file)\n", (4601, 4624), False, 'import abc, os, pickle, sys\n'), ((4628, 4662), 'os.makedirs', 'os.makedirs', (["('out/%s' % saved_file)"], {}), "('out/%s' % saved_file)\n", (4639, 4662), False, 'import abc, os, pickle, sys\n'), ((4923, 4956), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4954, 4956), True, 'import tensorflow as tf\n'), ((1743, 1784), 'Utils.sequence2file', 'sequence2file', (['real_sequences', '"""gaussian"""'], {}), "(real_sequences, 'gaussian')\n", (1756, 1784), False, 'from Utils import sequence_filter, lambda_estimation, file2sequence, sequence2file\n'), ((1819, 1838), 'Utils.file2sequence', 'file2sequence', (['DATA'], {}), '(DATA)\n', (1832, 1838), False, 'from Utils import sequence_filter, lambda_estimation, file2sequence, sequence2file\n'), ((2856, 2877), 'numpy.ones', 'np.ones', (['[BATCH_SIZE]'], {}), '([BATCH_SIZE])\n', (2863, 2877), True, 'import numpy as np\n'), ((2995, 3026), 'numpy.ones', 'np.ones', (['[MAX_STEPS, MAX_STEPS]'], {}), 
'([MAX_STEPS, MAX_STEPS])\n', (3002, 3026), True, 'import numpy as np\n'), ((3194, 3238), 'tensorflow.distributions.Normal', 'tf.distributions.Normal', (['center[i_]', 'std[i_]'], {}), '(center[i_], std[i_])\n', (3217, 3238), True, 'import tensorflow as tf\n'), ((3456, 3474), 'tensorflow.log', 'tf.log', (['mul_intens'], {}), '(mul_intens)\n', (3462, 3474), True, 'import tensorflow as tf\n'), ((4345, 4391), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.002)'}), '(learning_rate=0.002)\n', (4370, 4391), True, 'import tensorflow as tf\n'), ((4518, 4532), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4530, 4532), False, 'from datetime import datetime\n'), ((4537, 4551), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4549, 4551), False, 'from datetime import datetime\n'), ((4557, 4571), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4569, 4571), False, 'from datetime import datetime\n'), ((4846, 4912), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'gpu_options': 'gpu_options'}), '(allow_soft_placement=True, gpu_options=gpu_options)\n', (4860, 4912), True, 'import tensorflow as tf\n'), ((5679, 5721), 'Plotter.get_intensity', 'get_intensity', (['generated_sequences', 'T', 'n_t'], {}), '(generated_sequences, T, n_t)\n', (5692, 5721), False, 'from Plotter import get_intensity, get_integral, get_integral_empirical\n'), ((5936, 5983), 'matplotlib.pyplot.plot', 'plt.plot', (['ts_real', 'intensity_real'], {'label': '"""real"""'}), "(ts_real, intensity_real, label='real')\n", (5944, 5983), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6041), 'matplotlib.pyplot.plot', 'plt.plot', (['ts_gen', 'intensity_gen'], {'label': '"""generated"""'}), "(ts_gen, intensity_gen, label='generated')\n", (5999, 6041), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6067), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (6060, 6067), 
True, 'import matplotlib.pyplot as plt\n'), ((6076, 6094), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (6086, 6094), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""intensity"""'], {}), "('intensity')\n", (6113, 6126), True, 'import matplotlib.pyplot as plt\n'), ((6263, 6274), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6272, 6274), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5555), 'numpy.ones', 'np.ones', (['[K]'], {}), '([K])\n', (5550, 5555), True, 'import numpy as np\n'), ((5742, 5788), 'numpy.linalg.norm', 'np.linalg.norm', (['(intensity_gen - intensity_real)'], {}), '(intensity_gen - intensity_real)\n', (5756, 5788), True, 'import numpy as np\n'), ((5787, 5817), 'numpy.linalg.norm', 'np.linalg.norm', (['intensity_real'], {}), '(intensity_real)\n', (5801, 5817), True, 'import numpy as np\n'), ((6361, 6400), 'Plotter.get_integral', 'get_integral', (['generated_sequences', 'DATA'], {}), '(generated_sequences, DATA)\n', (6373, 6400), False, 'from Plotter import get_intensity, get_integral, get_integral_empirical\n'), ((6434, 6464), 'numpy.asarray', 'np.asarray', (['integral_intensity'], {}), '(integral_intensity)\n', (6444, 6464), True, 'import numpy as np\n'), ((6483, 6495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6493, 6495), True, 'import matplotlib.pyplot as plt\n'), ((6611, 6705), 'statsmodels.api.qqplot', 'sm.qqplot', (['integral_intensity', 'stats.expon'], {'distargs': '()', 'loc': '(0)', 'scale': '(1)', 'line': '"""45"""', 'ax': 'ax1'}), "(integral_intensity, stats.expon, distargs=(), loc=0, scale=1,\n line='45', ax=ax1)\n", (6620, 6705), True, 'import statsmodels.api as sm\n'), ((6712, 6722), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6720, 6722), True, 'import matplotlib.pyplot as plt\n'), ((6841, 6903), 'scipy.stats.probplot', 'stats.probplot', (['integral_intensity'], {'dist': 'stats.expon', 'plot': 
'ax2'}), '(integral_intensity, dist=stats.expon, plot=ax2)\n', (6855, 6903), True, 'import scipy.stats as stats\n'), ((7033, 7043), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7041, 7043), True, 'import matplotlib.pyplot as plt\n'), ((7119, 7130), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7128, 7130), True, 'import matplotlib.pyplot as plt\n'), ((7247, 7259), 'numpy.ones', 'np.ones', (['[K]'], {}), '([K])\n', (7254, 7259), True, 'import numpy as np\n'), ((5167, 5217), 'numpy.reshape', 'np.reshape', (['real_batch[0]', 'real_batch[0].shape[:2]'], {}), '(real_batch[0], real_batch[0].shape[:2])\n', (5177, 5217), True, 'import numpy as np\n'), ((5357, 5383), 'numpy.abs', 'np.abs', (['(last_value - coef_)'], {}), '(last_value - coef_)\n', (5363, 5383), True, 'import numpy as np\n')] |
import math
import cv2
import numpy as np
import tensorflow as tf
from scipy import ndimage
import os
from MNIST_data import input_data
class Recognizer:
    """Train a softmax-regression digit classifier on MNIST (TensorFlow 1.x)
    and classify user-supplied digit images after MNIST-style preprocessing."""
    def __init__(self):
        pass
    @staticmethod
    def shift(img, sx, sy):
        """Translate a 2-D grayscale image by (sx, sy) pixels via an affine warp."""
        rows, cols = img.shape
        M = np.float32([[1, 0, sx], [0, 1, sy]])
        shifted = cv2.warpAffine(img, M, (cols, rows))
        return shifted
    @staticmethod
    def getBestShift(img):
        """Return the integer (shiftx, shifty) that moves the image's center of
        mass to the geometric center of the frame."""
        cy, cx = ndimage.measurements.center_of_mass(img)
        rows, cols = img.shape
        shiftx = np.round(cols / 2.0 - cx).astype(int)
        shifty = np.round(rows / 2.0 - cy).astype(int)
        return shiftx, shifty
    def TrainRecognizer(self):
        """Build and train a single-layer softmax classifier on MNIST.

        Side effects: reads/downloads MNIST under MNIST_data/, and stores the
        placeholders (self.x, self.y_), the prediction tensor (self.y) and a
        live tf.Session (self.sess) for later use by TestRecognizer.
        """
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        self.x = tf.placeholder("float", [None, 784])
        # we need our weights for our neural net
        W = tf.Variable(tf.zeros([784, 10]))
        # and the biases
        b = tf.Variable(tf.zeros([10]))
        self.y = tf.nn.softmax(tf.matmul(self.x, W) + b)
        self.y_ = tf.placeholder("float", [None, 10])
        # Cross-entropy between the one-hot labels and the softmax output.
        cross_entropy = -tf.reduce_sum(self.y_ * tf.log(self.y))
        train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
        # NOTE(review): initialize_all_variables is the deprecated TF1 spelling
        # of global_variables_initializer; kept for compatibility.
        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)
        # use 1000 batches with a size of 100 each to train our network
        for i in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            # run the train_step function with the given image values (x) and the real output (y_)
            self.sess.run(train_step, feed_dict={self.x: batch_xs, self.y_: batch_ys})
        correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # print('Probability: ' + str(self.sess.run(self.accuracy,
        # feed_dict={self.x: mnist.test.images, self.y_: mnist.test.labels})))
    def TestRecognizer(self, directory, images_list):
        """Preprocess each image in `images_list` into MNIST format and return
        the predicted digit for each one.

        WARNING: every input file is DELETED from `directory` after reading.
        Preprocessing pipeline: invert to a black background, Otsu-threshold,
        crop empty borders, scale the digit to fit 20x20, pad to 28x28, then
        recentre by center of mass (mirrors the MNIST preparation).
        Requires TrainRecognizer to have been called first (uses self.sess).
        """
        # create an array where we can store our 4 pictures
        images = np.zeros((len(images_list), 784))
        # and the correct values
        correct_vals = np.zeros((len(images_list), 10))
        index = 0
        # we want to test our images which you saw at the top of this page
        for no in images_list:
            # read the image
            gray = cv2.imread(directory + '\\' + no, cv2.IMREAD_GRAYSCALE)
            os.remove(directory + '\\' + no)
            # resize the images and invert it (black background)
            gray = cv2.resize(255 - gray, (28, 28))
            (thresh, gray) = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
            # Strip all-zero rows/columns so only the digit's bounding box remains.
            while np.sum(gray[0]) == 0:
                gray = gray[1:]
            while np.sum(gray[:, 0]) == 0:
                gray = np.delete(gray, 0, 1)
            while np.sum(gray[-1]) == 0:
                gray = gray[:-1]
            while np.sum(gray[:, -1]) == 0:
                gray = np.delete(gray, -1, 1)
            rows, cols = gray.shape
            # Scale the larger dimension to 20 pixels, preserving aspect ratio.
            if rows > cols:
                factor = 20.0 / rows
                rows = 20
                cols = int(round(cols * factor))
                gray = cv2.resize(gray, (cols, rows))
            else:
                factor = 20.0 / cols
                cols = 20
                rows = int(round(rows * factor))
                gray = cv2.resize(gray, (cols, rows))
            # Zero-pad symmetrically back up to the 28x28 MNIST frame.
            colsPadding = (int(math.ceil((28 - cols) / 2.0)), int(math.floor((28 - cols) / 2.0)))
            rowsPadding = (int(math.ceil((28 - rows) / 2.0)), int(math.floor((28 - rows) / 2.0)))
            gray = np.lib.pad(gray, (rowsPadding, colsPadding), 'constant')
            shiftx, shifty = self.getBestShift(gray)
            shifted = self.shift(gray, shiftx, shifty)
            gray = shifted
            # save the processed images
            # cv2.imwrite(directory + '\\' + 'changed'+no, gray)
            """
            all images in the training set have an range from 0-1
            and not from 0-255 so we divide our flatten images
            (a one dimensional vector with our 784 pixels)
            to use the same 0-1 based range
            """
            flatten = gray.flatten() / 255.0
            """
            we need to store the flatten image and generate
            the correct_vals array
            correct_val for the first digit (9) would be
            [0,0,0,0,0,0,0,0,0,1]
            """
            images[index] = flatten
            # correct_val = np.zeros((10))
            # correct_val[no] = 1
            # correct_vals[index] = correct_val
            index += 1
        """
        the prediction will be an array with four values,
        which show the predicted number
        """
        prediction = tf.argmax(self.y, 1)
        """
        we want to run the prediction and the accuracy function
        using our generated arrays (images and correct_vals)
        """
        return self.sess.run(prediction, feed_dict={self.x: images})
        # print(self.sess.run(self.accuracy, feed_dict={self.x: images, self.y_: correct_vals}))
if __name__ == '__main__':
    # Train the MNIST classifier, then classify the sample image in Images/.
    digit_recognizer = Recognizer()
    digit_recognizer.TrainRecognizer()
    prediction = digit_recognizer.TestRecognizer('Images', ['5.png'])
    print(prediction)
| [
"math.floor",
"numpy.lib.pad",
"scipy.ndimage.measurements.center_of_mass",
"tensorflow.cast",
"tensorflow.log",
"os.remove",
"MNIST_data.input_data.read_data_sets",
"cv2.threshold",
"numpy.delete",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.matmul",
"numpy.round",
"tensor... | [((284, 320), 'numpy.float32', 'np.float32', (['[[1, 0, sx], [0, 1, sy]]'], {}), '([[1, 0, sx], [0, 1, sy]])\n', (294, 320), True, 'import numpy as np\n'), ((339, 375), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (353, 375), False, 'import cv2\n'), ((462, 502), 'scipy.ndimage.measurements.center_of_mass', 'ndimage.measurements.center_of_mass', (['img'], {}), '(img)\n', (497, 502), False, 'from scipy import ndimage\n'), ((722, 776), 'MNIST_data.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': '(True)'}), "('MNIST_data/', one_hot=True)\n", (747, 776), False, 'from MNIST_data import input_data\n'), ((794, 830), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 784]'], {}), "('float', [None, 784])\n", (808, 830), True, 'import tensorflow as tf\n'), ((1065, 1100), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 10]'], {}), "('float', [None, 10])\n", (1079, 1100), True, 'import tensorflow as tf\n'), ((1266, 1295), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1293, 1295), True, 'import tensorflow as tf\n'), ((1316, 1328), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1326, 1328), True, 'import tensorflow as tf\n'), ((4899, 4919), 'tensorflow.argmax', 'tf.argmax', (['self.y', '(1)'], {}), '(self.y, 1)\n', (4908, 4919), True, 'import tensorflow as tf\n'), ((904, 923), 'tensorflow.zeros', 'tf.zeros', (['[784, 10]'], {}), '([784, 10])\n', (912, 923), True, 'import tensorflow as tf\n'), ((974, 988), 'tensorflow.zeros', 'tf.zeros', (['[10]'], {}), '([10])\n', (982, 988), True, 'import tensorflow as tf\n'), ((1744, 1764), 'tensorflow.argmax', 'tf.argmax', (['self.y', '(1)'], {}), '(self.y, 1)\n', (1753, 1764), True, 'import tensorflow as tf\n'), ((1766, 1787), 'tensorflow.argmax', 'tf.argmax', (['self.y_', '(1)'], {}), '(self.y_, 1)\n', (1775, 1787), True, 'import tensorflow 
as tf\n'), ((1828, 1864), 'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (1835, 1864), True, 'import tensorflow as tf\n'), ((2481, 2536), 'cv2.imread', 'cv2.imread', (["(directory + '\\\\' + no)", 'cv2.IMREAD_GRAYSCALE'], {}), "(directory + '\\\\' + no, cv2.IMREAD_GRAYSCALE)\n", (2491, 2536), False, 'import cv2\n'), ((2549, 2581), 'os.remove', 'os.remove', (["(directory + '\\\\' + no)"], {}), "(directory + '\\\\' + no)\n", (2558, 2581), False, 'import os\n'), ((2666, 2698), 'cv2.resize', 'cv2.resize', (['(255 - gray)', '(28, 28)'], {}), '(255 - gray, (28, 28))\n', (2676, 2698), False, 'import cv2\n'), ((2728, 2794), 'cv2.threshold', 'cv2.threshold', (['gray', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2741, 2794), False, 'import cv2\n'), ((3748, 3804), 'numpy.lib.pad', 'np.lib.pad', (['gray', '(rowsPadding, colsPadding)', '"""constant"""'], {}), "(gray, (rowsPadding, colsPadding), 'constant')\n", (3758, 3804), True, 'import numpy as np\n'), ((551, 576), 'numpy.round', 'np.round', (['(cols / 2.0 - cx)'], {}), '(cols / 2.0 - cx)\n', (559, 576), True, 'import numpy as np\n'), ((606, 631), 'numpy.round', 'np.round', (['(rows / 2.0 - cy)'], {}), '(rows / 2.0 - cy)\n', (614, 631), True, 'import numpy as np\n'), ((1021, 1041), 'tensorflow.matmul', 'tf.matmul', (['self.x', 'W'], {}), '(self.x, W)\n', (1030, 1041), True, 'import tensorflow as tf\n'), ((1187, 1226), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (1220, 1226), True, 'import tensorflow as tf\n'), ((2813, 2828), 'numpy.sum', 'np.sum', (['gray[0]'], {}), '(gray[0])\n', (2819, 2828), True, 'import numpy as np\n'), ((2885, 2903), 'numpy.sum', 'np.sum', (['gray[:, 0]'], {}), '(gray[:, 0])\n', (2891, 2903), True, 'import numpy as np\n'), ((2933, 2954), 'numpy.delete', 'np.delete', (['gray', '(0)', '(1)'], {}), 
'(gray, 0, 1)\n', (2942, 2954), True, 'import numpy as np\n'), ((2973, 2989), 'numpy.sum', 'np.sum', (['gray[-1]'], {}), '(gray[-1])\n', (2979, 2989), True, 'import numpy as np\n'), ((3047, 3066), 'numpy.sum', 'np.sum', (['gray[:, -1]'], {}), '(gray[:, -1])\n', (3053, 3066), True, 'import numpy as np\n'), ((3096, 3118), 'numpy.delete', 'np.delete', (['gray', '(-1)', '(1)'], {}), '(gray, -1, 1)\n', (3105, 3118), True, 'import numpy as np\n'), ((3318, 3348), 'cv2.resize', 'cv2.resize', (['gray', '(cols, rows)'], {}), '(gray, (cols, rows))\n', (3328, 3348), False, 'import cv2\n'), ((3502, 3532), 'cv2.resize', 'cv2.resize', (['gray', '(cols, rows)'], {}), '(gray, (cols, rows))\n', (3512, 3532), False, 'import cv2\n'), ((1150, 1164), 'tensorflow.log', 'tf.log', (['self.y'], {}), '(self.y)\n', (1156, 1164), True, 'import tensorflow as tf\n'), ((3564, 3592), 'math.ceil', 'math.ceil', (['((28 - cols) / 2.0)'], {}), '((28 - cols) / 2.0)\n', (3573, 3592), False, 'import math\n'), ((3599, 3628), 'math.floor', 'math.floor', (['((28 - cols) / 2.0)'], {}), '((28 - cols) / 2.0)\n', (3609, 3628), False, 'import math\n'), ((3662, 3690), 'math.ceil', 'math.ceil', (['((28 - rows) / 2.0)'], {}), '((28 - rows) / 2.0)\n', (3671, 3690), False, 'import math\n'), ((3697, 3726), 'math.floor', 'math.floor', (['((28 - rows) / 2.0)'], {}), '((28 - rows) / 2.0)\n', (3707, 3726), False, 'import math\n')] |
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME>
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import vtk
import numpy
import os
#from ccpi.viewer import ViewerEventManager
from ccpi.viewer.CILViewer2D import ViewerEventManager
from ccpi.viewer.CILViewer2D import SLICE_ORIENTATION_XY, SLICE_ORIENTATION_XZ, \
SLICE_ORIENTATION_YZ, CONTROL_KEY, SHIFT_KEY, ALT_KEY, SLICE_ACTOR, \
OVERLAY_ACTOR, HISTOGRAM_ACTOR, HELP_ACTOR, CURSOR_ACTOR, CROSSHAIR_ACTOR,\
LINEPLOT_ACTOR
from ccpi.viewer.utils import colormaps
class CILInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
    def __init__(self, callback):
        """Bind this interactor style to the owning viewer (`callback`) and
        register the mouse-wheel, keyboard and left-button observers."""
        vtk.vtkInteractorStyleTrackballCamera.__init__(self)
        self._viewer = callback
        # Wheel events scroll the active slice; key presses drive the viewer
        # shortcuts (registered with priority 1.0).
        self.AddObserver('MouseWheelForwardEvent', self.mouseInteraction, 1.0)
        self.AddObserver('MouseWheelBackwardEvent', self.mouseInteraction, 1.0)
        self.AddObserver('KeyPressEvent', self.keyPress, 1.0)
        # Left button temporarily coarsens the decimation for faster interaction.
        self.AddObserver('LeftButtonPressEvent', self.OnLeftMouseClick)
        self.AddObserver('LeftButtonReleaseEvent', self.OnLeftMouseRelease)
        #self.AddObserver('RightButtonPressEvent', self.OnRightMousePress, -0.5)
        #self.AddObserver('RightButtonReleaseEvent', self.OnRightMouseRelease, -0.5)
    # Thin delegation layer: forward slice/camera state queries and updates to
    # the wrapped viewer so observer callbacks never touch it directly.
    def GetSliceOrientation(self):
        """Return the viewer's current slice orientation."""
        return self._viewer.sliceOrientation
    def GetDimensions(self):
        """Return the dimensions of the viewer's 3D image."""
        return self._viewer.img3D.GetDimensions()
    def GetActiveSlice(self):
        """Return the index of the currently displayed slice."""
        return self._viewer.getActiveSlice()
    def SetActiveSlice(self, sliceno):
        """Make `sliceno` the currently displayed slice."""
        self._viewer.setActiveSlice(sliceno)
    def UpdatePipeline(self, resetcamera=False):
        """Re-execute the viewer's rendering pipeline, optionally resetting the camera."""
        self._viewer.updatePipeline(resetcamera)
    def GetSliceActorNo(self):
        """Return the viewer's slice actor number."""
        return self._viewer.sliceActorNo
    def SetSliceOrientation(self, orientation):
        """Set the viewer's slice orientation (one of the SLICE_ORIENTATION_* constants)."""
        self._viewer.sliceOrientation = orientation
    def SetActiveCamera(self, camera):
        """Install `camera` as the active camera of the viewer's renderer."""
        self._viewer.ren.SetActiveCamera(camera)
    def Render(self):
        """Trigger a re-render of the viewer's render window."""
        self._viewer.renWin.Render()
    # Delegation to the underlying vtk interactor: keyboard state, modifier
    # keys and pointer position.
    def GetKeyCode(self):
        """Return the key code of the last keyboard event."""
        return self.GetInteractor().GetKeyCode()
    def SetKeyCode(self, keycode):
        """Overwrite the interactor's current key code."""
        self.GetInteractor().SetKeyCode(keycode)
    def GetControlKey(self):
        """Return the state of the Ctrl modifier."""
        return self.GetInteractor().GetControlKey()
    def GetShiftKey(self):
        """Return the state of the Shift modifier."""
        return self.GetInteractor().GetShiftKey()
    def GetAltKey(self):
        """Return the state of the Alt modifier."""
        return self.GetInteractor().GetAltKey()
    def GetEventPosition(self):
        """Return the (x, y) display position of the last event."""
        return self.GetInteractor().GetEventPosition()
    def GetActiveCamera(self):
        """Return the active camera of the viewer's renderer."""
        return self._viewer.ren.GetActiveCamera()
def SetDecimalisation(self, value):
decimate = self._viewer.decimate
decimate.SetTargetReduction(value)
if not decimate.GetInput() is None:
decimate.Update()
    # Delegation for the viewer's event-flag registry, window/level state and
    # actor visibility.
    def SetEventActive(self, event):
        """Mark the named viewer event as active."""
        self._viewer.event.On(event)
    def SetEventInactive(self, event):
        """Mark the named viewer event as inactive."""
        self._viewer.event.Off(event)
    def GetViewerEvent(self, event):
        """Return True if the named viewer event is currently active."""
        return self._viewer.event.isActive(event)
    def SetInitialLevel(self, level):
        """Store the initial window/level 'level' value on the viewer."""
        self._viewer.InitialLevel = level
    def GetInitialLevel(self):
        """Return the stored initial 'level' value."""
        return self._viewer.InitialLevel
    def SetInitialWindow(self, window):
        """Store the initial window/level 'window' value on the viewer."""
        self._viewer.InitialWindow = window
    def GetInitialWindow(self):
        """Return the stored initial 'window' value."""
        return self._viewer.InitialWindow
    def GetWindowLevel(self):
        """Return the viewer's window/level filter."""
        return self._viewer.wl
    def HideActor(self, actorno, delete=False):
        """Hide actor `actorno` in the viewer; optionally delete it."""
        self._viewer.hideActor(actorno, delete)
    def ShowActor(self, actorno):
        """Show actor `actorno` in the viewer."""
        self._viewer.showActor(actorno)
def mouseInteraction(self, interactor, event):
shift = interactor.GetShiftKey()
advance = 1
if shift:
advance = 10
if event == 'MouseWheelForwardEvent':
maxSlice = self._viewer.img3D.GetExtent()[self.GetSliceOrientation()*2+1]
# print (self.GetActiveSlice())
if (self.GetActiveSlice() + advance <= maxSlice):
self.SetActiveSlice(self.GetActiveSlice() + advance)
self.UpdatePipeline()
else:
minSlice = self._viewer.img3D.GetExtent()[self.GetSliceOrientation()*2]
if (self.GetActiveSlice() - advance >= minSlice):
self.SetActiveSlice(self.GetActiveSlice() - advance)
self.UpdatePipeline()
    def OnLeftMouseClick(self, interactor, event):
        """On left-button press: coarsen the decimation (faster interaction),
        then forward to the default trackball press handler."""
        self.SetDecimalisation(0.8)
        self.OnLeftButtonDown()
    def OnLeftMouseRelease(self, interactor, event):
        """On left-button release: restore full resolution and end the
        trackball interaction."""
        self.SetDecimalisation(0.0)
        self.OnLeftButtonUp()
def OnRightMousePress(self, interactor, event):
ctrl = interactor.GetControlKey()
alt = interactor.GetAltKey()
shift = interactor.GetShiftKey()
# print (alt, ctrl,shift)
if alt and not (ctrl and shift):
self.SetEventActive("WINDOW_LEVEL_EVENT")
if not (alt and ctrl and shift):
self.SetEventActive("ZOOM_EVENT")
def OnRightMouseRelease(self, interactor, event):
ctrl = interactor.GetControlKey()
alt = interactor.GetAltKey()
shift = interactor.GetShiftKey()
# print (alt, ctrl,shift)
if alt and not (ctrl and shift):
self.SetEventInactive("WINDOW_LEVEL_EVENT")
if not (alt and ctrl and shift):
self.SetEventInactive("ZOOM_EVENT")
def keyPress(self, interactor, event):
ctrl = interactor.GetControlKey()
shift = interactor.GetAltKey()
alt = interactor.GetShiftKey()
if interactor.GetKeyCode() == "x":
self.SetSliceOrientation( SLICE_ORIENTATION_YZ )
self.UpdatePipeline(resetcamera=True)
elif interactor.GetKeyCode() == "y":
self.SetSliceOrientation(SLICE_ORIENTATION_XZ)
self.UpdatePipeline(resetcamera=True)
elif interactor.GetKeyCode() == "z":
self.SetSliceOrientation(SLICE_ORIENTATION_XY)
self.UpdatePipeline(resetcamera=True)
elif interactor.GetKeyCode() == "a":
# reset color/window
cmin, cmax = self._viewer.ia.GetAutoRange()
# probably the level could be the median of the image within
# the percintiles
level = self._viewer.ia.GetMedian()
# accommodates all values between the level an the percentiles
window = 2*max(abs(level-cmin),abs(level-cmax))
self.SetInitialLevel( level )
self.SetInitialWindow( window )
self.GetWindowLevel().SetLevel(self.GetInitialLevel())
self.GetWindowLevel().SetWindow(self.GetInitialWindow())
self.GetWindowLevel().Update()
self.Render()
elif ctrl and not (alt and shift):
# CREATE ROI
position = interactor.GetEventPosition()
print ("3D VIEWER MOUSE POSITION", position)
elif alt and not (shift and ctrl):
# DELETE ROI
print ("DELETE ROI")
elif interactor.GetKeyCode() == "h":
self.DisplayHelp()
elif interactor.GetKeyCode() == "r":
filename = "current_render"
self.SaveRender(filename)
elif interactor.GetKeyCode() == "v":
# toggle visibility of the volume render
if not self._viewer.volume_render_initialised:
self._viewer.installVolumeRenderActorPipeline()
if self._viewer.volume.GetVisibility():
self._viewer.volume.VisibilityOff()
else:
self._viewer.volume.VisibilityOn()
self._viewer.updatePipeline()
elif interactor.GetKeyCode() == "s":
# toggle visibility of the slice
if self._viewer.sliceActor.GetVisibility():
self._viewer.sliceActor.VisibilityOff()
else:
self._viewer.sliceActor.VisibilityOn()
self._viewer.updatePipeline()
elif interactor.GetKeyCode() == "i":
# toggle interpolation of slice actor
is_interpolated = self._viewer.sliceActor.GetInterpolate()
self._viewer.sliceActor.SetInterpolate(not is_interpolated)
else:
print("Unhandled event %s" % interactor.GetKeyCode())
    def DisplayHelp(self):
        """Toggle an on-screen overlay listing the mouse/keyboard bindings."""
        help_actor = self._viewer.helpActor
        slice_actor = self._viewer.sliceActor
        # Second invocation: hide the help, restore the slice and actor 1.
        if help_actor.GetVisibility():
            help_actor.VisibilityOff()
            slice_actor.VisibilityOn()
            self.ShowActor(1)
            self.Render()
            return
        font_size = 24
        # Create the text mappers and the associated Actor2Ds.
        # The font and text properties (except justification) are the same for
        # each multi line mapper. Let's create a common text property object
        multiLineTextProp = vtk.vtkTextProperty()
        multiLineTextProp.SetFontSize(font_size)
        multiLineTextProp.SetFontFamilyToArial()
        multiLineTextProp.BoldOn()
        multiLineTextProp.ItalicOn()
        multiLineTextProp.ShadowOn()
        multiLineTextProp.SetLineSpacing(1.3)
        # The text is on multiple lines and center-justified (both horizontal and
        # vertical).
        textMapperC = vtk.vtkTextMapper()
        textMapperC.SetInput("Mouse Interactions:\n"
                             "\n"
                             " - Slice: Mouse Scroll\n"
                             " - Zoom: Right Mouse + Move Up/Down\n"
                             " - Pan: Middle Mouse Button + Move or Shift + Left Mouse + Move\n"
                             " - Adjust Camera: Left Mouse + Move\n"
                             " - Rotate: Ctrl + Left Mouse + Move\n"
                             "\n"
                             "Keyboard Interactions:\n"
                             "\n"
                             " - YZ Plane: x\n"
                             " - XZ Plane: y\n"
                             " - XY Plane: z\n"
                             " - Save render to current_render.png: r\n"
                             " - Toggle visibility of volume render: v\n"
                             " - Toggle visibility of slice: s\n"
                             " - Whole image Auto Window/Level: a\n"
                             )
        tprop = textMapperC.GetTextProperty()
        tprop.ShallowCopy(multiLineTextProp)
        tprop.SetJustificationToLeft()
        tprop.SetVerticalJustificationToCentered()
        tprop.SetColor(0, 1, 0)
        help_actor.SetMapper(textMapperC)
        help_actor.VisibilityOn()
        # hide the slice (and actor 1) so the help text is readable
        slice_actor.VisibilityOff()
        self.HideActor(1)
        self.Render()
    def SaveRender(self, filename):
        """Save the current render window to <filename>.png via the viewer."""
        self._viewer.saveRender(filename)
class CILViewer():
    '''Simple 3D Viewer based on VTK classes'''
    def __init__(self, dimx=600, dimy=600, renWin=None, iren=None, ren=None, debug=False):
        '''creates the rendering pipeline

        Any of renWin/iren/ren may be supplied by the caller (e.g. when
        embedding in a GUI toolkit); the missing ones are created here.
        NOTE(review): `debug` is accepted but never used in this method.
        '''
        # Handle arguments
        if renWin is not None:
            self.renWin = renWin
        else:
            self.renWin = vtk.vtkRenderWindow()
        if iren is not None:
            self.iren = iren
        else:
            self.iren = vtk.vtkRenderWindowInteractor()
        # create a rendering window and renderer
        if ren is not None:
            self.ren = ren
        else:
            self.ren = vtk.vtkRenderer()
        self.renWin.SetSize(dimx, dimy)
        self.renWin.AddRenderer(self.ren)
        # img 3D as slice
        self.img3D = None
        self.slicenos = [0, 0, 0]  # current slice index per orientation
        self.sliceOrientation = SLICE_ORIENTATION_XY
        self.sliceActor = vtk.vtkImageActor()
        self.voi = vtk.vtkExtractVOI()  # extracts the currently displayed slice
        self.wl = vtk.vtkImageMapToWindowLevelColors()
        self.ia = vtk.vtkImageHistogramStatistics()  # auto window/level statistics
        self.sliceActorNo = 0
        # Viewer Event manager
        self.event = ViewerEventManager()
        # create a renderwindowinteractor
        self.style = CILInteractorStyle(self)
        self.iren.SetInteractorStyle(self.style)
        self.iren.SetRenderWindow(self.renWin)
        # Render decimation (used by createPolyDataActor)
        self.decimate = vtk.vtkDecimatePro()
        self.ren.SetBackground(.1, .2, .4)
        self.actors = {}  # actorno -> [actor, visible?]
        # Help text overlay (hidden until toggled by the 'h' key)
        self.helpActor = vtk.vtkActor2D()
        self.helpActor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
        self.helpActor.GetPositionCoordinate().SetValue(0.1, 0.5)
        self.helpActor.VisibilityOff()
        self.ren.AddActor(self.helpActor)
        # volume render
        volumeMapper = vtk.vtkSmartVolumeMapper()
        #volumeMapper = vtk.vtkFixedPointVolumeRayCastMapper()
        self.volume_mapper = volumeMapper
        volumeProperty = vtk.vtkVolumeProperty()
        self.volume_property = volumeProperty
        # The volume holds the mapper and the property and
        # can be used to position/orient the volume.
        volume = vtk.vtkVolume()
        volume.SetMapper(volumeMapper)
        volume.SetProperty(volumeProperty)
        self.volume = volume
        self.volume_render_initialised = False  # pipeline is built lazily
        # axis orientation widget
        om = vtk.vtkAxesActor()
        ori = vtk.vtkOrientationMarkerWidget()
        ori.SetOutlineColor(0.9300, 0.5700, 0.1300)
        ori.SetInteractor(self.iren)
        ori.SetOrientationMarker(om)
        ori.SetViewport(0.0, 0.0, 0.4, 0.4)
        ori.SetEnabled(1)
        ori.InteractiveOff()
        self.orientation_marker = ori
        self.iren.Initialize()
    def getRenderer(self):
        '''returns the renderer'''
        return self.ren
    def GetSliceOrientation(self):
        # one of SLICE_ORIENTATION_XY / _XZ / _YZ
        return self.sliceOrientation
    def getActiveSlice(self):
        # slice index currently shown for the active orientation
        return self.slicenos[self.GetSliceOrientation()]
    def setActiveSlice(self, sliceno):
        # remember the slice index for the active orientation
        self.slicenos[self.GetSliceOrientation()] = sliceno
    def getRenderWindow(self):
        '''returns the render window'''
        return self.renWin
    def getInteractor(self):
        '''returns the render window interactor'''
        return self.iren
    def getCamera(self):
        '''returns the active camera'''
        return self.ren.GetActiveCamera()
    def getColourWindow(self):
        # current colour window of the window/level filter
        return self.wl.GetWindow()
    def getColourLevel(self):
        # current colour level of the window/level filter
        return self.wl.GetLevel()
    def createPolyDataActor(self, polydata):
        '''returns an actor for a given polydata'''
        # route the polydata through the decimator so the level of detail
        # can be changed later; 0.0 = no reduction by default
        self.decimate.SetInputData(polydata)
        self.decimate.SetTargetReduction(0.0)
        self.decimate.Update()
        mapper = vtk.vtkPolyDataMapper()
        if vtk.VTK_MAJOR_VERSION <= 5:
            # NOTE(review): the VTK 5 branch wires the raw polydata directly,
            # bypassing the decimator — confirm this is intentional.
            mapper.SetInput(polydata)
        else:
            mapper.SetInputConnection(self.decimate.GetOutputPort())
        # actor
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        #actor.GetProperty().SetOpacity(0.8)
        return actor
    def setPolyDataActor(self, actor):
        '''displays the given polydata'''
        # replace whatever actor was registered in slot 1 and re-render
        self.hideActor(1, delete=True)
        self.ren.AddActor(actor)
        self.actors[len(self.actors) + 1] = [actor, True]
        self.iren.Initialize()
        self.renWin.Render()
    def displayPolyData(self, polydata):
        # convenience wrapper: create the actor and show it in one call
        self.setPolyDataActor(self.createPolyDataActor(polydata))
def hideActor(self, actorno, delete=False):
'''Hides an actor identified by its number in the list of actors'''
try:
if self.actors[actorno][1]:
self.ren.RemoveActor(self.actors[actorno][0])
self.actors[actorno][1] = False
if delete:
self.actors = {}
self.renWin.Render()
except KeyError as ke:
print ("Warning Actor not present")
def showActor(self, actorno, actor = None):
'''Shows hidden actor identified by its number in the list of actors'''
try:
if not self.actors[actorno][1]:
self.ren.AddActor(self.actors[actorno][0])
self.actors[actorno][1] = True
return actorno
except KeyError as ke:
# adds it to the actors if not there already
if actor != None:
self.ren.AddActor(actor)
self.actors[len(self.actors)+1] = [actor, True]
return len(self.actors)
    def addActor(self, actor):
        '''Adds an actor to the render'''
        # slot 0 is never registered, so showActor falls through to
        # registering `actor` under a fresh number
        return self.showActor(0, actor)
    def startRenderLoop(self):
        # hand control to the VTK interactor event loop (blocking)
        self.iren.Start()
    def setInput3DData(self, imageData):
        # attach a vtkImageData volume and (re)build the visualisation pipeline
        self.img3D = imageData
        self.installPipeline()
    def setInputData(self, imageData):
        '''alias of setInput3DData'''
        return self.setInput3DData(imageData)
    def setInputAsNumpy(self, numpyarray):
        # Convert a 3D numpy array into a VTK_UNSIGNED_SHORT vtkImageData and
        # store it as the viewer input (does not install the pipeline).
        if (len(numpy.shape(numpyarray)) == 3):
            doubleImg = vtk.vtkImageData()
            shape = numpy.shape(numpyarray)
            doubleImg.SetDimensions(shape[0], shape[1], shape[2])
            doubleImg.SetOrigin(0, 0, 0)
            doubleImg.SetSpacing(1, 1, 1)
            doubleImg.SetExtent(0, shape[0] - 1, 0, shape[1] - 1, 0, shape[2] - 1)
            doubleImg.AllocateScalars(vtk.VTK_DOUBLE, 1)
            # NOTE(review): this triple Python loop is O(nx*ny*nz) and very
            # slow for real volumes; vtk.util.numpy_support.numpy_to_vtk
            # would do the copy in one call.
            for i in range(shape[0]):
                for j in range(shape[1]):
                    for k in range(shape[2]):
                        doubleImg.SetScalarComponentFromDouble(
                            i, j, k, 0, numpyarray[i][j][k])
            # rescale to appropriate VTK_UNSIGNED_SHORT
            stats = vtk.vtkImageAccumulate()
            stats.SetInputData(doubleImg)
            stats.Update()
            iMin = stats.GetMin()[0]
            iMax = stats.GetMax()[0]
            scale = vtk.VTK_UNSIGNED_SHORT_MAX / (iMax - iMin)
            shiftScaler = vtk.vtkImageShiftScale()
            shiftScaler.SetInputData(doubleImg)
            shiftScaler.SetScale(scale)
            # NOTE(review): vtkImageShiftScale computes (value + shift) * scale,
            # so mapping [iMin, iMax] onto [0, USHRT_MAX] would need
            # SetShift(-iMin) — verify the intended sign here.
            shiftScaler.SetShift(iMin)
            shiftScaler.SetOutputScalarType(vtk.VTK_UNSIGNED_SHORT)
            shiftScaler.Update()
            self.img3D = shiftScaler.GetOutput()
    def installPipeline(self):
        # Reset the viewer when loading a new data source
        try:
            N = self.ren.GetActors().GetNumberOfItems()
            i = 0
            while i < N:
                # NOTE(review): GetActors() is re-fetched every iteration and
                # InitTraversal() is never called; whether GetNextActor()
                # walks the collection correctly here depends on the
                # collection's internal iterator state — verify.
                actor = self.ren.GetActors().GetNextActor()
                self.ren.RemoveActor(actor)
                i += 1
        except TypeError as te:
            print (te)
            print (self.ren.GetActors())
        self.installSliceActorPipeline()
        # self.installVolumeRenderActorPipeline()
        self.ren.ResetCamera()
        self.ren.Render()
        self.adjustCamera()
        self.iren.Initialize()
        self.renWin.Render()
    def installVolumeRenderActorPipeline(self):
        # Build the volume-render branch: colour and opacity transfer
        # functions derived from the image histogram percentiles.
        self.volume_mapper.SetInputData(self.img3D)
        ia = vtk.vtkImageHistogramStatistics()
        ia.SetInputData(self.img3D)
        ia.SetAutoRangePercentiles(90., 99.)
        ia.Update()
        cmin, cmax = ia.GetAutoRange()
        print ("viewer: cmin cmax", cmin, cmax)
        # cmin, cmax = (1000,2000)
        # probably the level could be the median of the image within
        # the percentiles
        median = ia.GetMedian()
        # accommodates all values between the level and the percentiles
        #window = 2*max(abs(median-cmin),abs(median-cmax))
        # NOTE(review): `median` and `window` are computed but never used below.
        window = cmax - cmin
        viridis = colormaps.CILColorMaps.get_color_transfer_function('viridis', (cmin, cmax))
        x = numpy.linspace(ia.GetMinimum(), ia.GetMaximum(), num=255)
        scaling = 0.1
        opacity = colormaps.CILColorMaps.get_opacity_transfer_function(x,
            colormaps.relu, cmin, cmax, scaling)
        self.volume_property.SetColor(viridis)
        self.volume_property.SetScalarOpacity(opacity)
        self.volume_property.ShadeOn()
        self.volume_property.SetInterpolationTypeToLinear()
        self.ren.AddVolume(self.volume)
        self.volume_colormap_limits = (cmin, cmax)
        self.volume_render_initialised = True
        # start hidden; visibility is toggled elsewhere
        self.volume.VisibilityOff()
    def installSliceActorPipeline(self):
        # Build the 2D slice branch: extract a single slice (VOI), derive an
        # auto window/level from its histogram, display via an image actor.
        self.voi.SetInputData(self.img3D)
        extent = [i for i in self.img3D.GetExtent()]
        # start in the middle slice of every orientation
        for i in range(len(self.slicenos)):
            self.slicenos[i] = round((extent[i * 2 + 1] + extent[i * 2]) / 2)
        extent[self.sliceOrientation * 2] = self.getActiveSlice()
        extent[self.sliceOrientation * 2 + 1] = self.getActiveSlice()
        self.voi.SetVOI(extent[0], extent[1],
                        extent[2], extent[3],
                        extent[4], extent[5])
        self.voi.Update()
        self.ia.SetInputData(self.voi.GetOutput())
        self.ia.SetAutoRangePercentiles(1.0, 99.)
        self.ia.Update()
        cmin, cmax = self.ia.GetAutoRange()
        # probably the level could be the median of the image within
        # the percentiles
        level = self.ia.GetMedian()
        # accommodates all values between the level and the percentiles
        window = 2 * max(abs(level - cmin), abs(level - cmax))
        self.InitialLevel = level
        self.InitialWindow = window
        self.wl.SetLevel(self.InitialLevel)
        self.wl.SetWindow(self.InitialWindow)
        self.wl.SetInputData(self.voi.GetOutput())
        self.wl.Update()
        self.sliceActor.SetInputData(self.wl.GetOutput())
        self.sliceActor.SetDisplayExtent(extent[0], extent[1],
                                         extent[2], extent[3],
                                         extent[4], extent[5])
        # 0.99 rather than fully opaque — presumably to allow blending with
        # other props; confirm
        self.sliceActor.GetProperty().SetOpacity(0.99)
        self.sliceActor.Update()
        self.sliceActor.SetInterpolate(False)
        self.ren.AddActor(self.sliceActor)
    def updatePipeline(self, resetcamera=False):
        # Re-extract the VOI for the current slice/orientation and refresh
        # the slice actor, volume colouring and camera.
        self.hideActor(self.sliceActorNo)
        extent = [i for i in self.img3D.GetExtent()]
        extent[self.sliceOrientation * 2] = self.getActiveSlice()
        extent[self.sliceOrientation * 2 + 1] = self.getActiveSlice()
        self.voi.SetVOI(extent[0], extent[1],
                        extent[2], extent[3],
                        extent[4], extent[5])
        self.voi.Update()
        self.ia.Update()
        self.wl.Update()
        # Set image actor
        self.sliceActor.SetInputData(self.wl.GetOutput())
        self.sliceActor.SetDisplayExtent(extent[0], extent[1],
                                         extent[2], extent[3],
                                         extent[4], extent[5])
        self.sliceActor.GetProperty().SetOpacity(0.99)
        self.sliceActor.Update()
        no = self.showActor(self.sliceActorNo, self.sliceActor)
        self.sliceActorNo = no
        self.updateVolumePipeline()
        self.adjustCamera(resetcamera)
        self.renWin.Render()
    def updateVolumePipeline(self):
        # Refresh colour/opacity transfer functions, but only when the volume
        # render has been built and is visible.
        if self.volume_render_initialised and self.volume.GetVisibility():
            cmin, cmax = self.volume_colormap_limits
            viridis = colormaps.CILColorMaps.get_color_transfer_function('viridis', (cmin, cmax))
            x = numpy.linspace(self.ia.GetMinimum(), self.ia.GetMaximum(), num=255)
            scaling = 0.1
            opacity = colormaps.CILColorMaps.get_opacity_transfer_function(x,
                colormaps.relu, cmin, cmax, scaling)
            self.volume_property.SetColor(viridis)
            self.volume_property.SetScalarOpacity(opacity)
    def setVolumeColorLevelWindow(self, cmin, cmax):
        # Set new colormap limits for the volume render and rebuild.
        self.volume_colormap_limits = (cmin, cmax)
        self.updatePipeline()
    def adjustCamera(self, resetcamera=False):
        # Keep the clipping range valid; optionally re-frame the whole scene.
        self.ren.ResetCameraClippingRange()
        if resetcamera:
            self.ren.ResetCamera()
    # Set interpolation on
    def setInterpolateOn(self):
        self.sliceActor.SetInterpolate(True)
        self.renWin.Render()
    # Set interpolation off
    def setInterpolateOff(self):
        self.sliceActor.SetInterpolate(False)
        self.renWin.Render()
    def setColourWindowLevel(self, window, level):
        # Apply an explicit colour window/level and re-render the slice.
        self.wl.SetWindow(window)
        self.wl.SetLevel(level)
        self.wl.Update()
        self.sliceActor.SetInputData(self.wl.GetOutput())
        self.sliceActor.Update()
        self.ren.Render()
        self.renWin.Render()
def saveRender(self, filename, renWin=None):
'''Save the render window to PNG file'''
# screenshot code:
w2if = vtk.vtkWindowToImageFilter()
if renWin == None:
renWin = self.renWin
w2if.SetInput(renWin)
w2if.Update()
# Check if user has supplied an extension
extn = os.path.splitext(filename)[1]
if extn.lower() == '.png':
saveFilename = filename
else:
saveFilename = filename+'.png'
writer = vtk.vtkPNGWriter()
writer.SetFileName(saveFilename)
writer.SetInputConnection(w2if.GetOutputPort())
writer.Write()
| [
"ccpi.viewer.CILViewer2D.ViewerEventManager",
"vtk.vtkDecimatePro",
"vtk.vtkImageMapToWindowLevelColors",
"vtk.vtkTextMapper",
"vtk.vtkTextProperty",
"vtk.vtkPNGWriter",
"ccpi.viewer.utils.colormaps.CILColorMaps.get_color_transfer_function",
"vtk.vtkImageActor",
"vtk.vtkInteractorStyleTrackballCamer... | [((1180, 1232), 'vtk.vtkInteractorStyleTrackballCamera.__init__', 'vtk.vtkInteractorStyleTrackballCamera.__init__', (['self'], {}), '(self)\n', (1226, 1232), False, 'import vtk\n'), ((9408, 9429), 'vtk.vtkTextProperty', 'vtk.vtkTextProperty', ([], {}), '()\n', (9427, 9429), False, 'import vtk\n'), ((9809, 9828), 'vtk.vtkTextMapper', 'vtk.vtkTextMapper', ([], {}), '()\n', (9826, 9828), False, 'import vtk\n'), ((12227, 12246), 'vtk.vtkImageActor', 'vtk.vtkImageActor', ([], {}), '()\n', (12244, 12246), False, 'import vtk\n'), ((12266, 12285), 'vtk.vtkExtractVOI', 'vtk.vtkExtractVOI', ([], {}), '()\n', (12283, 12285), False, 'import vtk\n'), ((12304, 12340), 'vtk.vtkImageMapToWindowLevelColors', 'vtk.vtkImageMapToWindowLevelColors', ([], {}), '()\n', (12338, 12340), False, 'import vtk\n'), ((12359, 12392), 'vtk.vtkImageHistogramStatistics', 'vtk.vtkImageHistogramStatistics', ([], {}), '()\n', (12390, 12392), False, 'import vtk\n'), ((12476, 12496), 'ccpi.viewer.CILViewer2D.ViewerEventManager', 'ViewerEventManager', ([], {}), '()\n', (12494, 12496), False, 'from ccpi.viewer.CILViewer2D import ViewerEventManager\n'), ((12735, 12755), 'vtk.vtkDecimatePro', 'vtk.vtkDecimatePro', ([], {}), '()\n', (12753, 12755), False, 'import vtk\n'), ((12872, 12888), 'vtk.vtkActor2D', 'vtk.vtkActor2D', ([], {}), '()\n', (12886, 12888), False, 'import vtk\n'), ((13173, 13199), 'vtk.vtkSmartVolumeMapper', 'vtk.vtkSmartVolumeMapper', ([], {}), '()\n', (13197, 13199), False, 'import vtk\n'), ((13331, 13354), 'vtk.vtkVolumeProperty', 'vtk.vtkVolumeProperty', ([], {}), '()\n', (13352, 13354), False, 'import vtk\n'), ((13540, 13555), 'vtk.vtkVolume', 'vtk.vtkVolume', ([], {}), '()\n', (13553, 13555), False, 'import vtk\n'), ((13770, 13788), 'vtk.vtkAxesActor', 'vtk.vtkAxesActor', ([], {}), '()\n', (13786, 13788), False, 'import vtk\n'), ((13803, 13835), 'vtk.vtkOrientationMarkerWidget', 'vtk.vtkOrientationMarkerWidget', ([], {}), '()\n', (13833, 
13835), False, 'import vtk\n'), ((15167, 15190), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (15188, 15190), False, 'import vtk\n'), ((15383, 15397), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (15395, 15397), False, 'import vtk\n'), ((19489, 19522), 'vtk.vtkImageHistogramStatistics', 'vtk.vtkImageHistogramStatistics', ([], {}), '()\n', (19520, 19522), False, 'import vtk\n'), ((20058, 20133), 'ccpi.viewer.utils.colormaps.CILColorMaps.get_color_transfer_function', 'colormaps.CILColorMaps.get_color_transfer_function', (['"""viridis"""', '(cmin, cmax)'], {}), "('viridis', (cmin, cmax))\n", (20108, 20133), False, 'from ccpi.viewer.utils import colormaps\n'), ((20244, 20340), 'ccpi.viewer.utils.colormaps.CILColorMaps.get_opacity_transfer_function', 'colormaps.CILColorMaps.get_opacity_transfer_function', (['x', 'colormaps.relu', 'cmin', 'cmax', 'scaling'], {}), '(x, colormaps.relu,\n cmin, cmax, scaling)\n', (20296, 20340), False, 'from ccpi.viewer.utils import colormaps\n'), ((25045, 25073), 'vtk.vtkWindowToImageFilter', 'vtk.vtkWindowToImageFilter', ([], {}), '()\n', (25071, 25073), False, 'import vtk\n'), ((25432, 25450), 'vtk.vtkPNGWriter', 'vtk.vtkPNGWriter', ([], {}), '()\n', (25448, 25450), False, 'import vtk\n'), ((11662, 11683), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (11681, 11683), False, 'import vtk\n'), ((11781, 11812), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (11810, 11812), False, 'import vtk\n'), ((11964, 11981), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (11979, 11981), False, 'import vtk\n'), ((17460, 17478), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (17476, 17478), False, 'import vtk\n'), ((17499, 17522), 'numpy.shape', 'numpy.shape', (['numpyarray'], {}), '(numpyarray)\n', (17510, 17522), False, 'import numpy\n'), ((18139, 18163), 'vtk.vtkImageAccumulate', 'vtk.vtkImageAccumulate', ([], {}), '()\n', (18161, 18163), False, 
'import vtk\n'), ((18397, 18421), 'vtk.vtkImageShiftScale', 'vtk.vtkImageShiftScale', ([], {}), '()\n', (18419, 18421), False, 'import vtk\n'), ((23614, 23689), 'ccpi.viewer.utils.colormaps.CILColorMaps.get_color_transfer_function', 'colormaps.CILColorMaps.get_color_transfer_function', (['"""viridis"""', '(cmin, cmax)'], {}), "('viridis', (cmin, cmax))\n", (23664, 23689), False, 'from ccpi.viewer.utils import colormaps\n'), ((23822, 23918), 'ccpi.viewer.utils.colormaps.CILColorMaps.get_opacity_transfer_function', 'colormaps.CILColorMaps.get_opacity_transfer_function', (['x', 'colormaps.relu', 'cmin', 'cmax', 'scaling'], {}), '(x, colormaps.relu,\n cmin, cmax, scaling)\n', (23874, 23918), False, 'from ccpi.viewer.utils import colormaps\n'), ((25252, 25278), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (25268, 25278), False, 'import os\n'), ((17404, 17427), 'numpy.shape', 'numpy.shape', (['numpyarray'], {}), '(numpyarray)\n', (17415, 17427), False, 'import numpy\n')] |
from setuptools import setup, Extension
import numpy as np
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # very old numpy releases only exposed get_numpy_include()
    numpy_include = np.get_numpy_include()
# extensions: compiler settings shared by both extension modules
ext_args = dict(
    include_dirs=[numpy_include],
    language='c++',
)
ext_modules = [
    Extension(
        "nms_cpu",
        sources=["src/nms_cpu.cpp"],
        **ext_args
    ),
    Extension(
        "soft_nms_cpu",
        sources=["src/soft_nms_cpu.pyx"],
        **ext_args
    ),
]
setup(
    name='nms',
    # cythonize translates the .pyx source to C++ before compilation
    ext_modules=cythonize(ext_modules),
    # inject our custom trigger
    # NOTE(review): Cython's build_ext and CUDAExtension are imported but
    # unused; torch's BuildExtension is deliberately installed as the
    # build_ext command per the comment above — confirm that is intended.
    cmdclass={'build_ext': BuildExtension},
)
"setuptools.Extension",
"Cython.Build.cythonize",
"numpy.get_numpy_include",
"numpy.get_include"
] | [((307, 323), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (321, 323), True, 'import numpy as np\n'), ((498, 559), 'setuptools.Extension', 'Extension', (['"""nms_cpu"""'], {'sources': "['src/nms_cpu.cpp']"}), "('nms_cpu', sources=['src/nms_cpu.cpp'], **ext_args)\n", (507, 559), False, 'from setuptools import setup, Extension\n'), ((595, 666), 'setuptools.Extension', 'Extension', (['"""soft_nms_cpu"""'], {'sources': "['src/soft_nms_cpu.pyx']"}), "('soft_nms_cpu', sources=['src/soft_nms_cpu.pyx'], **ext_args)\n", (604, 666), False, 'from setuptools import setup, Extension\n'), ((367, 389), 'numpy.get_numpy_include', 'np.get_numpy_include', ([], {}), '()\n', (387, 389), True, 'import numpy as np\n'), ((744, 766), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (753, 766), False, 'from Cython.Build import cythonize\n')] |
import numpy as np
def rotX(theta):
    """Rotation matrix about the x-axis by `theta` radians (right-handed)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, c, -s],
                     [0.0, s, c]])
def rotY(theta):
    """Rotation matrix about the y-axis by `theta` radians (right-handed)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
def rotZ(theta):
    """Rotation matrix about the z-axis by `theta` radians (right-handed)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s, c, 0.0],
                     [0.0, 0.0, 1.0]])
def euler_matrix(x, y, z):
    """Compose rotations about x, y, z into one matrix: R = Rx · Ry · Rz."""
    # same left-to-right association as chaining .dot() calls
    rot_xy = rotX(x).dot(rotY(y))
    return rot_xy.dot(rotZ(z))
def vector_slerp(v1, v2, fraction):
    """Spherically interpolate from v1 towards v2 by `fraction` of the angle
    between them, rotating about their common normal.

    NOTE: v1 and v2 must not be (anti-)parallel — the rotation axis would be
    the zero vector (same limitation as the original implementation).
    """
    perp_v = np.cross(v1, v2)
    # perp_v /= np.linalg.norm(perp_v)
    # Fix: clamp the cosine into [-1, 1] so float round-off on (nearly)
    # collinear inputs cannot push arccos into NaN territory.
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    angle = np.arccos(np.clip(cos_angle, -1.0, 1.0)) * fraction
    return rotation_matrix(angle, perp_v).dot(v1)
def unit_vector(v):
    """Return `v` scaled to unit Euclidean length."""
    norm = np.linalg.norm(v)
    return v / norm
def rotation_matrix(angle, direction):
    """Rodrigues rotation matrix for `angle` radians about axis `direction`.

    The axis is normalised internally (the caller's array is not modified);
    R = cos(a)·I + (1 - cos(a))·(u uᵀ) + sin(a)·[u]×.
    """
    axis = np.asarray(direction, dtype=float)
    axis = axis / np.linalg.norm(axis)  # fresh array: caller's data untouched
    c = np.cos(angle)
    s = np.sin(angle)
    ux, uy, uz = axis
    # skew-symmetric cross-product matrix [u]× of the unit axis
    K = np.array([[0.0, -uz, uy],
                  [uz, 0.0, -ux],
                  [-uy, ux, 0.0]])
    return c * np.eye(3) + (1.0 - c) * np.outer(axis, axis) + s * K
"numpy.cross",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.outer",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((630, 646), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (638, 646), True, 'import numpy as np\n'), ((928, 941), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (934, 941), True, 'import numpy as np\n'), ((953, 966), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (959, 966), True, 'import numpy as np\n'), ((1055, 1082), 'numpy.diag', 'np.diag', (['[cosa, cosa, cosa]'], {}), '([cosa, cosa, cosa])\n', (1062, 1082), True, 'import numpy as np\n'), ((1169, 1292), 'numpy.array', 'np.array', (['[[0.0, -direction[2], direction[1]], [direction[2], 0.0, -direction[0]], [-\n direction[1], direction[0], 0.0]]'], {}), '([[0.0, -direction[2], direction[1]], [direction[2], 0.0, -\n direction[0]], [-direction[1], direction[0], 0.0]])\n', (1177, 1292), True, 'import numpy as np\n'), ((859, 876), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (873, 876), True, 'import numpy as np\n'), ((1092, 1122), 'numpy.outer', 'np.outer', (['direction', 'direction'], {}), '(direction, direction)\n', (1100, 1122), True, 'import numpy as np\n'), ((93, 106), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (99, 106), True, 'import numpy as np\n'), ((149, 162), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (155, 162), True, 'import numpy as np\n'), ((164, 177), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (170, 177), True, 'import numpy as np\n'), ((221, 234), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (227, 234), True, 'import numpy as np\n'), ((239, 252), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (245, 252), True, 'import numpy as np\n'), ((326, 339), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (332, 339), True, 'import numpy as np\n'), ((382, 395), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (388, 395), True, 'import numpy as np\n'), ((438, 451), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (444, 451), True, 'import numpy as np\n'), ((453, 466), 'numpy.cos', 'np.cos', (['theta'], {}), 
'(theta)\n', (459, 466), True, 'import numpy as np\n'), ((708, 722), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (714, 722), True, 'import numpy as np\n'), ((109, 122), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (115, 122), True, 'import numpy as np\n'), ((308, 321), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (314, 321), True, 'import numpy as np\n'), ((398, 411), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (404, 411), True, 'import numpy as np\n'), ((723, 741), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (737, 741), True, 'import numpy as np\n'), ((742, 760), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (756, 760), True, 'import numpy as np\n')] |
#<NAME>
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
# Data set
# Data from 13/02/2020 to 10/11/2020
# Source: https://tablerocovid.mspas.gob.gt/
# Regions per: https://aprende.guatemala.com/historia/geografia/regiones-de-guatemala/
# Region of each of the 22 departments (in the fixed department order below)
region_guatemala = ['Region2 Norte', 'Region2 Norte', 'Region 5 Central', 'Region3 Nor-Oriente', 'Region 8 Peten',
                    'Region3 Nor-Oriente', 'Region 7 Nor-Occidente', 'Region 5 Central', 'Region1 Metropolitana',
                    'Region 7 Nor-Occidente', 'Region3 Nor-Oriente', 'Region 4 Sur-Oriente', 'Region 4 Sur-Oriente',
                    'Region 6 Sur-Occidente', 'Region 6 Sur-Occidente', 'Region 5 Central', 'Region 6 Sur-Occidente',
                    'Region 4 Sur-Oriente', 'Region 6 Sur-Occidente', 'Region 6 Sur-Occidente', 'Region 6 Sur-Occidente',
                    'Region3 Nor-Oriente']
# departamento = ['Alta Verapaz', 'Baja Verapaz', 'Chimaltenango', 'Chiquimula', 'Petén', 'El Progreso', 'Quiché',
#                 'Escuintla', 'Guatemala', 'Huehuetenango', 'Izabal', 'Jalapa', 'Jutiapa', 'Quetzaltenango', 'Retalhuleu',
#                 'Sacatepequez', 'San Marcos', 'Santa Rosa', 'Sololá', 'Suchitepéquez', 'Totonicapán', 'Zacapa']
# COVID-19 deaths per department, same order as region_guatemala
no_fallecidos = [54, 34, 102, 30, 82, 40, 37, 257, 2000, 57, 139, 13, 34, 259, 56, 150, 136, 30, 43, 112, 84, 59]
# Encode the region names as integer labels
label = preprocessing.LabelEncoder()
region_guatemala2 = label.fit_transform(region_guatemala)
# Build the (22, 2) feature matrix [region_label, deaths] in one call instead
# of enumerating all 22 pairs by hand (produces the identical int array).
x = np.column_stack((region_guatemala2, no_fallecidos))
kmeans = KMeans(n_clusters=3)
kmeans.fit(x)
print("Clusters: Fallecidos de acuerdo a regiones de Guatemala - Covid 19\n ", kmeans.cluster_centers_)
# Scatter plot coloured by cluster assignment
plt.scatter(x[:, 0], x[:, 1], c=kmeans.labels_, cmap='rainbow')
plt.title("Fallecidos de acuerdo a regiones de Guatemala\n- Covid 19 - Datos de 13/02/2020 a 10/11/2020")
plt.xlabel('Regiones')
plt.ylabel('Total fallecidos')
plt.show()
| [
"sklearn.cluster.KMeans",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1468, 1496), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (1494, 1496), False, 'from sklearn import preprocessing\n'), ((1591, 2612), 'numpy.array', 'np.array', (['[[region_guatemala2[0], no_fallecidos[0]], [region_guatemala2[1],\n no_fallecidos[1]], [region_guatemala2[2], no_fallecidos[2]], [\n region_guatemala2[3], no_fallecidos[3]], [region_guatemala2[4],\n no_fallecidos[4]], [region_guatemala2[5], no_fallecidos[5]], [\n region_guatemala2[6], no_fallecidos[6]], [region_guatemala2[7],\n no_fallecidos[7]], [region_guatemala2[8], no_fallecidos[8]], [\n region_guatemala2[9], no_fallecidos[9]], [region_guatemala2[10],\n no_fallecidos[10]], [region_guatemala2[11], no_fallecidos[11]], [\n region_guatemala2[12], no_fallecidos[12]], [region_guatemala2[13],\n no_fallecidos[13]], [region_guatemala2[14], no_fallecidos[14]], [\n region_guatemala2[15], no_fallecidos[15]], [region_guatemala2[16],\n no_fallecidos[16]], [region_guatemala2[17], no_fallecidos[17]], [\n region_guatemala2[18], no_fallecidos[18]], [region_guatemala2[19],\n no_fallecidos[19]], [region_guatemala2[20], no_fallecidos[20]], [\n region_guatemala2[21], no_fallecidos[21]]]'], {}), '([[region_guatemala2[0], no_fallecidos[0]], [region_guatemala2[1],\n no_fallecidos[1]], [region_guatemala2[2], no_fallecidos[2]], [\n region_guatemala2[3], no_fallecidos[3]], [region_guatemala2[4],\n no_fallecidos[4]], [region_guatemala2[5], no_fallecidos[5]], [\n region_guatemala2[6], no_fallecidos[6]], [region_guatemala2[7],\n no_fallecidos[7]], [region_guatemala2[8], no_fallecidos[8]], [\n region_guatemala2[9], no_fallecidos[9]], [region_guatemala2[10],\n no_fallecidos[10]], [region_guatemala2[11], no_fallecidos[11]], [\n region_guatemala2[12], no_fallecidos[12]], [region_guatemala2[13],\n no_fallecidos[13]], [region_guatemala2[14], no_fallecidos[14]], [\n region_guatemala2[15], no_fallecidos[15]], [region_guatemala2[16],\n no_fallecidos[16]], [region_guatemala2[17], 
no_fallecidos[17]], [\n region_guatemala2[18], no_fallecidos[18]], [region_guatemala2[19],\n no_fallecidos[19]], [region_guatemala2[20], no_fallecidos[20]], [\n region_guatemala2[21], no_fallecidos[21]]])\n', (1599, 2612), True, 'import numpy as np\n'), ((2558, 2578), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (2564, 2578), False, 'from sklearn.cluster import KMeans\n'), ((2696, 2759), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'kmeans.labels_', 'cmap': '"""rainbow"""'}), "(x[:, 0], x[:, 1], c=kmeans.labels_, cmap='rainbow')\n", (2707, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2876), 'matplotlib.pyplot.title', 'plt.title', (['"""Fallecidos de acuerdo a regiones de Guatemala\n- Covid 19 - Datos de 13/02/2020 a 10/11/2020"""'], {}), '(\n """Fallecidos de acuerdo a regiones de Guatemala\n- Covid 19 - Datos de 13/02/2020 a 10/11/2020"""\n )\n', (2767, 2876), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2886), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Regiones"""'], {}), "('Regiones')\n", (2874, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2887, 2917), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total fallecidos"""'], {}), "('Total fallecidos')\n", (2897, 2917), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2926, 2928), True, 'import matplotlib.pyplot as plt\n')] |
from imutils import contours
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
from Tkinter import Frame, Tk, BOTH, Text, Menu, END
import tkFileDialog
import os.path
import os
import sys
from time import gmtime, strftime
def PT(F):
Time = strftime("%Y-%m-%d %H%M%S", gmtime())
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", default='Endoscope Test Video.mp4',
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=10,
help="max buffer size")
ap.add_argument("-f", "--filename", default='20% 1 CREEP 1min.avi',
help="filename to process")
args = vars(ap.parse_args())
root = Tk()
root.withdraw()
#file = tkFileDialog.askopenfilename()
Run = 1
Setup = 1
Create = 1
camera = cv2.VideoCapture(F)
pts = deque(maxlen=args["buffer"])
counter = 0
FirstInitial = 0
SecondInitial = 0
FirstPoint = 0
SecondPoint = 0
(d1, d2) = (0, 0)
Difference = 0
Delta = 0
PixelToMetric = 0
DotRadius = 4 #4mm dot diameter
MeasuredRadius = 0
RadiusMeasure = 1
TotalPixels = 0
MaxProportion = 0
MinProportion = 0
hul=0
huh=179
sal=0
sah=255
val=0
vah=255
#determine video file name
head, tail = os.path.split(F)
print ("File Selected is: " + tail)
#fourcc = cv2.cv.CV_FOURCC(*'XVID')
#outCROP = cv2.VideoWriter(F + '_OUTPUT.avi',fourcc, 24.0, (175,150))
if os.path.isfile("SliderVals.txt"):
f=open("SliderVals.txt",'r')
hul=int(f.readline())
huh=int(f.readline())
sal=int(f.readline())
sah=int(f.readline())
val=int(f.readline())
vah=int(f.readline())
f.close()
def nothing(x):
pass
while True:
if os.path.isfile(F):
if (Setup == 1):
#cv2.namedWindow('setupimage')
#cv2.namedWindow('frame')
if (Create ==1):
(grabbed, frame) = camera.read()
# Crop Frame to remove side irregularities
#Endoscope Resolution = 640x480
frame = frame[100:275 , 250:400]
#easy assigments
#hh='Hue High'
#hl='Hue Low'
#sh='Saturation High'
#sl='Saturation Low'
#vh='Value High'
#vl='Value Low'
#cv2.createTrackbar(hl, 'setupimage',hul,179,nothing)
#cv2.createTrackbar(hh, 'setupimage',huh,179,nothing)
#cv2.createTrackbar(sl, 'setupimage',sal,255,nothing)
#cv2.createTrackbar(sh, 'setupimage',sah,255,nothing)
#cv2.createTrackbar(vl, 'setupimage',val,255,nothing)
#cv2.createTrackbar(vh, 'setupimage',vah,255,nothing)
Create = 0
#print("Press Esc when trackbars are configured")
#frame=imutils.resize(frame, width=600)
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#print ("Total Pixels = " + str(TotalPixels))
#read trackbar positions for all
#hul=cv2.getTrackbarPos(hl, 'setupimage')
#huh=cv2.getTrackbarPos(hh, 'setupimage')
#sal=cv2.getTrackbarPos(sl, 'setupimage')
#sah=cv2.getTrackbarPos(sh, 'setupimage')
#val=cv2.getTrackbarPos(vl, 'setupimage')
#vah=cv2.getTrackbarPos(vh, 'setupimage')
#make array for final values
HSVLOW=np.array([hul,sal,val])
HSVHIGH=np.array([huh,sah,vah])
#apply the range on a mask
mask = cv2.inRange(hsv,HSVLOW, HSVHIGH)
res = cv2.bitwise_and(frame,frame, mask =mask)
#Find Total Number of Pixels in the Cropped Video
TotalPixels = 123200
#cv2.imshow('frame', res)
k=cv2.waitKey(10) & 0xFF
Setup = 0
cv2.destroyWindow('setupimage')
#f = open("SliderVals.txt", "w")
#f.write(str(hul) + '\n' )
#f.write(str(huh) + '\n' )
#f.write(str(sal) + '\n' )
#f.write(str(sah) + '\n' )
#f.write(str(val) + '\n' )
#f.write(str(vah) + '\n' )
#f.close()
pass
else:
print('Invalid File Name')
break
if (Setup == 0):
(grabbed, frame) = camera.read()
# if no frame,then end of the video
if args.get("video") and not grabbed:
print('No Video')
#f = open(tail + ".txt", "a")
f = open("Combined.txt", "a")
f.write('\n' + '\n' + "Maximum Proportion = " + str(MaxProportion) + '\t' +
"Minimum Proportion = " + str(MinProportion) + '\n')
f.close()
break
# Crop Frame to remove side irregularities
#Endoscope Resolution = 640x480
frame = frame[100:320 , 40:600]
#frame=imutils.resize(frame, width=600)
##blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv,HSVLOW, HSVHIGH)
#####mask = cv2.erode(mask, None, iterations=2)
######mask = cv2.dilate(mask, None, iterations=2)
WhiteVal = cv2.countNonZero(mask)
BlackVal = TotalPixels - WhiteVal
ProportionVal = ((float(BlackVal) / float(TotalPixels)) * 100)
if counter ==1:
MaxProportion = ProportionVal
MinProportion = ProportionVal
#Create text file with headers
#f = open(tail + ".txt", "a")
f = open("Combined.txt", "a")
f.write(tail + '\n' + Time + '\n' + "Black Pixel Initial Total" + '\t' +
str(BlackVal) + '\n'+ "Initial Proportion Percentage" + '\t' +
str(ProportionVal) + '\n'+ "Total Pixels = " + str(TotalPixels) + '\n'+
"|Frame|" + '\t' + "|BlackPixelVal|" + '\t' + "|Proportion|" + '\n')
f.close()
if counter >=2:
if ProportionVal > MaxProportion:
MaxProportion = ProportionVal
if ProportionVal < MinProportion:
MinProportion = ProportionVal
#Open new text file with new timestamp
#f = open(tail + ".txt", "a")
f = open("Combined.txt", "a")
f.write(str(counter) + '\t' + str(BlackVal) + '\t' + str(ProportionVal) + '\n')
f.close()
##res = cv2.bitwise_and(frame,frame, mask =mask)
#outFULL.write(mask)
#outFULL.release
#Find Size of Cropped Frame
#width, height = frame.shape[:2]
#print "height " + str(height)
#print "width " + str(width)
#outCROP.write(mask)
#outCROP.release
#cv2.imshow('frame', mask)
counter += 1
k=cv2.waitKey(10) & 0xFF
if k == 27:
#f = open(tail + ".txt", "a")
f = open("Combined.txt", "a")
f.write('\n' + '\n' + "Maximum Proportion = " + str(MaxProportion) + '\t' +
+ "Minimum Proportion = " + str(MinProportion) + '\n')
f.close()
break
for file in os.listdir("."):
if file.endswith(".avi"):
PT(file)
#root.destroy()
camera.release()
out.release
cv2.destroyAllWindows() | [
"os.listdir",
"collections.deque",
"cv2.countNonZero",
"argparse.ArgumentParser",
"cv2.inRange",
"Tkinter.Tk",
"cv2.destroyWindow",
"cv2.bitwise_and",
"os.path.split",
"os.path.isfile",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"time.gmt... | [((9238, 9253), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (9248, 9253), False, 'import os\n'), ((9371, 9394), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9392, 9394), False, 'import cv2\n'), ((420, 445), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (443, 445), False, 'import argparse\n'), ((819, 823), 'Tkinter.Tk', 'Tk', ([], {}), '()\n', (821, 823), False, 'from Tkinter import Frame, Tk, BOTH, Text, Menu, END\n'), ((953, 972), 'cv2.VideoCapture', 'cv2.VideoCapture', (['F'], {}), '(F)\n', (969, 972), False, 'import cv2\n'), ((984, 1012), 'collections.deque', 'deque', ([], {'maxlen': "args['buffer']"}), "(maxlen=args['buffer'])\n", (989, 1012), False, 'from collections import deque\n'), ((1500, 1516), 'os.path.split', 'os.path.split', (['F'], {}), '(F)\n', (1513, 1516), False, 'import os\n'), ((1700, 1732), 'os.path.isfile', 'os.path.isfile', (['"""SliderVals.txt"""'], {}), "('SliderVals.txt')\n", (1714, 1732), False, 'import os\n'), ((334, 342), 'time.gmtime', 'gmtime', ([], {}), '()\n', (340, 342), False, 'from time import gmtime, strftime\n'), ((2067, 2084), 'os.path.isfile', 'os.path.isfile', (['F'], {}), '(F)\n', (2081, 2084), False, 'import os\n'), ((6321, 6359), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (6333, 6359), False, 'import cv2\n'), ((6402, 6435), 'cv2.inRange', 'cv2.inRange', (['hsv', 'HSVLOW', 'HSVHIGH'], {}), '(hsv, HSVLOW, HSVHIGH)\n', (6413, 6435), False, 'import cv2\n'), ((6592, 6614), 'cv2.countNonZero', 'cv2.countNonZero', (['mask'], {}), '(mask)\n', (6608, 6614), False, 'import cv2\n'), ((3592, 3630), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (3604, 3630), False, 'import cv2\n'), ((4258, 4283), 'numpy.array', 'np.array', (['[hul, sal, val]'], {}), '([hul, sal, val])\n', (4266, 4283), True, 'import numpy as np\n'), ((4307, 4332), 'numpy.array', 
'np.array', (['[huh, sah, vah]'], {}), '([huh, sah, vah])\n', (4315, 4332), True, 'import numpy as np\n'), ((4413, 4446), 'cv2.inRange', 'cv2.inRange', (['hsv', 'HSVLOW', 'HSVHIGH'], {}), '(hsv, HSVLOW, HSVHIGH)\n', (4424, 4446), False, 'import cv2\n'), ((4469, 4509), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (4484, 4509), False, 'import cv2\n'), ((4824, 4855), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""setupimage"""'], {}), "('setupimage')\n", (4841, 4855), False, 'import cv2\n'), ((8787, 8802), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (8798, 8802), False, 'import cv2\n'), ((4739, 4754), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4750, 4754), False, 'import cv2\n')] |
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
def dntrack(df: pd.DataFrame,
            rho: (list, str) = None,
            ntr: (list, str) = None,
            lims: list = None,
            lime: bool = False,
            dtick: bool = False,
            fill: bool = True,
            fontsize: int = 8,
            grid_numbers: list = [11, 51],
            steps: list = None,
            correlation: pd.DataFrame = None,
            rho_kw: dict = {},
            ntr_kw: dict = {},
            corr_kw: dict = {},
            ax=None,
            rho_colormap: str = 'hot',
            ntr_colormap: str = 'winter',
            depth_ref: str = 'md'
            ):
    """Plot a density-neutron well-log track.

    Parameters
    ----------
    df : pd.DataFrame
        Log data indexed by measured depth.
    rho : list or str, optional
        Column name(s) holding bulk density [g/cc], by default None
    ntr : list or str, optional
        Column name(s) holding neutron porosity [v/v], by default None
    lims : list, optional
        [top, bottom] depth interval to display; defaults to the full
        depth range of the data.
    lime : bool, optional
        Use limestone (2.71 g/cc) instead of sandstone (2.65 g/cc)
        calibration for the neutron-to-density scale sync, by default False
    dtick : bool, optional
        Show depth tick labels, by default False
    fill : bool, optional
        Shade the density-neutron crossover, by default True
    fontsize : int, optional
        Tick-label font size, by default 8
    grid_numbers : list, optional
        [major, minor] grid-line counts used when `steps` is None,
        by default [11, 51]
    steps : list, optional
        [major, minor] grid spacing in depth units, by default None
    correlation : pd.DataFrame, optional
        Correlation tops with a 'depth' column (and optionally 'comment').
    rho_kw, ntr_kw, corr_kw : dict, optional
        Matplotlib line kwargs for the respective curves.
    ax : matplotlib axes, optional
        Axes to draw into; defaults to ``plt.gca()``.
    rho_colormap, ntr_colormap : str, optional
        Colormaps used when several curves are passed, by default
        'hot' / 'winter'.
    depth_ref : str, optional
        One of 'md', 'tvd', 'tvdss', by default 'md'
    """
    assert isinstance(df, pd.DataFrame)
    assert depth_ref in ['md', 'tvd', 'tvdss'], "depth_ref can only be one of ['md','tvd','tvdss']"

    # Work on copies so neither the caller's dicts nor the shared mutable
    # defaults are modified; the original mutated them in place (and
    # corr_kw.pop below would strip 'ann' from the caller's dict between calls).
    rho_kw = dict(rho_kw)
    ntr_kw = dict(ntr_kw)
    corr_kw = dict(corr_kw)

    # Set axes: density on the host axes, neutron on a twin x-axis.
    dax = ax or plt.gca()
    nax = dax.twiny()

    # Default line styles for each curve family.
    for key, val in {'color': 'darkred', 'linestyle': '-', 'linewidth': 2}.items():
        rho_kw.setdefault(key, val)
    for key, val in {'color': 'darkblue', 'linestyle': '-', 'linewidth': 1}.items():
        ntr_kw.setdefault(key, val)
    for key, val in {'color': 'red', 'linestyle': '--', 'linewidth': 2}.items():
        corr_kw.setdefault(key, val)

    # Matrix density fixes the neutron->density scale sync
    # (limestone 2.71 g/cc, sandstone 2.65 g/cc).
    d = 2.71 if lime else 2.65
    m = (d - 1.9) / (0 - 0.45)
    b = -m * 0.45 + 1.9
    rholim = -0.15 * m + b

    depth = df.index if depth_ref == 'md' else df[depth_ref]

    # BUG FIX: resolve the default depth limits *before* they are used to
    # build the grid arrays; the original dereferenced lims[0] first and
    # crashed with TypeError whenever lims was left as None.
    if lims is None:
        lims = [depth.min(), depth.max()]

    # Vertical grid spacing.
    if steps is None:
        mayor_grid = np.linspace(lims[0], lims[1], grid_numbers[0])
        minor_grid = np.linspace(lims[0], lims[1], grid_numbers[1])
    else:
        mayor_grid = np.arange(lims[0], lims[1], steps[0])
        minor_grid = np.arange(lims[0], lims[1], steps[1])

    # Density curve(s).
    if rho is not None:
        if isinstance(rho, str):
            dax.plot(df[rho], depth, **rho_kw)
        elif isinstance(rho, list):
            cmap = mpl.cm.get_cmap(rho_colormap, len(rho))
            for i, r in enumerate(rho):
                rho_kw['color'] = cmap(i)
                dax.plot(df[r], depth, **rho_kw)

    # Density-axis gridding and ticks.
    dax.set_xlabel("Density [g/cc]")
    dax.set_xticks(np.linspace(1.9, rholim, 4))
    dax.set_xlim([1.9, rholim])
    dax.tick_params("both", labelsize=fontsize)
    dax.grid(True, linewidth=1.0)
    dax.grid(True, which='minor', linewidth=0.5)
    dax.set_yticks(minor_grid, minor=True)
    dax.set_yticks(mayor_grid)
    if dtick:
        dax.set_yticklabels(mayor_grid)
    else:
        dax.set_yticklabels([])

    # Neutron curve(s).
    if ntr is not None:
        if isinstance(ntr, str):
            nax.plot(df[ntr], depth, **ntr_kw)
        elif isinstance(ntr, list):
            cmap = mpl.cm.get_cmap(ntr_colormap, len(ntr))
            for i, r in enumerate(ntr):
                ntr_kw['color'] = cmap(i)
                nax.plot(df[r], depth, **ntr_kw)

    nax.set_xlabel("Neutron [v/v]")
    nax.set_xticks(np.linspace(0.45, -0.15, 4))
    nax.set_xlim([0.45, -0.15])
    nax.tick_params("both", labelsize=fontsize)
    nax.set_yticks(minor_grid, minor=True)
    nax.set_yticks(mayor_grid)
    if dtick:
        nax.set_yticklabels(mayor_grid)
    else:
        nax.set_yticklabels([])

    dax.set_ylim([lims[1], lims[0]])

    # Convert the neutron values to the density scale so the crossover can
    # be shaded: the mapping uses m=-1.666667, b=2.65 for sandstone and
    # m=-1.8, b=2.71 for limestone calibration.
    if (ntr is not None) & (rho is not None):
        NtrTorho = df[ntr] * m + b
        ntrrho = NtrTorho.values.ravel()
        if fill:
            dax.fill_betweenx(depth, df[rho], ntrrho, where=(df[rho] < ntrrho), color="red")

    # Correlation lines (formation tops).
    if correlation is not None:
        cor_ann = corr_kw.pop('ann', False)
        cor_ann_fontsize = corr_kw.pop('fontsize', 8)
        for i in correlation.iterrows():
            # Skip tops outside the displayed interval; for tvdss the test
            # is inverted (presumably because tvdss lims run high-to-low
            # — TODO confirm against callers).
            if depth_ref == 'tvdss':
                if i[1]['depth'] >= lims[0] or i[1]['depth'] <= lims[1]:
                    continue
            else:
                if i[1]['depth'] < lims[0] or i[1]['depth'] > lims[1]:
                    continue
            dax.hlines(i[1]['depth'], 0, rholim, **corr_kw)
            if cor_ann:
                try:
                    dax.annotate(f"{i[1]['depth']} - {i[1]['comment']} ", xy=(rholim - 0.3, i[1]['depth'] - 1),
                                 xycoords='data', horizontalalignment='right',
                                 bbox={'boxstyle': 'roundtooth', 'fc': '0.8'},
                                 fontsize=cor_ann_fontsize)
                except KeyError:
                    # Row has no 'comment' column: annotate the depth alone.
                    dax.annotate(f"{i[1]['depth']}", xy=(rholim - 3, i[1]['depth'] - 1),
                                 xycoords='data', horizontalalignment='right',
                                 bbox={'boxstyle': 'roundtooth', 'fc': '0.8'},
                                 fontsize=cor_ann_fontsize)
"matplotlib.pyplot.gca",
"numpy.linspace",
"numpy.arange"
] | [((2137, 2146), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2144, 2146), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3066), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', 'grid_numbers[0]'], {}), '(lims[0], lims[1], grid_numbers[0])\n', (3031, 3066), True, 'import numpy as np\n'), ((3086, 3132), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', 'grid_numbers[1]'], {}), '(lims[0], lims[1], grid_numbers[1])\n', (3097, 3132), True, 'import numpy as np\n'), ((3162, 3199), 'numpy.arange', 'np.arange', (['lims[0]', 'lims[1]', 'steps[0]'], {}), '(lims[0], lims[1], steps[0])\n', (3171, 3199), True, 'import numpy as np\n'), ((3219, 3256), 'numpy.arange', 'np.arange', (['lims[0]', 'lims[1]', 'steps[1]'], {}), '(lims[0], lims[1], steps[1])\n', (3228, 3256), True, 'import numpy as np\n'), ((3774, 3801), 'numpy.linspace', 'np.linspace', (['(1.9)', 'rholim', '(4)'], {}), '(1.9, rholim, 4)\n', (3785, 3801), True, 'import numpy as np\n'), ((4603, 4630), 'numpy.linspace', 'np.linspace', (['(0.45)', '(-0.15)', '(4)'], {}), '(0.45, -0.15, 4)\n', (4614, 4630), True, 'import numpy as np\n')] |
import numpy
import pylab
import random

# Simulate a 2D lattice random walk of n steps and plot the path.
n = 100000
x = numpy.zeros(n)
y = numpy.zeros(n)

# Map each die outcome to a unit step: right, left, up, down.
MOVES = {1: (1, 0), 2: (-1, 0), 3: (0, 1), 4: (0, -1)}

for i in range(1, n):
    dx, dy = MOVES[random.randint(1, 4)]
    x[i] = x[i - 1] + dx
    y[i] = y[i - 1] + dy

pylab.title("2D Random Walker")
pylab.plot(x, y)
pylab.show()
| [
"pylab.title",
"pylab.plot",
"numpy.zeros",
"random.randint",
"pylab.show"
] | [((56, 70), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (67, 70), False, 'import numpy\n'), ((75, 89), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (86, 89), False, 'import numpy\n'), ((364, 395), 'pylab.title', 'pylab.title', (['"""2D Random Walker"""'], {}), "('2D Random Walker')\n", (375, 395), False, 'import pylab\n'), ((396, 412), 'pylab.plot', 'pylab.plot', (['x', 'y'], {}), '(x, y)\n', (406, 412), False, 'import pylab\n'), ((413, 425), 'pylab.show', 'pylab.show', ([], {}), '()\n', (423, 425), False, 'import pylab\n'), ((120, 140), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (134, 140), False, 'import random\n')] |
import numpy as np
import dynet as dy
from xnmt.modelparts.transforms import Linear
from xnmt.persistence import serializable_init, Serializable, Ref
from xnmt.events import register_xnmt_handler, handle_xnmt_event
from xnmt.param_initializers import LeCunUniformInitializer
from xnmt.param_collections import ParamManager
MIN_VALUE = -10000
class TimeDistributed(object):
  """Flatten a sentence block ((dim, len) x batch) into a token-major
  expression ((dim,) x len*batch) so per-position layers apply in one shot."""
  def __call__(self, input):
    (model_dim, seq_len), batch_size = input.dim()
    n_tokens = seq_len * batch_size
    return dy.reshape(input, (model_dim,), batch_size=n_tokens)
class ReverseTimeDistributed(object):
  """Inverse of TimeDistributed: fold a token-major expression back into a
  sentence block of shape ((dim, seq_len) x batch_size)."""
  def __call__(self, input, seq_len, batch_size):
    (model_dim,), n_tokens = input.dim()
    assert (seq_len * batch_size == n_tokens)
    return dy.reshape(input, (model_dim, seq_len), batch_size=batch_size)
class LinearSent(object):
  """Affine transform applied across a whole sentence block.

  The input is flattened to token-major form, passed through a Linear
  layer, and (optionally) reshaped back to sentence-major form.
  """
  def __init__(self, dy_model, input_dim, output_dim):
    self.L = Linear(input_dim, output_dim, dy_model,
                    param_init=LeCunUniformInitializer(),
                    bias_init=LeCunUniformInitializer())

  def __call__(self, input_expr, reconstruct_shape=True, timedistributed=False):
    # Flatten unless the caller already passed a token-major expression.
    flat = input_expr if timedistributed else TimeDistributed()(input_expr)
    out = self.L(flat)
    if not reconstruct_shape:
      return out
    (_, seq_len), batch_size = input_expr.dim()
    return ReverseTimeDistributed()(out, seq_len, batch_size)
class LinearNoBiasSent(object):
  """Bias-free linear transform applied across a whole sentence block."""
  def __init__(self, dy_model, input_dim, output_dim):
    self.L = Linear(input_dim, output_dim, dy_model, bias=False,
                    param_init=LeCunUniformInitializer(),
                    bias_init=LeCunUniformInitializer())
    self.output_dim = output_dim

  def __call__(self, input_expr):
    (_, seq_len), batch_size = input_expr.dim()
    out = self.L(input_expr)
    if seq_len == 1:
      # Restore the sentence shape explicitly; this matters when the
      # sequence length is 1, especially during decoding.
      out = ReverseTimeDistributed()(out, seq_len, batch_size)
    return out
class LayerNorm(object):
  """Layer normalization with a learned gain and bias, applied per token
  over a sentence block."""
  def __init__(self, dy_model, d_hid):
    self.p_g = dy_model.add_parameters(dim=d_hid)
    self.p_b = dy_model.add_parameters(dim=d_hid)

  def __call__(self, input_expr):
    gain = dy.parameter(self.p_g)
    bias = dy.parameter(self.p_b)
    (_, seq_len), batch_size = input_expr.dim()
    # Normalize token-by-token, then restore the sentence shape.
    flat = TimeDistributed()(input_expr)
    normed = dy.layer_norm(flat, gain, bias)
    return ReverseTimeDistributed()(normed, seq_len, batch_size)
class MultiHeadAttention(object):
  """ Multi Head Attention Layer for Sentence Blocks

  Projects the inputs to queries/keys/values, splits them into ``h`` heads,
  computes masked, scaled dot-product attention per head, and recombines
  the heads through a final linear layer.
  """
  def __init__(self, dy_model, n_units, h=1, attn_dropout=False):
    self.W_Q = LinearNoBiasSent(dy_model, n_units, n_units)
    self.W_K = LinearNoBiasSent(dy_model, n_units, n_units)
    self.W_V = LinearNoBiasSent(dy_model, n_units, n_units)
    self.finishing_linear_layer = LinearNoBiasSent(dy_model, n_units, n_units)
    self.h = h
    self.scale_score = 1. / (n_units / h) ** 0.5
    self.attn_dropout = attn_dropout

  def split_rows(self, X, h):
    # Slice the row dimension of X into h equal chunks.
    (n_rows, _), batch = X.dim()
    steps = n_rows // h
    output = []
    for i in range(0, n_rows, steps):
      output.append(dy.pickrange(X, i, i + steps))
    return output

  def split_batch(self, X, h):
    # Slice the batch dimension of X into h equal chunks.
    (n_rows, _), batch = X.dim()
    l = range(batch)
    steps = batch // h
    output = []
    for i in range(0, batch, steps):
      indexes = l[i:i + steps]
      output.append(dy.pick_batch_elems(X, indexes))
    return output

  def set_dropout(self, dropout):
    self.dropout = dropout

  def __call__(self, x, z=None, mask=None):
    h = self.h
    if z is None:
      # Self-attention: queries, keys and values all come from x.
      Q = self.W_Q(x)
      K = self.W_K(x)
      V = self.W_V(x)
    else:
      # Source attention: queries from x, keys/values from z.
      Q = self.W_Q(x)
      K = self.W_K(z)
      V = self.W_V(z)
    (n_units, n_querys), batch = Q.dim()
    (_, n_keys), _ = K.dim()

    # Stack the per-head slices along the batch dimension.
    batch_Q = dy.concatenate_to_batch(self.split_rows(Q, h))
    batch_K = dy.concatenate_to_batch(self.split_rows(K, h))
    batch_V = dy.concatenate_to_batch(self.split_rows(V, h))

    # BUG FIX: these asserts were written as `assert(cond, extra)` — a
    # two-element tuple, which is always truthy, so they never checked
    # anything. They now assert the intended dimension equality.
    assert batch_Q.dim() == ((n_units // h, n_querys), batch * h)
    assert batch_K.dim() == ((n_units // h, n_keys), batch * h)
    assert batch_V.dim() == ((n_units // h, n_keys), batch * h)

    # Replicate the mask per head and align it with (n_querys, n_keys).
    mask = np.concatenate([mask] * h, axis=0)
    mask = np.moveaxis(mask, [1, 0, 2], [0, 2, 1])
    mask = dy.inputTensor(mask, batched=True)

    # Scaled dot-product scores; masked-out entries get MIN_VALUE so they
    # vanish after the softmax.
    batch_A = (dy.transpose(batch_Q) * batch_K) * self.scale_score
    batch_A = dy.cmult(batch_A, mask) + (1 - mask) * MIN_VALUE

    sent_len = batch_A.dim()[0][0]
    if sent_len == 1:
      batch_A = dy.softmax(batch_A)
    else:
      batch_A = dy.softmax(batch_A, d=1)

    batch_A = dy.cmult(batch_A, mask)
    assert (batch_A.dim() == ((n_querys, n_keys), batch * h))

    if self.attn_dropout:
      if self.dropout != 0.0:
        batch_A = dy.dropout(batch_A, self.dropout)

    # Weighted sum of values per head.
    batch_C = dy.transpose(batch_A * dy.transpose(batch_V))
    assert (batch_C.dim() == ((n_units // h, n_querys), batch * h))

    # Re-merge the heads along the feature dimension and project out.
    C = dy.concatenate(self.split_batch(batch_C, h), d=0)
    assert (C.dim() == ((n_units, n_querys), batch))
    C = self.finishing_linear_layer(C)
    return C
class FeedForwardLayerSent(object):
  """Position-wise feed-forward block: expand to 4x the width, apply
  ReLU, then project back down."""
  def __init__(self, dy_model, n_units):
    hidden = n_units * 4
    self.W_1 = LinearSent(dy_model, n_units, hidden)
    self.W_2 = LinearSent(dy_model, hidden, n_units)
    self.act = dy.rectify

  def __call__(self, e):
    hidden = self.act(self.W_1(e, reconstruct_shape=False, timedistributed=True))
    return self.W_2(hidden, reconstruct_shape=False, timedistributed=True)
class EncoderLayer(object):
  """Single Transformer encoder layer: self-attention followed by a
  feed-forward block, each wrapped in residual + dropout and an optional
  layer norm."""
  def __init__(self, dy_model, n_units, h=1, attn_dropout=False, layer_norm=False):
    self.self_attention = MultiHeadAttention(dy_model, n_units, h, attn_dropout=attn_dropout)
    self.feed_forward = FeedForwardLayerSent(dy_model, n_units)
    self.layer_norm = layer_norm
    if self.layer_norm:
      self.ln_1 = LayerNorm(dy_model, n_units)
      self.ln_2 = LayerNorm(dy_model, n_units)

  def set_dropout(self, dropout):
    self.dropout = dropout

  def __call__(self, e, xx_mask):
    self.self_attention.set_dropout(self.dropout)
    sub = self.self_attention(e, mask=xx_mask)
    if self.dropout != 0.0:
      sub = dy.dropout(sub, self.dropout)
    e = e + sub
    if self.layer_norm:
      # BUG FIX: LayerNorm implements __call__, not .transform(); the
      # original `self.ln_1.transform(e)` raised AttributeError whenever
      # layer_norm=True.
      e = self.ln_1(e)
    sub = self.feed_forward(e)
    if self.dropout != 0.0:
      sub = dy.dropout(sub, self.dropout)
    e = e + sub
    if self.layer_norm:
      e = self.ln_2(e)
    return e
class DecoderLayer(object):
  """Single Transformer decoder layer: masked self-attention, source
  attention over the encoder states, then a feed-forward block; each
  sub-layer is wrapped in residual + dropout and an optional layer norm."""
  def __init__(self, dy_model, n_units, h=1, attn_dropout=False, layer_norm=False):
    self.self_attention = MultiHeadAttention(dy_model, n_units, h, attn_dropout=attn_dropout)
    self.source_attention = MultiHeadAttention(dy_model, n_units, h, attn_dropout=attn_dropout)
    self.feed_forward = FeedForwardLayerSent(dy_model, n_units)
    self.layer_norm = layer_norm
    if self.layer_norm:
      self.ln_1 = LayerNorm(dy_model, n_units)
      self.ln_2 = LayerNorm(dy_model, n_units)
      self.ln_3 = LayerNorm(dy_model, n_units)

  def set_dropout(self, dropout):
    self.dropout = dropout

  def __call__(self, e, s, xy_mask, yy_mask):
    # Masked self-attention over the target side.
    self.self_attention.set_dropout(self.dropout)
    sub = self.self_attention(e, mask=yy_mask)
    if self.dropout != 0.0:
      sub = dy.dropout(sub, self.dropout)
    e = e + sub
    if self.layer_norm:
      # BUG FIX: LayerNorm implements __call__, not .transform(); the
      # original `self.ln_*.transform(e)` calls raised AttributeError
      # whenever layer_norm=True.
      e = self.ln_1(e)
    # Attention over the encoder states s.
    self.source_attention.set_dropout(self.dropout)
    sub = self.source_attention(e, s, mask=xy_mask)
    if self.dropout != 0.0:
      sub = dy.dropout(sub, self.dropout)
    e = e + sub
    if self.layer_norm:
      e = self.ln_2(e)
    sub = self.feed_forward(e)
    if self.dropout != 0.0:
      sub = dy.dropout(sub, self.dropout)
    e = e + sub
    if self.layer_norm:
      e = self.ln_3(e)
    return e
class TransformerEncoder(Serializable):
  """Stack of Transformer encoder layers, serializable via xnmt YAML."""
  yaml_tag = '!TransformerEncoder'

  @register_xnmt_handler
  @serializable_init
  def __init__(self, layers=1, input_dim=512, h=1,
               dropout=0.0, attn_dropout=False, layer_norm=False, **kwargs):
    dy_model = ParamManager.my_params(self)
    # Name the layers l1..lN to keep a stable ordering.
    self.layer_names = [('l{}'.format(idx),
                         EncoderLayer(dy_model, input_dim, h, attn_dropout, layer_norm))
                        for idx in range(1, layers + 1)]
    self.dropout_val = dropout

  @handle_xnmt_event
  def on_set_train(self, val):
    # Use the configured dropout while training, none at inference time.
    self.set_dropout(self.dropout_val if val else 0.0)

  def set_dropout(self, dropout):
    self.dropout = dropout

  def __call__(self, e, xx_mask):
    if self.dropout != 0.0:
      e = dy.dropout(e, self.dropout)  # word-embedding dropout
    for _, enc_layer in self.layer_names:
      enc_layer.set_dropout(self.dropout)
      e = enc_layer(e, xx_mask)
    return e
class TransformerDecoder(Serializable):
  """Stack of Transformer decoder layers plus an affine projection onto
  the target vocabulary, serializable via xnmt YAML."""
  yaml_tag = '!TransformerDecoder'

  @register_xnmt_handler
  @serializable_init
  def __init__(self, layers=1, input_dim=512, h=1,
               dropout=0.0, attn_dropout=False, layer_norm=False,
               vocab_size = None, vocab = None,
               trg_reader = Ref("model.trg_reader")):
    dy_model = ParamManager.my_params(self)
    # Name the layers l1..lN to keep a stable ordering.
    self.layer_names = [('l{}'.format(idx),
                         DecoderLayer(dy_model, input_dim, h, attn_dropout, layer_norm))
                        for idx in range(1, layers + 1)]
    self.vocab_size = self.choose_vocab_size(vocab_size, vocab, trg_reader)
    self.output_affine = LinearSent(dy_model, input_dim, self.vocab_size)
    self.dropout_val = dropout

  def choose_vocab_size(self, vocab_size, vocab, trg_reader):
    """Choose the vocab size for the embedder based on the passed arguments.
    Resolution priority: explicit vocab_size, then vocab, then trg_reader.
    """
    if vocab_size is not None:
      return vocab_size
    if vocab is not None:
      return len(vocab)
    if trg_reader is None or trg_reader.vocab is None:
      raise ValueError("Could not determine trg_embedder's size. Please set its vocab_size or vocab member explicitly, or specify the vocabulary of trg_reader ahead of time.")
    return len(trg_reader.vocab)

  @handle_xnmt_event
  def on_set_train(self, val):
    # Use the configured dropout while training, none at inference time.
    self.set_dropout(self.dropout_val if val else 0.0)

  def set_dropout(self, dropout):
    self.dropout = dropout

  def __call__(self, e, source, xy_mask, yy_mask):
    if self.dropout != 0.0:
      e = dy.dropout(e, self.dropout)  # word-embedding dropout
    for _, dec_layer in self.layer_names:
      dec_layer.set_dropout(self.dropout)
      e = dec_layer(e, source, xy_mask, yy_mask)
    return e

  def output_and_loss(self, h_block, concat_t_block):
    """Project hidden states to logits and return the batched negative
    log-likelihood of the gold ids, skipping padding positions (id 0)."""
    concat_logit_block = self.output_affine(h_block, reconstruct_shape=False)
    keep = concat_t_block != 0  # mask out padding tokens
    kept_idx = np.argwhere(keep).ravel()
    concat_logit_block = dy.pick_batch_elems(concat_logit_block, kept_idx)
    gold = concat_t_block[keep]
    return dy.pickneglogsoftmax_batch(concat_logit_block, gold)

  def output(self, h_block):
    """Return vocabulary logits for every position, keeping sentence shape."""
    return self.output_affine(h_block, reconstruct_shape=False, timedistributed=True)
| [
"dynet.parameter",
"xnmt.param_initializers.LeCunUniformInitializer",
"dynet.pickneglogsoftmax_batch",
"dynet.reshape",
"dynet.softmax",
"dynet.layer_norm",
"dynet.dropout",
"dynet.pickrange",
"dynet.inputTensor",
"dynet.transpose",
"numpy.argwhere",
"numpy.concatenate",
"xnmt.param_collecti... | [((507, 562), 'dynet.reshape', 'dy.reshape', (['input', '(model_dim,)'], {'batch_size': 'total_words'}), '(input, (model_dim,), batch_size=total_words)\n', (517, 562), True, 'import dynet as dy\n'), ((757, 819), 'dynet.reshape', 'dy.reshape', (['input', '(model_dim, seq_len)'], {'batch_size': 'batch_size'}), '(input, (model_dim, seq_len), batch_size=batch_size)\n', (767, 819), True, 'import dynet as dy\n'), ((2186, 2208), 'dynet.parameter', 'dy.parameter', (['self.p_g'], {}), '(self.p_g)\n', (2198, 2208), True, 'import dynet as dy\n'), ((2217, 2239), 'dynet.parameter', 'dy.parameter', (['self.p_b'], {}), '(self.p_b)\n', (2229, 2239), True, 'import dynet as dy\n'), ((2344, 2370), 'dynet.layer_norm', 'dy.layer_norm', (['input', 'g', 'b'], {}), '(input, g, b)\n', (2357, 2370), True, 'import dynet as dy\n'), ((4195, 4229), 'numpy.concatenate', 'np.concatenate', (['([mask] * h)'], {'axis': '(0)'}), '([mask] * h, axis=0)\n', (4209, 4229), True, 'import numpy as np\n'), ((4241, 4280), 'numpy.moveaxis', 'np.moveaxis', (['mask', '[1, 0, 2]', '[0, 2, 1]'], {}), '(mask, [1, 0, 2], [0, 2, 1])\n', (4252, 4280), True, 'import numpy as np\n'), ((4292, 4326), 'dynet.inputTensor', 'dy.inputTensor', (['mask'], {'batched': '(True)'}), '(mask, batched=True)\n', (4306, 4326), True, 'import dynet as dy\n'), ((4619, 4642), 'dynet.cmult', 'dy.cmult', (['batch_A', 'mask'], {}), '(batch_A, mask)\n', (4627, 4642), True, 'import dynet as dy\n'), ((8123, 8151), 'xnmt.param_collections.ParamManager.my_params', 'ParamManager.my_params', (['self'], {}), '(self)\n', (8145, 8151), False, 'from xnmt.param_collections import ParamManager\n'), ((9131, 9154), 'xnmt.persistence.Ref', 'Ref', (['"""model.trg_reader"""'], {}), "('model.trg_reader')\n", (9134, 9154), False, 'from xnmt.persistence import serializable_init, Serializable, Ref\n'), ((9172, 9200), 'xnmt.param_collections.ParamManager.my_params', 'ParamManager.my_params', (['self'], {}), '(self)\n', (9194, 9200), False, 
'from xnmt.param_collections import ParamManager\n'), ((10895, 10943), 'dynet.pick_batch_elems', 'dy.pick_batch_elems', (['concat_logit_block', 'indexes'], {}), '(concat_logit_block, indexes)\n', (10914, 10943), True, 'import dynet as dy\n'), ((11003, 11065), 'dynet.pickneglogsoftmax_batch', 'dy.pickneglogsoftmax_batch', (['concat_logit_block', 'concat_t_block'], {}), '(concat_logit_block, concat_t_block)\n', (11029, 11065), True, 'import dynet as dy\n'), ((4408, 4431), 'dynet.cmult', 'dy.cmult', (['batch_A', 'mask'], {}), '(batch_A, mask)\n', (4416, 4431), True, 'import dynet as dy\n'), ((4531, 4550), 'dynet.softmax', 'dy.softmax', (['batch_A'], {}), '(batch_A)\n', (4541, 4550), True, 'import dynet as dy\n'), ((4579, 4603), 'dynet.softmax', 'dy.softmax', (['batch_A'], {'d': '(1)'}), '(batch_A, d=1)\n', (4589, 4603), True, 'import dynet as dy\n'), ((6214, 6243), 'dynet.dropout', 'dy.dropout', (['sub', 'self.dropout'], {}), '(sub, self.dropout)\n', (6224, 6243), True, 'import dynet as dy\n'), ((6389, 6418), 'dynet.dropout', 'dy.dropout', (['sub', 'self.dropout'], {}), '(sub, self.dropout)\n', (6399, 6418), True, 'import dynet as dy\n'), ((7317, 7346), 'dynet.dropout', 'dy.dropout', (['sub', 'self.dropout'], {}), '(sub, self.dropout)\n', (7327, 7346), True, 'import dynet as dy\n'), ((7565, 7594), 'dynet.dropout', 'dy.dropout', (['sub', 'self.dropout'], {}), '(sub, self.dropout)\n', (7575, 7594), True, 'import dynet as dy\n'), ((7740, 7769), 'dynet.dropout', 'dy.dropout', (['sub', 'self.dropout'], {}), '(sub, self.dropout)\n', (7750, 7769), True, 'import dynet as dy\n'), ((8639, 8666), 'dynet.dropout', 'dy.dropout', (['e', 'self.dropout'], {}), '(e, self.dropout)\n', (8649, 8666), True, 'import dynet as dy\n'), ((10463, 10490), 'dynet.dropout', 'dy.dropout', (['e', 'self.dropout'], {}), '(e, self.dropout)\n', (10473, 10490), True, 'import dynet as dy\n'), ((967, 992), 'xnmt.param_initializers.LeCunUniformInitializer', 'LeCunUniformInitializer', ([], {}), '()\n', (990, 
992), False, 'from xnmt.param_initializers import LeCunUniformInitializer\n'), ((1004, 1029), 'xnmt.param_initializers.LeCunUniformInitializer', 'LeCunUniformInitializer', ([], {}), '()\n', (1027, 1029), False, 'from xnmt.param_initializers import LeCunUniformInitializer\n'), ((1582, 1607), 'xnmt.param_initializers.LeCunUniformInitializer', 'LeCunUniformInitializer', ([], {}), '()\n', (1605, 1607), False, 'from xnmt.param_initializers import LeCunUniformInitializer\n'), ((1619, 1644), 'xnmt.param_initializers.LeCunUniformInitializer', 'LeCunUniformInitializer', ([], {}), '()\n', (1642, 1644), False, 'from xnmt.param_initializers import LeCunUniformInitializer\n'), ((3141, 3170), 'dynet.pickrange', 'dy.pickrange', (['X', 'i', '(i + steps)'], {}), '(X, i, i + steps)\n', (3153, 3170), True, 'import dynet as dy\n'), ((3403, 3434), 'dynet.pick_batch_elems', 'dy.pick_batch_elems', (['X', 'indexes'], {}), '(X, indexes)\n', (3422, 3434), True, 'import dynet as dy\n'), ((4342, 4363), 'dynet.transpose', 'dy.transpose', (['batch_Q'], {}), '(batch_Q)\n', (4354, 4363), True, 'import dynet as dy\n'), ((4780, 4813), 'dynet.dropout', 'dy.dropout', (['batch_A', 'self.dropout'], {}), '(batch_A, self.dropout)\n', (4790, 4813), True, 'import dynet as dy\n'), ((4852, 4873), 'dynet.transpose', 'dy.transpose', (['batch_V'], {}), '(batch_V)\n', (4864, 4873), True, 'import dynet as dy\n'), ((10838, 10861), 'numpy.argwhere', 'np.argwhere', (['bool_array'], {}), '(bool_array)\n', (10849, 10861), True, 'import numpy as np\n')] |
import numpy as np
from numpy import ndarray
from numba import njit, prange
__cache = True
@njit(nogil=True, parallel=True, cache=__cache)
def element_transformation_matrices(Q: ndarray, nNE: int=2):
    """
    Tiles each element's 3x3 transformation block along the diagonal
    of a (6*nNE) x (6*nNE) matrix, producing one block-diagonal
    transformation matrix per element.

    Parameters
    ----------
    Q : numpy.ndarray
        One 3x3 transformation block per element, shape (nE, 3, 3).
    nNE : int, optional
        Number of nodes per element. Default is 2.

    Returns
    -------
    numpy.ndarray
        Array of shape (nE, 6*nNE, 6*nNE).
    """
    numE = Q.shape[0]
    ndof = 6 * nNE
    out = np.zeros((numE, ndof, ndof), dtype=Q.dtype)
    for e in prange(numE):
        for b in prange(2 * nNE):
            r = 3 * b
            out[e, r:r + 3, r:r + 3] = Q[e]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def nodal_transformation_matrices(Q: ndarray):
    """
    Builds a 6x6 block-diagonal transformation matrix per entry by
    placing each 3x3 block of `Q` twice along the diagonal.

    Parameters
    ----------
    Q : numpy.ndarray
        One 3x3 transformation block per entry, shape (nE, 3, 3).

    Returns
    -------
    numpy.ndarray
        Array of shape (nE, 6, 6).
    """
    numE = Q.shape[0]
    out = np.zeros((numE, 6, 6), dtype=Q.dtype)
    for e in prange(numE):
        for b in prange(2):
            r = 3 * b
            out[e, r:r + 3, r:r + 3] = Q[e]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_element_matrices_out(A: ndarray, Q: ndarray):
    """
    Similarity-transforms per-element matrices with per-element
    transformation matrices: out[i] = Q[i].T @ A[i] @ Q[i].
    """
    out = np.zeros_like(A)
    for e in prange(out.shape[0]):
        out[e] = Q[e].T @ A[e] @ Q[e]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_element_vectors_out(A: ndarray, Q: ndarray):
    """
    Maps element load vectors from local to global frames by
    applying the transpose of each element's transformation matrix.
    """
    out = np.zeros_like(A)
    for e in prange(out.shape[0]):
        out[e] = Q[e].T @ A[e]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_element_vectors_out_multi(A: ndarray, Q: ndarray):
    """
    Maps several load vectors per element from local to global
    frames. The second axis of `A` enumerates the vectors.
    """
    numE, numP = A.shape[:2]
    out = np.zeros_like(A)
    for e in prange(numE):
        for p in prange(numP):
            out[e, p] = Q[e].T @ A[e, p]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_element_vectors_in(dofsol: ndarray, Q: ndarray):
    """
    Maps element dof solutions from the global frame into each
    element's local frame: out[i] = Q[i] @ dofsol[i].
    """
    out = np.zeros_like(dofsol)
    for e in prange(out.shape[0]):
        out[e] = Q[e] @ dofsol[e]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_numint_forces_out(data: ndarray, dcm_G_L: ndarray):
    """
    Maps internal forces, evaluated at several points of several
    elements, from the element-local frames into the global frame
    by applying the transpose of each DCM.

    Parameters
    ----------
    data : (nE, nP, nRHS, nDOFN) numpy.ndarray
        nE : number of elements
        nP : number of sampling points
        nRHS : number of datasets (eg. load cases)
        nDOFN : number of dofs of a node
    dcm_G_L : (nE, 3, 3) numpy.ndarray
        Direction cosine matrices from global to local.

    Returns
    -------
    (nE, nP, nRHS, nDOFN) numpy.ndarray
    """
    numE, numP, numC = data.shape[:3]
    out = np.zeros_like(data)
    for e in prange(numE):
        for p in prange(numP):
            for c in prange(numC):
                out[e, p, c, :] = dcm_G_L[e].T @ data[e, p, c, :]
    return out
@njit(nogil=True, parallel=True, cache=__cache)
def transform_numint_forces_in(data: ndarray, dcm_G_L: ndarray):
    """
    Maps internal forces, evaluated at several points of several
    elements, from one global frame into the element-local frames
    by applying each DCM directly.

    Parameters
    ----------
    data : (nE, nP, nRHS, nDOFN) numpy.ndarray
        nE : number of elements
        nP : number of sampling points
        nRHS : number of datasets (eg. load cases)
        nDOFN : number of dofs of a node
    dcm_G_L : (nE, 3, 3) numpy.ndarray
        Direction cosine matrices from global to local.

    Returns
    -------
    (nE, nP, nRHS, nDOFN) numpy.ndarray
    """
    numE, numP, numC = data.shape[:3]
    out = np.zeros_like(data)
    for e in prange(numE):
        for p in prange(numP):
            for c in prange(numC):
                out[e, p, c, :] = dcm_G_L[e] @ data[e, p, c, :]
    return out
"numba.prange",
"numpy.zeros",
"numba.njit",
"numpy.zeros_like"
] | [((94, 140), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (98, 140), False, 'from numba import njit, prange\n'), ((430, 476), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (434, 476), False, 'from numba import njit, prange\n'), ((720, 766), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (724, 766), False, 'from numba import njit, prange\n'), ((950, 996), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (954, 996), False, 'from numba import njit, prange\n'), ((1245, 1291), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (1249, 1291), False, 'from numba import njit, prange\n'), ((1617, 1663), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (1621, 1663), False, 'from numba import njit, prange\n'), ((1921, 1967), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (1925, 1967), False, 'from numba import njit, prange\n'), ((2824, 2870), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (2828, 2870), False, 'from numba import njit, prange\n'), ((252, 295), 'numpy.zeros', 'np.zeros', (['(nE, nEVAB, nEVAB)'], {'dtype': 'Q.dtype'}), '((nE, nEVAB, nEVAB), dtype=Q.dtype)\n', (260, 295), True, 'import numpy as np\n'), ((310, 320), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (316, 320), False, 'from numba import njit, prange\n'), ((554, 589), 
'numpy.zeros', 'np.zeros', (['(nE, 6, 6)'], {'dtype': 'Q.dtype'}), '((nE, 6, 6), dtype=Q.dtype)\n', (562, 589), True, 'import numpy as np\n'), ((604, 614), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (610, 614), False, 'from numba import njit, prange\n'), ((837, 853), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (850, 853), True, 'import numpy as np\n'), ((868, 888), 'numba.prange', 'prange', (['res.shape[0]'], {}), '(res.shape[0])\n', (874, 888), False, 'from numba import njit, prange\n'), ((1140, 1156), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (1153, 1156), True, 'import numpy as np\n'), ((1171, 1191), 'numba.prange', 'prange', (['res.shape[0]'], {}), '(res.shape[0])\n', (1177, 1191), False, 'from numba import njit, prange\n'), ((1480, 1496), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (1493, 1496), True, 'import numpy as np\n'), ((1511, 1521), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (1517, 1521), False, 'from numba import njit, prange\n'), ((1812, 1833), 'numpy.zeros_like', 'np.zeros_like', (['dofsol'], {}), '(dofsol)\n', (1825, 1833), True, 'import numpy as np\n'), ((1847, 1867), 'numba.prange', 'prange', (['res.shape[0]'], {}), '(res.shape[0])\n', (1853, 1867), False, 'from numba import njit, prange\n'), ((2623, 2642), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (2636, 2642), True, 'import numpy as np\n'), ((2657, 2667), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (2663, 2667), False, 'from numba import njit, prange\n'), ((3546, 3565), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (3559, 3565), True, 'import numpy as np\n'), ((3579, 3589), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (3585, 3589), False, 'from numba import njit, prange\n'), ((339, 354), 'numba.prange', 'prange', (['(2 * nNE)'], {}), '(2 * nNE)\n', (345, 354), False, 'from numba import njit, prange\n'), ((633, 642), 'numba.prange', 'prange', (['(2)'], {}), '(2)\n', (639, 642), 
False, 'from numba import njit, prange\n'), ((1541, 1551), 'numba.prange', 'prange', (['nP'], {}), '(nP)\n', (1547, 1551), False, 'from numba import njit, prange\n'), ((2687, 2697), 'numba.prange', 'prange', (['nP'], {}), '(nP)\n', (2693, 2697), False, 'from numba import njit, prange\n'), ((3608, 3618), 'numba.prange', 'prange', (['nP'], {}), '(nP)\n', (3614, 3618), False, 'from numba import njit, prange\n'), ((2721, 2731), 'numba.prange', 'prange', (['nC'], {}), '(nC)\n', (2727, 2731), False, 'from numba import njit, prange\n'), ((3641, 3651), 'numba.prange', 'prange', (['nC'], {}), '(nC)\n', (3647, 3651), False, 'from numba import njit, prange\n')] |
from __future__ import annotations
import numpy as np
import scipy.signal
from functools import partial
from typing import Callable
# simpleaudio is an optional native dependency; fail fast with install
# instructions instead of a bare ImportError deep inside playback code.
try:
    import simpleaudio as sa
except ModuleNotFoundError:
    raise ModuleNotFoundError("Please install simpleaudio to use this module.\n"
                              "pip install simpleaudio\n\n"
                              "For Linux see simpleaudio dependecies at:\n"
                              "https://simpleaudio.readthedocs.io"
                              "/en/latest/installation.html#linux-dependencies")
# Hard upper bound (Hz) accepted by the channel frequency setter.
MAX_FREQ = 2000
# A signal function maps an array of sample times to sample values.
SignalFunction = Callable[[np.ndarray], np.ndarray]
def sine(x: np.ndarray, freq: float, phase: float) -> np.ndarray:
    """Sample a sine wave of frequency ``freq`` (Hz) and phase offset
    ``phase`` (radians) at the times given in ``x`` (seconds)."""
    arg = x * 2 * np.pi * freq + phase
    return np.sin(arg)
def triangular(x: np.ndarray, freq: float, phase: float) -> np.ndarray:
    """Sample a symmetric triangle wave (sawtooth with ``width=0.5``)
    of frequency ``freq`` (Hz) and phase offset ``phase`` (radians)."""
    arg = x * 2 * np.pi * freq + phase
    return scipy.signal.sawtooth(arg, width=0.5)
def square(x: np.ndarray, freq: float, phase: float) -> np.ndarray:
    """Sample a 50%-duty square wave of frequency ``freq`` (Hz) and
    phase offset ``phase`` (radians) at the times in ``x`` (seconds)."""
    arg = x * 2 * np.pi * freq + phase
    return scipy.signal.square(arg, duty=0.5)
def make_signal(f: SignalFunction, duration: float, fs: int = 44100) -> np.ndarray:
    """Sample the signal function `f` on [0, duration) at sample rate `fs`.

    Parameters
    ----------
    f : SignalFunction
        Maps an array of sample times (seconds) to sample values.
    duration : float
        Signal length in seconds.
    fs : int, optional
        Sample rate in Hz. Default is 44100.

    Returns
    -------
    np.ndarray
        `f` evaluated at ``round(duration * fs)`` uniformly spaced times.
    """
    # Derive the sample count from duration * fs directly. The previous
    # formulation `duration // (1 / fs)` floors a floating-point quotient
    # and could silently drop one sample to rounding error.
    n = int(round(duration * fs))
    step = 1 / fs
    t = np.arange(n) * step
    return f(t)
class WaveGeneratorChannel:
    """One output channel of the wave generator.

    Holds waveform type, frequency, phase, amplitude and an enable flag,
    and renders the configured signal scaled to the 16-bit sample range.
    """
    def __init__(self, fs: int = 44100, duration: int = 60):
        self._duration = duration
        self._fs = fs
        self._freq = 440
        self._waveform = "sine"
        self._phase = 0
        self._amplitude = 1.0
        self._enabled_output = False
        self._enabled_controls = True
        # Name -> sample-function mapping consulted by the `waveform` setter.
        self._waveforms = {
            "sine": sine,
            "triangular": triangular,
            "square": square
        }
    def _guard_controls(self):
        """Reject parameter changes while the generator loop is running."""
        if self._enabled_controls is False:
            raise RuntimeError("Controls of WaveGenerator are blocked during loop")
    @property
    def enabled(self) -> bool:
        """Whether this channel contributes to the generator output."""
        return self._enabled_output
    @enabled.setter
    def enabled(self, value: bool):
        self._enabled_output = value
    @property
    def frequency(self) -> int:
        """Waveform frequency in Hz, constrained to [1, MAX_FREQ]."""
        return self._freq
    @frequency.setter
    def frequency(self, value: int):
        self._guard_controls()
        if not 1 <= value <= MAX_FREQ:
            raise ValueError(f"MIN: 1, MAX: {MAX_FREQ}")
        self._freq = int(value)
    @property
    def phase(self) -> float:
        """Phase offset in radians, wrapped into [-pi, pi)."""
        return self._phase
    @phase.setter
    def phase(self, value: float):
        self._guard_controls()
        self._phase = (value - np.pi) % (2 * np.pi) - np.pi
    @property
    def amplitude(self) -> float:
        """Linear amplitude scale in [0.0, 1.0]."""
        return self._amplitude
    @amplitude.setter
    def amplitude(self, value: float):
        self._guard_controls()
        if not 0.0 <= value <= 1.0:
            raise ValueError(f"MIN: 0.0, MAX: 1.0")
        self._amplitude = float(value)
    @property
    def waveform(self) -> str:
        """Name of the active waveform ("sine", "triangular" or "square")."""
        return self._waveform
    @waveform.setter
    def waveform(self, value: str):
        self._guard_controls()
        name = value.lower()
        if name not in self._waveforms.keys():
            raise ValueError(f"{name} is not a available waveform.\n"
                             f"Posible values are: {sorted(self._waveforms.keys())}")
        self._waveform = name
    def get_signal(self) -> np.ndarray:
        """Render this channel: sampled waveform scaled to the 16-bit range."""
        wave_fn = partial(
            self._waveforms[self._waveform],
            freq=self._freq,
            phase=self._phase
        )
        samples = make_signal(wave_fn, duration=self._duration, fs=self._fs)
        return samples * self._amplitude * (2**15 - 1)
class WaveGenerator:
    """Two-channel signal-generator facade.

    Owns two WaveGeneratorChannel instances and assembles their
    rendered signals for looped playback through WaveLoop.
    """
    def __init__(self, fs: int = 44100, duration: int = 60):
        self._duration = duration
        self._fs = fs
        self._channel1 = WaveGeneratorChannel(fs=fs, duration=duration)
        self._channel2 = WaveGeneratorChannel(fs=fs, duration=duration)
        self._wave_loop = None
    @property
    def duration(self) -> int:
        """Signal duration in seconds."""
        return self._duration
    @property
    def channel1(self) -> WaveGeneratorChannel:
        """First generator channel."""
        return self._channel1
    @property
    def channel2(self) -> WaveGeneratorChannel:
        """Second generator channel."""
        return self._channel2
    def get_channels_signal(self) -> np.ndarray:
        """Stack the rendered signals of all enabled channels."""
        rendered = [
            ch.get_signal()
            for ch in (self._channel1, self._channel2)
            if ch.enabled is True
        ]
        return np.array(rendered)
    def play(self) -> WaveLoop:
        """Wrap the enabled channels in a WaveLoop and start playback."""
        loop = WaveLoop(
            signal=self.get_channels_signal(),
            normalize=False,
            fs=self._fs
        )
        self._wave_loop = loop
        return loop
class WaveLoop:
    """Playable audio wrapper around a sample array.

    Converts a mono ((n) or (1, n)) or stereo ((2, n)) signal into a
    ``simpleaudio.WaveObject`` and starts/stops playback when used as a
    context manager.
    """
    def __init__(self, signal: np.ndarray, normalize: bool = False, fs: int = 44100):
        self._fs = fs
        self._wave_object = self._generate(signal, normalize)
        self._play_object = None
    def _generate(self, signal: np.ndarray, normalize: bool = True) -> sa.WaveObject:
        # Build the simpleaudio WaveObject from `signal`.
        # NOTE(review): this default (normalize=True) differs from
        # __init__'s normalize=False; __init__ always passes the flag
        # explicitly, so only direct _generate calls see True.
        signal = np.squeeze(signal)
        if signal.ndim == 1:
            channels = [signal]
        elif signal.ndim == 2:
            if signal.shape[0] == 2:
                channels = [signal[0, :], signal[1, :]]
            else:
                raise ValueError(f"Unexpected shape of signal: "
                                 f"signal.shape={signal.shape}.\n"
                                 f"Posible values are: \n"
                                 f"  (n)    -> single channel\n"
                                 f"  (1, n) -> single channel\n"
                                 f"  (2, n) -> double channel\n")
        else:
            raise ValueError(f"Unexpected dimension of signal: "
                             f"signal.ndim={signal.ndim}.\n"
                             f"Posible values are: 1 or 2")
        if normalize is True:
            # Ensure that highest value is in 16-bit range.
            # NOTE(review): divides by max(|ch|) — an all-zero channel
            # would divide by zero; confirm callers never pass silence.
            channels = [
                ch * (2**15 - 1) / np.max(np.abs(ch))
                for ch in channels
            ]
        else:
            # Without normalization the caller must already provide
            # samples inside the signed 16-bit range.
            if np.any(signal < -(2**15 - 1)) or np.any(signal > (2**15 - 1)):
                raise ValueError(f"Signal type is a 16-bit signed integer. "
                                 f"Values under -32767 or upper 32767 are not allowed")
        # Interleave channels into (n, num_channels) int16 frames.
        audio = np.ascontiguousarray(np.asarray(channels).T)
        audio = audio.astype(np.int16)
        wave_object = sa.WaveObject(
            audio_data=audio,
            num_channels=len(channels),
            bytes_per_sample=2,
            sample_rate=self._fs
        )
        return wave_object
    def __enter__(self) -> WaveLoop:
        # Start playback; "SONANDO" is a user-facing Spanish status print
        # ("playing") and is left unchanged.
        self._play_object = self._wave_object.play()
        print("SONANDO")
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop playback on context exit; "SONIDO DETENIDO" = "sound stopped".
        self._play_object.stop()
        print("SONIDO DETENIDO")
| [
"numpy.abs",
"numpy.asarray",
"numpy.any",
"numpy.squeeze",
"functools.partial",
"numpy.sin",
"numpy.arange"
] | [((711, 747), 'numpy.sin', 'np.sin', (['(x * 2 * np.pi * freq + phase)'], {}), '(x * 2 * np.pi * freq + phase)\n', (717, 747), True, 'import numpy as np\n'), ((1158, 1170), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1167, 1170), True, 'import numpy as np\n'), ((3540, 3616), 'functools.partial', 'partial', (['self._waveforms[self._waveform]'], {'freq': 'self._freq', 'phase': 'self._phase'}), '(self._waveforms[self._waveform], freq=self._freq, phase=self._phase)\n', (3547, 3616), False, 'from functools import partial\n'), ((5169, 5187), 'numpy.squeeze', 'np.squeeze', (['signal'], {}), '(signal)\n', (5179, 5187), True, 'import numpy as np\n'), ((6225, 6256), 'numpy.any', 'np.any', (['(signal < -(2 ** 15 - 1))'], {}), '(signal < -(2 ** 15 - 1))\n', (6231, 6256), True, 'import numpy as np\n'), ((6258, 6286), 'numpy.any', 'np.any', (['(signal > 2 ** 15 - 1)'], {}), '(signal > 2 ** 15 - 1)\n', (6264, 6286), True, 'import numpy as np\n'), ((6491, 6511), 'numpy.asarray', 'np.asarray', (['channels'], {}), '(channels)\n', (6501, 6511), True, 'import numpy as np\n'), ((6135, 6145), 'numpy.abs', 'np.abs', (['ch'], {}), '(ch)\n', (6141, 6145), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
from functools import reduce
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import ao2mo
from pyscf import fci
from pyscf.tools import molden
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import casscf as casscf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.fci.addons import fix_spin_
def grad_elec(mc, mf_grad):
    """Reference implementation of the electronic part of the CASSCF
    nuclear gradient, used below to validate pyscf.grad.casscf.

    Parameters
    ----------
    mc : converged CASSCF object (supplies mo_coeff, ci, ncore, ncas, ...)
    mf_grad : SCF gradients object (supplies hcore_generator, get_ovlp)

    Returns
    -------
    (natm, 3) ndarray with the electronic gradient contribution; the
    nuclear-nuclear repulsion term is added separately by the caller.
    """
    mf = mf_grad.base
    mol = mf_grad.mol
    # NOTE(review): mo_energy is fetched but not used below.
    mo_energy = mc.mo_energy
    mo_coeff = mc.mo_coeff
    ncore = mc.ncore
    ncas = mc.ncas
    nocc = ncore + ncas
    nelecas = mc.nelecas
    nao, nmo = mo_coeff.shape
    hcore_deriv = mf_grad.hcore_generator(mol)
    s1 = mf_grad.get_ovlp(mol)
    # MO-basis 1- and 2-particle density matrices: doubly occupied core
    # orbitals plus the active-space RDMs from the CI solver.
    casdm1, casdm2 = mc.fcisolver.make_rdm12(mc.ci, ncas, nelecas)
    dm1 = numpy.zeros((nmo,nmo))
    dm1[numpy.diag_indices(ncore)] = 2
    dm1[ncore:nocc,ncore:nocc] = casdm1
    dm2 = numpy.zeros((nmo,nmo,nmo,nmo))
    # Core-core (4J - 2K pattern) and core-active blocks of the 2-RDM.
    for i in range(ncore):
        for j in range(ncore):
            dm2[i,i,j,j] += 4
            dm2[i,j,j,i] -= 2
        dm2[i,i,ncore:nocc,ncore:nocc] = casdm1 * 2
        dm2[ncore:nocc,ncore:nocc,i,i] = casdm1 * 2
        dm2[i,ncore:nocc,ncore:nocc,i] =-casdm1
        dm2[ncore:nocc,i,i,ncore:nocc] =-casdm1
    dm2[ncore:nocc,ncore:nocc,ncore:nocc,ncore:nocc] = casdm2
    # One- and two-electron integrals in the MO basis.
    h1 = reduce(numpy.dot, (mo_coeff.T, mc._scf.get_hcore(), mo_coeff))
    h2 = ao2mo.kernel(mf._eri, mo_coeff, compact=False).reshape([nmo]*4)
# Generalized Fock, according to generalized Brillouin theorem
# Adv. Chem. Phys., 69, 63
    gfock = numpy.dot(h1, dm1)
    gfock+= numpy.einsum('iqrs,qjsr->ij', h2, dm2)
    gfock = (gfock + gfock.T) * .5
    # Energy-weighted density (Pulay term) and AO-basis densities.
    dme0 = reduce(numpy.dot, (mo_coeff[:,:nocc], gfock[:nocc,:nocc], mo_coeff[:,:nocc].T))
    dm1 = reduce(numpy.dot, (mo_coeff, dm1, mo_coeff.T))
    dm2 = lib.einsum('ijkl,pi,qj,rk,sl->pqrs', dm2,
                     mo_coeff, mo_coeff, mo_coeff, mo_coeff)
    # Derivatives of the two-electron AO integrals w.r.t. nuclear coordinates.
    eri_deriv1 = mol.intor('int2e_ip1', comp=3).reshape(3,nao,nao,nao,nao)
    atmlst = range(mol.natm)
    aoslices = mol.aoslice_by_atom()
    de = numpy.zeros((len(atmlst),3))
    # Per-atom assembly: core-Hamiltonian derivative, overlap-derivative
    # (Pulay) term weighted by dme0, and the two-electron term.
    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        h1ao = hcore_deriv(ia)
        de[k] += numpy.einsum('xij,ij->x', h1ao, dm1)
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
        de[k] -= numpy.einsum('xijkl,ijkl->x', eri_deriv1[:,p0:p1], dm2[p0:p1]) * 2
    return de
# Shared test fixture: N2 with two off-axis H atoms; the RHF reference is
# converged tightly so the gradient fingerprints below are reproducible.
mol = gto.Mole()
mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
mol.verbose = 5
mol.output = '/dev/null'
mol.symmetry = False
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-12)
def tearDownModule():
    """Close the fixture's output stream and drop the module globals."""
    global mol, mf
    mol.stdout.close()
    del mol, mf
class KnownValues(unittest.TestCase):
    """Regression tests for CASSCF nuclear gradients.

    Reference numbers are fingerprints (lib.fp) of previously validated
    gradients; the scanner tests additionally cross-check the analytic
    gradient against central finite differences of the energy.
    """
    def test_casscf_grad(self):
        # Analytic CASSCF(4,4) gradient vs. the local grad_elec reference
        # implementation plus the nuclear-nuclear term.
        mc = mcscf.CASSCF(mf, 4, 4).run()
        g1 = casscf_grad.Gradients(mc).kernel()
        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7)
        g1ref = grad_elec(mc, mf.nuc_grad_method())
        g1ref += rhf_grad.grad_nuc(mol)
        self.assertAlmostEqual(abs(g1-g1ref).max(), 0, 9)
        # Cross-check dE/dz of atom 1 against central finite differences.
        mcs = mc.as_scanner()
        pmol = mol.copy()
        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4)
#    def test_frozen(self):
#        mc = mcscf.CASSCF(mf, 4, 4).set(frozen=2).run()
#        gscan = mc.nuc_grad_method().as_scanner()
#        g1 = gscan(mol)[1]
#        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 9)
#
#        mcs = mc.as_scanner()
#        pmol = mol.copy()
#        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
#        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
#        self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4)
    def test_scanner(self):
        # Gradient scanner API (double as_scanner must be idempotent).
        mc = mcscf.CASSCF(mf, 4, 4)
        gs = mc.nuc_grad_method().as_scanner().as_scanner()
        e, g1 = gs(mol.atom, atmlst=range(4))
        self.assertAlmostEqual(e, -108.39289688030243, 9)
        self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7)
    def test_state_specific_scanner(self):
        # Gradient of a specific excited state (state 2) for N2.
        mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', verbose=0)
        mf = scf.RHF(mol).run(conv_tol=1e-14)
        mc = mcscf.CASSCF(mf, 4, 4)
        gs = mc.state_specific_(2).nuc_grad_method().as_scanner()
        e, de = gs(mol)
        self.assertAlmostEqual(e, -108.68788613661442, 7)
        self.assertAlmostEqual(lib.fp(de), -0.10695162143777398, 5)
        mcs = gs.base
        pmol = mol.copy()
        e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201'))
        e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199'))
        self.assertAlmostEqual(de[1,2], (e1-e2)/0.002*lib.param.BOHR, 5)
    def test_state_average_scanner(self):
        # State-averaged CASSCF: averaged gradient and per-state gradients.
        mc = mcscf.CASSCF(mf, 4, 4)
        mc.conv_tol = 1e-10 # B/c high sensitivity in the numerical test
        mc.fcisolver.conv_tol = 1e-10
        gs = mc.state_average_([0.5, 0.5]).nuc_grad_method().as_scanner()
        e_avg, de_avg = gs(mol)
        e_0, de_0 = gs(mol, state=0)
        e_1, de_1 = gs(mol, state=1)
        # Finite-difference energies at displaced geometries.
        mcs = gs.base
        pmol = mol.copy()
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e1_avg = mcs.e_average
        e1_0 = mcs.e_states[0]
        e1_1 = mcs.e_states[1]
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        e2_avg = mcs.e_average
        e2_0 = mcs.e_states[0]
        e2_1 = mcs.e_states[1]
        self.assertAlmostEqual(e_avg, -1.083838462140703e+02, 9)
        self.assertAlmostEqual(lib.fp(de_avg), -1.034340877615413e-01, 7)
        self.assertAlmostEqual(e_0, -1.083902662192770e+02, 9)
        self.assertAlmostEqual(lib.fp(de_0), -6.398928175384316e-02, 7)
        self.assertAlmostEqual(e_1, -1.083774262088640e+02, 9)
        self.assertAlmostEqual(lib.fp(de_1), -1.428890918624837e-01, 7)
        self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4)
    def test_state_average_mix_scanner(self):
        # State average over solvers of different spin (mix of triplet/singlet).
        mc = mcscf.CASSCF(mf, 4, 4)
        mc.conv_tol = 1e-10 # B/c high sensitivity in the numerical test
        fcisolvers = [fci.solver (mol, singlet=bool(i)) for i in range (2)]
        fcisolvers[0].conv_tol = fcisolvers[1].conv_tol = 1e-10
        fcisolvers[0].spin = 2
        mc = mcscf.addons.state_average_mix_(mc, fcisolvers, (.5, .5))
        gs = mc.nuc_grad_method().as_scanner()
        e_avg, de_avg = gs(mol)
        e_0, de_0 = gs(mol, state=0)
        e_1, de_1 = gs(mol, state=1)
        # Finite-difference energies at displaced geometries.
        mcs = gs.base
        pmol = mol.copy()
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2'))
        e1_avg = mcs.e_average
        e1_0 = mcs.e_states[0]
        e1_1 = mcs.e_states[1]
        mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2'))
        e2_avg = mcs.e_average
        e2_0 = mcs.e_states[0]
        e2_1 = mcs.e_states[1]
        self.assertAlmostEqual(e_avg, -1.083838462141992e+02, 9)
        self.assertAlmostEqual(lib.fp(de_avg), -1.034392760319145e-01, 7)
        self.assertAlmostEqual(e_0, -1.083902661656155e+02, 9)
        self.assertAlmostEqual(lib.fp(de_0), -6.398921123988113e-02, 7)
        self.assertAlmostEqual(e_1, -1.083774262627830e+02, 9)
        self.assertAlmostEqual(lib.fp(de_1), -1.428891618903179e-01, 7)
        self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4)
        self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4)
    def test_with_x2c_scanner(self):
        # Scalar-relativistic (X2C) CASSCF gradient with reduced light speed.
        with lib.light_speed(20.):
            mc = mcscf.CASSCF(mf.x2c(), 4, 4).run()
            gscan = mc.nuc_grad_method().as_scanner()
            g1 = gscan(mol)[1]
            self.assertAlmostEqual(lib.fp(g1), -0.07027493570511917, 7)
            mcs = mcscf.CASSCF(mf, 4, 4).as_scanner().x2c()
            e1 = mcs('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
            e2 = mcs('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
            self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 5)
    def test_with_qmmm_scanner(self):
        # QM/MM: water molecule with one external point charge; the MM
        # charges can be attached before or after CASSCF construction.
        from pyscf import qmmm
        mol = gto.Mole()
        mol.atom = ''' O                  0.00000000    0.00000000   -0.11081188
                   H                 -0.00000000   -0.84695236    0.59109389
                   H                 -0.00000000    0.89830571    0.52404783 '''
        mol.verbose = 0
        mol.basis = '6-31g'
        mol.build()
        coords = [(0.5,0.6,0.1)]
        #coords = [(0.0,0.0,0.0)]
        charges = [-0.1]
        mf = qmmm.add_mm_charges(scf.RHF(mol), coords, charges)
        mc = mcscf.CASSCF(mf, 4, 4).as_scanner()
        e_tot, g = mc.nuc_grad_method().as_scanner()(mol)
        self.assertAlmostEqual(e_tot, -76.0461574155984, 7)
        self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6)
        e1 = mc(''' O                  0.00100000    0.00000000   -0.11081188
                   H                 -0.00000000   -0.84695236    0.59109389
                   H                 -0.00000000    0.89830571    0.52404783 ''')
        e2 = mc(''' O                 -0.00100000    0.00000000   -0.11081188
                   H                 -0.00000000   -0.84695236    0.59109389
                   H                 -0.00000000    0.89830571    0.52404783 ''')
        ref = (e1 - e2)/0.002 * lib.param.BOHR
        self.assertAlmostEqual(g[0,0], ref, 4)
        mf = scf.RHF(mol)
        mc = qmmm.add_mm_charges(mcscf.CASSCF(mf, 4, 4).as_scanner(), coords, charges)
        e_tot, g = mc.nuc_grad_method().as_scanner()(mol)
        self.assertAlmostEqual(e_tot, -76.0461574155984, 7)
        self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6)
    def test_symmetrize(self):
        # Gradient with point-group symmetry enabled.
        mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True, verbose=0)
        g = mol.RHF.run().CASSCF(4, 4).run().Gradients().kernel()
        self.assertAlmostEqual(lib.fp(g), 0.12355818572359845, 7)
if __name__ == "__main__":
print("Tests for CASSCF gradients")
unittest.main()
| [
"pyscf.gto.Mole",
"pyscf.lib.fp",
"pyscf.lib.light_speed",
"pyscf.gto.M",
"pyscf.ao2mo.kernel",
"functools.reduce",
"pyscf.grad.rhf.grad_nuc",
"numpy.diag_indices",
"pyscf.mcscf.CASSCF",
"numpy.dot",
"numpy.zeros",
"numpy.einsum",
"pyscf.grad.casscf.Gradients",
"pyscf.lib.einsum",
"unitt... | [((2513, 2523), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (2521, 2523), False, 'from pyscf import gto\n'), ((850, 873), 'numpy.zeros', 'numpy.zeros', (['(nmo, nmo)'], {}), '((nmo, nmo))\n', (861, 873), False, 'import numpy\n'), ((963, 996), 'numpy.zeros', 'numpy.zeros', (['(nmo, nmo, nmo, nmo)'], {}), '((nmo, nmo, nmo, nmo))\n', (974, 996), False, 'import numpy\n'), ((1622, 1640), 'numpy.dot', 'numpy.dot', (['h1', 'dm1'], {}), '(h1, dm1)\n', (1631, 1640), False, 'import numpy\n'), ((1653, 1691), 'numpy.einsum', 'numpy.einsum', (['"""iqrs,qjsr->ij"""', 'h2', 'dm2'], {}), "('iqrs,qjsr->ij', h2, dm2)\n", (1665, 1691), False, 'import numpy\n'), ((1738, 1825), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff[:, :nocc], gfock[:nocc, :nocc], mo_coeff[:, :nocc].T)'], {}), '(numpy.dot, (mo_coeff[:, :nocc], gfock[:nocc, :nocc], mo_coeff[:, :\n nocc].T))\n', (1744, 1825), False, 'from functools import reduce\n'), ((1829, 1875), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff, dm1, mo_coeff.T)'], {}), '(numpy.dot, (mo_coeff, dm1, mo_coeff.T))\n', (1835, 1875), False, 'from functools import reduce\n'), ((1886, 1971), 'pyscf.lib.einsum', 'lib.einsum', (['"""ijkl,pi,qj,rk,sl->pqrs"""', 'dm2', 'mo_coeff', 'mo_coeff', 'mo_coeff', 'mo_coeff'], {}), "('ijkl,pi,qj,rk,sl->pqrs', dm2, mo_coeff, mo_coeff, mo_coeff,\n mo_coeff)\n", (1896, 1971), False, 'from pyscf import lib\n'), ((10435, 10450), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10448, 10450), False, 'import unittest\n'), ((881, 906), 'numpy.diag_indices', 'numpy.diag_indices', (['ncore'], {}), '(ncore)\n', (899, 906), False, 'import numpy\n'), ((2297, 2333), 'numpy.einsum', 'numpy.einsum', (['"""xij,ij->x"""', 'h1ao', 'dm1'], {}), "('xij,ij->x', h1ao, dm1)\n", (2309, 2333), False, 'import numpy\n'), ((2655, 2667), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (2662, 2667), False, 'from pyscf import scf\n'), ((3069, 3091), 'pyscf.grad.rhf.grad_nuc', 'rhf_grad.grad_nuc', 
(['mol'], {}), '(mol)\n', (3086, 3091), True, 'from pyscf.grad import rhf as rhf_grad\n'), ((4001, 4023), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (4013, 4023), False, 'from pyscf import mcscf\n'), ((4315, 4372), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""N 0 0 0; N 0 0 1.2"""', 'basis': '"""631g"""', 'verbose': '(0)'}), "(atom='N 0 0 0; N 0 0 1.2', basis='631g', verbose=0)\n", (4320, 4372), False, 'from pyscf import gto\n'), ((4432, 4454), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (4444, 4454), False, 'from pyscf import mcscf\n'), ((4963, 4985), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (4975, 4985), False, 'from pyscf import mcscf\n'), ((6369, 6391), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (6381, 6391), False, 'from pyscf import mcscf\n'), ((6655, 6714), 'pyscf.mcscf.addons.state_average_mix_', 'mcscf.addons.state_average_mix_', (['mc', 'fcisolvers', '(0.5, 0.5)'], {}), '(mc, fcisolvers, (0.5, 0.5))\n', (6686, 6714), False, 'from pyscf import mcscf\n'), ((8531, 8541), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (8539, 8541), False, 'from pyscf import gto\n'), ((9826, 9838), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (9833, 9838), False, 'from pyscf import scf\n'), ((10157, 10229), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""N 0 0 0; N 0 0 1.2"""', 'basis': '"""631g"""', 'symmetry': '(True)', 'verbose': '(0)'}), "(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True, verbose=0)\n", (10162, 10229), False, 'from pyscf import gto\n'), ((1456, 1502), 'pyscf.ao2mo.kernel', 'ao2mo.kernel', (['mf._eri', 'mo_coeff'], {'compact': '(False)'}), '(mf._eri, mo_coeff, compact=False)\n', (1468, 1502), False, 'from pyscf import ao2mo\n'), ((2351, 2403), 'numpy.einsum', 'numpy.einsum', (['"""xij,ij->x"""', 's1[:, p0:p1]', 'dme0[p0:p1]'], {}), "('xij,ij->x', s1[:, p0:p1], dme0[p0:p1])\n", (2363, 2403), 
False, 'import numpy\n'), ((2424, 2487), 'numpy.einsum', 'numpy.einsum', (['"""xijkl,ijkl->x"""', 'eri_deriv1[:, p0:p1]', 'dm2[p0:p1]'], {}), "('xijkl,ijkl->x', eri_deriv1[:, p0:p1], dm2[p0:p1])\n", (2436, 2487), False, 'import numpy\n'), ((2961, 2971), 'pyscf.lib.fp', 'lib.fp', (['g1'], {}), '(g1)\n', (2967, 2971), False, 'from pyscf import lib\n'), ((4219, 4229), 'pyscf.lib.fp', 'lib.fp', (['g1'], {}), '(g1)\n', (4225, 4229), False, 'from pyscf import lib\n'), ((4634, 4644), 'pyscf.lib.fp', 'lib.fp', (['de'], {}), '(de)\n', (4640, 4644), False, 'from pyscf import lib\n'), ((5752, 5766), 'pyscf.lib.fp', 'lib.fp', (['de_avg'], {}), '(de_avg)\n', (5758, 5766), False, 'from pyscf import lib\n'), ((5889, 5901), 'pyscf.lib.fp', 'lib.fp', (['de_0'], {}), '(de_0)\n', (5895, 5901), False, 'from pyscf import lib\n'), ((6025, 6037), 'pyscf.lib.fp', 'lib.fp', (['de_1'], {}), '(de_1)\n', (6031, 6037), False, 'from pyscf import lib\n'), ((7341, 7355), 'pyscf.lib.fp', 'lib.fp', (['de_avg'], {}), '(de_avg)\n', (7347, 7355), False, 'from pyscf import lib\n'), ((7478, 7490), 'pyscf.lib.fp', 'lib.fp', (['de_0'], {}), '(de_0)\n', (7484, 7490), False, 'from pyscf import lib\n'), ((7613, 7625), 'pyscf.lib.fp', 'lib.fp', (['de_1'], {}), '(de_1)\n', (7619, 7625), False, 'from pyscf import lib\n'), ((7948, 7969), 'pyscf.lib.light_speed', 'lib.light_speed', (['(20.0)'], {}), '(20.0)\n', (7963, 7969), False, 'from pyscf import lib\n'), ((8987, 8999), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (8994, 8999), False, 'from pyscf import scf\n'), ((9216, 9225), 'pyscf.lib.fp', 'lib.fp', (['g'], {}), '(g)\n', (9222, 9225), False, 'from pyscf import lib\n'), ((10075, 10084), 'pyscf.lib.fp', 'lib.fp', (['g'], {}), '(g)\n', (10081, 10084), False, 'from pyscf import lib\n'), ((10327, 10336), 'pyscf.lib.fp', 'lib.fp', (['g'], {}), '(g)\n', (10333, 10336), False, 'from pyscf import lib\n'), ((2853, 2875), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', 
(2865, 2875), False, 'from pyscf import mcscf\n'), ((2895, 2920), 'pyscf.grad.casscf.Gradients', 'casscf_grad.Gradients', (['mc'], {}), '(mc)\n', (2916, 2920), True, 'from pyscf.grad import casscf as casscf_grad\n'), ((4386, 4398), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (4393, 4398), False, 'from pyscf import scf\n'), ((8142, 8152), 'pyscf.lib.fp', 'lib.fp', (['g1'], {}), '(g1)\n', (8148, 8152), False, 'from pyscf import lib\n'), ((9031, 9053), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (9043, 9053), False, 'from pyscf import mcscf\n'), ((9872, 9894), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (9884, 9894), False, 'from pyscf import mcscf\n'), ((8198, 8220), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(4)', '(4)'], {}), '(mf, 4, 4)\n', (8210, 8220), False, 'from pyscf import mcscf\n')] |
# SM.NLMS.py
#
# Implements the Set-membership Normalized LMS algorithm for COMPLEX valued data.
# (Algorithm 6.1 - book: Adaptive Filtering: Algorithms and Practical
# Implementation, Diniz)
#
# Authors:
# . <NAME> - <EMAIL> & <EMAIL>
# . <NAME> - <EMAIL> & <EMAIL>
# . <NAME> - <EMAIL> & <EMAIL>
# . <NAME> - <EMAIL> & <EMAIL>
# . <NAME> - <EMAIL> & <EMAIL>
# . <NAME> - <EMAIL>
# Imports
import numpy as np
from time import time
def NLMS(Filter, desired_signal: np.ndarray, input_signal: np.ndarray, gamma_bar: float, gamma: float, verbose: bool = False) -> dict:
    """
    Set-membership Normalized LMS algorithm for COMPLEX valued data.
    (Algorithm 6.1 - book: Adaptive Filtering: Algorithms and Practical
    Implementation, Diniz)

    Inputs
    -------
    Filter          : adaptive filter object exposing `filter_order`,
                      `coefficients` and `coefficients_history`.
    desired_signal  : desired signal (row vector, numpy array).
    input_signal    : input signal fed to the filter (row vector, numpy array).
    gamma_bar       : upper bound for the error modulus; coefficients are only
                      updated when |e(k)| > gamma_bar.  A common choice is
                      sqrt(5 * sigma_n2) where sigma_n2 is the noise variance.
    gamma           : regularization factor of the normalization term.
    verbose         : print runtime information when True.

    Outputs
    -------
    (dictionary, nUpdates):
        dictionary with
            outputs      : estimated output per iteration (numpy array)
            errors       : a-priori error per iteration (numpy array)
            coefficients : Filter.coefficients_history
        nUpdates : number of iterations in which the coefficients were updated.
    """
    tic = time()
    nIterations = desired_signal.size
    error_vector = np.array([])
    outputs_vector = np.array([])
    nUpdates = 0

    # Zero-pad the input so the first regressors are well defined.
    prefixedInput = np.concatenate(
        (np.zeros(Filter.filter_order), input_signal))

    for it in range(nIterations):
        # Regressor x(k) = [x(it), x(it-1), ..., x(it-filter_order)],
        # most recent sample first.  Bug fix: the original slice
        # prefixedInput[it+filter_order:-1] produced a variable-length
        # vector that excluded the current sample.
        regressor = prefixedInput[it:it + Filter.filter_order + 1][::-1]
        coefficients = Filter.coefficients
        output_it = np.dot(coefficients.conj(), regressor)
        error_it = desired_signal[it] - output_it

        # Set-membership step size: adapt only when the error bound is violated.
        if abs(error_it) > gamma_bar:
            nUpdates += 1
            mu = 1 - (gamma_bar / abs(error_it))
        else:
            mu = 0

        error_vector = np.append(error_vector, error_it)
        outputs_vector = np.append(outputs_vector, output_it)

        # w(k+1) = w(k) + mu * e*(k) x(k) / (gamma + x^H(k) x(k)).
        # Bug fix: the normalization must be the scalar inner product x^H x;
        # the original elementwise product regressor.conj()*regressor yielded
        # a vector-valued (per-tap) step size.
        norm_factor = gamma + np.dot(regressor.conj(), regressor)
        coefficients = coefficients + (mu / norm_factor) * (error_it.conj() * regressor)
        Filter.coefficients = coefficients

    if verbose == True:
        print(" ")
        print('Total runtime {:.03} ms'.format((time() - tic) * 1000))

    return {'outputs': outputs_vector,
            'errors': error_vector, 'coefficients': Filter.coefficients_history}, nUpdates
# EOF
| [
"numpy.append",
"numpy.array",
"numpy.zeros",
"time.time"
] | [((3001, 3007), 'time.time', 'time', ([], {}), '()\n', (3005, 3007), False, 'from time import time\n'), ((3063, 3122), 'numpy.zeros', 'np.zeros', (['(Filter.filter_order + 1)'], {'dtype': 'input_signal.dtype'}), '(Filter.filter_order + 1, dtype=input_signal.dtype)\n', (3071, 3122), True, 'import numpy as np\n'), ((3140, 3152), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3148, 3152), True, 'import numpy as np\n'), ((3174, 3186), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3182, 3186), True, 'import numpy as np\n'), ((3752, 3785), 'numpy.append', 'np.append', (['error_vector', 'error_it'], {}), '(error_vector, error_it)\n', (3761, 3785), True, 'import numpy as np\n'), ((3811, 3847), 'numpy.append', 'np.append', (['outputs_vector', 'output_it'], {}), '(outputs_vector, output_it)\n', (3820, 3847), True, 'import numpy as np\n'), ((3267, 3296), 'numpy.zeros', 'np.zeros', (['Filter.filter_order'], {}), '(Filter.filter_order)\n', (3275, 3296), True, 'import numpy as np\n'), ((4103, 4109), 'time.time', 'time', ([], {}), '()\n', (4107, 4109), False, 'from time import time\n')] |
import typing
from typing import List
import numpy as np
class Solution:
    def minimumDifference(
        self,
        nums: List[int],
        k: int,
    ) -> int:
        """Smallest possible (max - min) over any k elements of nums."""
        ordered = np.sort(np.asarray(nums))
        window = k - 1
        # After sorting, the best k elements are always contiguous, so compare
        # each element with the one `window` positions ahead of it.
        return (ordered[window:] - ordered[:ordered.size - window]).min()
| [
"numpy.array"
] | [((168, 182), 'numpy.array', 'np.array', (['nums'], {}), '(nums)\n', (176, 182), True, 'import numpy as np\n')] |
import math, random, sys, os
import numpy as np
import pandas as pd
import networkx as nx
import func_timeout
import tqdm
from rdkit import RDLogger
from rdkit.Chem import Descriptors
from rdkit.Chem import rdmolops
import rdkit.Chem.QED
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.utils import standardize
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition import UpperConfidenceBound,ExpectedImprovement,ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement
from botorch.optim import optimize_acqf
from utils import sascorer, quality_filters as qual
# Silence RDKit logging below CRITICAL (invalid-SMILES parsing is expected here).
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
# Absolute directory of this module, used to locate the bundled training-data files.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
###################################### NUMERICAL STABILITY ##################################################
def LSE(log_ai):
    """
    Numerically stable log-sum-exp over dim 1.
    log_ai of dim (batch_size, num_items_in_sum)

    Uses torch.logsumexp, which performs the same max-shift trick as the
    original manual implementation but also returns -inf (instead of NaN)
    for rows that are entirely -inf.
    """
    return torch.logsumexp(log_ai, dim=1)
def LDE(log_ai, log_bi):
    """Numerically stable log-difference-exp: log|exp(log_ai) - exp(log_bi)|,
    computed as max + log(1 - exp(min - max))."""
    hi = torch.max(log_ai, log_bi)
    lo = torch.min(log_ai, log_bi)
    residual = 1 - torch.exp(lo - hi)
    return hi + torch.log(residual)
###################################### MOLECULE PROPERTIES ##################################################
# Raw per-molecule statistics computed on the training set; used to standardize
# the penalized-logP objective in compute_target_logP.
logP_file = dir_path + os.sep + 'stats_training_data/logP_values.txt'
SAS_file = dir_path + os.sep + 'stats_training_data/SA_scores.txt'
cycle_file = dir_path + os.sep + 'stats_training_data/cycle_scores.txt'
logP_values = np.loadtxt(logP_file)
SAS_values = np.loadtxt(SAS_file)
cycle_values = np.loadtxt(cycle_file)
# Mean/std of each objective component over the training set.
training_stats = {
    'logP_mean':np.mean(logP_values),
    'logP_std':np.std(logP_values),
    'SAS_mean':np.mean(SAS_values),
    'SAS_std':np.std(SAS_values),
    'cycles_mean':np.mean(cycle_values),
    'cycles_std':np.std(cycle_values)
}
#Property stats training data
# Precomputed distribution of the final (penalized) logP score over the
# training set, both in raw units and after standardization; used elsewhere
# to de-normalize predicted scores and to scale gradients.
final_logP_train_stats_raw={'mean': -0.002467457978476197, 'std': 2.056736565112327, 'median': 0.42761702630532883, 'min': -62.516944569759666, 'max': 4.519902819580757, 'P1': -6.308202037634639, 'P5': -3.7061575195672125, 'P10': -2.6097184083169522, 'P25': -1.0492552134450062, 'P75': 1.4174359964331003, 'P90': 2.1113332292393188, 'P95': 2.4569317747277495, 'P99': 3.0048043651582605}
final_logP_train_stats_normalized={'mean': -0.0013269769793680093, 'std': 1.0022175676799359, 'median': 0.20822120507327543, 'min': -30.46370322413232, 'max': 2.2023601097894416, 'P1': -3.0740150902231402, 'P5': -1.8060773166698125, 'P10': -1.2717987692036161, 'P25': -0.5114081551001504, 'P75': 0.6905739551134478, 'P90': 1.0286998043562519, 'P95': 1.1971048594070872, 'P99': 1.464075062137245}
#Decoder uncertainty stats
# Percentiles of decoder-uncertainty measures on training data, keyed by model
# type and uncertainty method; used as thresholds for filtering latent points.
decoder_uncertainty_stats_training ={
    'JTVAE': {
        'MI_Importance_sampling': {'mean': 0.7737001577503979, 'std': 0.7191886465214079, 'median': 0.6115016341209412, 'min': 0.003500204300507903, 'max': 3.2164592742919917, 'P1': 0.004812391460873187, 'P5': 0.03621037751436234, 'P25': 0.16248027607798576, 'P75': 1.1251116693019867, 'P95': 2.4251182436943055, 'P99': 2.9005215597152705},
        'NLL_prior': {'mean': 110.49043981933593, 'std': 22.952045705008157, 'median': 106.87257385253906, 'min': 80.51214599609375, 'max': 199.3219451904297, 'P1': 83.63397506713868, 'P5': 86.59568367004395, 'P25': 96.7742748260498, 'P75': 117.61268424987794, 'P95': 147.31882400512683, 'P99': 195.5686897277832}
    }
}
def verify_smile(smile):
    """True iff *smile* is a non-empty, non-null SMILES string that RDKit can parse."""
    if smile == '' or not pd.notnull(smile):
        return False
    return rdkit.Chem.MolFromSmiles(smile) is not None
def clean_up_smiles(smiles):
    """Strip surrounding whitespace from every SMILES string in *smiles*."""
    return [smile.strip() for smile in smiles]
def compute_qed(smile, default_value=np.nan):
    """QED drug-likeness score of a SMILES string, or *default_value* when the
    molecule cannot be parsed or scored."""
    try:
        mol = rdkit.Chem.MolFromSmiles(smile)
        qed = rdkit.Chem.QED.qed(mol)
        return qed
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return default_value
def compute_sas(smile, default_value=np.nan):
    """Synthetic-accessibility (SA) score of a SMILES string, or *default_value*
    when the molecule cannot be parsed or scored."""
    try:
        mol = rdkit.Chem.MolFromSmiles(smile)
        sas = sascorer.calculateScore(mol)
        return sas
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return default_value
def compute_logP(smile, default_value=np.nan):
    """Crippen logP of a SMILES string, or *default_value* when the molecule
    cannot be parsed or scored."""
    try:
        mol = rdkit.Chem.MolFromSmiles(smile)
        logp = Descriptors.MolLogP(mol)
        return logp
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return default_value
def compute_logPminusSAS_score(smile, default_value=np.nan):
    """logP minus SA score of a SMILES string, or *default_value* when the
    molecule cannot be parsed or scored."""
    try:
        mol = rdkit.Chem.MolFromSmiles(smile)
        score = Descriptors.MolLogP(mol) - sascorer.calculateScore(mol)
        return score
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return default_value
def compute_target_logP(smile, default_value=np.nan, train_stats = training_stats):
    """Penalized logP objective: standardized logP, minus standardized SA score,
    minus a standardized penalty for rings longer than 6 atoms.  Each term is
    standardized with the training-set statistics in *train_stats*.
    Returns *default_value* when the SMILES cannot be processed."""
    try:
        mol = rdkit.Chem.MolFromSmiles(smile)
        logP_score = Descriptors.MolLogP(mol)
        SAS_score = - sascorer.calculateScore(mol)
        # Penalize rings with more than 6 atoms by their excess length.
        cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
        if len(cycle_list) == 0:
            cycle_length = 0
        else:
            cycle_length = max(len(j) for j in cycle_list)
        cycle_length = max(cycle_length - 6, 0)
        cycle_score = - cycle_length
        # Standardize each component with the training-set mean/std.
        logP_score_normalized = (logP_score - train_stats['logP_mean']) / train_stats['logP_std']
        SAS_score_normalized = (SAS_score - train_stats['SAS_mean']) / train_stats['SAS_std']
        cycle_score_normalized = (cycle_score - train_stats['cycles_mean']) / train_stats['cycles_std']
        return logP_score_normalized + SAS_score_normalized + cycle_score_normalized
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return default_value
def convert_tensors_to_smiles(tensor_molecules, indices_chars):
    """
    For CharVAE only. Input tensor_molecules of size (batch, seq_len, n_chars).
    Decodes each sequence position to its argmax character and joins them into
    one SMILES string per molecule (whitespace-stripped).
    """
    smiles = []
    for molecule in tensor_molecules.detach().cpu().numpy():
        chars = [indices_chars[np.argmax(position)] for position in molecule]
        smiles.append("".join(chars))
    return np.array(clean_up_smiles(smiles))
def compute_stats(input_array, mode="nan"):
    """Summary statistics (mean/std/median/min/max plus percentiles 1, 5, 25,
    75, 95, 99) of *input_array*.  mode="nan" ignores NaN entries, mode="normal"
    does not.  Any other mode returns None."""
    percentiles = (1, 5, 25, 75, 95, 99)
    if mode == "normal":
        stats = {
            'mean': input_array.mean(),
            'std': input_array.std(),
            'median': np.median(input_array),
            'min': input_array.min(),
            'max': input_array.max(),
        }
        for p in percentiles:
            stats['P%d' % p] = np.percentile(input_array, p)
        return stats
    elif mode == "nan":
        stats = {
            'mean': np.nanmean(input_array),
            'std': np.nanstd(input_array),
            'median': np.nanmedian(input_array),
            'min': np.nanmin(input_array),
            'max': np.nanmax(input_array),
        }
        for p in percentiles:
            stats['P%d' % p] = np.nanpercentile(input_array, p)
        return stats
def check_validity_objects(smiles, return_valid=True):
    """smiles is a list of molecule SMILE representation.
    Returns the fraction of valid SMILES and, when return_valid is True, the
    list of valid SMILES as well (needed to compute unicity and novelty)."""
    num_molecules = len(smiles)
    if num_molecules == 0:
        print("No valid modelcule generated!")
        # Bug fix: keep the return arity consistent with return_valid so that
        # callers unpacking (ratio, valid_smiles) do not crash on empty input.
        if return_valid:
            return 0, []
        return 0
    valid_smiles = [smile for smile in smiles if verify_smile(smile)]
    if return_valid:
        return len(valid_smiles) / float(num_molecules), valid_smiles
    else:
        return len(valid_smiles) / float(num_molecules)
def check_unicity_objects(smiles):
    """Need to pass in valid smiles.  Fraction of distinct SMILES in the list."""
    num_molecules = len(smiles)
    if num_molecules == 0:
        return 0
    return len(set(smiles)) / float(num_molecules)
def check_novelty_objects(smiles, training_smiles, verbose=False):
    """Need to pass in valid smiles.  Fraction of generated SMILES that are NOT
    present in the training set."""
    num_generated = len(smiles)
    if num_generated == 0:
        return 0
    known = set(training_smiles)
    if verbose:
        print("Num distinct molecules in training data: " + str(len(known)))
    num_seen = sum(1 for smile in smiles if smile in known)
    if verbose:
        print("Num generated molecules that were already in training data: " + str(num_seen))
    return 1 - num_seen / float(num_generated)
def log_stats(file_name, stats, log_entry):
    """Append a titled stats entry (header line, str(stats), blank line) to *file_name*."""
    with open(file_name, "a+") as logs_file:
        logs_file.write("{}\n{}\n\n".format(log_entry, stats))
class assessment_generated_objects():
    """Scores a batch of generated SMILES: validity, unicity, novelty,
    quality-filter pass rate, QED, and the target property (top-k ranks,
    top-10/50 averages, overall averages)."""
    def __init__(self, generated_objects_list, model_training_data, prop="final_logP"):
        """Function that returns the property of the best 3 objects generated; top 50 (or less if fewer valid) and average over all generated
        Also return % valid, %unique, %novel of all generated elements"""
        self.num_generated_objects = len(generated_objects_list)
        #Compute Validity of generated objects
        self.validity_all, self.valid_generated_objects_list = check_validity_objects(generated_objects_list, return_valid=True)
        self.num_valid_generated_objects = len(self.valid_generated_objects_list)
        #Compute Properties of generated objects
        # One score per valid molecule, according to the requested property.
        self.property_generated_objects = []
        if prop=="QED":
            for valid_generated_object in self.valid_generated_objects_list:
                self.property_generated_objects.append(compute_qed(valid_generated_object))
        elif prop=="logPminusSAS":
            for valid_generated_object in self.valid_generated_objects_list:
                self.property_generated_objects.append(compute_logPminusSAS_score(valid_generated_object))
        elif prop=="final_logP":
            for valid_generated_object in self.valid_generated_objects_list:
                self.property_generated_objects.append(compute_target_logP(valid_generated_object))
        # compute_stats raises on an empty score array (min/max of empty); keep None then.
        try:
            self.stats_property_generated_objects = compute_stats(np.array(self.property_generated_objects))
        except:
            self.stats_property_generated_objects = None
        property_df = pd.DataFrame({'Valid_generated_objects': np.array(self.valid_generated_objects_list),
                                     'Property_valid_generated_objects': np.array(self.property_generated_objects)})
        #quality_filters
        # NOTE(review): QF.check_smiles_pass_quality_filters_flag appears to
        # flag molecules passing heuristic quality filters — confirm semantics
        # in utils.quality_filters.
        if len(property_df)>0:
            QF = qual.QualityFiltersCheck(training_data_smi=[])
            property_df['Pass_quality_filters']= QF.check_smiles_pass_quality_filters_flag(self.valid_generated_objects_list).astype(bool)
        # Best-scoring molecules first.
        property_df.sort_values(by=['Property_valid_generated_objects'], ascending=False, inplace=True)
        #De-normalize scores
        if prop=="final_logP":
            property_df['Property_valid_generated_objects'] = property_df['Property_valid_generated_objects']*final_logP_train_stats_raw['std'] + final_logP_train_stats_raw['mean']
        property_df.reset_index(inplace=True, drop=True)
        self.top10_valid_molecules = property_df['Valid_generated_objects'][:10]
        self.top50_valid_molecules = property_df['Valid_generated_objects'][:50]
        # Per-rank score/SMILES for the 10 best molecules (None-padded when fewer exist).
        self.top_properties_scores={}
        self.top_properties_smiles={}
        self.len_property_df = len(property_df)
        for i in range(1,11):
            if self.len_property_df > i-1:
                self.top_properties_scores['top_'+str(i)] = property_df['Property_valid_generated_objects'][i-1]
                self.top_properties_smiles['top_'+str(i)] = property_df['Valid_generated_objects'][i-1]
            else:
                self.top_properties_scores['top_'+str(i)] = None
                self.top_properties_smiles['top_'+str(i)] = None
        # Aggregate metrics are only defined when at least one valid molecule exists.
        if self.len_property_df > 0:
            #Avg property
            self.property_all = property_df['Property_valid_generated_objects'].mean()
            self.property_top10 = property_df['Property_valid_generated_objects'][:10].mean()
            self.property_top50 = property_df['Property_valid_generated_objects'][:50].mean()
            #Compute Unicity of generated objects
            self.unicity_all = check_unicity_objects(self.valid_generated_objects_list)
            self.unicity_top10 = check_unicity_objects(self.top10_valid_molecules)
            #Compute Novelty of generated objects
            self.novelty_all = check_novelty_objects(self.valid_generated_objects_list, model_training_data)
            self.novelty_top10 = check_novelty_objects(self.top10_valid_molecules, model_training_data)
            #Quality
            self.quality_all = property_df['Pass_quality_filters'].astype(int).mean()
            self.quality_top10 = np.nanmean(QF.check_smiles_pass_quality_filters_flag(self.top10_valid_molecules))
            #QED
            self.qed_all = np.nanmean(np.array([compute_qed(x) for x in self.valid_generated_objects_list]))
            self.qed_top10 = np.nanmean(np.array([compute_qed(x) for x in self.top10_valid_molecules]))
        else:
            self.property_all = None
            self.property_top10 = None
            self.property_top50 = None
            self.unicity_all = None
            self.unicity_top10 = None
            self.novelty_all = None
            self.novelty_top10 = None
            self.quality_all = None
            self.quality_top10 = None
            self.qed_all = None
            self.qed_top10 = None
        #Stats passing quality filters
        # NOTE(review): when property_df is empty the 'Pass_quality_filters'
        # column was never created, so this lookup would raise — confirm callers
        # never reach here with zero valid molecules.
        property_df_qual = property_df[property_df['Pass_quality_filters']]
        property_df_qual.reset_index(inplace=True)
        if len(property_df_qual)>0:
            self.property_all_qual = property_df_qual['Property_valid_generated_objects'].mean()
            self.property_top5avg_qual = property_df_qual['Property_valid_generated_objects'][:5].mean()
            self.property_top10avg_qual = property_df_qual['Property_valid_generated_objects'][:10].mean()
            self.property_top50avg_qual = property_df_qual['Property_valid_generated_objects'][:50].mean()
            self.qed_all_qual = np.nanmean(np.array([compute_qed(x) for x in property_df_qual['Valid_generated_objects']]))
            self.qed_top10_qual = np.nanmean(np.array([compute_qed(x) for x in property_df_qual['Valid_generated_objects'][:10]]))
        else:
            self.property_all_qual = None
            self.property_top5avg_qual = None
            self.property_top10avg_qual = None
            self.property_top50avg_qual = None
            self.qed_all_qual = None
            self.qed_top10_qual = None
        # Per-rank property of the 10 best molecules among those passing the filters.
        self.property_top_qual = {}
        for i in range(1,11):
            if len(property_df_qual) > i-1:
                self.property_top_qual[i]=property_df_qual['Property_valid_generated_objects'][i-1]
            else:
                self.property_top_qual[i]=None
    def log_all_stats_generated_objects(self, filename):
        """Appends every computed metric to *filename* via log_stats and returns
        them as a flat results dictionary."""
        results={}
        log_stats(file_name= filename, stats=self.num_generated_objects, log_entry="Number of generated objects")
        log_stats(file_name= filename, stats=self.validity_all, log_entry="Proportion of valid generated objects")
        results['validity_all']=self.validity_all
        log_stats(file_name= filename, stats=self.unicity_all, log_entry="Proportion of unique valid generated objects")
        results['unicity_all']=self.unicity_all
        results['unicity_top10']=self.unicity_top10
        log_stats(file_name= filename, stats=self.novelty_all, log_entry="Proportion of novel valid generated objects")
        results['novelty_all']=self.novelty_all
        results['novelty_top10']=self.novelty_top10
        log_stats(file_name= filename, stats=self.quality_all, log_entry="Proportion of valid generated objects passing quality filters")
        results['quality_all']=self.quality_all
        results['quality_top10']=self.quality_top10
        log_stats(file_name= filename, stats=self.qed_all, log_entry="Avg qed of valid generated objects")
        results['qed_all']=self.qed_all
        results['qed_top10']=self.qed_top10
        log_stats(file_name= filename, stats=self.stats_property_generated_objects, log_entry="Stats of properties of generated objects")
        results['target_property_all']=self.property_all
        results['target_property_top10']=self.property_top10
        results['target_property_top50']=self.property_top50
        for i in range(1,11):
            if self.len_property_df > i-1:
                log_stats(file_name= filename, stats=self.top_properties_scores['top_'+str(i)], log_entry="Property of top "+str(i)+" generated object")
                log_stats(file_name= filename, stats=self.top_properties_smiles['top_'+str(i)], log_entry="Smiles of top "+str(i)+" generated object")
                results['top'+str(i)]=self.top_properties_scores['top_'+str(i)]
            else:
                results['top'+str(i)]=None
        #Qual metrics
        results['property_all_qual'] = self.property_all_qual
        for i in range(1,11):
            results['property_top'+str(i)+'_qual'] = self.property_top_qual[i]
        results['property_top5avg_qual'] = self.property_top5avg_qual
        results['property_top10avg_qual'] = self.property_top10avg_qual
        results['property_top50avg_qual'] = self.property_top50avg_qual
        results['qed_all_qual'] = self.qed_all_qual
        results['qed_top10_qual'] = self.qed_top10_qual
        results['top_10_molecules'] = self.top10_valid_molecules
        return results
###################################### OPTIMIZATION INITIALIZATION ##################################################
def starting_objects_latent_embeddings(model, data, mode="random", num_objects_to_select=100, batch_size=256, property_upper_bound=None, model_type="JTVAE"):
    """Select starting molecules from *data* and encode them into latent space.

    mode="random" draws num_objects_to_select molecules uniformly without
    replacement; mode="low_property_objects" scans *data* in order and keeps the
    first num_objects_to_select molecules whose penalized logP is below
    property_upper_bound.  Returns (latent_embeddings, properties, smiles).
    For JTVAE *data* holds SMILES strings; for CharVAE it holds one-hot tensors.
    """
    # Latent dimensionality depends on the backbone VAE (JTVAE concatenates
    # tree and molecule latents).
    if model_type=="JTVAE":
        latent_space_dim = model.latent_size * 2
    elif model_type=="CharVAE":
        latent_space_dim = model.params.z_dim
    if mode=="random":
        num_objects_data = len(data)
        selected_objects_indices = np.random.choice(a=range(num_objects_data), size=num_objects_to_select, replace=False).tolist()
        starting_objects = np.array(data)[selected_objects_indices]
        if model_type=="JTVAE":
            starting_objects_smiles = starting_objects
        elif model_type=="CharVAE":
            starting_objects_smiles = convert_tensors_to_smiles(starting_objects, model.params.indices_char)
        # NOTE: this local shadows the function's own name; harmless here.
        starting_objects_latent_embeddings = torch.zeros(num_objects_to_select, latent_space_dim).to(device)
        starting_objects_properties = []
        # Encode in batches to bound memory usage.
        for batch_object_indices in range(0,num_objects_to_select,batch_size):
            a, b = batch_object_indices, batch_object_indices+batch_size
            if model_type=="JTVAE":
                starting_objects_latent_embeddings[a:b] = model.encode_and_samples_from_smiles(starting_objects[a:b])
            elif model_type=="CharVAE":
                mu, log_var = model.encoder(starting_objects[a:b])
                starting_objects_latent_embeddings[a:b] = model.sampling(mu, log_var)
            for smile in starting_objects_smiles[a:b]:
                starting_objects_properties.append(compute_target_logP(smile))
        starting_objects_properties = torch.tensor(starting_objects_properties)
    elif mode=="low_property_objects":
        num_starting_points_selected = 0
        index_object_in_dataset = 0
        starting_objects = []
        starting_objects_smiles = []
        starting_objects_properties = []
        starting_objects_latent_embeddings = torch.zeros(num_objects_to_select, latent_space_dim).to(device)
        # Scan the dataset in order until enough low-property molecules are found.
        while num_starting_points_selected < num_objects_to_select:
            if model_type=='JTVAE':
                smile_potential_starting_object = data[index_object_in_dataset]
            elif model_type=='CharVAE':
                potential_starting_object = data[index_object_in_dataset].unsqueeze(0)
                smile_potential_starting_object = convert_tensors_to_smiles(potential_starting_object, model.params.indices_char)[0]
            final_logP = compute_target_logP(smile_potential_starting_object)
            # The -100 lower bound rejects extreme outliers; NaN scores from
            # unparsable SMILES also fail both comparisons and are skipped.
            if final_logP < property_upper_bound and final_logP > - 100:
                if model_type=='JTVAE':
                    new_object_latent_representation = model.encode_and_samples_from_smiles([smile_potential_starting_object])
                elif model_type=='CharVAE':
                    mu, log_var = model.encoder(potential_starting_object)
                    new_object_latent_representation = model.sampling(mu, log_var)
                starting_objects_latent_embeddings[num_starting_points_selected] = new_object_latent_representation
                starting_objects_properties.append(final_logP)
                starting_objects_smiles.append(smile_potential_starting_object)
                num_starting_points_selected+=1
            index_object_in_dataset+=1
        starting_objects_properties=torch.tensor(starting_objects_properties)
    return starting_objects_latent_embeddings, starting_objects_properties, starting_objects_smiles
###################################### OPTIMIZATION ROUTINES ##################################################
def gradient_ascent_optimization(model, starting_objects_latent_embeddings, number_gradient_steps=10,
                        uncertainty_decoder_method=None, num_sampled_models=10, num_sampled_outcomes=40,
                        model_decoding_mode=None, model_decoding_topk_value=None, alpha=1.0, normalize_gradients=True,
                        batch_size=64, uncertainty_threshold="No_constraint", keep_all_generated=False, model_type="JTVAE"
                        ):
    """
    Perform gradient ascent in latent space. Filter out invalid points, ie. above uncertainty threshold. Keep last number_starting_objects valid points.
    model_decoding_mode and model_decoding_topk_value are only relevant for RNN decoding (CharVAE).
    """
    number_starting_objects = len(starting_objects_latent_embeddings)
    generated_objects_list=[]
    # Latent dimensionality depends on the backbone VAE.
    if model_type=='JTVAE':
        hidden_dim = model.latent_size*2
    elif model_type=='CharVAE':
        hidden_dim = model.params.z_dim
    if model_decoding_mode is not None:
        model.sampling_mode = model_decoding_mode
        model.generation_top_k_sampling = model_decoding_topk_value
    # Map the percentile name (e.g. 'P95') to its numeric threshold computed on training data.
    if uncertainty_threshold!='No_constraint':
        uncertainty_threshold_value = decoder_uncertainty_stats_training[model_type][uncertainty_decoder_method][uncertainty_threshold]
    # Row-major buffer of every visited latent point: step s occupies rows
    # [s*number_starting_objects, (s+1)*number_starting_objects).
    all_points_latent_representation = torch.zeros((number_gradient_steps+1)*number_starting_objects, hidden_dim)
    all_points_latent_representation[:number_starting_objects] = starting_objects_latent_embeddings.view(-1,hidden_dim)
    new_objects_latent_representation = starting_objects_latent_embeddings
    # Ascend the property predictor's gradient in latent space, batch by batch.
    for step in tqdm.tqdm(range(1, number_gradient_steps+1)):
        torch.cuda.empty_cache()
        model.zero_grad()
        gradient = torch.zeros(number_starting_objects, hidden_dim).to(device)
        for batch_object_indices in range(0, number_starting_objects, batch_size):
            model.zero_grad()
            a, b = batch_object_indices , batch_object_indices+batch_size
            new_objects_latent_representation_slice = torch.autograd.Variable(new_objects_latent_representation[a:b], requires_grad=True)
            if model_type=='JTVAE':
                predicted_property_slice = model.prop_net(new_objects_latent_representation_slice).squeeze()
                # Standardize the predicted property with training-set statistics.
                predicted_property_slice = (predicted_property_slice - (final_logP_train_stats_raw['mean'])) / (final_logP_train_stats_raw['std'])
            elif model_type=='CharVAE':
                predicted_property_slice = model.qed_net(new_objects_latent_representation_slice).squeeze()
            gradient[a:b] = torch.autograd.grad(outputs = predicted_property_slice,
                            inputs = new_objects_latent_representation_slice,
                            grad_outputs = torch.ones_like(predicted_property_slice).to(device),
                            retain_graph=False)[0]
        # NOTE(review): this divides by the norm of the whole gradient matrix,
        # not by per-object gradient norms — confirm that is intended.
        if normalize_gradients:
            gradient /= torch.norm(gradient,2)
        new_objects_latent_representation = new_objects_latent_representation + alpha * gradient
        all_points_latent_representation[step*number_starting_objects:(step+1)*number_starting_objects] = new_objects_latent_representation.view(-1,hidden_dim)
    if uncertainty_threshold!='No_constraint':
        if keep_all_generated: #Need to compute uncertainty for all points
            # Keep every visited point whose decoder uncertainty is below the threshold.
            with torch.no_grad():
                num_points_total = (number_gradient_steps+1)*number_starting_objects
                uncertainty_all_points = torch.zeros(num_points_total)
                for batch_object_indices in range(0, num_points_total, batch_size):
                    z_slice = all_points_latent_representation[batch_object_indices:batch_object_indices+batch_size].to(device)
                    uncertainty_all_points[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
                                                                                                            z = z_slice,
                                                                                                            method = uncertainty_decoder_method,
                                                                                                            num_sampled_models=num_sampled_models,
                                                                                                            num_sampled_outcomes=num_sampled_outcomes
                    ).squeeze().detach().cpu()
                index_below_uncertainty_threshold = (uncertainty_all_points < uncertainty_threshold_value)
                all_points_latent_representation = all_points_latent_representation[index_below_uncertainty_threshold]
                selected_points = all_points_latent_representation
        else: #We compute uncertainty in batches starting from latest batch of points generated, and continue until we have reached the desired number of points below uncertainty threshold
            # Walk backwards from the most recent points until enough
            # low-uncertainty ones are collected.
            with torch.no_grad():
                num_points_to_generate = number_starting_objects
                point_index = (number_gradient_steps+1)*number_starting_objects + 1
                selected_points=[]
                while num_points_to_generate > 0:
                    if point_index>0:
                        potential_points=all_points_latent_representation[max(point_index-batch_size,0):point_index].view(-1,hidden_dim).to(device)
                        uncertainty_potential_points = model.decoder_uncertainty_from_latent(
                                                                                    z = potential_points,
                                                                                    method = uncertainty_decoder_method,
                                                                                    num_sampled_models=num_sampled_models,
                                                                                    num_sampled_outcomes=num_sampled_outcomes
                        ).squeeze().detach().cpu()
                        count_below=(uncertainty_potential_points < uncertainty_threshold_value).sum()
                        num_points_to_generate -=count_below
                        selected_points.extend(potential_points[uncertainty_potential_points < uncertainty_threshold_value])
                        point_index-=batch_size
                selected_points=selected_points[:number_starting_objects]
    else:
        if keep_all_generated:
            selected_points=all_points_latent_representation
        else:
            selected_points=all_points_latent_representation[-number_starting_objects:]
    # Decode the selected latent points back to SMILES.
    with torch.no_grad():
        if model_type=='JTVAE':
            for idx in range(len(selected_points)):
                z = selected_points[idx].view(1,hidden_dim).to(device)
                # JTVAE latents are the concatenation of tree and molecule halves.
                z_tree, z_mol = z[:,:model.latent_size], z[:,model.latent_size:]
                smiles_new_objects = model.decode(z_tree, z_mol, prob_decode=False)
                generated_objects_list.append(smiles_new_objects)
        elif model_type=='CharVAE':
            # NOTE(review): this appends the whole decoded batch per iteration,
            # while the JTVAE branch appends single SMILES — confirm callers
            # expect this nesting difference.
            for batch_object_indices in range(0, len(selected_points), batch_size):
                decoded_new_objects = model.generate_from_latent(selected_points[batch_object_indices:batch_object_indices+batch_size].to(device))
                smiles_new_objects = convert_tensors_to_smiles(decoded_new_objects, model.params.indices_char)
                generated_objects_list.append(smiles_new_objects)
    return generated_objects_list
def bayesian_optimization(model, starting_objects_latent_embeddings, starting_objects_properties, number_BO_steps, BO_uncertainty_mode,
BO_uncertainty_threshold='No_constraint', BO_uncertainty_coeff=0.0, uncertainty_decoder_method=None, num_sampled_models=10, num_sampled_outcomes = 40,
model_decoding_mode=None, model_decoding_topk_value=None, BO_acquisition_function="UCB", BO_default_value_invalid=0.0,
min_bound=-2, max_bound = 2, batch_size=64, generation_timout_seconds=600, model_type="JTVAE"
):
"""
Bayesian optimization in latent space. Two different modes: BO_uncertainty_mode=="Penalized_objective" (uncertainty-aware surrogate) or BO_uncertainty_mode=="Uncertainty_censoring"
model_decoding_mode and model_decoding_topk_value are only relevant for RNN decoding (CharVAE).
"""
smiles_generated_objects = []
pred_property_values = []
if model_type=='JTVAE':
hidden_dim = model.latent_size*2
elif model_type=='CharVAE':
hidden_dim = model.params.z_dim
if model_decoding_mode is not None:
model.sampling_mode = model_decoding_mode
model.generation_top_k_sampling = model_decoding_topk_value
#compute actual uncertainty threshold for uncertainty_censoring mode based on percentile
if BO_uncertainty_mode=="Uncertainty_censoring" and BO_uncertainty_threshold!='No_constraint':
BO_uncertainty_threshold_value = decoder_uncertainty_stats_training[model_type][uncertainty_decoder_method][BO_uncertainty_threshold]
objects_latent_representation = starting_objects_latent_embeddings.view(-1, hidden_dim)
objects_properties = starting_objects_properties.view(-1,1)
for step in tqdm.tqdm(range(number_BO_steps)):
num_training_points_surrogate = len(objects_latent_representation)
train_X = objects_latent_representation.detach().to(device)
train_Y = standardize(objects_properties).detach().to(device)
if BO_uncertainty_mode=="Penalized_objective" and BO_uncertainty_coeff > 0.0:
with torch.no_grad():
if step == 0: #On the first step, we compute uncertainty for all starting (latent) points
uncertainty_decoder = torch.zeros(num_training_points_surrogate).to(device)
for batch_object_indices in range(0, num_training_points_surrogate, batch_size):
a, b = batch_object_indices , batch_object_indices+batch_size
z_slice = objects_latent_representation[a:b].to(device)
uncertainty_decoder[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = z_slice,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze().detach().cpu()
else:
#For all subsequent steps, we just need to compute the uncertainty for the new point and add to previously computed uncertainties
new_point_uncertainty_decoder[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = generated_object.to(device),
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
)
uncertainty_decoder = torch.cat(tensors=(uncertainty_decoder, new_point_uncertainty_decoder.view(1)), dim=0)
train_Y = train_Y - BO_uncertainty_coeff * standardize(uncertainty_decoder.view(-1,1))
train_Y = train_Y.detach().to(device)
#Single-task exact GP model
gp = SingleTaskGP(train_X=train_X, train_Y=train_Y).to(device)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)
#Acquisition function:
if BO_acquisition_function=="UCB":
BO_acq_func, q = UpperConfidenceBound(gp, beta=0.1), 1
elif BO_acquisition_function=="EI":
BO_acq_func, q = ExpectedImprovement(gp, best_f=0.1), 1
elif BO_acquisition_function=="PI":
BO_acq_func, q = ProbabilityOfImprovement(gp, best_f=0.1), 1
elif BO_acquisition_function=="qUCB":
BO_acq_func, q = qUpperConfidenceBound(gp, beta=0.1), 20
elif BO_acquisition_function=="qEI":
BO_acq_func, q = qExpectedImprovement(gp, best_f=0.1), 20
elif BO_acquisition_function=="qNoisyEI":
BO_acq_func, q = qNoisyExpectedImprovement(gp), 20
#Optimize the acquisition function
print("Optimizing acq function")
bounds = torch.stack([torch.ones(hidden_dim) * min_bound, torch.ones(hidden_dim) * max_bound]).to(device)
generated_object, pred_property_value = optimize_acqf(
acq_function=BO_acq_func,
bounds=bounds,
q=q,
num_restarts=min(20,num_training_points_surrogate),
raw_samples=num_training_points_surrogate,
sequential=True,
return_best_only=True
)
generated_object = generated_object.view(-1,hidden_dim)
with torch.no_grad():
if BO_uncertainty_mode=="Uncertainty_censoring" and BO_uncertainty_threshold!="No_constraint":
#Compute uncertainty for each candidate. Check which are below threshold. If at least one, return the one with best predicted value. Otherwise, return lowest uncertainty point.
uncertainty_generated = torch.zeros(len(generated_object)).to(device)
for batch_object_indices in range(0, q, batch_size):
a, b = batch_object_indices , batch_object_indices+batch_size
z_slice = generated_object[a:b].to(device)
uncertainty_generated[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = z_slice,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze()
index_below_uncertainty_threshold = (uncertainty_generated < BO_uncertainty_threshold_value)
num_below_threshold = index_below_uncertainty_threshold.int().sum()
if num_below_threshold > 0:
generated_object = generated_object[index_below_uncertainty_threshold]
pred_property_value = pred_property_value[index_below_uncertainty_threshold]
generated_object = generated_object[-1]
pred_property_value = pred_property_value[-1]
else:
min_uncertainty_point = uncertainty_generated.argmin()
generated_object = generated_object[min_uncertainty_point]
pred_property_value = pred_property_value[min_uncertainty_point]
else:
if len(generated_object)>1:
generated_object = generated_object[-1]
pred_property_value = pred_property_value[-1]
pred_property_values.append(pred_property_value.item())
generated_object = generated_object.view(1,hidden_dim)
with torch.no_grad():
if model_type=='JTVAE':
z = generated_object.view(1,hidden_dim).to(device)
z_tree, z_mol = z[:,:model.latent_size], z[:,model.latent_size:]
try:
smiles_new_object = func_timeout.func_timeout(generation_timout_seconds, model.decode, args=(z_tree, z_mol), kwargs={'prob_decode':False})
new_point_property = compute_target_logP(smiles_new_object, default_value=BO_default_value_invalid)
smiles_generated_objects.append(smiles_new_object)
objects_properties = torch.cat(tensors=(objects_properties.float(), torch.tensor(new_point_property).view(1).float()), dim=0)
objects_latent_representation = torch.cat(tensors=(objects_latent_representation, generated_object.view(1,hidden_dim)), dim=0)
except:
print("timed out")
elif model_type=='CharVAE':
decoded_new_object = model.generate_from_latent(generated_object)
smiles_new_object = convert_tensors_to_smiles(decoded_new_object, model.params.indices_char)[0]
smiles_generated_objects.append(smiles_new_object)
new_point_property = compute_target_logP(smiles_new_object, default_value=BO_default_value_invalid)
objects_properties = torch.cat(tensors=(objects_properties.float(), torch.tensor(new_point_property).view(1).float()), dim=0)
objects_latent_representation = torch.cat(tensors=(objects_latent_representation, generated_object.view(1,hidden_dim)), dim=0)
return smiles_generated_objects, pred_property_values | [
"numpy.nanpercentile",
"rdkit.Chem.Descriptors.MolLogP",
"torch.max",
"rdkit.Chem.rdmolops.GetAdjacencyMatrix",
"torch.exp",
"torch.min",
"numpy.array",
"numpy.nanmean",
"torch.cuda.is_available",
"botorch.acquisition.qNoisyExpectedImprovement",
"numpy.percentile",
"botorch.models.SingleTaskGP... | [((695, 712), 'rdkit.RDLogger.logger', 'RDLogger.logger', ([], {}), '()\n', (710, 712), False, 'from rdkit import RDLogger\n'), ((753, 778), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (768, 778), False, 'import math, random, sys, os\n'), ((790, 811), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (805, 811), False, 'import math, random, sys, os\n'), ((1732, 1753), 'numpy.loadtxt', 'np.loadtxt', (['logP_file'], {}), '(logP_file)\n', (1742, 1753), True, 'import numpy as np\n'), ((1767, 1787), 'numpy.loadtxt', 'np.loadtxt', (['SAS_file'], {}), '(SAS_file)\n', (1777, 1787), True, 'import numpy as np\n'), ((1803, 1825), 'numpy.loadtxt', 'np.loadtxt', (['cycle_file'], {}), '(cycle_file)\n', (1813, 1825), True, 'import numpy as np\n'), ((1258, 1283), 'torch.max', 'torch.max', (['log_ai', 'log_bi'], {}), '(log_ai, log_bi)\n', (1267, 1283), False, 'import torch\n'), ((1299, 1324), 'torch.min', 'torch.min', (['log_ai', 'log_bi'], {}), '(log_ai, log_bi)\n', (1308, 1324), False, 'import torch\n'), ((1858, 1878), 'numpy.mean', 'np.mean', (['logP_values'], {}), '(logP_values)\n', (1865, 1878), True, 'import numpy as np\n'), ((1891, 1910), 'numpy.std', 'np.std', (['logP_values'], {}), '(logP_values)\n', (1897, 1910), True, 'import numpy as np\n'), ((1923, 1942), 'numpy.mean', 'np.mean', (['SAS_values'], {}), '(SAS_values)\n', (1930, 1942), True, 'import numpy as np\n'), ((1954, 1972), 'numpy.std', 'np.std', (['SAS_values'], {}), '(SAS_values)\n', (1960, 1972), True, 'import numpy as np\n'), ((1988, 2009), 'numpy.mean', 'np.mean', (['cycle_values'], {}), '(cycle_values)\n', (1995, 2009), True, 'import numpy as np\n'), ((2024, 2044), 'numpy.std', 'np.std', (['cycle_values'], {}), '(cycle_values)\n', (2030, 2044), True, 'import numpy as np\n'), ((23563, 23641), 'torch.zeros', 'torch.zeros', (['((number_gradient_steps + 1) * number_starting_objects)', 'hidden_dim'], {}), '((number_gradient_steps + 1) 
* number_starting_objects, hidden_dim)\n', (23574, 23641), False, 'import torch\n'), ((844, 869), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (867, 869), False, 'import torch\n'), ((3665, 3682), 'pandas.notnull', 'pd.notnull', (['smile'], {}), '(smile)\n', (3675, 3682), True, 'import pandas as pd\n'), ((4129, 4157), 'utils.sascorer.calculateScore', 'sascorer.calculateScore', (['mol'], {}), '(mol)\n', (4152, 4157), False, 'from utils import sascorer, quality_filters as qual\n'), ((4338, 4362), 'rdkit.Chem.Descriptors.MolLogP', 'Descriptors.MolLogP', (['mol'], {}), '(mol)\n', (4357, 4362), False, 'from rdkit.Chem import Descriptors\n'), ((4849, 4873), 'rdkit.Chem.Descriptors.MolLogP', 'Descriptors.MolLogP', (['mol'], {}), '(mol)\n', (4868, 4873), False, 'from rdkit.Chem import Descriptors\n'), ((20149, 20190), 'torch.tensor', 'torch.tensor', (['starting_objects_properties'], {}), '(starting_objects_properties)\n', (20161, 20190), False, 'import torch\n'), ((23908, 23932), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (23930, 23932), False, 'import torch\n'), ((29164, 29179), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29177, 29179), False, 'import torch\n'), ((34931, 34976), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'ExactMarginalLogLikelihood', (['gp.likelihood', 'gp'], {}), '(gp.likelihood, gp)\n', (34957, 34976), False, 'from gpytorch.mlls import ExactMarginalLogLikelihood\n'), ((34985, 35008), 'botorch.fit.fit_gpytorch_model', 'fit_gpytorch_model', (['mll'], {}), '(mll)\n', (35003, 35008), False, 'from botorch.fit import fit_gpytorch_model\n'), ((4559, 4583), 'rdkit.Chem.Descriptors.MolLogP', 'Descriptors.MolLogP', (['mol'], {}), '(mol)\n', (4578, 4583), False, 'from rdkit.Chem import Descriptors\n'), ((4586, 4614), 'utils.sascorer.calculateScore', 'sascorer.calculateScore', (['mol'], {}), '(mol)\n', (4609, 4614), False, 'from utils import sascorer, quality_filters as qual\n'), ((4906, 4934), 
'utils.sascorer.calculateScore', 'sascorer.calculateScore', (['mol'], {}), '(mol)\n', (4929, 4934), False, 'from utils import sascorer, quality_filters as qual\n'), ((6060, 6077), 'numpy.argmax', 'np.argmax', (['atom_j'], {}), '(atom_j)\n', (6069, 6077), True, 'import numpy as np\n'), ((6384, 6406), 'numpy.median', 'np.median', (['input_array'], {}), '(input_array)\n', (6393, 6406), True, 'import numpy as np\n'), ((6499, 6528), 'numpy.percentile', 'np.percentile', (['input_array', '(1)'], {}), '(input_array, 1)\n', (6512, 6528), True, 'import numpy as np\n'), ((6546, 6575), 'numpy.percentile', 'np.percentile', (['input_array', '(5)'], {}), '(input_array, 5)\n', (6559, 6575), True, 'import numpy as np\n'), ((6594, 6624), 'numpy.percentile', 'np.percentile', (['input_array', '(25)'], {}), '(input_array, 25)\n', (6607, 6624), True, 'import numpy as np\n'), ((6643, 6673), 'numpy.percentile', 'np.percentile', (['input_array', '(75)'], {}), '(input_array, 75)\n', (6656, 6673), True, 'import numpy as np\n'), ((6692, 6722), 'numpy.percentile', 'np.percentile', (['input_array', '(95)'], {}), '(input_array, 95)\n', (6705, 6722), True, 'import numpy as np\n'), ((6741, 6771), 'numpy.percentile', 'np.percentile', (['input_array', '(99)'], {}), '(input_array, 99)\n', (6754, 6771), True, 'import numpy as np\n'), ((11209, 11255), 'utils.quality_filters.QualityFiltersCheck', 'qual.QualityFiltersCheck', ([], {'training_data_smi': '[]'}), '(training_data_smi=[])\n', (11233, 11255), True, 'from utils import sascorer, quality_filters as qual\n'), ((19055, 19069), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (19063, 19069), True, 'import numpy as np\n'), ((21880, 21921), 'torch.tensor', 'torch.tensor', (['starting_objects_properties'], {}), '(starting_objects_properties)\n', (21892, 21921), False, 'import torch\n'), ((24279, 24366), 'torch.autograd.Variable', 'torch.autograd.Variable', (['new_objects_latent_representation[a:b]'], {'requires_grad': '(True)'}), 
'(new_objects_latent_representation[a:b],\n requires_grad=True)\n', (24302, 24366), False, 'import torch\n'), ((25243, 25266), 'torch.norm', 'torch.norm', (['gradient', '(2)'], {}), '(gradient, 2)\n', (25253, 25266), False, 'import torch\n'), ((36745, 36760), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (36758, 36760), False, 'import torch\n'), ((39329, 39344), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (39342, 39344), False, 'import torch\n'), ((1362, 1394), 'torch.exp', 'torch.exp', (['(min_log_p - max_log_p)'], {}), '(min_log_p - max_log_p)\n', (1371, 1394), False, 'import torch\n'), ((4989, 5021), 'rdkit.Chem.rdmolops.GetAdjacencyMatrix', 'rdmolops.GetAdjacencyMatrix', (['mol'], {}), '(mol)\n', (5016, 5021), False, 'from rdkit.Chem import rdmolops\n'), ((6839, 6862), 'numpy.nanmean', 'np.nanmean', (['input_array'], {}), '(input_array)\n', (6849, 6862), True, 'import numpy as np\n'), ((6883, 6905), 'numpy.nanstd', 'np.nanstd', (['input_array'], {}), '(input_array)\n', (6892, 6905), True, 'import numpy as np\n'), ((6928, 6953), 'numpy.nanmedian', 'np.nanmedian', (['input_array'], {}), '(input_array)\n', (6940, 6953), True, 'import numpy as np\n'), ((6973, 6995), 'numpy.nanmin', 'np.nanmin', (['input_array'], {}), '(input_array)\n', (6982, 6995), True, 'import numpy as np\n'), ((7015, 7037), 'numpy.nanmax', 'np.nanmax', (['input_array'], {}), '(input_array)\n', (7024, 7037), True, 'import numpy as np\n'), ((7057, 7089), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(1)'], {}), '(input_array, 1)\n', (7073, 7089), True, 'import numpy as np\n'), ((7108, 7140), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(5)'], {}), '(input_array, 5)\n', (7124, 7140), True, 'import numpy as np\n'), ((7159, 7192), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(25)'], {}), '(input_array, 25)\n', (7175, 7192), True, 'import numpy as np\n'), ((7211, 7244), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(75)'], 
{}), '(input_array, 75)\n', (7227, 7244), True, 'import numpy as np\n'), ((7263, 7296), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(95)'], {}), '(input_array, 95)\n', (7279, 7296), True, 'import numpy as np\n'), ((7315, 7348), 'numpy.nanpercentile', 'np.nanpercentile', (['input_array', '(99)'], {}), '(input_array, 99)\n', (7331, 7348), True, 'import numpy as np\n'), ((10778, 10819), 'numpy.array', 'np.array', (['self.property_generated_objects'], {}), '(self.property_generated_objects)\n', (10786, 10819), True, 'import numpy as np\n'), ((10966, 11009), 'numpy.array', 'np.array', (['self.valid_generated_objects_list'], {}), '(self.valid_generated_objects_list)\n', (10974, 11009), True, 'import numpy as np\n'), ((11083, 11124), 'numpy.array', 'np.array', (['self.property_generated_objects'], {}), '(self.property_generated_objects)\n', (11091, 11124), True, 'import numpy as np\n'), ((19373, 19425), 'torch.zeros', 'torch.zeros', (['num_objects_to_select', 'latent_space_dim'], {}), '(num_objects_to_select, latent_space_dim)\n', (19384, 19425), False, 'import torch\n'), ((23978, 24026), 'torch.zeros', 'torch.zeros', (['number_starting_objects', 'hidden_dim'], {}), '(number_starting_objects, hidden_dim)\n', (23989, 24026), False, 'import torch\n'), ((25672, 25687), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25685, 25687), False, 'import torch\n'), ((25816, 25845), 'torch.zeros', 'torch.zeros', (['num_points_total'], {}), '(num_points_total)\n', (25827, 25845), False, 'import torch\n'), ((27354, 27369), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27367, 27369), False, 'import torch\n'), ((32219, 32234), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32232, 32234), False, 'import torch\n'), ((34859, 34905), 'botorch.models.SingleTaskGP', 'SingleTaskGP', ([], {'train_X': 'train_X', 'train_Y': 'train_Y'}), '(train_X=train_X, train_Y=train_Y)\n', (34871, 34905), False, 'from botorch.models import SingleTaskGP\n'), ((35113, 35147), 
'botorch.acquisition.UpperConfidenceBound', 'UpperConfidenceBound', (['gp'], {'beta': '(0.1)'}), '(gp, beta=0.1)\n', (35133, 35147), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((1163, 1190), 'torch.exp', 'torch.exp', (['(log_ai - max_log)'], {}), '(log_ai - max_log)\n', (1172, 1190), False, 'import torch\n'), ((20461, 20513), 'torch.zeros', 'torch.zeros', (['num_objects_to_select', 'latent_space_dim'], {}), '(num_objects_to_select, latent_space_dim)\n', (20472, 20513), False, 'import torch\n'), ((35224, 35259), 'botorch.acquisition.ExpectedImprovement', 'ExpectedImprovement', (['gp'], {'best_f': '(0.1)'}), '(gp, best_f=0.1)\n', (35243, 35259), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((39591, 39715), 'func_timeout.func_timeout', 'func_timeout.func_timeout', (['generation_timout_seconds', 'model.decode'], {'args': '(z_tree, z_mol)', 'kwargs': "{'prob_decode': False}"}), "(generation_timout_seconds, model.decode, args=(\n z_tree, z_mol), kwargs={'prob_decode': False})\n", (39616, 39715), False, 'import func_timeout\n'), ((32055, 32086), 'botorch.utils.standardize', 'standardize', (['objects_properties'], {}), '(objects_properties)\n', (32066, 32086), False, 'from botorch.utils import standardize\n'), ((35336, 35376), 'botorch.acquisition.ProbabilityOfImprovement', 'ProbabilityOfImprovement', (['gp'], {'best_f': '(0.1)'}), '(gp, best_f=0.1)\n', (35360, 35376), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((32384, 32426), 'torch.zeros', 'torch.zeros', (['num_training_points_surrogate'], {}), '(num_training_points_surrogate)\n', (32395, 32426), False, 
'import torch\n'), ((35455, 35490), 'botorch.acquisition.qUpperConfidenceBound', 'qUpperConfidenceBound', (['gp'], {'beta': '(0.1)'}), '(gp, beta=0.1)\n', (35476, 35490), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((35847, 35869), 'torch.ones', 'torch.ones', (['hidden_dim'], {}), '(hidden_dim)\n', (35857, 35869), False, 'import torch\n'), ((35883, 35905), 'torch.ones', 'torch.ones', (['hidden_dim'], {}), '(hidden_dim)\n', (35893, 35905), False, 'import torch\n'), ((25061, 25102), 'torch.ones_like', 'torch.ones_like', (['predicted_property_slice'], {}), '(predicted_property_slice)\n', (25076, 25102), False, 'import torch\n'), ((35570, 35606), 'botorch.acquisition.qExpectedImprovement', 'qExpectedImprovement', (['gp'], {'best_f': '(0.1)'}), '(gp, best_f=0.1)\n', (35590, 35606), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((35690, 35719), 'botorch.acquisition.qNoisyExpectedImprovement', 'qNoisyExpectedImprovement', (['gp'], {}), '(gp)\n', (35715, 35719), False, 'from botorch.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement\n'), ((40001, 40033), 'torch.tensor', 'torch.tensor', (['new_point_property'], {}), '(new_point_property)\n', (40013, 40033), False, 'import torch\n'), ((40771, 40803), 'torch.tensor', 'torch.tensor', (['new_point_property'], {}), '(new_point_property)\n', (40783, 40803), False, 'import torch\n')] |
from PIL import Image, ImageDraw
from PIL.ImageChops import multiply
import numpy as np
try:
import matplotlib.pyplot as plt
except:
print("### matplotlib.pyplot could not be imported.")
def imshow(image):
    """Display an array-like image in a matplotlib window (blocks until closed)."""
    plt.imshow(image)
    plt.show()
def pilshow(pil_image):
    """Display a PIL image by converting it to a numpy array and delegating to imshow."""
    imshow(np.asarray(pil_image))
def mask_boxes(img, boxes=None):
    """
    Black out everything in *img* that lies outside the given rectangles.

    Args:
        img: PIL image; assumed to be 3-channel RGB (the mask is built as
            RGB and ImageChops.multiply requires matching modes) — TODO
            confirm with callers.
        boxes: list of rectangles, each ``[left, upper, right, lower]`` in
            pixel coordinates. Defaults to ``[[0, 0, 100, 100]]``.
    Returns: PIL image with the pixels inside ``boxes`` preserved and all
        other pixels set to black.
    """
    # Avoid the mutable-default-argument pitfall while keeping the
    # original default behaviour.
    if boxes is None:
        boxes = [[0, 0, 100, 100]]
    w, h = img.size
    # Start from an all-black mask image...
    img_mask = Image.fromarray(np.full((h, w, 3), fill_value=0, dtype=np.uint8))
    d = ImageDraw.Draw(img_mask)
    # ...and punch white holes where the boxes are.
    for box in boxes:
        d.rectangle(box, fill="white")
    # Pixel-wise multiply: white mask regions keep the original pixels,
    # black regions force the output to black.
    img_out = multiply(img, img_mask)
    return img_out
if __name__ == "__main__":
    # Demo: keep two rectangular regions of a sample image and display the result.
    boxes = [[300, 100, 400, 200], [140, 120, 265, 170]]
    image_path = "../val/instant_coffee/instant_coffee53.jpg"
    # Copy the image so masking/display happen after the file handle is closed.
    with Image.open(image_path) as img:
        image_pil = img.copy()
    pilshow(mask_boxes(image_pil, boxes))
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.asarray",
"PIL.ImageDraw.Draw",
"PIL.ImageChops.multiply",
"numpy.full",
"matplotlib.pyplot.show"
] | [((221, 238), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (231, 238), True, 'import matplotlib.pyplot as plt\n'), ((243, 253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (251, 253), True, 'import matplotlib.pyplot as plt\n'), ((595, 619), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_mask'], {}), '(img_mask)\n', (609, 619), False, 'from PIL import Image, ImageDraw\n'), ((695, 718), 'PIL.ImageChops.multiply', 'multiply', (['img', 'img_mask'], {}), '(img, img_mask)\n', (703, 718), False, 'from PIL.ImageChops import multiply\n'), ((291, 312), 'numpy.asarray', 'np.asarray', (['pil_image'], {}), '(pil_image)\n', (301, 312), True, 'import numpy as np\n'), ((537, 585), 'numpy.full', 'np.full', (['(h, w, 3)'], {'fill_value': '(0)', 'dtype': 'np.uint8'}), '((h, w, 3), fill_value=0, dtype=np.uint8)\n', (544, 585), True, 'import numpy as np\n'), ((895, 917), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (905, 917), False, 'from PIL import Image, ImageDraw\n')] |
import numpy as np
import pandas as pd
from mia.estimators import ShadowModelBundle, prepare_attack_data
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import resample
# depent on tensorflow 1.14
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.regularizers import l1
from tensorflow.keras.utils import to_categorical
# privacy package
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
# set random seed so the data splits/shuffles below are reproducible
np.random.seed(19122)
# shorthand for the non-private baseline optimizer (TF1 compat API)
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
class DataGenerator(object):
    """
    Load and preprocess genotype/phenotype data.

    Pipeline (run eagerly from __init__): load the two files, drop
    individuals with a missing phenotype, binarize the phenotype,
    balance the two classes by downsampling the majority, and one-hot
    encode the genotype.
    """

    def __init__(self, genotype_file, phenotype_file, shuffle=True):
        """
        :param genotype_file: tab-separated genotype matrix, first column is the index
        :param phenotype_file: comma-separated phenotype table, first column is the index
        :param shuffle: whether to shuffle individuals after class balancing
        """
        super(DataGenerator, self).__init__()
        self.genotype_file = genotype_file
        self.phenotype_file = phenotype_file
        self.shuffle = shuffle
        # preprocess
        self._load_data()
        self._filter_na_phenotype()
        self._binarize_phenotype()
        self._balance_sample()
        self._one_hot_encode()

    def _load_data(self):
        # BUG FIX: read from the instance attributes instead of the
        # module-level globals genotype_file/phenotype_file, which only
        # happened to exist when this file was run as a script.
        self.genotype = pd.read_csv(self.genotype_file, sep='\t', index_col=0)
        # recode missing genotype calls (-1) as 0
        self.genotype[self.genotype == -1] = 0
        self.multi_pheno = pd.read_csv(self.phenotype_file, sep=',', index_col=0)
        # The third phenotype column is the trait of interest; take a copy
        # so that binarization below does not silently mutate multi_pheno
        # through the iloc view (avoids SettingWithCopy issues).
        self.phenotype = self.multi_pheno.iloc[:, 2].copy()

    def _filter_na_phenotype(self):
        # drop individuals whose phenotype is missing
        missing_mask = self.phenotype.isna()
        self.genotype = self.genotype[~missing_mask]
        self.phenotype = self.phenotype[~missing_mask]

    def _binarize_phenotype(self):
        # positive trait values -> 1, negative -> 0 (exact zeros stay 0)
        self.phenotype[self.phenotype > 0] = 1
        self.phenotype[self.phenotype < 0] = 0

    def _balance_sample(self):
        # find majority and minority classes
        num_zeros = self.phenotype.value_counts()[0]
        num_ones = self.phenotype.value_counts()[1]
        if num_ones > num_zeros:
            majority = 1
            minority = 0
        else:
            majority = 0
            minority = 1
        # downsample the majority class to the minority class size
        # (note: replace=True samples with replacement, as in the original)
        majority_index = self.phenotype == majority
        minority_index = self.phenotype == minority
        majority_downsampled = resample(
            self.genotype[majority_index],
            replace=True,
            n_samples=self.phenotype.value_counts()[minority],
            # random_state=27 # reproducible results
        )
        self.genotype = pd.concat(
            [majority_downsampled, self.genotype[minority_index]], axis=0)
        if self.shuffle:
            self.genotype = self.genotype.sample(frac=1)
        # re-align the phenotype to the (possibly shuffled) genotype rows
        self.phenotype = self.phenotype[self.genotype.index]

    def _one_hot_encode(self):
        self.onehot = to_categorical(self.genotype)
def split_to_be_divisible(X, y, shadow_perc, batch_size):
    """
    Split a dataset into target and shadow parts whose sizes are evenly
    divisible by ``batch_size``.

    :param X: feature array, indexable by integer row positions
    :param y: labels as a pandas Series (indexed positionally via ``iloc``)
    :param shadow_perc: fraction assigned to the shadow split
        (target_perc = 1 - shadow_perc, before rounding)
    :param batch_size: batch size both splits must be divisible by
    :return: target_X, target_y, shadow_X, shadow_y
    """
    # stop and output error, if X and y have different number of individuals.
    assert y.shape[0] == X.shape[0]
    # round each split size down to the nearest multiple of batch_size
    total_row = X.shape[0]
    num_shadow_row = int(total_row * shadow_perc) - int(total_row * shadow_perc) % batch_size
    num_target_row = (total_row - num_shadow_row) - (total_row - num_shadow_row) % batch_size
    # assign rows randomly and without overlap
    random_row = np.random.permutation(total_row)
    shadow_row = random_row[:num_shadow_row]
    # BUG FIX: the original used random_row[-num_target_row:], which
    # returns ALL rows (overlapping the shadow split) when
    # num_target_row == 0; an explicit start index yields an empty slice.
    target_row = random_row[total_row - num_target_row:total_row]
    target_X = X[target_row]
    shadow_X = X[shadow_row]
    target_y = y.iloc[target_row]
    shadow_y = y.iloc[shadow_row]
    return target_X, target_y, shadow_X, shadow_y
def target_model():
    """Build and compile the target model.

    The attack is white-box, hence the attacker is assumed to know this
    architecture too.

    :return: compiled Keras classifier
    """
    # Conv1D -> pooling -> dropout -> flatten -> sigmoid head.
    layers = [
        Conv1D(num_kernels,
               kernel_size,
               padding='same',
               activation='relu',
               kernel_regularizer=l1(kernel_regularization),
               input_shape=input_shape),
        AveragePooling1D(pool_size=2),
        Dropout(drop_prec),
        Flatten(),
        Dense(1, activation='sigmoid'),
    ]
    classifier = Sequential(layers)
    if not dpsgd:
        # plain (non-private) SGD baseline
        chosen_optimizer = GradientDescentOptimizer(learning_rate=learning_rate)
        chosen_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    else:
        chosen_optimizer = DPGradientDescentGaussianOptimizer(
            l2_norm_clip=l2_norm_clip,
            noise_multiplier=noise_multiplier,
            num_microbatches=int(microbatches_perc * batch_size),
            learning_rate=learning_rate)
        # The DP optimizer needs per-example losses, so disable reduction.
        chosen_loss = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.compat.v2.losses.Reduction.NONE)
    # Compile model with Keras
    classifier.compile(optimizer=chosen_optimizer, loss=chosen_loss, metrics=['accuracy'])
    return classifier
def shadow_model():
    """Build and compile a shadow model.

    The architecture of the shadow model is the same as the target model,
    because the attack is white-box, hence the attacker is assumed to know
    that architecture too.

    Delegates to target_model() instead of repeating the whole
    architecture/compilation code, so the two definitions cannot drift
    apart (the bodies were previously copy-pasted duplicates).

    :return: freshly built and compiled shadow model
    """
    return target_model()
def main():
    """Run the membership-inference experiment end to end: train the
    target model, train one shadow model, fit the attack classifier and
    report the attack accuracy."""
    print("Training the target model...")
    # Carve a validation split off the target data, with both parts
    # evenly divisible by the batch size.
    tgt_X_tr, tgt_y_tr, tgt_X_val, tgt_y_val = split_to_be_divisible(
        target_X, target_y, 0.2, batch_size)
    trained_target = target_model()
    trained_target.fit(
        tgt_X_tr,
        tgt_y_tr,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=[tgt_X_val, tgt_y_val],
        verbose=1)

    print("Training the shadow models.")
    # Train only one shadow model, on half of the shadow data.
    shadow_dataset_size = int(shadow_X.shape[0] / 2)
    bundle = ShadowModelBundle(
        shadow_model,
        shadow_dataset_size=shadow_dataset_size,
        num_models=1,
    )
    # Shadow training (same hyper-parameters as the target model) yields
    # the (features, in/out labels) data used to train the attacker.
    attacker_X, attacker_y = bundle.fit_transform(
        shadow_X,
        shadow_y.values,
        fit_kwargs=dict(epochs=epochs, batch_size=batch_size, verbose=1),
    )

    print("Training attack model...")
    attack_clf = RandomForestClassifier(max_depth=2)
    attack_clf.fit(attacker_X, attacker_y)

    # Test the success of the attack: members come from the target's
    # training data, non-members from the held-out "unused" split.
    n_attack_test = unused_X.shape[0]
    data_in = tgt_X_tr[:n_attack_test], tgt_y_tr[:n_attack_test]
    data_out = unused_X[:n_attack_test], unused_y[:n_attack_test]
    # Compile them into the expected format for the AttackModelBundle.
    attack_test_data, real_membership_labels = prepare_attack_data(
        trained_target, data_in, data_out)
    # Compute the attack accuracy.
    attack_guesses = attack_clf.predict(attack_test_data)
    attack_accuracy = np.mean(attack_guesses == real_membership_labels)
    print('attack accuracy: {}'.format(attack_accuracy))
if __name__ == '__main__':
    # input files
    # genotype
    genotype_file = '../data/genotype_full.txt'
    # phenotype
    phenotype_file = '../data/phenotype.csv'
    # parameters
    dpsgd = True  # train with differentially-private SGD when True
    # target model hyper-parameters same as Lasso-dp
    epochs = 50
    batch_size = 16
    microbatches_perc = 1.0
    learning_rate = 0.01
    kernel_regularization = 0.001352
    drop_prec = 0.25
    num_kernels = 8
    kernel_size = 5
    # DP-SGD privacy parameters (only used when dpsgd is True)
    noise_multiplier = 0.8
    l2_norm_clip = 1.0
    print("Loading and splitting dataset")
    yeast = DataGenerator(genotype_file, phenotype_file)
    # half of the balanced data for the target model, half for the shadow side
    target_X, target_y, shadow_X, shadow_y = split_to_be_divisible(yeast.onehot,
                                                           yeast.phenotype,
                                                           0.5,
                                                           batch_size=80)
    # hold out part of the shadow data as "non-member" examples for the attack test
    shadow_X, shadow_y, unused_X, unused_y = split_to_be_divisible(shadow_X,
                                                           shadow_y,
                                                           0.2,
                                                           batch_size)
    input_shape = (target_X.shape[1], target_X.shape[2])
    # # define the grid search parameters
    # if dpsgd:
    #     param_grid = {
    #         'epochs': [50, 100],
    #         'batch_size': [8, 16],
    #         'microbatches_perc': [0.5, 1.0],
    #         'learning_rate': [0.01, 0.001],
    #         'kernel_regularization': [0, 0.001352],
    #         'drop_prec': [0.25, 0.5],
    #         'num_kernels': [8, 16],
    #         'kernel_size': [5, 9],
    #         'noise_multiplier': [0.4, 0.6, 0.8, 1.0, 1.2],
    #         'l2_norm_clip': [0.6, 1.0, 1.4, 1.8],
    #         'verbose': [0]
    #     }
    # else:
    #     # define the grid search parameters
    #     param_grid = {
    #         'epochs': [50, 100],
    #         'batch_size': [8, 16],
    #         'learning_rate': [0.01, 0.001],
    #         'kernel_regularization': [0, 0.001352],
    #         'drop_prec': [0.25, 0.5],
    #         'num_kernels': [8, 16],
    #         'kernel_size': [5, 9],
    #         'verbose': [0]
    #     }
    main()
| [
"numpy.mean",
"tensorflow.keras.utils.to_categorical",
"mia.estimators.prepare_attack_data",
"pandas.read_csv",
"tensorflow.keras.layers.AveragePooling1D",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.layers.Dropout",
"mia.estimators.ShadowModelBundle",
"sklearn.ensemble.RandomFor... | [((639, 660), 'numpy.random.seed', 'np.random.seed', (['(19122)'], {}), '(19122)\n', (653, 660), True, 'import numpy as np\n'), ((3887, 3919), 'numpy.random.permutation', 'np.random.permutation', (['total_row'], {}), '(total_row)\n', (3908, 3919), True, 'import numpy as np\n'), ((4400, 4412), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4410, 4412), False, 'from tensorflow.keras.models import Sequential\n'), ((5893, 5905), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5903, 5905), False, 'from tensorflow.keras.models import Sequential\n'), ((8054, 8144), 'mia.estimators.ShadowModelBundle', 'ShadowModelBundle', (['shadow_model'], {'shadow_dataset_size': 'SHADOW_DATASET_SIZE', 'num_models': '(1)'}), '(shadow_model, shadow_dataset_size=SHADOW_DATASET_SIZE,\n num_models=1)\n', (8071, 8144), False, 'from mia.estimators import ShadowModelBundle, prepare_attack_data\n'), ((8681, 8716), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)'}), '(max_depth=2)\n', (8703, 8716), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((9222, 9264), 'mia.estimators.prepare_attack_data', 'prepare_attack_data', (['tm', 'data_in', 'data_out'], {}), '(tm, data_in, data_out)\n', (9241, 9264), False, 'from mia.estimators import ShadowModelBundle, prepare_attack_data\n'), ((9374, 9423), 'numpy.mean', 'np.mean', (['(attack_guesses == real_membership_labels)'], {}), '(attack_guesses == real_membership_labels)\n', (9381, 9423), True, 'import numpy as np\n'), ((1342, 1391), 'pandas.read_csv', 'pd.read_csv', (['genotype_file'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(genotype_file, sep='\\t', index_col=0)\n", (1353, 1391), True, 'import pandas as pd\n'), ((1523, 1572), 'pandas.read_csv', 'pd.read_csv', (['phenotype_file'], {'sep': '""","""', 'index_col': '(0)'}), "(phenotype_file, sep=',', index_col=0)\n", (1534, 1572), True, 'import pandas as pd\n'), 
((2719, 2791), 'pandas.concat', 'pd.concat', (['[majority_downsampled, self.genotype[minority_index]]'], {'axis': '(0)'}), '([majority_downsampled, self.genotype[minority_index]], axis=0)\n', (2728, 2791), True, 'import pandas as pd\n'), ((3002, 3031), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['self.genotype'], {}), '(self.genotype)\n', (3016, 3031), False, 'from tensorflow.keras.utils import to_categorical\n'), ((4741, 4770), 'tensorflow.keras.layers.AveragePooling1D', 'AveragePooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4757, 4770), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((4791, 4809), 'tensorflow.keras.layers.Dropout', 'Dropout', (['drop_prec'], {}), '(drop_prec)\n', (4798, 4809), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((4830, 4839), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4837, 4839), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((4860, 4890), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4865, 4890), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((5255, 5358), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.compat.v2.losses.Reduction.NONE'}), '(from_logits=True, reduction=tf.compat.v2\n .losses.Reduction.NONE)\n', (5289, 5358), True, 'import tensorflow as tf\n'), ((5466, 5518), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5500, 5518), True, 'import tensorflow as tf\n'), ((6234, 6263), 'tensorflow.keras.layers.AveragePooling1D', 'AveragePooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (6250, 6263), False, 'from 
tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((6284, 6302), 'tensorflow.keras.layers.Dropout', 'Dropout', (['drop_prec'], {}), '(drop_prec)\n', (6291, 6302), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((6323, 6332), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6330, 6332), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((6353, 6383), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6358, 6383), False, 'from tensorflow.keras.layers import Conv1D, AveragePooling1D, Dropout, Flatten, Dense\n'), ((6748, 6851), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.compat.v2.losses.Reduction.NONE'}), '(from_logits=True, reduction=tf.compat.v2\n .losses.Reduction.NONE)\n', (6782, 6851), True, 'import tensorflow as tf\n'), ((6959, 7011), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (6993, 7011), True, 'import tensorflow as tf\n'), ((4623, 4648), 'tensorflow.keras.regularizers.l1', 'l1', (['kernel_regularization'], {}), '(kernel_regularization)\n', (4625, 4648), False, 'from tensorflow.keras.regularizers import l1\n'), ((6116, 6141), 'tensorflow.keras.regularizers.l1', 'l1', (['kernel_regularization'], {}), '(kernel_regularization)\n', (6118, 6141), False, 'from tensorflow.keras.regularizers import l1\n')] |
from __future__ import print_function
# pyDIA
#
# This software implements the difference-imaging algorithm of Bramich et al. (2010)
# with mixed-resolution delta basis functions. It uses an NVIDIA GPU to do the heavy
# processing.
#
# Subroutines deconvolve3_rows, deconvolve3_columns, resolve_coeffs_2d and
# interpolate_2d are taken from the Gwiddion software for scanning probe
# microscopy (http://gwyddion.net/), which is distributed under the GNU General
# Public License.
#
# All remaining code is Copyright (C) 2014, 2015 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
import fnmatch
import itertools
from multiprocessing import Pool
import numpy as np
import data_structures as DS
import io_functions as IO
import image_functions as IM
import photometry_functions as PH
import c_interface_functions as CI
def difference_image(ref, target, params, stamp_positions=None, psf_image=None,
                     star_positions=None, star_group_boundaries=None,
                     detector_mean_positions_x=None,
                     detector_mean_positions_y=None, star_sky=None):
    """Difference ``target`` against the reference image ``ref``.

    Iteratively solves for the convolution kernel that matches ``ref`` to
    ``target``, computes the model and difference images, and optionally
    performs PSF photometry on the result.

    Parameters
    ----------
    ref, target : image objects exposing .name, .image, .mask, .blur, .fw
        and .inv_variance (presumably DS.Observation instances -- only
        those attributes are used here; confirm against data_structures).
    params : parameter object (kernel radii, iterations, gain, readnoise,
        do_photometry, loc_output, ...).
    stamp_positions : optional array of stamp positions used when building
        the least-squares matrix.
    psf_image : path to the PSF FITS image; photometry is only attempted
        when params.do_photometry is set and this is given.
    star_positions, star_group_boundaries, detector_mean_positions_x,
    detector_mean_positions_y : photometry inputs passed through to the
        photometry routines.
    star_sky : accepted for interface compatibility; not used in this
        function.

    Returns
    -------
    g : DS.EmptyBase with attributes model, diff, norm, variance, mask and
        (if photometry was done) flux/dflux.  If the kernel solution fails,
        g.model, g.flux and g.diff are None.
    """
    from scipy.linalg import lu_solve, lu_factor, LinAlgError
    start = time.time()
    print('difference_image', ref.name, target.name)
    #
    # Set the kernel size based on the difference in seeing from the reference
    #
    kernelRadius = min(params.kernel_maximum_radius,
                       max(params.kernel_minimum_radius, np.sqrt(np.abs(
                           target.fw ** 2 - ref.fw ** 2)) * params.fwhm_mult))
    #
    # Define the kernel basis functions
    #
    print('Defining kernel pixels', time.time() - start)
    if params.use_fft_kernel_pixels:
        kernelIndex, extendedBasis = IM.define_kernel_pixels_fft(ref, target,
                                                                kernelRadius + 2,
                                                                INNER_RADIUS=20,
                                                                threshold=params.fft_kernel_threshold)
    else:
        kernelIndex, extendedBasis = IM.define_kernel_pixels(kernelRadius)
    #
    # We don't want to use bad pixels in either the target or reference image
    #
    smask = target.mask * ref.mask
    bmask = np.ones(smask.shape, dtype=bool)
    g = DS.EmptyBase()
    for iteration in range(params.iterations):
        print('Computing matrix', time.time() - start)
        tmask = bmask * smask
        #
        # Compute the least-squares matrix and vector
        #
        H, V, RRB = CI.compute_matrix_and_vector_cuda(ref.image, ref.blur,
                                                      target.image,
                                                      target.inv_variance,
                                                      tmask, kernelIndex,
                                                      extendedBasis,
                                                      kernelRadius, params,
                                                      stamp_positions=stamp_positions)
        #
        # Solve the matrix equation to find the kernel coefficients
        #
        print('Solving matrix equation', time.time() - start)
        try:
            lu, piv = lu_factor(H)
            c = lu_solve((lu, piv), V).astype(np.float32).copy()
        except (LinAlgError, ValueError):
            # Singular or ill-conditioned matrix: give up on this image.
            print('LU decomposition failed')
            g.model = None
            g.flux = None
            g.diff = None
            print('H')
            print(H)
            sys.stdout.flush()
            return g
        #
        # Compute the model image
        #
        print('Computing model', time.time() - start)
        g.model = CI.compute_model_cuda(ref.image.shape, RRB, c,
                                        kernelIndex, extendedBasis, params)
        #
        # Compute the difference image
        #
        difference = (target.image - g.model)
        g.norm = difference * np.sqrt(target.inv_variance)
        #
        # Recompute the variance image from the model
        #
        target.inv_variance = 1.0 / (g.model / params.gain + (
            params.readnoise / params.gain) ** 2) + (1 - smask)
        mp = np.where(tmask == 0)
        if len(mp[0]) > 0:
            target.inv_variance[mp] = 1.e-12
        #
        # Mask pixels that disagree with the model
        #
        if iteration > 2:
            bmask = IM.kappa_clip(smask, g.norm,
                                  params.pixel_rejection_threshold)
        print('Iteration', iteration, 'completed', time.time() - start)
    #
    # Delete the target image array to save memory
    #
    del target.image
    #
    # Save the kernel coefficients to a file
    #
    if params.do_photometry and psf_image:
        kf = params.loc_output + os.path.sep + 'k_' + os.path.basename(
            target.name)
        IO.write_kernel_table(kf, kernelIndex, extendedBasis, c, params)
    g.norm = difference * np.sqrt(target.inv_variance)
    g.variance = 1.0 / target.inv_variance
    g.mask = tmask
    #
    # Do the photometry if requested
    #
    g.flux = None
    if params.do_photometry and psf_image:
        print('star_positions', star_positions.shape)
        print('star_group_boundaries', star_group_boundaries)
        if ref.name == target.name:
            # Photometry of the reference against itself: use the
            # sky-subtracted reference as the photometry target.
            sky_image, _ = IO.read_fits_file(
                params.loc_output + os.path.sep + 'temp.sub2.fits')
            phot_target = ref.image - sky_image
            # Bug fix: the C interface module is imported as CI; the old
            # 'CIF.photom_all_stars_simultaneous' raised a NameError.
            g.flux, g.dflux = CI.photom_all_stars_simultaneous(phot_target,
                                                              target.inv_variance,
                                                              star_positions,
                                                              psf_image, c,
                                                              kernelIndex,
                                                              extendedBasis,
                                                              kernelRadius,
                                                              params,
                                                              star_group_boundaries,
                                                              detector_mean_positions_x,
                                                              detector_mean_positions_y)
        else:
            phot_target = difference
            g.flux, g.dflux = CI.photom_all_stars(phot_target,
                                                  target.inv_variance,
                                                  star_positions, psf_image, c,
                                                  kernelIndex, extendedBasis,
                                                  kernelRadius, params,
                                                  star_group_boundaries,
                                                  detector_mean_positions_x,
                                                  detector_mean_positions_y)
        print('Photometry completed', time.time() - start)
    #
    # Apply the photometric scale factor to the difference image.
    # We don't do this prior to the photometry because the PSF is
    # being convolved by the kernel, which already includes the
    # photometric scale factor.
    #
    g.diff = IM.apply_photometric_scale(difference, c, params.pdeg)
    sys.stdout.flush()
    return g
def process_reference_image(f, args):
    """Difference one reference-list frame against the best-seeing frame.

    ``args`` is the tuple (best_seeing_ref, params, stamp_positions).
    The frame's pixel arrays are released afterwards to save memory.
    Returns the result object produced by difference_image.
    """
    best_ref, params, stamps = args
    result = difference_image(f, best_ref, params, stamp_positions=stamps)
    # Free the per-frame arrays now that the difference has been computed.
    del f.image
    del f.mask
    del f.inv_variance
    return result
def process_reference_image_helper(args):
    """Pool.map adapter: unpack (frame, args) and call process_reference_image."""
    frame, rest = args
    return process_reference_image(frame, rest)
def make_reference(files, params, reference_image='ref.fits'):
    """Build the photometric reference image from the best frames.

    Selects a reference list (either from params.ref_include_file or by
    seeing/sky/signal cuts), differences each member against the
    best-seeing frame, kappa-clips bad models, and writes the median
    model stack as ``reference_image`` (plus its mask).

    Parameters
    ----------
    files : list of frame objects exposing .name, .fw, .sky, .signal,
        .roundness, .image, .mask (presumably DS.Observation instances).
    params : pipeline parameter object.
    reference_image : output FITS name written under params.loc_output.

    Returns
    -------
    stamp_positions : array of stamp positions (or None if
        params.use_stamps is not set).
    """
    ref_seeing = 1000
    #
    # Have we specified the files to make the reference with?
    #
    if params.ref_include_file:
        ref_list = []
        for line in open(params.ref_include_file, 'r'):
            for f in files:
                if f.name == line.split()[0]:
                    ref_list.append(f)
                    print(f.name, f.fw, f.signal)
                    if f.fw < ref_seeing:
                        ref_sky = f.sky
                        ref_seeing = f.fw
                        best_seeing_ref = f
    else:
        #
        # We try to choose the best images
        #
        reference_exclude = []
        if params.ref_exclude_file:
            for line in open(params.ref_exclude_file, 'r'):
                reference_exclude.append(line.split()[0])
        sig = []
        for f in files:
            sig.append(f.signal)
        sig = np.asarray(sig)
        sigcut = np.mean(sig) - 2.0 * np.std(sig)
        print('signal: mean, std, cut = ', np.mean(sig), np.std(sig), sigcut)
        print('Searching for best-seeing image')
        best_seeing_ref = None
        if len(files) == 1:
            f = files[0]
            ref_sky = f.sky
            ref_seeing = f.fw
            best_seeing_ref = f
        else:
            for f in files:
                print(f.name, f.fw, f.sky, f.signal)
                if (f.fw < ref_seeing) and (
                        f.fw > params.reference_min_seeing) and (
                        f.roundness < params.reference_max_roundness) and (
                        f.signal > sigcut) and not (f.name in reference_exclude):
                    ref_sky = f.sky
                    ref_seeing = f.fw
                    best_seeing_ref = f
            if best_seeing_ref is None:
                print("No ref image satisfies requirements to be best seeing ref.")
                # TODO : raise an exception
        #
        # Grow the seeing cutoff until enough frames pass the cuts
        #
        ref_list = []
        while len(ref_list) < params.min_ref_images:
            ref_list = []
            print('Reference FWHM = ', ref_seeing)
            print('Cutoff FWHM for reference = ',
                  params.reference_seeing_factor * ref_seeing)
            print('Combining for reference:')
            if len(files) == 1:
                ref_list.append(f)
            else:
                for f in files:
                    if (f.fw < params.reference_seeing_factor * ref_seeing) and (
                            f.roundness < params.reference_max_roundness) and (
                            f.sky < params.reference_sky_factor * ref_sky) and (
                            f.fw > params.reference_min_seeing) and (
                            f.signal > sigcut) and not (
                            f.name in reference_exclude):
                        ref_list.append(f)
                        print(f.name, f.fw, f.sky, f.signal)
            params.reference_seeing_factor *= 1.02
        #
        # Re-derive the signal cut and best-seeing frame from the final list
        #
        sig = []
        for f in ref_list:
            sig.append(f.signal)
        sig = np.asarray(sig)
        sigcut = np.mean(sig) - 2 * np.std(sig)
        print('signal: mean, std, cut = ', np.mean(sig), np.std(sig), sigcut)
        ref_seeing = 1000
        ref_roundness = 2.0
        for f in ref_list:
            if (f.fw < ref_seeing) and (f.signal > sigcut):
                ref_sky = f.sky
                ref_seeing = f.fw
                ref_roundness = f.roundness
                best_seeing_ref = f
    #
    # Which ref image has the worst seeing?
    #
    worst_seeing = 0.0
    for f in ref_list:
        if f.fw > worst_seeing:
            worst_seeing = f.fw
            worst_seeing_ref = f
    if params.ref_image_list:
        with open(params.loc_output + os.path.sep + params.ref_image_list,
                  'w') as fid:
            for f in ref_list:
                fid.write(
                    f.name + ' ' + str(f.fw) + ' ' + str(f.sky) + ' ' + str(
                        f.signal) + '\n')
    #
    # Find the locations of the brightest stars to use as stamp positions
    # if required
    #
    stamp_positions = None
    if params.use_stamps:
        stars = PH.choose_stamps(best_seeing_ref, params)
        stamp_positions = stars[:, 0:2]
    #
    # Construct the reference image.
    #
    good_ref_list = []
    for f in ref_list:
        f.blur = IM.boxcar_blur(f.image)
        good_ref_list.append(f)
        print('difference_image:', f.name, best_seeing_ref.name)
    if not (params.use_GPU) and (params.n_parallel > 1):
        #
        # Use parallel processing to process images in the reference list.
        # Bug fix: itertools.izip does not exist in Python 3; the builtin
        # zip is equivalent here under both Python 2 and 3.
        #
        pool = Pool(params.n_parallel)
        results = pool.map(process_reference_image_helper,
                           zip(ref_list, itertools.repeat(
                               (best_seeing_ref, params, stamp_positions))))
        for i, f in enumerate(ref_list):
            f.result = results[i]
    else:
        for f in ref_list:
            f.result = process_reference_image(f, (
                best_seeing_ref, params, stamp_positions))
    #
    # Remove bad reference models
    #
    rlist = [g for g in good_ref_list]
    for g in rlist:
        if not (isinstance(g.result.diff, np.ndarray)):
            print('removing', g.name)
            good_ref_list.remove(g)
    print('good reference list:')
    for g in good_ref_list:
        print(g.name)
    #
    # Iteratively reject frames whose difference-image scatter is an
    # outlier (2.5 sigma) relative to the rest of the list.
    #
    print('kappa-clipping reference list')
    for iterations in range(5):
        if len(good_ref_list) < 4:
            break
        sd = np.zeros(len(good_ref_list))
        for i, g in enumerate(good_ref_list):
            print(g.name, g.result.diff)
            sd[i] = np.std(g.result.diff)
        sds = sd.std()
        sdm = sd.mean()
        rlist = [g for g in good_ref_list]
        for g in rlist:
            if np.std(g.result.diff) > (sdm + 2.5 * sds):
                print('removing', g.name)
                good_ref_list.remove(g)
    #
    # Combine the good reference models
    #
    g = good_ref_list[0]
    gstack = np.zeros(
        [len(good_ref_list), g.result.model.shape[0], g.result.model.shape[1]])
    mask = np.ones_like(g.result.model)
    print('final reference list')
    for i, g in enumerate(good_ref_list):
        if isinstance(g.result.model, np.ndarray):
            print(g.name, np.std(g.result.diff), np.median(g.result.model))
            IO.write_image(g.result.model,
                           params.loc_output + os.path.sep + 'mr_' + g.name)
            gstack[i, :, :] = g.result.model
            # NOTE(review): g.mask was deleted by process_reference_image;
            # presumably the frame object reloads it lazily -- confirm.
            mask *= g.mask
    rr = np.median(gstack, axis=0)
    IO.write_image(rr, params.loc_output + os.path.sep + reference_image)
    IO.write_image(mask,
                   params.loc_output + os.path.sep + 'mask_' + reference_image)
    for f in ref_list:
        f.result = None
    return stamp_positions
def process_image(f, args):
    """Difference one registered frame against the reference and save outputs.

    ``args`` is the tuple (ref, params, stamp_positions, star_positions,
    star_group_boundaries, star_unsort_index, detector_mean_positions_x,
    detector_mean_positions_y).  Work is skipped when the difference image
    'd_<name>' already exists in params.loc_output.  Always returns 0.
    """
    (ref, params, stamp_positions, star_positions, star_group_boundaries,
     star_unsort_index, detector_mean_positions_x,
     detector_mean_positions_y) = args
    out = params.loc_output + os.path.sep
    if os.path.exists(out + 'd_' + f.name):
        return 0
    #
    # Compute difference image
    #
    result = difference_image(ref, f, params,
                              stamp_positions=stamp_positions,
                              psf_image=out + 'psf.fits',
                              star_positions=star_positions,
                              star_group_boundaries=star_group_boundaries,
                              detector_mean_positions_x=detector_mean_positions_x,
                              detector_mean_positions_y=detector_mean_positions_y)
    # The frame's pixel arrays are no longer needed; free the memory.
    del f.image
    del f.mask
    del f.inv_variance
    #
    # Save photometry to a file, first undoing the CPU star grouping
    #
    if isinstance(result.flux, np.ndarray):
        if not (params.use_GPU):
            print('ungrouping fluxes')
            result.flux = result.flux[star_unsort_index].copy()
            result.dflux = result.dflux[star_unsort_index].copy()
        np.savetxt(out + f.name + '.flux',
                   np.vstack((result.flux, result.dflux)).T)
        f.flux = result.flux.copy()
        f.dflux = result.dflux.copy()
    #
    # Save the difference, model, noise and mask images
    #
    if isinstance(result.diff, np.ndarray):
        for prefix, image in (('d_', result.diff), ('m_', result.model),
                              ('n_', result.norm), ('z_', result.mask)):
            IO.write_image(image, out + prefix + f.name)
    return 0
def process_image_helper(args):
    """Pool.map adapter: unpack (frame, args) and call process_image."""
    frame, rest = args
    return process_image(frame, rest)
def imsub_all_fits(params, reference='ref.fits'):
    """Run the full image-subtraction pipeline over params.loc_data.

    Registers every image matching params.name_pattern, builds (or loads)
    the photometric reference image, optionally detects stars / computes
    the PSF and photometers the reference, then differences every frame
    against the reference.

    Parameters
    ----------
    params : pipeline parameter object.
    reference : file name of the reference image under params.loc_output.

    Returns
    -------
    files : list of frame objects that were processed.
    """
    #
    # Create the output directory if it doesn't exist
    #
    if not (os.path.exists(params.loc_output)):
        os.mkdir(params.loc_output)
    #
    # The degree of spatial shape changes has to be at least as
    # high as the degree of spatial photometric scale
    #
    if (params.sdeg < params.pdeg):
        print('Increasing params.sdeg to ', params.pdeg)
        params.sdeg = params.pdeg
    #
    # Print out the parameters for this run.
    #
    print('Parameters:')
    for par in dir(params):
        print(par, getattr(params, par))
    print('Determine our list of images')
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            del g.data
            del g.mask
            if g.fw > 0.0:
                files.append(g)
                print(g.name)
    if len(files) < 3:
        print('Only', len(files), 'files found matching', params.name_pattern)
        print('Exiting')
        sys.exit(0)
    #
    # Have we specified a registration template?
    #
    if params.registration_image:
        reg = DS.Observation(params.registration_image, params)
    else:
        # Otherwise choose the sharpest plausible frame (fw > 1.2).
        reg = DS.EmptyBase()
        reg.fw = 999.0
        for f in files:
            if (f.fw < reg.fw) and (f.fw > 1.2):
                reg = f
    print('Registration image:', reg.name)
    #
    # Register images
    #
    for f in files:
        if f == reg:
            f.image = f.data
            rf = params.loc_output + os.path.sep + 'r_' + f.name
            IO.write_image(f.image, rf)
        else:
            f.register(reg, params)
        # delete image arrays to save memory
        del f.image
        del f.mask
        del f.inv_variance
    del reg.data
    del reg.image
    del reg.mask
    del reg.inv_variance
    #
    # Write image names and dates to a file
    #
    if params.image_list_file:
        with open(params.loc_output + os.path.sep + params.image_list_file,
                  'w') as fid:
            for f in files:
                date = None
                if params.datekey:
                    # Dates are stored as JD - 2450000
                    date = IO.get_date(
                        params.loc_data + os.path.sep + f.name,
                        key=params.datekey) - 2450000
                if date:
                    fid.write(f.name + ' %10.5f\n' % date)
                else:
                    fid.write(f.name)
    #
    # Make the photometric reference image if we don't have it.
    # Find stamp positions if required.
    #
    if not (os.path.exists(params.loc_output + os.path.sep + reference)):
        print('Reg = ', reg.name)
        stamp_positions = make_reference(files, params,
                                        reference_image=reference)
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        mask, _ = IO.read_fits_file(
            params.loc_output + os.path.sep + 'mask_' + reference)
        ref.mask = mask
        ref.register(reg, params)
    else:
        ref = DS.Observation(params.loc_output + os.path.sep + reference,
                             params)
        if os.path.exists(
                params.loc_output + os.path.sep + 'mask_' + reference):
            mask, _ = IO.read_fits_file(
                params.loc_output + os.path.sep + 'mask_' + reference)
        else:
            mask = np.ones_like(ref.data)
        ref.mask = mask
        ref.register(reg, params)
        stamp_positions = None
        if params.use_stamps:
            stamp_file = params.loc_output + os.path.sep + 'stamp_positions'
            if os.path.exists(stamp_file):
                stamp_positions = np.genfromtxt(stamp_file)
            else:
                stars = PH.choose_stamps(ref, params)
                stamp_positions = stars[:, 0:2]
                np.savetxt(stamp_file, stamp_positions)
    # Mask saturated reference pixels using a temporarily lowered threshold.
    pm = params.pixel_max
    params.pixel_max *= 0.9
    ref.mask *= IM.compute_saturated_pixel_mask(ref.image, 4, params)
    params.pixel_max = pm
    ref.blur = IM.boxcar_blur(ref.image)
    if params.mask_cluster:
        ref.mask *= IM.mask_cluster(ref.image, ref.mask, params)
    #
    # Detect stars and compute the PSF if we are doing photometry
    #
    star_positions = None
    sky = 0.0
    if params.do_photometry:
        star_file = params.loc_output + os.path.sep + 'star_positions'
        psf_file = params.loc_output + os.path.sep + 'psf.fits'
        if not (os.path.exists(psf_file)) or not (os.path.exists(star_file)):
            stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
            star_positions = stars[:, 0:2]
            star_sky = stars[:, 4]
        if os.path.exists(star_file):
            star_positions = np.genfromtxt(star_file)
            star_sky = star_positions[:, 0] * 0.0
        else:
            np.savetxt(star_file, star_positions)
    print('sky =', sky)
    #
    # If we are using a CPU, group the stars by location
    #
    print('Group_check')
    print('params.do_photometry', params.do_photometry)
    print('params.use_GPU', params.use_GPU)
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    star_unsort_index = None
    if params.do_photometry:
        star_sort_index, star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y = PH.group_stars_ccd(
            params, star_positions,
            params.loc_output + os.path.sep + reference)
        star_positions = star_positions[star_sort_index]
        star_sky = star_sky[star_sort_index]
        # Keep the inverse permutation so fluxes can be un-grouped later.
        star_unsort_index = np.argsort(star_sort_index)
    #
    # Do photometry of the reference image
    #
    if params.do_photometry:
        ref_flux_file = params.loc_output + os.path.sep + 'ref.flux'
        if not (os.path.exists(ref_flux_file)):
            result = difference_image(ref, ref, params,
                                      stamp_positions=stamp_positions,
                                      psf_image=psf_file,
                                      star_positions=star_positions,
                                      star_group_boundaries=star_group_boundaries,
                                      detector_mean_positions_x=detector_mean_positions_x,
                                      detector_mean_positions_y=detector_mean_positions_y,
                                      star_sky=star_sky)
            if isinstance(result.flux, np.ndarray):
                print('ungrouping fluxes')
                result.flux = result.flux[star_unsort_index].copy()
                result.dflux = result.dflux[star_unsort_index].copy()
                np.savetxt(ref_flux_file,
                           np.vstack((result.flux, result.dflux)).T)
    #
    # Process images
    #
    if params.make_difference_images:
        if not (params.use_GPU) and (params.n_parallel > 1):
            pool = Pool(params.n_parallel)
            # Bug fix: itertools.izip does not exist in Python 3; the
            # builtin zip behaves identically here.
            pool.map(process_image_helper, zip(files,
                                               itertools.repeat((
                                                   ref, params,
                                                   stamp_positions,
                                                   star_positions,
                                                   star_group_boundaries,
                                                   star_unsort_index,
                                                   detector_mean_positions_x,
                                                   detector_mean_positions_y))))
        else:
            for f in files:
                process_image(f, (ref, params, stamp_positions, star_positions,
                                  star_group_boundaries, star_unsort_index,
                                  detector_mean_positions_x,
                                  detector_mean_positions_y))
    return files
def do_photometry(params, extname='newflux', star_file='star_positions',
                  psf_file='psf.fits', star_positions=None,
                  reference_image='ref.fits'):
    """Re-do PSF photometry on existing difference images.

    Photometers the reference image and every existing difference image
    ('d_<name>') using the saved kernel tables, writing the fluxes to
    '<name>.<extname>' files in params.loc_output.

    Parameters
    ----------
    params : pipeline parameter object.
    extname : extension appended to each output flux file name.
    star_file : star-position file name under params.loc_output.
    psf_file : PSF FITS file name under params.loc_output.
    star_positions : optional (n, 2) array of star positions; when None,
        positions are read/computed from star_file.
    reference_image : reference FITS file name under params.loc_output.
    """
    #
    # Determine our list of files
    #
    all_files = os.listdir(params.loc_data)
    all_files.sort()
    files = []
    for f in all_files:
        if fnmatch.fnmatch(f, params.name_pattern):
            g = DS.Observation(params.loc_data + os.path.sep + f, params)
            if g.fw > 0.0:
                files.append(g)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    ref.register(ref, params)
    #
    # Detect stars and compute the PSF if necessary
    #
    if params.do_photometry:
        psf_file = params.loc_output + os.path.sep + psf_file
        if os.path.exists(params.star_file):
            # Pre-determined star list: columns 1-2 hold the positions.
            star_pos = np.genfromtxt(params.star_file)[:, 1:3]
            if not (os.path.exists(psf_file)):
                stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
        else:
            if not (os.path.exists(star_file)):
                stars = PH.compute_psf_image(params, ref, psf_image=psf_file)
                star_pos = stars[:, 0:2]
                np.savetxt(star_file, star_pos)
            else:
                star_pos = np.genfromtxt(star_file)
            if not (os.path.exists(psf_file)):
                stars = PH.compute_psf_image(params, ref,
                                             psf_image=psf_file)
    #
    # Have we been passed an array of star positions?
    # Bug fix: use "is None" -- comparing a numpy array to None with ==
    # yields an element-wise array, which raises ValueError in a boolean
    # context whenever an array is actually passed in.
    #
    if star_positions is None:
        star_positions = star_pos
    #
    # If we are using a CPU, group the stars by location
    #
    star_group_boundaries = None
    detector_mean_positions_x = None
    detector_mean_positions_y = None
    if not (params.use_GPU):
        star_sort_index, star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y = PH.group_stars_ccd(
            params, star_positions,
            params.loc_output + os.path.sep + reference_image)
        star_positions = star_positions[star_sort_index]
        # Inverse permutation to restore the original star order later.
        star_unsort_index = np.argsort(star_sort_index)
    #
    # Process the reference image
    #
    print('Processing', reference_image)
    ref = DS.Observation(params.loc_output + os.path.sep + reference_image,
                         params)
    ref.register(ref, params)
    smask = IM.compute_saturated_pixel_mask(ref.image, 6, params)
    ref.inv_variance += 1 - smask
    ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
        reference_image)
    kernelIndex, extendedBasis, c, params = IO.read_kernel_table(ktable,
                                                                params)
    kernelRadius = np.max(kernelIndex[:, 0]) + 1
    if np.sum(extendedBasis) > 0:
        kernelRadius += 1
    print('kernelIndex', kernelIndex)
    print('extendedBasis', extendedBasis)
    print('coeffs', c)
    print('kernelRadius', kernelRadius)
    phot_target = ref.image
    ref.flux, ref.dflux = CI.photom_all_stars(phot_target, ref.inv_variance,
                                              star_positions, psf_file, c,
                                              kernelIndex, extendedBasis,
                                              kernelRadius, params,
                                              star_group_boundaries,
                                              detector_mean_positions_x,
                                              detector_mean_positions_y)
    if isinstance(ref.flux, np.ndarray):
        if not (params.use_GPU):
            print('ungrouping fluxes')
            ref.flux = ref.flux[star_unsort_index].copy()
            ref.dflux = ref.dflux[star_unsort_index].copy()
        np.savetxt(
            params.loc_output + os.path.sep + reference_image + '.' + extname,
            np.vstack((ref.flux, ref.dflux)).T)
    #
    # Process difference images
    #
    for f in files:
        if not (os.path.exists(
                params.loc_output + os.path.sep + f.name + '.' + extname)):
            print('Processing', f.name)
            target = f.name
            dtarget = params.loc_output + os.path.sep + 'd_' + os.path.basename(
                target)
            ntarget = params.loc_output + os.path.sep + 'n_' + os.path.basename(
                target)
            ztarget = params.loc_output + os.path.sep + 'z_' + os.path.basename(
                target)
            ktable = params.loc_output + os.path.sep + 'k_' + os.path.basename(
                target)
            if os.path.exists(dtarget) and os.path.exists(
                    ntarget) and os.path.exists(ktable):
                norm, h = IO.read_fits_file(ntarget)
                diff, h = IO.read_fits_file(dtarget)
                mask, h = IO.read_fits_file(ztarget)
                # Reconstruct the inverse variance from the normalised
                # difference image; masked pixels get unit weight added.
                inv_var = (norm / diff) ** 2 + (1 - mask)
                kernelIndex, extendedBasis, c, params = IO.read_kernel_table(
                    ktable, params)
                kernelRadius = np.max(kernelIndex[:, 0]) + 1
                if np.sum(extendedBasis) > 0:
                    kernelRadius += 1
                print('kernelIndex', kernelIndex)
                print('extendedBasis', extendedBasis)
                print('coeffs', c)
                print('kernelRadius', kernelRadius)
                diff = IM.undo_photometric_scale(diff, c, params.pdeg)
                flux, dflux = PH.photom_all_stars(diff, inv_var,
                                                  star_positions, psf_file, c,
                                                  kernelIndex, extendedBasis,
                                                  kernelRadius, params,
                                                  star_group_boundaries,
                                                  detector_mean_positions_x,
                                                  detector_mean_positions_y)
                if isinstance(flux, np.ndarray):
                    if not (params.use_GPU):
                        print('ungrouping fluxes')
                        flux = flux[star_unsort_index].copy()
                        dflux = dflux[star_unsort_index].copy()
                    np.savetxt(
                        params.loc_output + os.path.sep + f.name + '.' + extname,
                        np.vstack((flux, dflux)).T)
| [
"c_interface_functions.compute_model_cuda",
"numpy.sqrt",
"io_functions.write_image",
"photometry_functions.compute_psf_image",
"numpy.argsort",
"io_functions.get_date",
"sys.exit",
"data_structures.EmptyBase",
"numpy.genfromtxt",
"itertools.repeat",
"data_structures.Observation",
"os.path.exi... | [((1834, 1845), 'time.time', 'time.time', ([], {}), '()\n', (1843, 1845), False, 'import time\n'), ((3303, 3335), 'numpy.ones', 'np.ones', (['smask.shape'], {'dtype': 'bool'}), '(smask.shape, dtype=bool)\n', (3310, 3335), True, 'import numpy as np\n'), ((3345, 3359), 'data_structures.EmptyBase', 'DS.EmptyBase', ([], {}), '()\n', (3357, 3359), True, 'import data_structures as DS\n'), ((8389, 8443), 'image_functions.apply_photometric_scale', 'IM.apply_photometric_scale', (['difference', 'c', 'params.pdeg'], {}), '(difference, c, params.pdeg)\n', (8415, 8443), True, 'import image_functions as IM\n'), ((8448, 8466), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8464, 8466), False, 'import sys\n'), ((13199, 13215), 'numpy.zeros', 'np.zeros', (['[1, 1]'], {}), '([1, 1])\n', (13207, 13215), True, 'import numpy as np\n'), ((15120, 15148), 'numpy.ones_like', 'np.ones_like', (['g.result.model'], {}), '(g.result.model)\n', (15132, 15148), True, 'import numpy as np\n'), ((15553, 15578), 'numpy.median', 'np.median', (['gstack'], {'axis': '(0)'}), '(gstack, axis=0)\n', (15562, 15578), True, 'import numpy as np\n'), ((15583, 15652), 'io_functions.write_image', 'IO.write_image', (['rr', '(params.loc_output + os.path.sep + reference_image)'], {}), '(rr, params.loc_output + os.path.sep + reference_image)\n', (15597, 15652), True, 'import io_functions as IO\n'), ((15657, 15742), 'io_functions.write_image', 'IO.write_image', (['mask', "(params.loc_output + os.path.sep + 'mask_' + reference_image)"], {}), "(mask, params.loc_output + os.path.sep + 'mask_' +\n reference_image)\n", (15671, 15742), True, 'import io_functions as IO\n'), ((18634, 18661), 'os.listdir', 'os.listdir', (['params.loc_data'], {}), '(params.loc_data)\n', (18644, 18661), False, 'import os\n'), ((22211, 22264), 'image_functions.compute_saturated_pixel_mask', 'IM.compute_saturated_pixel_mask', (['ref.image', '(4)', 'params'], {}), '(ref.image, 4, params)\n', (22242, 22264), True, 'import 
image_functions as IM\n'), ((22306, 22331), 'image_functions.boxcar_blur', 'IM.boxcar_blur', (['ref.image'], {}), '(ref.image)\n', (22320, 22331), True, 'import image_functions as IM\n'), ((26989, 27016), 'os.listdir', 'os.listdir', (['params.loc_data'], {}), '(params.loc_data)\n', (26999, 27016), False, 'import os\n'), ((27273, 27346), 'data_structures.Observation', 'DS.Observation', (['(params.loc_output + os.path.sep + reference_image)', 'params'], {}), '(params.loc_output + os.path.sep + reference_image, params)\n', (27287, 27346), True, 'import data_structures as DS\n'), ((29043, 29116), 'data_structures.Observation', 'DS.Observation', (['(params.loc_output + os.path.sep + reference_image)', 'params'], {}), '(params.loc_output + os.path.sep + reference_image, params)\n', (29057, 29116), True, 'import data_structures as DS\n'), ((29294, 29347), 'image_functions.compute_saturated_pixel_mask', 'IM.compute_saturated_pixel_mask', (['ref.image', '(6)', 'params'], {}), '(ref.image, 6, params)\n', (29325, 29347), True, 'import image_functions as IM\n'), ((29523, 29559), 'io_functions.read_kernel_table', 'IO.read_kernel_table', (['ktable', 'params'], {}), '(ktable, params)\n', (29543, 29559), True, 'import io_functions as IO\n'), ((29931, 30150), 'c_interface_functions.photom_all_stars', 'CI.photom_all_stars', (['phot_target', 'ref.inv_variance', 'star_positions', 'psf_file', 'c', 'kernelIndex', 'extendedBasis', 'kernelRadius', 'params', 'star_group_boundaries', 'detector_mean_positions_x', 'detector_mean_positions_y'], {}), '(phot_target, ref.inv_variance, star_positions, psf_file,\n c, kernelIndex, extendedBasis, kernelRadius, params,\n star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y\n )\n', (29950, 30150), True, 'import c_interface_functions as CI\n'), ((2736, 2854), 'image_functions.define_kernel_pixels_fft', 'IM.define_kernel_pixels_fft', (['ref', 'target', '(kernelRadius + 2)'], {'INNER_RADIUS': '(20)', 'threshold': 
'params.fft_kernel_threshold'}), '(ref, target, kernelRadius + 2, INNER_RADIUS=20,\n threshold=params.fft_kernel_threshold)\n', (2763, 2854), True, 'import image_functions as IM\n'), ((3093, 3130), 'image_functions.define_kernel_pixels', 'IM.define_kernel_pixels', (['kernelRadius'], {}), '(kernelRadius)\n', (3116, 3130), True, 'import image_functions as IM\n'), ((3576, 3764), 'c_interface_functions.compute_matrix_and_vector_cuda', 'CI.compute_matrix_and_vector_cuda', (['ref.image', 'ref.blur', 'target.image', 'target.inv_variance', 'tmask', 'kernelIndex', 'extendedBasis', 'kernelRadius', 'params'], {'stamp_positions': 'stamp_positions'}), '(ref.image, ref.blur, target.image, target\n .inv_variance, tmask, kernelIndex, extendedBasis, kernelRadius, params,\n stamp_positions=stamp_positions)\n', (3609, 3764), True, 'import c_interface_functions as CI\n'), ((4751, 4837), 'c_interface_functions.compute_model_cuda', 'CI.compute_model_cuda', (['ref.image.shape', 'RRB', 'c', 'kernelIndex', 'extendedBasis', 'params'], {}), '(ref.image.shape, RRB, c, kernelIndex, extendedBasis,\n params)\n', (4772, 4837), True, 'import c_interface_functions as CI\n'), ((5258, 5278), 'numpy.where', 'np.where', (['(tmask == 0)'], {}), '(tmask == 0)\n', (5266, 5278), True, 'import numpy as np\n'), ((5930, 5994), 'io_functions.write_kernel_table', 'IO.write_kernel_table', (['kf', 'kernelIndex', 'extendedBasis', 'c', 'params'], {}), '(kf, kernelIndex, extendedBasis, c, params)\n', (5951, 5994), True, 'import io_functions as IO\n'), ((6022, 6050), 'numpy.sqrt', 'np.sqrt', (['target.inv_variance'], {}), '(target.inv_variance)\n', (6029, 6050), True, 'import numpy as np\n'), ((9834, 9849), 'numpy.asarray', 'np.asarray', (['sig'], {}), '(sig)\n', (9844, 9849), True, 'import numpy as np\n'), ((11935, 11950), 'numpy.asarray', 'np.asarray', (['sig'], {}), '(sig)\n', (11945, 11950), True, 'import numpy as np\n'), ((13056, 13097), 'photometry_functions.choose_stamps', 'PH.choose_stamps', 
(['best_seeing_ref', 'params'], {}), '(best_seeing_ref, params)\n', (13072, 13097), True, 'import photometry_functions as PH\n'), ((13307, 13330), 'image_functions.boxcar_blur', 'IM.boxcar_blur', (['f.image'], {}), '(f.image)\n', (13321, 13330), True, 'import image_functions as IM\n'), ((13596, 13619), 'multiprocessing.Pool', 'Pool', (['params.n_parallel'], {}), '(params.n_parallel)\n', (13600, 13619), False, 'from multiprocessing import Pool\n'), ((16090, 16113), 'os.path.exists', 'os.path.exists', (['dtarget'], {}), '(dtarget)\n', (16104, 16113), False, 'import os\n'), ((18089, 18122), 'os.path.exists', 'os.path.exists', (['params.loc_output'], {}), '(params.loc_output)\n', (18103, 18122), False, 'import os\n'), ((18133, 18160), 'os.mkdir', 'os.mkdir', (['params.loc_output'], {}), '(params.loc_output)\n', (18141, 18160), False, 'import os\n'), ((18733, 18772), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['f', 'params.name_pattern'], {}), '(f, params.name_pattern)\n', (18748, 18772), False, 'import fnmatch\n'), ((19115, 19126), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (19123, 19126), False, 'import sys\n'), ((19237, 19286), 'data_structures.Observation', 'DS.Observation', (['params.registration_image', 'params'], {}), '(params.registration_image, params)\n', (19251, 19286), True, 'import data_structures as DS\n'), ((19311, 19325), 'data_structures.EmptyBase', 'DS.EmptyBase', ([], {}), '()\n', (19323, 19325), True, 'import data_structures as DS\n'), ((20784, 20843), 'os.path.exists', 'os.path.exists', (['(params.loc_output + os.path.sep + reference)'], {}), '(params.loc_output + os.path.sep + reference)\n', (20798, 20843), False, 'import os\n'), ((21018, 21085), 'data_structures.Observation', 'DS.Observation', (['(params.loc_output + os.path.sep + reference)', 'params'], {}), '(params.loc_output + os.path.sep + reference, params)\n', (21032, 21085), True, 'import data_structures as DS\n'), ((21133, 21205), 'io_functions.read_fits_file', 'IO.read_fits_file', 
(["(params.loc_output + os.path.sep + 'mask_' + reference)"], {}), "(params.loc_output + os.path.sep + 'mask_' + reference)\n", (21150, 21205), True, 'import io_functions as IO\n'), ((21301, 21368), 'data_structures.Observation', 'DS.Observation', (['(params.loc_output + os.path.sep + reference)', 'params'], {}), '(params.loc_output + os.path.sep + reference, params)\n', (21315, 21368), True, 'import data_structures as DS\n'), ((21409, 21478), 'os.path.exists', 'os.path.exists', (["(params.loc_output + os.path.sep + 'mask_' + reference)"], {}), "(params.loc_output + os.path.sep + 'mask_' + reference)\n", (21423, 21478), False, 'import os\n'), ((22380, 22424), 'image_functions.mask_cluster', 'IM.mask_cluster', (['ref.image', 'ref.mask', 'params'], {}), '(ref.image, ref.mask, params)\n', (22395, 22424), True, 'import image_functions as IM\n'), ((22949, 22974), 'os.path.exists', 'os.path.exists', (['star_file'], {}), '(star_file)\n', (22963, 22974), False, 'import os\n'), ((24111, 24202), 'photometry_functions.group_stars_ccd', 'PH.group_stars_ccd', (['params', 'star_positions', '(params.loc_output + os.path.sep + reference)'], {}), '(params, star_positions, params.loc_output + os.path.sep +\n reference)\n', (24129, 24202), True, 'import photometry_functions as PH\n'), ((24354, 24381), 'numpy.argsort', 'np.argsort', (['star_sort_index'], {}), '(star_sort_index)\n', (24364, 24381), True, 'import numpy as np\n'), ((27088, 27127), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['f', 'params.name_pattern'], {}), '(f, params.name_pattern)\n', (27103, 27127), False, 'import fnmatch\n'), ((27569, 27601), 'os.path.exists', 'os.path.exists', (['params.star_file'], {}), '(params.star_file)\n', (27583, 27601), False, 'import os\n'), ((28713, 28810), 'photometry_functions.group_stars_ccd', 'PH.group_stars_ccd', (['params', 'star_positions', '(params.loc_output + os.path.sep + reference_image)'], {}), '(params, star_positions, params.loc_output + os.path.sep +\n reference_image)\n', 
(28731, 28810), True, 'import photometry_functions as PH\n'), ((28917, 28944), 'numpy.argsort', 'np.argsort', (['star_sort_index'], {}), '(star_sort_index)\n', (28927, 28944), True, 'import numpy as np\n'), ((29436, 29469), 'os.path.basename', 'os.path.basename', (['reference_image'], {}), '(reference_image)\n', (29452, 29469), False, 'import os\n'), ((29644, 29669), 'numpy.max', 'np.max', (['kernelIndex[:, 0]'], {}), '(kernelIndex[:, 0])\n', (29650, 29669), True, 'import numpy as np\n'), ((29681, 29702), 'numpy.sum', 'np.sum', (['extendedBasis'], {}), '(extendedBasis)\n', (29687, 29702), True, 'import numpy as np\n'), ((2641, 2652), 'time.time', 'time.time', ([], {}), '()\n', (2650, 2652), False, 'import time\n'), ((4284, 4296), 'scipy.linalg.lu_factor', 'lu_factor', (['H'], {}), '(H)\n', (4293, 4296), False, 'from scipy.linalg import lu_solve, lu_factor, LinAlgError\n'), ((5010, 5038), 'numpy.sqrt', 'np.sqrt', (['target.inv_variance'], {}), '(target.inv_variance)\n', (5017, 5038), True, 'import numpy as np\n'), ((5469, 5531), 'image_functions.kappa_clip', 'IM.kappa_clip', (['smask', 'g.norm', 'params.pixel_rejection_threshold'], {}), '(smask, g.norm, params.pixel_rejection_threshold)\n', (5482, 5531), True, 'import image_functions as IM\n'), ((5879, 5908), 'os.path.basename', 'os.path.basename', (['target.name'], {}), '(target.name)\n', (5895, 5908), False, 'import os\n'), ((6403, 6472), 'io_functions.read_fits_file', 'IO.read_fits_file', (["(params.loc_output + os.path.sep + 'temp.sub2.fits')"], {}), "(params.loc_output + os.path.sep + 'temp.sub2.fits')\n", (6420, 6472), True, 'import io_functions as IO\n'), ((7514, 7737), 'c_interface_functions.photom_all_stars', 'CI.photom_all_stars', (['phot_target', 'target.inv_variance', 'star_positions', 'psf_image', 'c', 'kernelIndex', 'extendedBasis', 'kernelRadius', 'params', 'star_group_boundaries', 'detector_mean_positions_x', 'detector_mean_positions_y'], {}), '(phot_target, target.inv_variance, star_positions,\n 
psf_image, c, kernelIndex, extendedBasis, kernelRadius, params,\n star_group_boundaries, detector_mean_positions_x, detector_mean_positions_y\n )\n', (7533, 7737), True, 'import c_interface_functions as CI\n'), ((9867, 9879), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (9874, 9879), True, 'import numpy as np\n'), ((9943, 9955), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (9950, 9955), True, 'import numpy as np\n'), ((9957, 9968), 'numpy.std', 'np.std', (['sig'], {}), '(sig)\n', (9963, 9968), True, 'import numpy as np\n'), ((11968, 11980), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (11975, 11980), True, 'import numpy as np\n'), ((12042, 12054), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (12049, 12054), True, 'import numpy as np\n'), ((12056, 12067), 'numpy.std', 'np.std', (['sig'], {}), '(sig)\n', (12062, 12067), True, 'import numpy as np\n'), ((14652, 14673), 'numpy.std', 'np.std', (['g.result.diff'], {}), '(g.result.diff)\n', (14658, 14673), True, 'import numpy as np\n'), ((15364, 15449), 'io_functions.write_image', 'IO.write_image', (['g.result.model', "(params.loc_output + os.path.sep + 'mr_' + g.name)"], {}), "(g.result.model, params.loc_output + os.path.sep + 'mr_' + g.name\n )\n", (15378, 15449), True, 'import io_functions as IO\n'), ((17427, 17503), 'io_functions.write_image', 'IO.write_image', (['result.diff', "(params.loc_output + os.path.sep + 'd_' + f.name)"], {}), "(result.diff, params.loc_output + os.path.sep + 'd_' + f.name)\n", (17441, 17503), True, 'import io_functions as IO\n'), ((17543, 17620), 'io_functions.write_image', 'IO.write_image', (['result.model', "(params.loc_output + os.path.sep + 'm_' + f.name)"], {}), "(result.model, params.loc_output + os.path.sep + 'm_' + f.name)\n", (17557, 17620), True, 'import io_functions as IO\n'), ((17660, 17736), 'io_functions.write_image', 'IO.write_image', (['result.norm', "(params.loc_output + os.path.sep + 'n_' + f.name)"], {}), "(result.norm, params.loc_output + os.path.sep 
+ 'n_' + f.name)\n", (17674, 17736), True, 'import io_functions as IO\n'), ((17776, 17852), 'io_functions.write_image', 'IO.write_image', (['result.mask', "(params.loc_output + os.path.sep + 'z_' + f.name)"], {}), "(result.mask, params.loc_output + os.path.sep + 'z_' + f.name)\n", (17790, 17852), True, 'import io_functions as IO\n'), ((18790, 18847), 'data_structures.Observation', 'DS.Observation', (['(params.loc_data + os.path.sep + f)', 'params'], {}), '(params.loc_data + os.path.sep + f, params)\n', (18804, 18847), True, 'import data_structures as DS\n'), ((19672, 19699), 'io_functions.write_image', 'IO.write_image', (['f.image', 'rf'], {}), '(f.image, rf)\n', (19686, 19699), True, 'import io_functions as IO\n'), ((21519, 21591), 'io_functions.read_fits_file', 'IO.read_fits_file', (["(params.loc_output + os.path.sep + 'mask_' + reference)"], {}), "(params.loc_output + os.path.sep + 'mask_' + reference)\n", (21536, 21591), True, 'import io_functions as IO\n'), ((21642, 21664), 'numpy.ones_like', 'np.ones_like', (['ref.data'], {}), '(ref.data)\n', (21654, 21664), True, 'import numpy as np\n'), ((21876, 21902), 'os.path.exists', 'os.path.exists', (['stamp_file'], {}), '(stamp_file)\n', (21890, 21902), False, 'import os\n'), ((22806, 22859), 'photometry_functions.compute_psf_image', 'PH.compute_psf_image', (['params', 'ref'], {'psf_image': 'psf_file'}), '(params, ref, psf_image=psf_file)\n', (22826, 22859), True, 'import photometry_functions as PH\n'), ((23005, 23029), 'numpy.genfromtxt', 'np.genfromtxt', (['star_file'], {}), '(star_file)\n', (23018, 23029), True, 'import numpy as np\n'), ((23107, 23144), 'numpy.savetxt', 'np.savetxt', (['star_file', 'star_positions'], {}), '(star_file, star_positions)\n', (23117, 23144), True, 'import numpy as np\n'), ((24552, 24581), 'os.path.exists', 'os.path.exists', (['ref_flux_file'], {}), '(ref_flux_file)\n', (24566, 24581), False, 'import os\n'), ((25659, 25682), 'multiprocessing.Pool', 'Pool', (['params.n_parallel'], {}), 
'(params.n_parallel)\n', (25663, 25682), False, 'from multiprocessing import Pool\n'), ((27145, 27202), 'data_structures.Observation', 'DS.Observation', (['(params.loc_data + os.path.sep + f)', 'params'], {}), '(params.loc_data + os.path.sep + f, params)\n', (27159, 27202), True, 'import data_structures as DS\n'), ((30875, 30947), 'os.path.exists', 'os.path.exists', (["(params.loc_output + os.path.sep + f.name + '.' + extname)"], {}), "(params.loc_output + os.path.sep + f.name + '.' + extname)\n", (30889, 30947), False, 'import os\n'), ((3443, 3454), 'time.time', 'time.time', ([], {}), '()\n', (3452, 3454), False, 'import time\n'), ((4228, 4239), 'time.time', 'time.time', ([], {}), '()\n', (4237, 4239), False, 'import time\n'), ((4584, 4602), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4600, 4602), False, 'import sys\n'), ((4712, 4723), 'time.time', 'time.time', ([], {}), '()\n', (4721, 4723), False, 'import time\n'), ((5618, 5629), 'time.time', 'time.time', ([], {}), '()\n', (5627, 5629), False, 'import time\n'), ((8114, 8125), 'time.time', 'time.time', ([], {}), '()\n', (8123, 8125), False, 'import time\n'), ((9888, 9899), 'numpy.std', 'np.std', (['sig'], {}), '(sig)\n', (9894, 9899), True, 'import numpy as np\n'), ((11987, 11998), 'numpy.std', 'np.std', (['sig'], {}), '(sig)\n', (11993, 11998), True, 'import numpy as np\n'), ((13731, 13791), 'itertools.repeat', 'itertools.repeat', (['(best_seeing_ref, params, stamp_positions)'], {}), '((best_seeing_ref, params, stamp_positions))\n', (13747, 13791), False, 'import itertools\n'), ((14803, 14824), 'numpy.std', 'np.std', (['g.result.diff'], {}), '(g.result.diff)\n', (14809, 14824), True, 'import numpy as np\n'), ((15302, 15323), 'numpy.std', 'np.std', (['g.result.diff'], {}), '(g.result.diff)\n', (15308, 15323), True, 'import numpy as np\n'), ((15325, 15350), 'numpy.median', 'np.median', (['g.result.model'], {}), '(g.result.model)\n', (15334, 15350), True, 'import numpy as np\n'), ((21938, 21963), 
'numpy.genfromtxt', 'np.genfromtxt', (['stamp_file'], {}), '(stamp_file)\n', (21951, 21963), True, 'import numpy as np\n'), ((22006, 22035), 'photometry_functions.choose_stamps', 'PH.choose_stamps', (['ref', 'params'], {}), '(ref, params)\n', (22022, 22035), True, 'import photometry_functions as PH\n'), ((22100, 22139), 'numpy.savetxt', 'np.savetxt', (['stamp_file', 'stamp_positions'], {}), '(stamp_file, stamp_positions)\n', (22110, 22139), True, 'import numpy as np\n'), ((22724, 22748), 'os.path.exists', 'os.path.exists', (['psf_file'], {}), '(psf_file)\n', (22738, 22748), False, 'import os\n'), ((22758, 22783), 'os.path.exists', 'os.path.exists', (['star_file'], {}), '(star_file)\n', (22772, 22783), False, 'import os\n'), ((27626, 27657), 'numpy.genfromtxt', 'np.genfromtxt', (['params.star_file'], {}), '(params.star_file)\n', (27639, 27657), True, 'import numpy as np\n'), ((27686, 27710), 'os.path.exists', 'os.path.exists', (['psf_file'], {}), '(psf_file)\n', (27700, 27710), False, 'import os\n'), ((27737, 27790), 'photometry_functions.compute_psf_image', 'PH.compute_psf_image', (['params', 'ref'], {'psf_image': 'psf_file'}), '(params, ref, psf_image=psf_file)\n', (27757, 27790), True, 'import photometry_functions as PH\n'), ((27825, 27850), 'os.path.exists', 'os.path.exists', (['star_file'], {}), '(star_file)\n', (27839, 27850), False, 'import os\n'), ((27877, 27930), 'photometry_functions.compute_psf_image', 'PH.compute_psf_image', (['params', 'ref'], {'psf_image': 'psf_file'}), '(params, ref, psf_image=psf_file)\n', (27897, 27930), True, 'import photometry_functions as PH\n'), ((27988, 28019), 'numpy.savetxt', 'np.savetxt', (['star_file', 'star_pos'], {}), '(star_file, star_pos)\n', (27998, 28019), True, 'import numpy as np\n'), ((28065, 28089), 'numpy.genfromtxt', 'np.genfromtxt', (['star_file'], {}), '(star_file)\n', (28078, 28089), True, 'import numpy as np\n'), ((30757, 30789), 'numpy.vstack', 'np.vstack', (['(ref.flux, ref.dflux)'], {}), '((ref.flux, 
ref.dflux))\n', (30766, 30789), True, 'import numpy as np\n'), ((31099, 31123), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (31115, 31123), False, 'import os\n'), ((31204, 31228), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (31220, 31228), False, 'import os\n'), ((31309, 31333), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (31325, 31333), False, 'import os\n'), ((31413, 31437), 'os.path.basename', 'os.path.basename', (['target'], {}), '(target)\n', (31429, 31437), False, 'import os\n'), ((31471, 31494), 'os.path.exists', 'os.path.exists', (['dtarget'], {}), '(dtarget)\n', (31485, 31494), False, 'import os\n'), ((31499, 31522), 'os.path.exists', 'os.path.exists', (['ntarget'], {}), '(ntarget)\n', (31513, 31522), False, 'import os\n'), ((31548, 31570), 'os.path.exists', 'os.path.exists', (['ktable'], {}), '(ktable)\n', (31562, 31570), False, 'import os\n'), ((31599, 31625), 'io_functions.read_fits_file', 'IO.read_fits_file', (['ntarget'], {}), '(ntarget)\n', (31616, 31625), True, 'import io_functions as IO\n'), ((31652, 31678), 'io_functions.read_fits_file', 'IO.read_fits_file', (['dtarget'], {}), '(dtarget)\n', (31669, 31678), True, 'import io_functions as IO\n'), ((31705, 31731), 'io_functions.read_fits_file', 'IO.read_fits_file', (['ztarget'], {}), '(ztarget)\n', (31722, 31731), True, 'import io_functions as IO\n'), ((31847, 31883), 'io_functions.read_kernel_table', 'IO.read_kernel_table', (['ktable', 'params'], {}), '(ktable, params)\n', (31867, 31883), True, 'import io_functions as IO\n'), ((32266, 32313), 'image_functions.undo_photometric_scale', 'IM.undo_photometric_scale', (['diff', 'c', 'params.pdeg'], {}), '(diff, c, params.pdeg)\n', (32291, 32313), True, 'import image_functions as IM\n'), ((32345, 32543), 'photometry_functions.photom_all_stars', 'PH.photom_all_stars', (['diff', 'inv_var', 'star_positions', 'psf_file', 'c', 'kernelIndex', 'extendedBasis', 'kernelRadius', 
'params', 'star_group_boundaries', 'detector_mean_positions_x', 'detector_mean_positions_y'], {}), '(diff, inv_var, star_positions, psf_file, c, kernelIndex,\n extendedBasis, kernelRadius, params, star_group_boundaries,\n detector_mean_positions_x, detector_mean_positions_y)\n', (32364, 32543), True, 'import photometry_functions as PH\n'), ((2294, 2330), 'numpy.abs', 'np.abs', (['(target.fw ** 2 - ref.fw ** 2)'], {}), '(target.fw ** 2 - ref.fw ** 2)\n', (2300, 2330), True, 'import numpy as np\n'), ((17184, 17222), 'numpy.vstack', 'np.vstack', (['(result.flux, result.dflux)'], {}), '((result.flux, result.dflux))\n', (17193, 17222), True, 'import numpy as np\n'), ((25806, 25974), 'itertools.repeat', 'itertools.repeat', (['(ref, params, stamp_positions, star_positions, star_group_boundaries,\n star_unsort_index, detector_mean_positions_x, detector_mean_positions_y)'], {}), '((ref, params, stamp_positions, star_positions,\n star_group_boundaries, star_unsort_index, detector_mean_positions_x,\n detector_mean_positions_y))\n', (25822, 25974), False, 'import itertools\n'), ((28114, 28138), 'os.path.exists', 'os.path.exists', (['psf_file'], {}), '(psf_file)\n', (28128, 28138), False, 'import os\n'), ((28169, 28222), 'photometry_functions.compute_psf_image', 'PH.compute_psf_image', (['params', 'ref'], {'psf_image': 'psf_file'}), '(params, ref, psf_image=psf_file)\n', (28189, 28222), True, 'import photometry_functions as PH\n'), ((31936, 31961), 'numpy.max', 'np.max', (['kernelIndex[:, 0]'], {}), '(kernelIndex[:, 0])\n', (31942, 31961), True, 'import numpy as np\n'), ((31985, 32006), 'numpy.sum', 'np.sum', (['extendedBasis'], {}), '(extendedBasis)\n', (31991, 32006), True, 'import numpy as np\n'), ((25462, 25500), 'numpy.vstack', 'np.vstack', (['(result.flux, result.dflux)'], {}), '((result.flux, result.dflux))\n', (25471, 25500), True, 'import numpy as np\n'), ((4313, 4335), 'scipy.linalg.lu_solve', 'lu_solve', (['(lu, piv)', 'V'], {}), '((lu, piv), V)\n', (4321, 4335), 
False, 'from scipy.linalg import lu_solve, lu_factor, LinAlgError\n'), ((20320, 20391), 'io_functions.get_date', 'IO.get_date', (['(params.loc_data + os.path.sep + f.name)'], {'key': 'params.datekey'}), '(params.loc_data + os.path.sep + f.name, key=params.datekey)\n', (20331, 20391), True, 'import io_functions as IO\n'), ((33246, 33270), 'numpy.vstack', 'np.vstack', (['(flux, dflux)'], {}), '((flux, dflux))\n', (33255, 33270), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Operations on surface mesh vertices.
Authors:
- <NAME>, 2012 (<EMAIL>)
- <NAME>, 2012-2016 (<EMAIL>) http://binarybottle.com
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def find_neighbors_from_file(input_vtk):
    """
    Build, for every vertex of the triangular mesh stored in a VTK file,
    the list of unique indices of its neighboring vertices.

    Parameters
    ----------
    input_vtk : string
        name of input VTK file containing surface mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_mean_curvature'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> neighbor_lists[0:3]
    [[1, 4, 48, 49], [0, 4, 5, 49, 2], [1, 5, 6, 49, 50, 54]]
    """
    from mindboggle.guts.mesh import find_neighbors
    from mindboggle.mio.vtks import read_faces_points

    # Only the faces and the vertex count matter here; coordinates are unused.
    mesh_faces, _, n_vertices = read_faces_points(input_vtk)
    return find_neighbors(mesh_faces, n_vertices)
def find_neighbors(faces, npoints):
    """
    Generate the list of unique indices of neighboring vertices
    for all vertices in the faces of a triangular mesh.

    Neighbors are recorded in first-encounter order, exactly as in the
    original implementation, but membership is tested against parallel
    sets so each test is O(1) instead of an O(n) list scan.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    npoints : integer
        number of vertices on the mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_neighbors(faces, 5)
    [[1, 2, 3, 4], [0, 2, 4, 3], [0, 1, 3], [0, 2, 4, 1], [0, 3, 1]]
    """
    neighbor_lists = [[] for _ in range(npoints)]
    # Parallel sets mirror neighbor_lists for O(1) membership tests;
    # the lists themselves keep the original insertion order.
    seen = [set() for _ in range(npoints)]
    for face in faces:
        v0, v1, v2 = face
        # Pair order replicates the original append order per face:
        # v0 gets (v1, v2); v1 gets (v0, v2); v2 gets (v0, v1).
        for a, b in ((v0, v1), (v0, v2), (v1, v0), (v1, v2),
                     (v2, v0), (v2, v1)):
            if b not in seen[a]:
                seen[a].add(b)
                neighbor_lists[a].append(b)
    return neighbor_lists
def find_neighbors_vertex(faces, index):
    """
    Find neighbors to a surface mesh vertex.

    For a set of surface mesh faces and the index of a surface vertex,
    find unique indices for neighboring vertices.  Matches are collected
    column by column (as in the original implementation) so the output
    ordering is unchanged; deduplication uses a set for O(1) membership
    instead of rescanning the result list.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    index : int
        index of surface vertex

    Returns
    -------
    neighbor_list : list of integers
        indices of neighboring vertices, in first-appearance order

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4]]
    >>> find_neighbors_vertex(faces, 1)
    [0, 2, 4]
    """
    import numpy as np

    # Make sure argument is a numpy array:
    if not isinstance(faces, np.ndarray):
        faces = np.array(faces)

    # Gather every vertex of every face that contains "index", scanning
    # column 0, then 1, then 2 to preserve the original ordering:
    candidates = []
    for col in (0, 1, 2):
        for row in faces[np.where(faces[:, col] == index)[0], :]:
            candidates.extend(int(x) for x in row)

    # Unique indices not equal to "index", first-appearance order:
    seen = set()
    neighbor_list = []
    for x in candidates:
        if x != index and x not in seen:
            seen.add(x)
            neighbor_list.append(x)
    return neighbor_list
def find_neighborhood(neighbor_lists, indices, nedges=1):
    """
    Find neighbors in the neighborhood of given surface mesh vertices.

    For indices to surface mesh vertices, find unique indices for
    vertices in the neighborhood of the vertices (the seed vertices
    themselves are excluded from the result).

    Parameters
    ----------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    indices : list of integers
        indices of surface vertices
    nedges : integer
        number of edges to propagate from indices

    Returns
    -------
    neighborhood : list of integers
        indices to vertices in neighborhood

    Examples
    --------
    >>> neighbor_lists = [[0,1],[0,2],[1,4,5],[2],[],[0,1,4,5]]
    >>> find_neighborhood(neighbor_lists, [1,3,4], 2)
    [0, 2, 5]
    """
    neighborhood = []
    seeds = indices[:]
    # "completed" tracks every vertex already visited (seeds included),
    # so each ring only adds vertices never seen before.
    completed = indices[:]

    for _ in range(nedges):
        if not seeds:
            break
        # Collect all neighbors of the current frontier:
        frontier = []
        for vertex in seeds:
            frontier.extend(neighbor_lists[vertex])
        # Keep only vertices not previously selected:
        seeds = list(set(frontier).difference(completed))
        neighborhood.extend(seeds)
        completed.extend(seeds)

    return [int(x) for x in neighborhood]
def find_endpoints(indices, neighbor_lists):
    """
    Extract endpoints from a connected set of vertices.

    A vertex of the set is an endpoint when exactly one of its mesh
    neighbors also belongs to the set.

    Parameters
    ----------
    indices : list of integers
        indices to connected vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    indices_endpoints : list of integers
        indices to endpoints of connected vertices
    """
    index_set = set(indices)
    indices_endpoints = []
    for vertex in indices:
        # Count how many neighbors of this vertex stay inside the set:
        if len(index_set.intersection(neighbor_lists[vertex])) == 1:
            indices_endpoints.append(vertex)
    return indices_endpoints
def find_edges(faces):
    """
    Find all edges on a mesh.

    Edges are returned as ordered vertex pairs in first-appearance order.
    A set of tuples shadows the result list so each duplicate check is
    O(1) instead of an O(n) list scan; the output is identical to the
    original implementation (ordered pairs, so [a, b] and [b, a] are
    treated as distinct, as before).

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    edges : list of lists of integers
        each element is a 2-element list of vertex ids representing an edge

    Examples
    --------
    >>> faces = [[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> find_edges(faces)
    [[0, 1], [1, 2], [0, 2], [1, 4], [0, 4], [2, 3], [1, 3], [2, 5], [0, 5]]
    """
    edges = []
    seen = set()  # tuple mirror of "edges" for fast membership tests
    for face in faces:
        for edge in (face[0:2], face[1:3], [face[0], face[2]]):
            key = tuple(edge)
            if key not in seen:
                seen.add(key)
                edges.append(edge)
    return edges
def find_faces_at_edges(faces):
    """
    For each edge on the mesh, find the faces that share the edge.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    faces_at_edges : dictionary
        keys are tuples of two vertex IDs; values are lists of face IDs;
        each edge is stored under both vertex orderings (symmetric)

    Examples
    --------
    >>> faces = [[0,1,2], [0,1,4], [1,2,3], [0,2,5]]
    >>> faces_at_edges = find_faces_at_edges(faces)
    >>> faces_at_edges[(0,2)]
    [0, 3]

    Notes ::
        The faces are assumed to be triangular.
    """
    faces_at_edges = {}
    for face_id, face in enumerate(faces):
        va, vb, vc = face[0], face[1], face[2]
        for p, q in ((va, vb), (vb, vc), (va, vc)):
            # Register the face under both orientations of the edge:
            faces_at_edges.setdefault((p, q), []).append(face_id)
            faces_at_edges.setdefault((q, p), []).append(face_id)
    return faces_at_edges
def find_faces_with_vertex(index, faces):
    """
    For a given vertex, find all faces containing this vertex.

    Note: faces do not have to be triangles.

    Parameters
    ----------
    index : integer
        index to a vertex
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    faces_with_vertex : list of lists of three integers
        the faces (in original order) that contain the vertex

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_faces_with_vertex(3, faces)
    [[0, 2, 3], [0, 3, 4], [4, 3, 1]]
    """
    faces_with_vertex = []
    for face in faces:
        if index in face:
            faces_with_vertex.append(face)
    return faces_with_vertex
def find_faces_at_vertices(faces, npoints):
    """
    For each vertex, find all faces containing this vertex.

    Note: faces do not have to be triangles.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    npoints : integer
        number of vertices on the mesh

    Returns
    -------
    faces_at_vertices : list of lists of integers
        faces_at_vertices[i] is a list of faces that contain the i-th vertex

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_faces_at_vertices(faces, 5)
    [[0, 1, 2, 3], [0, 3, 4], [0, 1], [1, 2, 4], [2, 3, 4]]
    """
    faces_at_vertices = [[] for _ in range(npoints)]
    for face_id, face in enumerate(faces):
        # Record this face against every vertex it touches:
        for vertex in face:
            faces_at_vertices[vertex].append(face_id)
    return faces_at_vertices
def find_adjacent_faces(faces):
    """
    For each face in a list of faces, find adjacent faces.

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
        (-1 indicates no result for a given face or vertex)

    Returns
    -------
    adjacent_faces : list of pairs of lists of three integers
        list 1 indexes three faces adjacent to the three face's edges;
        list 2 indexes three vertices opposite the adjacent faces:
        adjacent_faces[i][0] = [face0, face1, face2], neighbors of face i
        (face0 is the neighbor of face i facing vertex0)
        adjacent_faces[i][1] = [vertex0, vertex1, vertex2] for face i
        (vertex0 is the vertex of face0 not in face i)

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_adjacent_faces(faces)[0:2]
    [[[-1, 1, 3], [-1, 3, 4]], [[-1, 2, 0], [-1, 4, 1]]]
    """
    n_faces = len(faces)
    # One [neighbor faces, opposite vertices] pair per face, -1 = unset:
    adjacent_faces = [[[-1, -1, -1], [-1, -1, -1]] for _ in range(n_faces)]
    # Number of neighbors already found for each face:
    n_found = [0] * n_faces

    # Compare every face against every later face:
    for i1, face1 in enumerate(faces):
        for i2 in range(i1 + 1, n_faces):
            face2 = faces[i2]
            # Enumerate the three vertex pairs (edges) of face1:
            for ivertex in [0, 1]:
                index1 = face1[ivertex]
                for index2 in face1[ivertex + 1:3]:
                    # Shared edge => the faces are adjacent:
                    if index1 in face2 and index2 in face2:
                        # Position of the vertex opposite the shared edge
                        # in each face (positions sum to 0+1+2 = 3):
                        NbrID1 = 3 - face1.index(index1) - face1.index(index2)
                        NbrID2 = 3 - face2.index(index1) - face2.index(index2)
                        adjacent_faces[i1][0][NbrID1] = i2
                        adjacent_faces[i2][0][NbrID2] = i1
                        adjacent_faces[i1][1][NbrID1] = face2[NbrID2]
                        adjacent_faces[i2][1][NbrID2] = face1[NbrID1]
                        n_found[i1] += 1
                        n_found[i2] += 1
                        # Stop scanning this edge list once all three
                        # neighbors of face1 have been found:
                        if n_found[i1] == 3:
                            break

    return adjacent_faces
def find_complete_faces(indices, faces):
    """
    Given a set of vertices, find the ones that make complete faces.

    Parameters
    ----------
    indices : list of integers
        indices to connected vertices
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    indices_complete : list of integers
        indices to vertices making up complete faces,
        in first-appearance order

    Examples
    --------
    >>> faces = [[0,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> find_complete_faces([3,7,2,5,9,4], faces)
    [2, 3, 7, 5]
    """
    collected = []
    for face in faces:
        # Keep faces whose three vertices are all in "indices":
        if len(frozenset(face).intersection(indices)) == 3:
            collected.extend(face)
    # dict.fromkeys removes duplicates while preserving insertion order:
    return list(dict.fromkeys(collected))
def keep_faces(faces, indices):
    """
    Remove surface mesh faces whose three vertices are not all in "indices".

    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    indices : list of integers
        indices to vertices of the surface mesh that are to be retained

    Returns
    -------
    faces : list of lists of three integers
        reduced number of faces

    Examples
    --------
    >>> faces = [[1,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> keep_faces(faces, [0,1,2,3,4,5])
    [[1, 2, 3], [3, 2, 5]]
    """
    import numpy as np

    keep = frozenset(indices)
    kept = [face for face in faces if len(keep.intersection(face)) == 3]
    # Round-trip through numpy to normalize the output to a plain list of
    # 3-element lists (also yields [] when nothing survives the filter):
    return np.reshape(np.ravel(kept), (-1, 3)).tolist()
def reindex_faces_points(faces, points=[]):
    """
    Renumber indices in faces and remove points (coordinates) not in faces.

    Parameters
    ----------
    faces : list of lists of integers
        each sublist contains 3 indices of vertices that form a face
        on a surface mesh
    points : list of lists of floats (optional)
        each sublist contains 3-D coordinates of a vertex on a surface mesh

    Returns
    -------
    new_faces : list of lists of integers
        each sublist contains 3 (renumbered) indices of vertices
        that form a face on a surface mesh
    new_points : list of lists of floats
        each (new) sublist contains 3-D coordinates of a vertex on a
        surface mesh; None when no points were given
    original_indices : list of integers
        list of indices to original points

    Examples
    --------
    >>> faces = [[8,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> new_faces, new_points, original_indices = reindex_faces_points(faces)
    >>> new_faces
    [[5, 0, 1], [0, 1, 4], [2, 4, 5], [1, 0, 3]]
    """
    import itertools

    import numpy as np

    if isinstance(points, np.ndarray):
        points = points.tolist()
    elif not isinstance(points, list):
        raise IOError("points should be either a list or a numpy array.")

    # set() removes repeated indices; list() fixes an ordering that is
    # used consistently for both the renumbering map and the new points:
    original_indices = list(set(itertools.chain(*faces)))
    reindex = {old: new for new, old in enumerate(original_indices)}

    new_faces = [[reindex[vertex] for vertex in face] for face in faces]
    new_points = ([points[old] for old in original_indices]
                  if points else None)
    return new_faces, new_points, original_indices
def remove_neighbor_lists(neighbor_lists, indices):
    """
    Remove all but a given set of indices from surface mesh neighbor lists.

    Note :: SLOW!

    Parameters
    ----------
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex
    indices : list of integers
        indices to vertices of the surface mesh

    Returns
    -------
    neighbor_lists : list of lists of integers
        each list has indices to remaining neighboring vertices for each vertex

    Examples
    --------
    >>> from mindboggle.guts.mesh import remove_neighbor_lists
    >>> neighbor_lists = [[1,2,3], [2,3,7], [12,43], [4,7,8], [3,2,5]]
    >>> indices = [0,1,2,3,4,5]
    >>> remove_neighbor_lists(neighbor_lists, indices)
    [[1, 2, 3], [2, 3], [], [4], [2, 3, 5]]
    """
    # Build the keep-set once, then intersect every neighbor list with it:
    keep = frozenset(indices)
    filtered_lists = []
    for vertex_neighbors in neighbor_lists:
        filtered_lists.append(list(keep.intersection(vertex_neighbors)))

    return filtered_lists
def reindex_faces_0to1(faces):
    """
    Convert 0-indices (Python) to 1-indices (Matlab) for all face indices.

    Parameters
    ----------
    faces : list of lists of integers
        each sublist contains 3 0-indices of vertices that form a face
        on a surface mesh

    Returns
    -------
    faces : list of lists of integers
        each sublist contains 3 1-indices of vertices that form a face
        on a surface mesh

    Examples
    --------
    >>> from mindboggle.guts.mesh import reindex_faces_0to1
    >>> faces = [[0,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> reindex_faces_0to1(faces)
    [[1, 3, 4], [3, 4, 8], [5, 8, 9], [4, 3, 6]]
    """
    # Shift every vertex index up by one:
    return [[vertex + 1 for vertex in triangle] for triangle in faces]
def decimate(points, faces, reduction=0.75, smooth_steps=25,
             scalars=[], save_vtk=False, output_vtk=''):
    """
    Decimate vtk triangular mesh with vtk.vtkDecimatePro.

    Parameters
    ----------
    points : list of lists of floats
        each element is a list of 3-D coordinates of a vertex on a surface mesh
    faces : list of lists of integers
        each element is list of 3 indices of vertices that form a face
        on a surface mesh
    reduction : float
        fraction of mesh faces to remove
    smooth_steps : integer
        number of smoothing steps
    scalars : list of integers or floats
        optional scalars for output VTK file
    save_vtk : bool
        output decimated vtk file?
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    points : list of lists of floats
        decimated points
    faces : list of lists of integers
        decimated faces
    scalars : list of integers or floats
        scalars for output VTK file
    output_vtk : string
        output decimated vtk file

    Examples
    --------
    >>> # Example: Twins-2-1 left postcentral pial surface, 0.75 decimation:
    >>> from mindboggle.guts.mesh import decimate
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> points, f1, f2, faces, scalars, f3, f4, f5 = read_vtk(input_vtk)
    >>> reduction = 0.5
    >>> smooth_steps = 25
    >>> save_vtk = True
    >>> output_vtk = 'decimate.vtk'
    >>> points2, faces2, scalars, output_vtk = decimate(points, faces,
    ...     reduction, smooth_steps, scalars, save_vtk, output_vtk)
    >>> (len(points), len(points2))
    (145069, 72535)
    >>> (len(faces), len(faces2))
    (290134, 145066)

    View decimated surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces('decimate.vtk') # doctest: +SKIP
    """
    import os
    import vtk

    # ------------------------------------------------------------------------
    # vtk points:
    # ------------------------------------------------------------------------
    vtk_points = vtk.vtkPoints()
    for i, x in enumerate(points):
        vtk_points.InsertPoint(i, x[0], x[1], x[2])

    # ------------------------------------------------------------------------
    # vtk faces:
    # ------------------------------------------------------------------------
    vtk_faces = vtk.vtkCellArray()
    for face in faces:
        vtk_face = vtk.vtkPolygon()
        vtk_face.GetPointIds().SetNumberOfIds(3)
        vtk_face.GetPointIds().SetId(0, face[0])
        vtk_face.GetPointIds().SetId(1, face[1])
        vtk_face.GetPointIds().SetId(2, face[2])
        vtk_faces.InsertNextCell(vtk_face)

    # ------------------------------------------------------------------------
    # vtk scalars:
    # ------------------------------------------------------------------------
    if scalars:
        vtk_scalars = vtk.vtkFloatArray()
        vtk_scalars.SetName("scalars")
        for scalar in scalars:
            vtk_scalars.InsertNextValue(scalar)

    # ------------------------------------------------------------------------
    # vtkPolyData:
    # ------------------------------------------------------------------------
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(vtk_points)
    polydata.SetPolys(vtk_faces)
    if scalars:
        polydata.GetPointData().SetScalars(vtk_scalars)

    # ------------------------------------------------------------------------
    # Decimate:
    # ------------------------------------------------------------------------
    # We want to preserve topology (not let any cracks form).
    # This may limit the total reduction possible.
    # NOTE: local renamed "decimator" so it no longer shadows this function.
    decimator = vtk.vtkDecimatePro()
    # Migrate to VTK6:
    # http://www.vtk.org/Wiki/VTK/VTK_6_Migration/Replacement_of_SetInput
    # Old: decimate.SetInput(polydata)
    decimator.SetInputData(polydata)
    decimator.SetTargetReduction(reduction)
    decimator.PreserveTopologyOn()

    # ------------------------------------------------------------------------
    # Prepare the writer only when asked to save; otherwise leave both the
    # exporter and the output file name unset:
    # ------------------------------------------------------------------------
    if save_vtk:
        if not output_vtk:
            output_vtk = os.path.join(os.getcwd(), 'decimated.vtk')
        exporter = vtk.vtkPolyDataWriter()
    else:
        output_vtk = None
        exporter = None

    # ------------------------------------------------------------------------
    # Smooth:
    # ------------------------------------------------------------------------
    if smooth_steps > 0:
        smoother = vtk.vtkSmoothPolyDataFilter()
        smoother.SetInputConnection(decimator.GetOutputPort())
        smoother.SetNumberOfIterations(smooth_steps)
        smoother.Update()
        out = smoother.GetOutput()
        # BUG FIX: the exporter exists only when save_vtk is True; the
        # original code referenced it here unconditionally, raising a
        # NameError for save_vtk=False with smooth_steps > 0.
        if save_vtk:
            exporter.SetInputConnection(smoother.GetOutputPort())
    else:
        decimator.Update()
        out = decimator.GetOutput()
        if save_vtk:
            exporter.SetInputConnection(decimator.GetOutputPort())

    # ------------------------------------------------------------------------
    # Export output:
    # ------------------------------------------------------------------------
    if save_vtk:
        exporter.SetFileName(output_vtk)
        exporter.Write()
        if not os.path.exists(output_vtk):
            raise IOError(output_vtk + " not found")

    # ------------------------------------------------------------------------
    # Extract decimated points, faces, and scalars:
    # ------------------------------------------------------------------------
    points = [list(out.GetPoint(point_id))
              for point_id in range(out.GetNumberOfPoints())]
    if out.GetNumberOfPolys() > 0:
        polys = out.GetPolys()
        pt_data = out.GetPointData()
        # Cells are stored as [npts, id0, id1, id2]; skip the leading count:
        faces = [[int(polys.GetData().GetValue(j))
                  for j in range(i*4 + 1, i*4 + 4)]
                 for i in range(polys.GetNumberOfCells())]
        if scalars:
            scalars = [pt_data.GetScalars().GetValue(i)
                       for i in range(len(points))]
    else:
        faces = []
        scalars = []

    return points, faces, scalars, output_vtk
def decimate_file(input_vtk, reduction=0.5, smooth_steps=100,
                  save_vtk=True, output_vtk=''):
    """
    Decimate vtk triangular mesh file with vtk.vtkDecimatePro.

    Parameters
    ----------
    input_vtk : string
        input vtk file with triangular surface mesh
    reduction : float
        fraction of mesh faces to remove
    do_smooth : bool
        smooth after decimation?
    save_vtk : bool
        output decimated vtk file?
    output_vtk : string
        output decimated vtk file name

    Returns
    -------
    output_vtk : string
        output decimated vtk file

    Examples
    --------
    >>> from mindboggle.guts.mesh import decimate_file
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> save_vtk = True
    >>> output_vtk = 'decimate.vtk'
    >>> reduction = 0.5
    >>> smooth_steps = 25
    >>> output_vtk = decimate_file(input_vtk, reduction, smooth_steps,
    ...     save_vtk, output_vtk)
    >>> f1, f2, f3, faces1, f4, f5, npoints1, f6 = read_vtk(input_vtk)
    >>> f1, f2, f3, faces2, f4, f5, npoints2, f6 = read_vtk('decimate.vtk')
    >>> (npoints1, npoints2)
    (145069, 72535)
    >>> (len(faces1), len(faces2))
    (290134, 145066)

    View decimated surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces
    >>> plot_surfaces('decimate.vtk') # doctest: +SKIP
    """
    from mindboggle.guts.mesh import decimate
    from mindboggle.mio.vtks import read_vtk

    # Only the save-to-file path is implemented:
    if not save_vtk:
        raise NotImplementedError()

    # Read the triangular surface mesh from the VTK file
    # (read_vtk returns: points, indices, lines, faces, scalars,
    #  scalar_names, npoints, input_vtk):
    mesh = read_vtk(input_vtk)
    mesh_points = mesh[0]
    mesh_faces = mesh[3]
    mesh_scalars = mesh[4]

    # Decimate with vtk.vtkDecimatePro and return only the output file name:
    decimated = decimate(mesh_points, mesh_faces, reduction, smooth_steps,
                         mesh_scalars, save_vtk, output_vtk)

    return decimated[3]
def rescale_by_neighborhood(input_vtk, indices=[], nedges=10, p=99,
    set_max_to_1=True, save_file=False, output_filestring='rescaled_scalars',
    background_value=-1):
    """
    Rescale the scalar values of a VTK file by a percentile value
    in each vertex's surface mesh neighborhood.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    indices : list of integers (optional)
        indices of scalars to normalize
    nedges : integer
        number or edges from vertex, defining the size of its neighborhood
    p : float in range of [0,100]
        percentile used to normalize each scalar
    set_max_to_1 : bool
        set all rescaled values greater than 1 to 1.0?
    save_file : bool
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer
        background value

    Returns
    -------
    rescaled_scalars : list of floats
        rescaled scalar values
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import rescale_by_neighborhood
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> indices = []
    >>> nedges = 10
    >>> p = 99
    >>> set_max_to_1 = True
    >>> save_file = True
    >>> output_filestring = 'rescale_by_neighborhood'
    >>> background_value = -1
    >>> rescaled, rescaled_file = rescale_by_neighborhood(input_vtk,
    ...     indices, nedges, p, set_max_to_1, save_file, output_filestring,
    ...     background_value)
    >>> scalars1, name = read_scalars(input_vtk)
    >>> print('{0:0.5f}, {1:0.5f}'.format(max(scalars1), max(rescaled)))
    34.95560, 1.00000
    >>> print('{0:0.5f}, {1:0.5f}'.format(np.mean(scalars1), np.mean(rescaled)))
    7.43822, 0.44950

    View rescaled scalar values on surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces(rescaled_file) # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    from mindboggle.guts.mesh import find_neighbors_from_file, find_neighborhood

    # Load per-vertex scalars and the mesh adjacency lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if not indices:
        indices = [i for i, x in enumerate(scalars) if x != background_value]
    neighbor_lists = find_neighbors_from_file(input_vtk)

    # Normalize each selected vertex by a high percentile of the
    # scalar values within its nedges-deep neighborhood:
    rescaled_scalars = scalars.copy()
    for vertex in indices:
        neighborhood = find_neighborhood(neighbor_lists, [vertex], nedges)
        scale = np.percentile(scalars[neighborhood], p)
        rescaled_scalars[vertex] = scalars[vertex] / scale

    # Optionally clip any rescaled value greater than 1 to exactly 1:
    if set_max_to_1:
        over_one = [vertex for vertex in indices
                    if rescaled_scalars[vertex] > 1.0]
        rescaled_scalars[over_one] = 1
    rescaled_scalars = rescaled_scalars.tolist()

    # ------------------------------------------------------------------------
    # Return rescaled scalars and file name
    # ------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', [],
                        background_value)
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
def rescale_by_label(input_vtk, labels_or_file, save_file=False,
                     output_filestring='rescaled_scalars',
                     background_value=-1, verbose=False):
    """
    Rescale scalars for each label (such as depth values within each fold).

    Default is to normalize the scalar values of a VTK file by
    a percentile value in each vertex's surface mesh for each label.

    Parameters
    ----------
    input_vtk : string
        name of VTK file with a scalar value for each vertex
    labels_or_file : list or string
        label number for each vertex or name of VTK file with index scalars
    save_file : bool
        save output VTK file?
    output_filestring : string (if save_file)
        name of output file
    background_value : integer or float
        background value
    verbose : bool
        print statements?

    Returns
    -------
    rescaled_scalars : list of floats
        scalar values rescaled for each label, for label numbers not equal to -1
    rescaled_scalars_file : string (if save_file)
        name of output VTK file with rescaled scalar values for each label

    Raises
    ------
    ValueError
        if labels_or_file is neither a list nor a file name string

    Examples
    --------
    >>> # Rescale depths by neighborhood within each label:
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import rescale_by_label
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.plots import plot_surfaces
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> labels_or_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> save_file = True
    >>> output_filestring = 'rescale_by_label'
    >>> background_value = -1
    >>> verbose = False
    >>> rescaled, rescaled_label_file = rescale_by_label(input_vtk,
    ...     labels_or_file, save_file, output_filestring, background_value, verbose)
    >>> scalars1, name = read_scalars(input_vtk)
    >>> print('{0:0.5f}, {1:0.5f}'.format(max(scalars1), max(rescaled)))
    34.95560, 1.00000
    >>> print('{0:0.5f}, {1:0.5f}'.format(np.mean(scalars1), np.mean(rescaled)))
    7.43822, 0.30677

    View rescaled scalar values on surface (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> plot_surfaces(rescaled_label_file) # doctest: +SKIP
    """
    import os
    import numpy as np
    from mindboggle.mio.vtks import read_scalars, rewrite_scalars

    # Load scalars and vertex neighbor lists:
    scalars, name = read_scalars(input_vtk, True, True)
    if verbose:
        print(" Rescaling scalar values within each label...")

    # Load label numbers (from a VTK file or directly from a list):
    if isinstance(labels_or_file, str):
        labels, name = read_scalars(labels_or_file, True, True)
    elif isinstance(labels_or_file, list):
        labels = labels_or_file
    else:
        # ROBUSTNESS FIX: the original fell through here leaving "labels"
        # unbound, which raised a confusing NameError below.
        raise ValueError("labels_or_file should be a list or a file name "
                         "string, not {0}".format(type(labels_or_file)))
    unique_labels = np.unique(labels)

    # Loop through labels, rescaling each label's vertices by the
    # maximum scalar value within that label:
    for label in unique_labels:
        if verbose:
            print(" Rescaling values within label {0} of {1} labels...".
                  format(int(label), len(unique_labels)))
        indices = [i for i, x in enumerate(labels) if x == label]
        if indices:
            # Rescale by the maximum label scalar value:
            scalars[indices] = scalars[indices] / np.max(scalars[indices])
    rescaled_scalars = scalars.tolist()

    # ------------------------------------------------------------------------
    # Return rescaled scalars and file name
    # ------------------------------------------------------------------------
    if save_file:
        rescaled_scalars_file = os.path.join(os.getcwd(),
                                             output_filestring + '.vtk')
        rewrite_scalars(input_vtk, rescaled_scalars_file,
                        rescaled_scalars, 'rescaled_scalars', labels,
                        background_value)
        if not os.path.exists(rescaled_scalars_file):
            raise IOError(rescaled_scalars_file + " not found")
    else:
        rescaled_scalars_file = None

    return rescaled_scalars, rescaled_scalars_file
def area_of_faces(points, faces):
    """
    Compute the areas of all triangles on the mesh.

    Parameters
    ----------
    points : list of lists of 3 floats
        x,y,z coordinates for each vertex of the structure
    faces : list of lists of 3 integers
        3 indices to vertices that form a triangle on the mesh

    Returns
    -------
    area: 1-D numpy array
        area[i] is the area of the i-th triangle

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import area_of_faces
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> input_vtk = fetch_data(urls['left_area'], '', '.vtk')
    >>> points, f1, f2, faces, f3, f4, f5, f6 = read_vtk(input_vtk)
    >>> area = area_of_faces(points, faces)
    >>> [np.float("{0:.{1}f}".format(x, 5)) for x in area[0:5]]
    [0.21703, 0.27139, 0.29033, 0.1717, 0.36011]
    """
    import numpy as np

    coords = np.array(points)
    areas = np.zeros(len(faces))
    # Heron's formula from the three edge lengths of each triangle:
    for idx, tri in enumerate(faces):
        edge_a = np.linalg.norm(coords[tri[0]] - coords[tri[1]])
        edge_b = np.linalg.norm(coords[tri[1]] - coords[tri[2]])
        edge_c = np.linalg.norm(coords[tri[2]] - coords[tri[0]])
        half_perim = (edge_a + edge_b + edge_c) / 2.0
        areas[idx] = np.sqrt(half_perim * (half_perim - edge_a)
                             * (half_perim - edge_b) * (half_perim - edge_c))

    return areas
def dilate(indices, nedges, neighbor_lists):
    """
    Dilate region on a surface mesh.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to dilate
    nedges : integer
        number of edges to dilate across
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    dilated_indices : list of integers
        indices of original vertices with dilated vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import dilate, find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_travel_depth'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> nedges = 3
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> fold_number = 4
    >>> indices = [i for i,x in enumerate(folds) if x == fold_number]
    >>> dilated_indices = dilate(indices, nedges, neighbor_lists)
    >>> (len(indices), len(dilated_indices))
    (1151, 1545)
    >>> dilated_indices[0:10]
    [50317, 50324, 50325, 50326, 50327, 50332, 50333, 50334, 50339, 50340]

    Write results to vtk file and view (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> from mindboggle.mio.vtks import rewrite_scalars
    >>> IDs = -1 * np.ones(len(folds)) # doctest: +SKIP
    >>> IDs[dilated_indices] = 2 # doctest: +SKIP
    >>> IDs[indices] = 1 # doctest: +SKIP
    >>> rewrite_scalars(vtk_file, 'dilate.vtk', IDs, 'dilated_fold', IDs) # doctest: +SKIP
    >>> plot_surfaces('dilate.vtk') # doctest: +SKIP
    """
    from mindboggle.guts.mesh import find_neighborhood

    # The dilated region is the original vertices followed by the
    # surrounding nedges-deep ring of neighbors:
    surrounding_ring = find_neighborhood(neighbor_lists, indices, nedges)
    dilated_indices = list(indices)
    dilated_indices.extend(surrounding_ring)

    return dilated_indices
def erode(indices, nedges, neighbor_lists):
    """
    Erode region on a surface mesh.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to erode
    nedges : integer
        number of edges to erode across
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    eroded_indices : list of integers
        indices of original vertices without eroded vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import erode, find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> nedges = 3
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> fold_number = 4
    >>> indices = [i for i,x in enumerate(folds) if x == fold_number]
    >>> eroded_indices = erode(indices, nedges, neighbor_lists)
    >>> (len(indices), len(eroded_indices))
    (1151, 809)

    Write results to vtk file and view (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> IDs = -1 * np.ones(len(folds)) # doctest: +SKIP
    >>> IDs[indices] = 1 # doctest: +SKIP
    >>> IDs[eroded_indices] = 2 # doctest: +SKIP
    >>> rewrite_scalars(vtk_file, 'erode.vtk', IDs, 'eroded_fold', IDs) # doctest: +SKIP
    >>> plot_surfaces('erode.vtk') # doctest: +SKIP
    """
    from mindboggle.guts.mesh import find_neighborhood

    # One-edge ring just outside the region, then grow it back inward
    # by nedges; what that inward growth reaches is eroded away:
    outer_ring = find_neighborhood(neighbor_lists, indices, nedges=1)
    reached = find_neighborhood(neighbor_lists, outer_ring, nedges)

    return list(frozenset(indices).difference(reached))
def extract_edge(indices, neighbor_lists):
    """
    Erode region on a surface mesh to extract the region's edge.

    Parameters
    ----------
    indices : list of integers
        indices of vertices to erode
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    edge_indices : list of integers
        indices of eroded vertices

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import extract_edge
    >>> from mindboggle.guts.mesh import find_neighbors_from_file
    >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> vtk_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
    >>> neighbor_lists = find_neighbors_from_file(vtk_file)
    >>> # Select a single fold:
    >>> folds, name = read_scalars(folds_file, True, True)
    >>> fold_number = 4
    >>> indices = [i for i,x in enumerate(folds) if x == fold_number]
    >>> edge_indices = extract_edge(indices, neighbor_lists)
    >>> (len(indices), len(edge_indices))
    (1151, 111)

    Write results to vtk file and view (skip test):

    >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
    >>> IDs = -1 * np.ones(len(folds)) # doctest: +SKIP
    >>> IDs[indices] = 1 # doctest: +SKIP
    >>> IDs[edge_indices] = 2 # doctest: +SKIP
    >>> rewrite_scalars(vtk_file, 'extract_edge.vtk', IDs, 'edges_of_fold', IDs) # doctest: +SKIP
    >>> plot_surfaces('extract_edge.vtk') # doctest: +SKIP
    """
    from mindboggle.guts.mesh import find_neighborhood

    # Step one edge outside the region, then one edge back; region
    # vertices reached by the return step lie on the region's edge:
    outer_ring = find_neighborhood(neighbor_lists, indices, nedges=1)
    reached = find_neighborhood(neighbor_lists, outer_ring, nedges=1)

    return list(set(reached).intersection(indices))
def topo_test(index, values, neighbor_lists):
    """
    Test to see if vertex is a "simple point".

    A simple point is a vertex that when added to or removed from an object
    (e.g., a curve) on a surface mesh does not alter the object's topology.

    "Simple" is not to be mistaken with the following usage:
    "A vertex is usually assigned one of five possible classifications:
    simple, complex, boundary, interior edge, or corner vertex.
    A simple vertex is surrounded by a closed fan of triangles".

    Parameters
    ----------
    index : integer
        index of vertex
    values : numpy array of integers or floats
        values for all vertices
    neighbor_lists : list of lists of integers
        each list contains indices to neighboring vertices for each vertex

    Returns
    -------
    sp : bool
        simple point or not?
    n_inside : integer
        number of neighboring vertices with a value greater than threshold

    Examples
    --------
    >>> # Square with a center vertex:
    >>> # indices [[0,1,2],[3,4,6],[7,8,9]] = 0 and indices [2,4,6] = 1:
    >>> import numpy as np
    >>> from mindboggle.guts.mesh import topo_test
    >>> values = np.array([0,0,1,0,1,0,1,0,0])
    >>> neighbor_lists = [[1,3],[0,2,3,4],[1,4,5],
    ...                   [0,1,4,6],[1,2,3,5,6,7],[2,4,7,8],
    ...                   [3,4,7],[4,5,6,8],[5,7]]
    >>> sps = []
    >>> for index in range(9):
    ...     sp, n_inside = topo_test(index, values, neighbor_lists)
    ...     sps.append(sp)
    >>> sps
    [False, True, True, True, False, True, True, True, False]
    """
    import numpy as np

    # Work with a numpy array for consistent elementwise comparisons:
    if not isinstance(values, np.ndarray):
        values = np.array(values)

    # Split the vertex's neighbors at a class-boundary threshold of 0.5
    # into "inside" (> 0.5) and "outside" groups:
    nbrs = neighbor_lists[index]
    inside = [v for v in nbrs if values[v] > 0.5]
    n_inside = len(inside)
    n_outside = len(nbrs) - n_inside

    # With no inside or no outside neighbors, the vertex IS NOT simple:
    if n_inside == 0 or n_outside == 0:
        return False, n_inside
    # With exactly one inside or one outside neighbor, it IS simple:
    if n_inside == 1 or n_outside == 1:
        return True, n_inside

    # Otherwise the vertex is simple exactly when its inside neighbors
    # form a single connected group.  For each inside neighbor, gather
    # itself plus its own above-threshold neighbors (excluding the test
    # vertex) ...
    groups = []
    for v in inside:
        members = set(w for w in neighbor_lists[v]
                      if values[w] > 0.5 and w != index)
        members.add(v)
        groups.append(members)

    # ... then count connected components by depth-first search, linking
    # two inside neighbors whenever their gathered sets share a vertex:
    visited = [False] * n_inside
    n_components = 0
    for seed in range(n_inside):
        if visited[seed]:
            continue
        n_components += 1
        stack = [seed]
        visited[seed] = True
        while stack:
            current = stack.pop()
            for other in range(n_inside):
                if not visited[other] and groups[current] & groups[other]:
                    visited[other] = True
                    stack.append(other)

    return n_components == 1, n_inside
# def fill_holes(regions, neighbor_lists, values=[], exclude_range=[],
# background_value=-1):
# """
# Fill holes in regions on a surface mesh by using region boundaries.
#
# NOTE: assumes one set of connected vertices per region
#
# Steps ::
#
# 1. Segment region vertex neighbors into connected vertices (region boundaries).
# 2. Remove the largest region boundary, presumably the
# outer contour of the region, leaving smaller boundaries,
# presumably the contours of holes within the region.
# 3. Call label_holes() to fill holes with surrounding region numbers.
#
# Parameters
# ----------
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
# values : list of integers
# values for vertices, for use in determining which holes to remove
# exclude_range : list of two floats
# hole is not filled if it contains values within this range
# (prevents cases where surface connected by folds mistaken for holes)
# background_value : integer
# background value
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# Examples
# --------
# >>> import numpy as np
# >>> from mindboggle.guts.mesh import fill_holes
# >>> from mindboggle.guts.mesh import find_neighbors_from_file
# >>> from mindboggle.mio.vtks import read_scalars
# >>> from mindboggle.mio.fetch_data import prep_tests
# >>> urls, fetch_data = prep_tests()
# >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk')
# >>> background_value = -1
# >>> # Select one fold
# >>> folds, name = read_scalars(folds_file, True, True)
# >>> fold_number = 4
# >>> folds[folds != fold_number] = background_value
# >>> I = np.where(folds==fold_number)[0]
# >>> neighbor_lists = find_neighbors_from_file(folds_file)
# >>> ## Find vertex whose removal (with its neighbors) would create a hole:
# >>> #for index in I:
# ... # N1 = neighbor_lists[index]
# ... # stop = True
# ... # for n in N1:
# ... # if any(folds[neighbor_lists[n]] == background_value):
# ... # stop = False
# ... # break
# ... # else:
# ... # for f in neighbor_lists[n]:
# ... # if any(folds[neighbor_lists[f]] == background_value):
# ... # stop = False
# ... # break
# ... # if stop:
# ... # break
# >>> index = I[100]
# >>> N = neighbor_lists[index]
# >>> N.append(index)
# >>> N
# [36768, 37670, 36769, 37679, 38522, 38529, 37688, 37689, 37678]
# >>> folds[N] = background_value
# >>> I = [x for x in I if x not in N]
#
# View hole (skip test):
#
# >>> from mindboggle.mio.vtks import rewrite_scalars # doctest: +SKIP
# >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
# >>> rewrite_scalars(folds_file, 'hole.vtk', folds, 'hole', folds) # doctest: +SKIP
# >>> plot_surfaces('hole.vtk') # doctest: +SKIP
#
# Fill hole:
#
# >>> exclude_range = []
# >>> regions = np.copy(folds)
# >>> values = np.copy(folds)
# >>> regions = fill_holes(regions, neighbor_lists, values, exclude_range,
# ... background_value)
# >>> indices = [i for i,x in enumerate(regions) if x != background_value]
# >>> indices[0:10]
# [34148, 34149, 34150, 34151, 34152, 34153, 34154, 34155, 34157, 34158]
#
# View filled hole (skip test):
#
# >>> rewrite_scalars(folds_file, 'fill_hole.vtk', regions, 'fill_hole', regions) # doctest: +SKIP
# >>> plot_surfaces('fill_hole.vtk') # doctest: +SKIP
#
# """
# import numpy as np
# from mindboggle.guts.segment import segment
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# def label_holes(holes, regions, neighbor_lists):
# """
# Fill holes in regions on a surface mesh.
#
# Parameters
# ----------
# holes : list or array of integers
# hole numbers for all vertices
# regions : numpy array of integers
# region numbers for all vertices
# neighbor_lists : list of lists of integers
# each list contains indices to neighboring vertices for each vertex
#
# Returns
# -------
# regions : numpy array of integers
# region numbers for all vertices
#
# """
# import numpy as np
#
# # Make sure argument is a numpy array
# if not isinstance(regions, np.ndarray):
# regions = np.array(regions)
#
# # Identify the vertices for each hole
# hole_numbers = [x for x in np.unique(holes) if x != background_value]
# for n_hole in hole_numbers:
# I = [i for i,x in enumerate(holes) if x == n_hole]
#
# # Identify neighbors to these vertices
# N=[]; [N.extend(neighbor_lists[i]) for i in I]
# if N:
#
# # Assign the hole the maximum region ID number of its neighbors
# regions[I] = max([regions[x] for x in N])
#
# return regions
#
# # ------------------------------------------------------------------------
# # Find boundaries to holes
# # ------------------------------------------------------------------------
# hole_boundaries = background_value * np.ones(len(regions))
#
# # Identify vertices for each region
# region_numbers = [x for x in np.unique(regions) if x != background_value]
# count = 0
# for n_region in region_numbers:
# region_indices = np.where(regions == n_region)[0]
#
# # Identify neighbors to these vertices and their neighbors
# N = []
# [N.extend(neighbor_lists[x]) for x in region_indices]
# N = list(frozenset(N).difference(region_indices))
# N2 = []
# [N2.extend(neighbor_lists[x]) for x in N]
# N.extend(N2)
# N = list(frozenset(N).difference(region_indices))
# if N:
#
# # Segment neighbors into connected vertices (region boundaries)
# boundaries = segment(N, neighbor_lists)
#
# # Remove the largest region boundary, presumably the
# # outer contour of the region, leaving smaller boundaries,
# # presumably the contours of holes within the region
# boundary_numbers = [x for x in np.unique(boundaries)
# if x != background_value]
# max_size = 0
# max_number = 0
# for n_boundary in boundary_numbers:
# border_indices = np.where(boundaries == n_boundary)[0]
# if len(border_indices) > max_size:
# max_size = len(border_indices)
# max_number = n_boundary
# boundaries[boundaries == max_number] = background_value
# boundary_numbers = [x for x in boundary_numbers if x != max_number]
#
# # Add remaining boundaries to holes array
# for n_boundary in boundary_numbers:
# indices = [i for i,x in enumerate(boundaries) if x == n_boundary]
# hole_boundaries[indices] = count
# count += 1
#
# # ------------------------------------------------------------------------
# # Fill holes
# # ------------------------------------------------------------------------
# # If there are any holes
# if count > 0:
# hole_numbers = [x for x in np.unique(hole_boundaries)
# if x != background_value]
# background = [i for i,x in enumerate(regions)
# if x == background_value]
#
# # Grow seeds from hole boundaries to fill holes
# for n_hole in hole_numbers:
# seed_list = np.where(hole_boundaries == n_hole)[0].tolist()
# seed_lists = [list(frozenset(background).intersection(seed_list))]
# hole = segment(background, neighbor_lists, 1, seed_lists)
#
# # Label the vertices for each hole by surrounding region number
# # if hole does not include values within exclude_range:
# if len(exclude_range) == 2:
# Ihole = np.where(hole != background_value)[0]
# #if not len(frozenset(values[Ihole]).intersection(exclude_range)):
# if not [x for x in values[Ihole]
# if x > exclude_range[0] if x < exclude_range[1]]:
# regions = label_holes(hole, regions, neighbor_lists)
# else:
# regions = label_holes(hole, regions, neighbor_lists)
#
# return regions
# def close_surface_pair(faces, points1, points2, scalars, background_value=-1):
# """
# Close a surface patch by connecting its border vertices with
# corresponding vertices in a second surface file.
#
# Assumes no lines or indices when reading VTK files.
#
# Note ::
#
# Scalar values different than background define the surface patch.
# The two sets of points have a 1-to-1 mapping; they are from
# two surfaces whose corresponding vertices are shifted in position.
# For pial vs. gray-white matter, the two surfaces are not parallel,
# so connecting the vertices leads to intersecting faces.
#
# Parameters
# ----------
# faces : list of lists of integers
# each sublist contains 3 indices of vertices that form a face
# on a surface mesh
# points1 : list of lists of floats
# each sublist contains 3-D coordinates of a vertex on a surface mesh
# points2 : list of lists of floats
# points from second surface with 1-to-1 correspondence with points1
# scalars : numpy array of integers
# labels used to find foreground vertices
# background_value : integer
# scalar value for background vertices
#
# Returns
# -------
# closed_faces : list of lists of integers
# indices of vertices that form a face on the closed surface mesh
# closed_points : list of lists of floats
# 3-D coordinates from points1 and points2
# closed_scalars : list of integers
# scalar values for points1 and points2
#
# Examples
# --------
# >>> # Build a cube by closing two parallel planes:
# >>> from mindboggle.guts.mesh import close_surface_pair
# >>> # Build plane:
# >>> background_value = -1
# >>> n = 10 # plane edge length
# >>> points1 = []
# >>> for x in range(n):
# ... for y in range(n):
# ... points1.append([x,y,0])
# >>> points2 = [[x[0],x[1],1] for x in points1]
# >>> scalars = [background_value for x in range(len(points1))]
# >>> p = int(n*(n-1)/2 - 1)
# >>> for i in [p, p+1, p+n, p+n+1]:
# ... scalars[i] = 1
# >>> faces = []
# >>> for x in range(n-1):
# ... for y in range(n-1):
# ... faces.append([x+y*n,x+n+y*n,x+n+1+y*n])
# ... faces.append([x+y*n,x+1+y*n,x+n+1+y*n])
# >>> #write_vtk('plane.vtk', points1, [], [], faces, scalars)
# >>> #plot_surfaces('plane.vtk')
# >>> closed_faces, closed_points, closed_scalars = close_surface_pair(faces,
# ... points1, points2, scalars, background_value)
# >>> closed_faces[0:4]
# [[44, 54, 55], [44, 45, 55], [144, 154, 155], [144, 145, 155]]
#
# View cube (skip test):
#
# >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP
# >>> from mindboggle.mio.vtks import write_vtk # doctest: +SKIP
# >>> write_vtk('cube.vtk', closed_points, [],[], closed_faces,
# ... closed_scalars, 'int') # doctest: +SKIP
# >>> plot_surfaces('cube.vtk') # doctest: +SKIP
#
# """
# import sys
# import numpy as np
#
# from mindboggle.guts.mesh import find_neighbors, keep_faces
# from mindboggle.guts.segment import extract_borders
#
# if isinstance(scalars, list):
# scalars = np.array(scalars)
#
# N = len(points1)
# closed_points = points1 + points2
#
# # Find all vertex neighbors and surface patch border vertices:
# neighbor_lists = find_neighbors(faces, N)
# I = np.where(scalars != background_value)[0]
# scalars[scalars == background_value] = background_value + 1
# scalars[I] = background_value + 2
# scalars = scalars.tolist()
# borders, u1, u2 = extract_borders(list(range(N)), scalars, neighbor_lists)
# if not len(borders):
# sys.exit('There are no border vertices!')
# borders = [x for x in borders if x in I]
#
# # Reindex copy of faces and combine with original (both zero-index):
# indices = list(range(N))
# indices2 = list(range(N, 2 * N))
# reindex = dict([(index, indices2[i]) for i, index in enumerate(indices)])
# faces = keep_faces(faces, I)
# faces2 = [[reindex[i] for i in face] for face in faces]
# closed_faces = faces + faces2
#
# # Connect border vertices between surface patches and add new faces:
# add_faces = []
# taken_already = []
# for index in borders:
# if index not in taken_already:
# neighbors = list(set(neighbor_lists[index]).intersection(borders))
# taken_already.append(index)
# #taken_already.extend([index] + neighbors)
# for neighbor in neighbors:
# add_faces.append([index, index + N, neighbor])
# add_faces.append([index + N, neighbor, neighbor + N])
# closed_faces = closed_faces + add_faces
#
# closed_scalars = scalars * 2
#
# return closed_faces, closed_points, closed_scalars
# ============================================================================
# Doctests
# ============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # py.test --doctest-modules | [
"itertools.chain",
"mindboggle.guts.mesh.find_neighbors_from_file",
"mindboggle.guts.mesh.find_neighbors",
"mindboggle.guts.mesh.decimate.SetTargetReduction",
"mindboggle.mio.vtks.rewrite_scalars",
"numpy.sqrt",
"vtk.vtkCellArray",
"vtk.vtkDecimatePro",
"vtk.vtkPoints",
"numpy.array",
"mindboggl... | [((1878, 1906), 'mindboggle.mio.vtks.read_faces_points', 'read_faces_points', (['input_vtk'], {}), '(input_vtk)\n', (1895, 1906), False, 'from mindboggle.mio.vtks import read_faces_points\n'), ((1929, 1959), 'mindboggle.guts.mesh.find_neighbors', 'find_neighbors', (['faces', 'npoints'], {}), '(faces, npoints)\n', (1943, 1959), False, 'from mindboggle.guts.mesh import find_neighbors\n'), ((26006, 26021), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (26019, 26021), False, 'import vtk\n'), ((26293, 26311), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (26309, 26311), False, 'import vtk\n'), ((27157, 27174), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (27172, 27174), False, 'import vtk\n'), ((27618, 27638), 'vtk.vtkDecimatePro', 'vtk.vtkDecimatePro', ([], {}), '()\n', (27636, 27638), False, 'import vtk\n'), ((27780, 27811), 'mindboggle.guts.mesh.decimate.SetInputData', 'decimate.SetInputData', (['polydata'], {}), '(polydata)\n', (27801, 27811), False, 'from mindboggle.guts.mesh import decimate\n'), ((27817, 27855), 'mindboggle.guts.mesh.decimate.SetTargetReduction', 'decimate.SetTargetReduction', (['reduction'], {}), '(reduction)\n', (27844, 27855), False, 'from mindboggle.guts.mesh import decimate\n'), ((27860, 27889), 'mindboggle.guts.mesh.decimate.PreserveTopologyOn', 'decimate.PreserveTopologyOn', ([], {}), '()\n', (27887, 27889), False, 'from mindboggle.guts.mesh import decimate\n'), ((32340, 32359), 'mindboggle.mio.vtks.read_vtk', 'read_vtk', (['input_vtk'], {}), '(input_vtk)\n', (32348, 32359), False, 'from mindboggle.mio.vtks import read_vtk\n'), ((32461, 32540), 'mindboggle.guts.mesh.decimate', 'decimate', (['points', 'faces', 'reduction', 'smooth_steps', 'scalars', 'save_vtk', 'output_vtk'], {}), '(points, faces, reduction, smooth_steps, scalars, save_vtk, output_vtk)\n', (32469, 32540), False, 'from mindboggle.guts.mesh import decimate\n'), ((35251, 35286), 'mindboggle.mio.vtks.read_scalars', 'read_scalars', 
(['input_vtk', '(True)', '(True)'], {}), '(input_vtk, True, True)\n', (35263, 35286), False, 'from mindboggle.mio.vtks import read_scalars, rewrite_scalars\n'), ((35489, 35524), 'mindboggle.guts.mesh.find_neighbors_from_file', 'find_neighbors_from_file', (['input_vtk'], {}), '(input_vtk)\n', (35513, 35524), False, 'from mindboggle.guts.mesh import find_neighbors_from_file, find_neighborhood\n'), ((39462, 39497), 'mindboggle.mio.vtks.read_scalars', 'read_scalars', (['input_vtk', '(True)', '(True)'], {}), '(input_vtk, True, True)\n', (39474, 39497), False, 'from mindboggle.mio.vtks import read_scalars, rewrite_scalars\n'), ((39804, 39821), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (39813, 39821), True, 'import numpy as np\n'), ((42137, 42153), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (42145, 42153), True, 'import numpy as np\n'), ((44452, 44502), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', 'indices', 'nedges'], {}), '(neighbor_lists, indices, nedges)\n', (44469, 44502), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((46399, 46451), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', 'indices'], {'nedges': '(1)'}), '(neighbor_lists, indices, nedges=1)\n', (46416, 46451), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((46461, 46506), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', 'N1', 'nedges'], {}), '(neighbor_lists, N1, nedges)\n', (46478, 46506), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((48376, 48428), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', 'indices'], {'nedges': '(1)'}), '(neighbor_lists, indices, nedges=1)\n', (48393, 48428), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((48438, 48485), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', 'N1'], {'nedges': '(1)'}), 
'(neighbor_lists, N1, nedges=1)\n', (48455, 48485), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((67204, 67233), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(True)'}), '(verbose=True)\n', (67219, 67233), False, 'import doctest\n'), ((5542, 5557), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (5550, 5557), True, 'import numpy as np\n'), ((18333, 18348), 'numpy.ravel', 'np.ravel', (['faces'], {}), '(faces)\n', (18341, 18348), True, 'import numpy as np\n'), ((26354, 26370), 'vtk.vtkPolygon', 'vtk.vtkPolygon', ([], {}), '()\n', (26368, 26370), False, 'import vtk\n'), ((26826, 26845), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (26843, 26845), False, 'import vtk\n'), ((28194, 28217), 'vtk.vtkPolyDataWriter', 'vtk.vtkPolyDataWriter', ([], {}), '()\n', (28215, 28217), False, 'import vtk\n'), ((28298, 28327), 'vtk.vtkSmoothPolyDataFilter', 'vtk.vtkSmoothPolyDataFilter', ([], {}), '()\n', (28325, 28327), False, 'import vtk\n'), ((28908, 28925), 'mindboggle.guts.mesh.decimate.Update', 'decimate.Update', ([], {}), '()\n', (28923, 28925), False, 'from mindboggle.guts.mesh import decimate\n'), ((28940, 28960), 'mindboggle.guts.mesh.decimate.GetOutput', 'decimate.GetOutput', ([], {}), '()\n', (28958, 28960), False, 'from mindboggle.guts.mesh import decimate\n'), ((35705, 35755), 'mindboggle.guts.mesh.find_neighborhood', 'find_neighborhood', (['neighbor_lists', '[index]', 'nedges'], {}), '(neighbor_lists, [index], nedges)\n', (35722, 35755), False, 'from mindboggle.guts.mesh import find_neighborhood\n'), ((35866, 35905), 'numpy.percentile', 'np.percentile', (['scalars[neighborhood]', 'p'], {}), '(scalars[neighborhood], p)\n', (35879, 35905), True, 'import numpy as np\n'), ((36545, 36658), 'mindboggle.mio.vtks.rewrite_scalars', 'rewrite_scalars', (['input_vtk', 'rescaled_scalars_file', 'rescaled_scalars', '"""rescaled_scalars"""', '[]', 'background_value'], {}), "(input_vtk, rescaled_scalars_file, rescaled_scalars,\n 
'rescaled_scalars', [], background_value)\n", (36560, 36658), False, 'from mindboggle.mio.vtks import read_scalars, rewrite_scalars\n'), ((39668, 39708), 'mindboggle.mio.vtks.read_scalars', 'read_scalars', (['labels_or_file', '(True)', '(True)'], {}), '(labels_or_file, True, True)\n', (39680, 39708), False, 'from mindboggle.mio.vtks import read_scalars, rewrite_scalars\n'), ((40708, 40825), 'mindboggle.mio.vtks.rewrite_scalars', 'rewrite_scalars', (['input_vtk', 'rescaled_scalars_file', 'rescaled_scalars', '"""rescaled_scalars"""', 'labels', 'background_value'], {}), "(input_vtk, rescaled_scalars_file, rescaled_scalars,\n 'rescaled_scalars', labels, background_value)\n", (40723, 40825), False, 'from mindboggle.mio.vtks import read_scalars, rewrite_scalars\n'), ((42209, 42266), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[triangle[0]] - points[triangle[1]])'], {}), '(points[triangle[0]] - points[triangle[1]])\n', (42223, 42266), True, 'import numpy as np\n'), ((42279, 42336), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[triangle[1]] - points[triangle[2]])'], {}), '(points[triangle[1]] - points[triangle[2]])\n', (42293, 42336), True, 'import numpy as np\n'), ((42349, 42406), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[triangle[2]] - points[triangle[0]])'], {}), '(points[triangle[2]] - points[triangle[0]])\n', (42363, 42406), True, 'import numpy as np\n'), ((42452, 42492), 'numpy.sqrt', 'np.sqrt', (['(s * (s - a) * (s - b) * (s - c))'], {}), '(s * (s - a) * (s - b) * (s - c))\n', (42459, 42492), True, 'import numpy as np\n'), ((50307, 50323), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (50315, 50323), True, 'import numpy as np\n'), ((21531, 21554), 'itertools.chain', 'itertools.chain', (['*faces'], {}), '(*faces)\n', (21546, 21554), False, 'import itertools\n'), ((28525, 28549), 'mindboggle.guts.mesh.decimate.GetOutputPort', 'decimate.GetOutputPort', ([], {}), '()\n', (28547, 28549), False, 'from mindboggle.guts.mesh import decimate\n'), 
((29636, 29662), 'os.path.exists', 'os.path.exists', (['output_vtk'], {}), '(output_vtk)\n', (29650, 29662), False, 'import os\n'), ((36496, 36507), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (36505, 36507), False, 'import os\n'), ((36718, 36755), 'os.path.exists', 'os.path.exists', (['rescaled_scalars_file'], {}), '(rescaled_scalars_file)\n', (36732, 36755), False, 'import os\n'), ((40614, 40625), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (40623, 40625), False, 'import os\n'), ((40885, 40922), 'os.path.exists', 'os.path.exists', (['rescaled_scalars_file'], {}), '(rescaled_scalars_file)\n', (40899, 40922), False, 'import os\n'), ((28145, 28156), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28154, 28156), False, 'import os\n'), ((29332, 29356), 'mindboggle.guts.mesh.decimate.GetOutputPort', 'decimate.GetOutputPort', ([], {}), '()\n', (29354, 29356), False, 'from mindboggle.guts.mesh import decimate\n'), ((40225, 40249), 'numpy.max', 'np.max', (['scalars[indices]'], {}), '(scalars[indices])\n', (40231, 40249), True, 'import numpy as np\n'), ((5644, 5674), 'numpy.where', 'np.where', (['(faces[:, i] == index)'], {}), '(faces[:, i] == index)\n', (5652, 5674), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Filename: test_scripts
"""
introduction:
authors: <NAME>
email:<EMAIL>
add time: 11 April, 2021
"""
import os, sys
import cv2
import numpy as np
code_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, code_dir)
import datasets.raster_io as raster_io
def test_read_image_cv2_rasterio():
    """Read the same image with OpenCV and rasterio, then draw a rectangle on each.

    Demonstrates that cv2.rectangle needs a C-contiguous array: the rasterio
    result must pass through np.ascontiguousarray first.
    """
    # run in ~/Data/Arctic/canada_arctic/autoMapping/multiArea_yolov4_1
    work_dir = os.path.expanduser('~/Data/Arctic/canada_arctic/autoMapping/multiArea_yolov4_1')
    print('\n')
    print('Run read_image_cv2_rasterio')
    img_path = os.path.join(work_dir, 'debug_img', '20200818_mosaic_8bit_rgb_0_class_1_p_0.png')

    # OpenCV path: read, then convert BGR to RGB
    image_cv2 = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    print('image_cv2 shape', image_cv2.shape)
    for band in range(3):
        print('b %d' % band, np.mean(image_cv2[:, :, band]))

    # rasterio path: bands-first array, transpose to height x width x bands
    image_rs, nodata = raster_io.read_raster_all_bands_np(img_path)
    image_rs = image_rs.transpose(1, 2, 0)
    print('image_rs shape', image_rs.shape)
    for band in range(3):
        print('b %d' % band, np.mean(image_rs[:, :, band]))

    # Both are numpy.ndarray, yet cv2.rectangle on the rasterio array raised:
    # TypeError: Expected Ptr<cv::UMat> for argument 'img'
    print('type of image_cv2', type(image_cv2))
    print('type of image_rs', type(image_rs))
    image_cv2 = cv2.rectangle(image_cv2, (10, 10), (100, 100), (0, 0, 0), 1)

    # Fix suggested by https://stackoverflow.com/questions/57586449/why-cv2-rectangle-sometimes-return-np-ndarray-while-sometimes-cv2-umat
    # making the array contiguous lets cv2.rectangle accept it.
    image_rs = np.ascontiguousarray(image_rs)
    image_rs = cv2.rectangle(image_rs, (50, 50), (150, 150), (255, 255, 255), 1)

    cv2.imshow('image_cv2', image_cv2)
    cv2.imshow('image_rs', image_rs)
    if cv2.waitKey() & 0xFF == ord('q'):
        return
return
| [
"cv2.rectangle",
"numpy.mean",
"sys.path.insert",
"os.path.join",
"datasets.raster_io.read_raster_all_bands_np",
"numpy.ascontiguousarray",
"cv2.imshow",
"cv2.waitKey",
"cv2.cvtColor",
"os.path.abspath",
"cv2.imread",
"os.path.expanduser"
] | [((246, 274), 'sys.path.insert', 'sys.path.insert', (['(0)', 'code_dir'], {}), '(0, code_dir)\n', (261, 274), False, 'import os, sys\n'), ((435, 520), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Data/Arctic/canada_arctic/autoMapping/multiArea_yolov4_1"""'], {}), "('~/Data/Arctic/canada_arctic/autoMapping/multiArea_yolov4_1'\n )\n", (453, 520), False, 'import os, sys\n'), ((589, 665), 'os.path.join', 'os.path.join', (['dir', '"""debug_img"""', '"""20200818_mosaic_8bit_rgb_0_class_1_p_0.png"""'], {}), "(dir, 'debug_img', '20200818_mosaic_8bit_rgb_0_class_1_p_0.png')\n", (601, 665), False, 'import os, sys\n'), ((681, 701), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (691, 701), False, 'import cv2\n'), ((718, 760), 'cv2.cvtColor', 'cv2.cvtColor', (['image_cv2', 'cv2.COLOR_BGR2RGB'], {}), '(image_cv2, cv2.COLOR_BGR2RGB)\n', (730, 760), False, 'import cv2\n'), ((948, 992), 'datasets.raster_io.read_raster_all_bands_np', 'raster_io.read_raster_all_bands_np', (['img_path'], {}), '(img_path)\n', (982, 992), True, 'import datasets.raster_io as raster_io\n'), ((1473, 1533), 'cv2.rectangle', 'cv2.rectangle', (['image_cv2', '(10, 10)', '(100, 100)', '(0, 0, 0)', '(1)'], {}), '(image_cv2, (10, 10), (100, 100), (0, 0, 0), 1)\n', (1486, 1533), False, 'import cv2\n'), ((1728, 1758), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image_rs'], {}), '(image_rs)\n', (1748, 1758), True, 'import numpy as np\n'), ((1774, 1839), 'cv2.rectangle', 'cv2.rectangle', (['image_rs', '(50, 50)', '(150, 150)', '(255, 255, 255)', '(1)'], {}), '(image_rs, (50, 50), (150, 150), (255, 255, 255), 1)\n', (1787, 1839), False, 'import cv2\n'), ((1843, 1877), 'cv2.imshow', 'cv2.imshow', (['"""image_cv2"""', 'image_cv2'], {}), "('image_cv2', image_cv2)\n", (1853, 1877), False, 'import cv2\n'), ((1882, 1914), 'cv2.imshow', 'cv2.imshow', (['"""image_rs"""', 'image_rs'], {}), "('image_rs', image_rs)\n", (1892, 1914), False, 'import cv2\n'), ((212, 237), 'os.path.abspath', 
'os.path.abspath', (['__file__'], {}), '(__file__)\n', (227, 237), False, 'import os, sys\n'), ((909, 922), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (916, 922), True, 'import numpy as np\n'), ((1167, 1180), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1174, 1180), True, 'import numpy as np\n'), ((1922, 1935), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1933, 1935), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the function "cube_manipulation.sort_coord_in_cube".
"""
import unittest
import iris
import numpy as np
from iris.coords import AuxCoord
from iris.tests import IrisTest
from improver.utilities.cube_manipulation import sort_coord_in_cube
from improver.utilities.warnings_handler import ManageWarnings
from ...set_up_test_cubes import set_up_variable_cube
class Test_sort_coord_in_cube(IrisTest):
    """Tests for the sort_coord_in_cube utility function."""

    def setUp(self):
        """Build matching ascending- and descending-height test cubes."""
        self.ascending_height_points = np.array([5.0, 10.0, 20.0], dtype=np.float32)
        self.descending_height_points = np.flip(self.ascending_height_points)
        self.data = np.array(
            [np.ones((3, 3)), 2 * np.ones((3, 3)), 3 * np.ones((3, 3))],
            dtype=np.float32,
        )
        cube = set_up_variable_cube(self.data)
        cube.coord("realization").rename("height")
        height_coord = cube.coord("height")
        height_coord.points = self.ascending_height_points
        height_coord.units = "m"
        self.ascending_cube = cube
        self.descending_cube = cube.copy()
        self.descending_cube.coord("height").points = self.descending_height_points

    def _check_result(
        self, source_cube, coord_name, expected_points, expected_data, descending=False
    ):
        """Run sort_coord_in_cube and verify type, coord dims, points and data."""
        result = sort_coord_in_cube(source_cube, coord_name, descending=descending)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(
            source_cube.coord_dims(coord_name), result.coord_dims(coord_name)
        )
        self.assertArrayAlmostEqual(expected_points, result.coord(coord_name).points)
        self.assertArrayAlmostEqual(result.data, expected_data)
        return result

    def test_ascending_then_ascending(self):
        """Sorting an already-ascending cube leaves points and data unchanged."""
        self._check_result(
            self.ascending_cube, "height", self.ascending_height_points, self.data
        )

    def test_auxcoord(self):
        """Sorting also works on an auxiliary coordinate."""
        height_coord = self.ascending_cube.coord("height")
        (height_dim,) = self.ascending_cube.coord_dims("height")
        self.ascending_cube.add_aux_coord(
            AuxCoord(height_coord.points, long_name="height_aux"), height_dim
        )
        self._check_result(
            self.ascending_cube, "height_aux", self.ascending_height_points, self.data
        )

    def test_ascending_then_descending(self):
        """Sorting an ascending cube in descending order flips points and data."""
        self._check_result(
            self.ascending_cube,
            "height",
            self.descending_height_points,
            np.flip(self.data),
            descending=True,
        )

    def test_descending_then_ascending(self):
        """Sorting a descending cube in ascending order flips points and data."""
        self._check_result(
            self.descending_cube,
            "height",
            self.ascending_height_points,
            np.flip(self.data),
        )

    def test_descending_then_descending(self):
        """Sorting an already-descending cube descending leaves it unchanged."""
        self._check_result(
            self.descending_cube,
            "height",
            self.descending_height_points,
            self.data,
            descending=True,
        )

    def test_latitude(self):
        """Sorting by latitude in descending order reorders the data rows."""
        self.ascending_cube.data[:, 0, 0] = 6.0
        expected_data = np.array(
            [
                [[1.00, 1.00, 1.00], [1.00, 1.00, 1.00], [6.00, 1.00, 1.00]],
                [[2.00, 2.00, 2.00], [2.00, 2.00, 2.00], [6.00, 2.00, 2.00]],
                [[3.00, 3.00, 3.00], [3.00, 3.00, 3.00], [6.00, 3.00, 3.00]],
            ]
        )
        flipped_lats = np.flip(self.ascending_cube.coord("latitude").points)
        self._check_result(
            self.ascending_cube, "latitude", flipped_lats, expected_data,
            descending=True,
        )

    @ManageWarnings(record=True)
    def test_warn_raised_for_circular_coordinate(self, warning_list=None):
        """A warning is raised when a circular coordinate is sorted."""
        self.ascending_cube.data[:, 0, 0] = 6.0
        self.ascending_cube.coord("latitude").circular = True
        result = sort_coord_in_cube(self.ascending_cube, "latitude", descending=True)
        self.assertTrue(any(item.category == UserWarning for item in warning_list))
        warning_msg = "The latitude coordinate is circular."
        self.assertTrue(any(warning_msg in str(item) for item in warning_list))
        self.assertIsInstance(result, iris.cube.Cube)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()
| [
"numpy.flip",
"numpy.ones",
"iris.coords.AuxCoord",
"numpy.array",
"improver.utilities.cube_manipulation.sort_coord_in_cube",
"unittest.main",
"improver.utilities.warnings_handler.ManageWarnings"
] | [((7954, 7981), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (7968, 7981), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((8700, 8715), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8713, 8715), False, 'import unittest\n'), ((2252, 2297), 'numpy.array', 'np.array', (['[5.0, 10.0, 20.0]'], {'dtype': 'np.float32'}), '([5.0, 10.0, 20.0], dtype=np.float32)\n', (2260, 2297), True, 'import numpy as np\n'), ((2338, 2375), 'numpy.flip', 'np.flip', (['self.ascending_height_points'], {}), '(self.ascending_height_points)\n', (2345, 2375), True, 'import numpy as np\n'), ((3252, 3303), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.ascending_cube', 'coord_name'], {}), '(self.ascending_cube, coord_name)\n', (3270, 3303), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((4005, 4056), 'iris.coords.AuxCoord', 'AuxCoord', (['height_coord.points'], {'long_name': 'coord_name'}), '(height_coord.points, long_name=coord_name)\n', (4013, 4056), False, 'from iris.coords import AuxCoord\n'), ((4147, 4198), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.ascending_cube', 'coord_name'], {}), '(self.ascending_cube, coord_name)\n', (4165, 4198), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((4828, 4846), 'numpy.flip', 'np.flip', (['self.data'], {}), '(self.data)\n', (4835, 4846), True, 'import numpy as np\n'), ((4894, 4962), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.ascending_cube', 'coord_name'], {'descending': '(True)'}), '(self.ascending_cube, coord_name, descending=True)\n', (4912, 4962), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((5593, 5611), 'numpy.flip', 'np.flip', (['self.data'], {}), '(self.data)\n', (5600, 5611), True, 'import numpy as 
np\n'), ((5659, 5711), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.descending_cube', 'coord_name'], {}), '(self.descending_cube, coord_name)\n', (5677, 5711), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((6399, 6468), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.descending_cube', 'coord_name'], {'descending': '(True)'}), '(self.descending_cube, coord_name, descending=True)\n', (6417, 6468), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((7094, 7273), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [6.0, 1.0, 1.0]], [[2.0, 2.0, 2.0], [\n 2.0, 2.0, 2.0], [6.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, 3.0], [\n 6.0, 3.0, 3.0]]]'], {}), '([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [6.0, 1.0, 1.0]], [[2.0, 2.0, \n 2.0], [2.0, 2.0, 2.0], [6.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [3.0, 3.0, \n 3.0], [6.0, 3.0, 3.0]]])\n', (7102, 7273), True, 'import numpy as np\n'), ((7553, 7621), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.ascending_cube', 'coord_name'], {'descending': '(True)'}), '(self.ascending_cube, coord_name, descending=True)\n', (7571, 7621), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((8319, 8387), 'improver.utilities.cube_manipulation.sort_coord_in_cube', 'sort_coord_in_cube', (['self.ascending_cube', 'coord_name'], {'descending': '(True)'}), '(self.ascending_cube, coord_name, descending=True)\n', (8337, 8387), False, 'from improver.utilities.cube_manipulation import sort_coord_in_cube\n'), ((2420, 2435), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2427, 2435), True, 'import numpy as np\n'), ((2441, 2456), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2448, 2456), True, 'import numpy as np\n'), ((2462, 2477), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2469, 2477), True, 'import 
numpy as np\n')] |
'''
Tasks which control a plant under pure machine control. Used typically for initializing BMI decoder parameters.
'''
import numpy as np
import time
import os
import pdb
import multiprocessing as mp
import pickle
import tables
import re
import tempfile, traceback, datetime
import riglib.bmi
from riglib.stereo_opengl import ik
from riglib.experiment import traits, experiment
from riglib.bmi import clda, assist, extractor, train, goal_calculators, ppfdecoder
from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter
from riglib.bmi.extractor import DummyExtractor
from riglib.stereo_opengl.window import WindowDispl2D, FakeWindow
from riglib.bmi.state_space_models import StateSpaceEndptVel2D
from .bmimultitasks import BMIControlMulti
# State-space model choices exposed as the ssm_type trait options below.
bmi_ssm_options = ['Endpt2D', 'Tentacle', 'Joint2L']
class EndPostureFeedbackController(BMILoop, traits.HasTraits):
    '''BMILoop variant whose decoder is built from a MachineOnlyFilter and a
    DummyExtractor, i.e. it runs without real neural features (machine control).'''
    ssm_type_options = bmi_ssm_options
    ssm_type = traits.OptionsList(*bmi_ssm_options, bmi3d_input_options=bmi_ssm_options)
    def load_decoder(self):
        '''Construct the decoder: a MachineOnlyFilter over a 2D endpoint-velocity
        state space, with no recorded units and 0.1 s bins.'''
        self.ssm = StateSpaceEndptVel2D()
        # B is unused here; only the dynamics A and noise W feed the filter.
        A, B, W = self.ssm.get_ssm_matrices()
        filt = MachineOnlyFilter(A, W)
        units = []
        self.decoder = Decoder(filt, units, self.ssm, binlen=0.1)
        self.decoder.n_features = 1
    def create_feature_extractor(self):
        '''Install a stand-in extractor (no neural data source in this task).'''
        self.extractor = DummyExtractor()
        self._add_feature_extractor_dtype()
class TargetCaptureVisualFeedback(EndPostureFeedbackController, BMIControlMulti):
    '''Target-capture task fully driven by the machine controller (assist fixed at 1).'''
    assist_level = (1, 1)
    is_bmi_seed = True  # flags sessions as usable for decoder seeding -- inferred from name, confirm in riglib
    def move_effector(self):
        '''No-op: the BMI loop, not manual input, moves the effector.'''
        pass
class TargetCaptureVFB2DWindow(TargetCaptureVisualFeedback, WindowDispl2D):
    '''2D on-screen display variant of the visual-feedback target-capture task.'''
    fps = 20.
    def __init__(self,*args, **kwargs):
        super(TargetCaptureVFB2DWindow, self).__init__(*args, **kwargs)
        self.assist_level = (1, 1)
    def _start_wait(self):
        # No inter-trial wait for this task.
        self.wait_time = 0.
        super(TargetCaptureVFB2DWindow, self)._start_wait()
    def _test_start_trial(self, ts):
        return ts > self.wait_time and not self.pause
    @classmethod
    def get_desc(cls, params, report):
        '''Summarize a session report as "<N> rewarded trials in <M> min".

        `report` is either a list of (event, ..., timestamp) tuples or a
        summary dict with 'runtime' and 'n_success_trials'. `params` is
        unused but kept for interface compatibility with the caller.
        '''
        if isinstance(report, list) and len(report) > 0:
            # Duration in seconds, from first/last event timestamps.
            duration = report[-1][-1] - report[0][-1]
            reward_count = 0
            for item in report:
                if item[0] == "reward":
                    reward_count += 1
            return "{} rewarded trials in {} min".format(reward_count, int(np.ceil(duration / 60)))
        elif isinstance(report, dict):
            # BUG FIX: duration was pre-divided by 60 here and then divided by
            # 60 again below, so the message reported hours as minutes.
            # Keep seconds so the shared `duration / 60` matches the list
            # branch (assumes 'runtime' is in seconds -- TODO confirm).
            duration = report['runtime']
            reward_count = report['n_success_trials']
            return "{} rewarded trials in {} min".format(reward_count, int(np.ceil(duration / 60)))
        else:
            return "No trials"
| [
"numpy.ceil",
"riglib.bmi.bmi.MachineOnlyFilter",
"riglib.bmi.bmi.Decoder",
"riglib.bmi.extractor.DummyExtractor",
"riglib.bmi.state_space_models.StateSpaceEndptVel2D",
"riglib.experiment.traits.OptionsList"
] | [((965, 1038), 'riglib.experiment.traits.OptionsList', 'traits.OptionsList', (['*bmi_ssm_options'], {'bmi3d_input_options': 'bmi_ssm_options'}), '(*bmi_ssm_options, bmi3d_input_options=bmi_ssm_options)\n', (983, 1038), False, 'from riglib.experiment import traits, experiment\n'), ((1087, 1109), 'riglib.bmi.state_space_models.StateSpaceEndptVel2D', 'StateSpaceEndptVel2D', ([], {}), '()\n', (1107, 1109), False, 'from riglib.bmi.state_space_models import StateSpaceEndptVel2D\n'), ((1171, 1194), 'riglib.bmi.bmi.MachineOnlyFilter', 'MachineOnlyFilter', (['A', 'W'], {}), '(A, W)\n', (1188, 1194), False, 'from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter\n'), ((1237, 1279), 'riglib.bmi.bmi.Decoder', 'Decoder', (['filt', 'units', 'self.ssm'], {'binlen': '(0.1)'}), '(filt, units, self.ssm, binlen=0.1)\n', (1244, 1279), False, 'from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter\n'), ((1382, 1398), 'riglib.bmi.extractor.DummyExtractor', 'DummyExtractor', ([], {}), '()\n', (1396, 1398), False, 'from riglib.bmi.extractor import DummyExtractor\n'), ((2447, 2469), 'numpy.ceil', 'np.ceil', (['(duration / 60)'], {}), '(duration / 60)\n', (2454, 2469), True, 'import numpy as np\n'), ((2686, 2708), 'numpy.ceil', 'np.ceil', (['(duration / 60)'], {}), '(duration / 60)\n', (2693, 2708), True, 'import numpy as np\n')] |
import os
import sys
import random
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from h01_data.parse import get_data as get_raw_data
from h02_learn.model import opt_params
from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data
from utils import argparser
from utils import utils
# Accumulates one row per (language, rare_mode, fold); flushed to CSV by run_language_cv.
full_results = [['lang', 'rare_mode', 'fold', 'avg_len', 'entropy', 'unconditional_entropy',
                 'test_loss', 'test_acc', 'val_loss', 'val_acc', 'best_epoch']]
def get_full_data_loader(lang, rare_mode):
    """Load the train/val/test splits for a language and merge them into one dataset.

    Returns (merged_data, token_map, labels); merged_data is (x, y, idx) arrays.
    """
    train_loader, val_loader, test_loader, token_map, labels = get_data(
        lang, rare_mode, args)
    merged = merge_data_loaders([train_loader, val_loader, test_loader])
    return merged, token_map, labels
def merge_data_loaders(data_loaders):
    """Concatenate the contents of several data loaders into flat numpy arrays.

    Returns (x, y, idx); x is zero-padded to the widest sequence length found
    across all loaders.
    """
    total = sum(loader.dataset.tensors[0].shape[0] for loader in data_loaders)
    width = max(loader.dataset.tensors[0].shape[1] for loader in data_loaders)
    x = np.zeros((total, width))
    y = np.zeros(total)
    idx = np.zeros(total)
    offset = 0
    for loader in data_loaders:
        for batch_x, batch_y, batch_idx in loader:
            count = batch_x.size(0)
            x[offset:offset + count, :batch_x.size(1)] = batch_x.cpu()
            y[offset:offset + count] = batch_y.cpu()
            idx[offset:offset + count] = batch_idx.cpu()
            offset += count
    return x, y, idx
def get_lang_df(lang, rare_mode):
    """Return the raw parsed dataframe for a (language, rare-mode) pair."""
    dataframe, _ = get_raw_data(lang, rare_mode)
    return dataframe
def get_ids(lang, rare_mode):
    """Collect the unique item ids for a language, shuffled in place.

    NOTE(review): random.shuffle is unseeded here, so fold assignment is not
    reproducible across runs.
    """
    unique_ids = sorted(get_lang_df(lang, rare_mode).item_id.unique())
    random.shuffle(unique_ids)
    return unique_ids
def get_data_loaders_cv(fold, nfolds, full_data, token_map, labels, instance_ids, args, verbose=True):
    """Build the train/val/test loaders for one cross-validation fold."""
    splits = get_data_split_cv(fold, nfolds, instance_ids, full_data[2], args,
                               verbose=verbose)
    loaders = tuple(
        get_data_loader(full_data, split_ids, token_map, mode, args)
        for split_ids, mode in zip(splits, ('train', 'val', 'test')))
    return loaders
def get_data_split_cv(fold, nfolds, instance_ids, valid_ids, args, verbose=True):
    """Drop ids absent from the data, then split the remainder into CV folds."""
    present = [item for item in instance_ids if item in valid_ids]
    return _get_data_split_cv(fold, nfolds, present, verbose=verbose)
def _get_data_split_cv(fold, nfolds, instance_ids, verbose=True):
part_size = int(len(instance_ids) / nfolds)
test_fold = (fold + 1) % nfolds
train_start_fold = 0 if test_fold > fold else (test_fold + 1)
train = instance_ids[train_start_fold * part_size:fold * part_size]
train += instance_ids[(fold + 2) * part_size:] if fold + 2 < nfolds else []
val = instance_ids[fold * part_size:(fold + 1) * part_size] if fold + 1 < nfolds else \
instance_ids[fold * part_size:]
test = instance_ids[(test_fold) * part_size:(test_fold + 1) * part_size] if test_fold + 1 < nfolds else \
instance_ids[(test_fold) * part_size:]
if verbose:
print('Train %d, Val %d, Test %d' % (len(train), len(val), len(test)))
return (train, val, test)
def get_data_loader(full_data, ids, token_map, mode, args):
    """Slice full_data down to `ids` and wrap the result in a data loader."""
    subset = split_data(full_data, ids, token_map, mode, args)
    return convert_to_loader(subset, mode)
def split_data(full_data, ids, token_map, mode, args):
    """Pack the (x, y, item_id) triples whose item_id is in `ids` into an int matrix.

    Each row holds the PAD-padded token sequence, then the label (column -2)
    and the item id (column -1). `mode` and `args` are unused but kept for
    interface compatibility with callers.
    """
    # PERF: set membership is O(1) vs O(len(ids)) per item with the raw list.
    wanted = set(ids)
    data_partial = [(x, y, item_id) for x, y, item_id in zip(*full_data) if item_id in wanted]
    max_len = max(len(x) for (x, _, _) in data_partial)
    data = np.zeros((len(data_partial), max_len + 2)).astype(int)
    data.fill(token_map['PAD'])
    for i, (x, y, item_id) in enumerate(data_partial):
        data[i, :len(x)] = x
        data[i, -2] = y
        data[i, -1] = item_id
    return data
def run_language_cv(lang, rare_mode, instance_ids, args, embedding_size=None,
                    hidden_size=256, word2vec_size=10, nlayers=1, dropout=0.2):
    """Run 10-fold cross-validation for one (language, rare-mode) pair.

    Side effect: appends a row per fold to the module-level `full_results`
    table and rewrites the full-results CSV after every fold (checkpointing).
    Returns data statistics plus fold-averaged losses and accuracies.
    """
    global full_results
    full_data, token_map, labels = get_full_data_loader(lang, rare_mode)
    nfolds = 10
    avg_test_loss = avg_test_acc = avg_val_loss = avg_val_acc = 0
    for fold in range(nfolds):
        print()
        print('Fold:', fold, end=' ')
        train_loader, val_loader, test_loader = get_data_loaders_cv(
            fold, nfolds, full_data, token_map, labels, instance_ids, args)
        avg_len, entropy, uncond_entropy, test_loss, test_acc, \
            best_epoch, val_loss, val_acc = _run_language(
                lang, rare_mode, train_loader, val_loader, test_loader, token_map, labels,
                args, embedding_size=embedding_size, hidden_size=hidden_size,
                word2vec_size=word2vec_size, nlayers=nlayers, dropout=dropout)
        full_results += [[lang, rare_mode, fold, avg_len, entropy, uncond_entropy,
                          test_loss, test_acc, val_loss, val_acc, best_epoch]]
        avg_test_loss += test_loss / nfolds
        avg_test_acc += test_acc / nfolds
        avg_val_loss += val_loss / nfolds
        avg_val_acc += val_acc / nfolds
        write_csv(full_results, '%s/%s__%s__full-results.csv' % (args.rfolder, args.model, args.context))
    return avg_len, entropy, uncond_entropy, avg_test_loss, avg_test_acc, avg_val_loss, avg_val_acc
def run_opt_language_cv(lang, rare_mode, instance_ids, args):
    """Run CV with the pre-tuned hyperparameters stored for this language."""
    embedding_size, hidden_size, word2vec_size, nlayers, dropout = \
        opt_params.get_opt_params(lang, rare_mode, args)
    print('Optimum hyperparams emb-hs: %d, hs: %d, w2v: %d, nlayers: %d, drop: %.4f'
          % (embedding_size, hidden_size, word2vec_size, nlayers, dropout))
    return run_language_cv(lang, rare_mode, instance_ids, args,
                           embedding_size=embedding_size, hidden_size=hidden_size,
                           word2vec_size=word2vec_size, nlayers=nlayers,
                           dropout=dropout)
def run_language_enveloper_cv(lang, rare_mode, instance_ids, args):
    """Dispatch to the hyperparameter-optimised CV run when --opt is set."""
    runner = run_opt_language_cv if args.opt else run_language_cv
    return runner(lang, rare_mode, instance_ids, args)
def run_languages(args):
    """Cross-validate every (language, rare-mode) combination and dump CSV results."""
    results = [['lang', 'rare_mode', 'avg_len', 'entropy', 'unconditional_entropy',
                'test_loss', 'test_acc', 'val_loss', 'val_acc']]
    for i, (lang, rare_mode) in enumerate(utils.get_languages(args.languages, args.rare_modes)):
        print()
        print('%d. Language %s (%s)' % (i, lang, rare_mode))
        instance_ids = get_ids(lang, rare_mode)
        avg_len, entropy, uncond_entropy, test_loss, test_acc, val_loss, val_acc = \
            run_language_enveloper_cv(lang, rare_mode, instance_ids, args)
        results.append([lang, rare_mode, avg_len, entropy, uncond_entropy,
                        test_loss, test_acc, val_loss, val_acc])
        # Checkpoint after every language so a crash loses at most one run.
        write_csv(results, '%s/%s__%s__results.csv' % (args.rfolder, args.model, args.context))
    write_csv(results, '%s/%s__%s__results-final.csv' % (args.rfolder, args.model, args.context))
if __name__ == '__main__':
    # Entry point: parse CLI arguments (results are written under the 'cv'
    # csv folder) and run cross-validation for all requested languages.
    args = argparser.parse_args(csv_folder='cv')
    run_languages(args)
| [
"h02_learn.train.get_data",
"random.shuffle",
"h02_learn.train.convert_to_loader",
"h02_learn.train.write_csv",
"os.path.join",
"utils.utils.get_languages",
"h02_learn.model.opt_params.get_opt_params",
"numpy.zeros",
"h02_learn.train._run_language",
"utils.argparser.parse_args",
"h01_data.parse.... | [((74, 105), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (86, 105), False, 'import os\n'), ((625, 656), 'h02_learn.train.get_data', 'get_data', (['lang', 'rare_mode', 'args'], {}), '(lang, rare_mode, args)\n', (633, 656), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n'), ((1424, 1453), 'h01_data.parse.get_data', 'get_raw_data', (['lang', 'rare_mode'], {}), '(lang, rare_mode)\n', (1436, 1453), True, 'from h01_data.parse import get_data as get_raw_data\n'), ((1595, 1623), 'random.shuffle', 'random.shuffle', (['instance_ids'], {}), '(instance_ids)\n', (1609, 1623), False, 'import random\n'), ((3284, 3313), 'h02_learn.train.convert_to_loader', 'convert_to_loader', (['data', 'mode'], {}), '(data, mode)\n', (3301, 3313), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n'), ((5393, 5441), 'h02_learn.model.opt_params.get_opt_params', 'opt_params.get_opt_params', (['lang', 'rare_mode', 'args'], {}), '(lang, rare_mode, args)\n', (5418, 5441), False, 'from h02_learn.model import opt_params\n'), ((6271, 6323), 'utils.utils.get_languages', 'utils.get_languages', (['args.languages', 'args.rare_modes'], {}), '(args.languages, args.rare_modes)\n', (6290, 6323), False, 'from utils import utils\n'), ((6879, 6977), 'h02_learn.train.write_csv', 'write_csv', (['results', "('%s/%s__%s__results-final.csv' % (args.rfolder, args.model, args.context))"], {}), "(results, '%s/%s__%s__results-final.csv' % (args.rfolder, args.\n model, args.context))\n", (6888, 6977), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n'), ((7013, 7050), 'utils.argparser.parse_args', 'argparser.parse_args', ([], {'csv_folder': '"""cv"""'}), "(csv_folder='cv')\n", (7033, 7050), False, 'from utils import argparser\n'), ((975, 1002), 'numpy.zeros', 'np.zeros', (['(n_items, x_size)'], {}), '((n_items, x_size))\n', (983, 
1002), True, 'import numpy as np\n'), ((1004, 1021), 'numpy.zeros', 'np.zeros', (['n_items'], {}), '(n_items)\n', (1012, 1021), True, 'import numpy as np\n'), ((1025, 1042), 'numpy.zeros', 'np.zeros', (['n_items'], {}), '(n_items)\n', (1033, 1042), True, 'import numpy as np\n'), ((4459, 4681), 'h02_learn.train._run_language', '_run_language', (['lang', 'rare_mode', 'train_loader', 'val_loader', 'test_loader', 'token_map', 'labels', 'args'], {'embedding_size': 'embedding_size', 'hidden_size': 'hidden_size', 'word2vec_size': 'word2vec_size', 'nlayers': 'nlayers', 'dropout': 'dropout'}), '(lang, rare_mode, train_loader, val_loader, test_loader,\n token_map, labels, args, embedding_size=embedding_size, hidden_size=\n hidden_size, word2vec_size=word2vec_size, nlayers=nlayers, dropout=dropout)\n', (4472, 4681), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n'), ((5063, 5165), 'h02_learn.train.write_csv', 'write_csv', (['full_results', "('%s/%s__%s__full-results.csv' % (args.rfolder, args.model, args.context))"], {}), "(full_results, '%s/%s__%s__full-results.csv' % (args.rfolder, args\n .model, args.context))\n", (5072, 5165), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n'), ((6787, 6878), 'h02_learn.train.write_csv', 'write_csv', (['results', "('%s/%s__%s__results.csv' % (args.rfolder, args.model, args.context))"], {}), "(results, '%s/%s__%s__results.csv' % (args.rfolder, args.model,\n args.context))\n", (6796, 6878), False, 'from h02_learn.train import convert_to_loader, _run_language, write_csv, get_data\n')] |
import numpy as np
import pytest
from ansys import dpf
from ansys.dpf import core
from ansys.dpf.core import FieldDefinition
from ansys.dpf.core import operators as ops
from ansys.dpf.core.common import locations, shell_layers
@pytest.fixture()
def stress_field(allkindofcomplexity):
    """Elemental-nodal stress field read from the 'allkindofcomplexity' result file."""
    model = dpf.core.Model(allkindofcomplexity)
    stress = model.results.stress()
    return stress.outputs.fields_container()[0]
def test_create_field():
    """A freshly constructed Field is backed by a non-null server message id."""
    new_field = dpf.core.Field()
    assert new_field._message.id != 0
def test_create_field_from_helper_scalar():
    """field_from_array round-trips 1-D scalar data."""
    values = np.random.random(10)
    built = dpf.core.field_from_array(values)
    assert np.allclose(built.data, values)
def test_create_field_from_helper_vector():
    """field_from_array round-trips (n, 3) vector data."""
    values = np.random.random((10, 3))
    built = dpf.core.field_from_array(values)
    assert np.allclose(built.data, values)
def test_createbycopy_field():
    """Constructing from an existing message aliases the same server-side field."""
    original = dpf.core.Field()
    alias = dpf.core.Field(field=original._message)
    assert original._message.id == alias._message.id
def test_set_get_scoping():
    """Scoping ids assigned to a field are read back unchanged."""
    field = dpf.core.Field()
    scoping = dpf.core.Scoping()
    expected_ids = [1, 2, 3, 5, 8, 9, 10]
    scoping.ids = expected_ids
    field.scoping = scoping
    assert field.scoping.ids == expected_ids
def test_set_get_data_field():
    """Scalar data assigned to a scoped field is read back unchanged."""
    field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
    scoping = dpf.core.Scoping()
    ids = []
    data = []
    for i in range(0, 20):
        ids.append(i + 1)
        data.append(i + 0.001)
    scoping.ids = ids
    field.scoping = scoping
    field.data = data
    assert np.allclose(field.data, data)
def test_set_get_data_array_field():
    """A (20, 3) numpy array assigned as vector-field data is read back unchanged."""
    field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
    scoping = dpf.core.Scoping()
    ids = []
    data = []
    for i in range(0, 20):
        ids.append(i + 1)
        data.append(i + 0.001)
        data.append(i + 0.001)
        data.append(i + 0.001)
    data = np.array(data)
    data = data.reshape((20, 3))
    scoping.ids = ids
    field.scoping = scoping
    field.data = data
    assert np.allclose(field.data, data)
def test_append_data_field():
    """Appending per-entity vectors builds the scoping and stores retrievable data."""
    field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
    for i in range(0, 20):
        scopingid = i + 1
        scopingindex = i  # unused; kept to mirror the retrieval loop below
        data = [0.01 + i, 0.02 + i, 0.03 + i]
        field.append(data, scopingid)
    scopingOut = field.scoping
    assert scopingOut.ids == list(range(1, 21))
    for i in range(0, 20):
        scopingid = i + 1
        scopingindex = i
        datain = [0.01 + i, 0.02 + i, 0.03 + i]
        dataout = field.get_entity_data(scopingindex)
        assert np.allclose(dataout, datain)
def test_set_get_entity_data_array_field():
    """Entity data appended as (1, 3) arrays is retrievable by index and by id."""
    field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
    for i in range(0, 20):
        scopingid = i + 1
        scopingindex = i
        data = [0.01 + i, 0.02 + i, 0.03 + i]
        data = np.array(data)
        data = data.reshape((1, 3))
        field.append(data, scopingid)
    scopingOut = field.scoping
    assert scopingOut.ids == list(range(1, 21))
    for i in range(0, 20):
        scopingid = i + 1
        scopingindex = i
        datain = [0.01 + i, 0.02 + i, 0.03 + i]
        dataout = field.get_entity_data(scopingindex)
        assert np.allclose(dataout, datain)
        dataout = field.get_entity_data_by_id(scopingid)
        assert np.allclose(dataout, datain)
# def test_get_data_ptr_field():
# field= dpf.core.Field(nentities=3, nature=dpf.core.natures.scalar,
# location=dpf.core.locations.elemental_nodal)
# data = [0.01,0.02,0.03]
# field.set_entity_data(data,0,1)
# data = [0.01,0.02,0.03,0.01,0.02,0.03]
# field.set_entity_data(data,1,2)
# data = [0.01,0.02,0.03,0.01]
# field.set_entity_data(data,2,3)
# scopingOut = field.scoping
# assert scopingOut.ids == [1,2,3]
# dataptr = field.data_ptr
# assert dataptr == [0,3,9]
def test_set_get_data_property_field():
    """Same round-trip as test_set_get_data_field but via the `core` namespace."""
    field = core.Field(nentities=20, nature=dpf.core.natures.scalar)
    scoping = core.Scoping()
    ids = []
    data = []
    for i in range(0, 20):
        ids.append(i + 1)
        data.append(i + 0.001)
    scoping.ids = ids
    field.scoping = scoping
    field.data = data
    assert np.allclose(field.data, data)
def test_count_field():
    """Component/elementary-data counts and size reflect 20 scalar entries."""
    field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
    scoping = dpf.core.Scoping()
    ids = []
    data = []
    for i in range(0, 20):
        ids.append(i + 1)
        data.append(i + 0.001)
    scoping.ids = ids
    field.scoping = scoping
    field.data = data
    assert field.component_count == 1
    assert field.elementary_data_count == 20
    assert field.size == 20
def test_resize_field():
    """A field created with 1 entity can be resized to hold 20 scalar entries."""
    field = dpf.core.Field(nentities=1, nature=dpf.core.natures.scalar)
    scoping = dpf.core.Scoping()
    ids = []
    data = []
    for i in range(0, 20):
        ids.append(i + 1)
        data.append(i + 0.001)
    field.resize(20, 20)  # resize before assigning the larger scoping/data
    scoping.ids = ids
    field.scoping = scoping
    field.data = data
    assert field.component_count == 1
    assert field.elementary_data_count == 20
    assert field.size == 20
def test_fromarray_field():
    """field_from_array preserves the (rows, components) shape of its input."""
    source = np.empty((100, 6))
    built = dpf.core.field_from_array(source)
    assert built.shape == (100, 6)
def test_field_definition_field(allkindofcomplexity):
    """A displacement field read from a result file reports unit 'm' and nodal location."""
    dataSource = dpf.core.DataSources()
    dataSource.set_result_file_path(allkindofcomplexity)
    op = dpf.core.Operator("U")
    op.connect(4, dataSource)  # pin 4: data sources input of the result operator
    fcOut = op.get_output(0, dpf.core.types.fields_container)
    f = fcOut[0]
    assert f.unit == "m"
    assert f.location == dpf.core.locations.nodal
def test_field_definition_modif_field(allkindofcomplexity):
    """FieldDefinition attributes (unit, location, dimensionality, layers) are mutable."""
    dataSource = dpf.core.DataSources()
    dataSource.set_result_file_path(allkindofcomplexity)
    op = dpf.core.Operator("U")
    op.connect(4, dataSource)
    fcOut = op.get_output(0, dpf.core.types.fields_container)
    f = fcOut[0]
    fielddef = f.field_definition
    # Initial state as read from the result file.
    assert fielddef.unit == "m"
    assert fielddef.location == dpf.core.locations.nodal
    assert fielddef.dimensionality.nature == dpf.core.natures.vector
    assert fielddef.dimensionality.dim == [3]
    assert fielddef.shell_layers == dpf.core.shell_layers.layerindependent
    # Each setter below is checked immediately by a matching getter.
    fielddef.unit = "mm"
    assert fielddef.unit == "mm"
    fielddef.location = dpf.core.locations.elemental
    assert fielddef.location == dpf.core.locations.elemental
    fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
    assert fielddef.dimensionality.nature == dpf.core.natures.scalar
    assert fielddef.dimensionality.dim == [1]
    fielddef.dimensionality = dpf.core.Dimensionality.tensor_dim()
    assert fielddef.dimensionality.nature == dpf.core.natures.symmatrix
    assert fielddef.dimensionality.dim == [3, 3]
    fielddef.dimensionality = dpf.core.Dimensionality.vector_3d_dim()
    assert fielddef.dimensionality.nature == dpf.core.natures.vector
    assert fielddef.dimensionality.dim == [3]
    fielddef.dimensionality = dpf.core.Dimensionality.vector_dim(4)
    assert fielddef.dimensionality.nature == dpf.core.natures.vector
    assert fielddef.dimensionality.dim == [4]
    fielddef.shell_layers = dpf.core.shell_layers.bottom
    assert fielddef.shell_layers == dpf.core.shell_layers.bottom
def test_field_definition_set_in_field(allkindofcomplexity):
    """A modified FieldDefinition assigned back to the field is reflected on both."""
    dataSource = dpf.core.DataSources()
    dataSource.set_result_file_path(allkindofcomplexity)
    op = dpf.core.Operator("U")
    op.connect(4, dataSource)
    fcOut = op.get_output(0, dpf.core.types.fields_container)
    f = fcOut[0]
    fielddef = f.field_definition
    fielddef.unit = "mm"
    fielddef.location = dpf.core.locations.elemental
    fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
    fielddef.shell_layers = dpf.core.shell_layers.bottom
    f.field_definition = fielddef
    # Re-read the definition and check it through both the definition and the field.
    fielddef = f.field_definition
    assert fielddef.unit == "mm"
    assert fielddef.location == dpf.core.locations.elemental
    assert fielddef.dimensionality.nature == dpf.core.natures.scalar
    assert fielddef.dimensionality.dim == [1]
    assert fielddef.shell_layers == dpf.core.shell_layers.bottom
    assert f.unit == "mm"
    assert f.location == dpf.core.locations.elemental
    assert f.dimensionality.nature == dpf.core.natures.scalar
    assert f.dimensionality.dim == [1]
    assert f.shell_layers == dpf.core.shell_layers.bottom
def test_change_field_definition_in_field(allkindofcomplexity):
    """Definition attributes set directly on the field propagate to field_definition."""
    dataSource = dpf.core.DataSources()
    dataSource.set_result_file_path(allkindofcomplexity)
    op = dpf.core.Operator("U")
    op.connect(4, dataSource)
    fcOut = op.get_output(0, dpf.core.types.fields_container)
    f = fcOut[0]
    f.unit = "mm"
    f.location = dpf.core.locations.elemental
    f.dimensionality = dpf.core.Dimensionality.scalar_dim()
    f.shell_layers = dpf.core.shell_layers.bottom
    fielddef = f.field_definition
    assert fielddef.unit == "mm"
    assert fielddef.location == dpf.core.locations.elemental
    assert fielddef.dimensionality.nature == dpf.core.natures.scalar
    assert fielddef.dimensionality.dim == [1]
    assert fielddef.shell_layers == dpf.core.shell_layers.bottom
    assert f.unit == "mm"
    assert f.location == dpf.core.locations.elemental
    assert f.dimensionality.nature == dpf.core.natures.scalar
    assert f.dimensionality.dim == [1]
    assert f.shell_layers == dpf.core.shell_layers.bottom
def test_create_overall_field():
    """Adding an 'overall' field to a nodal field broadcasts it to every node."""
    field_overall = dpf.core.Field(nentities=1, location="overall", nature="vector")
    field_overall.scoping.location = "overall"
    field_overall.scoping.ids = [0]
    field_overall.data = [1.0, 2.0, 3.0]
    field = dpf.core.Field(nentities=5, location="nodal")
    field.scoping.location = "nodal"
    field.scoping.ids = list(range(1, 6))
    data = [float(i) for i in range(0, 15)]
    field.data = data
    add = dpf.core.Operator("add")
    add.inputs.fieldA(field)
    add.inputs.fieldB(field_overall)
    field_added = add.outputs.field()
    data_added = field_added.data
    for i in range(0, 5):
        # Row i of the nodal field is [3i, 3i+1, 3i+2]; adding [1, 2, 3] per node.
        assert np.allclose(data_added[i], [i * 3.0 + 1.0, i * 3.0 + 3.0, i * 3.0 + 5.0])
def test_data_pointer_field(allkindofcomplexity):
    """The per-entity data pointer of an elemental-nodal field is readable and writable."""
    dataSource = dpf.core.DataSources()
    dataSource.set_result_file_path(allkindofcomplexity)
    op = dpf.core.Operator("S")
    op.connect(4, dataSource)
    fcOut = op.get_output(0, dpf.core.types.fields_container)
    data_pointer = fcOut[0]._data_pointer
    assert len(data_pointer) == len(fcOut[0].scoping)
    assert data_pointer[0] == 0
    assert data_pointer[1] == 72
    f = fcOut[0]
    # Rewrite the second offset and check it round-trips through the server.
    data_pointer[1] = 40
    f._data_pointer = data_pointer
    data_pointer = fcOut[0]._data_pointer
    assert len(data_pointer) == len(fcOut[0].scoping)
    assert data_pointer[0] == 0
    assert data_pointer[1] == 40
def test_data_pointer_prop_field():
    """Appending variable-length entities builds cumulative data-pointer offsets."""
    pfield = dpf.core.PropertyField()
    pfield.append([1, 2, 3], 1)
    pfield.append([1, 2, 3, 4], 2)
    pfield.append([1, 2, 3], 3)
    data_pointer = pfield._data_pointer
    assert len(data_pointer) == 3
    assert data_pointer[0] == 0
    assert data_pointer[1] == 3
    assert data_pointer[2] == 7
    # Overwrite an offset and check the setter round-trips.
    data_pointer[1] = 4
    pfield._data_pointer = data_pointer
    data_pointer = pfield._data_pointer
    assert len(data_pointer) == 3
    assert data_pointer[0] == 0
    assert data_pointer[1] == 4
    assert data_pointer[2] == 7
def test_append_data_elemental_nodal_field(allkindofcomplexity):
    """Entity data copied from a real stress field into a new field reads back equal."""
    model = dpf.core.Model(allkindofcomplexity)
    stress = model.results.stress()
    f = stress.outputs.fields_container()[0]
    assert f.location == "ElementalNodal"
    f_new = dpf.core.Field(
        f.scoping.size,
        nature=dpf.core.natures.symmatrix,
        location=dpf.core.locations.elemental_nodal,
    )
    # Copy only 1% of the entities to keep the test fast.
    size = int(f.scoping.size / 100)
    for i in range(0, size):
        f_new.append(f.get_entity_data(i), f.scoping.id(i))
    for i in range(0, size):
        assert np.allclose(f_new.get_entity_data(i), f.get_entity_data(i))
def test_str_field(stress_field):
    """The string representation of a field mentions location, unit and sizes."""
    assert "Location" in str(stress_field)
    assert "ElementalNodal" in str(stress_field)
    assert "Unit" in str(stress_field)
    assert "Pa" in str(stress_field)
    assert "9255" in str(stress_field)
    assert "40016" in str(stress_field)
    assert "6" in str(stress_field)
def test_to_nodal(stress_field):
    """to_nodal converts an ElementalNodal field into a Nodal one."""
    assert stress_field.location == "ElementalNodal"
    nodal_field = stress_field.to_nodal()
    assert nodal_field.location == "Nodal"
def test_mesh_support_field(stress_field):
    """The field's mesh support exposes the full model mesh."""
    mesh = stress_field.meshed_region
    assert len(mesh.nodes.scoping) == 15129
    assert len(mesh.elements.scoping) == 10292
def test_shell_layers_1(allkindofcomplexity):
    """Stress reports top/bottom/mid shell layers; displacement is layer independent."""
    model = dpf.core.Model(allkindofcomplexity)
    stress = model.results.stress()
    f = stress.outputs.fields_container()[0]
    assert f.shell_layers == shell_layers.topbottommid
    model = dpf.core.Model(allkindofcomplexity)
    disp = model.results.displacement()
    f = disp.outputs.fields_container()[0]
    assert f.shell_layers == shell_layers.layerindependent
def test_shell_layers_2(velocity_acceleration):
    """A result file without shells yields the 'nonelayer' shell-layer flag."""
    model = dpf.core.Model(velocity_acceleration)
    stress = model.results.stress()
    f = stress.outputs.fields_container()[0]
    assert f.shell_layers == shell_layers.nonelayer
def test_mesh_support_field_model(allkindofcomplexity):
    """Mesh support is also available when the field comes through a Model."""
    model = dpf.core.Model(allkindofcomplexity)
    stress = model.results.stress()
    f = stress.outputs.fields_container()[0]
    mesh = f.meshed_region
    assert len(mesh.nodes.scoping) == 15129
    assert len(mesh.elements.scoping) == 10292
def test_delete_auto_field():
    """Deleting the owning field invalidates aliases constructed from it."""
    field = dpf.core.Field()
    field2 = dpf.core.Field(field=field)
    del field
    with pytest.raises(Exception):
        field2.get_ids()
def test_create_and_update_field_definition():
    """A blank FieldDefinition raises on unset location until one is assigned."""
    fieldDef = FieldDefinition()
    assert fieldDef is not None
    with pytest.raises(Exception):
        assert fieldDef.location is None
    fieldDef.location = locations.nodal
    assert fieldDef.location == locations.nodal
def test_set_support_timefreq(simple_bar):
    """A TimeFreqSupport assigned to a field is returned with the same frequencies."""
    tfq = dpf.core.TimeFreqSupport()
    time_frequencies = dpf.core.Field(
        nature=dpf.core.natures.scalar, location=dpf.core.locations.time_freq
    )
    time_frequencies.scoping.location = dpf.core.locations.time_freq_step
    time_frequencies.append([0.1, 0.32, 0.4], 1)
    tfq.time_frequencies = time_frequencies
    model = dpf.core.Model(simple_bar)
    disp = model.results.displacement()
    fc = disp.outputs.fields_container()
    field = fc[0]
    # initial_support = field.time_freq_support
    # assert initial_support is None
    field.time_freq_support = tfq
    tfq_to_check = field.time_freq_support
    assert np.allclose(tfq.time_frequencies.data, tfq_to_check.time_frequencies.data)
def test_set_support_mesh(simple_bar):
    """A meshed region set on a field round-trips, including later node additions."""
    mesh = dpf.core.MeshedRegion()
    mesh.nodes.add_node(1, [0.0, 0.0, 0.0])
    model = dpf.core.Model(simple_bar)
    disp = model.results.displacement()
    fc = disp.outputs.fields_container()
    field = fc[0]
    field.meshed_region = mesh
    mesh_to_check = field.meshed_region
    assert mesh_to_check.nodes.n_nodes == 1
    assert mesh_to_check.elements.n_elements == 0
    # Grow the mesh and reassign: the field should see the new node count.
    mesh.nodes.add_node(2, [1.0, 0.0, 0.0])
    mesh.nodes.add_node(3, [1.0, 1.0, 0.0])
    mesh.nodes.add_node(4, [0.0, 1.0, 0.0])
    field.meshed_region = mesh
    mesh_to_check_2 = field.meshed_region
    assert mesh_to_check_2.nodes.n_nodes == 4
    assert mesh_to_check_2.elements.n_elements == 0
def test_local_field_append():
    """Appending through a local field matches appending to a remote field."""
    num_entities = 400
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append([0.1 * i, 0.2 * i, 0.3 * i], i)
        assert f._is_set == True
    field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    for i in range(1, num_entities + 1):
        field.append([0.1 * i, 0.2 * i, 0.3 * i], i)
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    # Fixed-size nodal data needs no per-entity data pointer.
    assert len(field_to_local._data_pointer) == 0
def test_local_elemental_nodal_field_append():
    """Local elemental-nodal appends (nested or flat) match remote appends."""
    num_entities = 100
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
    field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    for i in range(1, num_entities + 1):
        field.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    assert len(field_to_local._data_pointer) == num_entities
    # flat data
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i], i)
            assert f._is_set == True
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    assert len(field_to_local._data_pointer) == num_entities
def test_local_array_field_append():
    """Appending numpy arrays through a local field matches remote appends."""
    num_entities = 400
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
            assert f._is_set is True
    field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    for i in range(1, num_entities + 1):
        field.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    assert len(field_to_local._data_pointer) == 0
def test_local_elemental_nodal_array_field_append():
    """Elemental-nodal numpy appends (nested or flat) via a local field match remote ones."""
    num_entities = 100
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append(
                np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
            )
    field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
    for i in range(1, num_entities + 1):
        field.append(
            np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
        )
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    assert len(field_to_local._data_pointer) == num_entities
    # flat data
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append(
                np.array([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i]), i
            )
    assert np.allclose(field.data, field_to_local.data)
    assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
    assert len(field_to_local._data_pointer) == num_entities
def test_local_get_entity_data():
    """Read entity data back from a local field, both while writing and read-only."""
    num_entities = 100
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append(np.array([[0.1 * i, 0.2 * i, 0.3 * i]]), i)
            # reads must immediately see the appended values, by index and by id
            assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
            assert np.allclose(
                f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
            )
            # appending marks the local field as modified
            assert hasattr(f, "_is_set") is True
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
            assert np.allclose(
                f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
            )
            # read-only access must not flag the local field as modified
            assert hasattr(f, "_is_set") is False
def test_local_elemental_nodal_get_entity_data():
    """Same as test_local_get_entity_data but with two nodes per entity."""
    num_entities = 100
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        num_entities, location=dpf.core.locations.elemental_nodal
    )
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            f.append(
                np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
            )
            # both rows of the entity must be readable right after appending
            assert np.allclose(
                f.get_entity_data(i - 1),
                [[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
            )
            assert np.allclose(
                f.get_entity_data_by_id(i),
                [[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
            )
            # writing sets the modified flag on the local field
            assert hasattr(f, "_is_set") is True
            assert f._is_set is True
    with field_to_local.as_local_field() as f:
        for i in range(1, num_entities + 1):
            assert np.allclose(
                f.get_entity_data(i - 1),
                [[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
            )
            assert np.allclose(
                f.get_entity_data_by_id(i),
                [[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
            )
            # pure reads must leave the modified flag unset
            assert hasattr(f, "_is_set") is False
def test_auto_delete_field_local():
    """A field in a fields container stays readable after the Python ref is dropped."""
    source = dpf.core.fields_factory.create_3d_vector_field(
        1, location=dpf.core.locations.elemental_nodal
    )
    source.append([3.0, 4.0, 5.0], 1)
    container = dpf.core.fields_container_factory.over_time_freq_fields_container(
        [source]
    )
    # drop the local reference; the container must keep the field alive
    source = None
    with container[0].as_local_field() as local_field:
        assert np.allclose(local_field.get_entity_data(0), [3.0, 4.0, 5.0])
def test_auto_delete_field_local2():
    """Deleting a local field must flush its pending data back to the owner."""
    owner = dpf.core.fields_factory.create_3d_vector_field(
        1, location=dpf.core.locations.elemental_nodal
    )
    writer = owner.as_local_field()
    writer.append([3.0, 4.0, 5.0], 1)
    # deletion (rather than context exit) must commit the appended data
    del writer
    with owner.as_local_field() as reader:
        assert np.allclose(reader.get_entity_data(0), [3.0, 4.0, 5.0])
def test_get_set_data_local_field():
    """Setting a local field's data must accept a nested list, a flat list,
    and a numpy array, and commit the values to the owning field on exit.

    The original test repeated the identical with-block three times; the
    three input representations are now driven by one loop.
    """
    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        2, location=dpf.core.locations.elemental_nodal
    )
    expected = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
    # Three representations of the same 2x3 data set.
    representations = (
        [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]],  # nested list
        [0.1, 0.2, 0.3, 0.1, 0.2, 0.3],  # flat list
        np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]),  # numpy array
    )
    for data in representations:
        with field_to_local.as_local_field() as f:
            f.data = data
            # the local view reflects the assignment immediately
            assert np.allclose(f.data, expected)
        # leaving the context commits the data to the owning field
        assert np.allclose(field_to_local.data, expected)
def test_get_set_data_elemental_nodal_local_field():
    """Set elemental-nodal data (with an explicit data pointer) on a local
    field from a nested list, a flat list, and a numpy array, and verify the
    layout on both the local view and the owning field.

    The original test copy-pasted the same twelve assertions three times;
    the shared checks are factored into a nested helper.
    """
    expected = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]

    def check_layout(f):
        # data, data pointer and per-entity views shared by the local field
        # and the owning field after commit
        assert np.allclose(f.data, expected)
        assert np.allclose(f._data_pointer, [0, 6])
        assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
        assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])

    field_to_local = dpf.core.fields_factory.create_3d_vector_field(
        2, location=dpf.core.locations.elemental_nodal
    )
    # Three representations of the same two 2x3 entities.
    representations = (
        [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
        [0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.4],
        np.array(
            [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
        ),
    )
    for data in representations:
        with field_to_local.as_local_field() as f:
            f.data = data
            f._data_pointer = [0, 6]
            f.scoping_ids = [1, 2]
            check_layout(f)
            # writing must flag the local field as modified
            assert hasattr(f, "_is_set") is True
            assert f._is_set is True
        # leaving the context commits everything to the owning field
        check_layout(field_to_local)
def test_get_set_scoping_local_field():
    """Assigning a Scoping through a local field must update ids on both views."""
    owner = dpf.core.fields_factory.create_3d_vector_field(
        2, location=dpf.core.locations.elemental_nodal
    )
    expected_data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
    with owner.as_local_field() as local_field:
        local_field.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
        local_field.scoping = dpf.core.Scoping(ids=[3, 4])
        assert np.allclose(local_field.data, expected_data)
        # both the shortcut and the full scoping accessor must agree
        assert np.allclose(local_field.scoping_ids, [3, 4])
        assert np.allclose(local_field.scoping.ids, [3, 4])
    # context exit commits data and scoping to the owner
    assert np.allclose(owner.data, expected_data)
    assert np.allclose(owner.scoping.ids, [3, 4])
def test_empty_data_field():
    """Assigning an empty list must clear the field's data."""
    field = dpf.core.fields_factory.create_3d_vector_field(100)
    initial = [1.0, 2.0, 3.0]
    field.data = initial
    assert np.allclose(field.data, initial)
    field.data = []
    assert len(field.data) == 0
def test_set_data_numpy_array_field():
    """A (100, 3) numpy array must round-trip through the data setter."""
    field = dpf.core.fields_factory.create_3d_vector_field(100)
    values = np.arange(300).reshape(100, 3)
    field.data = values
    assert np.allclose(field.data, values)
def test_field_huge_amount_of_data(allkindofcomplexity):
    """Round-trip large data arrays through a field's data setter/getter."""
    # set data with a field created from a model
    model = dpf.core.Model(allkindofcomplexity)
    field = model.results.displacement().outputs.fields_container()[0]
    data = field.data
    assert len(data) == 15113
    # write the data back unchanged and read it again
    field.data = data
    new_data = field.data
    assert np.allclose(data, new_data)
    # mutate a few scattered entries (modif_data aliases data) and round-trip
    modif_data = data
    modif_data[245] = 45
    modif_data[1129] = 69
    modif_data[7209] = 2086
    modif_data[9046] = 12
    modif_data[12897] = 7894
    modif_data[15112] = 2789
    field.data = modif_data
    new_modif_data = field.data
    assert np.allclose(new_modif_data, modif_data)
    # set data with a field created from scratch
    field = dpf.core.Field(nature=dpf.core.natures.scalar)
    data = range(1, 1000000)
    field.data = data
    data_check = field.data
    assert np.allclose(data_check, data)
    # data_check supports item assignment and .tolist() — presumably a numpy
    # array returned by the getter; mutate it and round-trip as a plain list
    modif_data = data_check
    modif_data[245] = 45
    modif_data[10046] = 69
    modif_data[1999] = 2086
    modif_data[50067] = 12
    modif_data[999345] = 7894
    modif_data[506734] = 2789
    modif_data = modif_data.tolist()
    field.data = modif_data
    new_modif_data = field.data
    assert np.allclose(new_modif_data, modif_data)
def test_deep_copy_field():
    """A deep copy of a field must compare identical and keep the unit."""
    original = dpf.core.fields_factory.create_3d_vector_field(100)
    original.data = np.arange(300).reshape(100, 3)
    duplicate = original.deep_copy()
    comparison = dpf.core.operators.logic.identical_fields(original, duplicate)
    assert comparison.outputs.boolean()
    assert original.unit == duplicate.unit
def test_deep_copy_elemental_nodal_field(allkindofcomplexity):
    """Deep copy an elemental-nodal field and compare field and mesh contents."""
    model = dpf.core.Model(allkindofcomplexity)
    stress = model.results.stress()
    field = stress.outputs.fields_container()[0]
    copy = field.deep_copy()
    iden = dpf.core.operators.logic.identical_fields(field, copy)
    try:
        assert iden.outputs.boolean()
    except AssertionError as e:
        # surface the operator's diagnostic message before failing
        print(iden.outputs.message())
        raise e
    mesh = field.meshed_region
    # NOTE: rebinds `copy` from the copied field to its meshed region
    copy = copy.meshed_region
    assert copy.nodes.scoping.ids == mesh.nodes.scoping.ids
    assert copy.elements.scoping.ids == mesh.elements.scoping.ids
    assert copy.unit == mesh.unit
    # mesh contents (coordinates, element types, connectivities) must match
    assert np.allclose(
        copy.nodes.coordinates_field.data, mesh.nodes.coordinates_field.data
    )
    assert np.allclose(
        copy.elements.element_types_field.data, mesh.elements.element_types_field.data
    )
    assert np.allclose(
        copy.elements.connectivities_field.data, mesh.elements.connectivities_field.data
    )
    # ... and so must the scopings of those mesh fields
    assert np.allclose(
        copy.nodes.coordinates_field.scoping.ids,
        mesh.nodes.coordinates_field.scoping.ids,
    )
    assert np.allclose(
        copy.elements.element_types_field.scoping.ids,
        mesh.elements.element_types_field.scoping.ids,
    )
    assert np.allclose(
        copy.elements.connectivities_field.scoping.ids,
        mesh.elements.connectivities_field.scoping.ids,
    )
def test_deep_copy_over_time_field(velocity_acceleration):
    """Deep copying a field must also carry over its time/frequency support."""
    model = dpf.core.Model(velocity_acceleration)
    stress = model.results.stress(time_scoping=[1, 2, 3])
    min_max = dpf.core.operators.min_max.min_max_fc(stress)
    field = min_max.outputs.field_max()
    duplicate = field.deep_copy()
    comparison = dpf.core.operators.logic.identical_fields(field, duplicate)
    assert comparison.outputs.boolean()
    support = field.time_freq_support
    support_copy = duplicate.time_freq_support
    assert np.allclose(support.time_frequencies.data, support_copy.time_frequencies.data)
    assert (
        support.time_frequencies.scoping.ids
        == support_copy.time_frequencies.scoping.ids
    )
def test_deep_copy_spec_ncomp_field():
    """Deep copy must preserve a non-default component count and location."""
    field = dpf.core.fields_factory.create_vector_field(
        100, 6, dpf.core.locations.elemental
    )
    field.data = np.arange(600).reshape(100, 6)
    duplicate = field.deep_copy()
    assert duplicate.component_count == 6
    assert duplicate.location == dpf.core.locations.elemental
def test_add_operator_field():
    """field + operator / list / float must all produce an ops.math.add."""
    field = dpf.core.fields_factory.create_3d_vector_field(2)
    field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
    field.scoping.ids = [1, 2]

    # field + operator output
    forwarded = ops.utility.forward_field(field)
    result = field + forwarded
    assert isinstance(result, ops.math.add)
    summed = result.outputs.field()
    assert summed.scoping.ids == [1, 2]
    assert np.allclose(summed.data, np.array(field.data) * 2.0)

    # field + python list
    result = field + [0.0, 1.0, 2.0]
    assert isinstance(result, ops.math.add)
    summed = result.outputs.field()
    assert len(summed) == 6
    assert summed.scoping.ids == [1, 2]
    assert np.allclose(
        summed.data, field.data + np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])
    )

    # field + scalar
    result = field + 1.0
    assert isinstance(result, ops.math.add)
    summed = result.outputs.field()
    assert summed.scoping.ids == [1, 2]
    assert np.allclose(summed.data, np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
def test_minus_operator_field():
    """field - operator / list / float must all produce an ops.math.minus.

    Uses isinstance() instead of ``type(x) == T`` (the Python anti-pattern),
    consistent with test_add_operator_field above; also fixes the stale
    ``fc - list`` comment copied from the fields-container test.
    """
    field = dpf.core.fields_factory.create_3d_vector_field(2)
    field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
    field.scoping.ids = [1, 2]
    # field - op
    forward = ops.utility.forward_field(field)
    add = field - forward
    assert isinstance(add, ops.math.minus)
    out = add.outputs.field()
    assert len(out) == 6
    assert out.scoping.ids == [1, 2]
    # subtracting the field from itself yields all zeros
    assert np.allclose(out.data, np.zeros((2, 3)))
    # field - list
    add = field - [0.0, 1.0, 2.0]
    assert isinstance(add, ops.math.minus)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    assert np.allclose(out.data, np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]]))
    # field - float
    add = field - 1.0
    assert isinstance(add, ops.math.minus)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    assert np.allclose(out.data, np.array([[-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]))
def test_dot_operator_field():
    """field * operator / field / list / float must all produce an
    ops.math.generalized_inner_product.

    Uses isinstance() instead of ``type(x) == T`` (the Python anti-pattern),
    consistent with test_add_operator_field above.
    """
    field = dpf.core.fields_factory.create_3d_vector_field(2)
    field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
    field.scoping.ids = [1, 2]
    # field * op
    forward = ops.utility.forward_field(field)
    add = field * forward
    assert isinstance(add, ops.math.generalized_inner_product)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    # per-entity dot products: [0,1,2].[0,1,2]=5, [3,4,5].[3,4,5]=50
    assert np.allclose(out.data, np.array([5.0, 50.0]))
    # field * field
    add = field * field
    assert isinstance(add, ops.math.generalized_inner_product)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    assert np.allclose(out.data, np.array([5.0, 50.0]))
    # field * list
    add = field * [0.0, 1.0, 2.0]
    assert isinstance(add, ops.math.generalized_inner_product)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    assert np.allclose(out.data, np.array([5.0, 14.0]))
    # field * float
    add = field * -1.0
    assert isinstance(add, ops.math.generalized_inner_product)
    out = add.outputs.field()
    assert out.scoping.ids == [1, 2]
    assert np.allclose(out.data, -field.data)
# Allow running a single test directly without pytest.
if __name__ == "__main__":
    test_get_set_data_local_field()
| [
"ansys.dpf.core.Model",
"ansys.dpf.core.Field",
"ansys.dpf.core.DataSources",
"numpy.array",
"pytest.fixture",
"ansys.dpf.core.fields_factory.create_3d_vector_field",
"ansys.dpf.core.Operator",
"numpy.arange",
"ansys.dpf.core.Dimensionality.scalar_dim",
"ansys.dpf.core.operators.logic.identical_fi... | [((230, 246), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (244, 246), False, 'import pytest\n'), ((298, 333), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (312, 333), False, 'from ansys import dpf\n'), ((457, 473), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {}), '()\n', (471, 473), False, 'from ansys import dpf\n'), ((565, 585), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (581, 585), True, 'import numpy as np\n'), ((600, 631), 'ansys.dpf.core.field_from_array', 'dpf.core.field_from_array', (['data'], {}), '(data)\n', (625, 631), False, 'from ansys import dpf\n'), ((643, 674), 'numpy.allclose', 'np.allclose', (['field_a.data', 'data'], {}), '(field_a.data, data)\n', (654, 674), True, 'import numpy as np\n'), ((732, 757), 'numpy.random.random', 'np.random.random', (['(10, 3)'], {}), '((10, 3))\n', (748, 757), True, 'import numpy as np\n'), ((772, 803), 'ansys.dpf.core.field_from_array', 'dpf.core.field_from_array', (['data'], {}), '(data)\n', (797, 803), False, 'from ansys import dpf\n'), ((815, 846), 'numpy.allclose', 'np.allclose', (['field_a.data', 'data'], {}), '(field_a.data, data)\n', (826, 846), True, 'import numpy as np\n'), ((892, 908), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {}), '()\n', (906, 908), False, 'from ansys import dpf\n'), ((922, 958), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'field': 'field._message'}), '(field=field._message)\n', (936, 958), False, 'from ansys import dpf\n'), ((1052, 1068), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {}), '()\n', (1066, 1068), False, 'from ansys import dpf\n'), ((1083, 1101), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {}), '()\n', (1099, 1101), False, 'from ansys import dpf\n'), ((1266, 1326), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(20)', 'nature': 'dpf.core.natures.scalar'}), '(nentities=20, 
nature=dpf.core.natures.scalar)\n', (1280, 1326), False, 'from ansys import dpf\n'), ((1341, 1359), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {}), '()\n', (1357, 1359), False, 'from ansys import dpf\n'), ((1554, 1583), 'numpy.allclose', 'np.allclose', (['field.data', 'data'], {}), '(field.data, data)\n', (1565, 1583), True, 'import numpy as np\n'), ((1635, 1695), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(20)', 'nature': 'dpf.core.natures.vector'}), '(nentities=20, nature=dpf.core.natures.vector)\n', (1649, 1695), False, 'from ansys import dpf\n'), ((1710, 1728), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {}), '()\n', (1726, 1728), False, 'from ansys import dpf\n'), ((1913, 1927), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1921, 1927), True, 'import numpy as np\n'), ((2044, 2073), 'numpy.allclose', 'np.allclose', (['field.data', 'data'], {}), '(field.data, data)\n', (2055, 2073), True, 'import numpy as np\n'), ((2118, 2178), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(20)', 'nature': 'dpf.core.natures.vector'}), '(nentities=20, nature=dpf.core.natures.vector)\n', (2132, 2178), False, 'from ansys import dpf\n'), ((2702, 2762), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(20)', 'nature': 'dpf.core.natures.vector'}), '(nentities=20, nature=dpf.core.natures.vector)\n', (2716, 2762), False, 'from ansys import dpf\n'), ((3972, 4028), 'ansys.dpf.core.Field', 'core.Field', ([], {'nentities': '(20)', 'nature': 'dpf.core.natures.scalar'}), '(nentities=20, nature=dpf.core.natures.scalar)\n', (3982, 4028), False, 'from ansys.dpf import core\n'), ((4043, 4057), 'ansys.dpf.core.Scoping', 'core.Scoping', ([], {}), '()\n', (4055, 4057), False, 'from ansys.dpf import core\n'), ((4252, 4281), 'numpy.allclose', 'np.allclose', (['field.data', 'data'], {}), '(field.data, data)\n', (4263, 4281), True, 'import numpy as np\n'), ((4320, 4380), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': 
'(20)', 'nature': 'dpf.core.natures.scalar'}), '(nentities=20, nature=dpf.core.natures.scalar)\n', (4334, 4380), False, 'from ansys import dpf\n'), ((4395, 4413), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {}), '()\n', (4411, 4413), False, 'from ansys import dpf\n'), ((4747, 4806), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(1)', 'nature': 'dpf.core.natures.scalar'}), '(nentities=1, nature=dpf.core.natures.scalar)\n', (4761, 4806), False, 'from ansys import dpf\n'), ((4821, 4839), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {}), '()\n', (4837, 4839), False, 'from ansys import dpf\n'), ((5200, 5218), 'numpy.empty', 'np.empty', (['(100, 6)'], {}), '((100, 6))\n', (5208, 5218), True, 'import numpy as np\n'), ((5227, 5258), 'ansys.dpf.core.field_from_array', 'dpf.core.field_from_array', (['data'], {}), '(data)\n', (5252, 5258), False, 'from ansys import dpf\n'), ((5363, 5385), 'ansys.dpf.core.DataSources', 'dpf.core.DataSources', ([], {}), '()\n', (5383, 5385), False, 'from ansys import dpf\n'), ((5452, 5474), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""U"""'], {}), "('U')\n", (5469, 5474), False, 'from ansys import dpf\n'), ((5739, 5761), 'ansys.dpf.core.DataSources', 'dpf.core.DataSources', ([], {}), '()\n', (5759, 5761), False, 'from ansys import dpf\n'), ((5828, 5850), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""U"""'], {}), "('U')\n", (5845, 5850), False, 'from ansys import dpf\n'), ((6477, 6513), 'ansys.dpf.core.Dimensionality.scalar_dim', 'dpf.core.Dimensionality.scalar_dim', ([], {}), '()\n', (6511, 6513), False, 'from ansys import dpf\n'), ((6660, 6696), 'ansys.dpf.core.Dimensionality.tensor_dim', 'dpf.core.Dimensionality.tensor_dim', ([], {}), '()\n', (6694, 6696), False, 'from ansys import dpf\n'), ((6849, 6888), 'ansys.dpf.core.Dimensionality.vector_3d_dim', 'dpf.core.Dimensionality.vector_3d_dim', ([], {}), '()\n', (6886, 6888), False, 'from ansys import dpf\n'), ((7035, 7072), 
'ansys.dpf.core.Dimensionality.vector_dim', 'dpf.core.Dimensionality.vector_dim', (['(4)'], {}), '(4)\n', (7069, 7072), False, 'from ansys import dpf\n'), ((7391, 7413), 'ansys.dpf.core.DataSources', 'dpf.core.DataSources', ([], {}), '()\n', (7411, 7413), False, 'from ansys import dpf\n'), ((7480, 7502), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""U"""'], {}), "('U')\n", (7497, 7502), False, 'from ansys import dpf\n'), ((7755, 7791), 'ansys.dpf.core.Dimensionality.scalar_dim', 'dpf.core.Dimensionality.scalar_dim', ([], {}), '()\n', (7789, 7791), False, 'from ansys import dpf\n'), ((8515, 8537), 'ansys.dpf.core.DataSources', 'dpf.core.DataSources', ([], {}), '()\n', (8535, 8537), False, 'from ansys import dpf\n'), ((8604, 8626), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""U"""'], {}), "('U')\n", (8621, 8626), False, 'from ansys import dpf\n'), ((8824, 8860), 'ansys.dpf.core.Dimensionality.scalar_dim', 'dpf.core.Dimensionality.scalar_dim', ([], {}), '()\n', (8858, 8860), False, 'from ansys import dpf\n'), ((9515, 9579), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(1)', 'location': '"""overall"""', 'nature': '"""vector"""'}), "(nentities=1, location='overall', nature='vector')\n", (9529, 9579), False, 'from ansys import dpf\n'), ((9717, 9762), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nentities': '(5)', 'location': '"""nodal"""'}), "(nentities=5, location='nodal')\n", (9731, 9762), False, 'from ansys import dpf\n'), ((9918, 9942), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""add"""'], {}), "('add')\n", (9935, 9942), False, 'from ansys import dpf\n'), ((10265, 10287), 'ansys.dpf.core.DataSources', 'dpf.core.DataSources', ([], {}), '()\n', (10285, 10287), False, 'from ansys import dpf\n'), ((10354, 10376), 'ansys.dpf.core.Operator', 'dpf.core.Operator', (['"""S"""'], {}), "('S')\n", (10371, 10376), False, 'from ansys import dpf\n'), ((10923, 10947), 'ansys.dpf.core.PropertyField', 'dpf.core.PropertyField', ([], {}), 
'()\n', (10945, 10947), False, 'from ansys import dpf\n'), ((11531, 11566), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (11545, 11566), False, 'from ansys import dpf\n'), ((11702, 11817), 'ansys.dpf.core.Field', 'dpf.core.Field', (['f.scoping.size'], {'nature': 'dpf.core.natures.symmatrix', 'location': 'dpf.core.locations.elemental_nodal'}), '(f.scoping.size, nature=dpf.core.natures.symmatrix, location=\n dpf.core.locations.elemental_nodal)\n', (11716, 11817), False, 'from ansys import dpf\n'), ((12796, 12831), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (12810, 12831), False, 'from ansys import dpf\n'), ((12980, 13015), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (12994, 13015), False, 'from ansys import dpf\n'), ((13220, 13257), 'ansys.dpf.core.Model', 'dpf.core.Model', (['velocity_acceleration'], {}), '(velocity_acceleration)\n', (13234, 13257), False, 'from ansys import dpf\n'), ((13461, 13496), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (13475, 13496), False, 'from ansys import dpf\n'), ((13740, 13756), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {}), '()\n', (13754, 13756), False, 'from ansys import dpf\n'), ((13770, 13797), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'field': 'field'}), '(field=field)\n', (13784, 13797), False, 'from ansys import dpf\n'), ((13936, 13953), 'ansys.dpf.core.FieldDefinition', 'FieldDefinition', ([], {}), '()\n', (13951, 13953), False, 'from ansys.dpf.core import FieldDefinition\n'), ((14205, 14231), 'ansys.dpf.core.TimeFreqSupport', 'dpf.core.TimeFreqSupport', ([], {}), '()\n', (14229, 14231), False, 'from ansys import dpf\n'), ((14255, 14345), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nature': 'dpf.core.natures.scalar', 'location': 'dpf.core.locations.time_freq'}), 
'(nature=dpf.core.natures.scalar, location=dpf.core.locations.\n time_freq)\n', (14269, 14345), False, 'from ansys import dpf\n'), ((14535, 14561), 'ansys.dpf.core.Model', 'dpf.core.Model', (['simple_bar'], {}), '(simple_bar)\n', (14549, 14561), False, 'from ansys import dpf\n'), ((14835, 14909), 'numpy.allclose', 'np.allclose', (['tfq.time_frequencies.data', 'tfq_to_check.time_frequencies.data'], {}), '(tfq.time_frequencies.data, tfq_to_check.time_frequencies.data)\n', (14846, 14909), True, 'import numpy as np\n'), ((14962, 14985), 'ansys.dpf.core.MeshedRegion', 'dpf.core.MeshedRegion', ([], {}), '()\n', (14983, 14985), False, 'from ansys import dpf\n'), ((15043, 15069), 'ansys.dpf.core.Model', 'dpf.core.Model', (['simple_bar'], {}), '(simple_bar)\n', (15057, 15069), False, 'from ansys import dpf\n'), ((15716, 15776), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (15762, 15776), False, 'from ansys import dpf\n'), ((15967, 16027), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (16013, 16027), False, 'from ansys import dpf\n'), ((16134, 16178), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (16145, 16178), True, 'import numpy as np\n'), ((16190, 16248), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (16201, 16248), True, 'import numpy as np\n'), ((16392, 16502), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (16438, 16502), False, 'from ansys import dpf\n'), ((16700, 16760), 
'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (16746, 16760), False, 'from ansys import dpf\n'), ((16898, 16942), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (16909, 16942), True, 'import numpy as np\n'), ((16954, 17012), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (16965, 17012), True, 'import numpy as np\n'), ((17112, 17222), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (17158, 17222), False, 'from ansys import dpf\n'), ((17448, 17492), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (17459, 17492), True, 'import numpy as np\n'), ((17504, 17562), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (17515, 17562), True, 'import numpy as np\n'), ((17707, 17767), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (17753, 17767), False, 'from ansys import dpf\n'), ((17968, 18028), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (18014, 18028), False, 'from ansys import dpf\n'), ((18145, 18189), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (18156, 18189), True, 'import numpy as np\n'), ((18201, 18259), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 
'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (18212, 18259), True, 'import numpy as np\n'), ((18409, 18519), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (18455, 18519), False, 'from ansys import dpf\n'), ((18757, 18817), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {}), '(num_entities)\n', (18803, 18817), False, 'from ansys import dpf\n'), ((18987, 19031), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (18998, 19031), True, 'import numpy as np\n'), ((19043, 19101), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (19054, 19101), True, 'import numpy as np\n'), ((19201, 19311), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (19247, 19311), False, 'from ansys import dpf\n'), ((19545, 19589), 'numpy.allclose', 'np.allclose', (['field.data', 'field_to_local.data'], {}), '(field.data, field_to_local.data)\n', (19556, 19589), True, 'import numpy as np\n'), ((19601, 19659), 'numpy.allclose', 'np.allclose', (['field.scoping.ids', 'field_to_local.scoping.ids'], {}), '(field.scoping.ids, field_to_local.scoping.ids)\n', (19612, 19659), True, 'import numpy as np\n'), ((19801, 19911), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n 
core.locations.elemental_nodal)\n', (19847, 19911), False, 'from ansys import dpf\n'), ((20775, 20885), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (20821, 20885), False, 'from ansys import dpf\n'), ((22068, 22178), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (22114, 22178), False, 'from ansys import dpf\n'), ((22243, 22331), 'ansys.dpf.core.fields_container_factory.over_time_freq_fields_container', 'dpf.core.fields_container_factory.over_time_freq_fields_container', (['[field_to_local]'], {}), '([\n field_to_local])\n', (22308, 22331), False, 'from ansys import dpf\n'), ((22552, 22662), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['num_entities'], {'location': 'dpf.core.locations.elemental_nodal'}), '(num_entities, location=dpf.\n core.locations.elemental_nodal)\n', (22598, 22662), False, 'from ansys import dpf\n'), ((22928, 23027), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {'location': 'dpf.core.locations.elemental_nodal'}), '(2, location=dpf.core.\n locations.elemental_nodal)\n', (22974, 23027), False, 'from ansys import dpf\n'), ((23219, 23287), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23230, 23287), True, 'import numpy as np\n'), ((23466, 23534), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 
0.3]])\n', (23477, 23534), True, 'import numpy as np\n'), ((23727, 23795), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23738, 23795), True, 'import numpy as np\n'), ((23872, 23971), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {'location': 'dpf.core.locations.elemental_nodal'}), '(2, location=dpf.core.\n locations.elemental_nodal)\n', (23918, 23971), False, 'from ansys import dpf\n'), ((24617, 24724), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, \n 0.2, 0.3], [0.1, 0.2, 0.4]])\n', (24628, 24724), True, 'import numpy as np\n'), ((24754, 24803), 'numpy.allclose', 'np.allclose', (['field_to_local._data_pointer', '[0, 6]'], {}), '(field_to_local._data_pointer, [0, 6])\n', (24765, 24803), True, 'import numpy as np\n'), ((25649, 25756), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, \n 0.2, 0.3], [0.1, 0.2, 0.4]])\n', (25660, 25756), True, 'import numpy as np\n'), ((25786, 25835), 'numpy.allclose', 'np.allclose', (['field_to_local._data_pointer', '[0, 6]'], {}), '(field_to_local._data_pointer, [0, 6])\n', (25797, 25835), True, 'import numpy as np\n'), ((26721, 26828), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, \n 0.2, 0.3], [0.1, 0.2, 0.4]])\n', (26732, 26828), True, 'import numpy as np\n'), ((26858, 26907), 'numpy.allclose', 'np.allclose', (['field_to_local._data_pointer', '[0, 6]'], {}), '(field_to_local._data_pointer, [0, 
6])\n', (26869, 26907), True, 'import numpy as np\n'), ((27187, 27286), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {'location': 'dpf.core.locations.elemental_nodal'}), '(2, location=dpf.core.\n locations.elemental_nodal)\n', (27233, 27286), False, 'from ansys import dpf\n'), ((27626, 27694), 'numpy.allclose', 'np.allclose', (['field_to_local.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (27637, 27694), True, 'import numpy as np\n'), ((27706, 27753), 'numpy.allclose', 'np.allclose', (['field_to_local.scoping.ids', '[3, 4]'], {}), '(field_to_local.scoping.ids, [3, 4])\n', (27717, 27753), True, 'import numpy as np\n'), ((27806, 27857), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(100)'], {}), '(100)\n', (27852, 27857), False, 'from ansys import dpf\n'), ((27927, 27965), 'numpy.allclose', 'np.allclose', (['field_to_local.data', 'data'], {}), '(field_to_local.data, data)\n', (27938, 27965), True, 'import numpy as np\n'), ((28098, 28149), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(100)'], {}), '(100)\n', (28144, 28149), False, 'from ansys import dpf\n'), ((28232, 28269), 'numpy.allclose', 'np.allclose', (['field_to_local.data', 'arr'], {}), '(field_to_local.data, arr)\n', (28243, 28269), True, 'import numpy as np\n'), ((28390, 28425), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (28404, 28425), False, 'from ansys import dpf\n'), ((28608, 28635), 'numpy.allclose', 'np.allclose', (['data', 'new_data'], {}), '(data, new_data)\n', (28619, 28635), True, 'import numpy as np\n'), ((28892, 28931), 'numpy.allclose', 'np.allclose', (['new_modif_data', 'modif_data'], {}), '(new_modif_data, modif_data)\n', (28903, 28931), True, 'import numpy as np\n'), ((28994, 
29040), 'ansys.dpf.core.Field', 'dpf.core.Field', ([], {'nature': 'dpf.core.natures.scalar'}), '(nature=dpf.core.natures.scalar)\n', (29008, 29040), False, 'from ansys import dpf\n'), ((29131, 29160), 'numpy.allclose', 'np.allclose', (['data_check', 'data'], {}), '(data_check, data)\n', (29142, 29160), True, 'import numpy as np\n'), ((29464, 29503), 'numpy.allclose', 'np.allclose', (['new_modif_data', 'modif_data'], {}), '(new_modif_data, modif_data)\n', (29475, 29503), True, 'import numpy as np\n'), ((29546, 29597), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(100)'], {}), '(100)\n', (29592, 29597), False, 'from ansys import dpf\n'), ((29700, 29754), 'ansys.dpf.core.operators.logic.identical_fields', 'dpf.core.operators.logic.identical_fields', (['field', 'copy'], {}), '(field, copy)\n', (29741, 29754), False, 'from ansys import dpf\n'), ((29901, 29936), 'ansys.dpf.core.Model', 'dpf.core.Model', (['allkindofcomplexity'], {}), '(allkindofcomplexity)\n', (29915, 29936), False, 'from ansys import dpf\n'), ((30062, 30116), 'ansys.dpf.core.operators.logic.identical_fields', 'dpf.core.operators.logic.identical_fields', (['field', 'copy'], {}), '(field, copy)\n', (30103, 30116), False, 'from ansys import dpf\n'), ((30484, 30570), 'numpy.allclose', 'np.allclose', (['copy.nodes.coordinates_field.data', 'mesh.nodes.coordinates_field.data'], {}), '(copy.nodes.coordinates_field.data, mesh.nodes.coordinates_field\n .data)\n', (30495, 30570), True, 'import numpy as np\n'), ((30591, 30687), 'numpy.allclose', 'np.allclose', (['copy.elements.element_types_field.data', 'mesh.elements.element_types_field.data'], {}), '(copy.elements.element_types_field.data, mesh.elements.\n element_types_field.data)\n', (30602, 30687), True, 'import numpy as np\n'), ((30708, 30806), 'numpy.allclose', 'np.allclose', (['copy.elements.connectivities_field.data', 'mesh.elements.connectivities_field.data'], {}), 
'(copy.elements.connectivities_field.data, mesh.elements.\n connectivities_field.data)\n', (30719, 30806), True, 'import numpy as np\n'), ((30828, 30928), 'numpy.allclose', 'np.allclose', (['copy.nodes.coordinates_field.scoping.ids', 'mesh.nodes.coordinates_field.scoping.ids'], {}), '(copy.nodes.coordinates_field.scoping.ids, mesh.nodes.\n coordinates_field.scoping.ids)\n', (30839, 30928), True, 'import numpy as np\n'), ((30958, 31068), 'numpy.allclose', 'np.allclose', (['copy.elements.element_types_field.scoping.ids', 'mesh.elements.element_types_field.scoping.ids'], {}), '(copy.elements.element_types_field.scoping.ids, mesh.elements.\n element_types_field.scoping.ids)\n', (30969, 31068), True, 'import numpy as np\n'), ((31098, 31210), 'numpy.allclose', 'np.allclose', (['copy.elements.connectivities_field.scoping.ids', 'mesh.elements.connectivities_field.scoping.ids'], {}), '(copy.elements.connectivities_field.scoping.ids, mesh.elements.\n connectivities_field.scoping.ids)\n', (31109, 31210), True, 'import numpy as np\n'), ((31302, 31339), 'ansys.dpf.core.Model', 'dpf.core.Model', (['velocity_acceleration'], {}), '(velocity_acceleration)\n', (31316, 31339), False, 'from ansys import dpf\n'), ((31412, 31457), 'ansys.dpf.core.operators.min_max.min_max_fc', 'dpf.core.operators.min_max.min_max_fc', (['stress'], {}), '(stress)\n', (31449, 31457), False, 'from ansys import dpf\n'), ((31538, 31592), 'ansys.dpf.core.operators.logic.identical_fields', 'dpf.core.operators.logic.identical_fields', (['field', 'copy'], {}), '(field, copy)\n', (31579, 31592), False, 'from ansys import dpf\n'), ((31706, 31771), 'numpy.allclose', 'np.allclose', (['tf.time_frequencies.data', 'copy.time_frequencies.data'], {}), '(tf.time_frequencies.data, copy.time_frequencies.data)\n', (31717, 31771), True, 'import numpy as np\n'), ((31905, 31991), 'ansys.dpf.core.fields_factory.create_vector_field', 'dpf.core.fields_factory.create_vector_field', (['(100)', '(6)', 'dpf.core.locations.elemental'], 
{}), '(100, 6, dpf.core.locations.\n elemental)\n', (31948, 31991), False, 'from ansys import dpf\n'), ((32217, 32266), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {}), '(2)\n', (32263, 32266), False, 'from ansys import dpf\n'), ((32376, 32408), 'ansys.dpf.core.operators.utility.forward_field', 'ops.utility.forward_field', (['field'], {}), '(field)\n', (32401, 32408), True, 'from ansys.dpf.core import operators as ops\n'), ((33174, 33223), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {}), '(2)\n', (33220, 33223), False, 'from ansys import dpf\n'), ((33333, 33365), 'ansys.dpf.core.operators.utility.forward_field', 'ops.utility.forward_field', (['field'], {}), '(field)\n', (33358, 33365), True, 'from ansys.dpf.core import operators as ops\n'), ((34087, 34136), 'ansys.dpf.core.fields_factory.create_3d_vector_field', 'dpf.core.fields_factory.create_3d_vector_field', (['(2)'], {}), '(2)\n', (34133, 34136), False, 'from ansys import dpf\n'), ((34248, 34280), 'ansys.dpf.core.operators.utility.forward_field', 'ops.utility.forward_field', (['field'], {}), '(field)\n', (34273, 34280), True, 'from ansys.dpf.core import operators as ops\n'), ((35133, 35167), 'numpy.allclose', 'np.allclose', (['out.data', '(-field.data)'], {}), '(out.data, -field.data)\n', (35144, 35167), True, 'import numpy as np\n'), ((2615, 2643), 'numpy.allclose', 'np.allclose', (['dataout', 'datain'], {}), '(dataout, datain)\n', (2626, 2643), True, 'import numpy as np\n'), ((2902, 2916), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2910, 2916), True, 'import numpy as np\n'), ((3265, 3293), 'numpy.allclose', 'np.allclose', (['dataout', 'datain'], {}), '(dataout, datain)\n', (3276, 3293), True, 'import numpy as np\n'), ((3366, 3394), 'numpy.allclose', 'np.allclose', (['dataout', 'datain'], {}), '(dataout, datain)\n', (3377, 3394), True, 'import numpy as 
np\n'), ((10122, 10195), 'numpy.allclose', 'np.allclose', (['data_added[i]', '[i * 3.0 + 1.0, i * 3.0 + 3.0, i * 3.0 + 5.0]'], {}), '(data_added[i], [i * 3.0 + 1.0, i * 3.0 + 3.0, i * 3.0 + 5.0])\n', (10133, 10195), True, 'import numpy as np\n'), ((13821, 13845), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (13834, 13845), False, 'import pytest\n'), ((13995, 14019), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (14008, 14019), False, 'import pytest\n'), ((23151, 23206), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23162, 23206), True, 'import numpy as np\n'), ((23399, 23454), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23410, 23454), True, 'import numpy as np\n'), ((23600, 23644), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23608, 23644), True, 'import numpy as np\n'), ((23660, 23715), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (23671, 23715), True, 'import numpy as np\n'), ((24193, 24287), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [\n 0.1, 0.2, 0.4]])\n', (24204, 24287), True, 'import numpy as np\n'), ((24320, 24356), 'numpy.allclose', 'np.allclose', (['f._data_pointer', '[0, 6]'], {}), '(f._data_pointer, [0, 6])\n', (24331, 24356), True, 'import numpy as np\n'), ((25225, 25319), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [\n 0.1, 0.2, 0.4]])\n', (25236, 25319), True, 'import 
numpy as np\n'), ((25352, 25388), 'numpy.allclose', 'np.allclose', (['f._data_pointer', '[0, 6]'], {}), '(f._data_pointer, [0, 6])\n', (25363, 25388), True, 'import numpy as np\n'), ((26117, 26195), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])\n', (26125, 26195), True, 'import numpy as np\n'), ((26297, 26391), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [\n 0.1, 0.2, 0.4]])\n', (26308, 26391), True, 'import numpy as np\n'), ((26424, 26460), 'numpy.allclose', 'np.allclose', (['f._data_pointer', '[0, 6]'], {}), '(f._data_pointer, [0, 6])\n', (26435, 26460), True, 'import numpy as np\n'), ((27415, 27443), 'ansys.dpf.core.Scoping', 'dpf.core.Scoping', ([], {'ids': '[3, 4]'}), '(ids=[3, 4])\n', (27431, 27443), False, 'from ansys import dpf\n'), ((27459, 27514), 'numpy.allclose', 'np.allclose', (['f.data', '[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]'], {}), '(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n', (27470, 27514), True, 'import numpy as np\n'), ((27530, 27564), 'numpy.allclose', 'np.allclose', (['f.scoping_ids', '[3, 4]'], {}), '(f.scoping_ids, [3, 4])\n', (27541, 27564), True, 'import numpy as np\n'), ((27580, 27614), 'numpy.allclose', 'np.allclose', (['f.scoping.ids', '[3, 4]'], {}), '(f.scoping.ids, [3, 4])\n', (27591, 27614), True, 'import numpy as np\n'), ((33081, 33125), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n', (33089, 33125), True, 'import numpy as np\n'), ((33556, 33572), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (33564, 33572), True, 'import numpy as np\n'), ((33764, 33808), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]]'], {}), '([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]])\n', (33772, 33808), 
True, 'import numpy as np\n'), ((33995, 34040), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]'], {}), '([[-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]])\n', (34003, 34040), True, 'import numpy as np\n'), ((34466, 34487), 'numpy.array', 'np.array', (['[5.0, 50.0]'], {}), '([5.0, 50.0])\n', (34474, 34487), True, 'import numpy as np\n'), ((34693, 34714), 'numpy.array', 'np.array', (['[5.0, 50.0]'], {}), '([5.0, 50.0])\n', (34701, 34714), True, 'import numpy as np\n'), ((34929, 34950), 'numpy.array', 'np.array', (['[5.0, 14.0]'], {}), '([5.0, 14.0])\n', (34937, 34950), True, 'import numpy as np\n'), ((18091, 18128), 'numpy.array', 'np.array', (['[0.1 * i, 0.2 * i, 0.3 * i]'], {}), '([0.1 * i, 0.2 * i, 0.3 * i])\n', (18099, 18128), True, 'import numpy as np\n'), ((18893, 18961), 'numpy.array', 'np.array', (['[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]'], {}), '([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]])\n', (18901, 18961), True, 'import numpy as np\n'), ((28160, 28174), 'numpy.arange', 'np.arange', (['(300)'], {}), '(300)\n', (28169, 28174), True, 'import numpy as np\n'), ((29608, 29622), 'numpy.arange', 'np.arange', (['(300)'], {}), '(300)\n', (29617, 29622), True, 'import numpy as np\n'), ((31997, 32011), 'numpy.arange', 'np.arange', (['(600)'], {}), '(600)\n', (32006, 32011), True, 'import numpy as np\n'), ((32576, 32596), 'numpy.array', 'np.array', (['field.data'], {}), '(field.data)\n', (32584, 32596), True, 'import numpy as np\n'), ((32846, 32890), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])\n', (32854, 32890), True, 'import numpy as np\n'), ((17881, 17918), 'numpy.array', 'np.array', (['[0.1 * i, 0.2 * i, 0.3 * i]'], {}), '([0.1 * i, 0.2 * i, 0.3 * i])\n', (17889, 17918), True, 'import numpy as np\n'), ((18659, 18727), 'numpy.array', 'np.array', (['[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]'], {}), '([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 
0.2 * i, 0.3 * i]])\n', (18667, 18727), True, 'import numpy as np\n'), ((19451, 19515), 'numpy.array', 'np.array', (['[0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i]'], {}), '([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i])\n', (19459, 19515), True, 'import numpy as np\n'), ((20034, 20073), 'numpy.array', 'np.array', (['[[0.1 * i, 0.2 * i, 0.3 * i]]'], {}), '([[0.1 * i, 0.2 * i, 0.3 * i]])\n', (20042, 20073), True, 'import numpy as np\n'), ((21025, 21093), 'numpy.array', 'np.array', (['[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]'], {}), '([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]])\n', (21033, 21093), True, 'import numpy as np\n')] |
import sys
import importlib
import argparse
import numpy as np
import random
import cmath
import math
from dft import dft, inv_dft
from fft import fft, inv_fft
from rsa import *
#arguments for dft, fft, inverse dft and inverse fft
# Random test vectors for the transform tests: two independent sets of
# integer sequences with power-of-two lengths 4, 8, ..., 1024.
parameters1 = []
parameters2 = []
i=4
while i<2048:
    param1 = list(np.random.randint(low = 0, high = 1000, size = i))
    parameters1.append(param1)
    param2 = list(np.random.randint(low = 0, high = 1000, size = i))
    parameters2.append(param2)
    i=i*2
#arguments for RSA encryption and decryption
# VC[0] is a plain integer message block; VC[1] is its expected complex
# round-trip image (values are ~0 or ~2 up to floating-point noise).
VC = [[5,8,12,56], [0j, (2+1.1102230246251565e-16j), (1.4997597826618576e-32-2.4492935982947064e-16j), (2+4.440892098500626e-16j)]]
bits = []
# Candidate key sizes 128..1024 bits.
# NOTE(review): `bits` is never referenced in this script -- confirm whether
# it feeds the rsa helpers elsewhere before removing.
for i in range(7, 11):
    bits.append(2**i)
def test_case():
    """Check dft() and fft() against numpy's reference FFT.

    Each vector in ``parameters1`` is transformed by the project
    implementation and compared (within floating-point tolerance) to
    ``np.fft.fft``.  A mismatch or exception reports the failing case.
    """
    try:
        for test in range(len(parameters1)):
            VA = dft(parameters1[test])
            # Fix: np.allclose returns a bool; the original discarded it, so
            # wrong transform values could never make the test fail.
            assert np.allclose(VA, np.fft.fft(parameters1[test]))
            print("Test Case", (test+1), "for the function DFT passed")
    except Exception:
        print("Test Case", (test+1), "for the function DFT failed")
    try:
        for test in range(len(parameters1)):
            VA = fft(parameters1[test])
            assert np.allclose(VA, np.fft.fft(parameters1[test]))
            print("Test Case", (test+1), "for the function FFT passed")
    except Exception:
        print("Test Case", (test+1), "for the function FFT failed")
def test_case2():
    """Check that RSA encryption followed by decryption is the identity.

    Each matrix in ``VC`` is passed through ``rsaHelper`` and the result is
    compared element-wise with the input.
    """
    try:
        for test in range(len(VC)):
            mat = VC[test]
            mat2 = rsaHelper(mat)
            mat = np.array(mat)
            mat2 = np.array(mat2)
            # Fix: np.array_equiv returns a bool; the original discarded it,
            # so a failed round-trip could never make the test fail.
            assert np.array_equiv(mat, mat2)
            print("Test Case", (test+1), "for RSA encryption and decryption passed")
    except Exception:
        print("Test Case", (test+1), "for RSA encryption and decryption failed")
if __name__ == "__main__":
    # Run the transform suite, then the RSA round-trip suite.
    test_case()
    test_case2()
| [
"numpy.array_equiv",
"numpy.fft.fft",
"numpy.array",
"numpy.random.randint",
"dft.dft",
"fft.fft"
] | [((320, 363), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'i'}), '(low=0, high=1000, size=i)\n', (337, 363), True, 'import numpy as np\n'), ((422, 465), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'i'}), '(low=0, high=1000, size=i)\n', (439, 465), True, 'import numpy as np\n'), ((860, 882), 'dft.dft', 'dft', (['parameters1[test]'], {}), '(parameters1[test])\n', (863, 882), False, 'from dft import dft, inv_dft\n'), ((1174, 1196), 'fft.fft', 'fft', (['parameters1[test]'], {}), '(parameters1[test])\n', (1177, 1196), False, 'from fft import fft, inv_fft\n'), ((1578, 1591), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (1586, 1591), True, 'import numpy as np\n'), ((1612, 1626), 'numpy.array', 'np.array', (['mat2'], {}), '(mat2)\n', (1620, 1626), True, 'import numpy as np\n'), ((1640, 1665), 'numpy.array_equiv', 'np.array_equiv', (['mat', 'mat2'], {}), '(mat, mat2)\n', (1654, 1665), True, 'import numpy as np\n'), ((912, 941), 'numpy.fft.fft', 'np.fft.fft', (['parameters1[test]'], {}), '(parameters1[test])\n', (922, 941), True, 'import numpy as np\n'), ((1226, 1255), 'numpy.fft.fft', 'np.fft.fft', (['parameters1[test]'], {}), '(parameters1[test])\n', (1236, 1255), True, 'import numpy as np\n')] |
"""Tests for io.py.
"""
import os
import pytest
import tempfile
import unittest.mock as mock
import numpy as np
import pandas as pd
import cytoxnet.dataprep.io
import cytoxnet.data
def test_load_data():
    """Exercise load_data: explicit paths, nan handling, package data, bad names."""
    # Resolve the example CSV relative to this test module.
    here = os.path.dirname(os.path.realpath(__file__))
    csv_path = os.path.join(here, '..', 'data', 'minimum_data_example.csv')
    # Selecting columns without nan handling keeps every row.
    loaded = cytoxnet.dataprep.io.load_data(csv_path, cols=['smiles'])
    assert len(loaded.columns) == 1,\
        'Correct no. of columns was not loaded'
    assert len(loaded) == 3,\
        'Wrong number of rows loaded'
    # Rows with nan in the id columns are discarded when nans='drop'.
    loaded = cytoxnet.dataprep.io.load_data(
        csv_path, id_cols=['smiles'], nans='drop')
    assert len(loaded) == 2,\
        'Nans in the specified id columns were not dropped'
    # A bare name should be resolved against the package data directory.
    datapath = cytoxnet.data.__path__._path[0]
    with mock.patch('os.listdir',
                    return_value=['myfile1.csv', 'myfile2.csv']
                    ) as mocked_os, \
            mock.patch('cytoxnet.dataprep.io.pd') as mocked_pandas:
        cytoxnet.dataprep.io.load_data('myfile2')
        mocked_pandas.read_csv.assert_called_with(
            datapath + '/myfile2.csv',
            index_col=0
        )
    assert mocked_os.called, 'Did not list directory.'
    # Unknown names surface as FileNotFoundError.
    with pytest.raises(FileNotFoundError):
        cytoxnet.dataprep.io.load_data('astring')
    return
@mock.patch('cytoxnet.dataprep.io.pd')
@mock.patch('cytoxnet.dataprep.io.os')
def test_create_compound_codex(mocked_os, mocked_pandas):
    """Initialization of compounds codex.

    Should create empty file at specified location containing the requested
    id_col and featurizers.
    """
    # Decorators apply bottom-up: mocked_os patches io.os, mocked_pandas io.pd.
    # Case 1: target directory absent -> makedirs is called and the codex
    # starts as an empty frame with only the id column.
    mocked_os.path.exists.return_value = False
    cytoxnet.dataprep.io.create_compound_codex(db_path='./database',
                                               id_col='smiles')
    mocked_os.makedirs.assert_called_with('./database')
    mocked_pandas.DataFrame.assert_called_with(
        columns=['smiles']
    )
    mocked_pandas.DataFrame().to_csv.assert_called_with(
        './database/compounds.csv'
    )
    mocked_os.reset_mock()
    # Case 2: directory already exists -> no makedirs; requested featurizer
    # names become additional columns of the empty codex.
    mocked_os.path.exists.return_value = True
    cytoxnet.dataprep.io.create_compound_codex(
        db_path='./database',
        id_col='smiles',
        featurizers=['CircularFingerprint']
    )
    assert not mocked_os.makedirs.called,\
        "Should not have made a dir"
    mocked_pandas.DataFrame.assert_called_with(
        columns=['smiles', 'CircularFingerprint']
    )
    return
def test_add_datasets(tmpdir):
    """These should add non duplicate smiles to the compounds list.

    Added datasets should also be assigned keys and added to the dataset.
    """
    # NOTE(review): the pytest `tmpdir` fixture is unused; the test creates
    # its own TemporaryDirectory below -- confirm the fixture can be dropped.
    with tempfile.TemporaryDirectory() as tempdir:
        dir_path = os.path.dirname(os.path.realpath(__file__))
        filename = os.path.join(
            dir_path, '..', 'data', 'minimum_data_example.csv')
        df = cytoxnet.dataprep.io.load_data(filename, id_cols=['smiles'],
                                            nans='drop')
        # test new data computing old features
        cytoxnet.dataprep.io.create_compound_codex(
            db_path=tempdir + '/database',
            id_col='smiles',
            featurizers=['CircularFingerprint']
        )
        cytoxnet.dataprep.io.add_datasets([df],
                                          ['mydata'],
                                          id_col='smiles',
                                          db_path=tempdir + '/database')
        # Both unique smiles should land in compounds.csv with their
        # CircularFingerprint features computed (no empty cells).
        subject = pd.read_csv(tempdir + '/database/compounds.csv', index_col=0)
        assert len(subject) == 2,\
            'Not all smiles were added'
        assert not subject['CircularFingerprint'].isnull().any(),\
            'Did not compute features'
        subject = pd.read_csv(tempdir + '/database/mydata.csv', index_col=0)
        assert np.array_equal(subject['foreign_key'].values, [0, 1]),\
            'foreign keys not assigned properly'
        # test new data and new feature
        filename = os.path.join(
            dir_path, '..', 'data', 'minimum_data_example2.csv')
        df = cytoxnet.dataprep.io.load_data(filename, id_cols=['smiles'],
                                            nans='drop')
        cytoxnet.dataprep.io.add_datasets([df],
                                          ['mydata2'],
                                          id_col='smiles',
                                          db_path=tempdir + '/database',
                                          new_featurizers=['RDKitDescriptors'])
        # this addition has a duplicate smiles, so should only add 1 to
        # compounds
        subject = pd.read_csv(tempdir + '/database/compounds.csv', index_col=0)
        assert len(subject) == 3,\
            'Smiles not properly added - should have only added 1'
        assert not subject['RDKitDescriptors'].isnull().any(),\
            'Did not compute features'
        subject = pd.read_csv(tempdir + '/database/mydata2.csv', index_col=0)
        # The duplicate maps to existing key 1; the new smiles gets key 2.
        assert np.array_equal(subject['foreign_key'].values, [2, 1]),\
            'foreign keys not assigned properly'
        # add package data
        # A bare string dataset name must be routed through load_data
        # (package-data lookup) before being added.
        with mock.patch(
            'cytoxnet.dataprep.io.load_data',
            return_value=pd.DataFrame({'smiles': ['C', 'O']})
        ) as mocked_load_data:
            cytoxnet.dataprep.io.add_datasets(['lunghini_algea_EC50'],
                                              ['mydata3'],
                                              id_col='smiles',
                                              db_path=tempdir + '/database')
            assert mocked_load_data.called,\
                'Load data was not called for the string'
    return
| [
"tempfile.TemporaryDirectory",
"pandas.read_csv",
"os.path.join",
"os.path.realpath",
"pytest.raises",
"numpy.array_equal",
"pandas.DataFrame",
"unittest.mock.patch"
] | [((1641, 1678), 'unittest.mock.patch', 'mock.patch', (['"""cytoxnet.dataprep.io.pd"""'], {}), "('cytoxnet.dataprep.io.pd')\n", (1651, 1678), True, 'import unittest.mock as mock\n'), ((1680, 1717), 'unittest.mock.patch', 'mock.patch', (['"""cytoxnet.dataprep.io.os"""'], {}), "('cytoxnet.dataprep.io.os')\n", (1690, 1717), True, 'import unittest.mock as mock\n'), ((444, 508), 'os.path.join', 'os.path.join', (['dir_path', '""".."""', '"""data"""', '"""minimum_data_example.csv"""'], {}), "(dir_path, '..', 'data', 'minimum_data_example.csv')\n", (456, 508), False, 'import os\n'), ((401, 427), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (417, 427), False, 'import os\n'), ((1061, 1130), 'unittest.mock.patch', 'mock.patch', (['"""os.listdir"""'], {'return_value': "['myfile1.csv', 'myfile2.csv']"}), "('os.listdir', return_value=['myfile1.csv', 'myfile2.csv'])\n", (1071, 1130), True, 'import unittest.mock as mock\n'), ((1542, 1574), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (1555, 1574), False, 'import pytest\n'), ((2993, 3022), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3020, 3022), False, 'import tempfile\n'), ((3117, 3181), 'os.path.join', 'os.path.join', (['dir_path', '""".."""', '"""data"""', '"""minimum_data_example.csv"""'], {}), "(dir_path, '..', 'data', 'minimum_data_example.csv')\n", (3129, 3181), False, 'import os\n'), ((3808, 3869), 'pandas.read_csv', 'pd.read_csv', (["(tempdir + '/database/compounds.csv')"], {'index_col': '(0)'}), "(tempdir + '/database/compounds.csv', index_col=0)\n", (3819, 3869), True, 'import pandas as pd\n'), ((4069, 4127), 'pandas.read_csv', 'pd.read_csv', (["(tempdir + '/database/mydata.csv')"], {'index_col': '(0)'}), "(tempdir + '/database/mydata.csv', index_col=0)\n", (4080, 4127), True, 'import pandas as pd\n'), ((4143, 4196), 'numpy.array_equal', 'np.array_equal', (["subject['foreign_key'].values", '[0, 1]'], {}), 
"(subject['foreign_key'].values, [0, 1])\n", (4157, 4196), True, 'import numpy as np\n'), ((4308, 4373), 'os.path.join', 'os.path.join', (['dir_path', '""".."""', '"""data"""', '"""minimum_data_example2.csv"""'], {}), "(dir_path, '..', 'data', 'minimum_data_example2.csv')\n", (4320, 4373), False, 'import os\n'), ((4943, 5004), 'pandas.read_csv', 'pd.read_csv', (["(tempdir + '/database/compounds.csv')"], {'index_col': '(0)'}), "(tempdir + '/database/compounds.csv', index_col=0)\n", (4954, 5004), True, 'import pandas as pd\n'), ((5228, 5287), 'pandas.read_csv', 'pd.read_csv', (["(tempdir + '/database/mydata2.csv')"], {'index_col': '(0)'}), "(tempdir + '/database/mydata2.csv', index_col=0)\n", (5239, 5287), True, 'import pandas as pd\n'), ((5303, 5356), 'numpy.array_equal', 'np.array_equal', (["subject['foreign_key'].values", '[2, 1]'], {}), "(subject['foreign_key'].values, [2, 1])\n", (5317, 5356), True, 'import numpy as np\n'), ((1199, 1236), 'unittest.mock.patch', 'mock.patch', (['"""cytoxnet.dataprep.io.pd"""'], {}), "('cytoxnet.dataprep.io.pd')\n", (1209, 1236), True, 'import unittest.mock as mock\n'), ((3070, 3096), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3086, 3096), False, 'import os\n'), ((5532, 5568), 'pandas.DataFrame', 'pd.DataFrame', (["{'smiles': ['C', 'O']}"], {}), "({'smiles': ['C', 'O']})\n", (5544, 5568), True, 'import pandas as pd\n')] |
import datetime as dtm
from profile_plot import profile_plot
import matplotlib.pyplot as plt
from matplotlib.font_manager import fontManager, FontProperties
from matplotlib import ticker, cm
import sys
import pandas as pd
import numpy as np
import os
import re
from dateutil import parser
import errno
from shutil import copyfile
import subprocess
import argparse
import textwrap
# Normalized 0-1 RGB line colors: RED marks observed casts, BLUE the model
# profile (see do_depth_plot below).
RED = (228/256., 26/256., 28/256.)
BLUE = (55/256., 126/256., 184/256.)
# Paper-oriented, colorblind-friendly matplotlib styling for all figures.
plt.style.use(['seaborn-paper','seaborn-colorblind'])
def process_stations(station_file):
    """Read the station metadata CSV into a DataFrame indexed by station id.

    Ids that were parsed as floats upstream (trailing ".0") are normalized
    to plain integer strings, and dist_km is converted from meters to km.
    """
    columns = ["id", "x", "y", "dist_km", "elev_navd", "name", "depth_mllw"]
    sd = pd.read_csv(station_file, names=columns, header=0,
                     dtype={"id": pd.StringDtype()})
    # Normalize float-looking ids ("12.0" -> "12").
    sd["id"] = sd["id"].map(lambda s: s[:-2] if s.endswith(".0") else s)
    sd = sd.set_index("id")
    # Distances in the file are in meters; downstream code expects km.
    sd.dist_km = sd.dist_km / 1000.0
    return sd
def process_cruise(path):
    """Parse a cruise cast file into per-station depth/salinity profiles.

    The file's first two lines are headers; each remaining comma-separated
    line holds: date (mm/dd/YYYY), time (HH:MM), station id, depth, salinity.
    Station ids with a trailing ".0" are normalized to plain integers.

    Returns a dict: station id -> (depths ascending as ndarray, salinities
    in the same order, datetime of the station's first record).
    """
    print ("process_cruise")
    # Context manager guarantees the handle is closed even on a parse error
    # (the original open/readlines/close leaked the handle on exceptions).
    with open(path, 'r') as cruisefile:
        cruisetxt = cruisefile.readlines()[2:]
    cruiselines = [line.strip().split(",") for line in cruisetxt if (line != "\n")]
    cruise_data = {}
    for entry in cruiselines:
        time = dtm.datetime.strptime("%s %s" % (entry[0], entry[1]), "%m/%d/%Y %H:%M")
        station = entry[2]
        if station.endswith(".0"):
            station = station[:-2]
        if station not in cruise_data:
            # the stored time is that of the station's first record
            cruise_data[station] = ([], [], time)
        cruise_data[station][0].append(float(entry[3]))
        cruise_data[station][1].append(float(entry[4]))
    # Sort each profile by increasing depth.
    for station in cruise_data:
        depth, salinity, time = cruise_data[station]
        depth = np.array(depth)
        salinity = np.array(salinity)
        order = np.argsort(depth)
        cruise_data[station] = (depth[order], salinity[order], time)
    return cruise_data
def process_xyt(path,casts,base_time):
    """Parse a model .xyt extraction file into per-station profiles.

    Each whitespace-delimited line holds: cast number, salinity, depth
    (negative down), an unused column, and elapsed time in fractional days
    since `base_time`.  Cast numbers map to station ids through `casts`
    (castno -> tuple whose 5th element is the station id).

    Returns a dict: station id -> (depths ascending as ndarray, salinities
    in the same order, datetime of the station's first record).
    """
    # Fix: previously printed "process_cruise" (copy-paste from the other parser).
    print ("process_xyt")
    # Context manager guarantees the handle is closed even on a parse error.
    with open(path, 'r') as cruisefile:
        cruisetxt = cruisefile.readlines()
    cruiselines = [line.strip().split() for line in cruisetxt if (line != "\n")]
    cruise_data = {}
    for entry in cruiselines:
        castno = int(entry[0])
        salt = float(entry[1])
        depth = -float(entry[2])      # file stores depth negative-down
        elapsed = 24.*3600.*float(entry[4])
        time = base_time + dtm.timedelta(seconds=elapsed)
        station = casts[castno][4]
        if station not in cruise_data:
            # the stored time is that of the station's first record
            cruise_data[station] = ([], [], time)
        cruise_data[station][0].append(depth)
        cruise_data[station][1].append(salt)
    # Sort each profile by increasing depth.
    for station in cruise_data:
        depth, salinity, time = cruise_data[station]
        depth = np.array(depth)
        salinity = np.array(salinity)
        order = np.argsort(depth)
        cruise_data[station] = (depth[order], salinity[order], time)
    return cruise_data
def match_cruise(time, station, x, z, times, data):
    """Linearly interpolate a model profile at a cruise cast time.

    Parameters
    ----------
    time : float
        Cast time in the same units as `times` (days since model start).
    station : object with a `data_index` attribute selecting its column.
    x, z : 2D arrays of shape (nlevel, nstation); only z is used here
        (x is kept for interface compatibility).
    times : 1D increasing sequence of model output times.
    data : 3D array indexed as data[time, level, station].

    Returns (zz, surrounding_profiles) where surrounding_profiles[0] is the
    interpolated (time, profile) pair and the rest are up to five nearby
    model snapshots for context.  Raises ValueError when `time` is outside
    the model span.
    """
    times = np.array(times)
    # Fix: validate BEFORE indexing.  np.searchsorted returns len(times) for
    # an out-of-range time, so the original raised IndexError at
    # times[ndxR] instead of the intended ValueError.
    if not (times[0] <= time <= times[-1]):
        raise ValueError("Time %s (in days) is not in model file spanning from %s to %s" % (time, times[0], times[-1]))
    # Fix: clamp to 1 so the bracketing pair is always (ndxR-1, ndxR);
    # otherwise time == times[0] gives ndxL == ndxR and a 0/0 weight.
    ndxR = max(np.searchsorted(times, time), 1)
    ndxL = ndxR - 1
    wl = (times[ndxR] - time)/(times[ndxR] - times[ndxL])
    wr = 1 - wl
    station_ndx = station.data_index
    profile = wl*data[ndxL,:,station_ndx] + wr*data[ndxR,:,station_ndx]
    zz = z[:,station_ndx]
    # Bracket the match with up to two earlier and three later snapshots.
    ndx_farleft = max(ndxL-2, 0)
    ndx_farright = min(ndxR+3, len(times))
    surrounding_profiles = [(time, profile)]
    for n in range(ndx_farleft, ndx_farright):
        surrounding_profiles.append((times[n], data[n,:,station_ndx]))
    return zz, surrounding_profiles
def do_depth_plot(station,cruise_data,surrounding_profiles,ax,xlabel,ylabel,add_legend = False):
    """Plot modeled and observed salinity-vs-depth profiles on `ax`.

    Parameters
    ----------
    station : unused in the body; kept for interface compatibility.
    cruise_data : (depth, salinity, time) observed cast.
    surrounding_profiles : list of (coord, values) pairs; element 0 is drawn
        in BLUE as the matched model profile, later elements in gray dashes
        as +/- 3 hr context lines.  NOTE(review): values are plotted against
        np.array(prof[0]) -- presumably callers pass depth coordinates here,
        not times; confirm against the call site.
    xlabel, ylabel : axis labels; ylabel is only tested for None (the text
        itself is hard-coded to 'Depth (m)').
    add_legend : attach a model/context/observed legend when True.
    """
    all_lines = []
    label = None
    # Removed dead code from the original: unused `profiles`/`xcenter`
    # locals and a redundant manual `i += 1` alongside enumerate.
    for i, prof in enumerate(surrounding_profiles):
        p = np.array(prof[1])
        zz = np.array(prof[0])
        # mask NaNs in the profile and the corresponding coordinates
        p = np.ma.masked_where(np.isnan(p), p)
        z_masked = np.ma.masked_where(np.isnan(p), zz)
        linestyle = "solid"
        if i == 0:
            col = BLUE
            label = "Model"
            wide = 2
        else:
            col = "0.55"
            wide = 1
            # only the first context line earns a legend entry
            label = "Model +/- 3 hr" if label == "Model" else "_nolegend_"
            linestyle = "--"
        line, = ax.plot(p, z_masked, color=col, linewidth=wide, linestyle=linestyle)
        all_lines.append(line)
    depth, salinity, time = cruise_data
    line, = ax.plot(salinity, depth, color=RED, label="Observed", linewidth=2)
    all_lines.append(line)
    # depth axis increases downward
    ax.set_ylim(max(z_masked), 0)
    min_data, max_data = ax.get_xlim()
    xrange = max_data - min_data
    if xrange < 8.0:
        # NOTE(review): message disagrees with the condition (prints " > 8"
        # when the range is < 8) -- confirm intent before changing the text.
        print (" > 8")
    if xlabel != None:
        ax.set_xlabel(xlabel, size = 14)
    if ylabel != None:
        ax.set_ylabel('Depth (m)', size = 14)
    if add_legend:
        # matched model line, one context line, and the observed cast
        leg = ax.legend((all_lines[0], all_lines[1], all_lines[-1]),
                        ('Model', 'Model +/- 3 hr', 'Observed'),
                        loc='lower left', shadow=True, fancybox=True)
def longitudinal(cruise_data,station_data,ax,context_label=None,add_labels=False,xlabel=None,xmin=None,xmax=None,max_depth=None):
    """Contour a longitudinal (distance vs depth) salinity section.

    Parameters
    ----------
    cruise_data : dict, station id -> (depth, salinity, time) profile.
    station_data : DataFrame indexed by station id with a dist_km column.
    ax : matplotlib axes passed through to profile_plot.
    Remaining arguments are forwarded to profile_plot.

    Only stations with dist_km > 0 are used.  Returns the contour set
    produced by profile_plot.
    """
    print ("Longitudinal")
    from scipy.interpolate import griddata
    # Removed dead code from the original: unused `base_date`, `bedx`/`bed`
    # accumulators, and a nan-filled `zloc` that meshgrid overwrote.
    maxdepth = 0
    stations = []
    station_dists = []
    for item in cruise_data.keys():
        if (station_data.loc[item].dist_km > 0.0):
            maxdepth = max(maxdepth, max(cruise_data[item][0]))
            stations.append(item)
            station_dists.append(station_data.loc[item].dist_km)
    # Order the section by distance along the channel.
    station_dists = np.array(station_dists)
    stations = np.array(stations)
    sorted_dists = np.argsort(station_dists)
    stations = stations[sorted_dists]
    station_dists = station_dists[sorted_dists]
    nstation = len(station_dists)
    ndepth = int(maxdepth + 1)
    # Interpolate every cast onto a common 1 m depth grid.
    salt = np.ones((ndepth,nstation),dtype=float) * np.nan
    for i in range(nstation):
        depth, salinity, time = cruise_data[stations[i]]
        salt[:,i] = griddata(depth, salinity, np.arange(ndepth,dtype=float))
        # griddata leaves nan above the shallowest sample; backfill the surface bin
        if np.isnan(salt[0,i]): salt[0,i] = salt[1,i]
    xloc, zloc = np.meshgrid(station_dists, np.arange(ndepth,dtype=float))
    im, cs, ttxt = profile_plot(xloc,zloc,salt,ax,context_label,add_labels,xlabel,xmin,xmax,max_depth)
    return cs
def model_data_for_longitude(cruise_data,station_data,x, z, times, model_data, base_date):
    """Build a cruise_data-like dict of model profiles, one per station.

    Parameters
    ----------
    cruise_data : dict
        Station id -> (depths, salinities, cast_time) observed profiles.
    station_data : mapping
        Station metadata providing ``dist_km`` per station id.
    x, z, times, model_data
        Model output arrays, passed through to ``match_cruise``.
    base_date : datetime.datetime
        Model time origin; cast times are converted to days past it.

    Returns
    -------
    dict
        Station id -> ``(zz, prof[1], prof[0])`` from ``match_cruise``
        (presumably ordered to mirror cruise_data's (depth, salinity, time)
        layout -- confirm against ``match_cruise``).

    Notes
    -----
    Removed unused locals from the original (`maxdepth`, `nstation`,
    `ndepth`); behavior is unchanged.
    """
    stations = []
    station_dists = []
    # todo: this is boilerplate (shared with longitudinal())
    for item in cruise_data.keys():
        if (station_data[item].dist_km > 0.0):
            stations.append(item)
            station_dists.append(station_data[item].dist_km)
    # Process stations in order of increasing distance from the origin.
    stations = np.array(stations)[np.argsort(np.array(station_dists))]
    long_data = {}
    for station_id in stations:
        cruise_profile = cruise_data[station_id]
        cruise_time = cruise_profile[2]
        # Elapsed time of the cast in days relative to the model base date.
        rt = (cruise_time - base_date).total_seconds()/(24*3600)
        zz,profiles = match_cruise(rt, station_data[station_id], x, z, times, model_data)
        prof = profiles[0]
        long_data[station_id] = (zz,prof[1],prof[0])
    return long_data
def cruise_xyt(path,station_data,base_time,outfile):
    """Parse a USGS cruise CSV and write a SCHISM station.xyt request file.

    Parameters
    ----------
    path : str
        Cruise CSV; the first two lines are headers and are skipped.
        Expected columns start with: date, time, station number, ...
    station_data : pandas.DataFrame
        Station metadata indexed by station id, providing ``x``/``y``.
    base_time : datetime.datetime
        Model time origin; cast times are written as seconds past it.
    outfile : str
        Destination path for the xyt request file.

    Returns
    -------
    dict
        1-based request index -> (x, y, elapsed_seconds, name, station_id).
    """
    print ("cruise_xyt")
    # Use a context manager so the file is closed even on parse errors
    # (the original open/readlines/close leaked on exception).
    with open(path,'r') as cruisefile:
        cruisetxt = cruisefile.readlines()[2:]
    cruiselines = [line.strip().split(",") for line in cruisetxt if (line != "\n")]
    cruise_locs = []
    processed = []
    casts = {}
    for entry in cruiselines:
        if len(entry) < 2: continue
        time = dtm.datetime.strptime("%s %s" % (entry[0],entry[1]), "%m/%d/%Y %H:%M")
        elapsed = (time - base_time).total_seconds()
        station = entry[2]
        # Station ids sometimes arrive as floats ("649.0"); normalize.
        if station.endswith(".0"):
            station = station[:-2]
        # One request per station; the first cast encountered wins.
        if not station in processed:
            sd=station_data.loc[station]
            processed.append(station)
            cruise_locs.append((sd.x,sd.y,elapsed,sd.name,station))
    with open(outfile,"w") as out:
        out.write("Cruise cast model requests\n%s\n" % len(cruise_locs))
        for i,loc in enumerate(cruise_locs):
            jj = i+1
            locentries = (jj,loc[0],loc[1],loc[2],loc[3])
            out.write("%s %s %s %s ! %s\n" % locentries)
            casts[jj] = loc
    return casts
def gen_profile_plot(base_date,cruise_time,survey_file,model_file,station_file,xytfile):
    """Render observed vs. model longitudinal salinity sections and save a PNG.

    The figure stacks the USGS cruise section (top) over the SCHISM model
    section (bottom), with one shared colorbar, and is written to
    ``salinity_profile_<MM_DD_YYYY>.png``.
    """
    station_data = process_stations(station_file)
    cruise_data = process_cruise(survey_file)
    casts = cruise_xyt(survey_file,station_data,base_date,xytfile)
    model_data = process_xyt(model_file,casts,base_date)
    fig, (ax_obs, ax_mod) = plt.subplots(2,1,sharex=True)
    fig.set_size_inches(10,6)
    obs_label = cruise_time.strftime("USGS: %d-%b-%Y")
    longitudinal(cruise_data,station_data,ax_obs,context_label=obs_label,
                 xmin=20,xmax=104,max_depth=30)
    cs = longitudinal(model_data,station_data,ax_mod,context_label="Model",
                      add_labels=True,
                      xlabel="Distance from Golden Gate (km)",
                      xmin=20,xmax=104,max_depth=30)
    # One colorbar serves both panels.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cb = fig.colorbar(cs, cax=cbar_ax,shrink=0.01)
    cb.set_label("Salinity (psu)", size = 14)
    plt.savefig("salinity_profile_"+cruise_time.strftime("%m_%d_%Y"),dpi=300)
def main(base_date,cruise_time,obs_file,model_file,station_file,xytfile):
    """Plot observed vs. model salinity depth profiles at four stations."""
    station_data = process_stations(station_file)
    cruise_data = process_cruise(obs_file)
    casts = cruise_xyt(obs_file,station_data,base_date,xytfile)
    model_data = process_xyt(model_file,casts,base_date)
    fig, axes = plt.subplots(2,2,sharex=True)
    choices = ["657","649","2","3"]
    for idx, choice in enumerate(choices):
        # Fill the 2x2 grid column-major, matching the original layout.
        ax = axes[idx%2,int(idx/2)]
        profile = cruise_data[choice]
        cruise_time = profile[2]
        station = station_data.loc[choice]
        model_profile = model_data[choice]
        title = station.name + "(%s km) " % np.round(station.dist_km)
        ax.set_title(title)
        # Axis labels only on the outer edges of the grid.
        xlabel = "Salinity (psu)" if idx in (1,3) else None
        ylabel = "Depth (m)" if idx in (0,1) else None
        print ("ichoice: %s %s" % (idx,xlabel))
        # Legend only on the first panel.
        add_legend = (idx == 0)
        do_depth_plot(station,profile,[model_profile],ax,xlabel,ylabel,add_legend)
    plt.show()
def gen_station_xyt(base_date,cruise_time,survey_file,station_file,xytfile):
    """Write the station.xyt request file for one cruise (side effect only)."""
    station_data = process_stations(station_file)
    # Survey is parsed for parity with gen_profile_plot's pipeline.
    process_cruise(survey_file)
    cruise_xyt(survey_file,station_data,base_date,xytfile)
def create_arg_parser():
    """Build the command-line parser for cruise.py.

    All three options are required: --data_path, --start,
    --schism_output_path.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        prog="cruise.py",
        description=textwrap.dedent(
            """
      Loop over a number of USGS polaris cruise water quality data in a folder, read observed salinity
      data, generate station.xyt file and extract SCHISM model salinity from output nc files respectively.
      Finally plot and compared observed and model transect salinity profile along the centerline took
      by USGS polaris cruise.
      Inputs: SCHISM model base time, a path containing SCHISM output files and a path
      containing USGS polaris water quaility data files.
      Outputs: A png files comparing observed and model salinity profile.
      USGS polaris cruise data should have csv format like below:
      Date,Time,Station Number,Depth,Salinity,Temperature
      MM/DD/YYYY,24 hr.,,[meters],[psu],[°C]
      6/22/2017,7:20,2,1,0.14,22.48
      6/22/2017,7:20,2,2,0.13,22.48
      6/22/2017,7:20,2,3,0.13,22.48
      6/22/2017,7:20,2,4,0.13,22.48
      ......
      Here is a example of command
      python cruise.py --data_path ./ --start 04/18/2017 --schism_output_path I:\\itp\\hist_2017\\
      Your system should include SCHISM postprocess tool path in the environment.
      You can get help by typing $ cruise.py --help
      """))
    parser.add_argument('--data_path', default=None,required=True,
                        help='path contains downloaded USGS crusier water quality data')
    parser.add_argument('--start', type=str,required=True,
                        help='Starting date and time basis for SCHISM model output')
    parser.add_argument('--schism_output_path', default=None,required=True,
                        help='path contains SCHISM output data')
    return parser
if __name__== "__main__":
    # Parse CLI options and resolve the SCHISM output folder.
    # (Removed the unused `usgs_cruise_file_lst = []` from the original.)
    aug_parser = create_arg_parser()
    args = aug_parser.parse_args()
    data_folder=args.data_path
    base_date=parser.parse(args.start)
    schism_output_folder=args.schism_output_path
    # Fail fast if the SCHISM post-processing inputs are missing.
    schism_vgrid_in=os.path.join(schism_output_folder,"vgrid.in")
    if not(os.path.exists(schism_vgrid_in)):
        raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), schism_vgrid_in)
    schism_output_in=os.path.join(schism_output_folder,"read_output_xyt.in")
    if not(os.path.exists(schism_output_in)):
        raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), schism_output_in)
    station_file="usgs_cruise_stations.csv"
    if not(os.path.exists(os.path.join(data_folder,station_file))):
        raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(data_folder,station_file))
    # Process every usgs_cruise_YYYYMMDD.csv found in the data folder.
    usgs_cruise_match=re.compile("usgs_cruise_(?P<date>[0-9]{8}).csv")
    for file_name in os.listdir(data_folder):
        match_re=usgs_cruise_match.match(file_name)
        if match_re:
            print("processing crusier data "+file_name)
            cruise_time=parser.parse(match_re.group("date"))
            xyt_file="station.xyt"
            # Write the extraction request and stage it where SCHISM's
            # read_output9_xyt.exe expects it.
            gen_station_xyt(base_date,cruise_time,file_name,station_file,xyt_file)
            copyfile(os.path.join(data_folder,xyt_file),os.path.join(schism_output_folder,xyt_file))
            cmd = ['read_output9_xyt.exe']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,cwd=schism_output_folder)
            # Echo the extractor's stdout so progress is visible.
            for line in p.stdout:
                print(line)
            p.wait()
            if (p.returncode):
                raise ChildProcessError("Fail to extract schism outputs")
            # Stage the extracted salinity (fort.18) next to the cruise data
            # and render the comparison figure.
            model_salt="salt_"+match_re.group("date")
            copyfile(os.path.join(schism_output_folder,"fort.18"),os.path.join(data_folder,model_salt))
            gen_profile_plot(base_date,cruise_time,file_name,model_salt,station_file,xyt_file)
| [
"re.compile",
"numpy.argsort",
"numpy.array",
"datetime.timedelta",
"os.strerror",
"numpy.arange",
"datetime.datetime",
"os.path.exists",
"textwrap.dedent",
"os.listdir",
"numpy.searchsorted",
"subprocess.Popen",
"matplotlib.pyplot.style.use",
"numpy.round",
"dateutil.parser.parse",
"n... | [((454, 508), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['seaborn-paper', 'seaborn-colorblind']"], {}), "(['seaborn-paper', 'seaborn-colorblind'])\n", (467, 508), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3213), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (3206, 3213), True, 'import numpy as np\n'), ((3225, 3253), 'numpy.searchsorted', 'np.searchsorted', (['times', 'time'], {}), '(times, time)\n', (3240, 3253), True, 'import numpy as np\n'), ((6132, 6157), 'datetime.datetime', 'dtm.datetime', (['(2017)', '(4)', '(18)'], {}), '(2017, 4, 18)\n', (6144, 6157), True, 'import datetime as dtm\n'), ((6710, 6733), 'numpy.array', 'np.array', (['station_dists'], {}), '(station_dists)\n', (6718, 6733), True, 'import numpy as np\n'), ((6749, 6767), 'numpy.array', 'np.array', (['stations'], {}), '(stations)\n', (6757, 6767), True, 'import numpy as np\n'), ((6787, 6812), 'numpy.argsort', 'np.argsort', (['station_dists'], {}), '(station_dists)\n', (6797, 6812), True, 'import numpy as np\n'), ((7499, 7595), 'profile_plot.profile_plot', 'profile_plot', (['xloc', 'zloc', 'salt', 'ax', 'context_label', 'add_labels', 'xlabel', 'xmin', 'xmax', 'max_depth'], {}), '(xloc, zloc, salt, ax, context_label, add_labels, xlabel, xmin,\n xmax, max_depth)\n', (7511, 7595), False, 'from profile_plot import profile_plot\n'), ((8066, 8089), 'numpy.array', 'np.array', (['station_dists'], {}), '(station_dists)\n', (8074, 8089), True, 'import numpy as np\n'), ((8105, 8123), 'numpy.array', 'np.array', (['stations'], {}), '(stations)\n', (8113, 8123), True, 'import numpy as np\n'), ((8143, 8168), 'numpy.argsort', 'np.argsort', (['station_dists'], {}), '(station_dists)\n', (8153, 8168), True, 'import numpy as np\n'), ((10327, 10358), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (10339, 10358), True, 'import matplotlib.pyplot as plt\n'), ((11374, 11405), 'matplotlib.pyplot.subplots', 'plt.subplots', 
(['(2)', '(2)'], {'sharex': '(True)'}), '(2, 2, sharex=True)\n', (11386, 11405), True, 'import matplotlib.pyplot as plt\n'), ((12441, 12451), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12449, 12451), True, 'import matplotlib.pyplot as plt\n'), ((14219, 14352), 'dateutil.parser.add_argument', 'parser.add_argument', (['"""--data_path"""'], {'default': 'None', 'required': '(True)', 'help': '"""path contains downloaded USGS crusier water quality data"""'}), "('--data_path', default=None, required=True, help=\n 'path contains downloaded USGS crusier water quality data')\n", (14238, 14352), False, 'from dateutil import parser\n'), ((14384, 14505), 'dateutil.parser.add_argument', 'parser.add_argument', (['"""--start"""'], {'type': 'str', 'required': '(True)', 'help': '"""Starting date and time basis for SCHISM model output"""'}), "('--start', type=str, required=True, help=\n 'Starting date and time basis for SCHISM model output')\n", (14403, 14505), False, 'from dateutil import parser\n'), ((14536, 14653), 'dateutil.parser.add_argument', 'parser.add_argument', (['"""--schism_output_path"""'], {'default': 'None', 'required': '(True)', 'help': '"""path contains SCHISM output data"""'}), "('--schism_output_path', default=None, required=True,\n help='path contains SCHISM output data')\n", (14555, 14653), False, 'from dateutil import parser\n'), ((14895, 14919), 'dateutil.parser.parse', 'parser.parse', (['args.start'], {}), '(args.start)\n', (14907, 14919), False, 'from dateutil import parser\n'), ((14999, 15045), 'os.path.join', 'os.path.join', (['schism_output_folder', '"""vgrid.in"""'], {}), "(schism_output_folder, 'vgrid.in')\n", (15011, 15045), False, 'import os\n'), ((15202, 15258), 'os.path.join', 'os.path.join', (['schism_output_folder', '"""read_output_xyt.in"""'], {}), "(schism_output_folder, 'read_output_xyt.in')\n", (15214, 15258), False, 'import os\n'), ((15667, 15715), 're.compile', 're.compile', (['"""usgs_cruise_(?P<date>[0-9]{8}).csv"""'], {}), 
"('usgs_cruise_(?P<date>[0-9]{8}).csv')\n", (15677, 15715), False, 'import re\n'), ((15737, 15760), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (15747, 15760), False, 'import os\n'), ((1234, 1305), 'datetime.datetime.strptime', 'dtm.datetime.strptime', (["('%s %s' % (entry[0], entry[1]))", '"""%m/%d/%Y %H:%M"""'], {}), "('%s %s' % (entry[0], entry[1]), '%m/%d/%Y %H:%M')\n", (1255, 1305), True, 'import datetime as dtm\n'), ((1754, 1787), 'numpy.array', 'np.array', (['cruise_data[station][0]'], {}), '(cruise_data[station][0])\n', (1762, 1787), True, 'import numpy as np\n'), ((1807, 1840), 'numpy.array', 'np.array', (['cruise_data[station][1]'], {}), '(cruise_data[station][1])\n', (1815, 1840), True, 'import numpy as np\n'), ((1862, 1879), 'numpy.argsort', 'np.argsort', (['depth'], {}), '(depth)\n', (1872, 1879), True, 'import numpy as np\n'), ((2848, 2881), 'numpy.array', 'np.array', (['cruise_data[station][0]'], {}), '(cruise_data[station][0])\n', (2856, 2881), True, 'import numpy as np\n'), ((2901, 2934), 'numpy.array', 'np.array', (['cruise_data[station][1]'], {}), '(cruise_data[station][1])\n', (2909, 2934), True, 'import numpy as np\n'), ((2956, 2973), 'numpy.argsort', 'np.argsort', (['depth'], {}), '(depth)\n', (2966, 2973), True, 'import numpy as np\n'), ((4239, 4256), 'numpy.array', 'np.array', (['prof[1]'], {}), '(prof[1])\n', (4247, 4256), True, 'import numpy as np\n'), ((4270, 4287), 'numpy.array', 'np.array', (['prof[0]'], {}), '(prof[0])\n', (4278, 4287), True, 'import numpy as np\n'), ((6975, 7015), 'numpy.ones', 'np.ones', (['(ndepth, nstation)'], {'dtype': 'float'}), '((ndepth, nstation), dtype=float)\n', (6982, 7015), True, 'import numpy as np\n'), ((7034, 7074), 'numpy.ones', 'np.ones', (['(ndepth, nstation)'], {'dtype': 'float'}), '((ndepth, nstation), dtype=float)\n', (7041, 7074), True, 'import numpy as np\n'), ((7316, 7336), 'numpy.isnan', 'np.isnan', (['salt[0, i]'], {}), '(salt[0, i])\n', (7324, 7336), True, 'import 
numpy as np\n'), ((7449, 7479), 'numpy.arange', 'np.arange', (['ndepth'], {'dtype': 'float'}), '(ndepth, dtype=float)\n', (7458, 7479), True, 'import numpy as np\n'), ((9124, 9195), 'datetime.datetime.strptime', 'dtm.datetime.strptime', (["('%s %s' % (entry[0], entry[1]))", '"""%m/%d/%Y %H:%M"""'], {}), "('%s %s' % (entry[0], entry[1]), '%m/%d/%Y %H:%M')\n", (9145, 9195), True, 'import datetime as dtm\n'), ((15056, 15087), 'os.path.exists', 'os.path.exists', (['schism_vgrid_in'], {}), '(schism_vgrid_in)\n', (15070, 15087), False, 'import os\n'), ((15269, 15301), 'os.path.exists', 'os.path.exists', (['schism_output_in'], {}), '(schism_output_in)\n', (15283, 15301), False, 'import os\n'), ((2496, 2526), 'datetime.timedelta', 'dtm.timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (2509, 2526), True, 'import datetime as dtm\n'), ((4319, 4330), 'numpy.isnan', 'np.isnan', (['p'], {}), '(p)\n', (4327, 4330), True, 'import numpy as np\n'), ((4372, 4383), 'numpy.isnan', 'np.isnan', (['p'], {}), '(p)\n', (4380, 4383), True, 'import numpy as np\n'), ((7274, 7304), 'numpy.arange', 'np.arange', (['ndepth'], {'dtype': 'float'}), '(ndepth, dtype=float)\n', (7283, 7304), True, 'import numpy as np\n'), ((12909, 14205), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Loop over a number of USGS polaris cruise water quality data in a folder, read observed salinity\n data, generate station.xyt file and extract SCHISM model salinity from output nc files respectively. 
\n Finally plot and compared observed and model transect salinity profile along the centerline took\n by USGS polaris cruise.\n \n Inputs: SCHISM model base time, a path containing SCHISM output files and a path\n containing USGS polaris water quaility data files.\n \n Outputs: A png files comparing observed and model salinity profile.\n \n USGS polaris cruise data should have csv format like below:\n \n Date,Time,Station Number,Depth,Salinity,Temperature\n MM/DD/YYYY,24 hr.,,[meters],[psu],[°C]\n 6/22/2017,7:20,2,1,0.14,22.48\n 6/22/2017,7:20,2,2,0.13,22.48\n 6/22/2017,7:20,2,3,0.13,22.48\n 6/22/2017,7:20,2,4,0.13,22.48\n ......\n \n Here is a example of command\n \n python cruise.py --data_path ./ --start 04/18/2017 --schism_output_path I:\\\\itp\\\\hist_2017\\\\\n \n \n Your system should include SCHISM postprocess tool path in the environment.\n \n You can get help by typing $ cruise.py --help\n """'], {}), '(\n """\n Loop over a number of USGS polaris cruise water quality data in a folder, read observed salinity\n data, generate station.xyt file and extract SCHISM model salinity from output nc files respectively. 
\n Finally plot and compared observed and model transect salinity profile along the centerline took\n by USGS polaris cruise.\n \n Inputs: SCHISM model base time, a path containing SCHISM output files and a path\n containing USGS polaris water quaility data files.\n \n Outputs: A png files comparing observed and model salinity profile.\n \n USGS polaris cruise data should have csv format like below:\n \n Date,Time,Station Number,Depth,Salinity,Temperature\n MM/DD/YYYY,24 hr.,,[meters],[psu],[°C]\n 6/22/2017,7:20,2,1,0.14,22.48\n 6/22/2017,7:20,2,2,0.13,22.48\n 6/22/2017,7:20,2,3,0.13,22.48\n 6/22/2017,7:20,2,4,0.13,22.48\n ......\n \n Here is a example of command\n \n python cruise.py --data_path ./ --start 04/18/2017 --schism_output_path I:\\\\itp\\\\hist_2017\\\\\n \n \n Your system should include SCHISM postprocess tool path in the environment.\n \n You can get help by typing $ cruise.py --help\n """\n )\n', (12924, 14205), False, 'import textwrap\n'), ((15137, 15162), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (15148, 15162), False, 'import os\n'), ((15351, 15376), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (15362, 15376), False, 'import os\n'), ((15476, 15515), 'os.path.join', 'os.path.join', (['data_folder', 'station_file'], {}), '(data_folder, station_file)\n', (15488, 15515), False, 'import os\n'), ((15565, 15590), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (15576, 15590), False, 'import os\n'), ((15592, 15631), 'os.path.join', 'os.path.join', (['data_folder', 'station_file'], {}), '(data_folder, station_file)\n', (15604, 15631), False, 'import os\n'), ((16230, 16301), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'cwd': 'schism_output_folder'}), '(cmd, stdout=subprocess.PIPE, cwd=schism_output_folder)\n', (16246, 16301), False, 'import subprocess\n'), ((661, 677), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (675, 
677), True, 'import pandas as pd\n'), ((11973, 11998), 'numpy.round', 'np.round', (['station.dist_km'], {}), '(station.dist_km)\n', (11981, 11998), True, 'import numpy as np\n'), ((16091, 16126), 'os.path.join', 'os.path.join', (['data_folder', 'xyt_file'], {}), '(data_folder, xyt_file)\n', (16103, 16126), False, 'import os\n'), ((16126, 16170), 'os.path.join', 'os.path.join', (['schism_output_folder', 'xyt_file'], {}), '(schism_output_folder, xyt_file)\n', (16138, 16170), False, 'import os\n'), ((16564, 16609), 'os.path.join', 'os.path.join', (['schism_output_folder', '"""fort.18"""'], {}), "(schism_output_folder, 'fort.18')\n", (16576, 16609), False, 'import os\n'), ((16609, 16646), 'os.path.join', 'os.path.join', (['data_folder', 'model_salt'], {}), '(data_folder, model_salt)\n', (16621, 16646), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- <NAME>
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
from ...frontend.base_frontend import ScatteringBase
import math
import numbers
import warnings
from types import FunctionType
from copy import deepcopy
import numpy as np
from ..filter_bank import (scattering_filter_factory, periodize_filter_fourier,
energy_norm_filterbank_tm)
from ..filter_bank_jtfs import _FrequencyScatteringBase
from ..utils import (compute_border_indices, compute_padding,
compute_minimum_support_to_pad,
compute_meta_scattering, compute_meta_jtfs)
from ...toolkit import fill_default_args
class ScatteringBase1D(ScatteringBase):
"""
This is a modification of
https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/
frontend/base_frontend.py
Kymatio, (C) 2018-present. The Kymatio developers.
"""
def __init__(self, J, shape, Q=1, T=None, max_order=2, average=True,
oversampling=0, out_type='array', pad_mode='reflect',
max_pad_factor=1, analytic=False, normalize='l1-energy',
r_psi=math.sqrt(.5), backend=None):
super(ScatteringBase1D, self).__init__()
self.J = J if isinstance(J, tuple) else (J, J)
self.shape = shape
self.Q = Q if isinstance(Q, tuple) else (Q, 1)
self.T = T
self.max_order = max_order
self.average = average
self.oversampling = oversampling
self.out_type = out_type
self.pad_mode = pad_mode
self.max_pad_factor = max_pad_factor
self.analytic = analytic
self.normalize = (normalize if isinstance(normalize, tuple) else
(normalize, normalize))
self.r_psi = r_psi if isinstance(r_psi, tuple) else (r_psi, r_psi)
self.backend = backend
def build(self):
"""Set up padding and filters
Certain internal data, such as the amount of padding and the wavelet
filters to be used in the scattering transform, need to be computed
from the parameters given during construction. This function is called
automatically during object creation and no subsequent calls are
therefore needed.
"""
self.sigma0 = 0.1
self.alpha = 4.
self.P_max = 5
self.eps = 1e-7
self.criterion_amplitude = 1e-3
# check the shape
if isinstance(self.shape, numbers.Integral):
self.N = self.shape
elif isinstance(self.shape, tuple):
self.N = self.shape[0]
if len(self.shape) > 1:
raise ValueError("If shape is specified as a tuple, it must "
"have exactly one element")
else:
raise ValueError("shape must be an integer or a 1-tuple")
# dyadic scale of N, also min possible padding
self.N_scale = math.ceil(math.log2(self.N))
# check `pad_mode`, set `pad_fn`
if isinstance(self.pad_mode, FunctionType):
def pad_fn(x):
return self.pad_mode(x, self.pad_left, self.pad_right)
self.pad_mode = 'custom'
elif self.pad_mode not in ('reflect', 'zero'):
raise ValueError(("unsupported `pad_mode` '{}';\nmust be a "
"function, or string, one of: 'zero', 'reflect'."
).format(str(self.pad_mode)))
else:
def pad_fn(x):
return self.backend.pad(x, self.pad_left, self.pad_right,
self.pad_mode)
self.pad_fn = pad_fn
# check `normalize`
supported = ('l1', 'l2', 'l1-energy', 'l2-energy')
if any(n not in supported for n in self.normalize):
raise ValueError(("unsupported `normalize`; must be one of: {}\n"
"got {}").format(supported, self.normalize))
# ensure 2**max(J) <= nextpow2(N)
Np2up = 2**self.N_scale
if 2**max(self.J) > Np2up:
raise ValueError(("2**J cannot exceed input length (rounded up to "
"pow2) (got {} > {})".format(
2**max(self.J), Np2up)))
# validate `max_pad_factor`
# 1/2**J < 1/Np2up so impossible to create wavelet without padding
if max(self.J) == self.N_scale and self.max_pad_factor == 0:
raise ValueError("`max_pad_factor` can't be 0 if "
"max(J) == log2(nextpow2(N)). Got, "
"respectively, %s, %s, %s" % (
self.max_pad_factor, max(self.J), self.N_scale))
# check T or set default
if self.T is None:
self.T = 2**max(self.J)
elif self.T == 'global':
self.T == Np2up
elif self.T > Np2up:
raise ValueError(("The temporal support T of the low-pass filter "
"cannot exceed input length (got {} > {})"
).format(self.T, self.N))
# log2_T, global averaging
self.log2_T = math.floor(math.log2(self.T))
self.average_global_phi = bool(self.T == Np2up)
self.average_global = bool(self.average_global_phi and self.average)
# Compute the minimum support to pad (ideally)
min_to_pad, pad_phi, pad_psi1, pad_psi2 = compute_minimum_support_to_pad(
self.N, self.J, self.Q, self.T, r_psi=self.r_psi,
sigma0=self.sigma0, alpha=self.alpha, P_max=self.P_max, eps=self.eps,
criterion_amplitude=self.criterion_amplitude,
normalize=self.normalize, pad_mode=self.pad_mode)
if self.average_global:
min_to_pad = max(pad_psi1, pad_psi2) # ignore phi's padding
J_pad_ideal = math.ceil(math.log2(self.N + 2 * min_to_pad))
if self.max_pad_factor is None:
self.J_pad = J_pad_ideal
else:
self.J_pad = min(J_pad_ideal, self.N_scale + self.max_pad_factor)
if J_pad_ideal - self.J_pad > 1:
extent_txt = ' severe' if J_pad_ideal - self.J_pad > 2 else ''
warnings.warn(("Insufficient temporal padding, will yield"
"{} boundary effects and filter distortion; "
"recommended higher `max_pad_factor` or lower "
"`J` or `T`.").format(extent_txt))
# compute the padding quantities:
self.pad_left, self.pad_right = compute_padding(self.J_pad, self.N)
# compute start and end indices
self.ind_start, self.ind_end = compute_border_indices(
self.log2_T, self.J, self.pad_left, 2**self.J_pad - self.pad_right)
# record whether configuration yields second order filters
meta = ScatteringBase1D.meta(self)
self._no_second_order_filters = (self.max_order < 2 or
bool(np.isnan(meta['n'][-1][1])))
def create_filters(self):
# Create the filters
self.phi_f, self.psi1_f, self.psi2_f = scattering_filter_factory(
self.N, self.J_pad, self.J, self.Q, self.T,
normalize=self.normalize,
criterion_amplitude=self.criterion_amplitude,
r_psi=self.r_psi, sigma0=self.sigma0, alpha=self.alpha,
P_max=self.P_max, eps=self.eps)
# analyticity
if self.analytic:
for psi_fs in (self.psi1_f, self.psi2_f):
for p in psi_fs:
for k in p:
if isinstance(k, int):
M = len(p[k])
p[k][M//2 + 1:] = 0 # zero negatives
p[k][M//2] /= 2 # halve Nyquist
# energy norm
# must do after analytic since analytic affects norm
if any('energy' in n for n in self.normalize):
energy_norm_filterbank_tm(self.psi1_f, self.psi2_f, phi_f=None,
J=self.J, log2_T=self.log2_T,
normalize=self.normalize)
    def meta(self):
        """Get meta information on the transform.

        Calls the static method `compute_meta_scattering()` with the
        parameters of the transform object.

        Returns
        -------
        meta : dictionary
            See the documentation for `compute_meta_scattering()`.
        """
        return compute_meta_scattering(self.J_pad, self.J, self.Q, self.T,
                        r_psi=self.r_psi, max_order=self.max_order)
_doc_shape = 'N'
_doc_instantiation_shape = {True: 'S = Scattering1D(J, N, Q)',
False: 'S = Scattering1D(J, Q)'}
_doc_param_shape = \
r"""
shape : int
The length of the input signals.
"""
_doc_attrs_shape = \
r"""J_pad : int
The logarithm of the padded length of the signals.
pad_left : int
The amount of padding to the left of the signal.
pad_right : int
The amount of padding to the right of the signal.
phi_f : dictionary
A dictionary containing the lowpass filter at all resolutions. See
`filter_bank.scattering_filter_factory` for an exact description.
psi1_f : dictionary
A dictionary containing all the first-order wavelet filters, each
represented as a dictionary containing that filter at all
resolutions. See `filter_bank.scattering_filter_factory` for an
exact description.
psi2_f : dictionary
A dictionary containing all the second-order wavelet filters, each
represented as a dictionary containing that filter at all
resolutions. See `filter_bank.scattering_filter_factory` for an
exact description.
"""
_doc_param_average = \
r"""
average : boolean, optional
Determines whether the output is averaged in time or not. The
averaged output corresponds to the standard scattering transform,
while the un-averaged output skips the last convolution by
:math:`\phi_J(t)`. This parameter may be modified after object
creation. Defaults to `True`.
"""
_doc_attr_average = \
r"""
average : boolean
Controls whether the output should be averaged (the standard
scattering transform) or not (resulting in wavelet modulus
coefficients). Note that to obtain unaveraged output, the
`vectorize` flag must be set to `False` or `out_type` must be set
to `'list'`.
"""
_doc_param_vectorize = \
r"""
vectorize : boolean, optional
Determines wheter to return a vectorized scattering transform
(that is, a large array containing the output) or a dictionary
(where each entry corresponds to a separate scattering
coefficient). This parameter may be modified after object
creation. Deprecated in favor of `out_type` (see below). Defaults
to True.
out_type : str, optional
The format of the output of a scattering transform. If set to
`'list'`, then the output is a list containing each individual
scattering coefficient with meta information. Otherwise, if set to
`'array'`, the output is a large array containing the
concatenation of all scattering coefficients. Defaults to
`'array'`.
pad_mode : str (default 'reflect') / function, optional
Name of padding scheme to use, one of (`x = [1, 2, 3]`):
- zero: [0, 0, 0, 1, 2, 3, 0, 0]
- reflect: [2, 3, 2, 1, 2, 3, 2, 1]
Or, pad function with signature `pad_fn(x, pad_left, pad_right)`.
This sets `self.pad_mode='custom'` (the name of padding is used
for some internal logic).
max_pad_factor : int (default 2) / None, optional
Will pad by at most `2**max_pad_factor` relative to
`nextpow2(shape)`.
E.g. if input length is 150, then maximum padding with
`max_pad_factor=2` is `256 * (2**2) = 1024`.
The maximum realizable value is `4`: a filter of scale `scale`
requires `2**(scale + 4)` samples to convolve without boundary
effects, and with fully decayed wavelets - i.e. x16 the scale,
and the largest permissible `J` or `log2_T` is `log2(N)`.
`None` means limitless. A limitation with `analytic=True` is,
`compute_minimum_support_to_pad` does not account for
`analytic=True`.
normalize : str / tuple[str], optional
Tuple sets first-order and second-order separately, but only the
first element sets `normalize` for `phi_f`. Supported:
- 'l1': bandpass normalization; all filters' amplitude envelopes
sum to 1 in time domain (for Morlets makes them peak at 1
in frequency domain). `sum(abs(psi)) == 1`.
- 'l2': energy normalization; all filters' energies are 1
in time domain; not suitable for scattering.
`sum(abs(psi)**2) == 1`.
- 'l1-energy', 'l2-energy': additionally renormalizes the
entire filterbank such that its LP-sum (overlap of
frequency-domain energies) is `<=1` (`<=2` for time scattering
per using only analytic filters, without anti-analytic).
- This improves "even-ness" of input's representation, i.e.
no frequency is tiled too great or little (amplified or
attenuated).
- `l2-energy` is self-defeating, as the `energy` part
reverts to `l1`.
- `phi` is excluded from norm computations, against the
standard. This is because lowpass functions separately from
bandpass in coefficient computations, and any `log2_T`
that differs from `J` will attenuate or amplify lowest
frequencies in an undesired manner. In rare cases, this
*is* desired, and can be achieved by calling relevant
functions manually.
r_psi : float / tuple[float], optional
Should be >0 and <1. Controls the redundancy of the filters
(the larger r_psi, the larger the overlap between adjacent wavelets),
and stability against time-warp deformations
(larger r_psi improves it).
Defaults to sqrt(0.5).
Tuple sets separately for first- and second-order filters.
"""
_doc_attr_vectorize = \
r"""
vectorize : boolean
Controls whether the output should be vectorized into a single
Tensor or collected into a dictionary. Deprecated in favor of
`out_type`. For more details, see the documentation for
`scattering`.
out_type : str
Specifices the output format of the transform, which is currently
one of `'array'` or `'list`'. If `'array'`, the output is a large
array containing the scattering coefficients. If `'list`', the
output is a list of dictionaries, each containing a scattering
coefficient along with meta information. For more information, see
the documentation for `scattering`.
pad_mode : str
One of supported padding modes: 'reflect', 'zero' - or 'custom'
if a function was passed.
pad_fn : function
A backend padding function, or user function (as passed
to `pad_mode`), with signature `pad_fn(x, pad_left, pad_right)`.
max_pad_factor : int (default 2) / None, optional
Will pad by at most `2**max_pad_factor` relative to
`nextpow2(shape)`.
E.g. if input length is 150, then maximum padding with
`max_pad_factor=2` is `256 * (2**2) = 1024`.
The maximum realizable value is `4`: a filter of scale `scale`
requires `2**(scale + 4)` samples to convolve without boundary
effects, and with fully decayed wavelets - i.e. x16 the scale,
and the largest permissible `J` or `log2_T` is `log2(N)`.
`None` means limitless. A limitation with `analytic=True` is,
`compute_minimum_support_to_pad` does not account for
`analytic=True`.
analytic : bool (default False)
If True, will force negative frequencies to zero. Useful if
strict analyticity is desired, but may worsen time-domain decay.
average_global_phi : bool
True if `T == nextpow2(shape)`, i.e. `T` is maximum possible
and equivalent to global averaging, in which case lowpassing is
replaced by simple arithmetic mean.
In case of `average==False`, controls scattering logic for
`phi_t` pairs in JTFS.
average_global : bool
True if `average_global_phi and average_fr`. Same as
`average_global_phi` if `average_fr==True`.
In case of `average==False`, controls scattering logic for
`psi_t` pairs in JTFS.
"""
_doc_class = \
r"""
The 1D scattering transform
The scattering transform computes a cascade of wavelet transforms
alternated with a complex modulus non-linearity. The scattering
transform of a 1D signal :math:`x(t)` may be written as
$S_J x = [S_J^{{(0)}} x, S_J^{{(1)}} x, S_J^{{(2)}} x]$
where
$S_J^{{(0)}} x(t) = x \star \phi_J(t)$,
$S_J^{{(1)}} x(t, \lambda) = |x \star \psi_\lambda^{{(1)}}|
\star \phi_J$, and
$S_J^{{(2)}} x(t, \lambda, \mu) = |\,| x \star \psi_\lambda^{{(1)}}|
\star \psi_\mu^{{(2)}} | \star \phi_J$.
In the above formulas, :math:`\star` denotes convolution in time. The
filters $\psi_\lambda^{{(1)}}(t)$ and $\psi_\mu^{{(2)}}(t)$ are analytic
wavelets with center frequencies $\lambda$ and $\mu$, while
$\phi_J(t)$ is a real lowpass filter centered at the zero frequency.
The `Scattering1D` class implements the 1D scattering transform for a
given set of filters whose parameters are specified at initialization.
While the wavelets are fixed, other parameters may be changed after
the object is created, such as whether to compute all of
:math:`S_J^{{(0)}} x`, $S_J^{{(1)}} x$, and $S_J^{{(2)}} x$ or just
$S_J^{{(0)}} x$ and $S_J^{{(1)}} x$.
{frontend_paragraph}
Given an input `{array}` `x` of shape `(B, N)`, where `B` is the
number of signals to transform (the batch size) and `N` is the length
of the signal, we compute its scattering transform by passing it to
the `scattering` method (or calling the alias `{alias_name}`). Note
that `B` can be one, in which case it may be omitted, giving an input
of shape `(N,)`.
Example
-------
::
# Set the parameters of the scattering transform.
J = 6
N = 2 ** 13
Q = 8
# Generate a sample signal.
x = {sample}
# Define a Scattering1D object.
{instantiation}
# Calculate the scattering transform.
Sx = S.scattering(x)
# Equivalently, use the alias.
Sx = S{alias_call}(x)
Above, the length of the signal is :math:`N = 2^{{13}} = 8192`, while
the maximum scale ratio of the scattering transform is set to
:math:`2^J = 2^6 = 64`. The time-frequency resolution of the first-order
wavelets :math:`\psi_\lambda^{{(1)}}(t)` is set to `Q = 8` wavelets per
octave. The second-order wavelets :math:`\psi_\mu^{{(2)}}(t)` always have
one wavelet per octave.
Parameters
----------
J : int / tuple[int]
Controls the maximum log-scale and number of octaves of the
scattering transform. There are approx. `Q` wavelets per octave, and
bandwidth halves with each octave. Hence, largest scale wavelet is
about `2**J` larger than smallest scale wavelet.
Tuple sets `J1` and `J2` separately, for first-order and second-order
scattering, respectively.
{param_shape}Q : int >= 1 / tuple[int]
The number of first-order wavelets per octave. Defaults to `1`.
If tuple, sets `Q = (Q1, Q2)`, where `Q2` is the number of
second-order wavelets per octave (which defaults to `1`).
- Q1: For audio signals, a value of `>= 12` is recommended in
order to separate partials.
- Q2: Recommended `1` for most (`Scattering1D`) applications.
- Greater Q also corresponds to greater scale for all wavelets.
T : int / str['global']
Temporal width of low-pass filter, controlling amount of imposed
time-shift invariance and maximum subsampling.
'global' for global average pooling (simple arithmetic mean),
which also eases on padding (ignores `phi_f`'s requirement).
max_order : int, optional
The maximum order of scattering coefficients to compute. Must be
either `1` or `2`. Defaults to `2`.
{param_average}oversampling : integer >= 0, optional
Controls the oversampling factor relative to the default as a
power of two. Since the convolving by wavelets (or lowpass
filters) and taking the modulus reduces the high-frequency content
of the signal, we can subsample to save space and improve
performance. However, this may reduce precision in the
calculation. If this is not desirable, `oversampling` can be set
to a large value to prevent too much subsampling. This parameter
may be modified after object creation.
Defaults to `0`. Has no effect if `average_global=True`.
{param_vectorize}
Attributes
----------
J : int / tuple[int]
Controls the maximum log-scale and number of octaves of the
scattering transform. There are approx. `Q` wavelets per octave,
and bandwidth halves with each octave. Hence, largest scale wavelet
is about `2**J` larger than smallest scale wavelet.
Tuple sets `J1` and `J2` separately, for first-order and second-order
scattering, respectively.
{param_shape}Q : int >= 1 / tuple[int]
The number of first-order wavelets per octave. Defaults to `1`.
If tuple, sets `Q = (Q1, Q2)`, where `Q2` is the number of
second-order wavelets per octave (which defaults to `1`).
- Q1: For audio signals, a value of `>= 12` is recommended in
order to separate partials.
- Q2: Recommended `1` for most (`Scattering1D`) applications.
T : int
Temporal width of low-pass filter, controlling amount of imposed
time-shift invariance and maximum subsampling.
'global' for global average pooling (simple arithmetic mean),
which also eases on padding (ignores `phi_f`'s requirement).
{attrs_shape}max_order : int
The maximum scattering order of the transform.
{attr_average}oversampling : int
The number of powers of two to oversample the output compared to
the default subsampling rate determined from the filters.
{attr_vectorize}
References
----------
This is a modification of
https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/
frontend/base_frontend.py
Kymatio, (C) 2018-present. The Kymatio developers.
"""
_doc_scattering = \
"""
Apply the scattering transform
Given an input `{array}` of size `(B, N)`, where `B` is the batch
size (it can be potentially an integer or a shape) and `N` is the length
of the individual signals, this function computes its scattering
transform. If the `vectorize` flag is set to `True` (or if it is not
available in this frontend), the output is in the form of a `{array}`
of size `(B, C, N1)`, where `N1` is the signal length after subsampling
to the scale :math:`2^J` (with the appropriate oversampling factor to
reduce aliasing), and `C` is the number of scattering coefficients. If
`vectorize` is set `False`, however, the output is a dictionary
containing `C` keys, each a tuple whose length corresponds to the
scattering order and whose elements are the sequence of filter indices
used.
Note that the `vectorize` flag has been deprecated in favor of the
`out_type` parameter. If this is set to `'array'` (the default), the
`vectorize` flag is still respected, but if not, `out_type` takes
precedence. The two current output types are `'array'` and `'list'`.
The former gives the type of output described above. If set to
`'list'`, however, the output is a list of dictionaries, each
dictionary corresponding to a scattering coefficient and its associated
meta information. The coefficient is stored under the `'coef'` key,
while other keys contain additional information, such as `'j'` (the
scale of the filter used) and `'n`' (the filter index).
Furthermore, if the `average` flag is set to `False`, these outputs
are not averaged, but are simply the wavelet modulus coefficients of
the filters.
Parameters
----------
x : {array}
An input `{array}` of size `(B, N)`.
Returns
-------
S : tensor or dictionary
If `out_type` is `'array'` and the `vectorize` flag is `True`, the
output is a{n} `{array}` containing the scattering coefficients,
while if `vectorize` is `False`, it is a dictionary indexed by
tuples of filter indices. If `out_type` is `'list'`, the output is
a list of dictionaries as described above.
References
----------
This is a modification of
https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/
frontend/base_frontend.py
Kymatio, (C) 2018-present. The Kymatio developers.
"""
@classmethod
def _document(cls):
    """Render `cls.__doc__` and `cls.scattering.__doc__` from the frontend's
    doc templates, inserting shape/average/vectorize sections only where the
    frontend declares support for them."""
    has_shape = cls._doc_has_shape
    has_out_type = cls._doc_has_out_type
    # sections conditional on frontend capabilities; empty string otherwise
    cls.__doc__ = ScatteringBase1D._doc_class.format(
        array=cls._doc_array,
        frontend_paragraph=cls._doc_frontend_paragraph,
        alias_name=cls._doc_alias_name,
        alias_call=cls._doc_alias_call,
        instantiation=cls._doc_instantiation_shape[has_shape],
        param_shape=cls._doc_param_shape if has_shape else '',
        attrs_shape=cls._doc_attrs_shape if has_shape else '',
        param_average=cls._doc_param_average if has_out_type else '',
        attr_average=cls._doc_attr_average if has_out_type else '',
        param_vectorize=cls._doc_param_vectorize if has_out_type else '',
        attr_vectorize=cls._doc_attr_vectorize if has_out_type else '',
        sample=cls._doc_sample.format(shape=cls._doc_shape))
    cls.scattering.__doc__ = ScatteringBase1D._doc_scattering.format(
        array=cls._doc_array,
        n=cls._doc_array_n)
class TimeFrequencyScatteringBase1D():
    """Frontend-agnostic base logic for the 1D Joint Time-Frequency
    Scattering (JTFS) transform.

    The full user-facing documentation is assembled at runtime by
    `_document()` from the `_doc_*` templates. Construction stores arguments;
    `build()` validates them and instantiates `_FrequencyScatteringBase`
    (which builds the frequential filters).
    """
    # Advanced configuration arguments accepted via `**kwargs` in `__init__`;
    # validated, defaulted, and promoted to attributes in `build()`.
    SUPPORTED_KWARGS = {'aligned', 'out_3D', 'sampling_filters_fr', 'analytic_fr',
                        'F_kind', 'max_pad_factor_fr', 'pad_mode_fr',
                        'normalize_fr', 'r_psi_fr', 'oversampling_fr',
                        'max_noncqt_fr', 'out_exclude', 'paths_exclude'}
    # Values used for any supported kwarg the user didn't pass
    # (see `default_kwargs` property, which deep-copies this dict).
    DEFAULT_KWARGS = dict(
        aligned=None, out_3D=False, sampling_filters_fr=('exclude', 'resample'),
        analytic_fr=True, F_kind='average', max_pad_factor_fr=2,
        pad_mode_fr='zero', normalize_fr='l1-energy',
        r_psi_fr=math.sqrt(.5), oversampling_fr=0, max_noncqt_fr=None,
        out_exclude=None, paths_exclude=None,
    )
def __init__(self, J_fr=None, Q_fr=2, F=None, average_fr=False,
out_type='array', implementation=None, **kwargs):
self.J_fr = J_fr
self.Q_fr = Q_fr
self.F = F
self.average_fr = average_fr
self.out_type = out_type
self.implementation = implementation
self.kwargs = kwargs
def build(self):
    """Check args and instantiate `_FrequencyScatteringBase` object
    (which builds filters).

    Certain internal data, such as the amount of padding and the wavelet
    filters to be used in the scattering transform, need to be computed
    from the parameters given during construction. This function is called
    automatically during object creation and no subsequent calls are
    therefore needed.

    Raises
    ------
    ValueError
        If time scattering yields no second-order filters, if
        `implementation` is invalid or mixed with `**kwargs`, if an
        unsupported kwarg or `out_exclude` name was passed, or if
        `max_noncqt_fr` has a bad type.
    """
    # if config yields no second order coeffs, we cannot do joint scattering
    if self._no_second_order_filters:
        raise ValueError("configuration yields no second-order filters; "
                         "try increasing `J`")

    # handle `implementation` ############################################
    # validate: `implementation` and `**kwargs` are mutually exclusive,
    # and `implementation` must be one of the presets 1-5
    if self.implementation is not None:
        if len(self.kwargs) > 0:
            raise ValueError("if `implementation` is passed, `**kwargs` must "
                             "be empty; got\n%s" % self.kwargs)
        elif not (isinstance(self.implementation, int) and
                  self.implementation in range(1, 6)):
            raise ValueError("`implementation` must be None, or an integer "
                             "1-5; got %s" % str(self.implementation))

    # fill defaults: user kwargs take precedence over `DEFAULT_KWARGS`
    if len(self.kwargs) > 0:
        I = fill_default_args(self.kwargs, self.default_kwargs,
                              copy_original=True)
    else:
        I = self.default_kwargs
    # handle `None`s: `aligned` defaults to True only when not
    # recalibrating and `out_3D=True` (see `aligned` docs)
    if I['aligned'] is None:
        not_recalibrate = bool(I['sampling_filters_fr'] not in
                               ('recalibrate', ('recalibrate', 'recalibrate'))
                               )
        I['aligned'] = bool(not_recalibrate and I['out_3D'])

    # store for reference (deep copy, since `I` is consumed by pops below)
    self.kwargs_filled = deepcopy(I)

    # promote each supported kwarg to an attribute, consuming `I`
    for name in TimeFrequencyScatteringBase1D.SUPPORTED_KWARGS:
        setattr(self, name, I.pop(name))

    # invalid arg check: anything left in `I` was not a supported kwarg
    if len(I) != 0:
        raise ValueError("unknown kwargs:\n{}\nSupported are:\n{}".format(
            I, TimeFrequencyScatteringBase1D.SUPPORTED_KWARGS))

    # define presets (documented under `implementation`)
    self._implementation_presets = {
        1: dict(average_fr=False, aligned=False, out_3D=False,
                sampling_filters_fr=('exclude', 'resample'),
                out_type='array'),
        2: dict(average_fr=True, aligned=True, out_3D=True,
                sampling_filters_fr=('exclude', 'resample'),
                out_type='array'),
        3: dict(average_fr=True, aligned=True, out_3D=True,
                sampling_filters_fr=('exclude', 'resample'),
                out_type='dict:list'),
        4: dict(average_fr=True, aligned=False, out_3D=True,
                sampling_filters_fr=('exclude', 'recalibrate'),
                out_type='array'),
        5: dict(average_fr=True, aligned=False, out_3D=True,
                sampling_filters_fr=('recalibrate', 'recalibrate'),
                out_type='dict:list'),
    }
    # override defaults with presets
    if isinstance(self.implementation, int):
        for k, v in self._implementation_presets[self.implementation].items():
            setattr(self, k, v)

    ######################################################################
    # `out_structure`: presets 3 and 5 pack output via `pack_coeffs_jtfs`
    if isinstance(self.implementation, int) and self.implementation in (3, 5):
        self.out_structure = 3
    else:
        self.out_structure = None

    # handle `out_exclude`: normalize str -> list, then validate names
    if self.out_exclude is not None:
        if isinstance(self.out_exclude, str):
            self.out_exclude = [self.out_exclude]
        # ensure all names are valid
        supported = ('S0', 'S1', 'phi_t * phi_f', 'phi_t * psi_f',
                     'psi_t * phi_f', 'psi_t * psi_f_up', 'psi_t * psi_f_dn')
        for name in self.out_exclude:
            if name not in supported:
                raise ValueError(("'{}' is an invalid coefficient name; "
                                  "must be one of: {}").format(
                                      name, ', '.join(supported)))

    # handle `F`
    if self.F is None:
        # default to one octave (Q wavelets per octave, J octaves,
        # approx Q*J total frequency rows, so averaging scale is `Q/total`)
        # F is processed further in `_FrequencyScatteringBase`
        self.F = self.Q[0]
    # handle `max_noncqt_fr`: 'Q' is shorthand for half of `Q1`
    if self.max_noncqt_fr is not None:
        if not isinstance(self.max_noncqt_fr, (str, int)):
            raise ValueError("`max_noncqt_fr` must be str, int, or None")
        if self.max_noncqt_fr == 'Q':
            self.max_noncqt_fr = self.Q[0] // 2

    # frequential scattering object ######################################
    self._N_frs = self.get_N_frs()
    # number of psi1 filters
    self._n_psi1_f = len(self.psi1_f)
    max_order_fr = 1
    self.scf = _FrequencyScatteringBase(
        self._N_frs, self.J_fr, self.Q_fr, self.F, max_order_fr,
        self.average_fr, self.aligned, self.oversampling_fr,
        self.sampling_filters_fr, self.out_type, self.out_3D,
        self.max_pad_factor_fr, self.pad_mode_fr, self.analytic_fr,
        self.max_noncqt_fr, self.normalize_fr, self.F_kind, self.r_psi_fr,
        self._n_psi1_f, self.backend)
    self.finish_creating_filters()
    self.handle_paths_exclude()

    # detach __init__ args, instead access `scf`'s via `__getattr__` #####
    # this is so that changes in attributes are reflected here
    init_args = ('J_fr', 'Q_fr', 'F', 'average_fr', 'oversampling_fr',
                 'sampling_filters_fr', 'max_pad_factor_fr', 'pad_mode_fr',
                 'r_psi_fr', 'out_3D')
    for init_arg in init_args:
        delattr(self, init_arg)

    # sanity warning #####################################################
    # deliberately broad catch: meta building is a best-effort check, and
    # failure here should warn rather than abort construction
    try:
        self.meta()
    except:
        warnings.warn(("Failed to build meta; the implementation may be "
                       "faulty. Try another configuration, or call "
                       "`jtfs.meta()` to debug."))
def get_N_frs(self):
"""This is equivalent to `len(x)` along frequency, which varies across
`psi2`, so we compute for each.
"""
def is_cqt_if_need_cqt(n1):
if self.max_noncqt_fr is None:
return True
return n_non_cqts[n1] <= self.max_noncqt_fr
n_non_cqts = np.cumsum([not p['is_cqt'] for p in self.psi1_f])
N_frs = []
for n2 in range(len(self.psi2_f)):
j2 = self.psi2_f[n2]['j']
max_freq_nrows = 0
if j2 != 0:
for n1 in range(len(self.psi1_f)):
if j2 > self.psi1_f[n1]['j'] and is_cqt_if_need_cqt(n1):
max_freq_nrows += 1
# add rows for `j2 >= j1` up to `nextpow2` of current number
# to not change frequential padding scales
# but account for `cqt_fr`
max_freq_nrows_at_2gt1 = max_freq_nrows
p2up_nrows = int(2**math.ceil(math.log2(max_freq_nrows_at_2gt1)))
for n1 in range(len(self.psi1_f)):
if (j2 == self.psi1_f[n1]['j'] and
max_freq_nrows < p2up_nrows and
is_cqt_if_need_cqt(n1)):
max_freq_nrows += 1
N_frs.append(max_freq_nrows)
return N_frs
def finish_creating_filters(self):
    """Handles necessary adjustments in time scattering filters unaccounted
    for in default construction.

    Three steps:
      1. Extends `phi_f`'s subsampled variants up to `log2_T` (needed for
         `phi_t * psi_f` pairs).
      2. Restructures `phi_f` as `{trim_tm: [filters]}` for early unpadding
         in joint scattering, where trimming in time corresponds to
         subsampling in Fourier.
      3. Rebuilds `ind_start` / `ind_end` unpad indices per `trim_tm`.

    NOTE(review): uses `max(self.J)`, so `self.J` is assumed iterable
    (tuple of `(J1, J2)`) at this point — confirm against constructor.
    """
    # ensure phi is subsampled up to log2_T for `phi_t * psi_f` pairs
    # (integer keys of `phi_f` are subsampling factors)
    max_sub_phi = lambda: max(k for k in self.phi_f if isinstance(k, int))
    while max_sub_phi() < self.log2_T:
        self.phi_f[max_sub_phi() + 1] = periodize_filter_fourier(
            self.phi_f[0], nperiods=2**(max_sub_phi() + 1))

    # for early unpadding in joint scattering
    # copy filters, assign to `0` trim (time's `subsample_equiv_due_to_pad`)
    phi_f = {0: [v for k, v in self.phi_f.items() if isinstance(k, int)]}
    # copy meta (non-integer keys hold filter metadata, not coefficients)
    for k, v in self.phi_f.items():
        if not isinstance(k, int):
            phi_f[k] = v

    # maximum trim amount, bounded by both scale headroom and pad headroom
    diff = min(max(self.J) - self.log2_T, self.J_pad - self.N_scale)
    if diff > 0:
        for trim_tm in range(1, diff + 1):
            # subsample in Fourier <-> trim in time
            phi_f[trim_tm] = [v[::2**trim_tm] for v in phi_f[0]]
    self.phi_f = phi_f

    # adjust padding: recompute unpad indices for each trim level
    ind_start = {0: {k: v for k, v in self.ind_start.items()}}
    ind_end = {0: {k: v for k, v in self.ind_end.items()}}
    if diff > 0:
        for trim_tm in range(1, diff + 1):
            pad_left, pad_right = compute_padding(self.J_pad - trim_tm,
                                                  self.N)
            start, end = compute_border_indices(
                self.log2_T, self.J, pad_left, pad_left + self.N)
            ind_start[trim_tm] = start
            ind_end[trim_tm] = end
    self.ind_start, self.ind_end = ind_start, ind_end
def meta(self):
    """Get meta information on the transform.

    Delegates to the static `compute_meta_jtfs()` with this object's
    parameters.

    Returns
    -------
    meta : dictionary
        See `help(wavespin.scattering1d.utils.compute_meta_jtfs)`.
    """
    args = (self.J_pad, self.J, self.Q, self.T, self.r_psi, self.sigma0,
            self.average, self.average_global, self.average_global_phi,
            self.oversampling, self.out_exclude, self.paths_exclude,
            self.scf)
    return compute_meta_jtfs(*args)
@property
def fr_attributes(self):
"""Exposes `scf`'s attributes via main object."""
return ('J_fr', 'Q_fr', 'N_frs', 'N_frs_max', 'N_frs_min',
'N_fr_scales_max', 'N_fr_scales_min', 'scale_diffs', 'psi_ids',
'J_pad_frs', 'J_pad_frs_max', 'J_pad_frs_max_init',
'average_fr', 'average_fr_global', 'aligned', 'oversampling_fr',
'F', 'log2_F', 'max_order_fr', 'max_pad_factor_fr', 'out_3D',
'sampling_filters_fr', 'sampling_psi_fr', 'sampling_phi_fr',
'phi_f_fr', 'psi1_f_fr_up', 'psi1_f_fr_dn')
@property
def default_kwargs(self):
return deepcopy(TimeFrequencyScatteringBase1D.DEFAULT_KWARGS)
def __getattr__(self, name):
# access key attributes via frequential class
# only called when default attribute lookup fails
# `hasattr` in case called from Scattering1D
if name in self.fr_attributes and hasattr(self, 'scf'):
return getattr(self.scf, name)
raise AttributeError(f"'{type(self).__name__}' object has no "
f"attribute '{name}'") # standard attribute error
def handle_paths_exclude(self):
    """Normalize and validate `self.paths_exclude` in place.

    - Ensures `paths_exclude` is dict and doesn't have unsupported keys
    - Ensures the provided n and j aren't out of bounds
    - Handles negative indexing
    - Handles `key: int` (expected `key: list[int]`)
    - "Converts" from j to n (fills all 'n' that have the specified 'j')

    Raises
    ------
    ValueError
        If `paths_exclude` is neither None nor dict, if a value isn't
        int or list-like of int, or if a `j` exceeds the maximum.
    """
    supported = {'n2', 'n1_fr', 'j2', 'j1_fr'}
    # `None` -> empty lists for every supported key, nothing else to do
    if self.paths_exclude is None:
        self.paths_exclude = {nm: [] for nm in supported}
        return
    elif not isinstance(self.paths_exclude, dict):
        raise ValueError("`paths_exclude` must be dict, got %s" % type(
            self.paths_exclude))

    # filterbanks used for negative-index resolution per key
    psis = {'n2': self.psi2_f, 'n1_fr': self.psi1_f_fr_up}

    # fill what's missing as we can't change size of dict during iteration
    for nm in supported:
        if nm not in self.paths_exclude:
            self.paths_exclude[nm] = []

    # iterate n's first to avoid duplicate j2=0 warnings and appending
    # to integer values (if user provided them)
    for p_name in ('n2', 'n1_fr', 'j2', 'j1_fr'):
        # ensure all keys are functional
        assert p_name in supported, (p_name, supported)
        # ensure list (a lone int becomes a one-element list)
        if isinstance(self.paths_exclude[p_name], int):
            self.paths_exclude[p_name] = [self.paths_exclude[p_name]]
        else:
            try:
                self.paths_exclude[p_name] = list(self.paths_exclude[p_name])
            except:
                raise ValueError(("`paths_exclude` values must be list[int] "
                                  "or int, got paths_exclude['{}'] type: {}"
                                  ).format(p_name,
                                           type(self.paths_exclude[p_name])))

        # n2, n1_fr ######################################################
        if p_name[0] == 'n':
            for i, n in enumerate(self.paths_exclude[p_name]):
                # handle negative (Python-style indexing from the end)
                if n < 0:
                    self.paths_exclude[p_name][i] = len(psis[p_name]) + n

                # warn if 'n2' already excluded (j2==0 paths never compute)
                if p_name == 'n2':
                    n = self.paths_exclude[p_name][i]
                    n_j2_0 = [n2 for n2 in range(len(self.psi2_f))
                              if self.psi2_f[n2]['j'] == 0]
                    if n in n_j2_0:
                        warnings.warn(
                            ("`paths_exclude['n2']` includes `{}`, which "
                             "is already excluded (alongside {}) per "
                             "having j2==0."
                             ).format(n, ', '.join(map(str, n_j2_0))))
        # j2, j1_fr ######################################################
        elif p_name[0] == 'j':
            for i, j in enumerate(self.paths_exclude[p_name]):
                # fetch all j present in the relevant filterbank
                if p_name == 'j2':
                    j_all = {p['j'] for p in self.psi2_f}
                elif p_name == 'j1_fr':
                    j_all = set(self.psi1_f_fr_up['j'][0])

                # handle negative (relative to maximum j)
                if j < 0:
                    j = max(j_all) + j
                # forbid out of bounds
                if j > max(j_all):
                    raise ValueError(("`paths_exclude` exceeded maximum {}: "
                                      "{} > {}\nTo specify max j, use `-1`"
                                      ).format(p_name, j, max(j_all)))
                # warn if 'j2' already excluded
                elif p_name == 'j2' and j == 0:
                    warnings.warn(("`paths_exclude['j2']` includes `0`, "
                                   "which is already excluded."))

                # convert to n ###########################################
                # fetch all n that have the specified j
                if p_name == 'j2':
                    n_j_all = [n2 for n2, p in enumerate(self.psi2_f)
                               if p['j'] == j]
                elif p_name == 'j1_fr':
                    n_j_all = [n1_fr for n1_fr in
                               range(len(self.psi1_f_fr_up[0]))
                               if self.psi1_f_fr_up['j'][0][n1_fr] == j]
                # append if not already present
                n_name = 'n2' if p_name == 'j2' else 'n1_fr'
                for n_j in n_j_all:
                    if n_j not in self.paths_exclude[n_name]:
                        self.paths_exclude[n_name].append(n_j)
# docs ###################################################################
@classmethod
def _document(cls):
    """Render `cls.__doc__` and `cls.scattering.__doc__` from the JTFS
    doc templates."""
    doc_fields = dict(
        frontend_paragraph=cls._doc_frontend_paragraph,
        alias_call=cls._doc_alias_call,
        parameters=cls._doc_params,
        attributes=cls._doc_attrs,
        sample=cls._doc_sample.format(shape=cls._doc_shape),
        terminology=cls._terminology,
    )
    cls.__doc__ = TimeFrequencyScatteringBase1D._doc_class.format(
        **doc_fields)
    cls.scattering.__doc__ = (
        TimeFrequencyScatteringBase1D._doc_scattering.format(
            array=cls._doc_array,
            n=cls._doc_array_n))
def output_size(self):
raise NotImplementedError("Not implemented for JTFS.")
def create_filters(self):
raise NotImplementedError("Implemented in `_FrequencyScatteringBase`.")
_doc_class = \
r"""
The 1D Joint Time-Frequency Scattering transform.
JTFS builds on time scattering by convolving first order coefficients
with joint 2D wavelets along time and frequency, increasing
discriminability while preserving time-shift invariance and time-warp
stability. Invariance to frequency transposition can be imposed via
frequential averaging, while preserving sensitivity to
frequency-dependent time shifts.
Joint wavelets are defined separably in time and frequency and permit fast
separable convolution. Convolutions are followed by complex modulus and
optionally averaging.
The JTFS of a 1D signal :math:`x(t)` may be written as
$S_J x = [S_J^{{(0)}} x, S_J^{{(1)}} x, S_J^{{(2)}} x]$
where
$S_{{J, J_{{fr}}}}^{{(0)}} x(t) = x \star \phi_T(t),$
$S_{{J, J_{{fr}}}}^{{(1)}} x(t, \lambda) =
|x \star \psi_\lambda^{{(1)}}| \star \phi_T,$ and
$S_{{J, J_{{fr}}}}^{{(2)}} x(t, \lambda, \mu, l, s) =
||x \star \psi_\lambda^{{(1)}}| \star \Psi_{{\mu, l, s}}|
\star \Phi_{{T, F}}.$
$\Psi_{{\mu, l, s}}$ comprises five kinds of joint wavelets:
$\Psi_{{\mu, l, +1}}(t, \lambda) =
\psi_\mu^{{(2)}}(t) \psi_{{l, s}}(+\lambda)$
spin up bandpass
$\Psi_{{\mu, l, -1}}(t, \lambda) =
\psi_\mu^{{(2)}}(t) \psi_{{l, s}}(-\lambda)$
spin down bandpass
$\Psi_{{\mu, -\infty, 0}}(t, \lambda) =
\psi_\mu^{{(2)}}(t) \phi_F(\lambda)$
temporal bandpass, frequential lowpass
$\Psi_{{-\infty, l, 0}}(t, \lambda) =
\phi_T(t) \psi_{{l, s}}(\lambda)$
temporal lowpass, frequential bandpass
$\Psi_{{-\infty, -\infty, 0}}(t, \lambda)
= \phi_T(t) \phi_F(\lambda)$
joint lowpass
and $\Phi_{{T, F}}$ optionally does temporal and/or frequential averaging:
$\Phi_{{T, F}}(t, \lambda) = \phi_T(t) \phi_F(\lambda)$
Above, :math:`\star` denotes convolution in time and/or frequency. The
filters $\psi_\lambda^{{(1)}}(t)$ and $\psi_\mu^{{(2)}}(t)$ are analytic
wavelets with center frequencies $\lambda$ and $\mu$, while
$\phi_T(t)$ is a real lowpass filter centered at the zero frequency.
$\psi_{{l, s}}(+\lambda)$ is like $\psi_\lambda^{{(1)}}(t)$ but with
its own parameters (center frequency, support, etc), and an anti-analytic
complement (spin up is analytic).
Filters are built at initialization. While the wavelets are fixed, other
parameters may be changed after the object is created, such as `out_type`.
{frontend_paragraph}
Example
-------
::
# Set the parameters of the scattering transform.
J = 6
N = 2 ** 13
Q = 8
# Generate a sample signal.
x = {sample}
# Define a `TimeFrequencyScattering1D` object.
jtfs = TimeFrequencyScattering1D(J, N, Q)
# Calculate the scattering transform.
Sx = jtfs(x)
# Equivalently, use the alias.
Sx = S{alias_call}(x)
Above, the length of the signal is :math:`N = 2^{{13}} = 8192`, while the
maximum scale of the scattering transform is set to :math:`2^J = 2^6 =
64`. The time-frequency resolution of the first-order wavelets
:math:`\psi_\lambda^{{(1)}}(t)` is set to `Q = 8` wavelets per octave.
The second-order wavelets :math:`\psi_\mu^{{(2)}}(t)` have one wavelet
per octave by default, but can be set like `Q = (8, 2)`. Internally,
`J_fr` and `Q_fr`, the frequential variants of `J` and `Q`, are defaulted,
but can be specified as well.
For further description and visuals, refer to:
- https://dsp.stackexchange.com/a/78623/50076
- https://dsp.stackexchange.com/a/78625/50076
{parameters}
{attributes}
{terminology}
"""
_doc_params = \
r"""
Parameters
----------
J, shape, T, average, oversampling, pad_mode :
See `help(wavespin.scattering1d.Scattering1D)`.
Unlike in time scattering, `T` plays a role even if `average=False`,
to compute `phi_t` pairs.
J : int / tuple[int]
(Extended docs for JTFS)
Greater `J1` extends time-warp stability to lower frequencies, and
other desired properties, as greater portion of the transform is CQT
(fixed `xi` to `sigma` ratio and both exponentially spaced, as opposed
to fixed `sigma` and linearly spaced `xi`). The number of CQT rows
is approx `(J1 - 1)*Q1` (last octave is non-CQT), so the ratio of CQT
to non-CQT is `(J1 - 1)/J1`, which is greater if `J1` is greater.
Q : int / tuple[int]
`(Q1, Q2)`, where `Q2=1` if `Q` is int. `Q1` is the number of
first-order wavelets per octave, and `Q2` the second-order.
- `Q1`, together with `J`, determines `N_frs_max` and `N_frs`,
or length of inputs to frequential scattering.
- `Q2`, together with `J`, determines `N_frs` (via the `j2 >= j1`
criterion), and total number of joint slices.
- Greater `Q2` values better capture temporal AM modulations (AMM)
of multiple rates. Suited for inputs of multirate or intricate AM.
`Q2=2` is in close correspondence with the mammalian auditory
cortex: https://asa.scitation.org/doi/full/10.1121/1.1945807
2 or 1 should work for most purposes.
- Greater `Q` also corresponds to greater scale for all wavelets.
J_fr : int
Controls the maximum log-scale of frequential scattering in joint
scattering transform, and number of octaves of frequential filters.
There are approx. `Q_fr` wavelets per octave, and bandwidth halves
with each octave. Hence, largest scale wavelet is about `2**J_fr`
larger than smallest scale wavelet.
Default is determined at instantiation from longest frequential row
in frequential scattering, set to `log2(nextpow2(N_frs_max)) - 2`,
i.e. maximum possible minus 2, but no less than 3, and no more than
max.
Q_fr : int
Number of wavelets per octave for frequential scattering.
Greater values better capture quefrential variations of multiple rates
- that is, variations and structures along frequency axis of the
wavelet transform's 2D time-frequency plane. Suited for inputs of many
frequencies or intricate AM-FM variations. 2 or 1 should work for
most purposes.
F : int / str['global'] / None
Temporal support of frequential low-pass filter, controlling amount of
imposed frequency transposition invariance and maximum frequential
subsampling. Defaults to `Q1`, i.e. one octave.
- If `'global'`, sets to maximum possible `F` based on `N_frs_max`.
- Used even with `average_fr=False` (see its docs); this is likewise
true of `T` for `phi_t * phi_f` and `phi_t * psi_f` pairs.
average_fr : bool (default False)
Whether to average (lowpass) along frequency axis.
If `False`, `phi_t * phi_f` and `psi_t * phi_f` pairs are still
computed.
out_type : str, optional
Affects output format (but not how coefficients are computed).
See `help(TimeFrequencyScattering1D.scattering)` for further info.
- 'list': coeffs are packed in a list of dictionaries, each dict
storing meta info, and output tensor keyed by `'coef.`.
- 'array': concatenated along slices (`out_3D=True`) or mixed
slice-frequency dimension (`out_3D=False`). Both require
`average=True` (and `out_3D=True` additionally
`average_fr=True`).
- 'dict:list' || 'dict:array': same as 'array' and 'list', except
coefficients will not be concatenated across pairs - e.g.
tensors from `'S1'` will be kept separate from those from
`'phi_t * psi_f'`.
- See `out_3D` for all behavior controlled by `out_3D`, and
`aligned` for its behavior and interactions with `out_3D`.
kwargs : dict
Keyword arguments controlling advanced configurations.
See `help(TimeFrequencyScattering1D.SUPPORTED_KWARGS)`.
These args are documented below.
implementation : int / None / dict
Preset configuration to use. Overrides the following parameters:
- `average_fr, aligned, out_3D, sampling_filters_fr, out_type`
Defaults to `None`, and any `None` argument above will default to
that of `TimeFrequencyScatteringBase1D.DEFAULT_KWARGS`.
See `help(wavespin.toolkit.pack_coeffs_jtfs)` for further information.
**Implementations:**
1: Standard for 1D convs. `(n1_fr * n2 * n1, t)`.
- average_fr = False
- aligned = False
- out_3D = False
- sampling_psi_fr = 'exclude'
- sampling_phi_fr = 'resample'
2: Standard for 2D convs. `(n1_fr * n2, n1, t)`.
- average_fr = True
- aligned = True
- out_3D = True
- sampling_psi_fr = 'exclude'
- sampling_phi_fr = 'resample'
3: Standard for 3D/4D convs. `(n1_fr, n2, n1, t)`. [2] but
- out_structure = 3
4: Efficient for 2D convs. [2] but
- aligned = False
- sampling_phi_fr = 'recalibrate'
5: Efficient for 3D convs. [3] but
- aligned = False
- sampling_psi_fr = 'recalibrate'
- sampling_phi_fr = 'recalibrate'
`'exclude'` in `sampling_psi_fr` can be replaced with `'resample'`,
which yields significantly more coefficients and doesn't lose
information (which `'exclude'` strives to minimize), but is slower
and the coefficients are mostly "synthetic zeros" and uninformative.
`out_structure` refers to packing output coefficients via
`pack_coeffs_jtfs(..., out_structure)`. This zero-pads and reshapes
coefficients, but does not affect their values or computation in any
way. (Thus, 3==2 except for shape). Requires `out_type` 'dict:list'
(default) or 'dict:array'; if 'dict:array' is passed, will use it
instead.
`5` also makes sense with `sampling_phi_fr = 'resample'` and small `F`
(small enough to let `J_pad_frs` drop below max), but the argument
will only set `'recalibrate'`.
aligned : bool / None
Defaults to True if `sampling_filters_fr != 'recalibrate'` and
`out_3D=True`.
If True, rows of joint slices index to same frequency for all slices.
E.g. `S_2[3][5]` and `S_2[4][5]` (fifth row of third and fourth joint
slices) correspond to same frequency. With `aligned=True`:
- `out_3D=True`: all slices are zero-padded to have same number of
rows. Earliest (low `n2`, i.e. high second-order freq) slices are
likely to be mostly zero per `psi2` convolving with minority of
first-order coefficients.
- `out_3D=False`: all slices are padded by minimal amount needed to
avert boundary effects.
- `average_fr=True`: number of output frequency rows will vary
across slices but be same *per `psi2_f`*.
- `average_fr=False`: number of rows will vary across and within
slices (`psi1_f_fr_up`-to-`psi1_f_fr_up`, and down).
For any config, `aligned=True` enforces same total frequential stride
for all slices, while `aligned=False` uses stride that maximizes
information richness and density.
See "Compute logic: stride, padding" in `core`, specifically
'recalibrate'
Note: `sampling_psi_fr = 'recalibrate'` breaks global alignment per
shifting `xi_frs`, but preserves it on per-`N_fr_scale` basis.
**Illustration**:
Intended usage is `aligned=True` && `sampling_filters_fr='resample'`
and `aligned=False` && `sampling_filters_fr='recalibrate'`. Below
example assumes these.
`x` == zero; `0, 4, ...` == indices of actual (nonpadded) data.
That is, `x` means the convolution kernel (wavelet or lowpass) is
centered in the padded region and contains less (or no) information,
whereas `4 ` centers at `input[4]`. And `input` is `U1`, so the
numbers are indexing `xi1` (i.e. are `n1`).
::
data -> padded
16 -> 128
64 -> 128
False:
[0, 4, 8, 16] # stride 4
[0, 16, 32, 48] # stride 16
True:
[0, x, x, x] # stride 16
[0, 16, 32, 48] # stride 16
`False` is more information rich, containing fewer `x`. Further,
with `out_3D=False`, it allows `stride_fr > log2_F`, making it more
information dense
(same info with fewer datapoints <=> non-oversampled).
In terms of unpadding with `out_3D=True`:
- `aligned=True`: we always have fr stride == `log2_F`, with
which we index `ind_start_fr_max` and `ind_end_fr_max`
(i.e. take maximum of all unpads across `n2` from this factor
and reuse it across all other `n2`).
- `aligned=False`: decided from `N_fr_scales_max` case, where we
compute `unpad_len_common_at_max_fr_stride`. Other `N_fr_scales`
use that quantity to compute `min_stride_to_unpad_like_max`.
See "Compute logic: stride, padding" in `core`, specifically
'recalibrate'.
The only exception is with `average_fr_global_phi and not average_fr`:
spinned pairs will have zero stride, but `phi_f` pairs will have max.
out_3D : bool (default False)
`True` (requires `average_fr=True`) adjusts frequential scattering
to enable concatenation along joint slices dimension, as opposed to
flattening (mixing slices and frequencies):
- `False` will unpad freq by exact amounts for each joint slice,
whereas `True` will unpad by minimum amount common to all
slices at a given subsampling factor to enable concatenation.
See `scf_compute_padding_fr()`.
- See `aligned` for its interactions with `out_3D` (also below).
Both `True` and `False` can still be concatenated into the 'true' JTFS
4D structure; see `help(wavespin.toolkit.pack_coeffs_jtfs)` for a
complete description. The difference is in how values are computed,
especially near boundaries. More importantly, `True` enforces
`aligned=True` on *per-`n2`* basis, enabling 3D convs even with
`aligned=False`.
`aligned` and `out_3D`
----------------------
From an information/classification standpoint,
- `True` is more information-rich. The 1D equivalent case is
unpadding by 3, instead of by 6 and then zero-padding by 3: same
final length, but former fills gaps with partial convolutions
where latter fills with zeros.
- `False` is the "latter" case.
We emphasize the above distinction. `out_3D=True` && `aligned=True`
imposes a large compute overhead by padding all `N_frs` maximally.
If a given `N_fr` is treated as a complete input, then unpadding
anything more than `N_fr/stride` includes convolutions from completely
outside of this input, which we never do elsewhere.
- However, we note, if `N_fr=20` for `n2=2` and `N_frs_max=100`,
what this really says is, we *expect* the 80 lowest frequency rows
to yield negligible energy after convolving with `psi2_f`.
That is, zeros (i.e. padding) are the *true* continuation of the
input (hence why 'conj-reflect-zero'), and hence, unpadding by
more than `N_fr/stride` is actually within bounds.
- Hence, unpadding by `N_fr/stride` and then re-padding (i.e.
treating `N_fr` as a complete input) is actually a distortion and
is incorrect.
Namely, the complete scattering, without any
shortcuts/optimizations on stride or padding, is consistent with
unpadding `> N_fr/stride`.
At the same time, depending on our feature goals, especially if
slices are processed independently, such distortion might be
preferable to avoid air-packing (see "Illustration" in `aligned`).
- The described re-padding happens with `aligned=True` &&
`out_3D=False` packed into a 3D/4D tensor; even without
re-padding, this config tosses out valid outputs
(unpads to `N_fr/stride`), though less informative ones.
sampling_filters_fr : str / tuple[str]
Controls filter properties for frequential input lengths (`N_frs`)
below maximum.
- 'resample': preserve physical dimensionality
          (center frequency, width) at every length (trimming in time
domain).
E.g. `psi = psi_fn(N/2) == psi_fn(N)[N/4:-N/4]`.
- 'recalibrate': recalibrate filters to each length.
- widths (in time): widest filter is halved in width, narrowest is
kept unchanged, and other widths are re-distributed from the
new minimum to same maximum.
- center frequencies: all redistribute between new min and max.
New min is set as `2 / new_length`
(old min was `2 / max_length`).
New max is set by halving distance between old max and 0.5
(greatest possible), e.g. 0.44 -> 0.47, then 0.47 -> 0.485, etc.
- 'exclude': same as 'resample' except filters wider than
`widest / 2` are excluded. (and `widest / 4` for next
`N_fr_scales`, etc).
Tuple can set separately `(sampling_psi_fr, sampling_phi_fr)`, else
both set to same value.
From an information/classification standpoint:
- 'resample' enforces freq invariance imposed by `phi_f_fr` and
physical scale of extracted modulations by `psi1_f_fr_up`
(& down). This is consistent with scattering theory and is the
standard used in existing applied literature.
- 'recalibrate' remedies a problem with 'resample'. 'resample'
calibrates all filters relative to longest input; when the
shortest input is very different in comparison, it makes most
filters appear lowpass-ish. In contrast, recalibration enables
better exploitation of fine structure over the smaller interval
(which is the main motivation behind wavelets,
a "multi-scale zoom".)
- 'exclude' circumvents the problem by simply excluding wide
filters. 'exclude' is simply a subset of 'resample', preserving
all center frequencies and widths - a 3D/4D coefficient packing
will zero-pad to compensate
(see `help(wavespin.toolkit.pack_coeffs_jtfs)`).
Note: `sampling_phi_fr = 'exclude'` will re-set to `'resample'`, as
`'exclude'` isn't a valid option (there must exist a lowpass for every
fr input length).
analytic_fr : bool (default True)
If True, will enforce strict analyticity/anti-analyticity:
- zero negative frequencies for temporal and spin up bandpasses
- zero positive frequencies for spin down bandpasses
- halve the Nyquist bin for both spins
`True` improves FDTS-discriminability, especially for
`r_psi > sqrt(.5)`, but may slightly worsen wavelet time decay.
F_kind : str['average', 'decimate']
Kind of lowpass filter to use for spinned coefficients:
- 'average': Gaussian, standard for scattering. Imposes time-shift
invariance.
- 'decimate': Hamming-windowed sinc (~brickwall in freq domain).
Decimates coefficients: used for unaliased downsampling,
without imposing invariance.
- Preserves more information along frequency than 'average'.
- Ignores padding specifications and pads its own way
(future TODO)
- Corrects negative outputs via absolute value; the negatives
are possible since the kernel contains negatives, but are in
minority and are small in magnitude.
Does not interact with other parameters in any way - that is, won't
affect stride, padding, etc - only changes the lowpass filter for
spinned pairs. `phi_f` pairs will still use Gaussian, and `phi_f_fr`
remains Gaussian but is used only for `phi_f` pairs. Has no effect
with `average_fr=False`.
'decimate' is an experimental but tested feature:
- 'torch' backend:
- will assume GPU use and move built filters to GPU
- lacks `register_filters` support, so filters are invisible
to `nn.Module`
- filters are built dynamically, on per-requested basis. The first
run is slower than the rest as a result
- `oversampling_fr != 0` is not supported
- is differentiable
Info preservation
-----------------
'decimate'
- 1) Increases amount of information preserved.
- Its cutoff spills over the alias threshold, and there's
notable amount of aliasing (subject to future improvement).
- Its main lobe is narrower than Gauss's, hence better
preserving component separation along frequency, at expense
of longer tails.
- Limited reconstruction experiments did not reveal a definitive
advantage over Gaussian: either won depending on transform and
optimizer configurations. Further study is required.
- 2) Reduces distortion of preserved information.
- The Gaussian changes relative scalings of bins, progressively
attenuating higher frequencies, whereas windowed sinc is ~flat
in frequency until reaching cutoff (i.e. it copies input's
spectrum). As a result, Gaussian blurs, while sinc faithfully
captures the original.
- At the same time, sinc increases distortion per aliasing, but
the net effect is a benefit.
- 3) Increases distortion of preserved information.
- Due to notable aliasing. Amount of energy aliased is ~1/110 of
total energy, while for Kymatio's Gaussian, it's <1/1000000.
- Due to the time-domain kernel having negatives, which
sometimes outputs negatives for a non-negative input,
requiring correction.
- 2) benefits much more than 3) harms
2) is the main advantage and is the main motivation for 'decimate': we
want a smaller unaveraged output, that resembles the full original.
max_pad_factor_fr : int / None (default) / list[int], optional
`max_pad_factor` for frequential axis in frequential scattering.
- None: unrestricted; will pad as much as needed.
- list[int]: controls max padding for each `N_fr_scales`
separately, in reverse order (max to min).
- Values may not be such that they yield increasing
`J_pad_frs`
- If the list is insufficiently long (less than number of
scales), will extend list with the last provided value
(e.g. `[1, 2] -> [1, 2, 2, 2]`).
- Indexed by `scale_diff == N_fr_scales_max - N_fr_scales`
- int: will convert to list[int] of same value.
Specified values aren't guaranteed to be realized. They override some
padding values, but are overridden by others.
Overrides:
- Padding that lessens boundary effects and wavelet distortion
(`min_to_pad`).
Overridden by:
- `J_pad_frs_min_limit_due_to_phi`
- `J_pad_frs_min_limit_due_to_psi`
- Will not allow any `J_pad_fr > J_pad_frs_max_init`
- With `sampling_psi_fr = 'resample'`, will not allow `J_pad_fr`
that yields a pure sinusoid wavelet (raises `ValueError` in
`filter_bank.get_normalizing_factor`).
A limitation of `None` with`analytic=True` is,
`compute_minimum_support_to_pad` does not account for it.
pad_mode_fr : str['zero', 'conj-reflect-zero'] / function
Name of frequential padding mode to use, one of: 'zero',
'conj-reflect-zero'.
Or, function with signature `pad_fn_fr(x, pad_fr, scf, B)`;
see `_right_pad` in
`wavespin.scattering1d.core.timefrequency_scattering1d`.
If using `pad_mode = 'reflect'` and `average = True`, reflected
portions will be automatically conjugated before frequential
scattering to avoid spin cancellation. For same reason, there isn't
`pad_mode_fr = 'reflect'`.
'zero' is default only because it's faster; in general, if
`J_fr >= log2(N_frs_max) - 3`, 'conj-reflect-zero' should be
preferred.
See https://github.com/kymatio/kymatio/discussions/
752#discussioncomment-864234
Also, note that docs and comments tend to mention only `J, J_fr` and
`T, F`, but `Q, Q_fr` also significantly affect max scale: higher ->
greater max scale.
normalize_fr : str
See `normalize` in `help(wavespin.scattering1d.Scattering1D)`.
Applies to `psi1_f_fr_up`, `psi1_f_fr_dn`, `phi_f_fr`.
r_psi_fr : float
See `r_psi` in `help(wavespin.scattering1d.Scattering1D)`.
See `help(wavespin.scattering1d.utils.calibrate_scattering_filters)`.
oversampling_fr : int (default 0)
How much to oversample along frequency axis (with respect to
`2**J_fr`).
Also see `oversampling` in `Scattering1D`.
Has no effect if `average_fr_global=True`.
max_noncqt_fr : int / None / str['Q']
Maximum non-CQT rows (`U1` vectors) to include in frequential
scattering, i.e. rows derived from `not psi1_f[n1]['is_cqt']`.
- `0` means CQT-only; `3` means *up to* 3 rows (rather than
*at least*)
for any given `N_fr` (see `N_frs`).
- `None` means all non-CQT are permitted
- `'Q'` means up to `Q1//2` non-CQT are permitted
Non-CQT rows are sub-ideal for frequential scattering, as they violate
the basic assumption of convolution that the input is uniformly
spaced.
CQT rows are uniformly spaced in log-space, non-CQT in linear space,
so the two aren't directly compatible and form a discontinuity
boundary.
- This lowers FDTS discriminability, albeit not considerably.
- It also affects frequency transposition invariance and time-warp
stability, as a shift in log space is a shift by different amount
in linear (& fixed wavelet bandwidth) space. The extent is again
acceptable.
- At the same time, excluding such rows loses information.
- `max_noncqt_fr` can control this tradeoff, but in general, `None`
(the default) is recommended.
- Higher `J` (namely `J1`) increases the CQT portion (see `J`),
mediating aforementioned effects.
out_exclude : list/tuple[str] / None
Will exclude coefficients with these names from computation and output
(except for `S1`, which always computes but still excludes from
output).
All names (JTFS pairs, except 'S0', 'S1'):
- 'S0', 'S1', 'phi_t * phi_f', 'phi_t * psi_f', 'psi_t * phi_f',
'psi_t * psi_f_up', 'psi_t * psi_f_dn'
paths_exclude : dict[str: list[int]] / dict[str: int] / None
Will exclude coefficients with these paths from computation and
output.
Supported keys: 'n2', 'n1_fr', 'j2', 'j1_fr'. E.g.:
- {'n2': [2, 3, 5], 'n1_fr': [0, -1]}
- {'j2': [1], 'j1_fr': [3, 1]}
- {'n2': [0, 1], 'j2': [-1]}
Excluding `j2==1` paths yields greatest speedup, and is recommended
in compute-restricted settings, as they're the lowest energy paths
(i.e. generally least informative).
`dict[str: int]` will convert to `dict[str: list[int]`.
"""
_doc_attrs = \
r"""
Attributes
----------
scf : `_FrequencyScatteringBase`
Frequential scattering object, storing pertinent attributes and
filters. Temporal scattering's attributes are accessed directly via
`self`.
"scf" abbreviates "scattering frequency" (i.e. frequential
scattering).
N_frs : list[int]
List of lengths of frequential columns (i.e. numbers of frequential
rows) in joint scattering, indexed by `n2` (second-order temporal
wavelet idx).
E.g. `N_frs[3]==52` means 52 highest-frequency vectors from
first-order time scattering are fed to `psi2_f[3]` (effectively, a
multi-input network).
N_frs_max : int
`== max(N_frs)`.
N_frs_min : int
`== min(N_frs_realized)`
N_frs_realized: list[int]
`N_frs` without `0`s.
Unaffected by `paths_exclude` to allow `paths_exclude` to be
dynamically configurable.
N_frs_max_all : int
`== _n_psi1_f`. Used to compute `_J_pad_frs_fo` (unused quantity),
and `n_zeros` in `_pad_conj_reflect_zero` (`core/timefreq...`).
N_fr_scales : list[int]
`== nextpow2(N_frs)`. Filters are calibrated relative to these
(for 'exclude' & 'recalibrate' `sampling_psi_fr`).
N_fr_scales_max : int
`== max(N_fr_scales)`. Used to set `J_pad_frs_max` and
`J_pad_frs_max_init`.
- `J_fr` default is set using this value, and `J_fr` cannot
exceed it. If `F == 2**J_fr`, then `average_fr_global=True`.
- Used in `compute_J_pad_fr()` and `psi_fr_factory()`.
N_fr_scales_min : int
`== min(N_fr_scales)`.
Used in `scf._compute_J_pad_frs_min_limit_due_to_psi`.
N_fr_scales_unique : list[int]
`N_fr_scales` without duplicate entries.
scale_diffs : list[int]
`scale_diff == N_fr_scales_max - N_fr_scale`.
0-indexed surrogate for `N_fr_scale`, indexing multi-length logic
for building filterbanks and computing stride and padding.
scale_diffs_unique : list[int]
`scale_diffs` without duplicate entries.
scale_diff_max_recalibrate : int / None
Max permitted `scale_diff`, per `sampling_psi_fr='recalibrate'`
and `sigma_max_to_min_max_ratio`. Build terminates to avoid filters
more time-localized than the most time-localized original wavelet
(max sigma in freq domain), within set tolerance, as a quality check.
total_conv_stride_over_U1s : dict[int: list[int]]
Stores total strides for frequential scattering (`psi_f` pairs,
followed by `phi_f_fr`):
{scale_diff: [stride0, stride1, ...]} # list indexed by `n1_fr`
        `J_pad_frs` is built to accommodate stride.
See `help(scf.compute_stride_fr)`.
See "Compute logic: stride, padding" in
`core.timefrequency_scattering1d`.
`over_U1` seeks to emphasize that it is the stride over first order
coefficients.
total_conv_stride_over_U1s_phi : dict[int: int]
Stores strides for frequential scattering (`phi_f` pairs):
{scale_diff: stride}
Derives from `total_conv_stride_over_U1s`, differently depending on
`average_fr`, `aligned`, and `sampling_phi_fr`.
See "Stride, padding: `phi_f` pairs" in
`core.timefrequency_scattering1d`.
n1_fr_subsamples : dict[str: dict[int: list[int]]]
Stores strides for frequential scattering (`psi_f` pairs).
Accounts for both `j1_fr` and `log2_F_phi`, so subsampling won't alias
the lowpass.
{'spinned: {scale_diff: [...]},
'phi': {scale_diff: [...]}}
See `scf._compute_scale_and_stride_logic`.
log2_F_phis : dict[str: dict[int: list[int]]]
`log2_F`-equivalent - that is, maximum permitted subsampling, and
dyadic scale of invariance - of lowpass filters used for a given
pair, `N_fr_scale`, and `n1_fr`:
{'spinned: {scale_diff: [...]},
'phi': {scale_diff: [...]}}
Equals `log2_F` everywhere with `sampling_phi_fr='resample'`.
Is `None` for 'spinned' if
`not (average_fr and not average_fr_global)`
(since convolution with lowpass isn't used).
log2_F_phi_diffs : dict[str: dict[int: list[int]]]
`== log2_F - log2_F_phi`. See `log2_F_phis`.
unpad_len_common_at_max_fr_stride : int
Unpad length at `N_fr_scales_max`, with whatever frequential stride
happens to be there. Used when `out_3D=True` && `aligned=False` to set
unpad length for other `N_fr_scales`, via
`min_stride_to_unpad_like_max`.
See "Compute logic: stride, padding" in
`core.timefrequency_scattering1d`, specifically 'recalibrate'
phi_f_fr : dict[int: dict[int: list[tensor[float]]],
str: dict[int: dict[int: list[int]], float]]
Contains the frequential lowpass filter at all resolutions.
See `help(wavespin.scattering1d.filter_bank.phi_fr_factory)`.
psi1_f_fr_up : dict[int: list[tensor[float]],
str: dict[int: list[int/float]]]
List of dictionaries containing all frequential scattering filters
with "up" spin.
See `help(wavespin.scattering1d.filter_bank.psi_fr_factory)`.
psi1_f_fr_dn : dict[int: list[tensor[float]],
str: dict[int: list[int/float]]]
`psi1_f_fr_up`, but with "down" spin, forming a complementary pair.
psi_ids : dict[int: int]
See `help(wavespin.scattering1d.filter_bank_jtfs.psi_fr_factory)`.
psi_fr_params : dict[int:dict[str:list]]
Parameters used to build filterbanks for frequential scattering.
See `help(scf._compute_psi_fr_params)` and
`help(wavespin.scattering1d.filter_bank_jtfs.psi_fr_factory)`.
average_fr_global_phi : bool
True if `F == nextpow2(N_frs_max)`, i.e. `F` is maximum possible
and equivalent to global averaging, in which case lowpassing is
replaced by simple arithmetic mean.
If True, `sampling_phi_fr` has no effect.
In case of `average_fr==False`, controls scattering logic for
`phi_f` pairs.
average_fr_global : bool
True if `average_fr_global_phi and average_fr`. Same as
`average_fr_global_phi` if `average_fr==True`.
- In case of `average_fr==False`, controls scattering logic for
`psi_f` pairs.
- If `True`, `phi_fr` filters are never used (but are still
created).
- Results are very close to lowpassing w/ `F == 2**N_fr_scales_max`.
Unlike with such lowpassing, `psi_fr` filters are allowed to be
created at lower `J_pad_fr` than shortest `phi_fr` (which also is
where greatest deviation with `not average_fr_global` occurs).
log2_F : int
Equal to `log2(prevpow2(F))`; is the maximum frequential subsampling
factor if `average_fr=True` (otherwise that factor is up to `J_fr`).
J_pad_frs : list[int]
log2 of padding lengths of frequential columns in joint scattering
(column lengths given by `N_frs`). See `scf.compute_padding_fr()`.
J_pad_frs_max_init : int
Set as reference for computing other `J_pad_fr`.
Serves to create the initial frequential filterbank, and equates to
`J_pad_frs_max` with `sampling_psi_fr='resample'` &&
`sampling_phi_fr='resample'`. Namely, it is the maximum padding under
"standard" frequential scattering configurations.
J_pad_frs_max : int
`== max(J_pad_frs)`.
J_pad_frs_min : int
`== min(J_pad_frs)` (excluding -1).
J_pad_frs_min_limit_due_to_psi: int / None
Controls minimal padding.
Prevents severe filterbank distortions due to insufficient padding.
See docs for `_compute_J_pad_frs_min_limit_due_to_psi`,
in `filter_bank_jtfs.py`.
_J_pad_fr_fo : int
Padding for the `phi_t` pairs. Used only in edge case testing,
and to warn of an edge case handling in `core`.
`phi_t` pairs reuse spinned pairs' largest padding, yet the `N_fr` of
`phi_t` pairs is always greater than or equal to that of spinned's,
which at times otherwise yields greater padding.
This is done to simplify implementation, with minimal or negligible
effect on `phi_t` pairs.
`core` edge case: `max_pad_factor_fr=0` with
`N_fr_scales_max < N_fr_scale_fo` means the padded length will be
less than `_n_psi1_f`. Accounting for this requires changing
`J_pad_frs_max_init`, yet `compute_padding_fr` doesn't reuse
`J_pad_frs_max_init`, hence accounting for this is complicated and
unworthwhile. Instead, will only include up to `2**N_fr_scales_max`
rows from `U1`.
min_to_pad_fr_max : int
`min_to_pad` from `compute_minimum_support_to_pad(N=N_frs_max)`.
Used in computing `J_pad_fr`. See `scf.compute_J_pad_fr()`.
unrestricted_pad_fr : bool
`True` if `max_pad_factor is None`. Affects padding computation and
filter creation:
- `phi_f_fr` w/ `sampling_phi_fr=='resample'`:
- `True`: will limit the shortest `phi_f_fr` to avoid distorting
its time-domain shape
- `False`: will compute `phi_f_fr` at every `J_pad_fr`
- `psi_f_fr` w/ `sampling_psi_fr=='resample'`: same as for phi
subsample_equiv_relative_to_max_pad_init : int
Amount of *equivalent subsampling* of frequential padding relative to
`J_pad_frs_max_init`, indexed by `n2`.
See `help(scf.compute_padding_fr())`.
scale_diff_max_to_build: int / None
Largest `scale_diff` (smallest `N_fr_scale`) for which a filterbank
will be built; lesser `N_fr_scales` will reuse it. Used alongside
other attributes to control said building, also as an additional
sanity and clarity layer.
Prevents severe filterbank distortions due to insufficient padding.
- Affected by `sampling_psi_fr`, padding, and filterbank param
choices.
See docs for `_compute_J_pad_frs_min_limit_due_to_psi`,
in `filter_bank_jtfs.py`.
- With 'recalibrate', `scale_diff_max_to_build=None` if build didn't
terminate per `sigma_max_to_min_max_ratio`.
sigma_max_to_min_max_ratio : float >= 1
Largest permitted `max(sigma) / min(sigma)`. Used with 'recalibrate'
`sampling_psi_fr` to restrict how large the smallest sigma can get.
Worst cases (high `subsample_equiv_due_to_pad`):
- A value of `< 1` means a lower center frequency will have
the narrowest temporal width, which is undesired.
- A value of `1` means all center frequencies will have the same
temporal width, which is undesired.
- The `1.2` default was chosen arbitrarily as a seemingly good
compromise between not overly restricting sigma and closeness to
`1`.
_n_phi_f_fr : int
`== len(phi_f_fr)`.
Used for setting `max_subsample_equiv_before_phi_fr`.
pad_left_fr : int
Amount of padding to left of frequential columns
(or top of joint matrix). Unused in implementation; can be used
by user if `pad_mode` is a function.
pad_right_fr : int
Amount of padding to right of frequential columns
(or bottom of joint matrix).
ind_start_fr : list[list[int]]
Frequential unpadding start index, indexed by `n2` (`N_fr`) and
stride:
`ind_start_fr[n2][stride]`
See `help(scf.compute_padding_fr)` and `scf.compute_unpadding_fr`.
ind_end_fr : list[list[int]]
Frequential unpadding end index. See `ind_start_fr`.
ind_start_fr_max : list[int]
Frequential unpadding start index common to all `N_frs` for
`out_3D=True`, determined from `N_frs_max` case, indexed by stride:
`ind_start_fr_max[stride]`
See `ind_start_fr`.
ind_end_fr_max : list[int]
Frequential unpadding end index common to all `N_frs` for
`out_3D=True`.
See `ind_start_fr_max`.
r_psi : tuple[float]
Temporal redundancy, first- and second-order.
r_psi_fr : float
Frequential redundancy.
max_order_fr : int == 1
Frequential scattering's `max_order`. Unused.
"""
_terminology = \
r"""
    Terminology
----------
FDTS :
Frequency-Dependent Time Shift. JTFS's main purpose is to detect
these. Up spin wavelet resonates with up chirp (rising; right-shifts
with increasing freq), down spin with down chirp (left-shifts with
increasing freq).
In convolution (cross-correlation with flipped kernel), the roles are
reversed; the implementation will yield high values for up chirp
from down spin.
Frequency transposition :
i.e. frequency shift, except in context of wavelet transform (hence
scattering) it means log-frequency shift.
n1_fr_subsample, n2 : int, int
See `help(wavespin.scattering1d.core.timefrequency_scattering1d)`.
Not attributes. Summary:
- n1_fr_subsample: subsampling done after convolving with `psi_fr`
- n2: index of temporal wavelet in joint scattering, like
`psi2[n2]`.
"""
_doc_scattering = \
"""
Apply the Joint Time-Frequency Scattering transform.
Given an input `{array}` of size `(B, N)`, where `B` is the batch size
and `N` is the length of the individual signals, computes its JTFS.
Output format is specified by `out_type`: a list, array, tuple, or
dictionary of lists or arrays with keys specifying coefficient names as
follows:
::
{{'S0': ..., # (time) zeroth order
'S1': ..., # (time) first order
'phi_t * phi_f': ..., # (joint) joint lowpass
'phi_t * psi_f': ..., # (joint) time lowpass (w/ freq bandpass)
'psi_t * phi_f': ..., # (joint) freq lowpass (w/ time bandpass)
'psi_t * psi_f_up': ..., # (joint) spin up
'psi_t * psi_f_dn': ..., # (joint) spin down
}}
Coefficient structure depends on `average, average_fr, aligned, out_3D`,
and `sampling_filters_fr`. See `help(wavespin.toolkit.pack_coeffs_jtfs)`
for a complete description.
Parameters
----------
x : {array}
An input `{array}` of size `(B, N)` or `(N,)`.
Returns
-------
S : dict[tensor/list] / tensor/list / tuple of former two
See above.
"""
# Names exported by `from <module> import *`: the two frontend base classes.
__all__ = ['ScatteringBase1D', 'TimeFrequencyScatteringBase1D']
| [
"math.sqrt",
"math.log2",
"numpy.isnan",
"copy.deepcopy",
"warnings.warn",
"numpy.cumsum"
] | [((1377, 1391), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (1386, 1391), False, 'import math\n'), ((31725, 31736), 'copy.deepcopy', 'deepcopy', (['I'], {}), '(I)\n', (31733, 31736), False, 'from copy import deepcopy\n'), ((36594, 36645), 'numpy.cumsum', 'np.cumsum', (["[(not p['is_cqt']) for p in self.psi1_f]"], {}), "([(not p['is_cqt']) for p in self.psi1_f])\n", (36603, 36645), True, 'import numpy as np\n'), ((40685, 40739), 'copy.deepcopy', 'deepcopy', (['TimeFrequencyScatteringBase1D.DEFAULT_KWARGS'], {}), '(TimeFrequencyScatteringBase1D.DEFAULT_KWARGS)\n', (40693, 40739), False, 'from copy import deepcopy\n'), ((3171, 3188), 'math.log2', 'math.log2', (['self.N'], {}), '(self.N)\n', (3180, 3188), False, 'import math\n'), ((5411, 5428), 'math.log2', 'math.log2', (['self.T'], {}), '(self.T)\n', (5420, 5428), False, 'import math\n'), ((6103, 6137), 'math.log2', 'math.log2', (['(self.N + 2 * min_to_pad)'], {}), '(self.N + 2 * min_to_pad)\n', (6112, 6137), False, 'import math\n'), ((29339, 29353), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (29348, 29353), False, 'import math\n'), ((7251, 7277), 'numpy.isnan', 'np.isnan', (["meta['n'][-1][1]"], {}), "(meta['n'][-1][1])\n", (7259, 7277), True, 'import numpy as np\n'), ((36058, 36199), 'warnings.warn', 'warnings.warn', (['"""Failed to build meta; the implementation may be faulty. Try another configuration, or call `jtfs.meta()` to debug."""'], {}), "(\n 'Failed to build meta; the implementation may be faulty. 
Try another configuration, or call `jtfs.meta()` to debug.'\n )\n", (36071, 36199), False, 'import warnings\n'), ((37254, 37287), 'math.log2', 'math.log2', (['max_freq_nrows_at_2gt1'], {}), '(max_freq_nrows_at_2gt1)\n', (37263, 37287), False, 'import math\n'), ((45020, 45099), 'warnings.warn', 'warnings.warn', (['"""`paths_exclude[\'j2\']` includes `0`, which is already excluded."""'], {}), '("`paths_exclude[\'j2\']` includes `0`, which is already excluded.")\n', (45033, 45099), False, 'import warnings\n')] |
# -*- coding: utf-8 -*-
import unittest
from .. import distributions
from scipy.integrate import quad
import numpy as np
import scipy.stats
class test_distributions(unittest.TestCase):
def _check_pdfintegral(self, integral, integrale, theory):
integrale = max(integrale, 1e-5)
limits = integral + integrale * np.array([-1, 1])
self.assertTrue(limits[0] <= theory <= limits[1])
def quad(self, func, a, b):
return quad(func, a, b)
if b >= a:
return quad(func, a, b)
else:
return 0.0, 1e-16
def func(self, func, args=None):
if args:
return lambda x: func(x, *args)
else:
return func
def checkpdf(self, rv, qmin, qmax, xmin, xmax, xn, args=None):
# self.plot(rv,qmin,qmax,xmin,xmax,xn,args=args)
# return
pdf = self.func(rv.pdf, args=args)
cdf = self.func(rv.cdf, args=args)
ppf = self.func(rv.ppf, args=args)
integral, integrale = self.quad(pdf, qmin, qmax)
self._check_pdfintegral(integral, integrale, 1)
x = np.linspace(xmin, xmax, xn)
np.allclose(ppf(cdf(x)), x)
p = np.linspace(0, 1, xn)
# TODO: check RuntimeWarning
np.allclose(cdf(ppf(x)), x)
for x in np.linspace(xmin, xmax, xn):
integral, integrale = self.quad(pdf, qmin, x)
self._check_pdfintegral(integral, integrale, cdf(x))
def plot(self, rv, qmin, qmax, xmin, xmax, xn, args=None):
import matplotlib.pyplot as plt
pdf = self.func(rv.pdf, args=args)
cdf = self.func(rv.cdf, args=args)
ppf = self.func(rv.ppf, args=args)
x = np.linspace(xmin, xmax, xn)
plt.plot(x, pdf(x), "o-")
plt.plot(x, cdf(x), "o-")
plt.plot(x, [self.quad(pdf, qmin, xi)[0] for xi in x], "o-")
plt.show()
def test_pdf(self):
qmin, qmax = -np.inf, np.inf
xmin, xmax, xn = -10, 10, 50
rv = scipy.stats.norm
self.checkpdf(rv, qmin, qmax, xmin, xmax, xn)
# k = 1
# qmin,qmax = -k,k
# xmin,xmax,xn = -10,10,50
# rv = scipy.stats.truncnorm(a=qmin,b=qmax)
# self.checkpdf(rv,qmin,qmax,xmin,xmax,xn)
k = 1
qmin, qmax = -k, k
xmin, xmax, xn = -10, 10, 50
rv = distributions.limitednorm(k)
self.checkpdf(rv, qmin, qmax, xmin, xmax, xn)
def test_suite():
    """Assemble the suite containing every test of this module."""
    suite = unittest.TestSuite()
    suite.addTest(test_distributions("test_pdf"))
    return suite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
| [
"unittest.TestSuite",
"scipy.integrate.quad",
"numpy.array",
"numpy.linspace",
"sys.exit",
"unittest.TextTestRunner",
"matplotlib.pyplot.show"
] | [((2506, 2526), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (2524, 2526), False, 'import unittest\n'), ((2687, 2712), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (2710, 2712), False, 'import unittest\n'), ((458, 474), 'scipy.integrate.quad', 'quad', (['func', 'a', 'b'], {}), '(func, a, b)\n', (462, 474), False, 'from scipy.integrate import quad\n'), ((1110, 1137), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xn'], {}), '(xmin, xmax, xn)\n', (1121, 1137), True, 'import numpy as np\n'), ((1187, 1208), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'xn'], {}), '(0, 1, xn)\n', (1198, 1208), True, 'import numpy as np\n'), ((1300, 1327), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xn'], {}), '(xmin, xmax, xn)\n', (1311, 1327), True, 'import numpy as np\n'), ((1699, 1726), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xn'], {}), '(xmin, xmax, xn)\n', (1710, 1726), True, 'import numpy as np\n'), ((1872, 1882), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1880, 1882), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2780), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2777, 2780), False, 'import sys\n'), ((513, 529), 'scipy.integrate.quad', 'quad', (['func', 'a', 'b'], {}), '(func, a, b)\n', (517, 529), False, 'from scipy.integrate import quad\n'), ((334, 351), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (342, 351), True, 'import numpy as np\n')] |
# Exercise: read an image and convert it to grayscale with the formula
#   Gr = R*0.25 + G*0.65 + B*0.1
# Compute its histogram and find which gray level has the highest count.
# Plot the histogram. From the histogram, identify a threshold and
# binarize the image, then display the result.
import cv2
import numpy as np
from matplotlib import pyplot as plt

path = "arcoiris.jpg"
img = cv2.imread(path)
h, w = img.shape[:2]
cv2.imshow("IMG Original ", img)

# NOTE(review): `limiar` (the threshold picked from the histogram) is
# declared but never applied -- the threshold call below hard-codes 220 on
# the green channel. Confirm which was intended.
limiar = 150

# OpenCV loads images as BGR, so channel 0 is B, 1 is G, 2 is R:
# Gr = B*0.1 + G*0.65 + R*0.25, matching the formula above.
# (A dead `np.zeros` pre-allocation that was immediately overwritten
# has been removed.)
imgcinza = (img[..., 0]*0.1 + img[..., 1]*0.65 + img[..., 2]*0.25).astype('uint8')
cv2.imshow("Escala de Cinza ", imgcinza)

# 256-bin histogram of the grayscale image.
histograma = cv2.calcHist([imgcinza], [0], None, [256], [0, 256])
# Gray level with the highest count.
# (Renamed from `max`, which shadowed the builtin.)
peak_level = np.argmax(histograma)
print(peak_level)

# Binarize. NOTE(review): this thresholds the *green* channel at 220
# rather than `imgcinza` at `limiar` -- presumably deliberate; verify.
th, res = cv2.threshold(img[..., 1], 220, 255, cv2.THRESH_BINARY)
cv2.imshow("Limiarizado", res)

plt.plot(histograma, color='b')
plt.xlim([0, 255])
plt.show()
cv2.waitKey(0) | [
"cv2.calcHist",
"cv2.threshold",
"matplotlib.pyplot.plot",
"numpy.argmax",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"matplotlib.pyplot.xlim",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((464, 480), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (474, 480), False, 'import cv2\n'), ((502, 534), 'cv2.imshow', 'cv2.imshow', (['"""IMG Original """', 'img'], {}), "('IMG Original ', img)\n", (512, 534), False, 'import cv2\n'), ((559, 585), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.uint8'], {}), '((h, w), np.uint8)\n', (567, 585), True, 'import numpy as np\n'), ((666, 706), 'cv2.imshow', 'cv2.imshow', (['"""Escala de Cinza """', 'imgcinza'], {}), "('Escala de Cinza ', imgcinza)\n", (676, 706), False, 'import cv2\n'), ((720, 772), 'cv2.calcHist', 'cv2.calcHist', (['[imgcinza]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([imgcinza], [0], None, [256], [0, 256])\n', (732, 772), False, 'import cv2\n'), ((778, 799), 'numpy.argmax', 'np.argmax', (['histograma'], {}), '(histograma)\n', (787, 799), True, 'import numpy as np\n'), ((821, 876), 'cv2.threshold', 'cv2.threshold', (['img[..., 1]', '(220)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img[..., 1], 220, 255, cv2.THRESH_BINARY)\n', (834, 876), False, 'import cv2\n'), ((873, 903), 'cv2.imshow', 'cv2.imshow', (['"""Limiarizado"""', 'res'], {}), "('Limiarizado', res)\n", (883, 903), False, 'import cv2\n'), ((904, 935), 'matplotlib.pyplot.plot', 'plt.plot', (['histograma'], {'color': '"""b"""'}), "(histograma, color='b')\n", (912, 935), True, 'from matplotlib import pyplot as plt\n'), ((937, 955), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 255]'], {}), '([0, 255])\n', (945, 955), True, 'from matplotlib import pyplot as plt\n'), ((955, 965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (963, 965), True, 'from matplotlib import pyplot as plt\n'), ((966, 980), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (977, 980), False, 'import cv2\n')] |
import numpy as np
from dask import array as da
from typing import Tuple, List, Iterable
import pyqtgraph as pg
import skbeam.core.correlation as corr
from xicam.SAXS.patches.pyFAI import AzimuthalIntegrator
from xicam.core.intents import PlotIntent
from xicam.core import msg
from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, \
input_names, output_names, display_name, categories, intent
from ..utils import get_label_array, average_q_from_labels
@operation
@display_name('1-time Correlation')
@input_names('images', 'labels', 'rois', 'image_item', 'number_of_buffers', 'number_of_levels',
             'intensity_drift_correction')
@describe_input('images', 'Input array of two or more dimensions')
@describe_input('labels', 'Labeled array of the same shape as the image stack. \
Each ROI is represented by sequential integers starting at one. For \
example, if you have four ROIs, they must be labeled 1, 2, 3, 4. \
Background is labeled as 0')
@describe_input('number_of_buffers', 'Integer number of buffers (must be even). Maximum \
lag step to compute in each generation of downsampling.')
@describe_input('number_of_levels', 'Integer number defining how many generations of \
downsampling to perform, i.e., the depth of the binomial tree \
of averaged frames')
@output_names('g2', 'tau', 'images', 'labels')
@describe_output('g2', 'Normalized g2 data array with shape = (len(lag_steps), num_rois)')
@describe_output('tau', 'array describing tau (lag steps)')
@visible('images', False)
@visible('labels', False)
@visible('rois', False)
@visible('image_item', False)
@intent(PlotIntent,
        match_key='1-time Correlation',
        name='g2',
        xLogMode=True,
        labels={"bottom": "𝜏", "left": "g₂"},
        output_map={'x': 'tau', 'y': 'g2'},
        mixins=["ToggleSymbols"])
def one_time_correlation(images: np.ndarray,
                         labels: np.ndarray = None,
                         rois: Iterable[pg.ROI] = None,
                         image_item: pg.ImageItem = None,
                         num_bufs: int = 16,
                         num_levels: int = 8,
                         intensity_drift_correction: bool = True) -> Tuple[da.array, da.array, da.array, np.ndarray]:
    """
    Compute the normalized one-time (g2) autocorrelation over labeled ROIs.

    The multi-tau correlation itself is delegated to
    ``skbeam.core.correlation.multi_tau_auto_corr``. The ``num_bufs`` and
    ``num_levels`` parameters surface in the UI as 'number_of_buffers' and
    'number_of_levels' via the ``@input_names`` decorator above.
    Returns ``(g2, tau, images, labels)`` with the 0th lag step trimmed off
    of both ``g2`` and ``tau``.
    """
    if images.ndim < 3:
        raise ValueError(f"Cannot compute correlation on data with {images.ndim} dimensions.")

    # if labels array was not passed in, it must be generated; trimming will occur here for memory conservation
    if labels is None:
        labels = get_label_array(images, rois=rois, image_item=image_item)
        if labels.max() == 0:
            msg.notifyMessage("Please add an ROI over which to calculate one-time correlation.")
            raise ValueError("Please add an ROI over which to calculate one-time correlation.")

        # Trim the image based on labels, and resolve to memory
        # NOTE(review): labels are flipped vertically before locating the ROI
        # bounding box — presumably to match the image orientation used by the
        # ROI widgets; confirm against get_label_array's convention.
        si, se = np.where(np.flipud(labels))
        # trimmed_images = np.asarray(images[:, si.min():si.max() + 1, se.min():se.max() + 1])
        # Crop every frame to the ROI bounding box and materialize in memory
        trimmed_images = np.asarray([image[si.min():si.max() + 1, se.min():se.max() + 1] for image in images])
        trimmed_labels = np.asarray(np.flipud(labels)[si.min():si.max() + 1, se.min():se.max() + 1])

    # If a labels array is passed in, no trimming is done; autocorr should read each frame lazy-like
    else:
        trimmed_images = images
        trimmed_labels = labels

    # trimmed_images[trimmed_images <= 0] = np.NaN  # may be necessary to mask values
    if intensity_drift_correction:
        # Normalize each frame by its mean intensity, then subtract the
        # per-pixel minimum over time to remove a static background offset
        trimmed_images = trimmed_images / np.mean(trimmed_images, axis=(1, 2))[:, None, None]
        trimmed_images -= np.min(trimmed_images, axis=0)

    g2, tau = corr.multi_tau_auto_corr(num_levels, num_bufs,
                                       trimmed_labels.astype(np.uint8),
                                       trimmed_images)
    # Drop the 0th lag step and collapse singleton ROI dimensions
    g2 = g2[1:].squeeze()
    # FIXME: is it required to trim the 0th value off the tau and g2 arrays?
    return g2.T, tau[1:], images, labels
| [
"xicam.plugins.operationplugin.output_names",
"xicam.plugins.operationplugin.visible",
"numpy.mean",
"xicam.plugins.operationplugin.intent",
"numpy.flipud",
"xicam.core.msg.notifyMessage",
"numpy.min",
"xicam.plugins.operationplugin.display_name",
"xicam.plugins.operationplugin.input_names",
"xica... | [((514, 548), 'xicam.plugins.operationplugin.display_name', 'display_name', (['"""1-time Correlation"""'], {}), "('1-time Correlation')\n", (526, 548), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((550, 678), 'xicam.plugins.operationplugin.input_names', 'input_names', (['"""images"""', '"""labels"""', '"""rois"""', '"""image_item"""', '"""number_of_buffers"""', '"""number_of_levels"""', '"""intensity_drift_correction"""'], {}), "('images', 'labels', 'rois', 'image_item', 'number_of_buffers',\n 'number_of_levels', 'intensity_drift_correction')\n", (561, 678), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((689, 754), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""images"""', '"""Input array of two or more dimensions"""'], {}), "('images', 'Input array of two or more dimensions')\n", (703, 754), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((756, 1056), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""labels"""', '"""Labeled array of the same shape as the image stack. Each ROI is represented by sequential integers starting at one. For example, if you have four ROIs, they must be labeled 1, 2, 3, 4. Background is labeled as 0"""'], {}), "('labels',\n 'Labeled array of the same shape as the image stack. Each ROI is represented by sequential integers starting at one. For example, if you have four ROIs, they must be labeled 1, 2, 3, 4. 
Background is labeled as 0'\n )\n", (770, 1056), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1055, 1225), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""number_of_buffers"""', '"""Integer number of buffers (must be even). Maximum lag step to compute in each generation of downsampling."""'], {}), "('number_of_buffers',\n 'Integer number of buffers (must be even). Maximum lag step to compute in each generation of downsampling.'\n )\n", (1069, 1225), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1220, 1429), 'xicam.plugins.operationplugin.describe_input', 'describe_input', (['"""number_of_levels"""', '"""Integer number defining how many generations of downsampling to perform, i.e., the depth of the binomial tree of averaged frames"""'], {}), "('number_of_levels',\n 'Integer number defining how many generations of downsampling to perform, i.e., the depth of the binomial tree of averaged frames'\n )\n", (1234, 1429), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1426, 1471), 'xicam.plugins.operationplugin.output_names', 'output_names', (['"""g2"""', '"""tau"""', '"""images"""', '"""labels"""'], {}), "('g2', 'tau', 'images', 'labels')\n", (1438, 1471), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1473, 1566), 'xicam.plugins.operationplugin.describe_output', 'describe_output', (['"""g2"""', '"""Normalized g2 data array with shape = (len(lag_steps), num_rois)"""'], {}), "('g2',\n 'Normalized g2 data array with shape = (len(lag_steps), num_rois)')\n", (1488, 
1566), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1564, 1622), 'xicam.plugins.operationplugin.describe_output', 'describe_output', (['"""tau"""', '"""array describing tau (lag steps)"""'], {}), "('tau', 'array describing tau (lag steps)')\n", (1579, 1622), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1624, 1648), 'xicam.plugins.operationplugin.visible', 'visible', (['"""images"""', '(False)'], {}), "('images', False)\n", (1631, 1648), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1650, 1674), 'xicam.plugins.operationplugin.visible', 'visible', (['"""labels"""', '(False)'], {}), "('labels', False)\n", (1657, 1674), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1676, 1698), 'xicam.plugins.operationplugin.visible', 'visible', (['"""rois"""', '(False)'], {}), "('rois', False)\n", (1683, 1698), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1700, 1728), 'xicam.plugins.operationplugin.visible', 'visible', (['"""image_item"""', '(False)'], {}), "('image_item', False)\n", (1707, 1728), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((1730, 1915), 'xicam.plugins.operationplugin.intent', 'intent', (['PlotIntent'], {'match_key': '"""1-time Correlation"""', 'name': '"""g2"""', 'xLogMode': '(True)', 'labels': "{'bottom': '𝜏', 'left': 'g₂'}", 
'output_map': "{'x': 'tau', 'y': 'g2'}", 'mixins': "['ToggleSymbols']"}), "(PlotIntent, match_key='1-time Correlation', name='g2', xLogMode=True,\n labels={'bottom': '𝜏', 'left': 'g₂'}, output_map={'x': 'tau', 'y': 'g2'\n }, mixins=['ToggleSymbols'])\n", (1736, 1915), False, 'from xicam.plugins.operationplugin import operation, describe_input, describe_output, visible, input_names, output_names, display_name, categories, intent\n'), ((3763, 3793), 'numpy.min', 'np.min', (['trimmed_images'], {'axis': '(0)'}), '(trimmed_images, axis=0)\n', (3769, 3793), True, 'import numpy as np\n'), ((2747, 2836), 'xicam.core.msg.notifyMessage', 'msg.notifyMessage', (['"""Please add an ROI over which to calculate one-time correlation."""'], {}), "(\n 'Please add an ROI over which to calculate one-time correlation.')\n", (2764, 2836), False, 'from xicam.core import msg\n'), ((3019, 3036), 'numpy.flipud', 'np.flipud', (['labels'], {}), '(labels)\n', (3028, 3036), True, 'import numpy as np\n'), ((3280, 3297), 'numpy.flipud', 'np.flipud', (['labels'], {}), '(labels)\n', (3289, 3297), True, 'import numpy as np\n'), ((3688, 3724), 'numpy.mean', 'np.mean', (['trimmed_images'], {'axis': '(1, 2)'}), '(trimmed_images, axis=(1, 2))\n', (3695, 3724), True, 'import numpy as np\n')] |
# Author: <NAME> <<EMAIL>>
# My imports
from . import constants
# Regular imports
from datetime import datetime
from copy import deepcopy
from scipy import signal
import numpy as np
import warnings
import librosa
import random
import torch
# TODO - torch Tensor compatibility
# TODO - try to ensure these won't break if extra dimensions (e.g. batch) are included
# TODO - make sure there are no hard assignments (make return copies instead of original where necessary)
##################################################
# TO BATCH-FRIENDLY NOTES #
##################################################
def notes_to_batched_notes(pitches, intervals):
    """
    Pack loose note groups into a single batch-friendly array.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes

    Returns
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and pitches by row
      N - number of notes
    """

    # Start from an empty (0 x 3) array in case there are no notes
    batched_notes = np.empty([0, 3])

    if len(pitches) > 0:
        # Give the pitches a trailing singleton dimension so they can be
        # joined with the intervals as ndarray([[onset, offset, pitch]])
        batched_notes = np.concatenate((intervals, pitches[..., np.newaxis]), axis=-1)

    return batched_notes
def batched_notes_to_hz(batched_notes):
    """
    Translate the pitch column of batched notes from MIDI to Hertz.

    Note that the conversion happens in-place on the given array,
    which is also returned for convenience.

    Parameters
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and MIDI pitches by row
      N - number of notes

    Returns
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and Hertz pitches by row
      N - number of notes
    """

    # Replace the final (pitch) column with its Hertz equivalent
    midi_pitches = batched_notes[..., 2]
    batched_notes[..., 2] = librosa.midi_to_hz(midi_pitches)

    return batched_notes
def batched_notes_to_midi(batched_notes):
    """
    Translate the pitch column of batched notes from Hertz to MIDI.

    Note that the conversion happens in-place on the given array,
    which is also returned for convenience.

    Parameters
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and Hertz pitches by row
      N - number of notes

    Returns
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and MIDI pitches by row
      N - number of notes
    """

    # Replace the final (pitch) column with its MIDI equivalent
    hertz_pitches = batched_notes[..., 2]
    batched_notes[..., 2] = librosa.hz_to_midi(hertz_pitches)

    return batched_notes
def slice_batched_notes(batched_notes, start_time, stop_time):
    """
    Restrict batched notes to a time window, clipping note boundaries.

    Parameters
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and pitches by row
      N - number of notes
    start_time : float
      Beginning of time window
    stop_time : float
      End of time window

    Returns
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and pitches by row
      N - number of notes
    """

    # Keep only notes which overlap the window, i.e. notes that
    # end after the window begins and begin before the window ends
    onsets, offsets = batched_notes[:, 0], batched_notes[:, 1]
    overlapping = np.logical_and(offsets > start_time, onsets < stop_time)
    batched_notes = batched_notes[overlapping]

    # Truncate note boundaries so they lie within the window
    batched_notes[:, 0] = np.maximum(batched_notes[:, 0], start_time)
    batched_notes[:, 1] = np.minimum(batched_notes[:, 1], stop_time)

    return batched_notes
##################################################
# TO NOTES #
##################################################
def batched_notes_to_notes(batched_notes):
    """
    Unpack batch-friendly notes into loose note groups.

    Parameters
    ----------
    batched_notes : ndarray (N x 3)
      Array of note intervals and pitches by row
      N - number of notes

    Returns
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    """

    # The last column holds the pitches...
    pitches = batched_notes[..., 2]
    # ...while the first two hold the onset-offset intervals
    intervals = batched_notes[:, :2]

    return pitches, intervals
def stacked_notes_to_notes(stacked_notes):
    """
    Collapse a dictionary of stacked notes into one flat note group.

    Parameters
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches, intervals)) pairs

    Returns
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    """

    # Gather the pitch and interval arrays across every slice
    all_pitches, all_intervals = [], []
    for slice_pitches, slice_intervals in stacked_notes.values():
        all_pitches.append(slice_pitches)
        all_intervals.append(slice_intervals)

    # Fuse the per-slice arrays into one group
    pitches = np.concatenate(all_pitches)
    intervals = np.concatenate(all_intervals)

    # Order the collapsed notes by onset time
    pitches, intervals = sort_notes(pitches, intervals)

    return pitches, intervals
def notes_to_hz(pitches):
    """
    Convert note pitches from MIDI to Hertz.

    The array of corresponding intervals does not change and is
    assumed to be managed outside of this function.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of MIDI pitches corresponding to notes
      N - number of notes

    Returns
    ----------
    pitches : ndarray (N)
      Array of Hertz pitches corresponding to notes
      N - number of notes
    """

    # Map each MIDI pitch to its frequency in Hertz
    return librosa.midi_to_hz(pitches)
def notes_to_midi(pitches):
    """
    Convert note pitches from Hertz to MIDI.

    The array of corresponding intervals does not change and is
    assumed to be managed outside of this function.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of Hertz pitches corresponding to notes
      N - number of notes

    Returns
    ----------
    pitches : ndarray (N)
      Array of MIDI pitches corresponding to notes
      N - number of notes
    """

    # Map each frequency in Hertz to its MIDI pitch
    return librosa.hz_to_midi(pitches)
##################################################
# TO STACKED NOTES #
##################################################
def notes_to_stacked_notes(pitches, intervals, i=0):
    """
    Wrap a collection of notes into a dictionary of stacked notes.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    i : int
      Slice key to use

    Returns
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches, intervals)) pairs
    """

    # Sort the notes by onset and file them under the requested slice key
    stacked_notes = {i: sort_notes(pitches, intervals)}

    return stacked_notes
def stacked_notes_to_hz(stacked_notes):
    """
    Convert stacked notes from MIDI to Hertz.

    Parameters
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs

    Returns
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches (Hertz), intervals)) pairs
    """

    # Work on a copy so the caller's stack is left untouched
    stacked_notes = deepcopy(stacked_notes)

    # Convert the pitches within every slice, leaving intervals as-is
    for slc, (pitches, intervals) in stacked_notes.items():
        stacked_notes[slc] = notes_to_hz(pitches), intervals

    return stacked_notes
def stacked_notes_to_midi(stacked_notes):
    """
    Convert stacked notes from Hertz to MIDI.

    Parameters
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches (Hertz), intervals)) pairs

    Returns
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs
    """

    # Work on a copy so the caller's stack is left untouched
    stacked_notes = deepcopy(stacked_notes)

    # Convert the pitches within every slice, leaving intervals as-is
    for slc, (pitches, intervals) in stacked_notes.items():
        stacked_notes[slc] = notes_to_midi(pitches), intervals

    return stacked_notes
##################################################
# TO PITCH LIST #
##################################################
def stacked_pitch_list_to_pitch_list(stacked_pitch_list):
    """
    Convert a dictionary of stacked pitch lists into a single representation.

    Parameters
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list)) pairs

    Returns
    ----------
    times : ndarray (N)
      Time in seconds of beginning of each frame
      N - number of time samples (frames)
    pitch_list : list of ndarray (N x [...])
      Array of pitches corresponding to notes
      N - number of pitch observations (frames)
    """

    # Obtain the time-pitch list pairs from the dictionary values
    pitch_list_pairs = list(stacked_pitch_list.values())

    # Collapse the times from each pitch_list into one sorted, unique array
    times = np.unique(np.concatenate([pair[0] for pair in pitch_list_pairs]))

    # Map each unique time to its index once, so merging is O(1) per
    # observation instead of an O(N) np.where search for every entry
    time_to_idx = {time: idx for idx, time in enumerate(times)}

    # Initialize empty pitch arrays for each time entry
    pitch_list = [np.empty(0)] * times.size

    # Loop through each pitch list
    for slice_times, slice_pitch_arrays in pitch_list_pairs:
        # Loop through the pitch list entries
        for entry in range(len(slice_pitch_arrays)):
            # Determine where this entry belongs in the new pitch list
            idx = time_to_idx[slice_times[entry]]
            # Insert the frequencies at the corresponding time
            pitch_list[idx] = np.append(pitch_list[idx], slice_pitch_arrays[entry])

    # Sort the time-pitch array pairs by time
    times, pitch_list = sort_pitch_list(times, pitch_list)

    return times, pitch_list
def multi_pitch_to_pitch_list(multi_pitch, profile):
    """
    Convert a discrete pitch activation map into a pitch list.

    The array of corresponding times does not change and is
    assumed to be managed outside of this function.

    Parameters
    ----------
    multi_pitch : ndarray (F x T)
      Discrete pitch activation map
      F - number of discrete pitches
      T - number of frames
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup

    Returns
    ----------
    pitch_list : list of ndarray (T x [...])
      Array of pitches corresponding to notes
      T - number of pitch observations (frames)
    """

    # The trailing dimension of the activation map counts frames
    num_frames = multi_pitch.shape[-1]

    # Start every frame off with an empty pitch array
    pitch_list = [np.empty(0)] * num_frames

    # Identify the frames that have any pitch activity at all
    active_frames = np.where(np.sum(multi_pitch, axis=-2) > 0)[-1]

    for frame in list(active_frames):
        # Offset the active pitch bins by the lowest supported note to obtain MIDI pitches
        pitch_list[frame] = profile.low + np.where(multi_pitch[..., frame])[-1]

    return pitch_list
def pitch_list_to_hz(pitch_list):
    """
    Convert a pitch list from MIDI to Hertz.

    The array of corresponding times does not change and is
    assumed to be managed outside of this function.

    Parameters
    ----------
    pitch_list : list of ndarray (T x [...])
      Array of MIDI pitches corresponding to notes
      T - number of pitch observations (frames)

    Returns
    ----------
    pitch_list : list of ndarray (T x [...])
      Array of Hertz pitches corresponding to notes
      T - number of pitch observations (frames)
    """

    # Convert the pitch array of every frame from MIDI to Hertz
    return [librosa.midi_to_hz(frame_pitches) for frame_pitches in pitch_list]
def pitch_list_to_midi(pitch_list):
    """
    Convert a pitch list from Hertz to MIDI.

    The array of corresponding times does not change and is
    assumed to be managed outside of this function.

    Parameters
    ----------
    pitch_list : list of ndarray (T x [...])
      Array of Hertz pitches corresponding to notes
      T - number of pitch observations (frames)

    Returns
    ----------
    pitch_list : list of ndarray (T x [...])
      Array of MIDI pitches corresponding to notes
      T - number of pitch observations (frames)
    """

    # Convert the pitch array of every frame from Hertz to MIDI
    return [librosa.hz_to_midi(frame_pitches) for frame_pitches in pitch_list]
##################################################
# TO STACKED PITCH LIST #
##################################################
def pitch_list_to_stacked_pitch_list(times, pitch_list, i=0):
    """
    Wrap a pitch list into a dictionary of stacked pitch lists.

    Parameters
    ----------
    times : ndarray (N)
      Time in seconds of beginning of each frame
      N - number of time samples (frames)
    pitch_list : list of ndarray (N x [...])
      Array of pitches corresponding to notes
      N - number of pitch observations (frames)
    i : int
      Slice key to use

    Returns
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list)) pairs
    """

    # Sort the observations by time and file them under the requested slice key
    stacked_pitch_list = {i: sort_pitch_list(times, pitch_list)}

    return stacked_pitch_list
def stacked_multi_pitch_to_stacked_pitch_list(stacked_multi_pitch, times, profile):
    """
    Convert a stack of discrete pitch activation maps into a stack of pitch lists.

    Parameters
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
      Array of multiple discrete pitch activation maps
      S - number of slices in stack
      F - number of discrete pitches
      T - number of frames
    times : ndarray (T)
      Time in seconds of beginning of each frame
      T - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup

    Returns
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list)) pairs
    """

    stacked_pitch_list = dict()

    # Convert each slice of the stack to a pitch list and file it under its index
    for slc in range(stacked_multi_pitch.shape[-3]):
        slice_pitch_list = multi_pitch_to_pitch_list(stacked_multi_pitch[slc], profile)
        stacked_pitch_list.update(pitch_list_to_stacked_pitch_list(times, slice_pitch_list, slc))

    return stacked_pitch_list
def stacked_pitch_list_to_hz(stacked_pitch_list):
    """
    Convert a stacked pitch list from MIDI to Hertz.

    Parameters
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list (MIDI))) pairs

    Returns
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list (Hertz)) pairs
    """

    # Work on a copy so the caller's stack is left untouched
    stacked_pitch_list = deepcopy(stacked_pitch_list)

    # Convert the pitch arrays within every slice, leaving times as-is
    for slc, (times, pitch_list) in stacked_pitch_list.items():
        stacked_pitch_list[slc] = times, pitch_list_to_hz(pitch_list)

    return stacked_pitch_list
def stacked_pitch_list_to_midi(stacked_pitch_list):
    """
    Convert a stacked pitch list from Hertz to MIDI.

    Parameters
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list (Hertz))) pairs

    Returns
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list (MIDI)) pairs
    """

    # Work on a copy so the caller's stack is left untouched
    stacked_pitch_list = deepcopy(stacked_pitch_list)

    # Convert the pitch arrays within every slice, leaving times as-is
    for slc, (times, pitch_list) in stacked_pitch_list.items():
        stacked_pitch_list[slc] = times, pitch_list_to_midi(pitch_list)

    return stacked_pitch_list
##################################################
# TO MULTI PITCH #
##################################################
def notes_to_multi_pitch(pitches, intervals, times, profile):
    """
    Convert loose MIDI note groups into a multi pitch array.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes in MIDI format
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    times : ndarray (N)
      Time in seconds of beginning of each frame
      N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup

    Returns
    ----------
    multi_pitch : ndarray (F x T)
      Discrete pitch activation map
      F - number of discrete pitches
      T - number of frames
    """

    # Determine the dimensionality of the multi pitch array
    num_pitches = profile.get_range_len()
    num_frames = len(times)

    # Initialize an empty multi pitch array
    multi_pitch = np.zeros((num_pitches, num_frames))

    # Convert the pitches to number of semitones from lowest note
    # NOTE(review): pitches outside [profile.low, profile.low + F) would
    # produce out-of-range row indices here — assumed validated upstream
    pitches = np.round(pitches - profile.low).astype(constants.UINT)

    # Duplicate the array of times for each note and stack along a new axis
    # (max(1, ...) keeps the shape valid when there are no notes)
    times = np.concatenate([[times]] * max(1, len(pitches)), axis=0)

    # Determine the frame where each note begins and ends:
    # argmin over the boolean comparison returns the first frame past the
    # boundary, so subtracting 1 yields the frame containing the boundary
    onsets = np.argmin((times <= intervals[..., :1]), axis=1) - 1
    offsets = np.argmin((times < intervals[..., 1:]), axis=1) - 1

    # Clip all offsets at last frame - they will end up at -1 from
    # previous operation if they occurred beyond last frame time
    offsets[offsets == -1] = num_frames - 1

    # Loop through each note
    for i in range(len(pitches)):
        # Populate the multi pitch array with activations for the note
        multi_pitch[pitches[i], onsets[i] : offsets[i] + 1] = 1

    return multi_pitch
def pitch_list_to_multi_pitch(times, pitch_list, profile, tolerance=0.5):
    """
    Convert a MIDI pitch list into a discrete multi pitch activation map.

    Parameters
    ----------
    times : ndarray (N)
      Time in seconds of beginning of each frame (only its length is used)
      N - number of time samples (frames)
    pitch_list : list of ndarray (N x [...])
      Array of pitches corresponding to notes
      N - number of pitch observations (frames)
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup
    tolerance : float
      Amount of semitone deviation allowed

    Returns
    ----------
    multi_pitch : ndarray (F x T)
      Discrete pitch activation map
      F - number of discrete pitches
      T - number of frames
    """

    # Determine the dimensionality of the multi pitch array
    num_pitches = profile.get_range_len()
    num_frames = len(times)

    # Initialize an empty multi pitch array
    multi_pitch = np.zeros((num_pitches, num_frames))

    # Loop through the pitch observations of each frame
    for i in range(len(pitch_list)):
        # Calculate the pitch semitone difference from the lowest note
        difference = pitch_list[i] - profile.low
        # Determine the amount of semitone deviation for each pitch:
        # fold the fractional part into [-0.5, 0.5] around the nearest
        # semitone, then take its magnitude
        deviation = difference % 1
        deviation[deviation > 0.5] -= 1
        deviation = np.abs(deviation)
        # Convert the pitches to number of semitones from lowest note,
        # discarding observations that deviate by tolerance or more
        # NOTE(review): pitches below profile.low would round to negative
        # indices here — assumed filtered upstream
        pitches = np.round(difference[deviation < tolerance]).astype(constants.UINT)
        # Populate the multi pitch array with activations
        multi_pitch[pitches, i] = 1

    return multi_pitch
def stacked_multi_pitch_to_multi_pitch(stacked_multi_pitch):
    """
    Collapse a stack of discrete pitch activation maps into one map.

    Parameters
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
      Array of multiple discrete pitch activation maps
      S - number of slices in stack
      F - number of discrete pitches
      T - number of frames

    Returns
    ----------
    multi_pitch : ndarray (F x T)
      Discrete pitch activation map
      F - number of discrete pitches
      T - number of frames
    """

    # Merge the slices with an element-wise maximum over the slice axis
    return np.max(stacked_multi_pitch, axis=-3)
##################################################
# TO STACKED MULTI PITCH #
##################################################
def stacked_notes_to_stacked_multi_pitch(stacked_notes, times, profile):
    """
    Convert a dictionary of MIDI note groups into a stack of discrete
    pitch activation maps.

    Parameters
    ----------
    stacked_notes : dict
      Dictionary containing (slice -> (pitches, intervals)) pairs
    times : ndarray (N)
      Time in seconds of beginning of each frame
      N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup

    Returns
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
      Array of multiple discrete pitch activation maps
      S - number of slices in stack
      F - number of discrete pitches
      T - number of frames
    """

    per_slice_arrays = []

    # Convert every slice of notes into a (1 x F x T) activation map
    # (slice keys are assumed to be the integers 0..S-1)
    for slc in range(len(stacked_notes)):
        pitches, intervals = stacked_notes[slc]
        slice_multi_pitch = notes_to_multi_pitch(pitches, intervals, times, profile)
        per_slice_arrays.append(multi_pitch_to_stacked_multi_pitch(slice_multi_pitch))

    # Fuse the per-slice arrays along the slice axis
    return np.concatenate(per_slice_arrays)
def stacked_pitch_list_to_stacked_multi_pitch(stacked_pitch_list, profile):
    """
    Convert a stacked MIDI pitch list into a stack of discrete pitch
    activation maps.

    Parameters
    ----------
    stacked_pitch_list : dict
      Dictionary containing (slice -> (times, pitch_list)) pairs
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup

    Returns
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
      Array of multiple discrete pitch activation maps
      S - number of slices in stack
      F - number of discrete pitches
      T - number of frames
    """

    per_slice_arrays = []

    # Convert every slice of the pitch list into a (1 x F x T) activation map
    # (slice keys are assumed to be the integers 0..S-1)
    for slc in range(len(stacked_pitch_list)):
        times, pitch_list = stacked_pitch_list[slc]
        slice_multi_pitch = pitch_list_to_multi_pitch(times, pitch_list, profile)
        per_slice_arrays.append(multi_pitch_to_stacked_multi_pitch(slice_multi_pitch))

    # Fuse the per-slice arrays along the slice axis
    return np.concatenate(per_slice_arrays)
def multi_pitch_to_stacked_multi_pitch(multi_pitch):
    """
    Convert a multi pitch array into a stacked representation.
    Parameters
    ----------
    multi_pitch : ndarray (F x T)
        Discrete pitch activation map
        F - number of discrete pitches
        T - number of frames
    Returns
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
        Array of multiple discrete pitch activation maps
        S - number of slices in stack (always 1 here)
        F - number of discrete pitches
        T - number of frames
    """
    # Insert a singleton slice dimension in front of the pitch and frame axes
    return np.expand_dims(multi_pitch, axis=-3)
def tablature_to_stacked_multi_pitch(tablature, profile):
    """
    Convert a tablature representation into a stacked multi pitch array.
    Array of corresponding times does not change and is
    assumed to be managed outside of the function.
    Parameters
    ----------
    tablature : ndarray (S x T)
        Array of class membership for multiple degrees of freedom (e.g. strings)
        S - number of strings or degrees of freedom
        T - number of frames
    profile : TablatureProfile (instrument.py)
        Tablature instrument profile detailing experimental setup
    Returns
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
        Array of multiple discrete pitch activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    """
    # Shape of the incoming class-membership matrix
    num_dofs, num_frames = tablature.shape
    # Total number of discrete pitches to be included
    num_pitches = profile.get_range_len()
    # Start from an all-zero activation map for every degree of freedom
    stacked_multi_pitch = np.zeros((num_dofs, num_pitches, num_frames))
    # Lowest playable note (MIDI) for each degree of freedom
    tuning = profile.get_midi_tuning()
    # Offset of each degree of freedom within the pitch axis
    pitch_offset = np.expand_dims(tuning - profile.low, -1)
    # Frames whose class membership indicates pitch activity (negative = silence)
    active = tablature >= 0
    # Absolute pitch index of every active frame
    active_pitches = (tablature + pitch_offset)[active]
    # Split the active entries into their dof and frame coordinates
    active_dofs, active_frames = active.nonzero()
    # Switch on the corresponding activations
    stacked_multi_pitch[(active_dofs, active_pitches, active_frames)] = 1
    return stacked_multi_pitch
##################################################
# TO TABLATURE #
##################################################
def stacked_pitch_list_to_tablature(stacked_pitch_list, profile):
    """
    Convert a stacked MIDI pitch list into a single class representation.
    Parameters
    ----------
    stacked_pitch_list : dict
        Dictionary containing (slice -> (times, pitch_list)) pairs
    profile : TablatureProfile (instrument.py)
        Tablature instrument profile detailing experimental setup
    Returns
    ----------
    tablature : ndarray (S x T)
        Array of class membership for multiple degrees of freedom (e.g. strings)
        S - number of strings or degrees of freedom
        T - number of frames
    """
    # Go through the intermediate stacked multi pitch representation,
    # then collapse each slice down to class membership
    intermediate = stacked_pitch_list_to_stacked_multi_pitch(stacked_pitch_list, profile)
    return stacked_multi_pitch_to_tablature(intermediate, profile)
def stacked_multi_pitch_to_tablature(stacked_multi_pitch, profile):
    """
    Collapse stacked multi pitch arrays into a single class representation.
    Parameters
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
        Array of multiple discrete pitch activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    profile : TablatureProfile (instrument.py)
        Tablature instrument profile detailing experimental setup
    Returns
    ----------
    tablature : ndarray (S x T)
        Array of class membership for multiple degrees of freedom (e.g. strings)
        S - number of strings or degrees of freedom
        T - number of frames
    """
    # Lowest playable note (MIDI) for each degree of freedom
    tuning = profile.get_midi_tuning()
    # Accumulate one class-membership row per degree of freedom
    per_dof_classes = []
    for dof_idx, dof_multi_pitch in enumerate(stacked_multi_pitch):
        # Frames with no pitch activity for this degree of freedom
        inactive = np.sum(dof_multi_pitch, axis=0) == 0
        # Pitch support of this degree of freedom within the pitch axis
        low_idx = tuning[dof_idx] - profile.low
        high_idx = low_idx + profile.num_pitches
        supported = dof_multi_pitch[low_idx : high_idx]
        # Pick the most active class within the support for each frame
        best_class = np.argmax(supported, axis=0)
        # Mark silent frames with the out-of-range class -1
        best_class[inactive] = -1
        per_dof_classes += [np.expand_dims(best_class, axis=0)]
    # Stack the rows to obtain the final tablature
    tablature = np.concatenate(per_dof_classes)
    return tablature
##################################################
# TO ONSETS #
##################################################
def notes_to_onsets(pitches, intervals, times, profile, ambiguity=None):
    """
    Obtain the onsets of loose MIDI note groups in multi pitch format.
    Parameters
    ----------
    pitches : ndarray (N)
        Array of pitches corresponding to notes in MIDI format
        N - number of notes
    intervals : ndarray (N x 2)
        Array of onset-offset time pairs corresponding to notes
        N - number of notes
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
        Instrument profile detailing experimental setup
    ambiguity : float or None (optional)
        Amount of time each onset label should span
    Returns
    ----------
    onsets : ndarray (F x T)
        Discrete onset activation map
        F - number of discrete pitches
        T - number of frames
    """
    # Onset instant of every note
    onset_times = np.copy(intervals[..., :1])
    if ambiguity is None:
        # Zero-length labels: only the frame containing the onset is marked
        offset_times = np.copy(onset_times)
    else:
        # Clip each note's duration to at most the ambiguity window and
        # end the label where the clipped note ends
        clipped_durations = np.minimum(intervals[..., 1:] - onset_times, ambiguity)
        offset_times = onset_times + clipped_durations
    # Build intervals for the truncated notes that follow each onset
    truncated_note_intervals = np.concatenate((onset_times, offset_times), axis=-1)
    # Rasterize the truncated notes with the standard conversion
    onsets = notes_to_multi_pitch(pitches, truncated_note_intervals, times, profile)
    return onsets
def multi_pitch_to_onsets(multi_pitch):
    """
    Obtain a representation detailing where discrete pitches become active.
    Parameters
    ----------
    multi_pitch : ndarray (F x T)
        Discrete pitch activation map
        F - number of discrete pitches
        T - number of frames
    Returns
    ----------
    onsets : ndarray (F x T)
        Discrete onset activation map
        F - number of discrete pitches
        T - number of frames
    """
    # Prepend a zero frame so a single frame-to-frame difference covers both
    # activity in the first frame and every inactive -> active transition
    padded = np.concatenate([np.zeros_like(multi_pitch[..., :1]), multi_pitch], axis=-1)
    onsets = padded[..., 1:] - padded[..., :-1]
    # Positive differences are onsets; zero out everything else
    onsets[onsets <= 0] = 0
    return onsets
##################################################
# TO STACKED ONSETS #
##################################################
def stacked_notes_to_stacked_onsets(stacked_notes, times, profile, ambiguity=None):
    """
    Obtain the onsets of stacked loose MIDI note groups in stacked multi pitch format.
    Parameters
    ----------
    stacked_notes : dict
        Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
        Instrument profile detailing experimental setup
    ambiguity : float or None (optional)
        Amount of time each onset label should span
    Returns
    ----------
    stacked_onsets : ndarray (S x F x T)
        Array of multiple discrete onset activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    """
    # Convert each slice of notes to a (1 x F x T) onset map
    per_slice_onsets = []
    for slice_idx in range(len(stacked_notes)):
        # Unpack the notes belonging to this slice
        slice_pitches, slice_intervals = stacked_notes[slice_idx]
        onset_map = notes_to_onsets(slice_pitches, slice_intervals, times, profile, ambiguity)
        per_slice_onsets += [multi_pitch_to_stacked_multi_pitch(onset_map)]
    # Stack all slices along the first axis
    stacked_onsets = np.concatenate(per_slice_onsets)
    return stacked_onsets
def stacked_multi_pitch_to_stacked_onsets(stacked_multi_pitch):
    """
    Obtain a stacked representation detailing where discrete pitches become active.
    Parameters
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
        Array of multiple discrete pitch activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    Returns
    ----------
    stacked_onsets : ndarray (S x F x T)
        Array of multiple discrete onset activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    """
    # Number of slices along the stack dimension
    stack_size = stacked_multi_pitch.shape[-3]
    # Convert each slice independently, then re-stack along the first axis
    stacked_onsets = np.concatenate([
        multi_pitch_to_stacked_multi_pitch(multi_pitch_to_onsets(stacked_multi_pitch[slc]))
        for slc in range(stack_size)])
    return stacked_onsets
##################################################
# TO OFFSETS #
##################################################
def notes_to_offsets(pitches, intervals, times, profile, ambiguity=None):
    """
    Obtain the offsets of loose MIDI note groups in multi pitch format.
    Parameters
    ----------
    pitches : ndarray (N)
        Array of pitches corresponding to notes in MIDI format
        N - number of notes
    intervals : ndarray (N x 2)
        Array of onset-offset time pairs corresponding to notes
        N - number of notes
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
        Instrument profile detailing experimental setup
    ambiguity : float or None (optional)
        Amount of time each offset label should span
    Returns
    ----------
    offsets : ndarray (F x T)
        Discrete offset activation map
        F - number of discrete pitches
        T - number of frames
    """
    # Treat the instant of each offset as the onset of a pseudo-note
    pseudo_onsets = np.copy(intervals[..., 1:])
    if ambiguity is None:
        # Zero-length pseudo-note: only the frame containing the offset is marked
        pseudo_offsets = np.copy(pseudo_onsets)
    else:
        # Extend each pseudo-note by the ambiguity window
        pseudo_offsets = pseudo_onsets + ambiguity
    # Build intervals for the pseudo-notes following each offset
    post_note_intervals = np.concatenate((pseudo_onsets, pseudo_offsets), axis=-1)
    # Rasterize the pseudo-notes with the standard conversion
    offsets = notes_to_multi_pitch(pitches, post_note_intervals, times, profile)
    return offsets
def multi_pitch_to_offsets(multi_pitch):
    """
    Obtain a representation detailing where discrete pitch activity ceases.
    Parameters
    ----------
    multi_pitch : ndarray (F x T)
        Discrete pitch activation map
        F - number of discrete pitches
        T - number of frames
    Returns
    ----------
    offsets : ndarray (F x T)
        Discrete offset activation map
        F - number of discrete pitches
        T - number of frames
    """
    # Append a zero frame so a single frame-to-frame difference covers both
    # activity in the last frame and every active -> inactive transition
    padded = np.concatenate([multi_pitch, np.zeros_like(multi_pitch[..., -1:])], axis=-1)
    offsets = padded[..., :-1] - padded[..., 1:]
    # Positive differences are offsets; zero out everything else
    offsets[offsets <= 0] = 0
    return offsets
##################################################
# TO STACKED OFFSETS #
##################################################
def stacked_notes_to_stacked_offsets(stacked_notes, times, profile, ambiguity=None):
    """
    Obtain the offsets of stacked loose MIDI note groups in stacked multi pitch format.
    Parameters
    ----------
    stacked_notes : dict
        Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
        Instrument profile detailing experimental setup
    ambiguity : float or None (optional)
        Amount of time each offset label should span
    Returns
    ----------
    stacked_offsets : ndarray (S x F x T)
        Array of multiple discrete offset activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    """
    # Initialize an empty list to hold the offset arrays
    stacked_offsets = list()
    # Loop through the slices of notes
    for slc in range(len(stacked_notes)):
        # Get the pitches and intervals from the slice
        pitches, intervals = stacked_notes[slc]
        # Convert to offsets and add to the list
        # (ambiguity now defaults to None, matching notes_to_offsets and
        # stacked_notes_to_stacked_onsets — previously it was required)
        slice_offsets = notes_to_offsets(pitches, intervals, times, profile, ambiguity)
        stacked_offsets.append(multi_pitch_to_stacked_multi_pitch(slice_offsets))
    # Collapse the list into an array
    stacked_offsets = np.concatenate(stacked_offsets)
    return stacked_offsets
def stacked_multi_pitch_to_stacked_offsets(stacked_multi_pitch):
    """
    Obtain a stacked representation detailing where discrete pitch activity ceases.
    Parameters
    ----------
    stacked_multi_pitch : ndarray (S x F x T)
        Array of multiple discrete pitch activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    Returns
    ----------
    stacked_offsets : ndarray (S x F x T)
        Array of multiple discrete offset activation maps
        S - number of slices in stack
        F - number of discrete pitches
        T - number of frames
    """
    # Number of slices along the stack dimension
    stack_size = stacked_multi_pitch.shape[-3]
    # Convert each slice independently, then re-stack along the first axis
    stacked_offsets = np.concatenate([
        multi_pitch_to_stacked_multi_pitch(multi_pitch_to_offsets(stacked_multi_pitch[slc]))
        for slc in range(stack_size)])
    return stacked_offsets
##################################################
# SORTING #
##################################################
def sort_batched_notes(batched_notes, by=0):
    """
    Sort an array of batch-friendly notes by the specified attribute.
    Parameters
    ----------
    batched_notes : ndarray (N x 3)
        Array of note pitches and intervals by row
        N - number of notes
    by : int
        Index to sort notes by
        0 - onset | 1 - offset | 2 - pitch
    Returns
    ----------
    batched_notes : ndarray (N x 3)
        Array of note pitches and intervals by row, sorted by selected attribute
        N - number of notes
    """
    # Define the attributes that can be used to sort the notes (column order)
    attributes = ['onset', 'offset', 'pitch']
    # Obtain the dtype of the batch-friendly notes
    dtype = batched_notes.dtype
    # View the (N x 3) array as one structured record per row, re-using the
    # array's own dtype for every field. Taking a view (instead of assigning
    # to batched_notes.dtype in place, which mutated the CALLER's array and
    # left it with a corrupted structured dtype after np.sort rebound the
    # local name) guarantees the input array is never modified.
    fields = [(attribute, dtype) for attribute in attributes]
    records = np.ascontiguousarray(batched_notes).view(fields)
    # Sort whole rows by the selected field; ties are broken by the
    # remaining fields in dtype order (structured np.sort semantics)
    records = np.sort(records, axis=0, order=attributes[by])
    # Collapse the structured view back into a plain (N x 3) array
    batched_notes = records.view(dtype).reshape(-1, len(attributes))
    return batched_notes
def sort_notes(pitches, intervals, by=0):
    """
    Sort a collection of notes by the specified attribute.
    Parameters
    ----------
    pitches : ndarray (N)
        Array of pitches corresponding to notes
        N - number of notes
    intervals : ndarray (N x 2)
        Array of onset-offset time pairs corresponding to notes
        N - number of notes
    by : int
        Index to sort notes by
        0 - onset | 1 - offset | 2 - pitch
    Returns
    ----------
    pitches : ndarray (N)
        Array of pitches corresponding to notes, sorted by selected attribute
        N - number of notes
    intervals : ndarray (N x 2)
        Array of onset-offset time pairs corresponding to notes, sorted by selected attribute
        N - number of notes
    """
    # Sort in the batch-friendly representation, where all three
    # attributes live in one array, then convert back to loose groups
    sorted_batched = sort_batched_notes(notes_to_batched_notes(pitches, intervals), by)
    pitches, intervals = batched_notes_to_notes(sorted_batched)
    return pitches, intervals
def sort_pitch_list(times, pitch_list):
    """
    Sort a pitch list by frame time.
    Parameters
    ----------
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    pitch_list : list of ndarray (N x [...])
        Array of pitches corresponding to notes
        N - number of pitch observations (frames)
    Returns
    ----------
    times : ndarray (N)
        Time in seconds of beginning of each frame, sorted by time
        N - number of time samples (frames)
    pitch_list : list of ndarray (N x [...])
        Array of pitches corresponding to notes, sorted by time
        N - number of pitch observations (frames)
    """
    # Indices that would put the frame times in ascending order
    order = list(np.argsort(times))
    # Reorder the pitch observations to follow the sorted times
    pitch_list = [pitch_list[idx] for idx in order]
    # Sort the times themselves
    times = np.sort(times)
    return times, pitch_list
##################################################
# DATA MANIPULATION #
##################################################
def rms_norm(audio):
    """
    Perform root-mean-square normalization.
    Parameters
    ----------
    audio : ndarray (N)
        Mono-channel audio to normalize
        N - number of samples in audio
    Returns
    ----------
    audio : ndarray (N)
        Normalized mono-channel audio
        N - number of samples in audio
    """
    # Root of the mean squared sample value
    rms = np.sqrt(np.mean(np.square(audio)))
    # Leave all-zero audio untouched to avoid dividing by zero
    if rms > 0:
        audio = audio / rms
    return audio
def blur_activations(activations, kernel=None, normalize=False, threshold=False):
    """
    Blur activations by convolving them with a kernel.
    Parameters
    ----------
    activations : ndarray
        Provided activations
    kernel : list or ndarray
        Convolution kernel for blurring
    normalize : bool
        Whether to normalize the activations after blurring
    threshold : bool
        Whether to threshold the activations after blurring and (potentially) normalizing
    Returns
    ----------
    activations : ndarray
        Blurred activations
    """
    # Default to an identity kernel, which leaves the activations unchanged,
    # and make sure we are working with an ndarray
    kernel = np.array([1] if kernel is None else kernel)
    # Prepend singleton dimensions until the kernel rank matches the activations
    rank_deficit = len(activations.shape) - len(kernel.shape)
    if rank_deficit != 0:
        kernel = np.reshape(kernel, (1,) * rank_deficit + tuple(kernel.shape))
    # Smear the activations with the kernel, keeping the original extent
    activations = signal.convolve(activations, kernel, mode='same')
    if normalize:
        # Re-scale to unit infinity norm
        activations = normalize_activations(activations)
    if threshold:
        # Binarize (also removes pesky epsilons from the convolution)
        activations = threshold_activations(activations)
    return activations
def normalize_activations(activations):
    """
    Normalizes an array of activations using infinity norm.
    Parameters
    ----------
    activations : ndarray
        Provided activations
    Returns
    ----------
    activations : ndarray
        Normalized activations
    """
    # Largest magnitude across all activations (infinity norm)
    peak = np.max(np.abs(activations))
    # All-zero activations are returned unchanged to avoid dividing by zero
    if peak != 0:
        activations = activations / peak
    return activations
def threshold_activations(activations, threshold=0.5):
    """
    Performs binary thresholding on an array of activations.
    Note: the provided array is modified in place and also returned.
    Parameters
    ----------
    activations : ndarray
        Provided activations
    threshold : float
        Value under which activations are negative
    Returns
    ----------
    activations : ndarray
        Thresholded activations
    """
    # Everything under the threshold becomes a negative activation
    below = activations < threshold
    activations[below] = 0
    # Whatever survived becomes a positive activation
    activations[activations != 0] = 1
    return activations
def framify_activations(activations, win_length, hop_length=1, pad=True):
    """
    Chunk activations into overlapping frames along the last dimension.
    The result gains a new window dimension at axis -2, i.e. the output
    shape is (..., num_hops, win_length).
    Parameters
    ----------
    activations : ndarray
        Provided activations
    win_length : int
        Number of frames to include in each chunk
    hop_length : int
        Number of frames to skip between each chunk
    pad : bool
        Whether to pad incoming activations with zeros to give back array with same shape
    Returns
    ----------
    activations : ndarray
        Framified activations
    """
    # Determine the number of frames provided
    num_frames = activations.shape[-1]
    # Determine the pad length (also used if not padding)
    pad_length = (win_length // 2)
    if pad:
        # Determine the number of intermediary frames required to give back same size
        int_frames = num_frames + 2 * pad_length
        # Pad the activations with zeros
        # NOTE(review): the positional size argument relies on the older
        # librosa pad_center(data, size) signature — confirm the pinned
        # librosa version (size is keyword-only in librosa >= 0.10)
        activations = librosa.util.pad_center(activations, int_frames)
    else:
        # Number of intermediary frames is the same
        int_frames = num_frames
    # TODO - commented code is cleaner but breaks in PyTorch pipeline during model.pre_proc
    """
    # Convert the activations to a fortran array
    activations = np.asfortranarray(activations)
    # Framify the activations using librosa
    activations = librosa.util.frame(activations, win_length, hop_length).copy()
    # Switch window index and time index axes
    activations = np.swapaxes(activations, -1, -2)
    return activations
    """
    # Determine the number of hops in the activations
    num_hops = (int_frames - 2 * pad_length) // hop_length
    # Obtain the indices of the start of each chunk
    chunk_idcs = np.arange(0, num_hops) * hop_length
    # Chunk the activations with the specified window and hop length
    # (each chunk gains a singleton window dimension at axis -2)
    activations = [np.expand_dims(activations[..., i : i + win_length], axis=-2) for i in chunk_idcs]
    # Combine the chunks to get the framified activations
    activations = np.concatenate(activations, axis=-2)
    return activations
def inhibit_activations(activations, times, window_length):
    """
    Remove any activations within a specified time window following a previous activation.
    Note: the provided array is modified in place and also returned.
    TODO - this is extremely slow for non-sparse activations
    Parameters
    ----------
    activations : ndarray
        Provided activations
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    window_length : float
        Duration (seconds) of inhibition window
    Returns
    ----------
    activations : ndarray
        Inhibited activations
    """
    # Keep track of non-inhibited non-zeros
    pitch_idcs_keep = np.empty(0)
    frame_idcs_keep = np.empty(0)
    while True:
        # Determine the pitch and frame indices where activations begin
        # (nonzero() yields row-major order, so index 0 is the earliest
        # remaining frame of the lowest remaining pitch row)
        pitch_idcs, frame_idcs = activations.nonzero()
        # Check if there are any non-zeros left to process
        if len(pitch_idcs) == 0 or len(frame_idcs) == 0:
            # If not, stop looping
            break
        # Determine the location of the next non-zero activation
        next_nz_pitch, next_nz_frame = pitch_idcs[0], frame_idcs[0]
        # Determine where the inhibition window ends
        # (np.inf is appended so a window that extends past the final
        # frame still produces a valid argmax index)
        inhibition_end = np.argmax(np.append(times, np.inf) >= times[next_nz_frame] + window_length)
        # Zero-out the activations in the inhibition window (including the non-zero itself)
        activations[next_nz_pitch, next_nz_frame : inhibition_end] = 0
        # Remember the non-zero that was just processed so it can be restored
        pitch_idcs_keep = np.append(pitch_idcs_keep, next_nz_pitch)
        frame_idcs_keep = np.append(frame_idcs_keep, next_nz_frame)
    # Add back in all of the non-inhibited non-zeros
    activations[pitch_idcs_keep.astype(constants.UINT),
                frame_idcs_keep.astype(constants.UINT)] = 1
    return activations
def remove_activation_blips(activations):
    """
    Remove blips (single-frame positives) in activations.
    Note: the provided array is modified in place and also returned.
    Parameters
    ----------
    activations : ndarray
        Provided activations
    Returns
    ----------
    activations : ndarray
        Blip-free activations
    """
    # A blip is a frame that is simultaneously an onset and an offset,
    # i.e. activity lasting exactly one frame
    blips = np.logical_and(multi_pitch_to_onsets(activations),
                           multi_pitch_to_offsets(activations))
    # Silence the blips
    activations[blips] = 0
    return activations
##################################################
# UTILITY #
##################################################
def seed_everything(seed):
    """
    Set all necessary seeds for PyTorch at once.
    WARNING: the number of workers in the training loader affects behavior:
             each sample ends up processed by a different worker when
             num_workers changes, and each worker has its own random seed.
    Parameters
    ----------
    seed : int
        Seed to use for random number generation
    """
    # Seed Python's builtin PRNG
    random.seed(seed)
    # Seed the PyTorch CPU generator and every CUDA device
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force cuDNN to choose deterministic algorithms
    torch.backends.cudnn.deterministic = True
def estimate_hop_length(times):
    """
    Estimate hop length of a semi-regular but non-uniform series of times.
    ***Taken from an mir_eval pull request.
    Parameters
    ----------
    times : ndarray
        Array of times corresponding to a time series
    Returns
    ----------
    hop_length : float
        Estimated hop length (seconds)
    """
    # Work with the times in ascending order
    times = np.sort(times)
    # Step size between consecutive observations
    steps = np.diff(times)
    # A step is a "non-gap" when it (nearly) equals its predecessor,
    # i.e. the second difference is ~zero; prepend False to align lengths
    non_gaps = np.append([False], np.isclose(np.diff(steps), 0))
    if not np.sum(non_gaps):
        raise ValueError("Time observations are too irregular.")
    # The hop length is the median step size at the non-gaps
    hop_length = np.median(steps[non_gaps])
    return hop_length
def time_series_to_uniform(times, values, hop_length=None, duration=None):
    """
    Convert a semi-regular time series with gaps into a uniform time series.
    ***Taken from an mir_eval pull request.
    Parameters
    ----------
    times : ndarray
        Array of times corresponding to a time series
    values : list of ndarray
        Observations made at times
    hop_length : number or None (optional)
        Time interval (seconds) between each observation in the uniform series
    duration : number or None (optional)
        Total length (seconds) of times series
        If specified, should be greater than all observation times
    Returns
    -------
    times : ndarray
        Uniform time array
    values : ndarray
        Observations corresponding to uniform times
    """
    # With no observations and no duration there is nothing to lay on a grid
    if not len(times) and duration is None:
        return np.array([]), []
    if hop_length is None:
        # Fall back to estimating the hop length, warning about the risk
        warnings.warn(
            "Since hop length is unknown, it will be estimated. This may lead to "
            "unwanted behavior if the observation times are sporadic or irregular.")
        hop_length = estimate_hop_length(times)
    # When the duration is unknown, default it to the final observation
    # time and pad the grid with one extra entry
    extra = 0
    if duration is None:
        duration = times[-1]
        extra += 1
    # Size of the uniform grid
    num_entries = int(np.ceil(duration / hop_length)) + extra
    # Pre-fill every grid position with an empty observation
    new_values = [np.array([])] * num_entries
    new_times = hop_length * np.arange(num_entries)
    # Grid index closest to each provided observation time
    grid_idcs = np.round(times / hop_length).astype(int)
    # Drop each observation into its grid position, ignoring any past the duration
    for position, grid_idx in enumerate(grid_idcs):
        if times[position] <= duration:
            new_values[grid_idx] = values[position]
    return new_times, new_values
def tensor_to_array(tensor):
    """
    Simple helper function to convert a PyTorch tensor
    into a NumPy array in order to keep code readable.
    Parameters
    ----------
    tensor : PyTorch tensor
        Tensor to convert to array
    Returns
    ----------
    array : NumPy ndarray
        Converted array
    """
    # Drop the autograd graph and move to CPU before handing off to NumPy
    return tensor.detach().cpu().numpy()
def array_to_tensor(array, device=None):
    """
    Simple helper function to convert a NumPy array
    into a PyTorch tensor in order to keep code readable.
    Parameters
    ----------
    array : NumPy ndarray
        Array to convert to tensor
    device : string, or None (optional)
        Add tensor to this device, if specified
    Returns
    ----------
    tensor : PyTorch tensor
        Converted tensor
    """
    # Wrap the array without copying its memory
    tensor = torch.from_numpy(array)
    # Transfer to the requested device only when one was given
    return tensor if device is None else tensor.to(device)
def save_pack_npz(path, keys, *args):
    """
    Simple helper function to circumvent hardcoding of
    keyword arguments for NumPy zip loading and saving.
    The desired keys (in-order) for the remaining entries
    are stored as the first entry of the zipped array.
    Parameters
    ----------
    path : string
        Path to save the NumPy zip file
    keys : list of str
        Keys corresponding to the rest of the entries
    *args : object
        Any objects to save to the array
    """
    # Warn when the key list and the entries disagree in length
    if len(keys) != len(args):
        warnings.warn('Number of keys does not match number of entries provided.')
    # Store the key list first, followed by the entries themselves
    np.savez(path, keys, *args)
def load_unpack_npz(path):
    """
    Simple helper function to circumvent hardcoding of
    keyword arguments for NumPy zip loading and saving.
    This assumes that the first entry of the zipped array
    contains the keys (in-order) for the rest of the array.
    Parameters
    ----------
    path : string
        Path to load the NumPy zip file
    Returns
    ----------
    data : dict
        Unpacked dictionary with specified keys inserted
    """
    # Read the archive into a plain dictionary
    data = dict(np.load(path, allow_pickle=True))
    # The first entry holds the desired key names, in order
    saved_keys = data.pop(list(data.keys())[0])
    # Names NumPy assigned to the remaining entries
    default_names = list(data.keys())
    # Re-insert every entry under its intended key
    for position in range(len(saved_keys)):
        data[saved_keys[position]] = data.pop(default_names[position])
    return data
def track_to_dtype(track, dtype):
    """
    Convert all ndarray entries in a dictionary to a specified type.
    Parameters
    ----------
    track : dict
        Dictionary containing data for a track
    dtype : string or type
        Ndarray dtype to convert
    Returns
    ----------
    track : dict
        Dictionary containing data for a track
    """
    # Work on a deep copy so the caller's dictionary is untouched
    track = deepcopy(track)
    for key in list(track.keys()):
        entry = track[key]
        # Only ndarray entries are converted; everything else passes through
        if isinstance(entry, np.ndarray):
            track[key] = entry.astype(dtype)
    return track
def track_to_device(track, device):
    """
    Add all tensor entries in a dictionary to a specified device.
    Parameters
    ----------
    track : dict
        Dictionary containing data for a track
    device : string, or None (optional)
        Add tensor to this device, if specified
    Returns
    ----------
    track : dict
        Dictionary containing data for a track
    """
    # Work on a deep copy so the caller's dictionary is untouched
    track = deepcopy(track)
    for key in list(track.keys()):
        entry = track[key]
        # Only tensor entries are moved; everything else passes through
        if isinstance(entry, torch.Tensor):
            track[key] = entry.to(device)
    return track
def track_to_cpu(track):
    """
    Convert all tensor entries in a dictionary to ndarray, in place.

    Nested dictionaries are processed recursively. Note that the input is
    mutated directly (no deep copy) because tensors carrying gradients
    cannot be deep-copied.

    Parameters
    ----------
    track : dict
        Dictionary containing data for a track

    Returns
    ----------
    track : dict
        Same dictionary with tensors replaced by squeezed ndarrays
    """

    for key in list(track.keys()):
        entry = track[key]
        # Recurse into nested track dictionaries
        if isinstance(entry, dict):
            track[key] = track_to_cpu(entry)
        # Squeeze out the batch dimension and convert tensors to ndarray
        if isinstance(entry, torch.Tensor):
            track[key] = tensor_to_array(entry.squeeze())

    return track
def track_to_batch(track):
    """
    Treat track data as a batch of size one.

    Every ndarray entry is converted to a tensor with a prepended batch
    dimension; other entries are copied through untouched.

    Parameters
    ----------
    track : dict
        Dictionary containing data for a track

    Returns
    ----------
    track : dict
        New dictionary whose ndarray entries are batched tensors
    """

    # Copy first so the caller's arrays are left alone
    track = deepcopy(track)

    for key in list(track.keys()):
        entry = track[key]
        if isinstance(entry, np.ndarray):
            # Tensor conversion followed by a singleton batch dimension
            track[key] = array_to_tensor(entry).unsqueeze(0)

    return track
def try_unpack_dict(data, key):
    """
    Attempt to pull a single entry out of ``data``.

    When ``data`` is not a dictionary, or the key is missing, the object
    is handed back unchanged.

    Parameters
    ----------
    data : object
        Object to query as being a dictionary and containing the specified key
    key : string
        Key specifying entry to unpack, if possible

    Returns
    ----------
    data : object
        Unpacked entry, or the same object provided if no entry exists
    """

    entry = unpack_dict(data, key)

    # Keep the original object whenever the lookup came back empty
    return data if entry is None else entry
def unpack_dict(data, key):
    """
    Determine the corresponding entry for a dictionary key.

    Parameters
    ----------
    data : dictionary or object
        Object to query as being a dictionary and containing the specified key
    key : string
        Key specifying entry to unpack, if possible

    Returns
    ----------
    entry : object or None
        Unpacked entry or None to indicate non-existence
    """

    # Membership on a dict tests its keys directly (inlined query_dict)
    if isinstance(data, dict) and key in data:
        return data[key]

    # Non-dictionaries and missing keys both signal non-existence
    return None
def query_dict(dictionary, key):
    """
    Determine if a dictionary has an entry for a specified key.

    Parameters
    ----------
    dictionary : dict
        Dictionary to query
    key : string
        Key to query

    Returns
    ----------
    exists : bool
        Whether or not the key exists in the dictionary
    """

    # Dictionary membership tests keys directly
    return key in dictionary
def get_tag(tag=None):
    """
    Produce a tag for saving a file when one does not already exist.

    Useful in places where a tag may or may not be provided but a file
    must still be saved under some name.

    Parameters
    ----------
    tag : string or None (optional)
        Name of file if it already exists

    Returns
    ----------
    tag : string
        Name picked for the file
    """

    if tag is None:
        # Fall back to a filesystem-friendly timestamp
        tag = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")

    return tag
def slice_track(track, start, stop, skip=None):
    """
    Slice any ndarray or tensor entries of a dictionary along the last axis.

    Parameters
    ----------
    track : dict
        Dictionary containing data for a track
    start : int
        Beginning index
    stop : int
        End index (excluded in slice)
    skip : list of str
        Keys to skip during this process

    Returns
    ----------
    track : dict
        New dictionary with array-like entries sliced along the final axis
    """

    # Treat an unspecified skip list as empty
    skip = [] if skip is None else skip

    # Copy so the caller's arrays are untouched
    track = deepcopy(track)

    for key in list(track.keys()):
        if key in skip:
            continue
        entry = track[key]
        # Only array-like entries are sliced; scalars and strings pass through
        if isinstance(entry, (np.ndarray, torch.Tensor)):
            track[key] = entry[..., start : stop]

    return track
def feats_to_batch(feats, times):
    """
    Unimplemented stub: intended to wrap raw features (and their frame
    times) into a batched track dictionary. Currently does nothing and
    returns None.
    """
    # TODO - a function which accepts only feats (for deployment)
    # TODO - I don't think I need this at all if fwd accepts raw features
    # TODO - in pre_proc, catch non-dict and call this?
    # TODO - while num_dims < 4: feats.unsqueeze(0)
    pass
| [
"scipy.signal.convolve",
"librosa.midi_to_hz",
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"librosa.util.pad_center",
"copy.deepcopy",
"librosa.hz_to_midi",
"numpy.arange",
"numpy.mean",
"numpy.savez",
"numpy.reshape",
"numpy.where",
"numpy.sort",
"numpy.diff",
"numpy.max",
"... | [((1234, 1250), 'numpy.empty', 'np.empty', (['[0, 3]'], {}), '([0, 3])\n', (1242, 1250), True, 'import numpy as np\n'), ((2053, 2094), 'librosa.midi_to_hz', 'librosa.midi_to_hz', (['batched_notes[..., 2]'], {}), '(batched_notes[..., 2])\n', (2071, 2094), False, 'import librosa\n'), ((2583, 2624), 'librosa.hz_to_midi', 'librosa.hz_to_midi', (['batched_notes[..., 2]'], {}), '(batched_notes[..., 2])\n', (2601, 2624), False, 'import librosa\n'), ((3495, 3538), 'numpy.maximum', 'np.maximum', (['batched_notes[:, 0]', 'start_time'], {}), '(batched_notes[:, 0], start_time)\n', (3505, 3538), True, 'import numpy as np\n'), ((3608, 3650), 'numpy.minimum', 'np.minimum', (['batched_notes[:, 1]', 'stop_time'], {}), '(batched_notes[:, 1], stop_time)\n', (3618, 3650), True, 'import numpy as np\n'), ((5170, 5218), 'numpy.concatenate', 'np.concatenate', (['[pair[0] for pair in note_pairs]'], {}), '([pair[0] for pair in note_pairs])\n', (5184, 5218), True, 'import numpy as np\n'), ((5235, 5283), 'numpy.concatenate', 'np.concatenate', (['[pair[1] for pair in note_pairs]'], {}), '([pair[1] for pair in note_pairs])\n', (5249, 5283), True, 'import numpy as np\n'), ((5906, 5933), 'librosa.midi_to_hz', 'librosa.midi_to_hz', (['pitches'], {}), '(pitches)\n', (5924, 5933), False, 'import librosa\n'), ((6459, 6486), 'librosa.hz_to_midi', 'librosa.hz_to_midi', (['pitches'], {}), '(pitches)\n', (6477, 6486), False, 'import librosa\n'), ((7885, 7908), 'copy.deepcopy', 'deepcopy', (['stacked_notes'], {}), '(stacked_notes)\n', (7893, 7908), False, 'from copy import deepcopy\n'), ((8707, 8730), 'copy.deepcopy', 'deepcopy', (['stacked_notes'], {}), '(stacked_notes)\n', (8715, 8730), False, 'from copy import deepcopy\n'), ((16396, 16424), 'copy.deepcopy', 'deepcopy', (['stacked_pitch_list'], {}), '(stacked_pitch_list)\n', (16404, 16424), False, 'from copy import deepcopy\n'), ((17294, 17322), 'copy.deepcopy', 'deepcopy', (['stacked_pitch_list'], {}), '(stacked_pitch_list)\n', (17302, 17322), 
False, 'from copy import deepcopy\n'), ((18853, 18888), 'numpy.zeros', 'np.zeros', (['(num_pitches, num_frames)'], {}), '((num_pitches, num_frames))\n', (18861, 18888), True, 'import numpy as np\n'), ((20734, 20769), 'numpy.zeros', 'np.zeros', (['(num_pitches, num_frames)'], {}), '((num_pitches, num_frames))\n', (20742, 20769), True, 'import numpy as np\n'), ((22040, 22076), 'numpy.max', 'np.max', (['stacked_multi_pitch'], {'axis': '(-3)'}), '(stacked_multi_pitch, axis=-3)\n', (22046, 22076), True, 'import numpy as np\n'), ((23570, 23605), 'numpy.concatenate', 'np.concatenate', (['stacked_multi_pitch'], {}), '(stacked_multi_pitch)\n', (23584, 23605), True, 'import numpy as np\n'), ((24778, 24813), 'numpy.concatenate', 'np.concatenate', (['stacked_multi_pitch'], {}), '(stacked_multi_pitch)\n', (24792, 24813), True, 'import numpy as np\n'), ((25440, 25476), 'numpy.expand_dims', 'np.expand_dims', (['multi_pitch'], {'axis': '(-3)'}), '(multi_pitch, axis=-3)\n', (25454, 25476), True, 'import numpy as np\n'), ((26610, 26655), 'numpy.zeros', 'np.zeros', (['(num_dofs, num_pitches, num_frames)'], {}), '((num_dofs, num_pitches, num_frames))\n', (26618, 26655), True, 'import numpy as np\n'), ((26891, 26931), 'numpy.expand_dims', 'np.expand_dims', (['(tuning - profile.low)', '(-1)'], {}), '(tuning - profile.low, -1)\n', (26905, 26931), True, 'import numpy as np\n'), ((30547, 30572), 'numpy.concatenate', 'np.concatenate', (['tablature'], {}), '(tablature)\n', (30561, 30572), True, 'import numpy as np\n'), ((31718, 31745), 'numpy.copy', 'np.copy', (['intervals[..., :1]'], {}), '(intervals[..., :1])\n', (31725, 31745), True, 'import numpy as np\n'), ((31765, 31792), 'numpy.copy', 'np.copy', (['intervals[..., 1:]'], {}), '(intervals[..., 1:])\n', (31772, 31792), True, 'import numpy as np\n'), ((32340, 32392), 'numpy.concatenate', 'np.concatenate', (['(onset_times, offset_times)'], {'axis': '(-1)'}), '((onset_times, offset_times), axis=-1)\n', (32354, 32392), True, 'import numpy as 
np\n'), ((33341, 33394), 'numpy.concatenate', 'np.concatenate', (['[first_frame, adjacent_diff]'], {'axis': '(-1)'}), '([first_frame, adjacent_diff], axis=-1)\n', (33355, 33394), True, 'import numpy as np\n'), ((35035, 35065), 'numpy.concatenate', 'np.concatenate', (['stacked_onsets'], {}), '(stacked_onsets)\n', (35049, 35065), True, 'import numpy as np\n'), ((36366, 36396), 'numpy.concatenate', 'np.concatenate', (['stacked_onsets'], {}), '(stacked_onsets)\n', (36380, 36396), True, 'import numpy as np\n'), ((37542, 37569), 'numpy.copy', 'np.copy', (['intervals[..., 1:]'], {}), '(intervals[..., 1:])\n', (37549, 37569), True, 'import numpy as np\n'), ((37633, 37654), 'numpy.copy', 'np.copy', (['offset_times'], {}), '(offset_times)\n', (37640, 37654), True, 'import numpy as np\n'), ((37950, 38002), 'numpy.concatenate', 'np.concatenate', (['(onset_times, offset_times)'], {'axis': '(-1)'}), '((onset_times, offset_times), axis=-1)\n', (37964, 38002), True, 'import numpy as np\n'), ((39064, 39116), 'numpy.concatenate', 'np.concatenate', (['[adjacent_diff, last_frame]'], {'axis': '(-1)'}), '([adjacent_diff, last_frame], axis=-1)\n', (39078, 39116), True, 'import numpy as np\n'), ((40768, 40799), 'numpy.concatenate', 'np.concatenate', (['stacked_offsets'], {}), '(stacked_offsets)\n', (40782, 40799), True, 'import numpy as np\n'), ((42112, 42143), 'numpy.concatenate', 'np.concatenate', (['stacked_offsets'], {}), '(stacked_offsets)\n', (42126, 42143), True, 'import numpy as np\n'), ((43306, 43358), 'numpy.sort', 'np.sort', (['batched_notes'], {'axis': '(0)', 'order': 'attributes[by]'}), '(batched_notes, axis=0, order=attributes[by])\n', (43313, 43358), True, 'import numpy as np\n'), ((45386, 45400), 'numpy.sort', 'np.sort', (['times'], {}), '(times)\n', (45393, 45400), True, 'import numpy as np\n'), ((47186, 47202), 'numpy.array', 'np.array', (['kernel'], {}), '(kernel)\n', (47194, 47202), True, 'import numpy as np\n'), ((47694, 47743), 'scipy.signal.convolve', 
'signal.convolve', (['activations', 'kernel'], {'mode': '"""same"""'}), "(activations, kernel, mode='same')\n", (47709, 47743), False, 'from scipy import signal\n'), ((51217, 51253), 'numpy.concatenate', 'np.concatenate', (['activations'], {'axis': '(-2)'}), '(activations, axis=-2)\n', (51231, 51253), True, 'import numpy as np\n'), ((51929, 51940), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (51937, 51940), True, 'import numpy as np\n'), ((51963, 51974), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (51971, 51974), True, 'import numpy as np\n'), ((53652, 53683), 'numpy.logical_and', 'np.logical_and', (['onsets', 'offsets'], {}), '(onsets, offsets)\n', (53666, 53683), True, 'import numpy as np\n'), ((54494, 54517), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (54511, 54517), False, 'import torch\n'), ((54522, 54554), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (54548, 54554), False, 'import torch\n'), ((54559, 54576), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (54570, 54576), False, 'import random\n'), ((54987, 55001), 'numpy.sort', 'np.sort', (['times'], {}), '(times)\n', (54994, 55001), True, 'import numpy as np\n'), ((56999, 57011), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (57007, 57011), True, 'import numpy as np\n'), ((58407, 58430), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (58423, 58430), False, 'import torch\n'), ((59307, 59334), 'numpy.savez', 'np.savez', (['path', 'keys', '*args'], {}), '(path, keys, *args)\n', (59315, 59334), True, 'import numpy as np\n'), ((60677, 60692), 'copy.deepcopy', 'deepcopy', (['track'], {}), '(track)\n', (60685, 60692), False, 'from copy import deepcopy\n'), ((61646, 61661), 'copy.deepcopy', 'deepcopy', (['track'], {}), '(track)\n', (61654, 61661), False, 'from copy import deepcopy\n'), ((63342, 63357), 'copy.deepcopy', 'deepcopy', (['track'], {}), '(track)\n', (63350, 63357), False, 'from copy 
import deepcopy\n'), ((67054, 67069), 'copy.deepcopy', 'deepcopy', (['track'], {}), '(track)\n', (67062, 67069), False, 'from copy import deepcopy\n'), ((1380, 1412), 'numpy.expand_dims', 'np.expand_dims', (['pitches'], {'axis': '(-1)'}), '(pitches, axis=-1)\n', (1394, 1412), True, 'import numpy as np\n'), ((1520, 1565), 'numpy.concatenate', 'np.concatenate', (['(intervals, pitches)'], {'axis': '(-1)'}), '((intervals, pitches), axis=-1)\n', (1534, 1565), True, 'import numpy as np\n'), ((10018, 10072), 'numpy.concatenate', 'np.concatenate', (['[pair[0] for pair in pitch_list_pairs]'], {}), '([pair[0] for pair in pitch_list_pairs])\n', (10032, 10072), True, 'import numpy as np\n'), ((12640, 12673), 'librosa.midi_to_hz', 'librosa.midi_to_hz', (['pitch_list[i]'], {}), '(pitch_list[i])\n', (12658, 12673), False, 'import librosa\n'), ((13323, 13356), 'librosa.hz_to_midi', 'librosa.hz_to_midi', (['pitch_list[i]'], {}), '(pitch_list[i])\n', (13341, 13356), False, 'import librosa\n'), ((19243, 19289), 'numpy.argmin', 'np.argmin', (['(times <= intervals[..., :1])'], {'axis': '(1)'}), '(times <= intervals[..., :1], axis=1)\n', (19252, 19289), True, 'import numpy as np\n'), ((19310, 19355), 'numpy.argmin', 'np.argmin', (['(times < intervals[..., 1:])'], {'axis': '(1)'}), '(times < intervals[..., 1:], axis=1)\n', (19319, 19355), True, 'import numpy as np\n'), ((21120, 21137), 'numpy.abs', 'np.abs', (['deviation'], {}), '(deviation)\n', (21126, 21137), True, 'import numpy as np\n'), ((30231, 30261), 'numpy.argmax', 'np.argmax', (['multi_pitch'], {'axis': '(0)'}), '(multi_pitch, axis=0)\n', (30240, 30261), True, 'import numpy as np\n'), ((31970, 32002), 'numpy.minimum', 'np.minimum', (['durations', 'ambiguity'], {}), '(durations, ambiguity)\n', (31980, 32002), True, 'import numpy as np\n'), ((32215, 32235), 'numpy.copy', 'np.copy', (['onset_times'], {}), '(onset_times)\n', (32222, 32235), True, 'import numpy as np\n'), ((37837, 37857), 'numpy.copy', 'np.copy', (['onset_times'], 
{}), '(onset_times)\n', (37844, 37857), True, 'import numpy as np\n'), ((45333, 45350), 'numpy.argsort', 'np.argsort', (['times'], {}), '(times)\n', (45343, 45350), True, 'import numpy as np\n'), ((46075, 46094), 'numpy.mean', 'np.mean', (['(audio ** 2)'], {}), '(audio ** 2)\n', (46082, 46094), True, 'import numpy as np\n'), ((47598, 47627), 'numpy.reshape', 'np.reshape', (['kernel', 'new_shape'], {}), '(kernel, new_shape)\n', (47608, 47627), True, 'import numpy as np\n'), ((48382, 48401), 'numpy.abs', 'np.abs', (['activations'], {}), '(activations)\n', (48388, 48401), True, 'import numpy as np\n'), ((50149, 50197), 'librosa.util.pad_center', 'librosa.util.pad_center', (['activations', 'int_frames'], {}), '(activations, int_frames)\n', (50172, 50197), False, 'import librosa\n'), ((50932, 50954), 'numpy.arange', 'np.arange', (['(0)', 'num_hops'], {}), '(0, num_hops)\n', (50941, 50954), True, 'import numpy as np\n'), ((51057, 51116), 'numpy.expand_dims', 'np.expand_dims', (['activations[..., i:i + win_length]'], {'axis': '(-2)'}), '(activations[..., i:i + win_length], axis=-2)\n', (51071, 51116), True, 'import numpy as np\n'), ((52820, 52861), 'numpy.append', 'np.append', (['pitch_idcs_keep', 'next_nz_pitch'], {}), '(pitch_idcs_keep, next_nz_pitch)\n', (52829, 52861), True, 'import numpy as np\n'), ((52888, 52929), 'numpy.append', 'np.append', (['frame_idcs_keep', 'next_nz_frame'], {}), '(frame_idcs_keep, next_nz_frame)\n', (52897, 52929), True, 'import numpy as np\n'), ((55125, 55141), 'numpy.sum', 'np.sum', (['non_gaps'], {}), '(non_gaps)\n', (55131, 55141), True, 'import numpy as np\n'), ((56336, 56500), 'warnings.warn', 'warnings.warn', (['"""Since hop length is unknown, it will be estimated. This may lead to unwanted behavior if the observation times are sporadic or irregular."""'], {}), "(\n 'Since hop length is unknown, it will be estimated. 
This may lead to unwanted behavior if the observation times are sporadic or irregular.'\n )\n", (56349, 56500), False, 'import warnings\n'), ((57085, 57107), 'numpy.arange', 'np.arange', (['num_entries'], {}), '(num_entries)\n', (57094, 57107), True, 'import numpy as np\n'), ((59158, 59232), 'warnings.warn', 'warnings.warn', (['"""Number of keys does not match number of entries provided."""'], {}), "('Number of keys does not match number of entries provided.')\n", (59171, 59232), False, 'import warnings\n'), ((59854, 59886), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (59861, 59886), True, 'import numpy as np\n'), ((10149, 10160), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10157, 10160), True, 'import numpy as np\n'), ((10601, 10654), 'numpy.append', 'np.append', (['pitch_list[idx]', 'slice_pitch_arrays[entry]'], {}), '(pitch_list[idx], slice_pitch_arrays[entry])\n', (10610, 10654), True, 'import numpy as np\n'), ((11631, 11642), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11639, 11642), True, 'import numpy as np\n'), ((18970, 19001), 'numpy.round', 'np.round', (['(pitches - profile.low)'], {}), '(pitches - profile.low)\n', (18978, 19001), True, 'import numpy as np\n'), ((29783, 29810), 'numpy.sum', 'np.sum', (['multi_pitch'], {'axis': '(0)'}), '(multi_pitch, axis=0)\n', (29789, 29810), True, 'import numpy as np\n'), ((30440, 30477), 'numpy.expand_dims', 'np.expand_dims', (['highest_class'], {'axis': '(0)'}), '(highest_class, axis=0)\n', (30454, 30477), True, 'import numpy as np\n'), ((55088, 55107), 'numpy.diff', 'np.diff', (['times'], {'n': '(2)'}), '(times, n=2)\n', (55095, 55107), True, 'import numpy as np\n'), ((55294, 55308), 'numpy.diff', 'np.diff', (['times'], {}), '(times)\n', (55301, 55308), True, 'import numpy as np\n'), ((56208, 56220), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (56216, 56220), True, 'import numpy as np\n'), ((56876, 56906), 'numpy.ceil', 'np.ceil', 
(['(duration / hop_length)'], {}), '(duration / hop_length)\n', (56883, 56906), True, 'import numpy as np\n'), ((57187, 57215), 'numpy.round', 'np.round', (['(times / hop_length)'], {}), '(times / hop_length)\n', (57195, 57215), True, 'import numpy as np\n'), ((66252, 66266), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (66264, 66266), False, 'from datetime import datetime\n'), ((11743, 11771), 'numpy.sum', 'np.sum', (['multi_pitch'], {'axis': '(-2)'}), '(multi_pitch, axis=-2)\n', (11749, 11771), True, 'import numpy as np\n'), ((11991, 12020), 'numpy.where', 'np.where', (['multi_pitch[..., i]'], {}), '(multi_pitch[..., i])\n', (11999, 12020), True, 'import numpy as np\n'), ((21226, 21269), 'numpy.round', 'np.round', (['difference[deviation < tolerance]'], {}), '(difference[deviation < tolerance])\n', (21234, 21269), True, 'import numpy as np\n'), ((52512, 52536), 'numpy.append', 'np.append', (['times', 'np.inf'], {}), '(times, np.inf)\n', (52521, 52536), True, 'import numpy as np\n'), ((10460, 10497), 'numpy.where', 'np.where', (['(times == slice_times[entry])'], {}), '(times == slice_times[entry])\n', (10468, 10497), True, 'import numpy as np\n')] |
import numpy as np
def integrand_sin(x):
    """Integrand x^2 * sin(x), evaluated elementwise via NumPy."""
    return np.sin(x) * x ** 2
def simpson(f, a, b, nstrips):
    """
    Approximate the integral of f over [a, b] by the composite Simpson
    rule using ``nstrips`` strips (2*nstrips + 1 equally spaced nodes).
    """
    grid, width = np.linspace(a, b, num=2*nstrips+1, endpoint=True, retstep=True)
    odd_sum = np.sum(f(grid[1:-1:2]))     # interior nodes with weight 4
    even_sum = np.sum(f(grid[2:-1:2]))    # interior nodes with weight 2
    return width / 3 * (f(grid[0]) + f(grid[-1]) + 4 * odd_sum + 2 * even_sum)
# Strip counts 10, 20, ..., 1280 and the corresponding strip widths
nstrips_all = 10 * 2**np.arange(8)
dx = 1 / nstrips_all
# Closed-form value of the integral of x^2 sin(x) over [0, 1]
exact = 2 * np.sin(1) + np.cos(1) - 2
# Absolute error of the Simpson approximation at each resolution
errs = np.zeros(len(nstrips_all))
for i, nstrips in enumerate(nstrips_all):
    errs[i] = abs(simpson(integrand_sin, 0, 1, nstrips) - exact)
| [
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.arange"
] | [((114, 181), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': '(2 * nstrips + 1)', 'endpoint': '(True)', 'retstep': '(True)'}), '(a, b, num=2 * nstrips + 1, endpoint=True, retstep=True)\n', (125, 181), True, 'import numpy as np\n'), ((60, 69), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (66, 69), True, 'import numpy as np\n'), ((296, 308), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (305, 308), True, 'import numpy as np\n'), ((480, 489), 'numpy.cos', 'np.cos', (['(1)'], {}), '(1)\n', (486, 489), True, 'import numpy as np\n'), ((468, 477), 'numpy.sin', 'np.sin', (['(1)'], {}), '(1)\n', (474, 477), True, 'import numpy as np\n')] |
"""
Tests for domain helpers.
"""
# pylint: disable=missing-docstring
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import numpy.testing as nt
import scipy.optimize as spop
import copy
import reggie.core.domains as domains
### BASE TEST CLASS ###########################################################
class TransformTest(object):
    """Shared checks run against a transform defined over a given domain."""

    def __init__(self, domain, transform):
        self.bounds = domains.BOUNDS[domain]
        self.transform = transform

    def test_singleton(self):
        # Fresh instances and (deep) copies should all compare equal.
        cls = type(self.transform)
        candidates = [cls(), cls(),
                      copy.copy(self.transform),
                      copy.deepcopy(self.transform)]
        for left, right in zip(candidates, candidates[1:]):
            assert left == right

    def test_bounds(self):
        # Mapping the domain bounds forward then back must round-trip.
        raw = np.array(self.bounds)
        mapped = self.transform.get_transform(raw)
        nt.assert_allclose(self.transform.get_inverse(mapped), raw)

    def test_gradfactor(self):
        # The analytic gradient factor must match a finite-difference
        # derivative of the inverse transform at several points.
        for value in (0.1, 0.5, 1, 2):
            point = np.array([value])
            x = self.transform.get_inverse(point)
            analytic = self.transform.get_gradfactor(x)
            numeric = spop.approx_fprime(point, self.transform.get_inverse, 1e-8)
            nt.assert_allclose(analytic, numeric, rtol=1e-6)
### PER-INSTANCE TESTS ########################################################
class TestLogTransform(TransformTest):
    """Run the shared transform checks on the log transform (positive reals)."""

    def __init__(self):
        super(TestLogTransform, self).__init__(domains.POSITIVE, domains.Log())
class TestIdentityTransform(TransformTest):
    """Run the shared transform checks on the identity transform (reals)."""

    def __init__(self):
        super(TestIdentityTransform, self).__init__(domains.REAL, domains.Identity())
| [
"numpy.testing.assert_allclose",
"reggie.core.domains.Log",
"numpy.array",
"copy.deepcopy",
"copy.copy",
"scipy.optimize.approx_fprime",
"reggie.core.domains.Identity"
] | [((660, 685), 'copy.copy', 'copy.copy', (['self.transform'], {}), '(self.transform)\n', (669, 685), False, 'import copy\n'), ((699, 728), 'copy.deepcopy', 'copy.deepcopy', (['self.transform'], {}), '(self.transform)\n', (712, 728), False, 'import copy\n'), ((811, 832), 'numpy.array', 'np.array', (['self.bounds'], {}), '(self.bounds)\n', (819, 832), True, 'import numpy as np\n'), ((1045, 1058), 'numpy.array', 'np.array', (['[t]'], {}), '([t])\n', (1053, 1058), True, 'import numpy as np\n'), ((1172, 1228), 'scipy.optimize.approx_fprime', 'spop.approx_fprime', (['t', 'self.transform.get_inverse', '(1e-08)'], {}), '(t, self.transform.get_inverse, 1e-08)\n', (1190, 1228), True, 'import scipy.optimize as spop\n'), ((1240, 1278), 'numpy.testing.assert_allclose', 'nt.assert_allclose', (['d1', 'd2'], {'rtol': '(1e-06)'}), '(d1, d2, rtol=1e-06)\n', (1258, 1278), True, 'import numpy.testing as nt\n'), ((1479, 1492), 'reggie.core.domains.Log', 'domains.Log', ([], {}), '()\n', (1490, 1492), True, 'import reggie.core.domains as domains\n'), ((1615, 1633), 'reggie.core.domains.Identity', 'domains.Identity', ([], {}), '()\n', (1631, 1633), True, 'import reggie.core.domains as domains\n')] |
"""
Authors: <NAME>, <NAME>, <NAME>
"""
import numpy as np
import scipy.stats as spst
import scipy.linalg as la
class LQFilter:
    """
    Linear-quadratic filtering/control problem solver.

    Factors the characteristic polynomial of the Euler equation implied by
    the objective and solves the resulting banded linear system with an LU
    decomposition; optionally handles discounting (β) and a stochastic
    forcing process described by the coefficients r.
    """

    def __init__(self, d, h, y_m, r=None, h_eps=None, β=None):
        """
        Parameters
        ----------
        d : list or numpy.array (1-D or a 2-D column vector)
            The order of the coefficients: [d_0, d_1, ..., d_m]
        h : scalar
            Parameter of the objective function (corresponding to the
            quadratic term)
        y_m : list or numpy.array (1-D or a 2-D column vector)
            Initial conditions for y
        r : list or numpy.array (1-D or a 2-D column vector)
            The order of the coefficients: [r_0, r_1, ..., r_k]
            (optional, if not defined -> deterministic problem)
        h_eps : scalar
            Extra term added to the central coefficient of ϕ_r (optional)
        β : scalar
            Discount factor (optional, default value is one)

        Raises
        ------
        ValueError
            If y_m does not have length m (one less than the length of d).
        """
        self.h = h
        self.d = np.asarray(d)
        self.m = self.d.shape[0] - 1

        self.y_m = np.asarray(y_m)

        if self.m == self.y_m.shape[0]:
            self.y_m = self.y_m.reshape(self.m, 1)
        else:
            # Bug fix: the original message was a plain string, so
            # "{self.m:d}" was printed literally instead of the value of m.
            raise ValueError(f"y_m must be of length m = {self.m:d}")

        #---------------------------------------------
        # Define the coefficients of ϕ up front
        #---------------------------------------------
        ϕ = np.zeros(2 * self.m + 1)
        for i in range(-self.m, self.m + 1):
            # ϕ_{m-i} is the sum of the i-th (sub/super) diagonal of d d'
            ϕ[self.m - i] = np.sum(np.diag(self.d.reshape(self.m + 1, 1) @
                                           self.d.reshape(1, self.m + 1),
                                           k=-i))
        ϕ[self.m] = ϕ[self.m] + self.h
        self.ϕ = ϕ

        #-----------------------------------------------------
        # If r is given calculate the vector ϕ_r
        #-----------------------------------------------------
        if r is not None:
            self.r = np.asarray(r)
            self.k = self.r.shape[0] - 1
            ϕ_r = np.zeros(2 * self.k + 1)

            for i in range(-self.k, self.k + 1):
                ϕ_r[self.k - i] = np.sum(np.diag(self.r.reshape(self.k + 1, 1) @
                                               self.r.reshape(1, self.k + 1),
                                               k=-i))
            if h_eps is not None:
                ϕ_r[self.k] = ϕ_r[self.k] + h_eps

            self.ϕ_r = ϕ_r

        #-----------------------------------------------------
        # If β is given, define the transformed variables
        #-----------------------------------------------------
        if β is None:
            self.β = 1
        else:
            self.β = β
            self.d = self.β**(np.arange(self.m + 1)/2) * self.d
            self.y_m = self.y_m * (self.β**(- np.arange(1, self.m + 1)/2)).reshape(self.m, 1)

    def construct_W_and_Wm(self, N):
        """
        This constructs the matrices W and W_m for a given number of periods N
        """
        m = self.m
        d = self.d

        W = np.zeros((N + 1, N + 1))
        W_m = np.zeros((N + 1, m))

        #---------------------------------------
        # Terminal conditions
        #---------------------------------------
        D_m1 = np.zeros((m + 1, m + 1))
        M = np.zeros((m + 1, m))

        # (1) Construct the D_{m+1} matrix using the formula
        for j in range(m + 1):
            for k in range(j, m + 1):
                D_m1[j, k] = d[:j + 1] @ d[k - j: k + 1]

        # Make the matrix symmetric
        D_m1 = D_m1 + D_m1.T - np.diag(np.diag(D_m1))

        # (2) Construct the M matrix using the entries of D_m1
        for j in range(m):
            for i in range(j + 1, m + 1):
                M[i, j] = D_m1[i - j - 1, m]

        #----------------------------------------------
        # Euler equations for t = 0, 1, ..., N-(m+1)
        #----------------------------------------------
        ϕ = self.ϕ

        W[:(m + 1), :(m + 1)] = D_m1 + self.h * np.eye(m + 1)
        W[:(m + 1), (m + 1):(2 * m + 1)] = M

        for i, row in enumerate(np.arange(m + 1, N + 1 - m)):
            W[row, (i + 1):(2 * m + 2 + i)] = ϕ

        for i in range(1, m + 1):
            W[N - m + i, -(2 * m + 1 - i):] = ϕ[:-i]

        for i in range(m):
            W_m[N - i, :(m - i)] = ϕ[(m + 1 + i):]

        return W, W_m

    def roots_of_characteristic(self):
        """
        This function calculates z_0 and the 2m roots of the characteristic
        equation associated with the Euler equation (1.7)

        Note:
        ------
        numpy.poly1d(roots, True) defines a polynomial using its roots that can
        be evaluated at any point. If x_1, x_2, ... , x_m are the roots then
            p(x) = (x - x_1)(x - x_2)...(x - x_m)
        """
        m = self.m
        ϕ = self.ϕ

        # Calculate the roots of the 2m-polynomial
        roots = np.roots(ϕ)
        # Sort the roots according to their length (in descending order)
        roots_sorted = roots[np.argsort(abs(roots))[::-1]]

        z_0 = ϕ.sum() / np.poly1d(roots, True)(1)
        z_1_to_m = roots_sorted[:m]     # we need only those outside the unit circle
        λ = 1 / z_1_to_m

        return z_1_to_m, z_0, λ

    def coeffs_of_c(self):
        '''
        This function computes the coefficients {c_j, j = 0, 1, ..., m} for
            c(z) = sum_{j = 0}^{m} c_j z^j

        Based on the expression (1.9). The order is
            c_coeffs = [c_0, c_1, ..., c_{m-1}, c_m]
        '''
        z_1_to_m, z_0 = self.roots_of_characteristic()[:2]

        c_0 = (z_0 * np.prod(z_1_to_m).real * (- 1)**self.m)**(.5)
        c_coeffs = np.poly1d(z_1_to_m, True).c * z_0 / c_0

        return c_coeffs[::-1]

    def solution(self):
        """
        This function calculates {λ_j, j=1,...,m} and {A_j, j=1,...,m}
        of the expression (1.15)
        """
        λ = self.roots_of_characteristic()[2]
        c_0 = self.coeffs_of_c()[-1]

        A = np.zeros(self.m, dtype=complex)
        for j in range(self.m):
            denom = 1 - λ/λ[j]
            A[j] = c_0**(-2) / np.prod(denom[np.arange(self.m) != j])

        return λ, A

    def construct_V(self, N):
        '''
        This function constructs the covariance matrix for x^N (see section 6)
        for a given period N
        '''
        V = np.zeros((N, N))
        ϕ_r = self.ϕ_r

        # V is banded: entries vanish beyond the k-th diagonal
        for i in range(N):
            for j in range(N):
                if abs(i-j) <= self.k:
                    V[i, j] = ϕ_r[self.k + abs(i-j)]

        return V

    def simulate_a(self, N):
        """
        Assuming that the u's are normal, this method draws a random path
        for x^N
        """
        V = self.construct_V(N + 1)
        d = spst.multivariate_normal(np.zeros(N + 1), V)

        return d.rvs()

    def predict(self, a_hist, t):
        """
        This function implements the prediction formula discussed in
        section 6 (1.59). It takes a realization for a^N, and the period
        in which the prediction is formed

        Output: E[abar | a_t, a_{t-1}, ..., a_1, a_0]
        """
        N = np.asarray(a_hist).shape[0] - 1
        a_hist = np.asarray(a_hist).reshape(N + 1, 1)
        V = self.construct_V(N + 1)

        # Project onto the information available up to period t
        aux_matrix = np.zeros((N + 1, N + 1))
        aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
        L = la.cholesky(V).T
        Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist

        return Ea_hist

    def optimal_y(self, a_hist, t=None):
        """
        - if t is NOT given it takes a_hist (list or numpy.array) as a
          deterministic a_t
        - if t is given, it solves the combined control prediction problem
          (section 7) (by default, t == None -> deterministic)

        for a given sequence of a_t (either deterministic or a particular
        realization), it calculates the optimal y_t sequence using the method
        of the lecture

        Note:
        ------
        scipy.linalg.lu normalizes L, U so that L has unit diagonal elements
        To make things consistent with the lecture, we need an auxiliary
        diagonal matrix D which renormalizes L and U
        """
        N = np.asarray(a_hist).shape[0] - 1
        W, W_m = self.construct_W_and_Wm(N)

        L, U = la.lu(W, permute_l=True)
        # Renormalize so that U (not L) carries the unit diagonal
        D = np.diag(1 / np.diag(U))
        U = D @ U
        L = L @ np.diag(1 / np.diag(D))

        J = np.fliplr(np.eye(N + 1))

        if t is None:   # if the problem is deterministic

            a_hist = J @ np.asarray(a_hist).reshape(N + 1, 1)

            #--------------------------------------------
            # Transform the a sequence if β is given
            #--------------------------------------------
            if self.β != 1:
                a_hist = a_hist * (self.β**(np.arange(N + 1) / 2))[::-1].reshape(N + 1, 1)

            a_bar = a_hist - W_m @ self.y_m          # a_bar from the lecture
            Uy = np.linalg.solve(L, a_bar)            # U @ y_bar = L^{-1}
            y_bar = np.linalg.solve(U, Uy)           # y_bar = U^{-1}L^{-1}

            # Reverse the order of y_bar with the matrix J
            J = np.fliplr(np.eye(N + self.m + 1))
            y_hist = J @ np.vstack([y_bar, self.y_m])  # concatenated y_m and y_bar

            #--------------------------------------------
            # Transform the optimal sequence back if β is given
            #--------------------------------------------
            if self.β != 1:
                y_hist = y_hist * (self.β**(- np.arange(-self.m, N + 1)/2)).reshape(N + 1 + self.m, 1)

            return y_hist, L, U, y_bar

        else:           # if the problem is stochastic and we look at it

            Ea_hist = self.predict(a_hist, t).reshape(N + 1, 1)
            Ea_hist = J @ Ea_hist

            a_bar = Ea_hist - W_m @ self.y_m         # a_bar from the lecture
            Uy = np.linalg.solve(L, a_bar)            # U @ y_bar = L^{-1}
            y_bar = np.linalg.solve(U, Uy)           # y_bar = U^{-1}L^{-1}

            # Reverse the order of y_bar with the matrix J
            J = np.fliplr(np.eye(N + self.m + 1))
            y_hist = J @ np.vstack([y_bar, self.y_m])  # concatenated y_m and y_bar

            return y_hist, L, U, y_bar
| [
"scipy.linalg.lu",
"numpy.eye",
"numpy.linalg.solve",
"numpy.prod",
"numpy.asarray",
"numpy.roots",
"numpy.diag",
"scipy.linalg.cholesky",
"numpy.zeros",
"numpy.vstack",
"numpy.poly1d",
"scipy.linalg.inv",
"numpy.arange"
] | [((983, 996), 'numpy.asarray', 'np.asarray', (['d'], {}), '(d)\n', (993, 996), True, 'import numpy as np\n'), ((1054, 1069), 'numpy.asarray', 'np.asarray', (['y_m'], {}), '(y_m)\n', (1064, 1069), True, 'import numpy as np\n'), ((1417, 1441), 'numpy.zeros', 'np.zeros', (['(2 * self.m + 1)'], {}), '(2 * self.m + 1)\n', (1425, 1441), True, 'import numpy as np\n'), ((3058, 3082), 'numpy.zeros', 'np.zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (3066, 3082), True, 'import numpy as np\n'), ((3097, 3117), 'numpy.zeros', 'np.zeros', (['(N + 1, m)'], {}), '((N + 1, m))\n', (3105, 3117), True, 'import numpy as np\n'), ((3263, 3287), 'numpy.zeros', 'np.zeros', (['(m + 1, m + 1)'], {}), '((m + 1, m + 1))\n', (3271, 3287), True, 'import numpy as np\n'), ((3300, 3320), 'numpy.zeros', 'np.zeros', (['(m + 1, m)'], {}), '((m + 1, m))\n', (3308, 3320), True, 'import numpy as np\n'), ((4925, 4936), 'numpy.roots', 'np.roots', (['φ'], {}), '(φ)\n', (4933, 4936), True, 'import numpy as np\n'), ((6011, 6042), 'numpy.zeros', 'np.zeros', (['self.m'], {'dtype': 'complex'}), '(self.m, dtype=complex)\n', (6019, 6042), True, 'import numpy as np\n'), ((6372, 6388), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (6380, 6388), True, 'import numpy as np\n'), ((7290, 7314), 'numpy.zeros', 'np.zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (7298, 7314), True, 'import numpy as np\n'), ((7356, 7369), 'numpy.eye', 'np.eye', (['(t + 1)'], {}), '(t + 1)\n', (7362, 7369), True, 'import numpy as np\n'), ((8276, 8300), 'scipy.linalg.lu', 'la.lu', (['W'], {'permute_l': '(True)'}), '(W, permute_l=True)\n', (8281, 8300), True, 'import scipy.linalg as la\n'), ((1960, 1973), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (1970, 1973), True, 'import numpy as np\n'), ((2034, 2058), 'numpy.zeros', 'np.zeros', (['(2 * self.k + 1)'], {}), '(2 * self.k + 1)\n', (2042, 2058), True, 'import numpy as np\n'), ((4107, 4134), 'numpy.arange', 'np.arange', (['(m + 1)', '(N + 1 - m)'], 
{}), '(m + 1, N + 1 - m)\n', (4116, 4134), True, 'import numpy as np\n'), ((6799, 6814), 'numpy.zeros', 'np.zeros', (['(N + 1)'], {}), '(N + 1)\n', (6807, 6814), True, 'import numpy as np\n'), ((7382, 7396), 'scipy.linalg.cholesky', 'la.cholesky', (['V'], {}), '(V)\n', (7393, 7396), True, 'import scipy.linalg as la\n'), ((8418, 8431), 'numpy.eye', 'np.eye', (['(N + 1)'], {}), '(N + 1)\n', (8424, 8431), True, 'import numpy as np\n'), ((8946, 8971), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'a_bar'], {}), '(L, a_bar)\n', (8961, 8971), True, 'import numpy as np\n'), ((9028, 9050), 'numpy.linalg.solve', 'np.linalg.solve', (['U', 'Uy'], {}), '(U, Uy)\n', (9043, 9050), True, 'import numpy as np\n'), ((9921, 9946), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'a_bar'], {}), '(L, a_bar)\n', (9936, 9946), True, 'import numpy as np\n'), ((10003, 10025), 'numpy.linalg.solve', 'np.linalg.solve', (['U', 'Uy'], {}), '(U, Uy)\n', (10018, 10025), True, 'import numpy as np\n'), ((3585, 3598), 'numpy.diag', 'np.diag', (['D_m1'], {}), '(D_m1)\n', (3592, 3598), True, 'import numpy as np\n'), ((4015, 4028), 'numpy.eye', 'np.eye', (['(m + 1)'], {}), '(m + 1)\n', (4021, 4028), True, 'import numpy as np\n'), ((5095, 5117), 'numpy.poly1d', 'np.poly1d', (['roots', '(True)'], {}), '(roots, True)\n', (5104, 5117), True, 'import numpy as np\n'), ((7195, 7213), 'numpy.asarray', 'np.asarray', (['a_hist'], {}), '(a_hist)\n', (7205, 7213), True, 'import numpy as np\n'), ((8325, 8335), 'numpy.diag', 'np.diag', (['U'], {}), '(U)\n', (8332, 8335), True, 'import numpy as np\n'), ((9175, 9197), 'numpy.eye', 'np.eye', (['(N + self.m + 1)'], {}), '(N + self.m + 1)\n', (9181, 9197), True, 'import numpy as np\n'), ((9224, 9252), 'numpy.vstack', 'np.vstack', (['[y_bar, self.y_m]'], {}), '([y_bar, self.y_m])\n', (9233, 9252), True, 'import numpy as np\n'), ((10150, 10172), 'numpy.eye', 'np.eye', (['(N + self.m + 1)'], {}), '(N + self.m + 1)\n', (10156, 10172), True, 'import numpy as np\n'), ((10199, 
10227), 'numpy.vstack', 'np.vstack', (['[y_bar, self.y_m]'], {}), '([y_bar, self.y_m])\n', (10208, 10227), True, 'import numpy as np\n'), ((5691, 5716), 'numpy.poly1d', 'np.poly1d', (['z_1_to_m', '(True)'], {}), '(z_1_to_m, True)\n', (5700, 5716), True, 'import numpy as np\n'), ((7146, 7164), 'numpy.asarray', 'np.asarray', (['a_hist'], {}), '(a_hist)\n', (7156, 7164), True, 'import numpy as np\n'), ((7417, 7426), 'scipy.linalg.inv', 'la.inv', (['L'], {}), '(L)\n', (7423, 7426), True, 'import scipy.linalg as la\n'), ((8184, 8202), 'numpy.asarray', 'np.asarray', (['a_hist'], {}), '(a_hist)\n', (8194, 8202), True, 'import numpy as np\n'), ((8383, 8393), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (8390, 8393), True, 'import numpy as np\n'), ((2738, 2759), 'numpy.arange', 'np.arange', (['(self.m + 1)'], {}), '(self.m + 1)\n', (2747, 2759), True, 'import numpy as np\n'), ((5626, 5643), 'numpy.prod', 'np.prod', (['z_1_to_m'], {}), '(z_1_to_m)\n', (5633, 5643), True, 'import numpy as np\n'), ((8518, 8536), 'numpy.asarray', 'np.asarray', (['a_hist'], {}), '(a_hist)\n', (8528, 8536), True, 'import numpy as np\n'), ((6151, 6168), 'numpy.arange', 'np.arange', (['self.m'], {}), '(self.m)\n', (6160, 6168), True, 'import numpy as np\n'), ((2818, 2842), 'numpy.arange', 'np.arange', (['(1)', '(self.m + 1)'], {}), '(1, self.m + 1)\n', (2827, 2842), True, 'import numpy as np\n'), ((8799, 8815), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (8808, 8815), True, 'import numpy as np\n'), ((9551, 9576), 'numpy.arange', 'np.arange', (['(-self.m)', '(N + 1)'], {}), '(-self.m, N + 1)\n', (9560, 9576), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ..layers.LayerSandPileReservoir import LayerSandPileReservoir
from ..layers.LayerLinearRegression import LayerLinearRegression
from .LayeredModel import LayeredModel
class SandPileModel(LayeredModel):
    """Two-layer model: a sand-pile reservoir feeding a linear-regression readout."""

    def __init__(self, input_size, output_size, reservoir_size,
                 spectral_scale=0.2, thresh_scale=3.0,
                 input_weight_scale=0.01, regulariser=1e-6):
        """
        input_size         : input dimension of the data
        output_size        : output dimension of the data
        reservoir_size     : size of the reservoir
        spectral_scale     : how much to scale the reservoir weights
        thresh_scale       : scaling applied to the unit toppling threshold
        input_weight_scale : how much to scale the input weights by
        regulariser        : regularisation parameter for the linear regression output
        """
        # Handles used by live-plotting helpers; created lazily elsewhere.
        self.live_im = None
        self.live_fig = None

        # Build and configure the reservoir layer.
        reservoir_layer = LayerSandPileReservoir(input_size, reservoir_size)
        reservoir_layer.initialize_input_weights(
            scale=input_weight_scale, strategy="uniform", offset=0.0, sparsity=0.1)
        reservoir_layer.initialize_threshold(
            reservoir_layer.threshold_unit, thresh_scale=thresh_scale)
        reservoir_layer.initialize_reservoir(
            strategy='uniform', spectral_scale=spectral_scale)

        # Readout sees both the reservoir state and the raw input.
        readout_layer = LayerLinearRegression(
            reservoir_size + input_size, output_size, regulariser=regulariser)

        self.layers = [reservoir_layer, readout_layer]
        super(SandPileModel, self).__init__(self.layers)

    def plot_reservoir(self):
        """Render the recorded reservoir signals as a heatmap."""
        history = self.layers[0].signals
        # Flatten everything past the first (time) axis into one dimension.
        flattened = np.reshape(history, (np.shape(history)[0], -1))
        sns.heatmap(flattened)
        plt.plot()
| [
"numpy.shape",
"matplotlib.pyplot.plot",
"seaborn.heatmap"
] | [((2025, 2051), 'seaborn.heatmap', 'sns.heatmap', (['signals_shape'], {}), '(signals_shape)\n', (2036, 2051), True, 'import seaborn as sns\n'), ((2061, 2071), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1964), 'numpy.shape', 'np.shape', (['signals'], {}), '(signals)\n', (1955, 1964), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import numpy as np
from simdna.util import DEFAULT_LETTER_TO_INDEX
from simdna import util
import math
class PWM(object):
    """A position weight matrix with pseudocount smoothing and sampling helpers."""

    def __init__(self, name, letterToIndex=DEFAULT_LETTER_TO_INDEX):
        self.name = name
        self.letterToIndex = letterToIndex
        # Inverse mapping: column index -> letter.
        self.indexToLetter = {self.letterToIndex[letter]: letter
                              for letter in self.letterToIndex}
        self._rows = []
        self._finalised = False

    def addRow(self, weights):
        """Append one position's probability row; widths must agree."""
        if self._rows:
            assert len(weights) == len(self._rows[0])
        self._rows.append(weights)

    def addRows(self, matrix):
        """Append every row of ``matrix`` and return ``self`` for chaining."""
        for weights in matrix:
            self.addRow(weights=weights)
        return self

    def finalise(self, pseudocountProb=0.001):
        """Smooth rows with a pseudocount, cache log-probs and the best hit."""
        assert 0 <= pseudocountProb < 1
        self._rows = np.array(self._rows)
        # Mix each row with a uniform distribution of mass pseudocountProb.
        self._rows = self._rows * (1 - pseudocountProb) \
            + float(pseudocountProb) / len(self._rows[0])
        for row in self._rows:
            # Every smoothed row must still be a probability distribution.
            assert abs(sum(row) - 1.0) < 0.0001
        self._logRows = np.log(self._rows)
        self._finalised = True
        self.bestPwmHit = self.computeBestHitGivenMatrix(self._rows)
        self.pwmSize = len(self._rows)
        return self

    def getBestHit(self):
        return self.bestPwmHit

    def computeBestHitGivenMatrix(self, matrix):
        """Return the string formed by each position's most probable letter."""
        best_indices = np.argmax(matrix, axis=1)
        return "".join(self.indexToLetter[idx] for idx in best_indices)

    def getRows(self):
        if not self._finalised:
            raise RuntimeError("Please call finalised on " + str(self.name))
        return self._rows

    def sampleFromPwm(self, bg=None):
        """Draw one sequence from the PWM; with ``bg`` also return its log-odds."""
        if not self._finalised:
            raise RuntimeError("Please call finalised on " + str(self.name))
        letters = []
        logOdds = 0
        for row in self._rows:
            idx = util.sampleFromProbsArr(row)
            letter = self.indexToLetter[idx]
            if bg is not None:
                # log-odds of the sampled letter against the background model
                logOdds += np.log(row[idx]) - np.log(bg[letter])
            letters.append(letter)
        hit = "".join(letters)
        return (hit, logOdds) if bg is not None else hit

    def sampleFromPwmAndScore(self, bg):
        return self.sampleFromPwm(bg=bg)

    def __str__(self):
        return self.name + "\n" + str(self._rows)
| [
"numpy.array",
"numpy.log",
"simdna.util.sampleFromProbsArr",
"numpy.argmax"
] | [((941, 961), 'numpy.array', 'np.array', (['self._rows'], {}), '(self._rows)\n', (949, 961), True, 'import numpy as np\n'), ((1182, 1200), 'numpy.log', 'np.log', (['self._rows'], {}), '(self._rows)\n', (1188, 1200), True, 'import numpy as np\n'), ((1969, 1997), 'simdna.util.sampleFromProbsArr', 'util.sampleFromProbsArr', (['row'], {}), '(row)\n', (1992, 1997), False, 'from simdna import util\n'), ((1523, 1548), 'numpy.argmax', 'np.argmax', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (1532, 1548), True, 'import numpy as np\n'), ((2112, 2137), 'numpy.log', 'np.log', (['row[sampledIndex]'], {}), '(row[sampledIndex])\n', (2118, 2137), True, 'import numpy as np\n'), ((2140, 2158), 'numpy.log', 'np.log', (['bg[letter]'], {}), '(bg[letter])\n', (2146, 2158), True, 'import numpy as np\n')] |
from unittest.case import TestCase
import unittest
import pandas as pd
import numpy as np
from datetime import datetime
from qlib import init
from qlib.config import C
from qlib.log import TimeInspector
from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal
def cal_sam_minute(x, sam_minutes, shift=None):
    """
    Sample a raw minute-level timestamp down to ``sam_minutes`` frequency.

    The trading sessions are assumed to be 9:30-11:29 and 13:00-14:59,
    both shifted back by ``shift`` minutes:
    - open time of stock market is [9:30 - shift*pd.Timedelta(minutes=1)]
    - mid close time of stock market is [11:29 - shift*pd.Timedelta(minutes=1)]
    - mid open time of stock market is [13:00 - shift*pd.Timedelta(minutes=1)]
    - close time of stock market is [14:59 - shift*pd.Timedelta(minutes=1)]

    Parameters
    ----------
    x : pd.Timestamp
        The raw timestamp to align.
    sam_minutes : int
        Target sampling frequency in minutes.
    shift : int, optional
        Minutes to shift the session boundaries back by. Defaults to
        ``C.min_data_shift`` so the original two-argument call is unchanged;
        passing it explicitly makes the function usable without qlib config.

    Returns
    -------
    pd.Timestamp
        Start of the sampled minute bar containing ``x``.

    Raises
    ------
    ValueError
        If ``x`` falls outside both trading sessions.
    """
    if shift is None:
        # Backward-compatible default: read the global qlib configuration.
        shift = C.min_data_shift
    day_time = pd.Timestamp(x.date())
    shift_delta = shift * pd.Timedelta(minutes=1)
    open_time = day_time + pd.Timedelta(hours=9, minutes=30) - shift_delta
    mid_close_time = day_time + pd.Timedelta(hours=11, minutes=29) - shift_delta
    mid_open_time = day_time + pd.Timedelta(hours=13, minutes=0) - shift_delta
    close_time = day_time + pd.Timedelta(hours=14, minutes=59) - shift_delta

    # Minute index counted from the session open (afternoon offset by 120).
    if open_time <= x <= mid_close_time:
        minute_index = (x - open_time).seconds // 60
    elif mid_open_time <= x <= close_time:
        minute_index = (x - mid_open_time).seconds // 60 + 120
    else:
        raise ValueError("datetime of calendar is out of range")

    # Snap down to the start of the containing sam_minutes bar.
    minute_index = minute_index // sam_minutes * sam_minutes
    if 0 <= minute_index < 120:
        return open_time + minute_index * pd.Timedelta(minutes=1)
    elif 120 <= minute_index < 240:
        return mid_open_time + (minute_index - 120) * pd.Timedelta(minutes=1)
    raise ValueError("calendar minute_index error, check `min_data_shift` in qlib.config.C")
class TimeUtils(TestCase):
    """Checks that the vectorised minute-sampling helper matches the reference."""

    @classmethod
    def setUpClass(cls):
        # qlib must be initialised before its calendar helpers can be used.
        init()

    def test_cal_sam_minute(self):
        """Compare old and new implementations on random times, then time them."""
        n_samples = 1000
        cal = get_min_cal()

        def gen_args():
            # Draw random calendar times, each paired with a random frequency.
            for t in np.random.choice(cal, size=n_samples, replace=True):
                freq = np.random.choice([1, 2, 3, 4, 5, 6])
                stamp = pd.Timestamp(
                    datetime(
                        2021,
                        month=3,
                        day=3,
                        hour=t.hour,
                        minute=t.minute,
                        second=t.second,
                        microsecond=t.microsecond,
                    )
                )
                yield stamp, freq

        # Correctness: both implementations must agree everywhere.
        for args in gen_args():
            assert cal_sam_minute(*args) == cal_sam_minute_new(*args)

        # Performance comparison of the two implementations.
        args_l = list(gen_args())
        with TimeInspector.logt():
            for args in args_l:
                cal_sam_minute(*args)
        with TimeInspector.logt():
            for args in args_l:
                cal_sam_minute_new(*args)
# Run the unit tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"datetime.datetime",
"qlib.utils.time.get_min_cal",
"numpy.random.choice",
"pandas.Timedelta",
"qlib.utils.time.cal_sam_minute",
"qlib.log.TimeInspector.logt",
"qlib.init",
"unittest.main"
] | [((3232, 3247), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3245, 3247), False, 'import unittest\n'), ((2048, 2054), 'qlib.init', 'init', ([], {}), '()\n', (2052, 2054), False, 'from qlib import init\n'), ((2172, 2185), 'qlib.utils.time.get_min_cal', 'get_min_cal', ([], {}), '()\n', (2183, 2185), False, 'from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal\n'), ((946, 979), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(9)', 'minutes': '(30)'}), '(hours=9, minutes=30)\n', (958, 979), True, 'import pandas as pd\n'), ((990, 1013), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1002, 1013), True, 'import pandas as pd\n'), ((1046, 1080), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(11)', 'minutes': '(29)'}), '(hours=11, minutes=29)\n', (1058, 1080), True, 'import pandas as pd\n'), ((1091, 1114), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1103, 1114), True, 'import pandas as pd\n'), ((1146, 1179), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(13)', 'minutes': '(0)'}), '(hours=13, minutes=0)\n', (1158, 1179), True, 'import pandas as pd\n'), ((1191, 1214), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1203, 1214), True, 'import pandas as pd\n'), ((1243, 1277), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(14)', 'minutes': '(59)'}), '(hours=14, minutes=59)\n', (1255, 1277), True, 'import pandas as pd\n'), ((1288, 1311), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1300, 1311), True, 'import pandas as pd\n'), ((2235, 2285), 'numpy.random.choice', 'np.random.choice', (['cal'], {'size': 'random_n', 'replace': '(True)'}), '(cal, size=random_n, replace=True)\n', (2251, 2285), True, 'import numpy as np\n'), ((2997, 3017), 'qlib.log.TimeInspector.logt', 'TimeInspector.logt', ([], {}), '()\n', (3015, 3017), False, 'from qlib.log import TimeInspector\n'), ((3103, 3123), 
'qlib.log.TimeInspector.logt', 'TimeInspector.logt', ([], {}), '()\n', (3121, 3123), False, 'from qlib.log import TimeInspector\n'), ((1724, 1747), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1736, 1747), True, 'import pandas as pd\n'), ((2317, 2353), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (2333, 2353), True, 'import numpy as np\n'), ((2878, 2903), 'qlib.utils.time.cal_sam_minute', 'cal_sam_minute_new', (['*args'], {}), '(*args)\n', (2896, 2903), True, 'from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal\n'), ((3173, 3198), 'qlib.utils.time.cal_sam_minute', 'cal_sam_minute_new', (['*args'], {}), '(*args)\n', (3191, 3198), True, 'from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal\n'), ((1838, 1861), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (1850, 1861), True, 'import pandas as pd\n'), ((2409, 2530), 'datetime.datetime', 'datetime', (['(2021)'], {'month': '(3)', 'day': '(3)', 'hour': 'time.hour', 'minute': 'time.minute', 'second': 'time.second', 'microsecond': 'time.microsecond'}), '(2021, month=3, day=3, hour=time.hour, minute=time.minute, second=\n time.second, microsecond=time.microsecond)\n', (2417, 2530), False, 'from datetime import datetime\n')] |
import numpy as np
import cv2,os
# Capture face crops from the default webcam and save them as a flattened
# numpy array named after the user, for later recognition training.
cam = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
face_data = []
path = "./data/"
if not os.path.exists(path):
    os.mkdir(path)
file_name = input("Enter the name")
cnt = 0
while True:
    ret, frame = cam.read()
    if not ret:
        break
    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    if len(faces) == 0:
        continue
    # Keep only the largest detected face (sorted ascending by area).
    faces = sorted(faces, key=lambda f: f[2] * f[3])
    x, y, w, h = faces[-1]
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    offset = 10
    # Clamp the padded crop to the frame: a face near the top/left border
    # would otherwise yield a negative start index, which in numpy slicing
    # wraps around and grabs the wrong region.
    y0 = max(0, y - offset)
    x0 = max(0, x - offset)
    face_section = frame[y0:y + h + offset, x0:x + w + offset]
    face_section = cv2.resize(face_section, (100, 100))
    face_data.append(face_section)
    cv2.imshow("face_section", face_section)
    cnt += 1
    cv2.imshow("face", frame)
    print(cnt)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
# Only persist if at least one face was captured: reshaping an empty
# array to (0, -1) raises, and saving an empty dataset is useless anyway.
if face_data:
    face_data = np.asarray(face_data)
    face_data = face_data.reshape((face_data.shape[0], -1))
    print(face_data.shape)
    np.save(os.path.join(path, file_name), face_data)
cam.release()
cv2.destroyAllWindows() | [
"cv2.rectangle",
"os.path.exists",
"numpy.asarray",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"os.mkdir",
"cv2.CascadeClassifier",
"cv2.resize",
"cv2.waitKey",
"numpy.save"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2, os\n'), ((70, 126), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_alt.xml"""'], {}), "('haarcascade_frontalface_alt.xml')\n", (91, 126), False, 'import cv2, os\n'), ((798, 819), 'numpy.asarray', 'np.asarray', (['face_data'], {}), '(face_data)\n', (808, 819), True, 'import numpy as np\n'), ((896, 932), 'numpy.save', 'np.save', (['(path + file_name)', 'face_data'], {}), '(path + file_name, face_data)\n', (903, 932), True, 'import numpy as np\n'), ((944, 967), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (965, 967), False, 'import cv2, os\n'), ((162, 182), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (176, 182), False, 'import cv2, os\n'), ((185, 199), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (193, 199), False, 'import cv2, os\n'), ((442, 504), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n', (455, 504), False, 'import cv2, os\n'), ((579, 615), 'cv2.resize', 'cv2.resize', (['face_section', '(100, 100)'], {}), '(face_section, (100, 100))\n', (589, 615), False, 'import cv2, os\n'), ((647, 687), 'cv2.imshow', 'cv2.imshow', (['"""face_section"""', 'face_section'], {}), "('face_section', face_section)\n", (657, 687), False, 'import cv2, os\n'), ((696, 721), 'cv2.imshow', 'cv2.imshow', (['"""face"""', 'frame'], {}), "('face', frame)\n", (706, 721), False, 'import cv2, os\n'), ((738, 752), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (749, 752), False, 'import cv2, os\n')] |
#! /usr/bin/env python
"""
Module for generating an RBF approximation
of temporal dynamics in POD basis space
"""
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from numpy.lib.scimath import sqrt as csqrt
from scipy import interpolate
import pod as pod
import greedy as gdy
import rom as rom
import plotting as plo
def compute_rbf(Zsnap_rbf, time_rbf, ep=0.05, beta=2.5, rbf_kernel='matern', msg=False):
    """
    Assemble the RBF interpolation system for a set of projected snapshots.

    The scaling factor actually used is the smaller of the user-supplied
    ``ep`` and the fill-distance estimate computed from the snapshot data.

    Input::
        Zsnap_rbf: dictionary of projected snapshots, one entry per state variable
        time_rbf:  array of snapshot time points
        ep:        pre-specified minimum threshold of scaling factor
        beta:      secondary parameter for some IMQ rbf kernels
    Output::
        Zcenter: interpolation (center) matrix
        A:       RBF kernel matrix evaluated on the centers
        epsilon: scaling factor actually used
    """
    rij = compute_radial_distances(Zsnap_rbf)
    epsilon = np.minimum(ep, estimate_epsilon_fill(rij))
    first_key = list(Zsnap_rbf.keys())[0]
    n_centers = Zsnap_rbf[first_key].shape[1] - 1
    A, Zcenter = compute_interp_matrix(Zsnap_rbf, n_centers,
                                       rbf_kernel=rbf_kernel, epsilon=epsilon, beta=beta)
    if msg:
        print("Epsilon specified = {0}, epsilon computed = {1}".format(
            ep, estimate_epsilon_fill(rij)))
        print("Epsilon used = {0}".format(epsilon))
        print('Condition number of A: {0}'.format(np.linalg.cond(A)))
    return Zcenter, A, epsilon
def compute_interp_matrix(Zsnap, Nt, rbf_kernel='matern', epsilon=0.05, beta=2.5):
    """
    Build the RBF kernel matrix using the snapshot modes as centers.

    ``Zsnap`` holds, per solution component, an array of POD modal
    coefficients whose first ``Nt`` columns become the evaluation points
    (the last snapshot column is dropped). All components are stacked
    vertically into a single composite center matrix.

    Input:
        :param: Zsnap       -- dictionary of modes for all snapshots
        :param: Nt          -- number of RBF centers (total snapshots - 1)
        :param: rbf_kernel  -- name of the kernel passed to compute_kernel
        :param: epsilon     -- kernel scaling factor
        :param: beta        -- secondary kernel parameter (unused by default kernel)
    Returns:
        A       -- [Nt, Nt] kernel matrix evaluated on the centers
        Zcenter -- [nw_total, Nt] composite matrix of RBF centers
    """
    keys = Zsnap.keys()
    total_modes = sum(Zsnap[k].shape[0] for k in keys)
    Zcenter = np.zeros((total_modes, Nt), 'd')
    row = 0
    for k in keys:
        n_rows = Zsnap[k].shape[0]
        # Centers are the snapshot columns (t_0, ..., t_{snap-1}).
        Zcenter[row:row + n_rows, :] = Zsnap[k][:, :-1]
        row += n_rows
    # Pairwise distances between all centers, then the kernel evaluation.
    A = compute_kernel(rbf_norms(Zcenter, Zcenter), rbf_kernel, epsilon)
    return A, Zcenter
def compute_kernel(rij, rbf_kernel='matern', epsilon=0.05, beta=2.5):
    """
    Evaluate the chosen RBF kernel on a distance matrix.

    Parameters
    ----------
    rij : ndarray
        Pairwise distance matrix (Nc x Nc for Nc RBF centers).
    rbf_kernel : str
        One of 'gaussian', 'inverseMQ', 'matern', 'matern1', 'MQ'.
    epsilon : float
        Kernel scaling factor.
    beta : float
        Secondary exponent used by the inverse multiquadric kernel.
        (Bug fix: this value was previously read from an undefined name,
        so rbf_kernel='inverseMQ' raised NameError.)

    Returns
    -------
    ndarray
        Kernel matrix A = Phi(rij).

    Raises
    ------
    ValueError
        If the kernel name is not recognised (previously an unknown name
        left ``A`` unbound and raised UnboundLocalError at the return).
    """
    if rbf_kernel == 'gaussian':
        A = rbf_gaussian(rij, epsilon)
    elif rbf_kernel == 'inverseMQ':
        A = rbf_inverse_multiquadric(rij, epsilon, beta)
    elif rbf_kernel == 'matern':
        A = rbf_matern(rij, epsilon)
    elif rbf_kernel == 'matern1':
        A = rbf_matern1(rij, epsilon)
    elif rbf_kernel == 'MQ':
        A = rbf_multiquadric(rij, epsilon)
    else:
        raise ValueError("Unknown rbf_kernel: {0}".format(rbf_kernel))
    return A
def compute_radial_distances(Zsnap):
    """
    Stack the snapshot modes of every component into one center matrix and
    return the pairwise Euclidean distance matrix between its columns.
    """
    keys = Zsnap.keys()
    total_modes = sum(Zsnap[k].shape[0] for k in keys)
    n_centers = Zsnap[list(Zsnap.keys())[0]].shape[1] - 1
    Zcenter = np.zeros((total_modes, n_centers), 'd')
    row = 0
    for k in keys:
        n_rows = Zsnap[k].shape[0]
        # Evaluation points are the columns (t_0, ..., t_{snap-1}).
        Zcenter[row:row + n_rows, :] = Zsnap[k][:, :-1]
        row += n_rows
    # Distances between every pair of center vectors.
    return rbf_norms(Zcenter, Zcenter)
def build_dFdt_multistep(Z_pod, times_pod, nw, flag=None):
    """
    Recover time derivatives of modal coefficients from snapshot data using
    the inverse of a chosen multistep time-discretization scheme.

    Available routines:
        1) 'LF'       : explicit midpoint / leapfrog
        2) 'AB2','AB3': 2nd & 3rd order Adams-Bashforth
        3) 'NY3'      : explicit 3rd order Nystrom
        4) 'BDF-EP2','BDF-EP3': 2nd & 3rd order extrapolated BDF
        default (None): first-order forward differences

    Input-
        Z_pod:     dictionary of projected snapshots per component
        times_pod: array of normalized time points for the snapshots
        nw:        dictionary of number of POD modes per component
        flag:      selected time discretization scheme (see above)
    Output-
        dZdata: dictionary of time derivatives of the modal coefficients,
                size = [ nw[key] x Nt_pod-1 ]
    """
    soln_names = nw.keys()
    dt_pod = times_pod[1:] - times_pod[0:-1]
    dZdata = {}
    for key in soln_names:
        dZdata[key] = np.zeros((nw[key], times_pod.size - 1), 'd')
        for mode in range(nw[key]):
            if flag == 'LF':
                # First-order start-up step, then centered differences.
                dZdata[key][mode, 0] = Z_pod[key][mode, 1] - Z_pod[key][mode, 0]
                dZdata[key][mode, 0] /= dt_pod[0]
                dZdata[key][mode, 1:] = Z_pod[key][mode, 2:] - \
                    Z_pod[key][mode, 0:-2]
                dZdata[key][mode, 1:] /= (dt_pod[1:] + dt_pod[0:-1])
            elif flag == 'AB2':  # Adams Bashforth Order 2
                dZdata[key][mode, 0] = Z_pod[key][mode, 1] - Z_pod[key][mode, 0]
                dZdata[key][mode, 0] /= dt_pod[0]
                for inx in range(1, times_pod.size - 1):
                    dZdata[key][mode, inx] = 2. * \
                        (Z_pod[key][mode, inx + 1] - Z_pod[key]
                         [mode, inx]) / (3. * dt_pod[inx])
                    dZdata[key][mode, inx] += dZdata[key][mode, inx - 1] / 3.
            elif flag == 'AB3':  # Adams Bashforth Order 3
                dZdata[key][mode, 0] = Z_pod[key][mode, 1] - Z_pod[key][mode, 0]
                dZdata[key][mode, 0] /= dt_pod[0]
                # AB2 step to bootstrap the third-order recurrence.
                dZdata[key][mode, 1] = 2. * \
                    (Z_pod[key][mode, 2] - Z_pod[key][mode, 1]) / (3. * dt_pod[1])
                dZdata[key][mode, 1] += dZdata[key][mode, 0] / 3.
                for inx in range(2, times_pod.size - 1):
                    dZdata[key][mode, inx] = 12 * \
                        (Z_pod[key][mode, inx + 1] - Z_pod[key]
                         [mode, inx]) / (23. * dt_pod[inx])
                    dZdata[key][mode, inx] += 16. * dZdata[key][mode,
                                                                inx - 1] / 23. - 5. * dZdata[key][mode, inx - 2] / 23.
            elif flag == 'NY3':  # Explicit Nystrom (k=3)
                dZdata[key][mode, 0:2] = Z_pod[key][mode, 1:3] - \
                    Z_pod[key][mode, 0:2]
                dZdata[key][mode, 0:2] /= dt_pod[0:2]
                for inx in range(2, times_pod.size - 1):
                    dZdata[key][mode, inx] = 3. * \
                        (Z_pod[key][mode, inx + 1] - Z_pod[key]
                         [mode, inx - 1]) / (7. * dt_pod[inx])
                    dZdata[key][mode, inx] += (2. * dZdata[key]
                                               [mode, inx - 1] - dZdata[key][mode, inx - 2]) / 7.
            elif flag == 'BDF-EP2':  # Extrapolated BDF order 2 (SBDF2)
                dZdata[key][mode, 0] = Z_pod[key][mode, 1] - Z_pod[key][mode, 0]
                dZdata[key][mode, 0] /= dt_pod[0]
                for inx in range(1, times_pod.size - 1):
                    # SBDF2: f_n = (3/4 y_{n+1} - y_n + 1/4 y_{n-1})/dt + f_{n-1}/2.
                    # BUGFIX: the third term previously read Z[..., inx] instead of
                    # Z[..., inx-1], which collapsed the stencil (-y_n + y_n/4) and
                    # broke the second-order formula; cf. the BDF-EP3 branch below,
                    # which correctly uses inx-1 and inx-2.
                    dZdata[key][mode, inx] = .75 * Z_pod[key][mode, inx + 1] - Z_pod[key][mode, inx] \
                        + 0.25 * Z_pod[key][mode, inx - 1]
                    dZdata[key][mode, inx] /= (dt_pod[inx])
                    dZdata[key][mode, inx] += 0.5 * dZdata[key][mode, inx - 1]
            elif flag == 'BDF-EP3':  # Extrapolated BDF Order 3
                dZdata[key][mode, 0:2] = Z_pod[key][mode, 1:3] - \
                    Z_pod[key][mode, 0:2]
                dZdata[key][mode, 0:2] /= dt_pod[0:2]
                for inx in range(2, times_pod.size - 1):
                    # SBDF3: f_n = (11/18 y_{n+1} - y_n + 1/2 y_{n-1} - 1/9 y_{n-2})/dt
                    #              + f_{n-1} - f_{n-2}/3.
                    dZdata[key][mode, inx] = 11. * Z_pod[key][mode, inx + 1] / 18. - Z_pod[key][mode, inx] \
                        + 0.5 * Z_pod[key][mode, inx - 1] - \
                        Z_pod[key][mode, inx - 2] / 9.
                    dZdata[key][mode, inx] /= dt_pod[inx]
                    dZdata[key][mode, inx] += dZdata[key][mode,
                                                          inx - 1] - dZdata[key][mode, inx - 2] / 3.
            else:
                # Default: first-order forward differences.
                dZdata[key][mode, :] = Z_pod[key][mode, 1:] - \
                    Z_pod[key][mode, 0:-1]
                dZdata[key][mode, :] /= dt_pod
    return dZdata
def build_dFdt_weights_multistep(Z_pod, times_pod, nw, A, flag=None):
    """
    Solve for the RBF interpolation coefficients of the modal time
    derivatives produced by the selected multistep scheme.

    Input-
        Z_pod:     dictionary of projected snapshots per component
        times_pod: array of normalized time points for the snapshots
        nw:        dictionary of number of POD modes per component
        A:         RBF interpolation (kernel) matrix
        flag:      time discretization scheme forwarded to build_dFdt_multistep
    Output-
        W_p:    dictionary of RBF coefficients, size = [ nw[key] x Nt_pod-1 ]
        dZdata: dictionary of modal time derivatives (same shape)
    """
    dZdata = build_dFdt_multistep(Z_pod, times_pod, nw, flag=flag)
    W_p = {}
    for key in Z_pod.keys():
        coeffs = np.zeros((nw[key], times_pod.size - 1), 'd')
        # One linear solve per mode: A w = dZ/dt.
        for m in range(nw[key]):
            coeffs[m, :] = np.linalg.solve(A, dZdata[key][m, :])
        W_p[key] = coeffs
    return W_p, dZdata
def rbf_multiquadric(r, epsilon=1.0, beta=2.5):
    """Multiquadric kernel: sqrt(1 + (epsilon*r)^2). ``beta`` is unused."""
    scaled = epsilon * r
    return np.sqrt(1.0 + scaled ** 2)
def rbf_inverse_multiquadric(r, epsilon=1.0, beta=2.5):
    """Inverse multiquadric kernel: (1 + (epsilon*r)^2)^(-beta)."""
    base = (epsilon * r) ** 2 + 1
    return np.power(base, -beta)
def rbf_gaussian(r, epsilon=1.0, beta=2.5):
    """Gaussian kernel: exp(-(epsilon*r)^2). ``beta`` is unused."""
    scaled = epsilon * r
    return np.exp(-scaled ** 2)
def rbf_matern(r, epsilon=1.0, beta=2.5):
    """Matern kernel of order 0: exp(-epsilon*r). ``beta`` is unused."""
    scaled = epsilon * r
    return np.exp(-scaled)
def rbf_matern1(r, epsilon=1.0, beta=2.5):
    """Matern kernel of order 1: (1 + epsilon*r) * exp(-epsilon*r)."""
    scaled = epsilon * r
    return (1 + scaled) * np.exp(-scaled)
def rbf_norms(x1, x2, kernel=None):
    """
    Compute the pairwise distance matrix between the column vectors of x1 and x2.

    x1 : N x M_A matrix, M_A vectors in N-space
    x2 : N x M_B matrix, M_B vectors in N-space
    kernel : if None (default), the M_A x M_B Euclidean distance matrix is
        returned; otherwise a signed elementwise difference matrix is built
        (used for the Exp Sin Sqd kernel).
    """
    if kernel is None:
        # Columns are the data points, so transpose before calling cdist.
        return scipy.spatial.distance.cdist(x1.T, x2.T, 'euclidean')
    else:
        assert x1.shape[1] == x2.shape[1], 'x1 and x2 dimensions donot match'
        # NOTE(review): DM[i, j] receives x1[i] - x2[j], which is only a
        # scalar when the rows of x1/x2 are scalar-valued; this branch
        # presumably expects effectively 1-D inputs — confirm with callers.
        DM = np.empty((x1.shape[0], x2.shape[0]), dtype=np.double)
        for i in np.arange(0, x1.shape[0]):
            for j in np.arange(0, x2.shape[0]):
                DM[i, j] = (x1[i] - x2[j])
        return DM
def rbf_evaluate(x, centers, weights, epsilon=0.05, kernel='matern', beta=2.5):
    """
    Evaluate an RBF interpolant at one or more query points.

    Input:
        x       -- N x M matrix, M query points in N-space
        centers -- N x P matrix, P centers defining the RBF interpolant
        weights -- P vector of interpolation coefficients
        epsilon -- kernel scaling factor
        kernel  -- name of the RBF kernel to use
    Output:
        linear combination of weights and kernel evaluations
    """
    dists = rbf_norms(x, centers)
    basis = compute_kernel(dists, kernel, epsilon)
    return weights.dot(basis.T)
def rbf_evaluate_modal(x, centers, wts, epsilon=0.05, kernel='matern', beta=2.5):
    """
    Evaluate a modal RBF interpolant at a query point, one mode per row of wts.

    x       : query point in N-space
    centers : N x P matrix of interpolation centers
    wts     : (modes x P) array of interpolation coefficients
    epsilon : kernel scaling factor
    kernel  : name of the RBF kernel to use
    Returns a vector holding one interpolated value per mode.
    """
    basis = compute_kernel(rbf_norms(x, centers), kernel, epsilon)
    dzdt = np.zeros((wts.shape[0],))
    # Each coefficient row produces one scalar derivative value.
    for j, coeffs in enumerate(wts):
        dzdt[j] = coeffs.dot(basis.T)
    return dzdt
def estimate_epsilon(centers):
    """
    Estimate the RBF scaling factor from the bounding box of the centers,
    using the heuristic popularised by scikit-learn's Rbf implementation.
    """
    n_centers = centers.shape[1]
    # Per-dimension extent of the centers; drop degenerate (zero) extents.
    spans = np.amax(centers, axis=1) - np.amin(centers, axis=1)
    spans = spans[np.nonzero(spans)]
    # Geometric-mean-style estimate: (volume / n_centers)^(1/dim).
    return np.power(np.prod(spans) / n_centers, 1.0 / spans.size)
def estimate_epsilon_fill(rij):
    """
    Estimates the scaling factor as the smallest nonzero entry of the
    distance matrix ``rij``, i.e. the minimum separation between any
    two distinct centers.

    Parameters
    ----------
    rij : square matrix of pairwise center distances; zero entries mark
          a center's distance to itself.

    Returns
    -------
    The minimum nonzero distance in ``rij``.
    """
    # Mask the self-distances with +inf on a copy so that (a) the
    # caller's matrix is no longer mutated in place, and (b) no finite
    # sentinel (the old 999.) can be returned by accident when every
    # real distance exceeds it.
    masked = np.where(rij == 0., np.inf, rij)
    # Technically the fill distance is the largest of the columnwise
    # minima, np.amax(np.amin(masked, axis=0)); this estimator keeps
    # the historical choice of the global minimum.
    return np.amin(np.amin(masked, axis=0))
def err_comp(uh, snap, times_offline, times_online):
    """
    Compute the RMS error norm between the true solution snapshots and
    the NIROM solution projected onto the full-dimensional space.

    Parameters
    ----------
    uh : dict mapping solution name -> NIROM solution array (N x Nt_online)
    snap : dict mapping solution name -> true snapshot array (N x Nt_offline)
    times_offline : time instants of the snapshot columns
    times_online : time instants of the NIROM solution columns

    Returns
    -------
    dict mapping each solution name to its per-time-step RMS error
    (the absolute l2 error norm divided by sqrt(N)).
    """
    first_key = next(iter(uh))
    num_dof = snap[first_key].shape[0]
    # Columns of the offline snapshots matching the online time instants.
    col_idx = np.searchsorted(times_offline, times_online)
    root_n = np.sqrt(num_dof)
    return {name: np.linalg.norm(snap[name][:, col_idx] - uh[name], axis=0) / root_n
            for name in uh}
| [
"numpy.prod",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.amin",
"numpy.power",
"numpy.searchsorted",
"scipy.spatial.distance.cdist",
"numpy.linalg.cond",
"numpy.exp",
"numpy.zeros",
"numpy.empty",
"numpy.nonzero",
"numpy.linalg.norm",
"numpy.amax",
"numpy.arange"
] | [((2977, 3006), 'numpy.zeros', 'np.zeros', (['(nw_total, Nt)', '"""d"""'], {}), "((nw_total, Nt), 'd')\n", (2985, 3006), True, 'import numpy as np\n'), ((4260, 4289), 'numpy.zeros', 'np.zeros', (['(nw_total, Nt)', '"""d"""'], {}), "((nw_total, Nt), 'd')\n", (4268, 4289), True, 'import numpy as np\n'), ((10679, 10712), 'numpy.sqrt', 'np.sqrt', (['((epsilon * r) ** 2 + 1.0)'], {}), '((epsilon * r) ** 2 + 1.0)\n', (10686, 10712), True, 'import numpy as np\n'), ((10865, 10904), 'numpy.power', 'np.power', (['((epsilon * r) ** 2 + 1)', '(-beta)'], {}), '((epsilon * r) ** 2 + 1, -beta)\n', (10873, 10904), True, 'import numpy as np\n'), ((11041, 11068), 'numpy.exp', 'np.exp', (['(-(epsilon * r) ** 2)'], {}), '(-(epsilon * r) ** 2)\n', (11047, 11068), True, 'import numpy as np\n'), ((11163, 11183), 'numpy.exp', 'np.exp', (['(-epsilon * r)'], {}), '(-epsilon * r)\n', (11169, 11183), True, 'import numpy as np\n'), ((13367, 13392), 'numpy.zeros', 'np.zeros', (['(wts.shape[0],)'], {}), '((wts.shape[0],))\n', (13375, 13392), True, 'import numpy as np\n'), ((13653, 13677), 'numpy.amax', 'np.amax', (['centers'], {'axis': '(1)'}), '(centers, axis=1)\n', (13660, 13677), True, 'import numpy as np\n'), ((13691, 13715), 'numpy.amin', 'np.amin', (['centers'], {'axis': '(1)'}), '(centers, axis=1)\n', (13698, 13715), True, 'import numpy as np\n'), ((14672, 14716), 'numpy.searchsorted', 'np.searchsorted', (['times_offline', 'times_online'], {}), '(times_offline, times_online)\n', (14687, 14716), True, 'import numpy as np\n'), ((5562, 5606), 'numpy.zeros', 'np.zeros', (['(nw[key], times_pod.size - 1)', '"""d"""'], {}), "((nw[key], times_pod.size - 1), 'd')\n", (5570, 5606), True, 'import numpy as np\n'), ((10409, 10453), 'numpy.zeros', 'np.zeros', (['(nw[key], times_pod.size - 1)', '"""d"""'], {}), "((nw[key], times_pod.size - 1), 'd')\n", (10417, 10453), True, 'import numpy as np\n'), ((11281, 11301), 'numpy.exp', 'np.exp', (['(-epsilon * r)'], {}), '(-epsilon * r)\n', (11287, 11301), 
True, 'import numpy as np\n'), ((11676, 11729), 'scipy.spatial.distance.cdist', 'scipy.spatial.distance.cdist', (['x1.T', 'x2.T', '"""euclidean"""'], {}), "(x1.T, x2.T, 'euclidean')\n", (11704, 11729), False, 'import scipy\n'), ((11831, 11884), 'numpy.empty', 'np.empty', (['(x1.shape[0], x2.shape[0])'], {'dtype': 'np.double'}), '((x1.shape[0], x2.shape[0]), dtype=np.double)\n', (11839, 11884), True, 'import numpy as np\n'), ((11902, 11927), 'numpy.arange', 'np.arange', (['(0)', 'x1.shape[0]'], {}), '(0, x1.shape[0])\n', (11911, 11927), True, 'import numpy as np\n'), ((13763, 13781), 'numpy.nonzero', 'np.nonzero', (['edges2'], {}), '(edges2)\n', (13773, 13781), True, 'import numpy as np\n'), ((14139, 14159), 'numpy.amin', 'np.amin', (['rij'], {'axis': '(0)'}), '(rij, axis=0)\n', (14146, 14159), True, 'import numpy as np\n'), ((14823, 14860), 'numpy.linalg.norm', 'np.linalg.norm', (['(true - interp)'], {'axis': '(0)'}), '(true - interp, axis=0)\n', (14837, 14860), True, 'import numpy as np\n'), ((10520, 10560), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'dZdata[key][mode, :]'], {}), '(A, dZdata[key][mode, :])\n', (10535, 10560), True, 'import numpy as np\n'), ((11950, 11975), 'numpy.arange', 'np.arange', (['(0)', 'x2.shape[0]'], {}), '(0, x2.shape[0])\n', (11959, 11975), True, 'import numpy as np\n'), ((13807, 13822), 'numpy.prod', 'np.prod', (['edges2'], {}), '(edges2)\n', (13814, 13822), True, 'import numpy as np\n'), ((14892, 14902), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (14899, 14902), True, 'import numpy as np\n'), ((1744, 1761), 'numpy.linalg.cond', 'np.linalg.cond', (['A'], {}), '(A)\n', (1758, 1761), True, 'import numpy as np\n')] |
# harmonypy - A data alignment algorithm.
# Copyright (C) 2018 <NAME>
# 2019 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from scipy.cluster.vq import kmeans
import logging
# create logger
logger = logging.getLogger('harmonypy')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# from IPython.core.debugger import set_trace
def run_harmony(
    data_mat: np.ndarray,
    meta_data: pd.DataFrame,
    vars_use,
    theta = None,
    lamb = None,
    sigma = 0.1,
    nclust = None,
    tau = 0,
    block_size = 0.05,
    max_iter_harmony = 10,
    max_iter_kmeans = 20,
    epsilon_cluster = 1e-5,
    epsilon_harmony = 1e-4,
    plot_convergence = False,
    verbose = True,
    reference_values = None,
    cluster_prior = None,
    random_state = 0
):
    """Run Harmony.

    Prepares the batch-indicator matrices and penalty parameters, then
    constructs a :class:`Harmony` object, which performs the full
    integration in its constructor.

    Parameters
    ----------
    data_mat : embedding matrix; transposed if needed so that columns
        correspond to the rows (cells) of ``meta_data``
    meta_data : per-cell metadata, one row per cell
    vars_use : column name (or list of names) in ``meta_data`` holding
        the batch variable(s) to correct for
    theta : diversity penalty; scalar or one value per batch variable,
        expanded to one value per batch level
    lamb : ridge penalty; scalar or one value per batch variable,
        expanded to one value per batch level
    sigma : soft k-means cluster width (scalar is broadcast per cluster)
    nclust : number of clusters; defaults to min(round(N / 30), 100)
    tau : discounting of theta for small batch levels (0 disables)
    block_size : fraction of cells updated per block in update_R
    max_iter_harmony, max_iter_kmeans : iteration caps
    epsilon_cluster, epsilon_harmony : convergence tolerances
    plot_convergence, reference_values, cluster_prior : accepted for API
        compatibility; not used by this implementation
    random_state : seed for numpy's global RNG before fitting

    Returns
    -------
    Harmony
        The fitted object; the corrected embedding is ``ho.Z_corr``.
    """
    # theta = None
    # lamb = None
    # sigma = 0.1
    # nclust = None
    # tau = 0
    # block_size = 0.05
    # epsilon_cluster = 1e-5
    # epsilon_harmony = 1e-4
    # plot_convergence = False
    # verbose = True
    # reference_values = None
    # cluster_prior = None
    # random_state = 0
    N = meta_data.shape[0]
    # Harmony works on a (dims x cells) matrix; transpose if cells are rows.
    if data_mat.shape[1] != N:
        data_mat = data_mat.T
    assert data_mat.shape[1] == N, \
        "data_mat and meta_data do not have the same number of cells"
    if nclust is None:
        nclust = np.min([np.round(N / 30.0), 100]).astype(int)
    # Broadcast a scalar sigma to one value per cluster.
    if type(sigma) is float and nclust > 1:
        sigma = np.repeat(sigma, nclust)
    if isinstance(vars_use, str):
        vars_use = [vars_use]
    # One-hot indicator matrix of batch assignments: (levels x cells).
    phi = pd.get_dummies(meta_data[vars_use]).to_numpy().T
    # Number of distinct levels for each batch variable.
    phi_n = meta_data[vars_use].describe().loc['unique'].to_numpy().astype(int)
    # Expand theta from scalar / per-variable to one value per batch level.
    if theta is None:
        theta = np.repeat([1] * len(phi_n), phi_n)
    elif isinstance(theta, float) or isinstance(theta, int):
        theta = np.repeat([theta] * len(phi_n), phi_n)
    elif len(theta) == len(phi_n):
        theta = np.repeat([theta], phi_n)
    assert len(theta) == np.sum(phi_n), \
        "each batch variable must have a theta"
    # Expand lamb the same way.
    if lamb is None:
        lamb = np.repeat([1] * len(phi_n), phi_n)
    elif isinstance(lamb, float) or isinstance(lamb, int):
        lamb = np.repeat([lamb] * len(phi_n), phi_n)
    elif len(lamb) == len(phi_n):
        lamb = np.repeat([lamb], phi_n)
    assert len(lamb) == np.sum(phi_n), \
        "each batch variable must have a lambda"
    # Number of items in each category.
    N_b = phi.sum(axis = 1)
    # Proportion of items in each category.
    Pr_b = N_b / N
    # Discount the diversity penalty for rare batch levels.
    if tau > 0:
        theta = theta * (1 - np.exp(-(N_b / (nclust * tau)) ** 2))
    # Ridge penalty matrix; the intercept (first row/col) is unpenalized.
    lamb_mat = np.diag(np.insert(lamb, 0, 0))
    # Mixture-of-experts design: intercept row stacked on batch indicators.
    phi_moe = np.vstack((np.repeat(1, N), phi))
    np.random.seed(random_state)
    # The Harmony constructor runs the whole fitting procedure.
    ho = Harmony(
        data_mat, phi, phi_moe, Pr_b, sigma, theta, max_iter_harmony, max_iter_kmeans,
        epsilon_cluster, epsilon_harmony, nclust, block_size, lamb_mat, verbose
    )
    return ho
class Harmony(object):
    """Harmony integration: alternates diversity-penalized soft k-means
    clustering with ridge-regularized batch-effect correction.

    The whole fit runs inside ``__init__``; afterwards the corrected
    embedding is available from ``result()`` / ``self.Z_corr``.
    """
    def __init__(
            self, Z, Phi, Phi_moe, Pr_b, sigma,
            theta, max_iter_harmony, max_iter_kmeans,
            epsilon_kmeans, epsilon_harmony, K, block_size,
            lamb, verbose
    ):
        # Z_corr is iteratively corrected; Z_orig stays untouched.
        self.Z_corr = np.array(Z)
        self.Z_orig = np.array(Z)
        # Z_cos: scaled then L2-normalized columns, used for cosine distances.
        self.Z_cos = self.Z_orig / self.Z_orig.max(axis=0)
        self.Z_cos = self.Z_cos / np.linalg.norm(self.Z_cos, ord=2, axis=0)
        self.Phi = Phi
        self.Phi_moe = Phi_moe
        self.N = self.Z_corr.shape[1]
        self.Pr_b = Pr_b
        self.B = self.Phi.shape[0] # number of batch variables
        self.d = self.Z_corr.shape[0]
        # Number of recent k-means objectives averaged in check_convergence.
        self.window_size = 3
        self.epsilon_kmeans = epsilon_kmeans
        self.epsilon_harmony = epsilon_harmony
        self.lamb = lamb
        self.sigma = sigma
        self.sigma_prior = sigma
        self.block_size = block_size
        self.K = K # number of clusters
        self.max_iter_harmony = max_iter_harmony
        self.max_iter_kmeans = max_iter_kmeans
        self.verbose = verbose
        self.theta = theta
        # Objective histories, appended to by compute_objective().
        self.objective_harmony = []
        self.objective_kmeans = []
        self.objective_kmeans_dist = []
        self.objective_kmeans_entropy = []
        self.objective_kmeans_cross = []
        self.kmeans_rounds = []
        # Constructing the object runs the full fitting procedure.
        self.allocate_buffers()
        self.init_cluster()
        self.harmonize(self.max_iter_harmony, self.verbose)
    def result(self):
        """Return the batch-corrected embedding (d x N)."""
        return self.Z_corr
    def allocate_buffers(self):
        """Pre-allocate the working arrays used during fitting."""
        self._scale_dist = np.zeros((self.K, self.N))
        self.dist_mat = np.zeros((self.K, self.N))
        self.O = np.zeros((self.K, self.B))
        self.E = np.zeros((self.K, self.B))
        self.W = np.zeros((self.B + 1, self.d))
        self.Phi_Rk = np.zeros((self.B + 1, self.N))
    def init_cluster(self):
        """Initialize centroids, soft assignments R, and O/E statistics."""
        # Start with cluster centroids
        km = kmeans(self.Z_cos.T, self.K, iter=10)
        self.Y = km[0].T
        # (1) Normalize
        self.Y = self.Y / np.linalg.norm(self.Y, ord=2, axis=0)
        # (2) Assign cluster probabilities
        # Squared Euclidean distance of unit vectors: 2 * (1 - cosine).
        self.dist_mat = 2 * (1 - np.dot(self.Y.T, self.Z_cos))
        self.R = -self.dist_mat
        self.R = self.R / self.sigma[:,None]
        # Subtract the columnwise max before exp for numerical stability.
        self.R -= np.max(self.R, axis = 0)
        self.R = np.exp(self.R)
        self.R = self.R / np.sum(self.R, axis = 0)
        # (3) Batch diversity statistics
        # E: expected cluster/batch counts under independence; O: observed.
        self.E = np.outer(np.sum(self.R, axis=1), self.Pr_b)
        self.O = np.inner(self.R , self.Phi)
        self.compute_objective()
        # Save results
        self.objective_harmony.append(self.objective_kmeans[-1])
    def compute_objective(self):
        """Append the current k-means objective (distance + entropy +
        diversity cross-entropy) and its components to the histories."""
        kmeans_error = np.sum(np.multiply(self.R, self.dist_mat))
        # Entropy
        _entropy = np.sum(safe_entropy(self.R) * self.sigma[:,np.newaxis])
        # Cross Entropy
        x = (self.R * self.sigma[:,np.newaxis])
        y = np.tile(self.theta[:,np.newaxis], self.K).T
        z = np.log((self.O + 1) / (self.E + 1))
        w = np.dot(y * z, self.Phi)
        _cross_entropy = np.sum(x * w)
        # Save results
        self.objective_kmeans.append(kmeans_error + _entropy + _cross_entropy)
        self.objective_kmeans_dist.append(kmeans_error)
        self.objective_kmeans_entropy.append(_entropy)
        self.objective_kmeans_cross.append(_cross_entropy)
    def harmonize(self, iter_harmony=10, verbose=True):
        """Outer loop: alternate clustering and covariate regression
        until the harmony objective converges or iterations run out."""
        converged = False
        for i in range(1, iter_harmony + 1):
            if verbose:
                logger.info("Iteration {} of {}".format(i, iter_harmony))
            # STEP 1: Clustering
            self.cluster()
            # STEP 2: Regress out covariates
            # self.moe_correct_ridge()
            self.Z_cos, self.Z_corr, self.W, self.Phi_Rk = moe_correct_ridge(
                self.Z_orig, self.Z_cos, self.Z_corr, self.R, self.W, self.K,
                self.Phi_Rk, self.Phi_moe, self.lamb
            )
            # STEP 3: Check for convergence
            converged = self.check_convergence(1)
            if converged:
                if verbose:
                    logger.info(
                        "Converged after {} iteration{}"
                        .format(i, 's' if i > 1 else '')
                    )
                break
        if verbose and not converged:
            logger.info("Stopped before convergence")
        return 0
    def cluster(self):
        """Inner soft k-means loop: update centroids Y and soft
        assignments R until the clustering objective converges."""
        # Z_cos has changed
        # R is assumed to not have changed
        # Update Y to match new integrated data
        self.dist_mat = 2 * (1 - np.dot(self.Y.T, self.Z_cos))
        for i in range(self.max_iter_kmeans):
            # print("kmeans {}".format(i))
            # STEP 1: Update Y
            self.Y = np.dot(self.Z_cos, self.R.T)
            self.Y = self.Y / np.linalg.norm(self.Y, ord=2, axis=0)
            # STEP 2: Update dist_mat
            self.dist_mat = 2 * (1 - np.dot(self.Y.T, self.Z_cos))
            # STEP 3: Update R
            self.update_R()
            # STEP 4: Check for convergence
            self.compute_objective()
            if i > self.window_size:
                converged = self.check_convergence(0)
                if converged:
                    break
        self.kmeans_rounds.append(i)
        self.objective_harmony.append(self.objective_kmeans[-1])
        return 0
    def update_R(self):
        """Recompute soft cluster assignments with the diversity penalty,
        updating cells block by block so O and E stay consistent."""
        self._scale_dist = -self.dist_mat
        self._scale_dist = self._scale_dist / self.sigma[:,None]
        # Max-subtraction before exp for numerical stability.
        self._scale_dist -= np.max(self._scale_dist, axis=0)
        self._scale_dist = np.exp(self._scale_dist)
        # Update cells in blocks
        update_order = np.arange(self.N)
        np.random.shuffle(update_order)
        n_blocks = np.ceil(1 / self.block_size).astype(int)
        blocks = np.array_split(update_order, n_blocks)
        for b in blocks:
            # STEP 1: Remove cells
            self.E -= np.outer(np.sum(self.R[:,b], axis=1), self.Pr_b)
            self.O -= np.dot(self.R[:,b], self.Phi[:,b].T)
            # STEP 2: Recompute R for removed cells
            self.R[:,b] = self._scale_dist[:,b]
            # Diversity term: penalize clusters over-represented in a batch.
            self.R[:,b] = np.multiply(
                self.R[:,b],
                np.dot(
                    np.power((self.E + 1) / (self.O + 1), self.theta),
                    self.Phi[:,b]
                )
            )
            # Renormalize columns so each cell's assignments sum to 1.
            self.R[:,b] = self.R[:,b] / np.linalg.norm(self.R[:,b], ord=1, axis=0)
            # STEP 3: Put cells back
            self.E += np.outer(np.sum(self.R[:,b], axis=1), self.Pr_b)
            self.O += np.dot(self.R[:,b], self.Phi[:,b].T)
        return 0
    def check_convergence(self, i_type):
        """Return True when the relevant objective has stopped improving.

        i_type == 0 compares the last two windows of the k-means
        objective; i_type == 1 compares the last two harmony objectives.
        """
        obj_old = 0.0
        obj_new = 0.0
        # Clustering, compute new window mean
        if i_type == 0:
            okl = len(self.objective_kmeans)
            for i in range(self.window_size):
                obj_old += self.objective_kmeans[okl - 2 - i]
                obj_new += self.objective_kmeans[okl - 1 - i]
            if abs(obj_old - obj_new) / abs(obj_old) < self.epsilon_kmeans:
                return True
            return False
        # Harmony
        if i_type == 1:
            obj_old = self.objective_harmony[-2]
            obj_new = self.objective_harmony[-1]
            if (obj_old - obj_new) / abs(obj_old) < self.epsilon_harmony:
                return True
            return False
        return True
def safe_entropy(x: np.ndarray) -> np.ndarray:
    """Return ``x * log(x)`` elementwise with non-finite products zeroed.

    Zero entries of ``x`` produce ``0 * log(0)``; the resulting nan/inf
    values are replaced by 0 so entropy terms from empty assignments do
    not propagate through the objective.
    """
    # Suppress the divide-by-zero / invalid-value RuntimeWarnings that
    # np.log emits for zero entries; those products are zeroed below,
    # so the warnings were pure noise.
    with np.errstate(divide='ignore', invalid='ignore'):
        y = np.multiply(x, np.log(x))
    y[~np.isfinite(y)] = 0.0
    return y
def moe_correct_ridge(Z_orig, Z_cos, Z_corr, R, W, K, Phi_Rk, Phi_moe, lamb):
    """Regress batch covariates out of the embedding, one cluster at a time.

    For each cluster, a ridge regression of the original embedding on the
    responsibility-weighted mixture-of-experts design matrix is solved,
    and the batch-dependent part of the fit is subtracted from Z_corr.
    The intercept row of the coefficients is zeroed first so the cluster
    centroid itself is not removed.  Returns the L2-normalized corrected
    embedding, the corrected embedding, and the last cluster's
    coefficients and weighted design matrix.
    """
    Z_corr = Z_orig.copy()
    for k in range(K):
        # Design matrix weighted by cluster-k responsibilities.
        Phi_Rk = Phi_moe * R[k, :]
        lhs = Phi_Rk.dot(Phi_moe.T) + lamb
        W = np.linalg.inv(lhs).dot(Phi_Rk).dot(Z_orig.T)
        W[0, :] = 0  # do not remove the intercept
        Z_corr -= W.T.dot(Phi_Rk)
    Z_cos = Z_corr / np.linalg.norm(Z_corr, ord=2, axis=0)
    return Z_cos, Z_corr, W, Phi_Rk
| [
"logging.getLogger",
"logging.StreamHandler",
"numpy.log",
"numpy.array_split",
"numpy.array",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.arange",
"numpy.multiply",
"scipy.cluster.vq.kmeans",
"numpy.repeat",
"numpy.max",
"numpy.exp",
"numpy.dot",
"numpy.random.seed",
"numpy.round",
... | [((864, 894), 'logging.getLogger', 'logging.getLogger', (['"""harmonypy"""'], {}), "('harmonypy')\n", (881, 894), False, 'import logging\n'), ((931, 954), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (952, 954), False, 'import logging\n'), ((994, 1067), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1011, 1067), False, 'import logging\n'), ((3530, 3558), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (3544, 3558), True, 'import numpy as np\n'), ((2276, 2300), 'numpy.repeat', 'np.repeat', (['sigma', 'nclust'], {}), '(sigma, nclust)\n', (2285, 2300), True, 'import numpy as np\n'), ((2799, 2812), 'numpy.sum', 'np.sum', (['phi_n'], {}), '(phi_n)\n', (2805, 2812), True, 'import numpy as np\n'), ((3147, 3160), 'numpy.sum', 'np.sum', (['phi_n'], {}), '(phi_n)\n', (3153, 3160), True, 'import numpy as np\n'), ((3453, 3474), 'numpy.insert', 'np.insert', (['lamb', '(0)', '(0)'], {}), '(lamb, 0, 0)\n', (3462, 3474), True, 'import numpy as np\n'), ((4026, 4037), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (4034, 4037), True, 'import numpy as np\n'), ((4060, 4071), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (4068, 4071), True, 'import numpy as np\n'), ((5500, 5526), 'numpy.zeros', 'np.zeros', (['(self.K, self.N)'], {}), '((self.K, self.N))\n', (5508, 5526), True, 'import numpy as np\n'), ((5554, 5580), 'numpy.zeros', 'np.zeros', (['(self.K, self.N)'], {}), '((self.K, self.N))\n', (5562, 5580), True, 'import numpy as np\n'), ((5608, 5634), 'numpy.zeros', 'np.zeros', (['(self.K, self.B)'], {}), '((self.K, self.B))\n', (5616, 5634), True, 'import numpy as np\n'), ((5662, 5688), 'numpy.zeros', 'np.zeros', (['(self.K, self.B)'], {}), '((self.K, self.B))\n', (5670, 5688), True, 'import numpy as np\n'), ((5716, 5746), 'numpy.zeros', 'np.zeros', (['(self.B + 1, self.d)'], {}), '((self.B 
+ 1, self.d))\n', (5724, 5746), True, 'import numpy as np\n'), ((5774, 5804), 'numpy.zeros', 'np.zeros', (['(self.B + 1, self.N)'], {}), '((self.B + 1, self.N))\n', (5782, 5804), True, 'import numpy as np\n'), ((5886, 5923), 'scipy.cluster.vq.kmeans', 'kmeans', (['self.Z_cos.T', 'self.K'], {'iter': '(10)'}), '(self.Z_cos.T, self.K, iter=10)\n', (5892, 5923), False, 'from scipy.cluster.vq import kmeans\n'), ((6238, 6260), 'numpy.max', 'np.max', (['self.R'], {'axis': '(0)'}), '(self.R, axis=0)\n', (6244, 6260), True, 'import numpy as np\n'), ((6280, 6294), 'numpy.exp', 'np.exp', (['self.R'], {}), '(self.R)\n', (6286, 6294), True, 'import numpy as np\n'), ((6465, 6491), 'numpy.inner', 'np.inner', (['self.R', 'self.Phi'], {}), '(self.R, self.Phi)\n', (6473, 6491), True, 'import numpy as np\n'), ((6947, 6982), 'numpy.log', 'np.log', (['((self.O + 1) / (self.E + 1))'], {}), '((self.O + 1) / (self.E + 1))\n', (6953, 6982), True, 'import numpy as np\n'), ((6995, 7018), 'numpy.dot', 'np.dot', (['(y * z)', 'self.Phi'], {}), '(y * z, self.Phi)\n', (7001, 7018), True, 'import numpy as np\n'), ((7044, 7057), 'numpy.sum', 'np.sum', (['(x * w)'], {}), '(x * w)\n', (7050, 7057), True, 'import numpy as np\n'), ((9486, 9518), 'numpy.max', 'np.max', (['self._scale_dist'], {'axis': '(0)'}), '(self._scale_dist, axis=0)\n', (9492, 9518), True, 'import numpy as np\n'), ((9546, 9570), 'numpy.exp', 'np.exp', (['self._scale_dist'], {}), '(self._scale_dist)\n', (9552, 9570), True, 'import numpy as np\n'), ((9627, 9644), 'numpy.arange', 'np.arange', (['self.N'], {}), '(self.N)\n', (9636, 9644), True, 'import numpy as np\n'), ((9653, 9684), 'numpy.random.shuffle', 'np.random.shuffle', (['update_order'], {}), '(update_order)\n', (9670, 9684), True, 'import numpy as np\n'), ((9762, 9800), 'numpy.array_split', 'np.array_split', (['update_order', 'n_blocks'], {}), '(update_order, n_blocks)\n', (9776, 9800), True, 'import numpy as np\n'), ((11430, 11439), 'numpy.log', 'np.log', (['x'], {}), 
'(x)\n', (11436, 11439), True, 'import numpy as np\n'), ((11629, 11658), 'numpy.multiply', 'np.multiply', (['Phi_moe', 'R[i, :]'], {}), '(Phi_moe, R[i, :])\n', (11640, 11658), True, 'import numpy as np\n'), ((11833, 11852), 'numpy.dot', 'np.dot', (['W.T', 'Phi_Rk'], {}), '(W.T, Phi_Rk)\n', (11839, 11852), True, 'import numpy as np\n'), ((11874, 11911), 'numpy.linalg.norm', 'np.linalg.norm', (['Z_corr'], {'ord': '(2)', 'axis': '(0)'}), '(Z_corr, ord=2, axis=0)\n', (11888, 11911), True, 'import numpy as np\n'), ((3502, 3517), 'numpy.repeat', 'np.repeat', (['(1)', 'N'], {}), '(1, N)\n', (3511, 3517), True, 'import numpy as np\n'), ((4166, 4207), 'numpy.linalg.norm', 'np.linalg.norm', (['self.Z_cos'], {'ord': '(2)', 'axis': '(0)'}), '(self.Z_cos, ord=2, axis=0)\n', (4180, 4207), True, 'import numpy as np\n'), ((5999, 6036), 'numpy.linalg.norm', 'np.linalg.norm', (['self.Y'], {'ord': '(2)', 'axis': '(0)'}), '(self.Y, ord=2, axis=0)\n', (6013, 6036), True, 'import numpy as np\n'), ((6321, 6343), 'numpy.sum', 'np.sum', (['self.R'], {'axis': '(0)'}), '(self.R, axis=0)\n', (6327, 6343), True, 'import numpy as np\n'), ((6413, 6435), 'numpy.sum', 'np.sum', (['self.R'], {'axis': '(1)'}), '(self.R, axis=1)\n', (6419, 6435), True, 'import numpy as np\n'), ((6678, 6712), 'numpy.multiply', 'np.multiply', (['self.R', 'self.dist_mat'], {}), '(self.R, self.dist_mat)\n', (6689, 6712), True, 'import numpy as np\n'), ((6891, 6933), 'numpy.tile', 'np.tile', (['self.theta[:, np.newaxis]', 'self.K'], {}), '(self.theta[:, np.newaxis], self.K)\n', (6898, 6933), True, 'import numpy as np\n'), ((8718, 8746), 'numpy.dot', 'np.dot', (['self.Z_cos', 'self.R.T'], {}), '(self.Z_cos, self.R.T)\n', (8724, 8746), True, 'import numpy as np\n'), ((9954, 9992), 'numpy.dot', 'np.dot', (['self.R[:, b]', 'self.Phi[:, b].T'], {}), '(self.R[:, b], self.Phi[:, b].T)\n', (9960, 9992), True, 'import numpy as np\n'), ((10533, 10571), 'numpy.dot', 'np.dot', (['self.R[:, b]', 'self.Phi[:, b].T'], {}), '(self.R[:, 
b], self.Phi[:, b].T)\n', (10539, 10571), True, 'import numpy as np\n'), ((11448, 11462), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (11459, 11462), True, 'import numpy as np\n'), ((11670, 11695), 'numpy.dot', 'np.dot', (['Phi_Rk', 'Phi_moe.T'], {}), '(Phi_Rk, Phi_moe.T)\n', (11676, 11695), True, 'import numpy as np\n'), ((2377, 2412), 'pandas.get_dummies', 'pd.get_dummies', (['meta_data[vars_use]'], {}), '(meta_data[vars_use])\n', (2391, 2412), True, 'import pandas as pd\n'), ((2747, 2772), 'numpy.repeat', 'np.repeat', (['[theta]', 'phi_n'], {}), '([theta], phi_n)\n', (2756, 2772), True, 'import numpy as np\n'), ((3097, 3121), 'numpy.repeat', 'np.repeat', (['[lamb]', 'phi_n'], {}), '([lamb], phi_n)\n', (3106, 3121), True, 'import numpy as np\n'), ((3391, 3427), 'numpy.exp', 'np.exp', (['(-(N_b / (nclust * tau)) ** 2)'], {}), '(-(N_b / (nclust * tau)) ** 2)\n', (3397, 3427), True, 'import numpy as np\n'), ((6113, 6141), 'numpy.dot', 'np.dot', (['self.Y.T', 'self.Z_cos'], {}), '(self.Y.T, self.Z_cos)\n', (6119, 6141), True, 'import numpy as np\n'), ((8547, 8575), 'numpy.dot', 'np.dot', (['self.Y.T', 'self.Z_cos'], {}), '(self.Y.T, self.Z_cos)\n', (8553, 8575), True, 'import numpy as np\n'), ((8777, 8814), 'numpy.linalg.norm', 'np.linalg.norm', (['self.Y'], {'ord': '(2)', 'axis': '(0)'}), '(self.Y, ord=2, axis=0)\n', (8791, 8814), True, 'import numpy as np\n'), ((9704, 9732), 'numpy.ceil', 'np.ceil', (['(1 / self.block_size)'], {}), '(1 / self.block_size)\n', (9711, 9732), True, 'import numpy as np\n'), ((9892, 9920), 'numpy.sum', 'np.sum', (['self.R[:, b]'], {'axis': '(1)'}), '(self.R[:, b], axis=1)\n', (9898, 9920), True, 'import numpy as np\n'), ((10360, 10403), 'numpy.linalg.norm', 'np.linalg.norm', (['self.R[:, b]'], {'ord': '(1)', 'axis': '(0)'}), '(self.R[:, b], ord=1, axis=0)\n', (10374, 10403), True, 'import numpy as np\n'), ((10471, 10499), 'numpy.sum', 'np.sum', (['self.R[:, b]'], {'axis': '(1)'}), '(self.R[:, b], axis=1)\n', (10477, 10499), 
True, 'import numpy as np\n'), ((11729, 11745), 'numpy.linalg.inv', 'np.linalg.inv', (['x'], {}), '(x)\n', (11742, 11745), True, 'import numpy as np\n'), ((8890, 8918), 'numpy.dot', 'np.dot', (['self.Y.T', 'self.Z_cos'], {}), '(self.Y.T, self.Z_cos)\n', (8896, 8918), True, 'import numpy as np\n'), ((10203, 10252), 'numpy.power', 'np.power', (['((self.E + 1) / (self.O + 1))', 'self.theta'], {}), '((self.E + 1) / (self.O + 1), self.theta)\n', (10211, 10252), True, 'import numpy as np\n'), ((2177, 2195), 'numpy.round', 'np.round', (['(N / 30.0)'], {}), '(N / 30.0)\n', (2185, 2195), True, 'import numpy as np\n')] |
from numbers import Real, Integral
import numpy as np
import openmc.checkvalue as cv
from .angle_energy import AngleEnergy
from .endf import get_cont_record
class NBodyPhaseSpace(AngleEnergy):
    """N-body phase space distribution

    Parameters
    ----------
    total_mass : float
        Total mass of product particles
    n_particles : int
        Number of product particles
    atomic_weight_ratio : float
        Atomic weight ratio of target nuclide
    q_value : float
        Q value for reaction in eV

    Attributes
    ----------
    total_mass : float
        Total mass of product particles
    n_particles : int
        Number of product particles
    atomic_weight_ratio : float
        Atomic weight ratio of target nuclide
    q_value : float
        Q value for reaction in eV

    """

    def __init__(self, total_mass, n_particles, atomic_weight_ratio, q_value):
        # Each assignment goes through the validating setter below.
        self.total_mass = total_mass
        self.n_particles = n_particles
        self.atomic_weight_ratio = atomic_weight_ratio
        self.q_value = q_value

    @property
    def total_mass(self):
        return self._total_mass

    @total_mass.setter
    def total_mass(self, total_mass):
        label = 'N-body phase space total mass'
        cv.check_type(label, total_mass, Real)
        cv.check_greater_than(label, total_mass, 0.)
        self._total_mass = total_mass

    @property
    def n_particles(self):
        return self._n_particles

    @n_particles.setter
    def n_particles(self, n_particles):
        label = 'N-body phase space number of particles'
        cv.check_type(label, n_particles, Integral)
        cv.check_greater_than(label, n_particles, 0)
        self._n_particles = n_particles

    @property
    def atomic_weight_ratio(self):
        return self._atomic_weight_ratio

    @atomic_weight_ratio.setter
    def atomic_weight_ratio(self, atomic_weight_ratio):
        label = 'N-body phase space atomic weight ratio'
        cv.check_type(label, atomic_weight_ratio, Real)
        cv.check_greater_than(label, atomic_weight_ratio, 0.0)
        self._atomic_weight_ratio = atomic_weight_ratio

    @property
    def q_value(self):
        return self._q_value

    @q_value.setter
    def q_value(self, q_value):
        label = 'N-body phase space Q value'
        cv.check_type(label, q_value, Real)
        self._q_value = q_value

    def to_hdf5(self, group):
        """Write distribution to an HDF5 group

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to write to

        """
        attrs = group.attrs
        attrs['type'] = np.string_('nbody')
        attrs['total_mass'] = self.total_mass
        attrs['n_particles'] = self.n_particles
        attrs['atomic_weight_ratio'] = self.atomic_weight_ratio
        attrs['q_value'] = self.q_value

    @classmethod
    def from_hdf5(cls, group):
        """Generate N-body phase space distribution from HDF5 data

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to read from

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        attrs = group.attrs
        return cls(attrs['total_mass'], attrs['n_particles'],
                   attrs['atomic_weight_ratio'], attrs['q_value'])

    @classmethod
    def from_ace(cls, ace, idx, q_value):
        """Generate N-body phase space distribution from ACE data

        Parameters
        ----------
        ace : openmc.data.ace.Table
            ACE table to read from
        idx : int
            Index in XSS array of the start of the energy distribution data
            (LDIS + LOCC - 1)
        q_value : float
            Q-value for reaction in eV

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        n_particles = int(ace.xss[idx])
        total_mass = ace.xss[idx + 1]
        return cls(total_mass, n_particles, ace.atomic_weight_ratio, q_value)

    @classmethod
    def from_endf(cls, file_obj):
        """Generate N-body phase space distribution from an ENDF evaluation

        Parameters
        ----------
        file_obj : file-like object
            ENDF file positions at the start of the N-body phase space
            distribution

        Returns
        -------
        openmc.data.NBodyPhaseSpace
            N-body phase space distribution

        """
        items = get_cont_record(file_obj)
        total_mass = items[0]
        n_particles = items[5]
        # TODO: get awr and Q value
        return cls(total_mass, n_particles, 1.0, 0.0)
| [
"openmc.checkvalue.check_greater_than",
"numpy.string_",
"openmc.checkvalue.check_type"
] | [((1479, 1516), 'openmc.checkvalue.check_type', 'cv.check_type', (['name', 'total_mass', 'Real'], {}), '(name, total_mass, Real)\n', (1492, 1516), True, 'import openmc.checkvalue as cv\n'), ((1525, 1569), 'openmc.checkvalue.check_greater_than', 'cv.check_greater_than', (['name', 'total_mass', '(0.0)'], {}), '(name, total_mass, 0.0)\n', (1546, 1569), True, 'import openmc.checkvalue as cv\n'), ((1736, 1778), 'openmc.checkvalue.check_type', 'cv.check_type', (['name', 'n_particles', 'Integral'], {}), '(name, n_particles, Integral)\n', (1749, 1778), True, 'import openmc.checkvalue as cv\n'), ((1787, 1830), 'openmc.checkvalue.check_greater_than', 'cv.check_greater_than', (['name', 'n_particles', '(0)'], {}), '(name, n_particles, 0)\n', (1808, 1830), True, 'import openmc.checkvalue as cv\n'), ((2024, 2070), 'openmc.checkvalue.check_type', 'cv.check_type', (['name', 'atomic_weight_ratio', 'Real'], {}), '(name, atomic_weight_ratio, Real)\n', (2037, 2070), True, 'import openmc.checkvalue as cv\n'), ((2079, 2132), 'openmc.checkvalue.check_greater_than', 'cv.check_greater_than', (['name', 'atomic_weight_ratio', '(0.0)'], {}), '(name, atomic_weight_ratio, 0.0)\n', (2100, 2132), True, 'import openmc.checkvalue as cv\n'), ((2294, 2328), 'openmc.checkvalue.check_type', 'cv.check_type', (['name', 'q_value', 'Real'], {}), '(name, q_value, Real)\n', (2307, 2328), True, 'import openmc.checkvalue as cv\n'), ((2583, 2602), 'numpy.string_', 'np.string_', (['"""nbody"""'], {}), "('nbody')\n", (2593, 2602), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
import math
def symmetric_pad_img(origin_img, pad_pixel=100):
    """Pad an image on all four sides by mirror-reflecting its border.

    origin_img : source image whose array form has a trailing channel
                 axis (H x W x C)
    pad_pixel  : number of pixels added on each of the four sides
    Returns a new RGB PIL image that is 2 * pad_pixel larger in both
    width and height.
    """
    pixels = np.array(origin_img)
    edge = (pad_pixel, pad_pixel)
    # Mirror the borders in height and width; the channel axis is untouched.
    mirrored = np.pad(pixels, pad_width=(edge, edge, (0, 0)), mode='symmetric')
    return Image.fromarray(mirrored.astype('uint8')).convert('RGB')
def cropfullimg(origin_img, patch_size=512, pad_pixel=100):
    """Tile a full image into overlapping square patches.

    The image is mirror-padded by ``pad_pixel`` on every side, then cut
    into ``patch_size`` x ``patch_size`` patches whose centers of size
    ``patch_size - 2 * pad_pixel`` tile the original image.  Patches in
    the last row/column are anchored to the image edge so they stay
    fully inside the padded image.

    Returns
    -------
    img_patchs : list of PIL patch images, row-major order
    center_crops : per-patch (left, upper, right, lower) box of the
        patch's center region in ORIGINAL image coordinates
    big_patch_crops : per-patch (left, upper, right, lower) box of the
        full patch in PADDED image coordinates
    """
    center_patch_size = patch_size - 2 * pad_pixel
    (w, h) = origin_img.size
    # Number of patches needed to cover the width/height with centers.
    p_w = math.ceil(w / center_patch_size)
    p_h = math.ceil(h / center_patch_size)
    origin_img_array = np.array(origin_img)
    # Mirror-pad height and width; the channel axis is untouched.
    padded_img_array = np.pad(origin_img_array,
                              pad_width=((pad_pixel, pad_pixel), (pad_pixel, pad_pixel), (0, 0)),
                              mode='symmetric')
    padded_img = Image.fromarray(padded_img_array)
    (padded_w, padded_h) = padded_img.size
    img_patchs = []
    center_crops = []
    big_patch_crops = []
    for h_i in range(p_h):
        for w_i in range(p_w):
            # Last column (not last row): anchor the patch to the right edge.
            if(w_i == (p_w - 1) and h_i != (p_h - 1)):
                center_crops.append((w - center_patch_size, h_i * center_patch_size,
                                     w, center_patch_size * (h_i + 1)))
                big_patch_crops.append((padded_w - patch_size, h_i * center_patch_size,
                                        padded_w, center_patch_size * h_i + patch_size))
            # Last row (not last column): anchor the patch to the bottom edge.
            elif(h_i == (p_h - 1) and w_i != (p_w - 1)):
                center_crops.append((w_i * center_patch_size, h - center_patch_size,
                                     center_patch_size * (w_i + 1), h))
                big_patch_crops.append((w_i * center_patch_size, padded_h - patch_size,
                                        center_patch_size * w_i + patch_size, padded_h))
            # Bottom-right corner patch: anchor to both edges.
            elif(w_i == (p_w - 1) and h_i == (p_h - 1)):
                center_crops.append((w - center_patch_size, h - center_patch_size, w, h))
                big_patch_crops.append((padded_w - patch_size, padded_h - patch_size, padded_w, padded_h))
            # Interior patch: regular grid position.
            else:
                center_crops.append((w_i * center_patch_size, h_i * center_patch_size,
                                     center_patch_size * (w_i + 1), center_patch_size * (h_i + 1)))
                big_patch_crops.append((w_i * center_patch_size, h_i * center_patch_size,
                                        center_patch_size * (w_i + 1) + 2 * pad_pixel,
                                        center_patch_size * (h_i + 1) + 2 * pad_pixel))
            # Slice the just-appended box out of the padded array
            # (numpy indexing is rows = height first, columns = width second).
            img_patch = padded_img_array[big_patch_crops[h_i * p_w + w_i][1]:big_patch_crops[h_i * p_w + w_i][3],
                       big_patch_crops[h_i * p_w + w_i][0]:big_patch_crops[h_i * p_w + w_i][2]] # high range, wide range
            img_patch = Image.fromarray(img_patch)
            img_patchs.append(img_patch)
    return img_patchs, center_crops, big_patch_crops
def concat_img_patchs(img_patchs, crops, full_img_size, patch_size=512, pad_pixel=100):
    """Reassemble a full image from overlapping padded patches.

    Parameters
    ----------
    img_patchs : list of patch_size x patch_size PIL images in row-major
        order, as produced by ``cropfullimg``
    crops : per-patch (left, upper, right, lower) box giving where each
        patch's center region is pasted in the full image
    full_img_size : (width, height) of the image to reconstruct
    patch_size : side length of each padded patch
    pad_pixel : width of the mirrored border around each patch's center

    Returns
    -------
    The reconstructed RGB PIL image of size ``full_img_size``.
    """
    center_patch_size = patch_size - 2 * pad_pixel
    (w, h) = full_img_size
    # Grid dimensions must match the tiling used by cropfullimg.
    p_w = math.ceil(w / center_patch_size)
    p_h = math.ceil(h / center_patch_size)
    # Removed the no-op self-assignment ``crops = crops`` that was here.
    concat_img = Image.new('RGB', (w, h), (0, 0, 0))
    for h_i in range(p_h):
        for w_i in range(p_w):
            img_patch = img_patchs[h_i * p_w + w_i]
            # Discard the mirrored border; only the center region is pasted.
            center_img_patch = img_patch.crop((pad_pixel, pad_pixel,
                                               patch_size - pad_pixel, patch_size - pad_pixel))
            concat_img.paste(center_img_patch, crops[h_i * p_w + w_i])
    return concat_img
if __name__ == '__main__':
    # Demo: mirror-pad a sample image by 100 px on each side and save the
    # result next to it.  The commented lines below exercise the full
    # crop / reassemble round trip instead.
    origin_img = Image.open('G:/dataset/temp/1100-14-300-h (4)_crop.tif')
    padded_img = symmetric_pad_img(origin_img)
    padded_img.save('G:/dataset/temp/1100-14-300-h (4)_crop_pad.png')
    # img_patchs, center_crops, big_patch_crops = cropfullimg(origin_img)
    # concat_img = concat_img_patchs(img_patchs, center_crops, (1023, 767))
    # concat_img.show()
    # for patch in img_patchs:
    #     patch.show()
"PIL.Image.fromarray",
"PIL.Image.open",
"math.ceil",
"PIL.Image.new",
"numpy.array",
"numpy.pad"
] | [((134, 154), 'numpy.array', 'np.array', (['origin_img'], {}), '(origin_img)\n', (142, 154), True, 'import numpy as np\n'), ((179, 293), 'numpy.pad', 'np.pad', (['origin_img_array'], {'pad_width': '((pad_pixel, pad_pixel), (pad_pixel, pad_pixel), (0, 0))', 'mode': '"""symmetric"""'}), "(origin_img_array, pad_width=((pad_pixel, pad_pixel), (pad_pixel,\n pad_pixel), (0, 0)), mode='symmetric')\n", (185, 293), True, 'import numpy as np\n'), ((616, 648), 'math.ceil', 'math.ceil', (['(w / center_patch_size)'], {}), '(w / center_patch_size)\n', (625, 648), False, 'import math\n'), ((660, 692), 'math.ceil', 'math.ceil', (['(h / center_patch_size)'], {}), '(h / center_patch_size)\n', (669, 692), False, 'import math\n'), ((719, 739), 'numpy.array', 'np.array', (['origin_img'], {}), '(origin_img)\n', (727, 739), True, 'import numpy as np\n'), ((764, 878), 'numpy.pad', 'np.pad', (['origin_img_array'], {'pad_width': '((pad_pixel, pad_pixel), (pad_pixel, pad_pixel), (0, 0))', 'mode': '"""symmetric"""'}), "(origin_img_array, pad_width=((pad_pixel, pad_pixel), (pad_pixel,\n pad_pixel), (0, 0)), mode='symmetric')\n", (770, 878), True, 'import numpy as np\n'), ((955, 988), 'PIL.Image.fromarray', 'Image.fromarray', (['padded_img_array'], {}), '(padded_img_array)\n', (970, 988), False, 'from PIL import Image\n'), ((3266, 3298), 'math.ceil', 'math.ceil', (['(w / center_patch_size)'], {}), '(w / center_patch_size)\n', (3275, 3298), False, 'import math\n'), ((3310, 3342), 'math.ceil', 'math.ceil', (['(h / center_patch_size)'], {}), '(h / center_patch_size)\n', (3319, 3342), False, 'import math\n'), ((3382, 3417), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(0, 0, 0)'], {}), "('RGB', (w, h), (0, 0, 0))\n", (3391, 3417), False, 'from PIL import Image\n'), ((3845, 3901), 'PIL.Image.open', 'Image.open', (['"""G:/dataset/temp/1100-14-300-h (4)_crop.tif"""'], {}), "('G:/dataset/temp/1100-14-300-h (4)_crop.tif')\n", (3855, 3901), False, 'from PIL import Image\n'), ((2957, 2983), 
'PIL.Image.fromarray', 'Image.fromarray', (['img_patch'], {}), '(img_patch)\n', (2972, 2983), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sympy import *
def session2():
    """Demonstrate elementary NumPy vector and matrix arithmetic by printing
    worked examples (add/sub/elementwise mul, dot product, norm, Euclidean
    distance, cosine similarity, and 3x3 matrix operations)."""
    u = np.array([1, 2, 3])
    v = np.array([4, 5, 6])
    print("矩阵加法", u + v)
    print("矩阵减法", u - v)
    print("矩阵乘法", u * v)
    dot_uv = np.dot(u, v)
    print("矩阵点乘", dot_uv)
    print("矩阵长度", np.sqrt(np.dot(u, u)))
    print("欧氏距离", np.sqrt(np.sum((u - v) ** 2)))
    # cosine similarity: dot product normalized by both vector lengths
    cosine = dot_uv / (np.sqrt(np.dot(u, u)) * np.sqrt(np.dot(v, v)))
    print("余弦相似度,越大越相似,也就表示越近", cosine)
    p = np.array([[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]])
    q = np.array([[4, 5, 6],
                  [7, 8, 9],
                  [10, 11, 12]])
    print("矩阵相加", p + q)
    print("矩阵相减", p - q)
    print("矩阵相乘", p * q)
    print("矩阵点乘,必须是行列一样才能点乘", np.dot(p, q))
def sessionCal():
    """Calculus demo: plot a parabola, then use sympy to compute symbolic
    derivatives and partial derivatives, printing each result."""
    xs = np.linspace(-10, 10, 1000)
    plt.plot(xs, 2 * xs ** 2 + 19)
    plt.show()
    # sympy performs symbolic (automatic) differentiation
    sym_x1 = Symbol('X1')
    expr1 = 2 * sym_x1 ** 2 + 19
    print('Y1是:', expr1)
    # differentiate with respect to X1
    print('Y1对X1求导数:', diff(expr1, sym_x1))
    sym_x2 = Symbol('X2')
    expr2 = 3 * (sym_x2 ** 2 - 2) ** 2 + 7
    deriv2 = diff(expr2, sym_x2)
    print('Y是', expr2)
    print('Y2对X2求导结果', deriv2)
    # partial derivatives of a two-variable expression
    sym_x3, sym_y3 = symbols('X3 Y3')
    expr3 = sym_x3 ** 2 + 3 * sym_x3 * sym_y3 + sym_y3 ** 2
    print('Z3是', expr3)
    print('对X3求偏导是', diff(expr3, sym_x3))
    print('对Y3求骗到是', diff(expr3, sym_y3))
    # Typically used in feature engineering.
    # Power series -> used to fit data, e.g. fitting stock fluctuations.
    # a0*X^0 + a1*X^1 + a2*X^2 + ... + an*X^n -- i.e. a Taylor-style expansion.
def session5():
    """Show ndarray shape/reshape, building a pandas DataFrame from an array,
    loading one from CSV, and scatter-plotting two of its columns."""
    grid = np.array([[1, 2], [3, 4], [5, 6]])
    print('shape是:{};size 是 {}'.format(grid.shape, grid.size))
    print(grid)
    reshaped = grid.reshape(2, 3)
    print('shape是:{};size 是 {}'.format(reshaped.shape, reshaped.size))
    print(reshaped)
    body_df = pd.DataFrame(data=np.array([[175, 150, 36],
                                      [172, 160, 38],
                                      [173, 170, 44]]),
                       index=[1, 2, 3],
                       columns=['身高', '体重', '胸围'])
    print(body_df)
    print('========================================')
    csv_df = pd.DataFrame(pd.read_csv('./生物信息.csv'))
    print(csv_df)
    print('===============数据内容======================')
    print('columns:\n')
    print(csv_df.columns)
    print('values:\n')
    print(csv_df.values)
    print(csv_df.loc[0:, '身高'].values)
    # height vs. chest circumference
    plt.scatter(x=csv_df.loc[0:, '身高'], y=csv_df.loc[0:, '胸围'])
    plt.show()
def _loadFile():
France = []
with open('./owid-covid-data.csv', mode='r', encoding='utf-8') as f:
data = f.readlines()
for line in data:
field = [item for item in line.split(',')]
if field[2] == 'France':
France.append(field[4])
# end for
# end with
return France
'''French COVID-19 epidemic data'''
def session6_liner():
    """Fit a simple linear regression to France's cumulative COVID case counts
    and plot the observations against the fitted line."""
    from sklearn.linear_model import LinearRegression
    # Read France's cumulative case counts (strings) from the CSV.
    France = _loadFile()
    # np.float was removed in NumPy 1.24; the builtin float is the documented
    # replacement and parses the same string values.
    France = np.array(France, dtype=float).astype(int)
    X = np.arange(np.size(France))
    X = X.reshape(-1, 1)  # sklearn expects a 2-D feature matrix
    mode = LinearRegression()
    # train
    mode.fit(X, France)
    # predict on the same inputs to visualise the fit
    y = mode.predict(X)
    print(France)
    print(mode.coef_)
    print(mode.intercept_)
    # observed values
    plt.scatter(X, France)
    # fitted line
    plt.plot(X, y, color='r')
    plt.show()
def session6_ploy():
    """Degree-20 polynomial regression on France's COVID case counts via a
    sklearn Pipeline; prints a cosine-similarity 'fit quality' estimate."""
    print('多项式回归')
    from sklearn.linear_model import LinearRegression
    from sklearn.pipeline import Pipeline
    # polynomial feature expansion
    from sklearn.preprocessing import PolynomialFeatures
    # StandardScaler normalises features
    from sklearn.preprocessing import StandardScaler
    France = _loadFile()
    # np.float was removed in NumPy 1.24; builtin float is the replacement
    France = np.array(France, dtype=float).astype(int)
    X = np.arange(np.size(France)).reshape(-1, 1)
    poly = Pipeline(steps=[
        ('特征工程', PolynomialFeatures(degree=20)),
        ('归一化', StandardScaler()),
        ('线性回归', LinearRegression())
    ]
    )
    poly.fit(X, France)
    y = poly.predict(X)
    plt.scatter(X, France)
    plt.plot(X, y, color='r')
    # cosine similarity between prediction and data as a crude fit score
    loss1 = np.dot(y, France) / (np.sqrt(np.dot(y, y)) * np.sqrt(np.dot(France, France)))
    print('余弦相似度:{}%'.format(loss1))
    print('拟合度是否超过95%:{}'.format('是' if loss1 > 0.95 else '否'))
    print('打分结果:{}%'.format(poly.score(X, France)))
    plt.show()
'''Boston housing prices: multivariate regression model'''
def session6_boston():
    """Boston housing demo: standardise features and target, fit a linear
    regression and a degree-3 polynomial pipeline on a train/test split,
    print the linear score and plot the training data."""
    from sklearn.datasets import load_boston
    from sklearn.model_selection import train_test_split
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import StandardScaler
    boston = load_boston()
    features = boston.get('data')
    target = boston.get('target')
    scaler = StandardScaler()
    print(features)
    # standardise features, then (re-fitting the scaler) the target column
    features = scaler.fit_transform(features)
    target = scaler.fit_transform(target.reshape(-1, 1))
    # 80/20 train/test split
    train_X, test_X, train_Y, test_Y = train_test_split(features, target, test_size=0.2)
    model = LinearRegression()
    model.fit(train_X, train_Y)
    preTestY = model.predict(test_X)
    print('线性回归得分:{}'.format(model.score(test_X, test_Y)))
    poly = Pipeline(steps=[('特征工程', PolynomialFeatures(degree=3)),
                    ('归一化', StandardScaler()),
                    ('线性回归', LinearRegression())])
    poly.fit(train_X, train_Y)
    polyPreTestY = poly.predict(test_X)
    plt.plot(train_X, train_Y)
    plt.show()
def session6_stock():
    """Multivariate regression on stock 300348: predict the daily high from
    all other columns, holding out the last 120 days as the test set."""
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import StandardScaler
    import tushare as ts
    try:
        stock = pd.read_csv('300348.csv')
    except Exception:
        # no cached file yet: download the history and cache it locally
        stock = ts.get_hist_data('300348')
        stock.to_csv('300348.csv')
    stock['date'] = pd.to_datetime(stock['date'])
    stock = stock.set_index('date')
    stock.sort_values(by=['date'], inplace=True, ascending=True)
    print(stock.shape)
    target = pd.DataFrame(stock.get('high'))
    features = stock.drop('high', axis=1)
    day = np.arange(target.size).reshape(-1, 1)
    # last 120 rows form the hold-out set
    split = -120
    x_train, x_test = features.values[:split, :], features.values[split:, :]
    y_train, y_test = target.values[:split, :], target.values[split:, :]
    day_train, day_test = day[:split, :], day[split:, :]
    model = LinearRegression()
    model.fit(x_train, y_train)
    pre_test_y = model.predict(x_test)
    print(features.shape)
    print(target.shape)
    plt.plot(day_train, y_train, color='r')
    plt.plot(day_test, y_test, color='g')
    plt.plot(day_test, pre_test_y, color='b')
    plt.title('stock:[300348] model score is:{}'.format(model.score(x_test, y_test)))
    plt.show()
def session6_stock_line():
    """Univariate regression on stock 300348: fit the daily high against the
    day index only, testing on the last 120 days."""
    from sklearn.linear_model import LinearRegression
    import tushare as ts
    try:
        stock = pd.read_csv('300348.csv')
    except Exception:
        # cache miss: fetch the history and store it for next time
        stock = ts.get_hist_data('300348')
        stock.to_csv('300348.csv')
    stock['date'] = pd.to_datetime(stock['date'])
    stock = stock.set_index('date')
    stock.sort_values(by=['date'], inplace=True, ascending=True)
    target = pd.DataFrame(stock.get('high'))
    days = np.arange(target.size).reshape(-1, 1)
    holdout = -120
    x_train, x_test = days[:holdout, :], days[holdout:, :]
    y_train, y_test = target.values[:holdout, :], target.values[holdout:, :]
    model = LinearRegression()
    model.fit(x_train, y_train)
    pre_test_y = model.predict(x_test)
    plt.plot(x_train, y_train, color='r')
    plt.plot(x_test, y_test, color='g')
    plt.plot(x_test, pre_test_y, color='b')
    plt.title('stock:[300348] model score is:{}'.format(model.score(x_test, y_test)))
    plt.show()
# Script entry: only session6_stock() runs on import; the other demos are
# kept commented out so a single scenario executes per run.
# day = np.arange(data['date'].size).reshape(-1,1)
# print(day)
# print(stock)
# session6_stock_line()
session6_stock()
# session6_liner()
# session6_boston()
# session6_ploy()
| [
"sklearn.preprocessing.PolynomialFeatures",
"pandas.read_csv",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"numpy.size",
"matplotlib.pyplot.plot",
"sklearn.datasets.load_boston",
"tushare.get_hist_data",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.dot",
"numpy... | [((146, 165), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (154, 165), True, 'import numpy as np\n'), ((174, 193), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (182, 193), True, 'import numpy as np\n'), ((307, 319), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (313, 319), True, 'import numpy as np\n'), ((566, 609), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (574, 609), True, 'import numpy as np\n'), ((654, 700), 'numpy.array', 'np.array', (['[[4, 5, 6], [7, 8, 9], [10, 11, 12]]'], {}), '([[4, 5, 6], [7, 8, 9], [10, 11, 12]])\n', (662, 700), True, 'import numpy as np\n'), ((850, 862), 'numpy.dot', 'np.dot', (['j', 'k'], {}), '(j, k)\n', (856, 862), True, 'import numpy as np\n'), ((939, 965), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(1000)'], {}), '(-10, 10, 1000)\n', (950, 965), True, 'import numpy as np\n'), ((994, 1008), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (1002, 1008), True, 'from matplotlib import pyplot as plt\n'), ((1013, 1023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1021, 1023), True, 'from matplotlib import pyplot as plt\n'), ((1694, 1728), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (1702, 1728), True, 'import numpy as np\n'), ((2484, 2533), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "e.loc[0:, '身高']", 'y': "e.loc[0:, '胸围']"}), "(x=e.loc[0:, '身高'], y=e.loc[0:, '胸围'])\n", (2495, 2533), True, 'from matplotlib import pyplot as plt\n'), ((2538, 2548), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2546, 2548), True, 'from matplotlib import pyplot as plt\n'), ((3172, 3190), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3188, 3190), False, 'from sklearn.linear_model import LinearRegression\n'), ((3345, 3367), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'France'], 
{}), '(X, France)\n', (3356, 3367), True, 'from matplotlib import pyplot as plt\n'), ((3385, 3410), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {'color': '"""r"""'}), "(X, y, color='r')\n", (3393, 3410), True, 'from matplotlib import pyplot as plt\n'), ((3415, 3425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3423, 3425), True, 'from matplotlib import pyplot as plt\n'), ((4086, 4108), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'France'], {}), '(X, France)\n', (4097, 4108), True, 'from matplotlib import pyplot as plt\n'), ((4113, 4138), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {'color': '"""r"""'}), "(X, y, color='r')\n", (4121, 4138), True, 'from matplotlib import pyplot as plt\n'), ((4387, 4397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4395, 4397), True, 'from matplotlib import pyplot as plt\n'), ((4822, 4835), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (4833, 4835), False, 'from sklearn.datasets import load_boston\n'), ((4971, 4987), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4985, 4987), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5180, 5217), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (5196, 5217), False, 'from sklearn.model_selection import train_test_split\n'), ((5243, 5261), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5259, 5261), False, 'from sklearn.linear_model import LinearRegression\n'), ((5732, 5758), 'matplotlib.pyplot.plot', 'plt.plot', (['train_X', 'train_Y'], {}), '(train_X, train_Y)\n', (5740, 5758), True, 'from matplotlib import pyplot as plt\n'), ((5762, 5772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5770, 5772), True, 'from matplotlib import pyplot as plt\n'), ((6115, 6144), 'pandas.to_datetime', 'pd.to_datetime', (["stock['date']"], {}), "(stock['date'])\n", (6129, 6144), True, 
'import pandas as pd\n'), ((6588, 6606), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6604, 6606), False, 'from sklearn.linear_model import LinearRegression\n'), ((6720, 6759), 'matplotlib.pyplot.plot', 'plt.plot', (['day_train', 'y_train'], {'color': '"""r"""'}), "(day_train, y_train, color='r')\n", (6728, 6759), True, 'from matplotlib import pyplot as plt\n'), ((6762, 6799), 'matplotlib.pyplot.plot', 'plt.plot', (['day_test', 'y_test'], {'color': '"""g"""'}), "(day_test, y_test, color='g')\n", (6770, 6799), True, 'from matplotlib import pyplot as plt\n'), ((6802, 6843), 'matplotlib.pyplot.plot', 'plt.plot', (['day_test', 'pre_test_y'], {'color': '"""b"""'}), "(day_test, pre_test_y, color='b')\n", (6810, 6843), True, 'from matplotlib import pyplot as plt\n'), ((6931, 6941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6939, 6941), True, 'from matplotlib import pyplot as plt\n'), ((7236, 7265), 'pandas.to_datetime', 'pd.to_datetime', (["stock['date']"], {}), "(stock['date'])\n", (7250, 7265), True, 'import pandas as pd\n'), ((7580, 7598), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7596, 7598), False, 'from sklearn.linear_model import LinearRegression\n'), ((7677, 7714), 'matplotlib.pyplot.plot', 'plt.plot', (['x_train', 'y_train'], {'color': '"""r"""'}), "(x_train, y_train, color='r')\n", (7685, 7714), True, 'from matplotlib import pyplot as plt\n'), ((7719, 7754), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'y_test'], {'color': '"""g"""'}), "(x_test, y_test, color='g')\n", (7727, 7754), True, 'from matplotlib import pyplot as plt\n'), ((7759, 7798), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'pre_test_y'], {'color': '"""b"""'}), "(x_test, pre_test_y, color='b')\n", (7767, 7798), True, 'from matplotlib import pyplot as plt\n'), ((7889, 7899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7897, 7899), True, 'from matplotlib import pyplot as plt\n'), ((357, 
369), 'numpy.dot', 'np.dot', (['a', 'a'], {}), '(a, a)\n', (363, 369), True, 'import numpy as np\n'), ((408, 428), 'numpy.sum', 'np.sum', (['((a - b) ** 2)'], {}), '((a - b) ** 2)\n', (414, 428), True, 'import numpy as np\n'), ((459, 471), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (465, 471), True, 'import numpy as np\n'), ((2251, 2268), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (2262, 2268), True, 'import pandas as pd\n'), ((3104, 3119), 'numpy.size', 'np.size', (['France'], {}), '(France)\n', (3111, 3119), True, 'import numpy as np\n'), ((4152, 4169), 'numpy.dot', 'np.dot', (['y', 'France'], {}), '(y, France)\n', (4158, 4169), True, 'import numpy as np\n'), ((5968, 5993), 'pandas.read_csv', 'pd.read_csv', (['"""300348.csv"""'], {}), "('300348.csv')\n", (5979, 5993), True, 'import pandas as pd\n'), ((7089, 7114), 'pandas.read_csv', 'pd.read_csv', (['"""300348.csv"""'], {}), "('300348.csv')\n", (7100, 7114), True, 'import pandas as pd\n'), ((1919, 1977), 'numpy.array', 'np.array', (['[[175, 150, 36], [172, 160, 38], [173, 170, 44]]'], {}), '([[175, 150, 36], [172, 160, 38], [173, 170, 44]])\n', (1927, 1977), True, 'import numpy as np\n'), ((3041, 3073), 'numpy.array', 'np.array', (['France'], {'dtype': 'np.float'}), '(France, dtype=np.float)\n', (3049, 3073), True, 'import numpy as np\n'), ((3778, 3810), 'numpy.array', 'np.array', (['France'], {'dtype': 'np.float'}), '(France, dtype=np.float)\n', (3786, 3810), True, 'import numpy as np\n'), ((6032, 6058), 'tushare.get_hist_data', 'ts.get_hist_data', (['"""300348"""'], {}), "('300348')\n", (6048, 6058), True, 'import tushare as ts\n'), ((6352, 6369), 'numpy.arange', 'np.arange', (['Y.size'], {}), '(Y.size)\n', (6361, 6369), True, 'import numpy as np\n'), ((7153, 7179), 'tushare.get_hist_data', 'ts.get_hist_data', (['"""300348"""'], {}), "('300348')\n", (7169, 7179), True, 'import tushare as ts\n'), ((7416, 7433), 'numpy.arange', 'np.arange', (['Y.size'], {}), '(Y.size)\n', (7425, 
7433), True, 'import numpy as np\n'), ((483, 495), 'numpy.dot', 'np.dot', (['a', 'a'], {}), '(a, a)\n', (489, 495), True, 'import numpy as np\n'), ((507, 519), 'numpy.dot', 'np.dot', (['b', 'b'], {}), '(b, b)\n', (513, 519), True, 'import numpy as np\n'), ((3841, 3856), 'numpy.size', 'np.size', (['France'], {}), '(France)\n', (3848, 3856), True, 'import numpy as np\n'), ((4181, 4193), 'numpy.dot', 'np.dot', (['y', 'y'], {}), '(y, y)\n', (4187, 4193), True, 'import numpy as np\n'), ((4205, 4227), 'numpy.dot', 'np.dot', (['France', 'France'], {}), '(France, France)\n', (4211, 4227), True, 'import numpy as np\n'), ((3926, 3955), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(20)'}), '(degree=20)\n', (3944, 3955), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3972, 3988), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3986, 3988), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4010, 4028), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4026, 4028), False, 'from sklearn.linear_model import LinearRegression\n'), ((5490, 5518), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(3)'}), '(degree=3)\n', (5508, 5518), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((5535, 5551), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5549, 5551), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5573, 5591), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5589, 5591), False, 'from sklearn.linear_model import LinearRegression\n')] |
import pandapower as pp
import pytest
from numpy import array
@pytest.fixture()
def base_net():
    """Two-bus test network: an external grid at bus 0 feeding a fixed 200 kW
    load at bus 1 over one line, with a power flow already solved."""
    net = pp.create_empty_network()
    for _ in range(2):
        pp.create_bus(net, vn_kv=10)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_kw=200, controllable=False)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line",
                                   r_ohm_per_km=0.876, c_nf_per_km=260.0,
                                   max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    pp.runpp(net)
    return net
def test_contingency_sgen(base_net):
    """OPF cost sanity check for a controllable sgen: with piecewise-linear
    and polynomial costs of slope +/-1, the optimal cost must equal +/- the
    dispatched sgen power."""
    net = base_net
    pp.create_sgen(net, 1, p_kw=-100, q_kvar=0, controllable=True, max_p_kw=-5,
                   min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50)
    # pwl costs
    # maximize the sgen feed-in by using a positive cost slope (slope 1):
    #                |   /
    #                |  /
    #                | /
    #                |/
    # -------------------------------------------
    #  p_min_kw    / |
    #            /   |
    #          /     |
    pp.create_piecewise_linear_cost(net, 0, "sgen",
                                    array([[net.sgen.min_p_kw.at[0], net.sgen.min_p_kw.at[0]], [0, 0]]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_sgen.p_kw.at[0]) < 1e-5
    # minimize the sgen feed-in by flipping the slope to -1:
    #  \             |
    #    \           |
    #      \         |
    #        \       |
    # -------------------------------------------
    #  p_min_kw      | \
    #                |   \
    #                |     \
    net.piecewise_linear_cost.f.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_sgen.p_kw.at[0] * -1) < 1e-5
    # Drop the pwl cost table.  Older pandas raises TypeError on the 'index'
    # keyword, so fall back to the positional form.  (Was a bare 'except:',
    # which would also swallow KeyboardInterrupt/SystemExit.)
    try:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
    except TypeError:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
    # polynomial costs: first a positive slope as in the case above
    pp.create_polynomial_cost(net, 0, "sgen", array([1, 0]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_sgen.p_kw.at[0]) < 1e-5
    # negative slope as in the case above
    net.polynomial_cost.c.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_sgen.p_kw.at[0] * -1) < 1e-5
def test_contingency_load(base_net):
    """OPF cost sanity check for a controllable load: with piecewise-linear
    and polynomial costs of slope +/-1, the optimal cost must equal +/- the
    dispatched load power."""
    net = base_net
    pp.create_load(net, 1, p_kw=-100, q_kvar=0, controllable=True, max_p_kw=150,
                   min_p_kw=5, max_q_kvar=50, min_q_kvar=-50)
    # pwl costs
    # minimize the load by using a positive cost slope (slope 1):
    #                |   /
    #                |  /
    #                | /
    #                |/
    # -------------------------------------------
    #  p_min_kw    / |
    #            /   |
    #          /     |
    pp.create_piecewise_linear_cost(net, 1, "load", array(
        [[0, 0], [net.load.max_p_kw.at[1], net.load.max_p_kw.at[1]]]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_load.p_kw.at[1]) < 1e-5
    # maximize the load by flipping the slope to -1:
    #  \             |
    #    \           |
    #      \         |
    #        \       |
    # -------------------------------------------
    #  p_min_kw      | \
    #                |   \
    #                |     \
    net.piecewise_linear_cost.f.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_load.p_kw.at[1] * -1) < 1e-5
    # poly costs
    # Drop the pwl cost table.  Older pandas raises TypeError on the 'index'
    # keyword, so fall back to the positional form.  (Was a bare 'except:',
    # which would also swallow KeyboardInterrupt/SystemExit.)
    try:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
    except TypeError:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
    # first using a positive slope as in the case above
    pp.create_polynomial_cost(net, 1, "load", array([1, 0]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_load.p_kw.at[1]) < 1e-5
    # negative slope as in the case above
    net.polynomial_cost.c.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_load.p_kw.at[1] * -1) < 1e-5
def test_contingency_gen(base_net):
    """OPF cost sanity check for a controllable gen: with piecewise-linear
    and polynomial costs of slope +/-1, the optimal cost must equal +/- the
    dispatched generator power."""
    net = base_net
    pp.create_gen(net, 1, p_kw=-100, vm_pu=1.05, controllable=True, max_p_kw=-5,
                  min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50)
    # pwl costs
    # maximize the gen feed-in by using a positive cost slope (slope 1):
    #                |   /
    #                |  /
    #                | /
    #                |/
    # -------------------------------------------
    #  p_min_kw    / |
    #            /   |
    #          /     |
    pp.create_piecewise_linear_cost(net, 0, "gen",
                                    array([[net.gen.min_p_kw.at[0], net.gen.min_p_kw.at[0]], [0, 0]]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_gen.p_kw.at[0]) < 1e-5
    # minimize the gen feed-in by flipping the slope to -1:
    #  \             |
    #    \           |
    #      \         |
    #        \       |
    # -------------------------------------------
    #  p_min_kw      | \
    #                |   \
    #                |     \
    net.piecewise_linear_cost.f.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_gen.p_kw.at[0] * -1) < 1e-5
    # Drop the pwl cost table.  Older pandas raises TypeError on the 'index'
    # keyword, so fall back to the positional form.  (Was a bare 'except:',
    # which would also swallow KeyboardInterrupt/SystemExit.)
    try:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
    except TypeError:
        net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
    # first using a positive slope as in the case above
    pp.create_polynomial_cost(net, 0, "gen", array([1, 0]))
    pp.runopp(net)
    assert abs(net.res_cost - net.res_gen.p_kw.at[0]) < 1e-5
    # negative slope as in the case above
    net.polynomial_cost.c.at[0] *= -1
    pp.runopp(net)
    assert abs(net.res_cost - net.res_gen.p_kw.at[0] * -1) < 1e-5
if __name__ == "__main__":
    # Run this module's tests directly; -s disables output capturing so
    # prints are visible.
    # net = base_net()
    # test_contingency_gen(net)
    pytest.main(['-s', __file__])
| [
"pandapower.create_sgen",
"pandapower.create_ext_grid",
"pandapower.create_empty_network",
"pandapower.create_line_from_parameters",
"pandapower.create_load",
"pandapower.runopp",
"pandapower.create_gen",
"pytest.main",
"numpy.array",
"pytest.fixture",
"pandapower.runpp",
"pandapower.create_bu... | [((64, 80), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (78, 80), False, 'import pytest\n'), ((107, 132), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (130, 132), True, 'import pandapower as pp\n'), ((137, 165), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'vn_kv': '(10)'}), '(net, vn_kv=10)\n', (150, 165), True, 'import pandapower as pp\n'), ((170, 198), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'vn_kv': '(10)'}), '(net, vn_kv=10)\n', (183, 198), True, 'import pandapower as pp\n'), ((203, 229), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {}), '(net, 0)\n', (221, 229), True, 'import pandapower as pp\n'), ((234, 286), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_kw': '(200)', 'controllable': '(False)'}), '(net, 1, p_kw=200, controllable=False)\n', (248, 286), True, 'import pandapower as pp\n'), ((291, 468), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(0)', '(1)', '(50)'], {'name': '"""line"""', 'r_ohm_per_km': '(0.876)', 'c_nf_per_km': '(260.0)', 'max_i_ka': '(0.123)', 'x_ohm_per_km': '(0.1159876)', 'max_loading_percent': '(100 * 690)'}), "(net, 0, 1, 50, name='line', r_ohm_per_km=\n 0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n", (321, 468), True, 'import pandapower as pp\n'), ((535, 548), 'pandapower.runpp', 'pp.runpp', (['net'], {}), '(net)\n', (543, 548), True, 'import pandapower as pp\n'), ((626, 751), 'pandapower.create_sgen', 'pp.create_sgen', (['net', '(1)'], {'p_kw': '(-100)', 'q_kvar': '(0)', 'controllable': '(True)', 'max_p_kw': '(-5)', 'min_p_kw': '(-150)', 'max_q_kvar': '(50)', 'min_q_kvar': '(-50)'}), '(net, 1, p_kw=-100, q_kvar=0, controllable=True, max_p_kw=-5,\n min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50)\n', (640, 751), True, 'import pandapower as pp\n'), ((1238, 1252), 'pandapower.runopp', 'pp.runopp', (['net'], {}), 
'(net)\n', (1247, 1252), True, 'import pandapower as pp\n'), ((1690, 1704), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (1699, 1704), True, 'import pandapower as pp\n'), ((2061, 2075), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (2070, 2075), True, 'import pandapower as pp\n'), ((2223, 2237), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (2232, 2237), True, 'import pandapower as pp\n'), ((2366, 2489), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_kw': '(-100)', 'q_kvar': '(0)', 'controllable': '(True)', 'max_p_kw': '(150)', 'min_p_kw': '(5)', 'max_q_kvar': '(50)', 'min_q_kvar': '(-50)'}), '(net, 1, p_kw=-100, q_kvar=0, controllable=True, max_p_kw=150,\n min_p_kw=5, max_q_kvar=50, min_q_kvar=-50)\n', (2380, 2489), True, 'import pandapower as pp\n'), ((2995, 3009), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (3004, 3009), True, 'import pandapower as pp\n'), ((3442, 3456), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (3451, 3456), True, 'import pandapower as pp\n'), ((3854, 3868), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (3863, 3868), True, 'import pandapower as pp\n'), ((4016, 4030), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (4025, 4030), True, 'import pandapower as pp\n'), ((4159, 4285), 'pandapower.create_gen', 'pp.create_gen', (['net', '(1)'], {'p_kw': '(-100)', 'vm_pu': '(1.05)', 'controllable': '(True)', 'max_p_kw': '(-5)', 'min_p_kw': '(-150)', 'max_q_kvar': '(50)', 'min_q_kvar': '(-50)'}), '(net, 1, p_kw=-100, vm_pu=1.05, controllable=True, max_p_kw=-5,\n min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50)\n', (4172, 4285), True, 'import pandapower as pp\n'), ((4770, 4784), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (4779, 4784), True, 'import pandapower as pp\n'), ((5221, 5235), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (5230, 5235), True, 'import pandapower as pp\n'), ((5611, 5625), 
'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (5620, 5625), True, 'import pandapower as pp\n'), ((5772, 5786), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (5781, 5786), True, 'import pandapower as pp\n'), ((5939, 5968), 'pytest.main', 'pytest.main', (["['-s', __file__]"], {}), "(['-s', __file__])\n", (5950, 5968), False, 'import pytest\n'), ((1165, 1232), 'numpy.array', 'array', (['[[net.sgen.min_p_kw.at[0], net.sgen.min_p_kw.at[0]], [0, 0]]'], {}), '([[net.sgen.min_p_kw.at[0], net.sgen.min_p_kw.at[0]], [0, 0]])\n', (1170, 1232), False, 'from numpy import array\n'), ((2042, 2055), 'numpy.array', 'array', (['[1, 0]'], {}), '([1, 0])\n', (2047, 2055), False, 'from numpy import array\n'), ((2914, 2981), 'numpy.array', 'array', (['[[0, 0], [net.load.max_p_kw.at[1], net.load.max_p_kw.at[1]]]'], {}), '([[0, 0], [net.load.max_p_kw.at[1], net.load.max_p_kw.at[1]]])\n', (2919, 2981), False, 'from numpy import array\n'), ((3835, 3848), 'numpy.array', 'array', (['[1, 0]'], {}), '([1, 0])\n', (3840, 3848), False, 'from numpy import array\n'), ((4699, 4764), 'numpy.array', 'array', (['[[net.gen.min_p_kw.at[0], net.gen.min_p_kw.at[0]], [0, 0]]'], {}), '([[net.gen.min_p_kw.at[0], net.gen.min_p_kw.at[0]], [0, 0]])\n', (4704, 4764), False, 'from numpy import array\n'), ((5592, 5605), 'numpy.array', 'array', (['[1, 0]'], {}), '([1, 0])\n', (5597, 5605), False, 'from numpy import array\n')] |
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from functools import lru_cache
import cupy as cp
from cupyx.scipy.sparse import csr_matrix as csr_gpu
import itertools
import time
import os
import pickle
import scipy
import random
import correlation_module
import sys
sys.path.insert(0, "../../../lib") # add the library folder to the path I look for modules
sys.path.insert(0, "../../lib") # add the library folder to the path I look for modules specific to symmetric matrix
import latexify
import cavity_symmetric
def directory(gamma):
    """Return the output-directory name for a given power-law exponent."""
    return 'gamma=%s' % (gamma,)
def save_obj(obj, gamma, T, kind):
    """Pickle *obj* into '<gamma dir>/data/dic-<kind>_T=<T>.pkl', creating
    the data directory if needed.

    Parameters:
        obj: any picklable object (here the results dictionary).
        gamma: power-law exponent; determines the directory name.
        T: temperature, embedded in the file name.
        kind: interaction-symmetry tag used as the file-name prefix.
    """
    data_dir = directory(gamma) + "/data"
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(data_dir, exist_ok=True)
    name = kind + '_T=' + str(T) + '.pkl'
    with open(data_dir + '/dic-' + name, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def generate_degree_seq(gamma, N):
    """Draw N node degrees from a (shifted) Pareto distribution, redrawing
    any degree exceeding N, and pad the last entry so the total is even
    (a valid configuration-model stub count)."""
    degrees = np.ceil(1 + np.random.pareto(gamma, N))
    too_large = degrees > N
    while any(too_large):
        # redraw only the offending entries
        degrees[too_large] = np.ceil(np.random.pareto(gamma, np.count_nonzero(too_large)))
        too_large = degrees > N
    # the degree sum must be even so every stub can be paired
    if sum(degrees) % 2 == 1:
        degrees[-1] += 1
    return np.array(degrees, dtype=int)
def asymmetric_sign(J):
    """Assign an independent random +/-1 sign to every stored link of the
    sparse matrix J (in place); signs are not symmetrized."""
    positive = np.random.rand(J.nnz) > 0.5
    J.data = np.where(positive, 1, -1).astype(np.float32)
    return J
def make_network(N, gamma):
    """Build a directed configuration-model network with power-law degrees
    and return its (unsigned) sparse adjacency matrix."""
    degree_sequence = generate_degree_seq(gamma, N)
    # configuration model, then orient the multigraph and drop self-loops
    graph = nx.DiGraph(nx.generators.degree_seq.configuration_model(degree_sequence))
    graph.remove_edges_from(nx.selfloop_edges(graph))
    return nx.adjacency_matrix(graph)
def symmetric_sign(J):
    """Return a symmetric signed version of J: every undirected link gets one
    random +/-1 sign shared by both directions (J[i,j] == J[j,i])."""
    N = J.shape[0]
    coo = J.tocoo()
    lower = coo.row > coo.col  # keep one triangle, mirror it afterwards
    signs = np.where(np.random.rand(np.count_nonzero(lower)) > .5, 1, -1).astype(np.float32)
    half = scipy.sparse.coo_matrix((signs, (coo.row[lower], coo.col[lower])), shape=(N, N))
    return (half + half.T).tocsc()
def antisymmetric_sign(J):
    """Return an antisymmetric signed version of J: every undirected link
    gets one random +/-1 sign with the opposite sign on the reverse
    direction (J[i,j] == -J[j,i])."""
    N = J.shape[0]
    coo = J.tocoo()
    lower = coo.row > coo.col  # sign one triangle, subtract its transpose
    signs = np.where(np.random.rand(np.count_nonzero(lower)) > .5, 1, -1).astype(np.float32)
    half = scipy.sparse.coo_matrix((signs, (coo.row[lower], coo.col[lower])), shape=(N, N))
    return (half - half.T).tocsc()
def main():
    """Run the replica simulation plus cavity computation for one random
    network at fixed temperature, once per interaction symmetry, and pickle
    each result set."""
    N = 10000
    gamma = 3
    T = .5
    theta = 0
    N_replics = 100
    N_iterations = 10000
    # Bug fix: 'no_gpu' was referenced below but never defined, so this
    # function raised NameError at runtime.  Detect whether a CUDA device is
    # actually usable and fall back to the CPU path otherwise.
    try:
        no_gpu = cp.cuda.runtime.getDeviceCount() == 0
    except Exception:
        no_gpu = True
    # redraw networks until every node has out-degree > 1
    while True:
        J = make_network(N, gamma)
        Ks = np.diff(J.tocsr().indptr)
        if min(Ks > 1):
            break
    print('network done')
    choises = {'symmetric': symmetric_sign, 'antisymmetric': antisymmetric_sign,
               'asymmetric': asymmetric_sign}
    for kind in ['asymmetric', 'antisymmetric', 'symmetric']:
        J = choises[kind](J)  # select the symmetry of network interactions
        if no_gpu:
            threads = -1
            C, P_sim = correlation_module.replics_parallel(J, np.random.rand(N), T, N_replics, N_iterations, threads)
        else:
            C, P_sim = correlation_module.replics_gpu(J, cp.random.rand(N), T, N_replics, N_iterations)
        P_A, P_B, P_t = cavity_symmetric.cavity_iteration(J, T, max_iter=10)
        # corr_cav = correlation_module.correlation_cavity(J, T, theta, P_A, P_B)
        dic = {'C': C, 'P_cav': P_t, 'P_sim': P_sim, 'N_replics': N_replics,
               'N_iterations': N_iterations, 'T': T, 'N': N, 'gamma': gamma, 'J': J,
               'descr': 'C has 2 dimension, dimension 0 runs over the nodes, dimension 1 gives the lag up to cutoff. C is averaged over replics'}
        save_obj(dic, gamma, T, kind)
        print('saved ' + kind)
if __name__ == '__main__':
    # Entry point: run the full simulation when executed as a script.
    main()
| [
"sys.path.insert",
"pickle.dump",
"numpy.random.rand",
"cupy.random.rand",
"networkx.adjacency_matrix",
"networkx.selfloop_edges",
"networkx.DiGraph",
"networkx.generators.degree_seq.configuration_model",
"numpy.random.pareto",
"numpy.count_nonzero",
"numpy.array",
"scipy.sparse.coo_matrix",
... | [((293, 327), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../lib"""'], {}), "(0, '../../../lib')\n", (308, 327), False, 'import sys\n'), ((385, 416), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../lib"""'], {}), "(0, '../../lib')\n", (400, 416), False, 'import sys\n'), ((1311, 1336), 'numpy.array', 'np.array', (['kseq'], {'dtype': 'int'}), '(kseq, dtype=int)\n', (1319, 1336), True, 'import numpy as np\n'), ((1499, 1543), 'numpy.array', 'np.array', (['sign_interaction'], {'dtype': 'np.float32'}), '(sign_interaction, dtype=np.float32)\n', (1507, 1543), True, 'import numpy as np\n'), ((1843, 1897), 'networkx.generators.degree_seq.configuration_model', 'nx.generators.degree_seq.configuration_model', (['sequence'], {}), '(sequence)\n', (1887, 1897), True, 'import networkx as nx\n'), ((1906, 1919), 'networkx.DiGraph', 'nx.DiGraph', (['G'], {}), '(G)\n', (1916, 1919), True, 'import networkx as nx\n'), ((1974, 1996), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G'], {}), '(G)\n', (1993, 1996), True, 'import networkx as nx\n'), ((2226, 2302), 'scipy.sparse.coo_matrix', 'scipy.sparse.coo_matrix', (['(interaction, (row[cond], col[cond]))'], {'shape': '(N, N)'}), '((interaction, (row[cond], col[cond])), shape=(N, N))\n', (2249, 2302), False, 'import scipy\n'), ((2553, 2629), 'scipy.sparse.coo_matrix', 'scipy.sparse.coo_matrix', (['(interaction, (row[cond], col[cond]))'], {'shape': '(N, N)'}), '((interaction, (row[cond], col[cond])), shape=(N, N))\n', (2576, 2629), False, 'import scipy\n'), ((954, 998), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (965, 998), False, 'import pickle\n'), ((1944, 1964), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['G'], {}), '(G)\n', (1961, 1964), True, 'import networkx as nx\n'), ((3500, 3552), 'cavity_symmetric.cavity_iteration', 'cavity_symmetric.cavity_iteration', (['J', 'T'], {'max_iter': '(10)'}), '(J, T, max_iter=10)\n', (3533, 
3552), False, 'import cavity_symmetric\n'), ((1057, 1083), 'numpy.random.pareto', 'np.random.pareto', (['gamma', 'N'], {}), '(gamma, N)\n', (1073, 1083), True, 'import numpy as np\n'), ((1397, 1418), 'numpy.random.rand', 'np.random.rand', (['J.nnz'], {}), '(J.nnz)\n', (1411, 1418), True, 'import numpy as np\n'), ((1178, 1200), 'numpy.count_nonzero', 'np.count_nonzero', (['cond'], {}), '(cond)\n', (1194, 1200), True, 'import numpy as np\n'), ((3305, 3322), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (3319, 3322), True, 'import numpy as np\n'), ((3430, 3447), 'cupy.random.rand', 'cp.random.rand', (['N'], {}), '(N)\n', (3444, 3447), True, 'import cupy as cp\n'), ((2166, 2188), 'numpy.count_nonzero', 'np.count_nonzero', (['cond'], {}), '(cond)\n', (2182, 2188), True, 'import numpy as np\n'), ((2493, 2515), 'numpy.count_nonzero', 'np.count_nonzero', (['cond'], {}), '(cond)\n', (2509, 2515), True, 'import numpy as np\n')] |
"""
Simulation of binary floating point representation at arbitrary fixed or
infinite precision (including greater than 64 bit).
:name: Simfloat
:author: <NAME>
:version: 0.2
:date: August 2008
Updated to version 0.2 for full Python 3 compatibility in 2020.
"""
import numpy as np
import math
import decimal
from decimal import Decimal
# Public API of the module.
# BUG FIX: 'BinaryOverflow' was listed twice; the duplicate is removed
# (star-import behavior is unchanged).
__all__ = ['Decimal', 'Binary', 'ContextClass', 'BinaryIntClass',
           'quadruple', 'double', 'define_context', 'frexp', 'dec_context',
           'single', 'half', 'test', 'binstr2dec', 'decfrac2binrep',
           'decint2binstr', 'binfracstr2decfrac', 'dec2binstr', 'binvalstr2dec',
           'ROUND_UP', 'ROUND_DOWN', 'ROUND_CEILING', 'ROUND_FLOOR',
           'ROUND_HALF_UP', 'ROUND_HALF_DOWN',
           'BinaryOverflow', 'BinaryUnderflow',
           'BinaryNegativeValue', 'BinaryRemainderValue', 'BinaryException']

# Rounding mode names (mirroring the decimal module's naming scheme).
# up = always away from 0
ROUND_UP = 'ROUND_UP'
# down = always towards 0 (truncate)
ROUND_DOWN = 'ROUND_DOWN'
# ceiling = always towards +inf
ROUND_CEILING = 'ROUND_CEILING'
# floor = always towards -inf
ROUND_FLOOR = 'ROUND_FLOOR'
# half up = to nearest, 0.1b (0.5d) rounds away from 0 (standard and default)
ROUND_HALF_UP = 'ROUND_HALF_UP'
# half down = to nearest, 0.1 (0.5d) rounds towards 0
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'

# Short codes used to build generated context class names
# (see context_registry.make_context).
_round_code = {ROUND_UP: 'U', ROUND_DOWN: 'D', ROUND_CEILING: 'C',
               ROUND_FLOOR: 'F', ROUND_HALF_UP: 'HU', ROUND_HALF_DOWN: 'HD'}

# Working precision for the module's Decimal arithmetic.
dec_context = decimal.getcontext()
dec_context.prec = 128
class BinaryIntClass:
    """Abstract class for non-negative binary integer values only, with a fixed
    number of digits given by class attribute 'digits'. Set digits in a concrete
    subclass. The initialization value string will cause an exception if the
    binary value is longer than the class' digits setting.
    """
    digits = None

    def __init__(self, value):
        """Initialize with a value made up of a string of binary digits.

        Raises BinaryNegativeValue / BinaryOverflow when the value falls
        outside the range of a 'digits'-bit unsigned integer.
        """
        dec_value = int(value, base=2)
        if dec_value < 0:
            raise BinaryNegativeValue("Invalid binary value: out of range of %d-digit number"%self.digits)
        elif dec_value > self.largest:
            raise BinaryOverflow("Invalid binary value: out of range of %d-digit number"%self.digits)
        # Canonical zero-padded bit string, exact Decimal value, and a
        # tuple-of-ints view of the bits (most significant first).
        self.bin_value = pad(value, self.digits)
        self.dec_value = Decimal(dec_value)
        self.tuple_rep = tuple([int(bit) for bit in self.bin_value])

    def __repr__(self):
        return '%s("%s")' % (self.__class__.__name__, self.bin_value)

    def __str__(self):
        return self.bin_value

    def as_decimal(self):
        """Exact value as a Decimal integer."""
        return self.dec_value

    def as_binary(self):
        """String of the form 'Binary("<bits>")'."""
        return 'Binary("%s")' % self.bin_value

    def as_tuple(self):
        """Bits as a tuple of ints, most significant first."""
        return self.tuple_rep

    def _op_return_class(self, other):
        """Class to use for a binary operation's result: whichever operand
        has more digits wins."""
        try:
            other_digits = other.digits
        except AttributeError:
            raise TypeError("Invalid Binary object")
        if other_digits > self.digits:
            new_class = other.__class__
        else:
            new_class = self.__class__
        return new_class

    def max(self, other):
        # NOTE(review): passes 'other' (not other.dec_value) to Decimal.max;
        # this only works when 'other' is Decimal-convertible -- confirm
        # the intended argument type.
        return self.dec_value.max(other)

    def min(self, other):
        # NOTE(review): see max() above.
        return self.dec_value.min(other)

    # In the arithmetic operators below, results are converted with int()
    # before decint2binstr: dec_value is a Decimal, and Decimal does not
    # support the shift operator used by the original divide loop.
    def __add__(self, other):
        new_class = self._op_return_class(other)
        result = self.dec_value + other.dec_value
        try:
            return new_class(decint2binstr(int(result)))
        except ValueError:
            raise BinaryOverflow(result)

    def __sub__(self, other):
        new_class = self._op_return_class(other)
        result = self.dec_value - other.dec_value
        if result < 0:
            raise BinaryNegativeValue(result)
        return new_class(decint2binstr(int(result)))

    def __mul__(self, other):
        new_class = self._op_return_class(other)
        result = self.dec_value * other.dec_value
        try:
            return new_class(decint2binstr(int(result)))
        except ValueError:
            raise BinaryOverflow(result)

    def __div__(self, other):
        """Exact integer division; raises BinaryRemainderValue when the
        division leaves a remainder."""
        new_class = self._op_return_class(other)
        result_dm = divmod(self.dec_value, other.dec_value)
        if result_dm[1] != 0:
            raise BinaryRemainderValue(result_dm)
        return new_class(decint2binstr(int(result_dm[0])))

    # Python 3 name for the division protocol (module predates Python 3).
    __truediv__ = __div__

    def __rshift__(self, y):
        # BUG FIX: Decimal does not implement '>>'; shift the plain int value.
        result = int(self.dec_value) >> y
        return self.__class__(decint2binstr(result))

    def __lshift__(self, y):
        # BUG FIX: Decimal does not implement '<<'; shift the plain int value.
        result = int(self.dec_value) << y
        try:
            return self.__class__(decint2binstr(result))
        except ValueError:
            raise BinaryOverflow(result)

    def next(self):
        """Successor value (current + 1) in the same class."""
        try:
            return self.__class__(decint2binstr(int(self.bin_value,
                                                    base=2) + 1))
        except BinaryOverflow:
            raise BinaryOverflow(int(self.bin_value, base=2) + 1)

    def prev(self):
        """Predecessor value (current - 1) in the same class."""
        try:
            return self.__class__(decint2binstr(int(self.bin_value,
                                                    base=2) - 1))
        except BinaryNegativeValue:
            # Narrowed from a bare 'except:': decrementing zero is the only
            # failure mode of the conversion above.
            raise BinaryNegativeValue(int(self.bin_value, base=2) - 1)

    def __hash__(self):
        # BUG FIX: __hash__ was defined twice; the later (repr-based)
        # definition silently shadowed the earlier dec_value-based one, so
        # that effective behavior is kept and the dead code removed.
        return hash(repr(self))

    def __getitem__(self, i):
        """Bit at position i (0 = most significant), as an int."""
        return int(self.bin_value[i])

    ## could provide boolean operator methods but not needed for arithmetic
    def __reduce__(self):
        return (self.__class__, (repr(self),))
class SignBit(BinaryIntClass):
    """A single sign bit: 0 encodes a non-negative value, 1 a negative one."""
    largest = 1
    digits = 1
class BinaryCharacteristic(BinaryIntClass):
    """Abstract class for a floating point representation's characteristic,
    i.e. the stored (biased) exponent field.
    """
    def __init__(self, value):
        """value: binary digit string holding the biased exponent bits."""
        BinaryIntClass.__init__(self, value)
        # Remove the bias to obtain the signed exponent this field encodes,
        # and validate it against the class' representable exponent range.
        unbiased = self.dec_value - self.bias
        if unbiased < self.exp_lowest:
            raise BinaryNegativeValue("Invalid binary value: out of range of %d-digit number"%self.digits)
        if unbiased > self.exp_largest:
            raise BinaryOverflow("Invalid binary value: out of range of %d-digit number"%self.digits)
        self.interp_dec_value = unbiased

    def interpret(self, denorm=False):
        """Signed decimal integer exponent; for denormalized values the
        effective bias differs by one, so the result is adjusted by +1."""
        return self.interp_dec_value + 1 if denorm else self.interp_dec_value
class BinarySignificand(BinaryIntClass):
    """Abstract class for a floating point representation's significand
    (also called the fraction or mantissa).
    """
    def interpret(self):
        """Decimal fraction encoded by the raw bits, with the radix point
        assumed to sit immediately left of the first bit."""
        return binfracstr2decfrac(self.bin_value)
class ContextClass:
    """Abstract class for immutable binary floating point number using
    (sign, characteristic, significand) representation.

    Concrete sub-classes (built by context_registry.make_context) supply
    characteristicClass, significandClass, digits, round_mode and the
    largest_denorm / largest_norm limits.
    """
    def __init__(self, value):
        """Initialize with decimal value (either of type 'float' or 'Decimal')
        or a binary digit string of total length = <this_class>.digits or the
        same string with spaces between sign, characteristic, and significand
        elements (total length <this_class>.digits+2).
        """
        try:
            assert self.significandClass.digits is not None
        except AssertionError:
            raise NotImplementedError("Create concrete sub-type of ContextClass")
        # round_dir may be overwritten by init_from_dec
        # round_dir = None => no rounding needed
        #             'next' => use next towards +inf
        #             'prev' => use next towards -inf
        round_dir = None
        if isinstance(value, (float, np.float64)):
            if not np.isfinite(value):
                val_str = str(value)
                # val_str may be '1.#INF' rather than 'inf' on some platforms;
                # strip everything but the 'inf'/'nan' text (and sign)
                val_str = val_str.replace('.', '').replace('#', '').replace('1',
                                                                            '')
                round_dir = self.init_from_dec(Decimal(val_str))
            else:
                if value < 0:
                    fval = -value
                    s = '1'
                else:
                    fval = value
                    s = '0'
                # extract full binary representation of the value
                # (which is not accessible from str(value) or repr(value)
                valstr = pad(decint2binstr(np.int64( \
                             np.array(fval).view(int))), 64)
                # IEEE 754 double layout: 1 sign, 11 exponent, 52 fraction bits
                e = valstr[1:12]
                f = valstr[12:]
                denorm = int(e) == 0 and int(f) > 0
                if denorm:
                    bias = 1022
                    expo = Decimal(2)**Decimal(int(e, base=2) - bias)
                    frac = binfracstr2decfrac(f)
                else:
                    bias = 1023
                    expo = Decimal(2)**Decimal(int(e, base=2) - bias)
                    frac = 1 + binfracstr2decfrac(f)
                try:
                    round_dir = self.init_from_dec((-1)**int(s) * expo * frac)
                except:
                    raise BinaryOverflow("Invalid representation for this class: %s"%value)
        elif isinstance(value, np.float32):
            if not np.isfinite(value):
                val_str = str(value)
                # val_str may be '1.#INF' rather than 'inf' on some platforms
                val_str = val_str.replace('.', '').replace('#', '').replace('1',
                                                                            '')
                round_dir = self.init_from_dec(Decimal(val_str))
            else:
                if value < 0:
                    fval = -value
                    s = '1'
                else:
                    fval = value
                    s = '0'
                # extract full binary representation of the value
                # (which is not accessible from str(value) or repr(value)
                # NOTE(review): .view(int) reinterprets as a 64-bit int; for a
                # 4-byte float32 this may raise -- confirm np.int32 view intended
                valstr = pad(decint2binstr(np.int32( \
                             np.array(fval).view(int))), 32)
                # IEEE 754 single layout: 1 sign, 8 exponent, 23 fraction bits
                e = valstr[1:9]
                f = valstr[9:]
                denorm = int(e) == 0 and int(f) > 0
                if denorm:
                    bias = 126
                    expo = Decimal(2) ** Decimal(int(e, base=2) - bias)
                    frac = binfracstr2decfrac(f)
                else:
                    bias = 127
                    expo = Decimal(2) ** Decimal(int(e, base=2) - bias)
                    frac = 1 + binfracstr2decfrac(f)
                try:
                    round_dir = self.init_from_dec((-1)**int(s) * expo * frac)
                except:
                    raise BinaryOverflow("Invalid representation for this class: %s"%value)
        elif isinstance(value, (int, np.int64, np.int32)):
            try:
                round_dir = self.init_from_dec(Decimal(value))
            except OverflowError:
                raise BinaryOverflow()
        elif isinstance(value, Decimal):
            try:
                round_dir = self.init_from_dec(value)
            except OverflowError:
                raise BinaryOverflow()
        elif isinstance(value, Binary):
            try:
                round_dir = self.init_from_dec(value.dec)
            except OverflowError:
                raise BinaryOverflow()
        elif isinstance(value, str):
            if len(value) == self.digits:
                # e.g. "11101001111010000"
                s = value[0]
                e = value[1:self.characteristicClass.digits+1]
                f = value[self.characteristicClass.digits+1:]
            elif len(value) == self.digits + 2:
                # e.g. "1 1101 001111010000"
                try:
                    s, e, f = value.split(' ')
                except:
                    raise ValueError("Invalid string representation for this class: %s"%value)
            else:
                raise ValueError("Invalid string representation for this class: %s"%value)
            try:
                self.init_from_string(s, e, f)
            except:
                raise ValueError("Invalid string representation for this class: %s"%value)
        else:
            raise TypeError("Invalid initialization type")
        self.tuple_rep = tuple((self.signbit.as_tuple(),
                                self.characteristic.as_tuple(),
                                self.significand.as_tuple()))
        # treat special cases for 0, Inf, NaN, and denormalized values
        if self.characteristic.dec_value == 0:
            if self.significand.dec_value == 0:
                # zeros
                if self.signbit.dec_value == 0:
                    self.dec_value = Decimal("0")
                    self.bin_value = "0"
                else:
                    self.dec_value = Decimal("-0")
                    self.bin_value = "-0"
            else:
                # denormalized values
                self.dec_value = (-1)**self.signbit.dec_value * \
                                 2**(self.characteristic.interpret(denorm=True)) * \
                                 (0 + self.significand.interpret())
                # lazy determination of bin_value when it is requested
                # (slow calculation)
                self.bin_value = ""
        elif self.characteristic.dec_value == self.characteristic.largest:
            if self.significand.dec_value == 0:
                # +/- Inf
                if self.signbit.dec_value == 0:
                    self.dec_value = Decimal("Inf")
                    self.bin_value = "Inf"
                else:
                    self.dec_value = Decimal("-Inf")
                    self.bin_value = "-Inf"
            else:
                # NaN
                self.dec_value = Decimal("NaN")
                self.bin_value = "NaN"
        else:
            # normalized values
            self.dec_value = (-1)**self.signbit.dec_value * \
                             2**(self.characteristic.interpret()) * \
                             (1 + self.significand.interpret())
            # lazy determination of bin_value when it is requested
            # (slow calculation)
            self.bin_value = ""
        if round_dir is not None:
            # value had to be rounded: adopt the state of the adjacent
            # representable value in the rounding direction
            new = getattr(self, round_dir)()
            self.signbit = new.signbit
            self.characteristic = new.characteristic
            self.significand = new.significand
            self.bin_value = new.bin_value
            self.dec_value = new.dec_value
            self.tuple_rep = new.tuple_rep
        # for compatibility with Binary attribute
        self.context = self.__class__

    def init_from_string(self, s, char_str, mant_str):
        """Build the three bit-field components from their digit strings."""
        self.signbit = SignBit(s)
        self.characteristic = self.characteristicClass(char_str)
        self.significand = self.significandClass(mant_str)

    def init_from_dec(self, d):
        """d is a Decimal object.

        Sets signbit / characteristic / significand from d and returns the
        rounding direction still to apply: 'next', 'prev', or None.
        """
        if d.is_nan():
            self.signbit = SignBit('0')
            self.characteristic = \
                self.characteristicClass("1"*self.characteristicClass.digits)
            self.significand = \
                self.significandClass("1"*self.significandClass.digits)
            return None
        elif d.is_infinite():
            self.signbit = SignBit(str(bin_sign(int(d.is_infinite()))))
            self.characteristic = \
                self.characteristicClass("1"*self.characteristicClass.digits)
            self.significand = \
                self.significandClass("0"*self.significandClass.digits)
            return None
        if d < 0:
            s = '1'
            d = -d
        elif d == 0:
            self.signbit = SignBit('0')
            self.characteristic = \
                self.characteristicClass("0"*self.characteristicClass.digits)
            self.significand = \
                self.significandClass("0"*self.significandClass.digits)
            # no rounding, so return None
            return None
        else:
            s = '0'
        if d <= self.largest_denorm:
            # denormalized: effective bias shifts by one, no implicit 1 bit
            bias = self.characteristicClass.bias-1
            frac_leading = 0
        else:
            bias = self.characteristicClass.bias
            frac_leading = 1
        # acquire precise fraction, exponent (given that exponent must be
        # representable in this class)
        f_dec, e_dec = frexp(d, self)
        e_dec += bias
        # convert to binary and round to precision of significand
        i = 0
        max_bits = self.characteristicClass.exp_largest + \
                   self.significandClass.digits + 2
        bfrac = np.zeros(max_bits, int)
        i_stop = max_bits
        not_seen_one = True
        # make sure we use extra precision (given that might adjust for
        # leading 0's)
        while f_dec > 0 and i < max_bits:
            f_dec *= 2
            bit = int(f_dec)
            if not_seen_one and bit == 1:
                # first time seen a 1 know that we only need up to
                # significand's more digits + 2 (speed optimization)
                i_stop = i + self.significandClass.digits + 2
                not_seen_one = False
            bfrac[i] = bit
            f_dec -= bit
            i += 1
            if i >= i_stop:
                break
        # binary string representation of fraction to full precision
        f = ''.join([str(b) for b in bfrac[:i]])
        if frac_leading == 1:
            try:
                one_pos = f.index('1')
            except ValueError:
                # f is all 0's
                pass
            else:
                # adjust for leading zeros
                f = f[one_pos+1:]
                e_dec -= one_pos+1
            c = decint2binstr(e_dec)
        else:
            # denormalized, inevitable lost precision in f
            c = '0'*self.characteristicClass.digits
            f = '0'*abs(e_dec) + f
        round_dir = None # default
        if len(c) < self.characteristicClass.digits:
            c = pad(c, self.characteristicClass.digits)
        if len(f) < self.significandClass.digits:
            f = pad(f, self.significandClass.digits, to_right=True)
        elif len(f) > self.significandClass.digits:
            # truncate, remembering the first dropped bit and whether any
            # later dropped bit was non-zero (for rounding)
            next_bit = int(f[self.significandClass.digits])
            try:
                remaining_bits = f[self.significandClass.digits+1:]
            except IndexError:
                remaining_bits = '0'
            else:
                if remaining_bits == '':
                    remaining_bits = '0'
            round_dir = self._round(int(s), next_bit, int(remaining_bits) > 0)
            f = f[:self.significandClass.digits]
        self.signbit = SignBit(s)
        self.characteristic = self.characteristicClass(c)
        self.significand = self.significandClass(f)
        return round_dir

    def _round(self, sign, next_bit, non_zero_remainder):
        """Decide the rounding step after truncating the significand.

        Returns 'next' / 'prev' (direction to step) or None (keep the
        truncated value), based on the context's round_mode, the value's
        sign, the first dropped bit, and whether any later dropped bit
        was non-zero.
        """
        r = self.round_mode
        if r == ROUND_DOWN:
            return None
        elif r == ROUND_UP:
            if next_bit == 1 or non_zero_remainder:
                # > 0.0b in magnitude
                if sign == 0:
                    return 'next'
                else:
                    return 'prev'
            else:
                return None
        elif r == ROUND_HALF_UP:
            if next_bit == 1:
                # >= 0.1b in magnitude, round away from 0
                if sign == 0:
                    return 'next'
                else:
                    return 'prev'
            else:
                return None
        elif r == ROUND_HALF_DOWN:
            if next_bit == 1:
                if non_zero_remainder:
                    # > 0.1b in magnitude, round away from 0
                    if sign == 0:
                        return 'next'
                    else:
                        return 'prev'
                else:
                    # = 0.1b, round towards 0
                    return None
            else:
                return None
        elif r == ROUND_CEILING:
            if sign == 0:
                if next_bit == 1 or non_zero_remainder:
                    # > 0.0b in magnitude, round towards +inf
                    return 'next'
                else:
                    return None
            else:
                return None
        elif r == ROUND_FLOOR:
            if sign == 1:
                if next_bit == 1 or non_zero_remainder:
                    # > 0.0b in magnitude, round towards +inf
                    return 'prev'
                else:
                    return None
            else:
                return None
        else:
            raise ValueError("Invalid rounding type")

    def is_denormalized(self):
        """True when the magnitude lies in this context's denormalized range."""
        return self.dec_value <= self.largest_denorm

    def as_tuple(self):
        """(sign bits, characteristic bits, significand bits) as int tuples."""
        return self.tuple_rep

    def as_binary(self):
        """Lazy evaluation of bin_value in case it's never used.
        This is done because the calculation is slow!
        """
        if self.bin_value == "":
            self.bin_value = dec2binstr(self.dec_value, self)
        return Binary(self.bin_value, self.__class__)

    def as_decimal(self):
        """Exact value as a Decimal."""
        return self.dec_value

    def __repr__(self):
        # bin_value is computed lazily; fill it in if still pending
        if self.bin_value == "":
            self.bin_value = dec2binstr(self.dec_value, self)
        return 'Binary("%s", (%s, %s, %s))' % \
               (self.bin_value,
                self.characteristicClass.digits,
                self.significandClass.digits,
                self.round_mode)

    def __str__(self):
        return "%s %s %s" % (str(self.signbit), str(self.characteristic),
                             str(self.significand))

    def next(self):
        """Return the successive representable float value in the more
        positive direction.
        """
        if self.signbit.dec_value == 1:
            method = 'prev'
        else:
            method = 'next'
        return self._step(method)

    def prev(self):
        """Return the successive representable float value in the more
        negative direction.
        """
        if self.signbit.dec_value == 0:
            method = 'prev'
        else:
            method = 'next'
        return self._step(method)

    def _step(self, method):
        """
        Internal method for stepping to successive representable values,
        either in the increasing or decreasing direction, according to the
        method passed.
        """
        # default value, unless switches
        new_signbit = self.signbit
        try:
            new_signif = getattr(self.significand, method)()
        except BinaryNegativeValue:
            # CARRY
            try:
                new_char = getattr(self.characteristic, method)()
            except BinaryNegativeValue:
                # CHANGE SIGN
                new_char = self.characteristic
                new_signbit = SignBit(str(1-self.signbit.dec_value))
                # have to reset new_signif in this case
                new_signif = self.significandClass(pad('1',
                                                       self.significand.digits))
            except BinaryOverflow:
                raise ValueError("No more representable values")
            else:
                new_signif = self.significandClass('1'*self.significand.digits)
        except BinaryOverflow:
            # CARRY
            # representation of one in the same number of significant digits
            new_signif = self.significandClass('0'*self.significand.digits)
            try:
                new_char = getattr(self.characteristic, method)()
            except BinaryOverflow:
                raise ValueError("No more representable values")
        else:
            new_char = self.characteristic
        return self.__class__(" ".join([str(new_signbit), str(new_char),
                                        str(new_signif)]))

    def _op_check(self, other):
        """Validate that 'other' matches this context's precision and rounding
        mode; return its Decimal value for use in arithmetic/comparisons."""
        try:
            # may be another ContextClass with matching precision
            other_sig_digits = other.significand.digits
            other_char_digits = other.characteristic.digits
        except AttributeError:
            # may be a Binary object with matching precision
            try:
                other_sig_digits = other.context.significandClass.digits
                other_char_digits = other.context.characteristicClass.digits
            except AttributeError:
                raise TypeError("Invalid Context object")
            else:
                ox = other.dec
                c = other.context
        else:
            ox = other.dec_value
            c = other
        if other_sig_digits != self.significand.digits or \
           other_char_digits != self.characteristic.digits:
            raise ValueError("Mismatched precision")
        elif c.round_mode != self.round_mode:
            raise ValueError("Mismatched rounding modes")
        return ox

    def __eq__(self, other):
        ox = self._op_check(other)
        return self.dec_value == ox

    def __ne__(self, other):
        ox = self._op_check(other)
        return self.dec_value != ox

    def __le__(self, other):
        ox = self._op_check(other)
        return self.dec_value <= ox

    def __lt__(self, other):
        ox = self._op_check(other)
        return self.dec_value < ox

    def __ge__(self, other):
        ox = self._op_check(other)
        return self.dec_value >= ox

    def __gt__(self, other):
        ox = self._op_check(other)
        return self.dec_value > ox

    def __neg__(self):
        # flip the sign bit, keep the remaining bit fields
        return self.__class__(str(1-self.signbit.dec_value) + str(self)[1:])

    def __abs__(self):
        # clear the sign bit, keep the remaining bit fields
        return self.__class__('0' + str(self)[1:])

    # Arithmetic is performed on the exact Decimal values; constructing the
    # result re-applies this context's precision and rounding.
    def __add__(self, other):
        ox = self._op_check(other)
        return self.__class__(self.dec_value + ox)

    __radd__ = __add__

    def __sub__(self, other):
        ox = self._op_check(other)
        return self.__class__(self.dec_value - ox)

    def __rsub__(self, other):
        ox = self._op_check(other)
        return self.__class__(ox - self.dec_value)

    def __mul__(self, other):
        ox = self._op_check(other)
        return self.__class__(self.dec_value * ox)

    __rmul__ = __mul__

    def __div__(self, other):
        # NOTE(review): Python 2 division name; __truediv__ is not aliased
        # here (only __rtruediv__ below), so 'self / other' may not dispatch
        # to this method under Python 3 -- confirm.
        ox = self._op_check(other)
        return self.__class__(self.dec_value / ox)

    def __rdiv__(self, other):
        ox = self._op_check(other)
        return self.__class__(ox / self.dec_value)

    __rtruediv__ = __rdiv__

    def __pow__(self, other):
        ox = self._op_check(other)
        return self.__class__(self.dec_value ** ox)

    def __rpow__(self, other):
        ox = self._op_check(other)
        return self.__class__(ox ** self.dec_value)

    def sqrt(self):
        """Square root, computed on the Decimal value then re-represented."""
        return self.__class__(self.dec_value.sqrt())

    def __nonzero__(self):
        # NOTE(review): __nonzero__ is the Python 2 truth protocol; Python 3
        # uses __bool__, which is not defined here, so bool() of a zero value
        # falls back to the default (True) -- confirm intended behavior.
        return self.dec_value != 0

    def max(self, other):
        """Respects NaN and Inf"""
        ox = self._op_check(other)
        r = self.dec_value.max(ox)
        if r == self.dec_value:
            return self
        else:
            return other

    def min(self, other):
        """Respects NaN and Inf"""
        ox = self._op_check(other)
        r = self.dec_value.min(ox)
        if r == self.dec_value:
            return self
        else:
            return other

    def __copy__(self):
        if type(self) == ContextClass:
            return self # I'm immutable; therefore I am my own clone
        return self.__class__(str(self))

    def __deepcopy__(self, memo):
        if type(self) == ContextClass:
            return self # My components are also immutable
        return self.__class__(str(self))
class context_registry:
    """Registry saves re-creating temporary copies of the same context classes
    during eval() calls, etc., by keeping a registry of all created
    contexts in the present session.
    """
    def __init__(self):
        # maps (char_digits, sig_digits, rounding) -> generated context class
        self.contexts = {}

    def __call__(self, char_digits, sig_digits, rounding=ROUND_HALF_UP):
        # Return a cached context class when one exists; otherwise build
        # (and cache) it.
        try:
            return self.contexts[(char_digits, sig_digits, rounding)]
        except KeyError:
            return self.make_context(char_digits, sig_digits, rounding)

    def make_context(self, char_digits, sig_digits, rounding):
        """Class factory for binary float arithmetic using the given number of
        digits for the characteristic (exponent) and significand (mantissa, or
        fraction).

        rounding type defaults to rounding up (away from 0).
        """
        if rounding not in _round_code:
            raise ValueError("Invalid rounding type specified")
        # Bit-field classes close over char_digits / sig_digits.
        class CharacteristicClass(BinaryCharacteristic):
            digits = char_digits
            largest = 2**char_digits-1
            bias = 2**(char_digits-1)-1
            exp_largest = 2**(char_digits-1)
            exp_lowest = -2**(char_digits-1) + 1
        class SignificandClass(BinarySignificand):
            digits = sig_digits
            largest = 2**sig_digits-1
        class context(ContextClass):
            characteristicClass = CharacteristicClass
            significandClass = SignificandClass
            # largest magnitudes representable as denormalized / normalized
            largest_denorm = (Decimal(2) ** Decimal(-2**(char_digits-1)+2) ) * \
                             (1 - Decimal(2)**(-sig_digits))
            largest_norm = (1 - Decimal("0.5")**(sig_digits+1)) * \
                           2 ** (2**(char_digits - 1))
            digits = 1 + char_digits + sig_digits
            round_mode = rounding
            Etop = 2**(char_digits-1)
            Etiny = -2**(char_digits-1) + 1
        # e.g. "Float_8_23_HU" for single precision, half-up rounding
        context.__name__ = "Float_%d_%d_%s" % (char_digits, sig_digits,
                                               _round_code[rounding])
        self.contexts[(char_digits, sig_digits, rounding)] = context
        return context
# Singleton factory for context classes; call as define_context(char, sig[, rounding]).
define_context = context_registry()
# Pre-built standard contexts (characteristic digits, significand digits);
# rounding defaults to ROUND_HALF_UP.
single = define_context(8, 23) # IEEE 754 32 bit float
double = define_context(11, 52) # IEEE 754 64 bit float
quadruple = define_context(15, 112) # IEEE 754 128 bit float
half = define_context(5, 10) # IEEE 754 16 bit float
test = define_context(4, 6, ROUND_DOWN) # for learning purposes
class Binary:
    """Constructor for binary floating point representation from a binary string
    or Decimal class representation of a real number: e.g. -1101.100011 or
    Decimal("-13.546875000"). The value may also be an instance of an existing
    context.

    If a context is given as a Context class, the return value will be an
    instance of that class (a IEEE 754 representation of that binary value).
    If the value given was from a different context it will be coerced.

    The context may also be provided as the tuple: (characteristic_digits,
                                                    significand_digits,
                                                    rounding mode string)
    although this is intended primarily for internal use or for eval(repr(x))
    for a Context object x.

    Binary() is the same as Binary('0') to be functionally compatible with
    the Decimal class.
    """
    def __init__(self, x='0', context=None):
        if isinstance(context, tuple):
            # tuple form: (char_digits, sig_digits, rounding mode string)
            self.context = define_context(context[0], context[1],
                                          rounding=context[2])
        else:
            self.context = context
        # placeholder for representation of x
        self.rep = None
        if isinstance(x, Binary):
            self.dec = x.dec
            self.bin = x.bin
            if context is None:
                self.context = x.context
                # in case x.context is not None (otherwise gets
                # overwritten by x.dec == x.rep anyway)
                self.rep = x.rep
        elif isinstance(x, ContextClass):
            if context is None:
                # optimization -- don't recreate self.rep later
                # from this context, keep it now.
                self.context = x.__class__
                self.rep = x
                self.dec = x.as_decimal()
                self.bin = str(x.as_binary())
            else:
                # coerce a value from a (possibly different) context into
                # the requested one
                x_dec = x.as_decimal()
                if abs(x_dec) > self.context.largest_norm:
                    if x_dec < 0:
                        self.bin = "-Inf"
                        # BUG FIX: was 'seld.dec = ...', a NameError at runtime
                        self.dec = Decimal("-Inf")
                    else:
                        self.bin = "Inf"
                        self.dec = Decimal("Inf")
                else:
                    # BUG FIX: in-range values previously left self.dec unset,
                    # so the self.context(self.dec) call below raised
                    # AttributeError; self.bin is derived lazily below.
                    self.dec = x_dec
                self.rep = self.context(self.dec)
        elif isinstance(x, Decimal):
            self.dec = x
            if self.context is None:
                raise ValueError("Cannot create arbitrary precision binary "
                                 "value without a representation context")
            if x.is_nan() or x.is_infinite():
                if x.is_nan():
                    self.bin = "NaN"
                else:
                    if x < 0:
                        self.bin = "-Inf"
                    else:
                        self.bin = "Inf"
            elif abs(x) > self.context.largest_norm:
                # magnitude exceeds the context's range: overflow to +/- Inf
                if x < 0:
                    self.dec = Decimal("-Inf")
                    self.bin = "-Inf"
                else:
                    self.dec = Decimal("Inf")
                    self.bin = "Inf"
            else:
                self.bin = dec2binstr(x, self.context)
        else:
            # string
            bstr = x.lower()
            if bstr in ['-inf', 'inf', 'nan']:
                self.bin = x
                self.dec = Decimal(x)
            else:
                self.bin = x
                self.dec = binvalstr2dec(bstr)
        if self.context is None:
            # no context: the exact Decimal value stands in for the rep
            self.rep = self.dec
        else:
            if self.rep is None:
                try:
                    self.rep = self.context(self.dec)
                except BinaryOverflow:
                    # too large for the context: clamp to +/- Inf and retry
                    if self.dec < 0:
                        self.dec = Decimal("-Inf")
                        self.bin = "-Inf"
                    else:
                        self.dec = Decimal("Inf")
                        self.bin = "Inf"
                    self.rep = self.context(self.dec)
            self.dec = self.rep.dec_value
            if self.rep.bin_value == "":
                # lazy evaluation hasn't been performed yet
                self.bin = dec2binstr(self.dec, self.rep)
                # might as well update the representation
                self.rep.bin_value = self.bin
            else:
                self.bin = self.rep.bin_value

    def __hash__(self):
        return hash(self.rep)

    def __str__(self):
        return self.bin

    def __repr__(self):
        if self.context:
            return 'Binary("%s", (%d, %d, %s))' % (self.bin,
                                                   self.context.characteristicClass.digits,
                                                   self.context.significandClass.digits,
                                                   self.context.round_mode)
        else:
            return 'Binary("%s")' % self.bin

    def as_binary(self):
        return self

    def as_decimal(self):
        """Exact value as a Decimal."""
        return self.dec

    def _op_check(self, other):
        """Return (other's Decimal value, context to use for the result),
        choosing the higher-precision context of the two operands."""
        if isinstance(other, (int, np.int32, np.int64)):
            ox, c = Decimal(other), None
        elif isinstance(other, Binary):
            ox, c = other.dec, other.context
        elif isinstance(other, Decimal):
            ox, c = other, None
        elif isinstance(other, ContextClass):
            # ContextClass is strict about comparing only with others
            # of the same representation, so ensure self is
            if self.context == other.__class__:
                ox, c = other.as_decimal(), other.__class__
            else:
                raise TypeError("Invalid object for comparison")
        else:
            raise TypeError("Invalid object for comparison")
        if self.context:
            s_digits = self.context.digits
        else:
            s_digits = 0
        if c:
            c_digits = c.digits
        else:
            c_digits = 0
        if s_digits > c_digits:
            ctx = self.context
        else:
            if s_digits > 0 and s_digits == c_digits:
                # contexts have the same precision, but what about
                # rounding?
                if self.context.round_mode == c.round_mode:
                    ctx = c
                else:
                    raise ValueError("Clash of rounding modes for "
                                     "equal-precision comparison")
            else:
                ctx = c
        return ox, ctx

    def __eq__(self, other):
        ox, c = self._op_check(other)
        return self.dec == ox

    def __ne__(self, other):
        ox, c = self._op_check(other)
        return self.dec != ox

    def __le__(self, other):
        ox, c = self._op_check(other)
        return self.dec <= ox

    def __lt__(self, other):
        ox, c = self._op_check(other)
        return self.dec < ox

    def __ge__(self, other):
        ox, c = self._op_check(other)
        return self.dec >= ox

    def __gt__(self, other):
        ox, c = self._op_check(other)
        return self.dec > ox

    def __neg__(self):
        return self.__class__(-self.rep)

    def __abs__(self):
        return self.__class__(abs(self.rep))

    # Arithmetic is performed on exact Decimal values; the result is wrapped
    # in a new Binary using the higher-precision operand's context.
    def __add__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(self.dec + ox, ctx)

    __radd__ = __add__

    def __sub__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(self.dec - ox, ctx)

    def __rsub__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(ox - self.dec, ctx)

    def __mul__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(self.dec * ox, ctx)

    __rmul__ = __mul__

    def __div__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(self.dec / ox, ctx)

    def __rdiv__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(ox / self.dec, ctx)

    __rtruediv__ = __rdiv__
    __truediv__ = __div__

    def __pow__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(self.dec ** ox, ctx)

    def __rpow__(self, other):
        ox, ctx = self._op_check(other)
        return self.__class__(ox ** self.dec, ctx)

    def __nonzero__(self):
        return self.dec != 0

    # BUG FIX: __nonzero__ is the Python 2 truth protocol; alias it so that
    # bool() works correctly under Python 3 as well.
    __bool__ = __nonzero__

    def sqrt(self):
        """Square root, computed on the Decimal value then re-wrapped."""
        return self.__class__(self.dec.sqrt(), self.context)

    def max(self, other):
        """Respects NaN and Inf"""
        ox, ctx = self._op_check(other)
        r = self.dec.max(ox)
        if r == self.dec:
            return self
        else:
            return other

    def min(self, other):
        """Respects NaN and Inf"""
        ox, ctx = self._op_check(other)
        r = self.dec.min(ox)
        if r == self.dec:
            return self
        else:
            return other

    def __reduce__(self):
        return (self.__class__, (repr(self),))

    def __copy__(self):
        if type(self) == Binary:
            return self # I'm immutable; therefore I am my own clone
        return self.__class__(str(self))

    def __deepcopy__(self, memo):
        if type(self) == Binary:
            return self # My components are also immutable
        return self.__class__(str(self))
def binvalstr2dec(x):
    """Convert signed real numbers in binary string form to decimal
    value (no special values Inf, NaN), including values in scientific notation.
    """
    if not isbinstr(x):
        raise ValueError("Invalid string representation of binary"
                         " float: %s" % x)
    # peel off an optional leading minus sign
    sign = 1
    if x[0] == '-':
        sign = -1
        x = x[1:]
    # split off an optional exponent part ('e' or 'E')
    e = 0
    for marker in ('e', 'E'):
        if marker in x:
            x, estr = x.split(marker)
            e = int(estr)
            break
    # separate whole and fractional parts around the radix point
    if '.' in x:
        try:
            whole, frac = x.split('.')
        except ValueError:
            raise ValueError("Invalid string representation of binary"
                             " float")
        else:
            whole = whole or '0'
            frac = frac or '0'
    else:
        whole, frac = x, '0'
    try:
        dec_whole = Decimal(int(whole, base=2)) * Decimal(2)**e
    except ValueError:
        # whole part holds no digits (e.g. just a sign char)
        dec_whole = Decimal(0)
    dec_frac = binfracstr2decfrac(frac) * Decimal(2)**e
    return sign*(dec_whole+dec_frac)
def isbinstr(arg):
    """Return True if arg is a syntactically valid binary number string.

    Accepts an optional leading '+'/'-', at most one radix point, and an
    optional exponent part introduced by 'e'/'E' whose digits are decimal
    and may carry their own sign directly after the 'e'.
    """
    # supports unary + / - at front, and checks for usage of exponentials
    # (using 'E' or 'e')
    s = arg.lower()
    try:
        if s[0] in ['+','-']:
            s_rest = s[1:]
        else:
            s_rest = s
    except IndexError:
        # empty string
        return False
    if '0' not in s_rest and '1' not in s_rest:
        return False
    pts = s.count('.')
    exps = s.count('e')
    pm = s_rest.count('+') + s_rest.count('-')
    if pts > 1 or exps > 1 or pm > 1:
        return False
    if exps == 1:
        exp_pos = s.find('e')
        pre_exp = s[:exp_pos]
        # must be numbers before and after the 'e'
        # BUG FIX: builtin any()/all() replace np.sometrue/np.alltrue,
        # which were removed in NumPy 2.0
        if not any(n in ('0','1') for n in pre_exp):
            return False
        if s[-1]=='e':
            # no chars after 'e'!
            return False
        if not any(n in ('0','1','2','3','4','5','6','7','8','9') \
                   for n in s[exp_pos:]):
            return False
        # check that any additional +/- occurs directly after 'e'
        if pm == 1:
            pm_pos = max([s_rest.find('+'), s_rest.find('-')])
            if s_rest[pm_pos-1] != 'e':
                return False
            e_rest = s_rest[pm_pos+1:]   # safe due to previous check
            s_rest = s_rest[:pm_pos+1]
        else:
            e_rest = s[exp_pos+1:]
            s_rest = s[:exp_pos+1]
        # only remaining chars in s after e and possible +/- are numbers
        if '.' in e_rest:
            return False
    # cannot use additional +/- if not using exponent
    if pm == 1 and exps == 0:
        return False
    return all(n in ('0', '1', '.', 'e', '+', '-') for n in s_rest)
def binstr2dec(bstr):
    """Decimal integer for a binary integer string, e.g. '1101' -> 13."""
    return int(bstr, 2)
def decint2binstr(n):
    """Convert decimal integer to binary string.

    e.g. 13 -> '1101', -5 -> '-101', 0 -> '0'.

    GENERALIZATION: n may also be an integral Decimal (BinaryIntClass
    arithmetic passes Decimal results); the previous divide loop crashed
    on 'n >>= 1' because Decimal does not support shifts.
    """
    # int() accepts int / integral Decimal / numpy integers alike;
    # format(..., 'b') reproduces the old loop exactly, including the
    # '-' prefix for negatives and '0' for zero.
    return format(int(n), 'b')
def binfracstr2decfrac(bstr):
    """Convert non-negative binary string fraction (without radix) to decimal
    fraction.

    e.g. to convert ".1101", binfracstr2decfrac('1101') -> Decimal("0.8125")
    """
    assert bstr[0] != '-', "Only pass non-negative values"
    one_half = Decimal("0.5")
    # each set bit at position p contributes 2**-(p+1)
    return sum(one_half**(place+1)
               for place, bit in enumerate(bstr) if int(bit) == 1)
def frexp(d, context):
    """Implementation of 'frexp' for arbitrary precision decimals.

    Result is a pair F, E, where 0 < F < 1 is a Decimal object,
    and E is a signed integer such that

    d = F * 2**E

    context specifies the maximum absolute value of the exponent
    to ensure termination of the calculation. e.g. for double precision
    pass the 'double' Context class which defines a characteristic
    of 11 bits, with maximum exponent size of 1024.
    """
    e_largest = context.characteristicClass.exp_largest
    if d < 0:
        # BUG FIX: the recursive call previously passed e_largest (an int)
        # as the context argument, which crashed on the
        # context.characteristicClass attribute access above.
        res = frexp(-d, context)
        return -res[0], res[1]
    elif d == 0:
        return Decimal("0"), 0
    elif d >= 1:
        # halve until the integer part vanishes, counting the doublings
        w_dec = int(d)
        e_dec = 0
        while w_dec > 0 and abs(e_dec) <= e_largest:
            d /= 2
            w_dec = int(d)
            e_dec += 1
        return d, e_dec
    else:
        # 0 < d < 1: double until the value reaches [0.5, 1)
        w_dec = 0
        e_dec = 0
        while w_dec == 0 and abs(e_dec) <= e_largest:
            w_dec = int(d*2)
            if w_dec > 0:
                break
            else:
                d *= 2
                e_dec -= 1
        return d, e_dec
def dec2binstr(x, context=None):
    """Render Decimal *x* as a signed binary fraction string '0.<bits>E<exp>'.

    Zero is rendered as '0.0'; the exponent is decoded from the binary
    characteristic returned by decfrac2binrep.
    """
    if x == 0:
        return '0.0'
    if x < 0:
        sign, magnitude = '-', -x
    else:
        sign, magnitude = '', x
    # convert the magnitude to its binary fraction representation
    char_bits, frac_bits = decfrac2binrep(magnitude, context)
    exponent = int(char_bits, base=2)
    return sign + '0.' + frac_bits + 'E' + str(exponent)
def decfrac2binrep(x, context):
    """Convert positive decimal float to the nearest representable
    "<characteristic> <significand>" binary string representation (natural
    format, where characteristic may be negative), using an exponent no bigger
    than permitted in the given context.
    """
    assert x > 0, "Provide only positive Decimal float value"
    assert isinstance(x, Decimal), "Provide only positive Decimal float value"
    fraction, exponent = frexp(x, context)
    max_bits = context.characteristicClass.exp_largest + \
               context.significandClass.digits
    bfrac = np.zeros(max_bits, int)
    i = 0
    i_stop = max_bits
    not_seen_one = True
    while fraction > 0 and i < max_bits:
        fraction *= 2
        bit = int(fraction)
        if not_seen_one and bit == 1:
            # Speed optimization: once the leading 1-bit is found, only
            # `significand digits + 2` more bits can matter.
            # BUG FIX: the flag was never cleared, so every subsequent
            # 1-bit pushed i_stop further out and the early exit rarely
            # engaged; anchor the stop point at the *first* 1-bit.
            i_stop = i + context.significandClass.digits + 2
            not_seen_one = False
        bfrac[i] = bit
        fraction -= bit
        i += 1
        if i >= i_stop:
            break
    # negative exponents OK for this usage
    return decint2binstr(exponent), "".join([str(bit) for \
            bit in bfrac[:i]])
def pad(value, digits, to_right=False):
    """Only use for positive binary numbers given as strings.
    Pads to the left by default, or to the right using to_right flag.
    Inputs: value -- string of bits
            digits -- number of bits in representation
            to_right -- Boolean, direction of padding
    Output: string of bits of length 'digits'
    Raises exception if value is larger than digits in length.
    Example:
    pad('0010', 6) -> '000010'
    pad('0010', 6, True) -> '001000'
    """
    assert len(value) <= digits
    # str.ljust/rjust perform exactly the zero fill the original loop built
    if to_right:
        return value.ljust(digits, "0")
    return value.rjust(digits, "0")
def bin_sign(x):
    """Binary representation of sign: x < 0 is represented as 1, 0 otherwise.
    """
    # np.sign(x) is negative exactly when x < 0; keep np.sign so that
    # non-finite inputs behave exactly as before.
    return 0 if np.sign(x) >= 0 else 1
## exceptions
class BinaryException(ArithmeticError):
    """Base class for binary-arithmetic errors; carries an optional value."""

    def __init__(self, value=None):
        self.value = value
        self.code = None

    def __repr__(self):
        return repr(self.value)

    # str() and repr() intentionally render identically.
    __str__ = __repr__
class BinaryOverflow(BinaryException):
    """Signals an overflow condition in binary arithmetic."""
    pass
class BinaryUnderflow(BinaryException):
    """Signals an underflow condition in binary arithmetic."""
    pass
class BinaryNegativeValue(BinaryException):
    """Signals a negative-value condition in binary arithmetic."""
    pass
class BinaryRemainderValue(BinaryException):
    """Signals a remainder-value condition in binary arithmetic."""
    pass
| [
"decimal.getcontext",
"numpy.alltrue",
"numpy.sometrue",
"numpy.array",
"numpy.zeros",
"numpy.isfinite",
"numpy.sign",
"decimal.Decimal"
] | [((1497, 1517), 'decimal.getcontext', 'decimal.getcontext', ([], {}), '()\n', (1515, 1517), False, 'import decimal\n'), ((42488, 42555), 'numpy.alltrue', 'np.alltrue', (["[(n in ('0', '1', '.', 'e', '+', '-')) for n in s_rest]"], {}), "([(n in ('0', '1', '.', 'e', '+', '-')) for n in s_rest])\n", (42498, 42555), True, 'import numpy as np\n'), ((43218, 43232), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (43225, 43232), False, 'from decimal import Decimal\n'), ((45455, 45478), 'numpy.zeros', 'np.zeros', (['max_bits', 'int'], {}), '(max_bits, int)\n', (45463, 45478), True, 'import numpy as np\n'), ((46856, 46866), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (46863, 46866), True, 'import numpy as np\n'), ((2392, 2410), 'decimal.Decimal', 'Decimal', (['dec_value'], {}), '(dec_value)\n', (2399, 2410), False, 'from decimal import Decimal\n'), ((16890, 16913), 'numpy.zeros', 'np.zeros', (['max_bits', 'int'], {}), '(max_bits, int)\n', (16898, 16913), True, 'import numpy as np\n'), ((40770, 40780), 'decimal.Decimal', 'Decimal', (['(0)'], {}), '(0)\n', (40777, 40780), False, 'from decimal import Decimal\n'), ((40823, 40833), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (40830, 40833), False, 'from decimal import Decimal\n'), ((41527, 41576), 'numpy.sometrue', 'np.sometrue', (["[(n in ('0', '1')) for n in pre_exp]"], {}), "([(n in ('0', '1')) for n in pre_exp])\n", (41538, 41576), True, 'import numpy as np\n'), ((41697, 41794), 'numpy.sometrue', 'np.sometrue', (["[(n in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')) for n in s[exp_pos:]\n ]"], {}), "([(n in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')) for\n n in s[exp_pos:]])\n", (41708, 41794), True, 'import numpy as np\n'), ((7995, 8013), 'numpy.isfinite', 'np.isfinite', (['value'], {}), '(value)\n', (8006, 8013), True, 'import numpy as np\n'), ((35566, 35580), 'decimal.Decimal', 'Decimal', (['other'], {}), '(other)\n', (35573, 35580), False, 'from decimal import Decimal\n'), 
((40713, 40723), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (40720, 40723), False, 'from decimal import Decimal\n'), ((44008, 44020), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (44015, 44020), False, 'from decimal import Decimal\n'), ((8338, 8354), 'decimal.Decimal', 'Decimal', (['val_str'], {}), '(val_str)\n', (8345, 8354), False, 'from decimal import Decimal\n'), ((9560, 9578), 'numpy.isfinite', 'np.isfinite', (['value'], {}), '(value)\n', (9571, 9578), True, 'import numpy as np\n'), ((13023, 13035), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (13030, 13035), False, 'from decimal import Decimal\n'), ((13136, 13149), 'decimal.Decimal', 'Decimal', (['"""-0"""'], {}), "('-0')\n", (13143, 13149), False, 'from decimal import Decimal\n'), ((14072, 14086), 'decimal.Decimal', 'Decimal', (['"""NaN"""'], {}), "('NaN')\n", (14079, 14086), False, 'from decimal import Decimal\n'), ((29564, 29574), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (29571, 29574), False, 'from decimal import Decimal\n'), ((29578, 29614), 'decimal.Decimal', 'Decimal', (['(-2 ** (char_digits - 1) + 2)'], {}), '(-2 ** (char_digits - 1) + 2)\n', (29585, 29614), False, 'from decimal import Decimal\n'), ((9011, 9021), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (9018, 9021), False, 'from decimal import Decimal\n'), ((9184, 9194), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (9191, 9194), False, 'from decimal import Decimal\n'), ((9903, 9919), 'decimal.Decimal', 'Decimal', (['val_str'], {}), '(val_str)\n', (9910, 9919), False, 'from decimal import Decimal\n'), ((13822, 13836), 'decimal.Decimal', 'Decimal', (['"""Inf"""'], {}), "('Inf')\n", (13829, 13836), False, 'from decimal import Decimal\n'), ((13939, 13954), 'decimal.Decimal', 'Decimal', (['"""-Inf"""'], {}), "('-Inf')\n", (13946, 13954), False, 'from decimal import Decimal\n'), ((29652, 29662), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (29659, 29662), False, 'from 
decimal import Decimal\n'), ((29711, 29725), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (29718, 29725), False, 'from decimal import Decimal\n'), ((33908, 33918), 'decimal.Decimal', 'Decimal', (['x'], {}), '(x)\n', (33915, 33918), False, 'from decimal import Decimal\n'), ((10572, 10582), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (10579, 10582), False, 'from decimal import Decimal\n'), ((10746, 10756), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (10753, 10756), False, 'from decimal import Decimal\n'), ((11184, 11198), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (11191, 11198), False, 'from decimal import Decimal\n'), ((32686, 32701), 'decimal.Decimal', 'Decimal', (['"""-Inf"""'], {}), "('-Inf')\n", (32693, 32701), False, 'from decimal import Decimal\n'), ((32804, 32818), 'decimal.Decimal', 'Decimal', (['"""Inf"""'], {}), "('Inf')\n", (32811, 32818), False, 'from decimal import Decimal\n'), ((34312, 34327), 'decimal.Decimal', 'Decimal', (['"""-Inf"""'], {}), "('-Inf')\n", (34319, 34327), False, 'from decimal import Decimal\n'), ((34431, 34445), 'decimal.Decimal', 'Decimal', (['"""Inf"""'], {}), "('Inf')\n", (34438, 34445), False, 'from decimal import Decimal\n'), ((33509, 33524), 'decimal.Decimal', 'Decimal', (['"""-Inf"""'], {}), "('-Inf')\n", (33516, 33524), False, 'from decimal import Decimal\n'), ((33616, 33630), 'decimal.Decimal', 'Decimal', (['"""Inf"""'], {}), "('Inf')\n", (33623, 33630), False, 'from decimal import Decimal\n'), ((8776, 8790), 'numpy.array', 'np.array', (['fval'], {}), '(fval)\n', (8784, 8790), True, 'import numpy as np\n'), ((10339, 10353), 'numpy.array', 'np.array', (['fval'], {}), '(fval)\n', (10347, 10353), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import time
import rospy
import math
import copy
import numpy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from tf.transformations import euler_from_quaternion
class CubeRLUtils(object):
    """ROS helper for the moving_cube robot.

    Subscribes to the cube's joint states and odometry, publishes roll-wheel
    velocity commands, and provides small RL utilities (state vector, done
    check, reward).
    """

    def __init__(self):
        # Wait for the sensor topics before wiring up subscribers/publisher.
        self.check_all_sensors_ready()
        rospy.Subscriber("/moving_cube/joint_states", JointState, self.joints_callback)
        rospy.Subscriber("/moving_cube/odom", Odometry, self.odom_callback)
        self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
                                             Float64, queue_size=1)
        self.check_publishers_connection()

    def check_all_sensors_ready(self):
        """Block until one message has been received from each sensor topic."""
        self.disk_joints_data = None
        while self.disk_joints_data is None and not rospy.is_shutdown():
            try:
                self.disk_joints_data = rospy.wait_for_message("/moving_cube/joint_states", JointState, timeout=1.0)
                rospy.loginfo("Current moving_cube/joint_states READY=>" + str(self.disk_joints_data))
            except Exception:  # narrowed from a bare except; timeouts just retry
                rospy.logerr("Current moving_cube/joint_states not ready yet, retrying for getting joint_states")

        self.cube_odom_data = None
        # BUG FIX: this loop previously re-tested `self.disk_joints_data is
        # None` (already satisfied above), so the odometry wait never ran
        # and cube_odom_data was left as None.
        while self.cube_odom_data is None and not rospy.is_shutdown():
            try:
                self.cube_odom_data = rospy.wait_for_message("/moving_cube/odom", Odometry, timeout=1.0)
                rospy.loginfo("Current /moving_cube/odom READY=>" + str(self.cube_odom_data))
            except Exception:
                rospy.logerr("Current /moving_cube/odom not ready yet, retrying for getting odom")
        rospy.loginfo("ALL SENSORS READY")

    def check_publishers_connection(self):
        """
        Checks that all the publishers are working
        :return:
        """
        rate = rospy.Rate(10)  # 10hz
        while (self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown()):
            rospy.loginfo("No susbribers to _roll_vel_pub yet so we wait and try again")
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # This is to avoid error when world is rested, time when backwards.
                pass
        rospy.loginfo("_base_pub Publisher Connected")
        rospy.loginfo("All Publishers READY")

    def joints_callback(self, data):
        # cache the latest JointState message
        self.joints = data

    def odom_callback(self, data):
        # cache the latest Odometry message
        self.odom = data

    # Reinforcement Learning Utility Code
    def move_joints(self, roll_speed):
        """Publish a roll-wheel velocity command (units per controller config - TODO confirm)."""
        joint_speed_value = Float64()
        joint_speed_value.data = roll_speed
        rospy.loginfo("Single Disk Roll Velocity>>" + str(joint_speed_value))
        self._roll_vel_pub.publish(joint_speed_value)

    def get_cube_state(self):
        """Return [wheel_velocity, distance_from_origin, roll, pitch, yaw], each rounded to 1 decimal."""
        # We convert from quaternions to euler
        orientation_list = [self.odom.pose.pose.orientation.x,
                            self.odom.pose.pose.orientation.y,
                            self.odom.pose.pose.orientation.z,
                            self.odom.pose.pose.orientation.w]
        roll, pitch, yaw = euler_from_quaternion(orientation_list)
        # We get the distance from the origin
        start_position = Point()
        start_position.x = 0.0
        start_position.y = 0.0
        start_position.z = 0.0
        distance = self.get_distance_from_point(start_position,
                                                self.odom.pose.pose.position)
        cube_state = [
            round(self.joints.velocity[0], 1),
            round(distance, 1),
            round(roll, 1),
            round(pitch, 1),
            round(yaw, 1)
        ]
        return cube_state

    def observation_checks(self, cube_state):
        """Return True (episode done) when the cube strays too far from the origin."""
        # Maximum distance to travel permitted in meters from origin
        max_distance = 2.0
        if (cube_state[1] > max_distance):
            rospy.logerr("Cube Too Far==>" + str(cube_state[1]))
            done = True
        else:
            rospy.loginfo("Cube NOT Too Far==>" + str(cube_state[1]))
            done = False
        return done

    def get_distance_from_point(self, pstart, p_end):
        """
        Given a Vector3 Object, get distance from current position
        :param p_end:
        :return:
        """
        a = numpy.array((pstart.x, pstart.y, pstart.z))
        b = numpy.array((p_end.x, p_end.y, p_end.z))
        distance = numpy.linalg.norm(a - b)
        return distance

    def get_reward_for_observations(self, state):
        """Reward = 10 * distance travelled - |wheel speed|."""
        # We reward it for lower speeds and distance traveled
        speed = state[0]
        distance = state[1]
        # Positive Reinforcement
        reward_distance = distance * 10.0
        # Negative Reinforcement for magnitude of speed
        reward_for_efective_movement = -1 * abs(speed)
        reward = reward_distance + reward_for_efective_movement
        rospy.loginfo("Reward_distance=" + str(reward_distance))
        rospy.loginfo("Reward_for_efective_movement= " + str(reward_for_efective_movement))
        return reward
def cube_rl_systems_test():
    """Manual smoke test: spin the wheel at a few speeds, then read back
    the cube state, done flag and reward."""
    rospy.init_node('cube_rl_systems_test_node', anonymous=True, log_level=rospy.INFO)
    cube_rl_utils_object = CubeRLUtils()
    # Drive through the same speed sequence as before, logging each step.
    for label, speed in (("80", 80.0), ("-80", -80.0), ("0.0", 0.0)):
        rospy.loginfo("Moving to Speed==>" + label)
        cube_rl_utils_object.move_joints(roll_speed=speed)
        time.sleep(2)
    cube_state = cube_rl_utils_object.get_cube_state()
    done = cube_rl_utils_object.observation_checks(cube_state)
    reward = cube_rl_utils_object.get_reward_for_observations(cube_state)
    rospy.loginfo("Done==>" + str(done))
    rospy.loginfo("Reward==>" + str(reward))
if __name__ == "__main__":
    # Entry point: run the manual smoke test only when invoked as a script.
    cube_rl_systems_test()
| [
"tf.transformations.euler_from_quaternion",
"rospy.logerr",
"rospy.Subscriber",
"std_msgs.msg.Float64",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.wait_for_message",
"time.sleep",
"numpy.array",
"geometry_msgs.msg.Point",
"rospy.Rate",
"numpy.linalg.norm",
"rospy.Publisher",
"rospy.log... | [((5196, 5283), 'rospy.init_node', 'rospy.init_node', (['"""cube_rl_systems_test_node"""'], {'anonymous': '(True)', 'log_level': 'rospy.INFO'}), "('cube_rl_systems_test_node', anonymous=True, log_level=\n rospy.INFO)\n", (5211, 5283), False, 'import rospy\n'), ((5325, 5362), 'rospy.loginfo', 'rospy.loginfo', (['"""Moving to Speed==>80"""'], {}), "('Moving to Speed==>80')\n", (5338, 5362), False, 'import rospy\n'), ((5421, 5434), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5431, 5434), False, 'import time\n'), ((5439, 5477), 'rospy.loginfo', 'rospy.loginfo', (['"""Moving to Speed==>-80"""'], {}), "('Moving to Speed==>-80')\n", (5452, 5477), False, 'import rospy\n'), ((5537, 5550), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5547, 5550), False, 'import time\n'), ((5555, 5593), 'rospy.loginfo', 'rospy.loginfo', (['"""Moving to Speed==>0.0"""'], {}), "('Moving to Speed==>0.0')\n", (5568, 5593), False, 'import rospy\n'), ((5651, 5664), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5661, 5664), False, 'import time\n'), ((382, 461), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/moving_cube/joint_states"""', 'JointState', 'self.joints_callback'], {}), "('/moving_cube/joint_states', JointState, self.joints_callback)\n", (398, 461), False, 'import rospy\n'), ((470, 537), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/moving_cube/odom"""', 'Odometry', 'self.odom_callback'], {}), "('/moving_cube/odom', Odometry, self.odom_callback)\n", (486, 537), False, 'import rospy\n'), ((568, 684), 'rospy.Publisher', 'rospy.Publisher', (['"""/moving_cube/inertia_wheel_roll_joint_velocity_controller/command"""', 'Float64'], {'queue_size': '(1)'}), "(\n '/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',\n Float64, queue_size=1)\n", (583, 684), False, 'import rospy\n'), ((1740, 1774), 'rospy.loginfo', 'rospy.loginfo', (['"""ALL SENSORS READY"""'], {}), "('ALL SENSORS READY')\n", (1753, 1774), False, 'import rospy\n'), ((1926, 1940), 
'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (1936, 1940), False, 'import rospy\n'), ((2336, 2382), 'rospy.loginfo', 'rospy.loginfo', (['"""_base_pub Publisher Connected"""'], {}), "('_base_pub Publisher Connected')\n", (2349, 2382), False, 'import rospy\n'), ((2392, 2429), 'rospy.loginfo', 'rospy.loginfo', (['"""All Publishers READY"""'], {}), "('All Publishers READY')\n", (2405, 2429), False, 'import rospy\n'), ((2667, 2676), 'std_msgs.msg.Float64', 'Float64', ([], {}), '()\n', (2674, 2676), False, 'from std_msgs.msg import Float64\n'), ((3212, 3251), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['orientation_list'], {}), '(orientation_list)\n', (3233, 3251), False, 'from tf.transformations import euler_from_quaternion\n'), ((3324, 3331), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (3329, 3331), False, 'from geometry_msgs.msg import Point\n'), ((4394, 4437), 'numpy.array', 'numpy.array', (['(pstart.x, pstart.y, pstart.z)'], {}), '((pstart.x, pstart.y, pstart.z))\n', (4405, 4437), False, 'import numpy\n'), ((4450, 4490), 'numpy.array', 'numpy.array', (['(p_end.x, p_end.y, p_end.z)'], {}), '((p_end.x, p_end.y, p_end.z))\n', (4461, 4490), False, 'import numpy\n'), ((4511, 4535), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (4528, 4535), False, 'import numpy\n'), ((2052, 2128), 'rospy.loginfo', 'rospy.loginfo', (['"""No susbribers to _roll_vel_pub yet so we wait and try again"""'], {}), "('No susbribers to _roll_vel_pub yet so we wait and try again')\n", (2065, 2128), False, 'import rospy\n'), ((894, 913), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (911, 913), False, 'import rospy\n'), ((972, 1048), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/moving_cube/joint_states"""', 'JointState'], {'timeout': '(1.0)'}), "('/moving_cube/joint_states', JointState, timeout=1.0)\n", (994, 1048), False, 'import rospy\n'), ((1375, 1394), 'rospy.is_shutdown', 'rospy.is_shutdown', 
([], {}), '()\n', (1392, 1394), False, 'import rospy\n'), ((1451, 1517), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/moving_cube/odom"""', 'Odometry'], {'timeout': '(1.0)'}), "('/moving_cube/odom', Odometry, timeout=1.0)\n", (1473, 1517), False, 'import rospy\n'), ((2018, 2037), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2035, 2037), False, 'import rospy\n'), ((1189, 1296), 'rospy.logerr', 'rospy.logerr', (['"""Current moving_cube/joint_states not ready yet, retrying for getting joint_states"""'], {}), "(\n 'Current moving_cube/joint_states not ready yet, retrying for getting joint_states'\n )\n", (1201, 1296), False, 'import rospy\n'), ((1649, 1736), 'rospy.logerr', 'rospy.logerr', (['"""Current /moving_cube/odom not ready yet, retrying for getting odom"""'], {}), "(\n 'Current /moving_cube/odom not ready yet, retrying for getting odom')\n", (1661, 1736), False, 'import rospy\n')] |
import sklearn
from sklearn import metrics
import numpy as np
import pandas as pd
import re
class MetricsCls:
    """
    Requirement: metric functions under the class should always have the y_true, and y_pred args.
    Examples:
        >>> obj=MetricsCls(config={'MAPE__version':'sklearn'})
        >>> x, y=[0.025,0.5,0.5,0], [2,0.5,0, 5]
        >>> print(obj.score(x,y))
        {'MAE': 1.8688, 'RMSE': 2.6996, 'MAPE': 5629499534213140.0, 'DS': 0.0}
        >>> obj=MetricsCls(config={'MAPE__version':'selfmade'})
        >>> print(obj.score(x,y))
        {'MAE': 1.8688, 'RMSE': 2.6996, 'MAPE': inf, 'DS': 0.0}
    """
    def __init__(self, metrics=None, config=None):
        """
        metrics: list of strings of metric name which should be the same as the function name here.
        config: Eg, {'MAE__multioutput': 'uniform_average'}
        """
        # BUG FIX: the old defaults (metrics=[], config={}) were mutable and
        # shared across calls/instances; use None sentinels instead.  An
        # omitted or empty `metrics` still selects the full default set.
        if not metrics:
            metrics = ['MAE', 'RMSE', 'MAPE', 'DS']
        self.metrics_dict = dict.fromkeys(metrics)
        self.config_dict = {} if config is None else config

    @property
    def metrics_dict(self):
        # mapping: metric name -> latest computed value (None until scored)
        return self._metrics_dict

    @metrics_dict.setter
    def metrics_dict(self, value):
        """
        store the mapping of metric name -> metric value
        """
        self._metrics_dict = value

    def config_parser(self, metric):
        """
        return the dict that contains only the keys with ``metric__`` pattern.
        Then this pattern will be removed.
        """
        # define pattern (raw f-string so \w reaches the regex engine intact)
        prefix = rf'{metric}__\w+'
        # find the keys matching the pattern required
        keys = re.findall(prefix, ' '.join(self.config_dict.keys()))
        # modify the keys by deleting the prefix
        keys_no_prefix = [k.replace(f'{metric}__', '') for k in keys]
        return dict(zip(keys_no_prefix, [self.config_dict[k] for k in keys]))

    def score(self, y_true, y_pred, digit=4, inplace=False):
        """ core function.
        run all metrics stored in the metrics_dict and return result as a dict.
        digit: number rounding
        inplace: bool - modify self.metrics_dict if True
        """
        res_dict = self.metrics_dict.copy()
        # loop over metrics
        for k in self.metrics_dict.keys():
            # fetch specific config
            config_dict = self.config_parser(k)
            # SECURITY/IDIOM FIX: dispatch via getattr instead of eval()
            metric_fn = getattr(self, k)
            res = np.round(metric_fn(y_true, y_pred, **config_dict), digit)
            # record the result
            res_dict[k] = res
        if inplace:
            self.metrics_dict = res_dict
        else:
            return res_dict

    @staticmethod
    def MAE(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average'):
        """Mean absolute error (delegates to scikit-learn)."""
        return sklearn.metrics.mean_absolute_error(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput)

    @staticmethod
    def RMSE(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', squared=False):
        """Root mean squared error (squared=False) via scikit-learn."""
        return sklearn.metrics.mean_squared_error(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput, squared=squared)

    @staticmethod
    def DS(y_true, y_pred, version='selfmade', **kwargs):
        """Directional symmetry as a percentage of steps where y_true and
        y_pred move in the same direction."""
        # check & initialization
        version = version.lower()
        assert version in ['selfmade', 'bd']
        y_true, y_pred = np.array(y_true), np.array(y_pred)
        if version == 'bd':
            return MetricsCls._BD_DS(y_true, y_pred, **kwargs)
        assert version == 'selfmade'
        # matrices case is also valid
        d = np.diff(y_true, axis=0) * np.diff(y_pred, axis=0) > 0
        return 100.0 * d.sum() / len(d.reshape(-1))

    @staticmethod
    def _BD_DS(y_true, y_pred, tolerance=0.01):
        """Business Desk version DS (version=='bd')."""
        # check
        if tolerance < 0:
            raise ValueError('Tolerance cannot be less than zero!')
        # define common variables
        y_true, y_pred = np.array(y_true), np.array(y_pred)
        true_diff = np.diff(y_true, axis=0)
        pred_diff = np.diff(y_pred, axis=0)
        # case not zero: treat |%change| below the tolerance as "no move"
        if tolerance != 0:
            tmp = pd.DataFrame(y_true).pct_change().iloc[1:, :]
            mask = np.array(tmp.abs() < tolerance)
            true_diff[mask] = 0
            tmp = pd.DataFrame(y_pred).pct_change().iloc[1:, :]
            mask = np.array(tmp.abs() < tolerance)
            pred_diff[mask] = 0
        # core formula
        d = (true_diff * pred_diff) > 0
        ## case of plateau for both y_true, y_pred
        d[(true_diff == 0) & (pred_diff == 0)] = 1
        dsymm = np.round(100 * d.sum() / d.size, 2)
        return dsymm

    @staticmethod
    def MAPE(y_true, y_pred, sample_weight=None, multioutput='uniform_average', version='sklearn'):
        """Mean absolute percentage error; `version` selects the backend."""
        version = version.lower()
        assert version in ['sklearn', 'selfmade']
        if version == 'sklearn':
            # BUG FIX: sklearn declares sample_weight/multioutput as
            # keyword-only for mean_absolute_percentage_error; the old
            # positional call raised TypeError.
            return sklearn.metrics.mean_absolute_percentage_error(
                y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput)
        elif version == 'selfmade':
            return MetricsCls._selfmade_MAPE(y_true, y_pred, sample_weight, multioutput)

    @staticmethod
    def _selfmade_MAPE(y_true, y_pred, sample_weight=None, multioutput='uniform_average'):
        """doesn't deal with the zero divisor case"""
        assert multioutput in ['raw_values', 'uniform_average']
        y_true, y_pred = np.array(y_true), np.array(y_pred)
        mape_array = np.abs((y_true - y_pred) / y_true)
        mape = np.average(mape_array, weights=sample_weight, axis=0)
        if multioutput == 'raw_values':
            return mape
        elif multioutput == 'uniform_average':
            return np.average(mape, weights=None)
"numpy.abs",
"numpy.average",
"numpy.diff",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"sklearn.metrics.mean_absolute_percentage_error",
"pandas.DataFrame",
"sklearn.metrics.mean_absolute_error",
"numpy.round"
] | [((2860, 2970), 'sklearn.metrics.mean_absolute_error', 'sklearn.metrics.mean_absolute_error', (['y_true', 'y_pred'], {'sample_weight': 'sample_weight', 'multioutput': 'multioutput'}), '(y_true, y_pred, sample_weight=\n sample_weight, multioutput=multioutput)\n', (2895, 2970), False, 'import sklearn\n'), ((3111, 3237), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['y_true', 'y_pred'], {'sample_weight': 'sample_weight', 'multioutput': 'multioutput', 'squared': 'squared'}), '(y_true, y_pred, sample_weight=\n sample_weight, multioutput=multioutput, squared=squared)\n', (3145, 3237), False, 'import sklearn\n'), ((4159, 4182), 'numpy.diff', 'np.diff', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (4166, 4182), True, 'import numpy as np\n'), ((4204, 4227), 'numpy.diff', 'np.diff', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (4211, 4227), True, 'import numpy as np\n'), ((5720, 5754), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (5726, 5754), True, 'import numpy as np\n'), ((5771, 5824), 'numpy.average', 'np.average', (['mape_array'], {'weights': 'sample_weight', 'axis': '(0)'}), '(mape_array, weights=sample_weight, axis=0)\n', (5781, 5824), True, 'import numpy as np\n'), ((2541, 2561), 'numpy.round', 'np.round', (['res', 'digit'], {}), '(res, digit)\n', (2549, 2561), True, 'import numpy as np\n'), ((3459, 3475), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (3467, 3475), True, 'import numpy as np\n'), ((3477, 3493), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (3485, 3493), True, 'import numpy as np\n'), ((4103, 4119), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (4111, 4119), True, 'import numpy as np\n'), ((4121, 4137), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4129, 4137), True, 'import numpy as np\n'), ((5176, 5270), 'sklearn.metrics.mean_absolute_percentage_error', 
'sklearn.metrics.mean_absolute_percentage_error', (['y_true', 'y_pred', 'sample_weight', 'multioutput'], {}), '(y_true, y_pred,\n sample_weight, multioutput)\n', (5222, 5270), False, 'import sklearn\n'), ((5663, 5679), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (5671, 5679), True, 'import numpy as np\n'), ((5681, 5697), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (5689, 5697), True, 'import numpy as np\n'), ((3689, 3712), 'numpy.diff', 'np.diff', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (3696, 3712), True, 'import numpy as np\n'), ((3715, 3738), 'numpy.diff', 'np.diff', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (3722, 3738), True, 'import numpy as np\n'), ((5962, 5992), 'numpy.average', 'np.average', (['mape'], {'weights': 'None'}), '(mape, weights=None)\n', (5972, 5992), True, 'import numpy as np\n'), ((4370, 4390), 'pandas.DataFrame', 'pd.DataFrame', (['y_true'], {}), '(y_true)\n', (4382, 4390), True, 'import pandas as pd\n'), ((4533, 4553), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {}), '(y_pred)\n', (4545, 4553), True, 'import pandas as pd\n')] |
import InstrumentDriver
import numpy as np
class Driver(InstrumentDriver.InstrumentWorker):
    """ This class implements a simple signal generator driver"""

    def performOpen(self, options={}):
        """Perform the operation of opening the instrument connection"""
        pass

    def performClose(self, bError=False, options={}):
        """Perform the close instrument connection operation"""
        pass

    def performSetValue(self, quant, value, sweepRate=0.0, options={}):
        """Perform the Set Value instrument operation. This function should
        return the actual value set by the instrument"""
        # simulated instrument: simply echo the requested value back
        return value

    def performGetValue(self, quant, options={}):
        """Perform the Get Value instrument operation"""
        if quant.name != 'Signal':
            # plain controls just report their current value
            return quant.getValue()
        # building the signal requires the other control values
        amp = self.getValue('Amplitude')
        freq = self.getValue('Frequency')
        phase = self.getValue('Phase')
        add_noise = self.getValue('Add noise')
        # time axis: 1000 samples spanning [0, 1]
        time = np.linspace(0, 1, 1000)
        signal = amp * np.sin(freq*time*2*np.pi + phase*np.pi/180.0)
        if add_noise:
            signal += self.getValue('Noise amplitude') * np.random.randn(len(signal))
        # wrap in a trace object that carries the timing information
        return quant.getTraceDict(signal, t0=0.0, dt=time[1]-time[0])
| [
"numpy.sin",
"numpy.linspace"
] | [((1218, 1241), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (1229, 1241), True, 'import numpy as np\n'), ((1267, 1322), 'numpy.sin', 'np.sin', (['(freq * time * 2 * np.pi + phase * np.pi / 180.0)'], {}), '(freq * time * 2 * np.pi + phase * np.pi / 180.0)\n', (1273, 1322), True, 'import numpy as np\n')] |
from typing import Union
import numpy as np
def moore_n(
    n: int, position: tuple, grid: np.ndarray, invariant: Union[int, np.ndarray] = 0
):
    """Gets the N Moore neighborhood at given position.

    Returns the (2n+1) x (2n+1) sub-grid centred on ``position``.  When the
    neighborhood lies fully inside ``grid`` the result is a slice (a view)
    of ``grid``, exactly as before; otherwise a new array is returned in
    which out-of-bounds cells hold ``invariant``.
    """
    row, col = position
    nrows, ncols = grid.shape
    # Bounding rows/cols of the requested window (may fall outside the grid).
    top, bottom = row - n, row + n
    left, right = col - n, col + n
    if top >= 0 and left >= 0 and bottom < nrows and right < ncols:
        # Current grid is enough, just return the requested values (a view).
        return grid[top:bottom + 1, left:right + 1]
    # SIMPLIFICATION: the original handled each of the four corners with a
    # separate branch ladder; pasting the single clipped overlap between
    # the window and the grid is equivalent (the union of the corner
    # paints is exactly this clipped rectangle).
    size = 2 * n + 1
    egrid = np.full((size, size), np.array(invariant, dtype=grid.dtype))
    r0, r1 = max(top, 0), min(bottom, nrows - 1)
    c0, c1 = max(left, 0), min(right, ncols - 1)
    egrid[r0 - top:r1 - top + 1, c0 - left:c1 - left + 1] = grid[r0:r1 + 1, c0:c1 + 1]
    return egrid
# Depracated: Still used as interface for CAs.
# Superseded by Moore N function.
def neighborhood_at(grid, pos, invariant=0):
    """
    Calculates the Moore's neighborhood of cell at target position 'pos'.
    The boundary conditions are invariant and set to 'empty'.
    Returns a named tuple with the values of the nighborhood cells in the following
    order: up_left, up, up_right,
           left, self, right,
           down_left, down, down_right
    """
    from collections import namedtuple

    field_names = (
        "up_left", "up", "up_right",
        "left", "self", "right",
        "down_left", "down", "down_right",
    )
    Neighbors = namedtuple("Neighbors", field_names)
    # A radius-1 Moore window flattened row-major matches the field order.
    cells = moore_n(1, pos, grid, invariant).flatten().tolist()
    return Neighbors(*cells)
| [
"numpy.array",
"collections.namedtuple",
"numpy.repeat"
] | [((5197, 5315), 'collections.namedtuple', 'namedtuple', (['"""Neighbors"""', "['up_left', 'up', 'up_right', 'left', 'self', 'right', 'down_left', 'down',\n 'down_right']"], {}), "('Neighbors', ['up_left', 'up', 'up_right', 'left', 'self',\n 'right', 'down_left', 'down', 'down_right'])\n", (5207, 5315), False, 'from collections import namedtuple\n'), ((321, 339), 'numpy.array', 'np.array', (['[-n, +n]'], {}), '([-n, +n])\n', (329, 339), True, 'import numpy as np\n'), ((363, 381), 'numpy.array', 'np.array', (['[-n, +n]'], {}), '([-n, +n])\n', (371, 381), True, 'import numpy as np\n'), ((663, 700), 'numpy.array', 'np.array', (['invariant'], {'dtype': 'grid.dtype'}), '(invariant, dtype=grid.dtype)\n', (671, 700), True, 'import numpy as np\n'), ((839, 868), 'numpy.repeat', 'np.repeat', (['invariant', '(ln * ln)'], {}), '(invariant, ln * ln)\n', (848, 868), True, 'import numpy as np\n')] |
from nose.plugins.attrib import attr
import os
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import trackpy
from trackpy import plots
from trackpy.utils import suppress_plotting, fit_powerlaw
from trackpy.tests.common import StrictTestCase
import nose
# Quiet warnings about Axes not being compatible with tight_layout
import warnings
warnings.filterwarnings("ignore", message="This figure includes Axes that are not compatible with tight_layout")
path, _ = os.path.split(os.path.abspath(__file__))
try:
import pims
except ImportError:
PIMS_AVAILABLE = False
else:
PIMS_AVAILABLE = True
def _skip_if_no_pims():
    """Skip the current test when the optional PIMS dependency is missing."""
    if PIMS_AVAILABLE:
        return
    raise nose.SkipTest('PIMS not installed. Skipping.')
class TestPlots(StrictTestCase):
    """Smoke and error-path tests for trackpy's plotting helpers."""

    def setUp(self):
        # older matplotlib may raise an invalid error
        np.seterr(invalid='ignore')
        self.sparse = pd.read_pickle(os.path.join(path, 'data',
                                                'sparse_trajectories.df'))

    @attr('slow')
    def test_labeling_sparse_trajectories(self):
        suppress_plotting()
        plots.plot_traj(self.sparse, label=True)

    def test_ptraj_empty(self):
        # Plotting an empty trajectory table must raise.
        suppress_plotting()
        empty = DataFrame(columns=self.sparse.columns)
        self.assertRaises(ValueError, plots.plot_traj, empty)

    def test_ptraj_unicode_labels(self):
        # smoke test
        plots.plot_traj(self.sparse, mpp=0.5)

    def test_ptraj_t_column(self):
        # plot_traj should accept a non-default frame-column name.
        suppress_plotting()
        df = self.sparse.copy()
        renamed = list(df.columns)
        renamed[renamed.index('frame')] = 'arbitrary name'
        df.columns = renamed
        plots.plot_traj(df, t_column='arbitrary name')

    def test_annotate(self):
        suppress_plotting()
        f = DataFrame({'x': [0, 1], 'y': [0, 1], 'frame': [0, 0],
                       'mass': [10, 20]})
        frame = np.random.randint(0, 255, (5, 5))

        # Basic usage
        plots.annotate(f, frame)
        plots.annotate(f, frame, color='r')

        # Coloring by threshold
        plots.annotate(f, frame, split_category='mass',
                       split_thresh=15, color=['r', 'g'])
        plots.annotate(f, frame, split_category='mass',
                       split_thresh=[15], color=['r', 'g'])
        plots.annotate(f, frame, split_category='mass',
                       split_thresh=[15, 25], color=['r', 'g', 'b'])

        # Check that bad parameters raise an error.
        # Too many colors
        self.assertRaises(ValueError, plots.annotate, f, frame,
                          split_category='mass', split_thresh=15,
                          color=['r', 'g', 'b'])
        # Not enough colors
        self.assertRaises(ValueError, plots.annotate, f, frame,
                          split_category='mass', split_thresh=15,
                          color=['r'])
        self.assertRaises(ValueError, plots.annotate, f, frame,
                          split_category='mass', split_thresh=15,
                          color='r')
        # Nonexistent column name for split_category
        self.assertRaises(ValueError, plots.annotate, f, frame,
                          split_category='not a column', split_thresh=15,
                          color='r')
        # 3D image
        self.assertRaises(ValueError, plots.annotate, f,
                          frame[np.newaxis, :, :])

    def test_annotate3d(self):
        _skip_if_no_pims()
        suppress_plotting()
        f = DataFrame({'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'frame': [0, 0],
                       'mass': [10, 20]})
        frame = np.random.randint(0, 255, (5, 5, 5))

        plots.annotate3d(f, frame)
        plots.annotate3d(f, frame, color='r')

        # 2D image
        self.assertRaises(ValueError, plots.annotate3d, f, frame[0])

        # Rest of the functionality is covered by annotate tests

    def test_fit_powerlaw(self):
        # smoke test
        suppress_plotting()
        em = Series([1, 2, 3], index=[1, 2, 3])
        fit_powerlaw(em)
        fit_powerlaw(em, plot=False)
if __name__ == '__main__':
    import nose
    # Run this module's tests under nose: verbose (-vvs), stop at first
    # failure (-x), and drop into pdb on errors and failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| [
"pandas.Series",
"trackpy.plots.plot_traj",
"nose.plugins.attrib.attr",
"pandas.DataFrame",
"os.path.join",
"trackpy.utils.fit_powerlaw",
"nose.runmodule",
"nose.SkipTest",
"numpy.random.randint",
"trackpy.utils.suppress_plotting",
"trackpy.plots.annotate3d",
"trackpy.plots.annotate",
"os.pa... | [((369, 486), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""This figure includes Axes that are not compatible with tight_layout"""'}), "('ignore', message=\n 'This figure includes Axes that are not compatible with tight_layout')\n", (392, 486), False, 'import warnings\n'), ((507, 532), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (522, 532), False, 'import os\n'), ((1036, 1048), 'nose.plugins.attrib.attr', 'attr', (['"""slow"""'], {}), "('slow')\n", (1040, 1048), False, 'from nose.plugins.attrib import attr\n'), ((4181, 4268), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (4195, 4268), False, 'import nose\n'), ((702, 748), 'nose.SkipTest', 'nose.SkipTest', (['"""PIMS not installed. Skipping."""'], {}), "('PIMS not installed. Skipping.')\n", (715, 748), False, 'import nose\n'), ((867, 894), 'numpy.seterr', 'np.seterr', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (876, 894), True, 'import numpy as np\n'), ((1106, 1125), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (1123, 1125), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((1134, 1174), 'trackpy.plots.plot_traj', 'plots.plot_traj', (['self.sparse'], {'label': '(True)'}), '(self.sparse, label=True)\n', (1149, 1174), False, 'from trackpy import plots\n'), ((1216, 1235), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (1233, 1235), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((1424, 1461), 'trackpy.plots.plot_traj', 'plots.plot_traj', (['self.sparse'], {'mpp': '(0.5)'}), '(self.sparse, mpp=0.5)\n', (1439, 1461), False, 'from trackpy import plots\n'), ((1506, 1525), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (1523, 1525), False, 'from 
trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((1677, 1723), 'trackpy.plots.plot_traj', 'plots.plot_traj', (['df'], {'t_column': '"""arbitrary name"""'}), "(df, t_column='arbitrary name')\n", (1692, 1723), False, 'from trackpy import plots\n'), ((1762, 1781), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (1779, 1781), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((1794, 1866), 'pandas.DataFrame', 'DataFrame', (["{'x': [0, 1], 'y': [0, 1], 'frame': [0, 0], 'mass': [10, 20]}"], {}), "({'x': [0, 1], 'y': [0, 1], 'frame': [0, 0], 'mass': [10, 20]})\n", (1803, 1866), False, 'from pandas import Series, DataFrame\n'), ((1905, 1938), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(5, 5)'], {}), '(0, 255, (5, 5))\n', (1922, 1938), True, 'import numpy as np\n'), ((1970, 1994), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {}), '(f, frame)\n', (1984, 1994), False, 'from trackpy import plots\n'), ((2003, 2038), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'color': '"""r"""'}), "(f, frame, color='r')\n", (2017, 2038), False, 'from trackpy import plots\n'), ((2080, 2166), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '(15)', 'color': "['r', 'g']"}), "(f, frame, split_category='mass', split_thresh=15, color=['r',\n 'g'])\n", (2094, 2166), False, 'from trackpy import plots\n'), ((2194, 2283), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '[15]', 'color': "['r', 'g']"}), "(f, frame, split_category='mass', split_thresh=[15], color=[\n 'r', 'g'])\n", (2208, 2283), False, 'from trackpy import plots\n'), ((2310, 2407), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '[15, 25]', 'color': "['r', 'g', 'b']"}), "(f, frame, split_category='mass', split_thresh=[15, 25],\n color=['r', 'g', 
'b'])\n", (2324, 2407), False, 'from trackpy import plots\n'), ((3473, 3492), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (3490, 3492), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((3505, 3594), 'pandas.DataFrame', 'DataFrame', (["{'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'frame': [0, 0], 'mass': [10, 20]}"], {}), "({'x': [0, 1], 'y': [0, 1], 'z': [0, 1], 'frame': [0, 0], 'mass':\n [10, 20]})\n", (3514, 3594), False, 'from pandas import Series, DataFrame\n'), ((3629, 3665), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(5, 5, 5)'], {}), '(0, 255, (5, 5, 5))\n', (3646, 3665), True, 'import numpy as np\n'), ((3675, 3701), 'trackpy.plots.annotate3d', 'plots.annotate3d', (['f', 'frame'], {}), '(f, frame)\n', (3691, 3701), False, 'from trackpy import plots\n'), ((3710, 3747), 'trackpy.plots.annotate3d', 'plots.annotate3d', (['f', 'frame'], {'color': '"""r"""'}), "(f, frame, color='r')\n", (3726, 3747), False, 'from trackpy import plots\n'), ((4002, 4021), 'trackpy.utils.suppress_plotting', 'suppress_plotting', ([], {}), '()\n', (4019, 4021), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((4035, 4069), 'pandas.Series', 'Series', (['[1, 2, 3]'], {'index': '[1, 2, 3]'}), '([1, 2, 3], index=[1, 2, 3])\n', (4041, 4069), False, 'from pandas import Series, DataFrame\n'), ((4078, 4094), 'trackpy.utils.fit_powerlaw', 'fit_powerlaw', (['em'], {}), '(em)\n', (4090, 4094), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((4103, 4131), 'trackpy.utils.fit_powerlaw', 'fit_powerlaw', (['em'], {'plot': '(False)'}), '(em, plot=False)\n', (4115, 4131), False, 'from trackpy.utils import suppress_plotting, fit_powerlaw\n'), ((932, 984), 'os.path.join', 'os.path.join', (['path', '"""data"""', '"""sparse_trajectories.df"""'], {}), "(path, 'data', 'sparse_trajectories.df')\n", (944, 984), False, 'import os\n'), ((2534, 2625), 'trackpy.plots.annotate', 'plots.annotate', 
(['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '(15)', 'color': "['r', 'g', 'b']"}), "(f, frame, split_category='mass', split_thresh=15, color=['r',\n 'g', 'b'])\n", (2548, 2625), False, 'from trackpy import plots\n'), ((2739, 2816), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '(15)', 'color': "['r']"}), "(f, frame, split_category='mass', split_thresh=15, color=['r'])\n", (2753, 2816), False, 'from trackpy import plots\n'), ((2905, 2980), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""mass"""', 'split_thresh': '(15)', 'color': '"""r"""'}), "(f, frame, split_category='mass', split_thresh=15, color='r')\n", (2919, 2980), False, 'from trackpy import plots\n'), ((3123, 3210), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame'], {'split_category': '"""not a column"""', 'split_thresh': '(15)', 'color': '"""r"""'}), "(f, frame, split_category='not a column', split_thresh=15,\n color='r')\n", (3137, 3210), False, 'from trackpy import plots\n'), ((3315, 3357), 'trackpy.plots.annotate', 'plots.annotate', (['f', 'frame[np.newaxis, :, :]'], {}), '(f, frame[np.newaxis, :, :])\n', (3329, 3357), False, 'from trackpy import plots\n'), ((3795, 3824), 'trackpy.plots.annotate3d', 'plots.annotate3d', (['f', 'frame[0]'], {}), '(f, frame[0])\n', (3811, 3824), False, 'from trackpy import plots\n'), ((1272, 1310), 'pandas.DataFrame', 'DataFrame', ([], {'columns': 'self.sparse.columns'}), '(columns=self.sparse.columns)\n', (1281, 1310), False, 'from pandas import Series, DataFrame\n')] |
import torch
import os
import time
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from torch import optim
from torch import nn
from torchvision.utils import save_image
class Base:
    """Base trainer class.

    Subclasses are expected to provide:
      - ``prepare_batch``, ``train_on_instance``, ``eval_on_instance``,
        ``save`` methods;
      - ``self.optim`` (dict of optimizers), ``self.schedulers``,
        ``self.handlers`` and ``self.last_epoch`` attributes.
    """

    def _get_stats(self, dict_, mode):
        """Reduce each list of per-batch values in `dict_` to its mean.

        `mode` is unused here; it is kept for interface symmetry with callers
        that pass 'train' / 'valid'.
        """
        stats = OrderedDict({})
        for key in dict_.keys():
            stats[key] = np.mean(dict_[key])
        return stats

    def train(self,
              itr_train,
              itr_valid,
              epochs,
              model_dir,
              result_dir,
              save_every=1,
              scheduler_fn=None,
              scheduler_args=None):
        """Run the training loop for `epochs` epochs.

        Args:
            itr_train: iterable of training batches.
            itr_valid: iterable of validation batches.
            epochs: total number of epochs to train to (training resumes
                from ``self.last_epoch``).
            model_dir: directory for model checkpoints (or None to disable).
            result_dir: directory for the results.txt CSV log (or None).
            save_every: checkpoint every `save_every` epochs.
            scheduler_fn: unused; kept for backward compatibility.
            scheduler_args: unused; kept for backward compatibility.
                Bug fix: the default was a mutable ``{}``; it is now None.
        """
        for folder_name in [model_dir, result_dir]:
            if folder_name is not None and not os.path.exists(folder_name):
                os.makedirs(folder_name)
        # Append to an existing results file, otherwise start a new one.
        f_mode = 'w' if not os.path.exists("%s/results.txt" % result_dir) else 'a'
        f = None
        if result_dir is not None:
            f = open("%s/results.txt" % result_dir, f_mode)
        if 'SLURM_JOB_NAME' in os.environ and os.environ['SLURM_JOB_NAME'] not in ['bash', 'sh']:
            # If this is an sbatch job, don't make it verbose
            verbose = False
        else:
            verbose = True
        for epoch in range(self.last_epoch, epochs):
            epoch_start_time = time.time()
            # Training.
            if verbose:
                pbar = tqdm(total=len(itr_train))
            train_dict = OrderedDict({'epoch': epoch+1})
            # item, pose, id
            for b, batch in enumerate(itr_train):
                batch = self.prepare_batch(batch)
                losses, outputs = self.train_on_instance(*batch,
                                                         iter=b+1)
                # Accumulate per-batch losses under 'train_'-prefixed keys.
                for key in losses:
                    this_key = 'train_%s' % key
                    if this_key not in train_dict:
                        train_dict[this_key] = []
                    train_dict[this_key].append(losses[key])
                if verbose:
                    pbar.update(1)
                    pbar.set_postfix(self._get_stats(train_dict, 'train'))
                # Process handlers.
                for handler_fn in self.handlers:
                    handler_dict = handler_fn(losses, batch, outputs,
                                              {'epoch':epoch+1, 'iter':b+1, 'mode':'train'})
                    for key in handler_dict.keys():
                        this_key = 'train_%s' % key
                        if this_key not in train_dict:
                            train_dict[this_key] = []
                        train_dict[this_key].append(handler_dict[key])
            if verbose:
                pbar.close()
            valid_dict = {}
            # TODO: enable valid
            if verbose:
                pbar = tqdm(total=len(itr_valid))
            # Validation.
            valid_dict = OrderedDict({})
            for b, valid_batch in enumerate(itr_valid):
                valid_batch = self.prepare_batch(valid_batch)
                valid_losses, valid_outputs = self.eval_on_instance(*valid_batch,
                                                                    iter=b+1)
                for key in valid_losses:
                    this_key = 'valid_%s' % key
                    if this_key not in valid_dict:
                        valid_dict[this_key] = []
                    valid_dict[this_key].append(valid_losses[key])
                if verbose:
                    pbar.update(1)
                    pbar.set_postfix(self._get_stats(valid_dict, 'valid'))
                # Process handlers.
                for handler_fn in self.handlers:
                    handler_dict = handler_fn(valid_losses, valid_batch, valid_outputs,
                                              {'epoch':epoch+1, 'iter':b+1, 'mode':'valid'})
                    for key in handler_dict.keys():
                        this_key = 'valid_%s' % key
                        if this_key not in valid_dict:
                            valid_dict[this_key] = []
                        valid_dict[this_key].append(handler_dict[key])
            if verbose:
                pbar.close()
            # Step learning rates.
            for sched in self.schedulers:
                sched.step()
            # Update dictionary of values.
            all_dict = train_dict
            all_dict.update(valid_dict)
            for key in all_dict:
                all_dict[key] = np.mean(all_dict[key])
            # Record the current learning rate of each optimizer.
            for key in self.optim:
                all_dict["lr_%s" % key] = \
                    self.optim[key].state_dict()['param_groups'][0]['lr']
            all_dict['time'] = time.time() - epoch_start_time
            str_ = ",".join([str(all_dict[key]) for key in all_dict])
            print(str_)
            if result_dir is not None:
                # Write the CSV header once, on the very first epoch.
                if (epoch+1) == 1:
                    f.write(",".join(all_dict.keys()) + "\n")
                f.write(str_ + "\n")
                f.flush()
            if (epoch+1) % save_every == 0 and model_dir is not None:
                self.save(filename="%s/%i.pkl" % (model_dir, epoch+1),
                          epoch=epoch+1)
        if f is not None:
            f.close()
| [
"numpy.mean",
"collections.OrderedDict",
"os.path.exists",
"os.makedirs",
"time.time"
] | [((267, 282), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (278, 282), False, 'from collections import OrderedDict\n'), ((341, 360), 'numpy.mean', 'np.mean', (['dict_[key]'], {}), '(dict_[key])\n', (348, 360), True, 'import numpy as np\n'), ((1298, 1309), 'time.time', 'time.time', ([], {}), '()\n', (1307, 1309), False, 'import time\n'), ((1433, 1466), 'collections.OrderedDict', 'OrderedDict', (["{'epoch': epoch + 1}"], {}), "({'epoch': epoch + 1})\n", (1444, 1466), False, 'from collections import OrderedDict\n'), ((2881, 2896), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (2892, 2896), False, 'from collections import OrderedDict\n'), ((765, 789), 'os.makedirs', 'os.makedirs', (['folder_name'], {}), '(folder_name)\n', (776, 789), False, 'import os\n'), ((818, 863), 'os.path.exists', 'os.path.exists', (["('%s/results.txt' % result_dir)"], {}), "('%s/results.txt' % result_dir)\n", (832, 863), False, 'import os\n'), ((4462, 4484), 'numpy.mean', 'np.mean', (['all_dict[key]'], {}), '(all_dict[key])\n', (4469, 4484), True, 'import numpy as np\n'), ((4673, 4684), 'time.time', 'time.time', ([], {}), '()\n', (4682, 4684), False, 'import time\n'), ((720, 747), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (734, 747), False, 'import os\n')] |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Library with functions required to generate LNS imitation data for one MIP."""
import collections as py_collections
import os
import pickle
from typing import Any, Dict, Optional, Sequence, Text
from absl import logging
import ml_collections
import numpy as np
from neural_lns import data_utils
from neural_lns import local_branching_expert
from neural_lns import mip_utils
# LP feature extraction needs to fully process the root node, so allow enough
# time for that.
MIN_SOLVE_TIME = 1800
# SCIP solving parameters
SCIP_SOLVING_PARAMS = ml_collections.ConfigDict({
'seed': 42,
'time_limit_seconds': 1800,
'relative_gap': 0
})
def get_incumbent(
    instance_name: Text,
    dataset: Text,
    solution_index: int) -> Optional['mip_utils.MPSolutionResponse']:
  """Retrieves a stored solution for the MIP from its pickle file.

  Args:
    instance_name: Name of the MIP instance (pickle file inside `dataset`).
    dataset: Directory containing the pickled solution lists.
    solution_index: Index of the solution to return.

  Returns:
    The solution stored at `solution_index`.

  Raises:
    ValueError: If fewer than `solution_index + 1` solutions are stored.
  """
  instance_path = os.path.join(dataset, instance_name)
  # Bug fix: use a context manager so the file handle is always closed
  # (the original `pickle.load(open(...))` leaked the handle).
  with open(instance_path, 'rb') as f:
    solutions = pickle.load(f)
  if len(solutions) <= solution_index:
    raise ValueError(
        f'Fewer than {solution_index+1} solutions found for {instance_name}')
  return solutions[solution_index]
def get_flipped_vars(mip: 'mip_utils.MPModel',
                     incumbent: 'mip_utils.MPSolutionResponse',
                     improved: 'mip_utils.MPSolutionResponse',
                     var_names: np.ndarray) -> np.ndarray:
  """Returns a boolean array indicating which binary variables were flipped.

  Args:
    mip: The MIP model whose variables are inspected.
    incumbent: Solution before the improvement step.
    improved: Solution after the improvement step.
    var_names: Variable names (bytes) in feature order; the output follows
      this ordering.

  Returns:
    Boolean array aligned with `var_names`; True where a binary variable's
    rounded value changed between `incumbent` and `improved`.
  """
  is_flipped = {}
  # Note that non-binary variables are always assigned a 0.
  for idx, variable in enumerate(mip.variable):
    if (mip_utils.is_var_binary(variable) and round(
        incumbent.variable_value[idx]) != round(improved.variable_value[idx])):
      is_flipped[variable.name] = 1.0
    else:
      is_flipped[variable.name] = 0.0
  # Make sure the array has the variables in the order in which they appear in
  # the features. Bug fix: np.bool was deprecated in NumPy 1.20 and removed in
  # 1.24; the builtin `bool` is the correct dtype here.
  is_flipped_reordered = np.zeros(len(var_names), dtype=bool)
  for idx, var_name in enumerate(var_names):
    if 'Constant' in var_name.decode():
      is_flipped_reordered[idx] = 0.0
    else:
      is_flipped_reordered[idx] = is_flipped[var_name.decode()]
  return is_flipped_reordered
def enhance_root_features(
    root_features: Dict[str, Any],
    incumbents: Sequence[Any],
    lp_sol: Optional[Any] = None
) -> Dict[str, Any]:
  """Adds incumbent var values and integer mask to the feature array.

  This accepts a list of up to NUM_PAST_INCUMBENTS past incumbents,
  sorted from most recent to least. Each incumbent will introduce two columns
  to the features: The first column represents the incumbent variable values,
  and the second one is a all-ones column indicating that the incumbent is
  present in the features.

  A final column is added to the end that masks out continuous variables.

  Args:
    root_features: Root features without incumbent information.
    incumbents: List of past incumbents, ordered by most recent first.
    lp_sol: solution to the LP relaxation of the LNS MIP solved by the expert.

  Returns:
    Updated features dict.

  Raises:
    ValueError: If more than NUM_PAST_INCUMBENTS incumbents are supplied.
  """
  if len(incumbents) > data_utils.NUM_PAST_INCUMBENTS:
    # Bug fix: the original message said 'not sufficient', but this branch
    # fires when there are too MANY incumbents to fit in the feature layout.
    raise ValueError(
        f'Too many past incumbents provided: {len(incumbents)} '
        f'(maximum is {data_utils.NUM_PAST_INCUMBENTS})')

  # Fill columns corresponding to incumbents
  for idx, incumbent in enumerate(incumbents):
    # Each incumbent occupies two adjacent columns after the root features.
    column = data_utils.NUM_ROOT_VARIABLE_FEATURES + 2 * idx
    incumbent_values = np.array(
        [incumbent[var_name.decode()]
         for var_name in root_features['variable_names']],
        dtype=root_features['variable_features'].dtype)

    # Override features column corresponding to incumbent values.
    root_features['variable_features'][:, column] = incumbent_values

    # Override features column corresponding to incumbent presence indicator.
    root_features['variable_features'][:, column + 1] = np.ones(
        len(incumbent_values))

  if lp_sol is not None:
    lp_sol_values = np.array([
        lp_sol[var_name.decode()]
        for var_name in root_features['variable_names']
    ],
                             dtype=root_features['variable_features'].dtype)
    # The LP solution column sits right after all incumbent column pairs.
    lp_sol_column_index = data_utils.NUM_ROOT_VARIABLE_FEATURES + 2 * len(
        incumbents)
    root_features['variable_features'][:, lp_sol_column_index] = lp_sol_values

  # The last column masks out the continuous variables.
  integer_values_mask = np.ones(len(root_features['variable_names']),
                                dtype=root_features['variable_features'].dtype)
  for idx, _ in enumerate(integer_values_mask):
    if idx not in root_features['all_integer_variable_indices']:
      integer_values_mask[idx] = 0.0

  root_features['variable_features'][:, -1] = integer_values_mask
  return root_features
def generate_data_for_instance(
    instance_name: Text,
    dataset: Text,
    neighbourhood_size: int = 20,
    percentage: bool = False,
    sequence_length: int = 10,
    add_incumbent_to_scip: bool = True,
    solution_index: int = 0,
    scip_params: ml_collections.ConfigDict = SCIP_SOLVING_PARAMS,
    num_var_features: int = data_utils.NUM_VARIABLE_FEATURES) -> int:
  """Generates data from which we learn to imitate the expert.

  This loads a MIP instance from a pickle file and generates the expert data.

  Args:
    instance_name: The name of the MIP instance.
    dataset: Dataset name that the instance belongs to.
    neighbourhood_size: Maximum Hamming dist to search.
    percentage: Whether neighbourhood_size should be treated as a percentage
      of total number of variables.
    sequence_length: How many consecutive improvements to do.
    add_incumbent_to_scip: Whether to feed SCIP the incumbent solution.
    solution_index: Which of the solutions to use as the first incumbent.
    scip_params: Dictionary of SCIP parameters to use.
    num_var_features: Number of features, NUM_VARIABLE_FEATURES or
      NUM_VARIABLE_FEATURES_LP.

  Returns:
    status: 1 if expert data generation was successful, 0 otherwise.
  """
  # Bug fix: load the MIP via a context manager so the file handle is closed
  # (the original `pickle.load(open(...))` leaked the handle).
  with open(os.path.join(dataset, instance_name), 'rb') as f:
    mip = pickle.load(f)

  if percentage:
    # Interpret neighbourhood_size as a percentage of the integer variables.
    num_integer = 0
    for var in mip.variable:
      if var.is_integer:
        num_integer += 1
    neighbourhood_size = int(num_integer * neighbourhood_size / 100)

  try:
    incumbent = get_incumbent(instance_name, dataset, solution_index)
  except ValueError:
    logging.warning('No solution found for %s', instance_name)
    return 0

  root_features = data_utils.get_features(mip, scip_params)
  if root_features is None or root_features['variable_features'] is None:
    logging.warning('No root features found for %s', instance_name)
    return 0

  # Append dummy columns to the variable features, which is where we will put
  # the past incumbent solutions and the mask for assigned values at each step.
  num_extra_var_features = num_var_features - data_utils.NUM_ROOT_VARIABLE_FEATURES
  dummy_columns = np.zeros((root_features['variable_features'].shape[0],
                            num_extra_var_features),
                           dtype=root_features['variable_features'].dtype)
  if root_features is not None:
    root_features['variable_features'] = np.concatenate(
        [root_features['variable_features'], dummy_columns], axis=1)
    assert root_features['variable_features'].shape[
        1] == data_utils.NUM_VARIABLE_FEATURES

  status = 1
  past_incumbents = py_collections.deque([incumbent])
  for step in range(sequence_length):
    # The most recent incumbent is always at the front of the deque.
    incumbent = past_incumbents[0]
    improved_sol = local_branching_expert.improve_solution(
        mip, incumbent, neighbourhood_size, scip_params,
        add_incumbent_to_scip=add_incumbent_to_scip)
    lp_sol = local_branching_expert.get_lns_lp_solution(
        mip, incumbent, neighbourhood_size, scip_params)

    if improved_sol is None:
      # In case of solver failure, print a warning and break.
      logging.warning('Solver failed for MIP %s at step %d ',
                      instance_name, step)
      status = 0
      break

    # Add features corresponding to the incumbent solution and integer mask.
    # NB This will overwrite the last column of the variable features.
    features = enhance_root_features(root_features, past_incumbents, lp_sol)

    # Figure out which variables were flipped between incumbent and improved.
    features['best_solution_labels'] = get_flipped_vars(
        mip, incumbent, improved_sol, features['variable_names'])

    # Add new incumbent to incumbent list, and prune to size if necessary
    past_incumbents.appendleft(improved_sol)
    if len(past_incumbents) > data_utils.NUM_PAST_INCUMBENTS:
      past_incumbents.pop()

  return status
| [
"neural_lns.data_utils.get_features",
"collections.deque",
"neural_lns.mip_utils.is_var_binary",
"neural_lns.local_branching_expert.get_lns_lp_solution",
"os.path.join",
"ml_collections.ConfigDict",
"absl.logging.warning",
"numpy.zeros",
"numpy.concatenate",
"neural_lns.local_branching_expert.impr... | [((1221, 1311), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (["{'seed': 42, 'time_limit_seconds': 1800, 'relative_gap': 0}"], {}), "({'seed': 42, 'time_limit_seconds': 1800,\n 'relative_gap': 0})\n", (1246, 1311), False, 'import ml_collections\n'), ((1554, 1590), 'os.path.join', 'os.path.join', (['dataset', 'instance_name'], {}), '(dataset, instance_name)\n', (1566, 1590), False, 'import os\n'), ((7172, 7213), 'neural_lns.data_utils.get_features', 'data_utils.get_features', (['mip', 'scip_params'], {}), '(mip, scip_params)\n', (7195, 7213), False, 'from neural_lns import data_utils\n'), ((7630, 7761), 'numpy.zeros', 'np.zeros', (["(root_features['variable_features'].shape[0], num_extra_var_features)"], {'dtype': "root_features['variable_features'].dtype"}), "((root_features['variable_features'].shape[0],\n num_extra_var_features), dtype=root_features['variable_features'].dtype)\n", (7638, 7761), True, 'import numpy as np\n'), ((8107, 8140), 'collections.deque', 'py_collections.deque', (['[incumbent]'], {}), '([incumbent])\n', (8127, 8140), True, 'import collections as py_collections\n'), ((7292, 7355), 'absl.logging.warning', 'logging.warning', (['"""No root features found for %s"""', 'instance_name'], {}), "('No root features found for %s', instance_name)\n", (7307, 7355), False, 'from absl import logging\n'), ((7887, 7962), 'numpy.concatenate', 'np.concatenate', (["[root_features['variable_features'], dummy_columns]"], {'axis': '(1)'}), "([root_features['variable_features'], dummy_columns], axis=1)\n", (7901, 7962), True, 'import numpy as np\n'), ((8234, 8371), 'neural_lns.local_branching_expert.improve_solution', 'local_branching_expert.improve_solution', (['mip', 'incumbent', 'neighbourhood_size', 'scip_params'], {'add_incumbent_to_scip': 'add_incumbent_to_scip'}), '(mip, incumbent, neighbourhood_size,\n scip_params, add_incumbent_to_scip=add_incumbent_to_scip)\n', (8273, 8371), False, 'from neural_lns 
import local_branching_expert\n'), ((8399, 8494), 'neural_lns.local_branching_expert.get_lns_lp_solution', 'local_branching_expert.get_lns_lp_solution', (['mip', 'incumbent', 'neighbourhood_size', 'scip_params'], {}), '(mip, incumbent,\n neighbourhood_size, scip_params)\n', (8441, 8494), False, 'from neural_lns import local_branching_expert\n'), ((2289, 2322), 'neural_lns.mip_utils.is_var_binary', 'mip_utils.is_var_binary', (['variable'], {}), '(variable)\n', (2312, 2322), False, 'from neural_lns import mip_utils\n'), ((6748, 6784), 'os.path.join', 'os.path.join', (['dataset', 'instance_name'], {}), '(dataset, instance_name)\n', (6760, 6784), False, 'import os\n'), ((7081, 7139), 'absl.logging.warning', 'logging.warning', (['"""No solution found for %s"""', 'instance_name'], {}), "('No solution found for %s', instance_name)\n", (7096, 7139), False, 'from absl import logging\n'), ((8598, 8674), 'absl.logging.warning', 'logging.warning', (['"""Solver failed for MIP %s at step %d """', 'instance_name', 'step'], {}), "('Solver failed for MIP %s at step %d ', instance_name, step)\n", (8613, 8674), False, 'from absl import logging\n')] |
import os
import pandas as pd
import numpy as np
import pickle
import argparse
## torch packages
import torch
from transformers import BertTokenizer,AutoTokenizer
import re
## for visualisation
import matplotlib.pyplot as plt
import collections
## custom packages
from extract_lexicon import get_arousal_vec,get_valence_vec,get_dom_vec
from utils import flatten_list,tweet_preprocess
from label_dict import ed_label_dict as emo_map
from label_dict import ed_emo_dict as emo_map_inverse
def get_one_hot(emo, class_size):
    """Build a multi-hot vector (list of floats) from a label string.

    `emo` is a comma-separated string of integer label indices and
    `class_size` is the length of the returned vector.
    """
    targets = np.zeros(class_size)
    indices = [int(token) for token in emo.split(",")]
    # Fancy indexing sets every listed position to 1 in one shot.
    targets[indices] = 1
    return list(targets)
def get_speaker_info(speaker_id):
    """Map an utterance id to a role flag: 1 for listener (even id),
    0 for speaker (odd id)."""
    return 1 if int(speaker_id) % 2 == 0 else 0
def data_reader(data_folder, datatype, save=True):
    """Read and preprocess raw EmpatheticDialogues CSV data.

    Args:
        data_folder: Directory containing "{datatype}.csv".
        datatype: Dataset split name, e.g. "train", "valid" or "test".
        save: Unused; kept for backward compatibility (the original
            docstring mentioned pickling, but no saving is performed here).

    Returns:
        Dict with keys: prompt, utterance_data_list, utterance_data,
        speaker_info, emotion.
    """
    print("Datatype:", datatype)
    ongoing_utterance_list = []
    ids = []
    speaker_info = []
    data = {'prompt': [], 'utterance_data_list': [], 'utterance_data': [], 'utterance_id': [], "speaker_info": [], 'emotion_label': [], 'emotion': []}

    def _record_conversation(parts):
        # Finalize the current conversation, with `parts` as its last utterance.
        # Reads the enclosing accumulators (never rebinds them), so it always
        # sees the lists belonging to the conversation in progress.
        utterance_str = parts[5].replace("_comma_", ",")
        ongoing_utterance_list.append(utterance_str)
        ids.append((parts[0], parts[1]))
        speaker_info.append(get_speaker_info(parts[1]))
        data["prompt"].append(parts[3].replace("_comma_", ","))
        data["utterance_data_list"].append(ongoing_utterance_list)
        data["utterance_data"].append("".join(ongoing_utterance_list))
        data["utterance_id"].append(ids)
        data["speaker_info"].append(speaker_info)
        data["emotion_label"].append(parts[2])
        data["emotion"].append(emo_map[parts[2]])

    # Bug fix: read via a context manager so the file handle is closed
    # (the original `open(...).readlines()` leaked the handle).
    with open(os.path.join(data_folder, f"{datatype}.csv")) as f:
        df = f.readlines()

    # Start at 2: df[0] is the header row, so the first (prev, current) data
    # pair is (df[1], df[2]).
    for i in range(2, len(df)):
        prev_utterance_parts = df[i-1].strip().split(",")
        current_utterance_parts = df[i].strip().split(",")
        if prev_utterance_parts[0] == current_utterance_parts[0]:
            # Same conversation is still ongoing: accumulate prev utterance.
            ongoing_utterance_list.append(prev_utterance_parts[5].replace("_comma_", ","))
            ids.append((prev_utterance_parts[0], prev_utterance_parts[1]))
            speaker_info.append(get_speaker_info(prev_utterance_parts[1]))
            if i == len(df) - 1:
                # End of file: the current row closes the final conversation.
                _record_conversation(current_utterance_parts)
        else:
            # Conversation boundary: the previous row was the last utterance
            # of its conversation, so record it and reset the accumulators.
            _record_conversation(prev_utterance_parts)
            ongoing_utterance_list = []
            ids = []
            speaker_info = []

    processed_data = {"prompt": data["prompt"], "utterance_data_list": data["utterance_data_list"], "utterance_data": data["utterance_data"], "speaker_info": data["speaker_info"], "emotion": data["emotion"]}
    return processed_data
def tokenize_data(processed_data,tokenizer_type="bert-base-uncased"):
    """Tokenize every conversation into several parallel BERT-style views.

    For each conversation (CLS=101, SEP=102 BERT token ids) this builds:
      - speaker/listener streams holding only that role's utterances,
      - "inter" streams where the other role's tokens are replaced by 0,
      - the full conversation stream, a per-utterance list, and pairwise
        speaker+listener turns,
      - arousal/valence/dominance lexicon vectors over the full stream.

    Returns a dict of parallel lists keyed as the downstream loaders expect.
    """
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)
    CLS, SEP = 101, 102  # BERT special-token ids
    inter_speaker, inter_listener = [], []
    total_data, speaker_data, listener_data = [], [], []
    list_data, turnwise_data = [], []
    arousal_data, valence_data, dom_data = [], [], []
    for conversation in processed_data["utterance_data_list"]:
        encoded = tokenizer.batch_encode_plus(conversation, add_special_tokens=False)["input_ids"]
        spk, lst, spk_only, lst_only, full = [CLS], [CLS], [CLS], [CLS], [CLS]
        per_utterance = []
        for turn, token_ids in enumerate(encoded):
            chunk = token_ids + [SEP]
            masked = [0] * len(token_ids) + [SEP]  # other role's tokens zeroed out
            if turn % 2 == 0:  # even turns belong to the "speaker"
                spk.extend(chunk)
                spk_only.extend(chunk)
                lst_only.extend(masked)
            else:  # odd turns belong to the "listener"
                lst.extend(chunk)
                lst_only.extend(chunk)
                spk_only.extend(masked)
            full.extend(chunk)
            per_utterance.append(chunk)
        # [[s1],[l1],[s2],[l2],...] -> [[CLS;s1;l1],[CLS;s2;l2],...]
        turns = [[CLS] + a + b for a, b in zip(per_utterance[::2], per_utterance[1::2])]
        per_utterance = [[CLS] + u for u in per_utterance]  # CLS at every utterance start
        inter_speaker.append(spk_only)
        inter_listener.append(lst_only)
        speaker_data.append(spk)
        listener_data.append(lst)
        total_data.append(full)
        list_data.append(per_utterance)
        turnwise_data.append(turns)
        arousal_data.append(get_arousal_vec(tokenizer, full))
        valence_data.append(get_valence_vec(tokenizer, full))
        dom_data.append(get_dom_vec(tokenizer, full))
    assert (len(list_data) == len(turnwise_data) == len(inter_speaker)
            == len(inter_listener) == len(total_data) == len(listener_data)
            == len(speaker_data) == len(processed_data["emotion"])
            == len(arousal_data) == len(valence_data) == len(dom_data))
    return {
        "utterance_data_list": list_data,
        "utterance_data": total_data,
        "utterance_data_str": processed_data["utterance_data_list"],
        "speaker_idata": inter_speaker,
        "listener_idata": inter_listener,
        "speaker_data": speaker_data,
        "listener_data": listener_data,
        "turn_data": turnwise_data,
        "arousal_data": arousal_data,
        "valence_data": valence_data,
        "dom_data": dom_data,
        "emotion": processed_data["emotion"],
    }
def go_emotions_preprocess(tokenizer_type="bert-base-uncased"):
    """Preprocess the GoEmotions dataset into tokenized text + one-hot emotions.

    For each split (train/valid/test) this reads `<split>.tsv`, tokenizes the
    text ("cause") column, one-hot encodes the emotion labels, attaches
    arousal/valence/dominance lexicon vectors, and — for the BERT tokenizer —
    pickles the resulting dict of DataFrames to
    ./.preprocessed_data/goemotions_preprocessed_bert.pkl.

    Args:
        tokenizer_type: HuggingFace tokenizer name used for encoding.
    """
    data_dict = {}
    data_home = "./.data/goemotions/"
    nlabel = 28  # size of the GoEmotions label set
    for datatype in ["train", "valid", "test"]:
        datafile = data_home + datatype + ".tsv"
        ## cause => tweet, changed for uniformity sake
        data = pd.read_csv(datafile, sep='\t', names=["cause", "emotion", "user"])
        emotion, cause = [], []
        # Walk both columns in lockstep instead of indexing back into the frame.
        for emo, cause_text in zip(data["emotion"], data["cause"]):
            emotion.append(get_one_hot(emo, nlabel))
            cause.append(cause_text)
        print("Tokenizing data")
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)
        tokenized_cause = tokenizer.batch_encode_plus(cause).input_ids
        processed_data = {}
        maximum_utterance = max(len(i) for i in tokenized_cause)
        average_utterance = np.mean([len(i) for i in tokenized_cause])
        print(len(cause), len(emotion), len(tokenized_cause))
        print("Max utterance length:", maximum_utterance, "Avg utterance length:", average_utterance)
        ## changed prompt --> cause for uniformity
        processed_data["tokenized_cause"] = tokenized_cause
        processed_data["emotion"] = emotion
        processed_data["cause"] = cause
        arousal_vec, valence_vec, dom_vec = [], [], []
        for cause_i in tokenized_cause:
            arousal_vec.append(get_arousal_vec(tokenizer, cause_i))
            valence_vec.append(get_valence_vec(tokenizer, cause_i))
            dom_vec.append(get_dom_vec(tokenizer, cause_i))
        processed_data["arousal_data"] = arousal_vec
        processed_data["valence_data"] = valence_vec
        processed_data["dom_data"] = dom_vec
        processed_data = pd.DataFrame.from_dict(processed_data)
        data_dict[datatype] = processed_data
        print(len(emotion), len(tokenized_cause), len(arousal_vec), len(valence_vec), len(dom_vec))
    if tokenizer_type == "bert-base-uncased":
        # `with` closes the file on exit; the previous explicit f.close() was redundant.
        with open("./.preprocessed_data/goemotions_preprocessed_bert.pkl", 'wb') as f:
            pickle.dump(data_dict, f)
def sem_eval_preprocess(tokenizer_type):
    """Preprocess the SemEval (Affect in Tweets) dataset.

    For each split (train/valid/test) this reads a tab-separated `<split>.txt`
    (first row is a header), cleans each tweet with tweet_preprocess, keeps its
    multi-hot emotion labels, tokenizes the text, attaches
    arousal/valence/dominance lexicon vectors, and — for the BERT tokenizer —
    pickles the resulting dict of DataFrames to
    ./.preprocessed_data/semeval_preprocessed_bert.pkl.

    Args:
        tokenizer_type: HuggingFace tokenizer name used for encoding.
    """
    data_dict = {}
    for datatype in ["train", "valid", "test"]:
        with open("./.data/sem_eval/" + datatype + ".txt", 'r') as fd:
            data = [l.strip().split('\t') for l in fd.readlines()][1:]  # [1:] drops the header row
        X = [d[1] for d in data]
        y = [[int(v) for v in d[2:]] for d in data]  # multi-hot emotion flags
        cause, emotion = [], []
        for i, x_i in enumerate(X):
            ## Affect in Tweets preprocessing in the utils.py
            # BUG FIX: previously passed the undefined name `x_proc_i`,
            # which raised NameError on the first iteration.
            cause.append(tweet_preprocess(x_i))
            emotion.append(y[i])
        print("Tokenizing data")
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)
        tokenized_cause = tokenizer.batch_encode_plus(cause).input_ids
        processed_data = {}
        maximum_utterance = max(len(i) for i in tokenized_cause)
        average_utterance = np.mean([len(i) for i in tokenized_cause])
        print(len(cause), len(emotion), len(tokenized_cause))
        print("Max utterance length:", maximum_utterance, "Avg utterance length:", average_utterance)
        ## changed prompt --> cause for uniformity
        processed_data["tokenized_cause"] = tokenized_cause
        processed_data["emotion"] = emotion
        processed_data["cause"] = cause
        arousal_vec, valence_vec, dom_vec = [], [], []
        for cause_i in tokenized_cause:
            arousal_vec.append(get_arousal_vec(tokenizer, cause_i))
            valence_vec.append(get_valence_vec(tokenizer, cause_i))
            dom_vec.append(get_dom_vec(tokenizer, cause_i))
        processed_data["arousal_data"] = arousal_vec
        processed_data["valence_data"] = valence_vec
        processed_data["dom_data"] = dom_vec
        processed_data = pd.DataFrame.from_dict(processed_data)
        data_dict[datatype] = processed_data
        print(len(emotion), len(tokenized_cause), len(arousal_vec), len(valence_vec), len(dom_vec))
    if tokenizer_type == "bert-base-uncased":
        # `with` closes the file on exit; the previous explicit f.close() was redundant.
        with open("./.preprocessed_data/semeval_preprocessed_bert.pkl", 'wb') as f:
            pickle.dump(data_dict, f)
if __name__ == '__main__':
    # Command-line entry point: choose a tokenizer and a dataset to preprocess.
    cli = argparse.ArgumentParser(description='Enter tokenizer type')
    cli.add_argument('-t', default="bert-base-uncased", type=str,
                     help='Enter tokenizer type')
    cli.add_argument('-d', default="goemotions", type=str,
                     help='Enter dataset')
    args = cli.parse_args()
    tokenizer_type = args.t
    if args.d == "ed":
        # EmpatheticDialogues: read all three splits, then tokenize each one.
        ed_home = "./.data/raw/empatheticdialogues/"
        raw_splits = [data_reader(ed_home, split) for split in ("train", "valid", "test")]
        train_save_data, valid_save_data, test_save_data = [
            tokenize_data(split_data, tokenizer_type) for split_data in raw_splits
        ]
        ## used previously during model design
        glove_vocab_size = 0
        glove_word_embeddings = []
        if tokenizer_type == "bert-base-uncased":
            with open('./.preprocessed_data/ed_dataset_preproc.p', "wb") as f:
                pickle.dump([train_save_data, valid_save_data, test_save_data,
                             glove_vocab_size, glove_word_embeddings], f)
            print("Saved PICKLE")
    elif args.d == "goemotions":
        go_emotions_preprocess(tokenizer_type)
    elif args.d == "semeval":
        sem_eval_preprocess(tokenizer_type)
| [
"pickle.dump",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"extract_lexicon.get_valence_vec",
"pandas.DataFrame.from_dict",
"extract_lexicon.get_arousal_vec",
"extract_lexicon.get_dom_vec",
"numpy.zeros",
"utils.tweet_preprocess",
"transformers.AutoTokenizer.from_pretrained"
] | [((538, 558), 'numpy.zeros', 'np.zeros', (['class_size'], {}), '(class_size)\n', (546, 558), True, 'import numpy as np\n'), ((4752, 4797), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['tokenizer_type'], {}), '(tokenizer_type)\n', (4781, 4797), False, 'from transformers import BertTokenizer, AutoTokenizer\n'), ((12339, 12398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Enter tokenizer type"""'}), "(description='Enter tokenizer type')\n", (12362, 12398), False, 'import argparse\n'), ((6466, 6509), 'extract_lexicon.get_arousal_vec', 'get_arousal_vec', (['tokenizer', 'total_utterance'], {}), '(tokenizer, total_utterance)\n', (6481, 6509), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((6531, 6574), 'extract_lexicon.get_valence_vec', 'get_valence_vec', (['tokenizer', 'total_utterance'], {}), '(tokenizer, total_utterance)\n', (6546, 6574), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((6592, 6631), 'extract_lexicon.get_dom_vec', 'get_dom_vec', (['tokenizer', 'total_utterance'], {}), '(tokenizer, total_utterance)\n', (6603, 6631), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((8272, 8339), 'pandas.read_csv', 'pd.read_csv', (['datafile'], {'sep': '"""\t"""', 'names': "['cause', 'emotion', 'user']"}), "(datafile, sep='\\t', names=['cause', 'emotion', 'user'])\n", (8283, 8339), True, 'import pandas as pd\n'), ((8591, 8636), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['tokenizer_type'], {}), '(tokenizer_type)\n', (8620, 8636), False, 'from transformers import BertTokenizer, AutoTokenizer\n'), ((9770, 9808), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['processed_data'], {}), '(processed_data)\n', (9792, 9808), True, 'import pandas as pd\n'), ((10810, 10855), 'transformers.AutoTokenizer.from_pretrained', 
'AutoTokenizer.from_pretrained', (['tokenizer_type'], {}), '(tokenizer_type)\n', (10839, 10855), False, 'from transformers import BertTokenizer, AutoTokenizer\n'), ((11913, 11951), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['processed_data'], {}), '(processed_data)\n', (11935, 11951), True, 'import pandas as pd\n'), ((10116, 10141), 'pickle.dump', 'pickle.dump', (['data_dict', 'f'], {}), '(data_dict, f)\n', (10127, 10141), False, 'import pickle\n'), ((1285, 1329), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{datatype}.csv"""'], {}), "(data_folder, f'{datatype}.csv')\n", (1297, 1329), False, 'import os\n'), ((9405, 9440), 'extract_lexicon.get_arousal_vec', 'get_arousal_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (9420, 9440), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((9476, 9511), 'extract_lexicon.get_valence_vec', 'get_valence_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (9491, 9511), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((9543, 9574), 'extract_lexicon.get_dom_vec', 'get_dom_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (9554, 9574), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((10695, 10721), 'utils.tweet_preprocess', 'tweet_preprocess', (['x_proc_i'], {}), '(x_proc_i)\n', (10711, 10721), False, 'from utils import flatten_list, tweet_preprocess\n'), ((11572, 11607), 'extract_lexicon.get_arousal_vec', 'get_arousal_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (11587, 11607), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((11639, 11674), 'extract_lexicon.get_valence_vec', 'get_valence_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (11654, 11674), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((11702, 11733), 
'extract_lexicon.get_dom_vec', 'get_dom_vec', (['tokenizer', 'cause_i'], {}), '(tokenizer, cause_i)\n', (11713, 11733), False, 'from extract_lexicon import get_arousal_vec, get_valence_vec, get_dom_vec\n'), ((12249, 12274), 'pickle.dump', 'pickle.dump', (['data_dict', 'f'], {}), '(data_dict, f)\n', (12260, 12274), False, 'import pickle\n'), ((13395, 13506), 'pickle.dump', 'pickle.dump', (['[train_save_data, valid_save_data, test_save_data, glove_vocab_size,\n glove_word_embeddings]', 'f'], {}), '([train_save_data, valid_save_data, test_save_data,\n glove_vocab_size, glove_word_embeddings], f)\n', (13406, 13506), False, 'import pickle\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""StNet"""
import math
import numpy as np
import mindspore.nn as nn
from mindspore.common.initializer import HeNormal, HeUniform, Uniform
from mindspore.ops import operations as ops
from mindspore import Tensor
class Bottleneck(nn.Cell):
    """ResNet bottleneck unit: 1x1 reduce -> 3x3 -> 1x1 expand, with residual add.

    Output channel count is `planes * expansion`; an optional `downsample`
    cell projects the identity path when shape or stride changes.
    """
    expansion = 4

    def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, has_bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial convolution, optionally strided
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, has_bias=False, pad_mode='pad')
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 channel expansion
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, has_bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU()
        # Downsample
        self.down_sample_layer = downsample
        self.stride = stride

    def construct(self, x):
        """Forward pass with the residual connection."""
        identity = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        if self.down_sample_layer is not None:
            identity = self.down_sample_layer(x)
        return self.relu(y + identity)
class TemporalXception(nn.Cell):
    """Xception-style temporal head: a (3,1)-conv branch plus a pointwise shortcut.

    Example:
        model = TemporalXception(2048, 2048)
    """

    def __init__(self, in_channels, out_channels):
        super(TemporalXception, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Branch: (3,1) temporal conv followed by (1,1) pointwise projections.
        self.att_conv = nn.Conv2d(in_channels, out_channels, kernel_size=(3, 1), stride=(1, 1),
                                  padding=(1, 1, 0, 0), weight_init=HeUniform(),
                                  pad_mode='pad', has_bias=True)
        self.att_2 = nn.Conv2d(out_channels, 1024, kernel_size=(1, 1), stride=(1, 1),
                               weight_init=HeUniform(), has_bias=True)
        self.bn2 = nn.BatchNorm2d(1024)
        self.att_1 = nn.Conv2d(1024, 1024, kernel_size=(3, 1), stride=(1, 1),
                               padding=(1, 1, 0, 0), weight_init=HeUniform(),
                               pad_mode='pad', has_bias=True)
        self.att1_2 = nn.Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1),
                                weight_init=HeUniform(), has_bias=True)
        # Shortcut: (1,1) pointwise projection of the normalized input.
        self.dw = nn.Conv2d(in_channels, 1024, kernel_size=(1, 1), stride=(1, 1),
                            weight_init=HeUniform(), has_bias=True)
        self.relu = nn.ReLU()
        self.bn3 = nn.BatchNorm2d(1024)

    def construct(self, x):
        """Forward: conv branch + pointwise shortcut, fused by addition."""
        x = self.bn1(x)
        branch = self.relu(self.bn2(self.att_2(self.att_conv(x))))
        branch = self.att1_2(self.att_1(branch))
        shortcut = self.dw(x)
        return self.relu(self.bn3(branch + shortcut))
class TemporalBlock(nn.Cell):
    """Residual 3D temporal convolution over the T axis of a (B, T, C, H, W) input."""

    def __init__(self, channels):
        super(TemporalBlock, self).__init__()
        self.channels = channels
        # (3,1,1) kernel: mixes information along the time axis only.
        self.conv1 = nn.Conv3d(channels, channels, kernel_size=(3, 1, 1), stride=1,
                               pad_mode="pad", padding=(1, 1, 0, 0, 0, 0),
                               weight_init=HeUniform(), has_bias=True)
        self.bn1 = nn.BatchNorm3d(channels)
        self.relu = nn.ReLU()
        self.transpose = ops.Transpose()
        self.reshape = ops.Reshape()

    def construct(self, x):
        """Apply the temporal conv residually; return a (B*T, C, H, W) tensor."""
        batch, steps, channels, height, width = x.shape
        feat = self.transpose(x, (0, 2, 1, 3, 4))   # -> (B, C, T, H, W)
        out = self.bn1(self.relu(self.conv1(feat)))
        out = out + feat                            # residual connection
        out = self.transpose(out, (0, 2, 1, 3, 4))  # back to (B, T, C, H, W)
        return self.reshape(out, (batch * steps, channels, height, width))
class Stnet_Res_model(nn.Cell):
    """StNet video classifier: a 2D ResNet backbone with temporal modeling.

    Input is shaped (batch, T, N * input_channels, H, W): T temporal segments
    ("super-images"), each stacking N consecutive frames along the channel
    axis.  Temporal 3D-conv blocks are inserted between ResNet stages and a
    temporal Xception head aggregates the per-segment features.
    # assumes H = W = 224 so the final feature map is 7x7 (see op_avg) — TODO confirm
    """
    def __init__(
            self, block, layers, cardinality=32, num_classes=400, T=7, N=5, input_channels=3,
    ):
        super(Stnet_Res_model, self).__init__()
        self.inplanes = 64
        self.cardinality = cardinality
        self.T = T  # number of temporal segments per clip
        self.N = N  # frames stacked per segment
        # Stem conv sees N frames stacked on channels, hence input_channels * N.
        self.conv1 = nn.Conv2d(
            input_channels * self.N, 64, kernel_size=7, stride=2, padding=3, has_bias=False, pad_mode='pad'
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Temporal blocks applied after layer2 and layer3 (see construct).
        self.temp1 = TemporalBlock(512)
        self.temp2 = TemporalBlock(1024)
        self.op_avg = nn.AvgPool2d(kernel_size=(7, 7), pad_mode="valid")
        self.xception = TemporalXception(2048, 2048)
        # Max-pool over the T temporal positions.
        self.maxpool1 = nn.MaxPool2d(kernel_size=(self.T, 1))
        self.reshape = ops.Reshape()
        self.sqrt = ops.Sqrt()  # NOTE(review): defined but never used in construct
        stdv = 1.0/math.sqrt(1024*1.0)
        self.fc = nn.Dense(1024, num_classes, weight_init=Uniform(stdv))
        self.transpose = ops.Transpose()
    def _initialize_weights(self):
        # NOTE(review): not invoked anywhere in this file; call explicitly to
        # re-initialize conv weights (He normal) and batch-norm parameters.
        self.init_parameters_data()
        for _, m in self.cells_and_names():
            if isinstance(m, nn.Conv2d):
                kaiming_norml = HeNormal(negative_slope=0, mode="fan_out", nonlinearity="relu")
                m.weight_init = kaiming_norml
            elif isinstance(m, nn.BatchNorm2d):
                m.gamma.set_data(
                    Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
                m.beta.set_data(
                    Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` bottleneck units.

        The first unit downsamples (via `stride` and a 1x1 projection) whenever
        the spatial size or channel count changes.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.SequentialCell(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    has_bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, self.cardinality, stride, downsample)
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.cardinality))
        return nn.SequentialCell(*layers)
    def construct(self, x):
        """Forward pass: (batch, T, N*C, H, W) -> logits (batch, num_classes)."""
        # Fold the temporal axis into the batch so the 2D convs see (B*T, C, H, W).
        B, _, L, H, W = x.shape
        x = self.reshape(x, (-1, L, H, W))
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        # Un-fold to (B, T, C, H, W) for the temporal block, which folds back itself.
        size = x.shape
        x = self.reshape(x, (B, self.T, size[1], size[2], size[3]))
        x = self.temp1(x)
        x = self.layer3(x)
        size = x.shape
        x = self.reshape(x, (B, self.T, size[1], size[2], size[3]))
        x = self.temp2(x)
        x = self.layer4(x)
        pool = self.op_avg(x)
        # Arrange pooled features as (B, C, T, 1) for the temporal Xception head.
        x = self.reshape(pool, (-1, self.T, pool.shape[1], 1))
        x = self.transpose(x, (0, 2, 1, 3))
        x = self.xception(x)
        x = self.maxpool1(x)
        x = self.reshape(x, (-1, 1024))
        x = self.fc(x)
        return x
def stnet50(**kwargs):
    """Construct an StNet model on a ResNet-50 backbone (stage depths 3-4-6-3).

    Extra keyword arguments are forwarded to Stnet_Res_model
    (e.g. num_classes, T, N, input_channels).
    """
    return Stnet_Res_model(Bottleneck, [3, 4, 6, 3], **kwargs)
| [
"mindspore.common.initializer.Uniform",
"mindspore.common.initializer.HeUniform",
"mindspore.nn.SequentialCell",
"numpy.ones",
"mindspore.nn.AvgPool2d",
"mindspore.nn.MaxPool2d",
"mindspore.ops.operations.Transpose",
"mindspore.nn.BatchNorm2d",
"math.sqrt",
"mindspore.common.initializer.HeNormal",... | [((1096, 1154), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'has_bias': '(False)'}), '(inplanes, planes, kernel_size=1, has_bias=False)\n', (1105, 1154), True, 'import mindspore.nn as nn\n'), ((1174, 1196), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1188, 1196), True, 'import mindspore.nn as nn\n'), ((1218, 1321), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'has_bias': '(False)', 'pad_mode': '"""pad"""'}), "(planes, planes, kernel_size=3, stride=stride, padding=1, has_bias\n =False, pad_mode='pad')\n", (1227, 1321), True, 'import mindspore.nn as nn\n'), ((1358, 1380), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1372, 1380), True, 'import mindspore.nn as nn\n'), ((1402, 1462), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * 4)'], {'kernel_size': '(1)', 'has_bias': '(False)'}), '(planes, planes * 4, kernel_size=1, has_bias=False)\n', (1411, 1462), True, 'import mindspore.nn as nn\n'), ((1482, 1508), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * 4)'], {}), '(planes * 4)\n', (1496, 1508), True, 'import mindspore.nn as nn\n'), ((1529, 1538), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1536, 1538), True, 'import mindspore.nn as nn\n'), ((2325, 2352), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', (2339, 2352), True, 'import mindspore.nn as nn\n'), ((2786, 2806), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (2800, 2806), True, 'import mindspore.nn as nn\n'), ((3328, 3337), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3335, 3337), True, 'import mindspore.nn as nn\n'), ((3357, 3377), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (3371, 3377), True, 'import mindspore.nn as nn\n'), ((4175, 4199), 
'mindspore.nn.BatchNorm3d', 'nn.BatchNorm3d', (['channels'], {}), '(channels)\n', (4189, 4199), True, 'import mindspore.nn as nn\n'), ((4220, 4229), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4227, 4229), True, 'import mindspore.nn as nn\n'), ((4255, 4270), 'mindspore.ops.operations.Transpose', 'ops.Transpose', ([], {}), '()\n', (4268, 4270), True, 'from mindspore.ops import operations as ops\n'), ((4294, 4307), 'mindspore.ops.operations.Reshape', 'ops.Reshape', ([], {}), '()\n', (4305, 4307), True, 'from mindspore.ops import operations as ops\n'), ((4997, 5107), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['(input_channels * self.N)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'has_bias': '(False)', 'pad_mode': '"""pad"""'}), "(input_channels * self.N, 64, kernel_size=7, stride=2, padding=3,\n has_bias=False, pad_mode='pad')\n", (5006, 5107), True, 'import mindspore.nn as nn\n'), ((5145, 5163), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (5159, 5163), True, 'import mindspore.nn as nn\n'), ((5184, 5193), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5191, 5193), True, 'import mindspore.nn as nn\n'), ((5217, 5271), 'mindspore.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'pad_mode': '"""same"""'}), "(kernel_size=3, stride=2, pad_mode='same')\n", (5229, 5271), True, 'import mindspore.nn as nn\n'), ((5653, 5703), 'mindspore.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(7, 7)', 'pad_mode': '"""valid"""'}), "(kernel_size=(7, 7), pad_mode='valid')\n", (5665, 5703), True, 'import mindspore.nn as nn\n'), ((5781, 5818), 'mindspore.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(self.T, 1)'}), '(kernel_size=(self.T, 1))\n', (5793, 5818), True, 'import mindspore.nn as nn\n'), ((5842, 5855), 'mindspore.ops.operations.Reshape', 'ops.Reshape', ([], {}), '()\n', (5853, 5855), True, 'from mindspore.ops import operations as ops\n'), ((5876, 5886), 'mindspore.ops.operations.Sqrt', 
'ops.Sqrt', ([], {}), '()\n', (5884, 5886), True, 'from mindspore.ops import operations as ops\n'), ((6024, 6039), 'mindspore.ops.operations.Transpose', 'ops.Transpose', ([], {}), '()\n', (6037, 6039), True, 'from mindspore.ops import operations as ops\n'), ((7440, 7466), 'mindspore.nn.SequentialCell', 'nn.SequentialCell', (['*layers'], {}), '(*layers)\n', (7457, 7466), True, 'import mindspore.nn as nn\n'), ((5906, 5927), 'math.sqrt', 'math.sqrt', (['(1024 * 1.0)'], {}), '(1024 * 1.0)\n', (5915, 5927), False, 'import math\n'), ((2565, 2576), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (2574, 2576), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((2707, 2718), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (2716, 2718), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((2995, 3006), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (3004, 3006), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((3130, 3141), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (3139, 3141), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((3252, 3263), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (3261, 3263), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((4107, 4118), 'mindspore.common.initializer.HeUniform', 'HeUniform', ([], {}), '()\n', (4116, 4118), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((5984, 5997), 'mindspore.common.initializer.Uniform', 'Uniform', (['stdv'], {}), '(stdv)\n', (5991, 5997), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((6229, 6292), 'mindspore.common.initializer.HeNormal', 'HeNormal', ([], {'negative_slope': '(0)', 'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), 
"(negative_slope=0, mode='fan_out', nonlinearity='relu')\n", (6237, 6292), False, 'from mindspore.common.initializer import HeNormal, HeUniform, Uniform\n'), ((6844, 6945), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'has_bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=\n stride, has_bias=False)\n', (6853, 6945), True, 'import mindspore.nn as nn\n'), ((7077, 7117), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (7091, 7117), True, 'import mindspore.nn as nn\n'), ((6448, 6492), 'numpy.ones', 'np.ones', (['m.gamma.data.shape'], {'dtype': '"""float32"""'}), "(m.gamma.data.shape, dtype='float32')\n", (6455, 6492), True, 'import numpy as np\n'), ((6555, 6599), 'numpy.zeros', 'np.zeros', (['m.beta.data.shape'], {'dtype': '"""float32"""'}), "(m.beta.data.shape, dtype='float32')\n", (6563, 6599), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 16:06:04 2019
@author: <NAME>
"""
# https://realpython.com/python-web-scraping-practical-introduction/
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import string
import time
import math
import datetime
abstracts = []
from nltk.corpus import stopwords
from webscraper_functions import simple_get, is_good_response
from webscraper_functions import search, fetch_details, search2, get_abstract
from webscraper_functions import string_parse1, string_parse2, clean_str
try:
data = pd.read_csv('../Data/RYANDATA.csv')
print('Loading Data')
unique_topics = list(data.Topics.unique())
topics = []
for item in list(data.Topics_split):
if isinstance(item, str):
topics_temp = []
for item2 in item.split('\''):
if not item2[0] in ['[',',',']']:
topics_temp.append(item2)
topics.append(topics_temp)
else:
topics.append('')
topics_split = topics
titles = list(data.Titles)
authors = []
for item in list(data.Authors):
if isinstance(item, str):
authors_temp = []
for item2 in item.split('\''):
if not item2[0] in ['[',',',']']:
authors_temp.append(item2)
authors.append(authors_temp)
else:
authors.append('')
journals = list(data.Journals)
years = list(data.Journals)
vol_isus = list(data.Vol_Isue)
dois = list(data.DOI)
abstracts = list(data.Abstract)
loaded_data = 1
except:
print('No Data Found, Analyzing Everything')
unique_topics = []
topics = []
titles = []
authors = []
links = []
journals = []
years = []
vol_isus = []
dois = []
def _to_epoch(date_str):
    """Convert an 'MM-DD-YYYY' date string to a POSIX timestamp (local time)."""
    return time.mktime(datetime.datetime.strptime(date_str, '%m-%d-%Y').timetuple())

# Format-change boundaries in the literature-update threads.
first_annoying = '12-30-2010'
first_missing_asterisks = '06-16-2010'
first_annoying_time = _to_epoch(first_annoying)
first_missing_asterisks_time = _to_epoch(first_missing_asterisks)
#07-27-2007
jinger_date = '07-19-2007'
jinger_post_time = _to_epoch(jinger_date)

# Translation table mapping every punctuation character except index 14 ('/')
# to a space — presumably '/' is kept because topics are '/'-separated.
str_punc = ''.join(ch for i, ch in enumerate(string.punctuation) if i != 14)
translator = str.maketrans(str_punc, ' ' * len(str_punc))

# Stop-word list for string cleaning: NLTK English stop words, their
# capitalized forms, and PubMed XML artifacts stripped from abstracts.
import string
stop = list(stopwords.words('english'))
stop += [string.capwords(word) for word in stop]
stop += ['StringElement', 'NlmCategory', 'Label', 'attributes', 'INTRODUCTION',
         'METHODS', 'BACKGROUND', 'RESULTS', 'CONCLUSIONS']
# Crawl the Biomch-L literature-update forum: find thread links on each index
# page, then parse every thread's first post into topic/author/title/etc lists.
for page in np.arange(0, 1):
    raw_html = simple_get('https://biomch-l.isbweb.org/forums/7-Literature-Update/page' + str(page))
    html = BeautifulSoup(raw_html, 'html.parser')
    main_url = 'https://biomch-l.isbweb.org/'
    # Collect thread links; the thread-title spelling varied over the years.
    thread = []
    for a in html.find_all('a', href=True, id=True):
        if a['href'].find('LITERATURE-UPDATE') > 0:
            thread.append(main_url + a['href'][0:a['href'].find('?s')] + '.html')
        if a['href'].find('Literature-Update-(') > 0:
            thread.append(main_url + a['href'][0:a['href'].find('?s')] + '.html')
        if a['href'].find('Literature-Update?s') > 0:
            thread.append(main_url + a['href'][0:a['href'].find('?s')] + '.html')
        if a['href'].find('literature-update') > 0:
            thread.append(main_url + a['href'][0:a['href'].find('?s')] + '.html')
    for url in thread:
        time.sleep(.1)  # throttle requests to the forum server
        parse_date = url[url.find('UPDATE') + 7:len(url) - 5]
        print('Parsing Page: ' + str(page) + ', Thread: ' + parse_date)
        lit_update = simple_get(url)
        lit_update = BeautifulSoup(lit_update, 'html.parser')
        # Extract the date of the first post to decide which format to parse.
        for item in lit_update.select('span'):
            if 'class="date"' in str(item):
                indx = str(item).find('class="date"')
                if str(item)[indx+13:indx+18] == 'Today' or str(item)[indx+13:indx+22] == 'Yesterday':
                    post_time = time.time()
                else:
                    post_date = str(item)[indx+13:indx+23]
                    post_time = time.mktime(datetime.datetime.strptime(post_date, '%m-%d-%Y').timetuple())
                break
        lit_str = lit_update.select('blockquote')[0].text
        lit_list = lit_str.split('\n')
        if post_time > first_missing_asterisks_time:
            # Newer format: topic headers are single-'*' lines; entries are
            # long one-line citations.
            for entry in lit_list:
                if len(entry) > 0 and entry[0] == '*' and not entry[0:3] == '***':
                    cur_topic = entry[1:entry[1:].find('*') + 1].replace(' ', '')
                    cur_topics = cur_topic.split('/')
                    for item in cur_topics:
                        if item not in unique_topics:
                            unique_topics.append(item)
                elif len(entry) > 200:
                    topic_temp, author_temp, title_temp, journal_temp, year_temp, vol_isu_temp, doi_temp = string_parse1(entry, cur_topic)
                    if not isinstance(title_temp, str):
                        raise Exception('The parsed title is not a string. Url: ' + url + '. Entry: ' + entry)
                    if len(title_temp) < 5:
                        raise Exception('The parsed title length is less than 5. Url: ' + url + '. Entry: ' + entry)
                    if title_temp not in titles:
                        topics.append(topic_temp.split('/'))
                        authors.append(author_temp)
                        titles.append(title_temp)
                        journals.append(journal_temp)
                        years.append(year_temp)
                        vol_isus.append(vol_isu_temp)
                        dois.append(doi_temp)
                        try:
                            abstracts.append(clean_str(str(get_abstract(title_temp, doi_temp)), stop))
                        except Exception:
                            abstracts.append('')  # some papers simply have no abstract
        elif post_time > jinger_post_time:
            # Older format: a topic header line followed by multi-line entries;
            # an entry is accumulated until a blank line terminates it.
            cur_topic = []  # BUG FIX: was 'cur_topic == []', a no-op comparison
            entry_temp = ''
            n_combines = 0
            found_first_topics = 0
            entry2 = []
            for k, entry in enumerate(lit_list):
                found_topics = 0
                clean_entry = entry.translate(translator).replace(' ', '')  # hoisted invariant
                if len(clean_entry) > 2:
                    # A line is a topic header iff every '/'-separated piece is a known topic.
                    for item in clean_entry.split('/'):
                        if item in unique_topics:
                            found_topics += 1
                        else:
                            break
                    if len(clean_entry.split('/')) == found_topics:
                        found_topics = 1
                        found_first_topics = 1
                        cur_topic = clean_entry.split('/')
                if len(entry) > 20 and found_first_topics and not found_topics:
                    entry_temp = entry_temp + str(entry) + ' '
                    n_combines += 1
                elif entry == '' and n_combines >= 2 and not found_topics:
                    topic_temp, author_temp, title_temp, journal_temp, year_temp, vol_isu_temp, doi_temp = string_parse2(entry_temp, cur_topic)
                    if not isinstance(title_temp, str):
                        # BUG FIX: was 'breakehrere', an undefined name that
                        # crashed with NameError instead of a clear error.
                        raise Exception('The parsed title is not a string. Url: ' + url + '. Entry: ' + entry_temp)
                    if title_temp not in titles:
                        entry2.append(entry_temp)
                        topics.append(topic_temp)
                        authors.append(author_temp)
                        titles.append(title_temp)
                        journals.append(journal_temp)
                        years.append(year_temp)
                        vol_isus.append(vol_isu_temp)
                        dois.append(doi_temp)
                        try:
                            abstracts.append(clean_str(str(get_abstract(title_temp, doi_temp)), stop))
                        except Exception:
                            abstracts.append('')
                    entry_temp = ''
                    n_combines = 0
        else:
            # Threads older than the jinger cutoff use a format this parser
            # does not handle.
            print('Post predates supported formats, skipping')
# else:
# already_analyzed_inthread += 1
# if already_analyzed_inthread > 5:
# print('Already Analyzed Thread, going to next')
# already_analyzed_thread += 1
# break
# if already_analyzed_thread > 3:
# print('Already Analyzed Page, going to next')
# break
## Pull Abstracts
#abstracts = []
#toc = time.perf_counter()
#tic = time.perf_counter()
#for i, title in enumerate(titles):
## if i % 10 == 0:
# print('Analyzing Title: '+ str(i))
# if toc - tic < .1:
# time.sleep(.1 - (toc-tic))
# print('Sleeping a sec')
# tic = time.perf_counter()
# try:
# abstracts.append(clean_str(str(get_abstract(title,dois[i])),stop))#
# except:
# abstracts.append('')
# toc = time.perf_counter()
#========================= Put it together ====================================
#if not loaded_data:
# Assemble all scraped fields into one table and persist it as CSV.
topics_split = topics
topics = ['/'.join(parts) for parts in topics_split]
_columns = {
    'Topics_split': topics_split,
    'Topics': topics,
    'Authors': authors,
    'Titles': titles,
    'Journals': journals,
    'Years': years,
    'Vol_Isue': vol_isus,
    'DOI': dois,
    'Abstract': abstracts,
}
data = pd.DataFrame(data=_columns)
data.to_csv('../Data/RYANDATA.csv')
#========================= This is now done in keras1.py ======================
#top = []
#top_len = []
#for k in np.arange(len(data['Topics'].unique())):
# top.append(data['Topics'].unique()[k])
# top_len.append(len(data[data['Topics']==top[k]]))
#
#top_lengths = pd.DataFrame(data = {'Topics': top,
# 'Length': top_len})
#min_num = len(topics)*.05
#min_num = 500
#top_lengths = top_lengths.query('Length>' + str(min_num))
#
#filtered_data = pd.DataFrame(data = {'Topics_split': [],
# 'Topics': [],
# 'Authors': [],
# 'Titles': [],
# 'Journals': [],
# 'Years': [],
# 'Vol_Isue': [],
# 'DOI': [],
# 'Abstract': []})
#
#for top in top_lengths.Topics.unique():
# if not top == 'UNIQUETOPIC':
# filtered_data = filtered_data.append(data[data['Topics']==top],sort=True)
#filtered_data = filtered_data[['Topics_split','Topics','Authors','Titles','Journals','Years','Vol_Isue','DOI','Abstract']]
#filtered_data.to_csv('../Data/RYANDATA_filt.csv')
#
#filtered_data_even = filtered_data.groupby('Topics').apply(lambda s: s.sample(500))
#filtered_data_even.to_csv('../Data/RYANDATA_filt_even.csv')
# %% Test
#Some papers just don't have an abstact:
#titles[9] = https://www.ncbi.nlm.nih.gov/pubmed/?term=Accumulation+of+microdamage+at+complete+and+incomplete+fracture+sites+in+a+patient+with+bilateral+atypical+femoral+fractures+on+glucocorticoid+and+bisphosphonate+therapy
#test = get_abstract(titles[84], doi[84])
#test = str(test)
#translator = str.maketrans(string.punctuation, ' '*len(string.punctuation)) #map punctuation to space
#test = test.translate(translator)
#test = test.split()
#test = [word for word in test if word not in stop]
#test = ' '.join(test)
#num_blank = 0
#for item in abstract['abstract']:
# if item == '[]':
# num_blank += 1
# Saving STuff for later?
#if entry[0:10] == 'http://dx.':
# entry = entry[entry.find(' ')+1:]
#
#author = entry[0:entry.find('.')].split(';')
#authors_temp = [author[1:] if author[0]== ' ' else author for author in author]
#
#entry = entry[entry.find('.')+2:]
#titles_temp = (entry[0:entry.find('.')])
#entry = entry[entry.find('.')+2:]
#
#if not titles_temp in titles:
# authors.append(authors_temp)
# titles.append(titles_temp)
# categories.append(cur_cat)
# try:
# year_nan = entry.find('NaN')
# if year_nan == -1:
# year_nan = 1000000
# year_start = min(entry.find('20'),year_nan)
# journals.append(entry[0:year_start-1])
# entry = entry[year_start:]
# if entry[0:3] == 'NaN':
# years.append('NaN')
# else:
# years.append(entry[0:entry.find(';')])
# entry = entry[entry.find(';')+1:]
# if entry[0:3] == 'NaN':
# vol_isu.append('NaN')
# else:
# vol_isu.append(entry[0:entry.find('.')])
# doi.append(entry[entry.find('.')+2:])
# except:
# journals.append(entry)
# years.append('NaN')
# vol_isu.append('NaN')
# doi.append('NaN')
| [
"webscraper_functions.simple_get",
"nltk.corpus.stopwords.words",
"pandas.read_csv",
"datetime.datetime.strptime",
"webscraper_functions.get_abstract",
"time.sleep",
"bs4.BeautifulSoup",
"webscraper_functions.string_parse2",
"string.capwords",
"webscraper_functions.string_parse1",
"pandas.DataFr... | [((2772, 2787), 'numpy.arange', 'np.arange', (['(0)', '(1)'], {}), '(0, 1)\n', (2781, 2787), True, 'import numpy as np\n'), ((9705, 9914), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Topics_split': topics_split, 'Topics': topics, 'Authors': authors,\n 'Titles': titles, 'Journals': journals, 'Years': years, 'Vol_Isue':\n vol_isus, 'DOI': dois, 'Abstract': abstracts}"}), "(data={'Topics_split': topics_split, 'Topics': topics,\n 'Authors': authors, 'Titles': titles, 'Journals': journals, 'Years':\n years, 'Vol_Isue': vol_isus, 'DOI': dois, 'Abstract': abstracts})\n", (9717, 9914), True, 'import pandas as pd\n'), ((558, 593), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/RYANDATA.csv"""'], {}), "('../Data/RYANDATA.csv')\n", (569, 593), True, 'import pandas as pd\n'), ((2469, 2495), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2484, 2495), False, 'from nltk.corpus import stopwords\n'), ((2507, 2528), 'string.capwords', 'string.capwords', (['word'], {}), '(word)\n', (2522, 2528), False, 'import string\n'), ((2935, 2973), 'bs4.BeautifulSoup', 'BeautifulSoup', (['raw_html', '"""html.parser"""'], {}), "(raw_html, 'html.parser')\n", (2948, 2973), False, 'from bs4 import BeautifulSoup\n'), ((3671, 3686), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3681, 3686), False, 'import time\n'), ((3876, 3891), 'webscraper_functions.simple_get', 'simple_get', (['url'], {}), '(url)\n', (3886, 3891), False, 'from webscraper_functions import simple_get, is_good_response\n'), ((3913, 3953), 'bs4.BeautifulSoup', 'BeautifulSoup', (['lit_update', '"""html.parser"""'], {}), "(lit_update, 'html.parser')\n", (3926, 3953), False, 'from bs4 import BeautifulSoup\n'), ((1927, 1981), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['first_annoying', '"""%m-%d-%Y"""'], {}), "(first_annoying, '%m-%d-%Y')\n", (1953, 1981), False, 'import datetime\n'), ((2038, 2101), 'datetime.datetime.strptime', 
'datetime.datetime.strptime', (['first_missing_asterisks', '"""%m-%d-%Y"""'], {}), "(first_missing_asterisks, '%m-%d-%Y')\n", (2064, 2101), False, 'import datetime\n'), ((2185, 2236), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['jinger_date', '"""%m-%d-%Y"""'], {}), "(jinger_date, '%m-%d-%Y')\n", (2211, 2236), False, 'import datetime\n'), ((4242, 4253), 'time.time', 'time.time', ([], {}), '()\n', (4251, 4253), False, 'import time\n'), ((5350, 5381), 'webscraper_functions.string_parse1', 'string_parse1', (['entry', 'cur_topic'], {}), '(entry, cur_topic)\n', (5363, 5381), False, 'from webscraper_functions import string_parse1, string_parse2, clean_str\n'), ((7719, 7755), 'webscraper_functions.string_parse2', 'string_parse2', (['entry_temp', 'cur_topic'], {}), '(entry_temp, cur_topic)\n', (7732, 7755), False, 'from webscraper_functions import string_parse1, string_parse2, clean_str\n'), ((4379, 4428), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['post_date', '"""%m-%d-%Y"""'], {}), "(post_date, '%m-%d-%Y')\n", (4405, 4428), False, 'import datetime\n'), ((6250, 6284), 'webscraper_functions.get_abstract', 'get_abstract', (['title_temp', 'doi_temp'], {}), '(title_temp, doi_temp)\n', (6262, 6284), False, 'from webscraper_functions import search, fetch_details, search2, get_abstract\n'), ((8387, 8421), 'webscraper_functions.get_abstract', 'get_abstract', (['title_temp', 'doi_temp'], {}), '(title_temp, doi_temp)\n', (8399, 8421), False, 'from webscraper_functions import search, fetch_details, search2, get_abstract\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 12:05:08 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.control import linear
###############################################################################
class SinglePendulum_with_position_output(pendulum.SinglePendulum):
    """Single pendulum whose measured output is the joint position only."""

    def __init__(self):
        super().__init__()
        self.p = 1  # output size: a single position measurement

    def h(self, x, u, t):
        """Output function: keep only the first component (the position)
        of the parent class's output vector."""
        full_output = pendulum.SinglePendulum.h(self, x, u, t)
        position_only = np.zeros(1)
        position_only[0] = full_output[0]
        return position_only
sys = SinglePendulum_with_position_output()
dof = 1
kp = 2 # 2,4
kd = 1 # 1
ki = 1
ctl = linear.PIDController(kp, ki, kd)
# Set Point
q_target = np.array([3.14])
ctl.rbar = q_target
# New cl-dynamic
cl_sys = ctl + sys
# Simultation
cl_sys.x0[0] = 1.0
cl_sys.compute_trajectory(tf=10, n=20001, solver='euler')
cl_sys.plot_phase_plane_trajectory()
cl_sys.plot_trajectory('xu')
cl_sys.plot_internal_controller_states()
cl_sys.animate_simulation() | [
"numpy.array",
"pyro.control.linear.PIDController",
"numpy.zeros",
"pyro.dynamic.pendulum.SinglePendulum.h"
] | [((967, 999), 'pyro.control.linear.PIDController', 'linear.PIDController', (['kp', 'ki', 'kd'], {}), '(kp, ki, kd)\n', (987, 999), False, 'from pyro.control import linear\n'), ((1024, 1040), 'numpy.array', 'np.array', (['[3.14]'], {}), '([3.14])\n', (1032, 1040), True, 'import numpy as np\n'), ((725, 765), 'pyro.dynamic.pendulum.SinglePendulum.h', 'pendulum.SinglePendulum.h', (['self', 'x', 'u', 't'], {}), '(self, x, u, t)\n', (750, 765), False, 'from pyro.dynamic import pendulum\n'), ((799, 810), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (807, 810), True, 'import numpy as np\n')] |
#
# This code is for detecting proper region of the plate
# with the combined approach
#
# imports
import cv2
import imutils
import numpy as np
from imutils import paths
# import RDetectPlates as detplt
from imutils import perspective
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn.decomposition import PCA
from imutils import perspective
from all_params import *
#from all_params import TRAIN_DATA_PATH as path
#from all_params import MODEL_CHECKPOINT_DIR as ls_path
# Gather the training images (reverse-sorted paths) for the detector test loop.
imgs = sorted(list(paths.list_images(path)), reverse=True)
# NOTE(review): `datetime` (and `mx` used further down) are not imported in this
# file directly -- presumably they come in via `from all_params import *`; verify.
now = datetime.datetime.now()
import Regressor_01
#rnd = 10
# testing the detector:
# Each iteration: pick a random image, find candidate plate regions with a
# classical OpenCV morphology pipeline, then run a trained regressor on a
# fixed test sample to refine a plate location.
for _ in range(len(imgs)):
    # Random image index (sampled with replacement, so images may repeat).
    rnd = np.random.randint(0, len(imgs) - 1, 1)[0]
    gimg = cv2.imread(imgs[rnd])
    plt.imshow(gimg)
    plt.close()
    try:
        gimg = cv2.cvtColor(gimg, cv2.COLOR_BGR2GRAY)
    except:
        print('there is an error in making img gray')
    # Detecting approximate region with OCV
    retRegions = [] # this will be the return value
    gCoords = [] # this will be the return value
    retCoords = []
    poss_plates = []
    globCoord = []
    # Vertical Kernels
    # NOTE(review): several kernels below (vertKernel, pKernel, bKernel, b2Kernel,
    # smallKernel, HKernel, superKernel) are never used later in this loop.
    vertKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 5))
    pKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    #
    # Horizontal Kernels
    bKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
    b2Kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    smallKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 3))
    HKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 3))
    rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 4)) # the rectangle kernel
    superKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 3)) # 27,3 check 29 also
    #
    bigpics = [] # this will be the return value
    # then initialize the list of license plate regions
    rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 5))  # NOTE: overwrites the (13, 4) kernel above
    squareKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 5))
    # convert the image to grayscale, and apply the blackhat operation
    # (blackhat highlights dark regions on a light background, e.g. plate text)
    blackhat = cv2.morphologyEx(gimg, cv2.MORPH_BLACKHAT, rectKernel)
    # find regions in the image that are light
    light = cv2.morphologyEx(gimg, cv2.MORPH_CLOSE, rectKernel)
    light = cv2.threshold(light, 0, 255, cv2.THRESH_BINARY)[1]
    # compute the Scharr gradient representation of the blackhat image and scale the
    # resulting image into the range [0, 255]
    gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradX = np.absolute(gradX)
    (minVal, maxVal) = (np.min(gradX), np.max(gradX))
    gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
    # blur the gradient representation, apply a closing operation, and threshold the
    # image using Otsu's method
    gradX = cv2.GaussianBlur(gradX, (5, 5), 0)
    gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
    gradX = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # perform a series of erosions and dilations on the image
    gradX = cv2.erode(gradX, squareKernel, iterations=2)
    gradX = cv2.dilate(gradX, squareKernel, iterations=3)
    # take the bitwise 'and' between the 'light' regions of the image, then perform
    # another series of erosions and dilations
    thresh = cv2.bitwise_and(gradX, gradX, mask=light)
    thresh = cv2.erode(thresh, squareKernel, iterations=2)
    thresh = cv2.dilate(thresh, squareKernel, iterations=2)
    # find contours in the thresholded image
    # NOTE(review): the 3-value return implies OpenCV 3.x; OpenCV 2.x/4.x
    # return only (contours, hierarchy) from findContours.
    _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over the contours
    for c in cnts:
        # grab the bounding box associated with the contour and compute the area and
        # aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        aspectRatio = w / float(h)
        # compute the rotated bounding box of the region
        rect = cv2.minAreaRect(c)
        box = np.int0(cv2.boxPoints(rect))
        # ensure the aspect ratio, width, and height of the bounding box fall within
        # tolerable limits, then update the list of license plate regions
        if (aspectRatio > 2 and aspectRatio < 8) and h > 10 and w > 50 and h < 125 and w < 400:
        # if h > 10 and w > 50 and h < 250 and w < 750:
            bigpics.append(box)
            gCoords.append(np.array([x, y, w, h]))
    #
    # processing the resulted images
    #
    for bigpic in bigpics:
        # Warp each candidate region to a fronto-parallel crop.
        # NOTE(review): `platel` is overwritten each iteration and never used after.
        platel = perspective.four_point_transform(gimg, bigpic)
    # 1. Classifier: setting a classifier to select figures with all kind of plates included
    #Using trained network
    # 2. Regressor: Applting a light regressor to extract the exact region of the plate
    # net is loaded and we want to use it
    filedir = "save_load/"
    # net.save_params(filename)
    regplt = Regressor_01.Regression_plt()
    #
    tr_input, tr_target, ts_input, ts_target, size_1, size_2 = regplt.Reg_Data()
    net = regplt.Model()
    ## first we should define net the same as training, then load the saved parameters
    net.load_params(filedir + "testnet20180319-081631.params", ctx=mx.cpu())
    print('model loaded')
    # Here goes to padding image and dimension reduction which is currently
    # incomplete
    # which image?
    # an integer between 0-453
    wi = 420  # hard-coded test-set index used for the demo prediction
    pred = np.int0(net(ts_input[wi].reshape((1, -1)))[0].asnumpy())
    # Showing the results
    image = ts_input[wi].reshape([100, 200])
    # after process, padding should be removed or taget also should be padded
    # drawing rectangle
    plate = cv2.rectangle(image, (pred[0], pred[1]), \
                          (pred[2], pred[3]), (0, 200, 0), 3)
    # this is for cropping plate number
    # plt_points = np.array([[int(pred[0]), int(pred[1])],[int(pred[0]) + int(pred[2]), int(pred[1])],\
    #                  [int(pred[0]), int(pred[1]) + int(pred[3])], [int(pred[0]) + int(pred[2]), int(pred[1]) + int(pred[3])]])
    # cvxh = cv2.convexHull(plt_points)
    # rect = cv2.minAreaRect(cvxh)
    # box = np.int0(cv2.boxPoints(rect))
    # platel = perspective.four_point_transform(image, box)
    plt.imshow(plate)
    # for j in range(inp_raw.shape[0]):
    #     plate = cv2.rectangle(image[j], (pred[0], pred[1]),(pred[3], pred[4]), (200,0,0), 0)
    #     plt.imshow(plate)
# 3. Here goes to segmentation
# 3. Here goes to segmentation
| [
"cv2.rectangle",
"imutils.perspective.four_point_transform",
"numpy.array",
"imutils.paths.list_images",
"matplotlib.pyplot.imshow",
"cv2.threshold",
"cv2.erode",
"numpy.max",
"matplotlib.pyplot.close",
"cv2.minAreaRect",
"numpy.min",
"Regressor_01.Regression_plt",
"cv2.boxPoints",
"cv2.mo... | [((722, 743), 'cv2.imread', 'cv2.imread', (['imgs[rnd]'], {}), '(imgs[rnd])\n', (732, 743), False, 'import cv2\n'), ((749, 765), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gimg'], {}), '(gimg)\n', (759, 765), True, 'import matplotlib.pyplot as plt\n'), ((770, 781), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (779, 781), True, 'import matplotlib.pyplot as plt\n'), ((1160, 1209), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(2, 5)'], {}), '(cv2.MORPH_RECT, (2, 5))\n', (1185, 1209), False, 'import cv2\n'), ((1224, 1273), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (1249, 1273), False, 'import cv2\n'), ((1319, 1368), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(2, 1)'], {}), '(cv2.MORPH_RECT, (2, 1))\n', (1344, 1368), False, 'import cv2\n'), ((1384, 1433), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(1, 1)'], {}), '(cv2.MORPH_RECT, (1, 1))\n', (1409, 1433), False, 'import cv2\n'), ((1452, 1501), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(4, 3)'], {}), '(cv2.MORPH_RECT, (4, 3))\n', (1477, 1501), False, 'import cv2\n'), ((1516, 1565), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 3)'], {}), '(cv2.MORPH_RECT, (5, 3))\n', (1541, 1565), False, 'import cv2\n'), ((1583, 1633), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(13, 4)'], {}), '(cv2.MORPH_RECT, (13, 4))\n', (1608, 1633), False, 'import cv2\n'), ((1676, 1726), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(25, 3)'], {}), '(cv2.MORPH_RECT, (25, 3))\n', (1701, 1726), False, 'import cv2\n'), ((1878, 1928), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(25, 5)'], {}), '(cv2.MORPH_RECT, (25, 5))\n', (1903, 1928), False, 'import cv2\n'), 
((1948, 1998), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(11, 5)'], {}), '(cv2.MORPH_RECT, (11, 5))\n', (1973, 1998), False, 'import cv2\n'), ((2087, 2141), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gimg', 'cv2.MORPH_BLACKHAT', 'rectKernel'], {}), '(gimg, cv2.MORPH_BLACKHAT, rectKernel)\n', (2103, 2141), False, 'import cv2\n'), ((2202, 2253), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gimg', 'cv2.MORPH_CLOSE', 'rectKernel'], {}), '(gimg, cv2.MORPH_CLOSE, rectKernel)\n', (2218, 2253), False, 'import cv2\n'), ((2461, 2521), 'cv2.Sobel', 'cv2.Sobel', (['blackhat'], {'ddepth': 'cv2.CV_32F', 'dx': '(1)', 'dy': '(0)', 'ksize': '(-1)'}), '(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n', (2470, 2521), False, 'import cv2\n'), ((2534, 2552), 'numpy.absolute', 'np.absolute', (['gradX'], {}), '(gradX)\n', (2545, 2552), True, 'import numpy as np\n'), ((2812, 2846), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gradX', '(5, 5)', '(0)'], {}), '(gradX, (5, 5), 0)\n', (2828, 2846), False, 'import cv2\n'), ((2859, 2911), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gradX', 'cv2.MORPH_CLOSE', 'rectKernel'], {}), '(gradX, cv2.MORPH_CLOSE, rectKernel)\n', (2875, 2911), False, 'import cv2\n'), ((3068, 3112), 'cv2.erode', 'cv2.erode', (['gradX', 'squareKernel'], {'iterations': '(2)'}), '(gradX, squareKernel, iterations=2)\n', (3077, 3112), False, 'import cv2\n'), ((3125, 3170), 'cv2.dilate', 'cv2.dilate', (['gradX', 'squareKernel'], {'iterations': '(3)'}), '(gradX, squareKernel, iterations=3)\n', (3135, 3170), False, 'import cv2\n'), ((3316, 3357), 'cv2.bitwise_and', 'cv2.bitwise_and', (['gradX', 'gradX'], {'mask': 'light'}), '(gradX, gradX, mask=light)\n', (3331, 3357), False, 'import cv2\n'), ((3371, 3416), 'cv2.erode', 'cv2.erode', (['thresh', 'squareKernel'], {'iterations': '(2)'}), '(thresh, squareKernel, iterations=2)\n', (3380, 3416), False, 'import cv2\n'), ((3430, 3476), 'cv2.dilate', 'cv2.dilate', (['thresh', 'squareKernel'], 
{'iterations': '(2)'}), '(thresh, squareKernel, iterations=2)\n', (3440, 3476), False, 'import cv2\n'), ((506, 529), 'imutils.paths.list_images', 'paths.list_images', (['path'], {}), '(path)\n', (523, 529), False, 'from imutils import paths\n'), ((807, 845), 'cv2.cvtColor', 'cv2.cvtColor', (['gimg', 'cv2.COLOR_BGR2GRAY'], {}), '(gimg, cv2.COLOR_BGR2GRAY)\n', (819, 845), False, 'import cv2\n'), ((2266, 2313), 'cv2.threshold', 'cv2.threshold', (['light', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(light, 0, 255, cv2.THRESH_BINARY)\n', (2279, 2313), False, 'import cv2\n'), ((2577, 2590), 'numpy.min', 'np.min', (['gradX'], {}), '(gradX)\n', (2583, 2590), True, 'import numpy as np\n'), ((2592, 2605), 'numpy.max', 'np.max', (['gradX'], {}), '(gradX)\n', (2598, 2605), True, 'import numpy as np\n'), ((2924, 2989), 'cv2.threshold', 'cv2.threshold', (['gradX', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2937, 2989), False, 'import cv2\n'), ((3796, 3815), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3812, 3815), False, 'import cv2\n'), ((3924, 3942), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (3939, 3942), False, 'import cv2\n'), ((4479, 4525), 'imutils.perspective.four_point_transform', 'perspective.four_point_transform', (['gimg', 'bigpic'], {}), '(gimg, bigpic)\n', (4511, 4525), False, 'from imutils import perspective\n'), ((4881, 4910), 'Regressor_01.Regression_plt', 'Regressor_01.Regression_plt', ([], {}), '()\n', (4908, 4910), False, 'import Regressor_01\n'), ((5696, 5772), 'cv2.rectangle', 'cv2.rectangle', (['image', '(pred[0], pred[1])', '(pred[2], pred[3])', '(0, 200, 0)', '(3)'], {}), '(image, (pred[0], pred[1]), (pred[2], pred[3]), (0, 200, 0), 3)\n', (5709, 5772), False, 'import cv2\n'), ((6289, 6306), 'matplotlib.pyplot.imshow', 'plt.imshow', (['plate'], {}), '(plate)\n', (6299, 6306), True, 'import matplotlib.pyplot as plt\n'), ((3965, 3984), 
'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3978, 3984), False, 'import cv2\n'), ((4361, 4383), 'numpy.array', 'np.array', (['[x, y, w, h]'], {}), '([x, y, w, h])\n', (4369, 4383), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
from gym import spaces
from gym.utils import seeding
from learn2learn.gym.envs.meta_env import MetaEnv
class Particles2DEnv(MetaEnv):
    """
    [[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/gym/envs/particles/particles_2d.py)

    **Description**

    2D point-mass meta-RL environment. Each task places a goal uniformly at
    random in [-0.5, 0.5]^2. The agent applies a 2D force (clipped to
    [-0.1, 0.1]) that translates the point mass directly; the reward is the
    negative Euclidean distance to the goal, and an episode terminates once
    the mass is within 0.01 of the goal along both axes.

    **Credit**

    Adapted from <NAME>othfuss' implementation.
    """

    def __init__(self, task=None):
        self.seed()
        super(Particles2DEnv, self).__init__(task)
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32)
        self.action_space = spaces.Box(
            low=-0.1, high=0.1, shape=(2,), dtype=np.float32)
        self.reset()

    # -------- MetaEnv Methods --------
    def sample_tasks(self, num_tasks):
        """Draw `num_tasks` goal points uniformly from [-0.5, 0.5]^2."""
        goal_points = self.np_random.uniform(-0.5, 0.5, size=(num_tasks, 2))
        return [{'goal': g} for g in goal_points]

    def set_task(self, task):
        self._task = task
        self._goal = task['goal']

    # -------- Gym Methods --------
    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self, env=True):
        """Reset the point mass back to the origin (0, 0)."""
        self._state = np.zeros(2, dtype=np.float32)
        return self._state

    def step(self, action):
        """
        Apply a clipped 2D force to the point mass.

        Returns (state, reward, done, task): the new 2D position, the
        negative distance to the goal, whether the mass is within 0.01 of
        the goal on both axes, and the current task dictionary.
        """
        action = np.clip(action, -0.1, 0.1)
        assert self.action_space.contains(action)
        self._state = self._state + action
        dx = self._state[0] - self._goal[0]
        dy = self._state[1] - self._goal[1]
        reward = -np.sqrt(dx ** 2 + dy ** 2)
        done = (np.abs(dx) < 0.01) and (np.abs(dy) < 0.01)
        return self._state, reward, done, self._task

    def render(self, mode=None):
        raise NotImplementedError
| [
"numpy.clip",
"numpy.abs",
"numpy.sqrt",
"gym.spaces.Box",
"numpy.zeros",
"gym.utils.seeding.np_random"
] | [((756, 822), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32)\n', (766, 822), False, 'from gym import spaces\n'), ((895, 955), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-0.1)', 'high': '(0.1)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-0.1, high=0.1, shape=(2,), dtype=np.float32)\n', (905, 955), False, 'from gym import spaces\n'), ((1522, 1545), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1539, 1545), False, 'from gym.utils import seeding\n'), ((1693, 1722), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (1701, 1722), True, 'import numpy as np\n'), ((2638, 2664), 'numpy.clip', 'np.clip', (['action', '(-0.1)', '(0.1)'], {}), '(action, -0.1, 0.1)\n', (2645, 2664), True, 'import numpy as np\n'), ((2863, 2887), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2870, 2887), True, 'import numpy as np\n'), ((2905, 2914), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2911, 2914), True, 'import numpy as np\n'), ((2928, 2937), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (2934, 2937), True, 'import numpy as np\n')] |
import json
import numpy as np
import os
import pickle
import sklearn.metrics
import time
from chapydette import cp_estimation
def load_features(cruise, features_dir, projection_dim, subsample_num=1, subsample_of=1):
    """
    Load features for a cruise.

    :param cruise: Cruise to load features for.
    :param features_dir: Directory where the features are stored.
    :param projection_dim: Dimension of the features.
    :param subsample_num: Subsample number to load (0-based; the file name uses subsample_num+1).
    :param subsample_of: Number of subsamples previously generated for this cruise.
    :return: Tuple consisting of:
        * cruise_features['bio_features']: The features for the biological data
        * cruise_features['phys_features']: The features for the physical data
    """
    if subsample_of == 1:
        filename = cruise + '_features_' + str(projection_dim) + '.pickle'
    else:
        filename = (cruise + '_features_' + str(projection_dim) + '_subsample_' +
                    str(subsample_num + 1) + '_of_' + str(subsample_of) + '.pickle')
    features_path = os.path.join(features_dir, filename)
    # Use a context manager so the file handle is closed promptly (the
    # original `pickle.load(open(...))` left it open until garbage collection).
    with open(features_path, 'rb') as f:
        cruise_features = pickle.load(f)
    return cruise_features['bio_features'], cruise_features['phys_features']
def get_bw_range(features):
    """
    Compute the rule-of-thumb bandwidth and a log-spaced range of bandwidths
    for the Gaussian RBF kernel.

    :param features: Features to use to obtain the bandwidths.
    :return: Tuple consisting of:
        * rot_bw: Rule-of-thumb bandwidth (median pairwise distance).
        * bw_grid: Array of 10 bandwidths spanning the 1st-99th distance percentiles.
    """
    flat_dists = sklearn.metrics.pairwise.pairwise_distances(features).reshape(-1)
    rot_bw = np.median(flat_dists)
    hi_pct = np.percentile(flat_dists, 99)
    lo_pct = np.percentile(flat_dists, 1)
    gamma_grid = np.logspace(np.log(0.5 / hi_pct ** 2),
                             np.log(0.5 / lo_pct ** 2), 10, base=np.e)
    bw_grid = np.sqrt(1 / (2 * gamma_grid))
    return rot_bw, bw_grid
return rule_of_thumb_bw, bws
def est_cps(cruise, bio_features, phys_features, max_ncp=150, min_dists=[5], kernel_types=['Gaussian-Euclidean'],
            bw_method='rule-of-thumb', subsample_num=1, subsample_of=1, save_dir='../results/'):
    """
    Estimate the locations of change points in the input biological and physical features for a single cruise.

    :param cruise: Name of the cruise the features are from.
    :param bio_features: Features for the biological data.
    :param phys_features: Features for the physical data (or None to skip the physical estimation).
    :param max_ncp: Maximum number of change points in a sequence.
    :param min_dists: List of minimum acceptable distances between change points.
    :param kernel_types: List containing 'Gaussian-Euclidean' (Gaussian RBF kernel) and/or 'Linear'.
    :param bw_method: Method to use for obtaining the bandwidth(s). Either 'rule-of-thumb' or 'list'.
    :param subsample_num: Subsample number being used (0-based; file names use subsample_num+1).
    :param subsample_of: Number of subsamples previously generated for this cruise.
    :param save_dir: Top-level directory where the results will be stored.
    """
    # NOTE(review): min_dists/kernel_types are mutable default arguments. They
    # are never mutated here, so this is safe, but fresh lists per call would
    # be the conventional style.
    projection_dim = bio_features.shape[1]
    for min_dist in min_dists:
        # Perform change-point estimation on the physical data
        # Ensure the per-cruise output directory exists before writing results.
        if not os.path.exists(os.path.join(save_dir, cruise)):
            os.makedirs(os.path.join(save_dir, cruise))
        if phys_features is not None:
            # Physical data always uses the linear kernel; the number of change
            # points searched is capped by max_ncp and by how many segments of
            # length >= min_dist fit in the sequence.
            cps_phys, objs_phys = cp_estimation.mkcpe(X=phys_features,
                                            n_cp=(1, min(max_ncp, int((len(phys_features)-1)/min_dist)-1)),
                                            kernel_type='linear', min_dist=min_dist, return_obj=True)
            # Flatten the estimated change-point arrays so they are JSON-serializable.
            for key in cps_phys.keys():
                cps_phys[key] = cps_phys[key].flatten().tolist()
            save_path = os.path.join(save_dir, cruise, 'cps_phys.json')
            # NOTE(review): the file handle passed to json.dump is never
            # explicitly closed; a `with open(...)` would be preferable.
            json.dump({'cps_phys': cps_phys, 'objs_phys': objs_phys}, open(save_path, 'w'))
        for kernel_type in kernel_types:
            # Get the bandwidth(s) (if applicable): one rule-of-thumb value or
            # a log-spaced grid, depending on bw_method. The linear kernel
            # takes no bandwidth, so a dummy value of 0 is used.
            if kernel_type != 'Linear':
                rot_bw, bws = get_bw_range(bio_features)
                all_bws = [rot_bw] if bw_method == 'rule-of-thumb' else bws
            else:
                all_bws = [0]
            for bw in all_bws:
                # Perform change-point estimation on the biological data
                cps_bio, objs_bio = cp_estimation.mkcpe(X=bio_features,
                                           n_cp=(1, min(max_ncp, int((len(bio_features)-1)/min_dist)-1)),
                                           kernel_type=kernel_type, bw=bw, min_dist=min_dist,
                                           return_obj=True)
                for key in cps_bio.keys():
                    cps_bio[key] = cps_bio[key].flatten().tolist()
                # Encode the bandwidth (and how it was chosen) into the file name.
                bw_short = 'rule-of-thumb_' + str(np.round(bw, 3)) if bw_method == 'rule-of-thumb' else \
                    str(np.round(bw, 3))
                if subsample_of == 1:
                    save_path = os.path.join(save_dir, cruise, 'cps_bio_' + str(projection_dim) + '_' +
                                             kernel_type + '_' + str(bw_short) + '_' + str(min_dist) + '.json')
                else:
                    save_path = os.path.join(save_dir, cruise, 'cps_bio_' + str(projection_dim) + '_' +
                                             kernel_type + '_' + str(bw_short) + '_' + str(min_dist) + '_subsample_' +
                                             str(subsample_num+1) + '_of_' + str(subsample_of) + '.json')
                json.dump({'cps_bio': cps_bio, 'bw': bw, 'objs_bio': objs_bio}, open(save_path, 'w'))
def est_cps_all_cruises(cruises, features_dir, max_ncp=150, min_dist=5, projection_dim=128, save_dir='../results'):
    """
    Estimate the biological and physical change points for every cruise in
    `cruises` with the given parameter settings.

    :param cruises: List of cruises to estimate change points for.
    :param features_dir: Directory where the features are stored.
    :param max_ncp: Maximum number of acceptable change points.
    :param min_dist: Minimum acceptable distance between change points.
    :param projection_dim: Dimension of the features.
    :param save_dir: Location where the estimated change points will be stored.
    """
    n_cruises = len(cruises)
    for idx, cruise in enumerate(cruises, start=1):
        print('Estimating change points for', cruise, '- Cruise ', idx, '/', n_cruises)
        bio, phys = load_features(cruise, features_dir, projection_dim)
        est_cps(cruise, bio, phys, max_ncp=max_ncp, min_dists=[min_dist], save_dir=save_dir)
def sensitivity_analysis(cruise, features_dir, max_ncp=150, min_dist=5, projection_dim=128,
                         save_dir='../results/sensitivity_analysis/'):
    """
    Estimate the biological change points for a given cruise while varying the
    kernel, bandwidth method, minimum segment length, and feature dimension.

    :param cruise: Name of the cruise to perform the sensitivity analysis on.
    :param features_dir: Directory where the features are stored.
    :param max_ncp: Maximum number of acceptable change points.
    :param min_dist: Baseline minimum acceptable distance between change points.
    :param projection_dim: Baseline dimension of the features.
    :param save_dir: Top-level directory where the results will be stored.
    """
    print('Performing sensitivity analysis')
    bio, phys = load_features(cruise, features_dir, projection_dim)
    # Linear kernel with the rule-of-thumb bandwidth method.
    est_cps(cruise, bio, phys, max_ncp=max_ncp, min_dists=[min_dist], kernel_types=['Linear'],
            bw_method='rule-of-thumb', save_dir=save_dir)
    # Gaussian kernel over a grid of bandwidths.
    est_cps(cruise, bio, phys, max_ncp=max_ncp, min_dists=[min_dist],
            kernel_types=['Gaussian-Euclidean'], bw_method='list', save_dir=save_dir)
    # Vary the minimum distance between change points.
    dist_grid = [1] + list(range(5, 55, 5))
    est_cps(cruise, bio, phys, max_ncp=max_ncp, min_dists=dist_grid,
            kernel_types=['Gaussian-Euclidean'], bw_method='rule-of-thumb', save_dir=save_dir)
    # Vary the feature dimension over powers of two from 4 to 1024.
    for dim in (2 ** i for i in range(2, 11)):
        bio, phys = load_features(cruise, features_dir, dim)
        est_cps(cruise, bio, phys, max_ncp=max_ncp, min_dists=[min_dist],
                kernel_types=['Gaussian-Euclidean'], bw_method='rule-of-thumb', save_dir=save_dir)
def variance_analysis(cruise, features_dir, max_ncp=150, min_dist=5, projection_dim=128, subsample_of=10,
                      save_dir='../results'):
    """
    Estimate the biological and physical change points for each subsample of a cruise's data.

    :param cruise: Name of the cruise to perform the variance analysis on.
    :param features_dir: Directory where the features are stored.
    :param max_ncp: Maximum number of acceptable change points.
    :param min_dist: Minimum acceptable distance between change points.
    :param projection_dim: Dimension of the features.
    :param subsample_of: Number of subsamples previously generated.
    :param save_dir: Location where the estimated change points will be stored.
    """
    for sub in range(subsample_of):
        print('Estimating change points for', cruise, '- subsample ', sub + 1, '/', subsample_of)
        bio_features, phys_features = load_features(cruise, features_dir, projection_dim,
                                                    subsample_num=sub, subsample_of=subsample_of)
        est_cps(cruise, bio_features, phys_features, max_ncp=max_ncp, min_dists=[min_dist],
                subsample_num=sub, subsample_of=subsample_of, save_dir=save_dir)
def simple_avg_comparison(cruises, features_dir, kernel_types=['Linear'], max_ncp=150, min_dist=5,
                          save_dir='../results'):
    """
    Estimate the biological change points for the given cruises when using features derived from
    simply averaging the data within each point cloud.

    :param cruises: List of cruises to estimate change points for.
    :param kernel_types: List containing 'Gaussian-Euclidean' (Gaussian RBF kernel) and/or 'Linear'.
        (Mutable default kept for interface compatibility; it is never mutated here.)
    :param features_dir: Directory where the features are stored.
    :param max_ncp: Maximum number of acceptable change points.
    :param min_dist: Minimum acceptable distance between change points.
    :param save_dir: Location where the estimated change points will be stored.
    """
    for cruise in cruises:
        path = os.path.join(features_dir, cruise + '_features_simple_avg.pickle')
        # Fix: open the pickle in a context manager so the file handle is
        # closed; the original leaked it via pickle.load(open(...)).
        with open(path, 'rb') as f:
            features_dict = pickle.load(f)
        bio_features = features_dict['bio_features']
        est_cps(cruise, bio_features, None, kernel_types=kernel_types, max_ncp=max_ncp,
                min_dists=[min_dist], save_dir=save_dir)
if __name__ == '__main__':
    # Wall-clock timing of the whole pipeline (reported at the end).
    t1 = time.time()
    # Directory where the features are stored
    features_dir = '../features/'
    # Directory where the output from the main analysis will be stored
    save_dir = '../results/'
    # Directory where the output from the sensitivity analysis will be stored
    save_dir_sensitivity_analysis = '../results/sensitivity_analysis/'
    # Directory where the output from the analysis of subsampled data will be stored
    save_dir_variance_analysis = '../results/variance_analysis/'
    # Directory where the output from the comparison with simply averaging the data for each point in a point cloud will
    # be stored
    save_dir_average_comparison = '../results/average_comparison/'
    # List of cruises to use
    cruises = ['DeepDOM', 'KM1712', 'KM1713', 'MGL1704', 'SCOPE_2', 'SCOPE_16', 'Thompson_1', 'Thompson_9',
               'Thompson_12', 'Tokyo_1', 'Tokyo_2', 'Tokyo_3']
    # Main analysis over every cruise, then the three follow-up analyses
    # (sensitivity and variance analyses are run on SCOPE_16 only).
    est_cps_all_cruises(cruises, features_dir, save_dir=save_dir)
    sensitivity_analysis('SCOPE_16', features_dir, save_dir=save_dir_sensitivity_analysis)
    variance_analysis('SCOPE_16', features_dir, save_dir=save_dir_variance_analysis, subsample_of=10)
    simple_avg_comparison(cruises, features_dir, max_ncp=150, min_dist=5, save_dir=save_dir_average_comparison)
    t2 = time.time()
    print('Runtime:', t2-t1)
    # Runtime (Intel i9-7960X CPU @ 2.80GHz): 5m39s
| [
"numpy.median",
"numpy.sqrt",
"os.path.join",
"numpy.percentile",
"time.time",
"numpy.round"
] | [((1742, 1758), 'numpy.median', 'np.median', (['dists'], {}), '(dists)\n', (1751, 1758), True, 'import numpy as np\n'), ((1890, 1915), 'numpy.sqrt', 'np.sqrt', (['(1 / (2 * gammas))'], {}), '(1 / (2 * gammas))\n', (1897, 1915), True, 'import numpy as np\n'), ((10935, 10946), 'time.time', 'time.time', ([], {}), '()\n', (10944, 10946), False, 'import time\n'), ((12212, 12223), 'time.time', 'time.time', ([], {}), '()\n', (12221, 12223), False, 'import time\n'), ((3756, 3803), 'os.path.join', 'os.path.join', (['save_dir', 'cruise', '"""cps_phys.json"""'], {}), "(save_dir, cruise, 'cps_phys.json')\n", (3768, 3803), False, 'import os\n'), ((3198, 3228), 'os.path.join', 'os.path.join', (['save_dir', 'cruise'], {}), '(save_dir, cruise)\n', (3210, 3228), False, 'import os\n'), ((3255, 3285), 'os.path.join', 'os.path.join', (['save_dir', 'cruise'], {}), '(save_dir, cruise)\n', (3267, 3285), False, 'import os\n'), ((10624, 10690), 'os.path.join', 'os.path.join', (['features_dir', "(cruise + '_features_simple_avg.pickle')"], {}), "(features_dir, cruise + '_features_simple_avg.pickle')\n", (10636, 10690), False, 'import os\n'), ((1795, 1819), 'numpy.percentile', 'np.percentile', (['dists', '(99)'], {}), '(dists, 99)\n', (1808, 1819), True, 'import numpy as np\n'), ((1836, 1859), 'numpy.percentile', 'np.percentile', (['dists', '(1)'], {}), '(dists, 1)\n', (1849, 1859), True, 'import numpy as np\n'), ((4934, 4949), 'numpy.round', 'np.round', (['bw', '(3)'], {}), '(bw, 3)\n', (4942, 4949), True, 'import numpy as np\n'), ((4846, 4861), 'numpy.round', 'np.round', (['bw', '(3)'], {}), '(bw, 3)\n', (4854, 4861), True, 'import numpy as np\n')] |
import numpy as np
import skimage.segmentation
import skimage.io
import keras.backend as K
import tensorflow as tf
debug = False
def channel_precision(channel, name):
    """Build a Keras metric that computes precision for one softmax channel.

    The prediction is hardened via argmax: a pixel counts as "predicted
    positive" only when *channel* wins the argmax over the last axis.
    """
    def precision_func(y_true, y_pred):
        predicted = K.cast(tf.equal(K.argmax(y_pred, axis=-1), channel), "float32")
        true_positives = K.sum(K.round(K.clip(y_true[:, :, :, channel] * predicted, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(predicted, 0, 1)))
        # epsilon guards against division by zero when nothing is predicted
        return true_positives / (predicted_positives + K.epsilon())
    precision_func.__name__ = name
    return precision_func
def channel_recall(channel, name):
    """Build a Keras metric that computes recall for one softmax channel.

    A pixel counts as "predicted positive" only when *channel* wins the
    argmax over the last axis of the prediction tensor.
    """
    def recall_func(y_true, y_pred):
        predicted = K.cast(tf.equal(K.argmax(y_pred, axis=-1), channel), "float32")
        true_positives = K.sum(K.round(K.clip(y_true[:, :, :, channel] * predicted, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true[:, :, :, channel], 0, 1)))
        # epsilon guards against division by zero when the class is absent
        return true_positives / (possible_positives + K.epsilon())
    recall_func.__name__ = name
    return recall_func
## PROBMAP TO CONTOURS TO LABEL
def probmap_to_contour(probmap, threshold=0.5):
    """Threshold a 2D probability map into a boolean outline mask."""
    return probmap >= threshold
def contour_to_label(outline, image):
    """Convert a boundary-outline mask into a label image of cell regions.

    Connected components of the non-outline area are labelled, tiny
    components are removed, and components whose mean image intensity is
    low (background) are discarded; the rest are relabelled 1, 2, ...

    :param outline: Boolean mask where True marks boundary pixels.
    :param image: Intensity image; the 50/255 threshold below assumes
        values scaled to [0, 1] — TODO confirm with callers.
    :return: uint16 label image (0 = background / contour).
    """
    # Fix: the module only imports skimage.segmentation / skimage.io, so
    # skimage.morphology may not be loaded — import it explicitly here.
    import skimage.morphology
    # see notebook contours_to_labels for why we do what we do here
    # connected components of everything that is NOT outline
    labels = skimage.morphology.label(outline, background=1)
    skimage.morphology.remove_small_objects(labels, min_size=100, in_place=True)
    n_ccs = np.max(labels)
    # buffer label image
    filtered_labels = np.zeros_like(labels, dtype=np.uint16)
    # relabel as we don't know what id the background component received
    label_index = 1
    # start at 1 (0 is contours), end at number of connected components
    for i in range(1, n_ccs + 1):
        mask = labels == i
        # mean image intensity inside this component
        mean = np.mean(np.take(image.flatten(), np.nonzero(mask.flatten())))
        # keep only sufficiently bright components (drop background blobs)
        if mean > 50 / 255:
            filtered_labels[mask] = label_index
            label_index = label_index + 1
    return filtered_labels
## PROBMAP TO PRED TO LABEL
def probmap_to_pred(probmap, boundary_boost_factor, cell_boost_factor=1):
    """Collapse a 3-channel probability map into a per-pixel class prediction.

    The boundary (and optionally the cell) channel is scaled before the
    argmax so that boundaries stay visible; this shrinks cells a little
    but avoids undersegmentation.
    """
    weights = [1, cell_boost_factor, boundary_boost_factor]
    return np.argmax(probmap * weights, -1)
def pred_to_label(pred, cell_min_size, cell_label=1):
    """Turn a per-pixel class prediction into a label image of cells.

    :param pred: Integer class map (e.g. output of probmap_to_pred).
    :param cell_min_size: Minimum hole/object size (in pixels) to keep.
    :param cell_label: Class index that marks cell pixels.
    :return: Integer label image; each cell gets its own id, 0 elsewhere.
    """
    cell = (pred == cell_label)
    # fix cells
    # NOTE(review): skimage >= 0.16 renamed remove_small_holes' `min_size`
    # parameter to `area_threshold`; this call assumes an older skimage —
    # confirm against the pinned dependency version.
    cell = skimage.morphology.remove_small_holes(cell, min_size=cell_min_size)
    cell = skimage.morphology.remove_small_objects(cell, min_size=cell_min_size)
    # label cells only
    [label, num] = skimage.morphology.label(cell, return_num=True)
    return label
def compare_two_labels(label_model, label_gt, return_IoU_matrix):
    """Compare two nucleus label images via pairwise IoU.

    A (ground-truth, model) nucleus pair counts as a match when its IoU
    exceeds 0.5.

    :return: [over-detections, under-detections, mean IoU] and, when
        return_IoU_matrix is True, the (gt x model) IoU matrix appended.
    """
    n_gt = np.max(label_gt)
    n_model = np.max(label_model)
    # degenerate cases: one or both images contain no nuclei at all
    if n_gt == 0 and n_model == 0:
        return [0, 0, 1, np.empty(0)] if return_IoU_matrix else [0, 0, 1]
    if n_model == 0:
        return [0, n_gt, 0, np.empty(0)] if return_IoU_matrix else [0, n_gt, 0]
    if n_gt == 0:
        return [n_model, 0, 0, np.empty(0)] if return_IoU_matrix else [n_model, 0, 0]
    # pairwise IoU matrix between every GT nucleus and every model nucleus
    # TODO improve runtime of this algorithm
    ious = np.full((n_gt, n_model), -1, dtype=np.float32)
    for gi in range(1, n_gt + 1):
        mask_gt = label_gt == gi
        area_gt = np.sum(mask_gt)
        for mi in range(1, n_model + 1):
            if debug:
                print(gi, "/", mi)
            mask_model = label_model == mi
            area_model = np.sum(mask_model)
            # intersection of the two boolean masks
            overlap = np.sum((mask_gt == mask_model) * mask_gt)
            ious[gi - 1, mi - 1] = overlap / (area_gt + area_model - overlap)
    # matches and error counts
    detection_map = (ious > 0.5)
    nb_matches = np.sum(detection_map)
    matched_ious = ious * detection_map
    nb_overdetection = n_model - nb_matches
    nb_underdetection = n_gt - nb_matches
    mean_IoU = np.mean(np.sum(matched_ious, axis=1))
    if return_IoU_matrix:
        return [nb_overdetection, nb_underdetection, mean_IoU, ious]
    return [nb_overdetection, nb_underdetection, mean_IoU]
def splits_and_merges_3_class(y_model_pred, y_gt_pred):
    """Segment both 3-class predictions and compare the resulting labels."""
    gt_labels = pred_to_label(y_gt_pred, cell_min_size=2)
    model_labels = pred_to_label(y_model_pred, cell_min_size=2)
    return compare_two_labels(model_labels, gt_labels, False)
def splits_and_merges_boundary(y_model_outline, y_gt_outline, image):
    """Turn both outline masks into label images and compare them."""
    gt_labels = contour_to_label(y_gt_outline, image)
    model_labels = contour_to_label(y_model_outline, image)
    return compare_two_labels(model_labels, gt_labels, False)
| [
"keras.backend.clip",
"numpy.argmax",
"numpy.max",
"keras.backend.argmax",
"numpy.sum",
"numpy.empty",
"keras.backend.epsilon",
"numpy.full",
"numpy.zeros_like"
] | [((1574, 1588), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (1580, 1588), True, 'import numpy as np\n'), ((1637, 1675), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {'dtype': 'np.uint16'}), '(labels, dtype=np.uint16)\n', (1650, 1675), True, 'import numpy as np\n'), ((2484, 2554), 'numpy.argmax', 'np.argmax', (['(probmap * [1, cell_boost_factor, boundary_boost_factor])', '(-1)'], {}), '(probmap * [1, cell_boost_factor, boundary_boost_factor], -1)\n', (2493, 2554), True, 'import numpy as np\n'), ((3086, 3102), 'numpy.max', 'np.max', (['label_gt'], {}), '(label_gt)\n', (3092, 3102), True, 'import numpy as np\n'), ((3125, 3144), 'numpy.max', 'np.max', (['label_model'], {}), '(label_model)\n', (3131, 3144), True, 'import numpy as np\n'), ((3866, 3928), 'numpy.full', 'np.full', (['(nb_nuclei_gt, nb_nuclei_model)', '(-1)'], {'dtype': 'np.float32'}), '((nb_nuclei_gt, nb_nuclei_model), -1, dtype=np.float32)\n', (3873, 3928), True, 'import numpy as np\n'), ((4740, 4761), 'numpy.sum', 'np.sum', (['detection_map'], {}), '(detection_map)\n', (4746, 4761), True, 'import numpy as np\n'), ((4178, 4196), 'numpy.sum', 'np.sum', (['nucleus_gt'], {}), '(nucleus_gt)\n', (4184, 4196), True, 'import numpy as np\n'), ((4940, 4970), 'numpy.sum', 'np.sum', (['detection_rate'], {'axis': '(1)'}), '(detection_rate, axis=1)\n', (4946, 4970), True, 'import numpy as np\n'), ((4434, 4455), 'numpy.sum', 'np.sum', (['nucleus_model'], {}), '(nucleus_model)\n', (4440, 4455), True, 'import numpy as np\n'), ((4494, 4544), 'numpy.sum', 'np.sum', (['((nucleus_gt == nucleus_model) * nucleus_gt)'], {}), '((nucleus_gt == nucleus_model) * nucleus_gt)\n', (4500, 4544), True, 'import numpy as np\n'), ((247, 272), 'keras.backend.argmax', 'K.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (255, 272), True, 'import keras.backend as K\n'), ((334, 385), 'keras.backend.clip', 'K.clip', (['(y_true[:, :, :, channel] * y_pred_tmp)', '(0)', '(1)'], {}), '(y_true[:, :, :, channel] * 
y_pred_tmp, 0, 1)\n', (340, 385), True, 'import keras.backend as K\n'), ((429, 453), 'keras.backend.clip', 'K.clip', (['y_pred_tmp', '(0)', '(1)'], {}), '(y_pred_tmp, 0, 1)\n', (435, 453), True, 'import keras.backend as K\n'), ((516, 527), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (525, 527), True, 'import keras.backend as K\n'), ((732, 757), 'keras.backend.argmax', 'K.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (740, 757), True, 'import keras.backend as K\n'), ((819, 870), 'keras.backend.clip', 'K.clip', (['(y_true[:, :, :, channel] * y_pred_tmp)', '(0)', '(1)'], {}), '(y_true[:, :, :, channel] * y_pred_tmp, 0, 1)\n', (825, 870), True, 'import keras.backend as K\n'), ((913, 951), 'keras.backend.clip', 'K.clip', (['y_true[:, :, :, channel]', '(0)', '(1)'], {}), '(y_true[:, :, :, channel], 0, 1)\n', (919, 951), True, 'import keras.backend as K\n'), ((1007, 1018), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1016, 1018), True, 'import keras.backend as K\n'), ((3318, 3329), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3326, 3329), True, 'import numpy as np\n'), ((3531, 3542), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3539, 3542), True, 'import numpy as np\n'), ((3752, 3763), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3760, 3763), True, 'import numpy as np\n')] |
"""
Add samples
This script takes a wav file that must have n peaks and create n different files for each peak.
"""
import numpy as np
from scipy.io.wavfile import read,write
import peakutils, os
# average
# 3169 / 2 = 1584.5
# Number of samples taken on each side of a detected peak when slicing.
side = 10000
# The stander for the peaks
# Minimum amplitude ("standard"/threshold) a sample must exceed to count as a peak.
stander = 0
"""
Write the .wav file
"""
def exportFile(filename,rate,data):
write(filename, rate, data)
pass
# try:
# write(filename, rate, data)
# except:
# print("exportFile(filename,rate,data)")
"""
Get a peak by its index from a list
Return: a list (a peak)
"""
def getPeak(source,index,side):
if side <= 10:
return None
try:
leftSide = source[index - side :index]
rightSide = source[index:index + side]
#print(len(leftSide),len(rightSide))
return leftSide + rightSide
except:
getPeak(source,index,side-10)
"""
This function takes a peak as an input and and gets it from the main list (audio)
"""
def fun1(source,rate,peaksList,filename):
for peak in peaksList:
# Every peak is [value,index]
value = peak[0]
index = peak[1]
tmpList = getPeak(source,index,side)
if type(tmpList) == np.ndarray:
# save each peak
filename = str(index)+filename
exportFile(filename,rate,tmpList)
else:
print("Else fun1()")
"""
Import the audio
"""
def importFile(filenmae):
print(filenmae)
rate, raw = read(filenmae)
data = np.array(raw, dtype=np.int16)
return rate, raw, data
"""
Lambda
"""
def takeOne(elem):
return elem[0]
"""
filter the peaks next to each other
"""
def filter1(peaksList):
# Sort list with key.
# Explanation: Since the peaks that are near to each other usually are
peaksListLength = len(peaksList)
peaksList.sort(key=takeOne,reverse=True)
i = 0
# Filter the close peaks
while ((i + 1) < peaksListLength):
currentElement = peaksList[i]
nextElement = peaksList[i+1]
currentValue = currentElement[0]
currentIndex = currentElement[1]
nextValue = nextElement[0]
nextIndex = nextElement[1]
# If the difference is less than 1000 index
if abs(currentIndex-nextIndex) < 10:
# Remove the next one since it should be less than the value of the current
# Because it's already sorted, duh!
peaksList.remove(peaksList[i+1])
peaksListLength -= 1
break
i += 1
# Filter the short False positive peaks
# TODO
return peaksList
"""
inset the highest peaks in a list and checks for other conditions.
"""
def getTheHighestNPeaksHelper(value, index, peaksList, N):
# If the list has less than the number of the peaks.
# And if it is higher than the stander, which is a stander can be configured above.
if len(peaksList) < N & value > stander:
peaksList.append([value,index])
return peaksList
else:
# If the list already has more than 10 peaks.
# Then starts a comparison/filtration process using filter1()
peaksList = filter1(peaksList)
print(peaksList)
# After the comparison/filtration function
# Check if the current value is
for element in peaksList:
highestValue = element[0]
highestValueIndex = element[1]
if value > highestValue:
peaksList.remove(element)
peaksList.append([value,index])
return peaksList
return peaksList
"""
Get the highest peaks in a list
data: a list represents the audio.
N: the number if peaks.
Return: A list of [value,index]s
where, value is the number of how high is the peak in "data",
and index is the index of the value in "data".
"""
def getTheHighestNPeaks(data,N):
peaksList = []
length = len(data)
index = 0
while (index < length):
value = data[index]
peaksList = getTheHighestNPeaksHelper(value,index,peaksList,N)
index += 1
return peaksList
def print1():
    """Display the (one-line) menu header."""
    print("Menu")
"""
A debugging function, it shows the differences between the peaks as percentages.
"""
def debug1(peaksList,length):
for e in peaksList:
index = e[1]
print(index * 100 / length)
"""
Using peakutils library
"""
def findpeaks(time_series,N):
cb = time_series
indexes = peakutils.indexes(cb, thres=0.02 / max(cb), min_dist=(int(len(time_series)/int(N))))
sList=[]
print(indexes)
for i in indexes:
sList.append([time_series[i],i])
print(i)
return sList
def derive(filePath, N):
    """Load a .wav file, detect N peaks, and export each peak as its own file."""
    rate, raw, samples = importFile(filePath)
    peaks = findpeaks(samples, N)
    debug1(peaks, len(samples))
    return fun1(samples, rate, peaks, filePath)
"""
Main
"""
def main():
print1()
# Dynamic
# filename = input("Enter the file name: \n>")
# howManyPeaks = input("Enter how many peaks: \n>")
# Static values are always better :)
filename = 'ready1.wav'
howManyPeaks = '4'
cwd = os.getcwd()
fullfilename = cwd + '/'+filename
rate, raw, data = importFile(fullfilename)
sList = findpeaks(data,howManyPeaks)
# peaksList = getTheHighestNPeaks(data,int(howManyPeaks))
# print(len(data))
# print(peaksList)
# test1(peaksList,len(data))
debug1(sList, len(data))
# data = fun1(data,rate,peaksList,filename)
data = fun1(data, rate, sList, filename)
# Run the peak-splitting pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.array",
"scipy.io.wavfile.read",
"scipy.io.wavfile.write",
"os.getcwd"
] | [((353, 380), 'scipy.io.wavfile.write', 'write', (['filename', 'rate', 'data'], {}), '(filename, rate, data)\n', (358, 380), False, 'from scipy.io.wavfile import read, write\n'), ((1462, 1476), 'scipy.io.wavfile.read', 'read', (['filenmae'], {}), '(filenmae)\n', (1466, 1476), False, 'from scipy.io.wavfile import read, write\n'), ((1488, 1517), 'numpy.array', 'np.array', (['raw'], {'dtype': 'np.int16'}), '(raw, dtype=np.int16)\n', (1496, 1517), True, 'import numpy as np\n'), ((5050, 5061), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5059, 5061), False, 'import peakutils, os\n')] |
import warnings
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from models.cnn_models import CNN1
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical, plot_model
# gpus = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(gpus[0], True)
# Enable memory growth on every visible GPU so TensorFlow allocates GPU
# memory on demand instead of grabbing it all upfront.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def plot_image_grid(X, y, nrows, ncols, figname="sample.png"):
    """Plot the first nrows*ncols images of X in a grid and save to *figname*.

    Each subplot is labelled with the argmax of the corresponding one-hot
    row of y (i.e. the digit class).

    :param X: Indexable collection of imshow-compatible images.
    :param y: One-hot encoded labels aligned with X.
    :param nrows: Number of grid rows.
    :param ncols: Number of grid columns.
    :param figname: Output image path.
    """
    plt.figure()
    for i in range(nrows * ncols):
        plt.subplot(nrows, ncols, i + 1)
        plt.imshow(X[i])
        plt.xlabel(np.argmax(y[i]))
    # Fix: save *before* show() — on interactive backends show() can leave
    # the current figure empty, producing a blank output file.
    plt.savefig(figname)
    plt.show()
    plt.close()
def preprocess_image_input(X):
    """Reshape flat MNIST rows to (n, 28, 28, 1) float32 scaled to [0, 1].

    Pixel values arrive as 8-bit integers in [0, 255]; dividing by 255
    maps them onto [0.0, 1.0], which is easier and more precise to do math
    on than quantized integers.
    """
    images = X.reshape(X.shape[0], 28, 28, 1).astype('float32')
    return images / 255
def predict_test_classes(model, X_test):
    """Return the predicted class index (argmax over the last axis) per sample."""
    probabilities = model.predict(X_test)
    return np.argmax(probabilities, axis=-1)
def main():
    """Train CNN1 on the Kaggle digit-recognizer data and write a submission CSV.

    Loads the train/test CSVs, preprocesses the images, trains the model,
    evaluates on a held-out split, plots sample grids, and writes
    (ImageId, Label) rows to ../output/cnn1_func.csv.
    """
    df_train = pd.read_csv(os.path.join(
        os.path.dirname(__file__), "../input/train.csv"))
    df_test = pd.read_csv(os.path.join(
        os.path.dirname(__file__), "../input/test.csv"))
    # first column of the training CSV is the label, the rest are pixels
    X, y = df_train.iloc[:, 1:].values, df_train.iloc[:, 0].values
    X_test = df_test.iloc[0:, 0:].values
    X = preprocess_image_input(X)
    X_test = preprocess_image_input(X_test)
    y = np.array(y).reshape(-1, 1).astype('int32')
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, test_size=0.1, random_state=42)
    y_train = to_categorical(y_train)
    y_valid = to_categorical(y_valid)
    print(X_train[0].shape)
    print(X.shape, y.shape)
    print(type(X), type(y))
    nrows = 2
    ncols = 3
    warnings.simplefilter(action='ignore', category=FutureWarning)
    plot_image_grid(X_train, y_train, nrows,
                    ncols, figname=os.path.join(os.path.dirname(__file__),
                                                 "../resources/images/training_image_lables.png"))
    model = CNN1()
    model.build(input_shape=X.shape)
    epochs = 10
    batch_size = 32
    # Fix: the original assignment ended with a stray trailing comma, which
    # made `loss` a 1-tuple ("categorical_crossentropy",) instead of a string.
    loss = "categorical_crossentropy"
    optimizer = "adam"
    metrics = ["accuracy"]
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    model.fit(
        X_train, y_train, epochs=epochs,
        batch_size=batch_size, verbose=1,
    )
    y_pred = np.argmax(model.predict(X_valid), axis=-1)
    print(y_pred)
    print("Evaluating on valid data")
    results = model.evaluate(X_valid, y_valid, batch_size=batch_size)
    print("valid loss, valid acc:", results)
    # Creating the submission file (ImageId is 1-based).
    header = ["ImageId", "Label"]
    y_test_pred = predict_test_classes(model, X_test)
    print(y_test_pred)
    print("X_test shape is: ", X_test.shape)
    print("y_test_pred shape is: ", y_test_pred.shape)
    rows = [(image_id, pred) for image_id, pred in enumerate(y_test_pred, start=1)]
    plot_image_grid(X_test, to_categorical(y_test_pred), nrows,
                    ncols, figname=os.path.join(os.path.dirname(__file__),
                                                 "../resources/images/test_image_predictions.png")
                    )
    with open(
            os.path.join(os.path.dirname(__file__),
                         "../output/cnn1_func.csv"),
            "w", encoding="UTF8", newline="") as f:
        writer = csv.writer(f)
        # Write the headers, then all prediction rows
        writer.writerow(header)
        writer.writerows(rows)
    dot_img_file = os.path.join(os.path.dirname(__file__),
                                "../resources/images/cnn1_func.png")
    plot_model(model, to_file=dot_img_file, show_shapes=True)
# Entry point: run the full training + submission pipeline.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.savefig",
"tensorflow.config.experimental.set_memory_growth",
"sklearn.model_selection.train_test_split",
"csv.writer",
"models.cnn_models.CNN1",
"numpy.argmax",
"tensorflow.keras.utils.plot_model",
"numpy.arra... | [((412, 463), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (456, 463), True, 'import tensorflow as tf\n'), ((485, 536), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (525, 536), True, 'import tensorflow as tf\n'), ((618, 656), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols'}), '(nrows=nrows, ncols=ncols)\n', (630, 656), True, 'import matplotlib.pyplot as plt\n'), ((853, 863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (861, 863), True, 'import matplotlib.pyplot as plt\n'), ((868, 888), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (879, 888), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2079), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2076, 2079), True, 'import numpy as np\n'), ((2181, 2235), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(X, y, test_size=0.1, random_state=42)\n', (2197, 2235), False, 'from sklearn.model_selection import train_test_split\n'), ((2260, 2283), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (2274, 2283), False, 'from tensorflow.keras.utils import to_categorical, plot_model\n'), ((2298, 2321), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_valid'], {}), '(y_valid)\n', (2312, 2321), False, 'from tensorflow.keras.utils import to_categorical, plot_model\n'), ((2469, 2531), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (2490, 2531), False, 'import warnings\n'), ((2763, 2769), 'models.cnn_models.CNN1', 'CNN1', ([], {}), '()\n', (2767, 2769), False, 'from models.cnn_models import CNN1\n'), 
((4755, 4812), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': 'dot_img_file', 'show_shapes': '(True)'}), '(model, to_file=dot_img_file, show_shapes=True)\n', (4765, 4812), False, 'from tensorflow.keras.utils import to_categorical, plot_model\n'), ((755, 787), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', 'ncols', '(i + 1)'], {}), '(nrows, ncols, i + 1)\n', (766, 787), True, 'import matplotlib.pyplot as plt\n'), ((796, 812), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X[i]'], {}), '(X[i])\n', (806, 812), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4090), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test_pred'], {}), '(y_test_pred)\n', (4077, 4090), False, 'from tensorflow.keras.utils import to_categorical, plot_model\n'), ((4485, 4498), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4495, 4498), False, 'import csv\n'), ((4655, 4680), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4670, 4680), False, 'import os\n'), ((832, 847), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (841, 847), True, 'import numpy as np\n'), ((1724, 1749), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1739, 1749), False, 'import os\n'), ((1822, 1847), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1837, 1847), False, 'import os\n'), ((2625, 2650), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2640, 2650), False, 'import os\n'), ((4147, 4172), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4162, 4172), False, 'import os\n'), ((4335, 4360), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4350, 4360), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script doing the actual virtual screening of a library of compounds over a previously built Bayesian model.
"""
import argparse
import logging
import numpy as np
import csv
import json
import math
from rdkit import Chem
import common
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = 'X11'
def score_feature_vector(feature_vector, probs, cnt_bins, restrict_features):
    """
    Compute the log-likelihood-ratio score of a feature vector against the model.

    Each feature value (already scaled to [0, 1]) is mapped to one of cnt_bins
    bins; the log of the active/inactive probability ratio for that bin is
    accumulated, and the log prior ratio is added at the end.

    :param feature_vector: Vector of feature values scaled to the [0, 1] interval.
    :param probs: Model storing the probabilities of individual feature values of
        being related to activity.
    :param cnt_bins: Number of bins used when discretising feature values.
    :param restrict_features: Indices of the features to be evaluated; pass all
        indices to evaluate every feature.
    :return: Log of the likelihood ratio of the input feature vector.
    """
    score = 0
    for ix_feature in restrict_features:
        feature_value = feature_vector[ix_feature]
        fv = int(math.floor(feature_value * cnt_bins))  # values are scaled to [0, 1]
        if fv == cnt_bins:
            fv -= 1  # feature value 1.0 maps into the last bin
        # Fix: index the probability tables by the feature's own index, not by
        # loop position; the two only coincide when restrict_features covers
        # every feature in order (a restricted subset scored wrong features).
        score += math.log(
            probs["feature_value_in_actives"][ix_feature][fv]
            / probs["feature_value_in_inactives"][ix_feature][fv])
    score += math.log(probs["active"] / probs["inactive"])
    return score
def screen(fn_ds_json, model, fragments_features, restrict_features):
    """
    Screening of fragments against the model.

    Each molecule's score is the mean of the scores of its distinct fragments.

    :param fn_ds_json: JSON file with molecules to screen and their fragments
        (as returned by biochem-tools).
    :param model: Model storing the probabilities of individual feature values
        of being related to activity.
    :param fragments_features: Dictionary mapping fragment SMILES to their
        normalized feature vectors.
    :param restrict_features: Indices of the features to evaluate.
    :return: List of dictionaries {"molecule", "score"}; molecules with no
        scored fragments are omitted.
    """
    with common.open_file(fn_ds_json) as f:
        mols = json.load(f)
    for mol in mols:
        fragments_scores = []
        processed_frags = []
        for frag in mol["fragments"]:
            # score every distinct fragment SMILES only once per molecule
            if frag["smiles"] not in processed_frags:
                processed_frags.append(frag["smiles"])
                fragments_scores.append(score_feature_vector(
                    fragments_features[frag["smiles"]], model["probabilities"],
                    model["cnt_bins"], restrict_features))
        if len(fragments_scores) > 0:
            # Fix: `x is not np.nan` only catches the np.nan singleton object;
            # use math.isnan so any NaN float is excluded from the mean.
            valid_scores = [x for x in fragments_scores
                            if x is not None and not math.isnan(x)]
            mol["score"] = np.mean(valid_scores)
    result = [{"molecule": mol["name"], "score": mol["score"]}
              for mol in mols if "score" in mol.keys()]
    return result
def get_normalized_features(fn_ds_csv, model):
    """
    When fragments are extracted from the screening library and feature vectors are computed, they need to be
    normalized in the same way the model was normalized. That happens in this function.
    :param fn_ds_csv: CSV file with fragments from active molecules and corresponding features.
    :param model: The Bayes model including normalization information.
    :return: Dictionary with keys corresponding to fragments and values to normalized feature vectors.
    """
    feature_names = model["features_names"]
    ixs_uncorr_features = []
    features = []
    fragments = []
    # First (header) row: remember the column indices of the model's features.
    # Subsequent rows: column 0 is the fragment id, the rest are raw values.
    for row in csv.reader(common.open_file(fn_ds_csv)):
        if not row: continue
        if len(ixs_uncorr_features) == 0:
            for ix in range(len(row)):
                if row[ix] in feature_names:
                    features.append([])
                    ixs_uncorr_features.append(ix)
        else:
            fragments.append(row[0])
            for ix in range(len(ixs_uncorr_features)) :
                features[ix].append(row[ixs_uncorr_features[ix]])
    # Convert strings to numbers
    features = [[common.to_float(y) for y in x] for x in features]
    # Imputation: replace NaNs with the per-feature imputation value stored in the model
    for ix in range(len(features)):
        features[ix] = [model["normalization"]["imputation_values"][ix] if math.isnan(x) else x for x in features[ix]]
    # Normalization
    # Some values in test set can be out of the min-max range of train set, therefore we set
    # such values to the max/mins
    feature_matrix = np.array(features)
    for ix in range(len(model["normalization"]["mins"])):
        feature_matrix[ix] = feature_matrix[ix].clip(model["normalization"]["mins"][ix], model["normalization"]["maxs"][ix])
        # sanity check: report any value still outside the training min/max range
        for ix1 in range(len(feature_matrix[ix])):
            if feature_matrix[ix][ix1] < model["normalization"]["mins"][ix] or feature_matrix[ix][ix1] > model["normalization"]["maxs"][ix]:
                print(ix, ix1)
    # rows = fragments, columns = features; then min-max scale to [0, 1]
    feature_matrix = feature_matrix.transpose()
    feature_matrix = (feature_matrix - np.array(model["normalization"]["mins"])) / \
                     (np.array(model["normalization"]["maxs"]) - np.array(model["normalization"]["mins"]))
    fragments_features = {}
    for ix in range(len(fragments)):
        fragments_features[fragments[ix]] = feature_matrix[ix]
    # with open("normalized.features.screen.csv", "w") as f:
    #     line = "Name"
    #     for name in model["features_names"]: line += ",{}".format(name)
    #     f.write(line + "\n")
    #     for ix in range(len(fragments)):
    #         line = fragments[ix]
    #         for feature in feature_matrix[ix]: line += ",{}".format(feature)
    #         f.write(line + "\n")
    return fragments_features
def main():
    """
    Screening pipeline driver.

    The function implements the following steps:
    1. Reading in the model.
    2. Extracting fragments for every input molecule (biochem-tools), stored as JSON.
    3. Generating features for the extracted fragments (biochem-tools), stored as CSV.
    4. Normalizing the features.
    5. Screening.
    6. Outputting the ranked library together with the scores.
    7. Optionally deleting the intermediate files (fragments JSON, features CSV).
    :return: None
    """
    common.init_logging()
    with common.open_file(args.model) as model_file:
        model = json.load(model_file)
    [fragments_json] = common.fragments_extraction([args.dataset],
                                                 model["fragment_types"])
    [features_csv] = common.descriptors_extraction([fragments_json],
                                                  model["features_generator"],
                                                  model["path_to_padel"])
    fragments_features = get_normalized_features(features_csv, model)
    # By default the model is evaluated on every feature it was trained with.
    restrict_features = list(range(len(model['features_names'])))
    if args.features:
        # The user restricted evaluation to a comma-separated feature subset;
        # unknown names are silently ignored, duplicates are kept.
        known = [name.lower() for name in model['features_names']]
        requested = (item.strip().lower() for item in args.features.split(","))
        restrict_features = [known.index(name) for name in requested
                             if name in known]
    results = screen(fragments_json, model, fragments_features,
                     restrict_features)
    with common.open_file(args.output, "w") as out_file:
        for record in results:
            out_file.write("{}: {}\n".format(record["molecule"],
                                             record["score"]))
    if args.clean:
        common.delete_files([fragments_json, features_csv])
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # (option strings, add_argument keyword arguments)
    cli_options = [
        (("-m", "--model"),
         {"required": True,
          "help": "Output JSON file containing the resulting model."}),
        (("-d", "--dataset"),
         {"required": True,
          "help": "Molecules in SDF or SMILES format to rank."}),
        (("-o", "--output"),
         {"required": True,
          "help": "Output file for resulting ranking."}),
        (("-f", "--features"),
         {"required": False,
          "help": "Comma separated list of features which will be considered when evaluating model. "
                  "Using this features, one can limit the set of features to be used to assess activity, "
                  "i.e. one can use only the most relevant features."}),
        (("-c", "--clean"),
         {"action": 'store_true',
          "help": "Delete the fragment and features files."}),
    ]
    for option_strings, option_kwargs in cli_options:
        parser.add_argument(*option_strings, **option_kwargs)
    args = parser.parse_args()
    main()
"numpy.mean",
"argparse.ArgumentParser",
"math.floor",
"common.to_float",
"common.fragments_extraction",
"math.log",
"common.delete_files",
"numpy.array",
"common.init_logging",
"common.descriptors_extraction",
"json.load",
"common.open_file",
"math.isnan"
] | [((1746, 1791), 'math.log', 'math.log', (["(probs['active'] / probs['inactive'])"], {}), "(probs['active'] / probs['inactive'])\n", (1754, 1791), False, 'import math\n'), ((4903, 4921), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4911, 4921), True, 'import numpy as np\n'), ((6717, 6738), 'common.init_logging', 'common.init_logging', ([], {}), '()\n', (6736, 6738), False, 'import common\n'), ((8020, 8045), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8043, 8045), False, 'import argparse\n'), ((1601, 1703), 'math.log', 'math.log', (["(probs['feature_value_in_actives'][ix][fv] / probs[\n 'feature_value_in_inactives'][ix][fv])"], {}), "(probs['feature_value_in_actives'][ix][fv] / probs[\n 'feature_value_in_inactives'][ix][fv])\n", (1609, 1703), False, 'import math\n'), ((2375, 2403), 'common.open_file', 'common.open_file', (['fn_ds_json'], {}), '(fn_ds_json)\n', (2391, 2403), False, 'import common\n'), ((2426, 2438), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2435, 2438), False, 'import json\n'), ((3989, 4016), 'common.open_file', 'common.open_file', (['fn_ds_csv'], {}), '(fn_ds_csv)\n', (4005, 4016), False, 'import common\n'), ((6751, 6779), 'common.open_file', 'common.open_file', (['args.model'], {}), '(args.model)\n', (6767, 6779), False, 'import common\n'), ((6803, 6815), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6812, 6815), False, 'import json\n'), ((6842, 6910), 'common.fragments_extraction', 'common.fragments_extraction', (['[args.dataset]', "model['fragment_types']"], {}), "([args.dataset], model['fragment_types'])\n", (6869, 6910), False, 'import common\n'), ((6934, 7034), 'common.descriptors_extraction', 'common.descriptors_extraction', (['[fn_ds_json]', "model['features_generator']", "model['path_to_padel']"], {}), "([fn_ds_json], model['features_generator'],\n model['path_to_padel'])\n", (6963, 7034), False, 'import common\n'), ((1427, 1463), 'math.floor', 'math.floor', (['(feature_value * 
cnt_bins)'], {}), '(feature_value * cnt_bins)\n', (1437, 1463), False, 'import math\n'), ((4502, 4520), 'common.to_float', 'common.to_float', (['y'], {}), '(y)\n', (4517, 4520), False, 'import common\n'), ((5422, 5462), 'numpy.array', 'np.array', (["model['normalization']['mins']"], {}), "(model['normalization']['mins'])\n", (5430, 5462), True, 'import numpy as np\n'), ((5491, 5531), 'numpy.array', 'np.array', (["model['normalization']['maxs']"], {}), "(model['normalization']['maxs'])\n", (5499, 5531), True, 'import numpy as np\n'), ((5534, 5574), 'numpy.array', 'np.array', (["model['normalization']['mins']"], {}), "(model['normalization']['mins'])\n", (5542, 5574), True, 'import numpy as np\n'), ((7749, 7783), 'common.open_file', 'common.open_file', (['args.output', '"""w"""'], {}), "(args.output, 'w')\n", (7765, 7783), False, 'import common\n'), ((7931, 7975), 'common.delete_files', 'common.delete_files', (['[fn_ds_json, fn_ds_csv]'], {}), '([fn_ds_json, fn_ds_csv])\n', (7950, 7975), False, 'import common\n'), ((3029, 3104), 'numpy.mean', 'np.mean', (['[x for x in fragments_scores if x is not np.nan and x is not None]'], {}), '([x for x in fragments_scores if x is not np.nan and x is not None])\n', (3036, 3104), True, 'import numpy as np\n'), ((4685, 4698), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (4695, 4698), False, 'import math\n')] |
""" impyute.imputation.ts.locf """
import numpy as np
from impyute.util import find_null
from impyute.util import checks
from impyute.util import preprocess
@preprocess
@checks
def locf(data, axis=0):
    """ Last Observation Carried Forward

    For each set of missing indices, use the value of one row before (same
    column). In the case that the missing value is the first row, look one
    row ahead instead. If this next row is also NaN, look to the next row.
    Repeat until you find a row in this column that's not NaN. All the rows
    before will be filled with this value.

    Parameters
    ----------
    data: numpy.ndarray
        Data to impute.
    axis: boolean (optional)
        0 if time series is in row format (Ex. data[0][:] is 1st data point).
        1 if time series is in col format (Ex. data[:][0] is 1st data point).

    Returns
    -------
    numpy.ndarray
        Imputed data.
    """
    if axis == 0:
        # Work column-wise: transpose so each series becomes a column.
        data = np.transpose(data)
    elif axis == 1:
        pass
    null_xy = find_null(data)
    for x_i, y_i in null_xy:
        # Simplest scenario, look one row back
        if x_i-1 > -1:
            data[x_i][y_i] = data[x_i-1][y_i]
        # Look n rows forward
        else:
            x_residuals = np.shape(data)[0]-x_i-1 # n datapoints left
            val_found = False
            # BUG FIX: the original used range(1, x_residuals), which never
            # inspected the very last row; a column whose only observed value
            # sat in the final row was wrongly reported as entirely NaN.
            for i in range(1, x_residuals + 1):
                if not np.isnan(data[x_i+i][y_i]):
                    val_found = True
                    break
            if val_found:
                # Back-fill all leading NaNs with the first observed value.
                # pylint: disable=undefined-loop-variable
                for x_nan in range(i):
                    data[x_i+x_nan][y_i] = data[x_i+i][y_i]
            else:
                raise Exception("Error: Entire Column is NaN")
    return data
| [
"numpy.shape",
"numpy.transpose",
"impyute.util.find_null",
"numpy.isnan"
] | [((1025, 1040), 'impyute.util.find_null', 'find_null', (['data'], {}), '(data)\n', (1034, 1040), False, 'from impyute.util import find_null\n'), ((958, 976), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (970, 976), True, 'import numpy as np\n'), ((1398, 1426), 'numpy.isnan', 'np.isnan', (['data[x_i + i][y_i]'], {}), '(data[x_i + i][y_i])\n', (1406, 1426), True, 'import numpy as np\n'), ((1256, 1270), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1264, 1270), True, 'import numpy as np\n')] |
#Filename: initialize.py
#Institute: IIT Roorkee
import torch.nn as nn
import numpy as np
def weights_init_kaimingUniform(module):
    """Apply Kaiming-uniform initialisation to all sub-modules.

    Conv2d/Linear weights: Kaiming uniform (fan_in, ReLU nonlinearity),
    biases zeroed when present. BatchNorm2d weights: U(0, 1), biases zeroed.
    """
    for layer in module.modules():
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_uniform_(layer.weight, mode='fan_in',
                                     nonlinearity='relu')
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0.)
        elif isinstance(layer, nn.BatchNorm2d):
            nn.init.uniform_(layer.weight, a=0, b=1)
            nn.init.constant_(layer.bias, val=0.)
def weights_init_kaimingNormal(module):
    """Apply Kaiming-normal initialisation to all sub-modules.

    Conv2d/Linear weights: Kaiming normal (fan_in, ReLU nonlinearity),
    biases zeroed when present. BatchNorm2d weights: N(0, 0.01), biases zeroed.
    """
    for sub in module.modules():
        if isinstance(sub, nn.BatchNorm2d):
            nn.init.normal_(sub.weight, 0, 0.01)
            nn.init.constant_(sub.bias, val=0.)
            continue
        if isinstance(sub, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(sub.weight, mode='fan_in',
                                    nonlinearity='relu')
            if sub.bias is not None:
                nn.init.constant_(sub.bias, 0.)
def weights_init_xavierUniform(module):
    """Apply Xavier (Glorot) uniform initialisation to all sub-modules.

    Conv2d/Linear weights: Xavier uniform with gain sqrt(2), biases zeroed
    when present. BatchNorm2d weights: U(0, 1), biases zeroed.
    """
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight, gain = np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.uniform_(m.weight, a = 0, b = 1)
            nn.init.constant_(m.bias, val = 0.)
        elif isinstance(m, nn.Linear):
            # BUG FIX: the Linear branch used xavier_normal_, which
            # contradicts both the function name and the Conv2d branch.
            nn.init.xavier_uniform_(m.weight, gain = np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, val = 0.)
def weights_init_xavierNormal(module):
    """Apply Xavier (Glorot) normal initialisation to all sub-modules.

    Conv2d/Linear weights: Xavier normal with gain sqrt(2), biases zeroed
    when present. BatchNorm2d weights: N(0, 0.01), biases zeroed.
    """
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight, gain = np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.normal_(m.weight, 0, 0.01)
            nn.init.constant_(m.bias, val = 0.)
        elif isinstance(m, nn.Linear):
            # BUG FIX: the original called kaiming_normal_(m.weight, gain=...),
            # which raises TypeError because kaiming_normal_ has no `gain`
            # parameter; xavier_normal_ is what the function name intends.
            nn.init.xavier_normal_(m.weight, gain = np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, val = 0.)
| [
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.kaiming_uniform_",
"torch.nn.init.uniform_",
"torch.nn.init.normal_"
] | [((214, 284), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (238, 284), True, 'import torch.nn as nn\n'), ((851, 920), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (874, 920), True, 'import torch.nn as nn\n'), ((340, 368), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (357, 368), True, 'import torch.nn as nn\n'), ((426, 462), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['m.weight'], {'a': '(0)', 'b': '(1)'}), '(m.weight, a=0, b=1)\n', (442, 462), True, 'import torch.nn as nn\n'), ((479, 513), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (496, 513), True, 'import torch.nn as nn\n'), ((976, 1004), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (993, 1004), True, 'import torch.nn as nn\n'), ((1062, 1096), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (1077, 1096), True, 'import torch.nn as nn\n'), ((1109, 1143), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (1126, 1143), True, 'import torch.nn as nn\n'), ((1584, 1612), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1601, 1612), True, 'import torch.nn as nn\n'), ((1670, 1706), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['m.weight'], {'a': '(0)', 'b': '(1)'}), '(m.weight, a=0, b=1)\n', (1686, 1706), True, 'import torch.nn as nn\n'), ((1723, 1757), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (1740, 1757), True, 'import torch.nn as nn\n'), ((2174, 2202), 
'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (2191, 2202), True, 'import torch.nn as nn\n'), ((2260, 2294), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (2275, 2294), True, 'import torch.nn as nn\n'), ((2307, 2341), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (2324, 2341), True, 'import torch.nn as nn\n'), ((567, 637), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (591, 637), True, 'import torch.nn as nn\n'), ((1197, 1266), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (1220, 1266), True, 'import torch.nn as nn\n'), ((1521, 1531), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1528, 1531), True, 'import numpy as np\n'), ((2111, 2121), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2118, 2121), True, 'import numpy as np\n'), ((693, 727), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (710, 727), True, 'import torch.nn as nn\n'), ((1322, 1356), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (1339, 1356), True, 'import torch.nn as nn\n'), ((1914, 1948), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (1931, 1948), True, 'import torch.nn as nn\n'), ((2499, 2533), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias'], {'val': '(0.0)'}), '(m.bias, val=0.0)\n', (2516, 2533), True, 'import torch.nn as nn\n'), ((1851, 1861), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1858, 1861), True, 'import numpy as np\n'), ((2436, 2446), 'numpy.sqrt', 'np.sqrt', 
(['(2)'], {}), '(2)\n', (2443, 2446), True, 'import numpy as np\n')] |
import os
import tempfile
import numpy as np
from microscopium.screens.cellomics import SPIRAL_CLOCKWISE_RIGHT_25
from microscopium import preprocess as pre
from microscopium import io as mio
import pytest
import warnings
@pytest.fixture
def image_files():
    """Write three 5x5 uint8 test images to temporary PNGs, yield their paths.

    The files are removed after the test using the fixture finishes.
    """
    # for clarity we define images as integer arrays in [0, 11) and
    # divide by 10 later
    i = np.array([[7, 4, 1, 1, 0],
                  [2, 5, 9, 6, 7],
                  [2, 3, 3, 8, 5],
                  [3, 0, 1, 7, 5],
                  [6, 0, 10, 1, 6]], np.uint8)
    j = np.array([[1, 10, 0, 9, 0],
                  [3, 10, 4, 1, 1],
                  [4, 10, 0, 7, 4],
                  [9, 3, 2, 0, 7],
                  [1, 3, 3, 9, 3]], np.uint8)
    k = np.array([[9, 1, 7, 7, 3],
                  [9, 1, 6, 2, 2],
                  [2, 8, 2, 0, 3],
                  [4, 3, 8, 9, 10],
                  [6, 0, 2, 3, 10]], np.uint8)
    files = []
    for im in [i, j, k]:
        fd, fn = tempfile.mkstemp(suffix='.png')
        # BUG FIX: mkstemp returns an open file descriptor; close it so the
        # fixture does not leak one descriptor per image.
        os.close(fd)
        files.append(fn)
        mio.imsave(fn, im)
    yield files
    for fn in files:
        os.remove(fn)
def test_illumination_mean(image_files):
    """Background illumination estimate matches the precomputed reference.

    quantile=0.5 makes the estimate a median; the expected field below was
    precomputed for the fixture images.
    """
    illum = pre.find_background_illumination(image_files, radius=1,
                                             quantile=0.5)
    # Expected illumination field for the three fixture images (uint8).
    illum_true = np.array([[161, 174, 188, 81, 94],
                           [174, 174, 81, 161, 94],
                           [174, 67, 161, 121, 161],
                           [134, 107, 107, 161, 215],
                           [134, 134, 134, 174, 215]], np.uint8)
    # decimal=1 tolerates small rounding differences.
    np.testing.assert_array_almost_equal(illum, illum_true, decimal=1)
def test_color_stack(image_files):
    """stack_channels maps the input images onto the requested RGB channels."""
    images = [mio.imread(fn) for fn in image_files]
    stack = pre.stack_channels(images[:2], [None, 1, 0])
    # channel order [None, 1, 0]: red empty, green = images[1], blue = images[0]
    np.testing.assert_equal(stack[0, 0], [0, 1, 7])
    np.testing.assert_equal(stack[..., 2], images[0])
def conv(im):
    """Scale a float image in [0, 1] to uint8 in [0, 255]; negatives clip to 0."""
    non_negative = np.clip(im, 0, np.inf)
    return np.round(non_negative * 255).astype(np.uint8)
@pytest.fixture
def image_files_noise(request):
    """Three sham images; one has no signal, one has an intensity artifact."""
    # Fixed seed so the sham images are reproducible across runs.
    r = np.random.RandomState(0)
    shape = (5, 5)
    # no signal
    i = conv(0.01 * np.ones(shape, dtype=float) + 0.005 * r.randn(*shape))
    # normal image
    j = conv(0.5 * r.rand(*shape))
    # blown-out corner
    k = 0.5 * r.rand(*shape)
    k[3:, 3:] = 1.0
    k = conv(k)
    files = []
    for im in [i, j, k]:
        fd, fn = tempfile.mkstemp(suffix='.png')
        # BUG FIX: close the file descriptor returned by mkstemp to avoid
        # leaking one descriptor per image.
        os.close(fd)
        files.append(fn)
        mio.imsave(fn, im)
    def cleanup():
        for fn in files:
            os.remove(fn)
    request.addfinalizer(cleanup)
    # Flat illumination field matching the no-signal background level.
    illum = 0.01 * np.ones(shape, dtype=float)
    return files, illum
def test_correct_multiimage_illum(image_files_noise):
    """Correction neither blows out pure noise nor crushes real signal."""
    files, illum = image_files_noise
    # out_fn is not used directly; the context manager only scopes the
    # lifetime of a temporary file.
    with mio.temporary_file('.tif') as out_fn:
        ims = pre.correct_multiimage_illumination(files, illum, (2 / 25), 0)
        i, j, k = list(ims)
        # 1. check noise is not blown out in i
        assert not np.any(i > 10)
        # 2. check blown out corner in k has not suppressed all other values
        assert np.median(k) > 100
# Cellomics-style filenames for 25 fields; the second parameter set marks
# fields 1 and 13 as missing.
cellomics_pattern = "MFGTMP_150406100001_A01f{0:02d}d0.TIF"
missing_test_fns = [
    ([cellomics_pattern.format(i) for i in range(25)], []),
    ([cellomics_pattern.format(i) for i in range(25)], [1, 13])
]
# delete "images" with fields 1 and 13 from the second set of image filenames
for _missing_field in (1, 13):
    missing_test_fns[1][0].remove(cellomics_pattern.format(_missing_field))
@pytest.mark.parametrize("fns, expected", missing_test_fns)
def test_find_missing_fields(fns, expected):
    """find_missing_fields reports the field numbers absent from *fns*."""
    actual = pre.find_missing_fields(fns)
    np.testing.assert_array_equal(actual, expected)
# create a list of parameters for testing the create missing mask files
# each entry in the tuple represents the fields: missing, order, rows, cols
# and expected (the expected output from the function)
# NOTE: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin bool is the drop-in replacement.
missing_mask_test = [
    ([], [[0, 1, 2]], 10, 5, np.ones((10, 15), dtype=bool)),
    ([0, 5], [[0, 1, 2], [4, 5, 6]], 5, 10, np.ones((10, 30), dtype=bool)),
    ([3, 4], [[0, 1], [2, 3], [4, 5]], 10, 5, np.ones((30, 10), dtype=bool))
]
# insert False to missing areas of expected output
missing_mask_test[1][4][0:5, 0:10] = False
missing_mask_test[1][4][5:10, 10:20] = False
missing_mask_test[2][4][10:20, 5:10] = False
missing_mask_test[2][4][20:30, 0:5] = False
# pass the set of list parameters to the test_create_missing_mask
# function. the test will run against every set of parameters in the
# missing_mask_test list
@pytest.mark.parametrize("missing, order, rows, cols, expected",
                         missing_mask_test)
def test_create_missing_mask(missing, order, rows, cols, expected):
    """create_missing_mask marks the pixels of missing fields as False."""
    actual = pre.create_missing_mask(missing, order, rows, cols)
    np.testing.assert_array_equal(actual, expected)
@pytest.fixture
def test_image_files_montage(request):
    """Return a factory that writes constant-valued 2x2 field images to disk.

    ``missing_fields`` lists field numbers to omit, simulating missing
    images; cleanup of the written files is registered on *request*.
    """
    def make_test_montage_files(missing_fields):
        shape = (2, 2)
        fields = list(range(0, 25))
        for missing_field in missing_fields:
            fields.remove(missing_field)
        # Each field image is filled with its own field number.
        ims = [np.ones(shape, np.uint8) * i for i in fields]
        files = []
        for field, im in zip(fields, ims):
            prefix = "MFGTMP_140206180002_A01f{0:02d}d0".format(field)
            fd, fn = tempfile.mkstemp(prefix=prefix, suffix=".tif")
            # BUG FIX: close the file descriptor returned by mkstemp to
            # avoid leaking one descriptor per image written.
            os.close(fd)
            files.append(fn)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                mio.imsave(fn, im)
        def cleanup():
            for file in files:
                os.remove(file)
        request.addfinalizer(cleanup)
        return files
    return make_test_montage_files
def test_montage_with_missing(test_image_files_montage):
    """Montage of 24 fields (field 20 missing) follows the spiral layout.

    Each 2x2 tile holds its field number; the tile for the missing field
    appears as zeros in the expected array below — TODO confirm fill value
    against pre.montage_with_missing.
    """
    files = test_image_files_montage(missing_fields=[20])
    montage, mask, number_missing = \
        pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
                                 re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
                                 re_group=1)
    expect_montage = np.array([[0, 0, 21, 21, 22, 22, 23, 23, 24, 24],
                               [0, 0, 21, 21, 22, 22, 23, 23, 24, 24],
                               [19, 19, 6, 6, 7, 7, 8, 8, 9, 9],
                               [19, 19, 6, 6, 7, 7, 8, 8, 9, 9],
                               [18, 18, 5, 5, 0, 0, 1, 1, 10, 10],
                               [18, 18, 5, 5, 0, 0, 1, 1, 10, 10],
                               [17, 17, 4, 4, 3, 3, 2, 2, 11, 11],
                               [17, 17, 4, 4, 3, 3, 2, 2, 11, 11],
                               [16, 16, 15, 15, 14, 14, 13, 13, 12, 12],
                               [16, 16, 15, 15, 14, 14, 13, 13, 12, 12]],
                              np.uint8)
    np.testing.assert_array_equal(expect_montage, montage)
def test_montage_with_missing_mask(test_image_files_montage):
    """The returned mask is False exactly at the missing fields' tiles."""
    files = test_image_files_montage(missing_fields=[3, 8])
    montage, mask, number_missing = \
        pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
                                 re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
                                 re_group=1)
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
    # drop-in replacement.
    expected_mask = np.ones((10, 10), bool)
    expected_mask[6:8, 4:6] = False
    expected_mask[2:4, 6:8] = False
    np.testing.assert_array_equal(expected_mask, mask)
def test_montage_with_missing_number_missing(test_image_files_montage):
    """montage_with_missing reports how many fields were absent."""
    files = test_image_files_montage(missing_fields=[10, 11, 12])
    montage, mask, number_missing = \
        pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
                                 re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
                                 re_group=1)
    assert number_missing == 3
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main()
| [
"numpy.clip",
"numpy.testing.assert_equal",
"numpy.array",
"microscopium.io.temporary_file",
"numpy.random.RandomState",
"os.remove",
"microscopium.preprocess.correct_multiimage_illumination",
"numpy.testing.assert_array_almost_equal",
"microscopium.preprocess.montage_with_missing",
"microscopium.... | [((3552, 3610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fns, expected"""', 'missing_test_fns'], {}), "('fns, expected', missing_test_fns)\n", (3575, 3610), False, 'import pytest\n'), ((4590, 4676), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""missing, order, rows, cols, expected"""', 'missing_mask_test'], {}), "('missing, order, rows, cols, expected',\n missing_mask_test)\n", (4613, 4676), False, 'import pytest\n'), ((360, 471), 'numpy.array', 'np.array', (['[[7, 4, 1, 1, 0], [2, 5, 9, 6, 7], [2, 3, 3, 8, 5], [3, 0, 1, 7, 5], [6, 0,\n 10, 1, 6]]', 'np.uint8'], {}), '([[7, 4, 1, 1, 0], [2, 5, 9, 6, 7], [2, 3, 3, 8, 5], [3, 0, 1, 7, 5\n ], [6, 0, 10, 1, 6]], np.uint8)\n', (368, 471), True, 'import numpy as np\n'), ((547, 659), 'numpy.array', 'np.array', (['[[1, 10, 0, 9, 0], [3, 10, 4, 1, 1], [4, 10, 0, 7, 4], [9, 3, 2, 0, 7], [1,\n 3, 3, 9, 3]]', 'np.uint8'], {}), '([[1, 10, 0, 9, 0], [3, 10, 4, 1, 1], [4, 10, 0, 7, 4], [9, 3, 2, 0,\n 7], [1, 3, 3, 9, 3]], np.uint8)\n', (555, 659), True, 'import numpy as np\n'), ((736, 848), 'numpy.array', 'np.array', (['[[9, 1, 7, 7, 3], [9, 1, 6, 2, 2], [2, 8, 2, 0, 3], [4, 3, 8, 9, 10], [6, 0,\n 2, 3, 10]]', 'np.uint8'], {}), '([[9, 1, 7, 7, 3], [9, 1, 6, 2, 2], [2, 8, 2, 0, 3], [4, 3, 8, 9, \n 10], [6, 0, 2, 3, 10]], np.uint8)\n', (744, 848), True, 'import numpy as np\n'), ((1176, 1245), 'microscopium.preprocess.find_background_illumination', 'pre.find_background_illumination', (['image_files'], {'radius': '(1)', 'quantile': '(0.5)'}), '(image_files, radius=1, quantile=0.5)\n', (1208, 1245), True, 'from microscopium import preprocess as pre\n'), ((1308, 1463), 'numpy.array', 'np.array', (['[[161, 174, 188, 81, 94], [174, 174, 81, 161, 94], [174, 67, 161, 121, 161],\n [134, 107, 107, 161, 215], [134, 134, 134, 174, 215]]', 'np.uint8'], {}), '([[161, 174, 188, 81, 94], [174, 174, 81, 161, 94], [174, 67, 161, \n 121, 161], [134, 107, 107, 161, 215], [134, 134, 134, 174, 215]], 
np.uint8)\n', (1316, 1463), True, 'import numpy as np\n'), ((1576, 1642), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['illum', 'illum_true'], {'decimal': '(1)'}), '(illum, illum_true, decimal=1)\n', (1612, 1642), True, 'import numpy as np\n'), ((1740, 1784), 'microscopium.preprocess.stack_channels', 'pre.stack_channels', (['images[:2]', '[None, 1, 0]'], {}), '(images[:2], [None, 1, 0])\n', (1758, 1784), True, 'from microscopium import preprocess as pre\n'), ((1789, 1836), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['stack[0, 0]', '[0, 1, 7]'], {}), '(stack[0, 0], [0, 1, 7])\n', (1812, 1836), True, 'import numpy as np\n'), ((1841, 1890), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['stack[..., 2]', 'images[0]'], {}), '(stack[..., 2], images[0])\n', (1864, 1890), True, 'import numpy as np\n'), ((2111, 2135), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2132, 2135), True, 'import numpy as np\n'), ((3669, 3697), 'microscopium.preprocess.find_missing_fields', 'pre.find_missing_fields', (['fns'], {}), '(fns)\n', (3692, 3697), True, 'from microscopium import preprocess as pre\n'), ((3702, 3749), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (3731, 3749), True, 'import numpy as np\n'), ((4779, 4830), 'microscopium.preprocess.create_missing_mask', 'pre.create_missing_mask', (['missing', 'order', 'rows', 'cols'], {}), '(missing, order, rows, cols)\n', (4802, 4830), True, 'from microscopium import preprocess as pre\n'), ((4835, 4882), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4864, 4882), True, 'import numpy as np\n'), ((5904, 6024), 'microscopium.preprocess.montage_with_missing', 'pre.montage_with_missing', (['files'], {'order': 'SPIRAL_CLOCKWISE_RIGHT_25', 're_string': '""".*_[A-P]\\\\d{2}f(\\\\d{2})d0"""', 
're_group': '(1)'}), "(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=\n '.*_[A-P]\\\\d{2}f(\\\\d{2})d0', re_group=1)\n", (5928, 6024), True, 'from microscopium import preprocess as pre\n'), ((6115, 6532), 'numpy.array', 'np.array', (['[[0, 0, 21, 21, 22, 22, 23, 23, 24, 24], [0, 0, 21, 21, 22, 22, 23, 23, 24,\n 24], [19, 19, 6, 6, 7, 7, 8, 8, 9, 9], [19, 19, 6, 6, 7, 7, 8, 8, 9, 9],\n [18, 18, 5, 5, 0, 0, 1, 1, 10, 10], [18, 18, 5, 5, 0, 0, 1, 1, 10, 10],\n [17, 17, 4, 4, 3, 3, 2, 2, 11, 11], [17, 17, 4, 4, 3, 3, 2, 2, 11, 11],\n [16, 16, 15, 15, 14, 14, 13, 13, 12, 12], [16, 16, 15, 15, 14, 14, 13, \n 13, 12, 12]]', 'np.uint8'], {}), '([[0, 0, 21, 21, 22, 22, 23, 23, 24, 24], [0, 0, 21, 21, 22, 22, 23,\n 23, 24, 24], [19, 19, 6, 6, 7, 7, 8, 8, 9, 9], [19, 19, 6, 6, 7, 7, 8, \n 8, 9, 9], [18, 18, 5, 5, 0, 0, 1, 1, 10, 10], [18, 18, 5, 5, 0, 0, 1, 1,\n 10, 10], [17, 17, 4, 4, 3, 3, 2, 2, 11, 11], [17, 17, 4, 4, 3, 3, 2, 2,\n 11, 11], [16, 16, 15, 15, 14, 14, 13, 13, 12, 12], [16, 16, 15, 15, 14,\n 14, 13, 13, 12, 12]], np.uint8)\n', (6123, 6532), True, 'import numpy as np\n'), ((6826, 6880), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expect_montage', 'montage'], {}), '(expect_montage, montage)\n', (6855, 6880), True, 'import numpy as np\n'), ((7055, 7175), 'microscopium.preprocess.montage_with_missing', 'pre.montage_with_missing', (['files'], {'order': 'SPIRAL_CLOCKWISE_RIGHT_25', 're_string': '""".*_[A-P]\\\\d{2}f(\\\\d{2})d0"""', 're_group': '(1)'}), "(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=\n '.*_[A-P]\\\\d{2}f(\\\\d{2})d0', re_group=1)\n", (7079, 7175), True, 'from microscopium import preprocess as pre\n'), ((7265, 7291), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.bool'], {}), '((10, 10), np.bool)\n', (7272, 7291), True, 'import numpy as np\n'), ((7369, 7419), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_mask', 'mask'], {}), '(expected_mask, mask)\n', (7398, 7419), True, 'import 
numpy as np\n'), ((7610, 7730), 'microscopium.preprocess.montage_with_missing', 'pre.montage_with_missing', (['files'], {'order': 'SPIRAL_CLOCKWISE_RIGHT_25', 're_string': '""".*_[A-P]\\\\d{2}f(\\\\d{2})d0"""', 're_group': '(1)'}), "(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=\n '.*_[A-P]\\\\d{2}f(\\\\d{2})d0', re_group=1)\n", (7634, 7730), True, 'from microscopium import preprocess as pre\n'), ((7863, 7876), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7874, 7876), False, 'import pytest\n'), ((972, 1003), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".png"""'}), "(suffix='.png')\n", (988, 1003), False, 'import tempfile\n'), ((1037, 1055), 'microscopium.io.imsave', 'mio.imsave', (['fn', 'im'], {}), '(fn, im)\n', (1047, 1055), True, 'from microscopium import io as mio\n'), ((1107, 1120), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (1116, 1120), False, 'import os\n'), ((2444, 2475), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".png"""'}), "(suffix='.png')\n", (2460, 2475), False, 'import tempfile\n'), ((2509, 2527), 'microscopium.io.imsave', 'mio.imsave', (['fn', 'im'], {}), '(fn, im)\n', (2519, 2527), True, 'from microscopium import io as mio\n'), ((2653, 2680), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (2660, 2680), True, 'import numpy as np\n'), ((2807, 2833), 'microscopium.io.temporary_file', 'mio.temporary_file', (['""".tif"""'], {}), "('.tif')\n", (2825, 2833), True, 'from microscopium import io as mio\n'), ((2859, 2919), 'microscopium.preprocess.correct_multiimage_illumination', 'pre.correct_multiimage_illumination', (['files', 'illum', '(2 / 25)', '(0)'], {}), '(files, illum, 2 / 25, 0)\n', (2894, 2919), True, 'from microscopium import preprocess as pre\n'), ((4006, 4038), 'numpy.ones', 'np.ones', (['(10, 15)'], {'dtype': 'np.bool'}), '((10, 15), dtype=np.bool)\n', (4013, 4038), True, 'import numpy as np\n'), ((4085, 4117), 'numpy.ones', 'np.ones', (['(10, 30)'], {'dtype': 
'np.bool'}), '((10, 30), dtype=np.bool)\n', (4092, 4117), True, 'import numpy as np\n'), ((4166, 4198), 'numpy.ones', 'np.ones', (['(30, 10)'], {'dtype': 'np.bool'}), '((30, 10), dtype=np.bool)\n', (4173, 4198), True, 'import numpy as np\n'), ((2585, 2598), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (2594, 2598), False, 'import os\n'), ((3016, 3030), 'numpy.any', 'np.any', (['(i > 10)'], {}), '(i > 10)\n', (3022, 3030), True, 'import numpy as np\n'), ((3123, 3135), 'numpy.median', 'np.median', (['k'], {}), '(k)\n', (3132, 3135), True, 'import numpy as np\n'), ((5351, 5397), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': 'prefix', 'suffix': '""".tif"""'}), "(prefix=prefix, suffix='.tif')\n", (5367, 5397), False, 'import tempfile\n'), ((2191, 2218), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (2198, 2218), True, 'import numpy as np\n'), ((5151, 5175), 'numpy.ones', 'np.ones', (['shape', 'np.uint8'], {}), '(shape, np.uint8)\n', (5158, 5175), True, 'import numpy as np\n'), ((5445, 5470), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5468, 5470), False, 'import warnings\n'), ((5488, 5519), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5509, 5519), False, 'import warnings\n'), ((5536, 5554), 'microscopium.io.imsave', 'mio.imsave', (['fn', 'im'], {}), '(fn, im)\n', (5546, 5554), True, 'from microscopium import io as mio\n'), ((5626, 5641), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (5635, 5641), False, 'import os\n'), ((1927, 1949), 'numpy.clip', 'np.clip', (['im', '(0)', 'np.inf'], {}), '(im, 0, np.inf)\n', (1934, 1949), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import _pickle as cPickle, gzip
import numpy as np
from tqdm import tqdm
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn as nn
import sys
sys.path.append("..")
import utils
from utils import *
from train_utils import batchify_data, run_epoch, train_model
def main(batch_size=32, lr=0.1, momentum=0, hidden_size=10, leakyReLU=False):
    """Train and evaluate a one-hidden-layer MLP on MNIST.

    :param batch_size: mini-batch size used for train/dev/test batching
    :param lr: SGD learning rate
    :param momentum: SGD momentum
    :param hidden_size: width of the single hidden layer
    :param leakyReLU: use LeakyReLU instead of ReLU as the activation
    """
    num_classes = 10
    # Load the dataset
    X_train, y_train, X_test, y_test = get_MNIST_data()
    # Split into train (90%) and dev (10%)
    dev_split_index = int(9 * len(X_train) / 10)
    X_dev = X_train[dev_split_index:]
    y_dev = y_train[dev_split_index:]
    X_train = X_train[:dev_split_index]
    y_train = y_train[:dev_split_index]
    # Shuffle the training set (np.random.permutation replaces the manual
    # index-list construction + shuffle of the original code).
    permutation = np.random.permutation(len(X_train))
    X_train = [X_train[i] for i in permutation]
    y_train = [y_train[i] for i in permutation]
    # Split dataset into batches
    train_batches = batchify_data(X_train, y_train, batch_size)
    dev_batches = batchify_data(X_dev, y_dev, batch_size)
    test_batches = batchify_data(X_test, y_test, batch_size)
    #################################
    # Model: 784 -> hidden_size -> num_classes with configurable activation.
    # (idiom fix: `leakyReLU == False` replaced by a direct truth test;
    # the previously unused num_classes now sizes the output layer)
    nonLinearLayer = nn.LeakyReLU() if leakyReLU else nn.ReLU()
    model = nn.Sequential(
        nn.Linear(784, hidden_size),
        nonLinearLayer,
        nn.Linear(hidden_size, num_classes),
    )
    ##################################
    train_model(train_batches, dev_batches, model, lr=lr, momentum=momentum)
    ## Evaluate the model on test data
    loss, accuracy = run_epoch(test_batches, model.eval(), None)
    print ("Loss on test set:" + str(loss) + " Accuracy on test set: " + str(accuracy))
if __name__ == '__main__':
    # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
    np.random.seed(12321)  # for reproducibility
    torch.manual_seed(12321)  # for reproducibility
    # Baseline plus four single-hyperparameter variations.
    main(hidden_size=128)
    main(hidden_size=128, batch_size=64)  # larger batch
    main(hidden_size=128, lr=0.01)  # smaller learning rate
    main(hidden_size=128, momentum=0.9)  # SGD with momentum
    main(hidden_size=128, leakyReLU=True)  # LeakyReLU activation
| [
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"train_utils.batchify_data",
"train_utils.train_model",
"numpy.random.seed",
"torch.nn.Linear",
"sys.path.append",
"numpy.random.shuffle"
] | [((209, 230), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (224, 230), False, 'import sys\n'), ((816, 846), 'numpy.random.shuffle', 'np.random.shuffle', (['permutation'], {}), '(permutation)\n', (833, 846), True, 'import numpy as np\n'), ((997, 1040), 'train_utils.batchify_data', 'batchify_data', (['X_train', 'y_train', 'batch_size'], {}), '(X_train, y_train, batch_size)\n', (1010, 1040), False, 'from train_utils import batchify_data, run_epoch, train_model\n'), ((1059, 1098), 'train_utils.batchify_data', 'batchify_data', (['X_dev', 'y_dev', 'batch_size'], {}), '(X_dev, y_dev, batch_size)\n', (1072, 1098), False, 'from train_utils import batchify_data, run_epoch, train_model\n'), ((1118, 1159), 'train_utils.batchify_data', 'batchify_data', (['X_test', 'y_test', 'batch_size'], {}), '(X_test, y_test, batch_size)\n', (1131, 1159), False, 'from train_utils import batchify_data, run_epoch, train_model\n'), ((1543, 1615), 'train_utils.train_model', 'train_model', (['train_batches', 'dev_batches', 'model'], {'lr': 'lr', 'momentum': 'momentum'}), '(train_batches, dev_batches, model, lr=lr, momentum=momentum)\n', (1554, 1615), False, 'from train_utils import batchify_data, run_epoch, train_model\n'), ((1956, 1977), 'numpy.random.seed', 'np.random.seed', (['(12321)'], {}), '(12321)\n', (1970, 1977), True, 'import numpy as np\n'), ((2005, 2029), 'torch.manual_seed', 'torch.manual_seed', (['(12321)'], {}), '(12321)\n', (2022, 2029), False, 'import torch\n'), ((1283, 1292), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1290, 1292), True, 'import torch.nn as nn\n'), ((1328, 1342), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1340, 1342), True, 'import torch.nn as nn\n'), ((1384, 1411), 'torch.nn.Linear', 'nn.Linear', (['(784)', 'hidden_size'], {}), '(784, hidden_size)\n', (1393, 1411), True, 'import torch.nn as nn\n'), ((1457, 1483), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(10)'], {}), '(hidden_size, 10)\n', (1466, 1483), True, 
'import torch.nn as nn\n')] |
from __future__ import print_function
import os
import sys
import pickle
import json
import datetime
from collections import namedtuple
import numpy as np
import sqlite3
from sqlalchemy import create_engine
from sqlalchemy_utils import drop_database
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
import analysis.delayedfeedback.database as db
from utils.logger import *
from states.common import *
def print_list(info, lst):
    """Print a header line, then each item of lst on its own indented, quoted line."""
    print(info)
    for entry in lst:
        print('\t "%s"' % (entry,))
def object_to_record(obj):
    """Recursively convert obj into JSON-serializable primitives.

    numpy arrays become plain lists; objects with a ``__dict__`` become
    dicts of their attributes; dicts and lists are converted element-wise;
    anything else is returned unchanged.
    """
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if hasattr(obj, "__dict__"):
        return {key: object_to_record(val) for key, val in obj.__dict__.items()}
    if isinstance(obj, dict):
        return {key: object_to_record(val) for key, val in obj.items()}
    if isinstance(obj, list):
        return [object_to_record(entry) for entry in obj]
    return obj
class EventType(object):
    """Integer ids for experiment events plus their logger message strings.

    ``messages`` pairs each id with the exact message the acquisition logger
    writes; ``message_to_id`` is the inverse lookup, populated once by
    ``_preinit`` at import time.
    """
    BlockSequenceStart = 0
    BlockSequenceEnd = 1
    TrialSequenceStart = 2
    TrialSequenceEnd = 3
    TrialGoSignal = 4
    TrialGoalTimeout = 5
    TrialGoalReached = 6
    TrialStartReached = 7
    TrialDisturbanceTrigger = 8
    _id_count = 9
    messages = (
        (BlockSequenceStart, "Block: sequence start"),
        (BlockSequenceEnd, "Block: sequence end"),
        (TrialSequenceStart, "Trial: sequence start"),
        (TrialSequenceEnd, "Trial: sequence end"),
        (TrialStartReached, "Trial: start reached"),
        (TrialGoSignal, "Trial: go signal"),
        (TrialGoalTimeout, "Trial: goal timeout"),
        (TrialGoalReached, "Trial: goal reached"),
        (TrialDisturbanceTrigger, "Trial: disturbance trigger")
    )
    message_to_id = None
    @classmethod
    def _preinit(cls):
        # Invert `messages` into a message-string -> id mapping.
        cls.message_to_id = dict((text, ident) for ident, text in cls.messages)
    @classmethod
    def fill_database(cls, dbsession):
        """Insert one row per event type; duplicates are reported and skipped."""
        try:
            print("Adding event types... ", end="")
            for ident, text in cls.messages:
                dbsession.add(db.EventType(id=ident, desc=text))
            dbsession.commit()
            print("OK")
        except IntegrityError:
            print("IntegrityError, skipped")
            dbsession.rollback()
        except:
            print("FAIL")
            dbsession.rollback()
            raise
# Build the message-text -> event-id lookup table once at import time.
EventType._preinit()
def create_database(db_url):
    """Create a fresh, empty database at db_url, dropping any existing one.

    Returns the SQLAlchemy engine bound to the new database.
    """
    try:
        print("Creating new database [%s]... " % db_url, end="")
        # Best-effort drop; ignore failures (e.g. the database does not exist yet).
        try:
            drop_database(db_url)
        except:
            pass
        engine = create_engine(db_url)
        # Create all tables declared on the ORM declarative base.
        db.Base.metadata.create_all(engine)
        print("OK")
    except:
        print("FAIL")
        raise
    return engine
def open_database(db_url):
    """Open an existing database at db_url and return its SQLAlchemy engine."""
    return create_engine(db_url)
def add_subject(dbsession, name, age, sex):
    """Insert a subject row; a duplicate subject is reported and skipped."""
    try:
        print("Adding new subject [%s]... " % name, end="")
        dbsession.add(db.Subject(name=name, age=age, sex=sex))
        dbsession.commit()
        print("OK")
    except IntegrityError:
        print("IntegrityError, skipped")
        dbsession.rollback()
    except:
        print("FAIL")
        dbsession.rollback()
        raise
def _correct_nplog_events_order(nplog):
events = []
for event in nplog.events:
if event.msg == "Trial: goal reached":
events.insert(-2, event)
else:
events.append(event)
nplog.events = events
def get_trial_start_stop(trial):
    """Return (start_time, stop_time) of a trial, taken from its
    TrialSequenceStart / TrialSequenceEnd events (None if missing)."""
    start, stop = None, None
    for ev in trial.events:
        if ev.event_type_id == EventType.TrialSequenceStart:
            start = ev.time
        elif ev.event_type_id == EventType.TrialSequenceEnd:
            stop = ev.time
    return start, stop
def find_trials(block, times):
    """Map each timestamp in `times` to the trial of `block` whose
    [start, stop] interval contains it (None when no trial matches)."""
    intervals = [(trial, get_trial_start_stop(trial)) for trial in block.trials]

    def locate(t):
        for trial, (start, stop) in intervals:
            if start <= t <= stop:
                return trial
        return None

    return [locate(t) for t in times]
def add_session(dbsession, name, sessionpath):
    """ Add one experiment session to the database.

    Args:
        dbsession: open SQLAlchemy session used for all inserts.
        name: subject name; the subject must already exist in the database.
        sessionpath: directory whose sub-directories each contain one block
            recording (delayedfeedback.pkl plus Optotrak/ODAU .npy files).

    Trial events:
    - trial start (go to home position)
    - go (target appears)
    - disturbance introduced
    - target reached or timeout
    - trial end
    Block events:
    - block start
    - block end
    Session events:
    - sesion start
    - session end
    """
    try:
        session = None # current session
        block = None # current block
        trial = None # current trial
        # Find the subject
        print("Searching subject [{}]... ".format(name), end="")
        subj = dbsession.query(db.Subject).filter(db.Subject.name == name).first()
        if subj is None:
            raise ValueError("Subject [{}] is not found".format(name))
        print("OK")
        # Add session
        print("Session path is: \"{}\"".format(sessionpath))
        # Each sub-directory of the session directory holds one block recording.
        blockpaths = [x[0] for x in os.walk(sessionpath)][1:]
        print_list("Blocks found:", blockpaths)
        print("Addind new session for subject [{}]... ".format(name), end="")
        session = db.Session(subject=subj)
        dbsession.add(session)
        print("OK")
        # Process blocks
        for i, blockpath in enumerate(blockpaths):
            print("Loading block \"{}\"".format(blockpath))
            nplogfilename = os.path.join(blockpath, "delayedfeedback.pkl")
            nplog = NPLog.from_file(nplogfilename)
            #_correct_nplog_events_order(nplog) # workaround for old recording of Gunnar
            # Serialize logged parameter objects to JSON strings for storage.
            dumps = lambda x: json.dumps(object_to_record(x))
            blockparamslist = nplog.select_by_name("Experiment.params")[0].value
            blockparamsliststr = dumps(blockparamslist)
            trialparamslist = [tp.value for tp in nplog.select_by_name("Trial.params")]
            #trialparamsliststrs = [dumps(tp) for tp in trialparamslist]
            print_list("Found logged streams:", nplog.get_names())
            block = db.Block(session=session,
                             number=blockparamslist.block_number,
                             paramslist=blockparamsliststr,
                             opto_filename=os.path.join(blockpath, "REC-001.OPTO.npy"),
                             odau_filename=os.path.join(blockpath, "REC-001.ODAU.npy"))
            dbsession.add(block)
            # Find trial periods from ODAU sync channel.
            data = np.load(block.odau_filename)
            syncdata = data[:, 0]
            # Rising/falling edges of the binarized sync signal delimit trials.
            x = (syncdata > 0).astype(int)
            starts = np.where(np.diff(x) > 0)[0] + 1
            stops = np.where(np.diff(x) < 0)[0] + 1
            print("Found {} starts and {} stops in sync channel".format(len(starts), len(stops)))
            assert len(starts) == len(stops)
            print("Number of events: {}".format(len(nplog.events)))
            itrial = 0
            for event in nplog.events:
                # Process only trial events
                if isinstance(event, EventRecord):
                    if event.msg.startswith("Trial:"):
                        if event.msg.startswith("Trial: sequence start"):
                            # Add new trial
                            # NOTE(review): opto sample indices appear to be derived from
                            # ODAU indices via a fixed 0.1 rate ratio -- confirm against
                            # the acquisition settings.
                            trial = db.Trial(block=block,
                                             paramslist=dumps(trialparamslist[itrial]),
                                             number=itrial,
                                             disturbance_mode=trialparamslist[itrial].disturbance_mode,
                                             feedback_delay=trialparamslist[itrial].feedback_delay,
                                             opto_start=int(0.1 * starts[itrial]),
                                             opto_stop=int(0.1 * stops[itrial]),
                                             odau_start=starts[itrial],
                                             odau_stop=stops[itrial],)
                            itrial += 1
                            dbsession.add(trial)
                        # Add trial event
                        trialevent = db.TrialEvent(trial=trial,
                                                    event_type_id=EventType.message_to_id[event.msg],
                                                    time=datetime.datetime.fromtimestamp(event.created))
                        dbsession.add(trialevent)
            # Trigger is logged at every frame when it is active.
            # Detect trigger onset and add a TrialEvent
            triggers = nplog.select_by_name("AffineDisturbanceInducer.triggered")
            triggerstimes = [] # trigger timestamps
            tprev = 0
            for record in triggers:
                # Debounce: count a new onset only if >0.1 s after the previous record.
                if record.created > tprev + 0.1:
                    triggerstimes.append(datetime.datetime.fromtimestamp(record.created))
                tprev = record.created
            print("Number of disturbance triggers: {}".format(len(triggerstimes)))
            # Add trigger events
            trials = find_trials(block, triggerstimes)
            for trial, triggertime in zip(trials, triggerstimes):
                trialevent = db.TrialEvent(trial=trial,
                                            event_type_id=EventType.TrialDisturbanceTrigger,
                                            time=triggertime)
                dbsession.add(trialevent)
        dbsession.commit()
        print("Commit OK")
    except:
        print("FAIL")
        raise
QUERY = """
SELECT trial_id1 as trial_id, time_start, time_stop FROM (
(SELECT trial.id as trial_id1, trial_event.time as time_start FROM trial_event LEFT JOIN trial ON trial.id = trial_event.trial_id
WHERE event_type_id = 2)
INNER JOIN
(SELECT trial.id as trial_id2, trial_event.time as time_stop FROM trial_event LEFT JOIN trial ON trial.id = trial_event.trial_id
WHERE event_type_id = 3)
ON trial_id1 = trial_id2
)
"""
if __name__ == "__main__":
    # Subject roster: (anonymised name, age, sex, recording directory).
    SESSIONS = [
        ("A", 22.0, "male", "../../../data/delayedfeedback/2017-08-12-(Felix-1)"),
        ("B", 42.0, "male", "../../../data/delayedfeedback/2017-08-21-(Dominik-2)"),
        ("C", 25.0, "male", "../../../data/delayedfeedback/2017-08-22-(Ben-3)"),
        ("D", 25.0, "male", "../../../data/delayedfeedback/2017-08-23-(Brandon-4)"),
        ("E", 25.0, "male", "../../../data/delayedfeedback/2017-08-24-(Deng-5)"),
        ("F", 25.0, "male", "../../../data/delayedfeedback/2017-08-25-(Jonathan-6)"),
        ("G", 25.0, "female", "../../../data/delayedfeedback/2017-08-29-(JVanderlinden-7)"),
        ("H", 25.0, "female", "../../../data/delayedfeedback/2017-08-30-(Johanna-lefthanded-8)"),
        ("I", 22.0, "female", "../../../data/delayedfeedback/2017-08-31-(Ayako-9)"),
    ]
    db_url = 'sqlite:///delayed_feedback.db'
    create = raw_input("Re-create database? (y/n)")
    if create == "y":
        engine = create_database(db_url)
    else:
        # BUG FIX: previously `engine` (and `db_url`) were only defined inside the
        # "y" branch, so answering anything else crashed with NameError at
        # sessionmaker(bind=engine).  Reuse the existing database instead.
        engine = open_database(db_url)
    Session = sessionmaker(bind=engine)
    dbsession = Session()
    EventType.fill_database(dbsession)
    for name, age, sex, sessionpath in SESSIONS:
        add_subject(dbsession, name, age=age, sex=sex)
        add_session(dbsession, name, sessionpath=sessionpath)
    dbsession.close()
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy_utils.drop_database",
"analysis.delayedfeedback.database.EventType",
"datetime.datetime.fromtimestamp",
"sqlalchemy.create_engine",
"os.path.join",
"numpy.diff",
"analysis.delayedfeedback.database.Subject",
"analysis.delayedfeedback.database.Base.metadata.c... | [((2862, 2883), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (2875, 2883), False, 'from sqlalchemy import create_engine\n'), ((2668, 2689), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (2681, 2689), False, 'from sqlalchemy import create_engine\n'), ((2698, 2733), 'analysis.delayedfeedback.database.Base.metadata.create_all', 'db.Base.metadata.create_all', (['engine'], {}), '(engine)\n', (2725, 2733), True, 'import analysis.delayedfeedback.database as db\n'), ((5325, 5349), 'analysis.delayedfeedback.database.Session', 'db.Session', ([], {'subject': 'subj'}), '(subject=subj)\n', (5335, 5349), True, 'import analysis.delayedfeedback.database as db\n'), ((10168, 10193), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (10180, 10193), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2595, 2616), 'sqlalchemy_utils.drop_database', 'drop_database', (['db_url'], {}), '(db_url)\n', (2608, 2616), False, 'from sqlalchemy_utils import drop_database\n'), ((3046, 3085), 'analysis.delayedfeedback.database.Subject', 'db.Subject', ([], {'name': 'name', 'age': 'age', 'sex': 'sex'}), '(name=name, age=age, sex=sex)\n', (3056, 3085), True, 'import analysis.delayedfeedback.database as db\n'), ((5566, 5612), 'os.path.join', 'os.path.join', (['blockpath', '"""delayedfeedback.pkl"""'], {}), "(blockpath, 'delayedfeedback.pkl')\n", (5578, 5612), False, 'import os\n'), ((6616, 6644), 'numpy.load', 'np.load', (['block.odau_filename'], {}), '(block.odau_filename)\n', (6623, 6644), True, 'import numpy as np\n'), ((9187, 9284), 'analysis.delayedfeedback.database.TrialEvent', 'db.TrialEvent', ([], {'trial': 'trial', 'event_type_id': 'EventType.TrialDisturbanceTrigger', 'time': 'triggertime'}), '(trial=trial, event_type_id=EventType.TrialDisturbanceTrigger,\n time=triggertime)\n', (9200, 9284), True, 'import 
analysis.delayedfeedback.database as db\n'), ((2119, 2163), 'analysis.delayedfeedback.database.EventType', 'db.EventType', ([], {'id': 'message[0]', 'desc': 'message[1]'}), '(id=message[0], desc=message[1])\n', (2131, 2163), True, 'import analysis.delayedfeedback.database as db\n'), ((5155, 5175), 'os.walk', 'os.walk', (['sessionpath'], {}), '(sessionpath)\n', (5162, 5175), False, 'import os\n'), ((6373, 6416), 'os.path.join', 'os.path.join', (['blockpath', '"""REC-001.OPTO.npy"""'], {}), "(blockpath, 'REC-001.OPTO.npy')\n", (6385, 6416), False, 'import os\n'), ((6449, 6492), 'os.path.join', 'os.path.join', (['blockpath', '"""REC-001.ODAU.npy"""'], {}), "(blockpath, 'REC-001.ODAU.npy')\n", (6461, 6492), False, 'import os\n'), ((8833, 8880), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['record.created'], {}), '(record.created)\n', (8864, 8880), False, 'import datetime\n'), ((6752, 6762), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (6759, 6762), True, 'import numpy as np\n'), ((6804, 6814), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (6811, 6814), True, 'import numpy as np\n'), ((8317, 8363), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['event.created'], {}), '(event.created)\n', (8348, 8363), False, 'import datetime\n')] |
import yake
import gensim
import numpy as np
import itertools
import nltk
from nltk.corpus import stopwords
def extract_keywords(text):
    """
    Extract keyword phrases from ``text`` using the YAKE model.

    The phrases are sorted ascending by YAKE score and then reversed, and
    inner spaces are replaced by underscores so each phrase is one token.
    """
    extractor = yake.KeywordExtractor()
    scored = extractor.extract_keywords(text)
    # Sort ascending by score, then flip the list end-to-end.
    ordered = sorted(scored, key=lambda pair: pair[1])[::-1]
    return [phrase.replace(" ", "_") for phrase, _score in ordered]
def word2vec(keywords, model):
    """
    Average the word2vec vectors of the given keywords.

    Words missing from the model's vocabulary contribute a zero vector but
    still count towards the denominator of the average.

    Args:
        keywords: list of words/phrases (underscore-joined) to embed.
        model: gensim KeyedVectors-style model exposing ``model.wv``.

    Returns:
        np.ndarray of shape (300,): the mean vector, or all zeros when
        ``keywords`` is empty (previously this raised ZeroDivisionError).
    """
    v = np.zeros(300)
    if not keywords:
        return v
    for w in keywords:
        if w in model.wv:
            v += model.wv[w]
    return v / len(keywords)
if __name__ == "__main__":
    # Load Google's pre-trained 300-dimensional Word2Vec model.
    model = gensim.models.KeyedVectors.load_word2vec_format(
        './word2vec/GoogleNews-vectors-negative300.bin', binary=True)
    interests = ['Art','Computer','Mathematics', 'Economics', "Physics"]
    text = "Advanced topics in cloud computing with emphasis on scalable distributed computing technologies employed in cloud computing. Key cloud technologies and their algorithmic background. Main topics are distributed file systems, distributed batch processing with the MapReduce and the Apache Spark computing frameworks, and distributed cloud based databases."
    #text = "Content topics covered during the course: - Learning ability and various challenges in learning and studying - Effective learning and academic study skills - Identifying your own strengths and challenges in learning and reflecting on them - Designing your own studies and developing a Personal Study Plan"
    #text = "The goal of the course is to provide a practical deep-dive into effective communications. Students will learn to apply it in their own lives and careers through a series of exercises. The course emphasizes iterative cycles of research and practice, where personal storytelling skills are developed through feedback and discussion."
    keywords = extract_keywords(text)
    encoding = word2vec(keywords, model)
    # Cosine similarity between the text embedding and each interest embedding.
    cos_sim = np.zeros(len(interests))
    for i in range(len(interests)):
        # BUG FIX: the interest must be wrapped in a list -- passing the bare
        # string made word2vec iterate over its individual characters.  Each
        # interest is also now embedded once instead of twice.
        interest_vec = word2vec([interests[i]], model)
        cos_sim[i] = np.sum(encoding * interest_vec) / (
            np.linalg.norm(encoding) * np.linalg.norm(interest_vec))
    # Print the four best-matching interests, most similar first.
    sorted_idx = np.argsort(cos_sim)[-4:]
    print(interests[sorted_idx[3]], interests[sorted_idx[2]], interests[sorted_idx[1]], interests[sorted_idx[0]])
| [
"yake.KeywordExtractor",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.argsort",
"numpy.zeros",
"numpy.linalg.norm"
] | [((247, 270), 'yake.KeywordExtractor', 'yake.KeywordExtractor', ([], {}), '()\n', (268, 270), False, 'import yake\n'), ((914, 927), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (922, 927), True, 'import numpy as np\n'), ((1128, 1242), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['"""./word2vec/GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "(\n './word2vec/GoogleNews-vectors-negative300.bin', binary=True)\n", (1175, 1242), False, 'import gensim\n'), ((3786, 3805), 'numpy.argsort', 'np.argsort', (['cos_sim'], {}), '(cos_sim)\n', (3796, 3805), True, 'import numpy as np\n'), ((3670, 3694), 'numpy.linalg.norm', 'np.linalg.norm', (['encoding'], {}), '(encoding)\n', (3684, 3694), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from utils import util
from scipy.special import logit
import sklearn.linear_model as lm
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, rbf_kernel
from scipy.stats import multivariate_normal as mvn
from sklearn.preprocessing import PolynomialFeatures
def create_xor_data(N):
    """Generate a 2-D XOR-pattern dataset.

    Draws N points from each of four tight Gaussian clusters centred at
    (+-0.5, +-0.5).  Diagonally opposite clusters share a class label,
    producing the classic XOR layout that no linear classifier separates.

    Args:
        N: number of samples per cluster.

    Returns:
        X: (4N, 2) array of points.
        y: (4N,) labels -- 0 for the clusters at (0.5,0.5)/(-0.5,-0.5),
           1 for the clusters at (0.5,-0.5)/(-0.5,0.5).
    """
    # BUG FIX: the original built np.random.RandomState(0) and discarded it,
    # which never seeded anything.  Seed the global RNG (used by mvn.rvs)
    # so the dataset is reproducible.
    np.random.seed(0)
    C = 0.01*np.eye(2)
    Gs = [mvn(mean=[0.5,0.5], cov=C),
          mvn(mean=[-0.5,-0.5], cov=C),
          mvn(mean=[0.5,-0.5], cov=C),
          mvn(mean=[-0.5,0.5], cov=C)]
    X = np.concatenate([G.rvs(size=N) for G in Gs])
    y = np.concatenate((np.zeros(2*N), np.ones(2*N)))
    return X,y
def plotScatter(X0, X1, y):
    """Scatter-plot points by class: class 1 as red 'x', any other class as blue 'o'."""
    for a, b, label in zip(X0, X1, y):
        if label == 1:
            plt.scatter(a, b, marker='x', color='red')
        else:
            plt.scatter(a, b, marker='o', color='blue')
# Compare five ways of separating the XOR data: plain/quadratic logistic
# regression on explicit features, and logistic regression on linear,
# quadratic and RBF kernel (Gram) matrices.
X,y = create_xor_data(10)
transformers = [PolynomialFeatures(1), # no-op
                PolynomialFeatures(2),
                PolynomialFeatures(1),
                PolynomialFeatures(1),
                PolynomialFeatures(1)]
models = [LogisticRegression(C=1.0),
          LogisticRegression(C=1.0),
          LogisticRegression(C=1.0),
          LogisticRegression(C=1.0),
          LogisticRegression(C=1.0),]
# Each kernel maps (X0, X1) to a Gram matrix; identity for the non-kernel models.
kernels = [lambda X0, X1: X0, # No Kernel
           lambda X0, X1: X0, # No Kernel
           lambda X0, X1: linear_kernel(X0, X1),
           lambda X0, X1: polynomial_kernel(X0, X1, degree=2),
           lambda X0, X1: rbf_kernel(X0, X1, gamma=15)]
names = ['Linear Logistic Regression',
         'Quadratic Logistic Regression',
         'Linear kernel',
         'Quadratic kernel',
         'RBF Kernel']
file_names = ['Linear', 'Quad', 'LinearKernel', 'QuadKernel', 'Rbf']
# pdf image files are very big (1MB), png is ~24kb
#file_type = '.pdf'
file_type = '.png'
for i in range(len(models)):
    transformer = transformers[i]
    XX = transformer.fit_transform(X)[:,1:] # skip the first column of 1s
    # Gram matrix (or raw features) used to train this model.
    transX = kernels[i](XX, XX)
    model = models[i].fit(transX, y)
    print('experiment %d' % (i))
    #print(model.Cs_)
    #print(model.C_)
    #print(model.scores_)
    # Evaluate the fitted model on a 250x250 grid over [-1,1]^2.
    xx, yy = np.meshgrid(np.linspace(-1, 1, 250), np.linspace(-1, 1, 250))
    grid = np.c_[xx.ravel(), yy.ravel()]
    grid2 = transformer.transform(grid)[:,1:]
    # Kernel between grid points and training points gives the test-time features.
    Z = model.predict(kernels[i](grid2, XX)).reshape(xx.shape)
    # Decision-boundary plot: hard class predictions plus the training points.
    fig, ax = plt.subplots()
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.coolwarm)
    plotScatter(X[:, 0], X[:, 1], y)
    plt.title(names[i])
    fname = 'figures/logregXor%sBoundary%s' % (file_names[i], file_type)
    print(fname)
    plt.savefig(fname, dpi=600)
    plt.draw()
    if True:
        # Companion plot: the predicted probability of class 1 over the same grid.
        Z = model.predict_proba(kernels[i](grid2, XX))[:,1].reshape(xx.shape)
        fig, ax = plt.subplots()
        plt.pcolormesh(xx, yy, Z, cmap=plt.cm.coolwarm)
        plt.colorbar()
        plt.title('Prob Class 1')
        plt.savefig('figures/logregXor%sProbClass1%s' % (file_names[i], file_type))
        plt.draw()
plt.show()
| [
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.pairwise.rbf_kernel",
"scipy.stats.multivariate_normal",
"matplotlib.pyplot.pcolormesh",
"numpy.random.RandomState",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.eye",
"matplotlib.pyplot.savefig",
"numpy.ones",
"sklearn.metric... | [((3144, 3154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3152, 3154), True, 'import matplotlib.pyplot as plt\n'), ((477, 501), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (498, 501), True, 'import numpy as np\n'), ((1089, 1110), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(1)'], {}), '(1)\n', (1107, 1110), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1135, 1156), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {}), '(2)\n', (1153, 1156), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1173, 1194), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(1)'], {}), '(1)\n', (1191, 1194), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1212, 1233), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(1)'], {}), '(1)\n', (1230, 1233), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1250, 1271), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(1)'], {}), '(1)\n', (1268, 1271), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1284, 1309), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1302, 1309), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((1321, 1346), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1339, 1346), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((1358, 1383), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1376, 1383), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((1395, 1420), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1413, 1420), False, 'from 
sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((1432, 1457), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1450, 1457), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((2561, 2575), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2573, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2625), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.coolwarm'}), '(xx, yy, Z, cmap=plt.cm.coolwarm)\n', (2592, 2625), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2682), 'matplotlib.pyplot.title', 'plt.title', (['names[i]'], {}), '(names[i])\n', (2672, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2771, 2798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'dpi': '(600)'}), '(fname, dpi=600)\n', (2782, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2817), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2815, 2817), True, 'import matplotlib.pyplot as plt\n'), ((515, 524), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (521, 524), True, 'import numpy as np\n'), ((535, 562), 'scipy.stats.multivariate_normal', 'mvn', ([], {'mean': '[0.5, 0.5]', 'cov': 'C'}), '(mean=[0.5, 0.5], cov=C)\n', (538, 562), True, 'from scipy.stats import multivariate_normal as mvn\n'), ((573, 602), 'scipy.stats.multivariate_normal', 'mvn', ([], {'mean': '[-0.5, -0.5]', 'cov': 'C'}), '(mean=[-0.5, -0.5], cov=C)\n', (576, 602), True, 'from scipy.stats import multivariate_normal as mvn\n'), ((613, 641), 'scipy.stats.multivariate_normal', 'mvn', ([], {'mean': '[0.5, -0.5]', 'cov': 'C'}), '(mean=[0.5, -0.5], cov=C)\n', (616, 641), True, 'from scipy.stats import multivariate_normal as mvn\n'), ((652, 680), 'scipy.stats.multivariate_normal', 'mvn', ([], {'mean': '[-0.5, 0.5]', 'cov': 'C'}), '(mean=[-0.5, 0.5], cov=C)\n', (655, 680), True, 'from scipy.stats import multivariate_normal as 
mvn\n'), ((996, 1043), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x0', 'x1'], {'marker': 'marker', 'color': 'color'}), '(x0, x1, marker=marker, color=color)\n', (1007, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1573, 1594), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['X0', 'X1'], {}), '(X0, X1)\n', (1586, 1594), False, 'from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, rbf_kernel\n'), ((1622, 1657), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['X0', 'X1'], {'degree': '(2)'}), '(X0, X1, degree=2)\n', (1639, 1657), False, 'from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, rbf_kernel\n'), ((1685, 1713), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_kernel', (['X0', 'X1'], {'gamma': '(15)'}), '(X0, X1, gamma=15)\n', (1695, 1713), False, 'from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, rbf_kernel\n'), ((2355, 2378), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(250)'], {}), '(-1, 1, 250)\n', (2366, 2378), True, 'import numpy as np\n'), ((2380, 2403), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(250)'], {}), '(-1, 1, 250)\n', (2391, 2403), True, 'import numpy as np\n'), ((2922, 2936), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2934, 2936), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2990), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.coolwarm'}), '(xx, yy, Z, cmap=plt.cm.coolwarm)\n', (2957, 2990), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3011), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3009, 3011), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3043), 'matplotlib.pyplot.title', 'plt.title', (['"""Prob Class 1"""'], {}), "('Prob Class 1')\n", (3027, 3043), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3125), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/logregXor%sProbClass1%s' % (file_names[i], file_type))"], {}), 
"('figures/logregXor%sProbClass1%s' % (file_names[i], file_type))\n", (3061, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3132, 3142), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3140, 3142), True, 'import matplotlib.pyplot as plt\n'), ((757, 772), 'numpy.zeros', 'np.zeros', (['(2 * N)'], {}), '(2 * N)\n', (765, 772), True, 'import numpy as np\n'), ((772, 786), 'numpy.ones', 'np.ones', (['(2 * N)'], {}), '(2 * N)\n', (779, 786), True, 'import numpy as np\n')] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2017/6/21 12:26
# @Author : HouJP
# @Email : <EMAIL>
import ConfigParser
import random
import numpy as np
from scipy import sparse
from bin.featwheel.utils import LogUtil
from ..featwheel.feature import Feature
from ..postprocessor import PostProcessor
class Stacking(object):
    """Export cross-validated model predictions as a one-column stacking feature.

    Reads the out-of-fold (offline) predictions and the per-fold online
    predictions produced by an earlier CV run, re-orders the offline
    predictions back into raw-dataset order, samples one random fold's
    prediction for each online example, and saves both as sparse feature
    matrices for a downstream (stacked) model.
    """

    def __init__(self, config_fp):
        """Load the experiment configuration from ``config_fp``."""
        # load configuration file
        self.config = ConfigParser.ConfigParser()
        self.config.read(config_fp)

    def extract(self):
        """Build and save the train/test stacking feature matrices."""
        version = self.config.get('INFO', 'version')
        # BUG FIX: ConfigParser.get() returns a string, but cv_num is used
        # below with %d formatting and range(), both requiring an int.
        cv_num = self.config.getint('INFO', 'cv_num')
        offline_rawset_name = self.config.get('MODEL', 'offline_rawset_name')
        index_fp = self.config.get('DIRECTORY', 'feature_pt')
        feature_name = '%s_%s' % (self.__class__.__name__, version)
        # load prediction of offline tests (concatenated over folds, in fold order)
        offline_test_pred_all_fp = '%s/pred/cv_n%d_test.%s.pred' % (
            self.config.get('DIRECTORY', 'out_pt'), cv_num, offline_rawset_name)
        offline_test_pred_all_origin = PostProcessor.read_result_list(offline_test_pred_all_fp)
        offline_test_pred_all = [0] * len(offline_test_pred_all_origin)
        # load index of offline tests
        offline_test_index_all = list()
        for fold_id in range(cv_num):
            offline_test_indexs_fp = '%s/cv_n%d_f%d_test.%s.index' % (
                index_fp, cv_num, fold_id, offline_rawset_name)
            offline_test_indexs = Feature.load_index(offline_test_indexs_fp)
            offline_test_index_all.extend(offline_test_indexs)
        # scatter the fold-ordered predictions back into raw-dataset order
        for index in range(len(offline_test_pred_all)):
            offline_test_pred_all[offline_test_index_all[index]] = offline_test_pred_all_origin[index]
        # load prediction of online data set
        online_preds = list()
        for fold_id in range(cv_num):
            online_pred_fp = '%s/cv_n%d_f%d_online.%s.pred' % (
                self.config.get('DIRECTORY', 'pred_pt'),
                cv_num,
                fold_id,
                self.config.get('MODEL', 'online_test_rawset_name'))
            online_pred_one = PostProcessor.read_result_list(online_pred_fp)
            online_preds.append(online_pred_one)
        # sample for online prediction: pick one random fold's prediction per row
        online_pred = []
        for i in range(len(online_preds[0])):
            cv_id = int(random.random() * cv_num)
            online_pred.append(online_preds[cv_id][i])
        # wrap each prediction as a one-element row (single feature column)
        offline_pred = [[fv] for fv in offline_test_pred_all]
        online_pred = [[fv] for fv in online_pred]
        # directory of features
        feature_pt = self.config.get('DIRECTORY', 'feature_pt')
        train_feature_fp = '%s/%s.train.smat' % (feature_pt, feature_name)
        test_feature_fp = '%s/%s.test.smat' % (feature_pt, feature_name)
        train_features = sparse.csr_matrix(np.array(offline_pred))
        Feature.save_smat(train_features, train_feature_fp)
        LogUtil.log('INFO', 'save train features (%s) done' % feature_name)
        test_features = sparse.csr_matrix(np.array(online_pred))
        Feature.save_smat(test_features, test_feature_fp)
        LogUtil.log('INFO', 'save test features (%s) done' % feature_name)
"numpy.array",
"random.random",
"bin.featwheel.utils.LogUtil.log",
"ConfigParser.ConfigParser"
] | [((434, 461), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (459, 461), False, 'import ConfigParser\n'), ((2935, 3002), 'bin.featwheel.utils.LogUtil.log', 'LogUtil.log', (['"""INFO"""', "('save train features (%s) done' % feature_name)"], {}), "('INFO', 'save train features (%s) done' % feature_name)\n", (2946, 3002), False, 'from bin.featwheel.utils import LogUtil\n'), ((3135, 3201), 'bin.featwheel.utils.LogUtil.log', 'LogUtil.log', (['"""INFO"""', "('save test features (%s) done' % feature_name)"], {}), "('INFO', 'save test features (%s) done' % feature_name)\n", (3146, 3201), False, 'from bin.featwheel.utils import LogUtil\n'), ((2843, 2865), 'numpy.array', 'np.array', (['offline_pred'], {}), '(offline_pred)\n', (2851, 2865), True, 'import numpy as np\n'), ((3046, 3067), 'numpy.array', 'np.array', (['online_pred'], {}), '(online_pred)\n', (3054, 3067), True, 'import numpy as np\n'), ((2359, 2374), 'random.random', 'random.random', ([], {}), '()\n', (2372, 2374), False, 'import random\n')] |
"""
This RNN is used for predicting stock trends of the Google stock.
@Editor: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Part 1 - Data Preprocessing
# Only the training set is used here; the test set is introduced after
# training is complete.
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
# Grab the "Open" price column as a 2-D numpy array (n_samples, 1) rather
# than a flat vector, as MinMaxScaler expects.
training_set = dataset_train.iloc[:,1:2].values
# Min-max normalization: all prices are rescaled into [0, 1].
sc = MinMaxScaler(feature_range = (0,1), copy=True)
# fit_transform learns the min & max of the series and returns the
# normalized values.
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 time steps and 1 output
# the 60 time steps is a tested value, that has to be iterated over the model to find.
# This means that each day looks at the three previous months to predict the price
x_train = []
y_train = []
# the 60 refers to the three months, 1258 is total # of prices
for i in range(60, 1258):
# This gets the range of all of the 60 last stock prices
x_train.append(training_set_scaled[i-60:i,0])
#uses the next stock price as the output prediction
y_train.append(training_set_scaled[i,0])
# This converts the data to user arrays for tensorflow
x_train, y_train = np.array(x_train), np.array(y_train)
# Reshaping, only done to inputs to add dimensions
# Use the keras library for this
# input dimensions can refer to the same stock stats, or comparing stocks
# the shape function gets the size of the axis specified, can use more than 2 dimensions
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# Part 2 - Building the RNN
# This is a stacked LSTM with dropout regularization
# This is called regressor due to its continuous output
# Regression is for predicting continuous, classification is for predicting finite output
regressor = Sequential()
# Adding the first LSTM Layer and some dropout regularization
# Units -> The number of LSTM cells or modules in the layer
# Return Sequences -> True, because it will have several LSTM layers. False when you're on the last layer
# Input Shape -> Shape of the input containing x_train
# High dimensionality and lots of neurons in each will help the accuracy
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
# second step of first LSTM Layer is to add dropout regularization
# Classic # is 20%, aka 20% of neurons will be ignored in forward and backware propagation
regressor.add(Dropout(rate=0.20))
# Add a second LSTM layer with dropout regularization
# because this is the second layer, input layer is not required
# 50 neurons in previous layer is assumed
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(rate=0.20))
# Add a third LSTM layer with dropout regularization
# Same as second LSTM layer -> both are middle, hidden layers
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(rate=0.20))
# Add a fourth LSTM Layer with dropout regularization
# 50 units stays the same because this is not the final layer.
# Output layer to follow for the one continuous output of the regression
# Return sequences should be false because no more LSTM modules present
regressor.add(LSTM(units=50))
regressor.add(Dropout(rate=0.20))
# Add a classic fully connected, output layer
# 1 output unit corresponds to the
regressor.add(Dense(units=1))
# Compile the RNN
# RMS prop is recommended for RNN's but adam used here
# Loss function is mean squared error
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fit the RNN
# X & Y are the input numpy arrays and output numpy arrays
# Batch_size
regressor.fit(x=x_train, y=y_train, batch_size=32, epochs=100)
# Part 3 - Making the predictions and visualizing the results
# Avoid overfitting of the training set because then it won't have enough variance to recognize other test sets
# Getting the real stock price open of 2017
actual_results = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = actual_results.iloc[:, 1:2].values
# Getting the predicted stock price of 2017
# Need the 60 previous days to create a concatenated data
dataset_total = pd.concat((dataset_train['Open'], actual_results['Open']), axis=0)
inputs = dataset_total[len(dataset_total)-len(actual_results)-60:].values
inputs = inputs.reshape(-1, 1)
# The object doesn't need to be fit, only scaled
inputs = sc.transform(inputs)
# only scale the inputs, not the test values
# Visualize the results
x_test = []
# the 60 refers to the three months, 80 refers to the predicted month
for i in range(60, 80):
# This gets the range of all of the 60 last stock prices
x_test.append(inputs[i-60:i,0])
# This converts the data to user arrays for tensorflow
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# This is the prediction part of the RNN
predicted_stock_price = regressor.predict(x_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# Visualization of prices using matplotlib
plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time (Days)')
plt.ylabel('Stock Price ($)')
plt.legend()
plt.show() | [
"numpy.reshape",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"numpy.array",
"keras.layers.LSTM",
"keras.layers.Dropout",
"pandas.concat",
"keras.layers.Dense",
"matplotlib.pyplot.title",
"sklearn.preprocessin... | [((497, 540), 'pandas.read_csv', 'pd.read_csv', (['"""Google_Stock_Price_Train.csv"""'], {}), "('Google_Stock_Price_Train.csv')\n", (508, 540), True, 'import pandas as pd\n'), ((784, 829), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)', 'copy': '(True)'}), '(feature_range=(0, 1), copy=True)\n', (796, 829), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1937, 1997), 'numpy.reshape', 'np.reshape', (['x_train', '(x_train.shape[0], x_train.shape[1], 1)'], {}), '(x_train, (x_train.shape[0], x_train.shape[1], 1))\n', (1947, 1997), True, 'import numpy as np\n'), ((2239, 2251), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2249, 2251), False, 'from keras.models import Sequential\n'), ((4344, 4386), 'pandas.read_csv', 'pd.read_csv', (['"""Google_Stock_Price_Test.csv"""'], {}), "('Google_Stock_Price_Test.csv')\n", (4355, 4386), True, 'import pandas as pd\n'), ((4560, 4626), 'pandas.concat', 'pd.concat', (["(dataset_train['Open'], actual_results['Open'])"], {'axis': '(0)'}), "((dataset_train['Open'], actual_results['Open']), axis=0)\n", (4569, 4626), True, 'import pandas as pd\n'), ((5154, 5170), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (5162, 5170), True, 'import numpy as np\n'), ((5180, 5237), 'numpy.reshape', 'np.reshape', (['x_test', '(x_test.shape[0], x_test.shape[1], 1)'], {}), '(x_test, (x_test.shape[0], x_test.shape[1], 1))\n', (5190, 5237), True, 'import numpy as np\n'), ((5442, 5514), 'matplotlib.pyplot.plot', 'plt.plot', (['real_stock_price'], {'color': '"""red"""', 'label': '"""Real Google Stock Price"""'}), "(real_stock_price, color='red', label='Real Google Stock Price')\n", (5450, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5603), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted_stock_price'], {'color': '"""blue"""', 'label': '"""Predicted Google Stock Price"""'}), "(predicted_stock_price, color='blue', label=\n 'Predicted Google 
Stock Price')\n", (5523, 5603), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5641), 'matplotlib.pyplot.title', 'plt.title', (['"""Google Stock Price Prediction"""'], {}), "('Google Stock Price Prediction')\n", (5608, 5641), True, 'import matplotlib.pyplot as plt\n'), ((5642, 5667), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Days)"""'], {}), "('Time (Days)')\n", (5652, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5668, 5697), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stock Price ($)"""'], {}), "('Stock Price ($)')\n", (5678, 5697), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5710), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5708, 5710), True, 'import matplotlib.pyplot as plt\n'), ((5711, 5721), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5719, 5721), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1659), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1650, 1659), True, 'import numpy as np\n'), ((1661, 1678), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1669, 1678), True, 'import numpy as np\n'), ((2623, 2695), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)', 'input_shape': '(x_train.shape[1], 1)'}), '(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1))\n', (2627, 2695), False, 'from keras.layers import LSTM\n'), ((2870, 2887), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (2877, 2887), False, 'from keras.layers import Dropout\n'), ((3065, 3102), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)'}), '(units=50, return_sequences=True)\n', (3069, 3102), False, 'from keras.layers import LSTM\n'), ((3118, 3135), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (3125, 3135), False, 'from keras.layers import Dropout\n'), ((3268, 3305), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)'}), '(units=50, 
return_sequences=True)\n', (3272, 3305), False, 'from keras.layers import LSTM\n'), ((3321, 3338), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (3328, 3338), False, 'from keras.layers import Dropout\n'), ((3618, 3632), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(50)'}), '(units=50)\n', (3622, 3632), False, 'from keras.layers import LSTM\n'), ((3648, 3665), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (3655, 3665), False, 'from keras.layers import Dropout\n'), ((3765, 3779), 'keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (3770, 3779), False, 'from keras.layers import Dense\n')] |
import numpy as np
# Matrix for converting axial coordinates to pixel coordinates
axial_to_pixel_mat = np.array([[np.sqrt(3), np.sqrt(3) / 2], [0, 3 / 2.]])
# Matrix for converting pixel coordinates to axial coordinates
pixel_to_axial_mat = np.linalg.inv(axial_to_pixel_mat)
# These are the vectors for moving from any hex to one of its neighbors.
SE = np.array((1, 0, -1))
SW = np.array((0, 1, -1))
W = np.array((-1, 1, 0))
NW = np.array((-1, 0, 1))
NE = np.array((0, -1, 1))
E = np.array((1, -1, 0))
ALL_DIRECTIONS = np.array([NW, NE, E, SE, SW, W, ])
def get_cube_distance(hex_start, hex_end):
"""
Computes the smallest number of hexes between hex_start and hex_end, on the hex lattice.
:param hex_start: Starting hex...
:param hex_end: Ending hex...
:return: Smallest number of hexes between `hex_start` and `hex_end`, on hex lattice.
"""
return np.sum(np.abs(hex_start - hex_end) / 2)
# Selection Functions ######
def get_neighbor(hex, direction):
"""
Simply returns the neighbor, in the direction specified, of the hexagon.
:param hex: Cube coordinates of the hexagon.
:param direction: A direction from the DIR class.
:return: The location of the neighbor in cube coordinates.
"""
return hex + direction
def get_ring(center, radius):
"""
Retrieves the locations of all the hexes exactly a certain distance from a hexagon.
:param center: The location of the hexagon to get the ring of.
:param radius: The distance from `center` of the hexes we want.
:return: An array of locations of the hexes that are exactly `radius` units away from `center`.
"""
if radius < 0:
return []
if radius == 0:
return [center]
rad_hex = np.zeros((6 * radius, 3))
count = 0
for i in range(0, 6):
for k in range(0, radius):
rad_hex[count] = ALL_DIRECTIONS[i - 1] * (radius - k) + ALL_DIRECTIONS[i] * (k)
count += 1
return np.squeeze(rad_hex) + center
def get_disk(center, radius):
"""
Retrieves the locations of all the hexes within certain distance from a hexagon.
:param center: The location of the hexagon to get the neighbors of.
:param radius: The distance from `center` of the hexes we want.
:return: An array of locations of the hexes that are within `radius` units away from `center`.
"""
hex_set = []
for x in range(-radius, radius + 1):
for y in range(max(-radius, -(x + radius)), min(radius + 1, -x + radius + 1)):
z = -x - y
hex_set.append(np.array([x, y, z]))
return np.array(hex_set) + center
def get_spiral(center, radius_start=1, radius_end=2):
"""
Retrieves all hexes that are `radius` hexes away from the `center`.
:param center: The location of the center hex.
:param radius_start: The distance from center. We want all hexes greater than or equal to this distance.
:param radius_end: The distance from center. We want all hexes within this distance from `center`.
:return: An array of locations of the hexes that are within `radius` hexes away from `center`.
"""
hex_area = get_ring(center, 0)
for i in range(radius_start, radius_end + 1):
hex_area = np.append(hex_area, get_ring(center, i), axis=0)
return hex_area
def get_hex_line(hex_start, hex_end):
"""
Get hexes on line from hex_start to hex_end.
:param hex_start: The hex where the line starts.
:param hex_end: The hex where the line ends.
:return: A set of hexes along a straight line from hex_start to hex_end.
"""
hex_distance = get_cube_distance(hex_start, hex_end)
if hex_distance < 1:
return np.array([hex_start])
# Set up linear system to compute linearly interpolated cube points
bottom_row = np.array([i / hex_distance for i in np.arange(hex_distance)])
x = np.vstack((1 - bottom_row, bottom_row))
A = np.vstack((hex_start, hex_end)).T
# linearly interpolate from a to b in n steps
interpolated_points = A.dot(x)
interpolated_points = np.vstack((interpolated_points.T, hex_end))
return np.array(cube_round(interpolated_points))
# Conversion Functions ######
def cube_to_axial(cube):
"""
Convert cube to axial coordinates.
:param cube: A coordinate in cube form. nx3
:return: `cube` in axial form.
"""
return np.vstack((cube[:, 0], cube[:, 2])).T
def axial_to_cube(axial):
"""
Convert axial to cube coordinates.
:param axial: A coordinate in axial form.
:return: `axial` in cube form.
"""
x = axial[:, 0]
z = axial[:, 1]
y = -x - z
cube_coords = np.vstack((x, y, z)).T
return cube_coords
def axial_to_pixel(axial, radius):
"""
Converts the location of a hex in axial form to pixel coordinates.
:param axial: The location of a hex in axial form. nx3
:param radius: Radius of all hexagons.
:return: `axial` in pixel coordinates.
"""
pos = radius * axial_to_pixel_mat.dot(axial.T)
return pos.T
def cube_to_pixel(cube, radius):
"""
Converts the location of a hex in cube form to pixel coordinates.
:param cube: The location of a hex in cube form. nx3
:param radius: Radius of all hexagons.
:return: `cube` in pixel coordinates.
"""
in_axial_form = cube_to_axial(cube)
return axial_to_pixel(in_axial_form, radius)
def pixel_to_cube(pixel, radius):
"""
Converts the location of a hex in pixel coordinates to cube form.
:param pixel: The location of a hex in pixel coordinates. nx2
:param radius: Radius of all hexagons.
:return: `pixel` in cube coordinates.
"""
axial = pixel_to_axial_mat.dot(pixel.T) / radius
return cube_round(axial_to_cube(axial.T))
def pixel_to_axial(pixel, radius):
"""
Converts the location of a hex in pixel coordinates to axial form.
:param pixel: The location of a hex in pixel coordinates. nx2
:param radius: Radius of all hexagons.
:return: `pixel` in axial coordinates.
"""
cube = pixel_to_cube(pixel, radius)
return cube_to_axial(cube)
def cube_round(cubes):
"""
Rounds a location in cube coordinates to the center of the nearest hex.
:param cubes: Locations in cube form. nx3
:return: The location of the center of the nearest hex in cube coordinates.
"""
rounded = np.zeros((cubes.shape[0], 3))
rounded_cubes = np.round(cubes)
for i, cube in enumerate(rounded_cubes):
(rx, ry, rz) = cube
xdiff, ydiff, zdiff = np.abs(cube-cubes[i])
if xdiff > ydiff and xdiff > zdiff:
rx = -ry - rz
elif ydiff > zdiff:
ry = -rx - rz
else:
rz = -rx - ry
rounded[i] = (rx, ry, rz)
return rounded
def axial_round(axial):
"""
Rounds a location in axial coordinates to the center of the nearest hex.
:param axial: A location in axial form. nx2
:return: The location of the center of the nearest hex in axial coordinates.
"""
return cube_to_axial(cube_round(axial_to_cube(axial)))
| [
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.vstack",
"numpy.round"
] | [((244, 277), 'numpy.linalg.inv', 'np.linalg.inv', (['axial_to_pixel_mat'], {}), '(axial_to_pixel_mat)\n', (257, 277), True, 'import numpy as np\n'), ((358, 378), 'numpy.array', 'np.array', (['(1, 0, -1)'], {}), '((1, 0, -1))\n', (366, 378), True, 'import numpy as np\n'), ((384, 404), 'numpy.array', 'np.array', (['(0, 1, -1)'], {}), '((0, 1, -1))\n', (392, 404), True, 'import numpy as np\n'), ((409, 429), 'numpy.array', 'np.array', (['(-1, 1, 0)'], {}), '((-1, 1, 0))\n', (417, 429), True, 'import numpy as np\n'), ((435, 455), 'numpy.array', 'np.array', (['(-1, 0, 1)'], {}), '((-1, 0, 1))\n', (443, 455), True, 'import numpy as np\n'), ((461, 481), 'numpy.array', 'np.array', (['(0, -1, 1)'], {}), '((0, -1, 1))\n', (469, 481), True, 'import numpy as np\n'), ((486, 506), 'numpy.array', 'np.array', (['(1, -1, 0)'], {}), '((1, -1, 0))\n', (494, 506), True, 'import numpy as np\n'), ((524, 556), 'numpy.array', 'np.array', (['[NW, NE, E, SE, SW, W]'], {}), '([NW, NE, E, SE, SW, W])\n', (532, 556), True, 'import numpy as np\n'), ((1744, 1769), 'numpy.zeros', 'np.zeros', (['(6 * radius, 3)'], {}), '((6 * radius, 3))\n', (1752, 1769), True, 'import numpy as np\n'), ((3869, 3908), 'numpy.vstack', 'np.vstack', (['(1 - bottom_row, bottom_row)'], {}), '((1 - bottom_row, bottom_row))\n', (3878, 3908), True, 'import numpy as np\n'), ((4063, 4106), 'numpy.vstack', 'np.vstack', (['(interpolated_points.T, hex_end)'], {}), '((interpolated_points.T, hex_end))\n', (4072, 4106), True, 'import numpy as np\n'), ((6353, 6382), 'numpy.zeros', 'np.zeros', (['(cubes.shape[0], 3)'], {}), '((cubes.shape[0], 3))\n', (6361, 6382), True, 'import numpy as np\n'), ((6403, 6418), 'numpy.round', 'np.round', (['cubes'], {}), '(cubes)\n', (6411, 6418), True, 'import numpy as np\n'), ((1972, 1991), 'numpy.squeeze', 'np.squeeze', (['rad_hex'], {}), '(rad_hex)\n', (1982, 1991), True, 'import numpy as np\n'), ((2600, 2617), 'numpy.array', 'np.array', (['hex_set'], {}), '(hex_set)\n', (2608, 2617), True, 
'import numpy as np\n'), ((3687, 3708), 'numpy.array', 'np.array', (['[hex_start]'], {}), '([hex_start])\n', (3695, 3708), True, 'import numpy as np\n'), ((3917, 3948), 'numpy.vstack', 'np.vstack', (['(hex_start, hex_end)'], {}), '((hex_start, hex_end))\n', (3926, 3948), True, 'import numpy as np\n'), ((4367, 4402), 'numpy.vstack', 'np.vstack', (['(cube[:, 0], cube[:, 2])'], {}), '((cube[:, 0], cube[:, 2]))\n', (4376, 4402), True, 'import numpy as np\n'), ((4642, 4662), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (4651, 4662), True, 'import numpy as np\n'), ((6522, 6545), 'numpy.abs', 'np.abs', (['(cube - cubes[i])'], {}), '(cube - cubes[i])\n', (6528, 6545), True, 'import numpy as np\n'), ((116, 126), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (123, 126), True, 'import numpy as np\n'), ((892, 919), 'numpy.abs', 'np.abs', (['(hex_start - hex_end)'], {}), '(hex_start - hex_end)\n', (898, 919), True, 'import numpy as np\n'), ((128, 138), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (135, 138), True, 'import numpy as np\n'), ((2568, 2587), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (2576, 2587), True, 'import numpy as np\n'), ((3835, 3858), 'numpy.arange', 'np.arange', (['hex_distance'], {}), '(hex_distance)\n', (3844, 3858), True, 'import numpy as np\n')] |
import numpy as np
import scipy
from scipy.stats import binom
# numpy.seterr(all='raise')
#
# Decision tree: Regression
#
class Tree():
def __init__(self, X, y, maxDepth, alpha0 = None, baseline_features = None, peek_ahead_max_depth=0, split_val_quantiles = [], peek_ahead_quantiles = [], nSamples = 0, cut_middle_y=0, no_downstream_feature_repeats=0, internal_cross_val=0, internal_cross_val_nodes=0, treegrowth_CV=0, beta0 = 0, beta0_vec = [0, 0]):
self.X = X
self.y = y
self.maxDepth = maxDepth
self.alpha0 = alpha0
self.peek_ahead_max_depth = peek_ahead_max_depth
self.split_val_quantiles = split_val_quantiles
self.peek_ahead_quantiles = peek_ahead_quantiles
self.nSamples = nSamples
self.cut_middle_y = cut_middle_y
self.no_downstream_feature_repeats = no_downstream_feature_repeats
self.internal_cross_val = internal_cross_val
self.internal_cross_val_nodes = internal_cross_val_nodes
self.treegrowth_CV = treegrowth_CV
self.beta0 = beta0
self.beta0_vec = beta0_vec
self.peek_ahead_depth = 0 # Changed in loop in build_tree
self.tree_info = []
self.current_node_index = 0
self.baseline_features = baseline_features
if self.cut_middle_y == 1:
q = np.quantile(self.y, [0.33, 0.66])
self.X = self.X[(self.y < q[0]) | (self.y > q[1]), :]
self.y = self.y[(self.y < q[0]) | (self.y > q[1])]
if self.alpha0 == None:
self.f_auto_alpha()
def f_auto_alpha(self):
self.alpha0 = 0
current_d_alpha = 0.1
min_d_alpha = 0.003
nIts_per_alpha = 200
print("Finding alpha:")
while abs(current_d_alpha) > min_d_alpha:
print('\tAlpha = ', self.alpha0)
n_non_empty_tree = 0
for iIt in range(nIts_per_alpha):
perm_indices = np.random.permutation(len(self.y))
y_perm = self.y[perm_indices]
tree0 = self.teg_tree_inner(self.X, y_perm)
C, nodes_collapsed = self.prune_the_tree(tree0)
# print(C)
# print(nodes_collapsed)
best_collapse_seq_end = np.argmin(C)
if (best_collapse_seq_end < (len(C) - 1)):
n_non_empty_tree = n_non_empty_tree + 1
p = n_non_empty_tree / nIts_per_alpha # False-positive tree
if current_d_alpha > 0:
if p < 0.05:
current_d_alpha = -abs(current_d_alpha) / 2
if current_d_alpha < 0:
if p >= 0.05:
current_d_alpha = abs(current_d_alpha) / 2
self.alpha0 = self.alpha0 + current_d_alpha
if (self.alpha0 < 0):
self.alpha0 = 0
print('\t-> p, new alpha, new dAlpha = ', p, self.alpha0, current_d_alpha)
print('Auto-selected alpha = ', self.alpha0);
def build_tree(self):
tree0 = []
cost_complexity_criterion = np.inf
best_peek_crit = np.NaN
best_raw_tree = []
best_C_min_v_crossval = []
best_C_min_v_null = []
p = 1
for peek_ahead_depth in range(self.peek_ahead_max_depth + 1):
print('Finding tree for peek_ahead_depth = ', peek_ahead_depth)
self.peek_ahead_depth = peek_ahead_depth
tree0_this, cost_complexity_criterion_this, raw_tree, C_min_v_crossval, C_min_v_null, p = self.teg_regression_tree()
print('Cost-Complexity Criterion = ', cost_complexity_criterion_this)
if cost_complexity_criterion_this < cost_complexity_criterion:
tree0 = tree0_this
cost_complexity_criterion = cost_complexity_criterion_this
best_peek_crit = peek_ahead_depth
best_raw_tree = raw_tree
best_C_min_v_crossval = C_min_v_crossval
best_C_min_v_null = C_min_v_null
print(" ! New best tree !")
print("\n")
print("Best tree was found at peek-ahead depth = ", best_peek_crit)
Output = {'tree': tree0, 'cost_complexity_criterion':cost_complexity_criterion, 'best_peek_crit':best_peek_crit, 'raw_tree':best_raw_tree, 'CV_distr':best_C_min_v_crossval, 'null_distr':best_C_min_v_null, 'p':p}
self.tree_info = Output
return Output
def teg_regression_tree(self):
if (self.nSamples == 0):
if self.internal_cross_val == 1:
print('Internal cross validation not used with nSamples=0.')
mean_y = np.nanmean(self.y)
sd_y = np.sqrt(np.var(self.y))
y = (self.y - mean_y) / sd_y
tree0 = self.teg_tree_inner(self.X, y)
C, nodes_collapsed = self.prune_the_tree(tree0)
C_min_v_crossval = []
C_min_v_null = []
p = 1
else:
best_mean_y = np.NaN
best_sd_y = np.NaN
best_C_min = np.inf
best_tree = []
best_C = []
best_nodes_collapsed = []
C_min_v_crossval = []
C_min_v_null = []
for iSample in range(self.nSamples):
#print(iSample)
# Random split sample into:
# Subsample to construct tree
# Independent subsample used for entropy per terminal node
# Additionally, create a randomly permuted sample to generate a null distribution over the samples
perm_indices = np.random.permutation(len(self.y))
a = int(np.floor(len(self.y) / 2))
set1_indices = perm_indices[1:a]
set2_indices = perm_indices[a:]
# Split half used to build tree
y_1 = self.y[set1_indices]
X_1 = self.X[set1_indices, :]
# Split half used for cross-validation and NHST
y_2 = self.y[set2_indices]
X_2 = self.X[set2_indices, :]
set3_indices = np.random.permutation(set2_indices)
X_3 = self.X[set3_indices, :]
mean_y_1 = np.nanmean(y_1)
sd_y_1 = np.sqrt(np.var(y_1))
y_1 = (y_1 - mean_y_1) / sd_y_1
mean_y_2 = np.nanmean(y_2)
sd_y_2 = np.sqrt(np.var(y_2))
y_2 = (y_2 - mean_y_2) / sd_y_2
# Null distribution
# y_null = np.random.permutation(y_2) # Already normalized
y_null = y_2
X_null = X_3 # Permuted X
if self.baseline_features != None:
# Baseline columns of X_null remain statistically linked to y_null; other columns are randomized
X_null[:, self.baseline_features] = X_2[:, self.baseline_features]
#
tree0 = self.teg_tree_inner(X_1, y_1)
C, nodes_collapsed = self.prune_the_tree(tree0)
best_collapse_seq_end = np.argmin(C)
nodes_collapsed_choice = nodes_collapsed[0:(best_collapse_seq_end + 1)]
tree0_CV = self.tree_copy(tree0, X_2, y_2)
min_C_CV_original_pruning = self.f_C(tree0_CV, nodes_collapsed_choice)
C_CV, nodes_collapsed_CV = self.prune_the_tree(tree0_CV)
min_C_CV = np.min(C_CV)
tree0_null = self.tree_copy(tree0, X_null, y_null)
min_C_null = self.f_C(tree0_null, nodes_collapsed_choice)
#C_null, nodes_collapsed_null = self.prune_the_tree(tree0_null)
#min_C_null = np.min(C_null)
#
C_min_v_crossval.append(min_C_CV_original_pruning)
C_min_v_null.append(min_C_null)
delta_C = min_C_CV_original_pruning - min_C_null # Find cross-validated tree must distinct from null tree
if self.internal_cross_val == 1:
best_C_min_to_use = min_C_CV
# best_C_min_to_use = delta_C
else:
best_C_min_to_use = np.min(C)
# Pick the tree that has the lowest minimal CCC found in the C vector
if best_C_min_to_use < best_C_min:
best_C_min = best_C_min_to_use
best_mean_y = mean_y_1
best_sd_y = sd_y_1
best_tree = tree0
best_C = C
if (self.internal_cross_val == 1) and (self.internal_cross_val_nodes == 1):
best_nodes_collapsed = nodes_collapsed_CV
else:
best_nodes_collapsed = nodes_collapsed
mean_y = best_mean_y
sd_y = best_sd_y
tree0 = best_tree
C = best_C
nodes_collapsed = best_nodes_collapsed
d_for_NHST = np.array(C_min_v_crossval) - np.array(C_min_v_null)
obs_CV_better = np.sum(d_for_NHST < 0)
p = 1 - binom.cdf(obs_CV_better, d_for_NHST.size, 0.5) # Ties are coded conservatively
#print(tree0)
#print(C)
#print(nodes_collapsed)
# print(len(C))
# Apply selected tree0 to full dataset to get consistent terminal nodes, independent of random split when best tree was found
mean_y = np.nanmean(self.y)
sd_y = np.sqrt(np.var(self.y))
tree0 = self.tree_copy(tree0, self.X, self.y)
self.print_tree(tree0, C, nodes_collapsed, mean_y, sd_y)
collapsed_tree = self.collapse_tree(tree0, C, nodes_collapsed, mean_y, sd_y)
if len(C) > 0:
return collapsed_tree, min(C), tree0, C_min_v_crossval, C_min_v_null, p
else:
return collapsed_tree, np.NaN, tree0, C_min_v_crossval, C_min_v_null, p
def teg_tree_inner(self, X, y, iDepth=0, prev_terminal_node_pred=np.nan, visited_features_v = None):
if not isinstance(visited_features_v, np.ndarray):
visited_features_v = np.array([])
# print("Params: ", twostep, internalEnsemble)
if (iDepth == 0):
self.current_node_index = 0
else:
self.current_node_index = self.current_node_index + 1
# print(node_index_v)
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = 0
SS_pre_split = self.f_SS_assigned_to_node(y)
# Check whether maxdepth passed or y is empty
if (iDepth >= self.maxDepth) or (len(y) <= 1) or (SS_pre_split == 0):
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = prev_terminal_node_pred
return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y], np.NaN, np.NaN]
# Create branches
# Check one step ahead
best_split_feature = np.NaN
best_split_val = np.NaN
SS_best = np.inf
if self.treegrowth_CV == 1:
perm_indices = np.random.permutation(len(y))
a = int(np.floor(len(y) / 2))
ind_for_splitval = perm_indices[1:a]
ind_for_feature_comparison = perm_indices[a:]
else:
ind_for_splitval = range(len(y))
ind_for_feature_comparison = range(len(y))
for iFeature1 in range(X.shape[1]):
if self.no_downstream_feature_repeats and (np.sum(np.array(visited_features_v) == iFeature1) > 0):
continue
best_split_val_this, SS_best_this = self.f_get_best_split_val(iFeature1, y, X, ind_for_splitval, ind_for_feature_comparison, self.maxDepth - iDepth)
if SS_best_this < SS_best:
#print("New best!")
best_split_feature = iFeature1
best_split_val = best_split_val_this
SS_best = SS_best_this
#print("> iFeature1: ", iFeature1, ", SS_best_this: ", SS_best_this)
if np.isnan(best_split_feature):
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = prev_terminal_node_pred
return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y], np.NaN, np.NaN]
ind_left = (X[:, best_split_feature] < best_split_val)
ind_right = (X[:, best_split_feature] >= best_split_val)
SS_left = self.f_SS(y[ind_left])
SS_right = self.f_SS(y[ind_right])
best_split = [best_split_feature, best_split_val, SS_pre_split, SS_left, SS_right, len(y), self.current_node_index, iDepth, y]
branch_left = self.teg_tree_inner(X[ind_left, :], y[ind_left], iDepth + 1, prev_terminal_node_pred=terminal_node_pred, visited_features_v=np.append(visited_features_v, best_split_feature))
branch_right = self.teg_tree_inner(X[ind_right, :], y[ind_right], iDepth + 1, visited_features_v=np.append(visited_features_v, best_split_feature))
return [best_split, branch_left, branch_right]
def f_get_best_SS_peek(self, y, X, this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth = 0):
# print(current_peek_depth, peek_ahead_depth, peek_ahead_maxDepth_limiter)
if (len(y) <= 1) or (current_peek_depth >= this_peek_ahead_depth) or (current_peek_depth >= peek_ahead_maxDepth_limiter):
return self.f_SS_for_split(y)
best_SS = np.inf
best_feature_peek = np.nan
best_val_peek = np.nan
for iFeature_this in range(X.shape[1]):
if len(self.peek_ahead_quantiles) == 0:
splitting_vals_this = np.unique(X[:, iFeature_this])
else:
splitting_vals_this = np.quantile(X[:, iFeature_this], self.peek_ahead_quantiles)
for val_this in splitting_vals_this:
ind_left = (X[:, iFeature_this] < val_this)
ind_right = (X[:, iFeature_this] >= val_this)
best_SS_left = self.f_get_best_SS_peek(y[ind_left], X[ind_left, :], this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth + 1)
best_SS_right = self.f_get_best_SS_peek(y[ind_right], X[ind_right, :], this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth + 1)
current_SS = best_SS_left + best_SS_right
if (current_SS < best_SS):
best_SS = current_SS
best_feature_peek = iFeature_this
best_val_peek = val_this
#print(">>> best_feature_peek: ", best_feature_peek, ", best_val_peek: ", best_val_peek, ", best_SS: ", best_SS)
return best_SS
def f_get_SS_for_split(self, iFeature1, val1, X, y, peek_ahead_maxDepth_limiter):
ind_left = (X[:, iFeature1] < val1)
ind_right = (X[:, iFeature1] >= val1)
SS_best_over_peeks = np.inf
for this_peek_ahead_depth in range(self.peek_ahead_depth + 1):
SS_left = self.f_get_best_SS_peek(y[ind_left], X[ind_left, :], this_peek_ahead_depth,
peek_ahead_maxDepth_limiter)
SS_right = self.f_get_best_SS_peek(y[ind_right], X[ind_right, :], this_peek_ahead_depth,
peek_ahead_maxDepth_limiter)
# print(iFeature1, val1, SS_left, SS_right)
SS_this = SS_left + SS_right
if (SS_this < SS_best_over_peeks):
SS_best_over_peeks = SS_this
return SS_best_over_peeks
def f_get_best_split_val(self, iFeature1, y, X, ind_for_splitval, ind_for_feature_comparison, peek_ahead_maxDepth_limiter):
    """Find the best split value for feature `iFeature1`.

    Candidate thresholds are scored on the `ind_for_splitval` rows; the
    winning threshold is then re-scored on the independent
    `ind_for_feature_comparison` rows so that feature comparison does not
    reuse the split-selection sample.

    Returns
    -------
    (best_split_val, SS_best) : the chosen threshold and its (penalized) SS.
    """
    best_split_val = np.NaN
    SS_best = np.inf
    if len(self.split_val_quantiles) == 0:
        splitting_vals1 = np.unique(X[:, iFeature1])
    else:
        # BUG FIX: was `split.split_val_quantiles` -- `split` is undefined
        # (NameError at runtime).  The quantile list lives on self, as in
        # the peek-ahead code above which uses self.peek_ahead_quantiles.
        splitting_vals1 = np.quantile(X[:, iFeature1], self.split_val_quantiles)
    for val1 in splitting_vals1:
        SS_this = self.f_get_SS_for_split(iFeature1, val1, X[ind_for_splitval, :], y[ind_for_splitval], peek_ahead_maxDepth_limiter)
        if SS_this < SS_best:
            SS_best = SS_this
            best_split_val = val1
    # Re-evaluate the chosen threshold on the held-out comparison rows.
    SS_best = self.f_get_SS_for_split(iFeature1, best_split_val, X[ind_for_feature_comparison, :], y[ind_for_feature_comparison], peek_ahead_maxDepth_limiter)
    # p = fraction of candidate thresholds below the chosen one; the penalty
    # grows as the split becomes unbalanced (p far from 0.5).
    p = np.sum(splitting_vals1 < best_split_val) / len(splitting_vals1)
    SS_best = SS_best + self.beta0 * (1 - p * (1 - p) / 0.25) * len(self.y)  # beta0 adjusts SS_best for feature selection but not split-point
    return best_split_val, SS_best
def f_SS_for_split(self, v):
    """SS of `v`, inflated when the node would hold a small data fraction.

    The fraction is measured against the original full target vector
    self.y; beta0_vec = (threshold, strength) controls the inflation.
    """
    frac = len(v) / len(self.y)  # Use the original, full target vector here
    thresh = self.beta0_vec[0]
    strength = self.beta0_vec[1]
    scale = 0
    if thresh > 0 and frac < thresh:
        scale = strength * ((thresh - frac) / thresh)
    if np.isinf(scale):
        return np.inf
    return self.f_SS(v) * (1 + scale)
def f_SS_assigned_to_node(self, v):
    """SS credited to a node: the plain within-node SS, with no penalty."""
    return self.f_SS(v)
def f_SS(self, v):
    """Sum of squared deviations of `v` from its mean (0 for len(v) <= 1)."""
    if len(v) <= 1:
        return 0
    deviations = v - np.mean(v)
    return np.sum(deviations ** 2)
# Generate tree with alternative SS_pre_split
def tree_copy(self, tree0, X_new, y_new, iDepth=0, node_index_v = None, previous_terminal_node_pred=np.nan):
    """Rebuild the fitted tree `tree0` with node statistics from new data.

    Walks tree0 depth-first, routing (X_new, y_new) through its splits and
    recording, per node: [split_feature, split_val, SS_pre_split, SS_left,
    SS_right, n_obs, node_index, depth, y_at_node].  Terminal nodes carry
    NaN in slot 0.  NOTE(review): `node_index_v` is accepted but never
    used -- presumably a leftover parameter; confirm before removing.
    """
    if (iDepth == 0):
        # Root call: restart the depth-first node counter.
        self.current_node_index = 0
    else:
        self.current_node_index = self.current_node_index + 1
    if len(y_new) == 0:
        # No new data reaches this node; inherit the parent's prediction.
        terminal_node_pred = previous_terminal_node_pred
    else:
        terminal_node_pred = np.nanmean(y_new)
    SS_pre_split = self.f_SS_assigned_to_node(y_new)
    if len(y_new) == 0:
        # Empty node: emit a terminal record regardless of tree0's shape.
        return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y_new], np.NaN, np.NaN]
    if not(isinstance(tree0, list)):
        # tree0 is a bare leaf prediction, not a subtree.
        return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y_new], np.NaN, np.NaN]
    if np.isnan(tree0[0][0]):
        # tree0 is terminal at this position.
        return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y_new], np.NaN, np.NaN]
    # Non-terminal node: route the new data through tree0's split.
    best_split_feature = tree0[0][0]
    best_split_val = tree0[0][1]
    ind_left = (X_new[:, best_split_feature] < best_split_val)
    ind_right = (X_new[:, best_split_feature] >= best_split_val)
    SS_left = self.f_SS(y_new[ind_left])
    SS_right = self.f_SS(y_new[ind_right])
    best_split = [best_split_feature, best_split_val, SS_pre_split, SS_left, SS_right, len(y_new), self.current_node_index, iDepth, y_new]
    branch_left = self.tree_copy(tree0[1], X_new[ind_left, :], y_new[ind_left], iDepth + 1, previous_terminal_node_pred=terminal_node_pred)
    branch_right = self.tree_copy(tree0[2], X_new[ind_right, :], y_new[ind_right], iDepth + 1, previous_terminal_node_pred=terminal_node_pred)
    return [best_split, branch_left, branch_right]
# Cost-Complexity Pruning
def retrieve_info_from_terminal_nodes(self, this_tree, nodes_to_collapse_tmp = [-1]):
    """Aggregate (SS, leaf_count, max_depth) over the effective leaves.

    A node is an effective leaf when it is genuinely terminal (NaN split
    feature) or when its index appears in `nodes_to_collapse_tmp`.  The
    default sentinel [-1] matches no real node index, so by default only
    true terminals count.
    """
    node = this_tree[0]
    collapsed_here = nodes_to_collapse_tmp.count(node[6]) > 0
    if np.isnan(node[0]) or collapsed_here:
        # Effective leaf: its SS, one leaf, its depth.
        return node[2], 1, node[7]
    ss_l, n_l, d_l = self.retrieve_info_from_terminal_nodes(this_tree[1], nodes_to_collapse_tmp)
    ss_r, n_r, d_r = self.retrieve_info_from_terminal_nodes(this_tree[2], nodes_to_collapse_tmp)
    return ss_l + ss_r, n_l + n_r, max(d_l, d_r)
def f_C(self, this_tree, nodes_to_collapse_tmp = [-1]):
    """Cost-complexity criterion: SS/n_root + alpha0 * (number of leaves).

    `nodes_to_collapse_tmp` lists internal-node indices to treat as
    collapsed; the default sentinel [-1] matches no node index.
    """
    if nodes_to_collapse_tmp[0] == -1:
        # BUG FIX: the original reset a never-used local (`node_indices`)
        # here; the evident intent is clearing the collapse list.  Since
        # [-1] can never match a node index, behavior is unchanged.
        nodes_to_collapse_tmp = []
    this_SS, this_N_nodes, max_depth_terminals = self.retrieve_info_from_terminal_nodes(this_tree, nodes_to_collapse_tmp)
    n_data_points = this_tree[0][5]  # observation count stored at the root
    return this_SS / n_data_points + self.alpha0 * this_N_nodes
def get_all_node_indices(self, this_tree, node_indices = [-1]):
    """Collect every node index in the tree (pre-order traversal)."""
    if node_indices[0] == -1:
        # Sentinel default: start a fresh accumulator (the shared default
        # list itself is never mutated).
        node_indices = []
    node_indices.append(this_tree[0][6])
    if not np.isnan(this_tree[0][0]):
        # Internal node: descend into both children.
        self.get_all_node_indices(this_tree[1], node_indices)
        self.get_all_node_indices(this_tree[2], node_indices)
    return node_indices
def get_internal_node_indices(self, this_tree, node_indices = [-1]):
    """Collect indices of internal (splitting) nodes only, pre-order."""
    if node_indices[0] == -1:
        node_indices = []  # sentinel default -> fresh accumulator
    if not np.isnan(this_tree[0][0]):
        node_indices.append(this_tree[0][6])
        self.get_internal_node_indices(this_tree[1], node_indices)
        self.get_internal_node_indices(this_tree[2], node_indices)
    return node_indices
def get_downstream_nodes(self, this_tree, iNode_to_collapse, downstream_nodes = [-1], downstream_on = 0):
    """List `iNode_to_collapse` plus every node beneath it.

    `downstream_on` flips to 1 once the target index is reached; from then
    on every visited node index is recorded.
    """
    if len(downstream_nodes) > 0 and downstream_nodes[0] == -1:
        downstream_nodes = []  # sentinel default -> fresh accumulator
    if this_tree[0][6] == iNode_to_collapse:
        downstream_on = 1
    if downstream_on == 1:
        downstream_nodes.append(this_tree[0][6])
    if not np.isnan(this_tree[0][0]):
        self.get_downstream_nodes(this_tree[1], iNode_to_collapse, downstream_nodes, downstream_on)
        self.get_downstream_nodes(this_tree[2], iNode_to_collapse, downstream_nodes, downstream_on)
    return downstream_nodes
def prune_the_tree(self, this_tree):
    """Greedy weakest-link (cost-complexity) pruning.

    Repeatedly collapses the internal node whose collapse minimizes the
    cost criterion f_C, marking the node and everything downstream of it
    as no longer collapsible, until all internal nodes are collapsed.

    Returns
    -------
    C : list of float
        Cost after each successive collapse; the caller selects the
        prefix of `nodes_collapsed` ending at argmin(C).
    nodes_collapsed : list of int
        Node indices in collapse order.
    """
    node_indices = self.get_internal_node_indices(this_tree)
    uncollapsed_v = [1 for a in node_indices]  # 1 = still a collapse candidate
    nodes_collapsed = []
    C = []
    while sum(uncollapsed_v) > 0:
        # Score every remaining candidate collapse.
        C_vec_tmp = []
        iNode_indices_tmp = []
        for iiNode in range(len(node_indices)):
            if uncollapsed_v[iiNode] == 0:
                continue
            iNode = node_indices[iiNode]
            iNode_indices_tmp.append(iNode)
            nodes_to_collapse_tmp = nodes_collapsed.copy()
            nodes_to_collapse_tmp.append(iNode)
            this_C = self.f_C(this_tree, nodes_to_collapse_tmp)
            C_vec_tmp.append(this_C)
        # Collapse the cheapest candidate.
        i_C_vec_tmp = np.argmin(C_vec_tmp)
        iNode_to_collapse = iNode_indices_tmp[i_C_vec_tmp]
        ndf = self.get_downstream_nodes(this_tree, iNode_to_collapse)
        for iNode_downstream in ndf:  # includes the collapsed node itself
            for iiNode in range(len(node_indices)):
                if iNode_downstream == node_indices[iiNode]:
                    # Downstream internal nodes can no longer be collapsed.
                    uncollapsed_v[iiNode] = 0
        nodes_collapsed.append(iNode_to_collapse)
        C.append(min(C_vec_tmp))
    return C, nodes_collapsed
def print_tree(self, this_tree, C, nodes_collapsed, mean_y, sd_y):
    """Pretty-print the pruned tree selected by the cost curve C.

    Nodes in the best prefix of `nodes_collapsed` (prefix ending at
    argmin(C)) print as terminal nodes; terminal predictions are
    de-normalized via mean_y + sd_y * node_mean.
    """
    def print_tree_inner(this_tree, nodes_collapsed_choice, mean_y, sd_y):
        iDepth = int(this_tree[0][7])
        indent0 = ''
        for t in range(iDepth):
            indent0 = indent0 + '\t'  # one tab of indent per tree level
        if nodes_collapsed_choice.count(this_tree[0][6]) == 0 and not(np.isnan(this_tree[0][0])):
            # Still-internal node: show [feature, split_val] and recurse.
            print(indent0, this_tree[0][0:2])
            print_tree_inner(this_tree[1], nodes_collapsed_choice, mean_y, sd_y)
            print_tree_inner(this_tree[2], nodes_collapsed_choice, mean_y, sd_y)
        else:
            if len(this_tree[0][-1]) > 0:
                m = np.nanmean(this_tree[0][-1])
            else:
                m = 0  # Note: target values are normalized
            print(indent0, 'terminal node: ', mean_y + sd_y * m)
    if len(C) == 0:
        print('Empty tree.');
        return
    best_collapse_seq_end = np.argmin(C)
    nodes_collapsed_choice = nodes_collapsed[0:(best_collapse_seq_end + 1)]
    print_tree_inner(this_tree, nodes_collapsed_choice, mean_y, sd_y)
def collapse_tree(self, this_tree, C, nodes_collapsed, mean_y, sd_y):
    """Materialize the pruned tree chosen by the cost curve C.

    The first argmin(C)+1 entries of `nodes_collapsed` are treated as
    collapsed.  Collapsed/terminal nodes become de-normalized scalar
    predictions (mean_y + sd_y * node_mean); remaining internal nodes
    become [[feature, split_val], left_subtree, right_subtree].  Returns
    [] when C is empty.
    """
    if len(C) == 0:
        return []
    n_keep = np.argmin(C) + 1
    nodes_collapsed_choice = nodes_collapsed[0:n_keep]

    def build(node):
        still_internal = (nodes_collapsed_choice.count(node[0][6]) == 0
                          and not np.isnan(node[0][0]))
        if still_internal:
            return [node[0][0:2], build(node[1]), build(node[2])]
        y_here = node[0][-1]
        if len(y_here) > 0:
            return mean_y + sd_y * np.nanmean(y_here)
        return mean_y  # empty node: targets are normalized, fall back to mean

    return build(this_tree)
def tree_prediction(X, tree0):
    """Predict a value for every row of X by descending the tree.

    `tree0` is either a bare scalar prediction or a nested list
    [[feature, split_val], left_subtree, right_subtree]; a row goes left
    when row[feature] < split_val.  Returns a list with one prediction
    per row of X.
    """
    def descend(xvec, node):
        # Follow splits until a non-list (leaf prediction) is reached.
        while isinstance(node, list):
            split_feature = node[0][0]
            split_threshold = node[0][1]
            node = node[1] if xvec[split_feature] < split_threshold else node[2]
        return node
    return [descend(row, tree0) for row in X]
def describe_splits_in_tree(this_tree, splits_v = None):
    """Return the header record of every split node, in pre-order.

    Parameters
    ----------
    this_tree : nested-list tree (or a bare scalar leaf prediction).
    splits_v : optional list to extend in place; a fresh list is created
        when None (avoids the shared-mutable-default pitfall).
    """
    # BUG FIX: was `splits_v == None`, an equality comparison that is
    # ambiguous for array-like arguments; identity check is the idiom.
    if splits_v is None:
        splits_v = []

    def _collect(node):
        # Bare leaf predictions are not lists and carry no split header.
        if not isinstance(node, list):
            return
        splits_v.append(node[0])
        _collect(node[1])
        _collect(node[2])

    _collect(this_tree)
    # (The original's trailing `isinstance(splits_v, list)` reset was dead
    # code once splits_v is normalized above, and has been removed.)
    return splits_v
| [
"numpy.mean",
"numpy.unique",
"numpy.append",
"numpy.nanmean",
"numpy.quantile",
"numpy.sum",
"numpy.isnan",
"numpy.array",
"scipy.stats.binom.cdf",
"numpy.min",
"numpy.argmin",
"numpy.isinf",
"numpy.var",
"numpy.random.permutation"
] | [((9593, 9611), 'numpy.nanmean', 'np.nanmean', (['self.y'], {}), '(self.y)\n', (9603, 9611), True, 'import numpy as np\n'), ((12297, 12325), 'numpy.isnan', 'np.isnan', (['best_split_feature'], {}), '(best_split_feature)\n', (12305, 12325), True, 'import numpy as np\n'), ((17461, 17483), 'numpy.isinf', 'np.isinf', (['beta0_scaler'], {}), '(beta0_scaler)\n', (17469, 17483), True, 'import numpy as np\n'), ((18835, 18856), 'numpy.isnan', 'np.isnan', (['tree0[0][0]'], {}), '(tree0[0][0])\n', (18843, 18856), True, 'import numpy as np\n'), ((25899, 25911), 'numpy.argmin', 'np.argmin', (['C'], {}), '(C)\n', (25908, 25911), True, 'import numpy as np\n'), ((26875, 26887), 'numpy.argmin', 'np.argmin', (['C'], {}), '(C)\n', (26884, 26887), True, 'import numpy as np\n'), ((1367, 1400), 'numpy.quantile', 'np.quantile', (['self.y', '[0.33, 0.66]'], {}), '(self.y, [0.33, 0.66])\n', (1378, 1400), True, 'import numpy as np\n'), ((4730, 4748), 'numpy.nanmean', 'np.nanmean', (['self.y'], {}), '(self.y)\n', (4740, 4748), True, 'import numpy as np\n'), ((9213, 9235), 'numpy.sum', 'np.sum', (['(d_for_NHST < 0)'], {}), '(d_for_NHST < 0)\n', (9219, 9235), True, 'import numpy as np\n'), ((9636, 9650), 'numpy.var', 'np.var', (['self.y'], {}), '(self.y)\n', (9642, 9650), True, 'import numpy as np\n'), ((10272, 10284), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10280, 10284), True, 'import numpy as np\n'), ((10580, 10593), 'numpy.nanmean', 'np.nanmean', (['y'], {}), '(y)\n', (10590, 10593), True, 'import numpy as np\n'), ((16174, 16200), 'numpy.unique', 'np.unique', (['X[:, iFeature1]'], {}), '(X[:, iFeature1])\n', (16183, 16200), True, 'import numpy as np\n'), ((16247, 16302), 'numpy.quantile', 'np.quantile', (['X[:, iFeature1]', 'split.split_val_quantiles'], {}), '(X[:, iFeature1], split.split_val_quantiles)\n', (16258, 16302), True, 'import numpy as np\n'), ((16880, 16920), 'numpy.sum', 'np.sum', (['(splitting_vals1 < best_split_val)'], {}), '(splitting_vals1 < 
best_split_val)\n', (16886, 16920), True, 'import numpy as np\n'), ((18416, 18433), 'numpy.nanmean', 'np.nanmean', (['y_new'], {}), '(y_new)\n', (18426, 18433), True, 'import numpy as np\n'), ((20123, 20148), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', (20131, 20148), True, 'import numpy as np\n'), ((21477, 21502), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', (21485, 21502), True, 'import numpy as np\n'), ((21853, 21878), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', (21861, 21878), True, 'import numpy as np\n'), ((22521, 22546), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', (22529, 22546), True, 'import numpy as np\n'), ((23919, 23939), 'numpy.argmin', 'np.argmin', (['C_vec_tmp'], {}), '(C_vec_tmp)\n', (23928, 23939), True, 'import numpy as np\n'), ((2308, 2320), 'numpy.argmin', 'np.argmin', (['C'], {}), '(C)\n', (2317, 2320), True, 'import numpy as np\n'), ((4777, 4791), 'numpy.var', 'np.var', (['self.y'], {}), '(self.y)\n', (4783, 4791), True, 'import numpy as np\n'), ((6222, 6257), 'numpy.random.permutation', 'np.random.permutation', (['set2_indices'], {}), '(set2_indices)\n', (6243, 6257), True, 'import numpy as np\n'), ((6335, 6350), 'numpy.nanmean', 'np.nanmean', (['y_1'], {}), '(y_1)\n', (6345, 6350), True, 'import numpy as np\n'), ((6475, 6490), 'numpy.nanmean', 'np.nanmean', (['y_2'], {}), '(y_2)\n', (6485, 6490), True, 'import numpy as np\n'), ((7213, 7225), 'numpy.argmin', 'np.argmin', (['C'], {}), '(C)\n', (7222, 7225), True, 'import numpy as np\n'), ((7565, 7577), 'numpy.min', 'np.min', (['C_CV'], {}), '(C_CV)\n', (7571, 7577), True, 'import numpy as np\n'), ((9132, 9158), 'numpy.array', 'np.array', (['C_min_v_crossval'], {}), '(C_min_v_crossval)\n', (9140, 9158), True, 'import numpy as np\n'), ((9161, 9183), 'numpy.array', 'np.array', (['C_min_v_null'], {}), '(C_min_v_null)\n', (9169, 9183), True, 'import numpy as np\n'), 
((9257, 9303), 'scipy.stats.binom.cdf', 'binom.cdf', (['obs_CV_better', 'd_for_NHST.size', '(0.5)'], {}), '(obs_CV_better, d_for_NHST.size, 0.5)\n', (9266, 9303), False, 'from scipy.stats import binom\n'), ((10899, 10912), 'numpy.nanmean', 'np.nanmean', (['y'], {}), '(y)\n', (10909, 10912), True, 'import numpy as np\n'), ((12393, 12406), 'numpy.nanmean', 'np.nanmean', (['y'], {}), '(y)\n', (12403, 12406), True, 'import numpy as np\n'), ((13113, 13162), 'numpy.append', 'np.append', (['visited_features_v', 'best_split_feature'], {}), '(visited_features_v, best_split_feature)\n', (13122, 13162), True, 'import numpy as np\n'), ((13270, 13319), 'numpy.append', 'np.append', (['visited_features_v', 'best_split_feature'], {}), '(visited_features_v, best_split_feature)\n', (13279, 13319), True, 'import numpy as np\n'), ((13989, 14019), 'numpy.unique', 'np.unique', (['X[:, iFeature_this]'], {}), '(X[:, iFeature_this])\n', (13998, 14019), True, 'import numpy as np\n'), ((14078, 14137), 'numpy.quantile', 'np.quantile', (['X[:, iFeature_this]', 'self.peek_ahead_quantiles'], {}), '(X[:, iFeature_this], self.peek_ahead_quantiles)\n', (14089, 14137), True, 'import numpy as np\n'), ((6385, 6396), 'numpy.var', 'np.var', (['y_1'], {}), '(y_1)\n', (6391, 6396), True, 'import numpy as np\n'), ((6525, 6536), 'numpy.var', 'np.var', (['y_2'], {}), '(y_2)\n', (6531, 6536), True, 'import numpy as np\n'), ((8322, 8331), 'numpy.min', 'np.min', (['C'], {}), '(C)\n', (8328, 8331), True, 'import numpy as np\n'), ((17832, 17842), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (17839, 17842), True, 'import numpy as np\n'), ((25258, 25283), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', (25266, 25283), True, 'import numpy as np\n'), ((25600, 25628), 'numpy.nanmean', 'np.nanmean', (['this_tree[0][-1]'], {}), '(this_tree[0][-1])\n', (25610, 25628), True, 'import numpy as np\n'), ((26301, 26326), 'numpy.isnan', 'np.isnan', (['this_tree[0][0]'], {}), '(this_tree[0][0])\n', 
(26309, 26326), True, 'import numpy as np\n'), ((11747, 11775), 'numpy.array', 'np.array', (['visited_features_v'], {}), '(visited_features_v)\n', (11755, 11775), True, 'import numpy as np\n'), ((26668, 26696), 'numpy.nanmean', 'np.nanmean', (['this_tree[0][-1]'], {}), '(this_tree[0][-1])\n', (26678, 26696), True, 'import numpy as np\n')] |
"""Test becquerel's Spectrum."""
import pytest
import datetime
import numpy as np
from uncertainties import ufloat, UFloat, unumpy
import becquerel as bq
# Dimensions and calibration constants shared by the synthetic spectra.
TEST_DATA_LENGTH = 256
TEST_COUNTS = 4
TEST_GAIN = 8.23
TEST_EDGES_KEV = np.arange(TEST_DATA_LENGTH + 1) * TEST_GAIN


def make_data(lam=TEST_COUNTS, size=TEST_DATA_LENGTH):
    """Build a vector of random Poisson counts, cast to integers."""
    counts = np.random.poisson(lam=lam, size=size)
    return counts.astype(int)
def make_spec(t, lt=None, lam=TEST_COUNTS):
    """Get spectrum to use in parameterized tests.

    Pytest Note:
        fixtures cannot be used inside parametrize(), so tests call this
        factory directly instead of taking a fixture argument.
    """
    if t == "uncal":
        return bq.Spectrum(make_data(lam=lam), livetime=lt)
    if t == "cal":
        return bq.Spectrum(
            make_data(lam=lam), bin_edges_kev=TEST_EDGES_KEV, livetime=lt
        )
    if t == "cal_new":
        # Different gain than TEST_EDGES_KEV, for calibration-mismatch tests.
        return bq.Spectrum(
            make_data(lam=lam),
            livetime=lt,
            bin_edges_kev=np.arange(TEST_DATA_LENGTH + 1) * 0.67,
        )
    if t == "applied_energy_cal":
        spec = bq.Spectrum(
            make_data(lam=lam),
            livetime=lt,
        )
        spec.apply_calibration(bq.Calibration("p[0] * x", [0.67]))
        return spec
    if t == "cal_cps":
        return bq.Spectrum(
            cps=make_data(lam=lam), bin_edges_kev=TEST_EDGES_KEV, livetime=lt
        )
    if t == "uncal_long":
        return bq.Spectrum(make_data(lam=lam, size=TEST_DATA_LENGTH * 2), livetime=lt)
    if t == "uncal_cps":
        return bq.Spectrum(cps=make_data(lam=lam), livetime=lt)
    if t == "data":
        return make_data()
    # Unrecognized key: pass the object straight through.
    return t
@pytest.fixture
def spec_data():
    """Build a vector of random counts."""
    # Fresh Poisson draw per test (function-scoped fixture).
    return make_data()
@pytest.fixture
def uncal_spec(spec_data):
    """Generate an uncalibrated spectrum."""
    # NOTE(review): spec_data is requested but unused; make_spec draws its
    # own random data -- confirm whether the dependency is intentional.
    return make_spec("uncal")
@pytest.fixture
def uncal_spec_2(spec_data):
    """Generate an uncalibrated spectrum (2nd instance)."""
    # NOTE(review): spec_data is requested but unused (see uncal_spec).
    return make_spec("uncal")
@pytest.fixture
def uncal_spec_cps(spec_data):
    """Generate an uncalibrated spectrum with cps data."""
    # NOTE(review): spec_data is requested but unused (see uncal_spec).
    return make_spec("uncal_cps")
@pytest.fixture
def uncal_spec_long(spec_data):
    """Generate an uncalibrated spectrum, of longer length."""
    # NOTE(review): spec_data is requested but unused (see uncal_spec).
    return make_spec("uncal_long")
@pytest.fixture
def cal_spec(spec_data):
    """Generate a calibrated spectrum."""
    # NOTE(review): spec_data is requested but unused (see uncal_spec).
    return make_spec("cal")
@pytest.fixture
def cal_spec_2(spec_data):
    """Generate a calibrated spectrum (2nd instance)."""
    # NOTE(review): spec_data is requested but unused (see uncal_spec).
    return make_spec("cal")
# ----------------------------------------------
# Test Spectrum.__init__()
# ----------------------------------------------
def test_uncal(uncal_spec):
    """Test simple uncalibrated construction: counts only, no energy axis."""
    assert len(uncal_spec.counts) == TEST_DATA_LENGTH
    assert not uncal_spec.is_calibrated
    assert uncal_spec.energy_cal is None
def test_uncal_cps(uncal_spec_cps):
    """Test simple uncalibrated construction w CPS. More CPS tests later"""
    assert len(uncal_spec_cps.cps) == TEST_DATA_LENGTH
    assert not uncal_spec_cps.is_calibrated
    assert uncal_spec_cps.energy_cal is None
def test_cal(cal_spec):
    """Test simple calibrated construction."""
    assert len(cal_spec.counts) == TEST_DATA_LENGTH
    # N+1 edges bracket N bin centers.
    assert len(cal_spec.bin_edges_kev) == TEST_DATA_LENGTH + 1
    assert len(cal_spec.bin_centers_kev) == TEST_DATA_LENGTH
    assert cal_spec.is_calibrated
def test_init_exceptions(spec_data):
    """Test errors on initialization: empty data, mismatched edge lengths,
    counts+cps conflict, edges without data, and non-monotonic edges."""
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum([])
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum(cps=[])
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum(spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1])
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum(cps=spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1])
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum(spec_data, cps=spec_data)
    with pytest.raises(bq.SpectrumError):
        bq.Spectrum(bin_edges_kev=TEST_EDGES_KEV)
    # Non-increasing bin edges must be rejected.
    bad_edges = TEST_EDGES_KEV.copy()
    bad_edges[12] = bad_edges[9]
    with pytest.raises(ValueError):
        bq.Spectrum(spec_data, bin_edges_kev=bad_edges)
def test_uncalibrated_exception(uncal_spec):
    """Test UncalibratedError."""
    with pytest.raises(bq.UncalibratedError):
        # Accessing the energy axis on an uncalibrated spectrum must raise.
        uncal_spec.bin_centers_kev
def test_negative_input(spec_data):
    """Make sure negative values in counts throw an exception,
    and exception is not raised if uncs are provided."""
    # BUG FIX: spec_data[:] on an ndarray is a *view*, so the in-place
    # negation below also mutated the fixture's array; copy explicitly.
    neg_spec = spec_data.copy()
    neg_spec[::2] *= -1
    # Uncertainties are NaN exactly where counts were made negative.
    neg_uncs = np.where(neg_spec < 0, np.nan, 1)
    with pytest.raises(bq.SpectrumError):
        spec = bq.Spectrum(neg_spec)
    spec = bq.Spectrum(neg_spec, uncs=neg_uncs)
    assert np.any(spec.counts_vals < 0)
    assert np.any(np.isnan(spec.counts_uncs))
# NOTE(review): "cal" appears twice in this parametrize list -- possibly one
# entry was meant to be a different type (e.g. "cal_cps"); confirm intent.
@pytest.mark.parametrize("spec_type", ["uncal", "cal", "uncal_long", "cal"])
def test_init_with_lists(spec_type):
    """Construction from plain Python lists must match ndarray construction."""
    spec = make_spec(spec_type)
    assert np.all(
        bq.Spectrum(
            counts=list(spec.counts_vals),
            bin_edges_raw=spec.bin_edges_raw,
        ).counts_vals
        == spec.counts_vals
    )
# ----------------------------------------------
# Test Spectrum.from_listmode behavior
# ----------------------------------------------
NBINS = 100  # bins for the listmode histograms
NEDGES = NBINS + 1
MEAN = 1000.0  # mean of the normal listmode sample
STDDEV = 50.0
NSAMPLES = 10000
XMIN, XMAX = 0.0, 2000.0  # histogram range for uniform binning
BW = (XMAX - XMIN) / (1.0 * NBINS)  # uniform bin width
lmd = np.random.normal(MEAN, STDDEV, NSAMPLES)  # shared listmode dataset
log_bins = np.logspace(1, 4, num=NEDGES, base=10.0)  # non-uniform bin edges
def make_spec_listmode(t, is_cal=False, apply_cal=False):
    """Histogram the shared listmode sample `lmd` with the requested binning.

    t selects uniform / log / default binning; anything else is returned
    unchanged.  When apply_cal is truthy, a linear energy calibration with
    gain TEST_GAIN is applied afterwards.
    """
    if t == "uniform":
        spec = bq.Spectrum.from_listmode(
            lmd, is_cal=is_cal, bins=NBINS, xmin=XMIN, xmax=XMAX
        )
    elif t == "log":
        spec = bq.Spectrum.from_listmode(lmd, is_cal=is_cal, bins=log_bins)
    elif t == "default":
        spec = bq.Spectrum.from_listmode(lmd, is_cal=is_cal)
    else:
        return t
    if apply_cal:
        spec.apply_calibration(bq.Calibration.from_linear([0.0, TEST_GAIN]))
        assert spec.energy_cal is not None
    return spec
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_listmode_uniform(is_cal, apply_cal):
    """Test listmode spectra with uniform binning.

    It's easy to introduce off-by-one errors in histogramming listmode data,
    so run quite a few sanity checks here and in the following tests.
    """
    if is_cal and apply_cal:
        # Cannot re-calibrate an already-calibrated spectrum; skip combo.
        return
    spec = make_spec_listmode("uniform", is_cal, apply_cal)
    xmin, xmax, bw = XMIN, XMAX, BW
    if apply_cal and not is_cal:
        # Calibration scales the raw axis by the gain.
        xmin *= TEST_GAIN
        xmax *= TEST_GAIN
        bw *= TEST_GAIN
    edges, widths, _ = spec.get_bin_properties()
    assert len(spec) == NBINS
    assert np.all(np.isclose(widths, bw))
    assert edges[0] == xmin
    assert edges[-1] == xmax
    assert len(edges) == NBINS + 1
    assert spec.has_uniform_bins()
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_listmode_non_uniform(is_cal, apply_cal):
    """Test listmode spectra with non-uniform (log-spaced) bins."""
    if is_cal and apply_cal:
        # Cannot re-calibrate an already-calibrated spectrum; skip combo.
        return
    spec = make_spec_listmode("log", is_cal, apply_cal)
    assert len(spec) == NBINS
    assert spec.has_uniform_bins() is False
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_listmode_no_args(is_cal, apply_cal):
    """Test listmode spectra without binning args (default binning)."""
    spec = make_spec_listmode("default", is_cal, apply_cal)
    # Default binning spans up to the sample maximum, one bin per unit.
    assert len(spec) == int(np.ceil(max(lmd)))
def test_listmode_is_cal():
    """Test that initially-calibrated listmode data matches uncal data."""
    spec = make_spec_listmode("default", is_cal=True)
    # keV-axis and raw-axis bin properties must agree when is_cal=True.
    e0, w0, c0 = spec.get_bin_properties(use_kev=True)
    e1, w1, c1 = spec.get_bin_properties(use_kev=False)
    assert np.allclose(e0, e1)
    assert np.allclose(w0, w1)
    assert np.allclose(c0, c1)
@pytest.mark.parametrize("spec_str", ["uniform", "log"])
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_find_bin_index(spec_str, is_cal, apply_cal):
    """Test that find_bin_index works for various spectrum objects."""
    spec = make_spec_listmode(spec_str, is_cal, apply_cal)
    edges, widths, _ = spec.get_bin_properties()
    xmin, xmax = edges[0], edges[-1]
    # Left edge is inclusive; interior points map to the containing bin.
    assert spec.find_bin_index(xmin) == 0
    assert spec.find_bin_index(xmin + widths[0] / 4.0) == 0
    assert spec.find_bin_index(xmax - widths[-1] / 4.0) == len(spec) - 1
    # Vectorized: each left edge maps to its own bin index.
    assert np.all(spec.find_bin_index(edges[:-1]) == np.arange(len(spec)))
@pytest.mark.parametrize("spec_str", ["uniform", "default", "log"])
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_index_out_of_bounds(spec_str, is_cal, apply_cal):
    """Raise a SpectrumError when we look for a bin index out of bounds, or an
    UncalibratedError when we ask to search bin_edges_kev in an uncal spectrum.
    """
    spec = make_spec_listmode(spec_str, is_cal, apply_cal)
    edges, widths, _ = spec.get_bin_properties()
    xmin, xmax = edges[0], edges[-1]
    # out of histogram bounds (right edge is exclusive)
    with pytest.raises(bq.SpectrumError):
        spec.find_bin_index(xmax)
    with pytest.raises(bq.SpectrumError):
        spec.find_bin_index(xmin - widths[0] / 4.0)
    # UncalibratedError if not calibrated and we ask for calibrated
    if not spec.is_calibrated:
        with pytest.raises(bq.UncalibratedError):
            spec.find_bin_index(xmin, use_kev=True)
@pytest.mark.parametrize("is_cal", [False, True])
@pytest.mark.parametrize("apply_cal", [None, False, True])
def test_bin_index_types(is_cal, apply_cal):
    """Additional bin index type checking: scalar in -> int out,
    array-like in -> ndarray out."""
    spec = make_spec_listmode("uniform", is_cal, apply_cal)
    assert isinstance(spec.find_bin_index(XMIN), (int, np.integer))
    assert isinstance(spec.find_bin_index([XMIN]), np.ndarray)
# ----------------------------------------------
# Test Spectrum repr behavior
# ----------------------------------------------
def test_repr(cal_spec):
    # Smoke test: repr() must not raise.
    repr(cal_spec)
def test_str(cal_spec):
    # Smoke test: str() must not raise.
    str(cal_spec)
# ----------------------------------------------
# Test Spectrum livetime properties
# ----------------------------------------------
# Representative livetimes: a day, minutes, and sub-second.
@pytest.fixture(params=[86400, 300.6, 0.88])
def livetime(request):
    return request.param
def test_livetime_arg(spec_data, livetime):
    """Test manual livetime input."""
    spec = bq.Spectrum(spec_data, livetime=livetime)
    assert spec.livetime == livetime
def test_livetime_arg_cps(spec_data, livetime):
    """Test manual livetime input with CPS."""
    cps = spec_data / float(livetime)
    spec = bq.Spectrum(cps=cps, livetime=livetime)
    assert spec.livetime == livetime
def test_no_livetime(spec_data):
    """Test livetime property when not specified: stays None for both
    counts-based and cps-based spectra."""
    spec = bq.Spectrum(spec_data)
    assert spec.livetime is None
    cps_spec = bq.Spectrum(cps=spec_data / 300.6)
    assert cps_spec.livetime is None
# ----------------------------------------------
# Test start_time, stop_time, realtime
# ----------------------------------------------
# Start/stop pairs as datetime objects, strings, and a mix of both.
@pytest.mark.parametrize(
    "start, stop",
    [
        (
            datetime.datetime(2017, 1, 1, 17, 0, 3),
            datetime.datetime(2017, 1, 1, 18, 0, 3),
        ),
        ("2017-01-19 17:21:00", "2017-01-20 14:19:32"),
        (datetime.datetime(2017, 1, 1, 0, 30, 0, 385), "2017-01-01 12:44:22"),
    ],
)
@pytest.mark.parametrize("rt", [3600, 2345.6])
def test_acqtime_construction(spec_data, start, stop, rt):
    """Test construction with 2 out of 3 of start, stop, and realtime."""
    bq.Spectrum(spec_data, start_time=start, stop_time=stop)
    bq.Spectrum(spec_data, start_time=start, realtime=rt)
    bq.Spectrum(spec_data, realtime=rt, stop_time=stop)
# Over-specified (all three args) and stop-before-start cases.
@pytest.mark.parametrize(
    "start, stop, rt, expected_err",
    [
        ("2017-01-19 17:21:00", "2017-01-20 17:21:00", 86400, bq.SpectrumError),
        ("2017-01-19 17:21:00", "2017-01-18 17:21:00", None, ValueError),
    ],
)
def test_bad_acqtime_construction(spec_data, start, stop, rt, expected_err):
    """Test bad construction of a spectrum with start, stop, or realtimes."""
    with pytest.raises(expected_err):
        bq.Spectrum(spec_data, start_time=start, stop_time=stop, realtime=rt)
def test_bad_realtime_livetime(spec_data):
    """Test error of livetime > realtime (physically impossible)."""
    with pytest.raises(ValueError):
        bq.Spectrum(spec_data, livetime=300, realtime=290)
# ----------------------------------------------
# Test uncertainties in Spectrum
# ----------------------------------------------
def test_construct_float_int(spec_data):
    """Construct spectrum with non-UFloats (float and int); counts are
    promoted to UFloat either way."""
    spec = bq.Spectrum(spec_data)
    assert isinstance(spec.counts[0], UFloat)
    spec = bq.Spectrum(spec_data.astype(float))
    assert isinstance(spec.counts[0], UFloat)
def test_construct_ufloat(spec_data):
    """Construct spectrum with UFloats; given uncertainties are preserved."""
    ucounts = unumpy.uarray(spec_data, np.ones_like(spec_data))
    spec = bq.Spectrum(ucounts)
    assert isinstance(spec.counts[0], UFloat)
    assert spec.counts[0].std_dev == 1
def test_construct_float_int_uncs(spec_data):
    """Construct spectrum with non-UFloats and specify uncs."""
    uncs = np.ones_like(spec_data)
    spec = bq.Spectrum(spec_data, uncs=uncs)
    assert isinstance(spec.counts[0], UFloat)
    # All uncertainties should equal the supplied value of 1.
    uncs2 = np.array([c.std_dev for c in spec.counts])
    assert np.allclose(uncs2, 1)
def test_construct_errors(spec_data):
    """Construct spectrum with UFloats plus uncs and get an error."""
    uncs = np.ones_like(spec_data)
    ucounts = unumpy.uarray(spec_data, uncs)
    with pytest.raises(bq.core.utils.UncertaintiesError):
        bq.Spectrum(ucounts, uncs=uncs)
    # Mixed UFloat/plain entries are also rejected.
    ucounts[0] = 1
    with pytest.raises(bq.core.utils.UncertaintiesError):
        bq.Spectrum(ucounts)
def test_properties(spec_data):
    """Test counts_vals and counts_uncs."""
    spec = bq.Spectrum(spec_data)
    assert isinstance(spec.counts[0], UFloat)
    assert np.allclose(spec.counts_vals, spec_data)
    # Poisson default: unc = sqrt(counts), with zero-count bins given unc 1.
    expected_uncs = np.sqrt(spec_data)
    expected_uncs[expected_uncs == 0] = 1
    assert np.allclose(spec.counts_uncs, expected_uncs)
    # Explicit uncertainties via a uarray are passed through.
    uncs = spec_data
    ucounts = unumpy.uarray(spec_data, uncs)
    spec = bq.Spectrum(ucounts)
    assert np.allclose(spec.counts_vals, spec_data)
    assert np.allclose(spec.counts_uncs, uncs)
    # Explicit uncertainties via the uncs kwarg are passed through.
    uncs = np.ones_like(spec_data)
    spec = bq.Spectrum(spec_data, uncs=uncs)
    assert np.allclose(spec.counts_uncs, uncs)
# ----------------------------------------------
# Test Spectrum.bin_widths
# ----------------------------------------------
def test_bin_widths_kev(cal_spec):
    """Test Spectrum.bin_widths_kev"""
    cal_spec.bin_widths_kev
    assert len(cal_spec.bin_widths_kev) == len(cal_spec.counts)
    # Uniform linear calibration: every bin is TEST_GAIN keV wide.
    assert np.allclose(cal_spec.bin_widths_kev, TEST_GAIN)
def test_bin_widths_uncal(uncal_spec):
    """Test Spectrum.bin_widths_raw"""
    uncal_spec.bin_widths_raw
    assert len(uncal_spec.bin_widths_raw) == len(uncal_spec.counts)
# ----------------------------------------------
# Test Spectrum CPS and CPS/keV
# ----------------------------------------------
@pytest.mark.parametrize(
    "construction_kwargs",
    [
        {"livetime": 300.0},
        {"livetime": 300.0, "bin_edges_kev": TEST_EDGES_KEV},
    ],
)
def test_cps(spec_data, construction_kwargs):
    """Test cps property and uncertainties on uncal and cal spectrum."""
    spec = bq.Spectrum(spec_data, **construction_kwargs)
    spec.cps
    spec.cps_vals
    spec.cps_uncs
    # cps = counts / livetime, uncertainties scale the same way.
    assert np.all(spec.counts_vals == spec_data)
    assert np.allclose(spec.cps_vals, spec_data / spec.livetime)
    assert np.allclose(spec.cps_uncs, spec.counts_uncs / spec.livetime)
def test_cpskev(spec_data, livetime):
    """Test cpskev property and uncertainties"""
    spec = bq.Spectrum(spec_data, livetime=livetime, bin_edges_kev=TEST_EDGES_KEV)
    spec.cpskev
    spec.cpskev_vals
    spec.cpskev_uncs
    # cpskev = counts / bin_width / livetime.
    assert np.allclose(
        spec.cpskev_vals, spec_data / spec.bin_widths_kev / float(livetime)
    )
    assert np.allclose(
        spec.cpskev_uncs, spec.counts_uncs / spec.bin_widths_kev / float(livetime)
    )
def test_cps_cpsspec(spec_data, livetime):
    """Test cps property of CPS-style spectrum."""
    spec = bq.Spectrum(cps=spec_data / float(livetime))
    assert spec.cps is not None
    assert np.all(spec.cps_vals == spec_data / float(livetime))
    # CPS-only input carries no uncertainty information.
    assert np.all(np.isnan(spec.cps_uncs))
    # Counts are undefined without a livetime to convert from cps.
    with pytest.raises(bq.SpectrumError):
        spec.counts
    with pytest.raises(bq.SpectrumError):
        spec.counts_vals
    with pytest.raises(bq.SpectrumError):
        spec.counts_uncs
def test_cps_errors(uncal_spec):
    """Test errors in CPS: counts-based spectrum without livetime has no cps."""
    with pytest.raises(bq.SpectrumError):
        uncal_spec.cps
def test_cpskev_errors(spec_data):
    """Test errors in CPS/keV: requires an energy calibration."""
    spec = bq.Spectrum(spec_data, livetime=300.9)
    with pytest.raises(bq.UncalibratedError):
        spec.cpskev
# ----------------------------------------------
# Test addition and subtraction of spectra
# ----------------------------------------------
@pytest.mark.parametrize(
    "lt1, lt2", [(300, 600), (12.6, 0.88), (300, 12.6), (12.6, None), (None, None)]
)
@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_add(type1, type2, lt1, lt2):
    """Test addition of spectra"""
    spec1, spec2 = (make_spec(type1, lt=lt1), make_spec(type2, lt=lt2))
    if lt1 and lt2:
        tot = spec1 + spec2
        # Livetimes add when both are known.
        assert tot.livetime == lt1 + lt2
    else:
        # Unknown livetime on either side warns and drops the livetime.
        with pytest.warns(bq.SpectrumWarning):
            tot = spec1 + spec2
        assert tot.livetime is None
    assert np.all(tot.counts == spec1.counts + spec2.counts)
    assert np.all(tot.counts_vals == spec1.counts_vals + spec2.counts_vals)
@pytest.mark.parametrize(
    "type1, type2, expected_error",
    [
        ("uncal", "cal", bq.SpectrumError),
        ("uncal", "uncal_long", bq.SpectrumError),
        ("uncal", "data", TypeError),
        ("data", "uncal", TypeError),
        ("uncal", 5, TypeError),
        (5, "cal", TypeError),
        ("cal", "asdf", TypeError),
        ("asdf", "uncal", TypeError),
        ("uncal", "data", TypeError),
        ("cal", "cal_new", NotImplementedError),
    ],
)
def test_add_sub_errors(type1, type2, expected_error):
    """Incompatible operands raise the same error for both + and -."""
    first = make_spec(type1)
    second = make_spec(type2)
    with pytest.raises(expected_error):
        first + second
    with pytest.raises(expected_error):
        first - second
@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_add_uncs(type1, type2):
    """Summed spectra combine count uncertainties in quadrature."""
    first = make_spec(type1)
    second = make_spec(type2)
    with pytest.warns(bq.SpectrumWarning):
        total = first + second
    expected = np.sqrt(first.counts_uncs**2 + second.counts_uncs**2)
    assert np.allclose(total.counts_uncs, expected)
@pytest.mark.parametrize(
    "type1, type2, lt1, lt2",
    [
        ("uncal_cps", "uncal_cps", 300, 12.6),
        ("uncal_cps", "uncal_cps", None, 12.6),
        ("uncal_cps", "uncal_cps", None, None),
    ],
)
def test_add_sub_cps(type1, type2, lt1, lt2):
    """CPS spectra add/subtract cps_vals elementwise; livetime is dropped."""
    first = make_spec(type1, lt=lt1)
    second = make_spec(type2, lt=lt2)
    total = first + second
    assert np.all(total.cps_vals == first.cps_vals + second.cps_vals)
    assert total.livetime is None
    difference = first - second
    assert difference.livetime is None
    assert np.all(difference.cps_vals == first.cps_vals - second.cps_vals)
@pytest.mark.parametrize(
    "type1, type2, lt1, lt2",
    [
        ("uncal", "uncal_cps", None, None),
        ("uncal_cps", "uncal", None, None),
        ("uncal", "uncal_cps", 300, None),
        ("uncal_cps", "uncal", None, 300),
        ("uncal", "uncal_cps", 300, 600),
        ("uncal_cps", "uncal", 600, 300),
    ],
)
def test_adddition_errors(type1, type2, lt1, lt2):
    """Adding a counts-based and a CPS-based spectrum raises SpectrumError.

    NOTE(review): the function name has a typo ("adddition"); it is kept
    unchanged so pytest node IDs and selection expressions keep working.
    """
    first = make_spec(type1, lt=lt1)
    second = make_spec(type2, lt=lt2)
    with pytest.raises(bq.SpectrumError):
        first + second
@pytest.mark.parametrize("lt1, lt2", [(300, 600), (12.6, 0.88), (300, 12.6)])
@pytest.mark.parametrize("type1, type2", [("uncal", "uncal"), ("cal", "cal")])
def test_subtract_counts(type1, type2, lt1, lt2):
    """Counts-based subtraction yields CPS with enlarged uncertainties."""
    first = make_spec(type1, lt=lt1)
    second = make_spec(type2, lt=lt2)
    with pytest.warns(bq.SpectrumWarning):
        difference = first - second
    assert difference.livetime is None
    assert np.allclose(difference.cps_vals, first.cps_vals - second.cps_vals)
    # Uncertainty of the difference exceeds each operand's uncertainty.
    assert np.all(difference.cps_uncs > first.cps_uncs)
    assert np.all(difference.cps_uncs > second.cps_uncs)
@pytest.mark.parametrize(
    "type1, type2, lt1, lt2",
    [
        ("uncal", "uncal_cps", None, None),
        ("uncal_cps", "uncal", None, None),
        ("uncal", "uncal_cps", None, 300),
        ("uncal_cps", "uncal", 300, None),
        ("uncal", "uncal_cps", 300, None),
        ("uncal_cps", "uncal", None, 300),
    ],
)
def test_subtract_errors(type1, type2, lt1, lt2):
    """Mixed counts/CPS subtraction: error with no livetimes, warning with one."""
    first = make_spec(type1, lt=lt1)
    second = make_spec(type2, lt=lt2)
    if lt1 is None and lt2 is None:
        with pytest.raises(bq.SpectrumError):
            first - second
    else:
        with pytest.warns(bq.SpectrumWarning):
            difference = first - second
        assert difference.livetime is None
# ----------------------------------------------
# Test multiplication and division of spectra
# ----------------------------------------------
@pytest.mark.parametrize("factor", [0.88, 1, 2, 43.6])
@pytest.mark.parametrize("spectype", ["uncal", "cal"])
def test_basic_mul_div(spectype, factor):
    """Scalar multiplication (either side) and division scale counts linearly."""
    spectrum = make_spec(spectype)
    # Left and right multiplication behave identically and drop the livetime.
    for product in (spectrum * factor, factor * spectrum):
        assert np.allclose(product.counts_vals, factor * spectrum.counts_vals)
        assert np.allclose(product.counts_uncs, factor * spectrum.counts_uncs)
        assert product.livetime is None
    quotient = spectrum / factor
    assert np.allclose(quotient.counts_vals, spectrum.counts_vals / factor)
    assert np.allclose(quotient.counts_uncs, spectrum.counts_uncs / factor)
    assert quotient.livetime is None
@pytest.mark.parametrize("factor", [0.88, 1, 2, 43.6])
def test_cps_mul_div(uncal_spec_cps, factor):
    """Scalar multiplication/division of a CPS spectrum scales cps_vals."""
    for product in (uncal_spec_cps * factor, factor * uncal_spec_cps):
        assert np.allclose(product.cps_vals, factor * uncal_spec_cps.cps_vals)
        assert product.livetime is None
    quotient = uncal_spec_cps / factor
    assert np.allclose(quotient.cps_vals, uncal_spec_cps.cps_vals / factor)
    assert quotient.livetime is None
@pytest.mark.parametrize("factor", [ufloat(0.88, 0.01), ufloat(1, 0.1), ufloat(43, 1)])
@pytest.mark.parametrize("spectype", ["uncal", "cal"])
def test_uncal_mul_div_uncertainties(spectype, factor):
    """Scaling by an uncertain factor inflates uncertainties of nonzero bins."""
    spectrum = make_spec(spectype)
    nominal = factor.nominal_value
    for product in (spectrum * factor, factor * spectrum):
        assert np.allclose(product.counts_vals, nominal * spectrum.counts_vals)
        # Zero-count bins are exempt; every other bin's uncertainty must grow.
        assert np.all(
            (product.counts_uncs > nominal * spectrum.counts_uncs)
            | (spectrum.counts_vals == 0)
        )
        assert product.livetime is None
    quotient = spectrum / factor
    assert np.allclose(quotient.counts_vals, spectrum.counts_vals / nominal)
    assert np.all(
        (quotient.counts_uncs > spectrum.counts_uncs / nominal)
        | (spectrum.counts_vals == 0)
    )
    assert quotient.livetime is None
@pytest.mark.parametrize(
    "type1, type2, error",
    [
        ("uncal", "uncal", TypeError),
        ("uncal", "asdf", TypeError),
        ("uncal", "data", TypeError),
        ("uncal", 0, ValueError),
        ("uncal", np.inf, ValueError),
        ("uncal", np.nan, ValueError),
        ("uncal", ufloat(0, 1), ValueError),
        ("uncal", ufloat(np.inf, np.nan), ValueError),
    ],
)
def test_mul_div_errors(type1, type2, error):
    """Invalid scalar operands raise for *, reflected *, and /."""
    spectrum = make_spec(type1)
    bad_operand = make_spec(type2)
    with pytest.raises(error):
        spectrum * bad_operand
    with pytest.raises(error):
        bad_operand * spectrum
    with pytest.raises(error):
        spectrum / bad_operand
# ----------------------------------------------
# Test Spectrum.calibrate_like
# ----------------------------------------------
def test_calibrate_like(uncal_spec, cal_spec):
    """calibrate_like copies a calibration onto an uncalibrated spectrum."""
    uncal_spec.calibrate_like(cal_spec)
    assert uncal_spec.is_calibrated
    assert np.all(uncal_spec.bin_edges_kev == cal_spec.bin_edges_kev)
def test_recalibrate_like(cal_spec):
    """calibrate_like replaces an existing calibration with the new one."""
    reference = make_spec("cal_new")
    old_edges = cal_spec.bin_edges_kev
    cal_spec.calibrate_like(reference)
    assert cal_spec.is_calibrated
    assert np.all(cal_spec.bin_edges_kev == reference.bin_edges_kev)
    # The new calibration genuinely differs from the original one.
    assert cal_spec.bin_edges_kev[-1] != old_edges[-1]
def test_calibrate_like_error(uncal_spec, uncal_spec_2):
    """calibrate_like rejects an uncalibrated reference spectrum."""
    with pytest.raises(bq.UncalibratedError):
        uncal_spec.calibrate_like(uncal_spec_2)
def test_calibrate_like_copy(uncal_spec, cal_spec):
    """calibrate_like must copy, not share, the reference bin edges."""
    uncal_spec.calibrate_like(cal_spec)
    assert uncal_spec.bin_edges_kev is not cal_spec.bin_edges_kev
    # Removing the source's calibration must leave the copy calibrated.
    cal_spec.rm_calibration()
    assert uncal_spec.is_calibrated
# ----------------------------------------------
# Test Spectrum.combine_bins
# ----------------------------------------------
@pytest.mark.parametrize("spectype", ["uncal", "cal", "uncal_cps"])
def test_combine_bins(spectype):
    """combine_bins with a factor that evenly divides the spectrum length."""
    spectrum = make_spec(spectype)
    factor = 8
    combined = spectrum.combine_bins(factor)
    assert len(combined) == TEST_DATA_LENGTH / factor
    # Counts-based spectra are checked via counts, CPS-based via cps.
    if spectrum._counts is not None:
        assert combined.counts_vals[0] == np.sum(spectrum.counts_vals[:factor])
        assert np.sum(combined.counts_vals) == np.sum(spectrum.counts_vals)
    else:
        assert combined.cps_vals[0] == np.sum(spectrum.cps_vals[:factor])
        assert np.sum(combined.cps_vals) == np.sum(spectrum.cps_vals)
@pytest.mark.parametrize("spectype", ["uncal", "cal", "uncal_cps"])
def test_combine_bins_padding(spectype):
    """combine_bins pads the tail when the factor does not divide evenly."""
    spectrum = make_spec(spectype)
    factor = 10
    combined = spectrum.combine_bins(factor)
    # Length rounds up to accommodate the padded final bin.
    assert len(combined) == np.ceil(float(TEST_DATA_LENGTH) / factor)
    if spectrum._counts is not None:
        assert combined.counts_vals[0] == np.sum(spectrum.counts_vals[:factor])
        assert np.sum(combined.counts_vals) == np.sum(spectrum.counts_vals)
    else:
        assert combined.cps_vals[0] == np.sum(spectrum.cps_vals[:factor])
        assert np.sum(combined.cps_vals) == np.sum(spectrum.cps_vals)
# calibration methods tested in energycal_test.py
# ----------------------------------------------
# Test Spectrum.downsample
# ----------------------------------------------
@pytest.mark.parametrize("spectype", ["uncal", "cal"])
@pytest.mark.parametrize("f", [2, 1.5, 999.99])
def test_downsample(spectype, f):
    """downsample(f) reduces total counts by roughly 1/f (statistically)."""
    spectrum = make_spec(spectype, lam=1000)
    before = np.sum(spectrum.counts_vals)
    after = np.sum(spectrum.downsample(f).counts_vals)
    ratio = float(after) / before
    # Allow a 5-sigma window around the expected Poisson fluctuation.
    tolerance = 5 * np.sqrt(before / f) / (before / f)
    assert np.isclose(ratio, 1.0 / f, atol=tolerance)
def test_no_downsample(cal_spec):
    """downsample(1.0) leaves the total counts unchanged."""
    before = np.sum(cal_spec.counts_vals)
    after = np.sum(cal_spec.downsample(1.0).counts_vals)
    assert before == after
def test_zero_downsample(cal_spec):
    """An enormous downsampling factor empties the spectrum."""
    downsampled = cal_spec.downsample(10**10)
    assert np.sum(downsampled.counts_vals) == 0
def test_downsample_handle_livetime(cal_spec):
    """handle_livetime modes: default drops, 'preserve' keeps, 'reduce' scales."""
    factor = 2
    cal_spec.livetime = 300.0
    assert cal_spec.downsample(factor).livetime is None
    preserved = cal_spec.downsample(factor, handle_livetime="preserve")
    assert preserved.livetime == cal_spec.livetime
    reduced = cal_spec.downsample(factor, handle_livetime="reduce")
    assert reduced.livetime == cal_spec.livetime / factor
def test_downsample_error(cal_spec):
    """Downsampling factors below 1 are rejected with ValueError."""
    with pytest.raises(ValueError):
        cal_spec.downsample(0.5)
def test_downsample_cps_error(uncal_spec_cps):
    """CPS-based spectra cannot be downsampled."""
    with pytest.raises(bq.SpectrumError):
        uncal_spec_cps.downsample(12)
def test_downsample_handle_livetime_error(uncal_spec):
    """An unrecognized handle_livetime mode raises ValueError."""
    with pytest.raises(ValueError):
        uncal_spec.downsample(5, handle_livetime="asdf")
# ----------------------------------------------
# Test Spectrum.__len__
# ----------------------------------------------
@pytest.fixture(params=[1, 8, 256, 16384])
def length(request):
    """Spectrum lengths spanning tiny to full-size detectors."""
    return request.param
def test_len(length):
    """len(spectrum) equals the number of channels it was built with."""
    counts = np.random.poisson(lam=TEST_COUNTS, size=length)
    spectrum = bq.Spectrum(counts.astype(int))
    assert len(spectrum) == length
def test_len_cps(length, livetime):
    """len() also works for a spectrum constructed from CPS data."""
    counts = np.random.poisson(lam=TEST_COUNTS, size=length)
    spectrum = bq.Spectrum(cps=counts / livetime)
    assert len(spectrum) == length
# ----------------------------------------------
# Test Spectrum.copy
# ----------------------------------------------
def test_copy_uncal(uncal_spec):
    """copy() of an uncal spectrum duplicates data without sharing objects."""
    clone = uncal_spec.copy()
    assert np.all(clone.counts_vals == uncal_spec.counts_vals)
    assert np.all(clone.counts_uncs == uncal_spec.counts_uncs)
    # Deep copy: the spectrum, the counts array, and its elements are all new.
    assert clone is not uncal_spec
    assert clone.counts is not uncal_spec.counts
    assert clone.counts[0] is not uncal_spec.counts[0]
def test_copy_cal(cal_spec):
    """copy() of a calibrated spectrum also deep-copies the bin edges."""
    clone = cal_spec.copy()
    assert np.all(clone.counts_vals == cal_spec.counts_vals)
    assert np.all(clone.counts_uncs == cal_spec.counts_uncs)
    assert np.all(clone.bin_edges_kev == cal_spec.bin_edges_kev)
    # Deep copy: no object is shared between original and clone.
    assert clone is not cal_spec
    assert clone.counts is not cal_spec.counts
    assert clone.counts[0] is not cal_spec.counts[0]
    assert clone.bin_edges_kev is not cal_spec.bin_edges_kev
# ----------------------------------------------
# Test Spectrum.rebin
# ----------------------------------------------
@pytest.fixture(
    params=[
        TEST_EDGES_KEV.copy(),
        TEST_EDGES_KEV.copy()[1:-2],
        np.linspace(
            TEST_EDGES_KEV.min(), TEST_EDGES_KEV.max(), len(TEST_EDGES_KEV) + 10
        ),
    ],
    ids=["same edges", "subset of edges", "same bounds more bins"],
)
def rebin_new_edges(request):
    """Candidate output bin edges for rebinning, coerced to float arrays."""
    return request.param.astype(float)
@pytest.fixture(
    params=["interpolation", "listmode"],
    ids=["interpolation method", "listmode method"],
)
def rebin_method(request):
    """The two rebinning algorithms accepted by Spectrum.rebin."""
    return request.param
@pytest.fixture(
    params=[("uncal", 300), ("uncal", None), ("cal_cps", None)],
    ids=[
        "uncalibrated spectrum with livetime",
        "uncalibrated spectrum without livetime",
        "calibrated spectrum with cps",
    ],
)
def rebin_spectrum_failure(request):
    """Spectra whose state makes rebinning invalid."""
    spectype, livetime = request.param
    return make_spec(spectype, lt=livetime)
def test_spectrum_rebin_failure(rebin_spectrum_failure, rebin_new_edges, rebin_method):
    """Rebinning an unsuitable spectrum raises SpectrumError."""
    with pytest.raises(bq.SpectrumError):
        rebin_spectrum_failure.rebin(
            rebin_new_edges, method=rebin_method, zero_pad_warnings=False
        )
@pytest.fixture(
    params=[("cal", 300), ("cal", None), ("cal_cps", 300)],
    ids=[
        "calibrated spectrum with livetime",
        "calibrated spectrum without livetime",
        "calibrated spectrum with cps and livetime",
    ],
)
def rebin_spectrum_success(request):
    """Calibrated spectra that can be rebinned successfully."""
    spectype, livetime = request.param
    return make_spec(spectype, lt=livetime)
def test_spectrum_rebin_success(rebin_spectrum_success, rebin_new_edges, rebin_method):
    """Rebinning preserves total counts and the livetime (when known)."""
    rebin_kwargs = dict(
        out_edges=rebin_new_edges, method=rebin_method, zero_pad_warnings=False
    )
    if (rebin_spectrum_success._counts is None) and (rebin_method == "listmode"):
        # Listmode rebinning of a CPS-only spectrum emits a warning.
        with pytest.warns(bq.SpectrumWarning):
            rebinned = rebin_spectrum_success.rebin(**rebin_kwargs)
    else:
        rebinned = rebin_spectrum_success.rebin(**rebin_kwargs)
    assert np.isclose(
        rebin_spectrum_success.counts_vals.sum(), rebinned.counts_vals.sum()
    )
    if rebin_spectrum_success.livetime is None:
        assert rebinned.livetime is None
    else:
        assert np.isclose(rebin_spectrum_success.livetime, rebinned.livetime)
# ----------------------------------------------
# Test Spectrum.rebin_like
# ----------------------------------------------
def test_spectrum_rebin_like():
    """rebin_like adopts another spectrum's edges while conserving counts."""
    target = make_spec("cal")
    source = make_spec("cal_new")
    # Precondition: the two calibrations genuinely differ somewhere.
    assert np.any(~np.isclose(target.bin_edges_kev, source.bin_edges_kev))
    rebinned = source.rebin_like(target)
    assert np.all(np.isclose(target.bin_edges_kev, rebinned.bin_edges_kev))
    assert np.isclose(source.counts_vals.sum(), rebinned.counts_vals.sum())
| [
"numpy.sqrt",
"numpy.array",
"becquerel.Spectrum.from_listmode",
"pytest.fixture",
"uncertainties.ufloat",
"numpy.arange",
"datetime.datetime",
"numpy.random.poisson",
"numpy.where",
"becquerel.Calibration.from_linear",
"numpy.logspace",
"uncertainties.unumpy.uarray",
"numpy.random.normal",
... | [((5201, 5276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spec_type"""', "['uncal', 'cal', 'uncal_long', 'cal']"], {}), "('spec_type', ['uncal', 'cal', 'uncal_long', 'cal'])\n", (5224, 5276), False, 'import pytest\n'), ((5819, 5859), 'numpy.random.normal', 'np.random.normal', (['MEAN', 'STDDEV', 'NSAMPLES'], {}), '(MEAN, STDDEV, NSAMPLES)\n', (5835, 5859), True, 'import numpy as np\n'), ((5871, 5911), 'numpy.logspace', 'np.logspace', (['(1)', '(4)'], {'num': 'NEDGES', 'base': '(10.0)'}), '(1, 4, num=NEDGES, base=10.0)\n', (5882, 5911), True, 'import numpy as np\n'), ((6498, 6546), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (6521, 6546), False, 'import pytest\n'), ((6548, 6605), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (6571, 6605), False, 'import pytest\n'), ((7363, 7411), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (7386, 7411), False, 'import pytest\n'), ((7413, 7470), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (7436, 7470), False, 'import pytest\n'), ((7753, 7801), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (7776, 7801), False, 'import pytest\n'), ((7803, 7860), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (7826, 7860), False, 'import pytest\n'), ((8426, 8481), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spec_str"""', "['uniform', 'log']"], {}), "('spec_str', ['uniform', 'log'])\n", (8449, 8481), False, 'import pytest\n'), ((8483, 8531), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (8506, 8531), False, 'import pytest\n'), ((8533, 8590), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (8556, 8590), False, 'import pytest\n'), ((9117, 9183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spec_str"""', "['uniform', 'default', 'log']"], {}), "('spec_str', ['uniform', 'default', 'log'])\n", (9140, 9183), False, 'import pytest\n'), ((9185, 9233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (9208, 9233), False, 'import pytest\n'), ((9235, 9292), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (9258, 9292), False, 'import pytest\n'), ((10071, 10119), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_cal"""', '[False, True]'], {}), "('is_cal', [False, True])\n", (10094, 10119), False, 'import pytest\n'), ((10121, 10178), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""apply_cal"""', '[None, False, True]'], {}), "('apply_cal', [None, False, True])\n", (10144, 10178), False, 'import pytest\n'), ((10830, 10873), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[86400, 300.6, 0.88]'}), '(params=[86400, 300.6, 0.88])\n', (10844, 10873), False, 'import pytest\n'), ((12033, 12078), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rt"""', '[3600, 2345.6]'], {}), "('rt', [3600, 2345.6])\n", (12056, 12078), False, 'import pytest\n'), ((12391, 12597), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start, stop, rt, expected_err"""', "[('2017-01-19 17:21:00', '2017-01-20 17:21:00', 86400, bq.SpectrumError), (\n '2017-01-19 17:21:00', '2017-01-18 17:21:00', None, ValueError)]"], {}), "('start, stop, rt, expected_err', [(\n '2017-01-19 
17:21:00', '2017-01-20 17:21:00', 86400, bq.SpectrumError),\n ('2017-01-19 17:21:00', '2017-01-18 17:21:00', None, ValueError)])\n", (12414, 12597), False, 'import pytest\n'), ((15849, 15977), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""construction_kwargs"""', "[{'livetime': 300.0}, {'livetime': 300.0, 'bin_edges_kev': TEST_EDGES_KEV}]"], {}), "('construction_kwargs', [{'livetime': 300.0}, {\n 'livetime': 300.0, 'bin_edges_kev': TEST_EDGES_KEV}])\n", (15872, 15977), False, 'import pytest\n'), ((17824, 17932), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lt1, lt2"""', '[(300, 600), (12.6, 0.88), (300, 12.6), (12.6, None), (None, None)]'], {}), "('lt1, lt2', [(300, 600), (12.6, 0.88), (300, 12.6),\n (12.6, None), (None, None)])\n", (17847, 17932), False, 'import pytest\n'), ((17936, 18013), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2"""', "[('uncal', 'uncal'), ('cal', 'cal')]"], {}), "('type1, type2', [('uncal', 'uncal'), ('cal', 'cal')])\n", (17959, 18013), False, 'import pytest\n'), ((18515, 18908), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2, expected_error"""', "[('uncal', 'cal', bq.SpectrumError), ('uncal', 'uncal_long', bq.\n SpectrumError), ('uncal', 'data', TypeError), ('data', 'uncal',\n TypeError), ('uncal', 5, TypeError), (5, 'cal', TypeError), ('cal',\n 'asdf', TypeError), ('asdf', 'uncal', TypeError), ('uncal', 'data',\n TypeError), ('cal', 'cal_new', NotImplementedError)]"], {}), "('type1, type2, expected_error', [('uncal', 'cal',\n bq.SpectrumError), ('uncal', 'uncal_long', bq.SpectrumError), ('uncal',\n 'data', TypeError), ('data', 'uncal', TypeError), ('uncal', 5,\n TypeError), (5, 'cal', TypeError), ('cal', 'asdf', TypeError), ('asdf',\n 'uncal', TypeError), ('uncal', 'data', TypeError), ('cal', 'cal_new',\n NotImplementedError)])\n", (18538, 18908), False, 'import pytest\n'), ((19283, 19360), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, 
type2"""', "[('uncal', 'uncal'), ('cal', 'cal')]"], {}), "('type1, type2', [('uncal', 'uncal'), ('cal', 'cal')])\n", (19306, 19360), False, 'import pytest\n'), ((19693, 19872), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2, lt1, lt2"""', "[('uncal_cps', 'uncal_cps', 300, 12.6), ('uncal_cps', 'uncal_cps', None, \n 12.6), ('uncal_cps', 'uncal_cps', None, None)]"], {}), "('type1, type2, lt1, lt2', [('uncal_cps',\n 'uncal_cps', 300, 12.6), ('uncal_cps', 'uncal_cps', None, 12.6), (\n 'uncal_cps', 'uncal_cps', None, None)])\n", (19716, 19872), False, 'import pytest\n'), ((20334, 20608), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2, lt1, lt2"""', "[('uncal', 'uncal_cps', None, None), ('uncal_cps', 'uncal', None, None), (\n 'uncal', 'uncal_cps', 300, None), ('uncal_cps', 'uncal', None, 300), (\n 'uncal', 'uncal_cps', 300, 600), ('uncal_cps', 'uncal', 600, 300)]"], {}), "('type1, type2, lt1, lt2', [('uncal', 'uncal_cps',\n None, None), ('uncal_cps', 'uncal', None, None), ('uncal', 'uncal_cps',\n 300, None), ('uncal_cps', 'uncal', None, 300), ('uncal', 'uncal_cps', \n 300, 600), ('uncal_cps', 'uncal', 600, 300)])\n", (20357, 20608), False, 'import pytest\n'), ((20909, 20985), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lt1, lt2"""', '[(300, 600), (12.6, 0.88), (300, 12.6)]'], {}), "('lt1, lt2', [(300, 600), (12.6, 0.88), (300, 12.6)])\n", (20932, 20985), False, 'import pytest\n'), ((20987, 21064), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2"""', "[('uncal', 'uncal'), ('cal', 'cal')]"], {}), "('type1, type2', [('uncal', 'uncal'), ('cal', 'cal')])\n", (21010, 21064), False, 'import pytest\n'), ((21515, 21791), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type1, type2, lt1, lt2"""', "[('uncal', 'uncal_cps', None, None), ('uncal_cps', 'uncal', None, None), (\n 'uncal', 'uncal_cps', None, 300), ('uncal_cps', 'uncal', 300, None), (\n 'uncal', 'uncal_cps', 300, None), 
('uncal_cps', 'uncal', None, 300)]"], {}), "('type1, type2, lt1, lt2', [('uncal', 'uncal_cps',\n None, None), ('uncal_cps', 'uncal', None, None), ('uncal', 'uncal_cps',\n None, 300), ('uncal_cps', 'uncal', 300, None), ('uncal', 'uncal_cps', \n 300, None), ('uncal_cps', 'uncal', None, 300)])\n", (21538, 21791), False, 'import pytest\n'), ((22427, 22480), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""factor"""', '[0.88, 1, 2, 43.6]'], {}), "('factor', [0.88, 1, 2, 43.6])\n", (22450, 22480), False, 'import pytest\n'), ((22482, 22535), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spectype"""', "['uncal', 'cal']"], {}), "('spectype', ['uncal', 'cal'])\n", (22505, 22535), False, 'import pytest\n'), ((23326, 23379), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""factor"""', '[0.88, 1, 2, 43.6]'], {}), "('factor', [0.88, 1, 2, 43.6])\n", (23349, 23379), False, 'import pytest\n'), ((24023, 24076), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spectype"""', "['uncal', 'cal']"], {}), "('spectype', ['uncal', 'cal'])\n", (24046, 24076), False, 'import pytest\n'), ((27246, 27312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spectype"""', "['uncal', 'cal', 'uncal_cps']"], {}), "('spectype', ['uncal', 'cal', 'uncal_cps'])\n", (27269, 27312), False, 'import pytest\n'), ((27840, 27906), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spectype"""', "['uncal', 'cal', 'uncal_cps']"], {}), "('spectype', ['uncal', 'cal', 'uncal_cps'])\n", (27863, 27906), False, 'import pytest\n'), ((28660, 28713), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spectype"""', "['uncal', 'cal']"], {}), "('spectype', ['uncal', 'cal'])\n", (28683, 28713), False, 'import pytest\n'), ((28715, 28761), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""f"""', '[2, 1.5, 999.99]'], {}), "('f', [2, 1.5, 999.99])\n", (28738, 28761), False, 'import pytest\n'), ((30697, 30738), 'pytest.fixture', 'pytest.fixture', 
([], {'params': '[1, 8, 256, 16384]'}), '(params=[1, 8, 256, 16384])\n', (30711, 30738), False, 'import pytest\n'), ((32696, 32802), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['interpolation', 'listmode']", 'ids': "['interpolation method', 'listmode method']"}), "(params=['interpolation', 'listmode'], ids=[\n 'interpolation method', 'listmode method'])\n", (32710, 32802), False, 'import pytest\n'), ((32864, 33066), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('uncal', 300), ('uncal', None), ('cal_cps', None)]", 'ids': "['uncalibrated spectrum with livetime',\n 'uncalibrated spectrum without livetime', 'calibrated spectrum with cps']"}), "(params=[('uncal', 300), ('uncal', None), ('cal_cps', None)],\n ids=['uncalibrated spectrum with livetime',\n 'uncalibrated spectrum without livetime', 'calibrated spectrum with cps'])\n", (32878, 33066), False, 'import pytest\n'), ((33455, 33666), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[('cal', 300), ('cal', None), ('cal_cps', 300)]", 'ids': "['calibrated spectrum with livetime',\n 'calibrated spectrum without livetime',\n 'calibrated spectrum with cps and livetime']"}), "(params=[('cal', 300), ('cal', None), ('cal_cps', 300)], ids=\n ['calibrated spectrum with livetime',\n 'calibrated spectrum without livetime',\n 'calibrated spectrum with cps and livetime'])\n", (33469, 33666), False, 'import pytest\n'), ((230, 261), 'numpy.arange', 'np.arange', (['(TEST_DATA_LENGTH + 1)'], {}), '(TEST_DATA_LENGTH + 1)\n', (239, 261), True, 'import numpy as np\n'), ((391, 428), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'lam', 'size': 'size'}), '(lam=lam, size=size)\n', (408, 428), True, 'import numpy as np\n'), ((4949, 4982), 'numpy.where', 'np.where', (['(neg_spec < 0)', 'np.nan', '(1)'], {}), '(neg_spec < 0, np.nan, 1)\n', (4957, 4982), True, 'import numpy as np\n'), ((5075, 5111), 'becquerel.Spectrum', 'bq.Spectrum', (['neg_spec'], {'uncs': 'neg_uncs'}), '(neg_spec, uncs=neg_uncs)\n', (5086, 
5111), True, 'import becquerel as bq\n'), ((5123, 5151), 'numpy.any', 'np.any', (['(spec.counts_vals < 0)'], {}), '(spec.counts_vals < 0)\n', (5129, 5151), True, 'import numpy as np\n'), ((8341, 8360), 'numpy.allclose', 'np.allclose', (['e0', 'e1'], {}), '(e0, e1)\n', (8352, 8360), True, 'import numpy as np\n'), ((8372, 8391), 'numpy.allclose', 'np.allclose', (['w0', 'w1'], {}), '(w0, w1)\n', (8383, 8391), True, 'import numpy as np\n'), ((8403, 8422), 'numpy.allclose', 'np.allclose', (['c0', 'c1'], {}), '(c0, c1)\n', (8414, 8422), True, 'import numpy as np\n'), ((11018, 11059), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'livetime': 'livetime'}), '(spec_data, livetime=livetime)\n', (11029, 11059), True, 'import becquerel as bq\n'), ((11244, 11283), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'cps': 'cps', 'livetime': 'livetime'}), '(cps=cps, livetime=livetime)\n', (11255, 11283), True, 'import becquerel as bq\n'), ((11421, 11443), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {}), '(spec_data)\n', (11432, 11443), True, 'import becquerel as bq\n'), ((11493, 11527), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'cps': '(spec_data / 300.6)'}), '(cps=spec_data / 300.6)\n', (11504, 11527), True, 'import becquerel as bq\n'), ((12217, 12273), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'start_time': 'start', 'stop_time': 'stop'}), '(spec_data, start_time=start, stop_time=stop)\n', (12228, 12273), True, 'import becquerel as bq\n'), ((12278, 12331), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'start_time': 'start', 'realtime': 'rt'}), '(spec_data, start_time=start, realtime=rt)\n', (12289, 12331), True, 'import becquerel as bq\n'), ((12336, 12387), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'realtime': 'rt', 'stop_time': 'stop'}), '(spec_data, realtime=rt, stop_time=stop)\n', (12347, 12387), True, 'import becquerel as bq\n'), ((13340, 13362), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {}), '(spec_data)\n', (13351, 
13362), True, 'import becquerel as bq\n'), ((13661, 13681), 'becquerel.Spectrum', 'bq.Spectrum', (['ucounts'], {}), '(ucounts)\n', (13672, 13681), True, 'import becquerel as bq\n'), ((13891, 13914), 'numpy.ones_like', 'np.ones_like', (['spec_data'], {}), '(spec_data)\n', (13903, 13914), True, 'import numpy as np\n'), ((13926, 13959), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'uncs': 'uncs'}), '(spec_data, uncs=uncs)\n', (13937, 13959), True, 'import becquerel as bq\n'), ((14018, 14060), 'numpy.array', 'np.array', (['[c.std_dev for c in spec.counts]'], {}), '([c.std_dev for c in spec.counts])\n', (14026, 14060), True, 'import numpy as np\n'), ((14072, 14093), 'numpy.allclose', 'np.allclose', (['uncs2', '(1)'], {}), '(uncs2, 1)\n', (14083, 14093), True, 'import numpy as np\n'), ((14216, 14239), 'numpy.ones_like', 'np.ones_like', (['spec_data'], {}), '(spec_data)\n', (14228, 14239), True, 'import numpy as np\n'), ((14254, 14284), 'uncertainties.unumpy.uarray', 'unumpy.uarray', (['spec_data', 'uncs'], {}), '(spec_data, uncs)\n', (14267, 14284), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((14580, 14602), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {}), '(spec_data)\n', (14591, 14602), True, 'import becquerel as bq\n'), ((14660, 14700), 'numpy.allclose', 'np.allclose', (['spec.counts_vals', 'spec_data'], {}), '(spec.counts_vals, spec_data)\n', (14671, 14700), True, 'import numpy as np\n'), ((14721, 14739), 'numpy.sqrt', 'np.sqrt', (['spec_data'], {}), '(spec_data)\n', (14728, 14739), True, 'import numpy as np\n'), ((14793, 14837), 'numpy.allclose', 'np.allclose', (['spec.counts_uncs', 'expected_uncs'], {}), '(spec.counts_uncs, expected_uncs)\n', (14804, 14837), True, 'import numpy as np\n'), ((14874, 14904), 'uncertainties.unumpy.uarray', 'unumpy.uarray', (['spec_data', 'uncs'], {}), '(spec_data, uncs)\n', (14887, 14904), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((14916, 14936), 'becquerel.Spectrum', 
'bq.Spectrum', (['ucounts'], {}), '(ucounts)\n', (14927, 14936), True, 'import becquerel as bq\n'), ((14948, 14988), 'numpy.allclose', 'np.allclose', (['spec.counts_vals', 'spec_data'], {}), '(spec.counts_vals, spec_data)\n', (14959, 14988), True, 'import numpy as np\n'), ((15000, 15035), 'numpy.allclose', 'np.allclose', (['spec.counts_uncs', 'uncs'], {}), '(spec.counts_uncs, uncs)\n', (15011, 15035), True, 'import numpy as np\n'), ((15048, 15071), 'numpy.ones_like', 'np.ones_like', (['spec_data'], {}), '(spec_data)\n', (15060, 15071), True, 'import numpy as np\n'), ((15083, 15116), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'uncs': 'uncs'}), '(spec_data, uncs=uncs)\n', (15094, 15116), True, 'import becquerel as bq\n'), ((15128, 15163), 'numpy.allclose', 'np.allclose', (['spec.counts_uncs', 'uncs'], {}), '(spec.counts_uncs, uncs)\n', (15139, 15163), True, 'import numpy as np\n'), ((15479, 15526), 'numpy.allclose', 'np.allclose', (['cal_spec.bin_widths_kev', 'TEST_GAIN'], {}), '(cal_spec.bin_widths_kev, TEST_GAIN)\n', (15490, 15526), True, 'import numpy as np\n'), ((16138, 16183), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {}), '(spec_data, **construction_kwargs)\n', (16149, 16183), True, 'import becquerel as bq\n'), ((16244, 16281), 'numpy.all', 'np.all', (['(spec.counts_vals == spec_data)'], {}), '(spec.counts_vals == spec_data)\n', (16250, 16281), True, 'import numpy as np\n'), ((16293, 16346), 'numpy.allclose', 'np.allclose', (['spec.cps_vals', '(spec_data / spec.livetime)'], {}), '(spec.cps_vals, spec_data / spec.livetime)\n', (16304, 16346), True, 'import numpy as np\n'), ((16358, 16418), 'numpy.allclose', 'np.allclose', (['spec.cps_uncs', '(spec.counts_uncs / spec.livetime)'], {}), '(spec.cps_uncs, spec.counts_uncs / spec.livetime)\n', (16369, 16418), True, 'import numpy as np\n'), ((16520, 16591), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'livetime': 'livetime', 'bin_edges_kev': 'TEST_EDGES_KEV'}), '(spec_data, 
livetime=livetime, bin_edges_kev=TEST_EDGES_KEV)\n', (16531, 16591), True, 'import becquerel as bq\n'), ((17571, 17609), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'livetime': '(300.9)'}), '(spec_data, livetime=300.9)\n', (17582, 17609), True, 'import becquerel as bq\n'), ((18386, 18435), 'numpy.all', 'np.all', (['(tot.counts == spec1.counts + spec2.counts)'], {}), '(tot.counts == spec1.counts + spec2.counts)\n', (18392, 18435), True, 'import numpy as np\n'), ((18447, 18511), 'numpy.all', 'np.all', (['(tot.counts_vals == spec1.counts_vals + spec2.counts_vals)'], {}), '(tot.counts_vals == spec1.counts_vals + spec2.counts_vals)\n', (18453, 18511), True, 'import numpy as np\n'), ((19591, 19647), 'numpy.sqrt', 'np.sqrt', (['(spec1.counts_uncs ** 2 + spec2.counts_uncs ** 2)'], {}), '(spec1.counts_uncs ** 2 + spec2.counts_uncs ** 2)\n', (19598, 19647), True, 'import numpy as np\n'), ((19655, 19689), 'numpy.allclose', 'np.allclose', (['tot.counts_uncs', 'uncs'], {}), '(tot.counts_uncs, uncs)\n', (19666, 19689), True, 'import numpy as np\n'), ((20116, 20171), 'numpy.all', 'np.all', (['(tot.cps_vals == spec1.cps_vals + spec2.cps_vals)'], {}), '(tot.cps_vals == spec1.cps_vals + spec2.cps_vals)\n', (20122, 20171), True, 'import numpy as np\n'), ((20274, 20330), 'numpy.all', 'np.all', (['(diff.cps_vals == spec1.cps_vals - spec2.cps_vals)'], {}), '(diff.cps_vals == spec1.cps_vals - spec2.cps_vals)\n', (20280, 20330), True, 'import numpy as np\n'), ((21352, 21411), 'numpy.allclose', 'np.allclose', (['diff.cps_vals', '(spec1.cps_vals - spec2.cps_vals)'], {}), '(diff.cps_vals, spec1.cps_vals - spec2.cps_vals)\n', (21363, 21411), True, 'import numpy as np\n'), ((21423, 21461), 'numpy.all', 'np.all', (['(diff.cps_uncs > spec1.cps_uncs)'], {}), '(diff.cps_uncs > spec1.cps_uncs)\n', (21429, 21461), True, 'import numpy as np\n'), ((21473, 21511), 'numpy.all', 'np.all', (['(diff.cps_uncs > spec2.cps_uncs)'], {}), '(diff.cps_uncs > spec2.cps_uncs)\n', (21479, 21511), True, 
'import numpy as np\n'), ((22740, 22801), 'numpy.allclose', 'np.allclose', (['mult_left.counts_vals', '(factor * spec.counts_vals)'], {}), '(mult_left.counts_vals, factor * spec.counts_vals)\n', (22751, 22801), True, 'import numpy as np\n'), ((22813, 22874), 'numpy.allclose', 'np.allclose', (['mult_left.counts_uncs', '(factor * spec.counts_uncs)'], {}), '(mult_left.counts_uncs, factor * spec.counts_uncs)\n', (22824, 22874), True, 'import numpy as np\n'), ((22956, 23018), 'numpy.allclose', 'np.allclose', (['mult_right.counts_vals', '(factor * spec.counts_vals)'], {}), '(mult_right.counts_vals, factor * spec.counts_vals)\n', (22967, 23018), True, 'import numpy as np\n'), ((23030, 23092), 'numpy.allclose', 'np.allclose', (['mult_right.counts_uncs', '(factor * spec.counts_uncs)'], {}), '(mult_right.counts_uncs, factor * spec.counts_uncs)\n', (23041, 23092), True, 'import numpy as np\n'), ((23168, 23223), 'numpy.allclose', 'np.allclose', (['div.counts_vals', '(spec.counts_vals / factor)'], {}), '(div.counts_vals, spec.counts_vals / factor)\n', (23179, 23223), True, 'import numpy as np\n'), ((23235, 23290), 'numpy.allclose', 'np.allclose', (['div.counts_uncs', '(spec.counts_uncs / factor)'], {}), '(div.counts_uncs, spec.counts_uncs / factor)\n', (23246, 23290), True, 'import numpy as np\n'), ((23531, 23596), 'numpy.allclose', 'np.allclose', (['mult_left.cps_vals', '(factor * uncal_spec_cps.cps_vals)'], {}), '(mult_left.cps_vals, factor * uncal_spec_cps.cps_vals)\n', (23542, 23596), True, 'import numpy as np\n'), ((23688, 23754), 'numpy.allclose', 'np.allclose', (['mult_right.cps_vals', '(factor * uncal_spec_cps.cps_vals)'], {}), '(mult_right.cps_vals, factor * uncal_spec_cps.cps_vals)\n', (23699, 23754), True, 'import numpy as np\n'), ((23840, 23899), 'numpy.allclose', 'np.allclose', (['div.cps_vals', '(uncal_spec_cps.cps_vals / factor)'], {}), '(div.cps_vals, uncal_spec_cps.cps_vals / factor)\n', (23851, 23899), True, 'import numpy as np\n'), ((24299, 24374), 
'numpy.allclose', 'np.allclose', (['mult_left.counts_vals', '(factor.nominal_value * spec.counts_vals)'], {}), '(mult_left.counts_vals, factor.nominal_value * spec.counts_vals)\n', (24310, 24374), True, 'import numpy as np\n'), ((24386, 24489), 'numpy.all', 'np.all', (['((mult_left.counts_uncs > factor.nominal_value * spec.counts_uncs) | (spec.\n counts_vals == 0))'], {}), '((mult_left.counts_uncs > factor.nominal_value * spec.counts_uncs) |\n (spec.counts_vals == 0))\n', (24392, 24489), True, 'import numpy as np\n'), ((24589, 24665), 'numpy.allclose', 'np.allclose', (['mult_right.counts_vals', '(factor.nominal_value * spec.counts_vals)'], {}), '(mult_right.counts_vals, factor.nominal_value * spec.counts_vals)\n', (24600, 24665), True, 'import numpy as np\n'), ((24677, 24781), 'numpy.all', 'np.all', (['((mult_right.counts_uncs > factor.nominal_value * spec.counts_uncs) | (spec\n .counts_vals == 0))'], {}), '((mult_right.counts_uncs > factor.nominal_value * spec.counts_uncs) |\n (spec.counts_vals == 0))\n', (24683, 24781), True, 'import numpy as np\n'), ((24875, 24944), 'numpy.allclose', 'np.allclose', (['div.counts_vals', '(spec.counts_vals / factor.nominal_value)'], {}), '(div.counts_vals, spec.counts_vals / factor.nominal_value)\n', (24886, 24944), True, 'import numpy as np\n'), ((24956, 25054), 'numpy.all', 'np.all', (['((div.counts_uncs > spec.counts_uncs / factor.nominal_value) | (spec.\n counts_vals == 0))'], {}), '((div.counts_uncs > spec.counts_uncs / factor.nominal_value) | (spec.\n counts_vals == 0))\n', (24962, 25054), True, 'import numpy as np\n'), ((26159, 26217), 'numpy.all', 'np.all', (['(uncal_spec.bin_edges_kev == cal_spec.bin_edges_kev)'], {}), '(uncal_spec.bin_edges_kev == cal_spec.bin_edges_kev)\n', (26165, 26217), True, 'import numpy as np\n'), ((26478, 26533), 'numpy.all', 'np.all', (['(cal_spec.bin_edges_kev == cal_new.bin_edges_kev)'], {}), '(cal_spec.bin_edges_kev == cal_new.bin_edges_kev)\n', (26484, 26533), True, 'import numpy as np\n'), 
((28921, 28945), 'numpy.sum', 'np.sum', (['spec.counts_vals'], {}), '(spec.counts_vals)\n', (28927, 28945), True, 'import numpy as np\n'), ((28986, 29011), 'numpy.sum', 'np.sum', (['spec2.counts_vals'], {}), '(spec2.counts_vals)\n', (28992, 29011), True, 'import numpy as np\n'), ((29095, 29134), 'numpy.isclose', 'np.isclose', (['r', '(1.0 / f)'], {'atol': 'five_sigma'}), '(r, 1.0 / f, atol=five_sigma)\n', (29105, 29134), True, 'import numpy as np\n'), ((29235, 29263), 'numpy.sum', 'np.sum', (['cal_spec.counts_vals'], {}), '(cal_spec.counts_vals)\n', (29241, 29263), True, 'import numpy as np\n'), ((29310, 29335), 'numpy.sum', 'np.sum', (['spec2.counts_vals'], {}), '(spec2.counts_vals)\n', (29316, 29335), True, 'import numpy as np\n'), ((29502, 29527), 'numpy.sum', 'np.sum', (['spec2.counts_vals'], {}), '(spec2.counts_vals)\n', (29508, 29527), True, 'import numpy as np\n'), ((30855, 30902), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'TEST_COUNTS', 'size': 'length'}), '(lam=TEST_COUNTS, size=length)\n', (30872, 30902), True, 'import numpy as np\n'), ((31089, 31136), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'TEST_COUNTS', 'size': 'length'}), '(lam=TEST_COUNTS, size=length)\n', (31106, 31136), True, 'import numpy as np\n'), ((31148, 31185), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'cps': '(floatdata / livetime)'}), '(cps=floatdata / livetime)\n', (31159, 31185), True, 'import becquerel as bq\n'), ((31469, 31521), 'numpy.all', 'np.all', (['(uncal2.counts_vals == uncal_spec.counts_vals)'], {}), '(uncal2.counts_vals == uncal_spec.counts_vals)\n', (31475, 31521), True, 'import numpy as np\n'), ((31533, 31585), 'numpy.all', 'np.all', (['(uncal2.counts_uncs == uncal_spec.counts_uncs)'], {}), '(uncal2.counts_uncs == uncal_spec.counts_uncs)\n', (31539, 31585), True, 'import numpy as np\n'), ((31841, 31889), 'numpy.all', 'np.all', (['(cal2.counts_vals == cal_spec.counts_vals)'], {}), '(cal2.counts_vals == cal_spec.counts_vals)\n', (31847, 
31889), True, 'import numpy as np\n'), ((31901, 31949), 'numpy.all', 'np.all', (['(cal2.counts_uncs == cal_spec.counts_uncs)'], {}), '(cal2.counts_uncs == cal_spec.counts_uncs)\n', (31907, 31949), True, 'import numpy as np\n'), ((31961, 32013), 'numpy.all', 'np.all', (['(cal2.bin_edges_kev == cal_spec.bin_edges_kev)'], {}), '(cal2.bin_edges_kev == cal_spec.bin_edges_kev)\n', (31967, 32013), True, 'import numpy as np\n'), ((3869, 3900), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (3882, 3900), False, 'import pytest\n'), ((3910, 3925), 'becquerel.Spectrum', 'bq.Spectrum', (['[]'], {}), '([])\n', (3921, 3925), True, 'import becquerel as bq\n'), ((3935, 3966), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (3948, 3966), False, 'import pytest\n'), ((3976, 3995), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'cps': '[]'}), '(cps=[])\n', (3987, 3995), True, 'import becquerel as bq\n'), ((4005, 4036), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (4018, 4036), False, 'import pytest\n'), ((4046, 4103), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'bin_edges_kev': 'TEST_EDGES_KEV[:-1]'}), '(spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1])\n', (4057, 4103), True, 'import becquerel as bq\n'), ((4113, 4144), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (4126, 4144), False, 'import pytest\n'), ((4154, 4215), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'cps': 'spec_data', 'bin_edges_kev': 'TEST_EDGES_KEV[:-1]'}), '(cps=spec_data, bin_edges_kev=TEST_EDGES_KEV[:-1])\n', (4165, 4215), True, 'import becquerel as bq\n'), ((4225, 4256), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (4238, 4256), False, 'import pytest\n'), ((4266, 4303), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'cps': 'spec_data'}), '(spec_data, cps=spec_data)\n', (4277, 4303), True, 'import becquerel as 
bq\n'), ((4313, 4344), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (4326, 4344), False, 'import pytest\n'), ((4354, 4395), 'becquerel.Spectrum', 'bq.Spectrum', ([], {'bin_edges_kev': 'TEST_EDGES_KEV'}), '(bin_edges_kev=TEST_EDGES_KEV)\n', (4365, 4395), True, 'import becquerel as bq\n'), ((4477, 4502), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4490, 4502), False, 'import pytest\n'), ((4512, 4559), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'bin_edges_kev': 'bad_edges'}), '(spec_data, bin_edges_kev=bad_edges)\n', (4523, 4559), True, 'import becquerel as bq\n'), ((4651, 4686), 'pytest.raises', 'pytest.raises', (['bq.UncalibratedError'], {}), '(bq.UncalibratedError)\n', (4664, 4686), False, 'import pytest\n'), ((4993, 5024), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (5006, 5024), False, 'import pytest\n'), ((5041, 5062), 'becquerel.Spectrum', 'bq.Spectrum', (['neg_spec'], {}), '(neg_spec)\n', (5052, 5062), True, 'import becquerel as bq\n'), ((5170, 5196), 'numpy.isnan', 'np.isnan', (['spec.counts_uncs'], {}), '(spec.counts_uncs)\n', (5178, 5196), True, 'import numpy as np\n'), ((6010, 6089), 'becquerel.Spectrum.from_listmode', 'bq.Spectrum.from_listmode', (['lmd'], {'is_cal': 'is_cal', 'bins': 'NBINS', 'xmin': 'XMIN', 'xmax': 'XMAX'}), '(lmd, is_cal=is_cal, bins=NBINS, xmin=XMIN, xmax=XMAX)\n', (6035, 6089), True, 'import becquerel as bq\n'), ((6355, 6399), 'becquerel.Calibration.from_linear', 'bq.Calibration.from_linear', (['[0.0, TEST_GAIN]'], {}), '([0.0, TEST_GAIN])\n', (6381, 6399), True, 'import becquerel as bq\n'), ((7209, 7231), 'numpy.isclose', 'np.isclose', (['widths', 'bw'], {}), '(widths, bw)\n', (7219, 7231), True, 'import numpy as np\n'), ((9705, 9736), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (9718, 9736), False, 'import pytest\n'), ((9781, 9812), 'pytest.raises', 'pytest.raises', 
(['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (9794, 9812), False, 'import pytest\n'), ((12788, 12815), 'pytest.raises', 'pytest.raises', (['expected_err'], {}), '(expected_err)\n', (12801, 12815), False, 'import pytest\n'), ((12825, 12894), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'start_time': 'start', 'stop_time': 'stop', 'realtime': 'rt'}), '(spec_data, start_time=start, stop_time=stop, realtime=rt)\n', (12836, 12894), True, 'import becquerel as bq\n'), ((12995, 13020), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13008, 13020), False, 'import pytest\n'), ((13030, 13080), 'becquerel.Spectrum', 'bq.Spectrum', (['spec_data'], {'livetime': '(300)', 'realtime': '(290)'}), '(spec_data, livetime=300, realtime=290)\n', (13041, 13080), True, 'import becquerel as bq\n'), ((13625, 13648), 'numpy.ones_like', 'np.ones_like', (['spec_data'], {}), '(spec_data)\n', (13637, 13648), True, 'import numpy as np\n'), ((14294, 14341), 'pytest.raises', 'pytest.raises', (['bq.core.utils.UncertaintiesError'], {}), '(bq.core.utils.UncertaintiesError)\n', (14307, 14341), False, 'import pytest\n'), ((14351, 14382), 'becquerel.Spectrum', 'bq.Spectrum', (['ucounts'], {'uncs': 'uncs'}), '(ucounts, uncs=uncs)\n', (14362, 14382), True, 'import becquerel as bq\n'), ((14412, 14459), 'pytest.raises', 'pytest.raises', (['bq.core.utils.UncertaintiesError'], {}), '(bq.core.utils.UncertaintiesError)\n', (14425, 14459), False, 'import pytest\n'), ((14469, 14489), 'becquerel.Spectrum', 'bq.Spectrum', (['ucounts'], {}), '(ucounts)\n', (14480, 14489), True, 'import becquerel as bq\n'), ((17136, 17159), 'numpy.isnan', 'np.isnan', (['spec.cps_uncs'], {}), '(spec.cps_uncs)\n', (17144, 17159), True, 'import numpy as np\n'), ((17170, 17201), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (17183, 17201), False, 'import pytest\n'), ((17232, 17263), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), 
'(bq.SpectrumError)\n', (17245, 17263), False, 'import pytest\n'), ((17299, 17330), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (17312, 17330), False, 'import pytest\n'), ((17432, 17463), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (17445, 17463), False, 'import pytest\n'), ((17619, 17654), 'pytest.raises', 'pytest.raises', (['bq.UncalibratedError'], {}), '(bq.UncalibratedError)\n', (17632, 17654), False, 'import pytest\n'), ((19165, 19194), 'pytest.raises', 'pytest.raises', (['expected_error'], {}), '(expected_error)\n', (19178, 19194), False, 'import pytest\n'), ((19227, 19256), 'pytest.raises', 'pytest.raises', (['expected_error'], {}), '(expected_error)\n', (19240, 19256), False, 'import pytest\n'), ((19517, 19549), 'pytest.warns', 'pytest.warns', (['bq.SpectrumWarning'], {}), '(bq.SpectrumWarning)\n', (19529, 19549), False, 'import pytest\n'), ((20851, 20882), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (20864, 20882), False, 'import pytest\n'), ((21245, 21277), 'pytest.warns', 'pytest.warns', (['bq.SpectrumWarning'], {}), '(bq.SpectrumWarning)\n', (21257, 21277), False, 'import pytest\n'), ((23970, 23988), 'uncertainties.ufloat', 'ufloat', (['(0.88)', '(0.01)'], {}), '(0.88, 0.01)\n', (23976, 23988), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((23990, 24004), 'uncertainties.ufloat', 'ufloat', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (23996, 24004), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((24006, 24019), 'uncertainties.ufloat', 'ufloat', (['(43)', '(1)'], {}), '(43, 1)\n', (24012, 24019), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((25658, 25678), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (25671, 25678), False, 'import pytest\n'), ((25716, 25736), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (25729, 25736), False, 'import pytest\n'), 
((25774, 25794), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (25787, 25794), False, 'import pytest\n'), ((26729, 26764), 'pytest.raises', 'pytest.raises', (['bq.UncalibratedError'], {}), '(bq.UncalibratedError)\n', (26742, 26764), False, 'import pytest\n'), ((30107, 30132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (30120, 30132), False, 'import pytest\n'), ((30296, 30327), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (30309, 30327), False, 'import pytest\n'), ((30478, 30503), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (30491, 30503), False, 'import pytest\n'), ((33297, 33328), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (33310, 33328), False, 'import pytest\n'), ((34437, 34495), 'numpy.isclose', 'np.isclose', (['rebin_spectrum_success.livetime', 'spec.livetime'], {}), '(rebin_spectrum_success.livetime, spec.livetime)\n', (34447, 34495), True, 'import numpy as np\n'), ((34860, 34918), 'numpy.isclose', 'np.isclose', (['spec1.bin_edges_kev', 'spec2_rebin.bin_edges_kev'], {}), '(spec1.bin_edges_kev, spec2_rebin.bin_edges_kev)\n', (34870, 34918), True, 'import numpy as np\n'), ((6148, 6208), 'becquerel.Spectrum.from_listmode', 'bq.Spectrum.from_listmode', (['lmd'], {'is_cal': 'is_cal', 'bins': 'log_bins'}), '(lmd, is_cal=is_cal, bins=log_bins)\n', (6173, 6208), True, 'import becquerel as bq\n'), ((9979, 10014), 'pytest.raises', 'pytest.raises', (['bq.UncalibratedError'], {}), '(bq.UncalibratedError)\n', (9992, 10014), False, 'import pytest\n'), ((11783, 11822), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)', '(17)', '(0)', '(3)'], {}), '(2017, 1, 1, 17, 0, 3)\n', (11800, 11822), False, 'import datetime\n'), ((11836, 11875), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)', '(18)', '(0)', '(3)'], {}), '(2017, 1, 1, 18, 0, 3)\n', (11853, 11875), False, 'import 
datetime\n'), ((11953, 11997), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)', '(0)', '(30)', '(0)', '(385)'], {}), '(2017, 1, 1, 0, 30, 0, 385)\n', (11970, 11997), False, 'import datetime\n'), ((18273, 18305), 'pytest.warns', 'pytest.warns', (['bq.SpectrumWarning'], {}), '(bq.SpectrumWarning)\n', (18285, 18305), False, 'import pytest\n'), ((22084, 22115), 'pytest.raises', 'pytest.raises', (['bq.SpectrumError'], {}), '(bq.SpectrumError)\n', (22097, 22115), False, 'import pytest\n'), ((22173, 22205), 'pytest.warns', 'pytest.warns', (['bq.SpectrumWarning'], {}), '(bq.SpectrumWarning)\n', (22185, 22205), False, 'import pytest\n'), ((25410, 25422), 'uncertainties.ufloat', 'ufloat', (['(0)', '(1)'], {}), '(0, 1)\n', (25416, 25422), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((25455, 25477), 'uncertainties.ufloat', 'ufloat', (['np.inf', 'np.nan'], {}), '(np.inf, np.nan)\n', (25461, 25477), False, 'from uncertainties import ufloat, UFloat, unumpy\n'), ((27595, 27623), 'numpy.sum', 'np.sum', (['spec.counts_vals[:f]'], {}), '(spec.counts_vals[:f])\n', (27601, 27623), True, 'import numpy as np\n'), ((27639, 27667), 'numpy.sum', 'np.sum', (['combined.counts_vals'], {}), '(combined.counts_vals)\n', (27645, 27667), True, 'import numpy as np\n'), ((27671, 27695), 'numpy.sum', 'np.sum', (['spec.counts_vals'], {}), '(spec.counts_vals)\n', (27677, 27695), True, 'import numpy as np\n'), ((27745, 27770), 'numpy.sum', 'np.sum', (['spec.cps_vals[:f]'], {}), '(spec.cps_vals[:f])\n', (27751, 27770), True, 'import numpy as np\n'), ((27786, 27811), 'numpy.sum', 'np.sum', (['combined.cps_vals'], {}), '(combined.cps_vals)\n', (27792, 27811), True, 'import numpy as np\n'), ((27815, 27836), 'numpy.sum', 'np.sum', (['spec.cps_vals'], {}), '(spec.cps_vals)\n', (27821, 27836), True, 'import numpy as np\n'), ((28229, 28257), 'numpy.sum', 'np.sum', (['spec.counts_vals[:f]'], {}), '(spec.counts_vals[:f])\n', (28235, 28257), True, 'import numpy as np\n'), 
((28273, 28301), 'numpy.sum', 'np.sum', (['combined.counts_vals'], {}), '(combined.counts_vals)\n', (28279, 28301), True, 'import numpy as np\n'), ((28305, 28329), 'numpy.sum', 'np.sum', (['spec.counts_vals'], {}), '(spec.counts_vals)\n', (28311, 28329), True, 'import numpy as np\n'), ((28379, 28404), 'numpy.sum', 'np.sum', (['spec.cps_vals[:f]'], {}), '(spec.cps_vals[:f])\n', (28385, 28404), True, 'import numpy as np\n'), ((28420, 28445), 'numpy.sum', 'np.sum', (['combined.cps_vals'], {}), '(combined.cps_vals)\n', (28426, 28445), True, 'import numpy as np\n'), ((28449, 28470), 'numpy.sum', 'np.sum', (['spec.cps_vals'], {}), '(spec.cps_vals)\n', (28455, 28470), True, 'import numpy as np\n'), ((29056, 29071), 'numpy.sqrt', 'np.sqrt', (['(s1 / f)'], {}), '(s1 / f)\n', (29063, 29071), True, 'import numpy as np\n'), ((34083, 34115), 'pytest.warns', 'pytest.warns', (['bq.SpectrumWarning'], {}), '(bq.SpectrumWarning)\n', (34095, 34115), False, 'import pytest\n'), ((34746, 34798), 'numpy.isclose', 'np.isclose', (['spec1.bin_edges_kev', 'spec2.bin_edges_kev'], {}), '(spec1.bin_edges_kev, spec2.bin_edges_kev)\n', (34756, 34798), True, 'import numpy as np\n'), ((6249, 6294), 'becquerel.Spectrum.from_listmode', 'bq.Spectrum.from_listmode', (['lmd'], {'is_cal': 'is_cal'}), '(lmd, is_cal=is_cal)\n', (6274, 6294), True, 'import becquerel as bq\n'), ((1474, 1508), 'becquerel.Calibration', 'bq.Calibration', (['"""p[0] * x"""', '[0.67]'], {}), "('p[0] * x', [0.67])\n", (1488, 1508), True, 'import becquerel as bq\n'), ((1279, 1310), 'numpy.arange', 'np.arange', (['(TEST_DATA_LENGTH + 1)'], {}), '(TEST_DATA_LENGTH + 1)\n', (1288, 1310), True, 'import numpy as np\n')] |
from __future__ import division, print_function, absolute_import
"""
Author: PinAxe
Project: Convolutional Auto Encoder Example.
A 7-layer auto-encoder built from TensorFlow convolutional layers,
trained on a noised MNIST set to perform denoising.
It also saves and automatically restores the model, and visualises the results.
The work is derived from source code by <NAME>. See References.
References:
https://towardsdatascience.com/autoencoders-introduction-and-implementation-3f40483b0a85
https://hackernoon.com/autoencoders-deep-learning-bits-1-11731e200694
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
https://towardsdatascience.com/autoencoders-introduction-and-implementation-3f40483b0a85
"""
import os.path
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
def show():  # Visualisation function
    """Display original, noised, and reconstructed MNIST digits side by side.

    Pulls a few batches from the MNIST test set, corrupts them with the
    module-level ``noise_factor``, runs the reconstruction (``logits``)
    through the active session, and shows three n*28 x n*28 grayscale
    canvases, one per image variant.
    """
    grid = 4
    side = 28 * grid
    originals = np.empty((side, side))
    corrupted = np.empty((side, side))
    restored = np.empty((side, side))
    for row in range(grid):  # one MNIST test batch per canvas row
        digits, _ = mnist.test.next_batch(grid)
        clean = digits[:].reshape((-1, 28, 28, 1))
        # Corrupt with Gaussian noise, then keep pixel values in [0, 1].
        noised = np.clip(clean + noise_factor * np.random.randn(*clean.shape), 0., 1.)
        # Reconstruction produced by the network for this noisy batch.
        recon = sess.run(logits, feed_dict={inputs_: noised})
        for col in range(grid):  # paste each digit into its grid cell
            rows = slice(row * 28, (row + 1) * 28)
            cols = slice(col * 28, (col + 1) * 28)
            originals[rows, cols] = np.squeeze(clean[col])
            corrupted[rows, cols] = np.squeeze(noised[col])
            restored[rows, cols] = np.squeeze(recon[col])
    # Show the three canvases in the same order (and with the same prints)
    # as before: figure first, then the title on stdout, then the image.
    for title, canvas in (("Original Images", originals),
                          ("Noisy Images", corrupted),
                          ("Reconstructed Images", restored)):
        plt.figure(figsize=(2, 2))
        print(title)
        plt.imshow(canvas, origin="upper", cmap="gray")
        plt.show()
    return ()
#===============================================================================
# main function
# ----- data and graph definition ---------------------------------------------
# Load MNIST (downloaded to /tmp/data/ on first run); labels are one-hot encoded.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
tf.reset_default_graph() # Avoids "Tensor name not found in checkpoint" errors when restoring a saved model
# Placeholders: noisy images are fed to `inputs_`; the clean originals are the targets.
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder: three conv + max-pool stages compress 28x28x1 down to a 4x4x16 code
conv1 = tf.layers.conv2d(inputs=inputs_, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=(2,2), strides=(2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, pool_size=(2,2), strides=(2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, pool_size=(2,2), strides=(2,2), padding='same')
# Now 4x4x16
### Decoder: nearest-neighbour upsampling + conv mirrors the encoder back to 28x28
upsample1 = tf.image.resize_images(encoded, size=(7,7), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 7x7x16
conv4 = tf.layers.conv2d(inputs=upsample1, filters=16, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_images(conv4, size=(14,14), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 14x14x16
conv5 = tf.layers.conv2d(inputs=upsample2, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_images(conv5, size=(28,28), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 28x28x32
conv6 = tf.layers.conv2d(inputs=upsample3, filters=32, kernel_size=(3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
# Final 1-channel convolution produces the raw reconstruction (no activation).
logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3,3), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
learning_rate = 0.001 #0.001
# Element-wise squared error between the clean targets and the raw logits
# (this is an MSE-style loss, not the cross-entropy loss mentioned upstream).
loss = tf.pow(targets_-logits, 2)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# you may try another optimiser
# opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#Training hyper-parameters:
epochs = 10#100
batch_size = 20 #200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5 #0.5
display_step = 30 #50   # log training loss / visualise every `display_step` batches
save_step = display_step*10   # write a checkpoint every `save_step` batches
# ----- training session ------------------------------------------------------
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # NOTE(review): Windows-style relative path; the lone backslash works here
    # only because "\m" is not a recognised escape sequence.
    file_path=".\modelCAE1.ckpt"
    # Restore a previous checkpoint if one exists (the .meta graph file is the
    # marker that a full checkpoint was written); otherwise train from scratch.
    if os.path.isfile(file_path+".meta") :
        saver.restore(sess, file_path)
        print(file_path,'-found')
    else:
        print(file_path,'-NOT found')
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch,_ = mnist.train.next_batch(batch_size)
            # Get images from the batch
            imgs = batch[:].reshape((-1, 28, 28, 1))
            # Add random noise to the input images
            noisy_imgs = imgs+ noise_factor * np.random.randn(*imgs.shape)
            # Clip the images to be between 0 and 1
            noisy_imgs = np.clip(noisy_imgs, 0., 1.)
            # Noisy images as inputs, original images as targets
            batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
                                                         targets_: imgs})
            # Periodically report the loss and visualise reconstructions.
            if ii % display_step == 0 or ii == 1:
                print("Epoch: {} of {}...".format(e+1, epochs),
                      'batch ', ii,": of ",mnist.train.num_examples//batch_size,
                      "Training loss: {:.5f}".format(batch_cost))
                show()
            # Checkpoint less frequently than we display (every save_step batches).
            if ii % save_step == 0:
                save_path = saver.save(sess=sess, save_path=file_path)
                print("Model saved in file: %s" % save_path)
| [
"numpy.clip",
"tensorflow.image.resize_images",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.reduce_mean",
"matplotlib.pyplot.imshow",
"tensorflow.pow",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.layers.conv2d",
"tensorflow.nn.sigmoid",
"numpy.emp... | [((2607, 2660), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""/tmp/data/"""'], {'one_hot': '(True)'}), "('/tmp/data/', one_hot=True)\n", (2632, 2660), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((2661, 2685), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2683, 2685), True, 'import tensorflow as tf\n'), ((2771, 2831), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 28, 28, 1)'], {'name': '"""inputs"""'}), "(tf.float32, (None, 28, 28, 1), name='inputs')\n", (2785, 2831), True, 'import tensorflow as tf\n'), ((2843, 2904), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 28, 28, 1)'], {'name': '"""targets"""'}), "(tf.float32, (None, 28, 28, 1), name='targets')\n", (2857, 2904), True, 'import tensorflow as tf\n'), ((2925, 3033), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'inputs_', 'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=inputs_, filters=32, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (2941, 3033), True, 'import tensorflow as tf\n'), ((3054, 3139), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv1'], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(conv1, pool_size=(2, 2), strides=(2, 2), padding='same'\n )\n", (3077, 3139), True, 'import tensorflow as tf\n'), ((3156, 3265), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'maxpool1', 'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=maxpool1, filters=32, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (3172, 3265), True, 'import tensorflow as tf\n'), ((3286, 3371), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv2'], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), 
"(conv2, pool_size=(2, 2), strides=(2, 2), padding='same'\n )\n", (3309, 3371), True, 'import tensorflow as tf\n'), ((3386, 3495), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'maxpool2', 'filters': '(16)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=maxpool2, filters=16, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (3402, 3495), True, 'import tensorflow as tf\n'), ((3513, 3598), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv3'], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(conv3, pool_size=(2, 2), strides=(2, 2), padding='same'\n )\n", (3536, 3598), True, 'import tensorflow as tf\n'), ((3629, 3725), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['encoded'], {'size': '(7, 7)', 'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(encoded, size=(7, 7), method=tf.image.ResizeMethod.\n NEAREST_NEIGHBOR)\n', (3651, 3725), True, 'import tensorflow as tf\n'), ((3741, 3851), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'upsample1', 'filters': '(16)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=upsample1, filters=16, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (3757, 3851), True, 'import tensorflow as tf\n'), ((3871, 3967), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['conv4'], {'size': '(14, 14)', 'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(conv4, size=(14, 14), method=tf.image.ResizeMethod.\n NEAREST_NEIGHBOR)\n', (3893, 3967), True, 'import tensorflow as tf\n'), ((3985, 4095), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'upsample2', 'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=upsample2, filters=32, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (4001, 4095), True, 'import tensorflow as tf\n'), ((4117, 
4213), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['conv5'], {'size': '(28, 28)', 'method': 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'}), '(conv5, size=(28, 28), method=tf.image.ResizeMethod.\n NEAREST_NEIGHBOR)\n', (4139, 4213), True, 'import tensorflow as tf\n'), ((4231, 4341), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'upsample3', 'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=upsample3, filters=32, kernel_size=(3, 3), padding=\n 'same', activation=tf.nn.relu)\n", (4247, 4341), True, 'import tensorflow as tf\n'), ((4360, 4459), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'conv6', 'filters': '(1)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': 'None'}), "(inputs=conv6, filters=1, kernel_size=(3, 3), padding=\n 'same', activation=None)\n", (4376, 4459), True, 'import tensorflow as tf\n'), ((4534, 4555), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (4547, 4555), True, 'import tensorflow as tf\n'), ((4660, 4688), 'tensorflow.pow', 'tf.pow', (['(targets_ - logits)', '(2)'], {}), '(targets_ - logits, 2)\n', (4666, 4688), True, 'import tensorflow as tf\n'), ((4730, 4750), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (4744, 4750), True, 'import tensorflow as tf\n'), ((1120, 1146), 'numpy.empty', 'np.empty', (['(28 * n, 28 * n)'], {}), '((28 * n, 28 * n))\n', (1128, 1146), True, 'import numpy as np\n'), ((1166, 1192), 'numpy.empty', 'np.empty', (['(28 * n, 28 * n)'], {}), '((28 * n, 28 * n))\n', (1174, 1192), True, 'import numpy as np\n'), ((1212, 1238), 'numpy.empty', 'np.empty', (['(28 * n, 28 * n)'], {}), '((28 * n, 28 * n))\n', (1220, 1238), True, 'import numpy as np\n'), ((2086, 2112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (2096, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2146, 2198), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['canvas_orig'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(canvas_orig, origin='upper', cmap='gray')\n", (2156, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2211, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2244), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (2228, 2244), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2328), 'matplotlib.pyplot.imshow', 'plt.imshow', (['canvas_noisy'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(canvas_noisy, origin='upper', cmap='gray')\n", (2285, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2341, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2348, 2374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (2358, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2466), 'matplotlib.pyplot.imshow', 'plt.imshow', (['canvas_recon'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(canvas_recon, origin='upper', cmap='gray')\n", (2423, 2466), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2479, 2481), True, 'import matplotlib.pyplot as plt\n'), ((5093, 5105), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5103, 5105), True, 'import tensorflow as tf\n'), ((5172, 5188), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5186, 5188), True, 'import tensorflow as tf\n'), ((1564, 1593), 'numpy.clip', 'np.clip', (['noisy_imgs', '(0.0)', '(1.0)'], {}), '(noisy_imgs, 0.0, 1.0)\n', (1571, 1593), True, 'import numpy as np\n'), ((4758, 4798), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4783, 4798), True, 'import tensorflow as tf\n'), ((5125, 5158), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5156, 5158), True, 'import tensorflow as tf\n'), ((1849, 1868), 'numpy.squeeze', 'np.squeeze', (['imgs[j]'], {}), '(imgs[j])\n', (1859, 1868), True, 'import numpy as np\n'), ((1938, 1963), 'numpy.squeeze', 'np.squeeze', (['noisy_imgs[j]'], {}), '(noisy_imgs[j])\n', (1948, 1963), True, 'import numpy as np\n'), ((2033, 2049), 'numpy.squeeze', 'np.squeeze', (['g[j]'], {}), '(g[j])\n', (2043, 2049), True, 'import numpy as np\n'), ((5785, 5814), 'numpy.clip', 'np.clip', (['noisy_imgs', '(0.0)', '(1.0)'], {}), '(noisy_imgs, 0.0, 1.0)\n', (5792, 5814), True, 'import numpy as np\n'), ((1466, 1494), 'numpy.random.randn', 'np.random.randn', (['*imgs.shape'], {}), '(*imgs.shape)\n', (1481, 1494), True, 'import numpy as np\n'), ((5687, 5715), 'numpy.random.randn', 'np.random.randn', (['*imgs.shape'], {}), '(*imgs.shape)\n', (5702, 5715), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from sklearn.utils.random import sample_without_replacement
def corrupt(X, y, outlier_ratio=0.1, random_state=None):
    """Return copies of (X, y) where a random subset of rows has feature 4 scaled by 0.1.

    The number of corrupted rows is int(outlier_ratio * len(y)); indices are
    drawn (with replacement) from the given random state, so repeated indices
    may corrupt fewer distinct rows than requested.
    """
    rng = check_random_state(random_state)
    n_samples = len(y)
    n_outliers = int(outlier_ratio * n_samples)
    W, z = X.copy(), y.copy()
    keep = np.ones(n_samples).astype(bool)
    keep[rng.choice(n_samples, n_outliers)] = False
    W[~keep, 4] *= 0.1
    return W, z
class ENOLS:
    """Ensemble of OLS regressors trained on random subsets of the data.

    For comparison, each random subset is also fitted with a Theil-Sen
    regressor and a stacking regressor; `predict` aggregates the per-subset
    predictions of all three model families.
    """

    def __init__(self, n_estimators=500, sample_size='auto'):
        """
        Parameters
        ----------
        n_estimators: number of OLS models to train
        sample_size: size of random subset used to train the OLS models, default to 'auto'
            - If 'auto': use subsets of size n_features+1 during training
            - If int: use subsets of size sample_size during training
            - If float: use subsets of size ceil(n_sample*sample_size) during training
        """
        self.n_estimators = n_estimators
        self.sample_size = sample_size

    def fit(self, X, y, random_state=None):
        """
        Train ENOLS on the given training set.

        Parameters
        ----------
        X: an input array of shape (n_sample, n_features)
        y: an array of shape (n_sample,) containing the classes for the input examples
        random_state: seed or RandomState controlling the subset sampling

        Return
        ------
        self: the fitted model
        """
        # use random instead of np.random to sample random numbers below
        random = check_random_state(random_state)
        estimators = [('lr', LinearRegression())]
        if isinstance(self.sample_size, int):
            # sample_without_replacement's `method` expects a strategy name
            self.sample_size = 'reservoir_sampling'

        # add all the trained models to these lists
        self.estimators_lr, self.estimators_TSR, self.estimators_enols = [], [], []
        for _ in range(self.n_estimators):
            samples = sample_without_replacement(n_population=random.choice([50, 100]),
                                                 n_samples=random.choice([10, 20]),
                                                 random_state=random_state, method=self.sample_size)
            # FIX: use a distinct inner loop variable so it no longer shadows
            # the outer estimator counter
            X_train, y_train = [], []
            for idx in samples:
                X_train.append(X[idx]), y_train.append(y[idx])

            reg = LinearRegression()
            reg.fit(np.array(X_train), np.array(y_train))
            tsr = TheilSenRegressor()
            tsr.fit(np.array(X_train), np.array(y_train))
            enol = StackingRegressor(estimators=estimators, final_estimator=LinearRegression())
            enol.fit(np.array(X_train), np.array(y_train))
            self.estimators_lr.append(reg), self.estimators_TSR.append(tsr), self.estimators_enols.append(enol)
        return self

    def predict(self, X, method='average'):
        """
        Parameters
        ----------
        X: an input array of shape (n_sample, n_features)
        method: 'median' or 'average', corresponding to predicting median and
            mean of the OLS models' predictions respectively.

        Returns
        -------
        (ols, ts_reg, enols): arrays of shape (n_samples,) holding the
            aggregated predictions of the OLS, Theil-Sen, and stacking
            ensembles respectively.
        """
        ols, ts_reg, enols = [], [], []
        for reg in self.estimators_lr:
            ols.append(reg.predict(X))
        for tsr in self.estimators_TSR:
            ts_reg.append(tsr.predict(X))
        for enol in self.estimators_enols:
            enols.append(enol.predict(X))
        if method == 'average':
            ols = np.average(ols, axis=0)
            enols = np.average(enols, axis=0)
            ts_reg = np.average(ts_reg, axis=0)
        else:
            # BUGFIX: the 'median' branch previously applied np.average to the
            # OLS predictions; use the median here as the docstring promises.
            ols = np.median(ols, axis=0)
            enols = np.median(enols, axis=0)
            ts_reg = np.median(ts_reg, axis=0)
        return ols, ts_reg, enols
if __name__ == '__main__':
    # Track the test MSE of each model family as the outlier ratio grows.
    mse_enols_curve, mse_tsr_curve, mse_ols_curve = [], [], []
    outlier_ratios = [0, 0.01, 0.05, 0.08, 0.1, 0.15, 0.2, 0.25, 0.30, 0.40, 0.50]
    for ratio in outlier_ratios:
        X, y = load_boston(return_X_y=True)
        X_tr, X_ts, y_tr, y_ts = train_test_split(X, y, test_size=0.3, random_state=42)
        W, z = corrupt(X_tr, y_tr, outlier_ratio=ratio, random_state=42)
        model = ENOLS(sample_size=42)
        model.fit(W, z, random_state=42)
        pred_ols, pred_tsr, pred_enols = model.predict(X_ts, method='median')
        mse_ols_curve.append(mean_squared_error(y_ts, pred_ols))
        mse_tsr_curve.append(mean_squared_error(y_ts, pred_tsr))
        mse_enols_curve.append(mean_squared_error(y_ts, pred_enols))
    # Plot all three MSE curves against the outlier ratio.
    plt.plot(outlier_ratios, mse_enols_curve, 'b', label="enols")
    plt.plot(outlier_ratios, mse_tsr_curve, 'r', label="tsr")
    plt.plot(outlier_ratios, mse_ols_curve, 'g', label="ols")
    plt.legend(loc="upper right")
    plt.xlabel('p', fontsize=18)
    plt.ylabel('mse', fontsize=16)
    plt.show()
| [
"sklearn.utils.check_random_state",
"numpy.median",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"numpy.average",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.datasets.load_boston",
"sklearn.linear_model.TheilSenRegressor",
"sklearn.metr... | [((485, 517), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (503, 517), False, 'from sklearn.utils import check_random_state\n'), ((4933, 4978), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'ensemble_ols', '"""b"""'], {'label': '"""enols"""'}), "(p, ensemble_ols, 'b', label='enols')\n", (4941, 4978), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5025), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'tsregressor', '"""r"""'], {'label': '"""tsr"""'}), "(p, tsregressor, 'r', label='tsr')\n", (4991, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5080), 'matplotlib.pyplot.plot', 'plt.plot', (['p', 'ordinaryleastsquare', '"""g"""'], {'label': '"""ols"""'}), "(p, ordinaryleastsquare, 'g', label='ols')\n", (5038, 5080), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5114), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5095, 5114), True, 'import matplotlib.pyplot as plt\n'), ((5119, 5147), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""p"""'], {'fontsize': '(18)'}), "('p', fontsize=18)\n", (5129, 5147), True, 'import matplotlib.pyplot as plt\n'), ((5152, 5182), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mse"""'], {'fontsize': '(16)'}), "('mse', fontsize=16)\n", (5162, 5182), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5195, 5197), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1874), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1860, 1874), False, 'from sklearn.utils import check_random_state\n'), ((4338, 4366), 'sklearn.datasets.load_boston', 'load_boston', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (4349, 4366), False, 'from sklearn.datasets import load_boston\n'), ((4400, 4454), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 
'(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (4416, 4454), False, 'from sklearn.model_selection import train_test_split\n'), ((4683, 4712), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_ts', 'ols'], {}), '(y_ts, ols)\n', (4701, 4712), False, 'from sklearn.metrics import mean_squared_error\n'), ((4734, 4766), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_ts', 'ts_reg'], {}), '(y_ts, ts_reg)\n', (4752, 4766), False, 'from sklearn.metrics import mean_squared_error\n'), ((4787, 4818), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_ts', 'enols'], {}), '(y_ts, enols)\n', (4805, 4818), False, 'from sklearn.metrics import mean_squared_error\n'), ((637, 655), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (644, 655), True, 'import numpy as np\n'), ((2627, 2645), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2643, 2645), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((2723, 2742), 'sklearn.linear_model.TheilSenRegressor', 'TheilSenRegressor', ([], {}), '()\n', (2740, 2742), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((3843, 3866), 'numpy.average', 'np.average', (['ols'], {'axis': '(0)'}), '(ols, axis=0)\n', (3853, 3866), True, 'import numpy as np\n'), ((3887, 3912), 'numpy.average', 'np.average', (['enols'], {'axis': '(0)'}), '(enols, axis=0)\n', (3897, 3912), True, 'import numpy as np\n'), ((3934, 3960), 'numpy.average', 'np.average', (['ts_reg'], {'axis': '(0)'}), '(ts_reg, axis=0)\n', (3944, 3960), True, 'import numpy as np\n'), ((3993, 4016), 'numpy.average', 'np.average', (['ols'], {'axis': '(0)'}), '(ols, axis=0)\n', (4003, 4016), True, 'import numpy as np\n'), ((4037, 4061), 'numpy.median', 'np.median', (['enols'], {'axis': '(0)'}), '(enols, axis=0)\n', (4046, 4061), True, 'import numpy as np\n'), ((4083, 4108), 'numpy.median', 'np.median', (['ts_reg'], 
{'axis': '(0)'}), '(ts_reg, axis=0)\n', (4092, 4108), True, 'import numpy as np\n'), ((1905, 1923), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1921, 1923), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n'), ((2666, 2683), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2674, 2683), True, 'import numpy as np\n'), ((2685, 2702), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2693, 2702), True, 'import numpy as np\n'), ((2763, 2780), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2771, 2780), True, 'import numpy as np\n'), ((2782, 2799), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2790, 2799), True, 'import numpy as np\n'), ((2919, 2936), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2927, 2936), True, 'import numpy as np\n'), ((2938, 2955), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2946, 2955), True, 'import numpy as np\n'), ((2878, 2896), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2894, 2896), False, 'from sklearn.linear_model import LinearRegression, TheilSenRegressor\n')] |
import openl3
from pathlib import Path
import numpy as np
import librosa
import tensorflow_hub as hub
class AudioL3:
    """Wraps an OpenL3 audio embedding model and exposes clip-level embeddings."""

    def __init__(self, input_repr: str = 'mel256', content_type: str = 'music', embedding_size: int = 512) -> None:
        # Load the OpenL3 model once; it is reused for every embedding call.
        self.model = openl3.models.load_audio_embedding_model(
            input_repr=input_repr,
            content_type=content_type,
            embedding_size=embedding_size)

    def get_embedding(self,
                      audio: Path,
                      sr: int = 16000,
                      batch_size: int = 1,
                      hop_size: float = 0.5,
                      center: bool = True) -> np.ndarray:
        """Load `audio`, embed it with OpenL3, and return a time-averaged embedding."""
        waveform, _ = librosa.load(audio, sr=sr)
        frame_embeddings, _ = openl3.get_audio_embedding(
            waveform,
            sr,
            model=self.model,
            batch_size=batch_size,
            hop_size=hop_size,
            center=center,
            verbose=False)
        # Drop the first frame, then average the remaining frames over time.
        return np.mean(frame_embeddings[1:], axis=0)
class YamNet:
    """Wraps the TF-Hub YAMNet model and exposes clip-level embeddings."""

    def __init__(self) -> None:
        # Download/load the published YAMNet model from TF-Hub.
        self.model = hub.load('https://tfhub.dev/google/yamnet/1')

    def get_embedding(self, audio: Path, sr: int = 16000) -> np.ndarray:
        """Load `audio`, run YAMNet, and return a time-averaged embedding."""
        waveform, _ = librosa.load(audio, sr=sr)
        # YAMNet returns (scores, embeddings, spectrogram); keep the embeddings.
        _, frame_embeddings, _ = self.model(waveform)
        return np.mean(frame_embeddings, axis=0)
| [
"numpy.mean",
"openl3.models.load_audio_embedding_model",
"tensorflow_hub.load",
"openl3.get_audio_embedding",
"librosa.load"
] | [((258, 383), 'openl3.models.load_audio_embedding_model', 'openl3.models.load_audio_embedding_model', ([], {'input_repr': 'input_repr', 'content_type': 'content_type', 'embedding_size': 'embedding_size'}), '(input_repr=input_repr,\n content_type=content_type, embedding_size=embedding_size)\n', (298, 383), False, 'import openl3\n'), ((789, 815), 'librosa.load', 'librosa.load', (['audio'], {'sr': 'sr'}), '(audio, sr=sr)\n', (801, 815), False, 'import librosa\n'), ((865, 992), 'openl3.get_audio_embedding', 'openl3.get_audio_embedding', (['y', 'sr'], {'model': 'self.model', 'batch_size': 'batch_size', 'hop_size': 'hop_size', 'center': 'center', 'verbose': '(False)'}), '(y, sr, model=self.model, batch_size=batch_size,\n hop_size=hop_size, center=center, verbose=False)\n', (891, 992), False, 'import openl3\n'), ((1295, 1315), 'numpy.mean', 'np.mean', (['emb'], {'axis': '(0)'}), '(emb, axis=0)\n', (1302, 1315), True, 'import numpy as np\n'), ((1406, 1451), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/yamnet/1"""'], {}), "('https://tfhub.dev/google/yamnet/1')\n", (1414, 1451), True, 'import tensorflow_hub as hub\n'), ((1562, 1588), 'librosa.load', 'librosa.load', (['audio'], {'sr': 'sr'}), '(audio, sr=sr)\n', (1574, 1588), False, 'import librosa\n'), ((1668, 1688), 'numpy.mean', 'np.mean', (['emb'], {'axis': '(0)'}), '(emb, axis=0)\n', (1675, 1688), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Source:
Content: File includes calculation of values on cubic Bézier curve.
"""
import numpy as np
def getBezierValue(uVal, b):
    """Evaluate a cubic Bezier curve at one parameter value.

    Args:
        uVal (float): the curve parameter u.
        b (list): [b0, b1, b2, b3] control and Bezier points.

    Returns:
        The point on the curve (same type as the entries of b).
    """
    w = 1 - uVal
    # Bernstein basis of degree 3 evaluated at uVal.
    return b[0]*(w**3) + b[1]*3*uVal*(w**2) + b[2]*3*(uVal**2)*w + b[3]*(uVal**3)
def getBezierValues(b, N):
    """Sample N points on a cubic Bezier curve at evenly spaced parameters in [0, 1].

    Args:
        b (list): [b0, b1, b2, b3] control and Bezier points.
        N (int): number of points on the curve.

    Returns:
        list: the sampled curve points.
    """
    return [getBezierValue(u, b) for u in np.linspace(0, 1, N)]
| [
"numpy.linspace"
] | [((743, 763), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (754, 763), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Plotting planet population Joint PDF
Written By: <NAME>
2/1/2019
"""
try:
import cPickle as pickle
except:
import pickle
import os
if not 'DISPLAY' in os.environ.keys(): #Check environment for keys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
import numpy as np
from numpy import nan
import argparse
import json
import sys, os.path, EXOSIMS, EXOSIMS.MissionSim
import astropy.units as u
import copy
import random
import datetime
import re
from EXOSIMS.util.vprint import vprint
from copy import deepcopy
from astropy.io import fits
import scipy.interpolate
import astropy.units as u
import numpy as np
from EXOSIMS.MissionSim import MissionSim
import numbers
from scipy import interpolate
from matplotlib import ticker, cm
class plotCompletenessJointPDFs(object):
    """Plotting utility to reproduce Completeness Joint PDF"""
    _modtype = 'util'

    def __init__(self, args=None):
        """
        Args:
            args: optional argument dict forwarded by the plotting framework
        """
        vprint(args)
        vprint('plotCompletenessJointPDFs done')

    def singleRunPostProcessing(self, PPoutpath, folder):
        """Generates a single yield histogram for the run_type
        Args:
            PPoutpath (string) - output path to place data in
            folder (string) - full filepath to folder containing runs
        """
        if not os.path.exists(folder):
            raise ValueError('%s not found'%folder)
        outspecPath = os.path.join(folder,'outspec.json')
        try:
            with open(outspecPath, 'rb') as g:
                outspec = json.load(g)
        except Exception:  # best effort: report the failure; MissionSim below will fail loudly
            vprint('Failed to open outspecfile %s'%outspecPath)

        # Create Simulation Object from the run's output specification
        sim = EXOSIMS.MissionSim.MissionSim(scriptfile=None, nopar=True, **outspec)

        self.plotJointPDF(sim, PPoutpath, folder)

    def plotJointPDF(self, sim, PPoutpath, folder):
        """Plot the joint PDF of apparent separation s vs. delta-mag and save it.
        Args:
            sim: EXOSIMS MissionSim object supplying the completeness PDF
            PPoutpath: output path to place the plot files in
            folder: full filepath to the run folder (used to name the files)
        Returns:
            None
        """
        xnew = sim.SurveySimulation.Completeness.xnew #this pulls an array of star-planet distances based on rrange
        dMag = np.linspace(start=10.,stop=50.,num=200)

        xmin = np.min(xnew)
        xmax = np.max(xnew)
        ymin = np.min(dMag)
        ymax = np.max(dMag)

        # Evaluate the completeness PDF over the (s, dMag) grid, one dMag row at a time
        f = list()
        for k, dm in enumerate(dMag):
            f.append(sim.SurveySimulation.Completeness.EVPOCpdf(xnew,dm)[:,0])
        f = np.asarray(f)
        f[ 10**-5. >= f] = np.nan  # mask negligible densities so the log contours behave
        maxf = np.ceil(np.log10(np.nanmax(f)))
        minf = np.floor(np.log10(np.nanmin(f)))
        # BUGFIX: np.linspace requires an integer `num`; maxf/minf are floats
        levelList = [10**x for x in np.linspace(start=minf,stop=maxf,num=int(maxf-minf+1), endpoint=True)]

        # Limit the axes to where the PDF reaches at least 1e-5
        maxXIndinRows = [np.max(np.where(f[i,:]>=1e-5)) for i in np.arange(len(f)) if np.any(f[i,:]>=1e-5)]
        maxYIndinCols = [np.max(np.where(f[:,j]>=1e-5)) for j in np.arange(len(f[0,:])) if np.any(f[:,j]>=1e-5)]
        xlims = [xmin,xnew[np.max(maxXIndinRows)]] # based on where furthest right of 1e-5 occurs
        ylims = [ymin,dMag[np.max(maxYIndinCols)]]

        plt.close(351687)
        plt.rc('axes',linewidth=2)
        plt.rc('lines',linewidth=2)
        plt.rcParams['axes.linewidth']=2
        plt.rc('font',weight='bold')
        fig = plt.figure(351687)
        ax1 = plt.subplot(111)
        # BUGFIX: dropped the misspelled 'intepolation' kwarg; contourf has no
        # such parameter and newer matplotlib rejects unknown keyword arguments
        CS = ax1.contourf(xnew,dMag,f, levels=levelList, extent=[xlims[0], xlims[1], ylims[0], ylims[1]], cmap='bwr', locator=ticker.LogLocator())
        CS2 = ax1.contour(CS, levels=levelList, extent=[xlims[0], xlims[1], ylims[0], ylims[1]], linewidths=2.0,colors='k')
        #ATTEMPTING TO ADD CONTOUR LABELS plt.clabel(CS2, fmt='%2.1f', colors='k', fontsize=12)
        ax1.set_xlim(xlims)
        ax1.set_ylim(ylims)
        cbar = fig.colorbar(CS)
        plt.xlabel(r'$s$ (AU)',weight='bold')
        plt.ylabel(r'$\Delta$mag',weight='bold')
        plt.show(block=False)

        # Save to a File
        # BUGFIX: unicode() does not exist in Python 3; str() is the equivalent
        date = str(datetime.datetime.now())
        date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
        fname = 'completenessJoinfPDF_' + folder.split('/')[-1] + '_' + date
        plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500)
        plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
        plt.savefig(os.path.join(PPoutpath, fname + '.eps'), format='eps', dpi=500)
        plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500)
| [
"matplotlib.ticker.LogLocator",
"matplotlib.pyplot.ylabel",
"numpy.nanmin",
"os.path.exists",
"re.split",
"numpy.where",
"matplotlib.pyplot.xlabel",
"EXOSIMS.util.vprint.vprint",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.nanmax",
"numpy.min",
"os.e... | [((262, 283), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (276, 283), False, 'import matplotlib\n'), ((189, 206), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (204, 206), False, 'import os\n'), ((1022, 1034), 'EXOSIMS.util.vprint.vprint', 'vprint', (['args'], {}), '(args)\n', (1028, 1034), False, 'from EXOSIMS.util.vprint import vprint\n'), ((1043, 1083), 'EXOSIMS.util.vprint.vprint', 'vprint', (['"""plotCompletenessJointPDFs done"""'], {}), "('plotCompletenessJointPDFs done')\n", (1049, 1083), False, 'from EXOSIMS.util.vprint import vprint\n'), ((1520, 1556), 'os.path.join', 'os.path.join', (['folder', '"""outspec.json"""'], {}), "(folder, 'outspec.json')\n", (1532, 1556), False, 'import os\n'), ((1801, 1870), 'EXOSIMS.MissionSim.MissionSim', 'EXOSIMS.MissionSim.MissionSim', ([], {'scriptfile': 'None', 'nopar': '(True)'}), '(scriptfile=None, nopar=True, **outspec)\n', (1830, 1870), False, 'import sys, os.path, EXOSIMS, EXOSIMS.MissionSim\n'), ((2235, 2278), 'numpy.linspace', 'np.linspace', ([], {'start': '(10.0)', 'stop': '(50.0)', 'num': '(200)'}), '(start=10.0, stop=50.0, num=200)\n', (2246, 2278), True, 'import numpy as np\n'), ((2290, 2302), 'numpy.min', 'np.min', (['xnew'], {}), '(xnew)\n', (2296, 2302), True, 'import numpy as np\n'), ((2318, 2330), 'numpy.max', 'np.max', (['xnew'], {}), '(xnew)\n', (2324, 2330), True, 'import numpy as np\n'), ((2346, 2358), 'numpy.min', 'np.min', (['dMag'], {}), '(dMag)\n', (2352, 2358), True, 'import numpy as np\n'), ((2374, 2386), 'numpy.max', 'np.max', (['dMag'], {}), '(dMag)\n', (2380, 2386), True, 'import numpy as np\n'), ((2536, 2549), 'numpy.asarray', 'np.asarray', (['f'], {}), '(f)\n', (2546, 2549), True, 'import numpy as np\n'), ((3296, 3313), 'matplotlib.pyplot.close', 'plt.close', (['(351687)'], {}), '(351687)\n', (3305, 3313), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3349), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'linewidth': '(2)'}), 
"('axes', linewidth=2)\n", (3328, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3385), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'linewidth': '(2)'}), "('lines', linewidth=2)\n", (3363, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3434, 3463), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'weight': '"""bold"""'}), "('font', weight='bold')\n", (3440, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3495), 'matplotlib.pyplot.figure', 'plt.figure', (['(351687)'], {}), '(351687)\n', (3487, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3526), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3521, 3526), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$s$ (AU)"""'], {'weight': '"""bold"""'}), "('$s$ (AU)', weight='bold')\n", (4026, 4053), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4103), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta$mag"""'], {'weight': '"""bold"""'}), "('$\\\\Delta$mag', weight='bold')\n", (4072, 4103), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4132), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4119, 4132), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1444), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1436, 1444), False, 'import os\n'), ((4182, 4205), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4203, 4205), False, 'import datetime\n'), ((4400, 4439), 'os.path.join', 'os.path.join', (['PPoutpath', "(fname + '.png')"], {}), "(PPoutpath, fname + '.png')\n", (4412, 4439), False, 'import os\n'), ((4484, 4523), 'os.path.join', 'os.path.join', (['PPoutpath', "(fname + '.svg')"], {}), "(PPoutpath, fname + '.svg')\n", (4496, 4523), False, 'import os\n'), ((4545, 4584), 'os.path.join', 'os.path.join', (['PPoutpath', "(fname + '.eps')"], {}), "(PPoutpath, fname + '.eps')\n", (4557, 4584), 
False, 'import os\n'), ((4629, 4668), 'os.path.join', 'os.path.join', (['PPoutpath', "(fname + '.pdf')"], {}), "(PPoutpath, fname + '.pdf')\n", (4641, 4668), False, 'import os\n'), ((1642, 1654), 'json.load', 'json.load', (['g'], {}), '(g)\n', (1651, 1654), False, 'import json\n'), ((1683, 1736), 'EXOSIMS.util.vprint.vprint', 'vprint', (["('Failed to open outspecfile %s' % outspecPath)"], {}), "('Failed to open outspecfile %s' % outspecPath)\n", (1689, 1736), False, 'from EXOSIMS.util.vprint import vprint\n'), ((2616, 2628), 'numpy.nanmax', 'np.nanmax', (['f'], {}), '(f)\n', (2625, 2628), True, 'import numpy as np\n'), ((2664, 2676), 'numpy.nanmin', 'np.nanmin', (['f'], {}), '(f)\n', (2673, 2676), True, 'import numpy as np\n'), ((2715, 2785), 'numpy.linspace', 'np.linspace', ([], {'start': 'minf', 'stop': 'maxf', 'num': '(maxf - minf + 1)', 'endpoint': '(True)'}), '(start=minf, stop=maxf, num=maxf - minf + 1, endpoint=True)\n', (2726, 2785), True, 'import numpy as np\n'), ((2942, 2968), 'numpy.where', 'np.where', (['(f[i, :] >= 1e-05)'], {}), '(f[i, :] >= 1e-05)\n', (2950, 2968), True, 'import numpy as np\n'), ((2996, 3020), 'numpy.any', 'np.any', (['(f[i, :] >= 1e-05)'], {}), '(f[i, :] >= 1e-05)\n', (3002, 3020), True, 'import numpy as np\n'), ((3050, 3076), 'numpy.where', 'np.where', (['(f[:, j] >= 1e-05)'], {}), '(f[:, j] >= 1e-05)\n', (3058, 3076), True, 'import numpy as np\n'), ((3110, 3134), 'numpy.any', 'np.any', (['(f[:, j] >= 1e-05)'], {}), '(f[:, j] >= 1e-05)\n', (3116, 3134), True, 'import numpy as np\n'), ((3159, 3180), 'numpy.max', 'np.max', (['maxXIndinRows'], {}), '(maxXIndinRows)\n', (3165, 3180), True, 'import numpy as np\n'), ((3257, 3278), 'numpy.max', 'np.max', (['maxYIndinCols'], {}), '(maxYIndinCols)\n', (3263, 3278), True, 'import numpy as np\n'), ((3678, 3697), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {}), '()\n', (3695, 3697), False, 'from matplotlib import ticker, cm\n'), ((4247, 4270), 're.split', 're.split', (['"""-|:| 
"""', 'date'], {}), "('-|:| ', date)\n", (4255, 4270), False, 'import re\n')] |
# python3.7
"""Implements image cropping."""
import numpy as np
try:
import nvidia.dali.fn as fn
except ImportError:
fn = None
try:
import cupy
except ImportError:
cupy = None
from utils.formatting_utils import format_image_size
from .base_transformation import BaseTransformation
__all__ = ['CenterCrop', 'RandomCrop', 'LongSideCrop']
class CenterCrop(BaseTransformation):
    """Applies central cropping to images.

    Args:
        crop_size: Size of the cropped image, which is assumed with order
            (height, width).
    """

    def __init__(self, crop_size):
        super().__init__(support_dali=(fn is not None))
        self.crop_size = format_image_size(crop_size)

    def _CPU_forward(self, data):
        crop_h = self.crop_size[0]
        crop_w = self.crop_size[1]
        outputs = []
        for image in data:
            height, width = image.shape[:2]
            # Nothing to do when the image already matches the target size.
            if height == crop_h and width == crop_w:
                outputs.append(image)
                continue
            if height < crop_h:
                raise ValueError(f'Cropping height `{crop_h}` is '
                                 f'larger than image height `{height}`!')
            if width < crop_w:
                raise ValueError(f'Cropping width `{crop_w}` is '
                                 f'larger than image width `{width}`!')
            # Anchor the crop window at the image center.
            top = (height - crop_h) // 2
            left = (width - crop_w) // 2
            cropped = image[top:top + crop_h, left:left + crop_w]
            outputs.append(np.ascontiguousarray(cropped))
        return outputs

    def _DALI_forward(self, data):
        # crop_pos 0.5 places the crop window at the image center.
        return fn.crop(data,
                       crop_pos_x=0.5,
                       crop_pos_y=0.5,
                       crop_w=self.crop_size[1],
                       crop_h=self.crop_size[0],
                       out_of_bounds_policy='error')
class RandomCrop(BaseTransformation):
    """Applies random cropping to images.

    Args:
        crop_size: Size of the cropped image, which is assumed with order
            (height, width).
    """

    def __init__(self, crop_size):
        super().__init__(support_dali=(fn is not None))
        self.crop_size = format_image_size(crop_size)

    def _CPU_forward(self, data):
        # Draw one crop anchor and reuse it for every image in the batch.
        pos_y = np.random.uniform()
        pos_x = np.random.uniform()
        crop_h = self.crop_size[0]
        crop_w = self.crop_size[1]
        outputs = []
        for image in data:
            height, width = image.shape[:2]
            # Nothing to do when the image already matches the target size.
            if height == crop_h and width == crop_w:
                outputs.append(image)
                continue
            if height < crop_h:
                raise ValueError(f'Cropping height `{crop_h}` is '
                                 f'larger than image height `{height}`!')
            if width < crop_w:
                raise ValueError(f'Cropping width `{crop_w}` is '
                                 f'larger than image width `{width}`!')
            top = int((height - crop_h) * pos_y)
            left = int((width - crop_w) * pos_x)
            cropped = image[top:top + crop_h, left:left + crop_w]
            outputs.append(np.ascontiguousarray(cropped))
        return outputs

    def _DALI_forward(self, data):
        # Sample the crop anchor uniformly in [0, 1] along each axis.
        pos_y = fn.random.uniform(range=(0, 1))
        pos_x = fn.random.uniform(range=(0, 1))
        return fn.crop(data,
                       crop_pos_x=pos_x,
                       crop_pos_y=pos_y,
                       crop_w=self.crop_size[1],
                       crop_h=self.crop_size[0],
                       out_of_bounds_policy='error')
class LongSideCrop(BaseTransformation):
    """Crops a square region from images along the long side.

    The length of the short side will be kept.

    NOTE: This transformation applies a customized python operation (with CuPy)
    for DALI forwarding, which may disable parallel data pre-processing.

    Args:
        center_crop: Whether to centrally crop the image along the long side.
            (default: True)
    """

    def __init__(self, center_crop=True):
        super().__init__(support_dali=(fn is not None and cupy is not None))
        self._has_customized_function_for_dali = True
        self.center_crop = center_crop

    def _CPU_forward(self, data):
        # One shared crop position for the whole batch; random only when
        # central cropping is disabled.
        pos = 0.5 if self.center_crop else np.random.uniform()
        outputs = []
        for image in data:
            height, width = image.shape[:2]
            # Already square: keep the image as-is.
            if height == width:
                outputs.append(image)
                continue
            side = min(height, width)
            top = int((height - side) * pos)
            left = int((width - side) * pos)
            outputs.append(np.ascontiguousarray(
                image[top:top + side, left:left + side]))
        return outputs

    def _DALI_forward(self, data):
        # Customized GPU operation implemented with cupy.
        def helper(*images):
            pos = 0.5 if self.center_crop else cupy.random.uniform()
            results = []
            for image in images:
                height, width = image.shape[:2]
                if height == width:
                    results.append(image)
                    continue
                side = min(height, width)
                top = int((height - side) * pos)
                left = int((width - side) * pos)
                results.append(cupy.ascontiguousarray(
                    image[top:top + side, left:left + side]))
            return tuple(results)

        return fn.python_function(
            *data, device='gpu', function=helper, num_outputs=len(data))
| [
"cupy.ascontiguousarray",
"nvidia.dali.fn.random.uniform",
"numpy.ascontiguousarray",
"cupy.random.uniform",
"numpy.random.uniform",
"utils.formatting_utils.format_image_size",
"nvidia.dali.fn.crop"
] | [((681, 709), 'utils.formatting_utils.format_image_size', 'format_image_size', (['crop_size'], {}), '(crop_size)\n', (698, 709), False, 'from utils.formatting_utils import format_image_size\n'), ((1657, 1788), 'nvidia.dali.fn.crop', 'fn.crop', (['data'], {'crop_pos_x': '(0.5)', 'crop_pos_y': '(0.5)', 'crop_w': 'self.crop_size[1]', 'crop_h': 'self.crop_size[0]', 'out_of_bounds_policy': '"""error"""'}), "(data, crop_pos_x=0.5, crop_pos_y=0.5, crop_w=self.crop_size[1],\n crop_h=self.crop_size[0], out_of_bounds_policy='error')\n", (1664, 1788), True, 'import nvidia.dali.fn as fn\n'), ((2222, 2250), 'utils.formatting_utils.format_image_size', 'format_image_size', (['crop_size'], {}), '(crop_size)\n', (2239, 2250), False, 'from utils.formatting_utils import format_image_size\n'), ((2307, 2326), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2324, 2326), True, 'import numpy as np\n'), ((2348, 2367), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2365, 2367), True, 'import numpy as np\n'), ((3313, 3344), 'nvidia.dali.fn.random.uniform', 'fn.random.uniform', ([], {'range': '(0, 1)'}), '(range=(0, 1))\n', (3330, 3344), True, 'import nvidia.dali.fn as fn\n'), ((3366, 3397), 'nvidia.dali.fn.random.uniform', 'fn.random.uniform', ([], {'range': '(0, 1)'}), '(range=(0, 1))\n', (3383, 3397), True, 'import nvidia.dali.fn as fn\n'), ((3413, 3559), 'nvidia.dali.fn.crop', 'fn.crop', (['data'], {'crop_pos_x': 'crop_pos_x', 'crop_pos_y': 'crop_pos_y', 'crop_w': 'self.crop_size[1]', 'crop_h': 'self.crop_size[0]', 'out_of_bounds_policy': '"""error"""'}), "(data, crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y, crop_w=self.\n crop_size[1], crop_h=self.crop_size[0], out_of_bounds_policy='error')\n", (3420, 3559), True, 'import nvidia.dali.fn as fn\n'), ((4443, 4462), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4460, 4462), True, 'import numpy as np\n'), ((1487, 1564), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[y:y + 
self.crop_size[0], x:x + self.crop_size[1]]'], {}), '(image[y:y + self.crop_size[0], x:x + self.crop_size[1]])\n', (1507, 1564), True, 'import numpy as np\n'), ((3137, 3214), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[y:y + self.crop_size[0], x:x + self.crop_size[1]]'], {}), '(image[y:y + self.crop_size[0], x:x + self.crop_size[1]])\n', (3157, 3214), True, 'import numpy as np\n'), ((4826, 4887), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image[y:y + crop_size, x:x + crop_size]'], {}), '(image[y:y + crop_size, x:x + crop_size])\n', (4846, 4887), True, 'import numpy as np\n'), ((5162, 5183), 'cupy.random.uniform', 'cupy.random.uniform', ([], {}), '()\n', (5181, 5183), False, 'import cupy\n'), ((5589, 5652), 'cupy.ascontiguousarray', 'cupy.ascontiguousarray', (['image[y:y + crop_size, x:x + crop_size]'], {}), '(image[y:y + crop_size, x:x + crop_size])\n', (5611, 5652), False, 'import cupy\n')] |
import numpy as np
import nept
def bayesian_prob(counts, tuning_curves, binsize, min_neurons, min_spikes=1):
    """Computes the bayesian probability of location based on spike counts.
    Parameters
    ----------
    counts : nept.AnalogSignal
        Where each inner array is the number of spikes (int) in each bin for an individual neuron.
    tuning_curves : np.array
        Where each inner array is the tuning curve (floats) for an individual neuron.
    binsize : float or np.array
        Size of the time bins. If np.array, must be the same length as counts.
    min_neurons : int
        Mininum number of neurons active in a given bin.
    min_spikes : int
        Mininum number of spikes in a given bin.
    Returns
    -------
    prob : np.array
        Where each inner array is the probability (floats) for an individual neuron by location bins.
    Notes
    -----
    If a bin does not meet the min_neuron/min_spikes requirement, that bin's probability
    is set to nan. To convert it to 0s instead, use : prob[np.isnan(prob)] = 0 on the output.
    """
    n_time_bins = np.shape(counts.time)[0]
    n_position_bins = np.shape(tuning_curves)[1]
    if not isinstance(binsize, float):
        binsize = np.asarray(binsize)
        if np.asarray(binsize).size != n_time_bins:
            raise ValueError(
                "binsize must be a float or the same length as counts.time."
            )
    # One row per time bin, one column per position bin; start as all-nan.
    likelihood = np.empty((n_time_bins, n_position_bins)) * np.nan
    # Ignore warnings when inf created in this loop
    error_settings = np.seterr(over="ignore")
    for idx in range(n_position_bins):
        valid_idx = tuning_curves[:, idx] > 1  # log of 1 or less is negative or invalid
        if np.any(valid_idx):
            # event_rate is the lambda in this poisson distribution
            event_rate = (
                tuning_curves[valid_idx, idx, np.newaxis].T ** counts.data[:, valid_idx]
            )
            # Poisson normalization term for the summed firing rate in this bin.
            prior = np.exp(-binsize * np.sum(tuning_curves[valid_idx, idx]))
            # Below is the same as
            # likelihood[:, idx] = np.prod(event_rate, axis=0) * prior * (1/n_position_bins)
            # only less likely to have floating point issues, though slower
            likelihood[:, idx] = (
                np.exp(np.sum(np.log(event_rate), axis=1))
                * prior
                * (1 / n_position_bins)
            )
    # Restore the caller's floating-point error handling.
    np.seterr(**error_settings)
    # Set any inf value to be largest float
    largest_float = np.finfo(float).max
    likelihood[np.isinf(likelihood)] = largest_float
    # Normalize each time bin so the position probabilities sum to 1.
    likelihood /= np.nansum(likelihood, axis=1)[..., np.newaxis]
    # Remove bins with too few neurons that that are active
    # a neuron is considered active by having at least min_spikes in a bin
    n_active_neurons = np.sum(counts.data >= min_spikes, axis=1)
    likelihood[n_active_neurons < min_neurons] = np.nan
    return likelihood
def decode_location(likelihood, pos_centers, time_centers):
    """Finds the decoded location based on the centers of the position bins.
    Parameters
    ----------
    likelihood : np.array
        With shape(n_timebins, n_positionbins)
    pos_centers : np.array
    time_centers : np.array
    Returns
    -------
    decoded : nept.Position
        Estimate of decoded position.
    """
    n_position_bins = likelihood.shape[1]
    # Keep only the time bins that have at least one non-nan probability.
    valid = np.sum(np.isnan(likelihood), axis=1) < n_position_bins
    usable = likelihood[valid]
    # Most probable position bin for each remaining time bin.
    best_bins = np.nanargmax(usable, axis=1)
    return nept.Position(pos_centers[best_bins], time_centers[valid])
def remove_teleports(position, speed_thresh, min_length):
    """Removes positions above a certain speed threshold.
    Parameters
    ----------
    position : nept.Position
    speed_thresh : int
        Maximum speed to consider natural rat movements. Anything
        above this theshold will not be included in the filtered positions.
    min_length : int
        Minimum length for a sequence to be included in filtered positions.
    Returns
    -------
    filtered_position : nept.Epoch
    """
    # TODO: implement with run_threshold to simplify
    speeds = np.squeeze(position.speed().data)
    # Indices where the speed is implausibly high; split the samples there.
    breakpoints = np.where(speeds >= speed_thresh)[0]
    segments = np.split(np.arange(position.n_samples), breakpoints)
    kept = [segment for segment in segments if segment.size >= min_length]
    if not kept:
        return nept.Epoch([], [])
    # Only segments with more than one sample produce a non-degenerate epoch.
    starts = []
    stops = []
    for segment in kept:
        if len(segment) > 1:
            starts.append(position.time[segment[0]])
            stops.append(position.time[segment[-1]])
    return nept.Epoch(starts, stops)
| [
"numpy.nanargmax",
"nept.Position",
"numpy.where",
"numpy.nansum",
"numpy.log",
"numpy.asarray",
"nept.Epoch",
"numpy.any",
"numpy.sum",
"numpy.empty",
"numpy.isnan",
"numpy.finfo",
"numpy.shape",
"numpy.isinf",
"numpy.seterr",
"numpy.arange"
] | [((1569, 1593), 'numpy.seterr', 'np.seterr', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (1578, 1593), True, 'import numpy as np\n'), ((2408, 2435), 'numpy.seterr', 'np.seterr', ([], {}), '(**error_settings)\n', (2417, 2435), True, 'import numpy as np\n'), ((2798, 2839), 'numpy.sum', 'np.sum', (['(counts.data >= min_spikes)'], {'axis': '(1)'}), '(counts.data >= min_spikes, axis=1)\n', (2804, 2839), True, 'import numpy as np\n'), ((3452, 3484), 'numpy.nanargmax', 'np.nanargmax', (['likelihood'], {'axis': '(1)'}), '(likelihood, axis=1)\n', (3464, 3484), True, 'import numpy as np\n'), ((3589, 3630), 'nept.Position', 'nept.Position', (['decoded_data', 'decoded_time'], {}), '(decoded_data, decoded_time)\n', (3602, 3630), False, 'import nept\n'), ((4771, 4796), 'nept.Epoch', 'nept.Epoch', (['starts', 'stops'], {}), '(starts, stops)\n', (4781, 4796), False, 'import nept\n'), ((1101, 1122), 'numpy.shape', 'np.shape', (['counts.time'], {}), '(counts.time)\n', (1109, 1122), True, 'import numpy as np\n'), ((1148, 1171), 'numpy.shape', 'np.shape', (['tuning_curves'], {}), '(tuning_curves)\n', (1156, 1171), True, 'import numpy as np\n'), ((1233, 1252), 'numpy.asarray', 'np.asarray', (['binsize'], {}), '(binsize)\n', (1243, 1252), True, 'import numpy as np\n'), ((1445, 1485), 'numpy.empty', 'np.empty', (['(n_time_bins, n_position_bins)'], {}), '((n_time_bins, n_position_bins))\n', (1453, 1485), True, 'import numpy as np\n'), ((1733, 1750), 'numpy.any', 'np.any', (['valid_idx'], {}), '(valid_idx)\n', (1739, 1750), True, 'import numpy as np\n'), ((2501, 2516), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2509, 2516), True, 'import numpy as np\n'), ((2536, 2556), 'numpy.isinf', 'np.isinf', (['likelihood'], {}), '(likelihood)\n', (2544, 2556), True, 'import numpy as np\n'), ((2592, 2621), 'numpy.nansum', 'np.nansum', (['likelihood'], {'axis': '(1)'}), '(likelihood, axis=1)\n', (2601, 2621), True, 'import numpy as np\n'), ((4259, 4293), 'numpy.where', 
'np.where', (['(velocity >= speed_thresh)'], {}), '(velocity >= speed_thresh)\n', (4267, 4293), True, 'import numpy as np\n'), ((4479, 4497), 'nept.Epoch', 'nept.Epoch', (['[]', '[]'], {}), '([], [])\n', (4489, 4497), False, 'import nept\n'), ((3339, 3359), 'numpy.isnan', 'np.isnan', (['likelihood'], {}), '(likelihood)\n', (3347, 3359), True, 'import numpy as np\n'), ((1265, 1284), 'numpy.asarray', 'np.asarray', (['binsize'], {}), '(binsize)\n', (1275, 1284), True, 'import numpy as np\n'), ((4354, 4383), 'numpy.arange', 'np.arange', (['position.n_samples'], {}), '(position.n_samples)\n', (4363, 4383), True, 'import numpy as np\n'), ((1988, 2025), 'numpy.sum', 'np.sum', (['tuning_curves[valid_idx, idx]'], {}), '(tuning_curves[valid_idx, idx])\n', (1994, 2025), True, 'import numpy as np\n'), ((2297, 2315), 'numpy.log', 'np.log', (['event_rate'], {}), '(event_rate)\n', (2303, 2315), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
def Entropy(input_):
    """Per-sample Shannon entropy of a batch of probability rows.

    Args:
        input_: tensor of shape (batch, classes) holding probabilities.
    Returns:
        1-D tensor of length batch with -sum(p * log(p + eps)) per row.
    """
    eps = 1e-5
    per_element = -input_ * torch.log(input_ + eps)
    return torch.sum(per_element, dim=1)
def grl_hook(coeff):
    """Build a gradient-reversal hook that scales incoming grads by -coeff."""
    def _reverse(grad):
        flipped = grad.clone()
        return flipped * (-coeff)
    return _reverse
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    """Conditional domain-adversarial loss.

    The discriminator ``ad_net`` is fed the (feature x softmax) conditioning
    and trained to separate the first half of the batch (label 1) from the
    second half (label 0). When ``entropy`` is given, samples are re-weighted
    by certainty (1 + exp(-H)) and a gradient-reversal hook scaled by
    ``coeff`` is registered on it.

    Args:
        input_list: [features, classifier_softmax_outputs]; the softmax
            outputs are detached so no gradient flows into the classifier
            through the conditioning.
        ad_net: adversarial domain discriminator producing probabilities in
            [0, 1] (its output is consumed by BCELoss).
        entropy: optional per-sample entropy tensor used for weighting.
        coeff: scale used by the gradient-reversal hook on ``entropy``.
        random_layer: optional random multilinear map; when given it replaces
            the explicit outer product to keep the discriminator input small.
    Returns:
        Scalar adversarial loss tensor (requires CUDA: targets are .cuda()).
    """
    softmax_output = input_list[1].detach()
    feature = input_list[0]
    if random_layer is None:
        # Explicit outer product softmax (x) feature, flattened per sample.
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        # Randomized multilinear conditioning instead of the full outer product.
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    # Domain labels: first half of the batch -> 1, second half -> 0.
    batch_size = softmax_output.size(0) // 2
    dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
    if entropy is not None:
        # Reverse gradients flowing back through the entropy weights.
        entropy.register_hook(grl_hook(coeff))
        # Certainty weight: low-entropy (confident) samples get higher weight.
        entropy = 1.0+torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[feature.size(0)//2:] = 0
        source_weight = entropy*source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[0:feature.size(0)//2] = 0
        target_weight = entropy*target_mask
        # Normalize each domain's weights by its own total before combining.
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
        return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
    else:
        return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
    """Plain domain-adversarial loss: BCE of the discriminator on features.

    The first half of the batch is labelled 1 (source) and the second half
    0 (target). Requires CUDA (targets are moved with .cuda()).
    """
    domain_pred = ad_net(features)
    half = domain_pred.size(0) // 2
    labels = np.array([[1]] * half + [[0]] * half)
    dc_target = torch.from_numpy(labels).float().cuda()
    return nn.BCELoss()(domain_pred, dc_target)
class CrossEntropyLabelSmooth(nn.Module):
    """Cross entropy loss with label smoothing regularizer.
    Reference:
    Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
    Equation: y = (1 - epsilon) * y + epsilon / K.
    Args:
        num_classes (int): number of classes.
        epsilon (float): smoothing weight.
        use_gpu (bool): move the smoothed targets to CUDA.
        reduction (bool): if True return the mean loss, else per-sample losses.
    """
    def __init__(self, num_classes, epsilon=0.1, use_gpu=True, reduction=True):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.reduction = reduction
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        """
        Args:
            inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth label indices with shape (batch_size,)
        """
        log_probs = self.logsoftmax(inputs)
        # One-hot encode the integer class labels.
        targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
        if self.use_gpu:
            targets = targets.cuda()
        # Smooth: (1 - eps) on the true class plus eps/K spread over all classes.
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        loss = (-targets * log_probs).sum(dim=1)
        # BUG FIX: dropped the unreachable trailing `return loss` that followed
        # the if/else (both branches already returned).
        if self.reduction:
            return loss.mean()
        return loss
class KnowledgeDistillationLoss(nn.Module):
    """Soft-target distillation loss.

    Compares log-softmax of the student ``inputs`` (narrowed to the teacher's
    class count) against softmax of the teacher ``targets`` scaled by
    ``alpha``, averaged over classes and reduced per ``reduction``.
    """
    def __init__(self, reduction='mean', alpha=1.0):
        super().__init__()
        self.reduction = reduction
        self.alpha = alpha

    def forward(self, inputs, targets, mask=None):
        # Restrict student logits to the classes the teacher knows about.
        trimmed = inputs.narrow(1, 0, targets.shape[1])
        log_preds = torch.log_softmax(trimmed, dim=1)
        soft_targets = torch.softmax(targets * self.alpha, dim=1)
        per_sample = (log_preds * soft_targets).mean(dim=1)
        if mask is not None:
            per_sample = per_sample * mask.float()
        if self.reduction == 'mean':
            return -torch.mean(per_sample)
        if self.reduction == 'sum':
            return -torch.sum(per_sample)
        return -per_sample
class entropy_loss(nn.Module):
    """Mean Shannon entropy of softmax(logits) over the batch."""
    def __init__(self):
        super().__init__()

    def forward(self, logits):
        """Return the batch-mean entropy of the softmax distribution.

        Args:
            logits: tensor of shape (batch, classes).
        Returns:
            Scalar tensor; zero for an empty batch.
        """
        if logits.size(0) == 0:
            # BUG FIX: the old code set loss = 0.0 (a Python float) and then
            # called torch.mean(loss), which raises a TypeError because
            # torch.mean requires a Tensor. Return a zero scalar tensor on
            # the same device/dtype instead.
            return logits.new_zeros(())
        y_pred = F.softmax(logits, dim=-1)
        per_sample = torch.sum(-y_pred * torch.log(y_pred + 1e-5), dim=1)
        return torch.mean(per_sample)
"torch.ones_like",
"torch.log",
"torch.log_softmax",
"torch.mean",
"torch.exp",
"torch.softmax",
"numpy.array",
"torch.nn.BCELoss",
"torch.sum",
"torch.nn.LogSoftmax",
"torch.nn.functional.softmax"
] | [((276, 301), 'torch.sum', 'torch.sum', (['entropy'], {'dim': '(1)'}), '(entropy, dim=1)\n', (285, 301), False, 'import torch\n'), ((234, 261), 'torch.log', 'torch.log', (['(input_ + epsilon)'], {}), '(input_ + epsilon)\n', (243, 261), False, 'import torch\n'), ((1187, 1211), 'torch.ones_like', 'torch.ones_like', (['entropy'], {}), '(entropy)\n', (1202, 1211), False, 'import torch\n'), ((1323, 1347), 'torch.ones_like', 'torch.ones_like', (['entropy'], {}), '(entropy)\n', (1338, 1347), False, 'import torch\n'), ((1985, 1997), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1995, 1997), True, 'import torch.nn as nn\n'), ((2672, 2692), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2685, 2692), True, 'import torch.nn as nn\n'), ((3676, 3708), 'torch.log_softmax', 'torch.log_softmax', (['inputs'], {'dim': '(1)'}), '(inputs, dim=1)\n', (3693, 3708), False, 'import torch\n'), ((3726, 3768), 'torch.softmax', 'torch.softmax', (['(targets * self.alpha)'], {'dim': '(1)'}), '(targets * self.alpha, dim=1)\n', (3739, 3768), False, 'import torch\n'), ((4239, 4264), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (4248, 4264), True, 'import torch.nn.functional as F\n'), ((4441, 4457), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (4451, 4457), False, 'import torch\n'), ((1145, 1164), 'torch.exp', 'torch.exp', (['(-entropy)'], {}), '(-entropy)\n', (1154, 1164), False, 'import torch\n'), ((1746, 1758), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1756, 1758), True, 'import torch.nn as nn\n'), ((3946, 3962), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (3956, 3962), False, 'import torch\n'), ((4024, 4039), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (4033, 4039), False, 'import torch\n'), ((4393, 4418), 'torch.log', 'torch.log', (['(y_pred + 1e-05)'], {}), '(y_pred + 1e-05)\n', (4402, 4418), False, 'import torch\n'), ((982, 1031), 'numpy.array', 'np.array', 
(['([[1]] * batch_size + [[0]] * batch_size)'], {}), '([[1]] * batch_size + [[0]] * batch_size)\n', (990, 1031), True, 'import numpy as np\n'), ((1636, 1664), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (1646, 1664), True, 'import torch.nn as nn\n'), ((1908, 1957), 'numpy.array', 'np.array', (['([[1]] * batch_size + [[0]] * batch_size)'], {}), '([[1]] * batch_size + [[0]] * batch_size)\n', (1916, 1957), True, 'import numpy as np\n'), ((1687, 1704), 'torch.sum', 'torch.sum', (['weight'], {}), '(weight)\n', (1696, 1704), False, 'import torch\n'), ((1471, 1495), 'torch.sum', 'torch.sum', (['source_weight'], {}), '(source_weight)\n', (1480, 1495), False, 'import torch\n'), ((1549, 1573), 'torch.sum', 'torch.sum', (['target_weight'], {}), '(target_weight)\n', (1558, 1573), False, 'import torch\n')] |
import re
import warnings
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
from astropy.wcs import WCS, FITSFixedWarning
from astropy.io import fits
def _arange_inclusive(x0, x1, binx):
"""
Return np.arange(x0, x1, binx) except that range is inclusive of x1.
"""
delx = (x1 - x0)
nbin = delx / binx
if abs(round(nbin) - nbin) < 1e-8:
return np.linspace(x0, x1, round(nbin) + 1)
else:
return np.arange(x0, x1, binx, dtype=np.float)
def get_event_hdu_wcs(hdus, hdu_num=None):
    """
    Get the event list table and corresponding WCS in ``hdus``
    Raises TypeError if the ``hdus`` do not contain a valid event list with
    defined WCS info.
    :param hdus: FITS HDU list object
    :param hdu_num: HDU number (default=first matching HDU)
    :returns hdu, wcs: tuple of HDU and corresponding WCS object.
    """
    if hdu_num is None:
        # Skip the primary HDU and scan the rest for an event table.
        hdus = hdus[1:]
    else:
        hdus = [hdus[hdu_num]]
    for hdu in hdus:
        # Work on a copy so header surgery below does not mutate the input.
        hdr = hdu.header.copy()
        # Need at least two WCS header keywords
        if len(hdr['TCTYP*']) >= 2:
            # Related to bug in astropy https://github.com/astropy/astropy/issues/11413
            del hdr['LONP*']
            del hdr['LATP*']
            # Remove all WCS except for (x, y) => (RA, Dec). I could not figure
            # out how to make things work without doing this.
            for key, val in hdr['TCTYP*'].items():
                if match := re.search(r'(\d+)$', key):
                    colnum = match.group(1)
                    if val in ('RA---TAN', 'DEC--TAN'):
                        # CXC seems to use a non-standard convention of a
                        # zero-based origin for CRPIX. The FITS standard
                        # requires that the first "image pixel" has a coordinate
                        # of 1.0. See:
                        # https://github.com/astropy/astropy/issues/11808.
                        # Munging the header here makes the WCS object function
                        # as expected with the high-level world/pixel
                        # transforms.
                        hdr[f'TCRPX{colnum}'] += 1.0
                    else:
                        del hdr[f'TC*{colnum}']
                else:
                    print(f'WARNING: got a column {key} that looks like WCS but does not '
                          'end with a number')
            with warnings.catch_warnings():
                # For some reason FITS/WCS seems to think many of the CXC header
                # keywords are non-standard and need to be fixed.
                warnings.simplefilter('ignore', category=FITSFixedWarning)
                wcs = WCS(hdr, keysel=['pixel'])
            # Return the first HDU that carries usable WCS information.
            return hdu, wcs
    else:
        # for/else: the loop finished without returning, so no HDU matched.
        raise TypeError('FITS file has no event table extensions')
def event_filter(events, filters):
    """
    Filter ``events`` based on matching or limits on event columns.
    ``filters`` must be a list of tuples either 2 or 3 elements long:
    - (col_name, value): exact match
    - (col_name, low_value | None, high_value | None): half-open range
      ``low <= value < high`` (a None bound is unconstrained; two Nones
      apply no constraint for that column)
    :param events: record/structured array supporting column and boolean indexing
    :param filters: list of tuples defining filters
    :returns: filtered table of events
    """
    if not filters:
        return events
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool instead.
    ok = np.ones(len(events), dtype=bool)
    for filter_ in filters:
        colname = filter_[0]
        colvals = events[colname]
        if len(filter_) == 2:
            ok &= (colvals == filter_[1])
        elif len(filter_) == 3:
            lo, hi = filter_[1], filter_[2]
            if lo is None and hi is not None:
                ok &= (colvals < hi)
            elif lo is not None and hi is None:
                ok &= (colvals >= lo)
            elif lo is not None and hi is not None:
                ok &= (colvals >= lo) & (colvals < hi)
        else:
            raise ValueError('Filter must contain either 2 or 3 values')
    return events[ok]
class FITSEventList(object):
    def __init__(self, hdus, hdu_num=None):
        """
        Object containing an X-ray event list with WCS and binning convenience methods
        :hdus: FITS HDU list object containing an event data table
        :param hdu_num: HDU number (default=first matching HDU)
        """
        self.event_hdu, self.wcs = get_event_hdu_wcs(hdus, hdu_num)
        self.header = self.event_hdu.header
        events = self.event_hdu.data
        # Events are kept sorted by x so image() can use searchsorted.
        events_x = events['x']
        self.events = events[np.argsort(events_x)]

    def pixel_to_world(self, x, y):
        """
        Get world coordinates for (x, y)
        :param x: Pixel x value
        :param y: Pixel y value
        :returns: SkyCoord
            World coordinate (ra, dec) values
        """
        # BUG FIX: was self.wcs.pixel_to_worl (typo -> AttributeError).
        return self.wcs.pixel_to_world(x, y)

    def world_to_pixel(self, *args):
        """
        Get pixel coordinates for (ra, dec)
        :param *args: coordinate(s)
            Either one SkyCoord or two value (ra, dec) in degrees
        :returns: pixel coordinate (x, y) values
        """
        if len(args) == 1:
            sc = args[0]
        elif len(args) == 2:
            ra = args[0]
            if not isinstance(ra, u.Quantity):
                ra = ra * u.deg
            # BUG FIX: was args[0], which silently reused RA as Dec.
            dec = args[1]
            if not isinstance(dec, u.Quantity):
                dec = dec * u.deg
            sc = SkyCoord(ra, dec)
        else:
            raise ValueError('must supply either 1 or 2 positional args')
        return self.wcs.world_to_pixel(sc)

    def image(self, x0=None, x1=None, binx=1.0, y0=None, y1=None, biny=1.0,
              filters=None, dtype=np.int32):
        """
        Create a binned image corresponding to the X-ray event (x, y) pairs.
        :param x0: lower limit of x (default = min(x))
        :param x1: upper limit of x (default = max(x))
        :param binx: bin size in x
        :param y0: lower limit of y (default = min(y))
        :param y1: upper limit of y (default = max(y))
        :param biny: bin size in y
        :param filters: table filters to apply using ``event_filter()``
        :param dtype: output image array dtype
        :returns: fits.PrimaryHDU object with binned image
        """
        binx = float(binx)
        biny = float(biny)
        events = self.events
        if x0 is None:
            x0 = np.min(events['x'])
        if x1 is None:
            x1 = np.max(events['x'])
        if y0 is None:
            y0 = np.min(events['y'])
        if y1 is None:
            y1 = np.max(events['y'])
        # events are sorted by x (see __init__), so slice by binary search.
        i0, i1 = np.searchsorted(events['x'], [x0, x1])
        events = events[i0:i1]
        ok = (events['y'] >= y0) & (events['y'] <= y1)
        events = event_filter(events[ok], filters)
        x_bins = _arange_inclusive(x0, x1, binx)
        y_bins = _arange_inclusive(y0, y1, biny)
        if len(events) > 0:
            # Bug in np.histogram2d as of July 2011
            # http://old.nabble.com/histogram2d-error-with-empty-inputs-td31940769.html
            img, x_bins, y_bins = np.histogram2d(events['y'], events['x'],
                                                  bins=[y_bins, x_bins])
        else:
            # BUG FIX: the zeros shape was (n_x, n_y), transposed relative to
            # the histogram2d branch which returns (n_y, n_x).
            img = np.zeros((len(y_bins) - 1, len(x_bins) - 1))
        # Find the position in image coords of the sky pix reference position
        # The -0.5 assumes that image coords refer to the center of the image
        # bin. Subtracting 1 from CRPIX is to undo where 1 gets added when the
        # WCS object is first created. I don't understand this perfectly but it
        # does make the header WCS values match those created by CIAO DM.
        x_crpix = (self.wcs.wcs.crpix[0] - 1 - (x0 - binx / 2.0)) / binx
        y_crpix = (self.wcs.wcs.crpix[1] - 1 - (y0 - biny / 2.0)) / biny
        # Create the image => sky transformation
        w = WCS(naxis=2)
        w.wcs.equinox = 2000.0
        w.wcs.crpix = [x_crpix, y_crpix]
        w.wcs.cdelt = [self.wcs.wcs.cdelt[0] * binx, self.wcs.wcs.cdelt[1] * biny]
        w.wcs.cunit = [self.wcs.wcs.cunit[0], self.wcs.wcs.cunit[1]]
        w.wcs.crval = [self.wcs.wcs.crval[0], self.wcs.wcs.crval[1]]
        w.wcs.ctype = [self.wcs.wcs.ctype[0], self.wcs.wcs.ctype[1]]
        header = w.to_header()
        # Create the image => physical transformation and add to header
        w = WCS(naxis=2)
        w.wcs.crpix = [0.5, 0.5]
        w.wcs.cdelt = [binx, biny]
        w.wcs.crval = [x0, y0]
        w.wcs.ctype = ['x', 'y']
        for key, val in w.to_header().items():
            header[key + 'P'] = val
        header['WCSTY1P'] = 'PHYSICAL'
        header['WCSTY2P'] = 'PHYSICAL'
        # Set LTVi and LTMi_i keywords (seems to be needed for ds9)
        imgx0, imgy0 = w.wcs_world2pix([[0.0, 0.0]], 1)[0]
        imgx1, imgy1 = w.wcs_world2pix([[1.0, 1.0]], 1)[0]
        header['LTM1_1'] = imgx1 - imgx0
        header['LTM2_2'] = imgy1 - imgy0
        header['LTV1'] = imgx0
        header['LTV2'] = imgy0
        hdu = fits.PrimaryHDU(np.array(img, dtype=dtype), header=header)
        return hdu
class XrayEvents(FITSEventList):
    def __init__(self, filename, hdu=None):
        """
        Object containing an X-ray event file with WCS and binning convenience methods.
        Legacy version that accepts a filename.
        :param filename: event FITS file
        :hdu: HDU number containing the event data table (default=first event table)
        """
        self.filename = filename
        # BUG FIX: use a context manager so the file handle is closed even if
        # the base-class constructor raises (the old explicit close() leaked
        # the handle on error).
        with fits.open(filename) as hdus:
            super(XrayEvents, self).__init__(hdus, hdu)
| [
"numpy.searchsorted",
"warnings.catch_warnings",
"numpy.min",
"astropy.coordinates.SkyCoord",
"numpy.max",
"numpy.argsort",
"numpy.array",
"warnings.simplefilter",
"astropy.io.fits.open",
"numpy.histogram2d",
"astropy.wcs.WCS",
"numpy.arange",
"re.search"
] | [((477, 516), 'numpy.arange', 'np.arange', (['x0', 'x1', 'binx'], {'dtype': 'np.float'}), '(x0, x1, binx, dtype=np.float)\n', (486, 516), True, 'import numpy as np\n'), ((6542, 6580), 'numpy.searchsorted', 'np.searchsorted', (["events['x']", '[x0, x1]'], {}), "(events['x'], [x0, x1])\n", (6557, 6580), True, 'import numpy as np\n'), ((7807, 7819), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (7810, 7819), False, 'from astropy.wcs import WCS, FITSFixedWarning\n'), ((8298, 8310), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (8301, 8310), False, 'from astropy.wcs import WCS, FITSFixedWarning\n'), ((9443, 9462), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (9452, 9462), False, 'from astropy.io import fits\n'), ((4479, 4499), 'numpy.argsort', 'np.argsort', (['events_x'], {}), '(events_x)\n', (4489, 4499), True, 'import numpy as np\n'), ((6324, 6343), 'numpy.min', 'np.min', (["events['x']"], {}), "(events['x'])\n", (6330, 6343), True, 'import numpy as np\n'), ((6384, 6403), 'numpy.max', 'np.max', (["events['x']"], {}), "(events['x'])\n", (6390, 6403), True, 'import numpy as np\n'), ((6444, 6463), 'numpy.min', 'np.min', (["events['y']"], {}), "(events['y'])\n", (6450, 6463), True, 'import numpy as np\n'), ((6504, 6523), 'numpy.max', 'np.max', (["events['y']"], {}), "(events['y'])\n", (6510, 6523), True, 'import numpy as np\n'), ((7019, 7082), 'numpy.histogram2d', 'np.histogram2d', (["events['y']", "events['x']"], {'bins': '[y_bins, x_bins]'}), "(events['y'], events['x'], bins=[y_bins, x_bins])\n", (7033, 7082), True, 'import numpy as np\n'), ((8967, 8993), 'numpy.array', 'np.array', (['img'], {'dtype': 'dtype'}), '(img, dtype=dtype)\n', (8975, 8993), True, 'import numpy as np\n'), ((2463, 2488), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2486, 2488), False, 'import warnings\n'), ((2653, 2711), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], 
{'category': 'FITSFixedWarning'}), "('ignore', category=FITSFixedWarning)\n", (2674, 2711), False, 'import warnings\n'), ((2734, 2760), 'astropy.wcs.WCS', 'WCS', (['hdr'], {'keysel': "['pixel']"}), "(hdr, keysel=['pixel'])\n", (2737, 2760), False, 'from astropy.wcs import WCS, FITSFixedWarning\n'), ((5355, 5372), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {}), '(ra, dec)\n', (5363, 5372), False, 'from astropy.coordinates import SkyCoord\n'), ((1501, 1526), 're.search', 're.search', (['"""(\\\\d+)$"""', 'key'], {}), "('(\\\\d+)$', key)\n", (1510, 1526), False, 'import re\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
from skimage.feature import plot_matches
def show_correspondences(imgA, imgB, X1, Y1, X2, Y2, matches, good_matches, number_to_display, filename=None):
    """
    Visualizes corresponding points between two images.
    The first ``number_to_display`` matches are drawn joined by lines:
    good matches in springgreen, bad matches in orangered.
    Writes out a png of the visualization to ../results/ if 'filename'
    is not None.
    """
    # generates unique figures so students can
    # look at all three at once
    fig, ax = plt.subplots(nrows=1, ncols=1)
    matches = matches[0:number_to_display, :]
    good_matches = good_matches[0:number_to_display]
    # plot_matches expects keypoints as (row, col) = (y, x) pairs.
    kp1 = zip_x_y(Y1, X1)
    kp2 = zip_x_y(Y2, X2)
    matches = matches.astype(int)
    plot_matches(ax, imgA, imgB, kp1, kp2, matches[np.logical_not(good_matches)], matches_color='orangered')
    plot_matches(ax, imgA, imgB, kp1, kp2, matches[good_matches], matches_color='springgreen')
    if filename:
        # makedirs with exist_ok avoids the isdir/mkdir race of the old code,
        # and `fig` from plt.subplots is already the figure we drew on, so the
        # redundant plt.gcf() call was dropped.
        os.makedirs('../results', exist_ok=True)
        fig.savefig('../results/' + filename)
    plt.show()
def zip_x_y(x, y):
    """Pair coordinates element-wise: returns an array of [x[i], y[i]] rows."""
    paired = [np.array([x[i], y[i]]) for i in range(len(x))]
    return np.array(paired)
| [
"matplotlib.pyplot.gcf",
"numpy.logical_not",
"skimage.feature.plot_matches",
"numpy.array",
"os.path.isdir",
"os.mkdir",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((592, 622), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (604, 622), True, 'import matplotlib.pyplot as plt\n'), ((902, 997), 'skimage.feature.plot_matches', 'plot_matches', (['ax', 'imgA', 'imgB', 'kp1', 'kp2', 'matches[good_matches]'], {'matches_color': '"""springgreen"""'}), "(ax, imgA, imgB, kp1, kp2, matches[good_matches], matches_color\n ='springgreen')\n", (914, 997), False, 'from skimage.feature import plot_matches\n'), ((1001, 1010), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1142), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1140, 1142), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1287), 'numpy.array', 'np.array', (['zipped_points'], {}), '(zipped_points)\n', (1272, 1287), True, 'import numpy as np\n'), ((843, 871), 'numpy.logical_not', 'np.logical_not', (['good_matches'], {}), '(good_matches)\n', (857, 871), True, 'import numpy as np\n'), ((1035, 1062), 'os.path.isdir', 'os.path.isdir', (['"""../results"""'], {}), "('../results')\n", (1048, 1062), False, 'import os\n'), ((1067, 1089), 'os.mkdir', 'os.mkdir', (['"""../results"""'], {}), "('../results')\n", (1075, 1089), False, 'import os\n'), ((1232, 1254), 'numpy.array', 'np.array', (['[x[i], y[i]]'], {}), '([x[i], y[i]])\n', (1240, 1254), True, 'import numpy as np\n')] |
"""
The purpose of this code is to first create the raw directory folder and include the following files
starting protein receptor
starting ligand
target ligand
glide pose viewer file
Then the top glide poses are added
Then the decoys are created
It can be run on sherlock using
$ $SCHRODINGER/run python3 decoy.py all /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --decoy_type conformer_poses
$ $SCHRODINGER/run python3 decoy.py group /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --index 0 --decoy_type conformer_poses
$ $SCHRODINGER/run python3 decoy.py check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --decoy_type conformer_poses
$ $SCHRODINGER/run python3 decoy.py group /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random_clash.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --decoy_type conformer_poses --index 0
$ $SCHRODINGER/run python3 decoy.py delete /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --n 10 --decoy_type conformer_poses
"""
import argparse
import os
import schrodinger.structure as structure
import schrodinger.structutils.transform as transform
from schrodinger.structutils.transform import get_centroid
import schrodinger.structutils.interactions.steric_clash as steric_clash
import numpy as np
import statistics
import pickle
from tqdm import tqdm
import subprocess
import random
_CONFGEN_CMD = ("$SCHRODINGER/confgenx -WAIT -optimize -drop_problematic -num_conformers {num_conformers} "
"-max_num_conformers {num_conformers} {input_file}")
_ALIGN_CMD = "$SCHRODINGER/run rmsd.py {ref_file} {conf_file} -m -o {output_file}"
class MCSS:
"""
Reads and writes MCSS features for a ligand pair.
There are two key phases of the computation:
(1) Identification of maximum common substructure(s)
(2) Computation of RMSDs between the substructures in
docking results.
Task (1) is accomplished using Schrodinger's canvasMCSS utility.
Task (2) is accomplished by identifying all matches of the substructure(s)
from (1) and finding the pair with the mimimum RMSD. This is a subtlely
difficult task because of symmetry concerns and details of extracting
substructures.
MCSS must be at least half the size of the smaller of the ligands
or no RMSDs are computed.
A key design decision is to not specify any file names in this class
(other than those associated with temp files). The implication of this
is that MCSSController will be completely in control of this task, while
this class can be dedicated to actually computing the MCSS feature.
"""
mcss_cmd = ("$SCHRODINGER/utilities/canvasMCS -imae {} -ocsv {}"
" -stop {} -atomtype C {}")
def __init__(self, l1, l2):
"""
l1, l2: string, ligand names
"""
if l1 > l2: l1, l2 = l2, l1
self.l1 = l1
self.l2 = l2
self.name = "{}-{}".format(l1, l2)
self.n_l1_atoms = 0
self.n_l2_atoms = 0
self.n_mcss_atoms = 0
self.smarts_l1 = []
self.smarts_l2 = []
self.rmsds = {}
self.tried_small = False
# Deprecated.
self.n_mcss_bonds = 0
def __str__(self):
return ','.join(map(str,
[self.l1, self.l2,
self.n_l1_atoms, self.n_l2_atoms, self.n_mcss_atoms, self.n_mcss_bonds,
';'.join(self.smarts_l1), ';'.join(self.smarts_l2), self.tried_small]
))
def compute_mcss(self, ligands, init_file, mcss_types_file, small=False):
"""
Compute the MCSS file by calling Schrodinger canvasMCSS.
Updates instance with MCSSs present in the file
"""
structure_file = '{}.ligands.mae'.format(init_file)
mcss_file = '{}.mcss.csv'.format(init_file)
stwr = structure.StructureWriter(structure_file)
stwr.append(ligands[self.l1])
stwr.append(ligands[self.l2])
stwr.close()
# set the sizes in atoms of each of the ligands
self._set_ligand_sizes(structure_file)
if os.system(self.mcss_cmd.format(structure_file,
mcss_file,
5 if small else 10,
mcss_types_file)):
assert False, 'MCSS computation failed'
self._set_mcss(mcss_file)
self.tried_small = small
with open(init_file, 'a+') as fp:
fp.write(str(self) + '\n')
os.system('rm {} {}'.format(structure_file, mcss_file))
def _set_ligand_sizes(self, structure_file):
    """
    Read the two-ligand structure file and record each ligand's
    heavy-atom (non-hydrogen) count in n_l1_atoms / n_l2_atoms.

    On a read failure or an unexpected structure count, the counts are
    left untouched and a diagnostic is printed.
    """
    try:
        refs = [st for st in structure.StructureReader(structure_file)]
    # Bug fix: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        print('Unable to read MCSS structure file for', self.l1, self.l2)
        return None
    if len(refs) != 2:
        print('Wrong number of structures', self.l1, self.l2)
        return None
    ref1, ref2 = refs
    # Count heavy atoms only; hydrogens are excluded from MCSS sizing.
    n_l1_atoms = len([a for a in ref1.atom if a.element != 'H'])
    n_l2_atoms = len([a for a in ref2.atom if a.element != 'H'])
    self.n_l1_atoms = n_l1_atoms
    self.n_l2_atoms = n_l2_atoms
def _set_mcss(self, mcss_file):
    """
    Update MCSS state from the direct CSV output of canvasMCSS.

    Note that there can be multiple maximum common substructures
    of the same size; every row's SMARTS is collected per ligand.
    """
    ligs = {}
    n_mcss_atoms = None
    with open(mcss_file) as fp:
        fp.readline()  # Header
        for line in fp:
            smiles, lig, _, _, _, _n_mcss_atoms, _n_mcss_bonds = line.strip().split(',')[:7]
            smarts = line.strip().split(',')[-1]  # There are commas in some of the fields
            _n_mcss_atoms = int(_n_mcss_atoms)
            # Every row should report the same (maximum) substructure size.
            assert n_mcss_atoms is None or n_mcss_atoms == _n_mcss_atoms, self.name
            if lig not in ligs: ligs[lig] = []
            ligs[lig] += [smarts]
            n_mcss_atoms = _n_mcss_atoms
    if len(ligs) != 2:
        print('Wrong number of ligands in MCSS file', ligs)
        return None
    assert all(smarts for smarts in ligs.values()), ligs
    # MCSS size can change when tautomers change. One particularly prevalent
    # case is when oxyanions are neutralized. Oxyanions are sometimes specified
    # by the smiles string, but nevertheless glide neutralizes them.
    # Can consider initially considering oxyanions and ketones interchangable
    # (see mcss15.typ).
    if self.n_mcss_atoms:
        assert self.n_mcss_atoms <= n_mcss_atoms + 1, 'MCSS size decreased by more than 1.'
        if self.n_mcss_atoms < n_mcss_atoms:
            print(self.name, 'MCSS size increased.')
        if self.n_mcss_atoms > n_mcss_atoms:
            # Bug fix: message read 'dencreased'.
            print(self.name, 'MCSS size decreased by one.')
    self.n_mcss_atoms = n_mcss_atoms
def get_prots(docked_prot_file):
    """
    gets list of all protein, target ligands, and starting ligands in the index file
    :param docked_prot_file: (string) file listing proteins to process
    :return: process (list) list of all protein, target ligands, and starting ligands to process
    """
    triples = []
    with open(docked_prot_file) as fp:
        for line in tqdm(fp, desc='index file'):
            # '#' marks a comment line in the index file.
            if line.startswith('#'):
                continue
            protein, target, start = line.strip().split()
            triples.append((protein, target, start))
    return triples
def group_files(n, process):
    """
    groups pairs into sublists of size n
    :param n: (int) sublist size
    :param process: (list) list of pairs to process
    :return: grouped_files (list) list of sublists of pairs; the last
        sublist may be shorter than n
    """
    return [process[offset:offset + n] for offset in range(0, len(process), n)]
def random_three_vector():
    """
    Generates a random 3D unit vector (direction) with a uniform spherical distribution
    Algo from http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
    :return: (x, y, z) components of the unit vector
    """
    # Draw azimuth first, then cos(polar angle); keep this RNG call order
    # so seeded runs reproduce the original sequence.
    phi = np.random.uniform(0, np.pi * 2)
    costheta = np.random.uniform(-1, 1)
    theta = np.arccos(costheta)
    sin_theta = np.sin(theta)
    return sin_theta * np.cos(phi), sin_theta * np.sin(phi), np.cos(theta)
def cartesian_vector(i):
    """
    Returns the i-th axis-aligned unit vector (+/-x, +/-y, +/-z) for
    i in 0..5. Prints 'Bad input' and returns None for any other index.
    """
    axis_vectors = {0: (1, 0, 0), 1: (-1, 0, 0),
                    2: (0, 1, 0), 3: (0, -1, 0),
                    4: (0, 0, 1), 5: (0, 0, -1)}
    vec = axis_vectors.get(i)
    if vec is None:
        print('Bad input')
        return None
    return vec
def modify_file(path, name):
    """
    Rewrite `path` in place, substituting the file's own basename for
    `name` on every line whose stripped content equals `name`.

    :param path: (string) file to rewrite
    :param name: (string) placeholder line content to replace
    """
    file_name = path.split('/')[-1]
    new_lines = []
    # Bug fix: use context managers so both handles are closed even if an
    # error occurs (the original left them open on exception).
    with open(path, "r") as reading_file:
        for line in reading_file:
            if line.strip() == name:
                new_lines.append(line.replace(name, file_name))
            else:
                new_lines.append(line)
    with open(path, "w") as writing_file:
        writing_file.write("".join(new_lines))
def create_mcss_file(path, ligand, save_folder):
    """
    Copy `path` to save_folder/<ligand>.mae, replacing the placeholder
    token '_pro_ligand' with the ligand name on every line.

    NOTE: each source line keeps its own newline and an extra newline is
    appended after it (preserved from the original implementation).

    :param path: (string) source .mae file
    :param ligand: (string) ligand name substituted for the placeholder
    :param save_folder: (string) directory the new file is written into
    :return: (string) path of the file written
    """
    # Bug fix: use context managers so the file handles are always closed
    # (the original left them open on exception).
    new_file_content = ""
    with open(path, "r") as reading_file:
        for line in reading_file:
            new_line = line.replace("_pro_ligand", ligand)
            new_file_content += new_line + "\n"
    writing_path = os.path.join(save_folder, '{}.mae'.format(ligand))
    with open(writing_path, "w") as writing_file:
        writing_file.write(new_file_content)
    return writing_path
def compute_protein_mcss(ligands, pair_path):
    """
    Compute the pairwise MCSS for every unordered pair of ligands and
    append the results to '<pair_path>/<lig0>-to-<lig1>_mcss.csv'.

    :param ligands: (list) ligand names
    :param pair_path: (string) directory holding the ligand pose files
    """
    init_file = '{}/{}-to-{}_mcss.csv'.format(pair_path, ligands[0], ligands[1])
    for i in range(len(ligands)):
        for j in range(i + 1, len(ligands)):
            l1, l2 = ligands[i], ligands[j]
            # Rewrite each ligand file so its placeholder names the ligand.
            l1_path = '{}/ligand_poses/{}_lig0.mae'.format(pair_path, l1)
            new_l1_path = create_mcss_file(l1_path, l1, pair_path)
            l2_path = '{}/{}_lig.mae'.format(pair_path, l2)
            new_l2_path = create_mcss_file(l2_path, l2, pair_path)
            mcss_types_file = 'mcss_type_file.typ'
            mcss = MCSS(l1, l2)
            with structure.StructureReader(new_l1_path) as ligand1, structure.StructureReader(new_l2_path) as ligand2:
                # Bug fix: the original rebound the `ligands` parameter to
                # this dict, which broke iteration for more than two ligands.
                lig_structures = {l1: next(ligand1), l2: next(ligand2)}
                mcss.compute_mcss(lig_structures, init_file, mcss_types_file)
            # Clean up the temporary per-ligand files.
            os.system('rm {} {}'.format(new_l1_path, new_l2_path))
def create_decoys(lig_file, max_decoys, mean_translation, stdev_translation, min_angle, max_angle):
    """
    creates max_decoys translated/rotated decoys of a glide ligand pose
    :param lig_file: (string) file of glide ligand pose that will be translated/rotated
    :param max_decoys: (int) number of decoy files created
    :param mean_translation: (float) mean of the normally distributed translation distance
    :param stdev_translation: (float) stdev of the normally distributed translation distance
    :param min_angle: (float) minimum rotation angle about each axis
    :param max_angle: (float) maximum rotation angle about each axis
    :return:
    """
    # The pose code is the file-name suffix, e.g. 'lig0.mae'; pose 0 uses
    # a different placeholder token than the other poses.
    code = lig_file.split('/')[-1].split('_')[-1]
    if code == 'lig0.mae':
        modify_file(lig_file, '_pro_ligand')
    else:
        modify_file(lig_file, '_ligand')
    for i in range(max_decoys):
        s = list(structure.StructureReader(lig_file))[0]
        # translation: random direction scaled by a normal distance
        x, y, z = random_three_vector()
        dist = np.random.normal(mean_translation, stdev_translation)
        transform.translate_structure(s, x * dist, y * dist, z * dist)
        # rotation about the structure's centroid
        x_angle = np.random.uniform(min_angle, max_angle)
        y_angle = np.random.uniform(min_angle, max_angle)
        z_angle = np.random.uniform(min_angle, max_angle)
        rot_center = list(get_centroid(s))
        transform.rotate_structure(s, x_angle, y_angle, z_angle, rot_center)
        # Decoys are suffixed 'a', 'b', ... after the pose number.
        decoy_file = lig_file[:-4] + chr(ord('a')+i) + '.mae'
        with structure.StructureWriter(decoy_file) as decoy:
            decoy.append(s)
        # NOTE(review): both branches below are identical; presumably a
        # leftover of an earlier distinction between pose codes.
        if code == 'lig0.mae':
            modify_file(decoy_file, lig_file.split('/')[-1])
        else:
            modify_file(decoy_file, lig_file.split('/')[-1])
def create_cartesian_decoys(lig_file):
    """
    creates 6 decoys of a glide ligand pose, one unit translation along
    each axis direction (+/-x, +/-y, +/-z)
    :param lig_file: (string) file of glide ligand pose that will be translated
    :return:
    """
    # The pose code is the file-name suffix, e.g. 'lig0.mae'; pose 0 uses
    # a different placeholder token than the other poses.
    code = lig_file.split('/')[-1].split('_')[-1]
    if code == 'lig0.mae':
        modify_file(lig_file, '_pro_ligand')
    else:
        modify_file(lig_file, '_ligand')
    for i in range(6):
        s = list(structure.StructureReader(lig_file))[0]
        # translation along the i-th axis direction
        x, y, z = cartesian_vector(i)
        transform.translate_structure(s, x, y, z)
        # Decoys are suffixed 'a'..'f' after the pose number.
        decoy_file = lig_file[:-4] + chr(ord('a')+i) + '.mae'
        with structure.StructureWriter(decoy_file) as decoy:
            decoy.append(s)
        # NOTE(review): both branches below are identical; presumably a
        # leftover of an earlier distinction between pose codes.
        if code == 'lig0.mae':
            modify_file(decoy_file, lig_file.split('/')[-1])
        else:
            modify_file(decoy_file, lig_file.split('/')[-1])
def run_cmd(cmd, error_msg=None, raise_except=False):
    """
    Run a shell command and return its stdout as text.

    On failure, optionally print error_msg; the exception is re-raised
    only when raise_except is True, otherwise None is returned.
    """
    try:
        return subprocess.check_output(cmd, universal_newlines=True, shell=True)
    except Exception as exc:
        if error_msg is not None:
            print(error_msg)
        if raise_except:
            raise exc
def gen_ligand_conformers(path, output_dir, num_conformers):
    """
    Generate up to num_conformers conformers for the ligand in `path`
    with Schrodinger ConfGen, writing output into output_dir.

    NOTE(review): the working directory is changed and restored without
    try/finally; run_cmd swallows exceptions by default so this holds
    today, but the chdir would leak if that ever changed.
    """
    current_dir = os.getcwd()
    os.chdir(output_dir)
    basename = os.path.basename(path)
    ### Note: For some reason, confgen isn't able to find the .mae file,
    # unless it is in working directory. So, we need to copy it over.
    ### Note: There may be duplicated ligand names (for different targets).
    # Since it only happens for CHEMBL ligand, just ignore it for now.
    # Otherwise, might want consider to generate the conformers to separate
    # folders for each (target, ligand) pair.
    # Run ConfGen
    run_cmd(f'cp {path:} ./{basename:}')
    command = _CONFGEN_CMD.format(num_conformers=num_conformers,
                                 input_file=f'./{basename:}')
    run_cmd(command, f'Failed to run ConfGen on {path:}')
    run_cmd(f'rm ./{basename:}')
    os.chdir(current_dir)
def get_aligned_conformers(conformer_file, ref_file, aligned_file):
    """
    Align the conformers in conformer_file onto ref_file and write the
    aligned structures to aligned_file (via the _ALIGN_CMD shell tool).
    """
    run_cmd(_ALIGN_CMD.format(ref_file=ref_file, conf_file=conformer_file, output_file=aligned_file))
def create_conformer_decoys(conformers, grid_size, start_lig_center, prot, pose_path, target, max_poses, min_angle,
                            max_angle):
    """
    Sample translated/rotated conformers until max_poses clash-free poses
    exist in pose_path (pose 0 is assumed to be created by the caller).

    :param conformers: (list) candidate conformer structures
    :param grid_size: (int) half-width of the translation grid, in grid units
    :param start_lig_center: centroid (x, y, z) of the starting ligand
    :param prot: protein structure used for steric-clash filtering
    :param pose_path: (string) directory where pose files are written
    :param target: (string) target ligand name used in file names
    :param max_poses: (int) stop once this many poses (incl. pose 0) exist
    :param min_angle: (float) minimum rotation angle about each axis
    :param max_angle: (float) maximum rotation angle about each axis
    """
    num_iter_without_pose = 0
    num_valid_poses = 1  # pose 0 already exists; start numbering at 1
    # Each grid entry is [[dx, dy, dz], rejection_count].
    grid = []
    for dx in range(-grid_size, grid_size):
        for dy in range(-grid_size, grid_size):
            for dz in range(-grid_size, grid_size):
                grid.append([[dx, dy, dz], 0])
    while num_valid_poses < max_poses:
        num_iter_without_pose += 1
        conformer = random.choice(conformers)
        conformer_center = list(get_centroid(conformer))
        # translation: recenter the conformer on a randomly chosen grid
        # cell around the starting ligand's center
        index = random.randint(0, len(grid) - 1)
        grid_loc = grid[index][0]
        transform.translate_structure(conformer, start_lig_center[0] - conformer_center[0] + grid_loc[0],
                                      start_lig_center[1] - conformer_center[1] + grid_loc[1],
                                      start_lig_center[2] - conformer_center[2] + grid_loc[2])
        conformer_center = list(get_centroid(conformer))
        # rotation about the conformer's own centroid
        x_angle = np.random.uniform(min_angle, max_angle)
        y_angle = np.random.uniform(min_angle, max_angle)
        z_angle = np.random.uniform(min_angle, max_angle)
        transform.rotate_structure(conformer, x_angle, y_angle, z_angle, conformer_center)
        # Accept the pose only if its clash volume with the protein is small.
        if steric_clash.clash_volume(prot, struc2=conformer) < 200:
            decoy_file = os.path.join(pose_path, "{}_lig{}.mae".format(target, num_valid_poses))
            with structure.StructureWriter(decoy_file) as decoy:
                decoy.append(conformer)
            modify_file(decoy_file, '_pro_ligand')
            modify_file(decoy_file, '{}_lig0.mae'.format(target))
            num_valid_poses += 1
            grid[index][1] = 0
            num_iter_without_pose = 0
        elif num_iter_without_pose == 5 and len(grid) > 1:
            # Stuck: drop the most-rejected grid cell and keep searching.
            max_val = max(grid, key=lambda x: x[1])
            grid.remove(max_val)
            num_iter_without_pose = 0
        else:
            grid[index][1] += 1
def run_all(docked_prot_file, run_path, raw_root, data_root, grouped_files, n, decoy_type):
    """
    submits sbatch script to create decoys for each protein, target, start group
    :param docked_prot_file: (string) file listing proteins to process
    :param run_path: (string) directory where script and output files will be written
    :param raw_root: (string) directory where raw data will be placed
    :param data_root: (string) pdbbind directory where raw data will be obtained
    :param grouped_files: (list) list of protein, target, start groups
    :param n: (int) group size, forwarded to the per-group invocation
    :param decoy_type: (string) cartesian_poses, ligand_poses, or conformer_poses
    :return:
    """
    for i, group in enumerate(grouped_files):
        # One sbatch job per group; each job re-runs this script in
        # 'group' mode with its own group index.
        cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="$SCHRODINGER/run python3 decoy.py group {} {} {} {} --n {} ' \
              '--index {} --decoy_type {}"'
        os.system(cmd.format(os.path.join(run_path, 'decoy{}.out'.format(i)), docked_prot_file,
                             run_path, raw_root, data_root, n, i, decoy_type))
def run_group(grouped_files, raw_root, data_root, index, max_poses, decoy_type, max_decoys, mean_translation,
              stdev_translation, min_angle, max_angle, num_conformers, grid_size):
    """
    creates decoys for each protein, target, start group
    :param grouped_files: (list) list of protein, target, start groups
    :param raw_root: (string) directory where raw data will be placed
    :param data_root: (string) pdbbind directory where raw data will be obtained
    :param index: (int) group number
    :param max_poses: (int) maximum number of glide poses considered
    :param decoy_type: (string) cartesian_poses, ligand_poses, or conformer_poses
    :param max_decoys: (int) maximum number of decoys created per glide pose
    :param mean_translation: (float) mean distance decoys are translated
    :param stdev_translation: (float) stdev of distance decoys are translated
    :param min_angle: (float) minimum angle decoys are rotated
    :param max_angle: (float) maximum angle decoys are rotated
    :param num_conformers: (int) maximum number of conformers generated
    :param grid_size: (int) half-width of the conformer search grid
    :return:
    """
    for protein, target, start in grouped_files[index]:
        pair = '{}-to-{}'.format(target, start)
        protein_path = os.path.join(raw_root, protein)
        pair_path = os.path.join(protein_path, pair)
        pose_path = os.path.join(pair_path, decoy_type)
        dock_root = os.path.join(data_root, '{}/docking/sp_es4/{}'.format(protein, pair))
        struct_root = os.path.join(data_root, '{}/structures/aligned'.format(protein))
        # create folders
        if not os.path.exists(raw_root):
            os.mkdir(raw_root)
        if not os.path.exists(protein_path):
            os.mkdir(protein_path)
        if not os.path.exists(pair_path):
            os.mkdir(pair_path)
        if not os.path.exists(pose_path):
            os.mkdir(pose_path)
        # add basic files (copied from the pdbbind data root if missing)
        if not os.path.exists('{}/{}_prot.mae'.format(pair_path, start)):
            os.system('cp {}/{}_prot.mae {}/{}_prot.mae'.format(struct_root, start, pair_path, start))
        if not os.path.exists('{}/{}_prot.mae'.format(pair_path, target)):
            os.system('cp {}/{}_prot.mae {}/{}_prot.mae'.format(struct_root, target, pair_path, target))
        if not os.path.exists('{}/{}_lig.mae'.format(pair_path, start)):
            os.system('cp {}/{}_lig.mae {}/{}_lig.mae'.format(struct_root, start, pair_path, start))
        if not os.path.exists('{}/{}_lig0.mae'.format(pose_path, target)):
            os.system('cp {}/{}_lig.mae {}/{}_lig0.mae'.format(struct_root, target, pose_path, target))
            # Stamp the freshly copied pose-0 file with its own name.
            modify_file('{}/{}_lig0.mae'.format(pose_path, target), '_pro_ligand')
        # add combine glide poses
        pv_file = '{}/{}_glide_pv.maegz'.format(pair_path, pair)
        if not os.path.exists(pv_file):
            os.system('cp {}/{}_pv.maegz {}'.format(dock_root, pair, pv_file))
        if decoy_type == "ligand_poses" or decoy_type == "cartesian_poses":
            # extract glide poses and create decoys
            num_poses = len(list(structure.StructureReader(pv_file)))
            for i in range(num_poses):
                if i == max_poses:
                    break
                lig_file = os.path.join(pose_path, '{}_lig{}.mae'.format(target, i))
                # Pose 0 was created above from the aligned ligand file.
                if i != 0:
                    with structure.StructureWriter(lig_file) as all_file:
                        all_file.append(list(structure.StructureReader(pv_file))[i])
                if decoy_type == 'cartesian_poses':
                    create_cartesian_decoys(lig_file)
                elif decoy_type == 'ligand_poses':
                    create_decoys(lig_file, max_decoys, mean_translation, stdev_translation, min_angle, max_angle)
        elif decoy_type == "conformer_poses":
            start_lig_file = os.path.join(pair_path, '{}_lig.mae'.format(start))
            start_lig = list(structure.StructureReader(start_lig_file))[0]
            target_lig_file = os.path.join(pair_path, 'ligand_poses', '{}_lig0.mae'.format(target))
            start_lig_center = list(get_centroid(start_lig))
            prot_file = os.path.join(pair_path, '{}_prot.mae'.format(start))
            prot = list(structure.StructureReader(prot_file))[0]
            aligned_file = os.path.join(pair_path, "aligned_conformers.mae")
            # Generate and align conformers only if not already cached.
            if not os.path.exists(aligned_file):
                if not os.path.exists(os.path.join(pair_path, "{}_lig0-out.maegz".format(target))):
                    gen_ligand_conformers(target_lig_file, pair_path, num_conformers)
                conformer_file = os.path.join(pair_path, "{}_lig0-out.maegz".format(target))
                get_aligned_conformers(conformer_file, target_lig_file, aligned_file)
            conformers = list(structure.StructureReader(aligned_file))
            create_conformer_decoys(conformers, grid_size, start_lig_center, prot, pose_path, target, max_poses, min_angle,
                                    max_angle)
            # Remove ConfGen intermediates.
            if os.path.exists(os.path.join(pair_path, '{}_lig0.log'.format(target))):
                os.remove(os.path.join(pair_path, '{}_lig0.log'.format(target)))
            if os.path.exists(os.path.join(pair_path, "{}_lig0-out.maegz".format(target))):
                os.remove(os.path.join(pair_path, "{}_lig0-out.maegz".format(target)))
        # combine ligands into one merged pose-viewer file
        if os.path.exists('{}/{}_{}_merge_pv.mae'.format(pair_path, pair, decoy_type)):
            os.remove('{}/{}_{}_merge_pv.mae'.format(pair_path, pair, decoy_type))
        with structure.StructureWriter('{}/{}_{}_merge_pv.mae'.format(pair_path, pair, decoy_type)) as all_file:
            for file in os.listdir(pose_path):
                if file[-3:] == 'mae':
                    pv = list(structure.StructureReader(os.path.join(pose_path, file)))
                    all_file.append(pv[0])
        # compute mcss
        if not os.path.exists(os.path.join(pair_path, '{}_mcss.csv'.format(pair))):
            compute_protein_mcss([target, start], pair_path)
def run_check(docked_prot_file, raw_root, max_poses, max_decoys, decoy_type):
    """
    check if all files are created
    :param docked_prot_file: (string) file listing proteins to process
    :param raw_root: (string) directory where raw data will be placed
    :param max_poses: (int) maximum number of glide poses considered
    :param max_decoys: (int) maximum number of decoys created per glide pose
    :param decoy_type: (string) cartesian_poses, ligand_poses, or conformer_poses
    :return:
    """
    process = []
    num_pairs = 0
    with open(docked_prot_file) as fp:
        for line in tqdm(fp, desc='protein, target, start groups'):
            if line[0] == '#': continue
            protein, target, start = line.strip().split()
            pair = '{}-to-{}'.format(target, start)
            num_pairs += 1
            protein_path = os.path.join(raw_root, protein)
            pair_path = os.path.join(protein_path, pair)
            pose_path = os.path.join(pair_path, decoy_type)
            pv_file = os.path.join(pair_path, '{}_glide_pv.maegz'.format(pair))
            # Basic per-pair files.
            if not os.path.exists('{}/{}_prot.mae'.format(pair_path, start)):
                process.append((protein, target, start))
                print('{}/{}_prot.mae'.format(pair_path, start))
                continue
            if not os.path.exists('{}/{}_lig.mae'.format(pair_path, start)):
                process.append((protein, target, start))
                print('{}/{}_lig.mae'.format(pair_path, start))
                continue
            if not os.path.exists('{}/{}_lig0.mae'.format(pose_path, target)):
                process.append((protein, target, start))
                print('{}/{}_lig0.mae'.format(pose_path, target))
                continue
            if not os.path.exists(pv_file):
                process.append((protein, target, start))
                print(pv_file)
                # continue
            # Bug fix: was 'cartesian-poses' (hyphen), which never matched the
            # 'cartesian_poses' decoy type used everywhere else in this file.
            if decoy_type == 'ligand_poses' or decoy_type == 'cartesian_poses':
                num_poses = min(max_poses, len(list(structure.StructureReader(pv_file))))
                if not os.path.exists(os.path.join(pose_path, '{}_lig{}.mae'.format(target, num_poses - 1))):
                    process.append((protein, target, start))
                    print(os.path.join(pose_path, '{}_lig{}.mae'.format(target, num_poses - 1)))
                    continue
                for i in range(max_decoys):
                    if not os.path.exists(os.path.join(pose_path, '{}_lig{}.mae'.format(target, str(num_poses - 1) +
                                                                                           chr(ord('a') + i)))):
                        process.append((protein, target, start))
                        print(os.path.join(pose_path, '{}_lig{}.mae'.format(target, str(num_poses - 1) + chr(ord('a') + i))))
                        break
            elif decoy_type == 'conformer_poses':
                finish = False
                for i in range(max_poses):
                    if not os.path.exists(os.path.join(pose_path, '{}_lig{}.mae'.format(target, i))):
                        process.append((protein, target, start))
                        print(os.path.join(pose_path, '{}_lig{}.mae'.format(target, i)))
                        finish = True
                        break
                if finish:
                    continue
            if not os.path.exists(os.path.join(pair_path, '{}_mcss.csv'.format(pair))):
                process.append((protein, target, start))
                print(os.path.join(pair_path, '{}_mcss.csv'.format(pair)))
                continue
            if not os.path.exists(os.path.join(pair_path, '{}/{}_{}_merge_pv.mae'.format(pair_path, pair, decoy_type))):
                process.append((protein, target, start))
                print(os.path.join(pair_path, '{}/{}_{}_merge_pv.mae'.format(pair_path, pair, decoy_type)))
                continue
    print('Missing', len(process), '/', num_pairs)
    print(process)
def run_all_dist_check(docked_prot_file, run_path, raw_root, data_root, grouped_files):
    """
    submits sbatch script to check mean distance of displacement for decoys for each protein, target, start group
    :param docked_prot_file: (string) file listing proteins to process
    :param run_path: (string) directory where script and output files will be written
    :param raw_root: (string) directory where raw data will be placed
    :param data_root: (string) pdbbind directory where raw data will be obtained
    :param grouped_files: (list) list of protein, target, start groups
    :return:
    """
    for i, group in enumerate(grouped_files):
        # One sbatch job per group; each job re-runs this script in
        # 'group_dist_check' mode with its own group index.
        cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="$SCHRODINGER/run python3 decoy.py group_dist_check {} {} {} {} ' \
              '--index {}"'
        os.system(cmd.format(os.path.join(run_path, 'dist{}.out'.format(i)), docked_prot_file,
                             run_path, raw_root, data_root, i))
def run_group_dist_check(grouped_files, raw_root, index, dist_dir, max_poses, max_decoys):
    """
    checks mean distance of displacement for decoys for each protein, target, start group
    :param grouped_files: (list) list of protein, target, start groups
    :param raw_root: (string) directory where raw data will be placed
    :param index: (int) group number
    :param dist_dir: (string) directory to place distances
    :param max_poses: (int) maximum number of glide poses considered
    :param max_decoys: (int) maximum number of decoys created per glide pose
    :return:
    """
    save = []
    for protein, target, start in grouped_files[index]:
        protein_path = os.path.join(raw_root, protein)
        pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
        pose_path = os.path.join(pair_path, 'ligand_poses')
        pv_file = os.path.join(pair_path, '{}-to-{}_pv.maegz'.format(target, start))
        num_poses = len(list(structure.StructureReader(pv_file)))
        means = []
        for i in range(num_poses):
            if i == max_poses:
                break
            lig_file = os.path.join(pose_path, '{}_lig{}.mae'.format(target, i))
            s = list(structure.StructureReader(lig_file))[0]
            c = get_centroid(s)
            dists = []
            # Distance from each decoy's centroid to the pose centroid.
            for j in range(max_decoys):
                decoy_file = lig_file[:-4] + chr(ord('a') + j) + '.mae'
                decoy = list(structure.StructureReader(decoy_file))[0]
                dists.append(transform.get_vector_magnitude(c - get_centroid(decoy)))
            means.append(statistics.mean(dists))
        save.append(statistics.mean(means))
    # Bug fix: use a context manager so the pickle file is closed
    # (the original opened the handle and never closed it).
    with open(os.path.join(dist_dir, '{}.pkl'.format(index)), 'wb') as outfile:
        pickle.dump(save, outfile)
    print(save)
def run_check_dist_check(grouped_files, dist_dir):
    """
    check if all dist files created and if all means are appropriate
    :param grouped_files: (list) list of protein, target, start groups
    :param dist_dir: (string) directory holding the per-group distance pickles
    :return:
    """
    num_groups = len(grouped_files)
    if len(os.listdir(dist_dir)) != num_groups:
        print('Not all files created')
    else:
        print('All files created')
    errors = []
    for idx in range(num_groups):
        with open(os.path.join(dist_dir, '{}.pkl'.format(idx)), 'rb') as infile:
            vals = pickle.load(infile)
        # A mean displacement outside [-1, 2] flags the group as suspect.
        for mean_dist in vals:
            if mean_dist > 2 or mean_dist < -1:
                print(vals)
                errors.append(idx)
                break
    print('Potential errors', len(errors), '/', num_groups)
    print(errors)
def run_all_name_check(docked_prot_file, run_path, raw_root, data_root, decoy_type, grouped_files):
    """
    submits sbatch script to check names of decoys for each protein, target, start group
    :param docked_prot_file: (string) file listing proteins to process
    :param run_path: (string) directory where script and output files will be written
    :param raw_root: (string) directory where raw data will be placed
    :param data_root: (string) pdbbind directory where raw data will be obtained
    :param decoy_type: (string) cartesian_poses, ligand_poses, or conformer_poses
    :param grouped_files: (list) list of protein, target, start groups
    :return:
    """
    for i, group in enumerate(grouped_files):
        # One sbatch job per group; each job re-runs this script in
        # 'group_name_check' mode with its own group index.
        cmd = 'sbatch -p owners -t 1:00:00 -o {} --wrap="$SCHRODINGER/run python3 decoy.py group_name_check {} {} {} {} ' \
              '--index {} --decoy_type {}"'
        os.system(cmd.format(os.path.join(run_path, 'name{}.out'.format(i)), docked_prot_file,
                             run_path, raw_root, data_root, i, decoy_type))
def run_group_name_check(grouped_files, raw_root, index, name_dir, decoy_type, max_poses, max_decoys):
    """
    checks names of decoys for each protein, target, start group
    :param grouped_files: (list) list of protein, target, start groups
    :param raw_root: (string) directory where raw data will be placed
    :param index: (int) group number
    :param name_dir: (string) directory to place unfinished protein, target, start groups
    :param decoy_type: (string) cartesian_poses, ligand_poses, or conformer_poses
    :param max_poses: (int) maximum number of glide poses considered
    :param max_decoys: (int) maximum number of decoys created per glide pose
    :return:
    """
    unfinished = []
    for protein, target, start in grouped_files[index]:
        protein_path = os.path.join(raw_root, protein)
        pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
        pose_path = os.path.join(pair_path, decoy_type)
        for i in range(max_poses):
            lig_file = os.path.join(pose_path, '{}_lig{}.mae'.format(target, i))
            found = False
            with open(lig_file, "r") as f:
                # Each pose file must contain a line matching its own name.
                file_name = lig_file.split('/')[-1]
                for line in f:
                    if line.strip() == file_name:
                        found = True
            if not found:
                print(lig_file)
                unfinished.append((protein, target, start))
                break
    # Bug fix: use a context manager so the pickle file is closed
    # (the original opened the handle and never closed it).
    with open(os.path.join(name_dir, '{}.pkl'.format(index)), 'wb') as outfile:
        pickle.dump(unfinished, outfile)
    # print(unfinished)
def run_check_name_check(process, grouped_files, name_dir):
    """
    check if all name files were created and report unfinished groups
    :param process: (list) list of all protein, target, start
    :param grouped_files: (list) list of protein, target, start groups
    :param name_dir: (string) directory holding the per-group pickles
    :return:
    """
    print(name_dir)
    num_groups = len(grouped_files)
    num_files = len(os.listdir(name_dir))
    if num_files != num_groups:
        print(num_files, num_groups)
        print('Not all files created')
    else:
        print('All files created')
    errors = []
    for idx in range(num_groups):
        with open(os.path.join(name_dir, '{}.pkl'.format(idx)), 'rb') as infile:
            unfinished = pickle.load(infile)
        if len(unfinished) != 0:
            print(idx)
        errors.extend(unfinished)
    print('Errors', len(errors), '/', len(process))
    print(errors)
def run_delete(grouped_files, run_path, raw_root, decoy_type):
    """
    delete all folders in raw_root
    :param grouped_files: (list) list of protein, target, start groups
    :param run_path: (string) directory where script and output files will be written
    :param raw_root: (string) directory where raw data will be placed
    :param decoy_type: (string) pose-type subfolder deleted for each pair
    :return:
    """
    for i, group in enumerate(grouped_files):
        # Write one shell script per group containing the rm commands ...
        with open(os.path.join(run_path, 'delete{}_in.sh'.format(i)), 'w') as f:
            f.write('#!/bin/bash\n')
            for protein, target, start in grouped_files[i]:
                protein_path = os.path.join(raw_root, protein)
                pair_path = os.path.join(protein_path, '{}-to-{}'.format(target, start))
                pose_path = os.path.join(pair_path, decoy_type)
                if os.path.exists(pose_path):
                    f.write('rm -r {}\n'.format(pose_path))
        # ... then submit the script via sbatch from run_path.
        os.chdir(run_path)
        os.system('sbatch -p owners -t 02:00:00 -o delete{}.out delete{}_in.sh'.format(i, i))
def run_update(docked_prot_file, raw_root, new_prot_file, max_poses, decoy_type):
    """
    update index by removing protein, target, start that could not create grids
    :param docked_prot_file: (string) file listing proteins to process
    :param raw_root: (string) directory where raw data will be placed
    :param new_prot_file: (string) name of new prot file
    :param max_poses: (int) maximum number of glide poses considered
    :param decoy_type: (string) decoy type; only 'conformer_poses' is processed
    :return:
    """
    if decoy_type != 'conformer_poses':
        return
    text = []
    with open(docked_prot_file) as fp:
        for line in tqdm(fp, desc='protein, target, start groups'):
            if line[0] == '#': continue
            protein, target, start = line.strip().split()
            pair = '{}-to-{}'.format(target, start)
            protein_path = os.path.join(raw_root, protein)
            pair_path = os.path.join(protein_path, pair)
            pose_path = os.path.join(pair_path, decoy_type)
            # Keep the entry only if every expected pose file exists.
            keep = all(
                os.path.exists(os.path.join(pose_path, '{}_lig{}.mae'.format(target, i)))
                for i in range(max_poses))
            if keep:
                text.append(line)
    # Bug fix: use a context manager so the output file is always closed
    # (the original opened the handle and relied on manual close only).
    with open(new_prot_file, "w") as out_file:
        out_file.writelines(text)
def main():
    """Command-line entry point: parse arguments and dispatch to the requested pipeline task."""
    parser = argparse.ArgumentParser()
    parser.add_argument('task', type=str, help='either all, group, check, '
                                               'all_dist_check, group_dist_check, check_dist_check, '
                                               'all_name_check, group_name_check, check_name_check, or delete')
    parser.add_argument('docked_prot_file', type=str, help='file listing proteins to process')
    parser.add_argument('run_path', type=str, help='directory where script and output files will be written')
    parser.add_argument('raw_root', type=str, help='directory where raw data will be placed')
    parser.add_argument('data_root', type=str, help='pdbbind directory where raw data will be obtained')
    parser.add_argument('--index', type=int, default=-1, help='for group task, group number')
    parser.add_argument('--dist_dir', type=str, default=os.path.join(os.getcwd(), 'dists'),
                        help='for all_dist_check and group_dist_check task, directiory to place distances')
    parser.add_argument('--name_dir', type=str, default=os.path.join(os.getcwd(), 'names'),
                        help='for all_name_check and group_name_check task, directiory to place unfinished protein, '
                             'target, start groups')
    parser.add_argument('--new_prot_file', type=str, default=os.path.join(os.getcwd(), 'index.txt'),
                        help='for update task, name of new prot file')
    parser.add_argument('--n', type=int, default=3, help='number of protein, target, start groups processed in '
                                                         'group task')
    parser.add_argument('--max_poses', type=int, default=100, help='maximum number of poses considered')
    parser.add_argument('--decoy_type', type=str, default='ligand_poses', help='either cartesian_poses, ligand_poses, '
                                                                               'or conformer_poses')
    parser.add_argument('--max_decoys', type=int, default=10, help='maximum number of decoys created per glide pose')
    parser.add_argument('--mean_translation', type=int, default=0, help='mean distance decoys are translated')
    parser.add_argument('--stdev_translation', type=int, default=1, help='stdev of distance decoys are translated')
    parser.add_argument('--min_angle', type=float, default=- np.pi / 6, help='minimum angle decoys are rotated')
    parser.add_argument('--max_angle', type=float, default=np.pi / 6, help='maximum angle decoys are rotated')
    parser.add_argument('--num_conformers', type=int, default=300, help='maximum number of conformers considered')
    parser.add_argument('--grid_size', type=int, default=6, help='grid size in positive and negative x, y, z directions')
    args = parser.parse_args()
    if not os.path.exists(args.run_path):
        os.mkdir(args.run_path)
    # Dispatch on task. Sequential ifs (not elif) preserved from the
    # original; the task strings are mutually exclusive so only one runs.
    if args.task == 'all':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_all(args.docked_prot_file, args.run_path, args.raw_root, args.data_root, grouped_files, args.n,
                args.decoy_type)
    if args.task == 'group':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_group(grouped_files, args.raw_root, args.data_root, args.index, args.max_poses, args.decoy_type,
                  args.max_decoys, args.mean_translation, args.stdev_translation, args.min_angle, args.max_angle,
                  args.num_conformers, args.grid_size)
    if args.task == 'check':
        run_check(args.docked_prot_file, args.raw_root, args.max_poses, args.max_decoys, args.decoy_type)
    if args.task == 'all_dist_check':
        if not os.path.exists(args.dist_dir):
            os.mkdir(args.dist_dir)
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_all_dist_check(args.docked_prot_file, args.run_path, args.raw_root, args.data_root, grouped_files)
    if args.task == 'group_dist_check':
        if not os.path.exists(args.dist_dir):
            os.mkdir(args.dist_dir)
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_group_dist_check(grouped_files, args.raw_root, args.index, args.dist_dir, args.max_poses, args.max_decoys)
    if args.task == 'check_dist_check':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_check_dist_check(grouped_files, args.dist_dir)
    if args.task == 'all_name_check':
        if not os.path.exists(args.name_dir):
            os.mkdir(args.name_dir)
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_all_name_check(args.docked_prot_file, args.run_path, args.raw_root, args.data_root, args.decoy_type, grouped_files)
    if args.task == 'group_name_check':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_group_name_check(grouped_files, args.raw_root, args.index, args.name_dir, args.decoy_type, args.max_poses, args.max_decoys)
    if args.task == 'check_name_check':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_check_name_check(process, grouped_files, args.name_dir)
    if args.task == 'delete':
        process = get_prots(args.docked_prot_file)
        grouped_files = group_files(args.n, process)
        run_delete(grouped_files, args.run_path, args.raw_root, args.decoy_type)
    if args.task == 'update':
        run_update(args.docked_prot_file, args.raw_root, args.new_prot_file, args.max_poses, args.decoy_type)
# Script entry point (extraction residue removed from the original line).
if __name__ == "__main__":
    main()
"schrodinger.structure.StructureWriter",
"numpy.arccos",
"schrodinger.structutils.transform.get_centroid",
"schrodinger.structutils.interactions.steric_clash.clash_volume",
"numpy.sin",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"schrodinger.structutils.transform.rotate_structure",
... | [((9233, 9264), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (9250, 9264), True, 'import numpy as np\n'), ((9277, 9301), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (9294, 9301), True, 'import numpy as np\n'), ((9314, 9333), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (9323, 9333), True, 'import numpy as np\n'), ((9414, 9427), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9420, 9427), True, 'import numpy as np\n'), ((14479, 14490), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14488, 14490), False, 'import os\n'), ((14495, 14515), 'os.chdir', 'os.chdir', (['output_dir'], {}), '(output_dir)\n', (14503, 14515), False, 'import os\n'), ((14531, 14553), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (14547, 14553), False, 'import os\n'), ((15248, 15269), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (15256, 15269), False, 'import os\n'), ((31086, 31112), 'pickle.dump', 'pickle.dump', (['save', 'outfile'], {}), '(save, outfile)\n', (31097, 31112), False, 'import pickle\n'), ((34403, 34435), 'pickle.dump', 'pickle.dump', (['unfinished', 'outfile'], {}), '(unfinished, outfile)\n', (34414, 34435), False, 'import pickle\n'), ((37862, 37887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (37885, 37887), False, 'import argparse\n'), ((4939, 4980), 'schrodinger.structure.StructureWriter', 'structure.StructureWriter', (['structure_file'], {}), '(structure_file)\n', (4964, 4980), True, 'import schrodinger.structure as structure\n'), ((8416, 8443), 'tqdm.tqdm', 'tqdm', (['fp'], {'desc': '"""index file"""'}), "(fp, desc='index file')\n", (8420, 8443), False, 'from tqdm import tqdm\n'), ((9342, 9355), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9348, 9355), True, 'import numpy as np\n'), ((9358, 9369), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (9364, 9369), True, 'import numpy as 
np\n'), ((9378, 9391), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9384, 9391), True, 'import numpy as np\n'), ((9394, 9405), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (9400, 9405), True, 'import numpy as np\n'), ((12425, 12478), 'numpy.random.normal', 'np.random.normal', (['mean_translation', 'stdev_translation'], {}), '(mean_translation, stdev_translation)\n', (12441, 12478), True, 'import numpy as np\n'), ((12487, 12549), 'schrodinger.structutils.transform.translate_structure', 'transform.translate_structure', (['s', '(x * dist)', '(y * dist)', '(z * dist)'], {}), '(s, x * dist, y * dist, z * dist)\n', (12516, 12549), True, 'import schrodinger.structutils.transform as transform\n'), ((12588, 12627), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (12605, 12627), True, 'import numpy as np\n'), ((12646, 12685), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (12663, 12685), True, 'import numpy as np\n'), ((12704, 12743), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (12721, 12743), True, 'import numpy as np\n'), ((12795, 12863), 'schrodinger.structutils.transform.rotate_structure', 'transform.rotate_structure', (['s', 'x_angle', 'y_angle', 'z_angle', 'rot_center'], {}), '(s, x_angle, y_angle, z_angle, rot_center)\n', (12821, 12863), True, 'import schrodinger.structutils.transform as transform\n'), ((13721, 13762), 'schrodinger.structutils.transform.translate_structure', 'transform.translate_structure', (['s', 'x', 'y', 'z'], {}), '(s, x, y, z)\n', (13750, 13762), True, 'import schrodinger.structutils.transform as transform\n'), ((14161, 14226), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'universal_newlines': '(True)', 'shell': '(True)'}), '(cmd, universal_newlines=True, shell=True)\n', (14184, 14226), False, 'import subprocess\n'), ((15953, 15978), 
'random.choice', 'random.choice', (['conformers'], {}), '(conformers)\n', (15966, 15978), False, 'import random\n'), ((16150, 16373), 'schrodinger.structutils.transform.translate_structure', 'transform.translate_structure', (['conformer', '(start_lig_center[0] - conformer_center[0] + grid_loc[0])', '(start_lig_center[1] - conformer_center[1] + grid_loc[1])', '(start_lig_center[2] - conformer_center[2] + grid_loc[2])'], {}), '(conformer, start_lig_center[0] -\n conformer_center[0] + grid_loc[0], start_lig_center[1] -\n conformer_center[1] + grid_loc[1], start_lig_center[2] -\n conformer_center[2] + grid_loc[2])\n', (16179, 16373), True, 'import schrodinger.structutils.transform as transform\n'), ((16533, 16572), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (16550, 16572), True, 'import numpy as np\n'), ((16591, 16630), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (16608, 16630), True, 'import numpy as np\n'), ((16649, 16688), 'numpy.random.uniform', 'np.random.uniform', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (16666, 16688), True, 'import numpy as np\n'), ((16697, 16783), 'schrodinger.structutils.transform.rotate_structure', 'transform.rotate_structure', (['conformer', 'x_angle', 'y_angle', 'z_angle', 'conformer_center'], {}), '(conformer, x_angle, y_angle, z_angle,\n conformer_center)\n', (16723, 16783), True, 'import schrodinger.structutils.transform as transform\n'), ((19613, 19644), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (19625, 19644), False, 'import os\n'), ((19665, 19697), 'os.path.join', 'os.path.join', (['protein_path', 'pair'], {}), '(protein_path, pair)\n', (19677, 19697), False, 'import os\n'), ((19718, 19753), 'os.path.join', 'os.path.join', (['pair_path', 'decoy_type'], {}), '(pair_path, decoy_type)\n', (19730, 19753), False, 'import os\n'), ((24942, 24988), 
'tqdm.tqdm', 'tqdm', (['fp'], {'desc': '"""protein, target, start groups"""'}), "(fp, desc='protein, target, start groups')\n", (24946, 24988), False, 'from tqdm import tqdm\n'), ((30014, 30045), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (30026, 30045), False, 'import os\n'), ((30147, 30186), 'os.path.join', 'os.path.join', (['pair_path', '"""ligand_poses"""'], {}), "(pair_path, 'ligand_poses')\n", (30159, 30186), False, 'import os\n'), ((31694, 31713), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (31705, 31713), False, 'import pickle\n'), ((33660, 33691), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (33672, 33691), False, 'import os\n'), ((33793, 33828), 'os.path.join', 'os.path.join', (['pair_path', 'decoy_type'], {}), '(pair_path, decoy_type)\n', (33805, 33828), False, 'import os\n'), ((35215, 35234), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (35226, 35234), False, 'import pickle\n'), ((36327, 36345), 'os.chdir', 'os.chdir', (['run_path'], {}), '(run_path)\n', (36335, 36345), False, 'import os\n'), ((37105, 37151), 'tqdm.tqdm', 'tqdm', (['fp'], {'desc': '"""protein, target, start groups"""'}), "(fp, desc='protein, target, start groups')\n", (37109, 37151), False, 'from tqdm import tqdm\n'), ((40671, 40700), 'os.path.exists', 'os.path.exists', (['args.run_path'], {}), '(args.run_path)\n', (40685, 40700), False, 'import os\n'), ((40710, 40733), 'os.mkdir', 'os.mkdir', (['args.run_path'], {}), '(args.run_path)\n', (40718, 40733), False, 'import os\n'), ((12770, 12785), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['s'], {}), '(s)\n', (12782, 12785), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((12940, 12977), 'schrodinger.structure.StructureWriter', 'structure.StructureWriter', (['decoy_file'], {}), '(decoy_file)\n', (12965, 12977), True, 'import schrodinger.structure as structure\n'), 
((13839, 13876), 'schrodinger.structure.StructureWriter', 'structure.StructureWriter', (['decoy_file'], {}), '(decoy_file)\n', (13864, 13876), True, 'import schrodinger.structure as structure\n'), ((16011, 16034), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['conformer'], {}), '(conformer)\n', (16023, 16034), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((16470, 16493), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['conformer'], {}), '(conformer)\n', (16482, 16493), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((16792, 16841), 'schrodinger.structutils.interactions.steric_clash.clash_volume', 'steric_clash.clash_volume', (['prot'], {'struc2': 'conformer'}), '(prot, struc2=conformer)\n', (16817, 16841), True, 'import schrodinger.structutils.interactions.steric_clash as steric_clash\n'), ((19972, 19996), 'os.path.exists', 'os.path.exists', (['raw_root'], {}), '(raw_root)\n', (19986, 19996), False, 'import os\n'), ((20010, 20028), 'os.mkdir', 'os.mkdir', (['raw_root'], {}), '(raw_root)\n', (20018, 20028), False, 'import os\n'), ((20044, 20072), 'os.path.exists', 'os.path.exists', (['protein_path'], {}), '(protein_path)\n', (20058, 20072), False, 'import os\n'), ((20086, 20108), 'os.mkdir', 'os.mkdir', (['protein_path'], {}), '(protein_path)\n', (20094, 20108), False, 'import os\n'), ((20124, 20149), 'os.path.exists', 'os.path.exists', (['pair_path'], {}), '(pair_path)\n', (20138, 20149), False, 'import os\n'), ((20163, 20182), 'os.mkdir', 'os.mkdir', (['pair_path'], {}), '(pair_path)\n', (20171, 20182), False, 'import os\n'), ((20198, 20223), 'os.path.exists', 'os.path.exists', (['pose_path'], {}), '(pose_path)\n', (20212, 20223), False, 'import os\n'), ((20237, 20256), 'os.mkdir', 'os.mkdir', (['pose_path'], {}), '(pose_path)\n', (20245, 20256), False, 'import os\n'), ((21188, 21211), 'os.path.exists', 'os.path.exists', (['pv_file'], {}), '(pv_file)\n', (21202, 21211), 
False, 'import os\n'), ((24056, 24077), 'os.listdir', 'os.listdir', (['pose_path'], {}), '(pose_path)\n', (24066, 24077), False, 'import os\n'), ((25194, 25225), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (25206, 25225), False, 'import os\n'), ((25250, 25282), 'os.path.join', 'os.path.join', (['protein_path', 'pair'], {}), '(protein_path, pair)\n', (25262, 25282), False, 'import os\n'), ((25307, 25342), 'os.path.join', 'os.path.join', (['pair_path', 'decoy_type'], {}), '(pair_path, decoy_type)\n', (25319, 25342), False, 'import os\n'), ((30604, 30619), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['s'], {}), '(s)\n', (30616, 30619), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((30984, 31006), 'statistics.mean', 'statistics.mean', (['means'], {}), '(means)\n', (30999, 31006), False, 'import statistics\n'), ((31421, 31441), 'os.listdir', 'os.listdir', (['dist_dir'], {}), '(dist_dir)\n', (31431, 31441), False, 'import os\n'), ((34875, 34895), 'os.listdir', 'os.listdir', (['name_dir'], {}), '(name_dir)\n', (34885, 34895), False, 'import os\n'), ((37330, 37361), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (37342, 37361), False, 'import os\n'), ((37386, 37418), 'os.path.join', 'os.path.join', (['protein_path', 'pair'], {}), '(protein_path, pair)\n', (37398, 37418), False, 'import os\n'), ((37443, 37478), 'os.path.join', 'os.path.join', (['pair_path', 'decoy_type'], {}), '(pair_path, decoy_type)\n', (37455, 37478), False, 'import os\n'), ((41609, 41638), 'os.path.exists', 'os.path.exists', (['args.dist_dir'], {}), '(args.dist_dir)\n', (41623, 41638), False, 'import os\n'), ((41652, 41675), 'os.mkdir', 'os.mkdir', (['args.dist_dir'], {}), '(args.dist_dir)\n', (41660, 41675), False, 'import os\n'), ((41948, 41977), 'os.path.exists', 'os.path.exists', (['args.dist_dir'], {}), '(args.dist_dir)\n', (41962, 41977), False, 'import os\n'), 
((41991, 42014), 'os.mkdir', 'os.mkdir', (['args.dist_dir'], {}), '(args.dist_dir)\n', (41999, 42014), False, 'import os\n'), ((42497, 42526), 'os.path.exists', 'os.path.exists', (['args.name_dir'], {}), '(args.name_dir)\n', (42511, 42526), False, 'import os\n'), ((42540, 42563), 'os.mkdir', 'os.mkdir', (['args.name_dir'], {}), '(args.name_dir)\n', (42548, 42563), False, 'import os\n'), ((11503, 11541), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['new_l1_path'], {}), '(new_l1_path)\n', (11528, 11541), True, 'import schrodinger.structure as structure\n'), ((11554, 11592), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['new_l2_path'], {}), '(new_l2_path)\n', (11579, 11592), True, 'import schrodinger.structure as structure\n'), ((12307, 12342), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['lig_file'], {}), '(lig_file)\n', (12332, 12342), True, 'import schrodinger.structure as structure\n'), ((13612, 13647), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['lig_file'], {}), '(lig_file)\n', (13637, 13647), True, 'import schrodinger.structure as structure\n'), ((16963, 17000), 'schrodinger.structure.StructureWriter', 'structure.StructureWriter', (['decoy_file'], {}), '(decoy_file)\n', (16988, 17000), True, 'import schrodinger.structure as structure\n'), ((22668, 22717), 'os.path.join', 'os.path.join', (['pair_path', '"""aligned_conformers.mae"""'], {}), "(pair_path, 'aligned_conformers.mae')\n", (22680, 22717), False, 'import os\n'), ((26118, 26141), 'os.path.exists', 'os.path.exists', (['pv_file'], {}), '(pv_file)\n', (26132, 26141), False, 'import os\n'), ((30301, 30335), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['pv_file'], {}), '(pv_file)\n', (30326, 30335), True, 'import schrodinger.structure as structure\n'), ((30939, 30961), 'statistics.mean', 'statistics.mean', (['dists'], {}), '(dists)\n', (30954, 30961), False, 'import 
statistics\n'), ((34938, 34958), 'os.listdir', 'os.listdir', (['name_dir'], {}), '(name_dir)\n', (34948, 34958), False, 'import os\n'), ((36027, 36058), 'os.path.join', 'os.path.join', (['raw_root', 'protein'], {}), '(raw_root, protein)\n', (36039, 36058), False, 'import os\n'), ((36176, 36211), 'os.path.join', 'os.path.join', (['pair_path', 'decoy_type'], {}), '(pair_path, decoy_type)\n', (36188, 36211), False, 'import os\n'), ((36231, 36256), 'os.path.exists', 'os.path.exists', (['pose_path'], {}), '(pose_path)\n', (36245, 36256), False, 'import os\n'), ((38745, 38756), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (38754, 38756), False, 'import os\n'), ((38945, 38956), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (38954, 38956), False, 'import os\n'), ((39213, 39224), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (39222, 39224), False, 'import os\n'), ((5778, 5819), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['structure_file'], {}), '(structure_file)\n', (5803, 5819), True, 'import schrodinger.structure as structure\n'), ((21454, 21488), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['pv_file'], {}), '(pv_file)\n', (21479, 21488), True, 'import schrodinger.structure as structure\n'), ((22473, 22496), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['start_lig'], {}), '(start_lig)\n', (22485, 22496), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((22737, 22765), 'os.path.exists', 'os.path.exists', (['aligned_file'], {}), '(aligned_file)\n', (22751, 22765), False, 'import os\n'), ((23163, 23202), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['aligned_file'], {}), '(aligned_file)\n', (23188, 23202), True, 'import schrodinger.structure as structure\n'), ((30548, 30583), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['lig_file'], {}), '(lig_file)\n', (30573, 30583), True, 'import schrodinger.structure as structure\n'), 
((21728, 21763), 'schrodinger.structure.StructureWriter', 'structure.StructureWriter', (['lig_file'], {}), '(lig_file)\n', (21753, 21763), True, 'import schrodinger.structure as structure\n'), ((22291, 22332), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['start_lig_file'], {}), '(start_lig_file)\n', (22316, 22332), True, 'import schrodinger.structure as structure\n'), ((22599, 22635), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['prot_file'], {}), '(prot_file)\n', (22624, 22635), True, 'import schrodinger.structure as structure\n'), ((30785, 30822), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['decoy_file'], {}), '(decoy_file)\n', (30810, 30822), True, 'import schrodinger.structure as structure\n'), ((24174, 24203), 'os.path.join', 'os.path.join', (['pose_path', 'file'], {}), '(pose_path, file)\n', (24186, 24203), False, 'import os\n'), ((26391, 26425), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['pv_file'], {}), '(pv_file)\n', (26416, 26425), True, 'import schrodinger.structure as structure\n'), ((30891, 30910), 'schrodinger.structutils.transform.get_centroid', 'get_centroid', (['decoy'], {}), '(decoy)\n', (30903, 30910), False, 'from schrodinger.structutils.transform import get_centroid\n'), ((21822, 21856), 'schrodinger.structure.StructureReader', 'structure.StructureReader', (['pv_file'], {}), '(pv_file)\n', (21847, 21856), True, 'import schrodinger.structure as structure\n')] |
# -*- coding:utf-8 -*-
"""
This file generates description of model and
intermediate images of extracted features
"""
import keras
import sys
import json
import numpy as np
from PIL import Image
import os
# Directory containing this script; used as the root for all saved images.
base_path = os.path.dirname(os.path.abspath(__file__))
# CLI: argv[1]=normalized inputs (.npz), argv[2]=test inputs, argv[3]=test labels,
# argv[4]=saved Keras model file (see below).
x_norm_file = sys.argv[1]
x_test_file = sys.argv[2]
y_test_file = sys.argv[3]
# Each .npz is expected to hold a single unnamed array under key "arr_0".
x_norm = np.load(x_norm_file)["arr_0"]
x_test = np.load(x_test_file)["arr_0"]
y_test = np.load(y_test_file)["arr_0"]
model_file = sys.argv[4]
print("py script get model file path [{}]".format(model_file))
model = keras.models.load_model(model_file)
total_num_layers = len(model.layers)
total_num_units = 0
# Count units as the product of each layer's output dimensions (batch dim excluded).
for i in range(len(model.layers)):
    total_num_units += np.prod(model.layers[i].output_shape[1:])
model.summary()
print("total layers={}, num params={}, num units={}".format(total_num_layers, model.count_params(), total_num_units))
model_layers_info=[]
# desc each layer of model
# layer name, output shape, params
for layer in model.layers:
    # InputLayer's output_shape is taken from the model's input_shape instead.
    if layer.__class__.__name__ == "InputLayer":
        model_layers_info.append({"name":layer.__class__.__name__, "shape":model.input_shape[1:], "params":layer.count_params()})
    else:
        model_layers_info.append({"name":layer.__class__.__name__, "shape":layer.output_shape[1:], "params":layer.count_params()})
# save basic info
with open("model_general_info.json","w+",encoding="utf-8") as f:
    f.write(json.dumps({"total_num_layers":total_num_layers,"total_params":model.count_params(), "total_units":int(total_num_units)}))
# save info of each layer
with open("model_layers_info.json","w+",encoding="utf-8") as f:
    f.write(json.dumps(model_layers_info))
# visualize the conv and activation layers
layer_vis_data=[]
idx=0
max_vis_each_layer=5  # cap on how many feature-map channels are rendered per layer
for layer in model.layers:
    idx +=1
    if layer.__class__.__name__ == "Conv2D" or layer.__class__.__name__ == "Activation":
        # Build a sub-model up to this layer and run the first normalized sample
        # through it to obtain that layer's activations.
        layer_output = keras.models.Model(inputs=model.input, outputs=layer.output).predict(x_norm[0:1])
        layer_output = layer_output[0]
        print("shape of layer output={}".format(np.shape(layer_output)))
        layer_vis_img_paths=[]
        # Render at most max_vis_each_layer channels as 32x32 grayscale PNGs.
        for i in range(min(np.shape(layer_output)[-1], max_vis_each_layer)):
            # Scale activations to [0, 255]; assumes activations lie in [0, 1] — TODO confirm.
            img = np.array(layer_output[:,:,i],dtype="float32")*255.0
            img = np.array(img, dtype="uint8")
            img = Image.fromarray(img)
            img = img.resize((32,32))
            # NOTE(review): "layer_vis_imgs" must already exist or save() fails — verify.
            img.save(os.path.join(base_path,"layer_vis_imgs", "layer_{}_{}_vis.png".format(idx,i)))
            layer_vis_img_paths.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), \
                "layer_vis_imgs", "layer_{}_{}_vis.png".format(idx,i)))
        layer_vis_data.append({"layer_name": layer.__class__.__name__, "layer_index":idx, \
            "layer_vis_img_paths": layer_vis_img_paths})
# save layer images info
with open("layer_vis_info.json","w+") as f:
    f.write(json.dumps(layer_vis_data))
# extract info for model vis
conv_sizes=[]
conv_channels=[]
kernel_sizes=[]
dense_sizes=[]
model.summary()
for layer in model.layers:
    if layer.__class__.__name__ == "InputLayer":
        # InputLayer.output_shape is a list of one tuple, hence the extra [0].
        conv_sizes.append(layer.output_shape[0][1:-1])
        conv_channels.append(layer.output_shape[0][-1])
    elif layer.__class__.__name__ == "Conv2D":
        conv_sizes.append(layer.output_shape[1:-1])
        conv_channels.append(layer.output_shape[-1])
        kernel_sizes.append(layer.kernel_size)
        print("layer name={}, output shape={}, channel size={}, kernel_size={}".format(layer.__class__.__name__,\
            layer.output_shape[1:-1],layer.output_shape[-1],layer.kernel_size))
    elif layer.__class__.__name__ == "AveragePooling2D":
        # Pooling layers are drawn like conv layers, using pool_size as "kernel".
        conv_sizes.append(layer.output_shape[1:-1])
        conv_channels.append(layer.output_shape[-1])
        kernel_sizes.append(layer.pool_size)
        print("layer name={}, output shape={}, channel size={}, pool_size={}".format(layer.__class__.__name__,\
            layer.output_shape[1:-1],layer.output_shape[-1],layer.pool_size))
    elif layer.__class__.__name__ == "Flatten":
        dense_sizes.append(layer.output_shape[-1])
    elif layer.__class__.__name__ == "Dense":
        dense_sizes.append(layer.output_shape[-1])
        print("layer size={}".format(layer.output_shape[-1]))
# draw model graph
print("convsize list={}, conv num list={}, kernel size list={}, dense size list={}".format(conv_sizes, conv_channels, kernel_sizes, dense_sizes))
import draw_convnet
draw_convnet.run_draw(conv_size_list=conv_sizes,
    conv_num_list=conv_channels,
    kernel_size_list=kernel_sizes,
    dense_size_list=dense_sizes,
    save_fig_path=os.path.join(base_path, "ann_model_vis.png"))
model_vis_img_info = {"model_vis_img_path":os.path.join(base_path, "ann_model_vis.png")}
with open("model_vis_info.json","w+") as f:
    f.write(json.dumps(model_vis_img_info))
| [
"numpy.prod",
"PIL.Image.fromarray",
"keras.models.load_model",
"json.dumps",
"os.path.join",
"numpy.array",
"keras.models.Model",
"os.path.abspath",
"numpy.shape",
"numpy.load"
] | [((557, 592), 'keras.models.load_model', 'keras.models.load_model', (['model_file'], {}), '(model_file)\n', (580, 592), False, 'import keras\n'), ((235, 260), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (250, 260), False, 'import os\n'), ((352, 372), 'numpy.load', 'np.load', (['x_norm_file'], {}), '(x_norm_file)\n', (359, 372), True, 'import numpy as np\n'), ((391, 411), 'numpy.load', 'np.load', (['x_test_file'], {}), '(x_test_file)\n', (398, 411), True, 'import numpy as np\n'), ((430, 450), 'numpy.load', 'np.load', (['y_test_file'], {}), '(y_test_file)\n', (437, 450), True, 'import numpy as np\n'), ((710, 751), 'numpy.prod', 'np.prod', (['model.layers[i].output_shape[1:]'], {}), '(model.layers[i].output_shape[1:])\n', (717, 751), True, 'import numpy as np\n'), ((4805, 4849), 'os.path.join', 'os.path.join', (['base_path', '"""ann_model_vis.png"""'], {}), "(base_path, 'ann_model_vis.png')\n", (4817, 4849), False, 'import os\n'), ((1644, 1673), 'json.dumps', 'json.dumps', (['model_layers_info'], {}), '(model_layers_info)\n', (1654, 1673), False, 'import json\n'), ((2914, 2940), 'json.dumps', 'json.dumps', (['layer_vis_data'], {}), '(layer_vis_data)\n', (2924, 2940), False, 'import json\n'), ((4715, 4759), 'os.path.join', 'os.path.join', (['base_path', '"""ann_model_vis.png"""'], {}), "(base_path, 'ann_model_vis.png')\n", (4727, 4759), False, 'import os\n'), ((4907, 4937), 'json.dumps', 'json.dumps', (['model_vis_img_info'], {}), '(model_vis_img_info)\n', (4917, 4937), False, 'import json\n'), ((2307, 2335), 'numpy.array', 'np.array', (['img'], {'dtype': '"""uint8"""'}), "(img, dtype='uint8')\n", (2315, 2335), True, 'import numpy as np\n'), ((2354, 2374), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2369, 2374), False, 'from PIL import Image\n'), ((1917, 1977), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'model.input', 'outputs': 'layer.output'}), '(inputs=model.input, outputs=layer.output)\n', 
(1935, 1977), False, 'import keras\n'), ((2086, 2108), 'numpy.shape', 'np.shape', (['layer_output'], {}), '(layer_output)\n', (2094, 2108), True, 'import numpy as np\n'), ((2237, 2285), 'numpy.array', 'np.array', (['layer_output[:, :, i]'], {'dtype': '"""float32"""'}), "(layer_output[:, :, i], dtype='float32')\n", (2245, 2285), True, 'import numpy as np\n'), ((2169, 2191), 'numpy.shape', 'np.shape', (['layer_output'], {}), '(layer_output)\n', (2177, 2191), True, 'import numpy as np\n'), ((2581, 2606), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2596, 2606), False, 'import os\n')] |
#===============================================================================
# Copyright 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from abc import ABCMeta, abstractmethod
from enum import Enum
import sys
from numbers import Number, Integral
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse as sp
from ..datatypes import (
_check_X_y,
_check_array,
_column_or_1d,
_check_n_features,
_check_classification_targets,
_num_samples
)
from onedal import _backend
from ..common._mixin import ClassifierMixin, RegressorMixin
from ..common._policy import _get_policy
from ..common._estimator_checks import _check_is_fitted, _is_classifier, _is_regressor
from ..datatypes._data_conversion import from_table, to_table
class NeighborsCommonBase(metaclass=ABCMeta):
    """Shared validation and parameter-mapping helpers for oneDAL
    nearest-neighbors estimators."""

    def _parse_auto_method(self, method, n_samples, n_features):
        """Resolve 'auto'/'ball_tree' to a concrete backend search method.

        Any other value of ``method`` is returned unchanged.
        """
        if method not in ('auto', 'ball_tree'):
            return method
        # Brute force wins when many neighbors are requested, the feature
        # space is wide, or distances are precomputed.
        many_neighbors = (self.n_neighbors is not None
                          and self.n_neighbors >= n_samples // 2)
        if self.metric == 'precomputed' or n_features > 11 or many_neighbors:
            return 'brute'
        # kd-tree is only supported for the Euclidean metric.
        return 'kd_tree' if self.metric == 'euclidean' else 'brute'

    def _validate_data(self, X, y=None, reset=True,
                       validate_separately=False, **check_params):
        """Validate X (and optionally y), then record/verify n_features_in_.

        Returns the validated ``(X, y)`` pair; ``y`` stays None when absent.
        """
        if y is None:
            if self.requires_y:
                raise ValueError(
                    f"This {self.__class__.__name__} estimator "
                    f"requires y to be passed, but the target y is None."
                )
            X = _check_array(X, **check_params)
        elif validate_separately:
            # Some estimators validate X and y with different options, so a
            # single _check_X_y call would not be equivalent.
            check_X_params, check_y_params = validate_separately
            X = _check_array(X, **check_X_params)
            y = _check_array(y, **check_y_params)
        else:
            X, y = _check_X_y(X, y, **check_params)
        if check_params.get('ensure_2d', True):
            _check_n_features(self, X, reset=reset)
        return X, y

    def _get_weights(self, dist, weights):
        """Convert a distance matrix into vote weights.

        Returns None for uniform weighting, inverse distances for
        'distance' (zero distance maps to weight 1, others in that row
        to 0), or ``weights(dist)`` for a callable.
        """
        if weights in (None, "uniform"):
            return None
        if weights == "distance":
            if dist.dtype is np.dtype(object):
                # Object arrays hold per-row sequences (possibly ragged);
                # handle each row on its own.
                for row_idx, row in enumerate(dist):
                    if hasattr(row, "__contains__") and 0.0 in row:
                        # Exact matches dominate: weight only zero-distance
                        # training points.
                        dist[row_idx] = row == 0.0
                    else:
                        dist[row_idx] = 1.0 / row
            else:
                with np.errstate(divide="ignore"):
                    dist = 1.0 / dist
                # Rows containing an inf (zero distance) keep only those
                # entries; the boolean mask casts to 1.0/0.0.
                inf_mask = np.isinf(dist)
                inf_row = np.any(inf_mask, axis=1)
                dist[inf_row] = inf_mask[inf_row]
            return dist
        if callable(weights):
            return weights(dist)
        raise ValueError(
            "weights not recognized: should be 'uniform', "
            "'distance', or a callable function"
        )

    def _get_onedal_params(self, data):
        """Assemble the oneDAL kNN parameter dict from estimator state."""
        n_classes = len(self.classes_) if self.classes_ is not None else 0
        vote = getattr(self, 'weights', 'uniform')
        fptype = 'float' if data.dtype is np.dtype('float32') else 'double'
        return {
            'fptype': fptype,
            'vote_weights': 'uniform' if vote == 'uniform' else 'distance',
            'method': self._fit_method,
            'radius': self.radius,
            'class_count': n_classes,
            'neighbor_count': self.n_neighbors,
            'metric': self.effective_metric_,
            'p': self.p,
            'metric_params': self.effective_metric_params_,
            'result_option': 'indices|distances',
        }
class NeighborsBase(NeighborsCommonBase, metaclass=ABCMeta):
    """Base class holding kNN hyperparameters and the shared fit /
    kneighbors implementation backed by oneDAL."""

    def __init__(self, n_neighbors=None, radius=None,
                 algorithm='auto', metric='minkowski',
                 p=2, metric_params=None):
        # Store hyperparameters verbatim; validation happens in _fit.
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.algorithm = algorithm
        self.metric = metric
        self.p = p
        self.metric_params = metric_params

    def _validate_targets(self, y, dtype):
        # Flatten y to 1d and try to cast to X's dtype; keep the original
        # dtype when the cast is impossible (e.g. string class labels).
        arr = _column_or_1d(y, warn=True)

        try:
            return arr.astype(dtype, copy=False)
        except ValueError:
            return arr

    def _validate_n_classes(self):
        # Classification requires at least two distinct classes.
        if len(self.classes_) < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d"
                " class" % len(self.classes_))

    def _fit(self, X, y, queue):
        """Validate inputs, encode targets, and train the oneDAL model.

        Returns self; the trained backend model is kept in _onedal_model.
        """
        self._onedal_model = None
        self._tree = None
        self.shape = None
        self.classes_ = None
        # Keep previously resolved metric settings if a prior fit set them.
        self.effective_metric_ = getattr(self, 'effective_metric_', self.metric)
        self.effective_metric_params_ = getattr(
            self, 'effective_metric_params_', self.metric_params)

        if y is not None or self.requires_y:
            X, y = super()._validate_data(X, y, dtype=[np.float64, np.float32])
            self.shape = y.shape

            if _is_classifier(self) or _is_regressor(self):
                # Normalize y to 2d (n_samples, n_outputs) for encoding.
                if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
                    self.outputs_2d_ = False
                    y = y.reshape((-1, 1))
                else:
                    self.outputs_2d_ = True

                if _is_classifier(self):
                    _check_classification_targets(y)
                # Encode each output column as integer class indices and
                # remember the class labels per column.
                self.classes_ = []
                self._y = np.empty(y.shape, dtype=int)
                for k in range(self._y.shape[1]):
                    classes, self._y[:, k] = np.unique(
                        y[:, k], return_inverse=True)
                    self.classes_.append(classes)

                if not self.outputs_2d_:
                    # Collapse the single-output case back to 1d.
                    self.classes_ = self.classes_[0]
                    self._y = self._y.ravel()
                self._validate_n_classes()
            else:
                self._y = y
        else:
            # Unsupervised: only X is validated.
            X, _ = super()._validate_data(X, dtype=[np.float64, np.float32])

        self.n_samples_fit_ = X.shape[0]
        self.n_features_in_ = X.shape[1]
        self._fit_X = X
        if self.n_neighbors is not None:
            if self.n_neighbors <= 0:
                raise ValueError(
                    "Expected n_neighbors > 0. Got %d" %
                    self.n_neighbors
                )
            if not isinstance(self.n_neighbors, Integral):
                raise TypeError(
                    "n_neighbors does not take %s value, "
                    "enter integer value" %
                    type(self.n_neighbors))

        self._fit_method = super()._parse_auto_method(
            self.algorithm,
            self.n_samples_fit_, self.n_features_in_)

        # Classifiers train on the label-encoded targets cast to X's dtype.
        if _is_classifier(self) and y.dtype != X.dtype:
            y = self._validate_targets(self._y, X.dtype).reshape((-1, 1))
        result = self._onedal_fit(X, y, queue)

        if y is not None and _is_regressor(self):
            self._y = y if self.shape is None else y.reshape(self.shape)

        self._onedal_model = result.model
        result = self

        return result

    def _kneighbors(self, X=None, n_neighbors=None,
                    return_distance=True, queue=None):
        """Find the k nearest neighbors of X (or of the training set when
        X is None, excluding each sample itself).

        Returns (distances, indices) or just indices, depending on
        return_distance.
        """
        n_features = getattr(self, 'n_features_in_', None)
        shape = getattr(X, 'shape', None)
        if n_features and shape and len(shape) > 1 and shape[1] != n_features:
            raise ValueError((f'X has {X.shape[1]} features, '
                              f'but kneighbors is expecting '
                              f'{n_features} features as input'))

        _check_is_fitted(self)

        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        elif n_neighbors <= 0:
            raise ValueError(
                "Expected n_neighbors > 0. Got %d" %
                n_neighbors
            )
        else:
            if not isinstance(n_neighbors, Integral):
                raise TypeError(
                    "n_neighbors does not take %s value, "
                    "enter integer value" %
                    type(n_neighbors))

        if X is not None:
            query_is_train = False
            X = _check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        else:
            query_is_train = True
            X = self._fit_X
            # Include an extra neighbor to account for the sample itself being
            # returned, which is removed later
            n_neighbors += 1
            # NOTE(review): this overwrites the estimator's configured
            # n_neighbors as a side effect — confirm this is intended.
            self.n_neighbors = n_neighbors

        n_samples_fit = self.n_samples_fit_
        if n_neighbors > n_samples_fit:
            raise ValueError(
                "Expected n_neighbors <= n_samples, "
                " but n_samples = %d, n_neighbors = %d" %
                (n_samples_fit, n_neighbors)
            )

        chunked_results = None
        method = super()._parse_auto_method(
            self._fit_method, self.n_samples_fit_, n_features)

        params = super()._get_onedal_params(X)
        prediction_results = self._onedal_predict(
            self._onedal_model, X, params, queue=queue)

        distances = from_table(prediction_results.distances)
        indices = from_table(prediction_results.indices)

        if method == 'kd_tree':
            # kd-tree results are not guaranteed sorted; sort each row by
            # distance and reorder indices to match.
            for i in range(distances.shape[0]):
                seq = distances[i].argsort()
                indices[i] = indices[i][seq]
                distances[i] = distances[i][seq]

        if return_distance:
            results = distances, indices
        else:
            results = indices

        if chunked_results is not None:
            if return_distance:
                neigh_dist, neigh_ind = zip(*chunked_results)
                results = np.vstack(neigh_dist), np.vstack(neigh_ind)
            else:
                results = np.vstack(chunked_results)

        if not query_is_train:
            return results

        # If the query data is the same as the indexed data, we would like
        # to ignore the first nearest neighbor of every sample, i.e
        # the sample itself.
        distances = distances[:, 1:]
        indices = indices[:, 1:]

        if return_distance:
            neigh_dist, neigh_ind = results
        else:
            neigh_ind = results

        n_queries, _ = X.shape
        sample_range = np.arange(n_queries)[:, None]
        sample_mask = neigh_ind != sample_range

        # Corner case: When the number of duplicates are more
        # than the number of neighbors, the first NN will not
        # be the sample, but a duplicate.
        # In that case mask the first duplicate.
        dup_gr_nbrs = np.all(sample_mask, axis=1)
        sample_mask[:, 0][dup_gr_nbrs] = False

        neigh_ind = np.reshape(
            neigh_ind[sample_mask], (n_queries, n_neighbors - 1))

        if return_distance:
            neigh_dist = np.reshape(
                neigh_dist[sample_mask], (n_queries, n_neighbors - 1))
            return neigh_dist, neigh_ind
        return neigh_ind
class KNeighborsClassifier(NeighborsBase, ClassifierMixin):
    """k-nearest-neighbors voting classifier executed through the oneDAL backend."""

    def __init__(self, n_neighbors=5, *,
                 weights='uniform', algorithm='auto',
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            metric=metric, p=p,
            metric_params=metric_params,
            **kwargs)
        self.weights = weights

    def _get_onedal_params(self, data):
        # Classification additionally requests the predicted class responses.
        onedal_params = super()._get_onedal_params(data)
        onedal_params['result_option'] = 'responses'
        return onedal_params

    def _onedal_fit(self, X, y, queue):
        # Train a oneDAL classification model on the (X, y) tables.
        policy = _get_policy(queue, X, y)
        fit_params = self._get_onedal_params(X)
        return _backend.neighbors.classification.train(
            policy, fit_params, *to_table(X, y))

    def _onedal_predict(self, model, X, params, queue):
        # A model already attached to this estimator takes precedence over
        # the `model` argument, matching the fit/predict call sequence.
        policy = _get_policy(queue, X)
        if hasattr(self, '_onedal_model'):
            backend_model = self._onedal_model
        else:
            backend_model = self._create_model(_backend.neighbors.classification)
        return _backend.neighbors.classification.infer(
            policy, params, backend_model, to_table(X))

    def fit(self, X, y, queue=None):
        """Fit the classifier; delegates to the shared NeighborsBase machinery."""
        return super()._fit(X, y, queue=queue)

    def predict(self, X, queue=None):
        """Predict class labels for the samples in X."""
        X = _check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        fitted_model = getattr(self, '_onedal_model', None)
        n_features = getattr(self, 'n_features_in_', None)
        n_samples_fit_ = getattr(self, 'n_samples_fit_', None)
        x_shape = getattr(X, 'shape', None)
        # Reject feature-count mismatches before touching the backend.
        if n_features and x_shape and len(x_shape) > 1 and x_shape[1] != n_features:
            raise ValueError((f'X has {X.shape[1]} features, '
                              f'but KNNClassifier is expecting '
                              f'{n_features} features as input'))
        _check_is_fitted(self)
        self._fit_method = super()._parse_auto_method(
            self.algorithm,
            n_samples_fit_, n_features)
        self._validate_n_classes()
        params = self._get_onedal_params(X)
        prediction_result = self._onedal_predict(fitted_model, X, params, queue=queue)
        responses = from_table(prediction_result.responses)
        # Map the numeric backend responses back onto the original labels.
        return self.classes_.take(
            np.asarray(responses.ravel(), dtype=np.intp))

    def predict_proba(self, X, queue=None):
        """Probability estimates for X from (optionally weighted) neighbor votes."""
        distances, indices = self.kneighbors(X, queue=queue)
        class_sets = self.classes_
        targets = self._y
        if not self.outputs_2d_:
            targets = self._y.reshape((-1, 1))
            class_sets = [self.classes_]
        n_queries = _num_samples(X)
        vote_weights = self._get_weights(distances, self.weights)
        if vote_weights is None:
            vote_weights = np.ones_like(indices)
        query_rows = np.arange(n_queries)
        probabilities = []
        for k, classes_k in enumerate(class_sets):
            neighbor_labels = targets[:, k][indices]
            votes = np.zeros((n_queries, classes_k.size))
            # Scatter-add weighted votes one neighbor column at a time;
            # a plain ':' index would not accumulate duplicates correctly.
            for i, column in enumerate(neighbor_labels.T):  # loop is O(n_neighbors)
                votes[query_rows, column] += vote_weights[:, i]
            # Turn raw vote totals into [0, 1] probabilities, guarding
            # all-zero rows against division by zero.
            row_totals = votes.sum(axis=1)[:, np.newaxis]
            row_totals[row_totals == 0.0] = 1.0
            votes /= row_totals
            probabilities.append(votes)
        if not self.outputs_2d_:
            probabilities = probabilities[0]
        return probabilities

    def kneighbors(self, X=None, n_neighbors=None,
                   return_distance=True, queue=None):
        """Find the n_neighbors nearest neighbors of X (or of the training set)."""
        return super()._kneighbors(X, n_neighbors, return_distance, queue=queue)
class NearestNeighbors(NeighborsBase):
    """Unsupervised nearest-neighbor search running on the oneDAL backend."""

    def __init__(self, n_neighbors=5, *,
                 weights='uniform', algorithm='auto',
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            metric=metric, p=p,
            metric_params=metric_params,
            **kwargs)
        self.weights = weights

    def _get_onedal_params(self, data):
        # Search mode returns both neighbor indices and distances.
        search_params = super()._get_onedal_params(data)
        search_params['result_option'] = 'indices|distances'
        return search_params

    def _onedal_fit(self, X, y, queue):
        # `y` participates only in policy creation; search training is X-only.
        policy = _get_policy(queue, X, y)
        search_params = self._get_onedal_params(X)
        return _backend.neighbors.search.train(policy, search_params, to_table(X))

    def _onedal_predict(self, model, X, params, queue):
        # An already-fitted model attached to the estimator wins over `model`.
        policy = _get_policy(queue, X)
        if hasattr(self, '_onedal_model'):
            backend_model = self._onedal_model
        else:
            backend_model = self._create_model(_backend.neighbors.search)
        return _backend.neighbors.search.infer(policy, params, backend_model, to_table(X))

    def fit(self, X, y, queue=None):
        """Build the search structure; delegates to NeighborsBase."""
        return super()._fit(X, y, queue=queue)

    def kneighbors(self, X=None, n_neighbors=None,
                   return_distance=True, queue=None):
        """Return the n_neighbors nearest neighbors (distances optional)."""
        return super()._kneighbors(X, n_neighbors, return_distance, queue=queue)
| [
"numpy.dtype",
"numpy.ones_like",
"numpy.reshape",
"numpy.unique",
"numpy.any",
"numpy.errstate",
"numpy.zeros",
"numpy.empty",
"numpy.vstack",
"numpy.all",
"numpy.isinf",
"numpy.arange"
] | [((12504, 12531), 'numpy.all', 'np.all', (['sample_mask'], {'axis': '(1)'}), '(sample_mask, axis=1)\n', (12510, 12531), True, 'import numpy as np\n'), ((12603, 12667), 'numpy.reshape', 'np.reshape', (['neigh_ind[sample_mask]', '(n_queries, n_neighbors - 1)'], {}), '(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))\n', (12613, 12667), True, 'import numpy as np\n'), ((15967, 15987), 'numpy.arange', 'np.arange', (['n_queries'], {}), '(n_queries)\n', (15976, 15987), True, 'import numpy as np\n'), ((12181, 12201), 'numpy.arange', 'np.arange', (['n_queries'], {}), '(n_queries)\n', (12190, 12201), True, 'import numpy as np\n'), ((12739, 12804), 'numpy.reshape', 'np.reshape', (['neigh_dist[sample_mask]', '(n_queries, n_neighbors - 1)'], {}), '(neigh_dist[sample_mask], (n_queries, n_neighbors - 1))\n', (12749, 12804), True, 'import numpy as np\n'), ((15921, 15944), 'numpy.ones_like', 'np.ones_like', (['neigh_ind'], {}), '(neigh_ind)\n', (15933, 15944), True, 'import numpy as np\n'), ((16136, 16173), 'numpy.zeros', 'np.zeros', (['(n_queries, classes_k.size)'], {}), '((n_queries, classes_k.size))\n', (16144, 16173), True, 'import numpy as np\n'), ((3611, 3627), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (3619, 3627), True, 'import numpy as np\n'), ((4271, 4285), 'numpy.isinf', 'np.isinf', (['dist'], {}), '(dist)\n', (4279, 4285), True, 'import numpy as np\n'), ((4313, 4337), 'numpy.any', 'np.any', (['inf_mask'], {'axis': '(1)'}), '(inf_mask, axis=1)\n', (4319, 4337), True, 'import numpy as np\n'), ((7201, 7229), 'numpy.empty', 'np.empty', (['y.shape'], {'dtype': 'int'}), '(y.shape, dtype=int)\n', (7209, 7229), True, 'import numpy as np\n'), ((11663, 11689), 'numpy.vstack', 'np.vstack', (['chunked_results'], {}), '(chunked_results)\n', (11672, 11689), True, 'import numpy as np\n'), ((4174, 4202), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (4185, 4202), True, 'import numpy as np\n'), ((4899, 4918), 
'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4907, 4918), True, 'import numpy as np\n'), ((7327, 7366), 'numpy.unique', 'np.unique', (['y[:, k]'], {'return_inverse': '(True)'}), '(y[:, k], return_inverse=True)\n', (7336, 7366), True, 'import numpy as np\n'), ((11573, 11594), 'numpy.vstack', 'np.vstack', (['neigh_dist'], {}), '(neigh_dist)\n', (11582, 11594), True, 'import numpy as np\n'), ((11596, 11616), 'numpy.vstack', 'np.vstack', (['neigh_ind'], {}), '(neigh_ind)\n', (11605, 11616), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from classy import Class
import pickle
import sys,os
import astropy
from astropy.cosmology import Planck15
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib import rc
# Configure matplotlib to render all text through LaTeX with a Helvetica
# sans-serif face (affects every figure produced by this module).
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
from scipy import interpolate
from scipy import integrate
from scipy import special
from scipy.signal import argrelextrema
class CoflexTwopoint:
    """Real-space two-point correlation functions for cosmic flexion.

    Hankel-transforms the tabulated flexion power spectrum ``P_F`` and the
    convergence-flexion cross spectrum ``P_kappa_F`` into the
    flexion-flexion and shear-flexion correlation functions, and pickles
    the results under ``<survey>/<survey>_Theory/``.

    Parameters
    ----------
    coflex_power : mapping
        Must provide 'ell', 'P_F' and 'P_kappa_F' arrays.
    survey : str-like
        Survey name, used to build the output paths.
    bin_combo : str-like
        Tomographic bin-combination label, used in output filenames.
    """

    # Bessel-function order and overall algebraic sign of each correlation.
    _FLEXFLEX_ORDER_SIGN = {
        'FF_plus': (0, +1), 'FF_minus': (2, -1),
        'FG_plus': (2, +1), 'FG_minus': (4, -1),
        'GG_plus': (0, +1), 'GG_minus': (6, -1),
    }
    _SHEARFLEX_ORDER_SIGN = {
        'gamF_plus': (1, -1), 'gamF_minus': (3, +1),
        'Ggam_plus': (1, -1), 'Ggam_minus': (5, -1),
    }

    def __init__(self, coflex_power, survey, bin_combo):
        self.l_list = coflex_power['ell']
        self.P_F_list = coflex_power['P_F']
        self.P_kappa_F_list = coflex_power['P_kappa_F']
        self.survey = str(survey)
        self.bin_combo = str(bin_combo)

    def getTwoPoint(self):
        """Compute all two-point correlation functions and pickle them."""
        # Build callable interpolants for the tabulated power spectra.
        self.interpolateArrays()
        # Flexion-flexion correlations:
        # FIX: this assignment was missing, so `theta_flexflex_list` was an
        # undefined name when used below (the shear-flexion branch already
        # fetched its own theta list this way).
        theta_flexflex_list = self.theta_flexflex_list()
        # .. F-F autocorrelation
        xi_FF_plus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_plus')
        xi_FF_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_minus')
        # .. F-G cross-correlation. Note: xi_FG_plus = -xi_FF_minus
        xi_FG_plus = [-x for x in xi_FF_minus]
        xi_FG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FG_minus')
        # .. G-G cross correlation. Note: xi_GG_plus = xi_FF_plus
        xi_GG_plus = xi_FF_plus
        xi_GG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'GG_minus')
        # .. Export flexion-flexion correlation functions to .pkl file
        col_list = ['theta', 'xi_FF_plus', 'xi_FF_minus', 'xi_FG_plus', 'xi_FG_minus', 'xi_GG_plus', 'xi_GG_minus']
        arrs = [theta_flexflex_list, xi_FF_plus, xi_FF_minus, xi_FG_plus, xi_FG_minus, xi_GG_plus, xi_GG_minus]
        dat = dict(zip(col_list, arrs))
        out_frame = pd.DataFrame(data=dat, columns=col_list)
        out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/flexion-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
        # Shear-flexion correlations:
        theta_shearflex_list = self.theta_shearflex_list()
        # .. gam-F cross-correlation
        xi_gamF_plus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_plus')
        xi_gamF_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_minus')
        # .. G-gam cross-correlation. Note: xi_Ggam_plus = xi_gamF_plus
        # (the original comment claimed xi_gamF_minus, but the code has
        # always assigned xi_gamF_plus; the comment now matches the code).
        xi_Ggam_plus = xi_gamF_plus
        xi_Ggam_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'Ggam_minus')
        # .. Export shear-flexion correlation functions to .pkl file
        col_list = ['theta', 'xi_gamF_plus', 'xi_gamF_minus', 'xi_Ggam_plus', 'xi_Ggam_minus']
        arrs = [theta_shearflex_list, xi_gamF_plus, xi_gamF_minus, xi_Ggam_plus, xi_Ggam_minus]
        dat = dict(zip(col_list, arrs))
        out_frame = pd.DataFrame(data=dat, columns=col_list)
        out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/shear-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')

    def _log_theta_list(self, theta_min, theta_max, N_theta, unit):
        """Log-spaced angular separations with the original bin convention.

        The list is first laid out between theta_min and theta_max, then
        re-spanned so its last element sits at the upper edge of the last
        logarithmic bin (reproducing the previous two-pass construction).
        """
        log_min = np.log10(theta_min)
        log_max = np.log10(theta_max)
        theta_list = np.logspace(log_min, log_max, N_theta)
        dlog = np.log10(theta_list[1]) - np.log10(theta_list[0])
        log_upper_edge = np.log10(theta_list[-1]) + dlog / 2
        theta_list = np.logspace(log_min, log_upper_edge, N_theta)
        return theta_list * unit

    def theta_flexflex_list(self, theta_min=1, theta_max=100, N_theta=100):
        """Angular separations (arcsec) for flexion-flexion correlations."""
        return self._log_theta_list(theta_min, theta_max, N_theta, u.arcsec)

    def theta_shearflex_list(self, theta_min=1/60, theta_max=10., N_theta=100):
        """Angular separations (arcmin) for shear-flexion correlations."""
        return self._log_theta_list(theta_min, theta_max, N_theta, u.arcmin)

    def interpolateArrays(self):
        """Build linear interpolants over the tabulated power spectra."""
        self.P_F_interpolate = interpolate.interp1d(self.l_list, self.P_F_list)
        self.P_kappa_F_interpolate = interpolate.interp1d(self.l_list, self.P_kappa_F_list)

    def P_F(self, ell):
        """Interpolated flexion power spectrum at multipole ell."""
        return self.P_F_interpolate(ell)

    def P_kappa_F(self, ell):
        """Interpolated convergence-flexion cross spectrum at multipole ell."""
        return self.P_kappa_F_interpolate(ell)

    def _two_point_corr(self, theta_list, order, sign, power):
        """Hankel-transform a power spectrum into xi(theta).

        Parameters
        ----------
        theta_list : astropy angle Quantity array
        order : int
            Order of the Bessel function J_order in the transform.
        sign : int
            Overall algebraic sign of the correlation function.
        power : callable
            Returns the relevant power spectrum evaluated at ell.
        """
        theta_list_rad = theta_list.to(u.rad).value
        # Dense log-spaced ell grid spanning the tabulated spectrum.
        # Hoisted out of the theta loop: it does not depend on theta and is
        # expensive to rebuild (1e7 points).
        l_list = np.logspace(np.log10(np.min(self.l_list)),
                             np.log10(np.max(self.l_list)), int(1e7))
        xi_list_norm = []
        for theta in theta_list_rad:
            xi_integrand_unnorm = l_list * special.jv(order, l_list * theta) * power(l_list)
            # Renormalize with a Gaussian taper anchored at the third local
            # maximum of the oscillatory integrand, suppressing the
            # non-convergent large-ell tail. (Dead diagnostics around the
            # local minima were removed; behavior of the result is unchanged.)
            ell_max_2 = l_list[argrelextrema(xi_integrand_unnorm, np.greater)[0]][2]
            xi_integrand_norm = xi_integrand_unnorm * np.e ** (-((l_list * (l_list + 1)) / ell_max_2 ** 2.))
            # Simpson's rule for fast numerical integration.
            xi_integral_norm = integrate.simps(xi_integrand_norm, l_list, axis=-1)
            # xi = sign / (2 pi) times the integral.
            xi_list_norm.append(sign * (1 / (2 * np.pi)) * xi_integral_norm)
        return xi_list_norm

    def two_point_corr_flexflex(self, theta_list, fields):
        """Flexion-flexion two-point correlation xi(theta) for `fields`."""
        order, sign = self._FLEXFLEX_ORDER_SIGN[fields]
        return self._two_point_corr(theta_list, order, sign, self.P_F)

    def two_point_corr_shearflex(self, theta_list, fields):
        """Shear-flexion two-point correlation xi(theta) for `fields`."""
        order, sign = self._SHEARFLEX_ORDER_SIGN[fields]
        return self._two_point_corr(theta_list, order, sign, self.P_kappa_F)
| [
"numpy.log10",
"scipy.signal.argrelextrema",
"numpy.where",
"scipy.integrate.simps",
"scipy.interpolate.interp1d",
"numpy.max",
"matplotlib.rc",
"numpy.min",
"pandas.DataFrame",
"scipy.special.jv",
"numpy.logspace"
] | [((236, 303), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (238, 303), False, 'from matplotlib import rc\n'), ((300, 323), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (302, 323), False, 'from matplotlib import rc\n'), ((1980, 2020), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dat', 'columns': 'col_list'}), '(data=dat, columns=col_list)\n', (1992, 2020), True, 'import pandas as pd\n'), ((3126, 3166), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dat', 'columns': 'col_list'}), '(data=dat, columns=col_list)\n', (3138, 3166), True, 'import pandas as pd\n'), ((3693, 3712), 'numpy.log10', 'np.log10', (['theta_min'], {}), '(theta_min)\n', (3701, 3712), True, 'import numpy as np\n'), ((3733, 3752), 'numpy.log10', 'np.log10', (['theta_max'], {}), '(theta_max)\n', (3741, 3752), True, 'import numpy as np\n'), ((3774, 3816), 'numpy.logspace', 'np.logspace', (['theta_min', 'theta_max', 'N_theta'], {}), '(theta_min, theta_max, N_theta)\n', (3785, 3816), True, 'import numpy as np\n'), ((4020, 4047), 'numpy.log10', 'np.log10', (['bin_high_list[-1]'], {}), '(bin_high_list[-1])\n', (4028, 4047), True, 'import numpy as np\n'), ((4070, 4112), 'numpy.logspace', 'np.logspace', (['theta_min', 'theta_max', 'N_theta'], {}), '(theta_min, theta_max, N_theta)\n', (4081, 4112), True, 'import numpy as np\n'), ((4614, 4633), 'numpy.log10', 'np.log10', (['theta_min'], {}), '(theta_min)\n', (4622, 4633), True, 'import numpy as np\n'), ((4654, 4673), 'numpy.log10', 'np.log10', (['theta_max'], {}), '(theta_max)\n', (4662, 4673), True, 'import numpy as np\n'), ((4695, 4737), 'numpy.logspace', 'np.logspace', (['theta_min', 'theta_max', 'N_theta'], {}), '(theta_min, theta_max, N_theta)\n', (4706, 4737), True, 'import numpy as np\n'), ((4941, 4968), 'numpy.log10', 'np.log10', (['bin_high_list[-1]'], {}), '(bin_high_list[-1])\n', (4949, 4968), True, 'import numpy as np\n'), 
((4991, 5033), 'numpy.logspace', 'np.logspace', (['theta_min', 'theta_max', 'N_theta'], {}), '(theta_min, theta_max, N_theta)\n', (5002, 5033), True, 'import numpy as np\n'), ((5154, 5202), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.l_list', 'self.P_F_list'], {}), '(self.l_list, self.P_F_list)\n', (5174, 5202), False, 'from scipy import interpolate\n'), ((5240, 5294), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.l_list', 'self.P_kappa_F_list'], {}), '(self.l_list, self.P_kappa_F_list)\n', (5260, 5294), False, 'from scipy import interpolate\n'), ((3833, 3856), 'numpy.log10', 'np.log10', (['theta_list[1]'], {}), '(theta_list[1])\n', (3841, 3856), True, 'import numpy as np\n'), ((3857, 3880), 'numpy.log10', 'np.log10', (['theta_list[0]'], {}), '(theta_list[0])\n', (3865, 3880), True, 'import numpy as np\n'), ((4754, 4777), 'numpy.log10', 'np.log10', (['theta_list[1]'], {}), '(theta_list[1])\n', (4762, 4777), True, 'import numpy as np\n'), ((4778, 4801), 'numpy.log10', 'np.log10', (['theta_list[0]'], {}), '(theta_list[0])\n', (4786, 4801), True, 'import numpy as np\n'), ((7043, 7073), 'numpy.where', 'np.where', (['(xi_integrand_min < 0)'], {}), '(xi_integrand_min < 0)\n', (7051, 7073), True, 'import numpy as np\n'), ((7562, 7613), 'scipy.integrate.simps', 'integrate.simps', (['xi_integrand_norm', 'l_list'], {'axis': '(-1)'}), '(xi_integrand_norm, l_list, axis=-1)\n', (7577, 7613), False, 'from scipy import integrate\n'), ((9281, 9311), 'numpy.where', 'np.where', (['(xi_integrand_min < 0)'], {}), '(xi_integrand_min < 0)\n', (9289, 9311), True, 'import numpy as np\n'), ((9800, 9851), 'scipy.integrate.simps', 'integrate.simps', (['xi_integrand_norm', 'l_list'], {'axis': '(-1)'}), '(xi_integrand_norm, l_list, axis=-1)\n', (9815, 9851), False, 'from scipy import integrate\n'), ((3909, 3929), 'numpy.log10', 'np.log10', (['theta_list'], {}), '(theta_list)\n', (3917, 3929), True, 'import numpy as np\n'), ((3969, 3989), 'numpy.log10', 
'np.log10', (['theta_list'], {}), '(theta_list)\n', (3977, 3989), True, 'import numpy as np\n'), ((4830, 4850), 'numpy.log10', 'np.log10', (['theta_list'], {}), '(theta_list)\n', (4838, 4850), True, 'import numpy as np\n'), ((4890, 4910), 'numpy.log10', 'np.log10', (['theta_list'], {}), '(theta_list)\n', (4898, 4910), True, 'import numpy as np\n'), ((6865, 6908), 'scipy.signal.argrelextrema', 'argrelextrema', (['xi_integrand_unnorm', 'np.less'], {}), '(xi_integrand_unnorm, np.less)\n', (6878, 6908), False, 'from scipy.signal import argrelextrema\n'), ((7145, 7191), 'scipy.signal.argrelextrema', 'argrelextrema', (['xi_integrand_unnorm', 'np.greater'], {}), '(xi_integrand_unnorm, np.greater)\n', (7158, 7191), False, 'from scipy.signal import argrelextrema\n'), ((9103, 9146), 'scipy.signal.argrelextrema', 'argrelextrema', (['xi_integrand_unnorm', 'np.less'], {}), '(xi_integrand_unnorm, np.less)\n', (9116, 9146), False, 'from scipy.signal import argrelextrema\n'), ((9383, 9429), 'scipy.signal.argrelextrema', 'argrelextrema', (['xi_integrand_unnorm', 'np.greater'], {}), '(xi_integrand_unnorm, np.greater)\n', (9396, 9429), False, 'from scipy.signal import argrelextrema\n'), ((6557, 6576), 'numpy.min', 'np.min', (['self.l_list'], {}), '(self.l_list)\n', (6563, 6576), True, 'import numpy as np\n'), ((6588, 6607), 'numpy.max', 'np.max', (['self.l_list'], {}), '(self.l_list)\n', (6594, 6607), True, 'import numpy as np\n'), ((6738, 6771), 'scipy.special.jv', 'special.jv', (['order', '(l_list * theta)'], {}), '(order, l_list * theta)\n', (6748, 6771), False, 'from scipy import special\n'), ((8801, 8820), 'numpy.min', 'np.min', (['self.l_list'], {}), '(self.l_list)\n', (8807, 8820), True, 'import numpy as np\n'), ((8832, 8851), 'numpy.max', 'np.max', (['self.l_list'], {}), '(self.l_list)\n', (8838, 8851), True, 'import numpy as np\n'), ((8970, 9003), 'scipy.special.jv', 'special.jv', (['order', '(l_list * theta)'], {}), '(order, l_list * theta)\n', (8980, 9003), False, 'from 
scipy import special\n')] |
import traceback
import h5py
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sn
import sys
import tensorflow as tf
from support.data_model import CLASSES, TAG_CLASS_MAP, Track, UNCLASSIFIED_TAGS
from support.track_utils import convert_frames, convert_hdf5_frames
# Referenced only in main()'s summary message; note that the clip filter in
# main() itself compares against a hard-coded 2019 timestamp instead.
START_TIME = '2021-03-29T08:07:54.240643+13:00'
# Model input size: frames are zero-padded and centered to 32x32.
sample_dims = (32,32)
# Not referenced anywhere in this file's visible code — presumably a
# foreground-pixel cutoff used elsewhere; confirm before removing.
bright_pixel_threshold = .25
def format_sample(dims, input):
    """Center `input` in a zero array of shape `dims` and add a channel axis.

    Args:
        dims: (rows, cols) of the output sample.
        input: 2-D array no larger than `dims` in either dimension
            (larger inputs would produce negative slice starts and misplace
            the data — callers are assumed to guarantee the fit).

    Returns:
        float32 array of shape (dims[0], dims[1], 1) with `input` centered
        and zero padding around it.
    """
    # Renamed inner helper: the original was called `slice`, shadowing the
    # builtin of the same name.
    def centered_span(actual, limit):
        # Start/end indices that center a span of length `actual` in `limit`.
        start = (limit - actual) // 2
        return start, start + actual

    sample = np.zeros(dims, np.float32)
    row0, row1 = centered_span(input.shape[0], dims[0])
    col0, col1 = centered_span(input.shape[1], dims[1])
    sample[row0:row1, col0:col1] = input
    # Trailing length-1 channel axis expected by the Keras model.
    return sample.reshape(sample.shape + (1,))
def evaluate(predicts, actuals, base_path, title):
    """Build a confusion matrix from per-track predictions, report recall and
    precision to stdout, and save an annotated heatmap next to `base_path`."""
    n_classes = len(CLASSES)
    cm = np.zeros((n_classes, n_classes))
    for guess, truth in zip(predicts, actuals):
        cm[truth, guess] += 1
    # Row-normalize for per-class recall; diagonal over column sums for precision.
    normalized_matrix = (cm.T / cm.sum(axis=1)).T
    precision_row = np.diag(cm) / cm.sum(axis=0)
    display_matrix = np.row_stack((normalized_matrix, precision_row))
    num_correct = np.diag(cm).sum()
    num_total = len(predicts)
    num_wrong = num_total - num_correct
    print(f'{title} gives total of {num_correct} tracks predicted correctly ({num_correct/num_total:.4f}), {num_wrong} predicted incorrectly ({num_wrong/num_total:.4f})')
    print(f'  mean accuracy (recall) {np.diag(normalized_matrix).sum()/len(CLASSES):.4f}, mean precision {np.sum(precision_row)/len(CLASSES):.4f}')
    print(cm)
    # Heatmap of the normalized matrix with the precision row appended.
    plt.figure(figsize=(10, 10))
    sn.heatmap(display_matrix, annot=True, fmt='.2f', cmap='Blues',
               xticklabels=CLASSES, yticklabels=CLASSES + ['precision'])
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.suptitle(f'overall accuracy: {num_correct/num_total:.4f}, mean recall: {np.diag(normalized_matrix).sum()/len(CLASSES):.4f}, mean precision: {np.sum(precision_row)/len(CLASSES):.4f}')
    plt.savefig(f'{base_path}-{title.replace(" ", "_")}test-results.png')
    plt.close()
def sum_weighted(predicts, weights):
    """Collapse per-frame prediction rows into one weighted vote vector
    (weights^T · predicts)."""
    return weights.T @ predicts
def test(tracks, model_path, weights_path=None):
    """Run the saved classifier over `tracks`, report per-frame accuracy, and
    evaluate several strategies for aggregating frame predictions per track."""
    model = tf.keras.models.load_model(model_path)
    if weights_path:
        model.load_weights(weights_path)
    print(f'Testing model {model_path} with weights {weights_path}:')
    class_to_index = {c: i for i, c in enumerate(CLASSES)}
    frames_correct = 0
    frames_wrong = 0
    actuals = []
    # One aggregated per-track prediction list per voting strategy.
    frame_mean_predicts = []
    frame_squared_predicts = []
    frame_pixelcount_predicts = []
    frame_pixelcount_sqrpredicts = []
    for track in tracks:
        tag = track.tag
        if tag in UNCLASSIFIED_TAGS:
            tag = 'unclassified'
        actuals.append(class_to_index[tag])
        frames = np.array([format_sample(sample_dims, f) for f in track.frames])
        predicts = model.predict(frames)
        # Strategy 1: sum of raw per-frame probabilities.
        frame_mean_predicts.append(np.argmax(predicts.sum(axis=0)))
        # Strategy 2: sum of squared probabilities (emphasizes confident frames).
        predicts_squared = predicts ** 2
        frame_squared_predicts.append(np.argmax(predicts_squared.sum(axis=0)))
        # Strategies 3/4: weight each frame by its count of nonzero pixels.
        pixelcount_weights = np.array([(f > 0).sum() for f in frames])
        frame_pixelcount_predicts.append(np.argmax(sum_weighted(predicts, pixelcount_weights)))
        frame_pixelcount_sqrpredicts.append(np.argmax(sum_weighted(predicts_squared, pixelcount_weights)))
        # Per-frame accuracy bookkeeping.
        frame_maxes = np.argmax(predicts, axis=1)
        correct = np.sum(frame_maxes == class_to_index[tag])
        frames_correct += correct
        frames_wrong += len(frame_maxes) - correct
        print(f'{track.clip_key}-{track.track_key} with tag {tag} has {correct} correct and {len(frame_maxes) - correct} wrong predictions')
    total_frames = frames_correct + frames_wrong
    print(f'frames {frames_correct} predicted correctly ({frames_correct/total_frames:.4f}), {frames_wrong} predicted incorrectly ({frames_wrong/total_frames:.4f})')
    figure_path = weights_path or model_path
    evaluate(frame_mean_predicts, actuals, figure_path, 'frame mean')
    evaluate(frame_squared_predicts, actuals, figure_path, 'frame squared predicts mean')
    evaluate(frame_pixelcount_predicts, actuals, figure_path, 'nonzero pixel count weighting')
    evaluate(frame_pixelcount_sqrpredicts, actuals, figure_path, 'nonzero pixel count weighting squared predicts')
def main():
    """Load classified tracks from an HDF5 dataset and test a saved model.

    Usage: prog <dataset.hdf5> <model_directory> [weights_file]
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = ''  # force CPU-only evaluation
    argv = sys.argv
    dataset_path = argv[1]
    model_directory = argv[2]
    file_hdf5 = h5py.File(dataset_path, 'r')
    clips_hdf5 = file_hdf5['clips']
    tracks = []
    for clip_id in clips_hdf5:
        clip_hdf5 = clips_hdf5[clip_id]
        # NOTE(review): despite START_TIME and the "after" wording in the
        # summary print below, this keeps clips *before* a hard-coded 2019
        # timestamp — confirm which filter is intended.
        if clip_hdf5.attrs['start_time'] < '2019-10-29T08:07:54.240643+13:00':
            track_keys = [key for key in clip_hdf5 if key != 'background_frame']
            for track_id in track_keys:
                try:
                    track_hdf5 = clip_hdf5[track_id]
                    tag = track_hdf5.attrs['tag']
                    frames, bounds = convert_hdf5_frames(track_hdf5['cropped'],
                                                        track_hdf5.attrs['bounds_history'])
                    frames, bounds = convert_frames(frames, bounds, clip_id, track_id)
                    if not len(frames):
                        print(f'Ignoring {clip_id}-{track_id}: no usable frames')
                        continue
                    start_time = track_hdf5.attrs['start_time']
                    end_time = track_hdf5.attrs['end_time']
                    if tag in TAG_CLASS_MAP:
                        tag = TAG_CLASS_MAP[tag]
                        tracks.append(Track(tag, clip_id, track_id, start_time, end_time, bounds, None, frames))
                    else:
                        print(f'Ignoring {clip_id}-{track_id}: unsupported tag {tag}')
                except Exception:
                    # Keep going on a bad track; report it with a traceback.
                    print(f'Exception processing {clip_id}-{track_id}')
                    traceback.print_exc()
    print(f'found {len(tracks)} with start times after {START_TIME}')
    if len(argv) > 3:
        test(tracks, f'{model_directory}/model.sav', f'{model_directory}/{argv[3]}')
    else:
        test(tracks, f'{model_directory}/model.sav')
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| [
"support.track_utils.convert_hdf5_frames",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"support.data_model.Track",
"numpy.argmax",
"seaborn.heatmap",
"h5py.File",
"matplotlib.pyplot.close",
"numpy.diag",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.matmul",
"numpy.row_stack"... | [((570, 596), 'numpy.zeros', 'np.zeros', (['dims', 'np.float32'], {}), '(dims, np.float32)\n', (578, 596), True, 'import numpy as np\n'), ((1190, 1238), 'numpy.row_stack', 'np.row_stack', (['(normalized_matrix, precision_row)'], {}), '((normalized_matrix, precision_row))\n', (1202, 1238), True, 'import numpy as np\n'), ((1709, 1737), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1719, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1867), 'seaborn.heatmap', 'sn.heatmap', (['display_matrix'], {'annot': '(True)', 'fmt': '""".2f"""', 'cmap': '"""Blues"""', 'xticklabels': 'CLASSES', 'yticklabels': "(CLASSES + ['precision'])"}), "(display_matrix, annot=True, fmt='.2f', cmap='Blues', xticklabels\n =CLASSES, yticklabels=CLASSES + ['precision'])\n", (1751, 1867), True, 'import seaborn as sn\n'), ((1867, 1887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {}), "('Actual')\n", (1877, 1887), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1915), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (1902, 1915), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2196), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2194, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2277), 'numpy.matmul', 'np.matmul', (['weights.T', 'predicts'], {}), '(weights.T, predicts)\n', (2256, 2277), True, 'import numpy as np\n'), ((2341, 2379), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (2367, 2379), True, 'import tensorflow as tf\n'), ((4671, 4699), 'h5py.File', 'h5py.File', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (4680, 4699), False, 'import h5py\n'), ((1112, 1137), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1119, 1137), True, 'import numpy as np\n'), ((3557, 3584), 'numpy.argmax', 'np.argmax', (['predicts'], {'axis': 
'(1)'}), '(predicts, axis=1)\n', (3566, 3584), True, 'import numpy as np\n'), ((3603, 3645), 'numpy.sum', 'np.sum', (['(frame_maxes == class_to_index[tag])'], {}), '(frame_maxes == class_to_index[tag])\n', (3609, 3645), True, 'import numpy as np\n'), ((1257, 1282), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1264, 1282), True, 'import numpy as np\n'), ((1635, 1656), 'numpy.sum', 'np.sum', (['precision_row'], {}), '(precision_row)\n', (1641, 1656), True, 'import numpy as np\n'), ((2065, 2086), 'numpy.sum', 'np.sum', (['precision_row'], {}), '(precision_row)\n', (2071, 2086), True, 'import numpy as np\n'), ((5228, 5306), 'support.track_utils.convert_hdf5_frames', 'convert_hdf5_frames', (["track_hdf5['cropped']", "track_hdf5.attrs['bounds_history']"], {}), "(track_hdf5['cropped'], track_hdf5.attrs['bounds_history'])\n", (5247, 5306), False, 'from support.track_utils import convert_frames, convert_hdf5_frames\n'), ((5344, 5393), 'support.track_utils.convert_frames', 'convert_frames', (['frames', 'bounds', 'clip_id', 'track_id'], {}), '(frames, bounds, clip_id, track_id)\n', (5358, 5393), False, 'from support.track_utils import convert_frames, convert_hdf5_frames\n'), ((6136, 6157), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6155, 6157), False, 'import traceback\n'), ((1567, 1593), 'numpy.diag', 'np.diag', (['normalized_matrix'], {}), '(normalized_matrix)\n', (1574, 1593), True, 'import numpy as np\n'), ((1996, 2022), 'numpy.diag', 'np.diag', (['normalized_matrix'], {}), '(normalized_matrix)\n', (2003, 2022), True, 'import numpy as np\n'), ((5706, 5779), 'support.data_model.Track', 'Track', (['tag', 'clip_id', 'track_id', 'start_time', 'end_time', 'bounds', 'None', 'frames'], {}), '(tag, clip_id, track_id, start_time, end_time, bounds, None, frames)\n', (5711, 5779), False, 'from support.data_model import CLASSES, TAG_CLASS_MAP, Track, UNCLASSIFIED_TAGS\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
# NOTE(review): `path` is not defined in this script — presumably injected by
# the execution platform before this cell runs; confirm.
path
# Load the medals table and rename 'Total' to the clearer 'Total_Medals'.
data = pd.read_csv(path)
data = data.rename(columns={'Total':'Total_Medals'})
data.head()
#Code starts here
# --------------
#Code starts here
# Label each country by the event (Summer/Winter) with the larger medal
# count; equal counts are relabelled 'Both' by the second pass.
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
# Frequency of each Better_Event label, as a plain dict.
d = data['Better_Event'].value_counts().to_dict()
print(d)
def freqmax(a):
    """Return the key of dict `a` holding the largest positive value.

    Mirrors the original hand-rolled scan exactly: keys only qualify when
    their value exceeds 0, ties keep the first key in iteration order, and
    0 is returned when no key qualifies (including an empty dict).
    """
    # max() with key=a.get keeps the first maximal key, like the strict '>' scan.
    best = max(a, key=a.get, default=None)
    return best if best is not None and a[best] > 0 else 0
# Most frequent Better_Event label across all countries.
better_event = freqmax(d)
print(better_event)
# --------------
#Code starts here
# Country/medal subset; drop the last row (presumably an overall-totals
# aggregate row — confirm against the CSV).
top_countries = pd.DataFrame(data = data, columns=['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'])
top_countries = top_countries[:-1]
def top_ten(col):
    """Return the names of the 10 countries with the highest value in *col*.

    Reads the module-level ``top_countries`` DataFrame.
    """
    # nlargest already returns rows ordered by `col`; the original's dead
    # `country_list = []` initializer and extra parentheses are dropped.
    return list(top_countries.nlargest(10, col)['Country_Name'])
top_10_summer = top_ten('Total_Summer')
print(top_10_summer)
top_10_winter = top_ten('Total_Winter')
top_10 = top_ten('Total_Medals')
# Countries present in *all three* top-10 lists.
# Bug fix: the original condition `x in top_10_winter and top_10` never
# tested membership in `top_10` -- a non-empty list is always truthy.
common = [x for x in top_10_summer if x in top_10_winter and x in top_10]
# --------------
# Code starts here
# Slice the full table down to each top-10 list, then plot one bar panel per list.
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
fig , (ax_1,ax_2,ax_3) = plt.subplots(3,1)
ax_1.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
# Bug fix: the winter and top-10 panels originally re-plotted 'Total_Summer';
# each panel now plots the column it is titled after.
ax_2.bar(winter_df['Country_Name'],winter_df['Total_Winter'])
ax_3.bar(top_df['Country_Name'],top_df['Total_Medals'])
ax_1.set_title('Summer')
ax_2.set_title('Winter')
ax_3.set_title('Top 10')
# --------------
#Code starts here
# Share of gold among all medals per list; NOTE(review): these frames are
# slices of `data`, so the assignments may trigger SettingWithCopyWarning --
# behavior kept as-is.
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = max(summer_df['Golden_Ratio'])
# Country achieving that maximum gold ratio.
summer_country_gold = summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = max(winter_df['Golden_Ratio'])
winter_country_gold = winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = max(top_df['Golden_Ratio'])
top_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
# Weighted points: gold=3, silver=2, bronze=1 (totals row excluded).
data_1 = data[:-1]
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']
most_points=max(data_1['Total_Points'])
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
# --------------
#Code starts here
# Medal breakdown of the best country, plotted as a bar chart.
best=data[data['Country_Name']==best_country]
print(best.head())
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
print(best.head())
best.plot.bar()
# NOTE(review): hard-coded x-label assumes the winner is the United States;
# `best_country` could differ for other input data -- confirm.
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((150, 167), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (161, 167), True, 'import pandas as pd\n'), ((319, 392), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (327, 392), True, 'import numpy as np\n'), ((420, 509), 'numpy.where', 'np.where', (["(data['Total_Summer'] == data['Total_Winter'])", '"""Both"""', "data['Better_Event']"], {}), "(data['Total_Summer'] == data['Total_Winter'], 'Both', data[\n 'Better_Event'])\n", (428, 509), True, 'import numpy as np\n'), ((790, 891), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']"}), "(data=data, columns=['Country_Name', 'Total_Summer',\n 'Total_Winter', 'Total_Medals'])\n", (802, 891), True, 'import pandas as pd\n'), ((1524, 1542), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (1536, 1542), True, 'import matplotlib.pyplot as plt\n'), ((2981, 3008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2991, 3008), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3036), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (3020, 3036), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3061), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (3048, 3061), True, 'import matplotlib.pyplot as plt\n'), ((3063, 3073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3071, 3073), True, 'import matplotlib.pyplot as plt\n')] |
import matplotlib
matplotlib.use('Agg')
import os, sys
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
import imageio
import numpy as np
from skimage.transform import resize
from skimage import img_as_ubyte
import torch
from sync_batchnorm import DataParallelWithCallback
# from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from animate import normalize_kp
from scipy.spatial import ConvexHull
import gzip
import pickle
import time
# Seconds to idle before/after the run (used for external timing in __main__).
WAIT = 0
if sys.version_info[0] < 3:
    raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    """Build a KPDetector from *config_path* and restore its weights.

    Returns ``(None, kp_detector)`` -- the first slot is kept so callers
    expecting a (generator, detector) pair keep working.
    """
    print("loading_model,", time.time())
    with open(config_path) as f:
        # Bug fix: bare yaml.load() is deprecated since PyYAML 5.1 and a
        # TypeError on PyYAML 6; the model config only needs plain YAML.
        config = yaml.load(f, Loader=yaml.SafeLoader)

    kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])
    if not cpu:
        kp_detector.cuda()

    # On CPU, remap CUDA-saved tensors; otherwise use torch.load defaults.
    checkpoint = torch.load(
        checkpoint_path,
        map_location=torch.device('cpu') if cpu else None)
    kp_detector.load_state_dict(checkpoint['kp_detector'])

    if not cpu:
        kp_detector = DataParallelWithCallback(kp_detector)
    kp_detector.eval()
    return None, kp_detector
def extract_keypoints(source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True,
                      cpu=False):
    """Run *kp_detector* over every frame of *driving_video*, normalized
    against the source image's keypoints.

    source_image: single frame; assumed (H, W, C) float array -- TODO confirm.
    driving_video: list of frames of the same shape.
    generator: unused here; kept for interface compatibility with callers.
    Returns a list with one normalized keypoint result per frame (whatever
    type normalize_kp produces).
    """
    kp = []
    with torch.no_grad():
        # (H, W, C) -> (1, C, H, W); frames -> (1, C, T, H, W) after permute.
        source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
        if not cpu:
            source = source.cuda()
        kp_source = kp_detector(source)
        # First frame anchors the relative-movement normalization.
        kp_driving_initial = kp_detector(driving[:, :, 0])
        print("extracting_key_points,", time.time())
        for frame_idx in tqdm(range(driving.shape[2])):
            driving_frame = driving[:, :, frame_idx]
            if not cpu:
                driving_frame = driving_frame.cuda()
            # extract the keypoints from current video frame
            kp_driving = kp_detector(driving_frame)
            kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
                                   use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)
            kp.append(kp_norm)
    return kp
def write_compressed_keypoint_file(file_name, file):
    """Pickle *file* (converted to an ndarray) into ``<file_name>.gz``."""
    payload = np.array(file)
    with gzip.open(file_name + ".gz", "wb") as out:
        pickle.dump(payload, out)
if __name__ == "__main__":
print("begin_wait,", time.time())
time.sleep(WAIT)
parser = ArgumentParser()
parser.add_argument("--config", default='checkpoints/taichi/torch/pretrained_taichi-cpk.pth.yaml', help="path to config")
parser.add_argument("--checkpoint",
default='checkpoints/taichi/torch/pretrained_taichi-cpk.pth.tar',
help="path to checkpoint to restore")
parser.add_argument("--driving_video", default='taichi_sample1.mp4', help="path to driving video")
parser.add_argument("--cpu", default=False, dest="cpu", action="store_true", help="cpu mode.")
parser.add_argument("--out_kp_file", default='pre_taichi_sample1.kp', help="path to output keypoints file")
parser.add_argument("--out_img_file", default='pre_taichi_sample1.jpeg', help="path to output image file")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
reader = imageio.get_reader(opt.driving_video)
fps = reader.get_meta_data()['fps']
driving_video = []
print("reading_video,", time.time())
try:
for im in reader:
driving_video.append(im)
except RuntimeError:
pass
reader.close()
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
import copy
source_image = copy.deepcopy(driving_video[0])
imageio.imwrite(opt.out_img_file, source_image)
_, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
key_points = extract_keypoints(source_image, driving_video, None, kp_detector, relative=opt.relative,
adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
print("process_key_points,", time.time())
print("save_key_points,", time.time())
kp_save_start = time.time()
np.save(opt.out_kp_file, np.array(key_points), allow_pickle=True)
# save compressed file(for file size comparison)
write_compressed_keypoint_file(opt.out_kp_file+"_compressed", key_points)
print("end,", time.time())
time.sleep(WAIT)
| [
"sync_batchnorm.DataParallelWithCallback",
"argparse.ArgumentParser",
"imageio.imwrite",
"matplotlib.use",
"gzip.open",
"torch.load",
"animate.normalize_kp",
"torch.device",
"modules.keypoint_detector.KPDetector",
"time.sleep",
"yaml.load",
"numpy.array",
"copy.deepcopy",
"torch.no_grad",
... | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((827, 933), 'modules.keypoint_detector.KPDetector', 'KPDetector', ([], {}), "(**config['model_params']['kp_detector_params'], **config[\n 'model_params']['common_params'])\n", (837, 933), False, 'from modules.keypoint_detector import KPDetector\n'), ((2804, 2820), 'time.sleep', 'time.sleep', (['WAIT'], {}), '(WAIT)\n', (2814, 2820), False, 'import time\n'), ((2834, 2850), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2848, 2850), False, 'from argparse import ArgumentParser\n'), ((3724, 3761), 'imageio.get_reader', 'imageio.get_reader', (['opt.driving_video'], {}), '(opt.driving_video)\n', (3742, 3761), False, 'import imageio\n'), ((4116, 4147), 'copy.deepcopy', 'copy.deepcopy', (['driving_video[0]'], {}), '(driving_video[0])\n', (4129, 4147), False, 'import copy\n'), ((4153, 4200), 'imageio.imwrite', 'imageio.imwrite', (['opt.out_img_file', 'source_image'], {}), '(opt.out_img_file, source_image)\n', (4168, 4200), False, 'import imageio\n'), ((4610, 4621), 'time.time', 'time.time', ([], {}), '()\n', (4619, 4621), False, 'import time\n'), ((4860, 4876), 'time.sleep', 'time.sleep', (['WAIT'], {}), '(WAIT)\n', (4870, 4876), False, 'import time\n'), ((732, 743), 'time.time', 'time.time', ([], {}), '()\n', (741, 743), False, 'import time\n'), ((795, 807), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (804, 807), False, 'import yaml\n'), ((1128, 1155), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1138, 1155), False, 'import torch\n'), ((1255, 1292), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['kp_detector'], {}), '(kp_detector)\n', (1279, 1292), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((1521, 1536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1534, 1536), False, 'import torch\n'), ((2655, 2689), 'gzip.open', 'gzip.open', 
(["(file_name + '.gz')", '"""wb"""'], {}), "(file_name + '.gz', 'wb')\n", (2664, 2689), False, 'import gzip\n'), ((2787, 2798), 'time.time', 'time.time', ([], {}), '()\n', (2796, 2798), False, 'import time\n'), ((3853, 3864), 'time.time', 'time.time', ([], {}), '()\n', (3862, 3864), False, 'import time\n'), ((4534, 4545), 'time.time', 'time.time', ([], {}), '()\n', (4543, 4545), False, 'import time\n'), ((4577, 4588), 'time.time', 'time.time', ([], {}), '()\n', (4586, 4588), False, 'import time\n'), ((4651, 4671), 'numpy.array', 'np.array', (['key_points'], {}), '(key_points)\n', (4659, 4671), True, 'import numpy as np\n'), ((4843, 4854), 'time.time', 'time.time', ([], {}), '()\n', (4852, 4854), False, 'import time\n'), ((1939, 1950), 'time.time', 'time.time', ([], {}), '()\n', (1948, 1950), False, 'import time\n'), ((2273, 2484), 'animate.normalize_kp', 'normalize_kp', ([], {'kp_source': 'kp_source', 'kp_driving': 'kp_driving', 'kp_driving_initial': 'kp_driving_initial', 'use_relative_movement': 'relative', 'use_relative_jacobian': 'relative', 'adapt_movement_scale': 'adapt_movement_scale'}), '(kp_source=kp_source, kp_driving=kp_driving, kp_driving_initial\n =kp_driving_initial, use_relative_movement=relative,\n use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)\n', (2285, 2484), False, 'from animate import normalize_kp\n'), ((2714, 2728), 'numpy.array', 'np.array', (['file'], {}), '(file)\n', (2722, 2728), True, 'import numpy as np\n'), ((4016, 4041), 'skimage.transform.resize', 'resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (4022, 4041), False, 'from skimage.transform import resize\n'), ((1076, 1095), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1088, 1095), False, 'import torch\n'), ((1664, 1687), 'numpy.array', 'np.array', (['driving_video'], {}), '(driving_video)\n', (1672, 1687), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Plot a depth plane extracted from the SCEC Community Velocity Model.
"""
import math
import numpy as np
import matplotlib.pyplot as plt
import cst.data
import cst.cvms
import cst.cvmh
# parameters
prop = 'rho'
# NOTE(review): overrides the 'rho' assignment above -- the first line is
# dead code; kept as-is to preserve the original script.
prop = 'Vs'
label = 'S-wave velocity (m/s)'
depth = 500.0
vmin, vmax = 300, 3200
# Grid spacing: half an arc-minute, in degrees.
delta = 0.5 / 60.0
lon, lat = (-120.0, -114.5), (32.5, 35.0)
# NOTE(review): relies on `cst.plt` although only cst.data/cvms/cvmh are
# imported explicitly -- presumably one of those imports registers it; confirm.
cmap = cst.plt.colormap('rgb')
# create mesh
x = np.arange(lon[0], lon[1] + 0.5 * delta, delta)
y = np.arange(lat[0], lat[1] + 0.5 * delta, delta)
x, y = np.meshgrid(x, y)
z = np.empty_like(x)
z.fill(depth)
# CVM extractions
vss = cst.cvms.extract(x, y, z, prop)[0]
vsh = cst.cvmh.extract(x, y, z, prop)[0]
# map data
x, y = cst.data.mapdata('coastlines', 'high', (lon, lat), 100.0)
# plot
for vs, tag in [
    (vss, 'S'),
    (vsh, 'H'),
]:
    fig = plt.figure(figsize=(6.4, 4.8))
    ax = plt.gca()
    im = ax.imshow(
        vs, extent=lon+lat, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
        interpolation='nearest'
    )
    fig.colorbar(im, orientation='horizontal').set_label(label)
    # Coastline overlay; longitudes shifted from the 0-360 range to +/-180.
    ax.plot(x - 360, y, 'k-')
    # Keep degree cells visually square at ~33.75 deg latitude.
    ax.set_aspect(1.0 / math.cos(33.75 / 180.0 * math.pi))
    ax.set_title('CVM%s %.0f m depth' % (tag, depth))
    ax.axis(lon + lat)
    f = 'CVM-Slice-%s-%s.png' % (prop, tag)
    print(f)
    fig.savefig(f)
| [
"matplotlib.pyplot.gca",
"math.cos",
"matplotlib.pyplot.figure",
"numpy.empty_like",
"numpy.meshgrid",
"numpy.arange"
] | [((429, 475), 'numpy.arange', 'np.arange', (['lon[0]', '(lon[1] + 0.5 * delta)', 'delta'], {}), '(lon[0], lon[1] + 0.5 * delta, delta)\n', (438, 475), True, 'import numpy as np\n'), ((480, 526), 'numpy.arange', 'np.arange', (['lat[0]', '(lat[1] + 0.5 * delta)', 'delta'], {}), '(lat[0], lat[1] + 0.5 * delta, delta)\n', (489, 526), True, 'import numpy as np\n'), ((534, 551), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (545, 551), True, 'import numpy as np\n'), ((556, 572), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (569, 572), True, 'import numpy as np\n'), ((835, 865), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (845, 865), True, 'import matplotlib.pyplot as plt\n'), ((875, 884), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (882, 884), True, 'import matplotlib.pyplot as plt\n'), ((1138, 1171), 'math.cos', 'math.cos', (['(33.75 / 180.0 * math.pi)'], {}), '(33.75 / 180.0 * math.pi)\n', (1146, 1171), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
v9s model
* Input: v5_im
Author: Kohei <<EMAIL>>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import argparse
import math
import glob
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.transform
import skimage.morphology
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
MODEL_NAME = 'v9s'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train"
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# Parameters
MIN_POLYGON_AREA = 30
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# Preprocessing result
FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv"
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# Logger
# Suppress UserWarning globally (libraries used below emit many of them).
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
if __name__ == '__main__':
    # Attach handlers only when run as a script, so importing this module
    # does not duplicate log output.
    logger.addHandler(handler)
    logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
    """
    Directory name to AOI number

    Usage:

        >>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
        2
    """
    dir_name = Path(datapath).name
    for aoi_id, city_prefix in ((2, 'AOI_2_Vegas'),
                                (3, 'AOI_3_Paris'),
                                (4, 'AOI_4_Shanghai'),
                                (5, 'AOI_5_Khartoum')):
        if dir_name.startswith(city_prefix):
            return aoi_id
    raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
    """Strip interior-ring coordinates from a CSV line holding a WKT polygon.

    Keeps the text up to the first interior ring plus the trailing CSV
    fields; lines without an interior ring pass through unchanged.
    """
    if "), (" not in line:
        return line
    exterior_part = line.split('), (')[0]
    trailing_fields = line.split('))",')[-1]
    return exterior_part + '))",' + trailing_fields
def __load_band_cut_th(band_fn, bandsz=3):
    """Load per-area, per-channel min/max cut thresholds from *band_fn*.

    Returns {area_id: {channel: {'min': ..., 'max': ...}}} for areas 2-5;
    areas missing from the CSV keep an empty channel dict.
    """
    df = pd.read_csv(band_fn, index_col='area_id')
    thresholds = {area_id: {} for area_id in range(2, 6)}
    for area_id, row in df.iterrows():
        for chan_i in range(bandsz):
            thresholds[area_id][chan_i] = {
                'min': row['chan{}_min'.format(chan_i)],
                'max': row['chan{}_max'.format(chan_i)],
            }
    return thresholds
def _calc_fscore_per_aoi(area_id):
    """Score this AOI's predicted polygons with the external visualizer JAR.

    Runs the TopCoder Java scorer against the truth/solution CSVs and parses
    precision/recall/F-score from the last lines of its stdout.
    Returns a dict with keys overall_fscore, tp, fp, fn, precision, recall,
    fscore.  Raises RuntimeError if the output cannot be parsed.
    """
    prefix = area_id_to_prefix(area_id)
    truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
    poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
    cmd = [
        'java',
        '-jar',
        '/root/visualizer-2.0/visualizer.jar',
        '-truth',
        truth_file,
        '-solution',
        poly_file,
        '-no-gui',
        '-band-triplets',
        '/root/visualizer-2.0/data/band-triplets.txt',
        '-image-dir',
        'pass',
    ]
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = proc.communicate()
    # Only the last 10 lines carry the score block (see sample below).
    lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]]
    """
    Overall F-score : 0.85029
    AOI_2_Vegas:
      TP       : 27827
      FP       : 4999
      FN       : 4800
      Precision: 0.847712
      Recall   : 0.852883
      F-score  : 0.85029
    """
    # Degenerate case: scorer reports a flat zero; return all-zero metrics.
    if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
        overall_fscore = 0
        tp = 0
        fp = 0
        fn = 0
        precision = 0
        recall = 0
        fscore = 0
    elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
        # Sanity-check the expected line layout before extracting numbers.
        assert lines[0].startswith("Overall F-score : ")
        assert lines[2].startswith("AOI_")
        assert lines[3].strip().startswith("TP")
        assert lines[4].strip().startswith("FP")
        assert lines[5].strip().startswith("FN")
        assert lines[6].strip().startswith("Precision")
        assert lines[7].strip().startswith("Recall")
        assert lines[8].strip().startswith("F-score")
        # First numeric token on each line is the metric value.
        overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0])
        tp = int(re.findall("(\d+)", lines[3])[0])
        fp = int(re.findall("(\d+)", lines[4])[0])
        fn = int(re.findall("(\d+)", lines[5])[0])
        precision = float(re.findall("([\d\.]+)", lines[6])[0])
        recall = float(re.findall("([\d\.]+)", lines[7])[0])
        fscore = float(re.findall("([\d\.]+)", lines[8])[0])
    else:
        logger.warn("Unexpected data >>> " + stdout_data.decode('utf8'))
        raise RuntimeError("Unsupported format")
    return {
        'overall_fscore': overall_fscore,
        'tp': tp,
        'fp': fp,
        'fn': fn,
        'precision': precision,
        'recall': recall,
        'fscore': fscore,
    }
def prefix_to_area_id(prefix):
    """Map an AOI prefix (e.g. 'AOI_2_Vegas') to its numeric area id.

    Bug fix: the original indexed ``area_dict`` with the undefined name
    ``area_id``, raising NameError on every call.
    """
    area_dict = {
        'AOI_2_Vegas': 2,
        'AOI_3_Paris': 3,
        'AOI_4_Shanghai': 4,
        'AOI_5_Khartoum': 5,
    }
    return area_dict[prefix]
def area_id_to_prefix(area_id):
    """Map a numeric area id (2-5) to its AOI prefix string."""
    return {
        2: 'AOI_2_Vegas',
        3: 'AOI_3_Paris',
        4: 'AOI_4_Shanghai',
        5: 'AOI_5_Khartoum',
    }[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
    """Return epoch and min-polygon-area from the best validation F-score row."""
    prefix = area_id_to_prefix(area_id)
    history = pd.read_csv(FMT_VALMODEL_EVALTHHIST.format(prefix))
    # Best row = highest fscore after descending sort.
    best_row = history.sort_values(by='fscore', ascending=False).iloc[0]
    return {
        'fn_epoch': int(best_row['zero_base_epoch']),
        'min_poly_area': int(best_row['min_area_th']),
    }
def get_resized_raster_3chan_image(image_id, band_cut_th=None):
    """Read an RGB training tile, clip/scale each band to [0, 1], and resize.

    band_cut_th: {channel: {'min': ..., 'max': ...}} clipping thresholds.
    Returns an (INPUT_SIZE, INPUT_SIZE, 3) float array.
    """
    fn = train_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
    for chan_i in range(3):
        lo = band_cut_th[chan_i]['min']
        hi = band_cut_th[chan_i]['max']
        values[chan_i] = (np.clip(values[chan_i], lo, hi) - lo) / (hi - lo)
    # (C, H, W) -> (H, W, C), then resample to the network input size.
    values = np.transpose(values, (1, 2, 0))
    return skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
def get_resized_raster_3chan_image_test(image_id, band_cut_th=None):
    """Test-set variant of get_resized_raster_3chan_image (test image paths).

    Returns an (INPUT_SIZE, INPUT_SIZE, 3) float array in [0, 1].
    """
    fn = test_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
    for chan_i in range(3):
        lo = band_cut_th[chan_i]['min']
        hi = band_cut_th[chan_i]['max']
        values[chan_i] = (np.clip(values[chan_i], lo, hi) - lo) / (hi - lo)
    # (C, H, W) -> (H, W, C), then resample to the network input size.
    values = np.transpose(values, (1, 2, 0))
    return skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
def image_mask_resized_from_summary(df, image_id):
    """Rasterize the building polygons of *image_id* into a binary mask.

    df: summary DataFrame with ImageId / PolygonWKT_Pix columns (WKT in
    650x650 pixel coordinates).  Interior rings are carved back out as
    background.  Returns an (INPUT_SIZE, INPUT_SIZE) uint8 mask.
    """
    # Robustness: skimage.draw is used below but only skimage.transform /
    # skimage.morphology are imported at module level; import it explicitly
    # so the function works regardless of scikit-image's submodule loading.
    import skimage.draw
    im_mask = np.zeros((650, 650))
    for idx, row in df[df.ImageId == image_id].iterrows():
        shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
        if shape_obj.exterior is not None:
            coords = list(shape_obj.exterior.coords)
            x = [round(float(pp[0])) for pp in coords]
            y = [round(float(pp[1])) for pp in coords]
            yy, xx = skimage.draw.polygon(y, x, (650, 650))
            im_mask[yy, xx] = 1

            # Interior rings (holes) become background again.
            for interior in shape_obj.interiors:
                coords = list(interior.coords)
                x = [round(float(pp[0])) for pp in coords]
                y = [round(float(pp[1])) for pp in coords]
                yy, xx = skimage.draw.polygon(y, x, (650, 650))
                im_mask[yy, xx] = 0
    im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE))
    # Binarize after the interpolating resize.
    return (im_mask > 0.5).astype(np.uint8)
def train_test_image_prep(area_id):
    """Build HDF5 stores of resized RGB images (train/test) and masks (train)."""
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(
        FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    band_cut_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    df_summary = _load_train_summary_data(area_id)

    def _fill_store(fn, index, build):
        # One blosc-compressed carray per image id in a fresh HDF5 file.
        logger.info("Prepare image container: {}".format(fn))
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(index, total=len(index)):
                im = build(image_id)
                atom = tb.Atom.from_dtype(im.dtype)
                filters = tb.Filters(complib='blosc', complevel=9)
                ds = f.create_carray(f.root, image_id, atom, im.shape,
                                     filters=filters)
                ds[:] = im

    _fill_store(FMT_TRAIN_IM_STORE.format(prefix), df_train.index,
                lambda i: get_resized_raster_3chan_image(i, band_cut_th))
    _fill_store(FMT_TEST_IM_STORE.format(prefix), df_test.index,
                lambda i: get_resized_raster_3chan_image_test(i, band_cut_th))
    _fill_store(FMT_TRAIN_MASK_STORE.format(prefix), df_train.index,
                lambda i: image_mask_resized_from_summary(df_summary, i))
def valtrain_test_image_prep(area_id):
    """Build HDF5 stores of resized RGB images and masks for valtrain/valtest."""
    prefix = area_id_to_prefix(area_id)
    logger.info("valtrain_test_image_prep for {}".format(prefix))
    df_train = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    band_cut_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    df_summary = _load_train_summary_data(area_id)

    def _fill_store(fn, index, build):
        # One blosc-compressed carray per image id in a fresh HDF5 file.
        logger.info("Prepare image container: {}".format(fn))
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(index, total=len(index)):
                im = build(image_id)
                atom = tb.Atom.from_dtype(im.dtype)
                filters = tb.Filters(complib='blosc', complevel=9)
                ds = f.create_carray(f.root, image_id, atom, im.shape,
                                     filters=filters)
                ds[:] = im

    # Both splits read *training* image paths: the validation split is carved
    # out of the Train set (matches the original implementation).
    _fill_store(FMT_VALTRAIN_IM_STORE.format(prefix), df_train.index,
                lambda i: get_resized_raster_3chan_image(i, band_cut_th))
    _fill_store(FMT_VALTEST_IM_STORE.format(prefix), df_test.index,
                lambda i: get_resized_raster_3chan_image(i, band_cut_th))
    _fill_store(FMT_VALTRAIN_MASK_STORE.format(prefix), df_train.index,
                lambda i: image_mask_resized_from_summary(df_summary, i))
    _fill_store(FMT_VALTEST_MASK_STORE.format(prefix), df_test.index,
                lambda i: image_mask_resized_from_summary(df_summary, i))
def train_test_mul_image_prep(area_id):
    """Build HDF5 stores of resized 8-channel (multispectral) images for train/test."""
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(
        FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    band_rgb_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    band_mul_th = __load_band_cut_th(
        FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
    # Loaded for parity with the original implementation (unused below).
    df_summary = _load_train_summary_data(area_id)

    def _fill_store(fn, index, build):
        # One blosc-compressed carray per image id in a fresh HDF5 file.
        logger.info("Prepare image container: {}".format(fn))
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(index, total=len(index)):
                im = build(image_id)
                atom = tb.Atom.from_dtype(im.dtype)
                filters = tb.Filters(complib='blosc', complevel=9)
                ds = f.create_carray(f.root, image_id, atom, im.shape,
                                     filters=filters)
                ds[:] = im

    _fill_store(FMT_TRAIN_MUL_STORE.format(prefix), df_train.index,
                lambda i: get_resized_raster_8chan_image(
                    i, band_rgb_th, band_mul_th))
    _fill_store(FMT_TEST_MUL_STORE.format(prefix), df_test.index,
                lambda i: get_resized_raster_8chan_image_test(
                    i, band_rgb_th, band_mul_th))
def valtrain_test_mul_image_prep(area_id):
    """Build HDF5 stores of resized 8-channel images for valtrain/valtest."""
    prefix = area_id_to_prefix(area_id)
    logger.info("valtrain_test_image_prep for {}".format(prefix))
    df_train = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    band_rgb_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    band_mul_th = __load_band_cut_th(
        FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
    # Loaded for parity with the original implementation (unused below).
    df_summary = _load_train_summary_data(area_id)

    def _fill_store(fn, index, build):
        # One blosc-compressed carray per image id in a fresh HDF5 file.
        logger.info("Prepare image container: {}".format(fn))
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(index, total=len(index)):
                im = build(image_id)
                atom = tb.Atom.from_dtype(im.dtype)
                filters = tb.Filters(complib='blosc', complevel=9)
                ds = f.create_carray(f.root, image_id, atom, im.shape,
                                     filters=filters)
                ds[:] = im

    # Both splits come from the Train set, so both use the train-path loader.
    _fill_store(FMT_VALTRAIN_MUL_STORE.format(prefix), df_train.index,
                lambda i: get_resized_raster_8chan_image(
                    i, band_rgb_th, band_mul_th))
    _fill_store(FMT_VALTEST_MUL_STORE.format(prefix), df_test.index,
                lambda i: get_resized_raster_8chan_image(
                    i, band_rgb_th, band_mul_th))
def _load_train_summary_data(area_id):
    """Load the building-footprint summary CSV for the given AOI."""
    prefix = area_id_to_prefix(area_id)
    return pd.read_csv(FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix))
def split_val_train_test(area_id):
    """Shuffle the AOI's image ids and persist a 70/30 valtrain/valtest split."""
    prefix = area_id_to_prefix(area_id)
    df = _load_train_summary_data(area_id)
    image_id_list = df.groupby('ImageId').agg('first').index.tolist()
    np.random.shuffle(image_id_list)

    cut = int(len(image_id_list) * 0.7)
    for ids, fmt in ((image_id_list[:cut], FMT_VALTRAIN_IMAGELIST_PATH),
                     (image_id_list[cut:], FMT_VALTEST_IMAGELIST_PATH)):
        pd.DataFrame({'ImageId': ids}).to_csv(
            fmt.format(prefix=prefix),
            index=False)
def train_image_id_to_mspec_path(image_id):
    """Return the multispectral (MUL) training image path for *image_id*."""
    return FMT_TRAIN_MSPEC_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def test_image_id_to_mspec_path(image_id):
    """Return the multispectral (MUL) test image path for *image_id*."""
    return FMT_TEST_MSPEC_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def train_image_id_to_path(image_id):
    """Return the RGB training image path for *image_id*."""
    return FMT_TRAIN_RGB_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def test_image_id_to_path(image_id):
    """Return the RGB test image path for *image_id*."""
    return FMT_TEST_RGB_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def image_id_to_prefix(image_id):
    """Derive the AOI prefix from an image id.

    Image ids look like ``<prefix>_img<number>``; everything before the
    first ``img`` (minus the trailing separator character) is the prefix.
    """
    head, _sep, _tail = image_id.partition('img')
    return head[:-1]
def calc_multiband_cut_threshold(area_id):
    """Compute RGB band cut thresholds for *area_id* and persist them as CSV."""
    band_cut_th = __calc_multiband_cut_threshold(area_id)
    prefix = area_id_to_prefix(area_id)
    # One CSV row per AOI: prefix, area_id, then chan<i>_max/chan<i>_min.
    row = dict(prefix=prefix, area_id=area_id)
    for chan_i, th in band_cut_th.items():
        row['chan{}_max'.format(chan_i)] = th['max']
        row['chan{}_min'.format(chan_i)] = th['min']
    pd.DataFrame([row]).to_csv(FMT_BANDCUT_TH_PATH.format(prefix), index=False)
def __calc_multiband_cut_threshold(area_id):
    """Compute per-channel min/max cut thresholds for the 3 RGB bands.

    Samples up to 500 images from each of the valtrain and valtest image
    lists, pools all non-zero pixel values per channel and takes the 2nd
    and 98th percentiles as the clipping window used for normalization.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :return: dict ``{channel: {'min': value, 'max': value}}``.
    """
    prefix = area_id_to_prefix(area_id)
    band_values = {k: [] for k in range(3)}
    band_cut_th = {k: dict(max=0, min=0) for k in range(3)}

    def _collect(fmt_imagelist_path):
        # Accumulate non-zero pixel values per channel from the first 500
        # images of the given list (zero pixels are the sensored mask).
        image_id_list = pd.read_csv(fmt_imagelist_path.format(
            prefix=prefix)).ImageId.tolist()
        for image_id in tqdm.tqdm(image_id_list[:500]):
            image_fn = train_image_id_to_path(image_id)
            with rasterio.open(image_fn, 'r') as f:
                values = f.read().astype(np.float32)
            for i_chan in range(3):
                values_ = values[i_chan].ravel()
                # Remove sensored mask (vectorized; replaces the old
                # per-pixel Python list comprehension)
                band_values[i_chan].append(values_[values_ != 0])

    # Both image lists reference files in the training folder, hence
    # train_image_id_to_path for both.
    _collect(FMT_VALTRAIN_IMAGELIST_PATH)
    _collect(FMT_VALTEST_IMAGELIST_PATH)

    for i_chan in range(3):
        merged = np.concatenate(band_values[i_chan]).ravel()
        # np.percentile: scipy.percentile was deprecated in SciPy 1.0 and
        # removed in 1.2; numpy provides the same computation.
        band_cut_th[i_chan]['max'] = np.percentile(merged, 98)
        band_cut_th[i_chan]['min'] = np.percentile(merged, 2)
    return band_cut_th
def calc_mul_multiband_cut_threshold(area_id):
    """Compute multispectral band cut thresholds for *area_id*; save as CSV."""
    band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
    prefix = area_id_to_prefix(area_id)
    # One CSV row per AOI: prefix, area_id, then chan<i>_max/chan<i>_min.
    row = dict(prefix=prefix, area_id=area_id)
    for chan_i, th in band_cut_th.items():
        row['chan{}_max'.format(chan_i)] = th['max']
        row['chan{}_min'.format(chan_i)] = th['min']
    pd.DataFrame([row]).to_csv(
        FMT_MUL_BANDCUT_TH_PATH.format(prefix),
        index=False)
def __calc_mul_multiband_cut_threshold(area_id):
    """Compute per-channel min/max cut thresholds for the 8 MUL bands.

    Samples up to 500 images from each of the valtrain and valtest image
    lists, pools all non-zero pixel values per channel and takes the 2nd
    and 98th percentiles as the clipping window used for normalization.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :return: dict ``{channel: {'min': value, 'max': value}}``.
    """
    prefix = area_id_to_prefix(area_id)
    band_values = {k: [] for k in range(8)}
    band_cut_th = {k: dict(max=0, min=0) for k in range(8)}

    def _collect(fmt_imagelist_path):
        # Accumulate non-zero pixel values per channel from the first 500
        # images of the given list (zero pixels are the sensored mask).
        image_id_list = pd.read_csv(fmt_imagelist_path.format(
            prefix=prefix)).ImageId.tolist()
        for image_id in tqdm.tqdm(image_id_list[:500]):
            image_fn = train_image_id_to_mspec_path(image_id)
            with rasterio.open(image_fn, 'r') as f:
                values = f.read().astype(np.float32)
            for i_chan in range(8):
                values_ = values[i_chan].ravel()
                # Remove sensored mask (vectorized; replaces the old
                # per-pixel Python list comprehension)
                band_values[i_chan].append(values_[values_ != 0])

    # Both image lists reference files in the training folder, hence
    # train_image_id_to_mspec_path for both.
    _collect(FMT_VALTRAIN_IMAGELIST_PATH)
    _collect(FMT_VALTEST_IMAGELIST_PATH)

    for i_chan in range(8):
        merged = np.concatenate(band_values[i_chan]).ravel()
        # np.percentile: scipy.percentile was deprecated in SciPy 1.0 and
        # removed in 1.2; numpy provides the same computation.
        band_cut_th[i_chan]['max'] = np.percentile(merged, 98)
        band_cut_th[i_chan]['min'] = np.percentile(merged, 2)
    return band_cut_th
def get_unet():
    """Build and compile the 8-channel, 256x256 U-Net (Keras 1.x API).

    Encoder: four double-conv + max-pool stages (32 -> 256 filters) plus a
    512-filter bottleneck.  Decoder: four upsample + skip-concat + double-conv
    stages back to 32 filters, then a 1x1 sigmoid head producing a 1-channel
    mask.  Compiled with Adam / binary cross-entropy and Jaccard metrics.
    """
    conv_params = dict(activation='relu', border_mode='same')
    merge_params = dict(mode='concat', concat_axis=1)

    def double_conv(n_filters, tensor):
        # Two stacked 3x3 convolutions with shared hyper-parameters.
        tensor = Convolution2D(n_filters, 3, 3, **conv_params)(tensor)
        return Convolution2D(n_filters, 3, 3, **conv_params)(tensor)

    inputs = Input((8, 256, 256))
    conv1 = double_conv(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = double_conv(512, pool4)

    up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
    conv6 = double_conv(256, up6)
    up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
    conv7 = double_conv(128, up7)
    up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
    conv8 = double_conv(64, up8)
    up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
    conv9 = double_conv(32, up9)
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy',
                  metrics=['accuracy', jaccard_coef, jaccard_coef_int])
    return model
def jaccard_coef(y_true, y_pred):
    """Soft (non-thresholded) Jaccard index metric, smoothed for stability."""
    eps = 1e-12
    overlap = K.sum(y_true * y_pred, axis=[0, -1, -2])
    total = K.sum(y_true + y_pred, axis=[0, -1, -2])
    return K.mean((overlap + eps) / (total - overlap + eps))
def jaccard_coef_int(y_true, y_pred):
    """Jaccard index on binarized predictions (clipped to [0, 1], rounded)."""
    eps = 1e-12
    y_bin = K.round(K.clip(y_pred, 0, 1))
    overlap = K.sum(y_true * y_bin, axis=[0, -1, -2])
    total = K.sum(y_true + y_bin, axis=[0, -1, -2])
    return K.mean((overlap + eps) / (total - overlap + eps))
def generate_test_batch(area_id,
                        batch_size=64,
                        immean=None,
                        enable_tqdm=False):
    """Yield (X_test, y_test) batches for the test split, forever.

    Keras ``predict_generator`` expects an endless generator; the caller
    bounds consumption via ``val_samples``.  ``y_test`` is all-zero dummy
    masks (no ground truth exists for the test split).

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :param batch_size: number of images per yielded batch.
    :param immean: optional mean image subtracted from each batch.
    :param enable_tqdm: when True, report progress with a tqdm bar.
    """
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_TEST_MUL_STORE.format(prefix)
    image_id_list = df_test.ImageId.tolist()
    if enable_tqdm:
        pbar = tqdm.tqdm(total=len(image_id_list))
    while 1:
        total_sz = len(image_id_list)
        # floor(n/bs) + 1 can produce one empty trailing batch; it is
        # skipped by the `continue` below.
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im:
            for i_batch in range(n_batch):
                target_image_ids = image_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_image_ids) == 0:
                    continue
                X_test = []
                y_test = []
                for image_id in target_image_ids:
                    im = np.array(f_im.get_node('/' + image_id))
                    # stored (w, h, ch); reorder to channel-first
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_test.append(im)
                    mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
                    y_test.append(mask)
                X_test = np.array(X_test)
                y_test = np.array(y_test)
                y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_test = X_test - immean
                if enable_tqdm:
                    pbar.update(y_test.shape[0])
                yield (X_test, y_test)
            if enable_tqdm:
                pbar.close()
def get_resized_raster_8chan_image_test(image_id, band_rgb_th, band_mul_th):
    """
    RGB + multispectral (total: 8 channels)

    Build the normalized, resized 8-channel input for a *test* image id:
    the three RGB bands followed by multispectral bands 1, 2, 5, 6 and 7.
    Returns an array laid out as (w, h, ch).
    """
    def _norm_resized(raster, thresholds, chan_i):
        # Clip to the per-channel percentile window, rescale to [0, 1]
        # and resize to the network input size.
        lo = thresholds[chan_i]['min']
        hi = thresholds[chan_i]['max']
        band = np.clip(raster[chan_i], lo, hi)
        band = (band - lo) / (hi - lo)
        return skimage.transform.resize(band, (INPUT_SIZE, INPUT_SIZE))

    channels = []
    with rasterio.open(test_image_id_to_path(image_id), 'r') as f:
        rgb_raster = f.read().astype(np.float32)
    for chan_i in range(3):
        channels.append(_norm_resized(rgb_raster, band_rgb_th, chan_i))

    with rasterio.open(test_image_id_to_mspec_path(image_id), 'r') as f:
        mul_raster = f.read().astype(np.float32)
    for chan_i in [1, 2, 5, 6, 7]:
        channels.append(_norm_resized(mul_raster, band_mul_th, chan_i))

    im = np.array(channels)       # (ch, w, h)
    im = np.swapaxes(im, 0, 2)    # -> (h, w, ch)
    im = np.swapaxes(im, 0, 1)    # -> (w, h, ch)
    return im
def get_resized_raster_8chan_image(image_id, band_rgb_th, band_mul_th):
    """
    RGB + multispectral (total: 8 channels)

    Build the normalized, resized 8-channel input for a *training* image
    id: the three RGB bands followed by multispectral bands 1, 2, 5, 6
    and 7.  Returns an array laid out as (w, h, ch).
    """
    def _norm_resized(raster, thresholds, chan_i):
        # Clip to the per-channel percentile window, rescale to [0, 1]
        # and resize to the network input size.
        lo = thresholds[chan_i]['min']
        hi = thresholds[chan_i]['max']
        band = np.clip(raster[chan_i], lo, hi)
        band = (band - lo) / (hi - lo)
        return skimage.transform.resize(band, (INPUT_SIZE, INPUT_SIZE))

    channels = []
    with rasterio.open(train_image_id_to_path(image_id), 'r') as f:
        rgb_raster = f.read().astype(np.float32)
    for chan_i in range(3):
        channels.append(_norm_resized(rgb_raster, band_rgb_th, chan_i))

    with rasterio.open(train_image_id_to_mspec_path(image_id), 'r') as f:
        mul_raster = f.read().astype(np.float32)
    for chan_i in [1, 2, 5, 6, 7]:
        channels.append(_norm_resized(mul_raster, band_mul_th, chan_i))

    im = np.array(channels)       # (ch, w, h)
    im = np.swapaxes(im, 0, 2)    # -> (h, w, ch)
    im = np.swapaxes(im, 0, 1)    # -> (w, h, ch)
    return im
def _get_train_mul_data(area_id):
    """
    RGB + multispectral (total: 8 channels)

    Load the full training split: channel-first 8-channel images and the
    matching binarized (> 0.5) uint8 masks shaped (N, 1, size, size).
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix))
    image_ids = df_train.ImageId.tolist()

    X_train = []
    with tb.open_file(FMT_TRAIN_MUL_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_train.append(np.transpose(im, (2, 0, 1)))
    X_train = np.array(X_train)

    y_train = []
    with tb.open_file(FMT_TRAIN_MASK_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            mask = np.array(f.get_node('/' + image_id))
            y_train.append((mask > 0.5).astype(np.uint8))
    y_train = np.array(y_train).reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_train, y_train
def _get_test_mul_data(area_id):
    """
    RGB + multispectral (total: 8 channels)

    Load the test split's channel-first 8-channel images (the test split
    has no masks).
    """
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
    X_test = []
    with tb.open_file(FMT_TEST_MUL_STORE.format(prefix), 'r') as f:
        for image_id in df_test.ImageId.tolist():
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_test.append(np.transpose(im, (2, 0, 1)))
    return np.array(X_test)
def _get_valtest_mul_data(area_id):
    """Load valtest 8-channel images and binarized masks.

    Returns ``(X_val, y_val)`` with channel-first images and uint8 masks
    (threshold > 0.5) shaped (N, 1, size, size).
    """
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
    image_ids = df_test.ImageId.tolist()

    X_val = []
    with tb.open_file(FMT_VALTEST_MUL_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_val.append(np.transpose(im, (2, 0, 1)))
    X_val = np.array(X_val)

    y_val = []
    with tb.open_file(FMT_VALTEST_MASK_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            mask = np.array(f.get_node('/' + image_id))
            y_val.append((mask > 0.5).astype(np.uint8))
    y_val = np.array(y_val).reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def _get_valtrain_mul_data(area_id):
    """Load valtrain 8-channel images and binarized masks.

    Returns ``(X_val, y_val)`` with channel-first images and uint8 masks
    (threshold > 0.5) shaped (N, 1, size, size).
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    image_ids = df_train.ImageId.tolist()

    X_val = []
    with tb.open_file(FMT_VALTRAIN_MUL_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_val.append(np.transpose(im, (2, 0, 1)))
    X_val = np.array(X_val)

    y_val = []
    with tb.open_file(FMT_VALTRAIN_MASK_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            mask = np.array(f.get_node('/' + image_id))
            y_val.append((mask > 0.5).astype(np.uint8))
    y_val = np.array(y_val).reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def get_mul_mean_image(area_id):
    """Load the stored per-pixel mean 8-channel image for *area_id*."""
    prefix = area_id_to_prefix(area_id)
    fn = FMT_MULMEAN.format(prefix)
    with tb.open_file(fn, 'r') as f:
        return np.array(f.get_node('/mulmean'))
def preproc_stage3(area_id):
    """Stage-3 preprocessing: build MUL stores if missing, then store the
    per-pixel mean image over train + test data for later subtraction."""
    prefix = area_id_to_prefix(area_id)
    if not Path(FMT_VALTEST_MUL_STORE.format(prefix)).exists():
        valtrain_test_mul_image_prep(area_id)
    if not Path(FMT_TEST_MUL_STORE.format(prefix)).exists():
        train_test_mul_image_prep(area_id)

    # mean image for subtract preprocessing
    X1, _ = _get_train_mul_data(area_id)
    X2 = _get_test_mul_data(area_id)
    X = np.vstack([X1, X2])
    print(X.shape)
    X_mean = X.mean(axis=0)

    fn = FMT_MULMEAN.format(prefix)
    logger.info("Prepare mean image: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        atom = tb.Atom.from_dtype(X_mean.dtype)
        ds = f.create_carray(f.root, 'mulmean', atom, X_mean.shape,
                             filters=tb.Filters(complib='blosc', complevel=9))
        ds[:] = X_mean
def _internal_test_predict_best_param(area_id,
                                      save_pred=True):
    """Predict test-split masks using the best validation epoch's weights.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :param save_pred: when True, persist predictions to the HDF5 store.
    :return: ndarray of predicted masks, one per test image.
    """
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    epoch = param['fn_epoch']
    # min_th is read for parity with sibling helpers but not used here;
    # area thresholding happens during postprocessing.
    min_th = param['min_poly_area']
    # Prediction phase
    logger.info("Prediction phase: {}".format(prefix))
    X_mean = get_mul_mean_image(area_id)
    # Load model weights
    # Predict and Save prediction result
    fn = FMT_TESTPRED_PATH.format(prefix)
    fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
    fn_model = fn_model.format(epoch=epoch)
    model = get_unet()
    model.load_weights(fn_model)
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    y_pred = model.predict_generator(
        generate_test_batch(
            area_id,
            batch_size=64,
            immean=X_mean,
            enable_tqdm=True,
        ),
        val_samples=len(df_test),
    )
    del model
    # Save prediction result
    if save_pred:
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    return y_pred
def _internal_test(area_id, enable_tqdm=False):
    """Full test pipeline: predict masks, polygonize, write the test CSV.

    :param area_id: numeric AOI id.
    :param enable_tqdm: accepted for interface parity; not used in this body.
    """
    prefix = area_id_to_prefix(area_id)
    y_pred = _internal_test_predict_best_param(area_id, save_pred=False)
    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']
    # Postprocessing phase
    logger.info("Postprocessing phase")
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn_out = FMT_TESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_image_list = df_test.index.tolist()
        for idx, image_id in tqdm.tqdm(enumerate(test_image_list),
                                       total=len(test_image_list)):
            # Convert the predicted mask into building polygons, dropping
            # polygons below the tuned minimum-area threshold.
            df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                # Placeholder row for images with no detected buildings.
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
def validate_score(area_id):
    """
    Calc competition score

    Runs three stages, each skipped when its output file already exists:
    (1) predict masks for the valtest split and store them in HDF5,
    (2) polygonize predictions into the submission CSV format,
    (3) write the matching ground-truth CSV for scoring.
    """
    prefix = area_id_to_prefix(area_id)
    # Prediction phase
    if not Path(FMT_VALTESTPRED_PATH.format(prefix)).exists():
        X_val, y_val = _get_valtest_mul_data(area_id)
        X_mean = get_mul_mean_image(area_id)
        # Load model weights
        # Predict and Save prediction result
        model = get_unet()
        model.load_weights(FMT_VALMODEL_PATH.format(prefix))
        y_pred = model.predict(X_val - X_mean, batch_size=8, verbose=1)
        del model
        # Save prediction result
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    # Postprocessing phase
    if not Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists():
        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test, index_col='ImageId')
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'r') as f:
            y_pred = np.array(f.get_node('/pred'))
            print(y_pred.shape)
        fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            for idx, image_id in enumerate(df_test.index.tolist()):
                # Default min-area threshold (MIN_POLYGON_AREA) applies.
                df_poly = mask_to_poly(y_pred[idx][0])
                if len(df_poly) > 0:
                    for i, row in df_poly.iterrows():
                        f.write("{},{},\"{}\",{:.6f}\n".format(
                            image_id,
                            row.bid,
                            row.wkt,
                            row.area_ratio))
                else:
                    f.write("{},{},{},0\n".format(
                        image_id,
                        -1,
                        "POLYGON EMPTY"))
        # update fn_out
        # Rewrite the CSV with polygon interiors stripped from each row.
        with open(fn_out, 'r') as f:
            lines = f.readlines()
        with open(fn_out, 'w') as f:
            f.write(lines[0])
            for line in lines[1:]:
                line = _remove_interiors(line)
                f.write(line)
    # Validation solution file
    if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
        fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
        df_true = pd.read_csv(fn_true)
        # # Remove prefix "PAN_"
        # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test)
        df_test_image_ids = df_test.ImageId.unique()
        fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            # Keep only ground truth rows for images in the valtest split.
            df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
            for idx, r in df_true.iterrows():
                f.write("{},{},\"{}\",{:.6f}\n".format(
                    r.ImageId,
                    r.BuildingId,
                    r.PolygonWKT_Pix,
                    1.0))
def validate_all_score():
    """Concatenate per-AOI (area ids 2..5) truth and prediction CSVs into
    single overall files, keeping one header line."""
    def _concat_areas(fmt_in, fn_out):
        # Collect the header once and append every AOI's data rows.
        header_line = []
        lines = []
        for area_id in range(2, 6):
            prefix = area_id_to_prefix(area_id)
            assert Path(fmt_in.format(prefix)).exists()
            with open(fmt_in.format(prefix), 'r') as f:
                header_line = f.readline()
                lines += f.readlines()
        with open(fn_out, 'w') as f:
            f.write(header_line)
            for line in lines:
                f.write(line)

    # Ground-truth polygons
    _concat_areas(FMT_VALTESTTRUTH_PATH, FMT_VALTESTTRUTH_OVALL_PATH)
    # Predicted polygons
    _concat_areas(FMT_VALTESTPOLY_PATH, FMT_VALTESTPOLY_OVALL_PATH)
def generate_valtest_batch(area_id,
                           batch_size=8,
                           immean=None,
                           enable_tqdm=False):
    """Yield (X, y) batches for the valtest split, forever.

    Same contract as ``generate_test_batch`` but reads real masks from the
    valtest mask store instead of emitting dummy zeros.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :param batch_size: number of images per yielded batch.
    :param immean: optional mean image subtracted from each batch.
    :param enable_tqdm: when True, report progress with a tqdm bar.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
    image_id_list = df_train.ImageId.tolist()
    if enable_tqdm:
        pbar = tqdm.tqdm(total=len(image_id_list))
    while 1:
        total_sz = len(image_id_list)
        # floor(n/bs) + 1 can produce one empty trailing batch; it is
        # skipped by the `continue` below.
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_image_ids = image_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_image_ids) == 0:
                    continue
                X_train = []
                y_train = []
                for image_id in target_image_ids:
                    im = np.array(f_im.get_node('/' + image_id))
                    # stored (w, h, ch); reorder to channel-first
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + image_id))
                    # NOTE: binarized with > 0 here; loaders use > 0.5.
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)
                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_train = X_train - immean
                if enable_tqdm:
                    pbar.update(y_train.shape[0])
                yield (X_train, y_train)
            if enable_tqdm:
                pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
    """Yield (X, y) training batches from the valtrain split, forever.

    Image ids are shuffled once up front; the same order is then replayed
    on every pass over the data.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :param batch_size: number of images per yielded batch.
    :param immean: optional mean image subtracted from each batch.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
    image_id_list = df_train.ImageId.tolist()
    np.random.shuffle(image_id_list)
    while 1:
        total_sz = len(image_id_list)
        # floor(n/bs) + 1 can produce one empty trailing batch; it is
        # skipped by the `continue` below.
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_image_ids = image_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_image_ids) == 0:
                    continue
                X_train = []
                y_train = []
                for image_id in target_image_ids:
                    im = np.array(f_im.get_node('/' + image_id))
                    # stored (w, h, ch); reorder to channel-first
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + image_id))
                    # NOTE: binarized with > 0 here; loaders use > 0.5.
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)
                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_train = X_train - immean
                yield (X_train, y_train)
def _get_test_data(area_id):
    """Load the test split's RGB images (IM store) as a channel-first array."""
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
    X_test = []
    with tb.open_file(FMT_TEST_IM_STORE.format(prefix), 'r') as f:
        for image_id in df_test.ImageId.tolist():
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_test.append(np.transpose(im, (2, 0, 1)))
    return np.array(X_test)
def _get_valtest_data(area_id):
    """Load valtest RGB images (IM store) and binarized masks.

    Returns ``(X_val, y_val)`` with channel-first images and uint8 masks
    (threshold > 0.5) shaped (N, 1, size, size).
    """
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
    image_ids = df_test.ImageId.tolist()

    X_val = []
    with tb.open_file(FMT_VALTEST_IM_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_val.append(np.transpose(im, (2, 0, 1)))
    X_val = np.array(X_val)

    y_val = []
    with tb.open_file(FMT_VALTEST_MASK_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            mask = np.array(f.get_node('/' + image_id))
            y_val.append((mask > 0.5).astype(np.uint8))
    y_val = np.array(y_val).reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def _get_valtrain_data(area_id):
    """Load valtrain RGB images (IM store) and binarized masks.

    Returns ``(X_val, y_val)`` with channel-first images and uint8 masks
    (threshold > 0.5) shaped (N, 1, size, size).
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    image_ids = df_train.ImageId.tolist()

    X_val = []
    with tb.open_file(FMT_VALTRAIN_IM_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            im = np.array(f.get_node('/' + image_id))
            # stored (w, h, ch) -> channel-first
            X_val.append(np.transpose(im, (2, 0, 1)))
    X_val = np.array(X_val)

    y_val = []
    with tb.open_file(FMT_VALTRAIN_MASK_STORE.format(prefix), 'r') as f:
        for image_id in image_ids:
            mask = np.array(f.get_node('/' + image_id))
            y_val.append((mask > 0.5).astype(np.uint8))
    y_val = np.array(y_val).reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def predict(area_id):
    """Predict building masks for the whole test split and store them in HDF5."""
    prefix = area_id_to_prefix(area_id)
    X_test = _get_test_mul_data(area_id)
    X_mean = get_mul_mean_image(area_id)

    # Load model weights, predict on mean-subtracted input, then free the model.
    model = get_unet()
    model.load_weights(FMT_VALMODEL_PATH.format(prefix))
    y_pred = model.predict(X_test - X_mean, batch_size=8, verbose=1)
    del model

    # Save prediction result
    fn = FMT_TESTPRED_PATH.format(prefix)
    with tb.open_file(fn, 'w') as f:
        atom = tb.Atom.from_dtype(y_pred.dtype)
        ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                             filters=tb.Filters(complib='blosc', complevel=9))
        ds[:] = y_pred
def _internal_validate_predict_best_param(area_id,
                                          enable_tqdm=False):
    """Predict on the validation split with the best epoch's weights.

    The best epoch comes from the stored model parameters; predictions are
    not persisted (``save_pred=False``).
    """
    best_epoch = _get_model_parameter(area_id)['fn_epoch']
    return _internal_validate_predict(
        area_id,
        epoch=best_epoch,
        save_pred=False,
        enable_tqdm=enable_tqdm)
def _internal_validate_predict(area_id,
                               epoch=3,
                               save_pred=True,
                               enable_tqdm=False):
    """Predict valtest masks with the weights checkpointed at *epoch*.

    :param area_id: numeric AOI id, mapped to a filename prefix.
    :param epoch: epoch number substituted into the weight-file name.
    :param save_pred: when True, persist predictions to the HDF5 store.
    :param enable_tqdm: forwarded to the batch generator's progress bar.
    :return: ndarray of predicted masks.
    """
    prefix = area_id_to_prefix(area_id)
    X_mean = get_mul_mean_image(area_id)
    # Load model weights
    # Predict and Save prediction result
    fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
    fn_model = fn_model.format(epoch=epoch)
    model = get_unet()
    model.load_weights(fn_model)
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    y_pred = model.predict_generator(
        generate_valtest_batch(
            area_id,
            batch_size=64,
            immean=X_mean,
            enable_tqdm=enable_tqdm,
        ),
        val_samples=len(df_test),
    )
    del model
    # Save prediction result
    if save_pred:
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root,
                                 'pred',
                                 atom,
                                 y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
                                           epoch=3,
                                           min_th=MIN_POLYGON_AREA,
                                           enable_tqdm=False):
    """Predict in memory, polygonize, and write validation + truth CSVs.

    Unlike ``_internal_validate_fscore`` this variant does not read or
    write the HDF5 prediction file; predictions stay in memory only.

    :param epoch: checkpoint epoch whose weights are used for prediction.
    :param min_th: minimum polygon area kept during polygonization.
    """
    prefix = area_id_to_prefix(area_id)
    # Prediction phase
    logger.info("Prediction phase")
    y_pred = _internal_validate_predict(
        area_id,
        epoch=epoch,
        save_pred=False,
        enable_tqdm=enable_tqdm)
    # Postprocessing phase
    logger.info("Postprocessing phase")
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    # NOTE(review): fn is assigned but unused in this variant.
    fn = FMT_VALTESTPRED_PATH.format(prefix)
    fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_list = df_test.index.tolist()
        iterator = enumerate(test_list)
        for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
            df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                # Placeholder row for images with no detected buildings.
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
    # ------------------------
    # Validation solution file
    logger.info("Validation solution file")
    fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
    df_true = pd.read_csv(fn_true)
    # # Remove prefix "PAN_"
    # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test)
    df_test_image_ids = df_test.ImageId.unique()
    fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        # Keep only ground-truth rows for images in the valtest split.
        df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
        for idx, r in df_true.iterrows():
            f.write("{},{},\"{}\",{:.6f}\n".format(
                r.ImageId,
                r.BuildingId,
                r.PolygonWKT_Pix,
                1.0))
def _internal_validate_fscore(area_id,
                              epoch=3,
                              predict=True,
                              min_th=MIN_POLYGON_AREA,
                              enable_tqdm=False):
    """Polygonize stored valtest predictions and write validation CSVs.

    :param predict: when True, (re)generate and store predictions first;
        when False, reuse the existing HDF5 prediction file.
    :param min_th: minimum polygon area kept during polygonization.
    """
    prefix = area_id_to_prefix(area_id)
    # Prediction phase
    logger.info("Prediction phase")
    if predict:
        _internal_validate_predict(
            area_id,
            epoch=epoch,
            enable_tqdm=enable_tqdm)
    # Postprocessing phase
    logger.info("Postprocessing phase")
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn = FMT_VALTESTPRED_PATH.format(prefix)
    fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f,\
            tb.open_file(fn, 'r') as fr:
        y_pred = np.array(fr.get_node('/pred'))
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_list = df_test.index.tolist()
        iterator = enumerate(test_list)
        for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
            df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                # Placeholder row for images with no detected buildings.
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
    # ------------------------
    # Validation solution file
    logger.info("Validation solution file")
    # if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
    if True:
        fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
        df_true = pd.read_csv(fn_true)
        # # Remove prefix "PAN_"
        # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test)
        df_test_image_ids = df_test.ImageId.unique()
        fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            # Keep only ground-truth rows for images in the valtest split.
            df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
            for idx, r in df_true.iterrows():
                f.write("{},{},\"{}\",{:.6f}\n".format(
                    r.ImageId,
                    r.BuildingId,
                    r.PolygonWKT_Pix,
                    1.0))
@click.group()
def cli():
    """Top-level click command group; sub-commands are registered below."""
    pass
@cli.command()
@click.argument('datapath', type=str)
def validate(datapath):
    """Train the U-Net on the valtrain split, validating on valtest.

    Saves per-epoch checkpoints, the final weights and the fit-history CSV.
    """
    area_id = directory_name_to_area_id(datapath)
    prefix = area_id_to_prefix(area_id)
    logger.info(">> validate sub-command: {}".format(prefix))
    X_mean = get_mul_mean_image(area_id)
    X_val, y_val = _get_valtest_mul_data(area_id)
    X_val = X_val - X_mean
    if not Path(MODEL_DIR).exists():
        Path(MODEL_DIR).mkdir(parents=True)
    logger.info("load valtrain")
    X_trn, y_trn = _get_valtrain_mul_data(area_id)
    X_trn = X_trn - X_mean
    model = get_unet()
    # Checkpoint every epoch (save_best_only=False) so evalfscore can
    # later pick the best epoch by F-score.
    model_checkpoint = ModelCheckpoint(
        FMT_VALMODEL_PATH.format(prefix + "_{epoch:02d}"),
        monitor='val_jaccard_coef_int',
        save_best_only=False)
    model_earlystop = EarlyStopping(
        monitor='val_jaccard_coef_int',
        patience=10,
        verbose=0,
        mode='max')
    model_history = History()
    # NOTE(review): df_train is loaded here but never used below.
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    logger.info("Fit")
    model.fit(
        X_trn, y_trn,
        nb_epoch=200,
        shuffle=True,
        verbose=1,
        validation_data=(X_val, y_val),
        callbacks=[model_checkpoint, model_earlystop, model_history])
    model.save_weights(FMT_VALMODEL_LAST_PATH.format(prefix))
    # Save evaluation history
    pd.DataFrame(model_history.history).to_csv(
        FMT_VALMODEL_HIST.format(prefix), index=False)
    logger.info(">> validate sub-command: {} ... Done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def testproc(datapath):
    """Run the full test pipeline (predict + polygonize) for one AOI."""
    area_id = directory_name_to_area_id(datapath)
    prefix = area_id_to_prefix(area_id)
    logger.info(">>>> Test proc for {}".format(prefix))
    _internal_test(area_id)
    logger.info(">>>> Test proc for {} ... done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def evalfscore(datapath):
    """Evaluate F-score per epoch, then tune the min-polygon-area threshold.

    Writes two CSVs: per-epoch scores (FMT_VALMODEL_EVALHIST) and
    per-threshold scores for the best epoch (FMT_VALMODEL_EVALTHHIST).
    """
    area_id = directory_name_to_area_id(datapath)
    prefix = area_id_to_prefix(area_id)
    logger.info("Evaluate fscore on validation set: {}".format(prefix))
    # for each epoch
    # if not Path(FMT_VALMODEL_EVALHIST.format(prefix)).exists():
    if True:
        df_hist = pd.read_csv(FMT_VALMODEL_HIST.format(prefix))
        df_hist.loc[:, 'epoch'] = list(range(1, len(df_hist) + 1))
        rows = []
        for zero_base_epoch in range(0, len(df_hist)):
            logger.info(">>> Epoch: {}".format(zero_base_epoch))
            _internal_validate_fscore_wo_pred_file(
                area_id,
                epoch=zero_base_epoch,
                enable_tqdm=True,
                min_th=MIN_POLYGON_AREA)
            evaluate_record = _calc_fscore_per_aoi(area_id)
            evaluate_record['zero_base_epoch'] = zero_base_epoch
            evaluate_record['min_area_th'] = MIN_POLYGON_AREA
            evaluate_record['area_id'] = area_id
            logger.info("\n" + json.dumps(evaluate_record, indent=4))
            rows.append(evaluate_record)
        pd.DataFrame(rows).to_csv(
            FMT_VALMODEL_EVALHIST.format(prefix),
            index=False)
    # find best min-poly-threshold
    df_evalhist = pd.read_csv(FMT_VALMODEL_EVALHIST.format(prefix))
    best_row = df_evalhist.sort_values(by='fscore', ascending=False).iloc[0]
    best_epoch = int(best_row.zero_base_epoch)
    # NOTE(review): best_fscore is assigned but not used afterwards.
    best_fscore = best_row.fscore
    # optimize min area th
    rows = []
    for th in [30, 60, 90, 120, 150, 180, 210, 240]:
        logger.info(">>> TH: {}".format(th))
        # Predict only once (at the first threshold value); later
        # iterations reuse the stored predictions.
        predict_flag = False
        if th == 30:
            predict_flag = True
        _internal_validate_fscore(
            area_id,
            epoch=best_epoch,
            enable_tqdm=True,
            min_th=th,
            predict=predict_flag)
        evaluate_record = _calc_fscore_per_aoi(area_id)
        evaluate_record['zero_base_epoch'] = best_epoch
        evaluate_record['min_area_th'] = th
        evaluate_record['area_id'] = area_id
        logger.info("\n" + json.dumps(evaluate_record, indent=4))
        rows.append(evaluate_record)
    pd.DataFrame(rows).to_csv(
        FMT_VALMODEL_EVALTHHIST.format(prefix),
        index=False)
    logger.info("Evaluate fscore on validation set: {} .. done".format(prefix))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
    """
    Convert from 256x256 mask to polygons on 650x650 image
    """
    # Upscale to the original tile size and binarize at 0.5.
    binary = (skimage.transform.resize(mask, (650, 650)) > 0.5).astype(np.uint8)

    # Vectorize the raster and dissolve all touching shapes into one geometry.
    raster_shapes = rasterio.features.shapes(binary.astype(np.int16), binary > 0)
    merged = shapely.ops.cascaded_union(
        shapely.geometry.MultiPolygon(
            [shapely.geometry.shape(geom) for geom, _ in raster_shapes]))

    # The union may collapse to a single Polygon; normalize to a list.
    if isinstance(merged, shapely.geometry.Polygon):
        polys = [merged]
    else:
        polys = list(merged)
    df = pd.DataFrame({
        'area_size': [p.area for p in polys],
        'poly': polys,
    })

    # Keep only sufficiently large polygons, largest first.
    df = df[df.area_size > min_polygon_area_th].sort_values(
        by='area_size', ascending=False)
    df.loc[:, 'wkt'] = df.poly.apply(
        lambda p: shapely.wkt.dumps(p, rounding_precision=0))
    df.loc[:, 'bid'] = list(range(1, len(df) + 1))
    df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
    return df
def postproc(area_id):
    """Convert predicted masks for one AOI into the polygon submission CSV.

    Reads the stored `/pred` mask tensor, turns each mask into polygons via
    :func:`mask_to_poly`, and writes one CSV row per building (or a
    ``POLYGON EMPTY`` row when a tile has no detection).
    """
    print(area_id)
    prefix = area_id_to_prefix(area_id)

    # Load the test image ids and the predicted mask tensor.
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn = FMT_TESTPRED_PATH.format(prefix)
    with tb.open_file(fn, 'r') as f:
        y_pred = np.array(f.get_node('/pred'))
        print(y_pred.shape)

    fn_out = FMT_TESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        for idx, image_id in enumerate(df_test.index.tolist()):
            df_poly = mask_to_poly(y_pred[idx][0])
            if len(df_poly) == 0:
                # No building detected on this tile.
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
                continue
            for _, row in df_poly.iterrows():
                f.write("{},{},\"{}\",{:.6f}\n".format(
                    image_id,
                    row.bid,
                    row.wkt,
                    row.area_ratio))
def merge():
    """Concatenate the per-AOI polygon CSVs into the final solution file,
    then strip interior rings from every polygon row."""
    parts = []
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        parts.append(pd.read_csv(
            FMT_TESTPOLY_PATH.format(prefix)))
    pd.concat(parts).to_csv(FN_SOLUTION_CSV, index=False)

    # Rewrite the file in place: keep the header, clean every data row.
    with open(FN_SOLUTION_CSV, 'r') as f:
        lines = f.readlines()
    with open(FN_SOLUTION_CSV, 'w') as f:
        f.write(lines[0])
        f.writelines(_remove_interiors(line) for line in lines[1:])
# Script entry point: dispatch to the click command group defined above.
if __name__ == '__main__':
    cli()
| [
"logging.getLogger",
"numpy.clip",
"logging.StreamHandler",
"keras.backend.sum",
"pandas.read_csv",
"tables.Atom.from_dtype",
"math.floor",
"keras.callbacks.History",
"numpy.array",
"tables.Filters",
"pathlib.Path",
"click.group",
"subprocess.Popen",
"keras.backend.clip",
"json.dumps",
... | [((4066, 4110), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (4087, 4110), False, 'import warnings\n'), ((4121, 4136), 'logging.StreamHandler', 'StreamHandler', ([], {}), '()\n', (4134, 4136), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler\n'), ((4314, 4336), 'logging.getLogger', 'getLogger', (['"""spacenet2"""'], {}), "('spacenet2')\n", (4323, 4336), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler\n'), ((4486, 4512), 'numpy.random.seed', 'np.random.seed', (['(1145141919)'], {}), '(1145141919)\n', (4500, 4512), True, 'import numpy as np\n'), ((57801, 57814), 'click.group', 'click.group', ([], {}), '()\n', (57812, 57814), False, 'import click\n'), ((57853, 57889), 'click.argument', 'click.argument', (['"""datapath"""'], {'type': 'str'}), "('datapath', type=str)\n", (57867, 57889), False, 'import click\n'), ((59336, 59372), 'click.argument', 'click.argument', (['"""datapath"""'], {'type': 'str'}), "('datapath', type=str)\n", (59350, 59372), False, 'import click\n'), ((59655, 59691), 'click.argument', 'click.argument', (['"""datapath"""'], {'type': 'str'}), "('datapath', type=str)\n", (59669, 59691), False, 'import click\n'), ((4181, 4201), 'logging.Formatter', 'Formatter', (['LOGFORMAT'], {}), '(LOGFORMAT)\n', (4190, 4201), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler\n'), ((4283, 4303), 'logging.Formatter', 'Formatter', (['LOGFORMAT'], {}), '(LOGFORMAT)\n', (4292, 4303), False, 'from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler\n'), ((5366, 5407), 'pandas.read_csv', 'pd.read_csv', (['band_fn'], {'index_col': '"""area_id"""'}), "(band_fn, index_col='area_id')\n", (5377, 5407), True, 'import pandas as pd\n'), ((6257, 6326), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (6273, 6326), False, 'import subprocess\n'), ((9423, 9448), 'numpy.swapaxes', 'np.swapaxes', (['values', '(0)', '(2)'], {}), '(values, 0, 2)\n', (9434, 9448), True, 'import numpy as np\n'), ((9462, 9487), 'numpy.swapaxes', 'np.swapaxes', (['values', '(0)', '(1)'], {}), '(values, 0, 1)\n', (9473, 9487), True, 'import numpy as np\n'), ((10065, 10090), 'numpy.swapaxes', 'np.swapaxes', (['values', '(0)', '(2)'], {}), '(values, 0, 2)\n', (10076, 10090), True, 'import numpy as np\n'), ((10104, 10129), 'numpy.swapaxes', 'np.swapaxes', (['values', '(0)', '(1)'], {}), '(values, 0, 1)\n', (10115, 10129), True, 'import numpy as np\n'), ((10287, 10307), 'numpy.zeros', 'np.zeros', (['(650, 650)'], {}), '((650, 650))\n', (10295, 10307), True, 'import numpy as np\n'), ((19568, 19583), 'pandas.read_csv', 'pd.read_csv', (['fn'], {}), '(fn)\n', (19579, 19583), True, 'import pandas as pd\n'), ((19812, 19844), 'numpy.random.shuffle', 'np.random.shuffle', (['image_id_list'], {}), '(image_id_list)\n', (19829, 19844), True, 'import numpy as np\n'), ((21933, 21963), 'tqdm.tqdm', 'tqdm.tqdm', (['image_id_list[:500]'], {}), '(image_id_list[:500])\n', (21942, 21963), False, 'import tqdm\n'), ((22518, 22548), 'tqdm.tqdm', 'tqdm.tqdm', (['image_id_list[:500]'], {}), '(image_id_list[:500])\n', (22527, 22548), False, 'import tqdm\n'), ((24164, 24194), 'tqdm.tqdm', 'tqdm.tqdm', (['image_id_list[:500]'], {}), '(image_id_list[:500])\n', (24173, 24194), False, 'import tqdm\n'), ((24755, 24785), 'tqdm.tqdm', 'tqdm.tqdm', (['image_id_list[:500]'], {}), '(image_id_list[:500])\n', (24764, 24785), False, 'import tqdm\n'), ((25686, 25706), 'keras.layers.Input', 'Input', (['(8, 256, 256)'], {}), '((8, 256, 256))\n', (25691, 25706), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27347, 27353), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (27351, 27353), 
False, 'from keras.optimizers import Adam\n'), ((27367, 27401), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv10'}), '(input=inputs, output=conv10)\n', (27372, 27401), False, 'from keras.models import Model\n'), ((27645, 27685), 'keras.backend.sum', 'K.sum', (['(y_true * y_pred)'], {'axis': '[0, -1, -2]'}), '(y_true * y_pred, axis=[0, -1, -2])\n', (27650, 27685), True, 'from keras import backend as K\n'), ((27697, 27737), 'keras.backend.sum', 'K.sum', (['(y_true + y_pred)'], {'axis': '[0, -1, -2]'}), '(y_true + y_pred, axis=[0, -1, -2])\n', (27702, 27737), True, 'from keras import backend as K\n'), ((27816, 27827), 'keras.backend.mean', 'K.mean', (['jac'], {}), '(jac)\n', (27822, 27827), True, 'from keras import backend as K\n'), ((27953, 27997), 'keras.backend.sum', 'K.sum', (['(y_true * y_pred_pos)'], {'axis': '[0, -1, -2]'}), '(y_true * y_pred_pos, axis=[0, -1, -2])\n', (27958, 27997), True, 'from keras import backend as K\n'), ((28009, 28053), 'keras.backend.sum', 'K.sum', (['(y_true + y_pred_pos)'], {'axis': '[0, -1, -2]'}), '(y_true + y_pred_pos, axis=[0, -1, -2])\n', (28014, 28053), True, 'from keras import backend as K\n'), ((28132, 28143), 'keras.backend.mean', 'K.mean', (['jac'], {}), '(jac)\n', (28138, 28143), True, 'from keras import backend as K\n'), ((31085, 31097), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (31093, 31097), True, 'import numpy as np\n'), ((31121, 31142), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (31132, 31142), True, 'import numpy as np\n'), ((31169, 31190), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(1)'], {}), '(im, 0, 1)\n', (31180, 31190), True, 'import numpy as np\n'), ((32481, 32493), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (32489, 32493), True, 'import numpy as np\n'), ((32517, 32538), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (32528, 32538), True, 'import numpy as np\n'), ((32565, 32586), 'numpy.swapaxes', 
'np.swapaxes', (['im', '(0)', '(1)'], {}), '(im, 0, 1)\n', (32576, 32586), True, 'import numpy as np\n'), ((32831, 32852), 'pandas.read_csv', 'pd.read_csv', (['fn_train'], {}), '(fn_train)\n', (32842, 32852), True, 'import pandas as pd\n'), ((33202, 33219), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (33210, 33219), True, 'import numpy as np\n'), ((33549, 33566), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (33557, 33566), True, 'import numpy as np\n'), ((33868, 33888), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (33879, 33888), True, 'import pandas as pd\n'), ((34233, 34249), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (34241, 34249), True, 'import numpy as np\n'), ((34424, 34444), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (34435, 34444), True, 'import pandas as pd\n'), ((34789, 34804), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (34797, 34804), True, 'import numpy as np\n'), ((35129, 35144), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (35137, 35144), True, 'import numpy as np\n'), ((35388, 35409), 'pandas.read_csv', 'pd.read_csv', (['fn_train'], {}), '(fn_train)\n', (35399, 35409), True, 'import pandas as pd\n'), ((35756, 35771), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (35764, 35771), True, 'import numpy as np\n'), ((36098, 36113), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (36106, 36113), True, 'import numpy as np\n'), ((36821, 36840), 'numpy.vstack', 'np.vstack', (['[X1, X2]'], {}), '([X1, X2])\n', (36830, 36840), True, 'import numpy as np\n'), ((37981, 38022), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (37992, 38022), True, 'import pandas as pd\n'), ((39014, 39055), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (39025, 39055), True, 'import pandas as pd\n'), 
((46449, 46481), 'numpy.random.shuffle', 'np.random.shuffle', (['image_id_list'], {}), '(image_id_list)\n', (46466, 46481), True, 'import numpy as np\n'), ((47855, 47875), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (47866, 47875), True, 'import pandas as pd\n'), ((48219, 48235), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (48227, 48235), True, 'import numpy as np\n'), ((48406, 48426), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (48417, 48426), True, 'import pandas as pd\n'), ((48770, 48785), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (48778, 48785), True, 'import numpy as np\n'), ((49110, 49125), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (49118, 49125), True, 'import numpy as np\n'), ((49365, 49386), 'pandas.read_csv', 'pd.read_csv', (['fn_train'], {}), '(fn_train)\n', (49376, 49386), True, 'import pandas as pd\n'), ((49732, 49747), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (49740, 49747), True, 'import numpy as np\n'), ((50074, 50089), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (50082, 50089), True, 'import numpy as np\n'), ((51814, 51855), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (51825, 51855), True, 'import pandas as pd\n'), ((53239, 53280), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (53250, 53280), True, 'import pandas as pd\n'), ((54414, 54434), 'pandas.read_csv', 'pd.read_csv', (['fn_true'], {}), '(fn_true)\n', (54425, 54434), True, 'import pandas as pd\n'), ((54601, 54621), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (54612, 54621), True, 'import pandas as pd\n'), ((55704, 55745), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (55715, 55745), True, 'import pandas as pd\n'), ((58594, 58680), 
'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_jaccard_coef_int"""', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_jaccard_coef_int', patience=10, verbose=0, mode=\n 'max')\n", (58607, 58680), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, History\n'), ((58729, 58738), 'keras.callbacks.History', 'History', ([], {}), '()\n', (58736, 58738), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, History\n'), ((63315, 63356), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (63326, 63356), True, 'import pandas as pd\n'), ((64435, 64453), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (64444, 64453), True, 'import pandas as pd\n'), ((1357, 1416), 'pathlib.Path', 'Path', (['"""summaryData/{prefix:s}_Train_Building_Solutions.csv"""'], {}), "('summaryData/{prefix:s}_Train_Building_Solutions.csv')\n", (1361, 1416), False, 'from pathlib import Path\n'), ((1507, 1561), 'pathlib.Path', 'Path', (['"""RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"""'], {}), "('RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif')\n", (1511, 1561), False, 'from pathlib import Path\n'), ((1657, 1711), 'pathlib.Path', 'Path', (['"""RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"""'], {}), "('RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif')\n", (1661, 1711), False, 'from pathlib import Path\n'), ((1804, 1858), 'pathlib.Path', 'Path', (['"""MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"""'], {}), "('MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif')\n", (1808, 1858), False, 'from pathlib import Path\n'), ((1956, 2010), 'pathlib.Path', 'Path', (['"""MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"""'], {}), "('MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif')\n", (1960, 2010), False, 'from pathlib import Path\n'), ((4707, 4721), 'pathlib.Path', 'Path', (['datapath'], {}), '(datapath)\n', (4711, 4721), False, 'from pathlib import 
Path\n'), ((11767, 11788), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (11779, 11788), True, 'import tables as tb\n'), ((12299, 12320), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (12311, 12320), True, 'import tables as tb\n'), ((12837, 12858), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (12849, 12858), True, 'import tables as tb\n'), ((13913, 13934), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (13925, 13934), True, 'import tables as tb\n'), ((14448, 14469), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (14460, 14469), True, 'import tables as tb\n'), ((14984, 15005), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (14996, 15005), True, 'import tables as tb\n'), ((15541, 15562), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (15553, 15562), True, 'import tables as tb\n'), ((16646, 16667), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (16658, 16667), True, 'import tables as tb\n'), ((17209, 17230), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (17221, 17230), True, 'import tables as tb\n'), ((18408, 18429), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (18420, 18429), True, 'import tables as tb\n'), ((18974, 18995), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (18986, 18995), True, 'import tables as tb\n'), ((23127, 23168), 'scipy.percentile', 'scipy.percentile', (['band_values[i_chan]', '(98)'], {}), '(band_values[i_chan], 98)\n', (23143, 23168), False, 'import scipy\n'), ((23219, 23259), 'scipy.percentile', 'scipy.percentile', (['band_values[i_chan]', '(2)'], {}), '(band_values[i_chan], 2)\n', (23235, 23259), False, 'import scipy\n'), ((25370, 25411), 'scipy.percentile', 'scipy.percentile', (['band_values[i_chan]', '(98)'], {}), 
'(band_values[i_chan], 98)\n', (25386, 25411), False, 'import scipy\n'), ((25462, 25502), 'scipy.percentile', 'scipy.percentile', (['band_values[i_chan]', '(2)'], {}), '(band_values[i_chan], 2)\n', (25478, 25502), False, 'import scipy\n'), ((25719, 25757), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {}), '(32, 3, 3, **conv_params)\n', (25732, 25757), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((25778, 25816), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {}), '(32, 3, 3, **conv_params)\n', (25791, 25816), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((25836, 25866), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (25848, 25866), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((25887, 25925), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {}), '(64, 3, 3, **conv_params)\n', (25900, 25925), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((25945, 25983), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {}), '(64, 3, 3, **conv_params)\n', (25958, 25983), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26003, 26033), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (26015, 26033), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26054, 26093), 'keras.layers.Convolution2D', 'Convolution2D', 
(['(128)', '(3)', '(3)'], {}), '(128, 3, 3, **conv_params)\n', (26067, 26093), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26113, 26152), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3)', '(3)'], {}), '(128, 3, 3, **conv_params)\n', (26126, 26152), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26172, 26202), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (26184, 26202), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26223, 26262), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3)', '(3)'], {}), '(256, 3, 3, **conv_params)\n', (26236, 26262), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26282, 26321), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3)', '(3)'], {}), '(256, 3, 3, **conv_params)\n', (26295, 26321), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26341, 26371), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (26353, 26371), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26392, 26431), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3)', '(3)'], {}), '(512, 3, 3, **conv_params)\n', (26405, 26431), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26451, 26490), 'keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3)', 
'(3)'], {}), '(512, 3, 3, **conv_params)\n', (26464, 26490), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26588, 26627), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3)', '(3)'], {}), '(256, 3, 3, **conv_params)\n', (26601, 26627), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26645, 26684), 'keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3)', '(3)'], {}), '(256, 3, 3, **conv_params)\n', (26658, 26684), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26782, 26821), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3)', '(3)'], {}), '(128, 3, 3, **conv_params)\n', (26795, 26821), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26839, 26878), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3)', '(3)'], {}), '(128, 3, 3, **conv_params)\n', (26852, 26878), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26976, 27014), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {}), '(64, 3, 3, **conv_params)\n', (26989, 27014), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27032, 27070), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {}), '(64, 3, 3, **conv_params)\n', (27045, 27070), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27168, 27206), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', 
'(3)'], {}), '(32, 3, 3, **conv_params)\n', (27181, 27206), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27224, 27262), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {}), '(32, 3, 3, **conv_params)\n', (27237, 27262), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27284, 27328), 'keras.layers.Convolution2D', 'Convolution2D', (['(1)', '(1)', '(1)'], {'activation': '"""sigmoid"""'}), "(1, 1, 1, activation='sigmoid')\n", (27297, 27328), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27912, 27932), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (27918, 27932), True, 'from keras import backend as K\n'), ((32927, 32951), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (32939, 32951), True, 'import tables as tb\n'), ((33297, 33323), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (33309, 33323), True, 'import tables as tb\n'), ((33961, 33985), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (33973, 33985), True, 'import tables as tb\n'), ((34519, 34543), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (34531, 34543), True, 'import tables as tb\n'), ((34882, 34908), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (34894, 34908), True, 'import tables as tb\n'), ((35485, 35509), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (35497, 35509), True, 'import tables as tb\n'), ((35850, 35876), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (35862, 35876), True, 'import tables as 
tb\n'), ((36987, 37008), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (36999, 37008), True, 'import tables as tb\n'), ((37030, 37062), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['X_mean.dtype'], {}), '(X_mean.dtype)\n', (37048, 37062), True, 'import tables as tb\n'), ((37081, 37121), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (37091, 37121), True, 'import tables as tb\n'), ((41104, 41145), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {'index_col': '"""ImageId"""'}), "(fn_test, index_col='ImageId')\n", (41115, 41145), True, 'import pandas as pd\n'), ((42540, 42560), 'pandas.read_csv', 'pd.read_csv', (['fn_true'], {}), '(fn_true)\n', (42551, 42560), True, 'import pandas as pd\n'), ((42742, 42762), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (42753, 42762), True, 'import pandas as pd\n'), ((47947, 47971), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (47959, 47971), True, 'import tables as tb\n'), ((48500, 48524), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (48512, 48524), True, 'import tables as tb\n'), ((48863, 48889), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (48875, 48889), True, 'import tables as tb\n'), ((49461, 49485), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (49473, 49485), True, 'import tables as tb\n'), ((49826, 49852), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (49838, 49852), True, 'import tables as tb\n'), ((50631, 50652), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (50643, 50652), True, 'import tables as tb\n'), ((50674, 50706), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['y_pred.dtype'], {}), '(y_pred.dtype)\n', (50692, 50706), True, 'import tables as tb\n'), ((50725, 50765), 
'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (50735, 50765), True, 'import tables as tb\n'), ((55887, 55908), 'tables.open_file', 'tb.open_file', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (55899, 55908), True, 'import tables as tb\n'), ((57058, 57078), 'pandas.read_csv', 'pd.read_csv', (['fn_true'], {}), '(fn_true)\n', (57069, 57078), True, 'import pandas as pd\n'), ((57260, 57280), 'pandas.read_csv', 'pd.read_csv', (['fn_test'], {}), '(fn_test)\n', (57271, 57280), True, 'import pandas as pd\n'), ((62585, 62637), 'pandas.DataFrame', 'pd.DataFrame', (["{'area_size': [mp.area], 'poly': [mp]}"], {}), "({'area_size': [mp.area], 'poly': [mp]})\n", (62597, 62637), True, 'import pandas as pd\n'), ((62696, 62770), 'pandas.DataFrame', 'pd.DataFrame', (["{'area_size': [p.area for p in mp], 'poly': [p for p in mp]}"], {}), "({'area_size': [p.area for p in mp], 'poly': [p for p in mp]})\n", (62708, 62770), True, 'import pandas as pd\n'), ((63408, 63429), 'tables.open_file', 'tb.open_file', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (63420, 63429), True, 'import tables as tb\n'), ((1304, 1318), 'pathlib.Path', 'Path', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1308, 1318), False, 'from pathlib import Path\n'), ((1325, 1350), 'pathlib.Path', 'Path', (['"""{prefix:s}_Train/"""'], {}), "('{prefix:s}_Train/')\n", (1329, 1350), False, 'from pathlib import Path\n'), ((1454, 1468), 'pathlib.Path', 'Path', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1458, 1468), False, 'from pathlib import Path\n'), ((1475, 1500), 'pathlib.Path', 'Path', (['"""{prefix:s}_Train/"""'], {}), "('{prefix:s}_Train/')\n", (1479, 1500), False, 'from pathlib import Path\n'), ((1598, 1612), 'pathlib.Path', 'Path', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1602, 1612), False, 'from pathlib import Path\n'), ((1619, 1650), 'pathlib.Path', 'Path', (['"""{prefix:s}_Test_public/"""'], {}), "('{prefix:s}_Test_public/')\n", (1623, 1650), False, 'from pathlib import 
Path\n'), ((1751, 1765), 'pathlib.Path', 'Path', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1755, 1765), False, 'from pathlib import Path\n'), ((1772, 1797), 'pathlib.Path', 'Path', (['"""{prefix:s}_Train/"""'], {}), "('{prefix:s}_Train/')\n", (1776, 1797), False, 'from pathlib import Path\n'), ((1897, 1911), 'pathlib.Path', 'Path', (['BASE_DIR'], {}), '(BASE_DIR)\n', (1901, 1911), False, 'from pathlib import Path\n'), ((1918, 1949), 'pathlib.Path', 'Path', (['"""{prefix:s}_Test_public/"""'], {}), "('{prefix:s}_Test_public/')\n", (1922, 1949), False, 'from pathlib import Path\n'), ((9290, 9331), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (9297, 9331), True, 'import numpy as np\n'), ((9932, 9973), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (9939, 9973), True, 'import numpy as np\n'), ((11957, 11985), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (11975, 11985), True, 'import tables as tb\n'), ((12008, 12048), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (12018, 12048), True, 'import tables as tb\n'), ((12492, 12520), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (12510, 12520), True, 'import tables as tb\n'), ((12543, 12583), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (12553, 12583), True, 'import tables as tb\n'), ((13032, 13065), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im_mask.dtype'], {}), '(im_mask.dtype)\n', (13050, 13065), True, 'import tables as tb\n'), ((13088, 13128), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (13098, 13128), True, 'import tables as tb\n'), ((14103, 14131), 'tables.Atom.from_dtype', 
'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (14121, 14131), True, 'import tables as tb\n'), ((14154, 14194), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (14164, 14194), True, 'import tables as tb\n'), ((14636, 14664), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (14654, 14664), True, 'import tables as tb\n'), ((14687, 14727), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (14697, 14727), True, 'import tables as tb\n'), ((15179, 15212), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im_mask.dtype'], {}), '(im_mask.dtype)\n', (15197, 15212), True, 'import tables as tb\n'), ((15235, 15275), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (15245, 15275), True, 'import tables as tb\n'), ((15734, 15767), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im_mask.dtype'], {}), '(im_mask.dtype)\n', (15752, 15767), True, 'import tables as tb\n'), ((15790, 15830), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (15800, 15830), True, 'import tables as tb\n'), ((16866, 16894), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (16884, 16894), True, 'import tables as tb\n'), ((16917, 16957), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (16927, 16957), True, 'import tables as tb\n'), ((17432, 17460), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (17450, 17460), True, 'import tables as tb\n'), ((17483, 17523), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (17493, 17523), True, 'import tables as tb\n'), 
((18628, 18656), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (18646, 18656), True, 'import tables as tb\n'), ((18679, 18719), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (18689, 18719), True, 'import tables as tb\n'), ((19192, 19220), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['im.dtype'], {}), '(im.dtype)\n', (19210, 19220), True, 'import tables as tb\n'), ((19243, 19283), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (19253, 19283), True, 'import tables as tb\n'), ((19948, 20002), 'pandas.DataFrame', 'pd.DataFrame', (["{'ImageId': image_id_list[:sz_valtrain]}"], {}), "({'ImageId': image_id_list[:sz_valtrain]})\n", (19960, 20002), True, 'import pandas as pd\n'), ((20095, 20149), 'pandas.DataFrame', 'pd.DataFrame', (["{'ImageId': image_id_list[sz_valtrain:]}"], {}), "({'ImageId': image_id_list[sz_valtrain:]})\n", (20107, 20149), True, 'import pandas as pd\n'), ((21537, 21555), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (21549, 21555), True, 'import pandas as pd\n'), ((23743, 23761), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (23755, 23761), True, 'import pandas as pd\n'), ((28702, 28726), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (28714, 28726), True, 'import tables as tb\n'), ((30258, 30299), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (30265, 30299), True, 'import numpy as np\n'), ((30832, 30873), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (30839, 30873), True, 'import numpy as np\n'), ((31653, 31694), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (31660, 31694), True, 'import numpy 
as np\n'), ((32228, 32269), 'numpy.clip', 'np.clip', (['values[chan_i]', 'min_val', 'max_val'], {}), '(values[chan_i], min_val, max_val)\n', (32235, 32269), True, 'import numpy as np\n'), ((33096, 33117), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (33107, 33117), True, 'import numpy as np\n'), ((33135, 33156), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (33146, 33156), True, 'import numpy as np\n'), ((34129, 34150), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (34140, 34150), True, 'import numpy as np\n'), ((34168, 34189), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (34179, 34189), True, 'import numpy as np\n'), ((34687, 34708), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (34698, 34708), True, 'import numpy as np\n'), ((34726, 34747), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (34737, 34747), True, 'import numpy as np\n'), ((35654, 35675), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (35665, 35675), True, 'import numpy as np\n'), ((35693, 35714), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (35704, 35714), True, 'import numpy as np\n'), ((38322, 38343), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (38334, 38343), True, 'import tables as tb\n'), ((38369, 38401), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['y_pred.dtype'], {}), '(y_pred.dtype)\n', (38387, 38401), True, 'import tables as tb\n'), ((38424, 38464), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (38434, 38464), True, 'import tables as tb\n'), ((40639, 40660), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (40651, 40660), True, 'import tables as tb\n'), ((40686, 40718), 'tables.Atom.from_dtype', 
'tb.Atom.from_dtype', (['y_pred.dtype'], {}), '(y_pred.dtype)\n', (40704, 40718), True, 'import tables as tb\n'), ((40741, 40781), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (40751, 40781), True, 'import tables as tb\n'), ((41208, 41229), 'tables.open_file', 'tb.open_file', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (41220, 41229), True, 'import tables as tb\n'), ((44874, 44898), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (44886, 44898), True, 'import tables as tb\n'), ((44925, 44951), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (44937, 44951), True, 'import tables as tb\n'), ((46608, 46632), 'tables.open_file', 'tb.open_file', (['fn_im', '"""r"""'], {}), "(fn_im, 'r')\n", (46620, 46632), True, 'import tables as tb\n'), ((46659, 46685), 'tables.open_file', 'tb.open_file', (['fn_mask', '"""r"""'], {}), "(fn_mask, 'r')\n", (46671, 46685), True, 'import tables as tb\n'), ((48115, 48136), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (48126, 48136), True, 'import numpy as np\n'), ((48154, 48175), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (48165, 48175), True, 'import numpy as np\n'), ((48668, 48689), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (48679, 48689), True, 'import numpy as np\n'), ((48707, 48728), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (48718, 48728), True, 'import numpy as np\n'), ((49630, 49651), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (49641, 49651), True, 'import numpy as np\n'), ((49669, 49690), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (49680, 49690), True, 'import numpy as np\n'), ((52214, 52235), 'tables.open_file', 'tb.open_file', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (52226, 52235), True, 
'import tables as tb\n'), ((52261, 52293), 'tables.Atom.from_dtype', 'tb.Atom.from_dtype', (['y_pred.dtype'], {}), '(y_pred.dtype)\n', (52279, 52293), True, 'import tables as tb\n'), ((52316, 52356), 'tables.Filters', 'tb.Filters', ([], {'complib': '"""blosc"""', 'complevel': '(9)'}), "(complib='blosc', complevel=9)\n", (52326, 52356), True, 'import tables as tb\n'), ((59148, 59183), 'pandas.DataFrame', 'pd.DataFrame', (['model_history.history'], {}), '(model_history.history)\n', (59160, 59183), True, 'import pandas as pd\n'), ((61867, 61885), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (61879, 61885), True, 'import pandas as pd\n'), ((22234, 22274), 'numpy.array', 'np.array', (['[v for v in values_ if v != 0]'], {}), '([v for v in values_ if v != 0])\n', (22242, 22274), True, 'import numpy as np\n'), ((22819, 22859), 'numpy.array', 'np.array', (['[v for v in values_ if v != 0]'], {}), '([v for v in values_ if v != 0])\n', (22827, 22859), True, 'import numpy as np\n'), ((23033, 23068), 'numpy.concatenate', 'np.concatenate', (['band_values[i_chan]'], {}), '(band_values[i_chan])\n', (23047, 23068), True, 'import numpy as np\n'), ((24471, 24511), 'numpy.array', 'np.array', (['[v for v in values_ if v != 0]'], {}), '([v for v in values_ if v != 0])\n', (24479, 24511), True, 'import numpy as np\n'), ((25062, 25102), 'numpy.array', 'np.array', (['[v for v in values_ if v != 0]'], {}), '([v for v in values_ if v != 0])\n', (25070, 25102), True, 'import numpy as np\n'), ((25276, 25311), 'numpy.concatenate', 'np.concatenate', (['band_values[i_chan]'], {}), '(band_values[i_chan])\n', (25290, 25311), True, 'import numpy as np\n'), ((26518, 26543), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (26530, 26543), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26712, 26737), 'keras.layers.UpSampling2D', 'UpSampling2D', 
([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (26724, 26737), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((26906, 26931), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (26918, 26931), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((27098, 27123), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (27110, 27123), False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout, Activation, BatchNormalization\n'), ((28650, 28683), 'math.floor', 'math.floor', (['(total_sz / batch_size)'], {}), '(total_sz / batch_size)\n', (28660, 28683), False, 'import math\n'), ((29434, 29450), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (29442, 29450), True, 'import numpy as np\n'), ((29476, 29492), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (29484, 29492), True, 'import numpy as np\n'), ((44822, 44855), 'math.floor', 'math.floor', (['(total_sz / batch_size)'], {}), '(total_sz / batch_size)\n', (44832, 44855), False, 'import math\n'), ((45711, 45728), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (45719, 45728), True, 'import numpy as np\n'), ((45755, 45772), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (45763, 45772), True, 'import numpy as np\n'), ((46556, 46589), 'math.floor', 'math.floor', (['(total_sz / batch_size)'], {}), '(total_sz / batch_size)\n', (46566, 46589), False, 'import math\n'), ((47444, 47461), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (47452, 47461), True, 'import numpy as np\n'), ((47488, 47505), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (47496, 47505), True, 'import numpy as np\n'), ((58197, 58212), 'pathlib.Path', 'Path', (['MODEL_DIR'], {}), 
'(MODEL_DIR)\n', (58201, 58212), False, 'from pathlib import Path\n'), ((58231, 58246), 'pathlib.Path', 'Path', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (58235, 58246), False, 'from pathlib import Path\n'), ((60798, 60816), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (60810, 60816), True, 'import pandas as pd\n'), ((61786, 61823), 'json.dumps', 'json.dumps', (['evaluate_record'], {'indent': '(4)'}), '(evaluate_record, indent=4)\n', (61796, 61823), False, 'import json\n'), ((7383, 7418), 're.findall', 're.findall', (['"""([\\\\d\\\\.]+)"""', 'lines[0]'], {}), "('([\\\\d\\\\.]+)', lines[0])\n", (7393, 7418), False, 'import re\n'), ((7438, 7468), 're.findall', 're.findall', (['"""(\\\\d+)"""', 'lines[3]'], {}), "('(\\\\d+)', lines[3])\n", (7448, 7468), False, 'import re\n'), ((7489, 7519), 're.findall', 're.findall', (['"""(\\\\d+)"""', 'lines[4]'], {}), "('(\\\\d+)', lines[4])\n", (7499, 7519), False, 'import re\n'), ((7540, 7570), 're.findall', 're.findall', (['"""(\\\\d+)"""', 'lines[5]'], {}), "('(\\\\d+)', lines[5])\n", (7550, 7570), False, 'import re\n'), ((7600, 7635), 're.findall', 're.findall', (['"""([\\\\d\\\\.]+)"""', 'lines[6]'], {}), "('([\\\\d\\\\.]+)', lines[6])\n", (7610, 7635), False, 'import re\n'), ((7661, 7696), 're.findall', 're.findall', (['"""([\\\\d\\\\.]+)"""', 'lines[7]'], {}), "('([\\\\d\\\\.]+)', lines[7])\n", (7671, 7696), False, 'import re\n'), ((7722, 7757), 're.findall', 're.findall', (['"""([\\\\d\\\\.]+)"""', 'lines[8]'], {}), "('([\\\\d\\\\.]+)', lines[8])\n", (7732, 7757), False, 'import re\n'), ((8701, 8721), 'pandas.read_csv', 'pd.read_csv', (['fn_hist'], {}), '(fn_hist)\n', (8712, 8721), True, 'import pandas as pd\n'), ((29182, 29203), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (29193, 29203), True, 'import numpy as np\n'), ((29229, 29250), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (29240, 29250), True, 'import numpy as np\n'), ((45411, 45432), 
'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (45422, 45432), True, 'import numpy as np\n'), ((45458, 45479), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (45469, 45479), True, 'import numpy as np\n'), ((47145, 47166), 'numpy.swapaxes', 'np.swapaxes', (['im', '(0)', '(2)'], {}), '(im, 0, 2)\n', (47156, 47166), True, 'import numpy as np\n'), ((47192, 47213), 'numpy.swapaxes', 'np.swapaxes', (['im', '(1)', '(2)'], {}), '(im, 1, 2)\n', (47203, 47213), True, 'import numpy as np\n'), ((60709, 60746), 'json.dumps', 'json.dumps', (['evaluate_record'], {'indent': '(4)'}), '(evaluate_record, indent=4)\n', (60719, 60746), False, 'import json\n'), ((29317, 29351), 'numpy.zeros', 'np.zeros', (['(INPUT_SIZE, INPUT_SIZE)'], {}), '((INPUT_SIZE, INPUT_SIZE))\n', (29325, 29351), True, 'import numpy as np\n')] |
from random import randint
import numpy as np
try:
import tensorflow as tf
except ImportError:
tf = None
# ToDo: we are using a lot of tf.keras.backend modules below, can we use tf core instead?
class MaskingDense(tf.keras.layers.Layer):
    """ Just copied code from keras Dense layer and added masking and a few other tricks:
    - Direct auto-regressive connections to output
    - Allows a second (non-autoregressive) input that is fully connected to first hidden
    - Either 1 output or 2 outputs (concatenated) that are separately
      auto-regressive wrt to the input
    """
    def __init__(self, units, out_units,
                 hidden_layers=1,
                 dropout_rate=0.0,
                 random_input_order=False,
                 activation='elu',
                 out_activation='linear',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 out_kernel_initializer='glorot_uniform',
                 out_bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 name=None,
                 batchnorm=False,
                 **kwargs):
        # MADE-style masked dense stack.
        #   units              - width of every hidden layer
        #   out_units          - checked in build(): must equal the input dim D
        #                        or 2*D (two concatenated auto-regressive outputs)
        #   hidden_layers      - number of masked hidden layers before the output
        #   dropout_rate       - applied after each hidden activation and the output
        #   random_input_order - shuffle the auto-regressive ordering once, in build()
        #   batchnorm          - insert BatchNormalization after each hidden activation
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(MaskingDense, self).__init__(name=name, **kwargs)
        self.input_sel = None  # auto-regressive ordering; assigned in build()
        self.random_input_order = random_input_order
        self.rate = min(1., max(0., dropout_rate))  # clamp dropout rate to [0, 1]
        self.kernel_sels = []
        self.units = units
        self.out_units = out_units
        self.hidden_layers = hidden_layers
        self.activation = tf.keras.activations.get(activation)
        self.out_activation = tf.keras.activations.get(out_activation)  # None gives linear activation
        self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
        self.bias_initializer = tf.keras.initializers.get(bias_initializer)
        self.out_kernel_initializer = tf.keras.initializers.get(out_kernel_initializer)
        self.out_bias_initializer = tf.keras.initializers.get(out_bias_initializer)
        self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
        self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
        self.activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
        self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
        self.bias_constraint = tf.keras.constraints.get(bias_constraint)
        self.batchnorm = batchnorm
    def dropout_wrapper(self, inputs, training):
        """Apply dropout at ``self.rate`` during training only; identity otherwise."""
        if 0. < self.rate < 1.:
            def dropped_inputs():
                return tf.keras.backend.dropout(inputs, self.rate, noise_shape=None, seed=None)
            # in_train_phase selects the dropped or raw tensor depending on the
            # (possibly symbolic) training flag.
            return tf.keras.backend.in_train_phase(dropped_inputs, inputs, training=training)
        return inputs
    def build_layer_weights(
        self,
        input_dim,
        units,
        use_bias=True,
        is_output=False,
        id=''
    ):
        """Create a (kernel, bias) pair for one layer.

        Output layers use the dedicated out_* initializers; ``id`` is appended
        to the weight names so repeated layers get unique variable names.
        Returns (kernel, bias); bias is None when ``use_bias`` is False.
        """
        kernel_initializer = (self.kernel_initializer if not is_output
                              else self.out_kernel_initializer)
        bias_initializer = (self.bias_initializer if not is_output
                            else self.out_bias_initializer)
        kernel = self.add_weight(shape=(input_dim, units),
                                 initializer=kernel_initializer,
                                 name='kernel' + id,
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        if use_bias:
            bias = self.add_weight(shape=(units,),
                                   initializer=bias_initializer,
                                   name='bias' + id,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        else:
            bias = None
        return kernel, bias
    def build_mask(self, shape, prev_sel, is_output):
        """Build a constant binary mask enforcing the auto-regressive property.

        ``prev_sel`` holds the degree assigned to each unit of the previous
        layer.  Returns (mask tensor of ``shape``, degree list for this layer).
        """
        if is_output:
            if shape[-1] == len(self.input_sel):
                input_sel = self.input_sel
            else:
                # out_units == 2 * D: intended to repeat the ordering for the
                # two concatenated outputs.
                # NOTE(review): self.input_sel is a NumPy array (np.arange in
                # build), so `* 2` doubles the degree VALUES element-wise rather
                # than tiling the ordering; the `y % in_len` indexing below
                # suggests tiling (e.g. list(...) * 2) was intended -- confirm.
                input_sel = self.input_sel * 2
        else:
            # Disallow D-1 because it would violate auto-regressive property
            # Disallow unconnected units by sampling min from previous layer
            # NOTE(review): the upper bound uses shape[-1] - 2 (this layer's
            # width), not len(self.input_sel) - 2 (input dimension D) as in the
            # MADE formulation -- confirm intended when units != input dim.
            input_sel = [randint(np.min(prev_sel), shape[-1] - 2) for i in range(shape[-1])]
        def vals():
            # MADE masking rule: strict '<' for connections into the output,
            # '<=' for hidden-to-hidden connections.
            in_len = len(self.input_sel)
            for x in range(shape[-2]):
                for y in range(shape[-1]):
                    if is_output:
                        yield 1 if prev_sel[x] < input_sel[y % in_len] else 0
                    else:
                        yield 1 if prev_sel[x] <= input_sel[y] else 0
        return tf.keras.backend.constant(list(vals()), dtype='float32', shape=shape), input_sel
    def build(self, input_shape):
        """Create all weights and masks.

        ``input_shape`` may be a two-element list [autoregressive_input,
        other_input]; the second input is fully connected to the first hidden
        layer and has no mask.
        """
        if isinstance(input_shape, list):
            if len(input_shape) != 2:
                raise ValueError('Only list only supported for exactly two inputs')
            input_shape, other_input_shape = input_shape
            # Build weights for other (non-autoregressive) vector
            other_shape = (other_input_shape[-1], self.units)
            self.other_kernel, self.other_bias = self.build_layer_weights(*other_shape, id='_h')
        assert len(input_shape) >= 2
        assert self.out_units == input_shape[-1] or self.out_units == 2 * input_shape[-1]
        self.kernels, self.biases = [], []
        self.kernel_masks, self.kernel_sels = [], []
        self.batch_norms = []
        shape = (input_shape[-1], self.units)
        # Degree 0..D-1 per input feature; optionally shuffled once at build time.
        self.input_sel = np.arange(input_shape[-1])
        if self.random_input_order:
            np.random.shuffle(self.input_sel)
        prev_sel = self.input_sel
        for i in range(self.hidden_layers):
            # Hidden layer
            kernel, bias = self.build_layer_weights(*shape, id=str(i))
            self.kernels.append(kernel)
            self.biases.append(bias)
            # Hidden layer mask
            kernel_mask, kernel_sel = self.build_mask(shape, prev_sel, is_output=False)
            self.kernel_masks.append(kernel_mask)
            self.kernel_sels.append(kernel_sel)
            prev_sel = kernel_sel
            shape = (self.units, self.units)
            self.batch_norms.append(tf.keras.layers.BatchNormalization(center=True, scale=True))
        # Direct connection between input/output
        if self.hidden_layers > 0:
            direct_shape = (input_shape[-1], self.out_units)
            self.direct_kernel, _ = self.build_layer_weights(
                *direct_shape,
                use_bias=False,
                is_output=True,
                id='_direct')
            self.direct_kernel_mask, self.direct_sel = self.build_mask(direct_shape, self.input_sel,
                                                                       is_output=True)
        # Output layer
        out_shape = (self.units, self.out_units)
        self.out_kernel, self.out_bias = self.build_layer_weights(
            *out_shape,
            is_output=True,
            id='_out')
        self.out_kernel_mask, self.out_sel = self.build_mask(out_shape, prev_sel, is_output=True)
        self.built = True
    def call(self, inputs, training=None):
        """Forward pass: masked hidden stack, then masked output plus an
        auto-regressive direct input->output connection."""
        other_input = None
        if isinstance(inputs, list):
            assert len(inputs) == 2
            assert self.hidden_layers > 0, "other input not supported if no hidden layers"
            assert hasattr(self, 'other_kernel')
            inputs, other_input = inputs
        output = inputs
        if other_input is not None:
            # Non-autoregressive input feeds only the first hidden layer.
            other = tf.keras.backend.dot(other_input, self.other_kernel)
            other = tf.keras.backend.bias_add(other, self.other_bias)
            other = self.activation(other)
        # Hidden layer + mask
        for i in range(self.hidden_layers):
            # i=0: input_dim -> masking_dim
            # i>0: masking_dim -> masking_dim
            weight = self.kernels[i] * self.kernel_masks[i]
            output = tf.keras.backend.dot(output, weight)
            # "other" input
            if i == 0 and other_input is not None:
                output = output + other
            output = tf.keras.backend.bias_add(output, self.biases[i])
            output = self.activation(output)
            if self.batchnorm:
                output = self.batch_norms[i](output)
            output = self.dropout_wrapper(output, training)
        # out_act(bias + (V dot M_v)h(x) + (A dot M_a)x + (other dot M_other)other)
        # masking_dim -> input_dim
        output = tf.keras.backend.dot(output, self.out_kernel * self.out_kernel_mask)
        # Direct connection
        if self.hidden_layers > 0:
            # input_dim -> input_dim
            direct = tf.keras.backend.dot(inputs, self.direct_kernel * self.direct_kernel_mask)
            output = output + direct
        output = tf.keras.backend.bias_add(output, self.out_bias)
        output = self.out_activation(output)
        output = self.dropout_wrapper(output, training)
        return output
    def compute_output_shape(self, input_shape):
        """Output keeps the batch dimension and has ``out_units`` features."""
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        return (input_shape[0], self.out_units)
| [
"tensorflow.keras.constraints.get",
"tensorflow.keras.backend.dropout",
"tensorflow.keras.activations.get",
"tensorflow.keras.backend.in_train_phase",
"tensorflow.keras.backend.bias_add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.initializers.get",
"tensorflow.keras.regularizers.... | [((1831, 1867), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['activation'], {}), '(activation)\n', (1855, 1867), True, 'import tensorflow as tf\n'), ((1898, 1938), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['out_activation'], {}), '(out_activation)\n', (1922, 1938), True, 'import tensorflow as tf\n'), ((2005, 2050), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (2030, 2050), True, 'import tensorflow as tf\n'), ((2083, 2126), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['bias_initializer'], {}), '(bias_initializer)\n', (2108, 2126), True, 'import tensorflow as tf\n'), ((2165, 2214), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['out_kernel_initializer'], {}), '(out_kernel_initializer)\n', (2190, 2214), True, 'import tensorflow as tf\n'), ((2251, 2298), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['out_bias_initializer'], {}), '(out_bias_initializer)\n', (2276, 2298), True, 'import tensorflow as tf\n'), ((2333, 2378), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (2358, 2378), True, 'import tensorflow as tf\n'), ((2411, 2454), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['bias_regularizer'], {}), '(bias_regularizer)\n', (2436, 2454), True, 'import tensorflow as tf\n'), ((2491, 2538), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['activity_regularizer'], {}), '(activity_regularizer)\n', (2516, 2538), True, 'import tensorflow as tf\n'), ((2572, 2615), 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (2596, 2615), True, 'import tensorflow as tf\n'), ((2647, 2688), 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['bias_constraint'], {}), 
'(bias_constraint)\n', (2671, 2688), True, 'import tensorflow as tf\n'), ((5951, 5977), 'numpy.arange', 'np.arange', (['input_shape[-1]'], {}), '(input_shape[-1])\n', (5960, 5977), True, 'import numpy as np\n'), ((8944, 9012), 'tensorflow.keras.backend.dot', 'tf.keras.backend.dot', (['output', '(self.out_kernel * self.out_kernel_mask)'], {}), '(output, self.out_kernel * self.out_kernel_mask)\n', (8964, 9012), True, 'import tensorflow as tf\n'), ((9265, 9313), 'tensorflow.keras.backend.bias_add', 'tf.keras.backend.bias_add', (['output', 'self.out_bias'], {}), '(output, self.out_bias)\n', (9290, 9313), True, 'import tensorflow as tf\n'), ((2955, 3029), 'tensorflow.keras.backend.in_train_phase', 'tf.keras.backend.in_train_phase', (['dropped_inputs', 'inputs'], {'training': 'training'}), '(dropped_inputs, inputs, training=training)\n', (2986, 3029), True, 'import tensorflow as tf\n'), ((6026, 6059), 'numpy.random.shuffle', 'np.random.shuffle', (['self.input_sel'], {}), '(self.input_sel)\n', (6043, 6059), True, 'import numpy as np\n'), ((7977, 8029), 'tensorflow.keras.backend.dot', 'tf.keras.backend.dot', (['other_input', 'self.other_kernel'], {}), '(other_input, self.other_kernel)\n', (7997, 8029), True, 'import tensorflow as tf\n'), ((8050, 8099), 'tensorflow.keras.backend.bias_add', 'tf.keras.backend.bias_add', (['other', 'self.other_bias'], {}), '(other, self.other_bias)\n', (8075, 8099), True, 'import tensorflow as tf\n'), ((8389, 8425), 'tensorflow.keras.backend.dot', 'tf.keras.backend.dot', (['output', 'weight'], {}), '(output, weight)\n', (8409, 8425), True, 'import tensorflow as tf\n'), ((8568, 8617), 'tensorflow.keras.backend.bias_add', 'tf.keras.backend.bias_add', (['output', 'self.biases[i]'], {}), '(output, self.biases[i])\n', (8593, 8617), True, 'import tensorflow as tf\n'), ((9135, 9209), 'tensorflow.keras.backend.dot', 'tf.keras.backend.dot', (['inputs', '(self.direct_kernel * self.direct_kernel_mask)'], {}), '(inputs, self.direct_kernel * 
self.direct_kernel_mask)\n', (9155, 9209), True, 'import tensorflow as tf\n'), ((2863, 2935), 'tensorflow.keras.backend.dropout', 'tf.keras.backend.dropout', (['inputs', 'self.rate'], {'noise_shape': 'None', 'seed': 'None'}), '(inputs, self.rate, noise_shape=None, seed=None)\n', (2887, 2935), True, 'import tensorflow as tf\n'), ((6648, 6707), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'center': '(True)', 'scale': '(True)'}), '(center=True, scale=True)\n', (6682, 6707), True, 'import tensorflow as tf\n'), ((4633, 4649), 'numpy.min', 'np.min', (['prev_sel'], {}), '(prev_sel)\n', (4639, 4649), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import to_hex
def main(args):
    """Print ``args.n_colors`` evenly spaced hex colors sampled from the
    Matplotlib colormap named ``args.cmap``."""
    colormap = plt.get_cmap(args.cmap)
    positions = np.linspace(0, 1, num=args.n_colors)
    for position in positions:
        rgba = colormap(position)
        print(to_hex(rgba, keep_alpha=False))
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument(
        'cmap',
        # Fix: the original implicit string concatenation was missing a space
        # before 'for', rendering "...colormaps.htmlfor a list of options".
        help='name of a Matplotlib color map (see '
             'https://matplotlib.org/stable/tutorials/colors/colormaps.html '
             'for a list of options)')
    parser.add_argument(
        'n_colors', type=int,
        help='number of colors to sample from the color map')
    main(parser.parse_args())
| [
"numpy.linspace",
"argparse.ArgumentParser",
"matplotlib.pyplot.get_cmap"
] | [((178, 201), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['args.cmap'], {}), '(args.cmap)\n', (190, 201), True, 'import matplotlib.pyplot as plt\n'), ((215, 251), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'args.n_colors'}), '(0, 1, num=args.n_colors)\n', (226, 251), True, 'import numpy as np\n'), ((344, 360), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (358, 360), False, 'from argparse import ArgumentParser\n')] |
import pandas as pd
from matplotlib import pyplot
from sklearn.externals import joblib
import numpy as np
import datetime
import pickle
import argparse
def string_to_timestamp(string):
    """Parse a ``'YYYY-MM-DD HH:MM:SS'`` string and return its POSIX timestamp.

    The string is interpreted as local time (naive ``datetime`` semantics).
    """
    parsed = datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
    return parsed.timestamp()
class data_prep():
    """Load whitespace-delimited (timestamp, value) rows and aggregate the
    values into fixed-width time bins."""

    def __init__(self, path):
        # Column 0: Unix timestamps (seconds); remaining columns: values.
        self.df = pd.read_csv(path, sep='\s+', header=None)

    def resampler(self, time="5min"):
        """Sum the values over consecutive windows of width ``time``."""
        indexed = self.df.set_index([0])
        indexed.index = pd.to_datetime(indexed.index, unit="s")
        self.resampled_data = indexed.resample(time).sum()

    def get_resampled_with_timestamp(self):
        """Move the datetime index back into an ordinary column."""
        self.final_data = self.resampled_data.reset_index()

    def get_final_data(self):
        """Run the full resampling pipeline and return the resulting frame."""
        self.resampler()
        self.get_resampled_with_timestamp()
        return self.final_data
class to_serialised_on_off():
    # Tracks min/max/mean of column 1 of a DataFrame, derives a threshold
    # (max - min) / mean, and serialises each row as its timestamp (above
    # threshold) or 0 (otherwise).
    def __init__(self, data):
        # data: DataFrame with column 0 = timestamps, column 1 = values
        # (as produced by data_prep.get_final_data()).
        self.data = data
        self.min_ = 0
        self.avg_ = 0
        self.max_ = 0
        # flag=1 computes the statistics over the whole frame at once.
        self.find_min_max(flag = 1)
    def append_to_df(self, val):
        # NOTE(review): DataFrame.append returns a NEW frame and the result is
        # discarded here, so self.data is never actually extended (and
        # DataFrame.append was removed in pandas 2.0) -- confirm intent;
        # likely should be self.data = pd.concat(...).
        self.data.append(val)
        self.find_min_max()
    def find_min_max(self, flag = 0):
        # flag=0: incrementally fold the most recent value into the stats;
        # flag!=0: recompute max/min/mean over all of column 1.
        if(flag == 0):
            # NOTE(review): .loc[-1][-1] is a label lookup, not "last row";
            # it raises KeyError unless -1 is an actual index label -- verify.
            val = self.data.loc[-1][-1]
            if( val > self.max_ ):
                self.max_ = val
            elif( val < self.min_ ):
                self.min_ = val
            # Running-mean update assumes shape[0] already counts the new row.
            self.avg_ += val/self.data.shape[0]
        elif(flag):
            self.max_ = max(self.data.loc[:][1])
            self.min_ = min(self.data.loc[:][1])
            self.avg_ = np.mean(self.data.loc[:][1].values)
        # Threshold is refreshed after every statistics update.
        self.calc_thresh()
    def calc_thresh(self):
        # Relative spread; raises ZeroDivisionError if the mean is 0.
        self.thresh = (self.max_ - self.min_)/self.avg_
    def on_off(self, target_path):
        # For each row emit its timestamp (as str) when the value exceeds the
        # threshold, else the int 0; save the mixed list as a NumPy array.
        of = []
        for i in range(self.data.shape[0]):
            if(self.data.loc[i][1] > self.thresh):
                of.append(str(self.data.loc[i][0]))
            else:
                of.append(0)
        np.save(target_path, np.array(of))
def __main__():
    """Command-line entry point.

    Reads whitespace-delimited event data from --data_path, resamples it via
    data_prep, derives an on/off sequence for the first --num_preds rows and
    saves it to --target_path as a NumPy array.
    """
    parser = argparse.ArgumentParser(description="Start and End Times")
    # Fix: argparse's `type` must be a callable; the original passed the
    # string 'int', which raises "'str' object is not callable" at parse time.
    parser.add_argument('--num_preds', type=int)
    parser.add_argument('--data_path')
    parser.add_argument('--target_path')
    args = parser.parse_args()

    prep = data_prep(args.data_path)
    data = prep.get_final_data()

    obj = to_serialised_on_off(data[:args.num_preds])
    obj.on_off(args.target_path)
"numpy.mean",
"argparse.ArgumentParser",
"pandas.read_csv",
"datetime.datetime.strptime",
"numpy.array",
"pandas.to_datetime"
] | [((206, 261), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['string', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(string, '%Y-%m-%d %H:%M:%S')\n", (232, 261), False, 'import datetime\n'), ((1988, 2046), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Start and End Times"""'}), "(description='Start and End Times')\n", (2011, 2046), False, 'import argparse\n'), ((391, 433), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\\\\s+"""', 'header': 'None'}), "(path, sep='\\\\s+', header=None)\n", (402, 433), True, 'import pandas as pd\n'), ((538, 579), 'pandas.to_datetime', 'pd.to_datetime', (['dataframe.index'], {'unit': '"""s"""'}), "(dataframe.index, unit='s')\n", (552, 579), True, 'import pandas as pd\n'), ((1937, 1949), 'numpy.array', 'np.array', (['of'], {}), '(of)\n', (1945, 1949), True, 'import numpy as np\n'), ((1544, 1579), 'numpy.mean', 'np.mean', (['self.data.loc[:][1].values'], {}), '(self.data.loc[:][1].values)\n', (1551, 1579), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.