code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# coding: utf-8
'import the libiary and function'
import os
import csv
import re
import pandas as pd
import cv2
import numpy as np
import tensorflow as tf
import math
import matplotlib.pyplot as plt
import sklearn
from keras.utils import normalize
from sklearn.model_selection import train_test_split
from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.core import Dense, Activation, Flatten, Dropout
from sklearn.utils import shuffle
'Get the name of the samples to run'
# Collect one sub-directory per recording session under SimulationData/.
paths = []
filenames = os.listdir('SimulationData/')
for filename in filenames:
    paths.append('SimulationData/'+filename+'/')
paths.pop(paths.index('SimulationData/Track_2/'))        # Not include in training
paths.pop(paths.index('SimulationData/Smooth_Corner/'))  # Not include in training

# Read every driving_log.csv and rewrite the three camera-image columns
# (center, left, right) to paths relative to this machine.  Logs recorded on
# Windows use backslash separators, hence the fallback split on '\\'.
samples = []
for path in paths:
    with open(path+'driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the headers
        for line in reader:
            # BUGFIX(structure): the original repeated this stanza verbatim
            # for columns 0, 1 and 2; collapsed into one loop.
            for cam in range(3):
                img_path = path+'IMG/'+line[cam].split('/')[-1]
                if os.path.exists(img_path):
                    line[cam] = img_path
                else:
                    line[cam] = path+'IMG/'+line[cam].split('\\')[-1]
            samples.append(line)
'''
samples_dataframe = pd.DataFrame(samples)
samples_dataframe.columns = ['center','left','right','steering','throttle','brake','speed']
samples_dataframe[:3]
'''
'Normalized the data'
def norm_center(imgs):
    """Normalize a batch of images along the channel axis with Keras
    `normalize`, then shift by -0.5 so values are roughly zero-centered."""
    centered = normalize(imgs, axis=3) - 0.5
    return centered
'''
means = images.mean(axis=(1,2), dtype='float64')
stds = images.std(axis=(1,2), dtype='float64')
print('Data Before Normalizing & Center')
print('Means: %s' % means[:3])
print('stds: %s' % stds[:3])
print('Mins: %s \nMaxs: %s' % (images.min(axis=(1,2))[:3], images.max(axis=(1,2))[:3]))
images = norm_center(images)
means = images.mean(axis=(1,2), dtype='float64')
print('\n\nData After Normalizing & Center')
print('Means: %s' % means[:3])
print('Mins: %s \nMaxs: %s' % (images.min(axis=(1,2))[:3], images.max(axis=(1,2))[:3]))
'''
'Image Augmentation'
def image_aug(imgs, angles):
    """Augment by mirroring: flip the image(s) left-right and negate the
    steering angle(s) — a mirrored road requires the opposite turn."""
    return np.fliplr(imgs), -angles
'Create Training & Validation set'
# Hold out 20% of the CSV rows for validation.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print('len of train_samples is', len(train_samples))
print('len of validation_samples is', len(validation_samples))
# Global accumulator: the generator appends every steering label it emits,
# used for the angle-distribution histogram at the bottom of the script.
angles = np.array([]) # For angle distribution study
def _load_camera_image(img_path):
    """Read one camera frame, resize to 160x320 if needed, convert RGB->YUV."""
    img = plt.imread(img_path)
    if img.shape != (160, 320, 3):
        # BUGFIX: cv2.resize takes dsize=(width, height); the original passed
        # the 3-tuple (160, 320, 3), which cv2 rejects.  dsize=(320, 160)
        # produces the intended (160, 320, 3) array.
        img = cv2.resize(img, (320, 160), interpolation=cv2.INTER_AREA)
    return cv2.cvtColor(img, cv2.COLOR_RGB2YUV)

def generator(samples, batch_size=32):
    """Yield shuffled (X, y) batches forever for Keras fit_generator.

    Each CSV row contributes 6 examples: center/left/right camera frames plus
    a horizontally flipped copy of each.  Side-camera angles are offset by
    +/- `correction`.  X is normalized via norm_center; every label is also
    appended to the global `angles` array for distribution study.
    """
    global angles
    num_samples = len(samples)
    correction = 0.12  # steering offset for the side cameras (tuning parameter)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            # One list per (camera, flipped?) combination, stacked in the same
            # order as before: center, left, right, then their flipped copies.
            cam_images = [[], [], []]
            cam_images_aug = [[], [], []]
            cam_angles = [[], [], []]
            cam_angles_aug = [[], [], []]
            for batch_sample in batch_samples:
                # Column 3 of the CSV row is the recorded steering angle.
                angle_center = float(batch_sample[3])
                per_camera = (angle_center,
                              angle_center + correction,   # left camera
                              angle_center - correction)   # right camera
                # BUGFIX(structure): the original repeated the load/resize/
                # convert/append stanza verbatim per camera; collapsed here.
                for cam in range(3):
                    image = _load_camera_image(batch_sample[cam])
                    cam_images[cam].append(image)
                    cam_angles[cam].append(per_camera[cam])
                    image_flip, angle_flip = image_aug(image, per_camera[cam])
                    cam_images_aug[cam].append(image_flip)
                    cam_angles_aug[cam].append(angle_flip)
            # Normalize the data and assemble the batch.
            X_train = np.vstack(cam_images + cam_images_aug)
            X_train = norm_center(X_train)
            y_train = np.hstack([np.array(a) for a in cam_angles + cam_angles_aug])
            angles = np.hstack((angles, y_train))
            yield sklearn.utils.shuffle(X_train, y_train)
# Set the parameters
batch_size = 32        # CSV rows per generator step (each row yields 6 images)
dropout_ratio = 0.5    # dropout probability applied after every conv layer
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# Raw simulator frame: 160 rows x 320 columns x 3 channels.
# NOTE(review): the names suggest col=160 / row=320, i.e. swapped — harmless
# here because only the tuple (160, 320, 3) is consumed as input_shape, but
# worth confirming against the simulator's actual frame size.
col, row, ch = 160, 320, 3
input_shape = (col, row, ch)
'Model Architecture'
# Nvidia end-to-end driving CNN (5 conv + 4 dense layers), preceded by a
# Cropping2D layer that removes the sky (top 70 px) and hood (bottom 25 px).
# NOTE(review): the original mixed Keras 1 argument names (border_mode=,
# Dropout(p=), Dense(output_dim=)) with the Keras 2 Conv2D signature
# (filters=, kernel_size=).  Unified here on the Keras 2 names (padding=,
# rate, units) — the layers built are identical, minus deprecation warnings.
model = Sequential()
model.add(Cropping2D(input_shape=input_shape,
                     cropping=((70, 25), (0, 0))))
model.add(Conv2D(filters=24, kernel_size=5, strides=(2, 2),
                 activation='relu', padding='valid'))
model.add(Dropout(dropout_ratio))
model.add(Conv2D(filters=36, kernel_size=5, strides=(2, 2),
                 activation='relu', padding='valid'))
model.add(Dropout(dropout_ratio))
model.add(Conv2D(filters=48, kernel_size=5, strides=(2, 2),
                 activation='relu', padding='valid'))
model.add(Dropout(dropout_ratio))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1),
                 activation='relu', padding='valid'))
model.add(Dropout(dropout_ratio))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1),
                 activation='relu', padding='valid'))
model.add(Dropout(dropout_ratio))
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1))  # single regression output: the steering angle
model.summary()
# Make sure the checkpoint directory exists before training starts.
if not os.path.exists("Models_Log/"):
    print('Models_Log will be created.')
    os.makedirs("Models_Log/")
else:
    print('Model will be saved to /Models_Log')
# Stop early once val_loss stalls; snapshot the model after every epoch.
my_callbacks = [
    EarlyStopping(min_delta=0.0001, patience=3),
    ModelCheckpoint(filepath='Models_Log/model.{epoch:02d}-{val_loss:.4f}.h5'),
]
# Regression on the steering angle: mean-squared-error loss, Adam optimizer.
model.compile(loss='mse', optimizer='adam')
# NOTE(review): Keras 1-style fit_generator kwargs (samples_per_epoch,
# nb_epoch, nb_val_samples).  samples_per_epoch=batch_size means each "epoch"
# consumes a single generator batch — presumably intentional for quick runs,
# but verify against the intended training budget.
model.fit_generator(generator=train_generator,
                    samples_per_epoch=batch_size,
                    nb_epoch=5,
                    validation_data=validation_generator,
                    nb_val_samples=batch_size,
                    callbacks=my_callbacks,
                    verbose=1)
model.save('model.h5')
print('Data feed to training is')
print(pd.DataFrame(paths))
print('Model will be saved as model.h5')
'Angles Distribution'
# Histogram of every steering label the generator emitted (global `angles`):
# num_bins+2 bin edges centered on zero in steps of 0.1.
num_bins = 20
bins = (np.arange(num_bins+2)-(num_bins+1)/2)/10
x_label = (np.arange(num_bins+1)-num_bins/2)/10
num_samples_bin, _, _ = plt.hist(angles, bins=bins , rwidth=0.5)
print(pd.DataFrame((x_label,num_samples_bin)))
| [
"csv.reader",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"numpy.arange",
"keras.layers.core.Flatten",
"matplotlib.pyplot.imread",
"pandas.DataFrame",
"cv2.cvtColor",
"os.path.exists",
"keras.layers.core.Dropout",
"cv2.resize",
"keras.layers.core.Dense",
"keras.cal... | [((645, 674), 'os.listdir', 'os.listdir', (['"""SimulationData/"""'], {}), "('SimulationData/')\n", (655, 674), False, 'import os\n'), ((2826, 2866), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (2842, 2866), False, 'from sklearn.model_selection import train_test_split\n'), ((2994, 3006), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3002, 3006), True, 'import numpy as np\n'), ((7393, 7405), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7403, 7405), False, 'from keras.models import Sequential\n'), ((8954, 8983), 'os.path.exists', 'os.path.exists', (['"""Models_Log/"""'], {}), "('Models_Log/')\n", (8968, 8983), False, 'import os\n'), ((9905, 9944), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': 'bins', 'rwidth': '(0.5)'}), '(angles, bins=bins, rwidth=0.5)\n', (9913, 9944), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2023), 'keras.utils.normalize', 'normalize', (['imgs'], {'axis': '(3)'}), '(imgs, axis=3)\n', (2009, 2023), False, 'from keras.utils import normalize\n'), ((2666, 2681), 'numpy.fliplr', 'np.fliplr', (['imgs'], {}), '(imgs)\n', (2675, 2681), True, 'import numpy as np\n'), ((7512, 7576), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'input_shape': 'input_shape', 'cropping': '((70, 25), (0, 0))'}), '(input_shape=input_shape, cropping=((70, 25), (0, 0)))\n', (7522, 7576), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((7608, 7701), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(24)', 'kernel_size': '(5)', 'strides': '(2, 2)', 'activation': '"""relu"""', 'border_mode': '"""valid"""'}), "(filters=24, kernel_size=5, strides=(2, 2), activation='relu',\n border_mode='valid')\n", (7614, 7701), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((7780, 7804), 'keras.layers.core.Dropout', 'Dropout', ([], {'p': 'dropout_ratio'}), '(p=dropout_ratio)\n', (7787, 
7804), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((7817, 7910), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(36)', 'kernel_size': '(5)', 'strides': '(2, 2)', 'activation': '"""relu"""', 'border_mode': '"""valid"""'}), "(filters=36, kernel_size=5, strides=(2, 2), activation='relu',\n border_mode='valid')\n", (7823, 7910), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((7989, 8013), 'keras.layers.core.Dropout', 'Dropout', ([], {'p': 'dropout_ratio'}), '(p=dropout_ratio)\n', (7996, 8013), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8026, 8119), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(48)', 'kernel_size': '(5)', 'strides': '(2, 2)', 'activation': '"""relu"""', 'border_mode': '"""valid"""'}), "(filters=48, kernel_size=5, strides=(2, 2), activation='relu',\n border_mode='valid')\n", (8032, 8119), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((8198, 8222), 'keras.layers.core.Dropout', 'Dropout', ([], {'p': 'dropout_ratio'}), '(p=dropout_ratio)\n', (8205, 8222), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8235, 8328), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(1, 1)', 'activation': '"""relu"""', 'border_mode': '"""valid"""'}), "(filters=64, kernel_size=3, strides=(1, 1), activation='relu',\n border_mode='valid')\n", (8241, 8328), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((8407, 8431), 'keras.layers.core.Dropout', 'Dropout', ([], {'p': 'dropout_ratio'}), '(p=dropout_ratio)\n', (8414, 8431), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8444, 8537), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(1, 1)', 'activation': '"""relu"""', 'border_mode': '"""valid"""'}), "(filters=64, kernel_size=3, strides=(1, 1), 
activation='relu',\n border_mode='valid')\n", (8450, 8537), False, 'from keras.layers import Input, MaxPooling2D, Conv2D, Cropping2D\n'), ((8616, 8640), 'keras.layers.core.Dropout', 'Dropout', ([], {'p': 'dropout_ratio'}), '(p=dropout_ratio)\n', (8623, 8640), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8653, 8662), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (8660, 8662), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8675, 8716), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': '(1164)', 'activation': '"""relu"""'}), "(output_dim=1164, activation='relu')\n", (8680, 8716), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8745, 8785), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': '(100)', 'activation': '"""relu"""'}), "(output_dim=100, activation='relu')\n", (8750, 8785), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8814, 8853), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': '(50)', 'activation': '"""relu"""'}), "(output_dim=50, activation='relu')\n", (8819, 8853), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((8882, 8901), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': '(1)'}), '(output_dim=1)\n', (8887, 8901), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((9085, 9111), 'os.makedirs', 'os.makedirs', (['"""Models_Log/"""'], {}), "('Models_Log/')\n", (9096, 9111), False, 'import os\n'), ((9133, 9176), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.0001)', 'patience': '(3)'}), '(min_delta=0.0001, patience=3)\n', (9146, 9176), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9182, 9256), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""Models_Log/model.{epoch:02d}-{val_loss:.4f}.h5"""'}), 
"(filepath='Models_Log/model.{epoch:02d}-{val_loss:.4f}.h5')\n", (9197, 9256), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9685, 9704), 'pandas.DataFrame', 'pd.DataFrame', (['paths'], {}), '(paths)\n', (9697, 9704), True, 'import pandas as pd\n'), ((9953, 9993), 'pandas.DataFrame', 'pd.DataFrame', (['(x_label, num_samples_bin)'], {}), '((x_label, num_samples_bin))\n', (9965, 9993), True, 'import pandas as pd\n'), ((1013, 1032), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1023, 1032), False, 'import csv\n'), ((3201, 3217), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (3208, 3217), False, 'from sklearn.utils import shuffle\n'), ((9792, 9815), 'numpy.arange', 'np.arange', (['(num_bins + 2)'], {}), '(num_bins + 2)\n', (9801, 9815), True, 'import numpy as np\n'), ((9844, 9867), 'numpy.arange', 'np.arange', (['(num_bins + 1)'], {}), '(num_bins + 1)\n', (9853, 9867), True, 'import numpy as np\n'), ((1175, 1199), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1189, 1199), False, 'import os\n'), ((1394, 1418), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1408, 1418), False, 'import os\n'), ((1605, 1629), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1619, 1629), False, 'import os\n'), ((6246, 6357), 'numpy.vstack', 'np.vstack', (['(images_center, images_left, images_right, images_center_aug,\n images_left_aug, images_right_aug)'], {}), '((images_center, images_left, images_right, images_center_aug,\n images_left_aug, images_right_aug))\n', (6255, 6357), True, 'import numpy as np\n'), ((6438, 6461), 'numpy.array', 'np.array', (['angles_center'], {}), '(angles_center)\n', (6446, 6461), True, 'import numpy as np\n'), ((6488, 6509), 'numpy.array', 'np.array', (['angles_left'], {}), '(angles_left)\n', (6496, 6509), True, 'import numpy as np\n'), ((6537, 6559), 'numpy.array', 'np.array', (['angles_right'], {}), 
'(angles_right)\n', (6545, 6559), True, 'import numpy as np\n'), ((6605, 6632), 'numpy.array', 'np.array', (['angles_center_aug'], {}), '(angles_center_aug)\n', (6613, 6632), True, 'import numpy as np\n'), ((6663, 6688), 'numpy.array', 'np.array', (['angles_left_aug'], {}), '(angles_left_aug)\n', (6671, 6688), True, 'import numpy as np\n'), ((6720, 6746), 'numpy.array', 'np.array', (['angles_right_aug'], {}), '(angles_right_aug)\n', (6728, 6746), True, 'import numpy as np\n'), ((6782, 6893), 'numpy.hstack', 'np.hstack', (['(angles_center, angles_left, angles_right, angles_center_aug,\n angles_left_aug, angles_right_aug)'], {}), '((angles_center, angles_left, angles_right, angles_center_aug,\n angles_left_aug, angles_right_aug))\n', (6791, 6893), True, 'import numpy as np\n'), ((6925, 6953), 'numpy.hstack', 'np.hstack', (['(angles, y_train)'], {}), '((angles, y_train))\n', (6934, 6953), True, 'import numpy as np\n'), ((3974, 4001), 'matplotlib.pyplot.imread', 'plt.imread', (['batch_sample[0]'], {}), '(batch_sample[0])\n', (3984, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4241), 'cv2.cvtColor', 'cv2.cvtColor', (['image_center', 'cv2.COLOR_RGB2YUV'], {}), '(image_center, cv2.COLOR_RGB2YUV)\n', (4208, 4241), False, 'import cv2\n'), ((4305, 4332), 'matplotlib.pyplot.imread', 'plt.imread', (['batch_sample[1]'], {}), '(batch_sample[1])\n', (4315, 4332), True, 'import matplotlib.pyplot as plt\n'), ((4519, 4562), 'cv2.cvtColor', 'cv2.cvtColor', (['image_left', 'cv2.COLOR_RGB2YUV'], {}), '(image_left, cv2.COLOR_RGB2YUV)\n', (4531, 4562), False, 'import cv2\n'), ((4627, 4654), 'matplotlib.pyplot.imread', 'plt.imread', (['batch_sample[2]'], {}), '(batch_sample[2])\n', (4637, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4845, 4889), 'cv2.cvtColor', 'cv2.cvtColor', (['image_right', 'cv2.COLOR_RGB2YUV'], {}), '(image_right, cv2.COLOR_RGB2YUV)\n', (4857, 4889), False, 'import cv2\n'), ((6985, 7024), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', 
(['X_train', 'y_train'], {}), '(X_train, y_train)\n', (7006, 7024), False, 'import sklearn\n'), ((4093, 4162), 'cv2.resize', 'cv2.resize', (['image_center', '(160, 320, 3)'], {'interpolation': 'cv2.INTER_AREA'}), '(image_center, (160, 320, 3), interpolation=cv2.INTER_AREA)\n', (4103, 4162), False, 'import cv2\n'), ((4420, 4487), 'cv2.resize', 'cv2.resize', (['image_left', '(160, 320, 3)'], {'interpolation': 'cv2.INTER_AREA'}), '(image_left, (160, 320, 3), interpolation=cv2.INTER_AREA)\n', (4430, 4487), False, 'import cv2\n'), ((4744, 4812), 'cv2.resize', 'cv2.resize', (['image_right', '(160, 320, 3)'], {'interpolation': 'cv2.INTER_AREA'}), '(image_right, (160, 320, 3), interpolation=cv2.INTER_AREA)\n', (4754, 4812), False, 'import cv2\n')] |
"""*****************************************************************************************
MIT License
Copyright (c) 2019 <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
####################################### NOTES ############################################
# - Please cite our paper when using the code:
# "Fast and Accurate Least-Mean-Squares Solvers" (NIPS19' - Oral presentation)
# <NAME> and <NAME> and <NAME>
#
# - Faster algorithm for large values of the dimension d will be published soon.
##########################################################################################
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import KFold
import time
import math
def Caratheodory(P, u, dtype='float64'):
    """
    Implementation of the Caratheodory Theorem(1907)
    input: a numpy array P containing n rows (points), each of size d, and a positive vector of weights u (that sums to 1)
    output:a new vector of weights new_u that satisfies :
              1. new_u is positive and sums to 1
              2. new_u has at most d+1 non zero entries
              3. the weighted sum of P and u (input) is the same as the weighted sum of P and new_u (output)
    computation time: O(n^2d^2)
    """
    while 1:
        n = np.count_nonzero(u)
        d = P.shape[1]
        u_non_zero = np.nonzero(u)
        if n <= d + 1:
            return u
        # Restrict attention to the currently active (nonzero-weight) points.
        A = P[u_non_zero]
        # Columns of A are p_i - p_0 for i >= 1: more than d such differences
        # are always linearly dependent, so A has a nontrivial null space.
        reduced_vec = np.outer(A[0], np.ones(A.shape[0] - 1, dtype=dtype))
        A = A[1:].T - reduced_vec
        # The last right-singular vector spans (part of) that null space.
        _, _, V = np.linalg.svd(A, full_matrices=True)
        v = V[-1]
        # Prepend v_0 = -sum(v) so that sum(v) == 0 and sum_i v_i * p_i == 0;
        # moving the weights along v preserves both the total weight and the
        # weighted sum of the points.
        v = np.insert(v, [0], -1 * np.sum(v))
        # Largest step alpha that keeps every weight nonnegative.
        idx_good_alpha = np.nonzero(v > 0)
        alpha = np.min(u[u_non_zero][idx_good_alpha] / v[idx_good_alpha])
        w = np.zeros(u.shape[0], dtype=dtype)
        tmp_w = u[u_non_zero] - alpha * v
        # Force the limiting weight to exactly zero (guards against round-off)
        # so each iteration retires at least one point.
        tmp_w[np.argmin(tmp_w)] = 0.0
        w[u_non_zero] = tmp_w
        # BUGFIX: the original also executed
        #     w[u_non_zero][np.argmin(w[u_non_zero])] = 0
        # which is a no-op — fancy indexing returns a copy, so the assignment
        # never reached w.  The zeroing above already does the job; the dead
        # statement is removed.
        u = w
def Fast_Caratheodory(P,u,coreset_size, dtype = 'float64'):
    """
    Our fast and accurate implementation of Caratheodory's Theorem
    Input: a numpy array P containing n rows (points), each of size d, and a positive vector of weights u (if u does not
           sum to 1, we first normalize u by its sum, then multiply u back by its original sum before returning it)
    Output: a new vector of positive weights new_u that satisfies :
              1. new_u has at most d+1 non zero entries
              2. the weighted sum of P and u (input) is the same as the weighted sum of P and new_u (output)
    Computation time: O(nd+logn*d^4)
    """
    d = P.shape[1]
    n = P.shape[0]
    m = 2*d + 2  # number of clusters per recursion level
    if n <= d + 1:
        # Already at most d+1 points -- nothing to sparsify.
        return u.reshape(-1)
    u_sum = np.sum(u)
    u = u/u_sum  # normalize; scaled back by u_sum on return
    chunk_size = math.ceil(n/m)
    current_m = math.ceil(n/chunk_size)
    # Zero-pad P and u so they split evenly into current_m equal chunks.
    add_z = chunk_size - int (n%chunk_size)
    u = u.reshape(-1,1)
    if add_z != chunk_size:
        zeros = np.zeros((add_z, P.shape[1]), dtype = dtype)
        P = np.concatenate((P, zeros))
        zeros = np.zeros((add_z, u.shape[1]), dtype = dtype)
        u = np.concatenate((u, zeros))
    # Track original row indices so the final weights map back to input rows.
    idxarray = np.array(range(P.shape[0]) )
    p_groups = P.reshape(current_m, chunk_size, P.shape[1])
    u_groups = u.reshape(current_m, chunk_size)
    idx_group = idxarray.reshape(current_m, chunk_size)
    u_nonzero = np.count_nonzero(u)
    if not coreset_size:
        coreset_size = d+1
    # NOTE(review): if the loop below never runs (u_nonzero <= coreset_size
    # while n > d+1), subset_u / new_idx_array are unbound and the tail of
    # this function raises NameError -- confirm callers never hit that case.
    while u_nonzero > coreset_size:
        # Weighted mean of each cluster; run exact Caratheodory on the means.
        groups_means = np.einsum('ijk,ij->ik',p_groups, u_groups)
        group_weigts = np.ones(groups_means.shape[0], dtype = dtype)*1/current_m
        Cara_u_idx = Caratheodory(groups_means , group_weigts,dtype = dtype )
        # Keep only points belonging to clusters that survived, rescaling
        # their weights by the cluster's new Caratheodory weight.
        IDX = np.nonzero(Cara_u_idx)
        new_P = p_groups[IDX].reshape(-1,d)
        subset_u = (current_m * u_groups[IDX] * Cara_u_idx[IDX][:, np.newaxis]).reshape(-1, 1)
        new_idx_array = idx_group[IDX].reshape(-1,1)
        u_nonzero = np.count_nonzero(subset_u)
        # Re-chunk the surviving points for the next round, zero-padding again
        # so they split evenly.
        chunk_size = math.ceil(new_P.shape[0]/ m)
        current_m = math.ceil(new_P.shape[0]/ chunk_size)
        add_z = chunk_size - int(new_P.shape[0] % chunk_size)
        if add_z != chunk_size:
            new_P = np.concatenate((new_P, np.zeros((add_z, new_P.shape[1]), dtype = dtype)))
            subset_u = np.concatenate((subset_u, np.zeros((add_z, subset_u.shape[1]),dtype = dtype)))
            new_idx_array = np.concatenate((new_idx_array, np.zeros((add_z, new_idx_array.shape[1]),dtype = dtype)))
        p_groups = new_P.reshape(current_m, chunk_size, new_P.shape[1])
        u_groups = subset_u.reshape(current_m, chunk_size)
        idx_group = new_idx_array.reshape(current_m , chunk_size)
    # Scatter the surviving weights back to their original row positions,
    # dropping padding rows (index >= n), and undo the initial normalization.
    new_u = np.zeros(n)
    subset_u = subset_u[(new_idx_array < n)]
    new_idx_array = new_idx_array[(new_idx_array < n)].reshape(-1).astype(int)
    new_u[new_idx_array] = subset_u
    return u_sum * new_u
def linregcoreset(P, u, b=None, c_size=None, dtype='float64'):
    """
    Compute a coreset for weighted linear regression.

    Every input row (optionally augmented with its label from b) is lifted to
    the flattened outer product p p^T; a Caratheodory coreset of the lifted
    points preserves the weighted least-squares cost exactly, i.e. for every x:
        ||sqrt(u.T)*(Px-b)||^2 = ||sqrt(new_u.T)*(new_P x - new_b)||^2
    Returns (new_P, new_u) or, when b is given, (new_P, new_u, new_b).
    Computation time: O(nd^2+logn*d^8)
    """
    lifted = P if b is None else np.append(P, b, axis=1)
    rows = lifted.shape[0]
    cols = lifted.shape[1]
    # Outer product p_i p_i^T for every row i, flattened to length cols*cols.
    lifted = lifted.reshape(rows, cols, 1)
    lifted = np.einsum("ikj,ijk->ijk", lifted, lifted)
    lifted = lifted.reshape(rows, -1)
    new_weights = Fast_Caratheodory(lifted.reshape(lifted.shape[0], -1), u, c_size, dtype=dtype)
    keep = np.nonzero(new_weights)
    new_weights = new_weights[keep]
    if b is None:
        return P[keep], new_weights.reshape(-1)
    return P[keep], new_weights.reshape(-1), b[keep]
def _pad_coreset(pts, wts, lbls, size, dtype):
    """Zero-pad one fold's coreset up to `size` rows so all folds align."""
    add_z = size - pts.shape[0]
    pts = np.concatenate((pts, np.zeros((add_z, pts.shape[1]), dtype=dtype)))
    wts = np.concatenate((wts, np.zeros(add_z, dtype=dtype)))
    lbls = np.concatenate((lbls, np.zeros((add_z, lbls.shape[1]), dtype=dtype)))
    return pts, wts, lbls

def stream_coreset(P, u, b, folds=None, dtype='float64'):
    """
    This function computes a coreset for LMS solvers that use k-fold cross validation. It partitions the data into
    `folds` parts and computes a coreset for every part using linregcoreset.
    Input: a numpy array P containing n rows (points), each of size d, a positive vector of weights u of size n, a
           labels vector b of size n, and the number of folds used in the cross validation.
    Output: (new_P, new_u, new_b) satisfying, for every vector x:
        ||sqrt(u.T)*(Px-b)||^2 = ||sqrt(new_u.T)*(new_P x - new_b)||^2
    so LMS solvers give the same answer on the coreset as on the full input.
    Computation time: O(nd^2+logn*d^8)
    """
    if folds is None:
        return linregcoreset(P, u, b, dtype=dtype)
    m = int(P.shape[0] / folds)
    d = P.shape[1]
    # Every fold is padded to this many rows so the folds stay aligned.
    size_of_coreset = (d + 1) * (d + 1) + 1
    cc, uc, bc = linregcoreset(P[0:m], u[0:m], b[0:m], dtype=dtype)
    if cc.shape[0] < size_of_coreset and folds:
        cc, uc, bc = _pad_coreset(cc, uc, bc, size_of_coreset, dtype)
    for batch in range(1, folds):
        start, end = batch * m, (batch + 1) * m
        coreset, new_u, new_b = linregcoreset(P[start:end], u[start:end], b[start:end], dtype=dtype)
        if coreset.shape[0] < size_of_coreset and folds:
            coreset, new_u, new_b = _pad_coreset(coreset, new_u, new_b, size_of_coreset, dtype)
        cc = np.concatenate((cc, coreset))
        uc = np.concatenate((uc, new_u))
        bc = np.concatenate((bc, new_b))
    return cc, uc, bc
###################################################################################
# general test whether the fit result match the original problem
def test_model(test_data, test_labels, test_weights, clf):
weighted_test_data = test_data * np.sqrt(test_weights[:, np.newaxis])
weighted_test_labels = test_labels * np.sqrt(test_weights[:, np.newaxis])
score = clf.score(weighted_test_data, weighted_test_labels)
return score
# normal train data methods0
def train_model(data, labels, weights, clf):
    """Fit `clf` on sqrt-weight-scaled data; return (fit seconds, fitted clf)."""
    start = time.time()
    sqrt_w = np.sqrt(weights[:, np.newaxis])
    clf.fit(data * sqrt_w, (labels * sqrt_w).ravel())
    return time.time() - start, clf
# K-fold validation to train, using this paper's coreset method
def coreset_train_model(data, labels, weights, clf, folds=None, solver='ridge'):
    """Fit `clf` on a regression coreset of (data, labels, weights) rather
    than the full data; return (seconds spent, fitted clf)."""
    start = time.time()
    core_pts, core_w, core_lbls = stream_coreset(data, weights, labels, folds=folds)
    sqrt_w = np.sqrt(core_w[:, np.newaxis])
    X = core_pts * sqrt_w
    y = (core_lbls * sqrt_w).ravel()
    if solver in ['lasso', 'elastic']:
        # Lasso/ElasticNet objectives average over n_samples; rescale so the
        # regularization strength matches a fit on the full-size data.
        const = np.sqrt(core_pts.shape[0] / data.shape[0])
        clf.fit(const * X, const * y)
    else:
        clf.fit(X, y)
    return time.time() - start, clf
def get_new_clf(solver, folds=3, alphas=100):
    """Return a fresh (unfitted) sklearn regressor for `solver`.

    solver: one of "linear", "ridge", "lasso", "elastic".
    folds:  number of cross-validation folds for the *CV estimators.
    alphas: number of regularization strengths on the search grid.

    Raises ValueError for an unknown solver.  (BUGFIX: the original if-chain
    silently fell through for unknown solvers and crashed with an
    UnboundLocalError on `clf`; an explicit error is clearer.)
    """
    kf = KFold(n_splits=folds, shuffle=False)
    if solver == "linear":
        return linear_model.LinearRegression(fit_intercept=False)
    if solver == "ridge":
        # RidgeCV needs an explicit grid: `alphas` points spanning (0, 10].
        alpha_grid = np.arange(1 / alphas, 10 + 1 / alphas, 10 / alphas)
        return linear_model.RidgeCV(alphas=alpha_grid, fit_intercept=False, cv=kf)
    if solver == "lasso":
        return linear_model.LassoCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    if solver == "elastic":
        return linear_model.ElasticNetCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    raise ValueError("unknown solver: {!r}".format(solver))
def main():
    """Benchmark: for each solver, fit once on a coreset and once on the full
    random data, then report score difference, coefficient difference, and
    the two fit times."""
    n = 240000           # number of random points
    d = 3                # point dimension
    data_range = 100     # entries drawn from [0, data_range) then floored
    num_of_alphas = 300  # size of the regularization search grid
    folds = 3            # cross-validation folds
    data = np.floor(np.random.rand(n, d) * data_range)
    labels = np.floor(np.random.rand(n, 1) * data_range)
    weights = np.ones(n)
    for solver in ["lasso", "ridge", "elastic"]:
        # Coreset-based fit vs. fit on the full data, fresh estimator each time.
        clf = get_new_clf(solver, folds=folds, alphas=num_of_alphas)
        time_coreset, clf_coreset = coreset_train_model(data, labels, weights, clf, folds=folds, solver=solver)
        score_coreset = test_model(data, labels, weights, clf)
        clf = get_new_clf(solver, folds=folds, alphas=num_of_alphas)
        time_real, clf_real = train_model(data, labels, weights, clf)
        score_real = test_model(data, labels, weights, clf)
        print (" solver: {}\n number_of_alphas: {}, \nscore_diff = {}\n---->coef diff = {}\n---->coreset_time = {}\n---->data time = {}".format(
            solver,
            num_of_alphas,
            np.abs(score_coreset - score_real),
            np.sum(np.abs(clf_real.coef_ - clf_coreset.coef_)),
            time_coreset,
            time_real))
if __name__ == '__main__':
main() | [
"numpy.sum",
"numpy.abs",
"numpy.einsum",
"numpy.ones",
"numpy.argmin",
"numpy.linalg.svd",
"numpy.arange",
"sklearn.linear_model.RidgeCV",
"sklearn.linear_model.ElasticNetCV",
"numpy.append",
"math.ceil",
"sklearn.linear_model.LinearRegression",
"numpy.min",
"numpy.concatenate",
"sklear... | [((3996, 4005), 'numpy.sum', 'np.sum', (['u'], {}), '(u)\n', (4002, 4005), True, 'import numpy as np\n'), ((4041, 4057), 'math.ceil', 'math.ceil', (['(n / m)'], {}), '(n / m)\n', (4050, 4057), False, 'import math\n'), ((4073, 4098), 'math.ceil', 'math.ceil', (['(n / chunk_size)'], {}), '(n / chunk_size)\n', (4082, 4098), False, 'import math\n'), ((4643, 4662), 'numpy.count_nonzero', 'np.count_nonzero', (['u'], {}), '(u)\n', (4659, 4662), True, 'import numpy as np\n'), ((6172, 6183), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (6180, 6183), True, 'import numpy as np\n'), ((7414, 7453), 'numpy.einsum', 'np.einsum', (['"""ikj,ijk->ijk"""', 'P_tag', 'P_tag'], {}), "('ikj,ijk->ijk', P_tag, P_tag)\n", (7423, 7453), True, 'import numpy as np\n'), ((7656, 7682), 'numpy.nonzero', 'np.nonzero', (['coreset_weigts'], {}), '(coreset_weigts)\n', (7666, 7682), True, 'import numpy as np\n'), ((10923, 10934), 'time.time', 'time.time', ([], {}), '()\n', (10932, 10934), False, 'import time\n'), ((11130, 11141), 'time.time', 'time.time', ([], {}), '()\n', (11139, 11141), False, 'import time\n'), ((11352, 11363), 'time.time', 'time.time', ([], {}), '()\n', (11361, 11363), False, 'import time\n'), ((11901, 11912), 'time.time', 'time.time', ([], {}), '()\n', (11910, 11912), False, 'import time\n'), ((12013, 12049), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds', 'shuffle': '(False)'}), '(n_splits=folds, shuffle=False)\n', (12018, 12049), False, 'from sklearn.model_selection import KFold\n'), ((12789, 12799), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (12796, 12799), True, 'import numpy as np\n'), ((2468, 2487), 'numpy.count_nonzero', 'np.count_nonzero', (['u'], {}), '(u)\n', (2484, 2487), True, 'import numpy as np\n'), ((2534, 2547), 'numpy.nonzero', 'np.nonzero', (['u'], {}), '(u)\n', (2544, 2547), True, 'import numpy as np\n'), ((2757, 2793), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {'full_matrices': '(True)'}), '(A, 
full_matrices=True)\n', (2770, 2793), True, 'import numpy as np\n'), ((2888, 2905), 'numpy.nonzero', 'np.nonzero', (['(v > 0)'], {}), '(v > 0)\n', (2898, 2905), True, 'import numpy as np\n'), ((2923, 2980), 'numpy.min', 'np.min', (['(u[u_non_zero][idx_good_alpha] / v[idx_good_alpha])'], {}), '(u[u_non_zero][idx_good_alpha] / v[idx_good_alpha])\n', (2929, 2980), True, 'import numpy as np\n'), ((2994, 3027), 'numpy.zeros', 'np.zeros', (['u.shape[0]'], {'dtype': 'dtype'}), '(u.shape[0], dtype=dtype)\n', (3002, 3027), True, 'import numpy as np\n'), ((4215, 4257), 'numpy.zeros', 'np.zeros', (['(add_z, P.shape[1])'], {'dtype': 'dtype'}), '((add_z, P.shape[1]), dtype=dtype)\n', (4223, 4257), True, 'import numpy as np\n'), ((4273, 4299), 'numpy.concatenate', 'np.concatenate', (['(P, zeros)'], {}), '((P, zeros))\n', (4287, 4299), True, 'import numpy as np\n'), ((4317, 4359), 'numpy.zeros', 'np.zeros', (['(add_z, u.shape[1])'], {'dtype': 'dtype'}), '((add_z, u.shape[1]), dtype=dtype)\n', (4325, 4359), True, 'import numpy as np\n'), ((4375, 4401), 'numpy.concatenate', 'np.concatenate', (['(u, zeros)'], {}), '((u, zeros))\n', (4389, 4401), True, 'import numpy as np\n'), ((4782, 4825), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ik"""', 'p_groups', 'u_groups'], {}), "('ijk,ij->ik', p_groups, u_groups)\n", (4791, 4825), True, 'import numpy as np\n'), ((5005, 5027), 'numpy.nonzero', 'np.nonzero', (['Cara_u_idx'], {}), '(Cara_u_idx)\n', (5015, 5027), True, 'import numpy as np\n'), ((5337, 5363), 'numpy.count_nonzero', 'np.count_nonzero', (['subset_u'], {}), '(subset_u)\n', (5353, 5363), True, 'import numpy as np\n'), ((5386, 5415), 'math.ceil', 'math.ceil', (['(new_P.shape[0] / m)'], {}), '(new_P.shape[0] / m)\n', (5395, 5415), False, 'import math\n'), ((5436, 5474), 'math.ceil', 'math.ceil', (['(new_P.shape[0] / chunk_size)'], {}), '(new_P.shape[0] / chunk_size)\n', (5445, 5474), False, 'import math\n'), ((7243, 7266), 'numpy.append', 'np.append', (['P', 'b'], {'axis': '(1)'}), 
'(P, b, axis=1)\n', (7252, 7266), True, 'import numpy as np\n'), ((9285, 9328), 'numpy.zeros', 'np.zeros', (['(add_z, cc.shape[1])'], {'dtype': 'dtype'}), '((add_z, cc.shape[1]), dtype=dtype)\n', (9293, 9328), True, 'import numpy as np\n'), ((9347, 9374), 'numpy.concatenate', 'np.concatenate', (['(cc, zeros)'], {}), '((cc, zeros))\n', (9361, 9374), True, 'import numpy as np\n'), ((9396, 9424), 'numpy.zeros', 'np.zeros', (['add_z'], {'dtype': 'dtype'}), '(add_z, dtype=dtype)\n', (9404, 9424), True, 'import numpy as np\n'), ((9445, 9472), 'numpy.concatenate', 'np.concatenate', (['(uc, zeros)'], {}), '((uc, zeros))\n', (9459, 9472), True, 'import numpy as np\n'), ((9494, 9537), 'numpy.zeros', 'np.zeros', (['(add_z, bc.shape[1])'], {'dtype': 'dtype'}), '((add_z, bc.shape[1]), dtype=dtype)\n', (9502, 9537), True, 'import numpy as np\n'), ((9556, 9583), 'numpy.concatenate', 'np.concatenate', (['(bc, zeros)'], {}), '((bc, zeros))\n', (9570, 9583), True, 'import numpy as np\n'), ((10234, 10261), 'numpy.concatenate', 'np.concatenate', (['(bc, new_b)'], {}), '((bc, new_b))\n', (10248, 10261), True, 'import numpy as np\n'), ((10276, 10305), 'numpy.concatenate', 'np.concatenate', (['(cc, coreset)'], {}), '((cc, coreset))\n', (10290, 10305), True, 'import numpy as np\n'), ((10320, 10347), 'numpy.concatenate', 'np.concatenate', (['(uc, new_u)'], {}), '((uc, new_u))\n', (10334, 10347), True, 'import numpy as np\n'), ((10626, 10662), 'numpy.sqrt', 'np.sqrt', (['test_weights[:, np.newaxis]'], {}), '(test_weights[:, np.newaxis])\n', (10633, 10662), True, 'import numpy as np\n'), ((10705, 10741), 'numpy.sqrt', 'np.sqrt', (['test_weights[:, np.newaxis]'], {}), '(test_weights[:, np.newaxis])\n', (10712, 10741), True, 'import numpy as np\n'), ((10963, 10994), 'numpy.sqrt', 'np.sqrt', (['weights[:, np.newaxis]'], {}), '(weights[:, np.newaxis])\n', (10970, 10994), True, 'import numpy as np\n'), ((11499, 11538), 'numpy.sqrt', 'np.sqrt', (['coreset_weights[:, np.newaxis]'], {}), 
'(coreset_weights[:, np.newaxis])\n', (11506, 11538), True, 'import numpy as np\n'), ((11696, 11737), 'numpy.sqrt', 'np.sqrt', (['(coreset.shape[0] / data.shape[0])'], {}), '(coreset.shape[0] / data.shape[0])\n', (11703, 11737), True, 'import numpy as np\n'), ((12092, 12142), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (12121, 12142), False, 'from sklearn import linear_model\n'), ((12189, 12240), 'numpy.arange', 'np.arange', (['(1 / alphas)', '(10 + 1 / alphas)', '(10 / alphas)'], {}), '(1 / alphas, 10 + 1 / alphas, 10 / alphas)\n', (12198, 12240), True, 'import numpy as np\n'), ((12249, 12312), 'sklearn.linear_model.RidgeCV', 'linear_model.RidgeCV', ([], {'alphas': 'alphas', 'fit_intercept': '(False)', 'cv': 'kf'}), '(alphas=alphas, fit_intercept=False, cv=kf)\n', (12269, 12312), False, 'from sklearn import linear_model\n'), ((2663, 2699), 'numpy.ones', 'np.ones', (['(A.shape[0] - 1)'], {'dtype': 'dtype'}), '(A.shape[0] - 1, dtype=dtype)\n', (2670, 2699), True, 'import numpy as np\n'), ((3089, 3105), 'numpy.argmin', 'np.argmin', (['tmp_w'], {}), '(tmp_w)\n', (3098, 3105), True, 'import numpy as np\n'), ((3167, 3191), 'numpy.argmin', 'np.argmin', (['w[u_non_zero]'], {}), '(w[u_non_zero])\n', (3176, 3191), True, 'import numpy as np\n'), ((9892, 9940), 'numpy.zeros', 'np.zeros', (['(add_z, coreset.shape[1])'], {'dtype': 'dtype'}), '((add_z, coreset.shape[1]), dtype=dtype)\n', (9900, 9940), True, 'import numpy as np\n'), ((9964, 9996), 'numpy.concatenate', 'np.concatenate', (['(coreset, zeros)'], {}), '((coreset, zeros))\n', (9978, 9996), True, 'import numpy as np\n'), ((10018, 10046), 'numpy.zeros', 'np.zeros', (['add_z'], {'dtype': 'dtype'}), '(add_z, dtype=dtype)\n', (10026, 10046), True, 'import numpy as np\n'), ((10069, 10099), 'numpy.concatenate', 'np.concatenate', (['(new_u, zeros)'], {}), '((new_u, zeros))\n', (10083, 10099), True, 'import numpy as np\n'), ((10121, 
10167), 'numpy.zeros', 'np.zeros', (['(add_z, new_b.shape[1])'], {'dtype': 'dtype'}), '((add_z, new_b.shape[1]), dtype=dtype)\n', (10129, 10167), True, 'import numpy as np\n'), ((10189, 10219), 'numpy.concatenate', 'np.concatenate', (['(new_b, zeros)'], {}), '((new_b, zeros))\n', (10203, 10219), True, 'import numpy as np\n'), ((12355, 12420), 'sklearn.linear_model.LassoCV', 'linear_model.LassoCV', ([], {'n_alphas': 'alphas', 'fit_intercept': '(False)', 'cv': 'kf'}), '(n_alphas=alphas, fit_intercept=False, cv=kf)\n', (12375, 12420), False, 'from sklearn import linear_model\n'), ((12681, 12701), 'numpy.random.rand', 'np.random.rand', (['n', 'd'], {}), '(n, d)\n', (12695, 12701), True, 'import numpy as np\n'), ((12739, 12759), 'numpy.random.rand', 'np.random.rand', (['n', '(1)'], {}), '(n, 1)\n', (12753, 12759), True, 'import numpy as np\n'), ((2849, 2858), 'numpy.sum', 'np.sum', (['v'], {}), '(v)\n', (2855, 2858), True, 'import numpy as np\n'), ((4849, 4892), 'numpy.ones', 'np.ones', (['groups_means.shape[0]'], {'dtype': 'dtype'}), '(groups_means.shape[0], dtype=dtype)\n', (4856, 4892), True, 'import numpy as np\n'), ((11028, 11059), 'numpy.sqrt', 'np.sqrt', (['weights[:, np.newaxis]'], {}), '(weights[:, np.newaxis])\n', (11035, 11059), True, 'import numpy as np\n'), ((11588, 11627), 'numpy.sqrt', 'np.sqrt', (['coreset_weights[:, np.newaxis]'], {}), '(coreset_weights[:, np.newaxis])\n', (11595, 11627), True, 'import numpy as np\n'), ((12467, 12537), 'sklearn.linear_model.ElasticNetCV', 'linear_model.ElasticNetCV', ([], {'n_alphas': 'alphas', 'fit_intercept': '(False)', 'cv': 'kf'}), '(n_alphas=alphas, fit_intercept=False, cv=kf)\n', (12492, 12537), False, 'from sklearn import linear_model\n'), ((13561, 13595), 'numpy.abs', 'np.abs', (['(score_coreset - score_real)'], {}), '(score_coreset - score_real)\n', (13567, 13595), True, 'import numpy as np\n'), ((5616, 5662), 'numpy.zeros', 'np.zeros', (['(add_z, new_P.shape[1])'], {'dtype': 'dtype'}), '((add_z, 
new_P.shape[1]), dtype=dtype)\n', (5624, 5662), True, 'import numpy as np\n'), ((5717, 5766), 'numpy.zeros', 'np.zeros', (['(add_z, subset_u.shape[1])'], {'dtype': 'dtype'}), '((add_z, subset_u.shape[1]), dtype=dtype)\n', (5725, 5766), True, 'import numpy as np\n'), ((5830, 5884), 'numpy.zeros', 'np.zeros', (['(add_z, new_idx_array.shape[1])'], {'dtype': 'dtype'}), '((add_z, new_idx_array.shape[1]), dtype=dtype)\n', (5838, 5884), True, 'import numpy as np\n'), ((13617, 13659), 'numpy.abs', 'np.abs', (['(clf_real.coef_ - clf_coreset.coef_)'], {}), '(clf_real.coef_ - clf_coreset.coef_)\n', (13623, 13659), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the udkm1Dsimpy module.
#
# udkm1Dsimpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2017 <NAME>
"""A :mod:`XrayKin` module """
__all__ = ["XrayKin"]
__docformat__ = "restructuredtext"
import numpy as np
from .xray import Xray
from .unitCell import UnitCell
from . import u
from time import time
class XrayKin(Xray):
    r"""Kinematical X-ray diffraction simulations.

    Args:
        S (object): sample to do simulations with
        force_recalc (boolean): force recalculation of results

    Attributes:
        S (object): sample to do simulations with
        force_recalc (boolean): force recalculation of results
        polarization (float): polarization state
    """

    def __init__(self, S, force_recalc, **kwargs):
        super().__init__(S, force_recalc, **kwargs)

    def __str__(self):
        """Return a string representation of the simulation properties."""
        class_str = 'Kinematical X-Ray Diffraction simulation properties:\n\n'
        class_str += super().__str__()
        return class_str

    @u.wraps(None, (None, 'eV', 'm**-1', None), strict=False)
    def get_uc_atomic_form_factors(self, energy, qz, uc):
        r"""Return the energy- and angle-dependent atomic form factors
        :math:`f(q_z, E)` of all atoms in the unit cell as a vector.

        Args:
            energy: photon energy [eV]; must be a scalar
            qz: scattering vectors [1/m]
            uc: unit cell whose atoms are evaluated

        Returns:
            complex ndarray of shape (uc.num_atoms, len(qz))

        Raises:
            TypeError: if ``energy`` is not a scalar or pint quantity
        """
        # NOTE(review): isinstance(energy, object) is always True in Python,
        # so this guard can never raise — the intended check (probably for a
        # pint Quantity) should be confirmed before tightening it.
        if (not np.isscalar(energy)) and (not isinstance(energy, object)):
            raise TypeError('Only scalars or pint quantities for the energy are allowed!')
        f = np.zeros([uc.num_atoms, len(qz)], dtype=complex)
        for i in range(uc.num_atoms):
            f[i, :] = uc.atoms[i][0].get_cm_atomic_form_factor(energy, qz)
        return f

    @u.wraps(None, (None, 'eV', 'm**-1', None, None), strict=False)
    def get_uc_structure_factor(self, energy, qz, uc, strain=0):
        r"""Return the energy-, angle-, and strain-dependent structure
        factor :math:`S(E, q_z, \epsilon)` of the unit cell

        .. math::

            S(E, q_z, \epsilon) = \sum_i^N f_i \, \exp(-i q_z z_i(\epsilon))

        Args:
            energy: photon energy [eV]; must be a scalar
            qz: scattering vectors [1/m]; a scalar is promoted to an array
            uc: unit cell to evaluate
            strain: strain of the unit cell (default 0)

        Returns:
            complex ndarray of shape (len(qz),)

        Raises:
            TypeError: if ``energy`` is not a scalar or pint quantity
        """
        # NOTE(review): as above, the second condition is always False, so the
        # TypeError is effectively unreachable — confirm the intended check.
        if (not np.isscalar(energy)) and (not isinstance(energy, object)):
            raise TypeError('Only scalars or pint quantities for the energy are allowed!')
        if np.isscalar(qz):
            qz = np.array([qz])
        # sum over all atoms of f_i * exp(i * qz * z_i(strain))
        S = np.sum(self.get_uc_atomic_form_factors(energy, qz, uc)
                   * np.exp(1j * uc._c_axis
                            * np.outer(uc.get_atom_positions(strain), qz)), 0)
        return S

    def homogeneous_reflectivity(self, strains=0):
        r"""Return the reflectivity :math:`R = E_p^t\,(E_p^t)^*` of a
        homogeneous sample structure as well as the reflected field
        :math:`E_p^N` of all substructures.

        Args:
            strains: scalar 0 (no strain, the default) or a vector with one
                strain value per sub-structure

        Returns:
            tuple: ``(R, A)`` — reflectivity with the shape of ``self._qz``
            and the nested list of reflected fields of all substructures
        """
        # if no strains are given we assume no strain; test scalar-ness first
        # so an ndarray of strains does not raise "truth value of an array is
        # ambiguous" here (consistent with homogeneous_reflected_field below)
        if np.isscalar(strains) and strains == 0:
            strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
        t1 = time()
        self.disp_message('Calculating _homogenous_reflectivity_ ...')
        # get the reflected field of the structure for each energy
        R = np.zeros_like(self._qz)
        for i, energy in enumerate(self._energy):
            qz = self._qz[i, :]
            theta = self._theta[i, :]
            Ept, A = self.homogeneous_reflected_field(self.S, energy, qz, theta, strains)
            # calculate the real reflectivity from the reflected field
            R[i, :] = np.real(Ept*np.conj(Ept))
        self.disp_message('Elapsed time for _homogenous_reflectivity_: {:f} s'.format(time()-t1))
        return R, A

    @u.wraps((None, None), (None, None, 'eV', 'm**-1', 'rad', None), strict=False)
    def homogeneous_reflected_field(self, S, energy, qz, theta, strains=0):
        r"""Calculate the reflected field :math:`E_p^t` of the whole
        sample structure as well as for each sub structure
        (:math:`E_p^N`). The reflected wave field :math:`E_p` from a
        single layer of unit cells at the detector is calculated as
        follows:[Ref. 1]

        .. math::

            E_p = \frac{i}{\varepsilon_0}\frac{e^2}{m_e c_0^2}
            \frac{P(\vartheta) S(E,q_z,\epsilon)}{A q_z}

        For the case of :math:`N` similar planes of unit cells one can
        write:

        .. math::

            E_p^N = \sum_{n=0}^{N-1} E_p \exp(i q_z z n )

        where :math:`z` is the distance between the planes (c-axis).
        The above equation can be simplified to

        .. math::

            E_p^N = E_p \psi(q_z,z,N)

        introducing the interference function

        .. math::

            \psi(q_z,z,N) = \sum_{n=0}^{N-1} \exp(i q_z z n)
            = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}

        The total reflected wave field of all :math:`i = 1\ldots M`
        homogeneous layers (:math:`E_p^t`) is the phase-correct
        summation of all individual :math:`E_p^{N,i}`:

        .. math::

            E_p^t = \sum_{i=1}^M E_p^{N,i} \exp(i q_z Z_i)

        where :math:`Z_i = \sum_{j=1}^{i-1} N_j z_j` is the distance
        of the i-th layer from the surface.
        """
        # if no strains are given we assume no strain
        if np.isscalar(strains) and strains == 0:
            strains = np.zeros([self.S.get_number_of_sub_structures(), 1])
        K = len(qz)  # number of qz values
        Ept = np.zeros([1, K])  # total reflected field
        Z = 0  # total length of the substructure from the surface
        # NOTE(review): the [0, 2] seed mirrors the original MATLAB cell-array
        # layout of the results list — confirm before changing it.
        A = list([0, 2])  # cell matrix of reflected fields EpN of substructures
        strainCounter = 0  # this is the index of the strain vector if applied
        # traverse substructures
        for i, sub_structures in enumerate(S.sub_structures):
            if isinstance(sub_structures[0], UnitCell):
                # the substructure is a unit cell and we can calculate
                # Ep directly
                Ep = self.get_Ep(energy, qz, theta, sub_structures[0], strains[strainCounter])
                z = sub_structures[0]._c_axis
                strainCounter = strainCounter+1
            else:
                # the substructure is a structure, so we do a recursive
                # call of this method
                d = sub_structures[0].get_number_of_sub_structures()
                Ep, temp = self.homogeneous_reflected_field(
                    sub_structures[0], energy, qz, theta,
                    strains[strainCounter:(strainCounter + d)])
                z = sub_structures[0].get_length().magnitude
                strainCounter = strainCounter + d
                A.append([temp, [sub_structures[0].name + ' substructures']])
            A.append([Ep, '{:d}x {:s}'.format(1, sub_structures[0].name)])
            # calculate the interference function for N repetitions of
            # the substructure with the length z
            psi = self.get_interference_function(qz, z, sub_structures[1])
            # calculate the reflected field for N repetitions of
            # the substructure with the length z
            EpN = Ep * psi
            # remember the result
            A.append([EpN, '{:d}x {:s}'.format(sub_structures[1], sub_structures[0].name)])
            # add the reflected field of the current substructure
            # phase-correct to the already calculated substructures
            Ept = Ept+(EpN*np.exp(1j*qz*Z))
            # update the total length Z of the already calculated
            # substructures
            Z = Z + z*sub_structures[1]
        # add static substrate to kinXRD
        if S.substrate != []:
            temp, temp2 = self.homogeneous_reflected_field(S.substrate, energy, qz, theta)
            A.append([temp2, 'static substrate'])
            Ept = Ept+(temp*np.exp(1j*qz*Z))
        return Ept, A

    @u.wraps(None, (None, 'm**-1', 'm', None), strict=False)
    def get_interference_function(self, qz, z, N):
        r"""Calculate the interference function for :math:`N`
        repetitions of the structure with the length :math:`z`:

        .. math::

            \psi(q_z,z,N) = \sum_{n=0}^{N-1} \exp(i q_z z n)
            = \frac{1- \exp(i q_z z N)}{1- \exp(i q_z z)}
        """
        psi = (1-np.exp(1j*qz*z*N)) / (1 - np.exp(1j*qz*z))
        return psi

    @u.wraps(None, (None, 'eV', 'm**-1', 'rad', None, None), strict=False)
    def get_Ep(self, energy, qz, theta, uc, strain):
        r"""Calculate the reflected field :math:`E_p` for one unit cell
        with a given strain :math:`\epsilon`:

        .. math::

            E_p = \frac{i}{\varepsilon_0} \frac{e^2}{m_e c_0^2}
            \frac{P S(E,q_z,\epsilon)}{A q_z}

        with :math:`e` as electron charge, :math:`m_e` as electron
        mass, :math:`c_0` as vacuum light velocity,
        :math:`\varepsilon_0` as vacuum permittivity,
        :math:`P` as polarization factor and :math:`S(E,q_z,\sigma)`
        as energy-, angle-, and strain-dependent unit cell structure
        factor.
        """
        import scipy.constants as c
        # prefactor i e^2 / (eps_0 m_e c^2) times polarization and structure
        # factor, normalized by unit-cell area and qz
        Ep = 1j/c.epsilon_0*c.elementary_charge**2/c.electron_mass/c.c**2 \
            * (self.get_polarization_factor(theta)
               * self.get_uc_structure_factor(energy, qz, uc, strain)
               / uc._area) / qz
        return Ep
| [
"numpy.conj",
"numpy.zeros_like",
"numpy.isscalar",
"numpy.zeros",
"time.time",
"numpy.array",
"numpy.exp"
] | [((2914, 2929), 'numpy.isscalar', 'np.isscalar', (['qz'], {}), '(qz)\n', (2925, 2929), True, 'import numpy as np\n'), ((3557, 3563), 'time.time', 'time', ([], {}), '()\n', (3561, 3563), False, 'from time import time\n'), ((3714, 3737), 'numpy.zeros_like', 'np.zeros_like', (['self._qz'], {}), '(self._qz)\n', (3727, 3737), True, 'import numpy as np\n'), ((5961, 5977), 'numpy.zeros', 'np.zeros', (['[1, K]'], {}), '([1, K])\n', (5969, 5977), True, 'import numpy as np\n'), ((2948, 2962), 'numpy.array', 'np.array', (['[qz]'], {}), '([qz])\n', (2956, 2962), True, 'import numpy as np\n'), ((5800, 5820), 'numpy.isscalar', 'np.isscalar', (['strains'], {}), '(strains)\n', (5811, 5820), True, 'import numpy as np\n'), ((1990, 2009), 'numpy.isscalar', 'np.isscalar', (['energy'], {}), '(energy)\n', (2001, 2009), True, 'import numpy as np\n'), ((2753, 2772), 'numpy.isscalar', 'np.isscalar', (['energy'], {}), '(energy)\n', (2764, 2772), True, 'import numpy as np\n'), ((8828, 8853), 'numpy.exp', 'np.exp', (['(1.0j * qz * z * N)'], {}), '(1.0j * qz * z * N)\n', (8834, 8853), True, 'import numpy as np\n'), ((8854, 8875), 'numpy.exp', 'np.exp', (['(1.0j * qz * z)'], {}), '(1.0j * qz * z)\n', (8860, 8875), True, 'import numpy as np\n'), ((4036, 4048), 'numpy.conj', 'np.conj', (['Ept'], {}), '(Ept)\n', (4043, 4048), True, 'import numpy as np\n'), ((4136, 4142), 'time.time', 'time', ([], {}), '()\n', (4140, 4142), False, 'from time import time\n'), ((7950, 7971), 'numpy.exp', 'np.exp', (['(1.0j * qz * Z)'], {}), '(1.0j * qz * Z)\n', (7956, 7971), True, 'import numpy as np\n'), ((8345, 8366), 'numpy.exp', 'np.exp', (['(1.0j * qz * Z)'], {}), '(1.0j * qz * Z)\n', (8351, 8366), True, 'import numpy as np\n')] |
"""This is a test script for tracing back beta Pictoris stars.
This code is redundant. Some bits can be incorporated into other sections.
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from chronostar.retired.error_ellipse import plot_cov_ellipse
import chronostar.traceback as traceback
# Interactive plotting so the traceback plots update as they are drawn.
plt.ion()
# Read in numbers for beta Pic. For HIPPARCOS, we have *no* radial velocities in general.
bp=Table.read('data/betaPic.csv')
# Remove bad stars by name. "bp" stands for Beta Pictoris.
# str.find() returns -1 when the substring is absent, so each term is True
# only for names that do NOT contain the given substring.
bp = bp[np.where([ (n.find('6070')<0) & (n.find('12545')<0) & (n.find('Tel')<0) for n in bp['Name']])[0]]
# Traceback epochs: 0, 1, ..., 20 (21 steps).
times = np.linspace(0,20,21)
#Which dimensions do we plot? 0=X, 1=Y, 2=Z
dims = [0,1]
dim1=dims[0]
dim2=dims[1]
# Per-star label offsets for the plot (one entry per star, initially zero).
xoffset = np.zeros(len(bp))
yoffset = np.zeros(len(bp))
# Some hardwired plotting options for the two supported dimension pairs.
if (dims[0]==0) & (dims[1]==1):
    yoffset[0:10] = [6,-8,-6,2,0,-4,0,0,0,-4]
    yoffset[10:] = [0,-8,0,0,6,-6,0,0,0]
    xoffset[10:] = [0,-4,0,0,-15,-10,0,0,-20]
    axis_range = [-70,60,-40,120]
if (dims[0]==1) & (dims[1]==2):
    axis_range = [-40,120,-30,100]
    text_ix = [0,1,4,7]
    xoffset[7]=-15
# Trace back orbits with plotting enabled; results are pickled for later use.
tb = traceback.TraceBack(bp)
tb.traceback(times,xoffset=xoffset, yoffset=yoffset, axis_range=axis_range, dims=dims,plotit=True,savefile="results/traceback_save.pkl")
# Error ellipse for the association. This comes from "fit_group.py".
# XYZ covariance matrix and mean position of the moving group.
xyz_cov = np.array([[ 34.25840977, 35.33697325, 56.24666544],
       [ 35.33697325, 46.18069795, 66.76389275],
       [ 56.24666544, 66.76389275, 109.98883853]])
xyz = [ -6.221, 63.288, 23.408]
# Fancy-index arrays picking the 2x2 covariance sub-matrix of the plotted dims.
cov_ix1 = [[dims[0],dims[1]],[dims[0],dims[1]]]
cov_ix2 = [[dims[0],dims[0]],[dims[1],dims[1]]]
plot_cov_ellipse(xyz_cov[cov_ix1,cov_ix2],[xyz[dim1],xyz[dim2]],alpha=0.5,color='k')
plt.savefig('plots/test_bp_plot.png')
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"chronostar.traceback.TraceBack",
"numpy.array",
"numpy.linspace",
"chronostar.retired.error_ellipse.plot_cov_ellipse",
"astropy.table.Table.read"
] | [((378, 387), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((481, 511), 'astropy.table.Table.read', 'Table.read', (['"""data/betaPic.csv"""'], {}), "('data/betaPic.csv')\n", (491, 511), False, 'from astropy.table import Table\n'), ((676, 698), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(21)'], {}), '(0, 20, 21)\n', (687, 698), True, 'import numpy as np\n'), ((1230, 1253), 'chronostar.traceback.TraceBack', 'traceback.TraceBack', (['bp'], {}), '(bp)\n', (1249, 1253), True, 'import chronostar.traceback as traceback\n'), ((1471, 1610), 'numpy.array', 'np.array', (['[[34.25840977, 35.33697325, 56.24666544], [35.33697325, 46.18069795, \n 66.76389275], [56.24666544, 66.76389275, 109.98883853]]'], {}), '([[34.25840977, 35.33697325, 56.24666544], [35.33697325, \n 46.18069795, 66.76389275], [56.24666544, 66.76389275, 109.98883853]])\n', (1479, 1610), True, 'import numpy as np\n'), ((1765, 1859), 'chronostar.retired.error_ellipse.plot_cov_ellipse', 'plot_cov_ellipse', (['xyz_cov[cov_ix1, cov_ix2]', '[xyz[dim1], xyz[dim2]]'], {'alpha': '(0.5)', 'color': '"""k"""'}), "(xyz_cov[cov_ix1, cov_ix2], [xyz[dim1], xyz[dim2]], alpha=\n 0.5, color='k')\n", (1781, 1859), False, 'from chronostar.retired.error_ellipse import plot_cov_ellipse\n'), ((1851, 1888), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/test_bp_plot.png"""'], {}), "('plots/test_bp_plot.png')\n", (1862, 1888), True, 'import matplotlib.pyplot as plt\n'), ((1889, 1899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1897, 1899), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionAmaxTest(unittest.TestCase):
    """Verify that dolphindb_numpy.amax agrees with numpy.amax for scalars,
    lists, arrays, orca Series and orca DataFrames."""

    @classmethod
    def setUpClass(cls):
        # establish the DolphinDB server connection before any test runs
        orca.connect(HOST, PORT, "admin", "123456")

    def test_function_math_amax_scalar(self):
        # amax of a scalar is the scalar itself in both libraries
        for value in (0.5, 1, -1, 0):
            self.assertEqual(dnp.amax(value), np.amax(value))
        # NaN input yields NaN in both libraries
        self.assertEqual(dnp.isnan(dnp.amax(dnp.nan)), True)
        self.assertEqual(np.isnan(np.amax(np.nan)), True)

    def test_function_math_amax_list(self):
        expected = np.amax([1, 8, 27, -27, 0, 5, np.nan])
        actual = dnp.amax([1, 8, 27, -27, 0, 5, dnp.nan])
        assert_array_equal(actual, expected)

    def test_function_math_amax_array(self):
        expected = np.amax(np.array([1, 8, 27, -27, 0, 5, np.nan]))
        actual = dnp.amax(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]))
        assert_array_equal(actual, expected)

    def test_function_math_amax_series(self):
        # series containing a NaN
        pandas_series = pd.Series([-1, 8, 27, -27, 0, 5, np.nan])
        orca_series = orca.Series(pandas_series)
        self.assertEqual(dnp.amax(orca_series), np.amax(pandas_series))
        # series without NaN
        pandas_series = pd.Series([-1, 8, 27, -27, 0, 5])
        orca_series = orca.Series(pandas_series)
        self.assertEqual(dnp.amax(orca_series), np.amax(pandas_series))

    def test_function_math_amax_dataframe(self):
        # frame with NaN values in both columns
        pandas_df = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],
                      "colb": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})
        orca_df = orca.DataFrame(pandas_df)
        assert_series_equal(dnp.amax(orca_df).to_pandas(), np.amax(pandas_df))
        # frame without NaN values
        pandas_df = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0],
                      "colb": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0]})
        orca_df = orca.DataFrame(pandas_df)
        assert_series_equal(dnp.amax(orca_df).to_pandas(), np.amax(pandas_df))
if __name__ == '__main__':
    # run all test cases in this module when executed directly
    unittest.main()
| [
"unittest.main",
"pandas.DataFrame",
"dolphindb_numpy.array",
"orca.connect",
"orca.Series",
"numpy.amax",
"numpy.array",
"pandas.Series",
"dolphindb_numpy.amax",
"orca.DataFrame"
] | [((2082, 2097), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2095, 2097), False, 'import unittest\n'), ((324, 367), 'orca.connect', 'orca.connect', (['HOST', 'PORT', '"""admin"""', '"""123456"""'], {}), "(HOST, PORT, 'admin', '123456')\n", (336, 367), False, 'import orca\n'), ((799, 837), 'numpy.amax', 'np.amax', (['[1, 8, 27, -27, 0, 5, np.nan]'], {}), '([1, 8, 27, -27, 0, 5, np.nan])\n', (806, 837), True, 'import numpy as np\n'), ((853, 893), 'dolphindb_numpy.amax', 'dnp.amax', (['[1, 8, 27, -27, 0, 5, dnp.nan]'], {}), '([1, 8, 27, -27, 0, 5, dnp.nan])\n', (861, 893), True, 'import dolphindb_numpy as dnp\n'), ((1206, 1247), 'pandas.Series', 'pd.Series', (['[-1, 8, 27, -27, 0, 5, np.nan]'], {}), '([-1, 8, 27, -27, 0, 5, np.nan])\n', (1215, 1247), True, 'import pandas as pd\n'), ((1261, 1276), 'orca.Series', 'orca.Series', (['ps'], {}), '(ps)\n', (1272, 1276), False, 'import orca\n'), ((1343, 1376), 'pandas.Series', 'pd.Series', (['[-1, 8, 27, -27, 0, 5]'], {}), '([-1, 8, 27, -27, 0, 5])\n', (1352, 1376), True, 'import pandas as pd\n'), ((1390, 1405), 'orca.Series', 'orca.Series', (['ps'], {}), '(ps)\n', (1401, 1405), False, 'import orca\n'), ((1522, 1666), 'pandas.DataFrame', 'pd.DataFrame', (["{'cola': [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan], 'colb': [-1,\n 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]}"], {}), "({'cola': [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],\n 'colb': [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})\n", (1534, 1666), True, 'import pandas as pd\n'), ((1705, 1724), 'orca.DataFrame', 'orca.DataFrame', (['pdf'], {}), '(pdf)\n', (1719, 1724), False, 'import orca\n'), ((1809, 1921), 'pandas.DataFrame', 'pd.DataFrame', (["{'cola': [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0], 'colb': [-1, 8, 27, -27, 0,\n 5, 1.5, 1.7, 2.0]}"], {}), "({'cola': [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0], 'colb': [-1, 8,\n 27, -27, 0, 5, 1.5, 1.7, 2.0]})\n", (1821, 1921), True, 'import pandas as pd\n'), ((1960, 1979), 
'orca.DataFrame', 'orca.DataFrame', (['pdf'], {}), '(pdf)\n', (1974, 1979), False, 'import orca\n'), ((440, 453), 'dolphindb_numpy.amax', 'dnp.amax', (['(0.5)'], {}), '(0.5)\n', (448, 453), True, 'import dolphindb_numpy as dnp\n'), ((455, 467), 'numpy.amax', 'np.amax', (['(0.5)'], {}), '(0.5)\n', (462, 467), True, 'import numpy as np\n'), ((494, 505), 'dolphindb_numpy.amax', 'dnp.amax', (['(1)'], {}), '(1)\n', (502, 505), True, 'import dolphindb_numpy as dnp\n'), ((507, 517), 'numpy.amax', 'np.amax', (['(1)'], {}), '(1)\n', (514, 517), True, 'import numpy as np\n'), ((544, 556), 'dolphindb_numpy.amax', 'dnp.amax', (['(-1)'], {}), '(-1)\n', (552, 556), True, 'import dolphindb_numpy as dnp\n'), ((558, 569), 'numpy.amax', 'np.amax', (['(-1)'], {}), '(-1)\n', (565, 569), True, 'import numpy as np\n'), ((596, 607), 'dolphindb_numpy.amax', 'dnp.amax', (['(0)'], {}), '(0)\n', (604, 607), True, 'import dolphindb_numpy as dnp\n'), ((609, 619), 'numpy.amax', 'np.amax', (['(0)'], {}), '(0)\n', (616, 619), True, 'import numpy as np\n'), ((1000, 1039), 'numpy.array', 'np.array', (['[1, 8, 27, -27, 0, 5, np.nan]'], {}), '([1, 8, 27, -27, 0, 5, np.nan])\n', (1008, 1039), True, 'import numpy as np\n'), ((1065, 1106), 'dolphindb_numpy.array', 'dnp.array', (['[1, 8, 27, -27, 0, 5, dnp.nan]'], {}), '([1, 8, 27, -27, 0, 5, dnp.nan])\n', (1074, 1106), True, 'import dolphindb_numpy as dnp\n'), ((1302, 1314), 'dolphindb_numpy.amax', 'dnp.amax', (['os'], {}), '(os)\n', (1310, 1314), True, 'import dolphindb_numpy as dnp\n'), ((1316, 1327), 'numpy.amax', 'np.amax', (['ps'], {}), '(ps)\n', (1323, 1327), True, 'import numpy as np\n'), ((1431, 1443), 'dolphindb_numpy.amax', 'dnp.amax', (['os'], {}), '(os)\n', (1439, 1443), True, 'import dolphindb_numpy as dnp\n'), ((1445, 1456), 'numpy.amax', 'np.amax', (['ps'], {}), '(ps)\n', (1452, 1456), True, 'import numpy as np\n'), ((1780, 1792), 'numpy.amax', 'np.amax', (['pdf'], {}), '(pdf)\n', (1787, 1792), True, 'import numpy as np\n'), ((2035, 
2047), 'numpy.amax', 'np.amax', (['pdf'], {}), '(pdf)\n', (2042, 2047), True, 'import numpy as np\n'), ((656, 673), 'dolphindb_numpy.amax', 'dnp.amax', (['dnp.nan'], {}), '(dnp.nan)\n', (664, 673), True, 'import dolphindb_numpy as dnp\n'), ((716, 731), 'numpy.amax', 'np.amax', (['np.nan'], {}), '(np.nan)\n', (723, 731), True, 'import numpy as np\n'), ((1753, 1766), 'dolphindb_numpy.amax', 'dnp.amax', (['odf'], {}), '(odf)\n', (1761, 1766), True, 'import dolphindb_numpy as dnp\n'), ((2008, 2021), 'dolphindb_numpy.amax', 'dnp.amax', (['odf'], {}), '(odf)\n', (2016, 2021), True, 'import dolphindb_numpy as dnp\n')] |
import os
import tensorflow as tf
from PIL import Image
import sys
import numpy as np
import matplotlib.pyplot as plt
def creat_tf(imgpath, output_file="New 3 labels.tfrecords", img_size=(224, 224)):
    """Build a TFRecord file from a directory tree of class-labelled images.

    Args:
        imgpath: path fragment (prepended with the current working directory)
            whose sub-directories each hold the images of one class.
        output_file: name of the TFRecord file to write (default keeps the
            previously hard-coded "New 3 labels.tfrecords").
        img_size: (width, height) every image is resized to before
            serialization (default keeps the previously hard-coded 224x224).
    """
    cwd = os.getcwd()
    classes = os.listdir(cwd + imgpath)
    # NOTE(review): the directory name is used as the integer label via
    # int(name) below, so every class directory must be named with digits.
    # Context manager guarantees the writer is flushed/closed on error too.
    with tf.python_io.TFRecordWriter(output_file) as writer:
        for index, name in enumerate(classes):
            class_path = cwd + imgpath + name + "/"
            print(class_path)
            if os.path.isdir(class_path):
                for img_name in os.listdir(class_path):
                    img_path = class_path + img_name
                    # close the image file handle deterministically
                    with Image.open(img_path) as img:
                        img = img.resize(img_size)
                        img_raw = img.tobytes()
                    example = tf.train.Example(features=tf.train.Features(feature={
                        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(name)])),
                        'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                    }))
                    writer.write(example.SerializeToString())
                    print(img_name)
def read_example(filename="6 labels.tfrecords"):
    """Iterate over a TFRecord file and print each example's label.

    Args:
        filename: TFRecord file to read (default keeps the previously
            hard-coded "6 labels.tfrecords").
    """
    for serialized_example in tf.python_io.tf_record_iterator(filename):
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        # int64_list.value is a repeated field; print the whole list
        label = example.features.feature['label'].int64_list.value
        print(label)
if __name__ == '__main__':
    # directory (relative to the cwd) holding one sub-directory per class label
    imgpath = '/New 3 labels/'
    creat_tf(imgpath)
    # read_example()
def read_and_decode(filename, batch_size):
    """Build a TF1 input pipeline decoding image/label pairs from a TFRecord.

    Args:
        filename: TFRecord file to read.
        batch_size: number of examples per shuffled batch.

    Returns:
        (image_batch, label_batch): 224x224x3 uint8 image tensor batch and
        an int64 label tensor of shape [batch_size].
    """
    queue = tf.train.string_input_producer([filename])
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(queue)
    parsed = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    # raw bytes back to a 224x224 RGB uint8 image
    image = tf.reshape(tf.decode_raw(parsed['img_raw'], tf.uint8), [224, 224, 3])
    label = tf.cast(parsed['label'], tf.int64)
    image_batch, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=64,
        capacity=2000,
        min_after_dequeue=1500,
    )
    return image_batch, tf.reshape(label_batch, [batch_size])
# Demo: read one 20-image batch from the TFRecord file and display every
# image together with its label.
tfrecords_file = '6 labels.tfrecords'
BATCH_SIZE = 20
image_batch, label_batch = read_and_decode(tfrecords_file, BATCH_SIZE)
with tf.Session() as sess:
    i = 0
    # start the queue-runner threads feeding the shuffle_batch pipeline
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord = coord)
    try:
        # fetch a single batch (the i < 1 guard stops after one iteration)
        while not coord.should_stop() and i<1:
            image, label = sess.run([image_batch, label_batch])
            for j in np.arange(BATCH_SIZE):
                print('label: %d' % label[j])
                plt.imshow(image[j,:,:,:])
                plt.show()
            i += 1
    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        # always ask the queue-runner threads to shut down
        coord.request_stop()
"tensorflow.train.Coordinator",
"tensorflow.train.shuffle_batch",
"tensorflow.reshape",
"tensorflow.decode_raw",
"numpy.arange",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Example",
"matplotlib.pyplot.imshow",
"tensorflow.train.start_queue_runners",
"tensorflow.cast",
"tensorfl... | [((164, 175), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (173, 175), False, 'import os\n'), ((191, 216), 'os.listdir', 'os.listdir', (['(cwd + imgpath)'], {}), '(cwd + imgpath)\n', (201, 216), False, 'import os\n'), ((237, 290), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['"""New 3 labels.tfrecords"""'], {}), "('New 3 labels.tfrecords')\n", (264, 290), True, 'import tensorflow as tf\n'), ((1177, 1230), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', (['"""6 labels.tfrecords"""'], {}), "('6 labels.tfrecords')\n", (1208, 1230), True, 'import tensorflow as tf\n'), ((1622, 1664), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['[filename]'], {}), '([filename])\n', (1652, 1664), True, 'import tensorflow as tf\n'), ((1680, 1699), 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (1697, 1699), True, 'import tensorflow as tf\n'), ((2133, 2177), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['img_raw']", 'tf.uint8'], {}), "(features['img_raw'], tf.uint8)\n", (2146, 2177), True, 'import tensorflow as tf\n'), ((2189, 2219), 'tensorflow.reshape', 'tf.reshape', (['img', '[224, 224, 3]'], {}), '(img, [224, 224, 3])\n', (2199, 2219), True, 'import tensorflow as tf\n'), ((2282, 2318), 'tensorflow.cast', 'tf.cast', (["features['label']", 'tf.int64'], {}), "(features['label'], tf.int64)\n", (2289, 2318), True, 'import tensorflow as tf\n'), ((2356, 2474), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['[img, label]'], {'batch_size': 'batch_size', 'num_threads': '(64)', 'capacity': '(2000)', 'min_after_dequeue': '(1500)'}), '([img, label], batch_size=batch_size, num_threads=64,\n capacity=2000, min_after_dequeue=1500)\n', (2378, 2474), True, 'import tensorflow as tf\n'), ((2947, 2959), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2957, 2959), True, 'import tensorflow as tf\n'), ((3000, 3022), 'tensorflow.train.Coordinator', 
'tf.train.Coordinator', ([], {}), '()\n', (3020, 3022), True, 'import tensorflow as tf\n'), ((3038, 3079), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (3066, 3079), True, 'import tensorflow as tf\n'), ((429, 454), 'os.path.isdir', 'os.path.isdir', (['class_path'], {}), '(class_path)\n', (442, 454), False, 'import os\n'), ((1251, 1269), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (1267, 1269), True, 'import tensorflow as tf\n'), ((2766, 2803), 'tensorflow.reshape', 'tf.reshape', (['label_batch', '[batch_size]'], {}), '(label_batch, [batch_size])\n', (2776, 2803), True, 'import tensorflow as tf\n'), ((485, 507), 'os.listdir', 'os.listdir', (['class_path'], {}), '(class_path)\n', (495, 507), False, 'import os\n'), ((3233, 3254), 'numpy.arange', 'np.arange', (['BATCH_SIZE'], {}), '(BATCH_SIZE)\n', (3242, 3254), True, 'import numpy as np\n'), ((582, 602), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (592, 602), False, 'from PIL import Image\n'), ((1936, 1968), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (1954, 1968), True, 'import tensorflow as tf\n'), ((2026, 2059), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (2044, 2059), True, 'import tensorflow as tf\n'), ((3320, 3349), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[j, :, :, :]'], {}), '(image[j, :, :, :])\n', (3330, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3364, 3374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3372, 3374), True, 'import matplotlib.pyplot as plt\n'), ((952, 987), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[img_raw]'}), '(value=[img_raw])\n', (970, 987), True, 'import tensorflow as tf\n')] |
# Copyright (c) <NAME>, 2020. Distributed under MIT license
"""
The MPDS workflow for AiiDA that gets structure with MPDS query
"""
import os
import time
import random
import numpy as np
from aiida_crystal_dft.utils import get_data_class
from mpds_client import MPDSDataRetrieval, APIError
from .crystal import MPDSCrystalWorkChain
class MPDSStructureWorkChain(MPDSCrystalWorkChain):
    """AiiDA work chain that fetches an initial crystal structure from the
    MPDS database before running the parent CRYSTAL workflow.

    The query is supplied as a ``Dict`` node in the ``mpds_query`` input.
    Among the returned hits, the structures with the fewest atoms are kept
    and the one whose cell is closest to the median cell is selected.
    """
    @classmethod
    def define(cls, spec):
        # Extend the parent spec with our input and MPDS-specific exit codes.
        super(MPDSStructureWorkChain, cls).define(spec)
        # one required input: MPDS phase id
        spec.input('mpds_query', valid_type=get_data_class('dict'), required=True)
        # errors related to MPDS retrieval
        spec.exit_code(501, 'ERROR_NO_MPDS_API_KEY', message='MPDS API key not set')
        spec.exit_code(502, 'ERROR_API_ERROR', message='MPDS API Error')
        spec.exit_code(503, 'ERROR_NO_HITS', message='Request returned nothing')
    def get_geometry(self):
        """ Getting geometry from MPDS database.

        Returns an AiiDA ``StructureData`` node on success, or one of the
        exit codes declared in ``define`` on failure.
        """
        # check for API key in the environment
        api_key = os.getenv('MPDS_KEY')
        if not api_key:
            return self.exit_codes.ERROR_NO_MPDS_API_KEY
        client = MPDSDataRetrieval(api_key=api_key)
        query_dict = self.inputs.mpds_query.get_dict()
        # prepare query: request atomic structures of ordered phases only
        query_dict['props'] = 'atomic structure'
        if 'classes' in query_dict:
            query_dict['classes'] += ', non-disordered'
        else:
            query_dict['classes'] = 'non-disordered'
        try:
            answer = client.get_data(
                query_dict,
                fields={'S': [
                    'cell_abc',
                    'sg_n',
                    'basis_noneq',
                    'els_noneq'
                ]}
            )
        except APIError as ex:
            if ex.code == 429:
                # HTTP 429 (rate limited): back off for a random
                # power-of-two delay (2..32 s) and retry recursively.
                self.logger.warning("Too many parallel MPDS requests, chilling")
                time.sleep(random.choice([2 * 2**m for m in range(5)]))
                return self.get_geometry()
            else:
                self.report(f'MPDS API error: {str(ex)}')
                self.logger.error(f'MPDS API error: {str(ex)}')
                return self.exit_codes.ERROR_API_ERROR
        # build ASE Atoms objects; entries that cannot be compiled are None
        structs = [client.compile_crystal(line, flavor='ase') for line in answer]
        structs = list(filter(None, structs))
        if not structs:
            return self.exit_codes.ERROR_NO_HITS
        minimal_struct = min([len(s) for s in structs])
        # get structures with minimal number of atoms and find the one with median cell vectors
        cells = np.array([s.get_cell().reshape(9) for s in structs if len(s) == minimal_struct])
        median_cell = np.median(cells, axis=0)
        # pick the structure whose cell is closest (Euclidean) to the median
        median_idx = int(np.argmin(np.sum((cells - median_cell) ** 2, axis=1) ** 0.5))
        return get_data_class('structure')(ase=structs[median_idx])
| [
"numpy.sum",
"mpds_client.MPDSDataRetrieval",
"numpy.median",
"os.getenv",
"aiida_crystal_dft.utils.get_data_class"
] | [((1033, 1054), 'os.getenv', 'os.getenv', (['"""MPDS_KEY"""'], {}), "('MPDS_KEY')\n", (1042, 1054), False, 'import os\n'), ((1153, 1187), 'mpds_client.MPDSDataRetrieval', 'MPDSDataRetrieval', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1170, 1187), False, 'from mpds_client import MPDSDataRetrieval, APIError\n'), ((2674, 2698), 'numpy.median', 'np.median', (['cells'], {'axis': '(0)'}), '(cells, axis=0)\n', (2683, 2698), True, 'import numpy as np\n'), ((2801, 2828), 'aiida_crystal_dft.utils.get_data_class', 'get_data_class', (['"""structure"""'], {}), "('structure')\n", (2815, 2828), False, 'from aiida_crystal_dft.utils import get_data_class\n'), ((577, 599), 'aiida_crystal_dft.utils.get_data_class', 'get_data_class', (['"""dict"""'], {}), "('dict')\n", (591, 599), False, 'from aiida_crystal_dft.utils import get_data_class\n'), ((2734, 2776), 'numpy.sum', 'np.sum', (['((cells - median_cell) ** 2)'], {'axis': '(1)'}), '((cells - median_cell) ** 2, axis=1)\n', (2740, 2776), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 Ubisoft La Forge Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pickle
import time
import numpy as np
from functools import partial
from config import METRICS, PLOTS
from utils import estimate_required_time, get_artificial_factors_dataset, \
get_experiment_seeds, get_nb_jobs, get_score, launch_multi_process, plot_curves
'''
Second Experiment of section 5.4: "Modular but not Compact"
'''
# set variables for the experiment
NB_JOBS = get_nb_jobs('max')
NB_RANDOM_REPRESENTATIONS = 100
NB_RUNS = 1
NB_EXAMPLES = 20000
NB_FACTORS = 4
DISTRIBUTION = [np.random.uniform, {'low': 0., 'high': 1.}]
def get_factors_codes_dataset():
    ''' Build the (factors, codes) pair used by the experiment.

    Factors are sampled from the module-level DISTRIBUTION; the codes are
    simply the factors duplicated along the feature axis, which yields a
    perfectly modular but non-compact representation.
    '''
    distribution, distribution_kwargs = DISTRIBUTION
    factors = get_artificial_factors_dataset(nb_examples=NB_EXAMPLES,
                                             nb_factors=NB_FACTORS,
                                             distribution=distribution,
                                             dist_kwargs=distribution_kwargs)
    # duplicate every factor: codes = [factors | factors]
    codes = np.concatenate((factors, factors), axis=1)
    return factors, codes
def run_compactness_experiment(sub_parser_args):
    ''' Run compactness experiment using several metrics and save score results

    :param sub_parser_args: arguments of "run" sub-parser command
        metrics (list): metrics to use in the experiment
        output_dir (string): directory to save metric scores
    '''
    # extract sub-parser arguments
    metrics = sub_parser_args.metrics
    output_dir = sub_parser_args.output_dir
    # seeds to use for the experiment (one per representation/run pair)
    seeds = get_experiment_seeds(nb_representations=NB_RANDOM_REPRESENTATIONS, nb_runs=NB_RUNS)
    # iterate over metrics
    for metric in metrics:
        # track time
        begin = time.time()
        print(f'Running {metric} metric')
        # initialize scores array -- squeeze drops the runs axis when NB_RUNS == 1
        scores_array = np.zeros((NB_RANDOM_REPRESENTATIONS, NB_RUNS)).squeeze()
        # depending on the metric, we can have several scores per representation
        if 'DCI' in metric and 'MIG' not in metric:
            # DCI metric returns Modularity, Compactness and Explicitness scores
            metric_scores = {f'{metric} Mod': scores_array.copy(),
                             f'{metric} Comp': scores_array.copy(),
                             f'{metric} Expl': scores_array.copy()}
        else:
            # only one score is returned
            metric_scores = {f'{metric}': scores_array}
        # set metric function and bind its hyper-parameters once
        metric_func = partial(METRICS[metric]['function'], **METRICS[metric]['kwargs'])
        # get scores using multi-processing
        # (this used to sit inside a degenerate `for _ in range(1)` loop)
        scores = launch_multi_process(iterable=seeds, func=get_score, n_jobs=NB_JOBS, timer_verbose=False,
                                      metric=metric_func, factors_codes_dataset=get_factors_codes_dataset)
        # fill arrays: single-score metrics yield scalars, DCI yields tuples
        for idx, key in enumerate(metric_scores):
            if len(metric_scores) == 1:
                metric_scores[key][:] = list(scores)
            else:
                metric_scores[key][:] = [score[idx] for score in scores]
        # display remaining time
        estimate_required_time(nb_items_in_list=len(seeds),
                               current_index=len(seeds) - 1,
                               time_elapsed=time.time() - begin)
        # save one pickle file per score key
        if output_dir is not None:
            os.makedirs(output_dir, exist_ok=True)  # create output directory
            for key in metric_scores:
                with open(os.path.join(output_dir, key), 'wb') as output_file:
                    pickle.dump({f'{key}': metric_scores[key]}, output_file)
        # display time
        duration = (time.time() - begin) / 60
        print(f'\nTotal time to run experiment on {metric} metric -- {duration:.2f} min')
def scores_table(sub_parser_args):
    ''' Get scores table

    :param sub_parser_args: arguments of "plot" sub-parser command
        output_dir (string): directory to save table plots
    '''
    # extract sub-parser arguments
    output_dir = sub_parser_args.output_dir
    # extract metric scores from the pickle files (skip the .json summary)
    scores = {}
    score_files = [os.path.join(output_dir, x) for x in os.listdir(output_dir) if not x.endswith('.json')]
    for score_file in score_files:
        # `infile` avoids shadowing the built-in `input`
        with open(score_file, 'rb') as infile:
            metric_scores = pickle.load(infile)
        scores.update(metric_scores)
    # compute mean score per metric
    for metric, values in scores.items():
        scores[metric] = np.mean(values).item()
    # print to console results for easier overleaf update
    overleaf_mean = ''
    for _, family_metrics in PLOTS['FAMILIES'].items():
        for metric in family_metrics:
            if metric in scores:
                mean = scores[metric]
                overleaf_mean = f'{overleaf_mean} & {mean:.1f}'
            else:
                # metric missing from the results: print a placeholder
                overleaf_mean = f'{overleaf_mean} & -'
    print(overleaf_mean.strip())
    # save dict to .json (values formatted to one decimal place)
    output_file = os.path.join(output_dir, 'scores.json')
    for metric in scores:
        mean = scores[metric]
        scores[metric] = f'{mean:.1f}'
    with open(output_file, 'w') as fp:
        json.dump(scores, fp, indent=4, sort_keys=True)
if __name__ == "__main__":
# project ROOT
FILE_ROOT = os.path.dirname(os.path.realpath(__file__))
PROJECT_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(FILE_ROOT)))
# default metrics and default output directory
metrics = [metric for metric in METRICS]
output_dir = os.path.join(PROJECT_ROOT, 'results', 'section5.4_compactness2')
# create parser
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# parser for the "run" command -- run compactness experiment
parser_run = subparsers.add_parser('run', help='compute scores')
parser_run.set_defaults(func=run_compactness_experiment)
parser_run.add_argument('--metrics', nargs='+', default=metrics, required=False,
help='metrics to use to compute scores: "metric_1" ... "metric_N"')
parser_run.add_argument('--output_dir', type=str, default=output_dir, required=False,
help='output directory to store scores results')
# parser fot the "plot" command -- plot table of metric scores
parser_table = subparsers.add_parser('plot', help='scores table')
parser_table.set_defaults(func=scores_table)
parser_table.add_argument('--output_dir', type=str, default=output_dir, required=False,
help='output directory to store scores table')
args = parser.parse_args()
args.func(args)
| [
"functools.partial",
"json.dump",
"utils.launch_multi_process",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"utils.get_nb_jobs",
"os.path.realpath",
"os.path.dirname",
"numpy.zeros",
"time.time",
"utils.get_artificial_factors_dataset",
"pickle.load",
"numpy.mean",
"utils.get... | [((1056, 1074), 'utils.get_nb_jobs', 'get_nb_jobs', (['"""max"""'], {}), "('max')\n", (1067, 1074), False, 'from utils import estimate_required_time, get_artificial_factors_dataset, get_experiment_seeds, get_nb_jobs, get_score, launch_multi_process, plot_curves\n'), ((1374, 1501), 'utils.get_artificial_factors_dataset', 'get_artificial_factors_dataset', ([], {'nb_examples': 'NB_EXAMPLES', 'nb_factors': 'NB_FACTORS', 'distribution': 'dist', 'dist_kwargs': 'dist_kwargs'}), '(nb_examples=NB_EXAMPLES, nb_factors=\n NB_FACTORS, distribution=dist, dist_kwargs=dist_kwargs)\n', (1404, 1501), False, 'from utils import estimate_required_time, get_artificial_factors_dataset, get_experiment_seeds, get_nb_jobs, get_score, launch_multi_process, plot_curves\n'), ((1603, 1645), 'numpy.concatenate', 'np.concatenate', (['(factors, factors)'], {'axis': '(1)'}), '((factors, factors), axis=1)\n', (1617, 1645), True, 'import numpy as np\n'), ((2240, 2328), 'utils.get_experiment_seeds', 'get_experiment_seeds', ([], {'nb_representations': 'NB_RANDOM_REPRESENTATIONS', 'nb_runs': 'NB_RUNS'}), '(nb_representations=NB_RANDOM_REPRESENTATIONS, nb_runs=\n NB_RUNS)\n', (2260, 2328), False, 'from utils import estimate_required_time, get_artificial_factors_dataset, get_experiment_seeds, get_nb_jobs, get_score, launch_multi_process, plot_curves\n'), ((5963, 6002), 'os.path.join', 'os.path.join', (['output_dir', '"""scores.json"""'], {}), "(output_dir, 'scores.json')\n", (5975, 6002), False, 'import os\n'), ((6500, 6564), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""results"""', '"""section5.4_compactness2"""'], {}), "(PROJECT_ROOT, 'results', 'section5.4_compactness2')\n", (6512, 6564), False, 'import os\n'), ((6603, 6628), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6626, 6628), False, 'import argparse\n'), ((2420, 2431), 'time.time', 'time.time', ([], {}), '()\n', (2429, 2431), False, 'import time\n'), ((3317, 3354), 'functools.partial', 
'partial', (['metric_func'], {}), '(metric_func, **metric_kwargs)\n', (3324, 3354), False, 'from functools import partial\n'), ((5115, 5142), 'os.path.join', 'os.path.join', (['output_dir', 'x'], {}), '(output_dir, x)\n', (5127, 5142), False, 'import os\n'), ((6145, 6192), 'json.dump', 'json.dump', (['scores', 'fp'], {'indent': '(4)', 'sort_keys': '(True)'}), '(scores, fp, indent=4, sort_keys=True)\n', (6154, 6192), False, 'import json\n'), ((6273, 6299), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (6289, 6299), False, 'import os\n'), ((3481, 3648), 'utils.launch_multi_process', 'launch_multi_process', ([], {'iterable': 'seeds', 'func': 'get_score', 'n_jobs': 'NB_JOBS', 'timer_verbose': '(False)', 'metric': 'metric_func', 'factors_codes_dataset': 'get_factors_codes_dataset'}), '(iterable=seeds, func=get_score, n_jobs=NB_JOBS,\n timer_verbose=False, metric=metric_func, factors_codes_dataset=\n get_factors_codes_dataset)\n', (3501, 3648), False, 'from utils import estimate_required_time, get_artificial_factors_dataset, get_experiment_seeds, get_nb_jobs, get_score, launch_multi_process, plot_curves\n'), ((4322, 4360), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (4333, 4360), False, 'import os\n'), ((5152, 5174), 'os.listdir', 'os.listdir', (['output_dir'], {}), '(output_dir)\n', (5162, 5174), False, 'import os\n'), ((5301, 5319), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (5312, 5319), False, 'import pickle\n'), ((6353, 6379), 'os.path.dirname', 'os.path.dirname', (['FILE_ROOT'], {}), '(FILE_ROOT)\n', (6368, 6379), False, 'import os\n'), ((2540, 2586), 'numpy.zeros', 'np.zeros', (['(NB_RANDOM_REPRESENTATIONS, NB_RUNS)'], {}), '((NB_RANDOM_REPRESENTATIONS, NB_RUNS))\n', (2548, 2586), True, 'import numpy as np\n'), ((4624, 4635), 'time.time', 'time.time', ([], {}), '()\n', (4633, 4635), False, 'import time\n'), ((5479, 5500), 'numpy.mean', 'np.mean', 
(['scores_array'], {}), '(scores_array)\n', (5486, 5500), True, 'import numpy as np\n'), ((4520, 4571), 'pickle.dump', 'pickle.dump', (["{f'{key}': metric_scores[key]}", 'output'], {}), "({f'{key}': metric_scores[key]}, output)\n", (4531, 4571), False, 'import pickle\n'), ((4217, 4228), 'time.time', 'time.time', ([], {}), '()\n', (4226, 4228), False, 'import time\n'), ((4452, 4481), 'os.path.join', 'os.path.join', (['output_dir', 'key'], {}), '(output_dir, key)\n', (4464, 4481), False, 'import os\n')] |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.scf."""
from typing import List, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from ferminet.utils import scf
from ferminet.utils import system
import numpy as np
import pyscf
class ScfTest(parameterized.TestCase):
  """Smoke tests for the pyscf-backed SCF wrapper in ferminet.utils.scf."""
  def setUp(self):
    super(ScfTest, self).setUp()
    # disable use of temp directory in pyscf.
    # Test calculations are small enough to fit in RAM and we don't need
    # checkpoint files.
    pyscf.lib.param.TMPDIR = None
  @parameterized.parameters(
      {
          'molecule': [system.Atom('He', (0, 0, 0))],
          'nelectrons': (1, 1)
      },
      {
          'molecule': [system.Atom('N', (0, 0, 0))],
          'nelectrons': (5, 2)
      },
      {
          'molecule': [system.Atom('N', (0, 0, 0))],
          'nelectrons': (5, 3)
      },
      {
          'molecule': [system.Atom('N', (0, 0, 0))],
          'nelectrons': (4, 2)
      },
      {
          'molecule': [system.Atom('O', (0, 0, 0))],
          'nelectrons': (5, 3),
          'restricted': False,
      },
      {
          'molecule': [
              system.Atom('N', (0, 0, 0)),
              system.Atom('N', (0, 0, 1.4))
          ],
          'nelectrons': (7, 7)
      },
      {
          'molecule': [
              system.Atom('O', (0, 0, 0)),
              system.Atom('O', (0, 0, 1.4))
          ],
          'nelectrons': (9, 7),
          'restricted': False,
      },
  )
  def test_scf_interface(self,
                         molecule: List[system.Atom],
                         nelectrons: Tuple[int, int],
                         restricted: bool = True):
    """Tests SCF interface to a pyscf calculation.
    pyscf has its own tests so only check that we can run calculations over
    atoms and simple diatomics using the interface in ferminet.scf.
    Args:
      molecule: List of system.Atom objects giving atoms in the molecule.
      nelectrons: Tuple containing number of alpha and beta electrons.
      restricted: If true, run a restricted Hartree-Fock calculation, otherwise
        run an unrestricted Hartree-Fock calculation.
    """
    # Evaluate the molecular orbitals at a batch of random points.
    npts = 100
    xs = np.random.randn(npts, 3)
    hf = scf.Scf(molecule=molecule,
                 nelectrons=nelectrons,
                 restricted=restricted)
    hf.run()
    mo_vals = hf.eval_mos(xs)
    self.assertLen(mo_vals, 2)  # alpha-spin orbitals and beta-spin orbitals.
    for spin_mo_vals in mo_vals:
      # Evaluate npts points on M orbitals/functions - (npts, M) array.
      self.assertEqual(spin_mo_vals.shape, (npts, hf._mol.nao_nr()))
# Run all tests via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"ferminet.utils.system.Atom",
"absl.testing.absltest.main",
"ferminet.utils.scf.Scf",
"numpy.random.randn"
] | [((3267, 3282), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3280, 3282), False, 'from absl.testing import absltest\n'), ((2801, 2825), 'numpy.random.randn', 'np.random.randn', (['npts', '(3)'], {}), '(npts, 3)\n', (2816, 2825), True, 'import numpy as np\n'), ((2835, 2907), 'ferminet.utils.scf.Scf', 'scf.Scf', ([], {'molecule': 'molecule', 'nelectrons': 'nelectrons', 'restricted': 'restricted'}), '(molecule=molecule, nelectrons=nelectrons, restricted=restricted)\n', (2842, 2907), False, 'from ferminet.utils import scf\n'), ((1206, 1234), 'ferminet.utils.system.Atom', 'system.Atom', (['"""He"""', '(0, 0, 0)'], {}), "('He', (0, 0, 0))\n", (1217, 1234), False, 'from ferminet.utils import system\n'), ((1308, 1335), 'ferminet.utils.system.Atom', 'system.Atom', (['"""N"""', '(0, 0, 0)'], {}), "('N', (0, 0, 0))\n", (1319, 1335), False, 'from ferminet.utils import system\n'), ((1409, 1436), 'ferminet.utils.system.Atom', 'system.Atom', (['"""N"""', '(0, 0, 0)'], {}), "('N', (0, 0, 0))\n", (1420, 1436), False, 'from ferminet.utils import system\n'), ((1510, 1537), 'ferminet.utils.system.Atom', 'system.Atom', (['"""N"""', '(0, 0, 0)'], {}), "('N', (0, 0, 0))\n", (1521, 1537), False, 'from ferminet.utils import system\n'), ((1611, 1638), 'ferminet.utils.system.Atom', 'system.Atom', (['"""O"""', '(0, 0, 0)'], {}), "('O', (0, 0, 0))\n", (1622, 1638), False, 'from ferminet.utils import system\n'), ((1759, 1786), 'ferminet.utils.system.Atom', 'system.Atom', (['"""N"""', '(0, 0, 0)'], {}), "('N', (0, 0, 0))\n", (1770, 1786), False, 'from ferminet.utils import system\n'), ((1802, 1831), 'ferminet.utils.system.Atom', 'system.Atom', (['"""N"""', '(0, 0, 1.4)'], {}), "('N', (0, 0, 1.4))\n", (1813, 1831), False, 'from ferminet.utils import system\n'), ((1931, 1958), 'ferminet.utils.system.Atom', 'system.Atom', (['"""O"""', '(0, 0, 0)'], {}), "('O', (0, 0, 0))\n", (1942, 1958), False, 'from ferminet.utils import system\n'), ((1974, 2003), 
'ferminet.utils.system.Atom', 'system.Atom', (['"""O"""', '(0, 0, 1.4)'], {}), "('O', (0, 0, 1.4))\n", (1985, 2003), False, 'from ferminet.utils import system\n')] |
import os
import pygame
import torch
import numpy as np
from PIL import Image
from lib.CAModel2 import CAModel2
from lib.displayer import displayer
from lib.utils import mat_distance
from lib.CAModel import CAModel
from lib.utils_vis import to_rgb, make_seed
from unpad import replicate_edges
def get_mask(mask_path='data/pol.jpg', map_shape=(72,72), save=False):
    """Load an image, resize it and threshold its first channel into a mask.

    Pixels whose first-channel value is below 200 become True. The result has
    shape (H, W, 1) so it broadcasts against multi-channel state arrays.
    Setting ``save`` dumps the intermediate boolean image to ``tmp.png``.
    """
    image = Image.open(mask_path).resize(map_shape)
    pixels = np.array(image)
    binary = pixels[:, :, 0] < 200
    if save:
        Image.fromarray(binary).save('tmp.png')
    return np.expand_dims(binary, -1)
def run_sim(model_path, save_dir=None, mask_path=None):
    '''Run an interactive neural cellular-automata simulation.

    Holding the left mouse button erases cells around the cursor; closing
    the window stops the loop.

    :param model_path: path to the trained CAModel2 state dict
    :param save_dir: if given, each rendered frame is saved there as a PNG
    :param mask_path: optional image whose dark pixels confine the pattern
    '''
    eraser_radius = 15
    pix_size = 4
    map_shape = (120, 120)
    CHANNEL_N = 16
    CELL_FIRE_RATE = 0.2
    device = torch.device("cpu")
    # BUG FIX: without a mask the original left `mask` undefined and crashed
    # at the `* mask` below; the scalar 1 makes the multiplication a no-op.
    mask = get_mask(mask_path, map_shape=map_shape) if mask_path else 1
    # pixel coordinates of every cell, used for the eraser distance test
    rows = np.arange(map_shape[0]).repeat(map_shape[1]).reshape([map_shape[0], map_shape[1]])
    cols = np.arange(map_shape[1]).reshape([1, -1]).repeat(map_shape[0], axis=0)
    map_pos = np.array([rows, cols]).transpose([1, 2, 0])
    map = make_seed(map_shape, CHANNEL_N)
    model = CAModel2(CHANNEL_N, CELL_FIRE_RATE, device).to(device)
    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    output = model(torch.from_numpy(map.reshape([1, map_shape[0], map_shape[1], CHANNEL_N]).astype(np.float32)), 1)
    disp = displayer(map_shape, pix_size)
    isMouseDown = False
    running = True
    c = 0
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    isMouseDown = True
            elif event.type == pygame.MOUSEBUTTONUP:
                if event.button == 1:
                    isMouseDown = False
        if isMouseDown:
            try:
                mouse_pos = np.array([int(event.pos[1]/pix_size), int(event.pos[0]/pix_size)])
                should_keep = (mat_distance(map_pos, mouse_pos)>eraser_radius).reshape([map_shape[0],map_shape[1],1])
                # BUG FIX: derive the erased state from the current `output`
                # *before* replicating edges. The original read a stale `arr`
                # (NameError on the very first click) and then immediately
                # overwrote the replicate_edges result.
                arr = output.detach().numpy() * should_keep  # *mask
                arr = replicate_edges(arr, {1: 2, 2: 2})
                output = torch.from_numpy(arr)
            except AttributeError:
                # the last pygame event may not carry a `pos` attribute
                pass
        # NOTE(review): `arr` computed here is never fed back into `output`;
        # kept for parity with the original code -- confirm intent.
        arr = output.detach().numpy()  # * mask
        arr = replicate_edges(arr, {1: 2, 2: 2})
        output = model(output)
        map = to_rgb(output.detach().numpy()[0])*mask
        if save_dir:
            im = Image.fromarray((map*255).astype(np.uint8))
            im.save(os.path.join(save_dir, '{}.png'.format(str(c).zfill(5))))
            c += 1
        disp.update(map)
if __name__ == '__main__':
run_sim('models/remaster_3.pth', mask_path='data/pol.jpg', save_dir='out/grow_pol_2') | [
"lib.utils.mat_distance",
"lib.CAModel2.CAModel2",
"pygame.event.get",
"lib.utils_vis.make_seed",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.array",
"lib.displayer.displayer",
"numpy.arange",
"torch.device",
"PIL.Image.fromarray",
"unpad.replicate_edges",
"torch.from_numpy"
] | [((376, 397), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (386, 397), False, 'from PIL import Image\n'), ((441, 453), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (449, 453), True, 'import numpy as np\n'), ((575, 601), 'numpy.expand_dims', 'np.expand_dims', (['im_arr', '(-1)'], {}), '(im_arr, -1)\n', (589, 601), True, 'import numpy as np\n'), ((787, 806), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (799, 806), False, 'import torch\n'), ((1125, 1156), 'lib.utils_vis.make_seed', 'make_seed', (['map_shape', 'CHANNEL_N'], {}), '(map_shape, CHANNEL_N)\n', (1134, 1156), False, 'from lib.utils_vis import to_rgb, make_seed\n'), ((1437, 1467), 'lib.displayer.displayer', 'displayer', (['map_shape', 'pix_size'], {}), '(map_shape, pix_size)\n', (1446, 1467), False, 'from lib.displayer import displayer\n'), ((513, 536), 'PIL.Image.fromarray', 'Image.fromarray', (['im_arr'], {}), '(im_arr)\n', (528, 536), False, 'from PIL import Image\n'), ((1564, 1582), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1580, 1582), False, 'import pygame\n'), ((2463, 2501), 'unpad.replicate_edges', 'replicate_edges', (['arr', '{(1): 2, (2): 2}'], {}), '(arr, {(1): 2, (2): 2})\n', (2478, 2501), False, 'from unpad import replicate_edges\n'), ((1070, 1092), 'numpy.array', 'np.array', (['[rows, cols]'], {}), '([rows, cols])\n', (1078, 1092), True, 'import numpy as np\n'), ((1170, 1213), 'lib.CAModel2.CAModel2', 'CAModel2', (['CHANNEL_N', 'CELL_FIRE_RATE', 'device'], {}), '(CHANNEL_N, CELL_FIRE_RATE, device)\n', (1178, 1213), False, 'from lib.CAModel2 import CAModel2\n'), ((1287, 1306), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1299, 1306), False, 'import torch\n'), ((2200, 2238), 'unpad.replicate_edges', 'replicate_edges', (['arr', '{(1): 2, (2): 2}'], {}), '(arr, {(1): 2, (2): 2})\n', (2215, 2238), False, 'from unpad import replicate_edges\n'), ((2325, 2346), 'torch.from_numpy', 'torch.from_numpy', (['arr'], 
{}), '(arr)\n', (2341, 2346), False, 'import torch\n'), ((892, 915), 'numpy.arange', 'np.arange', (['map_shape[0]'], {}), '(map_shape[0])\n', (901, 915), True, 'import numpy as np\n'), ((986, 1009), 'numpy.arange', 'np.arange', (['map_shape[1]'], {}), '(map_shape[1])\n', (995, 1009), True, 'import numpy as np\n'), ((2091, 2123), 'lib.utils.mat_distance', 'mat_distance', (['map_pos', 'mouse_pos'], {}), '(map_pos, mouse_pos)\n', (2103, 2123), False, 'from lib.utils import mat_distance\n')] |
import os
from operator import itemgetter
from collections import defaultdict
import numpy as np
from tqdm import tqdm
from .base import Base
from ..utils.misc import time_block, colorize
from ..evaluation.evaluate import EvalMixin
from ..embedding import Item2Vec
class KnnEmbedding(Base, EvalMixin):
    """Item-KNN recommender built on top of learned item embeddings.

    Item vectors are trained by an embedding algorithm (currently Item2Vec);
    item-item similarity is the cosine between their vectors, and a user's
    score for an item aggregates the similarities to the items the user has
    consumed. Only the "ranking" task is supported.
    """
    def __init__(
            self,
            task="ranking",
            data_info=None,
            embedding_method=None,
            embed_size=16,
            window_size=None,
            k=10,
            seed=42,
            lower_upper_bound=None
    ):
        # task: must be "ranking" (enforced in `fit`)
        # data_info: dataset metadata (n_users, n_items, user_consumed, ...)
        # embedding_method: name of the embedding backend, e.g. "item2vec"
        # embed_size: dimensionality of the item vectors
        # window_size: context window passed to the embedding algorithm
        # k: number of nearest neighbours used for scoring
        Base.__init__(self, task, data_info, lower_upper_bound)
        EvalMixin.__init__(self, task, data_info)
        self.task = task
        self.data_info = data_info
        self.embed_size = embed_size
        self.window_size = window_size
        self.k = k
        self.seed = seed
        self.embed_algo = self._choose_embedding_algo(embedding_method)
        self.n_users = data_info.n_users
        self.n_items = data_info.n_items
        self.user_consumed = data_info.user_consumed
        # per-item sorted (item, similarity) pairs, filled by _compute_topk
        self.topk_sim = None
        self.item_vectors = None
        # lazily computed L2 norms (see the `item_norms` property)
        self._item_norms = None
        # caps how many "no recommendation" warnings are printed
        self.print_count = 0
        self.all_args = locals()
    def fit(self, train_data, n_threads=0, verbose=1, eval_data=None,
            metrics=None, store_top_k=True):
        """Train the embedding and optionally precompute the top-k table.

        `train_data` is unused here: the embedding backend was already
        configured with `data_info` in `__init__`.
        """
        assert self.task == "ranking", (
            "KNNEmbedding model is only suitable for ranking"
        )
        self.show_start_time()
        self.embed_algo.fit(n_threads, verbose)
        self.item_vectors = self.embed_algo.item_vectors
        if store_top_k:
            self._compute_topk()
        if verbose > 1:
            self.print_metrics(eval_data=eval_data, metrics=metrics)
            print("=" * 30)
    def predict(self, user, item, cold="popular", inner_id=False):
        """Predict a score for each (user, item) pair.

        The score is the mean similarity between `item` and the (up to) `k`
        most similar items the user has consumed.
        """
        user, item = self.convert_id(user, item, inner_id)
        unknown_num, unknown_index, user, item = self._check_unknown(user, item)
        if unknown_num > 0 and cold != "popular":
            raise ValueError("KnnEmbedding only supports popular strategy.")
        preds = []
        for u, i in zip(user, item):
            user_interacted = self.user_consumed[u]
            # use at most k neighbours; fewer if the user consumed < k items
            num = (
                len(user_interacted)
                if len(user_interacted) < self.k
                else self.k
            )
            interacted_sims = self._compute_sim(i, user_interacted)
            # np.partition leaves the `num` largest sims in the tail slice
            k_sims = np.partition(interacted_sims, -num)[-num:]
            preds.append(np.mean(k_sims))  # max ?
        if unknown_num > 0:
            # NOTE(review): `preds` is a plain list; if `unknown_index` holds
            # several positions this assignment looks wrong -- confirm the
            # contract of `_check_unknown`.
            preds[unknown_index] = self.default_prediction
        return preds[0] if len(user) == 1 else preds
    def recommend_user(self, user, n_rec, random_rec=False,
                       cold_start="popular", inner_id=False):
        """Recommend `n_rec` unseen items for `user`.

        Candidate items accumulate the similarities of the user's consumed
        items; unknown users fall back to globally popular items.
        """
        user_id = self._check_unknown_user(user, inner_id)
        if user_id is None:
            if cold_start == "popular":
                return self.popular_recommends(inner_id, n_rec)
            elif cold_start != "popular":
                raise ValueError("KnnEmbedding only supports popular strategy.")
            else:
                raise ValueError(user)
        u_consumed = set(self.user_consumed[user])
        user_interacted = self.user_consumed[user]
        result = defaultdict(lambda: 0.)
        for i in user_interacted:
            # use the precomputed top-k table when available
            item_sim_topk = (
                self.topk_sim[i]
                if self.topk_sim is not None
                else self.sort_topk_items(i)
            )
            for j, sim in item_sim_topk:
                if j in u_consumed:
                    continue
                result[j] += sim
        if len(result) == 0:
            self.print_count += 1
            no_str = (f"no suitable recommendation for user {user}, "
                      f"return default recommendation")
            if self.print_count < 7:
                print(f"{colorize(no_str, 'red')}")
            return self.data_info.popular_items[:n_rec]
        rank_items = [(k, v) for k, v in result.items()]
        rank_items.sort(key=lambda x: -x[1])
        return rank_items[:n_rec]
    def _compute_sim(self, item, u_interacted_items):
        # cosine similarity between `item` and each interacted item
        sim = self.item_vectors[item].dot(
            self.item_vectors[u_interacted_items].T
        ) / (self.item_norms[item] * self.item_norms[u_interacted_items])
        return sim
    def sort_topk_items(self, item):
        """Return the k items most similar to `item` as (id, sim) pairs,
        sorted by descending cosine similarity."""
        sim = self.item_vectors[item].dot(self.item_vectors.T) / (
            self.item_norms[item] * self.item_norms
        )
        # argpartition selects the k largest, then sort just those k
        ids = np.argpartition(sim, -self.k)[-self.k:]
        sorted_result = sorted(
            zip(ids, sim[ids]),
            key=itemgetter(1),
            reverse=True
        )
        return sorted_result
    def _compute_topk(self):
        # precompute the sorted top-k similar items for every item
        top_k = []
        for i in tqdm(range(self.n_items), desc="top_k"):
            top_k.append(self.sort_topk_items(i))
        self.topk_sim = np.asarray(top_k)
    def _choose_embedding_algo(self, embedding_method):
        # factory for the embedding backend; only Item2Vec is implemented
        if embedding_method.lower().startswith("item2vec"):
            return Item2Vec(
                self.data_info, self.embed_size, self.window_size, self.seed
            )
        else:
            raise ValueError(f"{embedding_method} not implemented, yet.")
    @property
    def item_norms(self):
        """L2 norms of the item vectors, computed lazily and cached.

        Zero norms are replaced by a tiny epsilon so the cosine similarity
        never divides by zero."""
        if self._item_norms is None:
            self._item_norms = np.linalg.norm(self.item_vectors, axis=-1)
            self._item_norms[self._item_norms == 0] = 1e-10
        return self._item_norms
    def save(self, path, model_name, **kwargs):
        raise NotImplementedError("KnnEmbedding doesn't support model saving.")
    @classmethod
    def load(cls, path, model_name, data_info, **kwargs):
        raise NotImplementedError("KnnEmbedding doesn't support model loading.")
    def rebuild_graph(self, path, model_name, full_assign=False):
        raise NotImplementedError(
            "KnnEmbedding doesn't support model retraining")
class KnnEmbeddingApproximate(KnnEmbedding):
    """KnnEmbedding variant using hnswlib for approximate nearest-neighbour
    search instead of exact brute-force cosine similarity."""
    def __init__(
            self,
            task="ranking",
            data_info=None,
            embedding_method=None,
            embed_size=16,
            window_size=None,
            k=10,
            seed=42,
            lower_upper_bound=None
    ):
        super(KnnEmbeddingApproximate, self).__init__(
            task,
            data_info,
            embedding_method,
            embed_size,
            window_size,
            k,
            seed,
            lower_upper_bound
        )
        # hnswlib index, created in `build_approximate_search`
        self.approximate_algo = None
    def fit(self, train_data, n_threads=0, verbose=1, eval_data=None,
            metrics=None, store_top_k=True):
        """Train the embedding, build the HNSW index and optionally
        precompute the top-k table."""
        assert self.task == "ranking", (
            "KNNEmbedding model is only suitable for ranking"
        )
        self.show_start_time()
        self.embed_algo.fit(n_threads, verbose)
        self.item_vectors = self.embed_algo.item_vectors
        self.build_approximate_search(n_threads, verbose)
        if store_top_k:
            self._compute_topk()
        if verbose > 1:
            self.print_metrics(eval_data=eval_data, metrics=metrics)
            print("=" * 30)
    def build_approximate_search(self, n_threads, verbose):
        """Build an HNSW index over the item vectors in cosine space."""
        try:
            import hnswlib
        except ModuleNotFoundError:
            print_str = "hnswlib is needed when using approximate_search..."
            print(f"{colorize(print_str, 'red')}")
            raise
        data_labels = np.arange(self.n_items)
        self.approximate_algo = hnswlib.Index(
            space="cosine", dim=self.embed_size
        )
        # ef_construction / M trade index quality against build time
        self.approximate_algo.init_index(
            max_elements=self.n_items, ef_construction=200, M=32
        )
        with time_block("approximate search init", verbose):
            self.approximate_algo.add_items(
                data=self.item_vectors,
                ids=data_labels,
                num_threads=os.cpu_count() if not n_threads else n_threads
            )
        # ef: query-time trade-off between speed and recall
        self.approximate_algo.set_ef(64)
    # def _compute_sim(self, item):
    #    ids, sim = self.approximate_algo.knn_query(
    #        self.item_vectors[item], k=self.n_items
    #    )
    #    return sim[0][np.argsort(ids[0])]
    def sort_topk_items(self, item):
        # NOTE(review): in "cosine" space hnswlib returns *distances*
        # (1 - similarity), while the exact parent class returns
        # similarities -- confirm downstream ranking expects this.
        ids, sim = self.approximate_algo.knn_query(
            self.item_vectors[item], k=self.k
        )
        return list(zip(ids[0], sim[0]))
    def _compute_topk(self):
        # knn_query over the whole matrix returns (ids, dists);
        # stack to (2, n_items, k) then transpose to (n_items, k, 2)
        top_k = self.approximate_algo.knn_query(self.item_vectors, k=self.k)
        top_k = np.stack(top_k, axis=0)
        self.topk_sim = np.transpose(top_k, [1, 2, 0])
    def save(self, path, model_name, **kwargs):
        raise NotImplementedError("KnnEmbedding doesn't support model saving.")
    @classmethod
    def load(cls, path, model_name, data_info, **kwargs):
        raise NotImplementedError("KnnEmbedding doesn't support model loading.")
    def rebuild_graph(self, path, model_name, full_assign=False):
        raise NotImplementedError(
            "KnnEmbedding doesn't support model retraining")
| [
"numpy.stack",
"numpy.partition",
"numpy.asarray",
"numpy.transpose",
"collections.defaultdict",
"numpy.argpartition",
"os.cpu_count",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"hnswlib.Index",
"operator.itemgetter"
] | [((3303, 3328), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (3314, 3328), False, 'from collections import defaultdict\n'), ((4972, 4989), 'numpy.asarray', 'np.asarray', (['top_k'], {}), '(top_k)\n', (4982, 4989), True, 'import numpy as np\n'), ((7513, 7536), 'numpy.arange', 'np.arange', (['self.n_items'], {}), '(self.n_items)\n', (7522, 7536), True, 'import numpy as np\n'), ((7569, 7619), 'hnswlib.Index', 'hnswlib.Index', ([], {'space': '"""cosine"""', 'dim': 'self.embed_size'}), "(space='cosine', dim=self.embed_size)\n", (7582, 7619), False, 'import hnswlib\n'), ((8579, 8602), 'numpy.stack', 'np.stack', (['top_k'], {'axis': '(0)'}), '(top_k, axis=0)\n', (8587, 8602), True, 'import numpy as np\n'), ((8627, 8657), 'numpy.transpose', 'np.transpose', (['top_k', '[1, 2, 0]'], {}), '(top_k, [1, 2, 0])\n', (8639, 8657), True, 'import numpy as np\n'), ((4592, 4621), 'numpy.argpartition', 'np.argpartition', (['sim', '(-self.k)'], {}), '(sim, -self.k)\n', (4607, 4621), True, 'import numpy as np\n'), ((5424, 5466), 'numpy.linalg.norm', 'np.linalg.norm', (['self.item_vectors'], {'axis': '(-1)'}), '(self.item_vectors, axis=-1)\n', (5438, 5466), True, 'import numpy as np\n'), ((2452, 2487), 'numpy.partition', 'np.partition', (['interacted_sims', '(-num)'], {}), '(interacted_sims, -num)\n', (2464, 2487), True, 'import numpy as np\n'), ((2520, 2535), 'numpy.mean', 'np.mean', (['k_sims'], {}), '(k_sims)\n', (2527, 2535), True, 'import numpy as np\n'), ((4712, 4725), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (4722, 4725), False, 'from operator import itemgetter\n'), ((7966, 7980), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7978, 7980), False, 'import os\n')] |
import numpy
from PIL import Image, ImageDraw

# Rasterise a polygon into a binary (0/1) numpy mask of shape (height, width)
# by drawing it filled onto a single-channel ('L') PIL image.
# polygon = [(x1,y1),(x2,y2),...] or [x1,y1,x2,y2,...]
# width = ?
# height = ?
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
mask = numpy.array(img) | [
"PIL.ImageDraw.Draw",
"PIL.Image.new",
"numpy.array"
] | [((134, 168), 'PIL.Image.new', 'Image.new', (['"""L"""', '(width, height)', '(0)'], {}), "('L', (width, height), 0)\n", (143, 168), False, 'from PIL import Image, ImageDraw\n'), ((232, 248), 'numpy.array', 'numpy.array', (['img'], {}), '(img)\n', (243, 248), False, 'import numpy\n'), ((169, 188), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (183, 188), False, 'from PIL import Image, ImageDraw\n')] |
"""
===============================
Save a 2D static flatmap as SVG
===============================
Plot a 2D static flatmap and save it as SVG file.
**Some words on the `rechache` parameter before we begin:**
Setting the `recache=True` parameter recaches the flatmap cache located in
<filestore>/<subject>/cache. By default intermediate steps for a flatmap are
cached after the first generation to speed up the process for the future. If
any of the intermediate steps changes, the flatmap generation may fail.
`recache=True` will load these intermediate steps new.
This can be helpful if you think there is no reason that the
`quickflat.make_figure` to fail but it nevertheless fails. Try it, it's magic!
The default background is set to be a transparent image. If you want to change
that use the parameter `bgcolor`.
"""
import cortex
import matplotlib.pyplot as plt
import numpy as np

# Seed the RNG so the random Volume below is reproducible across runs.
np.random.seed(1234)

# Create a random pycortex Volume
volume = cortex.Volume.random(subject='S1', xfmname='fullhead')

# Plot a flatmap with the data projected onto the surface
_ = cortex.quickflat.make_figure(volume)
plt.show()

# Save this flatmap
# NOTE(review): the module docstring and the .svg filename promise an SVG,
# but `make_png` writes a PNG bitmap -- confirm whether
# `cortex.quickflat.make_svg` was intended here.
filename = "./my_flatmap.svg"
_ = cortex.quickflat.make_png(filename, volume, recache=False)
| [
"matplotlib.pyplot.show",
"numpy.random.seed",
"cortex.Volume.random",
"cortex.quickflat.make_png",
"cortex.quickflat.make_figure"
] | [((892, 912), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (906, 912), True, 'import numpy as np\n'), ((957, 1011), 'cortex.Volume.random', 'cortex.Volume.random', ([], {'subject': '"""S1"""', 'xfmname': '"""fullhead"""'}), "(subject='S1', xfmname='fullhead')\n", (977, 1011), False, 'import cortex\n'), ((1075, 1111), 'cortex.quickflat.make_figure', 'cortex.quickflat.make_figure', (['volume'], {}), '(volume)\n', (1103, 1111), False, 'import cortex\n'), ((1112, 1122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1120, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1236), 'cortex.quickflat.make_png', 'cortex.quickflat.make_png', (['filename', 'volume'], {'recache': '(False)'}), '(filename, volume, recache=False)\n', (1203, 1236), False, 'import cortex\n')] |
import cv2
import numpy as np
import pyk4a
from pyk4a import Config, PyK4A
def main():
    """Stream colour frames from an Azure Kinect until any key is pressed."""
    # Request synchronized 720p colour + unbinned narrow-FOV depth captures.
    device_config = Config(
        color_resolution=pyk4a.ColorResolution.RES_720P,
        depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
        synchronized_images_only=True,
    )
    k4a = PyK4A(device_config)
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    while True:
        capture = k4a.get_capture()
        # Skip captures without colour data (e.g. during sensor warm-up).
        if not np.any(capture.color):
            continue
        cv2.imshow("k4a", capture.color[:, :, :3])  # drop the alpha channel
        pressed = cv2.waitKey(10)
        if pressed != -1:
            cv2.destroyAllWindows()
            break
    k4a.stop()


if __name__ == "__main__":
    main()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.any",
"pyk4a.Config",
"cv2.imshow"
] | [((115, 248), 'pyk4a.Config', 'Config', ([], {'color_resolution': 'pyk4a.ColorResolution.RES_720P', 'depth_mode': 'pyk4a.DepthMode.NFOV_UNBINNED', 'synchronized_images_only': '(True)'}), '(color_resolution=pyk4a.ColorResolution.RES_720P, depth_mode=pyk4a.\n DepthMode.NFOV_UNBINNED, synchronized_images_only=True)\n', (121, 248), False, 'from pyk4a import Config, PyK4A\n'), ((560, 581), 'numpy.any', 'np.any', (['capture.color'], {}), '(capture.color)\n', (566, 581), True, 'import numpy as np\n'), ((595, 637), 'cv2.imshow', 'cv2.imshow', (['"""k4a"""', 'capture.color[:, :, :3]'], {}), "('k4a', capture.color[:, :, :3])\n", (605, 637), False, 'import cv2\n'), ((656, 671), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (667, 671), False, 'import cv2\n'), ((714, 737), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (735, 737), False, 'import cv2\n')] |
from torch.utils.data import Dataset
from torchvision import transforms, utils
import numpy as np
from scipy import ndimage
import torch
from PIL import Image
class ICLRDataset(Dataset):
    """Image classification dataset with optional cross-class mix augmentation.

    Parameters
    ----------
    imgs : sequence
        Raw images (numpy arrays convertible via ``Image.fromarray``).
    gts : sequence
        Ground-truth labels, aligned with ``imgs``.
    split_type : str
        Split name; augmentation only applies when it equals ``'train'``.
    index : sequence of int or None
        Optional subset of indices to keep; ``None`` keeps everything.
    transform : callable
        Transform applied to each PIL image (e.g. torchvision transforms).
    img_mix_enable : bool
        Whether to enable the mix augmentation on the train split.
    """

    def __init__(self, imgs, gts, split_type, index, transform, img_mix_enable=True):
        if index is None:
            self.imgs = imgs
            self.gts = gts
        else:
            self.imgs = [imgs[i] for i in index]
            self.gts = [gts[i] for i in index]
        self.split_type = split_type
        self.transform = transform
        self.img_mix_enable = img_mix_enable

    def __len__(self):
        return len(self.imgs)

    def augment(self, img, y):
        """With probability 0.5, blend ``img`` with a sample of another class.

        The transformed image keeps weight 0.8; a random crop from a
        different-label sample contributes the remaining 0.2.
        """
        # Single scalar draw instead of np.random.random(1)[0].
        if np.random.random() > 0.5:
            # NOTE: loops forever if every label equals `y` -- assumes at
            # least two classes are present in the dataset.
            while True:
                rnd_idx = np.random.randint(0, len(self.imgs))
                if self.gts[rnd_idx] != y:
                    break
            rnd_crop = self.transform(Image.fromarray(self.imgs[rnd_idx]))
            d = 0.8
            img = img * d + rnd_crop * (1 - d)
        return img

    def __getitem__(self, idx):
        img = self.imgs[idx]
        y = self.gts[idx]
        img = Image.fromarray(img)
        img = self.transform(img)
        # Fixed: use boolean `and` rather than bitwise `&`; `&` binds tighter
        # than `==` and silently relied on the parentheses to work at all.
        if self.split_type == 'train' and self.img_mix_enable:
            img = self.augment(img, y)
        return img, y
| [
"PIL.Image.fromarray",
"numpy.random.random"
] | [((720, 739), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (736, 739), True, 'import numpy as np\n'), ((1182, 1202), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1197, 1202), False, 'from PIL import Image\n'), ((957, 992), 'PIL.Image.fromarray', 'Image.fromarray', (['self.imgs[rnd_idx]'], {}), '(self.imgs[rnd_idx])\n', (972, 992), False, 'from PIL import Image\n')] |
import random
import json
import numpy as np
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import plot_model
from tensorflow.keras.metrics import AUC
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend
import lovpy.logic.properties
from lovpy.graphs.timed_property_graph import TimedPropertyGraph
from lovpy.graphs.logical_operators import NotOperator
from . import io
from .train_config import TrainConfiguration
from .theorem_proving_model import TheoremProvingModel, TrainingResults
PREDICATES_NUM = 10
class SimpleModel(TheoremProvingModel):
    """A simple model based on an MLP for next theorem evaluation."""

    def __init__(self, name, path, model=None, predicates_map=None):
        super().__init__(name, path)
        self.model = model  # compiled keras model; None until trained/loaded
        self.predicates_map = predicates_map  # predicate-text -> id encoder

    def train_core(self, dataset, properties, i_train, i_val, config: TrainConfiguration):
        """Train the MLP and return the metrics of the best epoch.

        The best epoch is the one with minimum validation loss — the same
        criterion used by the best-model checkpoint below, so the reported
        metrics describe the model that is actually retained.
        """
        data, outputs, self.predicates_map = create_training_data(properties, dataset)
        train_data = data[i_train]
        train_outputs = outputs[i_train]
        val_data = data[i_val]
        val_outputs = outputs[i_val]

        self.model = create_dense_model(self.predicates_map)

        model_filename = ("selection_model"
                          + "-epoch_{epoch:02d}"
                          + "-val_acc_{val_acc:.2f}"
                          + "-val_auc_{val_auc:.2f}")
        model_checkpoint_cb = ModelCheckpoint(
            filepath=config.selection_models_dir / model_filename,
        )
        best_model_path = config.selection_models_dir / "_best_selection_model"
        best_model_cb = ModelCheckpoint(
            filepath=best_model_path,
            monitor="val_loss",
            mode="min",
            save_best_only=True
        )

        history = self.model.fit(x=train_data,
                                 y=train_outputs,
                                 validation_data=(val_data, val_outputs),
                                 epochs=config.epochs,
                                 batch_size=config.batch_size,
                                 callbacks=[model_checkpoint_cb, best_model_cb])

        # BUG FIX: previously np.argmax(history.history["loss"]) selected the
        # epoch with the *largest* training loss, so the reported metrics did
        # not match the checkpointed best model (min val_loss, see above).
        best_epoch = np.argmin(history.history["val_loss"])

        results = TrainingResults()
        results.train_loss = history.history["loss"][best_epoch]
        results.eval_loss = history.history["val_loss"][best_epoch]
        results.train_accuracy = history.history["acc"][best_epoch]
        results.eval_accuracy = history.history["val_acc"][best_epoch]
        results.train_auc = history.history["auc"][best_epoch]
        results.eval_auc = history.history["val_auc"][best_epoch]

        # Finally, the best model is retained.
        self.model = load_model(best_model_path)

        return results

    def predict(self,
                current: TimedPropertyGraph,
                theorem_applications: list,
                goal: TimedPropertyGraph):
        """Score each candidate theorem application for the current state.

        Returns a 1-D numpy array with one score per application, in the
        order given.
        """
        scores = []
        for application in theorem_applications:
            data = convert_state_to_matrix(
                current, application.implication_graph, goal, self.predicates_map)
            score = self.model(data)[0]
            scores.append(score)
        backend.clear_session()  # release keras graph memory between calls
        return np.array(scores)

    def save(self):
        """Persist the keras model and the predicates map to the I/O paths."""
        self.model.save(io.main_model_path)
        json.dump(self.predicates_map.map, io.predicates_map_path.open('w'))

    def plot(self, folder):
        """Write a PNG diagram of the network architecture into `folder`."""
        plot_model(
            self.model,
            to_file=folder / f"{self.name}.png",
            show_shapes=True,
            show_layer_names=True
        )

    @staticmethod
    def load(path=None):
        """Load a previously saved model, or return None if loading fails."""
        # TODO: Implement loading from different paths.
        mlp = load_model(io.main_model_path)
        predicates_map = PredicatesMap(pred_map=json.load(io.predicates_map_path.open('r')))
        if mlp and predicates_map:
            return SimpleModel("Simple Model", path, model=mlp, predicates_map=predicates_map)
        else:
            return None
class PredicatesMap:
    """Maps the textual form of basic predicates to integer ids.

    Ids start at 1; id 0 is reserved for "unknown predicate", which is why
    ``len`` reports one more than the number of stored predicates. Negated
    predicates are stored under the key ``NOT(<text>)``.
    """

    def __init__(self, properties=None, pred_map=None):
        if properties:
            self.map = {}
            self.properties = properties
            self._build_map()
        if pred_map:
            self.map = pred_map

    def __getitem__(self, item):
        """Return the id of a predicate graph, or 0 when unknown."""
        if not isinstance(item, TimedPropertyGraph):
            raise RuntimeError("PredicatesMap can only be used for TimedPropertyGraph lookup.")
        text, is_negated = self._get_base_text(item)
        if is_negated:
            text = f"NOT({text})"
        return self.map.get(text, 0)

    def __len__(self):
        # +1 accounts for the reserved id 0 (unknown predicate).
        return len(self.map) + 1

    def _build_map(self):
        """Collect all (possibly negated) basic predicates and number them."""
        # Removed an unused function-local `import lovpy.logic.prover` that
        # served no purpose here.
        for prop in self.properties:
            basic_predicates = lovpy.logic.properties.convert_implication_to_and(
                    prop).get_basic_predicates()
            for pred in basic_predicates:
                base_text, _ = self._get_base_text(pred)
                # Register both polarities with a placeholder value.
                self.map[base_text] = 0
                self.map[f"NOT({base_text})"] = 0
        # Assign ids starting at 1 (0 is reserved for "unknown").
        for i, pred_name in enumerate(self.map, start=1):
            self.map[pred_name] = i

    @staticmethod
    def _get_base_text(predicate_graph):
        """Return (text, is_negated) for a single-predicate graph."""
        root = predicate_graph.get_root_node()
        if isinstance(root, NotOperator):
            # Negation: the actual predicate is the root's only successor.
            pred_name = str(list(predicate_graph.graph.successors(root))[0])
            is_negated = True
        else:
            pred_name = str(root)
            is_negated = False
        return pred_name, is_negated
def create_dense_model(predicates_map):
    """Build and compile the MLP used for next-theorem selection.

    The input width covers three encoded graphs (current, next theorem,
    goal), each PREDICATES_NUM rows wide over the predicate vocabulary.
    """
    input_size = 3 * PREDICATES_NUM * (len(predicates_map) + 1)
    model = Sequential()
    for layer in (
        Dense(input_size, input_dim=input_size, activation="relu"),
        Dense(input_size, activation="relu"),
        Dense(1, activation="sigmoid"),
    ):
        model.add(layer)
    model.compile(loss=MeanSquaredError(), optimizer="adam",
                  metrics=["acc", AUC(name="auc")])
    print(model.summary())
    return model
def create_training_data(properties, samples):
    """Vectorise all samples into (features, targets, predicates_map)."""
    predicates_map = PredicatesMap(properties)
    feature_width = PREDICATES_NUM * 3 * (len(predicates_map) + 1)
    data = np.zeros((len(samples), feature_width))
    outputs = np.zeros(len(samples))
    for row, sample in enumerate(samples):
        data[row], outputs[row] = convert_sample_to_data(sample, predicates_map)
    return data, outputs, predicates_map
def convert_sample_to_data(sample, predicates_map):
    """Encode one training sample as (feature_vector, binary_target)."""
    encoded_parts = [
        convert_property_graph_to_matrix(sample.current_graph, predicates_map),
        convert_property_graph_to_matrix(sample.next_theorem, predicates_map),
        convert_property_graph_to_matrix(sample.goal, predicates_map),
    ]
    input_data = np.concatenate(encoded_parts, axis=0)
    # Positive only when the sample is provable AND this theorem is correct.
    output_data = int(sample.is_provable and sample.next_theorem_correct)
    return input_data, output_data
def convert_property_graph_to_matrix(property_graph: TimedPropertyGraph, predicates_map):
    """Converts a TimedPropertyGraph to a flattened 2-D tensor.

    Each of up to PREDICATES_NUM rows one-hot encodes a predicate id, with
    the last column holding its timestamp normalised by the graph's most
    recent timestamp. A falsy `property_graph` yields an all-zero vector.
    """
    data = np.zeros((PREDICATES_NUM, len(predicates_map) + 1))

    if property_graph:
        predicates = property_graph.get_basic_predicates()
        predicates_id = []
        predicates_timestamp = []
        max_timestamp = property_graph.get_most_recent_timestamp()

        # Clean predicates from the ones not belonging in any properties.
        for p in predicates:
            p_id = predicates_map[p]
            if p_id > 0:
                predicates_id.append(p_id)
                predicates_timestamp.append(p.get_most_recent_timestamp())

        # Sample predicates sequence to get at most PREDICATES_NUM predicates.
        if len(predicates_id) > PREDICATES_NUM:
            indexes_to_keep = sorted(
                random.sample(range(len(predicates_id)), PREDICATES_NUM))
            predicates_id = [predicates_id[i] for i in indexes_to_keep]
            # BUG FIX: the timestamps were previously "filtered" with the
            # tautology `if p_t in predicates_timestamp`, leaving the list
            # unchanged and misaligned with the sampled ids. Keep exactly
            # the timestamps of the sampled predicates instead.
            predicates_timestamp = [predicates_timestamp[i]
                                    for i in indexes_to_keep]

        for i in range(len(predicates_id)):
            data[i, predicates_id[i]] = 1
            if max_timestamp._value > 0:
                data[i, -1] = \
                    float(predicates_timestamp[i]._value) / float(abs(max_timestamp._value))
            else:
                data[i, -1] = float(predicates_timestamp[i]._value)

    return data.flatten()
def convert_state_to_matrix(current_graph, next_theorem, goal_property, predicates_map):
    """Converts a triple defining the current state of theorem proving to inference data."""
    parts = (
        convert_property_graph_to_matrix(current_graph, predicates_map),
        convert_property_graph_to_matrix(next_theorem, predicates_map),
        convert_property_graph_to_matrix(goal_property, predicates_map),
    )
    # Shape (1, features): a single-row batch for the keras model.
    return np.concatenate(parts, axis=0).reshape((1, -1))
| [
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.models.load_model",
"tensorflow.keras.losses.MeanSquaredError",
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.utils.plot_model",
"numpy.array... | [((6064, 6076), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6074, 6076), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((7066, 7129), 'numpy.concatenate', 'np.concatenate', (['(current_table, next_table, goal_table)'], {'axis': '(0)'}), '((current_table, next_table, goal_table), axis=0)\n', (7080, 7129), True, 'import numpy as np\n'), ((1594, 1664), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '(config.selection_models_dir / model_filename)'}), '(filepath=config.selection_models_dir / model_filename)\n', (1609, 1664), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((1792, 1890), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'best_model_path', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)'}), "(filepath=best_model_path, monitor='val_loss', mode='min',\n save_best_only=True)\n", (1807, 1890), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2337, 2371), 'numpy.argmax', 'np.argmax', (["history.history['loss']"], {}), "(history.history['loss'])\n", (2346, 2371), True, 'import numpy as np\n'), ((2878, 2905), 'tensorflow.keras.models.load_model', 'load_model', (['best_model_path'], {}), '(best_model_path)\n', (2888, 2905), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((3365, 3388), 'tensorflow.keras.backend.clear_session', 'backend.clear_session', ([], {}), '()\n', (3386, 3388), False, 'from tensorflow.keras import backend\n'), ((3405, 3421), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3413, 3421), True, 'import numpy as np\n'), ((3601, 3706), 'tensorflow.keras.utils.plot_model', 'plot_model', (['self.model'], {'to_file': "(folder / f'{self.name}.png')", 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(self.model, to_file=folder / f'{self.name}.png', show_shapes=\n True, show_layer_names=True)\n", (3611, 
3706), False, 'from tensorflow.keras.utils import plot_model\n'), ((3874, 3904), 'tensorflow.keras.models.load_model', 'load_model', (['io.main_model_path'], {}), '(io.main_model_path)\n', (3884, 3904), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((6091, 6135), 'tensorflow.keras.layers.Dense', 'Dense', (['dim'], {'input_dim': 'dim', 'activation': '"""relu"""'}), "(dim, input_dim=dim, activation='relu')\n", (6096, 6135), False, 'from tensorflow.keras.layers import Dense\n'), ((6151, 6180), 'tensorflow.keras.layers.Dense', 'Dense', (['dim'], {'activation': '"""relu"""'}), "(dim, activation='relu')\n", (6156, 6180), False, 'from tensorflow.keras.layers import Dense\n'), ((6196, 6226), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6201, 6226), False, 'from tensorflow.keras.layers import Dense\n'), ((6251, 6269), 'tensorflow.keras.losses.MeanSquaredError', 'MeanSquaredError', ([], {}), '()\n', (6267, 6269), False, 'from tensorflow.keras.losses import MeanSquaredError\n'), ((9204, 9267), 'numpy.concatenate', 'np.concatenate', (['(current_table, next_table, goal_table)'], {'axis': '(0)'}), '((current_table, next_table, goal_table), axis=0)\n', (9218, 9267), True, 'import numpy as np\n'), ((6305, 6320), 'tensorflow.keras.metrics.AUC', 'AUC', ([], {'name': '"""auc"""'}), "(name='auc')\n", (6308, 6320), False, 'from tensorflow.keras.metrics import AUC\n')] |
import numpy as np
import pandas as pd
from scipy import sparse, ndimage as ndi
from scipy.sparse import csgraph
from scipy import spatial
import numba
from .nputil import pad, raveled_steps_to_neighbors
## NBGraph and Numba-based implementation
# numba.jitclass attribute specification for NBGraph: CSR index arrays are
# int32, edge weights and per-node properties are float64.
csr_spec = [
    ('indptr', numba.int32[:]),
    ('indices', numba.int32[:]),
    ('data', numba.float64[:]),
    ('shape', numba.int32[:]),
    ('node_properties', numba.float64[:])
]
@numba.jitclass(csr_spec)
class NBGraph:
    # Minimal CSR adjacency wrapper compiled with numba.jitclass so the
    # path-tracing helpers below can traverse the graph in nopython mode.
    def __init__(self, indptr, indices, data, shape, node_props):
        self.indptr = indptr      # CSR row-pointer array
        self.indices = indices    # CSR column indices (neighbor node ids)
        self.data = data          # edge weights (inter-pixel distances)
        self.shape = shape        # adjacency-matrix shape, (n_nodes, n_nodes)
        self.node_properties = node_props  # one float per node (e.g. pixel value)

    def edge(self, i, j):
        # Weight of edge (i, j); relies on the module-level _csrget helper.
        return _csrget(self.indices, self.indptr, self.data, i, j)

    def neighbors(self, row):
        # All neighbors of `row` as a slice of the CSR indices array.
        loc, stop = self.indptr[row], self.indptr[row+1]
        return self.indices[loc:stop]

    @property
    def has_node_props(self):
        # A zero-stride array is the broadcast placeholder installed by
        # csr_to_nbgraph, i.e. no real node properties were supplied.
        return self.node_properties.strides != (0,)
def csr_to_nbgraph(csr, node_props=None):
    """Wrap a scipy CSR matrix in an NBGraph, attaching per-node properties.

    When `node_props` is None, a zero-stride broadcast array of 1.0 is used
    as a placeholder; `NBGraph.has_node_props` detects exactly that zero
    stride, so the broadcast form is intentional and load-bearing.
    """
    if node_props is None:
        node_props = np.broadcast_to(1., csr.shape[0])
        # NOTE(review): re-enabling the writeable flag on a broadcast view is
        # deprecated in newer NumPy and may raise -- confirm supported versions.
        node_props.flags.writeable = True
    return NBGraph(csr.indptr, csr.indices, csr.data,
                   np.array(csr.shape, dtype=np.int32), node_props)
def _pixel_graph(image, steps, distances, num_edges, height=None):
    """Build a sparse CSR adjacency graph of nonzero pixels in `image`.

    When `height` is given, edge lengths additionally account for the
    height difference between neighboring pixels.
    """
    rows = np.empty(num_edges, dtype=int)
    cols = np.empty(num_edges, dtype=int)
    weights = np.empty(num_edges, dtype=float)
    if height is None:
        n_written = _write_pixel_graph(image, steps, distances,
                                      rows, cols, weights)
    else:
        n_written = _write_pixel_graph_height(image, height, steps, distances,
                                             rows, cols, weights)
    # Only the first n_written entries of the buffers are valid edges.
    coo = sparse.coo_matrix((weights[:n_written],
                            (rows[:n_written], cols[:n_written])))
    return coo.tocsr()
@numba.jit(nopython=True, cache=True, nogil=True)
def _write_pixel_graph(image, steps, distances, row, col, data):
    """Step over `image` to build a graph of nonzero pixel neighbors.

    Parameters
    ----------
    image : int array
        The input image.
    steps : int array, shape (N,)
        The raveled index steps to find a pixel's neighbors in `image`.
    distances : float array, shape (N,)
        The euclidean distance from a pixel to its corresponding
        neighbor in `steps`.
    row : int array
        Output array to be filled with the "center" pixel IDs.
    col : int array
        Output array to be filled with the "neighbor" pixel IDs.
    data : float array
        Output array to be filled with the distances from center to
        neighbor pixels.

    Returns
    -------
    k : int
        The number of entries written to row, col, and data.

    Notes
    -----
    No size or bounds checking is performed. Users should ensure that
    - No index in `indices` falls on any edge of `image` (or the
      neighbor computation will fail or segfault).
    - The `steps` and `distances` arrays have the same shape.
    - The `row`, `col`, `data` are long enough to hold all of the
      edges.
    """
    image = image.ravel()
    n_neighbors = steps.size
    # Restrict iteration so that every visited pixel has all of its `steps`
    # neighbors inside the raveled array (image border excluded by caller).
    start_idx = np.max(steps)
    end_idx = image.size + np.min(steps)
    k = 0
    for i in range(start_idx, end_idx + 1):
        if image[i] != 0:
            for j in range(n_neighbors):
                n = steps[j] + i
                # Record an edge between distinct nonzero pixel IDs.
                if image[n] != 0 and image[n] != image[i]:
                    row[k] = image[i]
                    col[k] = image[n]
                    data[k] = distances[j]
                    k += 1
    return k
@numba.jit(nopython=True, cache=True, nogil=True)
def _write_pixel_graph_height(image, height, steps, distances, row, col, data):
    """Step over `image` to build a graph of nonzero pixel neighbors.

    Parameters
    ----------
    image : int array
        The input image.
    height : float array, same shape as `image`
        This is taken to be a height map along an additional
        dimension (in addition to the image dimensions), so the distance
        between two neighbors `i` and `n` separated by `j` is given by:
        `np.sqrt(distances[j]**2 + (height[i] - height[n])**2)`
    steps : int array, shape (N,)
        The raveled index steps to find a pixel's neighbors in `image`.
    distances : float array, shape (N,)
        The euclidean distance from a pixel to its corresponding
        neighbor in `steps`.
    row : int array
        Output array to be filled with the "center" pixel IDs.
    col : int array
        Output array to be filled with the "neighbor" pixel IDs.
    data : float array
        Output array to be filled with the distances from center to
        neighbor pixels.

    Returns
    -------
    k : int
        The number of entries written to row, col, and data.

    Notes
    -----
    No size or bounds checking is performed. Users should ensure that
    - No index in `indices` falls on any edge of `image` (or the
      neighbor computation will fail or segfault).
    - The `steps` and `distances` arrays have the same shape.
    - The `row`, `col`, `data` are long enough to hold all of the
      edges.
    """
    image = image.ravel()
    height = height.ravel()
    n_neighbors = steps.size
    # Restrict iteration so that every visited pixel has all of its `steps`
    # neighbors inside the raveled array (image border excluded by caller).
    start_idx = np.max(steps)
    end_idx = image.size + np.min(steps)
    k = 0
    for i in range(start_idx, end_idx + 1):
        if image[i] != 0:
            for j in range(n_neighbors):
                n = steps[j] + i
                if image[n] != 0 and image[n] != image[i]:
                    row[k] = image[i]
                    col[k] = image[n]
                    # In-plane distance plus the height difference, combined
                    # as Euclidean distance in the extra dimension.
                    data[k] = np.sqrt(distances[j] ** 2 +
                                      (height[i] - height[n]) ** 2)
                    k += 1
    return k
@numba.jit(nopython=True, cache=False) # change this to True with Numba 1.0
def _build_paths(jgraph, indptr, indices, path_data, visited, degrees):
    """Trace every skeleton path in `jgraph` into CSR-style output buffers.

    `indptr`, `indices` and `path_data` are filled in place; returns the
    number of valid indptr entries and the number of valid indices written.
    """
    indptr_i = 0
    indices_j = 0
    # first, process all nodes in a path to an endpoint or junction
    for node in range(1, jgraph.shape[0]):
        if degrees[node] > 2 or degrees[node] == 1 and not visited[node]:
            for neighbor in jgraph.neighbors(node):
                if not visited[neighbor]:
                    n_steps = _walk_path(jgraph, node, neighbor, visited,
                                         degrees, indices, path_data,
                                         indices_j)
                    visited[node] = True
                    indptr[indptr_i + 1] = indptr[indptr_i] + n_steps
                    indptr_i += 1
                    indices_j += n_steps
    # everything else is by definition in isolated cycles
    for node in range(1, jgraph.shape[0]):
        if degrees[node] > 0:
            if not visited[node]:
                visited[node] = True
                # Arbitrarily start the cycle at this node's first neighbor.
                neighbor = jgraph.neighbors(node)[0]
                n_steps = _walk_path(jgraph, node, neighbor, visited, degrees,
                                     indices, path_data, indices_j)
                indptr[indptr_i + 1] = indptr[indptr_i] + n_steps
                indptr_i += 1
                indices_j += n_steps
    return indptr_i + 1, indices_j
@numba.jit(nopython=True, cache=False) # change this to True with Numba 1.0
def _walk_path(jgraph, node, neighbor, visited, degrees, indices, path_data,
               startj):
    """Walk from `node` through `neighbor` until the next endpoint/junction.

    Writes the visited node ids and their node properties into `indices`
    and `path_data` starting at offset `startj`; marks interior nodes as
    visited and returns the number of entries written.
    """
    indices[startj] = node
    path_data[startj] = jgraph.node_properties[node]
    j = startj + 1
    # Degree-2 nodes are path interiors: keep stepping to the neighbor we
    # did not just come from.
    while degrees[neighbor] == 2 and not visited[neighbor]:
        indices[j] = neighbor
        path_data[j] = jgraph.node_properties[neighbor]
        n1, n2 = jgraph.neighbors(neighbor)
        nextneighbor = n1 if n1 != node else n2
        node, neighbor = neighbor, nextneighbor
        visited[node] = True
        j += 1
    # Record the terminating endpoint/junction as well.
    indices[j] = neighbor
    path_data[j] = jgraph.node_properties[neighbor]
    visited[neighbor] = True
    return j - startj + 1
def _build_skeleton_path_graph(graph, *, _buffer_size_offset=None):
    """Build a CSR matrix where row i lists the pixel ids of skeleton path i.

    Parameters
    ----------
    graph : scipy.sparse.csr_matrix
        The skeleton pixel graph.
    _buffer_size_offset : int, optional
        Extra slack added to the path-index buffer; defaults to the upper
        bound on the number of isolated cycles.
    """
    # Upper bound on the number of isolated cycles: each cycle occupies at
    # least 4 entries in `graph.indices`.
    # BUG FIX: this was previously computed only inside the
    # `_buffer_size_offset is None` branch, so passing an explicit offset
    # raised NameError at the `n_points` computation below.
    max_num_cycles = graph.indices.size // 4
    if _buffer_size_offset is None:
        _buffer_size_offset = max_num_cycles
    degrees = np.diff(graph.indptr)
    visited = np.zeros(degrees.shape, dtype=bool)
    endpoints = (degrees != 2)
    endpoint_degrees = degrees[endpoints]
    num_paths = np.sum(endpoint_degrees)
    path_indptr = np.zeros(num_paths + _buffer_size_offset, dtype=int)
    # the number of points that we need to save to store all skeleton
    # paths is equal to the number of pixels plus the sum of endpoint
    # degrees minus one (since the endpoints will have been counted once
    # already in the number of pixels) *plus* the number of isolated
    # cycles (since each cycle has one index repeated). We don't know
    # the number of cycles ahead of time, but it is bounded by one quarter
    # of the number of points.
    n_points = (graph.indices.size + np.sum(endpoint_degrees - 1) +
                max_num_cycles)
    path_indices = np.zeros(n_points, dtype=int)
    path_data = np.zeros(path_indices.shape, dtype=float)
    m, n = _build_paths(graph, path_indptr, path_indices, path_data,
                        visited, degrees)
    paths = sparse.csr_matrix((path_data[:n], path_indices[:n],
                               path_indptr[:m]), shape=(m-1, n))
    return paths
class Skeleton:
    """Object to group together all the properties of a skeleton.

    In the text below, we use the following notation:

    - N: the number of points in the pixel skeleton,
    - ndim: the dimensionality of the skeleton
    - P: the number of paths in the skeleton (also the number of links in the
      junction graph).
    - J: the number of junction nodes
    - Sd: the sum of the degrees of all the junction nodes
    - [Nt], [Np], Nr, Nc: the dimensions of the source image

    Parameters
    ----------
    skeleton_image : array
        The input skeleton (1-pixel/voxel thick skeleton, all other values 0).

    Other Parameters
    ----------------
    spacing : float or array of float, shape ``(ndim,)``
        The scale of the pixel spacing along each axis.
    source_image : array of float, same shape as `skeleton_image`
        The image that `skeleton_image` represents / summarizes / was generated
        from. This is used to produce visualizations as well as statistical
        properties of paths.
    keep_images : bool
        Whether or not to keep the original input images. These can be useful
        for visualization, but they may take up a lot of memory.

    Attributes
    ----------
    graph : scipy.sparse.csr_matrix, shape (N + 1, N + 1)
        The skeleton pixel graph, where each node is a non-zero pixel in the
        input image, and each edge connects adjacent pixels. The graph is
        represented as an adjacency matrix in SciPy sparse matrix format. For
        more information see the ``scipy.sparse`` documentation as well as
        ``scipy.sparse.csgraph``. Note: pixel numbering starts at 1, so the
        shape of this matrix is ``(N + 1, N + 1)`` instead of ``(N, N)``.
    nbgraph : NBGraph
        A thin Numba wrapper around the ``csr_matrix`` format, this provides
        faster graph methods. For example, it is much faster to get a list of
        neighbors, or test for the presence of a specific edge.
    coordinates : array, shape (N, ndim)
        The image coordinates of each pixel in the skeleton.
    paths : scipy.sparse.csr_matrix, shape (P, N + 1)
        A csr_matrix where element [i, j] is on if node j is in path i. This
        includes path endpoints. The number of nonzero elements is N - J + Sd.
    n_paths : int
        The number of paths, P. This is redundant information given `n_paths`,
        but it is used often enough that it is worth keeping around.
    distances : array of float, shape (P,)
        The distance of each path.
    skeleton_image : array or None
        The input skeleton image. Only present if `keep_images` is True. Set to
        False to preserve memory.
    source_image : array or None
        The image from which the skeleton was derived. Only present if
        `keep_images` is True. This is useful for visualization.
    """
    def __init__(self, skeleton_image, *, spacing=1, source_image=None,
                 _buffer_size_offset=None, keep_images=True,
                 unique_junctions=True):
        graph, coords, degrees = skeleton_to_csgraph(skeleton_image,
                                                     spacing=spacing,
                                                     unique_junctions=unique_junctions)
        # For float skeleton images, sample the (interpolated) pixel values
        # along the skeleton so paths carry per-node data.
        # NOTE(review): np.float_ is removed in NumPy 2.0 -- confirm the
        # supported NumPy versions for this code.
        if np.issubdtype(skeleton_image.dtype, np.float_):
            pixel_values = ndi.map_coordinates(skeleton_image, coords.T,
                                               order=3)
        else:
            pixel_values = None
        self.graph = graph
        self.nbgraph = csr_to_nbgraph(graph, pixel_values)
        self.coordinates = coords
        self.paths = _build_skeleton_path_graph(self.nbgraph,
                                                _buffer_size_offset=_buffer_size_offset)
        self.n_paths = self.paths.shape[0]
        # Path lengths are computed lazily on first call to path_lengths().
        self.distances = np.empty(self.n_paths, dtype=float)
        self._distances_initialized = False
        self.skeleton_image = None
        self.source_image = None
        self.degrees_image = degrees
        # Per-node degree: number of stored entries per CSR row.
        self.degrees = np.diff(self.graph.indptr)
        self.spacing = (np.asarray(spacing) if not np.isscalar(spacing)
                        else np.full(skeleton_image.ndim, spacing))
        if keep_images:
            self.skeleton_image = skeleton_image
            self.source_image = source_image

    def path(self, index):
        """Return the pixel indices of path number `index`.

        Parameters
        ----------
        index : int
            The desired path.

        Returns
        -------
        path : array of int
            The indices of the pixels belonging to the path, including
            endpoints.
        """
        # The below is equivalent to `self.paths[index].indices`, which is much
        # more elegant. However the below version is about 25x faster!
        # In [14]: %timeit mat[1].indices
        # 128 µs ± 421 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
        # In [16]: %%timeit
        # ...: start, stop = mat.indptr[1:3]
        # ...: mat.indices[start:stop]
        # ...:
        # 5.05 µs ± 77.2 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
        start, stop = self.paths.indptr[index:index+2]
        return self.paths.indices[start:stop]

    def path_coordinates(self, index):
        """Return the image coordinates of the pixels in the path.

        Parameters
        ----------
        index : int
            The desired path.

        Returns
        -------
        path_coords : array of float
            The (image) coordinates of points on the path, including endpoints.
        """
        path_indices = self.path(index)
        return self.coordinates[path_indices]

    def path_with_data(self, index):
        """Return pixel indices and corresponding pixel values on a path.

        Parameters
        ----------
        index : int
            The desired path.

        Returns
        -------
        path : array of int
            The indices of pixels on the path, including endpoints.
        data : array of float
            The values of pixels on the path.
        """
        start, stop = self.paths.indptr[index:index+2]
        return self.paths.indices[start:stop], self.paths.data[start:stop]

    def path_lengths(self):
        """Return the length of each path on the skeleton.

        Returns
        -------
        lengths : array of float
            The length of all the paths in the skeleton.
        """
        # Computed once and cached in self.distances.
        if not self._distances_initialized:
            _compute_distances(self.nbgraph, self.paths.indptr,
                               self.paths.indices, self.distances)
            self._distances_initialized = True
        return self.distances

    def paths_list(self):
        """List all the paths in the skeleton, including endpoints.

        Returns
        -------
        paths : list of array of int
            The list containing all the paths in the skeleton.
        """
        return [list(self.path(i)) for i in range(self.n_paths)]

    def path_means(self):
        """Compute the mean pixel value along each path.

        Returns
        -------
        means : array of float
            The average pixel value along each path in the skeleton.
        """
        # reduceat sums self.paths.data per CSR row (i.e. per path).
        sums = np.add.reduceat(self.paths.data, self.paths.indptr[:-1])
        lengths = np.diff(self.paths.indptr)
        return sums / lengths

    def path_stdev(self):
        """Compute the standard deviation of values along each path.

        Returns
        -------
        stdevs : array of float
            The standard deviation of pixel values along each path.
        """
        data = self.paths.data
        sumsq = np.add.reduceat(data * data, self.paths.indptr[:-1])
        lengths = np.diff(self.paths.indptr)
        means = self.path_means()
        # Clip guards against tiny negative values from floating-point error
        # in E[x^2] - E[x]^2 before the square root.
        return np.sqrt(np.clip(sumsq/lengths - means*means, 0, None))
def summarize(skel: Skeleton):
    """Compute statistics for every skeleton and branch in ``skel``.

    Parameters
    ----------
    skel : skan.csr.Skeleton
        A Skeleton object.

    Returns
    -------
    summary : pandas.DataFrame
        One row per branch, including branch length, mean branch value,
        branch euclidean distance, etc.
    """
    stats = {}
    ndim = skel.coordinates.shape[1]
    # Branch i runs from paths.indices[indptr[i]] to indices[indptr[i+1]-1].
    src = skel.paths.indices[skel.paths.indptr[:-1]]
    dst = skel.paths.indices[skel.paths.indptr[1:] - 1]
    _, skeleton_ids = csgraph.connected_components(skel.graph,
                                                   directed=False)
    stats['skeleton-id'] = skeleton_ids[src]
    stats['node-id-src'] = src
    stats['node-id-dst'] = dst
    stats['branch-distance'] = skel.path_lengths()
    # Classify every branch from the degrees of its two endpoints.
    src_degree = skel.degrees[src]
    dst_degree = skel.degrees[dst]
    branch_kind = np.full(src_degree.shape, 2)  # default: junction-to-junction
    branch_kind[(src_degree == 1) | (dst_degree == 1)] = 1  # tip-junction
    branch_kind[(src_degree == 1) & (dst_degree == 1)] = 0  # tip-tip
    branch_kind[src == dst] = 3  # cycle
    stats['branch-type'] = branch_kind
    stats['mean-pixel-value'] = skel.path_means()
    stats['stdev-pixel-value'] = skel.path_stdev()
    # Column insertion order matters for the DataFrame layout, so each
    # coordinate family gets its own loop.
    for axis in range(ndim):
        stats[f'image-coord-src-{axis}'] = skel.coordinates[src, axis]
    for axis in range(ndim):
        stats[f'image-coord-dst-{axis}'] = skel.coordinates[dst, axis]
    real_src = skel.coordinates[src] * skel.spacing
    for axis in range(ndim):
        stats[f'coord-src-{axis}'] = real_src[:, axis]
    real_dst = skel.coordinates[dst] * skel.spacing
    for axis in range(ndim):
        stats[f'coord-dst-{axis}'] = real_dst[:, axis]
    stats['euclidean-distance'] = np.sqrt(
            (real_dst - real_src) ** 2 @ np.ones(ndim))
    return pd.DataFrame(stats)
@numba.jit(nopython=True, nogil=True, cache=False) # cache with Numba 1.0
def _compute_distances(graph, path_indptr, path_indices, distances):
    """Fill ``distances`` in place with the length of every path."""
    n_paths = len(distances)
    for path_id in range(n_paths):
        lo = path_indptr[path_id]
        hi = path_indptr[path_id + 1]
        distances[path_id] = _path_distance(graph, path_indices[lo:hi])
@numba.jit(nopython=True, nogil=True, cache=False) # cache with Numba 1.0
def _path_distance(graph, path):
    """Sum the edge lengths between consecutive nodes of ``path``."""
    total = 0.
    for i in range(len(path) - 1):
        total += graph.edge(path[i], path[i + 1])
    return total
def _uniquify_junctions(csmat, pixel_indices, junction_labels,
junction_centroids, *, spacing=1):
"""Replace clustered pixels with degree > 2 by a single "floating" pixel.
Parameters
----------
csmat : NBGraph
The input graph.
pixel_indices : array of int
The raveled index in the image of every pixel represented in csmat.
spacing : float, or array-like of float, shape `len(shape)`, optional
The spacing between pixels in the source image along each dimension.
Returns
-------
final_graph : NBGraph
The output csmat.
"""
junctions = np.unique(junction_labels)[1:] # discard 0, background
junction_centroids_real = junction_centroids * spacing
for j, jloc in zip(junctions, junction_centroids_real):
loc, stop = csmat.indptr[j], csmat.indptr[j+1]
neighbors = csmat.indices[loc:stop]
neighbor_locations = pixel_indices[neighbors]
neighbor_locations *= spacing
distances = np.sqrt(np.sum((neighbor_locations - jloc)**2, axis=1))
csmat.data[loc:stop] = distances
tdata = csmat.T.tocsr().data
csmat.data = np.maximum(csmat.data, tdata)
def skeleton_to_csgraph(skel, *, spacing=1, value_is_height=False,
                        unique_junctions=True):
    """Convert a skeleton image of thin lines to a graph of neighbor pixels.
    Parameters
    ----------
    skel : array
        An input image in which every nonzero pixel is considered part of
        the skeleton, and links between pixels are determined by a full
        n-dimensional neighborhood.
    spacing : float, or array-like of float, shape `(skel.ndim,)`
        A value indicating the distance between adjacent pixels. This can
        either be a single value if the data has the same resolution along
        all axes, or it can be an array of the same shape as `skel` to
        indicate spacing along each axis.
    Other Parameters
    ----------------
    value_is_height : bool, optional
        If `True`, the pixel value at each point of the skeleton will be
        considered to be a height measurement, and this height will be
        incorporated into skeleton branch lengths. Used for analysis of
        atomic force microscopy (AFM) images.
    unique_junctions : bool, optional
        If True, adjacent junction nodes get collapsed into a single
        conceptual node, with position at the centroid of all the connected
        initial nodes.
    Returns
    -------
    graph : sparse.csr_matrix
        A graph of shape (Nnz + 1, Nnz + 1), where Nnz is the number of
        nonzero pixels in `skel`. The value graph[i, j] is the distance
        between adjacent pixels i and j. In a 2D image, that would be
        1 for immediately adjacent pixels and sqrt(2) for diagonally
        adjacent ones.
    pixel_coordinates : array of float
        An array of shape (Nnz + 1, skel.ndim), mapping indices in `graph`
        to pixel coordinates in `degree_image` or `skel`.
    degree_image : array of int, same shape as skel
        An image where each pixel value contains the degree of its
        corresponding node in `graph`. This is useful to classify nodes.
    """
    # NOTE(review): ``pad``, ``raveled_steps_to_neighbors`` and
    # ``_pixel_graph`` are helpers defined elsewhere in this module --
    # presumably constant-padding and neighbor-offset enumeration; confirm.
    height = pad(skel, 0.) if value_is_height else None
    # ensure we have a bool image, since we later use it for bool indexing
    skel = skel.astype(bool)
    ndim = skel.ndim
    spacing = np.ones(ndim, dtype=float) * spacing
    # Row 0 is a dummy coordinate at the origin so that real pixels get
    # node ids starting at 1 (hence the Nnz + 1 graph shape documented
    # above).
    pixel_indices = np.concatenate(([[0.] * ndim],
                                    np.transpose(np.nonzero(skel))), axis=0)
    skelint = np.zeros(skel.shape, dtype=int)
    skelint[tuple(pixel_indices.T.astype(int))] = \
        np.arange(pixel_indices.shape[0])
    # Count each pixel's neighbors by convolving with an all-ones kernel.
    degree_kernel = np.ones((3,) * ndim)
    degree_kernel[(1,) * ndim] = 0 # remove centre pixel
    degree_image = ndi.convolve(skel.astype(int), degree_kernel,
                                mode='constant') * skel
    if unique_junctions:
        # group all connected junction nodes into "meganodes".
        junctions = degree_image > 2
        junction_ids = skelint[junctions]
        labeled_junctions, centroids = compute_centroids(junctions)
        # Re-label each junction cluster with the node id of one of its
        # member pixels, and move that node to the cluster centroid.
        labeled_junctions[junctions] = \
            junction_ids[labeled_junctions[junctions] - 1]
        skelint[junctions] = labeled_junctions[junctions]
        pixel_indices[np.unique(labeled_junctions)[1:]] = centroids
    num_edges = np.sum(degree_image) # *2, which is how many we need to store
    skelint = pad(skelint, 0) # pad image to prevent looparound errors
    steps, distances = raveled_steps_to_neighbors(skelint.shape, ndim,
                                                  spacing=spacing)
    graph = _pixel_graph(skelint, steps, distances, num_edges, height)
    if unique_junctions:
        # Rewrite the collapsed junctions' edge lengths as distances to
        # the cluster centroids (see _uniquify_junctions).
        _uniquify_junctions(graph, pixel_indices,
                            labeled_junctions, centroids, spacing=spacing)
    return graph, pixel_indices, degree_image
@numba.jit(nopython=True, cache=True)
def _csrget(indices, indptr, data, row, col):
    """Fast lookup of value in a scipy.sparse.csr_matrix format table.

    Parameters
    ----------
    indices, indptr, data : numpy arrays of int, int, float
        The CSR format data.
    row, col : int
        The matrix coordinates of the desired value.

    Returns
    -------
    dat : float
        The data value in the matrix, or 0. when (row, col) is absent.
    """
    lo = indptr[row]
    hi = indptr[row + 1]
    for pos in range(lo, hi):
        if indices[pos] == col:
            return data[pos]
    return 0.
@numba.jit(nopython=True)
def _expand_path(graph, source, step, visited, degrees):
    """Walk a path on a graph until reaching a tip or junction.

    A path is a sequence of degree-2 nodes.

    Parameters
    ----------
    graph : NBGraph
        A graph encoded identically to a SciPy sparse compressed sparse
        row matrix. See the documentation of `NBGraph` for details.
    source : int
        The starting point of the walk. This must be a path node, or
        the function's behaviour is undefined.
    step : int
        The initial direction of the walk. Must be a neighbor of
        `source`.
    visited : array of bool
        An array mapping node ids to `False` (unvisited node) or `True`
        (previously visited node).
    degrees : array of int
        An array mapping node ids to their degrees in `graph`.

    Returns
    -------
    dest : int
        The tip or junction node at the end of the path.
    d : float
        The distance travelled from `source` to `dest`.
    n : int
        The number of pixels along the path followed (excluding the source).
    s : float
        The sum of the pixel values along the path followed (also excluding
        the source).
    deg : int
        The degree of `dest`.
    """
    total_distance = graph.edge(source, step)
    value_sum = 0.
    count = 0
    # Keep walking while the current node is an interior (degree-2) path
    # node that we have not seen before.
    while degrees[step] == 2 and not visited[step]:
        first, second = graph.neighbors(step)
        if first == source:
            forward = second
        else:
            forward = first
        source, step = step, forward
        total_distance += graph.edge(source, step)
        visited[source] = True
        value_sum += graph.node_properties[source]
        count += 1
    visited[step] = True
    return step, total_distance, count, value_sum, degrees[step]
@numba.jit(nopython=True, nogil=True)
def _branch_statistics_loop(jgraph, degrees, visited, result):
    """Fill ``result`` with one row per branch; return the row count.

    Each row is (node-id-0, node-id-1, distance, branch-type,
    mean pixel value). Iteration starts at node 1, skipping node 0.
    """
    num_results = 0
    for node in range(1, jgraph.shape[0]):
        if not visited[node]:
            if degrees[node] == 2:
                # Interior path node: walk outwards in both directions to
                # find the branch's two endpoints.
                visited[node] = True
                left, right = jgraph.neighbors(node)
                id0, d0, n0, s0, deg0 = _expand_path(jgraph, node, left,
                                                     visited, degrees)
                if id0 == node: # standalone cycle
                    id1, d1, n1, s1, deg1 = node, 0, 0, 0., 2
                    kind = 3
                else:
                    id1, d1, n1, s1, deg1 = _expand_path(jgraph, node, right,
                                                         visited, degrees)
                    kind = 2 # default: junction-to-junction
                    if deg0 == 1 and deg1 == 1: # tip-tip
                        kind = 0
                    elif deg0 == 1 or deg1 == 1: # tip-junct, tip-path impossible
                        kind = 1
                # The +1 / node_properties[node] terms account for the
                # start node, which neither _expand_path call counts.
                counts = n0 + n1 + 1
                values = s0 + s1 + jgraph.node_properties[node]
                result[num_results, :] = (float(id0), float(id1), d0 + d1,
                                          float(kind), values / counts)
                num_results += 1
            elif degrees[node] == 1:
                # Tip node: walk its single branch to the other end.
                visited[node] = True
                neighbor = jgraph.neighbors(node)[0]
                id0, d0, n0, s0, deg0 = _expand_path(jgraph, node, neighbor,
                                                     visited, degrees)
                kind = 1 if deg0 > 2 else 0 # tip-junct / tip-tip
                counts = n0
                values = s0
                # A zero-length walk has no sampled pixels -> NaN mean.
                avg_value = np.nan if counts == 0 else values / counts
                result[num_results, :] = (float(node), float(id0), d0,
                                          float(kind), avg_value)
                num_results += 1
    return num_results
def branch_statistics(graph, pixel_values=None, *,
                      buffer_size_offset=0):
    """Compute the length and type of each branch in a skeleton graph.

    Parameters
    ----------
    graph : sparse.csr_matrix, shape (N, N)
        A skeleton graph.
    pixel_values : array of float, shape (N,)
        A value for each pixel in the graph. Used to compute total
        intensity statistics along each branch.
    buffer_size_offset : int, optional
        The buffer size is given by the sum of the degrees of non-path
        nodes. This is usually 2x the amount needed, allowing room for
        extra cycles of path-only nodes. However, if the image consists
        *only* of such cycles, the buffer size will be 0, resulting in
        an error. Until a more sophisticated, expandable-buffer
        solution is implemented, you can manually set a bigger buffer
        size using this parameter.

    Returns
    -------
    branches : array of float, shape (N, {4, 5})
        An array containing branch endpoint IDs, length, and branch type.
        The types are:
        - tip-tip (0)
        - tip-junction (1)
        - junction-junction (2)
        - path-path (3) (This can only be a standalone cycle)
        Optionally, the last column contains the average pixel value
        along each branch (not including the endpoints).
    """
    nbgraph = csr_to_nbgraph(graph, pixel_values)
    degrees = np.diff(graph.indptr)
    visited = np.zeros(degrees.shape, dtype=bool)
    # Non-path (degree != 2) nodes bound how many branches can exist.
    non_path = degrees != 2
    buffer_rows = np.sum(degrees[non_path]) + buffer_size_offset
    result = np.zeros((buffer_rows, 5), dtype=float)
    n_filled = _branch_statistics_loop(nbgraph, degrees, visited, result)
    n_cols = 5 if nbgraph.has_node_props else 4
    return result[:n_filled, :n_cols]
def submatrix(M, idxs):
    """Return the outer-index product submatrix, `M[idxs, :][:, idxs]`.

    Parameters
    ----------
    M : scipy.sparse.spmatrix
        Input (square) matrix
    idxs : array of int
        The indices to subset. No index in `idxs` should exceed the
        number of rows of `M`.

    Returns
    -------
    Msub : scipy.sparse.spmatrix
        The subsetted matrix.

    Examples
    --------
    >>> Md = np.arange(16).reshape((4, 4))
    >>> M = sparse.csr_matrix(Md)
    >>> print(submatrix(M, [0, 2]).toarray())
    [[ 0  2]
     [ 8 10]]
    """
    rows = M[idxs, :]
    return rows[:, idxs]
def summarise(image, *, spacing=1, using_height=False):
    """Compute statistics for every disjoint skeleton in `image`.
    **Note: this function is deprecated. Prefer** :func:`.summarize`.
    Parameters
    ----------
    image : array, shape (M, N, ..., P)
        N-dimensional array, where nonzero entries correspond to an
        object's single-pixel-wide skeleton. If the image is of type 'float',
        the values are taken to be the height at that pixel, which is used
        to compute the skeleton distances.
    spacing : float, or array-like of float, shape `(skel.ndim,)`
        A value indicating the distance between adjacent pixels. This can
        either be a single value if the data has the same resolution along
        all axes, or it can be an array of the same shape as `skel` to
        indicate spacing along each axis.
    using_height : bool, optional
        If `True`, the pixel value at each point of the skeleton will be
        considered to be a height measurement, and this height will be
        incorporated into skeleton branch lengths, endpoint coordinates,
        and euclidean distances. Used for analysis of atomic force
        microscopy (AFM) images.
    Returns
    -------
    df : pandas DataFrame
        A data frame summarising the statistics of the skeletons in
        `image`.
    """
    ndim = image.ndim
    spacing = np.ones(ndim, dtype=float) * spacing
    g, coords_img, degrees = skeleton_to_csgraph(image, spacing=spacing,
                                                 value_is_height=using_height)
    _, skeleton_ids = csgraph.connected_components(g, directed=False)
    # ``np.float_`` was removed in NumPy 2.0; ``np.float64`` is the same
    # dtype check (it was an alias for float64).
    if np.issubdtype(image.dtype, np.float64) and not using_height:
        pixel_values = ndi.map_coordinates(image, coords_img.T, order=3)
        value_columns = ['mean pixel value']
        value_column_types = [float]
    else:
        pixel_values = None
        value_columns = []
        value_column_types = []
    stats = branch_statistics(g, pixel_values)
    # Endpoint coordinates in image (pixel) and real (scaled) space.
    indices0 = stats[:, 0].astype(int)
    indices1 = stats[:, 1].astype(int)
    coords_img0 = coords_img[indices0]
    coords_img1 = coords_img[indices1]
    coords_real0 = coords_img0 * spacing
    coords_real1 = coords_img1 * spacing
    if using_height:
        # Prepend the interpolated height as an extra coordinate axis.
        height_coords0 = ndi.map_coordinates(image, coords_img0.T, order=3)
        coords_real0 = np.column_stack((height_coords0, coords_real0))
        height_coords1 = ndi.map_coordinates(image, coords_img1.T, order=3)
        coords_real1 = np.column_stack((height_coords1, coords_real1))
    distances = np.sqrt(np.sum((coords_real0 - coords_real1)**2, axis=1))
    skeleton_id = skeleton_ids[indices0]
    table = np.column_stack((skeleton_id, stats, coords_img0, coords_img1,
                             coords_real0, coords_real1, distances))
    height_ndim = ndim if not using_height else (ndim + 1)
    columns = (['skeleton-id', 'node-id-0', 'node-id-1', 'branch-distance',
                'branch-type'] +
               value_columns +
               ['image-coord-src-%i' % i for i in range(ndim)] +
               ['image-coord-dst-%i' % i for i in range(ndim)] +
               ['coord-src-%i' % i for i in range(height_ndim)] +
               ['coord-dst-%i' % i for i in range(height_ndim)] +
               ['euclidean-distance'])
    column_types = ([int, int, int, float, int] + value_column_types +
                    2 * ndim * [int] +
                    2 * height_ndim * [float] +
                    [float])
    # Cast each column of ``table`` to its declared dtype.
    data_dict = {col: dat.astype(dtype)
                 for col, dat, dtype in zip(columns, table.T, column_types)}
    df = pd.DataFrame(data_dict)
    return df
def compute_centroids(image):
    """Find the centroids of all nonzero connected blobs in `image`.

    Parameters
    ----------
    image : ndarray
        The input image.

    Returns
    -------
    label_image : ndarray of int
        The input image, with each connected region containing a different
        integer label.
    centroids : array of float
        The mean coordinate of each labeled region, one row per label.

    Examples
    --------
    >>> image = np.array([[1, 0, 1, 0, 0, 1, 1],
    ...                   [1, 0, 0, 1, 0, 0, 0]])
    >>> labels, centroids = compute_centroids(image)
    >>> print(labels)
    [[1 0 2 0 0 3 3]
     [1 0 0 2 0 0 0]]
    >>> centroids
    array([[0.5, 0. ],
           [0.5, 2.5],
           [0. , 5.5]])
    """
    structure = np.ones((3,) * image.ndim)  # full connectivity
    label_image = ndi.label(image, structure)[0]
    nonzero = np.nonzero(label_image)
    labels_flat = label_image[nonzero]
    counts = np.bincount(labels_flat)
    coords = np.transpose(nonzero)
    order = np.argsort(labels_flat)
    # Sum the coordinates per label, then divide by each label's size.
    coord_sums = np.add.reduceat(coords[order], np.cumsum(counts)[:-1])
    centroids = coord_sums / counts[1:, np.newaxis]
    return label_image, centroids
| [
"numpy.sum",
"numpy.maximum",
"numpy.empty",
"numpy.ones",
"numpy.add.reduceat",
"numpy.clip",
"numpy.argsort",
"numpy.arange",
"scipy.sparse.csgraph.connected_components",
"numpy.unique",
"numpy.full",
"pandas.DataFrame",
"numpy.transpose",
"numpy.cumsum",
"numpy.max",
"scipy.sparse.c... | [((437, 461), 'numba.jitclass', 'numba.jitclass', (['csr_spec'], {}), '(csr_spec)\n', (451, 461), False, 'import numba\n'), ((1819, 1867), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)', 'nogil': '(True)'}), '(nopython=True, cache=True, nogil=True)\n', (1828, 1867), False, 'import numba\n'), ((3559, 3607), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)', 'nogil': '(True)'}), '(nopython=True, cache=True, nogil=True)\n', (3568, 3607), False, 'import numba\n'), ((5751, 5788), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(False)'}), '(nopython=True, cache=False)\n', (5760, 5788), False, 'import numba\n'), ((7168, 7205), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(False)'}), '(nopython=True, cache=False)\n', (7177, 7205), False, 'import numba\n'), ((19275, 19324), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(False)'}), '(nopython=True, nogil=True, cache=False)\n', (19284, 19324), False, 'import numba\n'), ((19589, 19638), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(False)'}), '(nopython=True, nogil=True, cache=False)\n', (19598, 19638), False, 'import numba\n'), ((24865, 24901), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (24874, 24901), False, 'import numba\n'), ((25447, 25471), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (25456, 25471), False, 'import numba\n'), ((27128, 27164), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (27137, 27164), False, 'import numba\n'), ((1383, 1413), 'numpy.empty', 'np.empty', (['num_edges'], {'dtype': 'int'}), '(num_edges, dtype=int)\n', (1391, 1413), True, 'import numpy as np\n'), ((1424, 1454), 'numpy.empty', 'np.empty', (['num_edges'], {'dtype': 'int'}), '(num_edges, dtype=int)\n', (1432, 
1454), True, 'import numpy as np\n'), ((1466, 1498), 'numpy.empty', 'np.empty', (['num_edges'], {'dtype': 'float'}), '(num_edges, dtype=float)\n', (1474, 1498), True, 'import numpy as np\n'), ((3130, 3143), 'numpy.max', 'np.max', (['steps'], {}), '(steps)\n', (3136, 3143), True, 'import numpy as np\n'), ((5238, 5251), 'numpy.max', 'np.max', (['steps'], {}), '(steps)\n', (5244, 5251), True, 'import numpy as np\n'), ((8121, 8142), 'numpy.diff', 'np.diff', (['graph.indptr'], {}), '(graph.indptr)\n', (8128, 8142), True, 'import numpy as np\n'), ((8157, 8192), 'numpy.zeros', 'np.zeros', (['degrees.shape'], {'dtype': 'bool'}), '(degrees.shape, dtype=bool)\n', (8165, 8192), True, 'import numpy as np\n'), ((8282, 8306), 'numpy.sum', 'np.sum', (['endpoint_degrees'], {}), '(endpoint_degrees)\n', (8288, 8306), True, 'import numpy as np\n'), ((8325, 8377), 'numpy.zeros', 'np.zeros', (['(num_paths + _buffer_size_offset)'], {'dtype': 'int'}), '(num_paths + _buffer_size_offset, dtype=int)\n', (8333, 8377), True, 'import numpy as np\n'), ((8955, 8984), 'numpy.zeros', 'np.zeros', (['n_points'], {'dtype': 'int'}), '(n_points, dtype=int)\n', (8963, 8984), True, 'import numpy as np\n'), ((9001, 9042), 'numpy.zeros', 'np.zeros', (['path_indices.shape'], {'dtype': 'float'}), '(path_indices.shape, dtype=float)\n', (9009, 9042), True, 'import numpy as np\n'), ((9166, 9258), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(path_data[:n], path_indices[:n], path_indptr[:m])'], {'shape': '(m - 1, n)'}), '((path_data[:n], path_indices[:n], path_indptr[:m]), shape\n =(m - 1, n))\n', (9183, 9258), False, 'from scipy import sparse, ndimage as ndi\n'), ((17648, 17704), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['skel.graph'], {'directed': '(False)'}), '(skel.graph, directed=False)\n', (17676, 17704), False, 'from scipy.sparse import csgraph\n'), ((18176, 18201), 'numpy.full', 'np.full', (['deg_src.shape', '(2)'], {}), '(deg_src.shape, 2)\n', (18183, 18201), 
True, 'import numpy as np\n'), ((19236, 19257), 'pandas.DataFrame', 'pd.DataFrame', (['summary'], {}), '(summary)\n', (19248, 19257), True, 'import pandas as pd\n'), ((21001, 21030), 'numpy.maximum', 'np.maximum', (['csmat.data', 'tdata'], {}), '(csmat.data, tdata)\n', (21011, 21030), True, 'import numpy as np\n'), ((23439, 23470), 'numpy.zeros', 'np.zeros', (['skel.shape'], {'dtype': 'int'}), '(skel.shape, dtype=int)\n', (23447, 23470), True, 'import numpy as np\n'), ((23567, 23600), 'numpy.arange', 'np.arange', (['pixel_indices.shape[0]'], {}), '(pixel_indices.shape[0])\n', (23576, 23600), True, 'import numpy as np\n'), ((23622, 23642), 'numpy.ones', 'np.ones', (['((3,) * ndim)'], {}), '((3,) * ndim)\n', (23629, 23642), True, 'import numpy as np\n'), ((24321, 24341), 'numpy.sum', 'np.sum', (['degree_image'], {}), '(degree_image)\n', (24327, 24341), True, 'import numpy as np\n'), ((30556, 30577), 'numpy.diff', 'np.diff', (['graph.indptr'], {}), '(graph.indptr)\n', (30563, 30577), True, 'import numpy as np\n'), ((30592, 30627), 'numpy.zeros', 'np.zeros', (['degrees.shape'], {'dtype': 'bool'}), '(degrees.shape, dtype=bool)\n', (30600, 30627), True, 'import numpy as np\n'), ((30675, 30701), 'numpy.sum', 'np.sum', (['degrees[endpoints]'], {}), '(degrees[endpoints])\n', (30681, 30701), True, 'import numpy as np\n'), ((30715, 30773), 'numpy.zeros', 'np.zeros', (['(num_paths + buffer_size_offset, 5)'], {'dtype': 'float'}), '((num_paths + buffer_size_offset, 5), dtype=float)\n', (30723, 30773), True, 'import numpy as np\n'), ((33192, 33239), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['g'], {'directed': '(False)'}), '(g, directed=False)\n', (33220, 33239), False, 'from scipy.sparse import csgraph\n'), ((34364, 34470), 'numpy.column_stack', 'np.column_stack', (['(skeleton_id, stats, coords_img0, coords_img1, coords_real0, coords_real1,\n distances)'], {}), '((skeleton_id, stats, coords_img0, coords_img1, coords_real0,\n coords_real1, 
distances))\n', (34379, 34470), True, 'import numpy as np\n'), ((35309, 35332), 'pandas.DataFrame', 'pd.DataFrame', (['data_dict'], {}), '(data_dict)\n', (35321, 35332), True, 'import pandas as pd\n'), ((36039, 36065), 'numpy.ones', 'np.ones', (['((3,) * image.ndim)'], {}), '((3,) * image.ndim)\n', (36046, 36065), True, 'import numpy as np\n'), ((36129, 36154), 'numpy.nonzero', 'np.nonzero', (['labeled_image'], {}), '(labeled_image)\n', (36139, 36154), True, 'import numpy as np\n'), ((36197, 36215), 'numpy.bincount', 'np.bincount', (['nzpix'], {}), '(nzpix)\n', (36208, 36215), True, 'import numpy as np\n'), ((36229, 36245), 'numpy.transpose', 'np.transpose', (['nz'], {}), '(nz)\n', (36241, 36245), True, 'import numpy as np\n'), ((36261, 36278), 'numpy.argsort', 'np.argsort', (['nzpix'], {}), '(nzpix)\n', (36271, 36278), True, 'import numpy as np\n'), ((1106, 1140), 'numpy.broadcast_to', 'np.broadcast_to', (['(1.0)', 'csr.shape[0]'], {}), '(1.0, csr.shape[0])\n', (1121, 1140), True, 'import numpy as np\n'), ((1255, 1290), 'numpy.array', 'np.array', (['csr.shape'], {'dtype': 'np.int32'}), '(csr.shape, dtype=np.int32)\n', (1263, 1290), True, 'import numpy as np\n'), ((3171, 3184), 'numpy.min', 'np.min', (['steps'], {}), '(steps)\n', (3177, 3184), True, 'import numpy as np\n'), ((5279, 5292), 'numpy.min', 'np.min', (['steps'], {}), '(steps)\n', (5285, 5292), True, 'import numpy as np\n'), ((12588, 12634), 'numpy.issubdtype', 'np.issubdtype', (['skeleton_image.dtype', 'np.float_'], {}), '(skeleton_image.dtype, np.float_)\n', (12601, 12634), True, 'import numpy as np\n'), ((13138, 13173), 'numpy.empty', 'np.empty', (['self.n_paths'], {'dtype': 'float'}), '(self.n_paths, dtype=float)\n', (13146, 13173), True, 'import numpy as np\n'), ((13346, 13372), 'numpy.diff', 'np.diff', (['self.graph.indptr'], {}), '(self.graph.indptr)\n', (13353, 13372), True, 'import numpy as np\n'), ((16579, 16635), 'numpy.add.reduceat', 'np.add.reduceat', (['self.paths.data', 
'self.paths.indptr[:-1]'], {}), '(self.paths.data, self.paths.indptr[:-1])\n', (16594, 16635), True, 'import numpy as np\n'), ((16654, 16680), 'numpy.diff', 'np.diff', (['self.paths.indptr'], {}), '(self.paths.indptr)\n', (16661, 16680), True, 'import numpy as np\n'), ((16999, 17051), 'numpy.add.reduceat', 'np.add.reduceat', (['(data * data)', 'self.paths.indptr[:-1]'], {}), '(data * data, self.paths.indptr[:-1])\n', (17014, 17051), True, 'import numpy as np\n'), ((17070, 17096), 'numpy.diff', 'np.diff', (['self.paths.indptr'], {}), '(self.paths.indptr)\n', (17077, 17096), True, 'import numpy as np\n'), ((20468, 20494), 'numpy.unique', 'np.unique', (['junction_labels'], {}), '(junction_labels)\n', (20477, 20494), True, 'import numpy as np\n'), ((23259, 23285), 'numpy.ones', 'np.ones', (['ndim'], {'dtype': 'float'}), '(ndim, dtype=float)\n', (23266, 23285), True, 'import numpy as np\n'), ((32969, 32995), 'numpy.ones', 'np.ones', (['ndim'], {'dtype': 'float'}), '(ndim, dtype=float)\n', (32976, 32995), True, 'import numpy as np\n'), ((33310, 33347), 'numpy.issubdtype', 'np.issubdtype', (['image.dtype', 'np.float_'], {}), '(image.dtype, np.float_)\n', (33323, 33347), True, 'import numpy as np\n'), ((33393, 33442), 'scipy.ndimage.map_coordinates', 'ndi.map_coordinates', (['image', 'coords_img.T'], {'order': '(3)'}), '(image, coords_img.T, order=3)\n', (33412, 33442), True, 'from scipy import sparse, ndimage as ndi\n'), ((33953, 34003), 'scipy.ndimage.map_coordinates', 'ndi.map_coordinates', (['image', 'coords_img0.T'], {'order': '(3)'}), '(image, coords_img0.T, order=3)\n', (33972, 34003), True, 'from scipy import sparse, ndimage as ndi\n'), ((34027, 34074), 'numpy.column_stack', 'np.column_stack', (['(height_coords0, coords_real0)'], {}), '((height_coords0, coords_real0))\n', (34042, 34074), True, 'import numpy as np\n'), ((34100, 34150), 'scipy.ndimage.map_coordinates', 'ndi.map_coordinates', (['image', 'coords_img1.T'], {'order': '(3)'}), '(image, coords_img1.T, 
order=3)\n', (34119, 34150), True, 'from scipy import sparse, ndimage as ndi\n'), ((34174, 34221), 'numpy.column_stack', 'np.column_stack', (['(height_coords1, coords_real1)'], {}), '((height_coords1, coords_real1))\n', (34189, 34221), True, 'import numpy as np\n'), ((34246, 34296), 'numpy.sum', 'np.sum', (['((coords_real0 - coords_real1) ** 2)'], {'axis': '(1)'}), '((coords_real0 - coords_real1) ** 2, axis=1)\n', (34252, 34296), True, 'import numpy as np\n'), ((36086, 36116), 'scipy.ndimage.label', 'ndi.label', (['image', 'connectivity'], {}), '(image, connectivity)\n', (36095, 36116), True, 'from scipy import sparse, ndimage as ndi\n'), ((1741, 1790), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(data[:k], (row[:k], col[:k]))'], {}), '((data[:k], (row[:k], col[:k])))\n', (1758, 1790), False, 'from scipy import sparse, ndimage as ndi\n'), ((8873, 8901), 'numpy.sum', 'np.sum', (['(endpoint_degrees - 1)'], {}), '(endpoint_degrees - 1)\n', (8879, 8901), True, 'import numpy as np\n'), ((12663, 12717), 'scipy.ndimage.map_coordinates', 'ndi.map_coordinates', (['skeleton_image', 'coords.T'], {'order': '(3)'}), '(skeleton_image, coords.T, order=3)\n', (12682, 12717), True, 'from scipy import sparse, ndimage as ndi\n'), ((13397, 13416), 'numpy.asarray', 'np.asarray', (['spacing'], {}), '(spacing)\n', (13407, 13416), True, 'import numpy as np\n'), ((13474, 13511), 'numpy.full', 'np.full', (['skeleton_image.ndim', 'spacing'], {}), '(skeleton_image.ndim, spacing)\n', (13481, 13511), True, 'import numpy as np\n'), ((17154, 17203), 'numpy.clip', 'np.clip', (['(sumsq / lengths - means * means)', '(0)', 'None'], {}), '(sumsq / lengths - means * means, 0, None)\n', (17161, 17203), True, 'import numpy as np\n'), ((19206, 19219), 'numpy.ones', 'np.ones', (['ndim'], {}), '(ndim)\n', (19213, 19219), True, 'import numpy as np\n'), ((20862, 20910), 'numpy.sum', 'np.sum', (['((neighbor_locations - jloc) ** 2)'], {'axis': '(1)'}), '((neighbor_locations - jloc) ** 2, axis=1)\n', 
(20868, 20910), True, 'import numpy as np\n'), ((36324, 36340), 'numpy.cumsum', 'np.cumsum', (['sizes'], {}), '(sizes)\n', (36333, 36340), True, 'import numpy as np\n'), ((13424, 13444), 'numpy.isscalar', 'np.isscalar', (['spacing'], {}), '(spacing)\n', (13435, 13444), True, 'import numpy as np\n'), ((23397, 23413), 'numpy.nonzero', 'np.nonzero', (['skel'], {}), '(skel)\n', (23407, 23413), True, 'import numpy as np\n'), ((24258, 24286), 'numpy.unique', 'np.unique', (['labeled_junctions'], {}), '(labeled_junctions)\n', (24267, 24286), True, 'import numpy as np\n'), ((5612, 5669), 'numpy.sqrt', 'np.sqrt', (['(distances[j] ** 2 + (height[i] - height[n]) ** 2)'], {}), '(distances[j] ** 2 + (height[i] - height[n]) ** 2)\n', (5619, 5669), True, 'import numpy as np\n')] |
#!/usr/bin/env python
####################################################################
### This is the PYTHON version of program 6.4 from page 203 of #
### "Modeling Infectious Disease in humans and animals" #
### by Keeling & Rohani. #
### #
### It is the SIR model (including births and deaths) with #
### (event-driven) demographic stochasticity approximated using #
### the tau-leap method and assuming Poisson distributions. #
### #
### This is a more complex stochastic model as 6 events are #
### possible: infection, recovery, birth, death of susceptible, #
### death of infected, death of recovered. #
### #
### Note: by default we are using a very small population size #
### to highlight the stochasticity. #
####################################################################
###################################
### Written by <NAME> #
### <EMAIL> (work) #
### <EMAIL> #
###################################
import numpy as np
import pylab as pl
# Model parameters (time unit: days; see stoc_eqs for how each is used).
beta = 1.0  # transmission rate
gamma = 1 / 10.0  # recovery rate (mean infectious period of 10 days)
mu = 5e-4  # per-capita birth and death rate
N0 = 5000.0  # initial population size
### You may want to try with population size of 50 (small) to see the events
### In this case uncomment the next line
# N0=50.0
ND = MaxTime = 2 * 365.0  # simulation horizon: two years
# Initial conditions near the endemic equilibrium of the SIR model.
Y0 = pl.ceil(mu * N0 / gamma)  # initial infected
X0 = pl.floor(gamma * N0 / beta)  # initial susceptible
Z0 = N0 - X0 - Y0  # initial recovered
tau = 1.0  # tau-leap step size (days)
INPUT = np.array((X0, Y0, Z0))  # state vector (X, Y, Z)
def stoc_eqs(INP):
    """Advance the state one tau-leap step.

    Six events are possible: infection, recovery, birth, and death of a
    susceptible, infected or recovered individual. The number of each
    event in a step of length ``tau`` is drawn from a Poisson
    distribution, capped so that no compartment goes negative.

    Parameters
    ----------
    INP : array of float
        Current state vector (X, Y, Z) = (susceptible, infected,
        recovered).

    Returns
    -------
    V : array of float
        The state after one step. ``INP`` is not modified.
    """
    V = INP
    Rate = np.zeros(6)
    Change = np.zeros((6, 3))
    N = V[0] + V[1] + V[2]
    # Event rates and the corresponding change in (X, Y, Z).
    Rate[0] = beta * V[0] * V[1] / N  # infection
    Change[0, :] = [-1, +1, 0]
    Rate[1] = gamma * V[1]  # recovery
    Change[1, :] = [0, -1, +1]
    Rate[2] = mu * N  # birth
    Change[2, :] = [+1, 0, 0]
    Rate[3] = mu * V[0]  # death of susceptible
    Change[3, :] = [-1, 0, 0]
    Rate[4] = mu * V[1]  # death of infected
    Change[4, :] = [0, 0 - 1, 0][1:] if False else [0, -1, 0]
    Change[4, :] = [0, -1, 0]
    Rate[5] = mu * V[2]  # death of recovered
    Change[5, :] = [0, 0, -1]
    for i in range(6):
        Num = np.random.poisson(Rate[i] * tau)
        ## Make sure things don't go negative.
        # (The original used matplotlib's ``pl.find``, which was removed
        # in matplotlib 3.1; it also failed under Python 3 for the birth
        # event, whose change vector has no negative entry.)
        losses = V[Change[i, :] < 0]
        Use = min(Num, losses.min()) if losses.size else Num
        V = V + Change[i, :] * Use
    return V
def Stoch_Iteration(INPUT):
    """Run the tau-leap simulation over the global time grid ``T``.

    Parameters
    ----------
    INPUT : array of float
        Initial state vector (X0, Y0, Z0).

    Returns
    -------
    [S, I, R] : list of three lists
        Time series of susceptible, infected and recovered counts.
        Each list carries a leading placeholder 0, which the caller
        discards with ``[1:]``.
    """
    S = [0]
    I = [0]
    R = [0]
    # One step per grid point; the loop index itself is unused.
    for _ in T:
        res = stoc_eqs(INPUT)
        S.append(INPUT[0])
        I.append(INPUT[1])
        R.append(INPUT[2])
        INPUT = res
    return [S, I, R]
# Time grid in days, then run the stochastic simulation.
T = np.arange(0.0, ND, tau)
[S, I, R] = Stoch_Iteration(INPUT)
# Convert time to years and drop the leading placeholder 0 from each series.
tT = np.array(T) / 365.0
tS = np.array(S)[
    1:,
]
tI = np.array(I)[
    1:,
]
tR = np.array(R)[
    1:,
]
# Three stacked panels: susceptible, infectious, recovered vs. time.
pl.subplot(311)
pl.plot(tT, tS, "g")
# pl.xlabel ('Time (years)')
pl.ylabel("Susceptible")
pl.subplot(312)
pl.plot(tT, tI, "r")
# pl.xlabel ('Time (years)')
pl.ylabel("Infectious")
pl.subplot(313)
pl.plot(tT, tR, "k")
pl.xlabel("Time (years)")
pl.ylabel("Recovered")
pl.show()
| [
"pylab.ceil",
"pylab.show",
"pylab.ylabel",
"numpy.zeros",
"pylab.floor",
"pylab.subplot",
"pylab.find",
"numpy.array",
"numpy.arange",
"pylab.xlabel",
"numpy.random.poisson",
"pylab.plot"
] | [((1310, 1334), 'pylab.ceil', 'pl.ceil', (['(mu * N0 / gamma)'], {}), '(mu * N0 / gamma)\n', (1317, 1334), True, 'import pylab as pl\n'), ((1340, 1367), 'pylab.floor', 'pl.floor', (['(gamma * N0 / beta)'], {}), '(gamma * N0 / beta)\n', (1348, 1367), True, 'import pylab as pl\n'), ((1404, 1426), 'numpy.array', 'np.array', (['(X0, Y0, Z0)'], {}), '((X0, Y0, Z0))\n', (1412, 1426), True, 'import numpy as np\n'), ((2354, 2377), 'numpy.arange', 'np.arange', (['(0.0)', 'ND', 'tau'], {}), '(0.0, ND, tau)\n', (2363, 2377), True, 'import numpy as np\n'), ((2524, 2539), 'pylab.subplot', 'pl.subplot', (['(311)'], {}), '(311)\n', (2534, 2539), True, 'import pylab as pl\n'), ((2540, 2560), 'pylab.plot', 'pl.plot', (['tT', 'tS', '"""g"""'], {}), "(tT, tS, 'g')\n", (2547, 2560), True, 'import pylab as pl\n'), ((2590, 2614), 'pylab.ylabel', 'pl.ylabel', (['"""Susceptible"""'], {}), "('Susceptible')\n", (2599, 2614), True, 'import pylab as pl\n'), ((2615, 2630), 'pylab.subplot', 'pl.subplot', (['(312)'], {}), '(312)\n', (2625, 2630), True, 'import pylab as pl\n'), ((2631, 2651), 'pylab.plot', 'pl.plot', (['tT', 'tI', '"""r"""'], {}), "(tT, tI, 'r')\n", (2638, 2651), True, 'import pylab as pl\n'), ((2681, 2704), 'pylab.ylabel', 'pl.ylabel', (['"""Infectious"""'], {}), "('Infectious')\n", (2690, 2704), True, 'import pylab as pl\n'), ((2705, 2720), 'pylab.subplot', 'pl.subplot', (['(313)'], {}), '(313)\n', (2715, 2720), True, 'import pylab as pl\n'), ((2721, 2741), 'pylab.plot', 'pl.plot', (['tT', 'tR', '"""k"""'], {}), "(tT, tR, 'k')\n", (2728, 2741), True, 'import pylab as pl\n'), ((2742, 2767), 'pylab.xlabel', 'pl.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (2751, 2767), True, 'import pylab as pl\n'), ((2768, 2790), 'pylab.ylabel', 'pl.ylabel', (['"""Recovered"""'], {}), "('Recovered')\n", (2777, 2790), True, 'import pylab as pl\n'), ((2791, 2800), 'pylab.show', 'pl.show', ([], {}), '()\n', (2798, 2800), True, 'import pylab as pl\n'), ((1471, 1482), 
'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1479, 1482), True, 'import numpy as np\n'), ((1498, 1514), 'numpy.zeros', 'np.zeros', (['(6, 3)'], {}), '((6, 3))\n', (1506, 1514), True, 'import numpy as np\n'), ((2419, 2430), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (2427, 2430), True, 'import numpy as np\n'), ((2444, 2455), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (2452, 2455), True, 'import numpy as np\n'), ((2472, 2483), 'numpy.array', 'np.array', (['I'], {}), '(I)\n', (2480, 2483), True, 'import numpy as np\n'), ((2500, 2511), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (2508, 2511), True, 'import numpy as np\n'), ((1918, 1950), 'numpy.random.poisson', 'np.random.poisson', (['(Rate[i] * tau)'], {}), '(Rate[i] * tau)\n', (1935, 1950), True, 'import numpy as np\n'), ((2023, 2048), 'pylab.find', 'pl.find', (['(Change[i, :] < 0)'], {}), '(Change[i, :] < 0)\n', (2030, 2048), True, 'import pylab as pl\n')] |
"""Demo: splitting NumPy arrays with array_split() and hsplit()."""
import numpy as np

# ---- splitting a 1-D array ----
arr1 = np.arange(1, 9)              # the integers 1..8
new_arr = np.array_split(arr1, 3)
print(new_arr)
# Note: np.split() also exists, but it requires the length to divide evenly;
# array_split() instead shortens the trailing chunks (sizes 3, 3, 2 here),
# so it succeeds where split() would raise an error.
# Each chunk is a normal array and can be accessed individually:
for chunk in new_arr:
    print(chunk)

# ---- splitting a 2-D array ----
arr2 = np.arange(1, 13).reshape(6, 2)
new_arr2 = np.array_split(arr2, 3)      # three 2x2 blocks along rows (axis=0)
print(new_arr2)
# Split along columns instead (axis=1); with only 2 columns the third
# piece comes out empty.
new_arr2 = np.array_split(arr2, 3, axis=1)
print(new_arr2)
# hsplit() is the dedicated column-wise splitter (vsplit()/dsplit() are the
# analogues of vstack()/dstack()).
new_arr2 = np.hsplit(arr2, 2)
print(new_arr2)
| [
"numpy.array_split",
"numpy.array",
"numpy.hsplit"
] | [((27, 61), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8])\n', (35, 61), True, 'import numpy as np\n'), ((72, 95), 'numpy.array_split', 'np.array_split', (['arr1', '(3)'], {}), '(arr1, 3)\n', (86, 95), True, 'import numpy as np\n'), ((457, 518), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]'], {}), '([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])\n', (465, 518), True, 'import numpy as np\n'), ((530, 553), 'numpy.array_split', 'np.array_split', (['arr2', '(3)'], {}), '(arr2, 3)\n', (544, 553), True, 'import numpy as np\n'), ((637, 668), 'numpy.array_split', 'np.array_split', (['arr2', '(3)'], {'axis': '(1)'}), '(arr2, 3, axis=1)\n', (651, 668), True, 'import numpy as np\n'), ((732, 750), 'numpy.hsplit', 'np.hsplit', (['arr2', '(2)'], {}), '(arr2, 2)\n', (741, 750), True, 'import numpy as np\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on July 10, 2013
@author: alfoa
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
import time
import sys
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .PostProcessorInterface import PostProcessorInterface
from utils import InputData, InputTypes
import Files
#Internal Modules End-----------------------------------------------------------
class TopologicalDecomposition(PostProcessorInterface):
  """
    TopologicalDecomposition class - Computes an approximated hierarchical
    Morse-Smale decomposition from an input point cloud consisting of an
    arbitrary number of input parameters and a response value per input point
  """
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    ## This will replace the lines above
    inputSpecification = super().getInputSpecification()
    # Each sub-node below mirrors one of the XML options parsed in _handleInput
    TDGraphInput = InputData.parameterInputFactory("graph", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDGraphInput)
    TDGradientInput = InputData.parameterInputFactory("gradient", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDGradientInput)
    TDBetaInput = InputData.parameterInputFactory("beta", contentType=InputTypes.FloatType)
    inputSpecification.addSub(TDBetaInput)
    TDKNNInput = InputData.parameterInputFactory("knn", contentType=InputTypes.IntegerType)
    inputSpecification.addSub(TDKNNInput)
    TDWeightedInput = InputData.parameterInputFactory("weighted", contentType=InputTypes.StringType) #bool
    inputSpecification.addSub(TDWeightedInput)
    TDInteractiveInput = InputData.parameterInputFactory("interactive", contentType=InputTypes.StringType) #bool
    inputSpecification.addSub(TDInteractiveInput)
    TDPersistenceInput = InputData.parameterInputFactory("persistence", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDPersistenceInput)
    TDSimplificationInput = InputData.parameterInputFactory("simplification", contentType=InputTypes.FloatType)
    inputSpecification.addSub(TDSimplificationInput)
    TDParametersInput = InputData.parameterInputFactory("parameters", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDParametersInput)
    TDResponseInput = InputData.parameterInputFactory("response", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDResponseInput)
    TDNormalizationInput = InputData.parameterInputFactory("normalization", contentType=InputTypes.StringType)
    inputSpecification.addSub(TDNormalizationInput)
    return inputSpecification
  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    # accepted values for the corresponding XML options (validated in _handleInput)
    self.acceptedGraphParam = ['approximate knn', 'delaunay', 'beta skeleton', \
                               'relaxed beta skeleton']
    self.acceptedPersistenceParam = ['difference','probability','count']#,'area']
    self.acceptedGradientParam = ['steepest', 'maxflow']
    self.acceptedNormalizationParam = ['feature', 'zscore', 'none']
    # Some default arguments
    self.gradient = 'steepest'
    self.graph = 'beta skeleton'
    self.beta = 1
    self.knn = -1
    self.simplification = 0
    self.persistence = 'difference'
    self.normalization = None
    self.weighted = False
    self.parameters = {}
  def inputToInternal(self, currentInp):
    """
      Function to convert the incoming input into a usable format
      @ In, currentInp, list or DataObjects, The input object to process
      @ Out, inputDict, dict, the converted input
    """
    # TODO typechecking against what currentInp can be; so far it's a length=1 list with a dataobject inside
    currentInp = currentInp[0]
    currentInp.asDataset()
    # nowadays, our only input should be DataObject
    ## if no "type", then you're not a PointSet or HistorySet
    if not hasattr(currentInp,'type') or currentInp.type != 'PointSet':
      self.raiseAnError(IOError, self.__class__.__name__,
                        ' postprocessor only accepts PointSet DataObjects for input. ',
                        ' Requested: ', type(currentInp))
    # now we know we have a PointSet
    ## TODO FIXME maintaining old structure for now, in the future convert to use DataObject directly
    ## and not bother with inputToInternal
    ## This works particularly well since we only accept point sets.
    data = currentInp.asDataset(outType='dict')['data']
    inputDict = {'features':dict((var,data[var]) for var in self.parameters['features']),
                 'targets' :dict((var,data[var]) for var in self.parameters['targets' ]),
                 'metadata':currentInp.getMeta(general=True)}
    #if 'PointProbability' in currentInp.getVars():
    inputDict['metadata']['PointProbability'] = currentInp.getVarValues('PointProbability').values
    #else:
    #  raise NotImplementedError # TODO
    return inputDict
  def _handleInput(self, paramInput):
    """
      Function to handle the parsed paramInput for this class.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    for child in paramInput.subparts:
      if child.getName() == "graph":
        self.graph = child.value.lower()
        if self.graph not in self.acceptedGraphParam:
          self.raiseAnError(IOError, 'Requested unknown graph type: ',
                            self.graph, '. Available options: ',
                            self.acceptedGraphParam)
      elif child.getName() == "gradient":
        self.gradient = child.value.lower()
        if self.gradient not in self.acceptedGradientParam:
          self.raiseAnError(IOError, 'Requested unknown gradient method: ',
                            self.gradient, '. Available options: ',
                            self.acceptedGradientParam)
      elif child.getName() == "beta":
        self.beta = child.value
        if self.beta <= 0 or self.beta > 2:
          self.raiseAnError(IOError, 'Requested invalid beta value: ',
                            self.beta, '. Allowable range: (0,2]')
      elif child.getName() == 'knn':
        self.knn = child.value
      elif child.getName() == 'simplification':
        self.simplification = child.value
      elif child.getName() == 'persistence':
        self.persistence = child.value.lower()
        if self.persistence not in self.acceptedPersistenceParam:
          self.raiseAnError(IOError, 'Requested unknown persistence method: ',
                            self.persistence, '. Available options: ',
                            self.acceptedPersistenceParam)
      elif child.getName() == 'parameters':
        self.parameters['features'] = child.value.strip().split(',')
        # NOTE(review): this loop assigns each element to itself (a no-op);
        # presumably it was meant to strip whitespace from each name -- confirm.
        for i, parameter in enumerate(self.parameters['features']):
          self.parameters['features'][i] = self.parameters['features'][i]
      elif child.getName() == 'weighted':
        self.weighted = child.value in ['True', 'true']
      elif child.getName() == 'response':
        self.parameters['targets'] = child.value
      elif child.getName() == 'normalization':
        self.normalization = child.value.lower()
        if self.normalization not in self.acceptedNormalizationParam:
          self.raiseAnError(IOError, 'Requested unknown normalization type: ',
                            self.normalization, '. Available options: ',
                            self.acceptedNormalizationParam)
    # register metadata
    self.addMetaKeys(['maxLabel','minLabel'])
  def collectOutput(self, finishedJob, output):
    """
      Function to place all of the computed data into the output object
      @ In, finishedJob, JobHandler External or Internal instance, A JobHandler object that is in charge of running this post-processor
      @ In, output, dataObjects, The object where we want to place our computed results
      @ Out, None
    """
    evaluation = finishedJob.getEvaluation()
    inputList,outputDict = evaluation
    if output.type == 'PointSet':
      # TODO this is a slow dict-based implementation. It should be improved on need.
      # TODO can inputList ever be multiple dataobjects?
      if len(inputList) > 1:
        self.raiseAnError(NotImplementedError, 'Need to implement looping over all inputs.')
      fromInput = inputList[0].asDataset('dict')['data']
      results = dict((var,fromInput[var]) for var in output.getVars() if var in fromInput.keys())
      for label in ['minLabel','maxLabel']:
        results[label] = outputDict[label]
      output.load(results,style='dict')
      output.addMeta(self.type,{'general':{'hierarchy':outputDict['hierarchy']}})
      return
    #### OLD ####
    # legacy code path kept for non-PointSet outputs
    requestedInput = output.getParaKeys('input')
    requestedOutput = output.getParaKeys('output')
    dataLength = None
    for inputData in inputList:
      # Pass inputs from input data to output data
      for key, value in inputData.getParametersValues('input').items():
        if key in requestedInput:
          # We need the size to ensure the data size is consistent, but there
          # is no guarantee the data is not scalar, so this check is necessary
          myLength = 1
          if hasattr(value, "__len__"):
            myLength = len(value)
          if dataLength is None:
            dataLength = myLength
          elif dataLength != myLength:
            dataLength = max(dataLength, myLength)
            self.raiseAWarning('Data size is inconsistent. Currently set to '
                               + str(dataLength) + '.')
          for val in value:
            output.updateInputValue(key, val)
      # Pass outputs from input data to output data
      for key, value in inputData.getParametersValues('output').items():
        if key in requestedOutput:
          # We need the size to ensure the data size is consistent, but there
          # is no guarantee the data is not scalar, so this check is necessary
          myLength = 1
          if hasattr(value, "__len__"):
            myLength = len(value)
          if dataLength is None:
            dataLength = myLength
          elif dataLength != myLength:
            dataLength = max(dataLength, myLength)
            self.raiseAWarning('Data size is inconsistent. Currently set to '
                               + str(dataLength) + '.')
          for val in value:
            output.updateOutputValue(key, val)
    # Append the min/max labels to the data whether the user wants them or
    # not, and place the hierarchy information into the metadata
    for key, values in outputDict.items():
      if key in ['minLabel', 'maxLabel']:
        for value in values:
          output.updateOutputValue(key, [value])
      elif key in ['hierarchy']:
        output.updateMetadata(key, [values])
      else:
        self.raiseAWarning('Output type ' + type(output).__name__ + ' not'
                           + ' yet implemented. I am going to skip it.')
  def userInteraction(self):
    """
      A placeholder for allowing user's to interact and tweak the model in-situ
      before saving the analysis results
      @ In, None
      @ Out, None
    """
    pass
  def run(self, inputIn):
    """
      Function to finalize the filter => execute the filtering
      @ In, inputIn, dict, dictionary of data to process
      @ Out, outputDict, dict, Dictionary containing the post-processed results
    """
    internalInput = self.inputToInternal(inputIn)
    outputDict = {}
    myDataIn = internalInput['features']
    myDataOut = internalInput['targets']
    self.outputData = myDataOut[self.parameters['targets']]
    self.pointCount = len(self.outputData)
    self.dimensionCount = len(self.parameters['features'])
    # assemble the (num_points, num_features) input matrix column by column
    self.inputData = np.zeros((self.pointCount, self.dimensionCount))
    for i, lbl in enumerate(self.parameters['features']):
      self.inputData[:, i] = myDataIn[lbl]
    if self.weighted:
      self.weights = internalInput['metadata']['PointProbability']
    else:
      self.weights = None
    self.names = self.parameters['features'] + [self.parameters['targets']]
    self.__amsc = None
    # the interactive subclass may build the AMSC object in the UI here
    self.userInteraction()
    ## Possibly load this here in case people have trouble building it, so it
    ## only errors if they try to use it?
    from AMSC.AMSC_Object import AMSC_Object
    if self.__amsc is None:
      self.__amsc = AMSC_Object(X=self.inputData, Y=self.outputData,
                                w=self.weights, names=self.names,
                                graph=self.graph, gradient=self.gradient,
                                knn=self.knn, beta=self.beta,
                                normalization=self.normalization,
                                persistence=self.persistence, debug=False)
    self.__amsc.Persistence(self.simplification)
    partitions = self.__amsc.Partitions()
    # each point is labeled by the (min, max) extremum pair of its partition
    outputDict['minLabel'] = np.zeros(self.pointCount)
    outputDict['maxLabel'] = np.zeros(self.pointCount)
    for extPair, indices in partitions.items():
      for idx in indices:
        outputDict['minLabel'][idx] = extPair[0]
        outputDict['maxLabel'][idx] = extPair[1]
    outputDict['hierarchy'] = self.__amsc.PrintHierarchy()
    self.__amsc.BuildModels()
    linearFits = self.__amsc.SegmentFitCoefficients()
    linearFitnesses = self.__amsc.SegmentFitnesses()
    for key in linearFits.keys():
      coefficients = linearFits[key]
      rSquared = linearFitnesses[key]
      outputDict['coefficients_%d_%d' % (key[0], key[1])] = coefficients
      outputDict['R2_%d_%d' % (key[0], key[1])] = rSquared
    return outputDict
# Qt is an optional dependency: prefer PySide (Qt4-era), fall back to PySide2
# (Qt5).  When neither is importable, __QtAvailable stays False and the
# interactive UI classes below are simply not defined.
try:
  import PySide.QtCore as qtc
  __QtAvailable = True
except ImportError as e:
  try:
    import PySide2.QtCore as qtc
    __QtAvailable = True
  except ImportError as e:
    __QtAvailable = False
if __QtAvailable:
  class mQTopologicalDecomposition(type(TopologicalDecomposition), type(qtc.QObject)):
    """
        Class used to solve the metaclass conflict
    """
    pass
  class QTopologicalDecomposition(TopologicalDecomposition, qtc.QObject, metaclass=mQTopologicalDecomposition):
    """
      TopologicalDecomposition class - Computes an approximated hierarchical
      Morse-Smale decomposition from an input point cloud consisting of an
      arbitrary number of input parameters and a response value per input point
    """
    # signal emitted to ask the main thread to build a UI window:
    # (window type, unique id, keyword dict forwarded to the window)
    requestUI = qtc.Signal(str,str,dict)
    @classmethod
    def getInputSpecification(cls):
      """
        Method to get a reference to a class that specifies the input data for
        class cls.
        @ In, cls, the class for which we are retrieving the specification
        @ Out, inputSpecification, InputData.ParameterInput, class to use for
          specifying input of cls.
      """
      inputSpecification = super(QTopologicalDecomposition, cls).getInputSpecification()
      inputSpecification.addSub(InputData.parameterInputFactory("interactive"))
      return inputSpecification
    def __init__(self):
      """
        Constructor
        @ In, None
        @ Out, None
      """
      super().__init__()
      # TopologicalDecomposition.__init__(self)
      # qtc.QObject.__init__(self)
      self.interactive = False
      self.uiDone = True ## If it has not been requested, then we are not waiting for a UI
    def _localWhatDoINeed(self):
      """
        This method is a local mirror of the general whatDoINeed method.
        It is implemented by the samplers that need to request special objects
        @ In , None, None
        @ Out, needDict, list of objects needed
      """
      return {'internal':[(None,'app')]}
    def _localGenerateAssembler(self,initDict):
      """
        Generates the assembler.
        @ In, initDict, dict of init objects
        @ Out, None
      """
      self.app = initDict['internal']['app']
      # no Qt application available -> silently degrade to batch mode
      if self.app is None:
        self.interactive = False
    def _localReadMoreXML(self, xmlNode):
      """
        Function to grab the names of the methods this post-processor will be
        using
        @ In, xmlNode : Xml element node
        @ Out, None
      """
      paramInput = QTopologicalDecomposition.getInputSpecification()()
      paramInput.parseNode(xmlNode)
      self._handleInput(paramInput)
    def _handleInput(self, paramInput):
      """
        Function to handle the parsed paramInput for this class.
        @ In, paramInput, ParameterInput, the already parsed input.
        @ Out, None
      """
      TopologicalDecomposition._handleInput(self, paramInput)
      for child in paramInput.subparts:
        if child.getName() == 'interactive':
          self.interactive = True
    def userInteraction(self):
      """
        Launches an interface allowing the user to tweak specific model
        parameters before saving the results to the output object(s).
        @ In, None
        @ Out, None
      """
      self.uiDone = not self.interactive
      if self.interactive:
        ## Connect our own signal to the slot on the main thread
        self.requestUI.connect(self.app.createUI)
        ## Connect our own slot to listen for whenver the main thread signals a
        ## window has been closed
        self.app.windowClosed.connect(self.signalDone)
        ## Give this UI a unique id in case other threads are requesting UI
        ## elements
        uiID = str(id(self))
        ## Send the request for a UI thread to the main application
        self.requestUI.emit('TopologyWindow', uiID,
                            {'X':self.inputData, 'Y':self.outputData,
                             'w':self.weights, 'names':self.names,
                             'graph':self.graph, 'gradient': self.gradient,
                             'knn':self.knn, 'beta':self.beta,
                             'normalization':self.normalization,
                             'views': ['TopologyMapView', 'SensitivityView',
                                       'FitnessView', 'ScatterView2D',
                                       'ScatterView3D']})
        ## Spinlock will wait until this instance's window has been closed
        while(not self.uiDone):
          time.sleep(1)
        ## First check that the requested UI exists, and then if that UI has the
        ## requested information, if not proceed as if it were not an
        ## interactive session.
        if uiID in self.app.UIs and hasattr(self.app.UIs[uiID],'amsc'):
          # NOTE(review): self.__amsc name-mangles here to
          # _QTopologicalDecomposition__amsc, while the base class run()
          # reads its own _TopologicalDecomposition__amsc attribute --
          # verify the AMSC object built in the UI is actually reused by run().
          self.__amsc = self.app.UIs[uiID].amsc
          self.simplification = self.app.UIs[uiID].amsc.Persistence()
        else:
          self.__amsc = None
    def signalDone(self,uiID):
      """
        In Qt language, this is a slot that will accept a signal from the UI
        saying that it has completed, thus allowing the computation to begin
        again with information updated by the user in the UI.
        @In, uiID, string, the ID of the user interface that signaled its
          completion. Thus, if several UI windows are open, we don't proceed,
          until the correct one has signaled it is done.
        @Out, None
      """
      if uiID == str(id(self)):
        self.uiDone = True
| [
"AMSC.AMSC_Object.AMSC_Object",
"utils.InputData.parameterInputFactory",
"numpy.zeros",
"time.sleep",
"PySide2.QtCore.Signal"
] | [((1936, 2011), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""graph"""'], {'contentType': 'InputTypes.StringType'}), "('graph', contentType=InputTypes.StringType)\n", (1967, 2011), False, 'from utils import InputData, InputTypes\n'), ((2079, 2157), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""gradient"""'], {'contentType': 'InputTypes.StringType'}), "('gradient', contentType=InputTypes.StringType)\n", (2110, 2157), False, 'from utils import InputData, InputTypes\n'), ((2224, 2297), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""beta"""'], {'contentType': 'InputTypes.FloatType'}), "('beta', contentType=InputTypes.FloatType)\n", (2255, 2297), False, 'from utils import InputData, InputTypes\n'), ((2359, 2433), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""knn"""'], {'contentType': 'InputTypes.IntegerType'}), "('knn', contentType=InputTypes.IntegerType)\n", (2390, 2433), False, 'from utils import InputData, InputTypes\n'), ((2499, 2577), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""weighted"""'], {'contentType': 'InputTypes.StringType'}), "('weighted', contentType=InputTypes.StringType)\n", (2530, 2577), False, 'from utils import InputData, InputTypes\n'), ((2657, 2743), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""interactive"""'], {'contentType': 'InputTypes.StringType'}), "('interactive', contentType=InputTypes.\n StringType)\n", (2688, 2743), False, 'from utils import InputData, InputTypes\n'), ((2821, 2907), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""persistence"""'], {'contentType': 'InputTypes.StringType'}), "('persistence', contentType=InputTypes.\n StringType)\n", (2852, 2907), False, 'from utils import InputData, InputTypes\n'), ((2982, 3070), 'utils.InputData.parameterInputFactory', 
'InputData.parameterInputFactory', (['"""simplification"""'], {'contentType': 'InputTypes.FloatType'}), "('simplification', contentType=InputTypes.\n FloatType)\n", (3013, 3070), False, 'from utils import InputData, InputTypes\n'), ((3144, 3229), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""parameters"""'], {'contentType': 'InputTypes.StringType'}), "('parameters', contentType=InputTypes.StringType\n )\n", (3175, 3229), False, 'from utils import InputData, InputTypes\n'), ((3297, 3375), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""response"""'], {'contentType': 'InputTypes.StringType'}), "('response', contentType=InputTypes.StringType)\n", (3328, 3375), False, 'from utils import InputData, InputTypes\n'), ((3451, 3539), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""normalization"""'], {'contentType': 'InputTypes.StringType'}), "('normalization', contentType=InputTypes.\n StringType)\n", (3482, 3539), False, 'from utils import InputData, InputTypes\n'), ((12863, 12911), 'numpy.zeros', 'np.zeros', (['(self.pointCount, self.dimensionCount)'], {}), '((self.pointCount, self.dimensionCount))\n', (12871, 12911), True, 'import numpy as np\n'), ((13996, 14021), 'numpy.zeros', 'np.zeros', (['self.pointCount'], {}), '(self.pointCount)\n', (14004, 14021), True, 'import numpy as np\n'), ((14051, 14076), 'numpy.zeros', 'np.zeros', (['self.pointCount'], {}), '(self.pointCount)\n', (14059, 14076), True, 'import numpy as np\n'), ((15469, 15495), 'PySide2.QtCore.Signal', 'qtc.Signal', (['str', 'str', 'dict'], {}), '(str, str, dict)\n', (15479, 15495), True, 'import PySide2.QtCore as qtc\n'), ((13482, 13728), 'AMSC.AMSC_Object.AMSC_Object', 'AMSC_Object', ([], {'X': 'self.inputData', 'Y': 'self.outputData', 'w': 'self.weights', 'names': 'self.names', 'graph': 'self.graph', 'gradient': 'self.gradient', 'knn': 'self.knn', 'beta': 'self.beta', 'normalization': 
'self.normalization', 'persistence': 'self.persistence', 'debug': '(False)'}), '(X=self.inputData, Y=self.outputData, w=self.weights, names=self\n .names, graph=self.graph, gradient=self.gradient, knn=self.knn, beta=\n self.beta, normalization=self.normalization, persistence=self.\n persistence, debug=False)\n', (13493, 13728), False, 'from AMSC.AMSC_Object import AMSC_Object\n'), ((15974, 16020), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""interactive"""'], {}), "('interactive')\n", (16005, 16020), False, 'from utils import InputData, InputTypes\n'), ((19236, 19249), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (19246, 19249), False, 'import time\n')] |
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import logging
import numpy as np
from scipy.special import gammaln
from ..hyp_defs import float_cpu
from ..hyp_model import HypModel
from ..utils.math import int2onehot, logdet_pdmat, invert_pdmat, softmax
class LinearGBE(HypModel):
"""Linear Gaussian Back-end.
Attributes:
mu: mean of the classes (num_classes, x_dim)
W: Within-class precision, shared for all classes (x_dim, x_dim)
update_mu: if True, it updates the means when calling the fit function.
update_W: if True, it updates the precision when calling the fit function.
x_dim: dimension of the input features.
num_classes: number of classes.
balance_class_weight: if True, all classes have the same weight in the estimation of W.
beta: beta param of Gaussian-Wishart distribution.
nu: nu (deegres of freedom) param of Wishart distribution.
prior: LinearGBE object containing a prior mean, precision, beta, nu (used for adaptation).
prior_beta: if given, it overwrites beta in the prior object.
prior_nu: if given, it overwrites nu in the prior object.
post_beta: if given, it fixes the value of beta in the posterior, overwriting the beta computed by the fit function.
post_nu: if given, it fixes the value of nu in the posterior, overwriting the beta computed by the fit function.
"""
    def __init__(
        self,
        mu=None,
        W=None,
        update_mu=True,
        update_W=True,
        x_dim=1,
        num_classes=None,
        balance_class_weight=True,
        beta=None,
        nu=None,
        prior=None,
        prior_beta=None,
        prior_nu=None,
        post_beta=None,
        post_nu=None,
        **kwargs
    ):
        """Initializes the model; see the class docstring for the meaning of
        each argument.  Extra kwargs are forwarded to the base class."""
        super().__init__(**kwargs)
        if mu is not None:
            # when the class means are given, infer the dimensions from them,
            # overriding whatever num_classes/x_dim were passed in
            num_classes = mu.shape[0]
            x_dim = mu.shape[1]
        self.mu = mu
        self.W = W
        self.update_mu = update_mu
        self.update_W = update_W
        self.x_dim = x_dim
        self.num_classes = num_classes
        self.balance_class_weight = balance_class_weight
        # A and b are the linear-scoring params derived from (mu, W); they are
        # refreshed by _compute_Ab() whenever mu/W are available
        self.A = None
        self.b = None
        self.prior = prior
        self.beta = beta
        self.nu = nu
        self.prior_beta = prior_beta
        self.prior_nu = prior_nu
        self.post_beta = post_beta
        self.post_nu = post_nu
        self._compute_Ab()
def get_config(self):
"""
Returns:
Dictionary with the hyperparameters of the model.
"""
config = {
"update_mu": self.update_mu,
"update_W": self.update_W,
"x_dim": self.x_dim,
"num_classes": self.num_classes,
"balance_class_weight": self.balance_class_weight,
"prior_beta": self.prior_beta,
"prior_nu": self.prior_nu,
"post_beta": self.post_beta,
"post_nu": self.post_nu,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
    def _load_prior(self):
        """Lazily loads the prior LinearGBE when it was given as a file path,
        and overrides its beta/nu params when prior_beta/prior_nu were given."""
        if isinstance(self.prior, str):
            self.prior = LinearGBE.load(self.prior)
        num_classes = self.prior.mu.shape[0]
        if self.prior_beta is not None:
            # one beta per class, all set to the same user-given value
            self.prior.beta = self.prior_beta * np.ones(
                (num_classes,), dtype=float_cpu()
            )
        if self.prior_nu is not None:
            self.prior.nu = num_classes * self.prior_nu
def _change_post_r(self):
if self.post_beta is not None:
self.beta = self.post_beta * np.ones((self.num_classes,), dtype=float_cpu())
if self.post_nu is not None:
self.nu = self.num_classes * self.post_nu
def eval_linear(self, x):
"""Evals the class unnormalized log-likelihoods. which reduces to a linear function.
Args:
x: input features (num_trials, x_dim).
Returns:
Log-likelihoods (num_trials, num_classes).
"""
return np.dot(x, self.A) + self.b
    def eval_llk(self, x):
        """Evals the class log-likelihoods
        Args:
            x: input features (num_trials, x_dim).
        Returns:
            Log-likelihoods (num_trials, num_classes).
        """
        # class-dependent linear part, precomputed in A and b by _compute_Ab()
        logp = np.dot(x, self.A) + self.b
        # normalization constant plus the quadratic term -0.5 x^T W x, which is
        # shared across classes because the precision W is tied
        K = 0.5 * logdet_pdmat(self.W) - 0.5 * self.x_dim * np.log(2 * np.pi)
        K += -0.5 * np.sum(np.dot(x, self.W) * x, axis=1, keepdims=True)
        logp += K
        return logp
def eval_predictive(self, x):
"""Evals the log-predictive distribution, taking into account the uncertainty in mu and W.
It involves evaluating the Student-t distributions. For this we need to give priors
to the model parameters.
Args:
x: input features (num_trials, x_dim).
Returns:
Log-likelihoods (num_trials, num_classes).
"""
K = self.W / self.nu
c = self.nu + 1 - self.x_dim
r = self.beta / (self.beta + 1)
# T(mu, L, c) ; L = c r K
logg = (
gammaln((c + self.x_dim) / 2)
- gammaln(c / 2)
- 0.5 * self.x_dim * np.log(c * np.pi)
)
# 0.5*log|L| = 0.5*log|K| + 0.5*d*log(c r)
logK = logdet_pdmat(K)
logL_div_2 = 0.5 * logK + 0.5 * self.x_dim * r
# delta2_0 = (x-mu)^T W (x-mu)
delta2_0 = np.sum(np.dot(x, self.W) * x, axis=1, keepdims=True) - 2 * (
np.dot(x, self.A) + self.b
)
# delta2 = (x-mu)^T L (x-mu) = c r delta0 / nu
# delta2/c = r delta0 / nu
delta2_div_c = r * delta2_0 / self.nu
D = -0.5 * (c + self.x_dim) * np.log(1 + delta2_div_c)
logging.debug(self.nu)
logging.debug(c)
logging.debug(self.x_dim)
logging.debug(logg)
logging.debug(logL_div_2.shape)
logging.debug(D.shape)
logp = logg + logL_div_2 + D
return logp
def predict(self, x, eval_method="linear", normalize=False):
"""Evaluates the Gaussian back-end.
Args:
x: input features (num_trials, x_dim).
eval_method: evaluation method can be linear (evaluates linear function),
llk (evaluates exact log-likelihood),
or predictive (evaluates the predictive distribution).
normalize: if True, normalize log-likelihoods transforming them into log-posteriors.
Returns:
Log-LLK or log-posterior scores (num_trials, num_classes).
"""
if eval_method == "linear":
logp = self.eval_linear(x)
elif eval_method == "llk":
logp = self.eval_llk(x)
elif eval_method == "predictive":
logp = self.eval_predictive(x)
else:
raise ValueError("wrong eval method %s" % eval_method)
if normalize:
logp = np.log(softmax(logp, axis=1))
return logp
def __call__(self, x, eval_method="linear", normalize=False):
"""Evaluates the Gaussian back-end.
Args:
x: input features (num_trials, x_dim).
eval_method: evaluation method can be linear (evaluates linear function),
llk (evaluates exact log-likelihood),
or predictive (evaluates the predictive distribution).
normalize: if True, normalize log-likelihoods transforming them into log-posteriors.
Returns:
Log-LLK or log-posterior scores (num_trials, num_classes).
"""
return self.predict(x, eval_method, normalize)
    def fit(self, x, class_ids=None, p_theta=None, sample_weight=None):
        """Trains the parameters of the model.
        Args:
            x: input features (num_samples, x_dim)
            class_ids: integer vector (num_samples,) with elements in [0, num_classes)
                indicating the class of each example.
            p_theta: alternative to class_ids, it is a matrix (num_samples, num_classes)
                indicating the prob. for example i to belong to class j.
            sample_weight: indicates the weight of each sample in the estimation of the parameters (num_samples,).
        """
        assert class_ids is not None or p_theta is not None
        # MAP adaptation is enabled whenever a prior model was given
        do_map = True if self.prior is not None else False
        if do_map:
            self._load_prior()
        self.x_dim = x.shape[-1]
        if self.num_classes is None:
            if class_ids is not None:
                self.num_classes = np.max(class_ids) + 1
            else:
                self.num_classes = p_theta.shape[-1]
        if class_ids is not None:
            # hard labels -> one-hot responsibilities
            p_theta = int2onehot(class_ids, self.num_classes)
        if sample_weight is not None:
            p_theta = sample_weight[:, None] * p_theta
        # N: (soft) counts per class; F: (soft) feature sums per class
        N = np.sum(p_theta, axis=0)
        F = np.dot(p_theta.T, x)
        if self.update_mu:
            xbar = F / N[:, None]
            if do_map:
                # interpolate prior means and ML means with relevance factor beta
                alpha_mu = (N / (N + self.prior.beta))[:, None]
                self.mu = (1 - alpha_mu) * self.prior.mu + alpha_mu * xbar
                self.beta = N + self.prior.beta
            else:
                self.mu = xbar
                self.beta = N
        else:
            xbar = self.mu
        if self.update_W:
            if do_map:
                nu0 = self.prior.nu
                # S0: prior covariance (inverse of the prior precision)
                S0 = invert_pdmat(self.prior.W, return_inv=True)[-1]
                if self.balance_class_weight:
                    alpha_W = (N / (N + nu0 / self.num_classes))[:, None]
                    S = (self.num_classes - np.sum(alpha_W)) * S0
                else:
                    S = nu0 * S0
            else:
                nu0 = 0
                S = np.zeros((x.shape[1], x.shape[1]), dtype=float_cpu())
            for k in range(self.num_classes):
                # accumulate the within-class scatter of class k
                delta = x - xbar[k]
                S_k = np.dot(p_theta[:, k] * delta.T, delta)
                if do_map and self.update_mu:
                    # extra scatter term from the uncertainty of the adapted mean
                    mu_delta = xbar[k] - self.prior.mu[k]
                    S_k += N[k] * (1 - alpha_mu[k]) * np.outer(mu_delta, mu_delta)
                if self.balance_class_weight:
                    S_k /= N[k] + nu0 / self.num_classes
                S += S_k
            if self.balance_class_weight:
                S /= self.num_classes
            else:
                S /= nu0 + np.sum(N)
            # W is the precision: invert the pooled within-class covariance
            self.W = invert_pdmat(S, return_inv=True)[-1]
            self.nu = np.sum(N) + nu0
        self._change_post_r()
        self._compute_Ab()
def save_params(self, f):
params = {"mu": self.mu, "W": self.W, "beta": self.beta, "nu": self.nu}
self._save_params_from_dict(f, params)
@classmethod
def load_params(cls, f, config):
param_list = ["mu", "W", "beta", "nu"]
params = cls._load_params_to_dict(f, config["name"], param_list)
kwargs = dict(list(config.items()) + list(params.items()))
return cls(**kwargs)
def _compute_Ab(self):
"""Computes the rotation and bias parameters for the linear scoring."""
if self.mu is not None and self.W is not None:
self.A = np.dot(self.W, self.mu.T)
self.b = -0.5 * np.sum(self.mu.T * self.A, axis=0)
@staticmethod
def filter_class_args(**kwargs):
"""Extracts the hyperparams of the class from a dictionary.
Returns:
Hyperparamter dictionary to initialize the class.
"""
valid_args = (
"update_mu",
"update_W",
"no_update_mu",
"no_update_W",
"balance_class_weight",
"prior",
"prior_beta",
"prior_nu",
"post_beta",
"post_nu",
"name",
)
d = dict((k, kwargs[k]) for k in valid_args if k in kwargs)
if "no_update_mu" in d:
d["update_mu"] = not d["no_update_mu"]
if "no_update_W" in d:
d["update_W"] = not d["no_update_W"]
return d
filter_train_args = filter_class_args
@staticmethod
def add_class_args(parser, prefix=None):
"""It adds the arguments corresponding to the class to jsonarparse.
Args:
parser: jsonargparse object
prefix: argument prefix.
"""
if prefix is None:
p1 = "--"
else:
p1 = "--" + prefix + "."
parser.add_argument(
p1 + "no-update-mu",
default=False,
action="store_true",
help="do not update mu",
)
parser.add_argument(
p1 + "no-update-W",
default=False,
action="store_true",
help="do not update W",
)
parser.add_argument(
p1 + "balance-class-weight",
default=False,
action="store_true",
help="Balances the weight of each class when computing W",
)
parser.add_argument(
p1 + "prior", default=None, help="prior file for MAP adaptation"
)
parser.add_argument(
p1 + "prior-beta",
default=16,
type=float,
help="relevance factor for the means",
)
parser.add_argument(
p1 + "prior-nu",
default=16,
type=float,
help="relevance factor for the variances",
)
parser.add_argument(
p1 + "post-beta",
default=None,
type=float,
help="relevance factor for the means",
)
parser.add_argument(
p1 + "post-nu",
default=None,
type=float,
help="relevance factor for the variances",
)
parser.add_argument(p1 + "name", default="lgbe", help="model name")
@staticmethod
def filter_eval_args(prefix, **kwargs):
"""Extracts the evaluation time hyperparams of the class from a dictionary.
Returns:
Hyperparameters to evaluate the class.
"""
valid_args = ("model_file", "normalize", "eval_method")
return dict((k, kwargs[k]) for k in valid_args if k in kwargs)
@staticmethod
def add_eval_args(parser, prefix=None):
"""It adds the arguments needed to evaluate the class to jsonarparse.
Args:
parser: jsonargparse object
prefix: argument prefix.
"""
if prefix is None:
p1 = "--"
else:
p1 = "--" + prefix + "."
parser.add_argument(p1 + "model-file", required=True, help=("model file"))
parser.add_argument(
p1 + "normalize",
default=False,
action="store_true",
help=("normalizes the ouput probabilities to sum to one"),
)
parser.add_argument(
p1 + "eval-method",
default="linear",
choices=["linear", "llk", "predictive"],
help=(
"evaluates full gaussian likelihood, linear function"
"or predictive distribution"
),
)
add_argparse_args = add_class_args
add_argparse_train_args = add_class_args
add_argparse_eval_args = add_eval_args
| [
"numpy.outer",
"logging.debug",
"numpy.log",
"numpy.sum",
"numpy.max",
"scipy.special.gammaln",
"numpy.dot"
] | [((5791, 5813), 'logging.debug', 'logging.debug', (['self.nu'], {}), '(self.nu)\n', (5804, 5813), False, 'import logging\n'), ((5822, 5838), 'logging.debug', 'logging.debug', (['c'], {}), '(c)\n', (5835, 5838), False, 'import logging\n'), ((5847, 5872), 'logging.debug', 'logging.debug', (['self.x_dim'], {}), '(self.x_dim)\n', (5860, 5872), False, 'import logging\n'), ((5881, 5900), 'logging.debug', 'logging.debug', (['logg'], {}), '(logg)\n', (5894, 5900), False, 'import logging\n'), ((5909, 5940), 'logging.debug', 'logging.debug', (['logL_div_2.shape'], {}), '(logL_div_2.shape)\n', (5922, 5940), False, 'import logging\n'), ((5949, 5971), 'logging.debug', 'logging.debug', (['D.shape'], {}), '(D.shape)\n', (5962, 5971), False, 'import logging\n'), ((8900, 8923), 'numpy.sum', 'np.sum', (['p_theta'], {'axis': '(0)'}), '(p_theta, axis=0)\n', (8906, 8923), True, 'import numpy as np\n'), ((8937, 8957), 'numpy.dot', 'np.dot', (['p_theta.T', 'x'], {}), '(p_theta.T, x)\n', (8943, 8957), True, 'import numpy as np\n'), ((4093, 4110), 'numpy.dot', 'np.dot', (['x', 'self.A'], {}), '(x, self.A)\n', (4099, 4110), True, 'import numpy as np\n'), ((4354, 4371), 'numpy.dot', 'np.dot', (['x', 'self.A'], {}), '(x, self.A)\n', (4360, 4371), True, 'import numpy as np\n'), ((5758, 5782), 'numpy.log', 'np.log', (['(1 + delta2_div_c)'], {}), '(1 + delta2_div_c)\n', (5764, 5782), True, 'import numpy as np\n'), ((11227, 11252), 'numpy.dot', 'np.dot', (['self.W', 'self.mu.T'], {}), '(self.W, self.mu.T)\n', (11233, 11252), True, 'import numpy as np\n'), ((4441, 4458), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4447, 4458), True, 'import numpy as np\n'), ((5156, 5185), 'scipy.special.gammaln', 'gammaln', (['((c + self.x_dim) / 2)'], {}), '((c + self.x_dim) / 2)\n', (5163, 5185), False, 'from scipy.special import gammaln\n'), ((5200, 5214), 'scipy.special.gammaln', 'gammaln', (['(c / 2)'], {}), '(c / 2)\n', (5207, 5214), False, 'from scipy.special import gammaln\n'), 
((5248, 5265), 'numpy.log', 'np.log', (['(c * np.pi)'], {}), '(c * np.pi)\n', (5254, 5265), True, 'import numpy as np\n'), ((9967, 10005), 'numpy.dot', 'np.dot', (['(p_theta[:, k] * delta.T)', 'delta'], {}), '(p_theta[:, k] * delta.T, delta)\n', (9973, 10005), True, 'import numpy as np\n'), ((10540, 10549), 'numpy.sum', 'np.sum', (['N'], {}), '(N)\n', (10546, 10549), True, 'import numpy as np\n'), ((11281, 11315), 'numpy.sum', 'np.sum', (['(self.mu.T * self.A)'], {'axis': '(0)'}), '(self.mu.T * self.A, axis=0)\n', (11287, 11315), True, 'import numpy as np\n'), ((4486, 4503), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (4492, 4503), True, 'import numpy as np\n'), ((5480, 5497), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (5486, 5497), True, 'import numpy as np\n'), ((5546, 5563), 'numpy.dot', 'np.dot', (['x', 'self.A'], {}), '(x, self.A)\n', (5552, 5563), True, 'import numpy as np\n'), ((8603, 8620), 'numpy.max', 'np.max', (['class_ids'], {}), '(class_ids)\n', (8609, 8620), True, 'import numpy as np\n'), ((10449, 10458), 'numpy.sum', 'np.sum', (['N'], {}), '(N)\n', (10455, 10458), True, 'import numpy as np\n'), ((10164, 10192), 'numpy.outer', 'np.outer', (['mu_delta', 'mu_delta'], {}), '(mu_delta, mu_delta)\n', (10172, 10192), True, 'import numpy as np\n'), ((9669, 9684), 'numpy.sum', 'np.sum', (['alpha_W'], {}), '(alpha_W)\n', (9675, 9684), True, 'import numpy as np\n')] |
import numpy as np
from pathlib import Path
from typing import List
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input, Conv1D, BatchNormalization
from tensorflow.keras.layers import MaxPooling1D, Dropout, Flatten, Dense
from tensorflow.keras.losses import Reduction
from tensorflow.keras import Model, regularizers
from tensorflow_addons.optimizers import LazyAdam
from tensorflow_addons.losses import SigmoidFocalCrossEntropy
from .Meta import MetaModel, ModelType, PromoterType
class PromoterLCNN(MetaModel):
def __init__(self, name: str, load_paths: List[Path], m_type: ModelType = ModelType.SAVED_MODEL):
self.name = name
self.models = []
if (p_len := len(load_paths)) != 2:
raise ValueError(f"'load_paths' must be a list of two items, got {p_len} instead")
if m_type == ModelType.SAVED_MODEL:
for path in load_paths:
self.models.append(load_model(path))
else:
for path in load_paths:
is_first = len(self.models) == 0
self.models.append(self._create_model(is_first))
self.models[-1].load_weights(path)
@staticmethod
def _preprocess(data: List[str]) -> List[np.ndarray]:
## Check Seqs Length
for i, seq in enumerate(data):
if ( seq_len := len(seq) ) != 81:
raise ValueError(f"Each sequence must have a length of 81nt.\nSequence {i} has length {seq_len}nt")
encoding = {'A':[1,0,0,0],'T':[0,1,0,0],'C':[0,0,1,0],'G':[0,0,0,1]}
return [np.array([[encoding[x] for x in seq.upper()] for seq in data])]
@staticmethod
def _create_model(types: bool):
if types:
out_classes = 6
else:
out_classes = 2
# input
input_ = Input(shape =(81,4))
# 1st Conv Block
# Params for first Conv1D
hp_filters_1 = 128
hp_kernel_1 = 5
hp_kreg_1 = 1e-3
hp_breg_1 = 1e-2
# Params for second Conv1D
hp_filters_2 = 128
hp_kernel_2 = 9
hp_kreg_2 = 1e-3
hp_breg_2 = 1e-5
# Params for Dropout
hp_drop_1 = 0.45
x = Conv1D (filters=hp_filters_1, kernel_size=hp_kernel_1, padding ='same', activation='relu', kernel_regularizer = regularizers.l2(hp_kreg_1), bias_regularizer = regularizers.l2(hp_breg_1))(input_)
x = Conv1D (filters=hp_filters_2, kernel_size=hp_kernel_2, padding ='same', activation='relu', kernel_regularizer = regularizers.l2(hp_kreg_2), bias_regularizer = regularizers.l2(hp_breg_2))(x)
x = BatchNormalization()(x)
x = MaxPooling1D(pool_size =2, strides =2, padding ='same')(x)
x = Dropout(rate=hp_drop_1)(x)
# Fully connected layers
x = Flatten()(x)
hp_units = 32
x = Dense(units=hp_units, activation ='relu', kernel_regularizer = regularizers.l2(1e-3), bias_regularizer = regularizers.l2(1e-3))(x)
output = Dense(units = out_classes, activation ='softmax')(x)
# Creating the model
model = Model (inputs=input_, outputs=output)
model.compile(optimizer=LazyAdam(), loss=SigmoidFocalCrossEntropy(alpha=0.20, gamma=3, reduction=Reduction.AUTO), metrics=['accuracy'])
return model
def predict(self, data: List[str]):
# So much memory usage!!!
tmp_preds = {seq : PromoterType.NON_PROMOTER for seq in data}
preds = np.full(len(data), PromoterType.NON_PROMOTER, dtype=object)
data_array = np.array(data)
encoded = self._preprocess(data)
predictions = self.models[0].predict(encoded).argmax(axis=1).ravel()
# Get index for future use
indices_zero, indices_nonzero = self._generate_indices(predictions)
# Do not update return preds, as NON_PROMOTER is the default value
# Update arrays for next stage
# NOTE: Since Promoter is the positive class, and only
# in this first prediction, the nonzero indices are
# passed to the next model
data_array = data_array[indices_nonzero]
for i in range(len(encoded)):
encoded[i] = encoded[i][indices_nonzero]
still_left = True
if data_array.size == 0:
# Nothing left to classify
still_left = False
if still_left:
# Second stage, multiclass model
predictions = self.models[1].predict(encoded).argmax(axis=1).ravel()
# Update return_preds
tmp_preds.update({seq : PromoterType(predictions[i] + 2) for i, seq in enumerate(data_array)})
# NOTE: That +2 offset appears as a consequence of the training
for i, seq in enumerate(data):
preds[i] = tmp_preds[seq]
return preds
def train(self):
return "Not implemented yet!" | [
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow_addons.optimizers.LazyAdam",
"tensorflow.keras.Model",
"nump... | [((1847, 1867), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(81, 4)'}), '(shape=(81, 4))\n', (1852, 1867), False, 'from tensorflow.keras.layers import Input, Conv1D, BatchNormalization\n'), ((3115, 3151), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'input_', 'outputs': 'output'}), '(inputs=input_, outputs=output)\n', (3120, 3151), False, 'from tensorflow.keras import Model, regularizers\n'), ((3566, 3580), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3574, 3580), True, 'import numpy as np\n'), ((2641, 2661), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2659, 2661), False, 'from tensorflow.keras.layers import Input, Conv1D, BatchNormalization\n'), ((2677, 2729), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=2, strides=2, padding='same')\n", (2689, 2729), False, 'from tensorflow.keras.layers import MaxPooling1D, Dropout, Flatten, Dense\n'), ((2748, 2771), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'hp_drop_1'}), '(rate=hp_drop_1)\n', (2755, 2771), False, 'from tensorflow.keras.layers import MaxPooling1D, Dropout, Flatten, Dense\n'), ((2821, 2830), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2828, 2830), False, 'from tensorflow.keras.layers import MaxPooling1D, Dropout, Flatten, Dense\n'), ((3016, 3062), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'out_classes', 'activation': '"""softmax"""'}), "(units=out_classes, activation='softmax')\n", (3021, 3062), False, 'from tensorflow.keras.layers import MaxPooling1D, Dropout, Flatten, Dense\n'), ((3185, 3195), 'tensorflow_addons.optimizers.LazyAdam', 'LazyAdam', ([], {}), '()\n', (3193, 3195), False, 'from tensorflow_addons.optimizers import LazyAdam\n'), ((3202, 3272), 'tensorflow_addons.losses.SigmoidFocalCrossEntropy', 'SigmoidFocalCrossEntropy', ([], {'alpha': '(0.2)', 'gamma': '(3)', 
'reduction': 'Reduction.AUTO'}), '(alpha=0.2, gamma=3, reduction=Reduction.AUTO)\n', (3226, 3272), False, 'from tensorflow_addons.losses import SigmoidFocalCrossEntropy\n'), ((962, 978), 'tensorflow.keras.models.load_model', 'load_model', (['path'], {}), '(path)\n', (972, 978), False, 'from tensorflow.keras.models import load_model\n'), ((2344, 2370), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['hp_kreg_1'], {}), '(hp_kreg_1)\n', (2359, 2370), False, 'from tensorflow.keras import Model, regularizers\n'), ((2391, 2417), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['hp_breg_1'], {}), '(hp_breg_1)\n', (2406, 2417), False, 'from tensorflow.keras import Model, regularizers\n'), ((2551, 2577), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['hp_kreg_2'], {}), '(hp_kreg_2)\n', (2566, 2577), False, 'from tensorflow.keras import Model, regularizers\n'), ((2598, 2624), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['hp_breg_2'], {}), '(hp_breg_2)\n', (2613, 2624), False, 'from tensorflow.keras import Model, regularizers\n'), ((2931, 2953), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (2946, 2953), False, 'from tensorflow.keras import Model, regularizers\n'), ((2973, 2995), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (2988, 2995), False, 'from tensorflow.keras import Model, regularizers\n')] |
# -*- coding: utf-8 -*-
'''
该文件为可视化界面定义文件,设置基本布局样式及功能响应
'''
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import pandas as pd
import numpy as np
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1070, 888)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
Form.setFont(font)
self.tabWidget = QtWidgets.QTabWidget(Form)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1071, 891))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setSizeIncrement(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
font.setStrikeOut(False)
self.tabWidget.setFont(font)
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tabWidget.setIconSize(QtCore.QSize(20, 20))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.label = QtWidgets.QLabel(self.tab)
self.label.setGeometry(QtCore.QRect(40, 0, 130, 71))
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(self.tab)
self.lineEdit.setGeometry(QtCore.QRect(40, 60, 981, 61))
self.lineEdit.setObjectName("lineEdit")
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(40, 120, 130, 71))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.tab)
self.label_3.setGeometry(QtCore.QRect(40, 690, 130, 71))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.tab)
self.label_4.setGeometry(QtCore.QRect(200, 690, 130, 71))
self.label_4.setObjectName("label_4")
font = QtGui.QFont()
font.setPointSize(18)
# font.setBold(True)
self.label.setFont(font)
self.label_2.setFont(font)
self.label_3.setFont(font)
self.label_4.setFont(font)
self.lineEdit.setFont(font)
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setGeometry(QtCore.QRect(250, 760, 571, 61))
font = QtGui.QFont()
font.setPointSize(18)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.textEdit = QtWidgets.QTextEdit(self.tab)
self.textEdit.setGeometry(QtCore.QRect(40, 180, 981, 511))
self.textEdit.setObjectName("textEdit")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.label_5 = QtWidgets.QLabel(self.tab_2)
self.label_5.setGeometry(QtCore.QRect(30, 10, 171, 61))
self.label_5.setObjectName("label_5")
self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_2.setGeometry(QtCore.QRect(30, 60, 741, 51))
self.pushButton_2.setObjectName("pushButton_2")
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_2.setGeometry(QtCore.QRect(30, 120, 1001, 51))
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_3.setGeometry(QtCore.QRect(790, 60, 241, 51))
self.pushButton_3.setObjectName("pushButton_3")
self.label_6 = QtWidgets.QLabel(self.tab_2)
self.label_6.setGeometry(QtCore.QRect(30, 170, 291, 61))
self.label_6.setObjectName("label_6")
self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_3.setGeometry(QtCore.QRect(30, 350, 1001, 51))
self.lineEdit_3.setObjectName("lineEdit_3")
self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_4.setGeometry(QtCore.QRect(30, 290, 741, 51))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_5.setGeometry(QtCore.QRect(790, 290, 241, 51))
self.pushButton_5.setObjectName("pushButton_5")
self.label_7 = QtWidgets.QLabel(self.tab_2)
self.label_7.setGeometry(QtCore.QRect(30, 220, 251, 61))
self.label_7.setObjectName("label_7")
self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_4.setGeometry(QtCore.QRect(160, 230, 871, 51))
self.lineEdit_4.setObjectName("lineEdit_4")
self.pushButton_6 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_6.setGeometry(QtCore.QRect(250, 420, 561, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.pushButton_6.setFont(font)
self.pushButton_6.setObjectName("pushButton_6")
# 为按钮绑定功能函数
self.pushButton_2.clicked.connect(self.uploadfile) # 选择新闻excel文件
self.pushButton_3.clicked.connect(self.preview) # 预览excel文件
self.pushButton_4.clicked.connect(self.choosefile) # 设置输出文件路径
self.pushButton_5.clicked.connect(self.clear) # 清空选择
self.tableWidget = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget.setGeometry(QtCore.QRect(30, 490, 1001, 351))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.tabWidget.addTab(self.tab_2, "")
# 设置背景图片
self.tabWidget.setStyleSheet("background-image: url(./resources/background.png);") # 设置背景图片
self.retranslateUi(Form)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "新闻智分系统"))
Form.setWindowIcon(QIcon('resources//logo.ico'))
self.label.setText(_translate("Form", "新闻标题:"))
self.label_2.setText(_translate("Form", "新闻正文:"))
self.label_3.setText(_translate("Form", "新闻类别:"))
self.label_4.setText(_translate("Form", "无"))
self.pushButton.setText(_translate("Form", "进行新闻文本分类"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Form", " 单条新闻文本分类 "))
self.label_5.setText(_translate("Form", "批量导入新闻:"))
self.pushButton_2.setText(_translate("Form", "选择要进行分类的新闻Excel文件"))
self.lineEdit_2.setPlaceholderText(_translate("Form", "选择的Excel文件路径"))
self.pushButton_3.setText(_translate("Form", "预览文件"))
self.label_6.setText(_translate("Form", "设置结果文件输出路径:"))
self.lineEdit_3.setPlaceholderText(_translate("Form", "结果文件输出路径"))
self.pushButton_4.setText(_translate("Form", "选择输出文件夹"))
self.pushButton_5.setText(_translate("Form", "清空选择"))
self.label_7.setText(_translate("Form", "文件名称:"))
self.lineEdit_4.setPlaceholderText(_translate("Form", "输出文件名称"))
self.pushButton_6.setText(_translate("Form", "进行新闻文本分类"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Form", " 批量新闻文本分类 "))
self.label.setStyleSheet("background: transparent;")
self.label_2.setStyleSheet("background: transparent;")
self.label_3.setStyleSheet("background: transparent;")
self.label_4.setStyleSheet("background: transparent;")
self.label_5.setStyleSheet("background: transparent;")
self.label_6.setStyleSheet("background: transparent;")
self.label_7.setStyleSheet("background: transparent;")
self.lineEdit.setStyleSheet("background: transparent;")
self.textEdit.setStyleSheet("background: transparent;")
self.lineEdit_2.setStyleSheet("background: transparent;")
self.lineEdit_3.setStyleSheet("background: transparent;")
self.lineEdit_4.setStyleSheet("background: transparent;")
self.tableWidget.setStyleSheet("background: transparent;")
self.pushButton.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:5px;}QPushButton:hover{background:#00FFFF;}''')
self.pushButton_2.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:5px;}QPushButton:hover{background:#00FFFF;}''')
self.pushButton_3.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:5px;}QPushButton:hover{background:#00FFFF;}''')
self.pushButton_4.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:15px;}QPushButton:hover{background:#00FFFF;}''')
self.pushButton_5.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:15px;}QPushButton:hover{background:#00FFFF;}''')
self.pushButton_6.setStyleSheet(
'''QPushButton{background:#AFEEEE;border-radius:15px;}QPushButton:hover{background:#00FFFF;}''')
font = QtGui.QFont()
font.setPointSize(14)
self.textEdit.setFont(font)
# 添加功能函数:
# 选择excel文件
def uploadfile(self, Filepath):
x, _ = QtWidgets.QFileDialog.getOpenFileName(None, "选取excel文件", "./", "*.xlsx")
self.lineEdit_2.setText(x)
# 预览excel文件
def preview(self):
excelFilepath = self.lineEdit_2.text()
if excelFilepath == '':
QMessageBox().warning(None, "警告", "请先选择要进行分类的Excel文件!", QMessageBox.Close)
else:
input_table = pd.read_excel(excelFilepath)
input_table_rows = input_table.shape[0]
input_table_colunms = input_table.shape[1]
input_table_header = input_table.columns.values.tolist()
###===========读取表格,转换表格,============================================
###======================给tablewidget设置行列表头============================
self.tableWidget.setColumnCount(input_table_colunms)
self.tableWidget.setRowCount(input_table_rows)
self.tableWidget.setHorizontalHeaderLabels(input_table_header)
# 自定义分配列宽
self.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)
###======================给tablewidget设置行列表头============================
###================遍历表格每个元素,同时添加到tablewidget中========================
for i in range(input_table_rows):
input_table_rows_values = input_table.iloc[[i]]
#print(input_table_rows_values)
input_table_rows_values_array = np.array(input_table_rows_values)
input_table_rows_values_list = input_table_rows_values_array.tolist()[0]
for j in range(input_table_colunms):
input_table_items_list = input_table_rows_values_list[j]
###==============将遍历的元素添加到tablewidget中并显示=======================
input_table_items = str(input_table_items_list)
newItem = QTableWidgetItem(input_table_items)
newItem.setTextAlignment(Qt.AlignHCenter|Qt.AlignVCenter)
self.tableWidget.setItem(i, j, newItem)
###================遍历表格每个元素,同时添加到tablewidget中========================
# 设置输出路径
def choosefile(self, Filepath):
# 选择输出路径
f = QtWidgets.QFileDialog.getExistingDirectory(None, "选择输出文件夹", "./") # 起始路径
# 输出文件名称
filename = self.lineEdit_4.text()
if filename.endswith('.xlsx'):
filename = filename[:-5]
else:
filename = filename
if filename == '':
QMessageBox().warning(None, "警告", "请先补全输出文件名称!", QMessageBox.Close)
else:
f += '/'
f += filename
f += '.xlsx' # 后续此处需要添加支持其他的格式
self.lineEdit_3.setText(f)
# 清空输出文件路径
def clear(self):
self.lineEdit_3.setText('')
self.lineEdit_4.setText('')
# 警告与提示
def warn1(self):
QMessageBox().warning(None, "警告", "新闻内容不能为空!", QMessageBox.Close)
def warn2(self):
QMessageBox().warning(None, "警告", "未选择要进行分类的新闻Excel文件!", QMessageBox.Close)
def warn3(self):
QMessageBox().warning(None, "警告", "未设置结果文件输出路径!", QMessageBox.Close)
def success1(self):
QMessageBox().information(None, "提示", "单条新闻文本分类完成!", QMessageBox.Close)
def success2(self):
QMessageBox().information(None, "提示", "批量新闻文本分类完成!", QMessageBox.Close)
# 展示输出预测结果
def showresult(self):
excelFilepath = self.lineEdit_3.text()
if excelFilepath == '':
QMessageBox().warning(None, "警告", "请先设置结果文件输出路径!", QMessageBox.Close)
else:
input_table = pd.read_excel(excelFilepath)
input_table_rows = input_table.shape[0]
input_table_colunms = input_table.shape[1]
input_table_header = input_table.columns.values.tolist()
###===========读取表格,转换表格,============================================
###======================给tablewidget设置行列表头============================
self.tableWidget.setColumnCount(input_table_colunms)
self.tableWidget.setRowCount(input_table_rows)
self.tableWidget.setHorizontalHeaderLabels(input_table_header)
# 自定义分配列宽
self.tableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)
###======================给tablewidget设置行列表头============================
###================遍历表格每个元素,同时添加到tablewidget中========================
for i in range(input_table_rows):
input_table_rows_values = input_table.iloc[[i]]
#print(input_table_rows_values)
input_table_rows_values_array = np.array(input_table_rows_values)
input_table_rows_values_list = input_table_rows_values_array.tolist()[0]
for j in range(input_table_colunms):
input_table_items_list = input_table_rows_values_list[j]
###==============将遍历的元素添加到tablewidget中并显示=======================
input_table_items = str(input_table_items_list)
newItem = QTableWidgetItem(input_table_items)
newItem.setTextAlignment(Qt.AlignHCenter|Qt.AlignVCenter)
self.tableWidget.setItem(i, j, newItem)
###================遍历表格每个元素,同时添加到tablewidget中========================
| [
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QPushButton"... | [((425, 438), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (436, 438), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((580, 606), 'PyQt5.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', (['Form'], {}), '(Form)\n', (600, 606), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((696, 788), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (717, 788), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1084, 1097), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1095, 1097), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1588, 1607), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1605, 1607), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1669, 1695), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (1685, 1695), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1826, 1855), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab'], {}), '(self.tab)\n', (1845, 1855), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1995, 2021), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (2011, 2021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2159, 2185), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (2175, 2185), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2323, 2349), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (2339, 2349), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2482, 2495), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2493, 2495), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2765, 2796), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab'], {}), 
'(self.tab)\n', (2786, 2796), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2883, 2896), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2894, 2896), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3045, 3074), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.tab'], {}), '(self.tab)\n', (3064, 3074), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3259, 3278), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3276, 3278), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3346, 3374), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_2'], {}), '(self.tab_2)\n', (3362, 3374), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3516, 3549), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_2'], {}), '(self.tab_2)\n', (3537, 3549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3704, 3735), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_2'], {}), '(self.tab_2)\n', (3723, 3735), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3888, 3921), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_2'], {}), '(self.tab_2)\n', (3909, 3921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4074, 4102), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_2'], {}), '(self.tab_2)\n', (4090, 4102), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4243, 4274), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_2'], {}), '(self.tab_2)\n', (4262, 4274), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4427, 4460), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_2'], {}), '(self.tab_2)\n', (4448, 4460), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4618, 4651), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_2'], {}), '(self.tab_2)\n', (4639, 4651), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4805, 4833), 
'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab_2'], {}), '(self.tab_2)\n', (4821, 4833), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4974, 5005), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.tab_2'], {}), '(self.tab_2)\n', (4993, 5005), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5158, 5191), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.tab_2'], {}), '(self.tab_2)\n', (5179, 5191), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5280, 5293), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5291, 5293), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5766, 5800), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.tab_2'], {}), '(self.tab_2)\n', (5788, 5800), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6270, 6313), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (6307, 6313), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9595, 9608), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (9606, 9608), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9764, 9836), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', '"""选取excel文件"""', '"""./"""', '"""*.xlsx"""'], {}), "(None, '选取excel文件', './', '*.xlsx')\n", (9801, 9836), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12070, 12135), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QtWidgets.QFileDialog.getExistingDirectory', (['None', '"""选择输出文件夹"""', '"""./"""'], {}), "(None, '选择输出文件夹', './')\n", (12112, 12135), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((643, 672), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1071)', '(891)'], {}), '(0, 0, 1071, 891)\n', (655, 672), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1048, 1066), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(0)'], {}), '(0, 0)\n', (1060, 1066), 
False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1495, 1515), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(20)', '(20)'], {}), '(20, 20)\n', (1507, 1515), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1728, 1756), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(40)', '(0)', '(130)', '(71)'], {}), '(40, 0, 130, 71)\n', (1740, 1756), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1891, 1920), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(40)', '(60)', '(981)', '(61)'], {}), '(40, 60, 981, 61)\n', (1903, 1920), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2056, 2086), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(40)', '(120)', '(130)', '(71)'], {}), '(40, 120, 130, 71)\n', (2068, 2086), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2220, 2250), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(40)', '(690)', '(130)', '(71)'], {}), '(40, 690, 130, 71)\n', (2232, 2250), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2384, 2415), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(690)', '(130)', '(71)'], {}), '(200, 690, 130, 71)\n', (2396, 2415), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2834, 2865), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(250)', '(760)', '(571)', '(61)'], {}), '(250, 760, 571, 61)\n', (2846, 2865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3110, 3141), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(40)', '(180)', '(981)', '(511)'], {}), '(40, 180, 981, 511)\n', (3122, 3141), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3409, 3438), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(10)', '(171)', '(61)'], {}), '(30, 10, 171, 61)\n', (3421, 3438), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3589, 3618), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(60)', '(741)', '(51)'], {}), '(30, 60, 741, 51)\n', (3601, 3618), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3773, 3804), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(120)', '(1001)', 
'(51)'], {}), '(30, 120, 1001, 51)\n', (3785, 3804), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3961, 3991), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(790)', '(60)', '(241)', '(51)'], {}), '(790, 60, 241, 51)\n', (3973, 3991), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4137, 4167), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(170)', '(291)', '(61)'], {}), '(30, 170, 291, 61)\n', (4149, 4167), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4312, 4343), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(350)', '(1001)', '(51)'], {}), '(30, 350, 1001, 51)\n', (4324, 4343), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4500, 4530), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(290)', '(741)', '(51)'], {}), '(30, 290, 741, 51)\n', (4512, 4530), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4691, 4722), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(790)', '(290)', '(241)', '(51)'], {}), '(790, 290, 241, 51)\n', (4703, 4722), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4868, 4898), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(220)', '(251)', '(61)'], {}), '(30, 220, 251, 61)\n', (4880, 4898), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5043, 5074), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(230)', '(871)', '(51)'], {}), '(160, 230, 871, 51)\n', (5055, 5074), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5231, 5262), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(250)', '(420)', '(561)', '(51)'], {}), '(250, 420, 561, 51)\n', (5243, 5262), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5839, 5871), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(490)', '(1001)', '(351)'], {}), '(30, 490, 1001, 351)\n', (5851, 5871), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6495, 6523), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""resources//logo.ico"""'], {}), "('resources//logo.ico')\n", (6500, 6523), False, 'from PyQt5.QtGui import QIcon\n'), ((10127, 
10155), 'pandas.read_excel', 'pd.read_excel', (['excelFilepath'], {}), '(excelFilepath)\n', (10140, 10155), True, 'import pandas as pd\n'), ((13494, 13522), 'pandas.read_excel', 'pd.read_excel', (['excelFilepath'], {}), '(excelFilepath)\n', (13507, 13522), True, 'import pandas as pd\n'), ((11298, 11331), 'numpy.array', 'np.array', (['input_table_rows_values'], {}), '(input_table_rows_values)\n', (11306, 11331), True, 'import numpy as np\n'), ((12750, 12763), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (12761, 12763), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12849, 12862), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (12860, 12862), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12958, 12971), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (12969, 12971), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((13063, 13076), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (13074, 13076), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((13171, 13184), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (13182, 13184), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((14665, 14698), 'numpy.array', 'np.array', (['input_table_rows_values'], {}), '(input_table_rows_values)\n', (14673, 14698), True, 'import numpy as np\n'), ((10010, 10023), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (10021, 10023), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((12372, 12385), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (12383, 12385), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((13382, 13395), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (13393, 13395), False, 'from PyQt5.QtWidgets import QMessageBox\n')] |
from nisnap.utils.aseg import basal_ganglia_labels, cortical_labels
from nisnap.utils.aseg import amygdala_nuclei, hippocampal_subfields
__format__ = '.png'
def pick_labels(data, labels):
    """Zero out every value of `data` that is not listed in `labels`.

    Returns a new Numpy array of the same shape as `data`; the input array
    is left unmodified. Used to isolate specific labels in a given
    segmentation volume.
    """
    import numpy as np
    keep_mask = np.isin(data, labels)
    return np.where(keep_mask, data, 0)
def _aget_cmap_(labels=()):
    """Build an RGB colormap (values in [0, 1]) for the given label values.

    The per-label colors are read from the ``colormap.json`` lookup table
    shipped with the ``nisnap`` package; any label missing from the table
    maps to black. The returned array has one row per integer in
    ``[0, max(labels)]`` so it can be indexed directly by label value.

    Fixes over the previous version: the mutable default argument ``[]``
    is replaced by a tuple, the dead ``if n_labels is None`` branch
    (``len()`` never returns ``None``) is removed, and the JSON file is
    opened with a context manager instead of leaking the handle.
    """
    import json
    import numpy as np
    import nisnap
    import os.path as op
    fp = op.join(op.dirname(nisnap.__file__), 'utils', 'colormap.json')
    with open(fp) as f:
        LUT = json.load(f)
    LUT = {int(k): v for k, v in LUT.items()}
    max_label = int(np.max(labels))  # raises on empty labels, as before
    LUT = [LUT.get(i, [0, 0, 0]) for i in range(0, max_label + 1)]
    LUT = np.array(LUT)
    return LUT / 255.0
def _plot_contours_in_slice_(slice_seg, target_axis, labels=None):
    """Plots contour around the data in slice (after binarization).

    Each label value present in ``slice_seg`` is binarized in turn and its
    outline is drawn on ``target_axis`` with the color that ``_aget_cmap_``
    assigns to that label. If ``labels`` is None, the labels are taken from
    the unique values of the slice itself.
    """
    import numpy as np
    if labels is None:  # if n_labels is not provided then take max from slice
        labels = list(np.unique(slice_seg))
    from matplotlib.colors import ListedColormap
    cmap = ListedColormap(_aget_cmap_(labels))
    num_labels = len(cmap.colors)
    unique_labels = np.arange(num_labels, dtype='int16')
    from matplotlib import cm, colors
    # Map each integer label to its RGBA color through the listed colormap.
    normalize_labels = colors.Normalize(vmin=0, vmax=num_labels, clip=True)
    seg_mapper = cm.ScalarMappable(norm=normalize_labels, cmap=cmap)
    unique_labels_display = np.array(unique_labels)  # np.setdiff1d(unique_labels, 0)
    color_for_label = seg_mapper.to_rgba(unique_labels_display)
    from matplotlib import pyplot as plt
    plt.sca(target_axis)
    # Draw one contour per label; empty masks are skipped.
    for index, label in enumerate(unique_labels_display):
        binary_slice_seg = slice_seg == index
        if not binary_slice_seg.any():
            continue
        plt.contour(binary_slice_seg,
                    levels=[0.5, ],
                    colors=(color_for_label[index],),
                    linewidths=1,
                    alpha=1,
                    zorder=1)
    return
def _snap_contours_(data, slices, axis, bg, figsize=None, bb=None, pbar=None):
    """Render slices of `bg` with the contours of `data` overlaid, to a PNG.

    Parameters
    ----------
    data: 3D label volume (numpy array) whose region outlines are drawn
    slices: list of lists of slice indexes (one inner list per figure row)
    axis: 'x', 'y' or 'z' (cut direction)
    bg: 3D background image (numpy array) shown in grayscale under contours
    figsize: figure size in inches, or None for an automatic size
    bb: None, or precomputed per-row bounding boxes to reuse
    pbar: None, or a tqdm progress bar updated after each slice

    Returns
    -------
    (paths, bb): list containing the path of the saved figure, and the
    bounding boxes used (so they can be reused for matching renders).

    Bug fix: the original computed ``figsize = (figsize, figsize * ratio)``
    while ``figsize`` was still None, which always raised TypeError; a
    default base width is now used instead.
    """
    from matplotlib import pyplot as plt
    import numpy as np
    import tempfile
    import os
    plt.style.use('dark_background')
    paths = []
    fd, path = tempfile.mkstemp(suffix='_%s%s' % (axis, __format__))
    os.close(fd)
    paths.append(path)
    same_box = bb is not None
    if not same_box:
        bb = {}
    # Slice extractors for each anatomical axis.
    lambdas = {'x': lambda y, x: y[:, :, x],
               'y': lambda y, x: y[:, x, :],
               'z': lambda y, x: y[x, :, :]}
    labels = list(np.unique(data))
    if figsize is None:
        # Keep the figure aspect ratio proportional to the slice grid.
        ratio = len(slices) / float(len(slices[0]))
        base_width = 10.0  # default figure width in inches
        figsize = (base_width, base_width * ratio)
    fig = plt.figure(dpi=300, figsize=figsize)
    abs_index = 0
    for a, chunk in enumerate(slices):
        if not same_box:
            bb[a] = []
        for i, slice_index in enumerate(chunk):
            abs_index += 1
            ax = fig.add_subplot(len(slices), len(slices[0]), abs_index,
                                 label='%s_%s' % (axis, slice_index))
            # Reorient the label slice for display.
            test = np.flip(np.swapaxes(np.abs(lambdas[axis](data,
                                                 int(slice_index))),
                                        0, 1), 0)
            if not same_box:
                xs, ys = np.where(test != 0)
                bb[a].append((xs, ys))
            else:
                xs, ys = bb[a][i]
            if len(xs) == 0:
                continue  # nothing segmented in this slice
            # Background slice, cropped to the same bounding box.
            test3 = np.flip(np.swapaxes(np.abs(lambdas[axis](bg,
                                                  int(slice_index))),
                                         0, 1), 0)
            test3 = test3[min(xs):max(xs) + 1, min(ys):max(ys) + 1]
            ax.imshow(test3, interpolation='none', cmap='gray')
            test = test[min(xs):max(xs) + 1, min(ys):max(ys) + 1]
            _plot_contours_in_slice_(test, ax, labels=labels)
            ax.axis('off')
            ax.text(0, 0, '%i' % slice_index,
                    {'color': 'w', 'fontsize': 10}, va="bottom", ha="left")
            if pbar is not None:
                pbar.update(1)
    fig.savefig(path, facecolor=fig.get_facecolor(),
                bbox_inches='tight',
                transparent=True,
                pad_inches=0)
    return paths, bb
def _snap_slices_(data, slices, axis, bb=None, figsize=None, pbar=None):
    """Render the requested slices of a volume along one axis into a PNG.

    Parameters
    ----------
    data: numpy array — a 3D label volume, a 3D raw image, or a 4D volume
        (the last case is rendered as RGB)
    slices: list of lists of slice indexes (one inner list per figure row)
    axis: 'x', 'y' or 'z' (cut direction)
    bb: None, or per-row bounding boxes to reuse; when None, boxes are
        computed from the non-zero extent of each slice and returned
    figsize: figure size in inches, forwarded to matplotlib
    pbar: None, or a tqdm progress bar updated after each slice

    Returns
    -------
    (paths, bb): list containing the path of the saved figure, and the
    bounding boxes used (so a matching background render can reuse them).
    """
    from matplotlib import pyplot as plt
    import numpy as np
    import tempfile
    import os
    labels = list(np.unique(data))
    has_bb = bb is not None
    d = data.ravel()
    # Heuristic: a volume with few low values is treated as a raw image
    # (grayscale) rather than a label map (colormap).
    ratio = len(d[d <= 10]) / len(d)
    # has_orig = len(labels) > 50  # not bb is None
    is_raw = ratio < 0.80
    paths = []
    if not has_bb:
        bb = {}
    fig = plt.figure(dpi=300, figsize=figsize)
    from nisnap.utils.slices import __get_lambdas__
    lambdas = __get_lambdas__(data)
    fd, path = tempfile.mkstemp(suffix='_%s%s' % (axis, __format__))
    os.close(fd)
    paths.append(path)
    abs_index = 0
    for a, chunk in enumerate(slices):
        if not has_bb:
            bb[a] = []
        for i, slice_index in enumerate(chunk):
            abs_index += 1
            ax = fig.add_subplot(len(slices), len(slices[0]), abs_index,
                                 label='%s_%s' % (axis, slice_index))
            # Reorient the slice for display.
            test = np.flip(np.swapaxes(np.abs(lambdas[axis](int(slice_index))),
                                        0, 1), 0)
            if not has_bb:
                xs, ys = list(np.where(test != 0))[:2]
                bb[a].append((xs, ys))
            else:
                xs, ys = bb[a][i]
            if len(xs) == 0:
                continue  # empty slice, nothing to crop or draw
            if len(data.shape) == 4:  # RGB mode (4D volume)
                test = test[min(xs):max(xs) + 1, min(ys):max(ys) + 1, :]
                ax.imshow((test * 255).astype(np.uint8), interpolation='none',)
            else:  # standard 3D label volume
                if is_raw:
                    vmax, cmap = (None, 'gray')
                else:
                    vmax = np.max(labels)
                    from matplotlib.colors import ListedColormap
                    cmap = ListedColormap(_aget_cmap_(labels))
                test = test[min(xs):max(xs) + 1, min(ys):max(ys) + 1]
                ax.imshow(test, interpolation='none', cmap=cmap,
                          vmin=0, vmax=vmax)
            ax.axis('off')
            ax.text(0, 0, '%i' % slice_index,
                    {'color': 'w', 'fontsize': 10}, va="bottom", ha="left")
            if pbar is not None:
                pbar.update(1)
    fig.savefig(path, facecolor=fig.get_facecolor(),
                bbox_inches='tight', transparent=True, pad_inches=0)
    return paths, bb
def __snap__(data, axes='xyz', bg=None, slices=None, rowsize=None,
             contours=False, figsize=None, samebox=False, margin=5):
    """Render snapshots of `data` along each requested axis.

    For every axis in `axes` the selected slices are rendered either as
    contours over the background (`contours=True`) or as plain masks; when
    a background `bg` is given in mask mode, a matching background render
    is produced with the same bounding boxes so the two can be blended
    later. Raises if no slice can be selected.

    Returns
    -------
    (paths, paths_orig): two dicts mapping each axis to the list of image
    paths for the mask/contour render and the background render.
    """
    from matplotlib import pyplot as plt
    import logging as log
    plt.rcParams['figure.facecolor'] = 'black'
    plt.rcParams.update({'figure.max_open_warning': 0})
    from nisnap.utils.slices import cut_slices, _fix_rowsize_, _fix_figsize_
    from nisnap.utils.slices import __maxsize__
    rowsize = _fix_rowsize_(axes, rowsize)
    figsize = _fix_figsize_(axes, figsize)
    t = int(__maxsize__(data)/3.0)
    slices = cut_slices(data, axes, slices=slices, rowsize=rowsize,
                        threshold=t)
    n_slices = sum([sum([len(each) for each in slices[e]]) for e in axes])
    if n_slices == 0:
        msg = 'Should provide at least one slice. %s' % slices
        raise Exception(msg)
    has_orig = bg is not None
    if has_orig:
        # Each slice is rendered twice (mask + background).
        n_slices = 2 * n_slices
    from tqdm import tqdm
    pbar = tqdm(total=n_slices, leave=False)
    paths, paths_orig = {}, {}
    for axis in axes:
        if samebox:
            # One fixed bounding box shared by every slice of this axis.
            from nisnap.utils.slices import __get_abs_minmax
            same_bb = __get_abs_minmax(data, axis, slices[axis], margin=margin)
            log.warning('Using bounding box: %s (axis %s)'
                        % (same_bb[0][0], axis))
        opt = {'slices': slices[axis],
               'axis': axis,
               'figsize': figsize[axis],
               'pbar': pbar}
        if contours:
            # Rendering contours
            if samebox:
                opt['bb'] = same_bb
            path, bb = _snap_contours_(data, bg=bg, **opt)
            paths[axis] = path
        else:
            opt['bb'] = None if not samebox else same_bb
            # Rendering masks
            path, bb = _snap_slices_(data, **opt)
            paths[axis] = path
            if has_orig:
                # Reuse the mask bounding boxes for the background render.
                opt['bb'] = bb if not samebox else same_bb
                path, _ = _snap_slices_(bg, **opt)
                paths_orig[axis] = path
    pbar.update(n_slices)
    pbar.close()
    return paths, paths_orig
def __stack_img__(filepaths):
    """Merge several mask volumes into a single label volume.

    Each file becomes one channel; every voxel is assigned the 1-based
    index of the channel holding the largest value, and voxels that are
    zero across all channels keep the label 0.
    """
    import nibabel as nib
    import numpy as np
    volumes = []
    for filepath in filepaths[:]:
        volumes.append(np.asarray(nib.load(filepath).dataobj))
    stacked = np.stack(volumes, axis=-1)
    label_volume = np.argmax(stacked, axis=-1) + 1
    all_zero = np.all(stacked == [0, 0, 0], axis=-1)
    label_volume[all_zero] = 0
    return label_volume
def plot_segment(filepaths, axes='xyz', bg=None, opacity=90, slices=None,
                 animated=False, savefig=None, contours=False, rowsize=None,
                 figsize=None, samebox=False, labels=None, margin=5):
    """Plots a set of segmentation maps/masks.

    Parameters
    ----------
    filepaths: a list of str, or str
        Paths to segmentation maps (between 1 and 3). Must be of same
        dimensions and in same reference space. A list is rendered in RGB
        mode (one channel per file); a single str is treated as a label
        volume.
    axes: string, or a tuple of str
        Choose the direction of the cuts (among 'x', 'y', 'z')
    bg: None or str
        Path to the background image that the masks will be plotted on top of.
        If nothing is specified, the segmentation maps/masks will be plotted
        only.
    opacity: int
        The opacity (in %) of the segmentation maps when plotted over a
        background image. Only used if a background image is provided.
        Default: 90
    slices: None, or a tuple of floats
        The indexes of the slices that will be rendered. If None is given, the
        slices are selected automatically.
    animated: boolean, optional
        If True, the snapshot will be rendered as an animated GIF.
        If False, the snapshot will be rendered as a static PNG image. Default:
        False
    savefig: string, optional
        Filepath where the resulting snapshot will be created. If None is
        given, a temporary file will be created and/or the result will be
        displayed inline in a Jupyter Notebook.
    contours: boolean, optional
        If True, segmentations will be rendered as contoured regions. If False,
        will be rendered as superimposed masks. Default: False
    rowsize: None, or int, or dict
        Set the number of slices per row in the final compiled figure.
        Default: {'x': 9, 'y': 9, 'z': 6}
    figsize: None, or float
        Figure size (in inches) (matplotlib definition). Default: auto
    samebox: boolean, optional
        If True, bounding box will be fixed. If False, adjusted for each slice.
    labels: None or a tuple of int
        If a list of labels is provided, the label volume will be filtered to
        keep these labels only and remove the others.
        (Works with a label volume only, not with RGB mode)

    See Also
    --------
    xnat.plot_segment : To plot segmentation maps directly providing their
        experiment_id on an XNAT instance
    """
    import matplotlib
    import numpy as np
    import logging as log
    import os
    import tempfile
    # Non-interactive backend: figures are written to files only.
    matplotlib.use('Agg')
    fp = savefig
    if savefig is None:
        if animated:
            f, fp = tempfile.mkstemp(suffix='.gif')
        else:
            f, fp = tempfile.mkstemp(suffix=__format__)
        os.close(f)
    from nisnap.utils.parse import __check_axes__
    axes = __check_axes__(axes)
    # Creating snapshots (along given axes and original if needed)
    log.info('* Creating snapshots...')
    # Loading images
    import nibabel as nib
    if isinstance(filepaths, list):  # RGB mode
        data = __stack_img__(filepaths)
        log.info('* RGB mode')
    elif isinstance(filepaths, str):  # 3D label volume
        log.info('* Label volume')
        if labels is not None:
            from nisnap.utils import aseg
            filepaths = aseg.__picklabel_fs__(filepaths, labels=labels)
        data = np.asarray(nib.load(filepaths).dataobj)
    if bg is not None:
        bg = np.asarray(nib.load(bg).dataobj)
    paths, paths_orig = __snap__(data, axes=axes, bg=bg,
                                  slices=slices, contours=contours,
                                  rowsize=rowsize, figsize=figsize,
                                  samebox=samebox, margin=margin)
    from nisnap.utils.montage import __montage__
    has_orig = bg is not None
    # Compose the per-axis renders into the final (possibly animated) image.
    __montage__(paths, paths_orig, axes, opacity, has_orig, animated,
                savefig=fp)
    if savefig is None:
        # Return image
        from IPython.display import Image
        return Image(filename=fp)
| [
"numpy.isin",
"numpy.argmax",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"os.close",
"matplotlib.pyplot.contour",
"nisnap.utils.montage.__montage__",
"numpy.unique",
"matplotlib.colors.Normalize",
"logging.warning",
"matplotlib.cm.ScalarMappable",
"os.path.dir... | [((925, 938), 'numpy.array', 'np.array', (['LUT'], {}), '(LUT)\n', (933, 938), True, 'import numpy as np\n'), ((1414, 1450), 'numpy.arange', 'np.arange', (['num_labels'], {'dtype': '"""int16"""'}), "(num_labels, dtype='int16')\n", (1423, 1450), True, 'import numpy as np\n'), ((1513, 1565), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0)', 'vmax': 'num_labels', 'clip': '(True)'}), '(vmin=0, vmax=num_labels, clip=True)\n', (1529, 1565), False, 'from matplotlib import cm, colors\n'), ((1583, 1634), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'normalize_labels', 'cmap': 'cmap'}), '(norm=normalize_labels, cmap=cmap)\n', (1600, 1634), False, 'from matplotlib import cm, colors\n'), ((1663, 1686), 'numpy.array', 'np.array', (['unique_labels'], {}), '(unique_labels)\n', (1671, 1686), True, 'import numpy as np\n'), ((1832, 1852), 'matplotlib.pyplot.sca', 'plt.sca', (['target_axis'], {}), '(target_axis)\n', (1839, 1852), True, 'from matplotlib import pyplot as plt\n'), ((2435, 2467), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (2448, 2467), True, 'from matplotlib import pyplot as plt\n'), ((2499, 2552), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': "('_%s%s' % (axis, __format__))"}), "(suffix='_%s%s' % (axis, __format__))\n", (2515, 2552), False, 'import tempfile\n'), ((2557, 2569), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (2565, 2569), False, 'import os\n'), ((2966, 3002), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(300)', 'figsize': 'figsize'}), '(dpi=300, figsize=figsize)\n', (2976, 3002), True, 'from matplotlib import pyplot as plt\n'), ((5030, 5066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(300)', 'figsize': 'figsize'}), '(dpi=300, figsize=figsize)\n', (5040, 5066), True, 'from matplotlib import pyplot as plt\n'), ((5134, 5155), 'nisnap.utils.slices.__get_lambdas__', '__get_lambdas__', (['data'], {}), 
'(data)\n', (5149, 5155), False, 'from nisnap.utils.slices import __get_lambdas__\n'), ((5172, 5225), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': "('_%s%s' % (axis, __format__))"}), "(suffix='_%s%s' % (axis, __format__))\n", (5188, 5225), False, 'import tempfile\n'), ((5230, 5242), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (5238, 5242), False, 'import os\n'), ((7276, 7327), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (7295, 7327), True, 'from matplotlib import pyplot as plt\n'), ((7469, 7497), 'nisnap.utils.slices._fix_rowsize_', '_fix_rowsize_', (['axes', 'rowsize'], {}), '(axes, rowsize)\n', (7482, 7497), False, 'from nisnap.utils.slices import cut_slices, _fix_rowsize_, _fix_figsize_\n'), ((7512, 7540), 'nisnap.utils.slices._fix_figsize_', '_fix_figsize_', (['axes', 'figsize'], {}), '(axes, figsize)\n', (7525, 7540), False, 'from nisnap.utils.slices import cut_slices, _fix_rowsize_, _fix_figsize_\n'), ((7590, 7657), 'nisnap.utils.slices.cut_slices', 'cut_slices', (['data', 'axes'], {'slices': 'slices', 'rowsize': 'rowsize', 'threshold': 't'}), '(data, axes, slices=slices, rowsize=rowsize, threshold=t)\n', (7600, 7657), False, 'from nisnap.utils.slices import cut_slices, _fix_rowsize_, _fix_figsize_\n'), ((7989, 8022), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_slices', 'leave': '(False)'}), '(total=n_slices, leave=False)\n', (7993, 8022), False, 'from tqdm import tqdm\n'), ((9270, 9294), 'numpy.stack', 'np.stack', (['stack'], {'axis': '(-1)'}), '(stack, axis=-1)\n', (9278, 9294), True, 'import numpy as np\n'), ((9361, 9395), 'numpy.all', 'np.all', (['(data == [0, 0, 0])'], {'axis': '(-1)'}), '(data == [0, 0, 0], axis=-1)\n', (9367, 9395), True, 'import numpy as np\n'), ((11949, 11970), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (11963, 11970), False, 'import matplotlib\n'), ((12238, 12258), 'nisnap.utils.parse.__check_axes__', 
'__check_axes__', (['axes'], {}), '(axes)\n', (12252, 12258), False, 'from nisnap.utils.parse import __check_axes__\n'), ((12331, 12366), 'logging.info', 'log.info', (['"""* Creating snapshots..."""'], {}), "('* Creating snapshots...')\n", (12339, 12366), True, 'import logging as log\n'), ((13237, 13314), 'nisnap.utils.montage.__montage__', '__montage__', (['paths', 'paths_orig', 'axes', 'opacity', 'has_orig', 'animated'], {'savefig': 'fp'}), '(paths, paths_orig, axes, opacity, has_orig, animated, savefig=fp)\n', (13248, 13314), False, 'from nisnap.utils.montage import __montage__\n'), ((403, 424), 'numpy.isin', 'np.isin', (['data', 'labels'], {}), '(data, labels)\n', (410, 424), True, 'import numpy as np\n'), ((608, 635), 'os.path.dirname', 'op.dirname', (['nisnap.__file__'], {}), '(nisnap.__file__)\n', (618, 635), True, 'import os.path as op\n'), ((832, 846), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (838, 846), True, 'import numpy as np\n'), ((2026, 2141), 'matplotlib.pyplot.contour', 'plt.contour', (['binary_slice_seg'], {'levels': '[0.5]', 'colors': '(color_for_label[index],)', 'linewidths': '(1)', 'alpha': '(1)', 'zorder': '(1)'}), '(binary_slice_seg, levels=[0.5], colors=(color_for_label[index],\n ), linewidths=1, alpha=1, zorder=1)\n', (2037, 2141), True, 'from matplotlib import pyplot as plt\n'), ((2816, 2831), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (2825, 2831), True, 'import numpy as np\n'), ((4787, 4802), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (4796, 4802), True, 'import numpy as np\n'), ((9308, 9332), 'numpy.argmax', 'np.argmax', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (9317, 9332), True, 'import numpy as np\n'), ((12164, 12175), 'os.close', 'os.close', (['f'], {}), '(f)\n', (12172, 12175), False, 'import os\n'), ((12511, 12533), 'logging.info', 'log.info', (['"""* RGB mode"""'], {}), "('* RGB mode')\n", (12519, 12533), True, 'import logging as log\n'), ((13436, 13454), 
'IPython.display.Image', 'Image', ([], {'filename': 'fp'}), '(filename=fp)\n', (13441, 13454), False, 'from IPython.display import Image\n'), ((1240, 1260), 'numpy.unique', 'np.unique', (['slice_seg'], {}), '(slice_seg)\n', (1249, 1260), True, 'import numpy as np\n'), ((7554, 7571), 'nisnap.utils.slices.__maxsize__', '__maxsize__', (['data'], {}), '(data)\n', (7565, 7571), False, 'from nisnap.utils.slices import __maxsize__\n'), ((8181, 8238), 'nisnap.utils.slices.__get_abs_minmax', '__get_abs_minmax', (['data', 'axis', 'slices[axis]'], {'margin': 'margin'}), '(data, axis, slices[axis], margin=margin)\n', (8197, 8238), False, 'from nisnap.utils.slices import __get_abs_minmax\n'), ((8251, 8322), 'logging.warning', 'log.warning', (["('Using bounding box: %s (axis %s)' % (same_bb[0][0], axis))"], {}), "('Using bounding box: %s (axis %s)' % (same_bb[0][0], axis))\n", (8262, 8322), True, 'import logging as log\n'), ((12054, 12085), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".gif"""'}), "(suffix='.gif')\n", (12070, 12085), False, 'import tempfile\n'), ((12120, 12155), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '__format__'}), '(suffix=__format__)\n', (12136, 12155), False, 'import tempfile\n'), ((12599, 12625), 'logging.info', 'log.info', (['"""* Label volume"""'], {}), "('* Label volume')\n", (12607, 12625), True, 'import logging as log\n'), ((3578, 3597), 'numpy.where', 'np.where', (['(test != 0)'], {}), '(test != 0)\n', (3586, 3597), True, 'import numpy as np\n'), ((9214, 9225), 'nibabel.load', 'nib.load', (['e'], {}), '(e)\n', (9222, 9225), True, 'import nibabel as nib\n'), ((12723, 12770), 'nisnap.utils.aseg.__picklabel_fs__', 'aseg.__picklabel_fs__', (['filepaths'], {'labels': 'labels'}), '(filepaths, labels=labels)\n', (12744, 12770), False, 'from nisnap.utils import aseg\n'), ((12874, 12886), 'nibabel.load', 'nib.load', (['bg'], {}), '(bg)\n', (12882, 12886), True, 'import nibabel as nib\n'), ((6335, 6349), 'numpy.max', 'np.max', 
(['labels'], {}), '(labels)\n', (6341, 6349), True, 'import numpy as np\n'), ((12797, 12816), 'nibabel.load', 'nib.load', (['filepaths'], {}), '(filepaths)\n', (12805, 12816), True, 'import nibabel as nib\n'), ((5777, 5796), 'numpy.where', 'np.where', (['(test != 0)'], {}), '(test != 0)\n', (5785, 5796), True, 'import numpy as np\n')] |
import pathlib
import shutil
import tempfile
import numpy as np
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt, QPoint, QTimer, QSettings
from PyQt5.QtWidgets import QToolBar, QComboBox
from OpenGL.plugins import FormatHandler
from sscanss.app.dialogs import (InsertPrimitiveDialog, TransformDialog, SampleManager, InsertPointDialog,
InsertVectorDialog, VectorManager, PickPointDialog, JawControl, PositionerControl,
DetectorControl, PointManager, SimulationDialog, ScriptExportDialog, PathLengthPlotter,
ProjectDialog, Preferences, CalibrationErrorDialog, AlignmentErrorDialog)
from sscanss.app.window.view import MainWindow
import sscanss.config as config
from sscanss.core.instrument import Simulation
from sscanss.core.math import rigid_transform
from sscanss.core.scene import Node
from sscanss.core.util import Primitives, PointType, DockFlag
from tests.helpers import (QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box,
click_list_widget_item, edit_line_edit_text)
WAIT_TIME = 5000
FUNC = Simulation.execute
def wrapped(args):
    """Run the original ``Simulation.execute`` with INFO logging silenced."""
    import logging
    logging.disable(logging.INFO)
    return FUNC(args)
class TestMainWindow(QTestCase):
    @classmethod
    def setUpClass(cls):
        """Create the main window once for the whole test class.

        Settings and logs are redirected to a temporary directory so the
        tests never touch the user's real configuration.
        """
        cls.data_dir = pathlib.Path(tempfile.mkdtemp())
        cls.ini_file = cls.data_dir / 'settings.ini'
        # Point application settings at a throwaway INI file.
        config.settings.system = QSettings(str(cls.ini_file), QSettings.IniFormat)
        config.LOG_PATH = cls.data_dir / 'logs'
        # Register the numpy array handler for Matrix44 with PyOpenGL.
        FormatHandler('sscanss', 'OpenGL.arrays.numpymodule.NumpyHandler', ['sscanss.core.math.matrix.Matrix44'])
        cls.window = MainWindow()
        cls.toolbar = cls.window.findChild(QToolBar)
        cls.model = cls.window.presenter.model
        cls.window.show()
    @classmethod
    def tearDownClass(cls):
        """Close the window, release log handlers and delete the temp dir."""
        # Mark the undo stack clean so closing does not prompt to save.
        cls.window.undo_stack.setClean()
        cls.window.close()
        root_logger = config.logging.getLogger()
        # Close all but one handler so the log files can be removed below.
        for _ in range(len(root_logger.handlers) - 1):
            handler = root_logger.handlers[-1]
            handler.close()
            root_logger.removeHandler(handler)
        config.logging.shutdown()
        shutil.rmtree(cls.data_dir)
@classmethod
def triggerUndo(cls):
cls.window.undo_action.trigger()
QTest.qWait(WAIT_TIME//10)
@classmethod
def triggerRedo(cls):
cls.window.redo_action.trigger()
QTest.qWait(WAIT_TIME//10)
@staticmethod
def getDockedWidget(dock_manager, dock_flag):
if dock_flag == DockFlag.Bottom:
dock = dock_manager.bottom_dock
else:
dock = dock_manager.upper_dock
return dock.widget()
    def testMainView(self):
        """End-to-end GUI scenario driving the main window.

        Runs the project/sample setup helpers first, checks view toggles
        (bounding box, coordinate frame, camera projection), exercises
        mouse interaction with the GL widget, then chains the remaining
        workflow helpers (points, vectors, positioner, simulation, ...).
        """
        self.createProject()
        self.addSample()
        # Bounding box toggle via its toolbar button.
        self.assertFalse(self.window.gl_widget.show_bounding_box)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.show_bounding_box_action), Qt.LeftButton)
        self.assertTrue(self.window.gl_widget.show_bounding_box)
        # Coordinate frame toggle via its menu action.
        self.assertTrue(self.window.gl_widget.show_coordinate_frame)
        self.window.show_coordinate_frame_action.trigger()
        self.assertFalse(self.window.gl_widget.show_coordinate_frame)
        # A preset view switches to orthographic; reset restores perspective.
        camera = self.window.gl_widget.scene.camera
        self.window.view_from_menu.actions()[0].trigger()
        self.assertEqual(camera.mode, camera.Projection.Orthographic)
        self.window.reset_camera_action.trigger()
        self.assertEqual(camera.mode, camera.Projection.Perspective)
        # Mouse interaction with the GL widget (rotate, pan, zoom).
        mouse_drag(self.window.gl_widget)
        mouse_drag(self.window.gl_widget, button=Qt.RightButton)
        mouse_wheel_scroll(self.window.gl_widget, delta=20)
        mouse_wheel_scroll(self.window.gl_widget, delta=-10)
        self.transformSample()
        # render in transparent
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.blend_render_action), Qt.LeftButton)
        self.assertEqual(self.window.scenes.sample_render_mode, Node.RenderMode.Transparent)
        self.keyinFiducials()
        self.keyinPoints()
        # render in wireframe
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.line_render_action), Qt.LeftButton)
        self.assertEqual(self.window.scenes.sample_render_mode, Node.RenderMode.Wireframe)
        self.insertVectors()
        self.jawControl()
        self.pointPicking()
        self.switchInstrument()
        self.positionerControl()
        self.detectorControl()
        self.alignSample()
        self.runSimulation()
    def createProject(self):
        """Create a new 'Test' project on the IMAT instrument via the dialog."""
        self.window.showNewProjectDialog()
        # Test project dialog validation
        project_dialog = self.window.findChild(ProjectDialog)
        self.assertTrue(project_dialog.isVisible())
        self.assertEqual(project_dialog.validator_textbox.text(), '')
        # Clicking create with an empty name should show a validation message.
        QTest.mouseClick(project_dialog.create_project_button, Qt.LeftButton)
        self.assertNotEqual(project_dialog.validator_textbox.text(), '')
        # Create new project
        QTest.keyClicks(project_dialog.project_name_textbox, 'Test')
        # Step through the combobox entries until IMAT is selected.
        for _ in range(project_dialog.instrument_combobox.count()):
            if project_dialog.instrument_combobox.currentText().strip().upper() == 'IMAT':
                break
            QTest.keyClick(project_dialog.instrument_combobox, Qt.Key_Down)
        # Dismiss any message box that pops up after project creation.
        QTimer.singleShot(WAIT_TIME + 100, lambda: click_message_box(0))
        QTest.mouseClick(project_dialog.create_project_button, Qt.LeftButton)
        QTest.keyClick(project_dialog, Qt.Key_Escape)  # should not close until the project is created
        self.assertTrue(project_dialog.isVisible())
        QTest.qWait(WAIT_TIME)  # wait is necessary since instrument is created on another thread
        self.assertEqual(self.model.project_data['name'], 'Test')
        self.assertEqual(self.model.instrument.name, 'IMAT')
    def addSample(self):
        """Add primitive samples and exercise the sample manager.

        Creates a tube and a cuboid through the primitive dialog (including
        validation of an invalid tube), then checks reordering, merging and
        deleting samples in the manager, with undo after each action.
        """
        # Add sample
        self.assertEqual(len(self.model.sample), 0)
        self.window.docks.showInsertPrimitiveDialog(Primitives.Tube)
        widget = self.getDockedWidget(self.window.docks, InsertPrimitiveDialog.dock_flag)
        self.assertTrue(isinstance(widget, InsertPrimitiveDialog))
        self.assertEqual(widget.primitive, Primitives.Tube)
        self.assertTrue(widget.isVisible())
        edit_line_edit_text(widget.textboxes['inner_radius'].form_lineedit, '10')
        edit_line_edit_text(widget.textboxes['outer_radius'].form_lineedit, '10')
        # equal inner radius and outer radius is an invalid tube so validation should trigger
        self.assertFalse(widget.create_primitive_button.isEnabled())
        # Adds '0' to '10' to make the radius '100'
        QTest.keyClicks(widget.textboxes['outer_radius'].form_lineedit, '0')
        self.assertTrue(widget.create_primitive_button.isEnabled())
        QTest.mouseClick(widget.create_primitive_button, Qt.LeftButton)
        self.assertEqual(len(self.model.sample), 1)
        # Add a second sample
        self.window.docks.showInsertPrimitiveDialog(Primitives.Tube)
        widget_2 = self.getDockedWidget(self.window.docks, InsertPrimitiveDialog.dock_flag)
        self.assertIs(widget, widget_2)  # Since a Tube dialog is already open a new widget is not created
        self.assertEqual(widget.primitive, Primitives.Tube)
        self.window.docks.showInsertPrimitiveDialog(Primitives.Cuboid)
        widget_2 = self.getDockedWidget(self.window.docks, InsertPrimitiveDialog.dock_flag)
        self.assertIsNot(widget, widget_2)
        QTimer.singleShot(100, lambda: click_message_box(0))  # click first button in message box
        QTest.mouseClick(widget_2.create_primitive_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//20)
        # Checks Sample Manager
        widget = self.getDockedWidget(self.window.docks, SampleManager.dock_flag)
        self.assertTrue(widget.isVisible())
        self.assertEqual(list(self.model.sample.keys())[0], 'Tube')
        # Promote the second sample ('Cuboid') to first, then undo.
        QTest.mouseClick(widget.priority_button, Qt.LeftButton)
        click_list_widget_item(widget.list_widget, 1)
        QTest.mouseClick(widget.priority_button, Qt.LeftButton)
        self.assertEqual(list(self.model.sample.keys())[0], 'Cuboid')
        self.triggerUndo()
        self.assertEqual(list(self.model.sample.keys())[0], 'Tube')
        # Merging needs at least two selected items; a single selection is a no-op.
        click_list_widget_item(widget.list_widget, 0)
        QTest.mouseClick(widget.merge_button, Qt.LeftButton)
        self.assertEqual(len(self.model.sample), 2)
        click_list_widget_item(widget.list_widget, 1, Qt.ControlModifier)
        QTest.mouseClick(widget.merge_button, Qt.LeftButton)
        self.assertEqual(len(self.model.sample), 1)
        self.triggerUndo()
        self.assertEqual(len(self.model.sample), 2)
        # Delete the second sample, then undo to restore it.
        click_list_widget_item(widget.list_widget, 1)
        QTest.mouseClick(widget.delete_button, Qt.LeftButton)
        self.assertEqual(len(self.model.sample), 1)
        self.triggerUndo()
        self.assertEqual(len(self.model.sample), 2)
    def transformSample(self):
        """Exercise the sample transform tools (translate, rotate, custom
        transform, move-origin and plane-align) and verify undo/redo restores
        the sample's bounding-box centre."""
        # Transform Sample: translate the sample 100 along Y via the dialog
        sample = list(self.model.sample.items())[0][1]
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [0.0, 0.0, 0.0], decimal=5)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.translate_sample_action), Qt.LeftButton)
        widget = self.getDockedWidget(self.window.docks, TransformDialog.dock_flag)
        # Clearing the line edit should disable the execute button
        QTest.keyClick(widget.tool.y_position.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClick(widget.tool.y_position.form_lineedit, Qt.Key_Delete)
        self.assertFalse(widget.tool.execute_button.isEnabled())
        QTest.keyClicks(widget.tool.y_position.form_lineedit, '100')
        self.assertTrue(widget.tool.execute_button.isEnabled())
        QTest.mouseClick(widget.tool.execute_button, Qt.LeftButton)
        # Sample object is re-fetched because the transform replaces it in the model
        sample = list(self.model.sample.items())[0][1]
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [0.0, 100.0, 0.0], decimal=5)
        self.triggerUndo()
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [0.0, 0.0, 0.0], decimal=5)
        self.triggerRedo()
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [0.0, 100.0, 0.0], decimal=5)
        # Rotate the sample 90 degrees about Z; (0, 100, 0) maps to (-100, 0, 0)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.rotate_sample_action), Qt.LeftButton)
        widget = self.getDockedWidget(self.window.docks, TransformDialog.dock_flag)
        QTest.keyClick(widget.tool.z_rotation.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClick(widget.tool.z_rotation.form_lineedit, Qt.Key_Delete)
        self.assertFalse(widget.tool.execute_button.isEnabled())
        QTest.keyClicks(widget.tool.z_rotation.form_lineedit, '90')
        self.assertTrue(widget.tool.execute_button.isEnabled())
        QTest.mouseClick(widget.tool.execute_button, Qt.LeftButton)
        sample = list(self.model.sample.items())[0][1]
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [-100.0, 0.0, 0.0], decimal=5)
        self.triggerUndo()
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [0.0, 100.0, 0.0], decimal=5)
        self.triggerRedo()
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [-100.0, 0.0, 0.0], decimal=5)
        # Move-origin tool: cycle through every move/ignore combobox option
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.transform_sample_action), Qt.LeftButton)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.move_origin_action), Qt.LeftButton)
        widget = self.getDockedWidget(self.window.docks, TransformDialog.dock_flag)
        for i in range(widget.tool.move_combobox.count()):
            QTest.keyClick(widget.tool.move_combobox, Qt.Key_Down)
        for i in range(widget.tool.ignore_combobox.count()):
            QTest.keyClick(widget.tool.ignore_combobox, Qt.Key_Down)
        QTest.mouseClick(widget.tool.execute_button, Qt.LeftButton)
        self.triggerUndo()
        np.testing.assert_array_almost_equal(sample.bounding_box.center, [-100., 0.0, 0.0], decimal=5)
        self.triggerRedo()
        # Plane-align tool: cycle the plane options, then use graphical picking
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.plane_align_action), Qt.LeftButton)
        widget = self.getDockedWidget(self.window.docks, TransformDialog.dock_flag)
        for i in range(widget.tool.plane_combobox.count()):
            QTest.keyClick(widget.tool.plane_combobox, Qt.Key_Down)
        QTest.mouseClick(widget.tool.execute_button, Qt.LeftButton)
        QTest.mouseClick(widget.tool.pick_button, Qt.LeftButton)
        QTest.mouseClick(self.window.gl_widget, Qt.LeftButton)
        QTest.mouseClick(widget.tool.select_button, Qt.LeftButton)
    def keyinFiducials(self):
        """Add fiducial points through the key-in dialog, then exercise the
        point manager (reorder, inline edit, delete) with undo/redo checks."""
        # Add Fiducial Points via the key-in dialog
        self.window.keyin_fiducial_action.trigger()
        widget = self.getDockedWidget(self.window.docks, InsertPointDialog.dock_flag)
        # Clearing a coordinate should disable the execute button
        QTest.keyClick(widget.z_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClick(widget.z_axis.form_lineedit, Qt.Key_Delete)
        self.assertFalse(widget.execute_button.isEnabled())
        QTest.keyClicks(widget.z_axis.form_lineedit, '100')
        self.assertTrue(widget.execute_button.isEnabled())
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        # Second point at (50, 0, 100)
        QTest.keyClick(widget.x_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClicks(widget.x_axis.form_lineedit, '50')
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        self.triggerUndo()
        self.assertEqual(self.model.fiducials.size, 1)
        self.triggerRedo()
        self.assertEqual(self.model.fiducials.size, 2)
        # Test Point Manager: click a cell, move the point up/down, edit inline
        widget = self.getDockedWidget(self.window.docks, PointManager.dock_flag)
        self.assertTrue(widget.isVisible())
        self.assertEqual(widget.point_type, PointType.Fiducial)
        # Offsets place the click inside the first cell of the second row
        x_pos = widget.table_view.columnViewportPosition(0) + 5
        y_pos = widget.table_view.rowViewportPosition(1) + 10
        pos = QPoint(x_pos, y_pos)
        QTest.mouseClick(widget.table_view.viewport(), Qt.LeftButton, pos=pos)
        QTest.mouseClick(widget.move_up_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//20)
        QTest.mouseClick(widget.move_down_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//20)
        # Double-click opens the inline editor; type a new X value and commit
        QTest.mouseDClick(widget.table_view.viewport(), Qt.LeftButton, pos=pos)
        QTest.keyClicks(widget.table_view.viewport().focusWidget(), '100')
        QTest.keyClick(widget.table_view.viewport().focusWidget(), Qt.Key_Enter)
        QTest.qWait(WAIT_TIME//20)
        np.testing.assert_array_almost_equal(self.model.fiducials[1].points, [100., 0., 100.], decimal=3)
        self.triggerUndo()
        np.testing.assert_array_almost_equal(self.model.fiducials[1].points, [50., 0., 100.], decimal=3)
        QTest.qWait(WAIT_TIME//20)
        QTest.mouseClick(widget.delete_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//20)
        self.assertEqual(self.model.fiducials.size, 1)
        self.triggerUndo()
        self.assertEqual(self.model.fiducials.size, 2)
    def keyinPoints(self):
        """Add measurement points through the key-in dialog and verify the
        point manager switches to the measurement point type."""
        # Add Measurement Points via the key-in dialog
        self.window.keyin_measurement_action.trigger()
        widget = self.getDockedWidget(self.window.docks, InsertPointDialog.dock_flag)
        # Clearing a coordinate should disable the execute button
        QTest.keyClick(widget.z_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClick(widget.z_axis.form_lineedit, Qt.Key_Delete)
        self.assertFalse(widget.execute_button.isEnabled())
        QTest.keyClicks(widget.z_axis.form_lineedit, '10')
        self.assertTrue(widget.execute_button.isEnabled())
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        # Second point at (20, 0, 10)
        QTest.keyClick(widget.x_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClicks(widget.x_axis.form_lineedit, '20')
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        self.triggerUndo()
        self.assertEqual(self.model.measurement_points.size, 1)
        self.triggerRedo()
        self.assertEqual(self.model.measurement_points.size, 2)
        # Test Point Manager shows measurement (not fiducial) points
        widget = self.getDockedWidget(self.window.docks, PointManager.dock_flag)
        self.assertTrue(widget.isVisible())
        self.assertEqual(widget.point_type, PointType.Measurement)
    def insertVectors(self):
        """Create measurement vectors through the strain-component dialog for
        both detectors and check the resulting vector array values."""
        # Add Vectors via the dialog
        self.window.select_strain_component_action.trigger()
        widget = self.getDockedWidget(self.window.docks, InsertVectorDialog.dock_flag)
        detector_names = list(widget.parent_model.instrument.detectors.keys())
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        # Select the second detector by typing the first letter of its name
        QTest.keyClicks(widget.detector_combobox, detector_names[1][0], delay=50)
        QTest.keyClick(widget.component_combobox, Qt.Key_Down)
        click_check_box(widget.reverse_checkbox)
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//5)  # wait is necessary since vectors are created on another thread
        mv = widget.parent_model.measurement_vectors
        self.assertEqual(mv.shape, (2, 6, 1))
        np.testing.assert_array_almost_equal(mv[0, :, 0], [1, 0, 0, 0, -1, 0], decimal=5)
        # Create a second alignment of vectors
        QTest.keyClicks(widget.alignment_combobox, 'a')
        # QTest.mouseClick(widget.component_combobox, Qt.LeftButton, delay=100)
        QTest.keyClick(widget.component_combobox, Qt.Key_Down)
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//5)
        # Key-in component: (1, 1, 0) normalised gives the -0.70711 values below
        QTest.keyClicks(widget.component_combobox, 'k')
        QTest.keyClicks(widget.detector_combobox, detector_names[0][0], delay=50)
        edit_line_edit_text(widget.x_axis.form_lineedit, '1.0')
        edit_line_edit_text(widget.y_axis.form_lineedit, '1.0')
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//5)
        mv = widget.parent_model.measurement_vectors
        self.assertEqual(mv.shape, (2, 6, 2))
        np.testing.assert_array_almost_equal(mv[0, :, 1], [-0.70711, -0.70711, 0, 0, 0, -1.0], decimal=5)
        # Test Vector Manager is shown
        widget = self.getDockedWidget(self.window.docks, VectorManager.dock_flag)
        self.assertTrue(widget.isVisible())
    def pointPicking(self):
        """Exercise the graphical point-picking dialog: plane selection, grid
        options, point/line/area/object tools, and view transforms."""
        # Add points graphically
        self.window.pick_measurement_action.trigger()
        widget = self.getDockedWidget(self.window.docks, PickPointDialog.dock_flag)
        viewport = widget.view.viewport()
        # Cycle through every cross-section plane, then move/set the plane position
        for i in range(widget.plane_combobox.count()):
            QTest.keyClick(widget.plane_combobox, Qt.Key_Down)
        mouse_drag(widget.plane_slider, QPoint(), QPoint(10, 0))
        QTest.keyClick(widget.plane_lineedit, Qt.Key_A, Qt.ControlModifier)
        QTest.keyClick(widget.plane_lineedit, Qt.Key_Delete)
        QTest.keyClicks(widget.plane_lineedit, '-10')
        QTest.keyClick(widget.plane_lineedit, Qt.Key_Enter)
        # Grid options tab: toggle grid display and snapping, switch grid type
        widget.tabs.setCurrentIndex(2)
        click_check_box(widget.show_grid_checkbox)
        self.assertTrue(widget.view.show_grid)
        click_check_box(widget.snap_to_grid_checkbox)
        self.assertTrue(widget.view.snap_to_grid)
        self.assertTrue(widget.grid_widget.isVisible())
        combo = widget.grid_widget.findChild(QComboBox)
        current_index = combo.currentIndex()
        new_index = (current_index + 1) % combo.count()
        grid_type = widget.view.grid.type
        combo.setCurrentIndex(new_index)
        QTest.qWait(WAIT_TIME//100)  # Delay allow the grid to render
        self.assertNotEqual(grid_type, widget.view.grid.type)
        combo.setCurrentIndex(current_index)
        QTest.qWait(WAIT_TIME//100)  # Delay allow the grid to render
        self.assertEqual(grid_type, widget.view.grid.type)
        # Point tool: executing adds a point; clicking the viewport places one
        widget.tabs.setCurrentIndex(1)
        QTest.mouseClick(widget.point_selector, Qt.LeftButton)
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        self.assertEqual(self.model.measurement_points.size, 2)
        QTest.mouseClick(viewport, Qt.LeftButton)
        QTest.mouseClick(widget.execute_button, Qt.LeftButton)
        self.assertEqual(self.model.measurement_points.size, 3)
        # Line tool: a drag creates line_tool_size points along the drag
        widget.tabs.setCurrentIndex(1)
        QTest.mouseClick(widget.line_selector, Qt.LeftButton)
        self.assertTrue(widget.line_tool_widget.isVisible())
        widget.line_point_count_spinbox.setValue(widget.scene.line_tool_size + 1)
        expected_count = len(widget.scene.items()) + widget.scene.line_tool_size
        mouse_drag(viewport)
        self.assertEqual(len(widget.scene.items()), expected_count)
        # Area tool: a drag creates an x-by-y grid of points
        QTest.mouseClick(widget.area_selector, Qt.LeftButton)
        self.assertFalse(widget.line_tool_widget.isVisible())
        self.assertTrue(widget.area_tool_widget.isVisible())
        widget.area_x_spinbox.setValue(widget.scene.area_tool_size[0] + 1)
        widget.area_y_spinbox.setValue(widget.scene.area_tool_size[1] + 2)
        expected_count = len(widget.scene.items()) + (widget.scene.area_tool_size[0] * widget.scene.area_tool_size[1])
        mouse_drag(viewport)
        self.assertEqual(len(widget.scene.items()), expected_count)
        # Object selector: drag-select items then delete them with the keyboard
        QTest.mouseClick(widget.object_selector, Qt.LeftButton)
        self.assertFalse(widget.line_tool_widget.isVisible())
        self.assertFalse(widget.area_tool_widget.isVisible())
        mouse_drag(viewport)
        selected_count = len(widget.scene.selectedItems())
        QTest.keyClick(viewport, Qt.Key_Delete)
        self.assertEqual(len(widget.scene.items()), expected_count - selected_count)
        # Help overlay toggles the view foreground
        self.assertFalse(widget.view.has_foreground)
        QTest.mouseClick(widget.help_button, Qt.LeftButton)
        QTest.qWait(WAIT_TIME//100)  # Delay allow the grid to render
        self.assertTrue(widget.view.has_foreground and not widget.view.show_help)
        # Middle-drag translates the scene, right-drag rotates it
        self.assertTrue(widget.view.scene_transform.isIdentity())
        mouse_drag(viewport, button=Qt.MiddleButton)
        self.assertTrue(widget.view.scene_transform.isTranslating())
        self.assertFalse(widget.view.scene_transform.isRotating())
        mouse_drag(viewport, button=Qt.RightButton)
        # QTransform type is always True for translation when rotation is True
        self.assertTrue(widget.view.scene_transform.isTranslating())
        self.assertTrue(widget.view.scene_transform.isRotating())
        # Wheel scroll zooms; equal opposite scrolls cancel back to identity
        widget.view.resetTransform()
        self.assertTrue(widget.view.transform().isIdentity())
        self.assertFalse(widget.view.transform().isScaling())
        mouse_wheel_scroll(viewport)
        self.assertTrue(widget.view.transform().isScaling())
        mouse_wheel_scroll(viewport, delta=-10)
        self.assertTrue(widget.view.transform().isIdentity())
        QTest.mouseClick(widget.reset_button, Qt.LeftButton)
        self.assertTrue(widget.view.scene_transform.isIdentity())
    def switchInstrument(self):
        """Switch the instrument to ENGIN-X (confirming the warning dialog) and
        verify the undo stack is cleared and scene toggling works."""
        # switch instruments
        self.assertNotEqual(self.window.undo_stack.count(), 0)
        QTimer.singleShot(200, lambda: click_message_box(0))  # click first button in message box
        self.window.presenter.changeInstrument('ENGIN-X')
        QTest.qWait(WAIT_TIME)
        # Changing instrument clears the undo history but keeps project data
        self.assertEqual(self.window.undo_stack.count(), 0)
        self.assertEqual(self.model.project_data['name'], 'Test')
        self.assertEqual(self.model.instrument.name, 'ENGIN-X')
        # Toggle between sample and instrument scenes and back
        self.assertIs(self.window.scenes.active_scene, self.window.scenes.sample_scene)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.toggle_scene_action), Qt.LeftButton)
        self.assertIs(self.window.scenes.active_scene, self.window.scenes.instrument_scene)
        QTest.mouseClick(self.toolbar.widgetForAction(self.window.toggle_scene_action), Qt.LeftButton)
        self.assertIs(self.window.scenes.active_scene, self.window.scenes.sample_scene)
    def jawControl(self):
        """Exercise the incident-jaws dialog: move the jaw positioner within
        and outside its limits, and change the jaw aperture with undo."""
        # Test incident jaws Dialog and change jaw position
        self.window.docks.showJawControl()
        widget = self.getDockedWidget(self.window.docks, JawControl.dock_flag)
        jaw_form = widget.position_form_group.form_controls[0]
        jaw = self.model.instrument.jaws.positioner
        # Target halfway between the lower limit and the current offset
        new_value = jaw.links[0].lower_limit + (jaw.links[0].offset - jaw.links[0].lower_limit) / 2
        edit_line_edit_text(jaw_form.form_lineedit, f'{new_value}')
        QTest.mouseClick(widget.move_jaws_button, Qt.LeftButton)
        set_point = self.model.instrument.jaws.positioner.set_points[0]
        self.assertAlmostEqual(set_point, new_value, 3)
        # Out-of-limit value invalidates the form and disables the move button
        edit_line_edit_text(jaw_form.form_lineedit, f'{jaw.links[0].lower_limit - 1}')
        self.assertFalse(jaw_form.valid)
        self.assertFalse(widget.move_jaws_button.isEnabled())
        # The form's extra button toggles limit checking; undo re-enables it
        QTest.mouseClick(jaw_form.extra[0], Qt.LeftButton)
        self.assertTrue(jaw_form.valid)
        self.assertTrue(widget.move_jaws_button.isEnabled())
        self.triggerUndo()
        self.assertFalse(jaw_form.valid)
        self.assertFalse(widget.move_jaws_button.isEnabled())
        # Change aperture of the jaw
        aperture_form = widget.aperture_form_group.form_controls
        edit_line_edit_text(aperture_form[0].form_lineedit, '5.000')
        edit_line_edit_text(aperture_form[1].form_lineedit, '6.000')
        old_aperture = self.model.instrument.jaws.aperture
        QTest.mouseClick(widget.change_aperture_button, Qt.LeftButton)
        aperture = self.model.instrument.jaws.aperture
        np.testing.assert_array_almost_equal(aperture, (5.000, 6.000), decimal=3)
        self.triggerUndo()
        aperture = self.model.instrument.jaws.aperture
        np.testing.assert_array_almost_equal(aperture, old_aperture, decimal=3)
    def positionerControl(self):
        """Exercise the positioner dialog: switch positioning stacks, edit a
        joint set-point, and verify the move with undo/redo."""
        # Test Positioner Dialog
        self.window.docks.showPositionerControl()
        widget = self.getDockedWidget(self.window.docks, PositionerControl.dock_flag)
        positioner_name = self.model.instrument.positioning_stack.name
        # QTest.mouseClick(widget.stack_combobox, Qt.LeftButton, delay=100)
        # Key-down in the combobox selects the next positioning stack
        QTest.keyClick(widget.stack_combobox, Qt.Key_Down)
        self.assertNotEqual(self.model.instrument.positioning_stack.name, positioner_name)
        self.triggerUndo()
        self.assertEqual(self.model.instrument.positioning_stack.name, positioner_name)
        form = widget.positioner_form_controls[0]
        stack = self.model.instrument.positioning_stack
        index = stack.order[0]
        # Target halfway between the current offset and the upper limit
        new_value = stack.links[index].upper_limit - (stack.links[index].upper_limit - stack.links[index].offset) / 2
        edit_line_edit_text(form.form_lineedit, f'{new_value}')
        # Click both extra buttons of the second joint form, undoing each
        form = widget.positioner_form_controls[1]
        QTest.mouseClick(form.extra[0], Qt.LeftButton)
        self.triggerUndo()
        QTest.mouseClick(form.extra[1], Qt.LeftButton)
        self.triggerUndo()
        old_set_point = stack.toUserFormat(stack.set_points)[0]
        self.window.scenes.switchToSampleScene()
        QTest.mouseClick(widget.move_joints_button, Qt.LeftButton)
        set_point = stack.toUserFormat(stack.set_points)[0]
        self.assertAlmostEqual(set_point, new_value, 3)
        self.triggerUndo()
        set_point = stack.toUserFormat(stack.set_points)[0]
        self.assertAlmostEqual(old_set_point, set_point, 3)
        self.triggerRedo()
        set_point = stack.toUserFormat(stack.set_points)[0]
        self.assertAlmostEqual(new_value, set_point, 3)
    def detectorControl(self):
        """Open the detector control widget and verify that changing the
        collimator to None can be undone."""
        # Test Detector Widget
        detector_name = list(self.model.instrument.detectors.keys())[0]
        self.window.docks.showDetectorControl(detector_name)
        widget = self.getDockedWidget(self.window.docks, DetectorControl.dock_flag)
        widget.hide()
        detector = self.model.instrument.detectors[detector_name]
        old_collimator = detector.current_collimator
        self.window.presenter.changeCollimators(detector_name, None)
        self.assertIs(detector.current_collimator, None)
        self.triggerUndo()
        self.assertEqual(detector.current_collimator, old_collimator)
    def alignSample(self):
        """Align the sample on the instrument via the 6D pose dialog and check
        the alignment matrix is set, undone and redone correctly."""
        # Test Sample Alignment
        self.window.docks.showAlignSample()
        widget = self.getDockedWidget(self.window.docks, DetectorControl.dock_flag)
        self.assertIsNone(self.model.alignment)
        # First alignment: translation only
        edit_line_edit_text(widget.x_position.form_lineedit, '5.000')
        edit_line_edit_text(widget.y_position.form_lineedit, '6.000')
        edit_line_edit_text(widget.z_position.form_lineedit, '9.000')
        QTest.mouseClick(widget.execute_button, Qt.LeftButton, delay=100)
        self.assertIsNotNone(self.model.alignment)
        # Second alignment adds rotations on top
        edit_line_edit_text(widget.x_rotation.form_lineedit, '20.000')
        edit_line_edit_text(widget.y_rotation.form_lineedit, '90.000')
        edit_line_edit_text(widget.z_rotation.form_lineedit, '-50.000')
        QTest.mouseClick(widget.execute_button, Qt.LeftButton, delay=100)
        self.assertIsNotNone(self.model.alignment)
        self.triggerUndo()
        self.assertIsNone(self.model.alignment)
        self.triggerRedo()
        self.assertIsNotNone(self.model.alignment)
    def runSimulation(self):
        """Run a simulation (with `Simulation.execute` monkey-patched by the
        module-level `wrapped` stub) and exercise the simulation dialog:
        result filters, path-length plot and script export."""
        self.model.alignment = self.model.alignment.identity()
        self.window.check_collision_action.setChecked(True)
        self.window.check_limits_action.setChecked(False)
        self.window.compute_path_length_action.setChecked(True)
        # Replace the real execute with the test stub so results are deterministic
        Simulation.execute = wrapped
        self.window.run_simulation_action.trigger()
        self.assertIsNotNone(self.model.simulation)
        # Simulation runs on another thread; wait for it to start then finish
        QTest.qWait(WAIT_TIME//5)
        self.assertTrue(self.model.simulation.isRunning())
        QTest.qWait(WAIT_TIME * 5)
        self.assertFalse(self.model.simulation.isRunning())
        self.assertEqual(len(self.model.simulation.results), 6)
        widget = self.getDockedWidget(self.window.docks, SimulationDialog.dock_flag)
        self.assertEqual(len(widget.result_list.panes), 6)
        # Expected result breakdown from the stubbed simulation: 2 good, 4 warn
        self.assertEqual(widget.result_counts[widget.ResultKey.Good], 2)
        self.assertEqual(widget.result_counts[widget.ResultKey.Warn], 4)
        self.assertEqual(widget.result_counts[widget.ResultKey.Fail], 0)
        self.assertEqual(widget.result_counts[widget.ResultKey.Skip], 0)
        # Toggling the filter buttons hides/shows the matching result panes
        QTest.mouseClick(widget.filter_button_group.button(2), Qt.LeftButton)
        self.assertEqual([pane.isHidden() for pane in widget.result_list.panes].count(True), 0)
        QTest.mouseClick(widget.filter_button_group.button(0), Qt.LeftButton)
        self.assertEqual([pane.isHidden() for pane in widget.result_list.panes].count(True), 2)
        QTest.mouseClick(widget.filter_button_group.button(1), Qt.LeftButton)
        self.assertEqual([pane.isHidden() for pane in widget.result_list.panes].count(True), 6)
        QTest.mouseClick(widget.filter_button_group.button(0), Qt.LeftButton)
        self.assertEqual([pane.isHidden() for pane in widget.result_list.panes].count(True), 4)
        # Path-length plotter opens and closes cleanly
        QTest.mouseClick(widget.path_length_button, Qt.LeftButton)
        path_length_plotter = self.window.findChild(PathLengthPlotter)
        self.assertTrue(path_length_plotter.isVisible())
        path_length_plotter.close()
        self.assertFalse(path_length_plotter.isVisible())
        # Script export dialog opens and closes cleanly
        QTest.mouseClick(widget.export_button, Qt.LeftButton)
        script_exporter = self.window.findChild(ScriptExportDialog)
        self.assertTrue(script_exporter.isVisible())
        script_exporter.close()
        self.assertFalse(script_exporter.isVisible())
        # Re-opening the simulation dialog keeps the finished results
        self.window.fiducial_manager_action.trigger()
        widget = self.getDockedWidget(self.window.docks, PointManager.dock_flag)
        self.assertEqual(widget.point_type, PointType.Fiducial)
        widget = self.getDockedWidget(self.window.docks, SimulationDialog.dock_flag)
        self.window.simulation_dialog_action.trigger()
        self.assertFalse(widget.simulation.isRunning())
        self.assertEqual(len(widget.result_list.panes), 6)
    def testOtherWindows(self):
        """Smoke-test the remaining windows: about dialog, recent-projects
        menu, new-project dialog, undo view, progress dialog, and the
        alignment/calibration error dialogs."""
        self.window.show_about_action.trigger()
        self.assertTrue(self.window.about_dialog.isVisible())
        QTest.keyClick(self.window.about_dialog, Qt.Key_Escape)
        self.assertFalse(self.window.about_dialog.isVisible())
        # Test the Recent project menu: empty list shows a single 'None' entry
        self.window.recent_projects = []
        self.assertTrue(self.window.recent_menu.isEmpty())
        self.window.populateRecentMenu()
        self.assertEqual(len(self.window.recent_menu.actions()), 1)
        self.assertEqual(self.window.recent_menu.actions()[0].text(), 'None')
        self.window.recent_projects = ['c://test.hdf', 'c://test2.hdf', 'c://test3.hdf', 'c://test4.hdf',
                                       'c://test5.hdf', 'c://test6.hdf', 'c://test7.hdf', 'c://test8.hdf']
        self.window.populateRecentMenu()
        self.assertEqual(len(self.window.recent_menu.actions()), 8)
        # New project dialog opens only on a clean undo stack; escape closes it
        self.window.undo_stack.setClean()
        self.window.showNewProjectDialog()
        project_dialog = self.window.findChild(ProjectDialog)
        self.assertTrue(project_dialog.isVisible())
        self.assertEqual(project_dialog.list_widget.count(), 6)
        QTest.keyClick(project_dialog, Qt.Key_Escape)
        self.assertFalse(project_dialog.isVisible())
        # Undo history view
        self.window.undo_view_action.trigger()
        self.assertTrue(self.window.undo_view.isVisible())
        self.window.undo_view.close()
        self.assertFalse(self.window.undo_view.isVisible())
        # Progress dialog ignores escape and must be closed programmatically
        self.window.progress_dialog.showMessage('Testing')
        self.assertTrue(self.window.progress_dialog.isVisible())
        QTest.keyClick(project_dialog, Qt.Key_Escape)
        self.assertTrue(self.window.progress_dialog.isVisible())
        self.window.progress_dialog.close()
        self.assertFalse(self.window.progress_dialog.isVisible())
        # Alignment error dialog built from a perfect rigid transform fixture
        indices = np.array([0, 1, 2, 3])
        enabled = np.array([True, True, True, True])
        points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
        transform_result = rigid_transform(points, points)
        end_q = [0.0] * 4
        order_fix = [3, 2, 1, 0]
        self.window.showAlignmentError(indices, enabled, points, transform_result, end_q, order_fix)
        alignment_error = self.window.findChild(AlignmentErrorDialog)
        self.assertTrue(alignment_error.isVisible())
        alignment_error.close()
        self.assertFalse(alignment_error.isVisible())
        # Calibration error dialog is modal: accept it from a queued timer
        pose_id = np.array([1, 2, 3])
        fiducial_id = np.array([3, 2, 1])
        error = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
        QTimer.singleShot(WAIT_TIME//5, lambda: self.window.findChild(CalibrationErrorDialog).accept())
        self.assertTrue(self.window.showCalibrationError(pose_id, fiducial_id, error))
    def testSettings(self):
        """Exercise the settings subsystem: logging setup, value validation
        (booleans, bounded numbers, colours, geometry, lists), reset
        behaviour, and the preferences dialog."""
        log_filename = 'main.logs'
        config.setup_logging(log_filename)
        self.assertTrue((config.LOG_PATH / log_filename).exists())
        # Boolean setting: accepts bool and 'true' string; other values coerce truthy
        self.assertTrue(config.settings.value(config.Key.Align_First))
        config.settings.setValue(config.Key.Align_First, False, True)
        self.assertFalse(config.settings.value(config.Key.Align_First))
        config.settings.setValue(config.Key.Align_First, 'true', True)
        self.assertTrue(config.settings.value(config.Key.Align_First))
        config.settings.setValue(config.Key.Align_First, -2, True)
        self.assertTrue(config.settings.value(config.Key.Align_First))
        # Bounded numeric setting: out-of-range values fall back to the default
        item = config.__defaults__[config.Key.Local_Max_Eval]
        self.assertEqual(config.settings.value(config.Key.Local_Max_Eval), item.default)
        config.settings.setValue(config.Key.Local_Max_Eval, item.limits[1] + 1, True)
        self.assertEqual(config.settings.value(config.Key.Local_Max_Eval), item.default)
        config.settings.setValue(config.Key.Local_Max_Eval, item.limits[0] - 1, True)
        self.assertEqual(config.settings.value(config.Key.Local_Max_Eval), item.default)
        config.settings.setValue(config.Key.Local_Max_Eval, item.limits[1] - 1, True)
        self.assertEqual(config.settings.value(config.Key.Local_Max_Eval), item.limits[1] - 1)
        item = config.__defaults__[config.Key.Angular_Stop_Val]
        self.assertEqual(config.settings.value(config.Key.Angular_Stop_Val), item.default)
        config.settings.setValue(config.Key.Angular_Stop_Val, item.limits[1] + 1, True)
        self.assertEqual(config.settings.value(config.Key.Angular_Stop_Val), item.default)
        config.settings.setValue(config.Key.Angular_Stop_Val, item.limits[0] - 1, True)
        self.assertEqual(config.settings.value(config.Key.Angular_Stop_Val), item.default)
        config.settings.setValue(config.Key.Angular_Stop_Val, item.limits[1] - 1, True)
        self.assertEqual(config.settings.value(config.Key.Angular_Stop_Val), item.limits[1] - 1)
        # Colour setting: must be a 4-tuple of values in range; else default
        item = config.__defaults__[config.Key.Fiducial_Colour]
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), item.default)
        config.settings.setValue(config.Key.Fiducial_Colour, (2, 3, 4, 5), True)
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), item.default)
        config.settings.setValue(config.Key.Fiducial_Colour, (2, 3, 4), True)
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), item.default)
        config.settings.setValue(config.Key.Fiducial_Colour, ("h", "1.0", "1.0", "1.0"), True)
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), item.default)
        config.settings.setValue(config.Key.Fiducial_Colour, ("1.0", "1.0", "1.0", "1.0"), True)
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), (1, 1, 1, 1))
        config.settings.setValue(config.Key.Fiducial_Colour, (2, 3, 4, 5), True)
        self.assertEqual(config.settings.value(config.Key.Fiducial_Colour), item.default)
        # Geometry setting: only bytearray values are accepted
        item = config.__defaults__[config.Key.Geometry]
        self.assertEqual(config.settings.value(config.Key.Geometry), item.default)
        config.settings.setValue(config.Key.Geometry, '12345', True)
        self.assertEqual(config.settings.value(config.Key.Geometry), item.default)
        config.settings.setValue(config.Key.Geometry, bytearray(b'12345'), True)
        self.assertEqual(config.settings.value(config.Key.Geometry), bytearray(b'12345'))
        # Recent-projects setting: a scalar is wrapped into a list
        item = config.__defaults__[config.Key.Recent_Projects]
        self.assertEqual(config.settings.value(config.Key.Recent_Projects), item.default)
        config.settings.setValue(config.Key.Recent_Projects, 'name', True)
        self.assertEqual(config.settings.value(config.Key.Recent_Projects), ['name'])
        config.settings.setValue(config.Key.Recent_Projects, ['name', 'other'], True)
        self.assertEqual(config.settings.value(config.Key.Recent_Projects), ['name', 'other'])
        config.settings.system.sync()
        self.assertTrue(self.ini_file.samefile(config.settings.filename()))
        # reset() clears local values; reset(True) also clears system values
        config.settings.setValue(config.Key.Align_First, False, True)
        config.settings.reset()
        self.assertFalse(config.settings.value(config.Key.Align_First))
        config.settings.reset(True)
        self.assertTrue(config.settings.value(config.Key.Align_First))
        # Without the system flag the value is stored only locally
        config.settings.setValue(config.Key.Align_First, False)
        self.assertNotEqual(config.settings.value(config.Key.Align_First),
                            config.settings.system.value(config.Key.Align_First.value))
        # Preferences dialog: accept button enables only when a value changed
        self.window.showPreferences()
        preferences = self.window.findChild(Preferences)
        self.assertTrue(preferences.isVisible())
        comboboxes = preferences.findChildren(QComboBox)
        combo = comboboxes[0]
        current_index = combo.currentIndex()
        new_index = (current_index + 1) % combo.count()
        combo.setCurrentIndex(new_index)
        self.assertTrue(preferences.accept_button.isEnabled())
        combo.setCurrentIndex(current_index)
        self.assertFalse(preferences.accept_button.isEnabled())
        combo.setCurrentIndex(new_index)
        stored_key, old_value = combo.property(preferences.prop_name)
        self.assertEqual(config.settings.value(stored_key), old_value)
        QTest.mouseClick(preferences.accept_button, Qt.LeftButton, delay=100)
        self.assertNotEqual(config.settings.value(stored_key), old_value)
        self.assertFalse(preferences.isVisible())
        QTest.qWait(WAIT_TIME//50)
        # Reset button closes the dialog
        self.window.showPreferences()
        preferences = self.window.findChild(Preferences)
        self.assertTrue(preferences.isVisible())
        QTest.mouseClick(preferences.reset_button, Qt.LeftButton, delay=100)
        self.assertFalse(preferences.isVisible())
        QTest.qWait(WAIT_TIME // 50)
        # Cancel button closes the dialog even with empty project data
        self.window.presenter.model.project_data = {}
        self.window.showPreferences()
        preferences = self.window.findChild(Preferences)
        self.assertTrue(preferences.isVisible())
        QTest.mouseClick(preferences.cancel_button, Qt.LeftButton, delay=100)
        self.assertFalse(preferences.isVisible())
        QTest.qWait(WAIT_TIME // 50)
| [
"tests.helpers.edit_line_edit_text",
"tests.helpers.click_list_widget_item",
"PyQt5.QtTest.QTest.mouseClick",
"sscanss.config.settings.value",
"shutil.rmtree",
"numpy.testing.assert_array_almost_equal",
"sscanss.config.setup_logging",
"sscanss.config.settings.setValue",
"tests.helpers.click_check_bo... | [((1228, 1263), 'logging.disable', 'logging.disable', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1243, 1263), False, 'import logging\n'), ((1611, 1721), 'OpenGL.plugins.FormatHandler', 'FormatHandler', (['"""sscanss"""', '"""OpenGL.arrays.numpymodule.NumpyHandler"""', "['sscanss.core.math.matrix.Matrix44']"], {}), "('sscanss', 'OpenGL.arrays.numpymodule.NumpyHandler', [\n 'sscanss.core.math.matrix.Matrix44'])\n", (1624, 1721), False, 'from OpenGL.plugins import FormatHandler\n'), ((1739, 1751), 'sscanss.app.window.view.MainWindow', 'MainWindow', ([], {}), '()\n', (1749, 1751), False, 'from sscanss.app.window.view import MainWindow\n'), ((2014, 2040), 'sscanss.config.logging.getLogger', 'config.logging.getLogger', ([], {}), '()\n', (2038, 2040), True, 'import sscanss.config as config\n'), ((2226, 2251), 'sscanss.config.logging.shutdown', 'config.logging.shutdown', ([], {}), '()\n', (2249, 2251), True, 'import sscanss.config as config\n'), ((2260, 2287), 'shutil.rmtree', 'shutil.rmtree', (['cls.data_dir'], {}), '(cls.data_dir)\n', (2273, 2287), False, 'import shutil\n'), ((2381, 2409), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 10)'], {}), '(WAIT_TIME // 10)\n', (2392, 2409), False, 'from PyQt5.QtTest import QTest\n'), ((2501, 2529), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 10)'], {}), '(WAIT_TIME // 10)\n', (2512, 2529), False, 'from PyQt5.QtTest import QTest\n'), ((3597, 3630), 'tests.helpers.mouse_drag', 'mouse_drag', (['self.window.gl_widget'], {}), '(self.window.gl_widget)\n', (3607, 3630), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((3639, 3695), 'tests.helpers.mouse_drag', 'mouse_drag', (['self.window.gl_widget'], {'button': 'Qt.RightButton'}), '(self.window.gl_widget, button=Qt.RightButton)\n', (3649, 3695), False, 'from tests.helpers import QTestCase, 
mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((3704, 3755), 'tests.helpers.mouse_wheel_scroll', 'mouse_wheel_scroll', (['self.window.gl_widget'], {'delta': '(20)'}), '(self.window.gl_widget, delta=20)\n', (3722, 3755), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((3764, 3816), 'tests.helpers.mouse_wheel_scroll', 'mouse_wheel_scroll', (['self.window.gl_widget'], {'delta': '(-10)'}), '(self.window.gl_widget, delta=-10)\n', (3782, 3816), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((4903, 4972), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['project_dialog.create_project_button', 'Qt.LeftButton'], {}), '(project_dialog.create_project_button, Qt.LeftButton)\n', (4919, 4972), False, 'from PyQt5.QtTest import QTest\n'), ((5083, 5143), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['project_dialog.project_name_textbox', '"""Test"""'], {}), "(project_dialog.project_name_textbox, 'Test')\n", (5098, 5143), False, 'from PyQt5.QtTest import QTest\n'), ((5482, 5551), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['project_dialog.create_project_button', 'Qt.LeftButton'], {}), '(project_dialog.create_project_button, Qt.LeftButton)\n', (5498, 5551), False, 'from PyQt5.QtTest import QTest\n'), ((5561, 5606), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['project_dialog', 'Qt.Key_Escape'], {}), '(project_dialog, Qt.Key_Escape)\n', (5575, 5606), False, 'from PyQt5.QtTest import QTest\n'), ((5716, 5738), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['WAIT_TIME'], {}), '(WAIT_TIME)\n', (5727, 5738), False, 'from PyQt5.QtTest import QTest\n'), ((6370, 6443), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', 
(["widget.textboxes['inner_radius'].form_lineedit", '"""10"""'], {}), "(widget.textboxes['inner_radius'].form_lineedit, '10')\n", (6389, 6443), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((6452, 6525), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (["widget.textboxes['outer_radius'].form_lineedit", '"""10"""'], {}), "(widget.textboxes['outer_radius'].form_lineedit, '10')\n", (6471, 6525), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((6749, 6817), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (["widget.textboxes['outer_radius'].form_lineedit", '"""0"""'], {}), "(widget.textboxes['outer_radius'].form_lineedit, '0')\n", (6764, 6817), False, 'from PyQt5.QtTest import QTest\n'), ((6894, 6957), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.create_primitive_button', 'Qt.LeftButton'], {}), '(widget.create_primitive_button, Qt.LeftButton)\n', (6910, 6957), False, 'from PyQt5.QtTest import QTest\n'), ((7681, 7746), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget_2.create_primitive_button', 'Qt.LeftButton'], {}), '(widget_2.create_primitive_button, Qt.LeftButton)\n', (7697, 7746), False, 'from PyQt5.QtTest import QTest\n'), ((7755, 7783), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (7766, 7783), False, 'from PyQt5.QtTest import QTest\n'), ((8017, 8072), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.priority_button', 'Qt.LeftButton'], {}), '(widget.priority_button, Qt.LeftButton)\n', (8033, 8072), False, 'from PyQt5.QtTest import QTest\n'), ((8081, 8126), 'tests.helpers.click_list_widget_item', 'click_list_widget_item', (['widget.list_widget', '(1)'], {}), '(widget.list_widget, 1)\n', (8103, 8126), False, 'from tests.helpers 
import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((8135, 8190), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.priority_button', 'Qt.LeftButton'], {}), '(widget.priority_button, Qt.LeftButton)\n', (8151, 8190), False, 'from PyQt5.QtTest import QTest\n'), ((8365, 8410), 'tests.helpers.click_list_widget_item', 'click_list_widget_item', (['widget.list_widget', '(0)'], {}), '(widget.list_widget, 0)\n', (8387, 8410), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((8419, 8471), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.merge_button', 'Qt.LeftButton'], {}), '(widget.merge_button, Qt.LeftButton)\n', (8435, 8471), False, 'from PyQt5.QtTest import QTest\n'), ((8532, 8597), 'tests.helpers.click_list_widget_item', 'click_list_widget_item', (['widget.list_widget', '(1)', 'Qt.ControlModifier'], {}), '(widget.list_widget, 1, Qt.ControlModifier)\n', (8554, 8597), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((8606, 8658), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.merge_button', 'Qt.LeftButton'], {}), '(widget.merge_button, Qt.LeftButton)\n', (8622, 8658), False, 'from PyQt5.QtTest import QTest\n'), ((8799, 8844), 'tests.helpers.click_list_widget_item', 'click_list_widget_item', (['widget.list_widget', '(1)'], {}), '(widget.list_widget, 1)\n', (8821, 8844), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((8853, 8906), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.delete_button', 'Qt.LeftButton'], {}), '(widget.delete_button, Qt.LeftButton)\n', (8869, 8906), False, 'from PyQt5.QtTest 
import QTest\n'), ((9160, 9256), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[0.0, 0.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [0.0, 0.0,\n 0.0], decimal=5)\n', (9196, 9256), True, 'import numpy as np\n'), ((9453, 9540), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.y_position.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.tool.y_position.form_lineedit, Qt.Key_A, Qt.\n ControlModifier)\n', (9467, 9540), False, 'from PyQt5.QtTest import QTest\n'), ((9544, 9611), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.y_position.form_lineedit', 'Qt.Key_Delete'], {}), '(widget.tool.y_position.form_lineedit, Qt.Key_Delete)\n', (9558, 9611), False, 'from PyQt5.QtTest import QTest\n'), ((9686, 9746), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.tool.y_position.form_lineedit', '"""100"""'], {}), "(widget.tool.y_position.form_lineedit, '100')\n", (9701, 9746), False, 'from PyQt5.QtTest import QTest\n'), ((9819, 9878), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.execute_button', 'Qt.LeftButton'], {}), '(widget.tool.execute_button, Qt.LeftButton)\n', (9835, 9878), False, 'from PyQt5.QtTest import QTest\n'), ((9942, 10041), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[0.0, 100.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [0.0, \n 100.0, 0.0], decimal=5)\n', (9978, 10041), True, 'import numpy as np\n'), ((10072, 10168), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[0.0, 0.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [0.0, 0.0,\n 0.0], decimal=5)\n', (10108, 10168), True, 'import numpy as np\n'), ((10200, 10299), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', 
'[0.0, 100.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [0.0, \n 100.0, 0.0], decimal=5)\n', (10236, 10299), True, 'import numpy as np\n'), ((10492, 10579), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.z_rotation.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.tool.z_rotation.form_lineedit, Qt.Key_A, Qt.\n ControlModifier)\n', (10506, 10579), False, 'from PyQt5.QtTest import QTest\n'), ((10583, 10650), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.z_rotation.form_lineedit', 'Qt.Key_Delete'], {}), '(widget.tool.z_rotation.form_lineedit, Qt.Key_Delete)\n', (10597, 10650), False, 'from PyQt5.QtTest import QTest\n'), ((10725, 10784), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.tool.z_rotation.form_lineedit', '"""90"""'], {}), "(widget.tool.z_rotation.form_lineedit, '90')\n", (10740, 10784), False, 'from PyQt5.QtTest import QTest\n'), ((10857, 10916), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.execute_button', 'Qt.LeftButton'], {}), '(widget.tool.execute_button, Qt.LeftButton)\n', (10873, 10916), False, 'from PyQt5.QtTest import QTest\n'), ((10980, 11080), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[-100.0, 0.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [-100.0, \n 0.0, 0.0], decimal=5)\n', (11016, 11080), True, 'import numpy as np\n'), ((11111, 11210), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[0.0, 100.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [0.0, \n 100.0, 0.0], decimal=5)\n', (11147, 11210), True, 'import numpy as np\n'), ((11241, 11341), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[-100.0, 0.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [-100.0, \n 0.0, 0.0], decimal=5)\n', (11277, 
11341), True, 'import numpy as np\n'), ((11898, 11957), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.execute_button', 'Qt.LeftButton'], {}), '(widget.tool.execute_button, Qt.LeftButton)\n', (11914, 11957), False, 'from PyQt5.QtTest import QTest\n'), ((11993, 12093), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['sample.bounding_box.center', '[-100.0, 0.0, 0.0]'], {'decimal': '(5)'}), '(sample.bounding_box.center, [-100.0, \n 0.0, 0.0], decimal=5)\n', (12029, 12093), True, 'import numpy as np\n'), ((12439, 12498), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.execute_button', 'Qt.LeftButton'], {}), '(widget.tool.execute_button, Qt.LeftButton)\n', (12455, 12498), False, 'from PyQt5.QtTest import QTest\n'), ((12508, 12564), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.pick_button', 'Qt.LeftButton'], {}), '(widget.tool.pick_button, Qt.LeftButton)\n', (12524, 12564), False, 'from PyQt5.QtTest import QTest\n'), ((12573, 12627), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['self.window.gl_widget', 'Qt.LeftButton'], {}), '(self.window.gl_widget, Qt.LeftButton)\n', (12589, 12627), False, 'from PyQt5.QtTest import QTest\n'), ((12636, 12694), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.tool.select_button', 'Qt.LeftButton'], {}), '(widget.tool.select_button, Qt.LeftButton)\n', (12652, 12694), False, 'from PyQt5.QtTest import QTest\n'), ((12902, 12975), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.z_axis.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.z_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)\n', (12916, 12975), False, 'from PyQt5.QtTest import QTest\n'), ((12984, 13042), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.z_axis.form_lineedit', 'Qt.Key_Delete'], {}), '(widget.z_axis.form_lineedit, Qt.Key_Delete)\n', (12998, 13042), False, 'from PyQt5.QtTest import QTest\n'), ((13112, 13163), 
'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.z_axis.form_lineedit', '"""100"""'], {}), "(widget.z_axis.form_lineedit, '100')\n", (13127, 13163), False, 'from PyQt5.QtTest import QTest\n'), ((13231, 13285), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (13247, 13285), False, 'from PyQt5.QtTest import QTest\n'), ((13294, 13367), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.x_axis.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.x_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)\n', (13308, 13367), False, 'from PyQt5.QtTest import QTest\n'), ((13376, 13426), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.x_axis.form_lineedit', '"""50"""'], {}), "(widget.x_axis.form_lineedit, '50')\n", (13391, 13426), False, 'from PyQt5.QtTest import QTest\n'), ((13435, 13489), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (13451, 13489), False, 'from PyQt5.QtTest import QTest\n'), ((14013, 14033), 'PyQt5.QtCore.QPoint', 'QPoint', (['x_pos', 'y_pos'], {}), '(x_pos, y_pos)\n', (14019, 14033), False, 'from PyQt5.QtCore import Qt, QPoint, QTimer, QSettings\n'), ((14121, 14175), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.move_up_button', 'Qt.LeftButton'], {}), '(widget.move_up_button, Qt.LeftButton)\n', (14137, 14175), False, 'from PyQt5.QtTest import QTest\n'), ((14184, 14212), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (14195, 14212), False, 'from PyQt5.QtTest import QTest\n'), ((14219, 14275), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.move_down_button', 'Qt.LeftButton'], {}), '(widget.move_down_button, Qt.LeftButton)\n', (14235, 14275), False, 'from PyQt5.QtTest import QTest\n'), ((14284, 14312), 'PyQt5.QtTest.QTest.qWait', 
'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (14295, 14312), False, 'from PyQt5.QtTest import QTest\n'), ((14556, 14584), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (14567, 14584), False, 'from PyQt5.QtTest import QTest\n'), ((14591, 14695), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['self.model.fiducials[1].points', '[100.0, 0.0, 100.0]'], {'decimal': '(3)'}), '(self.model.fiducials[1].points, [100.0,\n 0.0, 100.0], decimal=3)\n', (14627, 14695), True, 'import numpy as np\n'), ((14724, 14827), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['self.model.fiducials[1].points', '[50.0, 0.0, 100.0]'], {'decimal': '(3)'}), '(self.model.fiducials[1].points, [50.0,\n 0.0, 100.0], decimal=3)\n', (14760, 14827), True, 'import numpy as np\n'), ((14829, 14857), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (14840, 14857), False, 'from PyQt5.QtTest import QTest\n'), ((14865, 14918), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.delete_button', 'Qt.LeftButton'], {}), '(widget.delete_button, Qt.LeftButton)\n', (14881, 14918), False, 'from PyQt5.QtTest import QTest\n'), ((14927, 14955), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 20)'], {}), '(WAIT_TIME // 20)\n', (14938, 14955), False, 'from PyQt5.QtTest import QTest\n'), ((15301, 15374), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.z_axis.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.z_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)\n', (15315, 15374), False, 'from PyQt5.QtTest import QTest\n'), ((15383, 15441), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.z_axis.form_lineedit', 'Qt.Key_Delete'], {}), '(widget.z_axis.form_lineedit, Qt.Key_Delete)\n', (15397, 15441), False, 'from PyQt5.QtTest import QTest\n'), ((15511, 15561), 
'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.z_axis.form_lineedit', '"""10"""'], {}), "(widget.z_axis.form_lineedit, '10')\n", (15526, 15561), False, 'from PyQt5.QtTest import QTest\n'), ((15629, 15683), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (15645, 15683), False, 'from PyQt5.QtTest import QTest\n'), ((15693, 15766), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.x_axis.form_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.x_axis.form_lineedit, Qt.Key_A, Qt.ControlModifier)\n', (15707, 15766), False, 'from PyQt5.QtTest import QTest\n'), ((15775, 15825), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.x_axis.form_lineedit', '"""20"""'], {}), "(widget.x_axis.form_lineedit, '20')\n", (15790, 15825), False, 'from PyQt5.QtTest import QTest\n'), ((15834, 15888), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (15850, 15888), False, 'from PyQt5.QtTest import QTest\n'), ((16596, 16650), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (16612, 16650), False, 'from PyQt5.QtTest import QTest\n'), ((16659, 16732), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.detector_combobox', 'detector_names[1][0]'], {'delay': '(50)'}), '(widget.detector_combobox, detector_names[1][0], delay=50)\n', (16674, 16732), False, 'from PyQt5.QtTest import QTest\n'), ((16741, 16795), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.component_combobox', 'Qt.Key_Down'], {}), '(widget.component_combobox, Qt.Key_Down)\n', (16755, 16795), False, 'from PyQt5.QtTest import QTest\n'), ((16804, 16844), 'tests.helpers.click_check_box', 'click_check_box', (['widget.reverse_checkbox'], {}), '(widget.reverse_checkbox)\n', (16819, 
16844), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((16853, 16907), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (16869, 16907), False, 'from PyQt5.QtTest import QTest\n'), ((16916, 16943), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 5)'], {}), '(WAIT_TIME // 5)\n', (16927, 16943), False, 'from PyQt5.QtTest import QTest\n'), ((17115, 17200), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['mv[0, :, 0]', '[1, 0, 0, 0, -1, 0]'], {'decimal': '(5)'}), '(mv[0, :, 0], [1, 0, 0, 0, -1, 0],\n decimal=5)\n', (17151, 17200), True, 'import numpy as np\n'), ((17206, 17253), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.alignment_combobox', '"""a"""'], {}), "(widget.alignment_combobox, 'a')\n", (17221, 17253), False, 'from PyQt5.QtTest import QTest\n'), ((17342, 17396), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.component_combobox', 'Qt.Key_Down'], {}), '(widget.component_combobox, Qt.Key_Down)\n', (17356, 17396), False, 'from PyQt5.QtTest import QTest\n'), ((17405, 17459), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (17421, 17459), False, 'from PyQt5.QtTest import QTest\n'), ((17468, 17495), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 5)'], {}), '(WAIT_TIME // 5)\n', (17479, 17495), False, 'from PyQt5.QtTest import QTest\n'), ((17503, 17550), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.component_combobox', '"""k"""'], {}), "(widget.component_combobox, 'k')\n", (17518, 17550), False, 'from PyQt5.QtTest import QTest\n'), ((17559, 17632), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.detector_combobox', 'detector_names[0][0]'], 
{'delay': '(50)'}), '(widget.detector_combobox, detector_names[0][0], delay=50)\n', (17574, 17632), False, 'from PyQt5.QtTest import QTest\n'), ((17641, 17696), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.x_axis.form_lineedit', '"""1.0"""'], {}), "(widget.x_axis.form_lineedit, '1.0')\n", (17660, 17696), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((17705, 17760), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.y_axis.form_lineedit', '"""1.0"""'], {}), "(widget.y_axis.form_lineedit, '1.0')\n", (17724, 17760), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((17769, 17823), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (17785, 17823), False, 'from PyQt5.QtTest import QTest\n'), ((17832, 17859), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 5)'], {}), '(WAIT_TIME // 5)\n', (17843, 17859), False, 'from PyQt5.QtTest import QTest\n'), ((17966, 18067), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['mv[0, :, 1]', '[-0.70711, -0.70711, 0, 0, 0, -1.0]'], {'decimal': '(5)'}), '(mv[0, :, 1], [-0.70711, -0.70711, 0, 0,\n 0, -1.0], decimal=5)\n', (18002, 18067), True, 'import numpy as np\n'), ((18657, 18724), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.plane_lineedit', 'Qt.Key_A', 'Qt.ControlModifier'], {}), '(widget.plane_lineedit, Qt.Key_A, Qt.ControlModifier)\n', (18671, 18724), False, 'from PyQt5.QtTest import QTest\n'), ((18733, 18785), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.plane_lineedit', 'Qt.Key_Delete'], {}), '(widget.plane_lineedit, Qt.Key_Delete)\n', (18747, 18785), False, 'from PyQt5.QtTest import 
QTest\n'), ((18794, 18839), 'PyQt5.QtTest.QTest.keyClicks', 'QTest.keyClicks', (['widget.plane_lineedit', '"""-10"""'], {}), "(widget.plane_lineedit, '-10')\n", (18809, 18839), False, 'from PyQt5.QtTest import QTest\n'), ((18848, 18899), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.plane_lineedit', 'Qt.Key_Enter'], {}), '(widget.plane_lineedit, Qt.Key_Enter)\n', (18862, 18899), False, 'from PyQt5.QtTest import QTest\n'), ((18948, 18990), 'tests.helpers.click_check_box', 'click_check_box', (['widget.show_grid_checkbox'], {}), '(widget.show_grid_checkbox)\n', (18963, 18990), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((19046, 19091), 'tests.helpers.click_check_box', 'click_check_box', (['widget.snap_to_grid_checkbox'], {}), '(widget.snap_to_grid_checkbox)\n', (19061, 19091), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((19446, 19475), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 100)'], {}), '(WAIT_TIME // 100)\n', (19457, 19475), False, 'from PyQt5.QtTest import QTest\n'), ((19623, 19652), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 100)'], {}), '(WAIT_TIME // 100)\n', (19634, 19652), False, 'from PyQt5.QtTest import QTest\n'), ((19792, 19846), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.point_selector', 'Qt.LeftButton'], {}), '(widget.point_selector, Qt.LeftButton)\n', (19808, 19846), False, 'from PyQt5.QtTest import QTest\n'), ((19855, 19909), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (19871, 19909), False, 'from PyQt5.QtTest import QTest\n'), ((19982, 20023), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['viewport', 'Qt.LeftButton'], {}), '(viewport, 
Qt.LeftButton)\n', (19998, 20023), False, 'from PyQt5.QtTest import QTest\n'), ((20032, 20086), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {}), '(widget.execute_button, Qt.LeftButton)\n', (20048, 20086), False, 'from PyQt5.QtTest import QTest\n'), ((20199, 20252), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.line_selector', 'Qt.LeftButton'], {}), '(widget.line_selector, Qt.LeftButton)\n', (20215, 20252), False, 'from PyQt5.QtTest import QTest\n'), ((20485, 20505), 'tests.helpers.mouse_drag', 'mouse_drag', (['viewport'], {}), '(viewport)\n', (20495, 20505), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((20583, 20636), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.area_selector', 'Qt.LeftButton'], {}), '(widget.area_selector, Qt.LeftButton)\n', (20599, 20636), False, 'from PyQt5.QtTest import QTest\n'), ((21037, 21057), 'tests.helpers.mouse_drag', 'mouse_drag', (['viewport'], {}), '(viewport)\n', (21047, 21057), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((21134, 21189), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.object_selector', 'Qt.LeftButton'], {}), '(widget.object_selector, Qt.LeftButton)\n', (21150, 21189), False, 'from PyQt5.QtTest import QTest\n'), ((21322, 21342), 'tests.helpers.mouse_drag', 'mouse_drag', (['viewport'], {}), '(viewport)\n', (21332, 21342), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((21410, 21449), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['viewport', 'Qt.Key_Delete'], {}), '(viewport, Qt.Key_Delete)\n', (21424, 21449), False, 'from PyQt5.QtTest import QTest\n'), ((21597, 
21648), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.help_button', 'Qt.LeftButton'], {}), '(widget.help_button, Qt.LeftButton)\n', (21613, 21648), False, 'from PyQt5.QtTest import QTest\n'), ((21657, 21686), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 100)'], {}), '(WAIT_TIME // 100)\n', (21668, 21686), False, 'from PyQt5.QtTest import QTest\n'), ((21876, 21920), 'tests.helpers.mouse_drag', 'mouse_drag', (['viewport'], {'button': 'Qt.MiddleButton'}), '(viewport, button=Qt.MiddleButton)\n', (21886, 21920), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((22065, 22108), 'tests.helpers.mouse_drag', 'mouse_drag', (['viewport'], {'button': 'Qt.RightButton'}), '(viewport, button=Qt.RightButton)\n', (22075, 22108), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((22492, 22520), 'tests.helpers.mouse_wheel_scroll', 'mouse_wheel_scroll', (['viewport'], {}), '(viewport)\n', (22510, 22520), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((22590, 22629), 'tests.helpers.mouse_wheel_scroll', 'mouse_wheel_scroll', (['viewport'], {'delta': '(-10)'}), '(viewport, delta=-10)\n', (22608, 22629), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((22700, 22752), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.reset_button', 'Qt.LeftButton'], {}), '(widget.reset_button, Qt.LeftButton)\n', (22716, 22752), False, 'from PyQt5.QtTest import QTest\n'), ((23108, 23130), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['WAIT_TIME'], {}), '(WAIT_TIME)\n', (23119, 23130), False, 'from PyQt5.QtTest 
import QTest\n'), ((24228, 24287), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['jaw_form.form_lineedit', 'f"""{new_value}"""'], {}), "(jaw_form.form_lineedit, f'{new_value}')\n", (24247, 24287), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((24296, 24352), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.move_jaws_button', 'Qt.LeftButton'], {}), '(widget.move_jaws_button, Qt.LeftButton)\n', (24312, 24352), False, 'from PyQt5.QtTest import QTest\n'), ((24490, 24568), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['jaw_form.form_lineedit', 'f"""{jaw.links[0].lower_limit - 1}"""'], {}), "(jaw_form.form_lineedit, f'{jaw.links[0].lower_limit - 1}')\n", (24509, 24568), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((24680, 24730), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['jaw_form.extra[0]', 'Qt.LeftButton'], {}), '(jaw_form.extra[0], Qt.LeftButton)\n', (24696, 24730), False, 'from PyQt5.QtTest import QTest\n'), ((25073, 25133), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['aperture_form[0].form_lineedit', '"""5.000"""'], {}), "(aperture_form[0].form_lineedit, '5.000')\n", (25092, 25133), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((25142, 25202), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['aperture_form[1].form_lineedit', '"""6.000"""'], {}), "(aperture_form[1].form_lineedit, '6.000')\n", (25161, 25202), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((25270, 25332), 'PyQt5.QtTest.QTest.mouseClick', 
'QTest.mouseClick', (['widget.change_aperture_button', 'Qt.LeftButton'], {}), '(widget.change_aperture_button, Qt.LeftButton)\n', (25286, 25332), False, 'from PyQt5.QtTest import QTest\n'), ((25396, 25465), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['aperture', '(5.0, 6.0)'], {'decimal': '(3)'}), '(aperture, (5.0, 6.0), decimal=3)\n', (25432, 25465), True, 'import numpy as np\n'), ((25560, 25631), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['aperture', 'old_aperture'], {'decimal': '(3)'}), '(aperture, old_aperture, decimal=3)\n', (25596, 25631), True, 'import numpy as np\n'), ((25990, 26040), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.stack_combobox', 'Qt.Key_Down'], {}), '(widget.stack_combobox, Qt.Key_Down)\n', (26004, 26040), False, 'from PyQt5.QtTest import QTest\n'), ((26511, 26566), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['form.form_lineedit', 'f"""{new_value}"""'], {}), "(form.form_lineedit, f'{new_value}')\n", (26530, 26566), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((26626, 26672), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['form.extra[0]', 'Qt.LeftButton'], {}), '(form.extra[0], Qt.LeftButton)\n', (26642, 26672), False, 'from PyQt5.QtTest import QTest\n'), ((26708, 26754), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['form.extra[1]', 'Qt.LeftButton'], {}), '(form.extra[1], Qt.LeftButton)\n', (26724, 26754), False, 'from PyQt5.QtTest import QTest\n'), ((26903, 26961), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.move_joints_button', 'Qt.LeftButton'], {}), '(widget.move_joints_button, Qt.LeftButton)\n', (26919, 26961), False, 'from PyQt5.QtTest import QTest\n'), ((28257, 28318), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.x_position.form_lineedit', 
'"""5.000"""'], {}), "(widget.x_position.form_lineedit, '5.000')\n", (28276, 28318), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28327, 28388), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.y_position.form_lineedit', '"""6.000"""'], {}), "(widget.y_position.form_lineedit, '6.000')\n", (28346, 28388), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28397, 28458), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.z_position.form_lineedit', '"""9.000"""'], {}), "(widget.z_position.form_lineedit, '9.000')\n", (28416, 28458), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28467, 28532), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {'delay': '(100)'}), '(widget.execute_button, Qt.LeftButton, delay=100)\n', (28483, 28532), False, 'from PyQt5.QtTest import QTest\n'), ((28592, 28654), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.x_rotation.form_lineedit', '"""20.000"""'], {}), "(widget.x_rotation.form_lineedit, '20.000')\n", (28611, 28654), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28663, 28725), 'tests.helpers.edit_line_edit_text', 'edit_line_edit_text', (['widget.y_rotation.form_lineedit', '"""90.000"""'], {}), "(widget.y_rotation.form_lineedit, '90.000')\n", (28682, 28725), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28734, 28797), 'tests.helpers.edit_line_edit_text', 
'edit_line_edit_text', (['widget.z_rotation.form_lineedit', '"""-50.000"""'], {}), "(widget.z_rotation.form_lineedit, '-50.000')\n", (28753, 28797), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((28806, 28871), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.execute_button', 'Qt.LeftButton'], {'delay': '(100)'}), '(widget.execute_button, Qt.LeftButton, delay=100)\n', (28822, 28871), False, 'from PyQt5.QtTest import QTest\n'), ((29501, 29528), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 5)'], {}), '(WAIT_TIME // 5)\n', (29512, 29528), False, 'from PyQt5.QtTest import QTest\n'), ((29595, 29621), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME * 5)'], {}), '(WAIT_TIME * 5)\n', (29606, 29621), False, 'from PyQt5.QtTest import QTest\n'), ((30888, 30946), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.path_length_button', 'Qt.LeftButton'], {}), '(widget.path_length_button, Qt.LeftButton)\n', (30904, 30946), False, 'from PyQt5.QtTest import QTest\n'), ((31178, 31231), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['widget.export_button', 'Qt.LeftButton'], {}), '(widget.export_button, Qt.LeftButton)\n', (31194, 31231), False, 'from PyQt5.QtTest import QTest\n'), ((32045, 32100), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['self.window.about_dialog', 'Qt.Key_Escape'], {}), '(self.window.about_dialog, Qt.Key_Escape)\n', (32059, 32100), False, 'from PyQt5.QtTest import QTest\n'), ((33085, 33130), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['project_dialog', 'Qt.Key_Escape'], {}), '(project_dialog, Qt.Key_Escape)\n', (33099, 33130), False, 'from PyQt5.QtTest import QTest\n'), ((33522, 33567), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['project_dialog', 'Qt.Key_Escape'], {}), '(project_dialog, Qt.Key_Escape)\n', (33536, 33567), False, 'from PyQt5.QtTest import QTest\n'), 
((33762, 33784), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (33770, 33784), True, 'import numpy as np\n'), ((33803, 33837), 'numpy.array', 'np.array', (['[True, True, True, True]'], {}), '([True, True, True, True])\n', (33811, 33837), True, 'import numpy as np\n'), ((33855, 33909), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])\n', (33863, 33909), True, 'import numpy as np\n'), ((33937, 33968), 'sscanss.core.math.rigid_transform', 'rigid_transform', (['points', 'points'], {}), '(points, points)\n', (33952, 33968), False, 'from sscanss.core.math import rigid_transform\n'), ((34358, 34377), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (34366, 34377), True, 'import numpy as np\n'), ((34400, 34419), 'numpy.array', 'np.array', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (34408, 34419), True, 'import numpy as np\n'), ((34436, 34497), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (34444, 34497), True, 'import numpy as np\n'), ((34752, 34786), 'sscanss.config.setup_logging', 'config.setup_logging', (['log_filename'], {}), '(log_filename)\n', (34772, 34786), True, 'import sscanss.config as config\n'), ((34934, 34995), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Align_First', '(False)', '(True)'], {}), '(config.Key.Align_First, False, True)\n', (34958, 34995), True, 'import sscanss.config as config\n'), ((35076, 35138), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Align_First', '"""true"""', '(True)'], {}), "(config.Key.Align_First, 'true', True)\n", (35100, 35138), True, 'import sscanss.config as config\n'), ((35218, 35276), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Align_First', '(-2)', '(True)'], {}), '(config.Key.Align_First, -2, True)\n', 
(35242, 35276), True, 'import sscanss.config as config\n'), ((35509, 35586), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Local_Max_Eval', '(item.limits[1] + 1)', '(True)'], {}), '(config.Key.Local_Max_Eval, item.limits[1] + 1, True)\n', (35533, 35586), True, 'import sscanss.config as config\n'), ((35684, 35761), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Local_Max_Eval', '(item.limits[0] - 1)', '(True)'], {}), '(config.Key.Local_Max_Eval, item.limits[0] - 1, True)\n', (35708, 35761), True, 'import sscanss.config as config\n'), ((35859, 35936), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Local_Max_Eval', '(item.limits[1] - 1)', '(True)'], {}), '(config.Key.Local_Max_Eval, item.limits[1] - 1, True)\n', (35883, 35936), True, 'import sscanss.config as config\n'), ((36196, 36275), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Angular_Stop_Val', '(item.limits[1] + 1)', '(True)'], {}), '(config.Key.Angular_Stop_Val, item.limits[1] + 1, True)\n', (36220, 36275), True, 'import sscanss.config as config\n'), ((36375, 36454), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Angular_Stop_Val', '(item.limits[0] - 1)', '(True)'], {}), '(config.Key.Angular_Stop_Val, item.limits[0] - 1, True)\n', (36399, 36454), True, 'import sscanss.config as config\n'), ((36554, 36633), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Angular_Stop_Val', '(item.limits[1] - 1)', '(True)'], {}), '(config.Key.Angular_Stop_Val, item.limits[1] - 1, True)\n', (36578, 36633), True, 'import sscanss.config as config\n'), ((36893, 36965), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Fiducial_Colour', '(2, 3, 4, 5)', '(True)'], {}), '(config.Key.Fiducial_Colour, (2, 3, 4, 5), True)\n', (36917, 36965), True, 'import sscanss.config as config\n'), ((37064, 37133), 
'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Fiducial_Colour', '(2, 3, 4)', '(True)'], {}), '(config.Key.Fiducial_Colour, (2, 3, 4), True)\n', (37088, 37133), True, 'import sscanss.config as config\n'), ((37232, 37322), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Fiducial_Colour', "('h', '1.0', '1.0', '1.0')", '(True)'], {}), "(config.Key.Fiducial_Colour, ('h', '1.0', '1.0',\n '1.0'), True)\n", (37256, 37322), True, 'import sscanss.config as config\n'), ((37417, 37509), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Fiducial_Colour', "('1.0', '1.0', '1.0', '1.0')", '(True)'], {}), "(config.Key.Fiducial_Colour, ('1.0', '1.0', '1.0',\n '1.0'), True)\n", (37441, 37509), True, 'import sscanss.config as config\n'), ((37604, 37676), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Fiducial_Colour', '(2, 3, 4, 5)', '(True)'], {}), '(config.Key.Fiducial_Colour, (2, 3, 4, 5), True)\n', (37628, 37676), True, 'import sscanss.config as config\n'), ((37915, 37975), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Geometry', '"""12345"""', '(True)'], {}), "(config.Key.Geometry, '12345', True)\n", (37939, 37975), True, 'import sscanss.config as config\n'), ((38392, 38458), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Recent_Projects', '"""name"""', '(True)'], {}), "(config.Key.Recent_Projects, 'name', True)\n", (38416, 38458), True, 'import sscanss.config as config\n'), ((38553, 38630), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Recent_Projects', "['name', 'other']", '(True)'], {}), "(config.Key.Recent_Projects, ['name', 'other'], True)\n", (38577, 38630), True, 'import sscanss.config as config\n'), ((38735, 38764), 'sscanss.config.settings.system.sync', 'config.settings.system.sync', ([], {}), '()\n', (38762, 38764), True, 'import sscanss.config as 
config\n'), ((38850, 38911), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Align_First', '(False)', '(True)'], {}), '(config.Key.Align_First, False, True)\n', (38874, 38911), True, 'import sscanss.config as config\n'), ((38920, 38943), 'sscanss.config.settings.reset', 'config.settings.reset', ([], {}), '()\n', (38941, 38943), True, 'import sscanss.config as config\n'), ((39024, 39051), 'sscanss.config.settings.reset', 'config.settings.reset', (['(True)'], {}), '(True)\n', (39045, 39051), True, 'import sscanss.config as config\n'), ((39131, 39186), 'sscanss.config.settings.setValue', 'config.settings.setValue', (['config.Key.Align_First', '(False)'], {}), '(config.Key.Align_First, False)\n', (39155, 39186), True, 'import sscanss.config as config\n'), ((40087, 40156), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['preferences.accept_button', 'Qt.LeftButton'], {'delay': '(100)'}), '(preferences.accept_button, Qt.LeftButton, delay=100)\n', (40103, 40156), False, 'from PyQt5.QtTest import QTest\n'), ((40289, 40317), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 50)'], {}), '(WAIT_TIME // 50)\n', (40300, 40317), False, 'from PyQt5.QtTest import QTest\n'), ((40468, 40536), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['preferences.reset_button', 'Qt.LeftButton'], {'delay': '(100)'}), '(preferences.reset_button, Qt.LeftButton, delay=100)\n', (40484, 40536), False, 'from PyQt5.QtTest import QTest\n'), ((40595, 40623), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 50)'], {}), '(WAIT_TIME // 50)\n', (40606, 40623), False, 'from PyQt5.QtTest import QTest\n'), ((40830, 40899), 'PyQt5.QtTest.QTest.mouseClick', 'QTest.mouseClick', (['preferences.cancel_button', 'Qt.LeftButton'], {'delay': '(100)'}), '(preferences.cancel_button, Qt.LeftButton, delay=100)\n', (40846, 40899), False, 'from PyQt5.QtTest import QTest\n'), ((40958, 40986), 'PyQt5.QtTest.QTest.qWait', 'QTest.qWait', (['(WAIT_TIME // 50)'], {}), 
'(WAIT_TIME // 50)\n', (40969, 40986), False, 'from PyQt5.QtTest import QTest\n'), ((1399, 1417), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1415, 1417), False, 'import tempfile\n'), ((5337, 5400), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['project_dialog.instrument_combobox', 'Qt.Key_Down'], {}), '(project_dialog.instrument_combobox, Qt.Key_Down)\n', (5351, 5400), False, 'from PyQt5.QtTest import QTest\n'), ((11703, 11757), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.move_combobox', 'Qt.Key_Down'], {}), '(widget.tool.move_combobox, Qt.Key_Down)\n', (11717, 11757), False, 'from PyQt5.QtTest import QTest\n'), ((11832, 11888), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.ignore_combobox', 'Qt.Key_Down'], {}), '(widget.tool.ignore_combobox, Qt.Key_Down)\n', (11846, 11888), False, 'from PyQt5.QtTest import QTest\n'), ((12374, 12429), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.tool.plane_combobox', 'Qt.Key_Down'], {}), '(widget.tool.plane_combobox, Qt.Key_Down)\n', (12388, 12429), False, 'from PyQt5.QtTest import QTest\n'), ((18531, 18581), 'PyQt5.QtTest.QTest.keyClick', 'QTest.keyClick', (['widget.plane_combobox', 'Qt.Key_Down'], {}), '(widget.plane_combobox, Qt.Key_Down)\n', (18545, 18581), False, 'from PyQt5.QtTest import QTest\n'), ((18623, 18631), 'PyQt5.QtCore.QPoint', 'QPoint', ([], {}), '()\n', (18629, 18631), False, 'from PyQt5.QtCore import Qt, QPoint, QTimer, QSettings\n'), ((18633, 18646), 'PyQt5.QtCore.QPoint', 'QPoint', (['(10)', '(0)'], {}), '(10, 0)\n', (18639, 18646), False, 'from PyQt5.QtCore import Qt, QPoint, QTimer, QSettings\n'), ((34879, 34924), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (34900, 34924), True, 'import sscanss.config as config\n'), ((35021, 35066), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (35042, 
35066), True, 'import sscanss.config as config\n'), ((35163, 35208), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (35184, 35208), True, 'import sscanss.config as config\n'), ((35302, 35347), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (35323, 35347), True, 'import sscanss.config as config\n'), ((35437, 35485), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Local_Max_Eval'], {}), '(config.Key.Local_Max_Eval)\n', (35458, 35485), True, 'import sscanss.config as config\n'), ((35612, 35660), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Local_Max_Eval'], {}), '(config.Key.Local_Max_Eval)\n', (35633, 35660), True, 'import sscanss.config as config\n'), ((35787, 35835), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Local_Max_Eval'], {}), '(config.Key.Local_Max_Eval)\n', (35808, 35835), True, 'import sscanss.config as config\n'), ((35962, 36010), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Local_Max_Eval'], {}), '(config.Key.Local_Max_Eval)\n', (35983, 36010), True, 'import sscanss.config as config\n'), ((36122, 36172), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Angular_Stop_Val'], {}), '(config.Key.Angular_Stop_Val)\n', (36143, 36172), True, 'import sscanss.config as config\n'), ((36301, 36351), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Angular_Stop_Val'], {}), '(config.Key.Angular_Stop_Val)\n', (36322, 36351), True, 'import sscanss.config as config\n'), ((36480, 36530), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Angular_Stop_Val'], {}), '(config.Key.Angular_Stop_Val)\n', (36501, 36530), True, 'import sscanss.config as config\n'), ((36659, 36709), 'sscanss.config.settings.value', 'config.settings.value', 
(['config.Key.Angular_Stop_Val'], {}), '(config.Key.Angular_Stop_Val)\n', (36680, 36709), True, 'import sscanss.config as config\n'), ((36820, 36869), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (36841, 36869), True, 'import sscanss.config as config\n'), ((36991, 37040), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (37012, 37040), True, 'import sscanss.config as config\n'), ((37159, 37208), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (37180, 37208), True, 'import sscanss.config as config\n'), ((37344, 37393), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (37365, 37393), True, 'import sscanss.config as config\n'), ((37531, 37580), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (37552, 37580), True, 'import sscanss.config as config\n'), ((37702, 37751), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Fiducial_Colour'], {}), '(config.Key.Fiducial_Colour)\n', (37723, 37751), True, 'import sscanss.config as config\n'), ((37849, 37891), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Geometry'], {}), '(config.Key.Geometry)\n', (37870, 37891), True, 'import sscanss.config as config\n'), ((38001, 38043), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Geometry'], {}), '(config.Key.Geometry)\n', (38022, 38043), True, 'import sscanss.config as config\n'), ((38165, 38207), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Geometry'], {}), '(config.Key.Geometry)\n', (38186, 38207), True, 'import sscanss.config as config\n'), ((38319, 38368), 
'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Recent_Projects'], {}), '(config.Key.Recent_Projects)\n', (38340, 38368), True, 'import sscanss.config as config\n'), ((38484, 38533), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Recent_Projects'], {}), '(config.Key.Recent_Projects)\n', (38505, 38533), True, 'import sscanss.config as config\n'), ((38656, 38705), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Recent_Projects'], {}), '(config.Key.Recent_Projects)\n', (38677, 38705), True, 'import sscanss.config as config\n'), ((38969, 39014), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (38990, 39014), True, 'import sscanss.config as config\n'), ((39076, 39121), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (39097, 39121), True, 'import sscanss.config as config\n'), ((39215, 39260), 'sscanss.config.settings.value', 'config.settings.value', (['config.Key.Align_First'], {}), '(config.Key.Align_First)\n', (39236, 39260), True, 'import sscanss.config as config\n'), ((39290, 39348), 'sscanss.config.settings.system.value', 'config.settings.system.value', (['config.Key.Align_First.value'], {}), '(config.Key.Align_First.value)\n', (39318, 39348), True, 'import sscanss.config as config\n'), ((40033, 40066), 'sscanss.config.settings.value', 'config.settings.value', (['stored_key'], {}), '(stored_key)\n', (40054, 40066), True, 'import sscanss.config as config\n'), ((40185, 40218), 'sscanss.config.settings.value', 'config.settings.value', (['stored_key'], {}), '(stored_key)\n', (40206, 40218), True, 'import sscanss.config as config\n'), ((5452, 5472), 'tests.helpers.click_message_box', 'click_message_box', (['(0)'], {}), '(0)\n', (5469, 5472), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, 
click_list_widget_item, edit_line_edit_text\n'), ((7614, 7634), 'tests.helpers.click_message_box', 'click_message_box', (['(0)'], {}), '(0)\n', (7631, 7634), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((22983, 23003), 'tests.helpers.click_message_box', 'click_message_box', (['(0)'], {}), '(0)\n', (23000, 23003), False, 'from tests.helpers import QTestCase, mouse_drag, mouse_wheel_scroll, click_check_box, click_message_box, click_list_widget_item, edit_line_edit_text\n'), ((38812, 38838), 'sscanss.config.settings.filename', 'config.settings.filename', ([], {}), '()\n', (38836, 38838), True, 'import sscanss.config as config\n')] |
#!/usr/bin/env python
"""
sampling function for 10_phase segment, it takes as argument:
1. start time/pos/vel
2. jerk value and duration of each phase
3. the time instant at which we need to calculate pos, vel, acc, jrk
and returns pos, vel, acc, jrk at that time instant
"""
import numpy as np
def find_phase(t, phase_times):
    '''
    Return the index of the phase whose time interval contains the instant t.
    phase_times holds the start time of every phase plus the segment end time.
    The result is -1 for t at or before the first boundary and
    len(phase_times) - 1 for t past the last one; an instant that falls
    exactly on an interior boundary is assigned to the earlier phase
    (side='left' semantics).
    '''
    boundaries = np.asarray(phase_times)
    return np.searchsorted(boundaries, t, side='left') - 1
def sample(t, p_start, v_start, phase_times, phase_jrk, infl_points_acc, infl_points_vel, infl_points_pos):
    '''
    Sample a jerk-limited trajectory segment at the time instant t.

    Arguments:
        t: time instant (relative to the segment start) to evaluate.
        p_start, v_start: starting position/velocity of the segment (kept for
            interface compatibility; the inflection-point lists already
            contain this information).
        phase_times: start time of each phase plus the segment end time
            (len(phase_jrk) + 1 entries).
        phase_jrk: constant jerk value of each phase.
        infl_points_acc, infl_points_vel, infl_points_pos: acceleration,
            velocity and position at each inflection point where the phase
            changes.
    Returns:
        pos, vel, acc, jrk at the instant t.  Before the segment starts
        (and after it ends) the jerk is 0 and the boundary state is held.
    '''
    # locate the phase containing t (same computation as find_phase,
    # inlined so this function is self-contained)
    ph = np.searchsorted(np.asarray(phase_times), t, side='left') - 1
    if ph < 0:
        # before the segment: hold the initial state
        return infl_points_pos[0], infl_points_vel[0], infl_points_acc[0], 0.0
    if ph >= len(phase_times) - 1:
        # after the segment: hold the final state
        # (generalized from the hard-coded 10-phase check "ph > 9" so the
        # function works for any number of phases; identical behavior for
        # the usual 11-entry phase_times)
        return infl_points_pos[-1], infl_points_vel[-1], infl_points_acc[-1], 0.0
    # inside phase ph: evaluate the constant-jerk polynomial from the
    # state at the phase's inflection point
    dt = t - phase_times[ph]
    jrk = phase_jrk[ph]
    acc = jrk*dt + infl_points_acc[ph]
    vel = jrk*dt**2/2.0 + infl_points_acc[ph]*dt + infl_points_vel[ph]
    pos = jrk*dt**3/6.0 + infl_points_acc[ph]*dt**2/2.0 + infl_points_vel[ph]*dt + infl_points_pos[ph]
    return pos, vel, acc, jrk
def sample_segment(t, t_start, p_start, v_start, phase_jrk, phase_dur):
    '''
    Sample a 10-phase trajectory segment at the absolute time instant t.

    Arguments:
        t: absolute time instant to evaluate.
        t_start, p_start, v_start: starting time/position/velocity of the segment.
        phase_jrk, phase_dur: jerk value and duration of each of the 10 phases.
    Returns:
        pos, vel, acc, jrk at the instant t.
    '''
    # cumulative phase start times: [0, d0, d0+d1, ..., sum(all durations)]
    phase_times = [0]
    for dur in phase_dur:
        phase_times.append(phase_times[-1] + dur)
    # integrate the constant-jerk polynomial over each of the 10 phases to
    # obtain acc/vel/pos at every inflection point (phase boundary)
    infl_points_acc = [0.0]
    infl_points_vel = [v_start]
    infl_points_pos = [p_start]
    for i in range(10):
        jrk = phase_jrk[i]
        dur = phase_dur[i]
        a0 = infl_points_acc[-1]
        v0 = infl_points_vel[-1]
        p0 = infl_points_pos[-1]
        infl_points_acc.append(jrk*dur + a0)
        infl_points_vel.append(jrk*dur**2/2.0 + a0*dur + v0)
        infl_points_pos.append(jrk*dur**3/6.0 + a0*dur**2/2.0 + v0*dur + p0)
    return sample(t - t_start, p_start, v_start, phase_times, phase_jrk, infl_points_acc, infl_points_vel, infl_points_pos)
| [
"numpy.asarray",
"numpy.searchsorted"
] | [((499, 522), 'numpy.asarray', 'np.asarray', (['phase_times'], {}), '(phase_times)\n', (509, 522), True, 'import numpy as np\n'), ((532, 576), 'numpy.searchsorted', 'np.searchsorted', (['phase_times', 't'], {'side': '"""left"""'}), "(phase_times, t, side='left')\n", (547, 576), True, 'import numpy as np\n')] |
# references:
# https://github.com/weigq/3d_pose_baseline_pytorch/blob/master/src/datasets/human36m.py
# https://github.com/una-dinosauria/3d-pose-baseline/blob/master/src/linear_model.py#L247
import numpy as np
import torch
from torch.utils.data import Dataset
class Human36M(Dataset):
    """Paired 2d/3d Human3.6M poses exposed as a torch Dataset."""

    def __init__(self, pose_set_2d, pose_set_3d, camera_frame=True):
        """
        Args:
            pose_set_2d (dict[tuple, numpy.array]): 2d pose set.
            pose_set_3d (dict[tuple, numpy.array]): 3d pose set.
            camera_frame (bool, optional): Make this True if pose_set_3d is in camera coordinates. Defaults to True.
        """
        chunks_2d = []
        chunks_3d = []
        actions = []
        for key2d in pose_set_2d.keys():
            subj, act, seqname = key2d
            # 2d and 3d keys match when 3d poses are in the camera frame;
            # otherwise the 3d key drops the camera id from the sequence name.
            if camera_frame:
                key3d = key2d
            else:
                key3d = (subj, act, "{}.h5".format(seqname.split(".")[0]))
            chunk_2d = pose_set_2d[key2d]  # [n, 16 x 2]
            chunk_3d = pose_set_3d[key3d]  # [n, n_joints x 3]
            assert len(chunk_2d) == len(chunk_3d)
            chunks_2d.append(chunk_2d)
            chunks_3d.append(chunk_3d)
            actions.extend([act] * len(chunk_2d))  # [n,]
        self.poses_2d = np.vstack(chunks_2d)  # [N, 16 x 2]
        self.poses_3d = np.vstack(chunks_3d)  # [N, n_joints x 3]
        self.actions = np.array(actions)  # [N,]
        assert len(self.poses_2d) == len(self.poses_3d) == len(self.actions)

    def __getitem__(self, idx):
        """Get a set of 2d pose, 3d pose, and action.

        Args:
            idx (int): Index of the 2d/3d pose pair to get.

        Returns:
            (dict): a set of 2d pose, 3d pose, and action.
                pose_2d (torch.Tensor): 2d pose (model input).
                pose_3d (torch.Tensor): 3d pose (model output i.e., label).
                action (str): Action to which the pose pair belongs.
        """
        return {
            "pose_2d": torch.from_numpy(self.poses_2d[idx]).float(),
            "pose_3d": torch.from_numpy(self.poses_3d[idx]).float(),
            "action": self.actions[idx],
        }

    def __len__(self):
        """Return the number of the samples.

        Returns:
            (int): Number of the samples.
        """
        return self.poses_2d.shape[0]
| [
"numpy.array",
"numpy.vstack",
"torch.from_numpy"
] | [((1415, 1439), 'numpy.vstack', 'np.vstack', (['self.poses_2d'], {}), '(self.poses_2d)\n', (1424, 1439), True, 'import numpy as np\n'), ((1479, 1503), 'numpy.vstack', 'np.vstack', (['self.poses_3d'], {}), '(self.poses_3d)\n', (1488, 1503), True, 'import numpy as np\n'), ((1548, 1570), 'numpy.array', 'np.array', (['self.actions'], {}), '(self.actions)\n', (1556, 1570), True, 'import numpy as np\n'), ((2122, 2158), 'torch.from_numpy', 'torch.from_numpy', (['self.poses_2d[idx]'], {}), '(self.poses_2d[idx])\n', (2138, 2158), False, 'import torch\n'), ((2185, 2221), 'torch.from_numpy', 'torch.from_numpy', (['self.poses_3d[idx]'], {}), '(self.poses_3d[idx])\n', (2201, 2221), False, 'import torch\n')] |
"""
Dataset generation file for the training of the neural network
Section [3.1] of project report.
Author: <NAME>
"""
import numpy as np
import tqdm
outputdir="C:\\Users\\Eliott\\Desktop\\cse-project\\reportdata\\"
#%% initial functions & schemes
def fstep(x, x1):
    '''Smooth (tanh-based) approximation of a unit step located at x = x1.'''
    return 0.5*(1 + np.tanh(100*(x - x1)))
def fwall(x, x1, x2):
    '''Smooth (tanh-based) indicator of the interval [x1, x2] (a "wall" pulse).'''
    return 0.5*(np.tanh(100*(x - x1)) - np.tanh(100*(x - x2)))
def solve_step(a, M, N, T, x1):
    '''
    Solve the advection equation u_t + a*u_x = 0 on [0, 1] up to time T with
    the first-order upwind scheme, starting from the smoothed step initial
    condition fstep(x, x1) and a zero inflow boundary at x = 0.

    Arguments:
        a: advection speed.
        M: number of time steps.
        N: number of spatial cells.
        T: final time.
        x1: position of the smoothed step in the initial condition.
    Returns:
        Solution at t = T on the N interior grid points (x = h, ..., 1).
    '''
    M = int(M)
    N = int(N)
    tau = T/M  # time step
    h = 1/N    # space step
    U = np.zeros((M+1, N+1))
    x = h * np.arange(0, N+1, 1)
    U[0] = (1 + np.tanh(100*(x - x1)))/2  # initial condition (fstep inlined)
    U[:, 0] = 0  # inflow boundary value
    c = a * tau/h  # Courant number, hoisted out of the time loop
    for n in range(0, M):
        # cells up to icut lie behind the front coming from the zero inflow
        # boundary and are left at their initialized value of 0
        icut = int(N * a * (n+1) * tau)
        # vectorized upwind update (replaces the original per-cell Python loop,
        # bit-identical results)
        U[n+1, icut+1:] = U[n, icut+1:] - c * (U[n, icut+1:] - U[n, icut:-1])
    return U[M, 1:]  # output is of size N
def solve_wall(a, M, N, T, x1, x2):
    '''
    Solve the advection equation u_t + a*u_x = 0 on [0, 1] up to time T with
    the first-order upwind scheme, starting from the smoothed rectangular
    pulse fwall(x, x1, x2) and a zero inflow boundary at x = 0.

    Arguments:
        a: advection speed.
        M: number of time steps.
        N: number of spatial cells.
        T: final time.
        x1, x2: left/right edges of the smoothed pulse in the initial condition.
    Returns:
        Solution at t = T on the N interior grid points (x = h, ..., 1).
    '''
    M = int(M)
    N = int(N)
    tau = T/M  # time step
    h = 1/N    # space step
    U = np.zeros((M+1, N+1))
    x = h * np.arange(0, N+1, 1)
    U[0] = (np.tanh(100*(x - x1)) - np.tanh(100*(x - x2)))/2  # initial condition (fwall inlined)
    U[:, 0] = 0  # inflow boundary value
    c = a * tau/h  # Courant number, hoisted out of the time loop
    for n in range(0, M):
        # cells up to icut lie behind the front coming from the zero inflow
        # boundary and are left at their initialized value of 0
        icut = int(N * a * (n+1) * tau)
        # vectorized upwind update (replaces the original per-cell Python loop,
        # bit-identical results)
        U[n+1, icut+1:] = U[n, icut+1:] - c * (U[n, icut+1:] - U[n, icut:-1])
    return U[M, 1:]  # output is of size N
#%%
# parameters of the pde
a = 1e-2  # advection speed of u_t + a*u_x = 0
T = 10  # final time at which the solution is recorded
CFL = 0.5  # Courant number a*tau/h used to pick the number of time steps
N = 5e1  # number of spatial cells of the coarse grid
M = int(a * T * N / CFL)  # number of time steps implied by the CFL condition
# X stores the coarse approx, Y the fine ones
X = []
Y = []
size = 2500
x1 = np.linspace(0.1, 0.7, size)  # step positions sampled over [0.1, 0.7]
# step function loop: one coarse solution per step position x1[i]
for i in tqdm.tqdm(range(len(x1))):
    X.append( solve_step(a, M, N, T, x1[i]) )
    # fine reference solutions (100x resolution), disabled here because they are slow
    #Y.append( solve_step(a, 100*M, 100*N, T, x1[i]) )
size = 50
x1 = np.linspace(0.2, 0.4, size)  # left edges of the wall pulse
x2 = np.linspace(0.6, 0.8, size)  # right edges of the wall pulse
# wall function loop: one coarse solution per (x1[i], x2[j]) pair
for i in tqdm.tqdm(range(len(x1))):
    for j in range(len(x2)):
        X.append( solve_wall(a, M, N, T, x1[i], x2[j]) )
        #Y.append( solve_wall(a, 100*M, 100*N, T, x1[i], x2[j]) )
X = np.array(X)  # [n_samples, N] coarse solutions (network inputs)
Y = np.array(Y)  # fine solutions (targets); empty while the Y.append lines stay commented out
#%% uncomment to save the data
#np.savetxt(outputdir+'x50.csv', X, delimiter=',')
#np.savetxt(outputdir+'y1000.csv', Y, delimiter=',')
#%% visualize initial condition, input, targeted output of neural network
import matplotlib.pyplot as plt
x1 = 0.4  # left edge of the wall profile
x2 = 0.6  # right edge of the wall profile
a = 1e-2
T = 10
CFL = 0.5
N = 5e1
M = int(a * T * N / CFL)
# drop the x=0 boundary node so the grids match the solver output (size N)
xcoarse = np.linspace(0,1,num=int(N+1))[1:]
xfine = np.linspace(0,1,num=int(20*N+1))[1:]
x = np.linspace(0,1,num=int(20*N+1))
ini = fwall(x, x1, x2)  # initial condition u_0
sol1 = solve_wall(a, M, N, T, x1, x2)  # coarse solve (N = 50)
sol2 = solve_wall(a, 20*M, 20*N, T, x1, x2)  # fine solve (N = 1000)
fig = plt.figure()
plt.plot(x, ini, color = 'blue', label="$u_0(x)$")
plt.plot(xcoarse, sol1, color = 'black', label="$u(x,T)$, $N=50$")
plt.plot(xfine, sol2, color = 'red', label="$u(x,T)$, $N=1000$")
plt.xlabel('$x$')
plt.ylabel('$u$')
plt.legend()
plt.show()
#fig.savefig("1D-dataset.png", dpi=500)
#fig.savefig('1d.eps', format='eps') | [
"matplotlib.pyplot.show",
"numpy.tanh",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1371, 1398), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.7)', 'size'], {}), '(0.1, 0.7, size)\n', (1382, 1398), True, 'import numpy as np\n'), ((1585, 1612), 'numpy.linspace', 'np.linspace', (['(0.2)', '(0.4)', 'size'], {}), '(0.2, 0.4, size)\n', (1596, 1612), True, 'import numpy as np\n'), ((1618, 1645), 'numpy.linspace', 'np.linspace', (['(0.6)', '(0.8)', 'size'], {}), '(0.6, 0.8, size)\n', (1629, 1645), True, 'import numpy as np\n'), ((1873, 1884), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1881, 1884), True, 'import numpy as np\n'), ((1892, 1903), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1900, 1903), True, 'import numpy as np\n'), ((2471, 2483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2481, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2532), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ini'], {'color': '"""blue"""', 'label': '"""$u_0(x)$"""'}), "(x, ini, color='blue', label='$u_0(x)$')\n", (2492, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2599), 'matplotlib.pyplot.plot', 'plt.plot', (['xcoarse', 'sol1'], {'color': '"""black"""', 'label': '"""$u(x,T)$, $N=50$"""'}), "(xcoarse, sol1, color='black', label='$u(x,T)$, $N=50$')\n", (2543, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2602, 2664), 'matplotlib.pyplot.plot', 'plt.plot', (['xfine', 'sol2'], {'color': '"""red"""', 'label': '"""$u(x,T)$, $N=1000$"""'}), "(xfine, sol2, color='red', label='$u(x,T)$, $N=1000$')\n", (2610, 2664), True, 'import matplotlib.pyplot as plt\n'), ((2668, 2685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (2678, 2685), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u$"""'], {}), "('$u$')\n", (2696, 2703), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2716), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2714, 2716), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2727), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2725, 2727), True, 'import matplotlib.pyplot as plt\n'), ((507, 531), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (515, 531), True, 'import numpy as np\n'), ((912, 936), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (920, 936), True, 'import numpy as np\n'), ((538, 560), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)', '(1)'], {}), '(0, N + 1, 1)\n', (547, 560), True, 'import numpy as np\n'), ((943, 965), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)', '(1)'], {}), '(0, N + 1, 1)\n', (952, 965), True, 'import numpy as np\n'), ((287, 310), 'numpy.tanh', 'np.tanh', (['(100 * (x - x1))'], {}), '(100 * (x - x1))\n', (294, 310), True, 'import numpy as np\n'), ((345, 368), 'numpy.tanh', 'np.tanh', (['(100 * (x - x1))'], {}), '(100 * (x - x1))\n', (352, 368), True, 'import numpy as np\n'), ((367, 390), 'numpy.tanh', 'np.tanh', (['(100 * (x - x2))'], {}), '(100 * (x - x2))\n', (374, 390), True, 'import numpy as np\n')] |
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
class AppTestSorting(BaseNumpyAppTest):
    """App-level tests for micronumpy sort / argsort / searchsorted.

    NOTE(review): ``raises`` and ``skip`` are not defined here; they are
    presumably injected into app-level test namespaces by PyPy's test
    harness.  Several tests branch on ``'__pypy__' in
    sys.builtin_module_names`` to accept NotImplementedError where PyPy
    has not implemented a feature.
    """
    def test_argsort_dtypes(self):
        # argsort must return the sorting permutation without modifying
        # the array itself.
        from numpy import array, arange
        assert array(2.0).argsort() == 0
        nnp = self.non_native_prefix
        for dtype in ['int', 'float', 'int16', 'float32', 'uint64',
                      nnp + 'i2', complex]:
            a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
            exp = list(a)
            exp = sorted(range(len(exp)), key=exp.__getitem__)
            c = a.copy()
            res = a.argsort()
            assert (res == exp).all(), 'Failed sortng %r\na=%r\nres=%r\nexp=%r' % (dtype,a,res,exp)
            assert (a == c).all() # not modified
            a = arange(100, dtype=dtype)
            assert (a.argsort() == a).all()
    def test_argsort_ndim(self):
        from numpy import array
        a = array([[4, 2], [1, 3]])
        assert (a.argsort() == [[1, 0], [0, 1]]).all()
        # equal elements: either order of equal keys is accepted
        a = array(range(10) + range(10) + range(10))
        b = a.argsort()
        assert ((b[:3] == [0, 10, 20]).all() or
                (b[:3] == [0, 20, 10]).all())
        #trigger timsort 'run' mode which calls arg_getitem_slice
        a = array(range(100) + range(100) + range(100))
        b = a.argsort()
        assert ((b[:3] == [0, 100, 200]).all() or
                (b[:3] == [0, 200, 100]).all())
        # zero-size array: argsort must return an empty result
        a = array([[[]]]).reshape(3,4,0)
        b = a.argsort()
        assert b.size == 0
    def test_argsort_random(self):
        # smoke test: argsort on random 2-D data must not crash
        from numpy import array
        from _random import Random
        rnd = Random(1)
        a = array([rnd.random() for i in range(512*2)]).reshape(512,2)
        a.argsort()
    def test_argsort_axis(self):
        from numpy import array
        a = array([])
        for axis in [None, -1, 0]:
            assert a.argsort(axis=axis).shape == (0,)
        a = array([[4, 2], [1, 3]])
        assert (a.argsort(axis=None) == [2, 1, 3, 0]).all()
        assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all()
        assert (a.argsort(axis=0) == [[1, 0], [0, 1]]).all()
        assert (a.argsort(axis=1) == [[1, 0], [0, 1]]).all()
        a = array([[3, 2, 1], [1, 2, 3]])
        assert (a.argsort(axis=0) == [[1, 0, 0], [0, 1, 1]]).all()
        assert (a.argsort(axis=1) == [[2, 1, 0], [0, 1, 2]]).all()
    def test_sort_dtypes(self):
        # in-place sort across numeric dtypes
        from numpy import array, arange
        for dtype in ['int', 'float', 'int16', 'float32', 'uint64',
                      'i2', complex]:
            a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
            exp = sorted(list(a))
            a.sort()
            assert (a == exp).all(), 'Failed sorting %r\n%r\n%r' % (dtype, a, exp)
            a = arange(100, dtype=dtype)
            c = a.copy()
            a.sort()
            assert (a == c).all(), 'Failed sortng %r\na=%r\nc=%r' % (dtype,a,c)
    def test_sort_nonnative(self):
        # non-native byte order is not supported on PyPy
        from numpy import array
        nnp = self.non_native_prefix
        for dtype in [ nnp + 'i2']:
            a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype)
            b = array([-1, 3, 3, 4, 6, 8, 100, 101, 256+20], dtype=dtype)
            c = a.copy()
            import sys
            if '__pypy__' in sys.builtin_module_names:
                exc = raises(NotImplementedError, a.sort)
                assert exc.value[0].find('supported') >= 0
            #assert (a == b).all(), \
            #    'a,orig,dtype %r,%r,%r' % (a,c,dtype)
    def test_sort_noncontiguous(self):
        # sorting a strided column view must write back into the parent
        from numpy import array
        x = array([[2, 10], [1, 11]])
        assert (x[:, 0].argsort() == [1, 0]).all()
        x[:, 0].sort()
        assert (x == [[1, 10], [2, 11]]).all()
    # tests from numpy/tests/test_multiarray.py
    def test_sort_corner_cases(self):
        # test ordering for floats and complex containing nans. It is only
        # necessary to check the lessthan comparison, so sorts that
        # only follow the insertion sort path are sufficient. We only
        # test doubles and complex doubles as the logic is the same.
        # check doubles
        from numpy import array, zeros, arange
        from math import isnan
        nan = float('nan')
        a = array([nan, 1, 0])
        b = a.copy()
        b.sort()
        assert [isnan(bb) for bb in b] == [isnan(aa) for aa in a[::-1]]
        assert (b[:2] == a[::-1][:2]).all()
        b = a.argsort()
        assert (b == [2, 1, 0]).all()
        # check complex
        a = zeros(9, dtype='complex128')
        a.real += [nan, nan, nan, 1, 0, 1, 1, 0, 0]
        a.imag += [nan, 1, 0, nan, nan, 1, 0, 1, 0]
        b = a.copy()
        b.sort()
        assert [isnan(bb) for bb in b] == [isnan(aa) for aa in a[::-1]]
        assert (b[:4] == a[::-1][:4]).all()
        b = a.argsort()
        assert (b == [8, 7, 6, 5, 4, 3, 2, 1, 0]).all()
        # all c scalar sorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = arange(101)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h'] :
            msg = "scalar sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
        # test complex sorts. These use the same code as the scalars
        # but the compare fuction differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q', 'm', 'h'] :
            msg = "complex sort, real part == 1, kind=%s" % kind
            c = ai.copy();
            c.sort(kind=kind)
            assert (c == ai).all(), msg
            c = bi.copy();
            c.sort(kind=kind)
            assert (c == ai).all(), msg
        ai = a + 1j
        bi = b + 1j
        for kind in ['q', 'm', 'h'] :
            msg = "complex sort, imag part == 1, kind=%s" % kind
            c = ai.copy();
            c.sort(kind=kind)
            assert (c == ai).all(), msg
            c = bi.copy();
            c.sort(kind=kind)
            assert (c == ai).all(), msg
        # check axis handling. This should be the same for all type
        # specific sorts, so we only check it for one type and one kind
        a = array([[3, 2], [1, 0]])
        b = array([[1, 0], [3, 2]])
        c = array([[2, 3], [0, 1]])
        d = a.copy()
        d.sort(axis=0)
        assert (d == b).all(), "test sort with axis=0"
        d = a.copy()
        d.sort(axis=1)
        assert (d == c).all(), "test sort with axis=1"
        d = a.copy()
        d.sort()
        assert (d == c).all(), "test sort with default axis"
    def test_sort_corner_cases_string_records(self):
        from numpy import array, dtype
        import sys
        if '__pypy__' in sys.builtin_module_names:
            skip('not implemented yet in PyPy')
        # test string sorts.
        s = 'aaaaaaaa'
        a = array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h'] :
            msg = "string sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
        # test record array sorts.
        dt =dtype([('f', float), ('i', int)])
        a = array([(i, i) for i in range(101)], dtype = dt)
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "object sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
    def test_sort_unicode(self):
        import sys
        from numpy import array
        # test unicode sorts.
        s = 'aaaaaaaa'
        a = array([s + chr(i) for i in range(101)], dtype=unicode)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h']:
            msg = "unicode sort, kind=%s" % kind
            c = a.copy()
            if '__pypy__' in sys.builtin_module_names:
                exc = raises(NotImplementedError, "c.sort(kind=kind)")
                assert 'non-numeric types' in exc.value.message
            else:
                c.sort(kind=kind)
                assert (c == a).all(), msg
            c = b.copy()
            if '__pypy__' in sys.builtin_module_names:
                exc = raises(NotImplementedError, "c.sort(kind=kind)")
                assert 'non-numeric types' in exc.value.message
            else:
                c.sort(kind=kind)
                assert (c == a).all(), msg
    def test_sort_objects(self):
        # test object array sorts.
        from numpy import empty
        import sys
        if '__pypy__' in sys.builtin_module_names:
            skip('not implemented yet in PyPy')
        try:
            a = empty((101,), dtype=object)
        except:
            skip('object type not supported yet')
        a[:] = list(range(101))
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "object sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
    def test_sort_datetime(self):
        from numpy import arange
        # test datetime64 sorts.
        try:
            a = arange(0, 101, dtype='datetime64[D]')
        except:
            skip('datetime type not supported yet')
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "datetime64 sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
        # test timedelta64 sorts.
        a = arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "timedelta64 sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
            c = b.copy();
            c.sort(kind=kind)
            assert (c == a).all(), msg
    def test_sort_order(self):
        from numpy import array, zeros
        from sys import byteorder, builtin_module_names
        if '__pypy__' in builtin_module_names:
            skip('not implemented yet in PyPy')
        # Test sorting an array with fields
        x1 = array([21, 32, 14])
        x2 = array(['my', 'first', 'name'])
        x3=array([3.1, 4.5, 6.2])
        r=zeros(3, dtype=[('id','i'),('word','S5'),('number','f')])
        r['id'] = x1
        r['word'] = x2
        r['number'] = x3
        r.sort(order=['id'])
        assert (r['id'] == [14, 21, 32]).all()
        assert (r['word'] == ['name', 'my', 'first']).all()
        assert max(abs(r['number'] - [6.2, 3.1, 4.5])) < 1e-6
        r.sort(order=['word'])
        assert (r['id'] == [32, 21, 14]).all()
        assert (r['word'] == ['first', 'my', 'name']).all()
        assert max(abs(r['number'] - [4.5, 3.1, 6.2])) < 1e-6
        r.sort(order=['number'])
        assert (r['id'] == [21, 32, 14]).all()
        assert (r['word'] == ['my', 'first', 'name']).all()
        assert max(abs(r['number'] - [3.1, 4.5, 6.2])) < 1e-6
        # pick the non-native byte order for the second field
        if byteorder == 'little':
            strtype = '>i2'
        else:
            strtype = '<i2'
        mydtype = [('name', 'S5'), ('col2', strtype)]
        r = array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                      dtype= mydtype)
        r.sort(order='col2')
        assert (r['col2'] == [1, 3, 255, 258]).all()
        assert (r == array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                               dtype=mydtype)).all()
    # tests from numpy/core/tests/test_regression.py
    def test_sort_bigendian(self):
        from numpy import array, dtype
        a = array(range(11), dtype='float64')
        c = a.astype(dtype('<f8'))
        c.sort()
        assert max(abs(a-c)) < 1e-32
    def test_string_argsort_with_zeros(self):
        # argsort on fixed-width strings containing NUL bytes
        import numpy as np
        import sys
        x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
        if '__pypy__' in sys.builtin_module_names:
            exc = raises(NotImplementedError, "x.argsort(kind='m')")
            assert 'non-numeric types' in exc.value.message
            exc = raises(NotImplementedError, "x.argsort(kind='q')")
            assert 'non-numeric types' in exc.value.message
        else:
            assert (x.argsort(kind='m') == np.array([1, 0])).all()
            assert (x.argsort(kind='q') == np.array([1, 0])).all()
    def test_string_sort_with_zeros(self):
        # sort on fixed-width strings containing NUL bytes
        import numpy as np
        import sys
        x = np.fromstring("\x00\x02\x00\x01", dtype="S2")
        y = np.fromstring("\x00\x01\x00\x02", dtype="S2")
        if '__pypy__' in sys.builtin_module_names:
            exc = raises(NotImplementedError, "x.sort(kind='q')")
            assert 'non-numeric types' in exc.value.message
        else:
            x.sort(kind='q')
            assert (x == y).all()
    def test_string_mergesort(self):
        import numpy as np
        import sys
        x = np.array(['a'] * 32)
        if '__pypy__' in sys.builtin_module_names:
            exc = raises(NotImplementedError, "x.argsort(kind='m')")
            assert 'non-numeric types' in exc.value.message
        else:
            assert (x.argsort(kind='m') == np.arange(32)).all()
    def test_searchsort(self):
        import numpy as np
        a = np.array(2)
        raises(ValueError, a.searchsorted, 3)
        a = np.arange(1, 6)
        # scalar needle returns a scalar, array needle returns an array
        ret = a.searchsorted(3)
        assert ret == 2
        assert isinstance(ret, np.generic)
        ret = a.searchsorted(np.array(3))
        assert ret == 2
        assert isinstance(ret, np.generic)
        ret = a.searchsorted(np.array([]))
        assert isinstance(ret, np.ndarray)
        assert ret.shape == (0,)
        ret = a.searchsorted(np.array([3]))
        assert ret == 2
        assert isinstance(ret, np.ndarray)
        ret = a.searchsorted(np.array([[2, 3]]))
        assert (ret == [1, 2]).all()
        assert ret.shape == (1, 2)
        ret = a.searchsorted(3, side='right')
        assert ret == 3
        assert isinstance(ret, np.generic)
        assert a.searchsorted(3.1) == 3
        assert a.searchsorted(3.9) == 3
        exc = raises(ValueError, a.searchsorted, 3, side=None)
        assert str(exc.value) == "expected nonempty string for keyword 'side'"
        exc = raises(ValueError, a.searchsorted, 3, side='')
        assert str(exc.value) == "expected nonempty string for keyword 'side'"
        exc = raises(ValueError, a.searchsorted, 3, side=2)
        assert str(exc.value) == "expected nonempty string for keyword 'side'"
        ret = a.searchsorted([-10, 10, 2, 3])
        assert (ret == [0, 5, 1, 2]).all()
        import sys
        if '__pypy__' in sys.builtin_module_names:
            raises(NotImplementedError, "a.searchsorted(3, sorter=range(6))")
| [
"math.isnan",
"numpy.empty",
"numpy.dtype",
"numpy.zeros",
"_random.Random",
"numpy.array",
"numpy.arange",
"numpy.fromstring"
] | [((904, 927), 'numpy.array', 'array', (['[[4, 2], [1, 3]]'], {}), '([[4, 2], [1, 3]])\n', (909, 927), False, 'from numpy import array, dtype\n'), ((1607, 1616), '_random.Random', 'Random', (['(1)'], {}), '(1)\n', (1613, 1616), False, 'from _random import Random\n'), ((1786, 1795), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1791, 1795), False, 'from numpy import array, dtype\n'), ((1897, 1920), 'numpy.array', 'array', (['[[4, 2], [1, 3]]'], {}), '([[4, 2], [1, 3]])\n', (1902, 1920), False, 'from numpy import array, dtype\n'), ((2177, 2206), 'numpy.array', 'array', (['[[3, 2, 1], [1, 2, 3]]'], {}), '([[3, 2, 1], [1, 2, 3]])\n', (2182, 2206), False, 'from numpy import array, dtype\n'), ((3586, 3611), 'numpy.array', 'array', (['[[2, 10], [1, 11]]'], {}), '([[2, 10], [1, 11]])\n', (3591, 3611), False, 'from numpy import array, dtype\n'), ((4240, 4258), 'numpy.array', 'array', (['[nan, 1, 0]'], {}), '([nan, 1, 0])\n', (4245, 4258), False, 'from numpy import array, dtype\n'), ((4513, 4541), 'numpy.zeros', 'zeros', (['(9)'], {'dtype': '"""complex128"""'}), "(9, dtype='complex128')\n", (4518, 4541), False, 'from numpy import array, zeros\n'), ((5210, 5221), 'numpy.arange', 'arange', (['(101)'], {}), '(101)\n', (5216, 5221), False, 'from numpy import arange\n'), ((6469, 6492), 'numpy.array', 'array', (['[[3, 2], [1, 0]]'], {}), '([[3, 2], [1, 0]])\n', (6474, 6492), False, 'from numpy import array, dtype\n'), ((6505, 6528), 'numpy.array', 'array', (['[[1, 0], [3, 2]]'], {}), '([[1, 0], [3, 2]])\n', (6510, 6528), False, 'from numpy import array, dtype\n'), ((6541, 6564), 'numpy.array', 'array', (['[[2, 3], [0, 1]]'], {}), '([[2, 3], [0, 1]])\n', (6546, 6564), False, 'from numpy import array, dtype\n'), ((7529, 7562), 'numpy.dtype', 'dtype', (["[('f', float), ('i', int)]"], {}), "([('f', float), ('i', int)])\n", (7534, 7562), False, 'from numpy import array, dtype\n'), ((10110, 10148), 'numpy.arange', 'arange', (['(0)', '(101)'], {'dtype': '"""timedelta64[D]"""'}), 
"(0, 101, dtype='timedelta64[D]')\n", (10116, 10148), False, 'from numpy import arange\n'), ((10729, 10748), 'numpy.array', 'array', (['[21, 32, 14]'], {}), '([21, 32, 14])\n', (10734, 10748), False, 'from numpy import array, dtype\n'), ((10762, 10792), 'numpy.array', 'array', (["['my', 'first', 'name']"], {}), "(['my', 'first', 'name'])\n", (10767, 10792), False, 'from numpy import array, dtype\n'), ((10804, 10826), 'numpy.array', 'array', (['[3.1, 4.5, 6.2]'], {}), '([3.1, 4.5, 6.2])\n', (10809, 10826), False, 'from numpy import array, dtype\n'), ((10837, 10899), 'numpy.zeros', 'zeros', (['(3)'], {'dtype': "[('id', 'i'), ('word', 'S5'), ('number', 'f')]"}), "(3, dtype=[('id', 'i'), ('word', 'S5'), ('number', 'f')])\n", (10842, 10899), False, 'from numpy import array, zeros\n'), ((11738, 11804), 'numpy.array', 'array', (["[('a', 1), ('b', 255), ('c', 3), ('d', 258)]"], {'dtype': 'mydtype'}), "([('a', 1), ('b', 255), ('c', 3), ('d', 258)], dtype=mydtype)\n", (11743, 11804), False, 'from numpy import array, dtype\n'), ((12401, 12447), 'numpy.fromstring', 'np.fromstring', (["'\\x00\\x02\\x00\\x01'"], {'dtype': '"""|S2"""'}), "('\\x00\\x02\\x00\\x01', dtype='|S2')\n", (12414, 12447), True, 'import numpy as np\n'), ((13007, 13052), 'numpy.fromstring', 'np.fromstring', (["'\\x00\\x02\\x00\\x01'"], {'dtype': '"""S2"""'}), "('\\x00\\x02\\x00\\x01', dtype='S2')\n", (13020, 13052), True, 'import numpy as np\n'), ((13065, 13110), 'numpy.fromstring', 'np.fromstring', (["'\\x00\\x01\\x00\\x02'"], {'dtype': '"""S2"""'}), "('\\x00\\x01\\x00\\x02', dtype='S2')\n", (13078, 13110), True, 'import numpy as np\n'), ((13461, 13481), 'numpy.array', 'np.array', (["(['a'] * 32)"], {}), "(['a'] * 32)\n", (13469, 13481), True, 'import numpy as np\n'), ((13812, 13823), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (13820, 13823), True, 'import numpy as np\n'), ((13883, 13898), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (13892, 13898), True, 'import numpy as np\n'), 
((389, 448), 'numpy.array', 'array', (['[6, 4, -1, 3, 8, 3, 256 + 20, 100, 101]'], {'dtype': 'dtype'}), '([6, 4, -1, 3, 8, 3, 256 + 20, 100, 101], dtype=dtype)\n', (394, 448), False, 'from numpy import array, dtype\n'), ((757, 781), 'numpy.arange', 'arange', (['(100)'], {'dtype': 'dtype'}), '(100, dtype=dtype)\n', (763, 781), False, 'from numpy import arange\n'), ((2536, 2595), 'numpy.array', 'array', (['[6, 4, -1, 3, 8, 3, 256 + 20, 100, 101]'], {'dtype': 'dtype'}), '([6, 4, -1, 3, 8, 3, 256 + 20, 100, 101], dtype=dtype)\n', (2541, 2595), False, 'from numpy import array, dtype\n'), ((2749, 2773), 'numpy.arange', 'arange', (['(100)'], {'dtype': 'dtype'}), '(100, dtype=dtype)\n', (2755, 2773), False, 'from numpy import arange\n'), ((3057, 3116), 'numpy.array', 'array', (['[6, 4, -1, 3, 8, 3, 256 + 20, 100, 101]'], {'dtype': 'dtype'}), '([6, 4, -1, 3, 8, 3, 256 + 20, 100, 101], dtype=dtype)\n', (3062, 3116), False, 'from numpy import array, dtype\n'), ((3131, 3190), 'numpy.array', 'array', (['[-1, 3, 3, 4, 6, 8, 100, 101, 256 + 20]'], {'dtype': 'dtype'}), '([-1, 3, 3, 4, 6, 8, 100, 101, 256 + 20], dtype=dtype)\n', (3136, 3190), False, 'from numpy import array, dtype\n'), ((9105, 9132), 'numpy.empty', 'empty', (['(101,)'], {'dtype': 'object'}), '((101,), dtype=object)\n', (9110, 9132), False, 'from numpy import empty\n'), ((9657, 9694), 'numpy.arange', 'arange', (['(0)', '(101)'], {'dtype': '"""datetime64[D]"""'}), "(0, 101, dtype='datetime64[D]')\n", (9663, 9694), False, 'from numpy import arange\n'), ((12228, 12240), 'numpy.dtype', 'dtype', (['"""<f8"""'], {}), "('<f8')\n", (12233, 12240), False, 'from numpy import array, dtype\n'), ((14029, 14040), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (14037, 14040), True, 'import numpy as np\n'), ((14139, 14151), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14147, 14151), True, 'import numpy as np\n'), ((14259, 14272), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (14267, 14272), True, 'import numpy as 
np\n'), ((14371, 14389), 'numpy.array', 'np.array', (['[[2, 3]]'], {}), '([[2, 3]])\n', (14379, 14389), True, 'import numpy as np\n'), ((1410, 1423), 'numpy.array', 'array', (['[[[]]]'], {}), '([[[]]])\n', (1415, 1423), False, 'from numpy import array, dtype\n'), ((4313, 4322), 'math.isnan', 'isnan', (['bb'], {}), '(bb)\n', (4318, 4322), False, 'from math import isnan\n'), ((4340, 4349), 'math.isnan', 'isnan', (['aa'], {}), '(aa)\n', (4345, 4349), False, 'from math import isnan\n'), ((4700, 4709), 'math.isnan', 'isnan', (['bb'], {}), '(bb)\n', (4705, 4709), False, 'from math import isnan\n'), ((4727, 4736), 'math.isnan', 'isnan', (['aa'], {}), '(aa)\n', (4732, 4736), False, 'from math import isnan\n'), ((198, 208), 'numpy.array', 'array', (['(2.0)'], {}), '(2.0)\n', (203, 208), False, 'from numpy import array, dtype\n'), ((11930, 11996), 'numpy.array', 'array', (["[('a', 1), ('c', 3), ('b', 255), ('d', 258)]"], {'dtype': 'mydtype'}), "([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)\n", (11935, 11996), False, 'from numpy import array, dtype\n'), ((12814, 12830), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (12822, 12830), True, 'import numpy as np\n'), ((12881, 12897), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (12889, 12897), True, 'import numpy as np\n'), ((13719, 13732), 'numpy.arange', 'np.arange', (['(32)'], {}), '(32)\n', (13728, 13732), True, 'import numpy as np\n')] |
######################################################################
# #
# Copyright 2009 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
if sys.version_info.major < 3:
from builtins import zip
from builtins import open
from builtins import int
from builtins import round
from builtins import range
from builtins import super
from future import standard_library
standard_library.install_aliases()
else:
# Python 3 doesn't have basestring, as unicode is type string
# Python 2 doesn't equate unicode to string, but both are basestring
# Now isinstance(s, basestring) will be True for any python version
basestring = str
import struct
import itertools
import warnings
import numpy
from gdspy import clipper
from gdspy.hobby import _hobby
# Direction tokens mapped to angles in units of pi radians.
_directions_dict = {"+x": 0, "+y": 0.5, "-x": 1, "-y": -0.5}
_directions_list = ["+x", "+y", "-x", "-y"]
_halfpi = 0.5 * numpy.pi
# Sign pair (-1, +1) used e.g. by PolygonSet.rotate to form (-sin, +sin).
_mpone = numpy.array((-1.0, 1.0))
class PolygonSet(object):
"""
Set of polygonal objects.
Parameters
----------
polygons : iterable of array-like[N][2]
List containing the coordinates of the vertices of each polygon.
layer : integer
The GDSII layer number for this element.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Attributes
----------
polygons : list of numpy array[N][2]
Coordinates of the vertices of each polygon.
layers : list of integer
The GDSII layer number for each element.
datatypes : list of integer
The GDSII datatype for each element (between 0 and 255).
properties : {integer: string} dictionary
Properties for these elements.
Notes
-----
The last point should not be equal to the first (polygons are
automatically closed).
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(self, polygons, layer=0, datatype=0):
self.polygons = [numpy.array(p) for p in polygons]
self.layers = [layer] * len(self.polygons)
self.datatypes = [datatype] * len(self.polygons)
self.properties = {}
def __str__(self):
return (
"PolygonSet ({} polygons, {} vertices, layers {}, datatypes {})"
).format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
def get_bounding_box(self):
"""
Calculate the bounding box of the polygons.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this polygon in the form [[x_min, y_min],
[x_max, y_max]], or None if the polygon is empty.
"""
if len(self.polygons) == 0:
return None
return numpy.array(
(
(
min(pts[:, 0].min() for pts in self.polygons),
min(pts[:, 1].min() for pts in self.polygons),
),
(
max(pts[:, 0].max() for pts in self.polygons),
max(pts[:, 1].max() for pts in self.polygons),
),
)
)
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : `PolygonSet`
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle) * _mpone
c0 = numpy.array(center)
new_polys = []
for points in self.polygons:
pts = points - c0
new_polys.append(pts * ca + pts[:, ::-1] * sa + c0)
self.polygons = new_polys
return self
def scale(self, scalex, scaley=None, center=(0, 0)):
"""
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or None
Scaling factor along the second axis. If None, same as
`scalex`.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : `PolygonSet`
This object.
"""
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
return self
    def to_gds(self, outfile, multiplier):
        """
        Convert this object to a series of GDSII elements.

        One BOUNDARY element is written per polygon, each closed with an
        ENDEL record.

        Parameters
        ----------
        outfile : open file
            Output to write the GDSII.
        multiplier : number
            A number that multiplies all dimensions written in the GDSII
            elements.
        """
        for ii in range(len(self.polygons)):
            if len(self.polygons[ii]) > 8190:
                warnings.warn(
                    "[GDSPY] Polygons with more than 8190 are not supported by the "
                    "official GDSII specification. This GDSII file might not be "
                    "compatible with all readers.",
                    stacklevel=4,
                )
                # Oversized polygon: write BOUNDARY (0x0800), LAYER (0x0D02)
                # and DATATYPE (0x0E02) records, then split the vertices over
                # several XY (0x1003) records of at most 8190 points each.
                outfile.write(
                    struct.pack(
                        ">4Hh2Hh",
                        4,
                        0x0800,
                        6,
                        0x0D02,
                        self.layers[ii],
                        6,
                        0x0E02,
                        self.datatypes[ii],
                    )
                )
                # Vertices as big-endian 4-byte integers; the first point is
                # appended at the end to close the polygon.
                xy = numpy.empty((self.polygons[ii].shape[0] + 1, 2), dtype=">i4")
                xy[:-1] = numpy.round(self.polygons[ii] * multiplier)
                xy[-1] = xy[0]
                i0 = 0
                while i0 < xy.shape[0]:
                    i1 = min(i0 + 8190, xy.shape[0])
                    outfile.write(struct.pack(">2H", 4 + 8 * (i1 - i0), 0x1003))
                    outfile.write(xy[i0:i1].tobytes())
                    i0 = i1
            else:
                # Common case: one XY record holding every vertex plus the
                # repeated first point (record length 12 + 8 per vertex).
                outfile.write(
                    struct.pack(
                        ">4Hh2Hh2H",
                        4,
                        0x0800,
                        6,
                        0x0D02,
                        self.layers[ii],
                        6,
                        0x0E02,
                        self.datatypes[ii],
                        12 + 8 * len(self.polygons[ii]),
                        0x1003,
                    )
                )
                xy = numpy.round(self.polygons[ii] * multiplier).astype(">i4")
                outfile.write(xy.tobytes())
                outfile.write(xy[0].tobytes())
            if self.properties is not None and len(self.properties) > 0:
                # PROPATTR (0x2B02) / PROPVALUE (0x2C06) pairs; values are
                # padded to an even byte count with a NUL.
                size = 0
                for attr, value in self.properties.items():
                    if len(value) % 2 != 0:
                        value = value + "\0"
                    outfile.write(
                        struct.pack(">5H", 6, 0x2B02, attr, 4 + len(value), 0x2C06)
                    )
                    outfile.write(value.encode("ascii"))
                    size += len(value) + 2
                if size > 128:
                    warnings.warn(
                        "[GDSPY] Properties with size larger than 128 bytes are not "
                        "officially supported by the GDSII specification. This file "
                        "might not be compatible with all readers.",
                        stacklevel=4,
                    )
            # ENDEL (0x1100) closes the element.
            outfile.write(struct.pack(">2H", 4, 0x1100))
def to_svg(self, outfile, scaling, precision):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
precision : positive integer or `None`
Maximal number of digits for coordinates after scaling.
"""
for p, l, d in zip(self.polygons, self.layers, self.datatypes):
outfile.write('<polygon class="l{}d{}" points="'.format(l, d))
outfile.write(
" ".join(
",".join(
(
numpy.format_float_positional(
pt[0], trim="0", precision=precision
),
numpy.format_float_positional(
pt[1], trim="0", precision=precision
),
)
)
for pt in scaling * p
)
)
outfile.write('"/>\n')
def area(self, by_spec=False):
"""
Calculate the total area of this polygon set.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with
``{(layer, datatype): area}``.
Returns
-------
out : number, dictionary
Area of this object.
"""
if by_spec:
path_area = {}
for poly, key in zip(self.polygons, zip(self.layers, self.datatypes)):
poly_area = 0
for ii in range(1, len(poly) - 1):
poly_area += (poly[0][0] - poly[ii + 1][0]) * (
poly[ii][1] - poly[0][1]
) - (poly[0][1] - poly[ii + 1][1]) * (poly[ii][0] - poly[0][0])
if key in path_area:
path_area[key] += 0.5 * abs(poly_area)
else:
path_area[key] = 0.5 * abs(poly_area)
else:
path_area = 0
for points in self.polygons:
poly_area = 0
for ii in range(1, len(points) - 1):
poly_area += (points[0][0] - points[ii + 1][0]) * (
points[ii][1] - points[0][1]
) - (points[0][1] - points[ii + 1][1]) * (
points[ii][0] - points[0][0]
)
path_area += 0.5 * abs(poly_area)
return path_area
def fracture(self, max_points=199, precision=1e-3):
"""
Slice these polygons in the horizontal and vertical directions
so that each resulting piece has at most `max_points`. This
operation occurs in place.
Parameters
----------
max_points : integer
Maximal number of points in each resulting polygon (at least
5 for the fracture to occur).
precision : float
Desired precision for rounding vertice coordinates.
Returns
-------
out : `PolygonSet`
This object.
"""
if max_points > 4:
ii = 0
while ii < len(self.polygons):
if len(self.polygons[ii]) > max_points:
pts0 = sorted(self.polygons[ii][:, 0])
pts1 = sorted(self.polygons[ii][:, 1])
ncuts = len(pts0) // max_points
if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:
# Vertical cuts
cuts = [
pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 0, 1 / precision
)
else:
# Horizontal cuts
cuts = [
pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 1, 1 / precision
)
self.polygons.pop(ii)
layer = self.layers.pop(ii)
datatype = self.datatypes.pop(ii)
self.polygons.extend(
numpy.array(x) for x in itertools.chain.from_iterable(chopped)
)
npols = sum(len(c) for c in chopped)
self.layers.extend(layer for _ in range(npols))
self.datatypes.extend(datatype for _ in range(npols))
else:
ii += 1
return self
def fillet(self, radius, points_per_2pi=128, max_points=199, precision=1e-3):
"""
Round the corners of these polygons and fractures them into
polygons with less vertices if necessary.
Parameters
----------
radius : number, array-like
Radius of the corners. If number: all corners filleted by
that amount. If array: specify fillet radii on a
per-polygon basis (length must be equal to the number of
polygons in this `PolygonSet`). Each element in the array
can be a number (all corners filleted by the same amount) or
another array of numbers, one per polygon vertex.
Alternatively, the array can be flattened to have one radius
per `PolygonSet` vertex.
points_per_2pi : integer
Number of vertices used to approximate a full circle. The
number of vertices in each corner of the polygon will be the
fraction of this number corresponding to the angle
encompassed by that corner with respect to 2 pi.
max_points : integer
Maximal number of points in each resulting polygon (at least
5, otherwise the resulting polygon is not fractured).
precision : float
Desired precision for rounding vertice coordinates in case
of fracturing.
Returns
-------
out : `PolygonSet`
This object.
"""
two_pi = 2 * numpy.pi
fracture = False
if numpy.isscalar(radius):
radii = [[radius] * p.shape[0] for p in self.polygons]
else:
if len(radius) == len(self.polygons):
radii = []
for r, p in zip(radius, self.polygons):
if numpy.isscalar(r):
radii.append([r] * p.shape[0])
else:
if len(r) != p.shape[0]:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. "
"Found {} radii for polygon with {} vertices.".format(
len(r), len(p.shape[0])
)
)
radii.append(r)
else:
total = sum(p.shape[0] for p in self.polygons)
if len(radius) != total:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. "
"Expected lengths are {} or {}; got {}.".format(
len(self.polygons), total, len(radius)
)
)
radii = []
n = 0
for p in self.polygons:
radii.append(radius[n : n + p.shape[0]])
n += p.shape[0]
for jj in range(len(self.polygons)):
vec = self.polygons[jj].astype(float) - numpy.roll(self.polygons[jj], 1, 0)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(length)
if len(ii) < len(length):
self.polygons[jj] = numpy.array(self.polygons[jj][ii])
radii[jj] = [radii[jj][i] for i in ii]
vec = self.polygons[jj].astype(float) - numpy.roll(
self.polygons[jj], 1, 0
)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
vec[:, 0] = vec[:, 0] / length
vec[:, 1] = vec[:, 1] / length
dvec = numpy.roll(vec, -1, 0) - vec
norm = (dvec[:, 0] ** 2 + dvec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(norm)
dvec[ii, 0] = dvec[ii, 0] / norm[ii]
dvec[ii, 1] = dvec[ii, 1] / norm[ii]
dot = numpy.roll(vec, -1, 0) * vec
theta = numpy.arccos(dot[:, 0] + dot[:, 1])
ct = numpy.cos(theta * 0.5)
tt = numpy.tan(theta * 0.5)
new_points = []
for ii in range(-1, len(self.polygons[jj]) - 1):
if theta[ii] > 1e-6:
a0 = -vec[ii] * tt[ii] - dvec[ii] / ct[ii]
a0 = numpy.arctan2(a0[1], a0[0])
a1 = vec[ii + 1] * tt[ii] - dvec[ii] / ct[ii]
a1 = numpy.arctan2(a1[1], a1[0])
if a1 - a0 > numpy.pi:
a1 -= two_pi
elif a1 - a0 < -numpy.pi:
a1 += two_pi
n = max(
int(numpy.ceil(abs(a1 - a0) / two_pi * points_per_2pi) + 0.5), 2
)
a = numpy.linspace(a0, a1, n)
ll = radii[jj][ii] * tt[ii]
if ll > 0.49 * length[ii]:
r = 0.49 * length[ii] / tt[ii]
ll = 0.49 * length[ii]
else:
r = radii[jj][ii]
if ll > 0.49 * length[ii + 1]:
r = 0.49 * length[ii + 1] / tt[ii]
new_points.extend(
r * dvec[ii] / ct[ii]
+ self.polygons[jj][ii]
+ numpy.vstack((r * numpy.cos(a), r * numpy.sin(a))).transpose()
)
else:
new_points.append(self.polygons[jj][ii])
self.polygons[jj] = numpy.array(new_points)
if len(new_points) > max_points:
fracture = True
if fracture:
self.fracture(max_points, precision)
return self
def translate(self, dx, dy):
"""
Translate this polygon.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `PolygonSet`
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
return self
def mirror(self, p1, p2=(0, 0)):
"""
Mirror the polygons over a line through points 1 and 2
Parameters
----------
p1 : array-like[2]
first point defining the reflection line
p2 : array-like[2]
second point defining the reflection line
Returns
-------
out : `PolygonSet`
This object.
"""
origin = numpy.array(p1)
vec = numpy.array(p2) - origin
vec_r = vec * (2 / numpy.inner(vec, vec))
self.polygons = [
numpy.outer(numpy.inner(points - origin, vec_r), vec) - points + 2 * origin
for points in self.polygons
]
return self
class Polygon(PolygonSet):
    """
    Polygonal geometric object.

    A convenience subclass of `PolygonSet` holding exactly one polygon.

    Parameters
    ----------
    points : array-like[N][2]
        Coordinates of the vertices of the polygon.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).

    Notes
    -----
    The last point should not be equal to the first (polygons are
    automatically closed).
    The original GDSII specification supports only a maximum of 199
    vertices per polygon.

    Examples
    --------
    >>> triangle_pts = [(0, 40), (15, 40), (10, 50)]
    >>> triangle = gdspy.Polygon(triangle_pts)
    >>> myCell.add(triangle)
    """

    __slots__ = "layers", "datatypes", "polygons", "properties"

    def __init__(self, points, layer=0, datatype=0):
        # Single-element lists keep the PolygonSet storage layout.
        self.layers = [layer]
        self.datatypes = [datatype]
        self.polygons = [numpy.array(points)]
        self.properties = {}

    def __str__(self):
        vertex_count = len(self.polygons[0])
        return "Polygon ({} vertices, layer {}, datatype {})".format(
            vertex_count, self.layers[0], self.datatypes[0]
        )
class Rectangle(PolygonSet):
    """
    Rectangular geometric object.

    A `PolygonSet` holding a single 4-vertex axis-aligned rectangle.

    Parameters
    ----------
    point1 : array-like[2]
        Coordinates of a corner of the rectangle.
    point2 : array-like[2]
        Coordinates of the corner of the rectangle opposite to `point1`.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).

    Examples
    --------
    >>> rectangle = gdspy.Rectangle((0, 0), (10, 20))
    >>> myCell.add(rectangle)
    """

    __slots__ = "layers", "datatypes", "polygons", "properties"

    def __init__(self, point1, point2, layer=0, datatype=0):
        self.layers = [layer]
        self.datatypes = [datatype]
        # Corners in order: point1, (x1, y2), point2, (x2, y1).
        x1, y1 = point1[0], point1[1]
        x2, y2 = point2[0], point2[1]
        self.polygons = [numpy.array([[x1, y1], [x1, y2], [x2, y2], [x2, y1]])]
        self.properties = {}

    def __str__(self):
        return (
            "Rectangle (({0[0]}, {0[1]}) to ({1[0]}, {1[1]}), layer {2}, datatype {3})"
        ).format(
            self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
        )

    def __repr__(self):
        return "Rectangle(({0[0]}, {0[1]}), ({1[0]}, {1[1]}), {2}, {3})".format(
            self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
        )
class Round(PolygonSet):
    """
    Circular geometric object.
    Represent a circle, ellipse, ring or their sections.
    Parameters
    ----------
    center : array-like[2]
        Coordinates of the center of the circle/ring.
    radius : number, array-like[2]
        Radius of the circle/outer radius of the ring. To build an
        ellipse an array of 2 numbers can be used, representing the
        radii in the horizontal and vertical directions.
    inner_radius : number, array-like[2]
        Inner radius of the ring. To build an elliptical hole, an array
        of 2 numbers can be used, representing the radii in the
        horizontal and vertical directions.
    initial_angle : number
        Initial angle of the circular/ring section (in *radians*).
    final_angle : number
        Final angle of the circular/ring section (in *radians*).
    tolerance : float
        Approximate curvature resolution. The number of points is
        automatically calculated.
    number_of_points : integer or None
        Manually define the number of vertices that form the object
        (polygonal approximation). Overrides `tolerance`.
    max_points : integer
        If the number of points in the element is greater than
        `max_points`, it will be fractured in smaller polygons with
        at most `max_points` each. If `max_points` is zero no fracture
        will occur.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).
    Notes
    -----
    The original GDSII specification supports only a maximum of 199
    vertices per polygon.
    Examples
    --------
    >>> circle = gdspy.Round((30, 5), 8)
    >>> ell_ring = gdspy.Round((50, 5), (8, 7), inner_radius=(5, 4))
    >>> pie_slice = gdspy.Round((30, 25), 8, initial_angle=0,
    ...                    final_angle=-5.0*numpy.pi/6.0)
    >>> arc = gdspy.Round((50, 25), 8, inner_radius=5,
    ...                    initial_angle=-5.0*numpy.pi/6.0,
    ...                    final_angle=0)
    """
    __slots__ = "layers", "datatypes", "polygons", "properties"
    def __init__(
        self,
        center,
        radius,
        inner_radius=0,
        initial_angle=0,
        final_angle=0,
        tolerance=0.01,
        number_of_points=None,
        max_points=199,
        layer=0,
        datatype=0,
    ):
        # Elliptical outer boundary: keep both radii and remap each angle
        # through arctan2 so vertices follow the ellipse parameterization.
        if hasattr(radius, "__iter__"):
            orx, ory = radius
            radius = max(radius)
            def outer_transform(a):
                # r restores the multiple-of-2*pi winding that the modulo
                # below removes; entries exactly at pi are forced to pi.
                r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
                t = numpy.arctan2(orx * numpy.sin(a), ory * numpy.cos(a)) + r
                t[a == numpy.pi] = numpy.pi
                return t
        else:
            # Circular outer boundary: angles are used unchanged.
            orx = ory = radius
            def outer_transform(a):
                return a
        # Same treatment for the inner (hole) boundary.
        if hasattr(inner_radius, "__iter__"):
            irx, iry = inner_radius
            inner_radius = max(inner_radius)
            def inner_transform(a):
                r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
                t = numpy.arctan2(irx * numpy.sin(a), iry * numpy.cos(a)) + r
                t[a == numpy.pi] = numpy.pi
                return t
        else:
            irx = iry = inner_radius
            def inner_transform(a):
                return a
        # Backwards compatibility: a float number_of_points used to mean
        # "tolerance".
        if isinstance(number_of_points, float):
            warnings.warn(
                "[GDSPY] Use of a floating number as number_of_points "
                "is deprecated in favor of tolerance.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            tolerance = number_of_points
            number_of_points = None
        if number_of_points is None:
            # Derive the vertex count from the angular span and the
            # requested curvature tolerance (at least 3 vertices).
            full_angle = (
                2 * numpy.pi
                if final_angle == initial_angle
                else abs(final_angle - initial_angle)
            )
            number_of_points = max(
                3,
                1 + int(0.5 * full_angle / numpy.arccos(1 - tolerance / radius) + 0.5),
            )
            if inner_radius > 0:
                # Rings need vertices on both boundaries.
                number_of_points *= 2
        # Split into several polygons when max_points would be exceeded.
        pieces = (
            1
            if max_points == 0
            else int(numpy.ceil(number_of_points / float(max_points)))
        )
        number_of_points = number_of_points // pieces
        self.layers = [layer] * pieces
        self.datatypes = [datatype] * pieces
        self.polygons = [numpy.zeros((number_of_points, 2)) for _ in range(pieces)]
        self.properties = {}
        if final_angle == initial_angle and pieces > 1:
            # Full circle split in pieces: spread them over a whole turn.
            final_angle += 2 * numpy.pi
        angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
        oang = outer_transform(angles)
        iang = inner_transform(angles)
        for ii in range(pieces):
            if oang[ii + 1] == oang[ii]:
                # Zero angular span: full circle or full ring.
                if inner_radius <= 0:
                    t = (
                        numpy.arange(number_of_points)
                        * 2.0
                        * numpy.pi
                        / number_of_points
                    )
                    self.polygons[ii][:, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:, 1] = numpy.sin(t) * ory + center[1]
                else:
                    # Full ring: outer boundary forward, inner backward.
                    n2 = number_of_points // 2
                    n1 = number_of_points - n2
                    t = numpy.arange(n1) * 2.0 * numpy.pi / (n1 - 1)
                    self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
                    t = numpy.arange(n2) * -2.0 * numpy.pi / (n2 - 1)
                    self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
                    self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
            else:
                if inner_radius <= 0:
                    # Pie slice: first vertex is the center itself.
                    t = numpy.linspace(oang[ii], oang[ii + 1], number_of_points - 1)
                    self.polygons[ii][1:, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][1:, 1] = numpy.sin(t) * ory + center[1]
                    self.polygons[ii][0] += center
                else:
                    # Arc section: outer boundary forward, inner backward.
                    n2 = number_of_points // 2
                    n1 = number_of_points - n2
                    t = numpy.linspace(oang[ii], oang[ii + 1], n1)
                    self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
                    t = numpy.linspace(iang[ii + 1], iang[ii], n2)
                    self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
                    self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
    def __str__(self):
        return ("Round ({} polygons, {} vertices, layers {}, datatypes {})").format(
            len(self.polygons),
            sum([len(p) for p in self.polygons]),
            list(set(self.layers)),
            list(set(self.datatypes)),
        )
class Text(PolygonSet):
"""
Polygonal text object.
Each letter is formed by a series of polygons.
Parameters
----------
text : string
The text to be converted in geometric objects.
size : number
Height of the character. The width of a character and the
distance between characters are this value multiplied by 5 / 9
and 8 / 9, respectively. For vertical text, the distance is
multiplied by 11 / 9.
position : array-like[2]
Text position (lower left corner).
horizontal : bool
If True, the text is written from left to right; if
False, from top to bottom.
angle : number
The angle of rotation of the text.
layer : integer
The GDSII layer number for these elements.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Examples
--------
>>> text = gdspy.Text('Sample text', 20, (-10, -100))
>>> myCell.add(text)
"""
# fmt: off
_font = {
'!': [[(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 4), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (2, 6), (2, 5)]],
'"': [[(1, 7), (2, 7), (2, 8), (2, 9), (1, 9), (1, 8)], [(3, 7), (4, 7), (4, 8), (4, 9), (3, 9), (3, 8)]],
'#': [[(0, 3), (1, 3), (1, 2), (2, 2), (2, 3), (2, 4), (2, 5), (3, 5), (3, 4), (2, 4), (2, 3), (3, 3), (3, 2), (4, 2), (4, 3), (5, 3), (5, 4), (4, 4), (4, 5), (5, 5), (5, 6), (4, 6), (4, 7), (3, 7), (3, 6), (2, 6), (2, 7), (1, 7), (1, 6), (0, 6), (0, 5), (1, 5), (1, 4), (0, 4)]],
'$': [[(0, 2), (1, 2), (2, 2), (2, 1), (3, 1), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (4, 4), (4, 5), (3, 5), (3, 6), (3, 7), (4, 7), (5, 7), (5, 8), (4, 8), (3, 8), (3, 9), (2, 9), (2, 8), (1, 8), (1, 7), (2, 7), (2, 6), (1, 6), (1, 5), (2, 5), (2, 4), (2, 3), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(4, 3), (5, 3), (5, 4), (4, 4)]],
'%': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (2, 7), (2, 8), (2, 9), (1, 9), (0, 9), (0, 8)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (3, 4), (3, 3)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'&': [[(0, 3), (1, 3), (1, 4), (1, 5), (0, 5), (0, 4)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (1, 6)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 5), (5, 5), (5, 6), (4, 6)]], "'": [[(2, 7), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8)]],
'(': [[(1, 4), (2, 4), (2, 5), (2, 6), (2, 7), (1, 7), (1, 6), (1, 5)], [(2, 3), (3, 3), (3, 4), (2, 4)], [(2, 7), (3, 7), (3, 8), (2, 8)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(3, 8), (4, 8), (4, 9), (3, 9)]],
')': [[(3, 4), (4, 4), (4, 5), (4, 6), (4, 7), (3, 7), (3, 6), (3, 5)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 3), (3, 3), (3, 4), (2, 4)], [(2, 7), (3, 7), (3, 8), (2, 8)]],
'*': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 4), (1, 4), (1, 3), (2, 3), (2, 2), (3, 2), (3, 3), (4, 3), (4, 4), (5, 4), (5, 5), (4, 5), (4, 6), (3, 6), (3, 7), (2, 7), (2, 6), (1, 6), (1, 5), (0, 5)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'+': [[(0, 4), (1, 4), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (4, 4), (5, 4), (5, 5), (4, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6), (2, 5), (1, 5), (0, 5)]],
',': [[(1, 0), (2, 0), (2, 1), (1, 1)], [(2, 1), (3, 1), (3, 2), (3, 3), (2, 3), (2, 2)]],
'-': [[(0, 4), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (5, 5), (4, 5), (3, 5), (2, 5), (1, 5), (0, 5)]],
'.': [[(2, 2), (3, 2), (3, 3), (2, 3)]],
'/': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'0': [[(0, 3), (1, 3), (1, 4), (2, 4), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 5), (4, 4), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (3, 7)]],
'1': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (1, 8), (1, 7), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (1, 3)]],
'2': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (1, 8), (0, 8)], [(1, 4), (2, 4), (3, 4), (3, 5), (2, 5), (1, 5)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'3': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'4': [[(0, 4), (1, 4), (2, 4), (3, 4), (3, 3), (3, 2), (4, 2), (4, 3), (4, 4), (5, 4), (5, 5), (4, 5), (4, 6), (4, 7), (4, 8), (4, 9), (3, 9), (2, 9), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (2, 5), (1, 5), (1, 6), (0, 6), (0, 5)], [(1, 6), (2, 6), (2, 7), (2, 8), (1, 8), (1, 7)]],
'5': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'6': [[(0, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'7': [[(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 7), (4, 6), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (2, 5), (2, 4), (2, 3)], [(3, 5), (4, 5), (4, 6), (3, 6)]],
'8': [[(0, 3), (1, 3), (1, 4), (1, 5), (0, 5), (0, 4)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'9': [[(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)]],
':': [[(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 5), (3, 5), (3, 6), (2, 6)]],
';': [[(1, 0), (2, 0), (2, 1), (1, 1)], [(2, 1), (3, 1), (3, 2), (3, 3), (2, 3), (2, 2)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'<': [[(0, 5), (1, 5), (1, 6), (0, 6)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 3), (3, 3), (4, 3), (4, 4), (3, 4), (2, 4)], [(2, 7), (3, 7), (4, 7), (4, 8), (3, 8), (2, 8)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 8), (5, 8), (5, 9), (4, 9)]],
'=': [[(0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (5, 4), (4, 4), (3, 4), (2, 4), (1, 4), (0, 4)], [(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (4, 6), (3, 6), (2, 6), (1, 6), (0, 6)]],
'>': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 8), (1, 8), (1, 9), (0, 9)], [(1, 3), (2, 3), (3, 3), (3, 4), (2, 4), (1, 4)], [(1, 7), (2, 7), (3, 7), (3, 8), (2, 8), (1, 8)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 5), (5, 5), (5, 6), (4, 6)]],
'?': [[(0, 7), (1, 7), (1, 8), (0, 8)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'@': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 4), (3, 4), (4, 4), (4, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6), (2, 5)], [(4, 5), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6)]],
'A': [[(0, 2), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)]],
'B': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'C': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)]],
'D': [[(0, 2), (1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 4), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5)]],
'E': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'F': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'G': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (3, 6), (2, 6), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)]],
'H': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'I': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (1, 3)]],
'J': [[(0, 3), (1, 3), (1, 4), (0, 4)], [(0, 8), (1, 8), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (3, 4), (3, 3), (4, 3), (4, 4), (4, 5), (4, 6), (4, 7), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3)]],
'K': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (2, 6), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(2, 6), (3, 6), (3, 7), (2, 7)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 8), (5, 8), (5, 9), (4, 9)]],
'L': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'M': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 7), (2, 8), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6)], [(3, 7), (4, 7), (4, 6), (4, 5), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (3, 8)]],
'N': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 7), (2, 8), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6)], [(3, 4), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (3, 5)]],
'O': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'P': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'Q': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4), (3, 4), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'R': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (3, 4), (4, 4), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'S': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'T': [[(0, 8), (1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)]],
'U': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'V': [[(0, 5), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6)], [(1, 3), (2, 3), (2, 4), (2, 5), (1, 5), (1, 4)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 5), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6)]],
'W': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (2, 6), (2, 5), (2, 4)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'X': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'Y': [[(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 5), (2, 5), (2, 6), (2, 7), (1, 7), (1, 6)], [(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (2, 5), (2, 4), (2, 3)], [(3, 5), (4, 5), (4, 6), (4, 7), (3, 7), (3, 6)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'Z': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 7), (3, 7)]],
'[': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3)]],
'\\': [[(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)]],
']': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6), (4, 7), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (3, 4), (3, 3), (2, 3), (1, 3)]],
'^': [[(0, 6), (1, 6), (1, 7), (0, 7)], [(1, 7), (2, 7), (2, 8), (1, 8)], [(2, 8), (3, 8), (3, 9), (2, 9)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'_': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)]],
'`': [[(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 7), (3, 7), (3, 8), (2, 8)]],
'a': [[(0, 3), (1, 3), (1, 4), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 4), (2, 4), (3, 4), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'b': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'c': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7)]],
'd': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (3, 7), (2, 7), (1, 7), (1, 6), (2, 6), (3, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)]],
'e': [[(0, 3), (1, 3), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'f': [[(0, 5), (1, 5), (1, 4), (1, 3), (1, 2), (2, 2), (2, 3), (2, 4), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (2, 7), (2, 8), (1, 8), (1, 7), (1, 6), (0, 6)], [(2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9)]],
'g': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 0), (2, 0), (3, 0), (4, 0), (4, 1), (3, 1), (2, 1), (1, 1)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'h': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'i': [[(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (2, 7), (1, 7)], [(2, 8), (3, 8), (3, 9), (2, 9)]],
'j': [[(0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)], [(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (2, 7), (1, 7)], [(2, 8), (3, 8), (3, 9), (2, 9)]],
'k': [[(0, 2), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4), (3, 5), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'l': [[(1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (1, 9)]],
'm': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'n': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'o': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'p': [[(0, 0), (1, 0), (1, 1), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3), (0, 2), (0, 1)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'q': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (4, 0), (5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'r': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (3, 6), (2, 6), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7)]],
's': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 5), (1, 5), (1, 6), (0, 6)], [(1, 4), (2, 4), (3, 4), (4, 4), (4, 5), (3, 5), (2, 5), (1, 5)], [(1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7)], [(4, 3), (5, 3), (5, 4), (4, 4)]],
't': [[(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (1, 7)], [(3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3)]],
'u': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4)]],
'v': [[(0, 5), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6)], [(1, 3), (2, 3), (2, 4), (2, 5), (1, 5), (1, 4)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 5), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6)]],
'w': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (2, 6), (2, 5), (2, 4)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4)]],
'x': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(1, 3), (2, 3), (2, 4), (1, 4)], [(1, 5), (2, 5), (2, 6), (1, 6)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'y': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 0), (2, 0), (3, 0), (4, 0), (4, 1), (3, 1), (2, 1), (1, 1)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)]],
'z': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (2, 4), (1, 4), (1, 3), (0, 3)], [(0, 6), (1, 6), (2, 6), (3, 6), (3, 5), (4, 5), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'{': [[(1, 5), (2, 5), (2, 4), (2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (1, 6)], [(3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3)], [(3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9)]],
'|': [[(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3)]],
'}': [[(0, 2), (1, 2), (2, 2), (2, 3), (1, 3), (0, 3)], [(0, 8), (1, 8), (2, 8), (2, 9), (1, 9), (0, 9)], [(2, 3), (3, 3), (3, 4), (3, 5), (4, 5), (4, 6), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4)]],
'~': [[(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 7), (3, 7), (3, 8), (2, 8)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
}
# fmt: on
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(
self, text, size, position=(0, 0), horizontal=True, angle=0, layer=0, datatype=0
):
self.polygons = []
posX = 0
posY = 0
text_multiplier = size / 9.0
if angle == 0:
ca = 1
sa = 0
else:
ca = numpy.cos(angle)
sa = numpy.sin(angle)
for jj in range(len(text)):
if text[jj] == "\n":
if horizontal:
posY -= 11
posX = 0
else:
posX += 8
posY = 0
elif text[jj] == "\t":
if horizontal:
posX = posX + 32 - (posX + 8) % 32
else:
posY = posY - 11 - (posY - 22) % 44
else:
if text[jj] in Text._font:
for p in Text._font[text[jj]]:
polygon = p[:]
for ii in range(len(polygon)):
xp = text_multiplier * (posX + polygon[ii][0])
yp = text_multiplier * (posY + polygon[ii][1])
polygon[ii] = (
position[0] + xp * ca - yp * sa,
position[1] + xp * sa + yp * ca,
)
self.polygons.append(numpy.array(polygon))
if horizontal:
posX += 8
else:
posY -= 11
self.layers = [layer] * len(self.polygons)
self.datatypes = [datatype] * len(self.polygons)
self.properties = {}
def __str__(self):
return ("Text ({} polygons, {} vertices, layers {}, datatypes {})").format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
class Path(PolygonSet):
    """
    Series of geometric objects that form a path or a collection of
    parallel paths.
    Parameters
    ----------
    width : number
        The width of each path.
    initial_point : array-like[2]
        Starting position of the path.
    number_of_paths : positive integer
        Number of parallel paths to create simultaneously.
    distance : number
        Distance between the centers of adjacent paths.
    Attributes
    ----------
    x : number
        Current position of the path in the x direction.
    y : number
        Current position of the path in the y direction.
    w : number
        *Half*-width of each path.
    n : integer
        Number of parallel paths.
    direction : '+x', '-x', '+y', '-y' or number
        Direction or angle (in *radians*) the path points to.
    distance : number
        Distance between the centers of adjacent paths.
    length : number
        Length of the central path axis. If only one path is created,
        this is the real length of the path.
    properties : {integer: string} dictionary
        Properties for this path.
    """

    # Fixed attribute set: avoids a per-instance __dict__ for the many
    # Path objects typically created in a layout.
    __slots__ = (
        "layers",
        "datatypes",
        "polygons",
        "x",
        "y",
        "w",
        "n",
        "direction",
        "distance",
        "length",
        "properties",
    )
def __init__(self, width, initial_point=(0, 0), number_of_paths=1, distance=0):
self.x = initial_point[0]
self.y = initial_point[1]
self.w = width * 0.5
self.n = number_of_paths
self.direction = "+x"
self.distance = distance
self.length = 0.0
self.polygons = []
self.layers = []
self.datatypes = []
self.properties = {}
def __str__(self):
if self.n > 1:
return "Path (x{}, end at ({}, {}) towards {}, length {}, width {}, {} apart, {} polygons, {} vertices, layers {}, datatypes {})".format(
self.n,
self.x,
self.y,
self.direction,
self.length,
self.w * 2,
self.distance,
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
else:
return "Path (end at ({}, {}) towards {}, length {}, width {}, {} polygons, {} vertices, layers {}, datatypes {})".format(
self.x,
self.y,
self.direction,
self.length,
self.w * 2,
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
def translate(self, dx, dy):
"""
Translate this object.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `Path`
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
self.x += dx
self.y += dy
return self
    def rotate(self, angle, center=(0, 0)):
        """
        Rotate this object.
        Parameters
        ----------
        angle : number
            The angle of rotation (in *radians*).
        center : array-like[2]
            Center point for the rotation.
        Returns
        -------
        out : `Path`
            This object.
        """
        ca = numpy.cos(angle)
        # _mpone scales the sine term per component so that, combined with
        # the coordinate swaps ([::-1]) below, elementwise products perform
        # a proper 2-D rotation.
        sa = numpy.sin(angle) * _mpone
        c0 = numpy.array(center)
        if isinstance(self.direction, basestring):
            # Convert a symbolic direction ('+x', '-y', ...) to radians first.
            self.direction = _directions_dict[self.direction] * numpy.pi
        self.direction += angle
        # Rotate the current end point about `center`.
        cur = numpy.array((self.x, self.y)) - c0
        self.x, self.y = cur * ca + cur[::-1] * sa + c0
        self.polygons = [
            (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
            for points in self.polygons
        ]
        return self
def scale(self, scalex, scaley=None, center=(0, 0)):
"""
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or None
Scaling factor along the second axis. If None, same as
`scalex`.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : `Path`
This object.
Notes
-----
The direction of the path is not modified by this method and
its width is scaled only by `scalex`.
"""
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
self.x = (self.x - c0[0]) * scalex + c0[0]
self.y = (self.y - c0[1]) * (scalex if scaley is None else scaley) + c0[1]
self.w *= scalex
return self
    def mirror(self, p1, p2=(0, 0)):
        """
        Mirror the polygons over a line through points 1 and 2
        Parameters
        ----------
        p1 : array-like[2]
            first point defining the reflection line
        p2 : array-like[2]
            second point defining the reflection line
        Returns
        -------
        out : `Path`
            This object.
        """
        origin = numpy.array(p1)
        vec = numpy.array(p2) - origin
        # vec_r = 2 * vec / |vec|^2: the reflection of a point q is
        # <q - origin, vec_r> * vec - q + 2 * origin.
        vec_r = vec * (2 / numpy.inner(vec, vec))
        self.polygons = [
            numpy.outer(numpy.inner(points - origin, vec_r), vec) - points + 2 * origin
            for points in self.polygons
        ]
        # Reflect the current end point with the same formula.
        dot = (self.x - origin[0]) * vec_r[0] + (self.y - origin[1]) * vec_r[1]
        self.x = dot * vec[0] - self.x + 2 * origin[0]
        self.y = dot * vec[1] - self.y + 2 * origin[1]
        if isinstance(self.direction, basestring):
            # Convert a symbolic direction ('+x', '-y', ...) to radians first.
            self.direction = _directions_dict[self.direction] * numpy.pi
        # Directions reflect about the angle of the mirror line.
        self.direction = 2 * numpy.arctan2(vec[1], vec[0]) - self.direction
        return self
def segment(
self,
length,
direction=None,
final_width=None,
final_distance=None,
axis_offset=0,
layer=0,
datatype=0,
):
"""
Add a straight section to the path.
Parameters
----------
length : number
Length of the section to add.
direction : '+x', '-x', '+y', '-y' or number
Direction or angle (in *radians*) of rotation of the
segment.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly change from
its current value to this one along this segment.
axis_offset : number
If set, the paths will be offset from their direction by
this amount.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number
of paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between
0 and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
"""
if direction is None:
direction = self.direction
else:
self.direction = direction
if direction == "+x":
ca = 1
sa = 0
elif direction == "-x":
ca = -1
sa = 0
elif direction == "+y":
ca = 0
sa = 1
elif direction == "-y":
ca = 0
sa = -1
else:
ca = numpy.cos(direction)
sa = numpy.sin(direction)
old_x = self.x
old_y = self.y
self.x += length * ca + axis_offset * sa
self.y += length * sa - axis_offset * ca
old_w = self.w
old_distance = self.distance
if final_width is not None:
self.w = final_width * 0.5
if final_distance is not None:
self.distance = final_distance
if (self.w != 0) or (old_w != 0):
for ii in range(self.n):
d0 = ii * self.distance - (self.n - 1) * self.distance * 0.5
old_d0 = ii * old_distance - (self.n - 1) * old_distance * 0.5
self.polygons.append(
numpy.array(
[
(
old_x + (old_d0 - old_w) * sa,
old_y - (old_d0 - old_w) * ca,
),
(
old_x + (old_d0 + old_w) * sa,
old_y - (old_d0 + old_w) * ca,
),
(self.x + (d0 + self.w) * sa, self.y - (d0 + self.w) * ca),
(self.x + (d0 - self.w) * sa, self.y - (d0 - self.w) * ca),
]
)
)
if self.w == 0:
self.polygons[-1] = self.polygons[-1][:-1]
if old_w == 0:
self.polygons[-1] = self.polygons[-1][1:]
self.length += (length ** 2 + axis_offset ** 2) ** 0.5
if isinstance(layer, list):
self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[: self.n]
)
else:
self.datatypes.extend(datatype for _ in range(self.n))
return self
    def arc(
        self,
        radius,
        initial_angle,
        final_angle,
        tolerance=0.01,
        number_of_points=None,
        max_points=199,
        final_width=None,
        final_distance=None,
        layer=0,
        datatype=0,
    ):
        """
        Add a curved section to the path.
        Parameters
        ----------
        radius : number
            Central radius of the section.
        initial_angle : number
            Initial angle of the curve (in *radians*).
        final_angle : number
            Final angle of the curve (in *radians*).
        tolerance : float
            Approximate curvature resolution. The number of points is
            automatically calculated.
        number_of_points : integer or None
            Manually define the number of vertices that form the object
            (polygonal approximation). Overrides `tolerance`.
        max_points : integer
            If the number of points in the element is greater than
            `max_points`, it will be fractured in smaller polygons with
            at most `max_points` each. If `max_points` is zero no
            fracture will occur.
        final_width : number
            If set, the paths of this segment will have their widths
            linearly changed from their current value to this one.
        final_distance : number
            If set, the distance between paths is linearly change from
            its current value to this one along this segment.
        layer : integer, list
            The GDSII layer numbers for the elements of each path. If
            the number of layers in the list is less than the number of
            paths, the list is repeated.
        datatype : integer, list
            The GDSII datatype for the elements of each path (between 0
            and 255). If the number of datatypes in the list is less
            than the number of paths, the list is repeated.
        Returns
        -------
        out : `Path`
            This object.
        Notes
        -----
        The original GDSII specification supports only a maximum of 199
        vertices per polygon.
        """
        # Arc center: the current position lies on the arc at initial_angle.
        cx = self.x - radius * numpy.cos(initial_angle)
        cy = self.y - radius * numpy.sin(initial_angle)
        self.x = cx + radius * numpy.cos(final_angle)
        self.y = cy + radius * numpy.sin(final_angle)
        # Exit direction is tangent to the arc: +/-90 degrees from the final
        # angle depending on the sweep orientation.
        if final_angle > initial_angle:
            self.direction = final_angle + numpy.pi * 0.5
        else:
            self.direction = final_angle - numpy.pi * 0.5
        old_w = self.w
        old_distance = self.distance
        if final_width is not None:
            self.w = final_width * 0.5
        if final_distance is not None:
            self.distance = final_distance
        if isinstance(number_of_points, float):
            # Legacy API: a float here used to mean a resolution value.
            warnings.warn(
                "[GDSPY] Use of a floating number as number_of_points "
                "is deprecated in favor of tolerance.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            tolerance = number_of_points
            number_of_points = None
        if number_of_points is None:
            # Outermost radius of the path bundle bounds the chord error,
            # so it determines how many vertices `tolerance` requires.
            r = (
                radius
                + max(old_distance, self.distance) * (self.n - 1) * 0.5
                + max(old_w, self.w)
            )
            number_of_points = max(
                6,
                2
                + 2
                * int(
                    0.5
                    * abs(final_angle - initial_angle)
                    / numpy.arccos(1 - tolerance / r)
                    + 0.5
                ),
            )
        # Fracture into `pieces` polygons of at most max_points vertices.
        pieces = (
            1
            if max_points == 0
            else int(numpy.ceil(number_of_points / float(max_points)))
        )
        number_of_points = number_of_points // pieces
        # Width, distance and angle interpolate linearly across the pieces.
        widths = numpy.linspace(old_w, self.w, pieces + 1)
        distances = numpy.linspace(old_distance, self.distance, pieces + 1)
        angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
        if (self.w != 0) or (old_w != 0):
            for jj in range(pieces):
                for ii in range(self.n):
                    self.polygons.append(numpy.zeros((number_of_points, 2)))
                    # Central radii of path ii at the end/start of this piece.
                    r0 = (
                        radius
                        + ii * distances[jj + 1]
                        - (self.n - 1) * distances[jj + 1] * 0.5
                    )
                    old_r0 = (
                        radius + ii * distances[jj] - (self.n - 1) * distances[jj] * 0.5
                    )
                    pts2 = number_of_points // 2
                    pts1 = number_of_points - pts2
                    # Outer edge of the polygon, traversed forward.
                    ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)
                    rad = numpy.linspace(old_r0 + widths[jj], r0 + widths[jj + 1], pts1)
                    self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx
                    self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy
                    # A zero-width end collapses a vertex pair: shift counts.
                    if widths[jj + 1] == 0:
                        pts1 -= 1
                        pts2 += 1
                    if widths[jj] == 0:
                        self.polygons[-1][: pts1 - 1] = numpy.array(
                            self.polygons[-1][1:pts1]
                        )
                        pts1 -= 1
                        pts2 += 1
                    # Inner edge, traversed backwards to close the polygon.
                    ang = numpy.linspace(angles[jj + 1], angles[jj], pts2)
                    rad = numpy.linspace(r0 - widths[jj + 1], old_r0 - widths[jj], pts2)
                    if rad[0] <= 0 or rad[-1] <= 0:
                        warnings.warn(
                            "[GDSPY] Path arc with width larger than radius "
                            "created: possible self-intersecting polygon.",
                            stacklevel=2,
                        )
                    self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx
                    self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy
                self.length += abs((angles[jj + 1] - angles[jj]) * radius)
        # Repeat layer/datatype lists as needed to cover all n paths.
        if isinstance(layer, list):
            self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
        else:
            self.layers.extend(layer for _ in range(self.n))
        if isinstance(datatype, list):
            self.datatypes.extend(
                (datatype * (self.n // len(datatype) + 1))[: self.n]
            )
        else:
            self.datatypes.extend(datatype for _ in range(self.n))
        return self
    def turn(
        self,
        radius,
        angle,
        tolerance=0.01,
        number_of_points=None,
        max_points=199,
        final_width=None,
        final_distance=None,
        layer=0,
        datatype=0,
    ):
        """
        Add a curved section to the path.
        Parameters
        ----------
        radius : number
            Central radius of the section.
        angle : 'r', 'l', 'rr', 'll' or number
            Angle (in *radians*) of rotation of the path. The values
            'r' and 'l' represent 90-degree turns cw and ccw,
            respectively; the values 'rr' and 'll' represent analogous
            180-degree turns.
        tolerance : float
            Approximate curvature resolution. The number of points is
            automatically calculated.
        number_of_points : integer or None
            Manually define the number of vertices that form the object
            (polygonal approximation). Overrides `tolerance`.
        max_points : integer
            If the number of points in the element is greater than
            `max_points`, it will be fractured in smaller polygons with
            at most `max_points` each. If `max_points` is zero no
            fracture will occur.
        final_width : number
            If set, the paths of this segment will have their widths
            linearly changed from their current value to this one.
        final_distance : number
            If set, the distance between paths is linearly change from
            its current value to this one along this segment.
        layer : integer, list
            The GDSII layer numbers for the elements of each path. If
            the number of layers in the list is less than the number of
            paths, the list is repeated.
        datatype : integer, list
            The GDSII datatype for the elements of each path (between 0
            and 255). If the number of datatypes in the list is less
            than the number of paths, the list is repeated.
        Returns
        -------
        out : `Path`
            This object.
        Notes
        -----
        The original GDSII specification supports only a maximum of 199
        vertices per polygon.
        """
        # `exact` means the turn is a whole multiple of 90 degrees, so a
        # symbolic direction ('+x', ...) can be restored after the arc.
        # The string cases must be tested before `angle < 0` (comparing a
        # string with 0 would raise TypeError).
        exact = True
        if angle == "r":
            delta_i = _halfpi
            delta_f = 0
        elif angle == "rr":
            delta_i = _halfpi
            delta_f = -delta_i
        elif angle == "l":
            delta_i = -_halfpi
            delta_f = 0
        elif angle == "ll":
            delta_i = -_halfpi
            delta_f = -delta_i
        elif angle < 0:
            exact = False
            delta_i = _halfpi
            delta_f = delta_i + angle
        else:
            exact = False
            delta_i = -_halfpi
            delta_f = delta_i + angle
        # Convert a symbolic current direction into radians for arc().
        if self.direction == "+x":
            self.direction = 0
        elif self.direction == "-x":
            self.direction = numpy.pi
        elif self.direction == "+y":
            self.direction = _halfpi
        elif self.direction == "-y":
            self.direction = -_halfpi
        elif exact:
            # Numeric direction: no symbolic direction can be restored.
            exact = False
        self.arc(
            radius,
            self.direction + delta_i,
            self.direction + delta_f,
            tolerance,
            number_of_points,
            max_points,
            final_width,
            final_distance,
            layer,
            datatype,
        )
        if exact:
            # Snap back to the nearest symbolic direction.
            self.direction = _directions_list[int(round(self.direction / _halfpi)) % 4]
        return self
    def parametric(
        self,
        curve_function,
        curve_derivative=None,
        tolerance=0.01,
        number_of_evaluations=5,
        max_points=199,
        final_width=None,
        final_distance=None,
        relative=True,
        layer=0,
        datatype=0,
    ):
        """
        Add a parametric curve to the path.
        `curve_function` will be evaluated uniformly in the interval
        [0, 1] at least `number_of_points` times. More points will be
        added to the curve at the midpoint between evaluations if that
        points presents error larger than `tolerance`.
        Parameters
        ----------
        curve_function : callable
            Function that defines the curve. Must be a function of one
            argument (that varies from 0 to 1) that returns a 2-element
            array with the coordinates of the curve.
        curve_derivative : callable
            If set, it should be the derivative of the curve function.
            Must be a function of one argument (that varies from 0 to 1)
            that returns a 2-element array. If None, the derivative
            will be calculated numerically.
        tolerance : number
            Acceptable tolerance for the approximation of the curve
            function by a finite number of evaluations.
        number_of_evaluations : integer
            Initial number of points where the curve function will be
            evaluated. According to `tolerance`, more evaluations will
            be performed.
        max_points : integer
            Elements will be fractured until each polygon has at most
            `max_points`. If `max_points` is less than 4, no fracture
            will occur.
        final_width : number or function
            If set to a number, the paths of this segment will have
            their widths linearly changed from their current value to
            this one. If set to a function, it must be a function of
            one argument (that varies from 0 to 1) and returns the width
            of the path.
        final_distance : number or function
            If set to a number, the distance between paths is linearly
            change from its current value to this one. If set to a
            function, it must be a function of one argument (that varies
            from 0 to 1) and returns the width of the path.
        relative : bool
            If True, the return values of `curve_function` are used as
            offsets from the current path position, i.e., to ensure a
            continuous path, ``curve_function(0)`` must be (0, 0).
            Otherwise, they are used as absolute coordinates.
        layer : integer, list
            The GDSII layer numbers for the elements of each path. If
            the number of layers in the list is less than the number of
            paths, the list is repeated.
        datatype : integer, list
            The GDSII datatype for the elements of each path (between 0
            and 255). If the number of datatypes in the list is less
            than the number of paths, the list is repeated.
        Returns
        -------
        out : `Path`
            This object.
        Notes
        -----
        The norm of the vector returned by `curve_derivative` is not
        important. Only the direction is used.
        The original GDSII specification supports only a maximum of 199
        vertices per polygon.
        Examples
        --------
        >>> def my_parametric_curve(t):
        ...         return (2**t, t**2)
        >>> def my_parametric_curve_derivative(t):
        ...         return (0.69315 * 2**t, 2 * t)
        >>> my_path.parametric(my_parametric_curve,
        ...                    my_parametric_curve_derivative)
        """
        err = tolerance ** 2
        points = list(numpy.linspace(0, 1, number_of_evaluations))
        values = [numpy.array(curve_function(u)) for u in points]
        delta = points[1]
        i = 1
        # Adaptive refinement: insert midpoints wherever linear interpolation
        # between consecutive samples misses the curve by more than tolerance.
        while i < len(points):
            midpoint = 0.5 * (points[i] + points[i - 1])
            midvalue = numpy.array(curve_function(midpoint))
            test_err = (values[i] + values[i - 1]) / 2 - midvalue
            if test_err[0] ** 2 + test_err[1] ** 2 > err:
                delta = min(delta, points[i] - midpoint)
                points.insert(i, midpoint)
                values.insert(i, midvalue)
            else:
                i += 1
        points = numpy.array(points)
        values = numpy.array(values)
        dvs = values[1:] - values[:-1]
        self.length += ((dvs[:, 0] ** 2 + dvs[:, 1] ** 2) ** 0.5).sum()
        delta *= 0.5
        if curve_derivative is None:
            # Numerical differences: one-sided at the endpoints, central
            # elsewhere (only the direction matters, so no 1/(2*delta)).
            derivs = numpy.vstack(
                (
                    numpy.array(curve_function(delta)) - values[0],
                    [
                        numpy.array(curve_function(u + delta))
                        - numpy.array(curve_function(u - delta))
                        for u in points[1:-1]
                    ],
                    values[-1] - numpy.array(curve_function(1 - delta)),
                )
            )
        else:
            derivs = numpy.array([curve_derivative(u) for u in points])
        # Per-sample half-widths along the curve.
        if not callable(final_width):
            if final_width is None:
                width = numpy.full_like(points, self.w)
            else:
                width = self.w + (final_width * 0.5 - self.w) * points
                self.w = final_width * 0.5
        else:
            width = numpy.array([0.5 * final_width(u) for u in points])
            self.w = width[-1]
        # Per-sample distance between parallel paths.
        if not callable(final_distance):
            if final_distance is None:
                dist = numpy.full_like(points, self.distance)
            else:
                dist = self.distance + (final_distance - self.distance) * points
                self.distance = final_distance
        else:
            dist = numpy.array([final_distance(u) for u in points])
            self.distance = dist[-1]
        # NOTE: local `np` is the number of samples here, not the numpy module
        # (numpy is imported as `numpy` in this file).
        np = points.shape[0]
        sh = (np, 1)
        if relative:
            x0 = values + numpy.array((self.x, self.y))
        else:
            x0 = values
        # Unit normals to the curve: normalized tangents rotated 90 degrees
        # via the [::-1] swap and the (-1, 1) factor in _mpone.
        dx = (
            derivs[:, ::-1]
            * _mpone
            / ((derivs[:, 0] ** 2 + derivs[:, 1] ** 2) ** 0.5).reshape(sh)
        )
        width = width.reshape(sh)
        dist = dist.reshape(sh)
        self.x = x0[-1, 0]
        self.y = x0[-1, 1]
        self.direction = numpy.arctan2(-dx[-1, 0], dx[-1, 1])
        if max_points < 4:
            max_points = np
        else:
            # Each polygon uses two edges (out and back), so halve the budget.
            max_points = max_points // 2
        i0 = 0
        # Fracture into overlapping sample windows of at most max_points.
        while i0 < np - 1:
            i1 = min(i0 + max_points, np)
            for ii in range(self.n):
                # Offset the centerline by the normal to get both edges;
                # p2 is reversed so the polygon boundary closes properly.
                p1 = x0[i0:i1] + dx[i0:i1] * (
                    dist[i0:i1] * (ii - (self.n - 1) * 0.5) + width[i0:i1]
                )
                p2 = (
                    x0[i0:i1]
                    + dx[i0:i1]
                    * (dist[i0:i1] * (ii - (self.n - 1) * 0.5) - width[i0:i1])
                )[::-1]
                # Zero-width endpoints would duplicate a vertex: drop one.
                if width[i1 - 1, 0] == 0:
                    p2 = p2[1:]
                if width[i0, 0] == 0:
                    p1 = p1[1:]
                self.polygons.append(numpy.concatenate((p1, p2)))
            # Repeat layer/datatype lists as needed to cover all n paths.
            if isinstance(layer, list):
                self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
            else:
                self.layers.extend(layer for _ in range(self.n))
            if isinstance(datatype, list):
                self.datatypes.extend(
                    (datatype * (self.n // len(datatype) + 1))[: self.n]
                )
            else:
                self.datatypes.extend(datatype for _ in range(self.n))
            # Windows share one sample so consecutive polygons join exactly.
            i0 = i1 - 1
        return self
def bezier(
self,
points,
tolerance=0.01,
number_of_evaluations=5,
max_points=199,
final_width=None,
final_distance=None,
relative=True,
layer=0,
datatype=0,
):
"""
Add a Bezier curve to the path.
A Bezier curve is added to the path starting from its current
position and finishing at the last point in the `points` array.
Parameters
----------
points : array-like[N][2]
Control points defining the Bezier curve.
tolerance : number
Acceptable tolerance for the approximation of the curve
function by a finite number of evaluations.
number_of_evaluations : integer
Initial number of points where the curve function will be
evaluated. According to `tolerance`, more evaluations will
be performed.
max_points : integer
Elements will be fractured until each polygon has at most
`max_points`. If `max_points` is zero no fracture will
occur.
final_width : number or function
If set to a number, the paths of this segment will have
their widths linearly changed from their current value to
this one. If set to a function, it must be a function of
one argument (that varies from 0 to 1) and returns the width
of the path.
final_distance : number or function
If set to a number, the distance between paths is linearly
change from its current value to this one. If set to a
function, it must be a function of one argument (that varies
from 0 to 1) and returns the width of the path.
relative : bool
If True, all coordinates in the `points` array are used as
offsets from the current path position, i.e., if the path is
at (1, -2) and the last point in the array is (10, 25), the
constructed Bezier will end at (1 + 10, -2 + 25) = (11, 23).
Otherwise, the points are used as absolute coordinates.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
if relative:
pts = numpy.vstack(([(0, 0)], points))
else:
pts = numpy.vstack(([(self.x, self.y)], points))
dpts = (pts.shape[0] - 1) * (pts[1:] - pts[:-1])
self.parametric(
_func_bezier(pts),
_func_bezier(dpts),
tolerance,
number_of_evaluations,
max_points,
final_width,
final_distance,
relative,
layer,
datatype,
)
return self
def smooth(
self,
points,
angles=None,
curl_start=1,
curl_end=1,
t_in=1,
t_out=1,
cycle=False,
tolerance=0.01,
number_of_evaluations=5,
max_points=199,
final_widths=None,
final_distances=None,
relative=True,
layer=0,
datatype=0,
):
"""
Add a smooth interpolating curve through the given points.
Uses the Hobby algorithm [1]_ to calculate a smooth
interpolating curve made of cubic Bezier segments between each
pair of points.
Parameters
----------
points : array-like[N][2]
Vertices in the interpolating curve.
angles : array-like[N + 1] or None
Tangent angles at each point (in *radians*). Any angles
defined as None are automatically calculated.
curl_start : number
Ratio between the mock curvatures at the first point and at
its neighbor. A value of 1 renders the first segment a good
approximation for a circular arc. A value of 0 will better
approximate a straight segment. It has no effect for closed
curves or when an angle is defined for the first point.
curl_end : number
Ratio between the mock curvatures at the last point and at
its neighbor. It has no effect for closed curves or when an
angle is defined for the first point.
t_in : number or array-like[N + 1]
Tension parameter when arriving at each point. One value
per point or a single value used for all points.
t_out : number or array-like[N + 1]
Tension parameter when leaving each point. One value per
point or a single value used for all points.
cycle : bool
If True, calculates control points for a closed curve,
with an additional segment connecting the first and last
points.
tolerance : number
Acceptable tolerance for the approximation of the curve
function by a finite number of evaluations.
number_of_evaluations : integer
Initial number of points where the curve function will be
evaluated. According to `tolerance`, more evaluations will
be performed.
max_points : integer
Elements will be fractured until each polygon has at most
`max_points`. If `max_points` is zero no fracture will
occur.
final_widths : array-like[M]
Each element corresponds to the final width of a segment in
the whole curve. If an element is a number, the paths of
this segment will have their widths linearly changed to this
value. If a function, it must be a function of one argument
(that varies from 0 to 1) and returns the width of the path.
The length of the array must be equal to the number of
segments in the curve, i.e., M = N - 1 for an open curve and
M = N for a closed one.
final_distances : array-like[M]
Each element corresponds to the final distance between paths
of a segment in the whole curve. If an element is a number,
the distance between paths is linearly change to this value.
If a function, it must be a function of one argument (that
varies from 0 to 1) and returns the width of the path. The
length of the array must be equal to the number of segments
in the curve, i.e., M = N - 1 for an open curve and M = N
for a closed one.
relative : bool
If True, all coordinates in the `points` array are used as
offsets from the current path position, i.e., if the path is
at (1, -2) and the last point in the array is (10, 25), the
constructed curve will end at (1 + 10, -2 + 25) = (11, 23).
Otherwise, the points are used as absolute coordinates.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
References
----------
.. [1] <NAME>. *Discrete Comput. Geom.* (1986) 1: 123.
`DOI: 10.1007/BF02187690
<https://doi.org/10.1007/BF02187690>`_
"""
if relative:
points = numpy.vstack(([(0.0, 0.0)], points)) + numpy.array(
(self.x, self.y)
)
else:
points = numpy.vstack(([(self.x, self.y)], points))
cta, ctb = _hobby(points, angles, curl_start, curl_end, t_in, t_out, cycle)
if final_widths is None:
final_widths = [None] * cta.shape[0]
if final_distances is None:
final_distances = [None] * cta.shape[0]
for i in range(points.shape[0] - 1):
self.bezier(
[cta[i], ctb[i], points[i + 1]],
tolerance,
number_of_evaluations,
max_points,
final_widths[i],
final_distances[i],
False,
layer,
datatype,
)
if cycle:
self.bezier(
[cta[-1], ctb[-1], points[0]],
tolerance,
number_of_evaluations,
max_points,
final_widths[-1],
final_distances[-1],
False,
layer,
datatype,
)
return self
# Sign pair (+1, -1) used to flip coordinate components in the
# line-intersection (miter join) formula inside PolyPath.__init__.
_pmone = numpy.array((1.0, -1.0))
class L1Path(PolygonSet):
    """
    Series of geometric objects that form a path or a collection of
    parallel paths with Manhattan geometry.

    .. deprecated:: 1.4
       `L1Path` is deprecated in favor of FlexPath and will be removed
       in a future version of Gdspy.

    Parameters
    ----------
    initial_point : array-like[2]
        Starting position of the path.
    direction : '+x', '+y', '-x', '-y'
        Starting direction of the path.
    width : number
        The initial width of each path.
    length : array-like
        Lengths of each section to add.
    turn : array-like
        Direction to turn before each section. The sign indicate the
        turn direction (ccw is positive), and the modulus is a
        multiplicative factor for the path width after each turn. Must
        have 1 element less then `length`.
    number_of_paths : positive integer
        Number of parallel paths to create simultaneously.
    distance : number
        Distance between the centers of adjacent paths.
    max_points : integer
        The paths will be fractured in polygons with at most
        `max_points` (must be at least 6). If `max_points` is zero no
        fracture will occur.
    layer : integer, list
        The GDSII layer numbers for the elements of each path. If the
        number of layers in the list is less than the number of paths,
        the list is repeated.
    datatype : integer, list
        The GDSII datatype for the elements of each path (between 0 and
        255). If the number of datatypes in the list is less than the
        number of paths, the list is repeated.

    Attributes
    ----------
    x : number
        Final position of the path in the x direction.
    y : number
        Final position of the path in the y direction.
    direction : '+x', '-x', '+y', '-y' or number
        Direction or angle (in *radians*) the path points to. The
        numerical angle is returned only after a rotation of the object.
    properties : {integer: string} dictionary
        Properties for this path.

    Examples
    --------
    >>> length = [10, 30, 15, 15, 15, 15, 10]
    >>> turn = [1, -1, -1, 3, -1, 1]
    >>> l1path = gdspy.L1Path((0, 0), '+x', 2, length, turn)
    >>> myCell.add(l1path)
    """

    __slots__ = "layers", "datatypes", "polygons", "direction", "x", "y", "properties"

    def __init__(
        self,
        initial_point,
        direction,
        width,
        length,
        turn,
        number_of_paths=1,
        distance=0,
        max_points=199,
        layer=0,
        datatype=0,
    ):
        warnings.warn(
            "[GDSPY] L1Path is deprecated favor of FlexPath and will be "
            "removed in a future version of Gdspy.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        if not isinstance(layer, list):
            layer = [layer]
        if not isinstance(datatype, list):
            datatype = [datatype]
        # Repeat layer/datatype so there is exactly one entry per path.
        layer = (layer * (number_of_paths // len(layer) + 1))[:number_of_paths]
        datatype = (datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths]
        w = width * 0.5  # half-width of each path
        # Remaining vertex budget before the next polygon fracture.
        points = len(turn) + 1 if max_points == 0 else max_points // 2 - 1
        # paths[ii] holds the two edge polylines (lower, upper) of path ii.
        paths = [[[], []] for ii in range(number_of_paths)]
        self.polygons = []
        self.layers = []
        self.datatypes = []
        self.properties = {}
        self.x = initial_point[0]
        self.y = initial_point[1]
        # Encode direction as 0..3 == +x, +y, -x, -y and seed both edges
        # of every parallel path at the starting point.
        if direction == "+x":
            direction = 0
            for ii in range(number_of_paths):
                d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
                paths[ii][0].append((initial_point[0], d0 + initial_point[1] - w))
                paths[ii][1].append((initial_point[0], d0 + initial_point[1] + w))
        elif direction == "+y":
            direction = 1
            for ii in range(number_of_paths):
                d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
                paths[ii][0].append((d0 + initial_point[0] + w, initial_point[1]))
                paths[ii][1].append((d0 + initial_point[0] - w, initial_point[1]))
        elif direction == "-x":
            direction = 2
            for ii in range(number_of_paths):
                d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
                paths[ii][0].append((initial_point[0], d0 + initial_point[1] + w))
                paths[ii][1].append((initial_point[0], d0 + initial_point[1] - w))
        elif direction == "-y":
            direction = 3
            for ii in range(number_of_paths):
                d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
                paths[ii][0].append((d0 + initial_point[0] - w, initial_point[1]))
                paths[ii][1].append((d0 + initial_point[0] + w, initial_point[1]))
        for jj in range(len(turn)):
            points -= 1
            # Advance along the current direction and place the corner
            # vertices of the upcoming turn on both edges of every path.
            if direction == 0:
                for ii in range(number_of_paths):
                    d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
                    paths[ii][0].append(
                        (self.x + length[jj] - (d0 - w) * turn[jj], paths[ii][0][-1][1])
                    )
                    paths[ii][1].append(
                        (self.x + length[jj] - (d0 + w) * turn[jj], paths[ii][1][-1][1])
                    )
                self.x += length[jj]
            elif direction == 1:
                for ii in range(number_of_paths):
                    d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
                    paths[ii][0].append(
                        (paths[ii][0][-1][0], self.y + length[jj] - (d0 - w) * turn[jj])
                    )
                    paths[ii][1].append(
                        (paths[ii][1][-1][0], self.y + length[jj] - (d0 + w) * turn[jj])
                    )
                self.y += length[jj]
            elif direction == 2:
                for ii in range(number_of_paths):
                    d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
                    paths[ii][0].append(
                        (self.x - length[jj] - (d0 + w) * turn[jj], paths[ii][0][-1][1])
                    )
                    paths[ii][1].append(
                        (self.x - length[jj] - (d0 - w) * turn[jj], paths[ii][1][-1][1])
                    )
                self.x -= length[jj]
            elif direction == 3:
                for ii in range(number_of_paths):
                    d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
                    paths[ii][0].append(
                        (paths[ii][0][-1][0], self.y - length[jj] - (d0 + w) * turn[jj])
                    )
                    paths[ii][1].append(
                        (paths[ii][1][-1][0], self.y - length[jj] - (d0 - w) * turn[jj])
                    )
                self.y -= length[jj]
            if points == 0:
                # Vertex budget exhausted: emit each path as a closed
                # polygon and restart its edges from the current corner.
                for p in paths:
                    if direction % 2 == 0:
                        # Moving along x: close the polygon at the x
                        # coordinate where the two edges are nearest.
                        min_dist = 1e300
                        for x1 in [p[0][-2][0], p[1][-2][0]]:
                            for x2 in [p[0][-1][0], p[1][-1][0]]:
                                if abs(x1 - x2) < min_dist:
                                    x0 = 0.5 * (x1 + x2)
                                    min_dist = abs(x1 - x2)
                        p0 = (x0, p[0][-1][1])
                        p1 = (x0, p[1][-1][1])
                    else:
                        # Moving along y: symmetric case.
                        min_dist = 1e300
                        for y1 in [p[0][-2][1], p[1][-2][1]]:
                            for y2 in [p[0][-1][1], p[1][-1][1]]:
                                if abs(y1 - y2) < min_dist:
                                    y0 = 0.5 * (y1 + y2)
                                    min_dist = abs(y1 - y2)
                        p0 = (p[0][-1][0], y0)
                        p1 = (p[1][-1][0], y0)
                    self.polygons.append(
                        numpy.array(p[0][:-1] + [p0, p1] + p[1][-2::-1])
                    )
                    p[0] = [p0, p[0][-1]]
                    p[1] = [p1, p[1][-1]]
                self.layers.extend(layer)
                self.datatypes.extend(datatype)
                points = max_points // 2 - 2
            if turn[jj] > 0:
                direction = (direction + 1) % 4
            else:
                direction = (direction - 1) % 4
        # Final straight segment after the last turn.
        if direction == 0:
            for ii in range(number_of_paths):
                paths[ii][0].append((self.x + length[-1], paths[ii][0][-1][1]))
                paths[ii][1].append((self.x + length[-1], paths[ii][1][-1][1]))
            self.x += length[-1]
        elif direction == 1:
            for ii in range(number_of_paths):
                paths[ii][0].append((paths[ii][0][-1][0], self.y + length[-1]))
                paths[ii][1].append((paths[ii][1][-1][0], self.y + length[-1]))
            self.y += length[-1]
        elif direction == 2:
            for ii in range(number_of_paths):
                paths[ii][0].append((self.x - length[-1], paths[ii][0][-1][1]))
                paths[ii][1].append((self.x - length[-1], paths[ii][1][-1][1]))
            self.x -= length[-1]
        elif direction == 3:
            for ii in range(number_of_paths):
                paths[ii][0].append((paths[ii][0][-1][0], self.y - length[-1]))
                paths[ii][1].append((paths[ii][1][-1][0], self.y - length[-1]))
            # Bug fix: this branch used `self.y -= length[jj]` (i.e.
            # length[-2]) while the vertices above and the three sibling
            # branches use length[-1], leaving the tracked end position
            # inconsistent with the generated polygons whenever the last
            # two segment lengths differ.
            self.y -= length[-1]
        self.direction = ["+x", "+y", "-x", "-y"][direction]
        # Close all remaining open paths.
        self.polygons.extend(numpy.array(p[0] + p[1][::-1]) for p in paths)
        self.layers.extend(layer)
        self.datatypes.extend(datatype)

    def __str__(self):
        return "L1Path (end at ({}, {}) towards {}, {} polygons, {} vertices, layers {}, datatypes {})".format(
            self.x,
            self.y,
            self.direction,
            len(self.polygons),
            sum([len(p) for p in self.polygons]),
            list(set(self.layers)),
            list(set(self.datatypes)),
        )

    def rotate(self, angle, center=(0, 0)):
        """
        Rotate this object.

        Parameters
        ----------
        angle : number
            The angle of rotation (in *radians*).
        center : array-like[2]
            Center point for the rotation.

        Returns
        -------
        out : `L1Path`
            This object.
        """
        ca = numpy.cos(angle)
        sa = numpy.sin(angle) * _mpone
        c0 = numpy.array(center)
        if isinstance(self.direction, basestring):
            self.direction = _directions_dict[self.direction] * numpy.pi
        self.direction += angle
        cur = numpy.array((self.x, self.y)) - c0
        self.x, self.y = cur * ca + cur[::-1] * sa + c0
        self.polygons = [
            (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
            for points in self.polygons
        ]
        return self
class PolyPath(PolygonSet):
    """
    Series of geometric objects that form a polygonal path or a
    collection of parallel polygonal paths.
    .. deprecated:: 1.4
       `PolyPath` is deprecated in favor of FlexPath and will be removed
       in a future version of Gdspy.
    Parameters
    ----------
    points : array-like[N][2]
        Points along the center of the path.
    width : number or array-like[N]
        Width of the path. If an array is given, width at each
        endpoint.
    number_of_paths : positive integer
        Number of parallel paths to create simultaneously.
    distance : number or array-like[N]
        Distance between the centers of adjacent paths. If an array is
        given, distance at each endpoint.
    corners : 'miter' or 'bevel'
        Type of joins.
    ends : 'flush', 'round', 'extended'
        Type of end caps for the paths.
    max_points : integer
        The paths will be fractured in polygons with at most
        `max_points` (must be at least 4). If `max_points` is zero no
        fracture will occur.
    layer : integer, list
        The GDSII layer numbers for the elements of each path. If the
        number of layers in the list is less than the number of paths,
        the list is repeated.
    datatype : integer, list
        The GDSII datatype for the elements of each path (between 0 and
        255). If the number of datatypes in the list is less than the
        number of paths, the list is repeated.
    Notes
    -----
    The bevel join will give strange results if the number of paths is
    greater than 1.
    """
    __slots__ = "layers", "datatypes", "polygons", "properties"
    def __init__(
        self,
        points,
        width,
        number_of_paths=1,
        distance=0,
        corners="miter",
        ends="flush",
        max_points=199,
        layer=0,
        datatype=0,
    ):
        warnings.warn(
            "[GDSPY] PolyPath is deprecated favor of FlexPath and will "
            "be removed in a future version of Gdspy.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        # Normalize layer/datatype into lists; they are repeated later
        # so there is one entry per parallel path.
        if not isinstance(layer, list):
            layer = [layer]
        if not isinstance(datatype, list):
            datatype = [datatype]
        # `width` is stored as half-widths; a scalar becomes a
        # one-element array indexed modulo len_w per vertex.
        if hasattr(width, "__iter__"):
            width = numpy.array(width) * 0.5
        else:
            width = numpy.array([width * 0.5])
        len_w = len(width)
        if hasattr(distance, "__iter__"):
            distance = numpy.array(distance)
        else:
            distance = numpy.array([distance])
        len_d = len(distance)
        points = numpy.array(points, dtype=float)
        self.polygons = []
        self.layers = []
        self.datatypes = []
        self.properties = {}
        # Fast path: a single straight segment and a single path can be
        # emitted directly as one quad (or triangle if a width is zero).
        if points.shape[0] == 2 and number_of_paths == 1:
            v = points[1] - points[0]
            # Normalize to a unit vector along the segment.
            v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
            w0 = width[0]
            w1 = width[1 % len_w]
            if ends == "round":
                # Semi-circular caps centered on both endpoints.
                a = numpy.arctan2(v[1], v[0]) + _halfpi
                self.polygons.append(
                    Round(
                        points[0],
                        w0,
                        initial_angle=a,
                        final_angle=a + numpy.pi,
                        number_of_points=33,
                    ).polygons[0]
                )
                self.polygons.append(
                    Round(
                        points[1],
                        w1,
                        initial_angle=a - numpy.pi,
                        final_angle=a,
                        number_of_points=33,
                    ).polygons[0]
                )
                self.layers.extend(layer[:1] * 2)
                self.datatypes.extend(datatype[:1] * 2)
            if ends == "extended":
                # Push the endpoints outwards by the local half-width.
                points[0] = points[0] - v * w0
                points[1] = points[1] + v * w1
            # Unit normal to the segment.
            u = numpy.array((-v[1], v[0]))
            if w0 == 0:
                self.polygons.append(
                    numpy.array((points[0], points[1] - u * w1, points[1] + u * w1))
                )
            elif w1 == 0:
                self.polygons.append(
                    numpy.array((points[0] + u * w0, points[0] - u * w0, points[1]))
                )
            else:
                self.polygons.append(
                    numpy.array(
                        (
                            points[0] + u * w0,
                            points[0] - u * w0,
                            points[1] - u * w1,
                            points[1] + u * w1,
                        )
                    )
                )
            self.layers.append(layer[0])
            self.datatypes.append(datatype[0])
            return
        # Validate `corners`, accepting the legacy integer encoding with
        # a deprecation warning.
        if corners not in ["miter", "bevel"]:
            if corners in [0, 1]:
                corners = ["miter", "bevel"][corners]
                warnings.warn(
                    "[GDSPY] Argument corners must be one of 'miter' or 'bevel'.",
                    category=DeprecationWarning,
                    stacklevel=2,
                )
            else:
                raise ValueError(
                    "[GDSPY] Argument corners must be one of 'miter' or 'bevel'."
                )
        bevel = corners == "bevel"
        # Validate `ends`, accepting the legacy integer encoding with a
        # deprecation warning.
        if ends not in ["flush", "round", "extended"]:
            if ends in [0, 1, 2]:
                ends = ["flush", "round", "extended"][ends]
                warnings.warn(
                    "[GDSPY] Argument ends must be one of 'flush', "
                    "'round', or 'extended'.",
                    category=DeprecationWarning,
                    stacklevel=2,
                )
            else:
                raise ValueError(
                    "[GDSPY] Argument ends must be one of 'flush', "
                    "'round', or 'extended'."
                )
        if ends == "extended":
            # Move the first and last points outwards along the curve by
            # the local half-width.
            v = points[0] - points[1]
            v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
            points[0] = points[0] + v * width[0]
            v = points[-1] - points[-2]
            v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
            points[-1] = points[-1] + v * width[(points.shape[0] - 1) % len_w]
        elif ends == "round":
            # Semi-circular caps for every parallel path at both ends of
            # the whole curve (offsets spread the parallel paths along
            # the normal direction v0/v1).
            v0 = points[1] - points[0]
            angle0 = numpy.arctan2(v0[1], v0[0]) + _halfpi
            v0 = numpy.array((-v0[1], v0[0])) / (v0[0] ** 2 + v0[1] ** 2) ** 0.5
            d0 = 0.5 * (number_of_paths - 1) * distance[0]
            v1 = points[-1] - points[-2]
            angle1 = numpy.arctan2(v1[1], v1[0]) - _halfpi
            v1 = numpy.array((-v1[1], v1[0])) / (v1[0] ** 2 + v1[1] ** 2) ** 0.5
            j1w = (points.shape[0] - 1) % len_w
            j1d = (points.shape[0] - 1) % len_d
            d1 = 0.5 * (number_of_paths - 1) * distance[j1d]
            self.polygons.extend(
                (
                    Round(
                        points[0] + v0 * (ii * distance[0] - d0),
                        width[0],
                        initial_angle=angle0,
                        final_angle=angle0 + numpy.pi,
                        number_of_points=33,
                    ).polygons[0]
                    for ii in range(number_of_paths)
                )
            )
            self.polygons.extend(
                (
                    Round(
                        points[-1] + v1 * (ii * distance[j1d] - d1),
                        width[j1w],
                        initial_angle=angle1,
                        final_angle=angle1 + numpy.pi,
                        number_of_points=33,
                    ).polygons[0]
                )
                for ii in range(number_of_paths)
            )
            self.layers.extend(
                ((layer * (number_of_paths // len(layer) + 1))[:number_of_paths]) * 2
            )
            self.datatypes.extend(
                ((datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths])
                * 2
            )
        # Seed the two edge polylines of every parallel path at the
        # first point; p1 holds the candidate edge points at the next
        # vertex (one (lower, upper) pair per path).
        v = points[1] - points[0]
        v = numpy.array((-v[1], v[0])) / (v[0] ** 2 + v[1] ** 2) ** 0.5
        d0 = 0.5 * (number_of_paths - 1) * distance[0]
        d1 = 0.5 * (number_of_paths - 1) * distance[1 % len_d]
        paths = [
            [
                [points[0] + (ii * distance[0] - d0 - width[0]) * v],
                [points[0] + (ii * distance[0] - d0 + width[0]) * v],
            ]
            for ii in range(number_of_paths)
        ]
        p1 = [
            (
                points[1] + (ii * distance[1 % len_d] - d1 - width[1 % len_w]) * v,
                points[1] + (ii * distance[1 % len_d] - d1 + width[1 % len_w]) * v,
            )
            for ii in range(number_of_paths)
        ]
        # Walk the interior vertices, joining consecutive segments on
        # both edges of every parallel path.
        for jj in range(1, points.shape[0] - 1):
            j0d = jj % len_d
            j0w = jj % len_w
            j1d = (jj + 1) % len_d
            j1w = (jj + 1) % len_w
            v = points[jj + 1] - points[jj]
            v = numpy.array((-v[1], v[0])) / (v[0] ** 2 + v[1] ** 2) ** 0.5
            d0 = d1
            d1 = 0.5 * (number_of_paths - 1) * distance[j1d]
            p0 = p1
            p1 = []
            pp = []
            for ii in range(number_of_paths):
                # pp: edge points of the outgoing segment at this vertex;
                # p1: edge points of the outgoing segment at the next one.
                pp.append(
                    (
                        points[jj] + (ii * distance[j0d] - d0 - width[j0w]) * v,
                        points[jj] + (ii * distance[j0d] - d0 + width[j0w]) * v,
                    )
                )
                p1.append(
                    (
                        points[jj + 1] + (ii * distance[j1d] - d1 - width[j1w]) * v,
                        points[jj + 1] + (ii * distance[j1d] - d1 + width[j1w]) * v,
                    )
                )
                for kk in (0, 1):
                    # Intersect the incoming and outgoing edge lines
                    # (cross product `vec` tests for parallelism); the
                    # intersection p is the miter vertex.
                    p0m = paths[ii][kk][-1] - p0[ii][kk]
                    p1p = pp[ii][kk] - p1[ii][kk]
                    vec = p0m[0] * p1p[1] - p1p[0] * p0m[1]
                    if abs(vec) > 1e-30:
                        p = (
                            _pmone
                            * (
                                p0m * p1p[::-1] * p1[ii][kk]
                                - p1p * p0m[::-1] * p0[ii][kk]
                                + p0m * p1p * (p0[ii][kk][::-1] - p1[ii][kk][::-1])
                            )
                            / vec
                        )
                        l0 = (p - pp[ii][kk]) * p1p
                        l1 = (p - p0[ii][kk]) * p0m
                        # Bevel join: keep both segment endpoints instead
                        # of the miter vertex when the join is convex.
                        if bevel and l0[0] + l0[1] > 0 and l1[0] + l1[1] < 0:
                            paths[ii][kk].append(p0[ii][kk])
                            paths[ii][kk].append(pp[ii][kk])
                        else:
                            paths[ii][kk].append(p)
                # Fracture: when the accumulated vertices would exceed
                # max_points, flush this path as a polygon and restart
                # its edges from the last vertex.
                if (
                    max_points > 0
                    and len(paths[ii][0]) + len(paths[ii][1]) + 3 > max_points
                ):
                    diff = paths[ii][0][0] - paths[ii][1][0]
                    if diff[0] ** 2 + diff[1] ** 2 == 0:
                        paths[ii][1] = paths[ii][1][1:]
                    diff = paths[ii][0][-1] - paths[ii][1][-1]
                    if diff[0] ** 2 + diff[1] ** 2 == 0:
                        self.polygons.append(
                            numpy.array(paths[ii][0] + paths[ii][1][-2::-1])
                        )
                    else:
                        self.polygons.append(
                            numpy.array(paths[ii][0] + paths[ii][1][::-1])
                        )
                    paths[ii][0] = paths[ii][0][-1:]
                    paths[ii][1] = paths[ii][1][-1:]
                    self.layers.append(layer[ii % len(layer)])
                    self.datatypes.append(datatype[ii % len(datatype)])
        # Append the final vertex pair (skipping degenerate duplicates)
        # and close every remaining open path into a polygon.
        for ii in range(number_of_paths):
            diff = paths[ii][0][0] - paths[ii][1][0]
            if diff[0] ** 2 + diff[1] ** 2 == 0:
                paths[ii][1] = paths[ii][1][1:]
            diff = p1[ii][0] - p1[ii][1]
            if diff[0] ** 2 + diff[1] ** 2 != 0:
                paths[ii][0].append(p1[ii][0])
                paths[ii][1].append(p1[ii][1])
        self.polygons.extend(numpy.array(pol[0] + pol[1][::-1]) for pol in paths)
        self.layers.extend(
            (layer * (number_of_paths // len(layer) + 1))[:number_of_paths]
        )
        self.datatypes.extend(
            (datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths]
        )
    def __str__(self):
        # Summary used for debugging; layers/datatypes are deduplicated.
        return "PolyPath ({} polygons, {} vertices, layers {}, datatypes {})".format(
            len(self.polygons),
            sum([len(p) for p in self.polygons]),
            list(set(self.layers)),
            list(set(self.datatypes)),
        )
from gdspy.path import _func_bezier
| [
"numpy.arctan2",
"future.standard_library.install_aliases",
"numpy.empty",
"gdspy.clipper._chop",
"builtins.round",
"numpy.sin",
"numpy.arange",
"numpy.inner",
"numpy.round",
"builtins.range",
"numpy.full_like",
"numpy.format_float_positional",
"struct.pack",
"numpy.tan",
"numpy.linspace... | [((1502, 1526), 'numpy.array', 'numpy.array', (['(-1.0, 1.0)'], {}), '((-1.0, 1.0))\n', (1513, 1526), False, 'import numpy\n'), ((96425, 96449), 'numpy.array', 'numpy.array', (['(1.0, -1.0)'], {}), '((1.0, -1.0))\n', (96436, 96449), False, 'import numpy\n'), ((970, 1004), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (1002, 1004), False, 'from future import standard_library\n'), ((4283, 4299), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (4292, 4299), False, 'import numpy\n'), ((4352, 4371), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (4363, 4371), False, 'import numpy\n'), ((5104, 5123), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (5115, 5123), False, 'import numpy\n'), ((8965, 9012), 'builtins.zip', 'zip', (['self.polygons', 'self.layers', 'self.datatypes'], {}), '(self.polygons, self.layers, self.datatypes)\n', (8968, 9012), False, 'from builtins import zip\n'), ((15039, 15061), 'numpy.isscalar', 'numpy.isscalar', (['radius'], {}), '(radius)\n', (15053, 15061), False, 'import numpy\n'), ((19556, 19577), 'numpy.array', 'numpy.array', (['(dx, dy)'], {}), '((dx, dy))\n', (19567, 19577), False, 'import numpy\n'), ((20092, 20107), 'numpy.array', 'numpy.array', (['p1'], {}), '(p1)\n', (20103, 20107), False, 'import numpy\n'), ((27758, 27812), 'numpy.linspace', 'numpy.linspace', (['initial_angle', 'final_angle', '(pieces + 1)'], {}), '(initial_angle, final_angle, pieces + 1)\n', (27772, 27812), False, 'import numpy\n'), ((27909, 27922), 'builtins.range', 'range', (['pieces'], {}), '(pieces)\n', (27914, 27922), False, 'from builtins import range\n'), ((61689, 61710), 'numpy.array', 'numpy.array', (['(dx, dy)'], {}), '((dx, dy))\n', (61700, 61710), False, 'import numpy\n'), ((62215, 62231), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (62224, 62231), False, 'import numpy\n'), ((62284, 62303), 'numpy.array', 'numpy.array', (['center'], {}), 
'(center)\n', (62295, 62303), False, 'import numpy\n'), ((63389, 63408), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (63400, 63408), False, 'import numpy\n'), ((64158, 64173), 'numpy.array', 'numpy.array', (['p1'], {}), '(p1)\n', (64169, 64173), False, 'import numpy\n'), ((72745, 72786), 'numpy.linspace', 'numpy.linspace', (['old_w', 'self.w', '(pieces + 1)'], {}), '(old_w, self.w, pieces + 1)\n', (72759, 72786), False, 'import numpy\n'), ((72807, 72862), 'numpy.linspace', 'numpy.linspace', (['old_distance', 'self.distance', '(pieces + 1)'], {}), '(old_distance, self.distance, pieces + 1)\n', (72821, 72862), False, 'import numpy\n'), ((72880, 72934), 'numpy.linspace', 'numpy.linspace', (['initial_angle', 'final_angle', '(pieces + 1)'], {}), '(initial_angle, final_angle, pieces + 1)\n', (72894, 72934), False, 'import numpy\n'), ((83576, 83595), 'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (83587, 83595), False, 'import numpy\n'), ((83613, 83632), 'numpy.array', 'numpy.array', (['values'], {}), '(values)\n', (83624, 83632), False, 'import numpy\n'), ((85583, 85619), 'numpy.arctan2', 'numpy.arctan2', (['(-dx[-1, 0])', 'dx[-1, 1]'], {}), '(-dx[-1, 0], dx[-1, 1])\n', (85596, 85619), False, 'import numpy\n'), ((95450, 95514), 'gdspy.hobby._hobby', '_hobby', (['points', 'angles', 'curl_start', 'curl_end', 't_in', 't_out', 'cycle'], {}), '(points, angles, curl_start, curl_end, t_in, t_out, cycle)\n', (95456, 95514), False, 'from gdspy.hobby import _hobby\n'), ((95702, 95728), 'builtins.range', 'range', (['(points.shape[0] - 1)'], {}), '(points.shape[0] - 1)\n', (95707, 95728), False, 'from builtins import range\n'), ((99063, 99229), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] L1Path is deprecated favor of FlexPath and will be removed in a future version of Gdspy."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), "(\n '[GDSPY] L1Path is deprecated favor of FlexPath and will be removed in a future version of Gdspy.'\n , 
category=DeprecationWarning, stacklevel=2)\n", (99076, 99229), False, 'import warnings\n'), ((107227, 107243), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (107236, 107243), False, 'import numpy\n'), ((107296, 107315), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (107307, 107315), False, 'import numpy\n'), ((109652, 109820), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] PolyPath is deprecated favor of FlexPath and will be removed in a future version of Gdspy."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), "(\n '[GDSPY] PolyPath is deprecated favor of FlexPath and will be removed in a future version of Gdspy.'\n , category=DeprecationWarning, stacklevel=2)\n", (109665, 109820), False, 'import warnings\n'), ((110385, 110417), 'numpy.array', 'numpy.array', (['points'], {'dtype': 'float'}), '(points, dtype=float)\n', (110396, 110417), False, 'import numpy\n'), ((116534, 116563), 'builtins.range', 'range', (['(1)', '(points.shape[0] - 1)'], {}), '(1, points.shape[0] - 1)\n', (116539, 116563), False, 'from builtins import range\n'), ((119562, 119584), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (119567, 119584), False, 'from builtins import range\n'), ((2646, 2660), 'numpy.array', 'numpy.array', (['p'], {}), '(p)\n', (2657, 2660), False, 'import numpy\n'), ((4313, 4329), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (4322, 4329), False, 'import numpy\n'), ((5166, 5195), 'numpy.array', 'numpy.array', (['(scalex, scaley)'], {}), '((scalex, scaley))\n', (5177, 5195), False, 'import numpy\n'), ((16645, 16670), 'numpy.flatnonzero', 'numpy.flatnonzero', (['length'], {}), '(length)\n', (16662, 16670), False, 'import numpy\n'), ((17244, 17267), 'numpy.flatnonzero', 'numpy.flatnonzero', (['norm'], {}), '(norm)\n', (17261, 17267), False, 'import numpy\n'), ((17433, 17468), 'numpy.arccos', 'numpy.arccos', (['(dot[:, 0] + dot[:, 1])'], {}), '(dot[:, 0] + dot[:, 1])\n', (17445, 17468), False, 
'import numpy\n'), ((17486, 17508), 'numpy.cos', 'numpy.cos', (['(theta * 0.5)'], {}), '(theta * 0.5)\n', (17495, 17508), False, 'import numpy\n'), ((17526, 17548), 'numpy.tan', 'numpy.tan', (['(theta * 0.5)'], {}), '(theta * 0.5)\n', (17535, 17548), False, 'import numpy\n'), ((18998, 19021), 'numpy.array', 'numpy.array', (['new_points'], {}), '(new_points)\n', (19009, 19021), False, 'import numpy\n'), ((20122, 20137), 'numpy.array', 'numpy.array', (['p2'], {}), '(p2)\n', (20133, 20137), False, 'import numpy\n'), ((21303, 21322), 'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (21314, 21322), False, 'import numpy\n'), ((22314, 22427), 'numpy.array', 'numpy.array', (['[[point1[0], point1[1]], [point1[0], point2[1]], [point2[0], point2[1]], [\n point2[0], point1[1]]]'], {}), '([[point1[0], point1[1]], [point1[0], point2[1]], [point2[0],\n point2[1]], [point2[0], point1[1]]])\n', (22325, 22427), False, 'import numpy\n'), ((26501, 26660), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Use of a floating number as number_of_points is deprecated in favor of tolerance."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), "(\n '[GDSPY] Use of a floating number as number_of_points is deprecated in favor of tolerance.'\n , category=DeprecationWarning, stacklevel=2)\n", (26514, 26660), False, 'import warnings\n'), ((27557, 27591), 'numpy.zeros', 'numpy.zeros', (['(number_of_points, 2)'], {}), '((number_of_points, 2))\n', (27568, 27591), False, 'import numpy\n'), ((56866, 56882), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (56875, 56882), False, 'import numpy\n'), ((56900, 56916), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (56909, 56916), False, 'import numpy\n'), ((62245, 62261), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (62254, 62261), False, 'import numpy\n'), ((62474, 62503), 'numpy.array', 'numpy.array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (62485, 62503), False, 'import numpy\n'), ((63451, 
63480), 'numpy.array', 'numpy.array', (['(scalex, scaley)'], {}), '((scalex, scaley))\n', (63462, 63480), False, 'import numpy\n'), ((64188, 64203), 'numpy.array', 'numpy.array', (['p2'], {}), '(p2)\n', (64199, 64203), False, 'import numpy\n'), ((67241, 67254), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (67246, 67254), False, 'from builtins import range\n'), ((71711, 71870), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Use of a floating number as number_of_points is deprecated in favor of tolerance."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), "(\n '[GDSPY] Use of a floating number as number_of_points is deprecated in favor of tolerance.'\n , category=DeprecationWarning, stacklevel=2)\n", (71724, 71870), False, 'import warnings\n'), ((72999, 73012), 'builtins.range', 'range', (['pieces'], {}), '(pieces)\n', (73004, 73012), False, 'from builtins import range\n'), ((82951, 82994), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', 'number_of_evaluations'], {}), '(0, 1, number_of_evaluations)\n', (82965, 82994), False, 'import numpy\n'), ((85838, 85851), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (85843, 85851), False, 'from builtins import range\n'), ((89774, 89806), 'numpy.vstack', 'numpy.vstack', (['([(0, 0)], points)'], {}), '(([(0, 0)], points))\n', (89786, 89806), False, 'import numpy\n'), ((89839, 89881), 'numpy.vstack', 'numpy.vstack', (['([(self.x, self.y)], points)'], {}), '(([(self.x, self.y)], points))\n', (89851, 89881), False, 'import numpy\n'), ((89976, 89993), 'gdspy.path._func_bezier', '_func_bezier', (['pts'], {}), '(pts)\n', (89988, 89993), False, 'from gdspy.path import _func_bezier\n'), ((90007, 90025), 'gdspy.path._func_bezier', '_func_bezier', (['dpts'], {}), '(dpts)\n', (90019, 90025), False, 'from gdspy.path import _func_bezier\n'), ((95388, 95430), 'numpy.vstack', 'numpy.vstack', (['([(self.x, self.y)], points)'], {}), '(([(self.x, self.y)], points))\n', (95400, 95430), False, 'import 
numpy\n'), ((100010, 100032), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (100015, 100032), False, 'from builtins import range\n'), ((104945, 104967), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (104950, 104967), False, 'from builtins import range\n'), ((107257, 107273), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (107266, 107273), False, 'import numpy\n'), ((107486, 107515), 'numpy.array', 'numpy.array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (107497, 107515), False, 'import numpy\n'), ((110136, 110162), 'numpy.array', 'numpy.array', (['[width * 0.5]'], {}), '([width * 0.5])\n', (110147, 110162), False, 'import numpy\n'), ((110255, 110276), 'numpy.array', 'numpy.array', (['distance'], {}), '(distance)\n', (110266, 110276), False, 'import numpy\n'), ((110314, 110337), 'numpy.array', 'numpy.array', (['[distance]'], {}), '([distance])\n', (110325, 110337), False, 'import numpy\n'), ((111705, 111731), 'numpy.array', 'numpy.array', (['(-v[1], v[0])'], {}), '((-v[1], v[0]))\n', (111716, 111731), False, 'import numpy\n'), ((115831, 115857), 'numpy.array', 'numpy.array', (['(-v[1], v[0])'], {}), '((-v[1], v[0]))\n', (115842, 115857), False, 'import numpy\n'), ((116976, 116998), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (116981, 116998), False, 'from builtins import range\n'), ((5756, 5947), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Polygons with more than 8190 are not supported by the official GDSII specification. This GDSII file might not be compatible with all readers."""'], {'stacklevel': '(4)'}), "(\n '[GDSPY] Polygons with more than 8190 are not supported by the official GDSII specification. 
This GDSII file might not be compatible with all readers.'\n , stacklevel=4)\n", (5769, 5947), False, 'import warnings\n'), ((6465, 6526), 'numpy.empty', 'numpy.empty', (['(self.polygons[ii].shape[0] + 1, 2)'], {'dtype': '""">i4"""'}), "((self.polygons[ii].shape[0] + 1, 2), dtype='>i4')\n", (6476, 6526), False, 'import numpy\n'), ((6553, 6596), 'numpy.round', 'numpy.round', (['(self.polygons[ii] * multiplier)'], {}), '(self.polygons[ii] * multiplier)\n', (6564, 6596), False, 'import numpy\n'), ((8470, 8497), 'struct.pack', 'struct.pack', (['""">2H"""', '(4)', '(4352)'], {}), "('>2H', 4, 4352)\n", (8481, 8497), False, 'import struct\n'), ((10145, 10177), 'builtins.zip', 'zip', (['self.layers', 'self.datatypes'], {}), '(self.layers, self.datatypes)\n', (10148, 10177), False, 'from builtins import zip\n'), ((15249, 15275), 'builtins.zip', 'zip', (['radius', 'self.polygons'], {}), '(radius, self.polygons)\n', (15252, 15275), False, 'from builtins import zip\n'), ((16530, 16565), 'numpy.roll', 'numpy.roll', (['self.polygons[jj]', '(1)', '(0)'], {}), '(self.polygons[jj], 1, 0)\n', (16540, 16565), False, 'import numpy\n'), ((16745, 16779), 'numpy.array', 'numpy.array', (['self.polygons[jj][ii]'], {}), '(self.polygons[jj][ii])\n', (16756, 16779), False, 'import numpy\n'), ((17136, 17158), 'numpy.roll', 'numpy.roll', (['vec', '(-1)', '(0)'], {}), '(vec, -1, 0)\n', (17146, 17158), False, 'import numpy\n'), ((17384, 17406), 'numpy.roll', 'numpy.roll', (['vec', '(-1)', '(0)'], {}), '(vec, -1, 0)\n', (17394, 17406), False, 'import numpy\n'), ((20174, 20195), 'numpy.inner', 'numpy.inner', (['vec', 'vec'], {}), '(vec, vec)\n', (20185, 20195), False, 'import numpy\n'), ((27601, 27614), 'builtins.range', 'range', (['pieces'], {}), '(pieces)\n', (27606, 27614), False, 'from builtins import range\n'), ((64240, 64261), 'numpy.inner', 'numpy.inner', (['vec', 'vec'], {}), '(vec, vec)\n', (64251, 64261), False, 'import numpy\n'), ((64770, 64799), 'numpy.arctan2', 'numpy.arctan2', 
(['vec[1]', 'vec[0]'], {}), '(vec[1], vec[0])\n', (64783, 64799), False, 'import numpy\n'), ((71075, 71099), 'numpy.cos', 'numpy.cos', (['initial_angle'], {}), '(initial_angle)\n', (71084, 71099), False, 'import numpy\n'), ((71131, 71155), 'numpy.sin', 'numpy.sin', (['initial_angle'], {}), '(initial_angle)\n', (71140, 71155), False, 'import numpy\n'), ((71187, 71209), 'numpy.cos', 'numpy.cos', (['final_angle'], {}), '(final_angle)\n', (71196, 71209), False, 'import numpy\n'), ((71241, 71263), 'numpy.sin', 'numpy.sin', (['final_angle'], {}), '(final_angle)\n', (71250, 71263), False, 'import numpy\n'), ((73040, 73053), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (73045, 73053), False, 'from builtins import range\n'), ((84433, 84464), 'numpy.full_like', 'numpy.full_like', (['points', 'self.w'], {}), '(points, self.w)\n', (84448, 84464), False, 'import numpy\n'), ((84818, 84856), 'numpy.full_like', 'numpy.full_like', (['points', 'self.distance'], {}), '(points, self.distance)\n', (84833, 84856), False, 'import numpy\n'), ((85220, 85249), 'numpy.array', 'numpy.array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (85231, 85249), False, 'import numpy\n'), ((95254, 95290), 'numpy.vstack', 'numpy.vstack', (['([(0.0, 0.0)], points)'], {}), '(([(0.0, 0.0)], points))\n', (95266, 95290), False, 'import numpy\n'), ((95293, 95322), 'numpy.array', 'numpy.array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (95304, 95322), False, 'import numpy\n'), ((99731, 99753), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (99736, 99753), False, 'from builtins import range\n'), ((100356, 100378), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (100361, 100378), False, 'from builtins import range\n'), ((101431, 101453), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (101436, 101453), False, 'from builtins import range\n'), ((105289, 105311), 'builtins.range', 'range', 
(['number_of_paths'], {}), '(number_of_paths)\n', (105294, 105311), False, 'from builtins import range\n'), ((106360, 106390), 'numpy.array', 'numpy.array', (['(p[0] + p[1][::-1])'], {}), '(p[0] + p[1][::-1])\n', (106371, 106390), False, 'import numpy\n'), ((110077, 110095), 'numpy.array', 'numpy.array', (['width'], {}), '(width)\n', (110088, 110095), False, 'import numpy\n'), ((112694, 112817), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Argument corners must be one of \'miter\' or \'bevel\'."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), '("[GDSPY] Argument corners must be one of \'miter\' or \'bevel\'.",\n category=DeprecationWarning, stacklevel=2)\n', (112707, 112817), False, 'import warnings\n'), ((113245, 113383), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Argument ends must be one of \'flush\', \'round\', or \'extended\'."""'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), '(\n "[GDSPY] Argument ends must be one of \'flush\', \'round\', or \'extended\'.",\n category=DeprecationWarning, stacklevel=2)\n', (113258, 113383), False, 'import warnings\n'), ((116217, 116239), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (116222, 116239), False, 'from builtins import range\n'), ((116483, 116505), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (116488, 116505), False, 'from builtins import range\n'), ((116753, 116779), 'numpy.array', 'numpy.array', (['(-v[1], v[0])'], {}), '((-v[1], v[0]))\n', (116764, 116779), False, 'import numpy\n'), ((119945, 119979), 'numpy.array', 'numpy.array', (['(pol[0] + pol[1][::-1])'], {}), '(pol[0] + pol[1][::-1])\n', (119956, 119979), False, 'import numpy\n'), ((6094, 6185), 'struct.pack', 'struct.pack', (['""">4Hh2Hh"""', '(4)', '(2048)', '(6)', '(3330)', 'self.layers[ii]', '(6)', '(3586)', 'self.datatypes[ii]'], {}), "('>4Hh2Hh', 4, 2048, 6, 3330, self.layers[ii], 6, 3586, self.\n datatypes[ii])\n", (6105, 6185), False, 'import 
struct\n'), ((8127, 8328), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. This file might not be compatible with all readers."""'], {'stacklevel': '(4)'}), "(\n '[GDSPY] Properties with size larger than 128 bytes are not officially supported by the GDSII specification. This file might not be compatible with all readers.'\n , stacklevel=4)\n", (8140, 8328), False, 'import warnings\n'), ((15300, 15317), 'numpy.isscalar', 'numpy.isscalar', (['r'], {}), '(r)\n', (15314, 15317), False, 'import numpy\n'), ((16891, 16926), 'numpy.roll', 'numpy.roll', (['self.polygons[jj]', '(1)', '(0)'], {}), '(self.polygons[jj], 1, 0)\n', (16901, 16926), False, 'import numpy\n'), ((17764, 17791), 'numpy.arctan2', 'numpy.arctan2', (['a0[1]', 'a0[0]'], {}), '(a0[1], a0[0])\n', (17777, 17791), False, 'import numpy\n'), ((17883, 17910), 'numpy.arctan2', 'numpy.arctan2', (['a1[1]', 'a1[0]'], {}), '(a1[1], a1[0])\n', (17896, 17910), False, 'import numpy\n'), ((18238, 18263), 'numpy.linspace', 'numpy.linspace', (['a0', 'a1', 'n'], {}), '(a0, a1, n)\n', (18252, 18263), False, 'import numpy\n'), ((29019, 29079), 'numpy.linspace', 'numpy.linspace', (['oang[ii]', 'oang[ii + 1]', '(number_of_points - 1)'], {}), '(oang[ii], oang[ii + 1], number_of_points - 1)\n', (29033, 29079), False, 'import numpy\n'), ((29427, 29469), 'numpy.linspace', 'numpy.linspace', (['oang[ii]', 'oang[ii + 1]', 'n1'], {}), '(oang[ii], oang[ii + 1], n1)\n', (29441, 29469), False, 'import numpy\n'), ((29652, 29694), 'numpy.linspace', 'numpy.linspace', (['iang[ii + 1]', 'iang[ii]', 'n2'], {}), '(iang[ii + 1], iang[ii], n2)\n', (29666, 29694), False, 'import numpy\n'), ((67470, 67744), 'numpy.array', 'numpy.array', (['[(old_x + (old_d0 - old_w) * sa, old_y - (old_d0 - old_w) * ca), (old_x + (\n old_d0 + old_w) * sa, old_y - (old_d0 + old_w) * ca), (self.x + (d0 +\n self.w) * sa, self.y - (d0 + self.w) * ca), (self.x + (d0 - 
self.w) *\n sa, self.y - (d0 - self.w) * ca)]'], {}), '([(old_x + (old_d0 - old_w) * sa, old_y - (old_d0 - old_w) * ca),\n (old_x + (old_d0 + old_w) * sa, old_y - (old_d0 + old_w) * ca), (self.x +\n (d0 + self.w) * sa, self.y - (d0 + self.w) * ca), (self.x + (d0 - self.\n w) * sa, self.y - (d0 - self.w) * ca)])\n', (67481, 67744), False, 'import numpy\n'), ((73594, 73642), 'numpy.linspace', 'numpy.linspace', (['angles[jj]', 'angles[jj + 1]', 'pts1'], {}), '(angles[jj], angles[jj + 1], pts1)\n', (73608, 73642), False, 'import numpy\n'), ((73669, 73731), 'numpy.linspace', 'numpy.linspace', (['(old_r0 + widths[jj])', '(r0 + widths[jj + 1])', 'pts1'], {}), '(old_r0 + widths[jj], r0 + widths[jj + 1], pts1)\n', (73683, 73731), False, 'import numpy\n'), ((74279, 74327), 'numpy.linspace', 'numpy.linspace', (['angles[jj + 1]', 'angles[jj]', 'pts2'], {}), '(angles[jj + 1], angles[jj], pts2)\n', (74293, 74327), False, 'import numpy\n'), ((74354, 74416), 'numpy.linspace', 'numpy.linspace', (['(r0 - widths[jj + 1])', '(old_r0 - widths[jj])', 'pts2'], {}), '(r0 - widths[jj + 1], old_r0 - widths[jj], pts2)\n', (74368, 74416), False, 'import numpy\n'), ((86362, 86389), 'numpy.concatenate', 'numpy.concatenate', (['(p1, p2)'], {}), '((p1, p2))\n', (86379, 86389), False, 'import numpy\n'), ((100702, 100724), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (100707, 100724), False, 'from builtins import range\n'), ((101935, 101957), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (101940, 101957), False, 'from builtins import range\n'), ((105633, 105655), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (105638, 105655), False, 'from builtins import range\n'), ((110786, 110811), 'numpy.arctan2', 'numpy.arctan2', (['v[1]', 'v[0]'], {}), '(v[1], v[0])\n', (110799, 110811), False, 'import numpy\n'), ((111814, 111878), 'numpy.array', 'numpy.array', (['(points[0], points[1] - u * w1, points[1] + u * w1)'], 
{}), '((points[0], points[1] - u * w1, points[1] + u * w1))\n', (111825, 111878), False, 'import numpy\n'), ((114091, 114118), 'numpy.arctan2', 'numpy.arctan2', (['v0[1]', 'v0[0]'], {}), '(v0[1], v0[0])\n', (114104, 114118), False, 'import numpy\n'), ((114146, 114174), 'numpy.array', 'numpy.array', (['(-v0[1], v0[0])'], {}), '((-v0[1], v0[0]))\n', (114157, 114174), False, 'import numpy\n'), ((114331, 114358), 'numpy.arctan2', 'numpy.arctan2', (['v1[1]', 'v1[0]'], {}), '(v1[1], v1[0])\n', (114344, 114358), False, 'import numpy\n'), ((114386, 114414), 'numpy.array', 'numpy.array', (['(-v1[1], v1[0])'], {}), '((-v1[1], v1[0]))\n', (114397, 114414), False, 'import numpy\n'), ((6778, 6821), 'struct.pack', 'struct.pack', (['""">2H"""', '(4 + 8 * (i1 - i0))', '(4099)'], {}), "('>2H', 4 + 8 * (i1 - i0), 4099)\n", (6789, 6821), False, 'import struct\n'), ((7439, 7482), 'numpy.round', 'numpy.round', (['(self.polygons[ii] * multiplier)'], {}), '(self.polygons[ii] * multiplier)\n', (7450, 7482), False, 'import numpy\n'), ((12398, 12454), 'gdspy.clipper._chop', 'clipper._chop', (['self.polygons[ii]', 'cuts', '(0)', '(1 / precision)'], {}), '(self.polygons[ii], cuts, 0, 1 / precision)\n', (12411, 12454), False, 'from gdspy import clipper\n'), ((12802, 12858), 'gdspy.clipper._chop', 'clipper._chop', (['self.polygons[ii]', 'cuts', '(1)', '(1 / precision)'], {}), '(self.polygons[ii], cuts, 1, 1 / precision)\n', (12815, 12858), False, 'from gdspy import clipper\n'), ((20247, 20282), 'numpy.inner', 'numpy.inner', (['(points - origin)', 'vec_r'], {}), '(points - origin, vec_r)\n', (20258, 20282), False, 'import numpy\n'), ((64313, 64348), 'numpy.inner', 'numpy.inner', (['(points - origin)', 'vec_r'], {}), '(points - origin, vec_r)\n', (64324, 64348), False, 'import numpy\n'), ((66757, 66777), 'numpy.cos', 'numpy.cos', (['direction'], {}), '(direction)\n', (66766, 66777), False, 'import numpy\n'), ((66795, 66815), 'numpy.sin', 'numpy.sin', (['direction'], {}), '(direction)\n', (66804, 
66815), False, 'import numpy\n'), ((73096, 73130), 'numpy.zeros', 'numpy.zeros', (['(number_of_points, 2)'], {}), '((number_of_points, 2))\n', (73107, 73130), False, 'import numpy\n'), ((74092, 74130), 'numpy.array', 'numpy.array', (['self.polygons[-1][1:pts1]'], {}), '(self.polygons[-1][1:pts1])\n', (74103, 74130), False, 'import numpy\n'), ((74493, 74625), 'warnings.warn', 'warnings.warn', (['"""[GDSPY] Path arc with width larger than radius created: possible self-intersecting polygon."""'], {'stacklevel': '(2)'}), "(\n '[GDSPY] Path arc with width larger than radius created: possible self-intersecting polygon.'\n , stacklevel=2)\n", (74506, 74625), False, 'import warnings\n'), ((79032, 79063), 'builtins.round', 'round', (['(self.direction / _halfpi)'], {}), '(self.direction / _halfpi)\n', (79037, 79063), False, 'from builtins import round\n'), ((101048, 101070), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (101053, 101070), False, 'from builtins import range\n'), ((102439, 102461), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (102444, 102461), False, 'from builtins import range\n'), ((104463, 104511), 'numpy.array', 'numpy.array', (['(p[0][:-1] + [p0, p1] + p[1][-2::-1])'], {}), '(p[0][:-1] + [p0, p1] + p[1][-2::-1])\n', (104474, 104511), False, 'import numpy\n'), ((105977, 105999), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (105982, 105999), False, 'from builtins import range\n'), ((111981, 112045), 'numpy.array', 'numpy.array', (['(points[0] + u * w0, points[0] - u * w0, points[1])'], {}), '((points[0] + u * w0, points[0] - u * w0, points[1]))\n', (111992, 112045), False, 'import numpy\n'), ((112140, 112238), 'numpy.array', 'numpy.array', (['(points[0] + u * w0, points[0] - u * w0, points[1] - u * w1, points[1] + u * w1\n )'], {}), '((points[0] + u * w0, points[0] - u * w0, points[1] - u * w1, \n points[1] + u * w1))\n', (112151, 112238), False, 'import numpy\n'), 
((13123, 13137), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (13134, 13137), False, 'import numpy\n'), ((25730, 25742), 'numpy.sin', 'numpy.sin', (['a'], {}), '(a)\n', (25739, 25742), False, 'import numpy\n'), ((25750, 25762), 'numpy.cos', 'numpy.cos', (['a'], {}), '(a)\n', (25759, 25762), False, 'import numpy\n'), ((26219, 26231), 'numpy.sin', 'numpy.sin', (['a'], {}), '(a)\n', (26228, 26231), False, 'import numpy\n'), ((26239, 26251), 'numpy.cos', 'numpy.cos', (['a'], {}), '(a)\n', (26248, 26251), False, 'import numpy\n'), ((28260, 28272), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (28269, 28272), False, 'import numpy\n'), ((28337, 28349), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (28346, 28349), False, 'import numpy\n'), ((28601, 28613), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (28610, 28613), False, 'import numpy\n'), ((28680, 28692), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (28689, 28692), False, 'import numpy\n'), ((28829, 28841), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (28838, 28841), False, 'import numpy\n'), ((28908, 28920), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (28917, 28920), False, 'import numpy\n'), ((29127, 29139), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (29136, 29139), False, 'import numpy\n'), ((29205, 29217), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (29214, 29217), False, 'import numpy\n'), ((29518, 29530), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (29527, 29530), False, 'import numpy\n'), ((29597, 29609), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (29606, 29609), False, 'import numpy\n'), ((29743, 29755), 'numpy.cos', 'numpy.cos', (['t'], {}), '(t)\n', (29752, 29755), False, 'import numpy\n'), ((29822, 29834), 'numpy.sin', 'numpy.sin', (['t'], {}), '(t)\n', (29831, 29834), False, 'import numpy\n'), ((68571, 68584), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (68576, 68584), False, 'from builtins import range\n'), ((68833, 68846), 'builtins.range', 'range', 
(['self.n'], {}), '(self.n)\n', (68838, 68846), False, 'from builtins import range\n'), ((73782, 73796), 'numpy.cos', 'numpy.cos', (['ang'], {}), '(ang)\n', (73791, 73796), False, 'import numpy\n'), ((73858, 73872), 'numpy.sin', 'numpy.sin', (['ang'], {}), '(ang)\n', (73867, 73872), False, 'import numpy\n'), ((74780, 74794), 'numpy.cos', 'numpy.cos', (['ang'], {}), '(ang)\n', (74789, 74794), False, 'import numpy\n'), ((74856, 74870), 'numpy.sin', 'numpy.sin', (['ang'], {}), '(ang)\n', (74865, 74870), False, 'import numpy\n'), ((86582, 86595), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (86587, 86595), False, 'from builtins import range\n'), ((86844, 86857), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (86849, 86857), False, 'from builtins import range\n'), ((102943, 102965), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (102948, 102965), False, 'from builtins import range\n'), ((114996, 115018), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (115001, 115018), False, 'from builtins import range\n'), ((115459, 115481), 'builtins.range', 'range', (['number_of_paths'], {}), '(number_of_paths)\n', (115464, 115481), False, 'from builtins import range\n'), ((119055, 119103), 'numpy.array', 'numpy.array', (['(paths[ii][0] + paths[ii][1][-2::-1])'], {}), '(paths[ii][0] + paths[ii][1][-2::-1])\n', (119066, 119103), False, 'import numpy\n'), ((119230, 119276), 'numpy.array', 'numpy.array', (['(paths[ii][0] + paths[ii][1][::-1])'], {}), '(paths[ii][0] + paths[ii][1][::-1])\n', (119241, 119276), False, 'import numpy\n'), ((9226, 9293), 'numpy.format_float_positional', 'numpy.format_float_positional', (['pt[0]'], {'trim': '"""0"""', 'precision': 'precision'}), "(pt[0], trim='0', precision=precision)\n", (9255, 9293), False, 'import numpy\n'), ((9385, 9452), 'numpy.format_float_positional', 'numpy.format_float_positional', (['pt[1]'], {'trim': '"""0"""', 'precision': 'precision'}), "(pt[1], 
trim='0', precision=precision)\n", (9414, 9452), False, 'import numpy\n'), ((12318, 12337), 'builtins.range', 'range', (['(1)', '(ncuts + 1)'], {}), '(1, ncuts + 1)\n', (12323, 12337), False, 'from builtins import range\n'), ((12722, 12741), 'builtins.range', 'range', (['(1)', '(ncuts + 1)'], {}), '(1, ncuts + 1)\n', (12727, 12741), False, 'from builtins import range\n'), ((13147, 13185), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['chopped'], {}), '(chopped)\n', (13176, 13185), False, 'import itertools\n'), ((13319, 13331), 'builtins.range', 'range', (['npols'], {}), '(npols)\n', (13324, 13331), False, 'from builtins import range\n'), ((13393, 13405), 'builtins.range', 'range', (['npols'], {}), '(npols)\n', (13398, 13405), False, 'from builtins import range\n'), ((27118, 27154), 'numpy.arccos', 'numpy.arccos', (['(1 - tolerance / radius)'], {}), '(1 - tolerance / radius)\n', (27130, 27154), False, 'import numpy\n'), ((28053, 28083), 'numpy.arange', 'numpy.arange', (['number_of_points'], {}), '(number_of_points)\n', (28065, 28083), False, 'import numpy\n'), ((28508, 28524), 'numpy.arange', 'numpy.arange', (['n1'], {}), '(n1)\n', (28520, 28524), False, 'import numpy\n'), ((28735, 28751), 'numpy.arange', 'numpy.arange', (['n2'], {}), '(n2)\n', (28747, 28751), False, 'import numpy\n'), ((57962, 57982), 'numpy.array', 'numpy.array', (['polygon'], {}), '(polygon)\n', (57973, 57982), False, 'import numpy\n'), ((75164, 75177), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (75169, 75177), False, 'from builtins import range\n'), ((75450, 75463), 'builtins.range', 'range', (['self.n'], {}), '(self.n)\n', (75455, 75463), False, 'from builtins import range\n'), ((72438, 72469), 'numpy.arccos', 'numpy.arccos', (['(1 - tolerance / r)'], {}), '(1 - tolerance / r)\n', (72450, 72469), False, 'import numpy\n'), ((18816, 18828), 'numpy.cos', 'numpy.cos', (['a'], {}), '(a)\n', (18825, 18828), False, 'import numpy\n'), ((18834, 18846), 'numpy.sin', 
'numpy.sin', (['a'], {}), '(a)\n', (18843, 18846), False, 'import numpy\n')] |
import sys
from model import TransformerDST
from pytorch_transformers import BertTokenizer, AdamW, WarmupLinearSchedule, BertConfig
from utils.data_utils import prepare_dataset, MultiWozDataset
from utils.data_utils import (
make_slot_meta,
domain2id,
OP_SET,
make_turn_label,
postprocessing,
)
from utils.eval_utils import compute_prf, compute_acc, per_domain_join_accuracy
from utils.ckpt_utils import download_ckpt, convert_ckpt_compatible
from evaluation import model_evaluation
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import numpy as np
import argparse
import random
import os
import json
import time
def masked_cross_entropy_for_value(logits, target, pad_idx=0):
mask = target.ne(pad_idx)
logits_flat = logits.view(-1, logits.size(-1))
log_probs_flat = torch.log(logits_flat)
target_flat = target.view(-1, 1)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
losses = losses_flat.view(*target.size())
losses = losses * mask.float()
loss = losses.sum() / (mask.sum().float())
return loss
def save(args, epoch, model, enc_optimizer, dec_optimizer=None):
    """Write the model weights for *epoch* into ``args.save_dir``.

    Only the model's ``state_dict`` is persisted (as ``model.e<epoch>.bin``);
    optimizer states are intentionally not checkpointed. ``enc_optimizer``
    and ``dec_optimizer`` are accepted purely for interface compatibility.
    """
    # Unwrap DataParallel/DistributedDataParallel wrappers before saving.
    target = model.module if hasattr(model, "module") else model
    out_path = os.path.join(args.save_dir, "model.e{:}.bin".format(epoch))
    torch.save(target.state_dict(), out_path)
def load(args, epoch):
    """Read back the model and both optimizer checkpoints for *epoch*.

    Files are expected under ``args.save_dir`` as ``model.e<epoch>.bin``,
    ``enc_optim.e<epoch>.bin`` and ``dec_optim.e<epoch>.bin``. Optimizer
    payloads that are full objects are reduced to their ``state_dict``.

    Returns:
        Tuple ``(model_recover, enc_recover, dec_recover)``.
    """
    def _read(prefix):
        # Always map to CPU so checkpoints load regardless of the saving device.
        path = os.path.join(args.save_dir, "{:}.e{:}.bin".format(prefix, epoch))
        return torch.load(path, map_location="cpu")

    model_recover = _read("model")

    enc_recover = _read("enc_optim")
    if hasattr(enc_recover, "state_dict"):
        enc_recover = enc_recover.state_dict()

    dec_recover = _read("dec_optim")
    if hasattr(dec_recover, "state_dict"):
        dec_recover = dec_recover.state_dict()

    return model_recover, enc_recover, dec_recover
def main(args):
    """Evaluate saved Transformer-DST checkpoints on the MultiWOZ test split.

    For every epoch listed in ``args.load_epoch`` (a '-'-separated string,
    e.g. ``"10-11-12"``), the checkpoint ``model.e<epoch>.bin`` is loaded
    from ``args.save_dir`` and scored with ``model_evaluation``.

    Fixes over the original: the ``save_dir`` existence check/mkdir was
    duplicated (run twice); the locals ``worker_init_fn`` and ``rng`` were
    defined but never used (no DataLoader is built here); a large
    commented-out train-set evaluation block was removed.

    Args:
        args: parsed command-line namespace (see the ``__main__`` block).

    Raises:
        NotImplementedError: if ``args.recover_e > 0`` (legacy option).
    """
    assert args.use_one_optim is True

    if args.recover_e > 0:
        raise NotImplementedError(
            "This option is from my oldest code version. "
            "I have not checked it for this code version."
        )

    # Create the checkpoint directory once.
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
        print("### mkdir {:}".format(args.save_dir))

    n_gpu = 0
    if torch.cuda.is_available() and (not args.use_cpu):
        n_gpu = torch.cuda.device_count()
        device = torch.device("cuda")
        print("### Device: {:}".format(device))
    else:
        print("### Use CPU (Debugging)")
        device = torch.device("cpu")

    if args.random_seed < 0:
        print("### Pick a random seed")
        args.random_seed = random.sample(list(range(1, 100000)), 1)[0]

    print("### Random Seed: {:}".format(args.random_seed))
    # Seed every RNG involved so evaluation is reproducible.
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)

    if n_gpu > 0:
        if args.random_seed >= 0:
            torch.cuda.manual_seed(args.random_seed)
            torch.cuda.manual_seed_all(args.random_seed)
        # Trade cuDNN autotuning speed for deterministic kernels.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    ontology = json.load(open(args.ontology_data))
    slot_meta, ontology = make_slot_meta(ontology)
    op2id = OP_SET[args.op_code]
    print(op2id)
    tokenizer = BertTokenizer(args.vocab_path, do_lower_case=True)

    train_path = os.path.join(args.data_root, "train.pt")
    # Loaded only to report its size; evaluation itself uses the test set.
    train_data_raw = torch.load(train_path)[:5000]
    print("# train examples %d" % len(train_data_raw))

    test_path = os.path.join(args.data_root, "test.pt")
    test_data_raw = torch.load(test_path)
    print("# test examples %d" % len(test_data_raw))

    model_config = BertConfig.from_json_file(args.bert_config_path)
    model_config.dropout = args.dropout
    model_config.attention_probs_dropout_prob = args.attention_probs_dropout_prob
    model_config.hidden_dropout_prob = args.hidden_dropout_prob

    type_vocab_size = 4
    dec_config = args
    model = TransformerDST(
        model_config,
        dec_config,
        len(op2id),
        len(domain2id),
        op2id["update"],
        tokenizer.convert_tokens_to_ids(["[MASK]"])[0],
        tokenizer.convert_tokens_to_ids(["[SEP]"])[0],
        tokenizer.convert_tokens_to_ids(["[PAD]"])[0],
        tokenizer.convert_tokens_to_ids(["-"])[0],
        type_vocab_size,
        args.exclude_domain,
    )

    test_epochs = [int(e) for e in args.load_epoch.strip().lower().split("-")]
    for best_epoch in test_epochs:
        print("### Epoch {:}...".format(best_epoch))
        sys.stdout.flush()
        ckpt_path = os.path.join(args.save_dir, "model.e{:}.bin".format(best_epoch))
        ckpt = torch.load(ckpt_path, map_location="cpu")
        model.load_state_dict(ckpt)
        model.to(device)

        eval_res = model_evaluation(
            model,
            test_data_raw,
            tokenizer,
            slot_meta,
            best_epoch,
            args.op_code,
            use_full_slot=args.use_full_slot,
            use_dt_only=args.use_dt_only,
            no_dial=args.no_dial,
            n_gpu=n_gpu,
            is_gt_op=False,
            is_gt_p_state=False,
            is_gt_gen=False,
        )
        print("### Epoch {:} Test Score : ".format(best_epoch), eval_res)
        print("\n" * 2)
        sys.stdout.flush()
if __name__ == "__main__":
    # Command-line interface for checkpoint evaluation; the parsed namespace
    # is forwarded unchanged to main() above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--load_epoch",
        default="20",
        required=True,
        type=str,
        help="example: '10-11-12' ",
    )
    parser.add_argument(
        "--use_cpu", action="store_true"
    )  # Just for my debugging. I have not tested whether it can be used for training model.
    # w/o re-using dialogue
    parser.add_argument("--no_dial", action="store_true")
    # Using only D_t in generation
    # NOTE(review): store_false — passing the flag DISABLES D_t-only mode
    # (the default is True despite the flag name); confirm intended.
    parser.add_argument("--use_dt_only", action="store_false")
    # By default, "decoder" only attend on a specific [SLOT] position.
    # If using this option, the "decoder" can access to this group of "[SLOT] domain slot - value".
    # NEW: exclude "- value"
    parser.add_argument("--use_full_slot", action="store_true")
    parser.add_argument(
        "--only_pred_op", action="store_true"
    )  # only train to predict state operation just for debugging
    # store_false: defaults to True, and main() asserts it stays True.
    parser.add_argument("--use_one_optim", action="store_false")  # I use one optim
    parser.add_argument("--recover_e", default=0, type=int)
    # Required parameters: data locations and BERT assets.
    parser.add_argument(
        "--data_root", default="./code/Transformer-DST-main/data/v21/", type=str
    )
    parser.add_argument("--train_data", default="train_dials.json", type=str)
    parser.add_argument("--dev_data", default="dev_dials.json", type=str)
    parser.add_argument("--test_data", default="test_dials.json", type=str)
    parser.add_argument("--ontology_data", default="ontology.json", type=str)
    parser.add_argument("--vocab_path", default="assets/vocab.txt", type=str)
    parser.add_argument(
        "--bert_config_path", default="./assets/bert_config_base_uncased.json", type=str
    )
    parser.add_argument(
        "--bert_ckpt_path",
        default="./assets/bert-base-uncased-pytorch_model.bin",
        type=str,
    )
    parser.add_argument(
        "--save_dir",
        default="./code/Transformer-DST-main/outputs/dt_e15_b16/",
        type=str,
    )
    # Training hyper-parameters (retained from the training script; most are
    # presumably unused during pure evaluation — verify against main()).
    parser.add_argument("--random_seed", default=42, type=int)
    parser.add_argument("--num_workers", default=0, type=int)
    parser.add_argument("--batch_size", default=1, type=int)
    parser.add_argument("--enc_warmup", default=0.1, type=float)
    parser.add_argument("--dec_warmup", default=0.1, type=float)
    parser.add_argument(
        "--enc_lr", default=3e-5, type=float
    )  # my Transformer-AR uses 3e-5
    parser.add_argument("--dec_lr", default=1e-4, type=float)
    parser.add_argument("--n_epochs", default=30, type=int)
    parser.add_argument("--eval_epoch", default=1, type=int)
    # Model / data-processing options.
    parser.add_argument("--op_code", default="4", type=str)
    parser.add_argument("--slot_token", default="[SLOT]", type=str)
    parser.add_argument("--dropout", default=0.1, type=float)
    parser.add_argument("--hidden_dropout_prob", default=0.1, type=float)
    parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float)
    parser.add_argument("--decoder_teacher_forcing", default=1, type=float)
    parser.add_argument("--word_dropout", default=0.1, type=float)
    parser.add_argument("--not_shuffle_state", default=False, action="store_true")
    parser.add_argument("--shuffle_p", default=0.5, type=float)
    parser.add_argument("--n_history", default=1, type=int)
    parser.add_argument("--max_seq_length", default=256, type=int)
    parser.add_argument("--msg", default=None, type=str)
    parser.add_argument("--exclude_domain", default=False, action="store_true")
    # generator
    parser.add_argument(
        "--beam_size", type=int, default=1, help="Beam size for searching"
    )
    parser.add_argument("--min_len", default=1, type=int)
    parser.add_argument(
        "--length_penalty", type=float, default=0, help="Length penalty for beam search"
    )
    parser.add_argument("--forbid_duplicate_ngrams", action="store_true")
    parser.add_argument(
        "--forbid_ignore_word",
        type=str,
        default=None,
        help="Ignore the word during forbid_duplicate_ngrams",
    )
    parser.add_argument("--ngram_size", type=int, default=2)
    args = parser.parse_args()
    # Derive absolute data paths from data_root.
    args.train_data_path = os.path.join(args.data_root, args.train_data)
    args.dev_data_path = os.path.join(args.data_root, args.dev_data)
    args.test_data_path = os.path.join(args.data_root, args.test_data)
    args.ontology_data = os.path.join(args.data_root, args.ontology_data)
    args.shuffle_state = False if args.not_shuffle_state else True
    print("pytorch version: ", torch.__version__)
    print(args)
    main(args)
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"pytorch_transformers.BertConfig.from_json_file",
"evaluation.model_evaluation",
"torch.cuda.device_count",
"sys.stdout.flush",
"pytorch_transformers.BertTokenizer",
"torch.device",
"os.path.join",
"torch.gather",
"random.Random",
"... | [((878, 900), 'torch.log', 'torch.log', (['logits_flat'], {}), '(logits_flat)\n', (887, 900), False, 'import torch\n'), ((1969, 2011), 'torch.load', 'torch.load', (['model_file'], {'map_location': '"""cpu"""'}), "(model_file, map_location='cpu')\n", (1979, 2011), False, 'import torch\n'), ((2116, 2162), 'torch.load', 'torch.load', (['enc_optim_file'], {'map_location': '"""cpu"""'}), "(enc_optim_file, map_location='cpu')\n", (2126, 2162), False, 'import torch\n'), ((2357, 2403), 'torch.load', 'torch.load', (['dec_optim_file'], {'map_location': '"""cpu"""'}), "(dec_optim_file, map_location='cpu')\n", (2367, 2403), False, 'import torch\n'), ((3504, 3536), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3518, 3536), True, 'import numpy as np\n'), ((3541, 3570), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3552, 3570), False, 'import random\n'), ((3581, 3612), 'random.Random', 'random.Random', (['args.random_seed'], {}), '(args.random_seed)\n', (3594, 3612), False, 'import random\n'), ((3617, 3652), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3634, 3652), False, 'import torch\n'), ((4067, 4091), 'utils.data_utils.make_slot_meta', 'make_slot_meta', (['ontology'], {}), '(ontology)\n', (4081, 4091), False, 'from utils.data_utils import make_slot_meta, domain2id, OP_SET, make_turn_label, postprocessing\n'), ((4159, 4209), 'pytorch_transformers.BertTokenizer', 'BertTokenizer', (['args.vocab_path'], {'do_lower_case': '(True)'}), '(args.vocab_path, do_lower_case=True)\n', (4172, 4209), False, 'from pytorch_transformers import BertTokenizer, AdamW, WarmupLinearSchedule, BertConfig\n'), ((4228, 4268), 'os.path.join', 'os.path.join', (['args.data_root', '"""train.pt"""'], {}), "(args.data_root, 'train.pt')\n", (4240, 4268), False, 'import os\n'), ((4392, 4431), 'os.path.join', 'os.path.join', (['args.data_root', '"""test.pt"""'], {}), 
"(args.data_root, 'test.pt')\n", (4404, 4431), False, 'import os\n'), ((4452, 4473), 'torch.load', 'torch.load', (['test_path'], {}), '(test_path)\n', (4462, 4473), False, 'import torch\n'), ((4547, 4595), 'pytorch_transformers.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_path'], {}), '(args.bert_config_path)\n', (4572, 4595), False, 'from pytorch_transformers import BertTokenizer, AdamW, WarmupLinearSchedule, BertConfig\n'), ((6717, 6742), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6740, 6742), False, 'import argparse\n'), ((10887, 10932), 'os.path.join', 'os.path.join', (['args.data_root', 'args.train_data'], {}), '(args.data_root, args.train_data)\n', (10899, 10932), False, 'import os\n'), ((10958, 11001), 'os.path.join', 'os.path.join', (['args.data_root', 'args.dev_data'], {}), '(args.data_root, args.dev_data)\n', (10970, 11001), False, 'import os\n'), ((11028, 11072), 'os.path.join', 'os.path.join', (['args.data_root', 'args.test_data'], {}), '(args.data_root, args.test_data)\n', (11040, 11072), False, 'import os\n'), ((11098, 11146), 'os.path.join', 'os.path.join', (['args.data_root', 'args.ontology_data'], {}), '(args.data_root, args.ontology_data)\n', (11110, 11146), False, 'import os\n'), ((957, 1011), 'torch.gather', 'torch.gather', (['log_probs_flat'], {'dim': '(1)', 'index': 'target_flat'}), '(log_probs_flat, dim=1, index=target_flat)\n', (969, 1011), False, 'import torch\n'), ((2806, 2835), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (2820, 2835), False, 'import os\n'), ((2845, 2868), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (2853, 2868), False, 'import os\n'), ((2966, 3010), 'numpy.random.seed', 'np.random.seed', (['(args.random_seed + worker_id)'], {}), '(args.random_seed + worker_id)\n', (2980, 3010), True, 'import numpy as np\n'), ((3033, 3058), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', 
(3056, 3058), False, 'import torch\n'), ((3099, 3124), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3122, 3124), False, 'import torch\n'), ((3142, 3162), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3154, 3162), False, 'import torch\n'), ((3279, 3298), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3291, 3298), False, 'import torch\n'), ((3926, 3955), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (3940, 3955), False, 'import os\n'), ((3965, 3988), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (3973, 3988), False, 'import os\n'), ((4290, 4312), 'torch.load', 'torch.load', (['train_path'], {}), '(train_path)\n', (4300, 4312), False, 'import torch\n'), ((5421, 5439), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5437, 5439), False, 'import sys\n'), ((5540, 5581), 'torch.load', 'torch.load', (['ckpt_path'], {'map_location': '"""cpu"""'}), "(ckpt_path, map_location='cpu')\n", (5550, 5581), False, 'import torch\n'), ((6142, 6394), 'evaluation.model_evaluation', 'model_evaluation', (['model', 'test_data_raw', 'tokenizer', 'slot_meta', 'best_epoch', 'args.op_code'], {'use_full_slot': 'args.use_full_slot', 'use_dt_only': 'args.use_dt_only', 'no_dial': 'args.no_dial', 'n_gpu': 'n_gpu', 'is_gt_op': '(False)', 'is_gt_p_state': '(False)', 'is_gt_gen': '(False)'}), '(model, test_data_raw, tokenizer, slot_meta, best_epoch,\n args.op_code, use_full_slot=args.use_full_slot, use_dt_only=args.\n use_dt_only, no_dial=args.no_dial, n_gpu=n_gpu, is_gt_op=False,\n is_gt_p_state=False, is_gt_gen=False)\n', (6158, 6394), False, 'from evaluation import model_evaluation\n'), ((6656, 6674), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6672, 6674), False, 'import sys\n'), ((3718, 3758), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3740, 3758), False, 'import torch\n'), ((3771, 
3815), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.random_seed'], {}), '(args.random_seed)\n', (3797, 3815), False, 'import torch\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
import os
import sys
import cv2
import random
import datetime
import math
import argparse
import numpy as np
import scipy.io as sio
import zipfile
from .net_s3fd import s3fd
from .bbox import *
def detect(net, img):
    """Run S3FD face detection on a single image.

    Args:
        net: s3fd network; returns a list of alternating per-scale
            (classification, regression) feature maps.
        img: HxWx3 numpy array. The [104, 117, 123] per-channel mean is
            subtracted, which suggests BGR channel order — TODO confirm
            against the caller.

    Returns:
        (N, 5) numpy array of [x1, y1, x2, y2, score] rows, one per anchor
        location with face probability > 0.05; a single all-zero row when
        nothing is found.
    """
    # Mean-subtract, then HWC -> NCHW with a singleton batch dimension.
    img = img - np.array([104, 117, 123])
    img = img.transpose(2, 0, 1)
    img = img.reshape((1, ) + img.shape)
    img = paddle.to_tensor(img).astype('float32')
    BB, CC, HH, WW = img.shape
    with paddle.no_grad():
        olist = net(img)
    bboxlist = []
    # Even indices hold class logits; convert them into probabilities.
    for i in range(len(olist) // 2):
        olist[i * 2] = F.softmax(olist[i * 2], axis=1)
    for i in range(len(olist) // 2):
        ocls, oreg = olist[i * 2], olist[i * 2 + 1]
        FB, FC, FH, FW = ocls.shape  # feature map size
        stride = 2**(i + 2)  # 4,8,16,32,64,128
        anchor = stride * 4
        # Grid positions where the face-class probability clears 0.05.
        poss = zip(*np.where(ocls.numpy()[:, 1, :, :] > 0.05))
        for Iindex, hindex, windex in poss:
            # Anchor center in input-image coordinates.
            axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
            score = ocls.numpy()[0, 1, hindex, windex]
            loc = oreg.numpy()[0, :, hindex, windex].reshape(1, 4)
            priors = paddle.to_tensor(
                [[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
            variances = [0.1, 0.2]
            # Decode regression offsets against the anchor prior.
            box = decode(paddle.to_tensor(loc), priors, variances)
            x1, y1, x2, y2 = box[0].numpy() * 1.0
            bboxlist.append([x1, y1, x2, y2, score])
    bboxlist = np.array(bboxlist)
    if 0 == len(bboxlist):
        # Keep the return shape stable even when nothing was detected.
        bboxlist = np.zeros((1, 5))
    return bboxlist
def batch_detect(net, imgs):
    """Run S3FD face detection on a batch of images.

    Args:
        net: s3fd network; returns a list of alternating per-scale
            (classification, regression) feature maps.
        imgs: BxHxWx3 numpy array. The [104, 117, 123] per-channel mean is
            subtracted, which suggests BGR channel order — TODO confirm.

    Returns:
        numpy array of shape (M, B, 5): for each of the M anchor locations
        that cleared the 0.05 threshold in any image, the per-image
        [x1, y1, x2, y2, score]; (1, B, 5) zeros when none clears it.
    """
    # Mean-subtract, then BHWC -> BCHW.
    imgs = imgs - np.array([104, 117, 123])
    imgs = imgs.transpose(0, 3, 1, 2)
    imgs = paddle.to_tensor(imgs).astype('float32')
    BB, CC, HH, WW = imgs.shape
    with paddle.no_grad():
        olist = net(imgs)
    bboxlist = []
    # Even indices hold class logits; convert them into probabilities.
    for i in range(len(olist) // 2):
        olist[i * 2] = F.softmax(olist[i * 2], axis=1)
    for i in range(len(olist) // 2):
        ocls, oreg = olist[i * 2], olist[i * 2 + 1]
        FB, FC, FH, FW = ocls.shape  # feature map size
        stride = 2**(i + 2)  # 4,8,16,32,64,128
        anchor = stride * 4
        # Locations (in any batch element) where the face prob clears 0.05.
        poss = zip(*np.where(ocls.numpy()[:, 1, :, :] > 0.05))
        for Iindex, hindex, windex in poss:
            axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
            score = ocls.numpy()[:, 1, hindex, windex]
            loc = oreg.numpy()[:, :, hindex, windex].reshape(BB, 1, 4)
            priors = paddle.to_tensor(
                [[axc / 1.0, ayc / 1.0, stride * 4 / 1.0,
                  stride * 4 / 1.0]]).reshape([1, 1, 4])
            variances = [0.1, 0.2]
            # Decode every batch element against the shared anchor prior.
            box = batch_decode(paddle.to_tensor(loc), priors, variances)
            box = box[:, 0] * 1.0
            bboxlist.append(
                paddle.concat([box, paddle.to_tensor(score).unsqueeze(1)],
                              1).numpy())
    bboxlist = np.array(bboxlist)
    if 0 == len(bboxlist):
        bboxlist = np.zeros((1, BB, 5))
    return bboxlist
| [
"paddle.nn.functional.softmax",
"numpy.zeros",
"paddle.no_grad",
"numpy.array",
"paddle.to_tensor"
] | [((2109, 2127), 'numpy.array', 'np.array', (['bboxlist'], {}), '(bboxlist)\n', (2117, 2127), True, 'import numpy as np\n'), ((3567, 3585), 'numpy.array', 'np.array', (['bboxlist'], {}), '(bboxlist)\n', (3575, 3585), True, 'import numpy as np\n'), ((895, 920), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (903, 920), True, 'import numpy as np\n'), ((1086, 1102), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (1100, 1102), False, 'import paddle\n'), ((1208, 1239), 'paddle.nn.functional.softmax', 'F.softmax', (['olist[i * 2]'], {'axis': '(1)'}), '(olist[i * 2], axis=1)\n', (1217, 1239), True, 'import paddle.nn.functional as F\n'), ((2174, 2190), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (2182, 2190), True, 'import numpy as np\n'), ((2261, 2286), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (2269, 2286), True, 'import numpy as np\n'), ((2419, 2435), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (2433, 2435), False, 'import paddle\n'), ((2542, 2573), 'paddle.nn.functional.softmax', 'F.softmax', (['olist[i * 2]'], {'axis': '(1)'}), '(olist[i * 2], axis=1)\n', (2551, 2573), True, 'import paddle.nn.functional as F\n'), ((3632, 3652), 'numpy.zeros', 'np.zeros', (['(1, BB, 5)'], {}), '((1, BB, 5))\n', (3640, 3652), True, 'import numpy as np\n'), ((1006, 1027), 'paddle.to_tensor', 'paddle.to_tensor', (['img'], {}), '(img)\n', (1022, 1027), False, 'import paddle\n'), ((1793, 1871), 'paddle.to_tensor', 'paddle.to_tensor', (['[[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]'], {}), '([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])\n', (1809, 1871), False, 'import paddle\n'), ((2337, 2359), 'paddle.to_tensor', 'paddle.to_tensor', (['imgs'], {}), '(imgs)\n', (2353, 2359), False, 'import paddle\n'), ((1949, 1970), 'paddle.to_tensor', 'paddle.to_tensor', (['loc'], {}), '(loc)\n', (1965, 1970), False, 'import paddle\n'), ((3330, 3351), 'paddle.to_tensor', 
'paddle.to_tensor', (['loc'], {}), '(loc)\n', (3346, 3351), False, 'import paddle\n'), ((3131, 3209), 'paddle.to_tensor', 'paddle.to_tensor', (['[[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]'], {}), '([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])\n', (3147, 3209), False, 'import paddle\n'), ((3471, 3494), 'paddle.to_tensor', 'paddle.to_tensor', (['score'], {}), '(score)\n', (3487, 3494), False, 'import paddle\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import glob
import random
import imgaug
from imgaug import augmenters as iaa
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import openslide
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, concatenate, Concatenate, UpSampling2D, Activation
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
from tensorflow.keras import metrics
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms # noqa
import sklearn.metrics
import io
import itertools
from six.moves import range
import time
import argparse
import cv2
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.getcwd())))
from models.seg_models import get_inception_resnet_v2_unet_softmax, unet_densenet121
from models.deeplabv3p_original import Deeplabv3
# Random Seeds
# Fix the NumPy, Python and TensorFlow RNGs for reproducible runs.
# NOTE(review): tf.set_random_seed is the TF1.x API (TF2 renamed it to
# tf.random.set_seed), consistent with the tf.Session usage below.
np.random.seed(0)
random.seed(0)
tf.set_random_seed(0)
import gc
import pandas as pd
import tifffile
import skimage.io as io
# Image Helper Functions
def imsave(*args, **kwargs):
    """
    Concatenate the images given in args and save them as a single image in
    the specified output destination.

    Images should be numpy arrays with the same size along axis 0.
    Grayscale inputs are promoted to 3 channels; binary (max == 1) inputs
    are scaled to 0/255.

    Usage:
        imsave(im1, im2, out="sample.png")

    Returns:
        0 on failure; the PIL image when no `out` destination is given;
        None after a successful save.
    """
    args_list = list(args)
    for i in range(len(args_list)):
        if not isinstance(args_list[i], np.ndarray):
            print("Not a numpy array")
            return 0
        if len(args_list[i].shape) == 2:
            # Grayscale -> fake RGB by stacking the plane three times.
            args_list[i] = np.dstack([args_list[i]]*3)
        if args_list[i].max() == 1:
            # Binary mask -> visible 0/255 range.
            args_list[i] = args_list[i]*255
    out_destination = kwargs.get("out",'')
    try:
        concatenated_arr = np.concatenate(args_list,axis=1)
        im = Image.fromarray(np.uint8(concatenated_arr))
    except Exception as e:
        # BUG FIX: the original dropped into an interactive ipdb session here
        # (debugger residue); report the failure and return 0 instead.
        print(e)
        return 0
    if out_destination:
        print("Saving to %s"%(out_destination))
        im.save(out_destination)
    else:
        return im
def imshow(*args, **kwargs):
    """Show multiple images in one row, optionally with per-image cmaps/titles.

    Usage:
        imshow(img1, title="myPlot")
        imshow(img1, img2, title=['title1', 'title2'])
        imshow(img1, img2, cmap='hot')
        imshow(img1, img2, cmap=['gray', 'Blues'])
    """
    cmap = kwargs.get('cmap', 'gray')
    title = kwargs.get('title', '')
    axis_off = kwargs.get('axis_off', '')
    if not args:
        raise ValueError("No images given to imshow")
    if len(args) == 1:
        plt.title(title)
        plt.imshow(args[0], interpolation='none')
    else:
        n = len(args)
        # Broadcast a scalar cmap/title to one entry per image.
        cmaps = [cmap] * n if type(cmap) == str else cmap
        titles = [title] * n if type(title) == str else title
        plt.figure(figsize=(n * 5, 10))
        for idx, (image, c, t) in enumerate(zip(args, cmaps, titles)):
            plt.subplot(1, n, idx + 1)
            plt.title(t)
            plt.imshow(image, c)
        # Note: as in the original, this only affects the last subplot and
        # only in the multi-image branch.
        if axis_off:
            plt.axis('off')
    plt.show()
def normalize_minmax(data):
    """
    Min-max normalize the array to the [0, 1] range.

    Args:
        data: numpy array (any shape).

    Returns:
        (data - min) / (max - min); an all-zero array of the same shape
        when the input is constant (avoids division by zero).
    """
    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    _min = float(np.min(data))
    _max = float(np.max(data))
    if (_max - _min) != 0:
        img = (data - _min) / (_max - _min)
    else:
        # Constant-valued input: return zeros instead of dividing by zero.
        img = np.zeros_like(data)
    return img
# Functions
def BinMorphoProcessMask(mask, level):
    """
    Clean up a binary tissue mask: closing fills small holes, opening removes
    specks, and a final level-dependent dilation grows the tissue region.
    """
    closed = cv2.morphologyEx(np.array(mask), cv2.MORPH_CLOSE,
                              np.ones((20, 20), dtype=np.uint8))
    opened = cv2.morphologyEx(np.array(closed), cv2.MORPH_OPEN,
                              np.ones((5, 5), dtype=np.uint8))
    # Dilation kernel size depends on the pyramid level the mask came from.
    if level == 2:
        dilate_size = 60
    elif level == 3:
        dilate_size = 35
    else:
        raise ValueError
    kernel = np.ones((dilate_size, dilate_size), dtype=np.uint8)
    return cv2.dilate(opened, kernel, iterations=1)
def get_bbox(cont_img, rgb_image=None):
    """Find the bounding boxes of all external contours in a binary image.

    Args:
        cont_img: binary image whose contours are extracted.
        rgb_image: optional color image on which contours and boxes are drawn.

    Returns:
        (bounding_boxes, rgb_contour): list of cv2.boundingRect tuples and
        the annotated copy of rgb_image (None when rgb_image is None).
    """
    temp_img = np.uint8(cont_img.copy())
    # NOTE(review): the 3-value unpack matches OpenCV 3.x; OpenCV 4.x
    # findContours returns only (contours, hierarchy) — verify the pinned
    # cv2 version before upgrading.
    _,contours, _ = cv2.findContours(temp_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rgb_contour = None
    if rgb_image is not None:
        rgb_contour = rgb_image.copy()
        line_color = (0, 0, 255)  # blue color code
        cv2.drawContours(rgb_contour, contours, -1, line_color, 2)
    bounding_boxes = [cv2.boundingRect(c) for c in contours]
    # NOTE(review): this loop assumes rgb_image was provided — when it is
    # None and contours exist, cv2.rectangle(None, ...) would fail.
    for x, y, h, w in bounding_boxes:
        rgb_contour = cv2.rectangle(rgb_contour,(x,y),(x+h,y+w),(0,255,0),2)
    return bounding_boxes, rgb_contour
def get_all_bbox_masks(mask, stride_factor):
    """
    Return a mask that is 1 inside every contour bounding box of `mask`,
    with each box padded by `stride_factor` pixels and clamped to bounds.
    """
    bbox_mask = np.zeros_like(mask)
    bounding_boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, h, w in bounding_boxes:
        # Pad the box, then clamp exactly as the original did (only when the
        # padded edge strictly exceeds the dimension).
        x_lo = x - stride_factor
        x_hi = x + h + stride_factor
        y_lo = y - stride_factor
        y_hi = y + w + stride_factor
        x_lo = 0 if x_lo < 0 else x_lo
        y_lo = 0 if y_lo < 0 else y_lo
        x_hi = x_size - 1 if x_hi > x_size else x_hi
        y_hi = y_size - 1 if y_hi > y_size else y_hi
        bbox_mask[y_lo:y_hi, x_lo:x_hi] = 1
    return bbox_mask
def get_all_bbox_masks_with_stride(mask, stride_factor):
    """
    Like get_all_bbox_masks, but marks only every `stride_factor`-th pixel
    inside each padded contour bounding box.
    """
    bbox_mask = np.zeros_like(mask)
    bounding_boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, h, w in bounding_boxes:
        # Pad the box, then clamp exactly as the original did (only when the
        # padded edge strictly exceeds the dimension).
        x_lo = x - stride_factor
        x_hi = x + h + stride_factor
        y_lo = y - stride_factor
        y_hi = y + w + stride_factor
        x_lo = 0 if x_lo < 0 else x_lo
        y_lo = 0 if y_lo < 0 else y_lo
        x_hi = x_size - 1 if x_hi > x_size else x_hi
        y_hi = y_size - 1 if y_hi > y_size else y_hi
        bbox_mask[y_lo:y_hi:stride_factor, x_lo:x_hi:stride_factor] = 1
    return bbox_mask
def find_largest_bbox(mask, stride_factor):
    """
    Return a mask marking one bounding box that encompasses all foreground
    (== 1) pixels of `mask`, padded by `stride_factor` on every side.

    NOTE(review): `x` here indexes axis 0 while it is clamped against
    mask.shape[1] — this matches the transposed (X, Y) mask convention used
    elsewhere in this file, but verify for square vs non-square masks.
    """
    y_size, x_size = mask.shape
    x, y = np.where(mask==1)
    bbox_mask = np.zeros_like(mask)
    x_min = np.min(x) - stride_factor
    x_max = np.max(x) + stride_factor
    y_min = np.min(y) - stride_factor
    y_max = np.max(y) + stride_factor
    if x_min < 0:
        x_min = 0
    if y_min < 0:
        y_min = 0
    if x_max > x_size:
        x_max = x_size - 1
    # BUG FIX: the original tested `y_min > y_size` here, so y_max was
    # never clamped to the mask bounds.
    if y_max > y_size:
        y_max = y_size - 1
    bbox_mask[x_min:x_max, y_min:y_max]=1
    return bbox_mask
def TissueMaskGeneration(slide_obj, level, RGB_min=50):
    """
    Boolean tissue mask for a WSI at the given pyramid level.

    A pixel is tissue when its HSV saturation clears the Otsu threshold, it
    is not background (all three RGB channels above their per-channel Otsu
    thresholds), and every channel exceeds RGB_min.
    """
    region = slide_obj.read_region((0, 0), level, slide_obj.level_dimensions[level])
    img_RGB = np.transpose(np.array(region.convert('RGB')), axes=[1, 0, 2])
    img_HSV = rgb2hsv(img_RGB)
    # Background = bright in every RGB channel (per-channel Otsu).
    is_background = np.ones(img_RGB.shape[:2], dtype=bool)
    for ch in range(3):
        is_background &= img_RGB[:, :, ch] > threshold_otsu(img_RGB[:, :, ch])
    tissue_RGB = np.logical_not(is_background)
    tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1])
    # Reject near-black pixels in any channel.
    brightness_ok = np.all(img_RGB > RGB_min, axis=2)
    return tissue_S & tissue_RGB & brightness_ok
def TissueMaskGenerationPatch(patchRGB):
    '''
    Boolean tissue mask for an RGB patch using the fixed PAIP thresholds:
    a pixel counts as tissue when any channel is below its cutoff
    (R < 235, G < 210, B < 235).
    '''
    cutoffs = np.array([235, 210, 235])
    # Broadcast the per-channel cutoffs, then OR across the channel axis.
    return np.any(patchRGB < cutoffs, axis=2)
def TissueMaskGeneration_BIN(slide_obj, level):
    """
    Binary tissue mask from the saturation channel using a fixed threshold
    of 0 (cv2.THRESH_BINARY).
    """
    rgb = np.array(slide_obj.read_region(
        (0, 0), level, slide_obj.level_dimensions[level]).convert('RGB'))
    rgb = np.transpose(rgb, axes=[1, 0, 2])
    saturation = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)[:, :, 1]
    _, tissue_mask = cv2.threshold(saturation, 0, 255, cv2.THRESH_BINARY)
    return np.array(tissue_mask)
def TissueMaskGeneration_BIN_OTSU(slide_obj, level):
    """
    Binary tissue mask from the saturation channel with an Otsu-selected
    threshold (cv2.THRESH_BINARY + cv2.THRESH_OTSU).
    """
    rgb = np.array(slide_obj.read_region(
        (0, 0), level, slide_obj.level_dimensions[level]).convert('RGB'))
    rgb = np.transpose(rgb, axes=[1, 0, 2])
    saturation = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)[:, :, 1]
    _, tissue_mask = cv2.threshold(saturation, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return np.array(tissue_mask)
def labelthreshold(image, threshold=0.5):
    """
    Binarize `image` at `threshold` (>= threshold -> 1, else 0) and return
    the result as uint8. Note: the input array is modified in place.
    """
    above = image >= threshold
    image[above] = 1
    image[~above] = 0
    return np.uint8(image)
def calc_jacc_score(x, y, smoothing=1):
    """
    Smoothed Jaccard (IoU) score between two masks.

    Values equal to 255 are treated as 1. Returns
    (|x & y| + smoothing) / (|x | y| + smoothing).

    BUG FIX: the original rewrote 255 -> 1 in the caller's arrays via
    np.place; this version leaves the inputs untouched.
    """
    x = np.where(x == 255, 1, x)
    y = np.where(y == 255, 1, y)
    numerator = np.sum(x * y)
    denominator = np.sum(np.logical_or(x, y))
    return (numerator + smoothing) / (denominator + smoothing)
# DataLoader Implementation
class WSIStridedPatchDataset(Dataset):
    """
    Data producer that generates all the square grids, e.g. 3x3, of patches,
    from a WSI and its tissue mask, and their corresponding indices with
    respect to the tissue mask.
    """
    def __init__(self, wsi_path, mask_path, label_path=None, image_size=256,
                 normalize=True, flip='NONE', rotate='NONE',
                 level=5, sampling_stride=16, roi_masking=True):
        """
        Initialize the data producer.

        Arguments:
            wsi_path: string, path to WSI file
            mask_path: string, path to mask file in numpy format OR None
            label_path: string, path to ground-truth label mask in tif format,
                        or None (in case of a Normal WSI or at test time)
            image_size: int, size of the square patch extracted at level 0,
                        e.g. 768
            normalize: bool, if True scale the [0, 255] pixel values to [-1, 1]
            flip: string, 'NONE' or 'FLIP_LEFT_RIGHT' indicating the flip type
            rotate: string, 'NONE' or 'ROTATE_90' or 'ROTATE_180' or
                    'ROTATE_270', indicating the rotate type
            level: level at which the WSI tissue mask is extracted
            roi_masking: True: multiply the strided grid with the tissue mask
                         to eliminate white spaces;
                         False: run inference on the entire WSI
            sampling_stride: number of pixels to skip in the tissue mask;
                             effectively the overlap fraction of extracted
                             patches during inference.
                             stride=1 -> consecutive pixels are utilized
                             stride=image_size/pow(2, level) -> non-overlapping
        """
        self._wsi_path = wsi_path
        self._mask_path = mask_path
        self._label_path = label_path
        self._image_size = image_size
        self._normalize = normalize
        self._flip = flip
        self._rotate = rotate
        self._level = level
        self._sampling_stride = sampling_stride
        self._roi_masking = roi_masking
        self._preprocess()
    def _preprocess(self):
        """Open the slide(s), build the tissue mask and the strided grid."""
        self._slide = openslide.OpenSlide(self._wsi_path)
        if self._label_path is not None:
            self._label_slide = openslide.OpenSlide(self._label_path)
        X_slide, Y_slide = self._slide.level_dimensions[0]
        print("Image dimensions: (%d,%d)" % (X_slide, Y_slide))
        factor = self._sampling_stride
        if self._mask_path is not None:
            mask_file_name = os.path.basename(self._mask_path)
            if mask_file_name.endswith('.tiff'):
                mask_obj = openslide.OpenSlide(self._mask_path)
                self._mask = np.array(mask_obj.read_region((0, 0),
                       self._level,
                       mask_obj.level_dimensions[self._level]).convert('L')).T
                np.place(self._mask, self._mask > 0, 255)
        else:
            # Generate tissue mask on the fly
            self._mask = TissueMaskGeneration(self._slide, self._level)
        # morphological operations ensure the holes are filled in tissue mask
        # and minor points are aggregated to form a larger chunk
        self._mask = BinMorphoProcessMask(np.uint8(self._mask), self._level)
        X_mask, Y_mask = self._mask.shape
        print('Mask (%d,%d) and Slide(%d,%d) ' % (X_mask, Y_mask, X_slide, Y_slide))
        if X_slide // X_mask != Y_slide // Y_mask:
            raise Exception('Slide/Mask dimension does not match ,'
                            ' X_slide / X_mask : {} / {},'
                            ' Y_slide / Y_mask : {} / {}'
                            .format(X_slide, X_mask, Y_slide, Y_mask))
        self._resolution = np.round(X_slide * 1.0 / X_mask)
        if not np.log2(self._resolution).is_integer():
            raise Exception('Resolution (X_slide / X_mask) is not power of 2 :'
                            ' {}'.format(self._resolution))
        # all the idces for tissue region from the tissue mask
        self._strided_mask = np.ones_like(self._mask)
        ones_mask = np.zeros_like(self._mask)
        ones_mask[::factor, ::factor] = self._strided_mask[::factor, ::factor]
        if self._roi_masking:
            self._strided_mask = ones_mask * self._mask
        else:
            self._strided_mask = ones_mask
        self._X_idcs, self._Y_idcs = np.where(self._strided_mask)
        self._idcs_num = len(self._X_idcs)
    def __len__(self):
        return self._idcs_num
    def save_scaled_imgs(self):
        """Cache down-scaled versions of the slide (and the label, if any)."""
        scld_dms = self._slide.level_dimensions[self._level]
        self._slide_scaled = self._slide.read_region((0, 0), self._level, scld_dms)
        if self._label_path is not None:
            # NOTE(review): the label is read at hard-coded level 4 while the
            # slide uses self._level — confirm this is intentional.
            self._label_scaled = np.array(self._label_slide.read_region((0, 0), 4, scld_dms).convert('L'))
            np.place(self._label_scaled, self._label_scaled > 0, 255)
    def save_get_mask(self, save_path):
        """Persist the tissue mask as a .npy file."""
        np.save(save_path, self._mask)
    def get_mask(self):
        return self._mask
    def get_strided_mask(self):
        return self._strided_mask
    def __getitem__(self, idx):
        """Return (patch, x, y, label_patch) for the idx-th grid position."""
        x_coord, y_coord = self._X_idcs[idx], self._Y_idcs[idx]
        x_max_dim, y_max_dim = self._slide.level_dimensions[0]
        # Center the level-0 patch on the scaled-up mask coordinate.
        x = int(x_coord * self._resolution - self._image_size // 2)
        y = int(y_coord * self._resolution - self._image_size // 2)
        # If the patch goes out of bounds, clamp it inside the slide.
        # BUG FIX: these bounds previously read the module-level global
        # `image_size` (set only in __main__) instead of self._image_size.
        if x > (x_max_dim - self._image_size):
            x = x_max_dim - self._image_size
        elif x < 0:
            x = 0
        if y > (y_max_dim - self._image_size):
            y = y_max_dim - self._image_size
        elif y < 0:
            y = 0
        # Converting a PIL image to an np array transposes w and h.
        img = np.transpose(self._slide.read_region(
            (x, y), 0, (self._image_size, self._image_size)).convert('RGB'), [1, 0, 2])
        if self._label_path is not None:
            label_img = self._label_slide.read_region(
                (x, y), 0, (self._image_size, self._image_size)).convert('L')
        else:
            label_img = Image.fromarray(np.zeros((self._image_size, self._image_size), dtype=np.uint8))
        # NOTE(review): `img` is a numpy array by this point, so the
        # PIL-style transposes below would fail if flip/rotate were ever
        # enabled — verify before using those options.
        if self._flip == 'FLIP_LEFT_RIGHT':
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            label_img = label_img.transpose(Image.FLIP_LEFT_RIGHT)
        if self._rotate == 'ROTATE_90':
            img = img.transpose(Image.ROTATE_90)
            label_img = label_img.transpose(Image.ROTATE_90)
        if self._rotate == 'ROTATE_180':
            img = img.transpose(Image.ROTATE_180)
            label_img = label_img.transpose(Image.ROTATE_180)
        if self._rotate == 'ROTATE_270':
            img = img.transpose(Image.ROTATE_270)
            label_img = label_img.transpose(Image.ROTATE_270)
        # PIL image: H x W x C
        img = np.array(img, dtype=np.float32)
        label_img = np.array(label_img, dtype=np.uint8)
        np.place(label_img, label_img > 0, 255)
        if self._normalize:
            img = (img - 128.0) / 128.0
        return (img, x, y, label_img)
def load_incep_resnet(model_path):
    """Build the Inception-ResNet-v2 U-Net (variable input size) and load
    the weights stored at `model_path`."""
    net = get_inception_resnet_v2_unet_softmax((None, None), weights=None)
    net.load_weights(model_path)
    print ("Loaded Model Weights %s" % model_path)
    return net
def load_unet_densenet(model_path):
    """Build the DenseNet-121 U-Net (variable input size) and load the
    weights stored at `model_path`."""
    net = unet_densenet121((None, None), weights=None)
    net.load_weights(model_path)
    print ("Loaded Model Weights %s" % model_path)
    return net
def load_deeplabv3(model_path, OS):
    """Build a DeepLabv3+ (Xception backbone, 2-class softmax) and load the
    weights stored at `model_path`.

    NOTE(review): relies on the module-level global `image_size`, which is
    only assigned in the __main__ block — calling this from an import
    context raises NameError.
    """
    model = Deeplabv3(input_shape=(image_size, image_size, 3),weights=None,classes=2,activation='softmax',backbone='xception',OS=OS)
    model.load_weights(model_path)
    print ("Loaded Model Weights %s" % model_path)
    return model
if __name__ == '__main__':
    #CONFIG
    CONFIG = {
        "in_folder": "/Path", #Path to folder containing folders of each input images
        "label": "True", #If true, the ground truth for each sample must be place in the adjacent to the input image. Output images will include the label for easy comparison
        "out_folder": "/Path", #Path to folder to output results
        "memmap_folder": "/Path", #Path to folder for storing numpy memmap arrays
        "GPU": "0",
        "batch_size": 32,
        "patch_size": 1024,
        "stride": 512,
        "models": {
            'id1': {"model_type": "inception", "model_path": "/Path.h5"},
            'id2': {"model_type": "densenet", "model_path": "/Path.h5"},
            'id3': {"model_type": "deeplab", "model_path": "/Path.h5"},
            'id4': {"model_type": "ensemble"},
        },
        "models_to_save": ['id4'],
    }
    batch_size = CONFIG["batch_size"]
    image_size = CONFIG["patch_size"]
    sampling_stride = CONFIG["stride"]
    out_dir_root = CONFIG["out_folder"]
    #Model loading
    # Pin the visible GPU and bind a TF1.x session to Keras.
    os.environ["CUDA_VISIBLE_DEVICES"] = CONFIG["GPU"]
    core_config = tf.ConfigProto()
    core_config.gpu_options.allow_growth = False
    # core_config.gpu_options.per_process_gpu_memory_fraction=0.47
    session =tf.Session(config=core_config)
    K.set_session(session)
    # NOTE(review): raises IndexError when no "infer-*" folder exists, and
    # last_infer_path_id is never used afterwards.
    infer_paths = glob.glob(os.path.join(out_dir_root,"infer-*"))
    infer_paths.sort()
    last_infer_path_id = int(infer_paths[-1].split('-')[-1])
    model_dict= {}
    # NOTE(review): iterating a dict yields keys only, so this tuple unpack
    # raises ValueError at runtime — CONFIG["models"].items() was most
    # likely intended. Even then, the 'id4' (ensemble) entry has no
    # "model_path" key, so v["model_path"] would raise KeyError for it.
    for k,v in CONFIG["models"]:
        model_type = v["model_type"]
        model_path = v["model_path"]
        if model_type == 'inception':
            model_dict[k] = load_incep_resnet(model_path)
        elif model_type == 'densenet':
            model_dict[k] = load_unet_densenet(model_path)
        elif model_type == 'deeplab':
            model_dict[k] = load_deeplabv3(model_path,16)
        elif model_type == 'ensemble':
            model_dict[k] = 'ensemble'
    model_keys = list(model_dict.keys())
    # NOTE(review): 'train_43' matches no key in CONFIG["models"]; the
    # averaged ensemble prediction is accumulated under this separate key.
    ensemble_key = 'train_43'
    models_to_save = CONFIG["models_to_save"]
    out_dir_dict = {}
    # Create one output folder per saved model; when a folder already
    # exists and is non-empty, pause and wait for user confirmation.
    for key in models_to_save:
        out_dir_dict[key] = os.path.join(out_dir_root,key)
        try:
            os.makedirs(out_dir_dict[key])
            print("Saving to %s" % (out_dir_dict[key]))
        except FileExistsError:
            if os.listdir(out_dir_dict[key]) != []:
                print("Out folder exists and is non-empty, continue?")
                print(out_dir_dict[key])
                input()
    #Stitcher
    start_time = time.time()
    # Per-slide inference: tile the WSI, predict each tile with every model,
    # stitch predictions by averaging overlaps, threshold and save.
    sample_ids = os.listdir(CONFIG["in_folder"])
    for i,sample_id in enumerate(sample_ids):
        print(i+1,'/', len(sample_ids),sample_id)
        sample_dir = os.path.join(CONFIG["in_folder"],sample_id)
        wsi_path = glob.glob(os.path.join(sample_dir,'*.svs'))[0]
        if CONFIG["label"]== "True":
            label_path = glob.glob(os.path.join(sample_dir,'*viable*.tiff'))[0]
        else:
            label_path=None
        wsi_obj = openslide.OpenSlide(wsi_path)
        x_max_dim,y_max_dim = wsi_obj.level_dimensions[0]
        count_map = np.zeros(wsi_obj.level_dimensions[0],dtype='uint8')
        prd_im_fll_dict = {}
        # Disk-backed full-resolution prediction accumulator per saved model.
        for key in models_to_save:
            prd_im_fll_dict[key] = np.memmap(os.path.join(CONFIG["memmap_folder"],'%s.dat'%(key)), dtype=np.float32,mode='w+', shape=(wsi_obj.level_dimensions[0]))
        # NOTE(review): `level` stays undefined (NameError below) when the
        # slide has fewer than 3 or more than 4 pyramid levels.
        if len(wsi_obj.level_dimensions) == 3:
            level = 2
        elif len(wsi_obj.level_dimensions) == 4:
            level = 3
        scld_dms = wsi_obj.level_dimensions[level]
        scale_sampling_stride = sampling_stride//int(wsi_obj.level_downsamples[level])
        print("Level %d , stride %d, scale stride %d" %(level,sampling_stride, scale_sampling_stride))
        # Down-scales a level-0 map to the chosen pyramid level (transposed).
        scale = lambda x: cv2.resize(x,tuple(reversed(scld_dms))).T
        mask_path = None
        start_time = time.time()
        dataset_obj = WSIStridedPatchDataset(wsi_path,
                                            mask_path,
                                            label_path,
                                            image_size=image_size,
                                            normalize=True,
                                            flip=None, rotate=None,
                                            level=level, sampling_stride=scale_sampling_stride, roi_masking=True)
        dataloader = DataLoader(dataset_obj, batch_size=batch_size, num_workers=batch_size, drop_last=True)
        dataset_obj.save_scaled_imgs()
        out_file = sample_id
        print(dataset_obj.get_mask().shape)
        st_im = dataset_obj.get_strided_mask()
        mask_im = np.dstack([dataset_obj.get_mask().T]*3).astype('uint8')*255
        st_im = np.dstack([dataset_obj.get_strided_mask().T]*3).astype('uint8')*255
        im_im = np.array(dataset_obj._slide_scaled.convert('RGB'))
        # Overlay the tissue / strided masks on the scaled slide for QA.
        ov_im = mask_im/2 + im_im/2
        ov_im_stride = st_im/2 + im_im/2
        for key in models_to_save:
            imsave(ov_im.astype('uint8'),mask_im,ov_im_stride,(im_im), out=os.path.join(out_dir_dict[key],'mask_'+out_file+'.png'))
        print("Total iterations: %d %d" % (dataloader.__len__(), dataloader.dataset.__len__()))
        # NOTE(review): this inner `i` shadows the sample index of the
        # outer loop above.
        for i,(data, xes, ys, label) in enumerate(dataloader):
            tmp_pls= lambda x: x + image_size
            tmp_mns= lambda x: x
            # NOTE(review): duplicated line below — harmless but redundant.
            image_patches = data.cpu().data.numpy()
            image_patches = data.cpu().data.numpy()
            pred_map_dict = {}
            pred_map_dict[ensemble_key] = 0
            # NOTE(review): model_keys can include the 'ensemble' placeholder
            # entry, whose value is a plain string with no .predict — that
            # entry must be excluded for this loop to run.
            for key in model_keys:
                pred_map_dict[key] = model_dict[key].predict(image_patches,verbose=0,batch_size=8)
                # pred_map_dict[key] = model_dict[key].predict(image_patches,verbose=0,batch_size=1)
                pred_map_dict[ensemble_key]+=pred_map_dict[key]
            pred_map_dict[ensemble_key]/=len(model_keys)
            actual_batch_size = image_patches.shape[0]
            for j in range(actual_batch_size):
                x = int(xes[j])
                y = int(ys[j])
                # Undo the (img - 128)/128 normalization for tissue masking.
                wsi_img = image_patches[j]*128+128
                patch_mask = TissueMaskGenerationPatch(wsi_img)
                #CRF
                # prediction = red_map[j,:,:,:]
                # prediction = post_process_crf(wsi_img,prediction,2)
                for key in models_to_save:
                    prediction = pred_map_dict[key][j,:,:,1]
                    prediction*=patch_mask
                    prd_im_fll_dict[key][tmp_mns(x):tmp_pls(x),tmp_mns(y):tmp_pls(y)] += prediction
                # NOTE(review): count_map is uint8; more than 255 overlapping
                # patches at one pixel would wrap around.
                count_map[tmp_mns(x):tmp_pls(x),tmp_mns(y):tmp_pls(y)] += np.ones((image_size,image_size),dtype='uint8')
            if (i+1)%100==0 or i==0 or i<10:
                print("Completed %i Time elapsed %.2f min | Max count %d "%(i,(time.time()-start_time)/60,count_map.max()))
        print("Fully completed %i Time elapsed %.2f min | Max count %d "%(i,(time.time()-start_time)/60,count_map.max()))
        start_time = time.time()
        print("\t Dividing by count_map")
        # Average overlapping predictions; zero counts become 1 to avoid 0/0.
        np.place(count_map, count_map==0, 1)
        for key in models_to_save:
            prd_im_fll_dict[key]/=count_map
        # scaled_count_map = scale(count_map)
        # scaled_count_map = scaled_count_map*255//scaled_count_map.max()
        del count_map
        gc.collect()
        print("\t Scaling prediciton")
        prob_map_dict = {}
        for key in models_to_save:
            prob_map_dict[key] = scale(prd_im_fll_dict[key])
            prob_map_dict[key] = (prob_map_dict[key]*255).astype('uint8')
        print("\t Thresholding prediction")
        threshold = 0.5
        # Binarize the averaged probability maps in place at 0.5.
        for key in models_to_save:
            np.place(prd_im_fll_dict[key],prd_im_fll_dict[key]>=threshold, 1)
            np.place(prd_im_fll_dict[key],prd_im_fll_dict[key]<threshold, 0)
        print("\t Saving ground truth")
        save_model_keys = models_to_save
        for key in save_model_keys:
            print("\t Saving to %s %s" %(out_file,key))
            tifffile.imsave(os.path.join(out_dir_dict[key],out_file)+'.tif', prd_im_fll_dict[key].T, compress=9)
        print("\t Calculated in %f" % ((time.time() - start_time)/60))
        start_time = time.time()
        scaled_prd_im_fll_dict = {}
        for key in models_to_save:
            scaled_prd_im_fll_dict[key] = scale(prd_im_fll_dict[key])
        del prd_im_fll_dict
        gc.collect()
        # mask_im = np.dstack([dataset_obj.get_mask().T]*3).astype('uint8')*255
        mask_im = np.dstack([TissueMaskGenerationPatch(im_im)]*3).astype('uint8')*255
        # Compose and save the reference visualization per model.
        for key in models_to_save:
            mask_im[:,:,0] = scaled_prd_im_fll_dict[key]*255
            ov_prob_stride = st_im + (np.dstack([prob_map_dict[key]]*3)*255).astype('uint8')
            np.place(ov_prob_stride,ov_prob_stride>255,255)
            imsave(mask_im,ov_prob_stride,prob_map_dict[key],scaled_prd_im_fll_dict[key],im_im,out=os.path.join(out_dir_dict[key],'ref_'+out_file)+'.png')
    # for key in models_to_save:
    #     with open(os.path.join(out_dir_dict[key],'jacc_scores.txt'), 'a') as f:
    #         f.write("Total,%f\n" %(total_jacc_score_dict[key]/len(sample_ids)))
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.sum",
"ipdb.set_trace",
"numpy.ones",
"tensorflow.ConfigProto",
"gc.collect",
"matplotlib.pyplot.figure",
"cv2.rectangle",
"numpy.round",
"os.path.join",
"openslide.OpenSlide",
"models.deeplabv3p_original.Deeplabv3",
"numpy.zeros_like"... | [((1430, 1447), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1444, 1447), True, 'import numpy as np\n'), ((1448, 1462), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1459, 1462), False, 'import random\n'), ((1463, 1484), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (1481, 1484), True, 'import tensorflow as tf\n'), ((3596, 3606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3604, 3606), True, 'import matplotlib.pyplot as plt\n'), ((4024, 4057), 'numpy.ones', 'np.ones', (['(20, 20)'], {'dtype': 'np.uint8'}), '((20, 20), dtype=np.uint8)\n', (4031, 4057), True, 'import numpy as np\n'), ((4158, 4189), 'numpy.ones', 'np.ones', (['(5, 5)'], {'dtype': 'np.uint8'}), '((5, 5), dtype=np.uint8)\n', (4165, 4189), True, 'import numpy as np\n'), ((4465, 4509), 'cv2.dilate', 'cv2.dilate', (['image_open', 'kernel'], {'iterations': '(1)'}), '(image_open, kernel, iterations=1)\n', (4475, 4509), False, 'import cv2\n'), ((4629, 4699), 'cv2.findContours', 'cv2.findContours', (['temp_img', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(temp_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4645, 4699), False, 'import cv2\n'), ((5246, 5265), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5259, 5265), True, 'import numpy as np\n'), ((5925, 5944), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5938, 5944), True, 'import numpy as np\n'), ((6674, 6693), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (6682, 6693), True, 'import numpy as np\n'), ((6708, 6727), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (6721, 6727), True, 'import numpy as np\n'), ((7370, 7386), 'skimage.color.rgb2hsv', 'rgb2hsv', (['img_RGB'], {}), '(img_RGB)\n', (7377, 7386), False, 'from skimage.color import rgb2hsv\n'), ((7617, 7675), 'numpy.logical_not', 'np.logical_not', (['(background_R & background_G & background_B)'], {}), 
'(background_R & background_G & background_B)\n', (7631, 7675), True, 'import numpy as np\n'), ((8668, 8708), 'cv2.cvtColor', 'cv2.cvtColor', (['img_RGB', 'cv2.COLOR_BGR2HSV'], {}), '(img_RGB, cv2.COLOR_BGR2HSV)\n', (8680, 8708), False, 'import cv2\n'), ((8758, 8805), 'cv2.threshold', 'cv2.threshold', (['img_S', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img_S, 0, 255, cv2.THRESH_BINARY)\n', (8771, 8805), False, 'import cv2\n'), ((8817, 8838), 'numpy.array', 'np.array', (['tissue_mask'], {}), '(tissue_mask)\n', (8825, 8838), True, 'import numpy as np\n'), ((9121, 9161), 'cv2.cvtColor', 'cv2.cvtColor', (['img_RGB', 'cv2.COLOR_BGR2HSV'], {}), '(img_RGB, cv2.COLOR_BGR2HSV)\n', (9133, 9161), False, 'import cv2\n'), ((9211, 9276), 'cv2.threshold', 'cv2.threshold', (['img_S', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_S, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (9224, 9276), False, 'import cv2\n'), ((9286, 9307), 'numpy.array', 'np.array', (['tissue_mask'], {}), '(tissue_mask)\n', (9294, 9307), True, 'import numpy as np\n'), ((9355, 9393), 'numpy.place', 'np.place', (['image', '(image >= threshold)', '(1)'], {}), '(image, image >= threshold, 1)\n', (9363, 9393), True, 'import numpy as np\n'), ((9395, 9432), 'numpy.place', 'np.place', (['image', '(image < threshold)', '(0)'], {}), '(image, image < threshold, 0)\n', (9403, 9432), True, 'import numpy as np\n'), ((9441, 9456), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (9449, 9456), True, 'import numpy as np\n'), ((9572, 9585), 'numpy.sum', 'np.sum', (['(x * y)'], {}), '(x * y)\n', (9578, 9585), True, 'import numpy as np\n'), ((17970, 18034), 'models.seg_models.get_inception_resnet_v2_unet_softmax', 'get_inception_resnet_v2_unet_softmax', (['(None, None)'], {'weights': 'None'}), '((None, None), weights=None)\n', (18006, 18034), False, 'from models.seg_models import get_inception_resnet_v2_unet_softmax, unet_densenet121\n'), ((18187, 18231), 
'models.seg_models.unet_densenet121', 'unet_densenet121', (['(None, None)'], {'weights': 'None'}), '((None, None), weights=None)\n', (18203, 18231), False, 'from models.seg_models import get_inception_resnet_v2_unet_softmax, unet_densenet121\n'), ((18384, 18513), 'models.deeplabv3p_original.Deeplabv3', 'Deeplabv3', ([], {'input_shape': '(image_size, image_size, 3)', 'weights': 'None', 'classes': '(2)', 'activation': '"""softmax"""', 'backbone': '"""xception"""', 'OS': 'OS'}), "(input_shape=(image_size, image_size, 3), weights=None, classes=2,\n activation='softmax', backbone='xception', OS=OS)\n", (18393, 18513), False, 'from models.deeplabv3p_original import Deeplabv3\n'), ((19765, 19781), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (19779, 19781), True, 'import tensorflow as tf\n'), ((19911, 19941), 'tensorflow.Session', 'tf.Session', ([], {'config': 'core_config'}), '(config=core_config)\n', (19921, 19941), True, 'import tensorflow as tf\n'), ((19947, 19969), 'tensorflow.keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (19960, 19969), True, 'from tensorflow.keras import backend as K\n'), ((21221, 21232), 'time.time', 'time.time', ([], {}), '()\n', (21230, 21232), False, 'import time\n'), ((21251, 21282), 'os.listdir', 'os.listdir', (["CONFIG['in_folder']"], {}), "(CONFIG['in_folder'])\n", (21261, 21282), False, 'import os\n'), ((2301, 2334), 'numpy.concatenate', 'np.concatenate', (['args_list'], {'axis': '(1)'}), '(args_list, axis=1)\n', (2315, 2334), True, 'import numpy as np\n'), ((3708, 3720), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3714, 3720), True, 'import numpy as np\n'), ((3742, 3754), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3748, 3754), True, 'import numpy as np\n'), ((3845, 3864), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (3858, 3864), True, 'import numpy as np\n'), ((4093, 4107), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (4101, 4107), True, 
'import numpy as np\n'), ((4224, 4245), 'numpy.array', 'np.array', (['image_close'], {}), '(image_close)\n', (4232, 4245), True, 'import numpy as np\n'), ((4312, 4345), 'numpy.ones', 'np.ones', (['(60, 60)'], {'dtype': 'np.uint8'}), '((60, 60), dtype=np.uint8)\n', (4319, 4345), True, 'import numpy as np\n'), ((4852, 4910), 'cv2.drawContours', 'cv2.drawContours', (['rgb_contour', 'contours', '(-1)', 'line_color', '(2)'], {}), '(rgb_contour, contours, -1, line_color, 2)\n', (4868, 4910), False, 'import cv2\n'), ((4933, 4952), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (4949, 4952), False, 'import cv2\n'), ((5032, 5098), 'cv2.rectangle', 'cv2.rectangle', (['rgb_contour', '(x, y)', '(x + h, y + w)', '(0, 255, 0)', '(2)'], {}), '(rgb_contour, (x, y), (x + h, y + w), (0, 255, 0), 2)\n', (5045, 5098), False, 'import cv2\n'), ((6740, 6749), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6746, 6749), True, 'import numpy as np\n'), ((6778, 6787), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6784, 6787), True, 'import numpy as np\n'), ((6816, 6825), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (6822, 6825), True, 'import numpy as np\n'), ((6854, 6863), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (6860, 6863), True, 'import numpy as np\n'), ((7425, 7457), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img_RGB[:, :, 0]'], {}), '(img_RGB[:, :, 0])\n', (7439, 7457), False, 'from skimage.filters import threshold_otsu\n'), ((7496, 7528), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img_RGB[:, :, 1]'], {}), '(img_RGB[:, :, 1])\n', (7510, 7528), False, 'from skimage.filters import threshold_otsu\n'), ((7567, 7599), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img_RGB[:, :, 2]'], {}), '(img_RGB[:, :, 2])\n', (7581, 7599), False, 'from skimage.filters import threshold_otsu\n'), ((7710, 7742), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img_HSV[:, :, 1]'], {}), '(img_HSV[:, :, 1])\n', (7724, 7742), False, 'from skimage.filters 
import threshold_otsu\n'), ((8343, 8362), 'numpy.logical_or', 'np.logical_or', (['g', 'b'], {}), '(g, b)\n', (8356, 8362), True, 'import numpy as np\n'), ((9526, 9554), 'numpy.place', 'np.place', (['var', '(var == 255)', '(1)'], {}), '(var, var == 255, 1)\n', (9534, 9554), True, 'import numpy as np\n'), ((9609, 9628), 'numpy.logical_or', 'np.logical_or', (['x', 'y'], {}), '(x, y)\n', (9622, 9628), True, 'import numpy as np\n'), ((12154, 12189), 'openslide.OpenSlide', 'openslide.OpenSlide', (['self._wsi_path'], {}), '(self._wsi_path)\n', (12173, 12189), False, 'import openslide\n'), ((14022, 14054), 'numpy.round', 'np.round', (['(X_slide * 1.0 / X_mask)'], {}), '(X_slide * 1.0 / X_mask)\n', (14030, 14054), True, 'import numpy as np\n'), ((14359, 14383), 'numpy.ones_like', 'np.ones_like', (['self._mask'], {}), '(self._mask)\n', (14371, 14383), True, 'import numpy as np\n'), ((14404, 14429), 'numpy.zeros_like', 'np.zeros_like', (['self._mask'], {}), '(self._mask)\n', (14417, 14429), True, 'import numpy as np\n'), ((15146, 15174), 'numpy.where', 'np.where', (['self._strided_mask'], {}), '(self._strided_mask)\n', (15154, 15174), True, 'import numpy as np\n'), ((15741, 15771), 'numpy.save', 'np.save', (['save_path', 'self._mask'], {}), '(save_path, self._mask)\n', (15748, 15771), True, 'import numpy as np\n'), ((17678, 17709), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (17686, 17709), True, 'import numpy as np\n'), ((17730, 17765), 'numpy.array', 'np.array', (['label_img'], {'dtype': 'np.uint8'}), '(label_img, dtype=np.uint8)\n', (17738, 17765), True, 'import numpy as np\n'), ((17774, 17813), 'numpy.place', 'np.place', (['label_img', '(label_img > 0)', '(255)'], {}), '(label_img, label_img > 0, 255)\n', (17782, 17813), True, 'import numpy as np\n'), ((20000, 20037), 'os.path.join', 'os.path.join', (['out_dir_root', '"""infer-*"""'], {}), "(out_dir_root, 'infer-*')\n", (20012, 20037), False, 'import os\n'), ((20826, 20857), 
'os.path.join', 'os.path.join', (['out_dir_root', 'key'], {}), '(out_dir_root, key)\n', (20838, 20857), False, 'import os\n'), ((21400, 21444), 'os.path.join', 'os.path.join', (["CONFIG['in_folder']", 'sample_id'], {}), "(CONFIG['in_folder'], sample_id)\n", (21412, 21444), False, 'import os\n'), ((21688, 21717), 'openslide.OpenSlide', 'openslide.OpenSlide', (['wsi_path'], {}), '(wsi_path)\n', (21707, 21717), False, 'import openslide\n'), ((21796, 21848), 'numpy.zeros', 'np.zeros', (['wsi_obj.level_dimensions[0]'], {'dtype': '"""uint8"""'}), "(wsi_obj.level_dimensions[0], dtype='uint8')\n", (21804, 21848), True, 'import numpy as np\n'), ((22632, 22643), 'time.time', 'time.time', ([], {}), '()\n', (22641, 22643), False, 'import time\n'), ((23142, 23232), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_obj'], {'batch_size': 'batch_size', 'num_workers': 'batch_size', 'drop_last': '(True)'}), '(dataset_obj, batch_size=batch_size, num_workers=batch_size,\n drop_last=True)\n', (23152, 23232), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((25775, 25786), 'time.time', 'time.time', ([], {}), '()\n', (25784, 25786), False, 'import time\n'), ((25838, 25876), 'numpy.place', 'np.place', (['count_map', '(count_map == 0)', '(1)'], {}), '(count_map, count_map == 0, 1)\n', (25846, 25876), True, 'import numpy as np\n'), ((26104, 26116), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26114, 26116), False, 'import gc\n'), ((26995, 27006), 'time.time', 'time.time', ([], {}), '()\n', (27004, 27006), False, 'import time\n'), ((27186, 27198), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27196, 27198), False, 'import gc\n'), ((1265, 1276), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1274, 1276), False, 'import os\n'), ((2101, 2130), 'numpy.dstack', 'np.dstack', (['([args_list[i]] * 3)'], {}), '([args_list[i]] * 3)\n', (2110, 2130), True, 'import numpy as np\n'), ((2364, 2390), 'numpy.uint8', 'np.uint8', (['concatenated_arr'], {}), '(concatenated_arr)\n', (2372, 
2390), True, 'import numpy as np\n'), ((2460, 2476), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (2474, 2476), False, 'import ipdb\n'), ((3153, 3169), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3162, 3169), True, 'import matplotlib.pyplot as plt\n'), ((3178, 3219), 'matplotlib.pyplot.imshow', 'plt.imshow', (['args[0]'], {'interpolation': '"""none"""'}), "(args[0], interpolation='none')\n", (3188, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(n * 5, 10)'}), '(figsize=(n * 5, 10))\n', (3382, 3403), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3426), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (3423, 3426), False, 'from six.moves import range\n'), ((4384, 4417), 'numpy.ones', 'np.ones', (['(35, 35)'], {'dtype': 'np.uint8'}), '((35, 35), dtype=np.uint8)\n', (4391, 4417), True, 'import numpy as np\n'), ((12272, 12309), 'openslide.OpenSlide', 'openslide.OpenSlide', (['self._label_path'], {}), '(self._label_path)\n', (12291, 12309), False, 'import openslide\n'), ((12567, 12600), 'os.path.basename', 'os.path.basename', (['self._mask_path'], {}), '(self._mask_path)\n', (12583, 12600), False, 'import os\n'), ((13294, 13314), 'numpy.uint8', 'np.uint8', (['self._mask'], {}), '(self._mask)\n', (13302, 13314), True, 'import numpy as np\n'), ((15630, 15687), 'numpy.place', 'np.place', (['self._label_scaled', '(self._label_scaled > 0)', '(255)'], {}), '(self._label_scaled, self._label_scaled > 0, 255)\n', (15638, 15687), True, 'import numpy as np\n'), ((20882, 20912), 'os.makedirs', 'os.makedirs', (['out_dir_dict[key]'], {}), '(out_dir_dict[key])\n', (20893, 20912), False, 'import os\n'), ((24736, 24760), 'six.moves.range', 'range', (['actual_batch_size'], {}), '(actual_batch_size)\n', (24741, 24760), False, 'from six.moves import range\n'), ((26472, 26540), 'numpy.place', 'np.place', (['prd_im_fll_dict[key]', '(prd_im_fll_dict[key] >= threshold)', 
'(1)'], {}), '(prd_im_fll_dict[key], prd_im_fll_dict[key] >= threshold, 1)\n', (26480, 26540), True, 'import numpy as np\n'), ((26550, 26617), 'numpy.place', 'np.place', (['prd_im_fll_dict[key]', '(prd_im_fll_dict[key] < threshold)', '(0)'], {}), '(prd_im_fll_dict[key], prd_im_fll_dict[key] < threshold, 0)\n', (26558, 26617), True, 'import numpy as np\n'), ((27568, 27619), 'numpy.place', 'np.place', (['ov_prob_stride', '(ov_prob_stride > 255)', '(255)'], {}), '(ov_prob_stride, ov_prob_stride > 255, 255)\n', (27576, 27619), True, 'import numpy as np\n'), ((3440, 3464), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n', '(i + 1)'], {}), '(1, n, i + 1)\n', (3451, 3464), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3492), 'matplotlib.pyplot.title', 'plt.title', (['title[i]'], {}), '(title[i])\n', (3482, 3492), True, 'import matplotlib.pyplot as plt\n'), ((3505, 3533), 'matplotlib.pyplot.imshow', 'plt.imshow', (['args[i]', 'cmap[i]'], {}), '(args[i], cmap[i])\n', (3515, 3533), True, 'import matplotlib.pyplot as plt\n'), ((12677, 12713), 'openslide.OpenSlide', 'openslide.OpenSlide', (['self._mask_path'], {}), '(self._mask_path)\n', (12696, 12713), False, 'import openslide\n'), ((12912, 12953), 'numpy.place', 'np.place', (['self._mask', '(self._mask > 0)', '(255)'], {}), '(self._mask, self._mask > 0, 255)\n', (12920, 12953), True, 'import numpy as np\n'), ((16908, 16970), 'numpy.zeros', 'np.zeros', (['(self._image_size, self._image_size)'], {'dtype': 'np.uint8'}), '((self._image_size, self._image_size), dtype=np.uint8)\n', (16916, 16970), True, 'import numpy as np\n'), ((21473, 21506), 'os.path.join', 'os.path.join', (['sample_dir', '"""*.svs"""'], {}), "(sample_dir, '*.svs')\n", (21485, 21506), False, 'import os\n'), ((21957, 22010), 'os.path.join', 'os.path.join', (["CONFIG['memmap_folder']", "('%s.dat' % key)"], {}), "(CONFIG['memmap_folder'], '%s.dat' % key)\n", (21969, 22010), False, 'import os\n'), ((25403, 25451), 'numpy.ones', 'np.ones', 
(['(image_size, image_size)'], {'dtype': '"""uint8"""'}), "((image_size, image_size), dtype='uint8')\n", (25410, 25451), True, 'import numpy as np\n'), ((3574, 3589), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3582, 3589), True, 'import matplotlib.pyplot as plt\n'), ((14070, 14095), 'numpy.log2', 'np.log2', (['self._resolution'], {}), '(self._resolution)\n', (14077, 14095), True, 'import numpy as np\n'), ((21016, 21045), 'os.listdir', 'os.listdir', (['out_dir_dict[key]'], {}), '(out_dir_dict[key])\n', (21026, 21045), False, 'import os\n'), ((21582, 21623), 'os.path.join', 'os.path.join', (['sample_dir', '"""*viable*.tiff"""'], {}), "(sample_dir, '*viable*.tiff')\n", (21594, 21623), False, 'import os\n'), ((23805, 23865), 'os.path.join', 'os.path.join', (['out_dir_dict[key]', "('mask_' + out_file + '.png')"], {}), "(out_dir_dict[key], 'mask_' + out_file + '.png')\n", (23817, 23865), False, 'import os\n'), ((26818, 26859), 'os.path.join', 'os.path.join', (['out_dir_dict[key]', 'out_file'], {}), '(out_dir_dict[key], out_file)\n', (26830, 26859), False, 'import os\n'), ((26943, 26954), 'time.time', 'time.time', ([], {}), '()\n', (26952, 26954), False, 'import time\n'), ((27715, 27765), 'os.path.join', 'os.path.join', (['out_dir_dict[key]', "('ref_' + out_file)"], {}), "(out_dir_dict[key], 'ref_' + out_file)\n", (27727, 27765), False, 'import os\n'), ((25709, 25720), 'time.time', 'time.time', ([], {}), '()\n', (25718, 25720), False, 'import time\n'), ((27501, 27536), 'numpy.dstack', 'np.dstack', (['([prob_map_dict[key]] * 3)'], {}), '([prob_map_dict[key]] * 3)\n', (27510, 27536), True, 'import numpy as np\n'), ((25574, 25585), 'time.time', 'time.time', ([], {}), '()\n', (25583, 25585), False, 'import time\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
# Skip the whole module when the optional healpy dependency is missing.
pytest.importorskip("healpy")
# One log-spaced non-spatial axis shared by several geometry fixtures below.
axes1 = [MapAxis(np.logspace(0.0, 3.0, 3), interp="log")]
# All-sky HEALPix geometries as (nside, nested, frame, region, axes) tuples.
hpx_test_allsky_geoms = [
    (8, False, "galactic", None, None),
    (8, False, "galactic", None, axes1),
    ([4, 8], False, "galactic", None, axes1),
]
# Partial-sky (DISK region) geometries, same tuple layout as above.
hpx_test_partialsky_geoms = [
    ([4, 8], False, "galactic", "DISK(110.,75.,30.)", axes1),
    (8, False, "galactic", "DISK(110.,75.,10.)", [MapAxis(np.logspace(0.0, 3.0, 4))]),
    (
        8,
        False,
        "galactic",
        "DISK(110.,75.,10.)",
        [
            MapAxis(np.logspace(0.0, 3.0, 4), name="axis0"),
            MapAxis(np.logspace(0.0, 2.0, 3), name="axis1"),
        ],
    ),
]
# Combined parameter list used by most parametrized tests in this module.
hpx_test_geoms = hpx_test_allsky_geoms + hpx_test_partialsky_geoms
def create_map(nside, nested, frame, region, axes):
    """Build an empty ``HpxNDMap`` from the given HEALPix geometry parameters."""
    geom = HpxGeom(nside=nside, nest=nested, frame=frame, region=region, axes=axes)
    return HpxNDMap(geom)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_init(nside, nested, frame, region, axes):
    """Construct maps with and without explicit data and check shape and content."""
    geom = HpxGeom(nside=nside, nest=nested, frame=frame, region=region, axes=axes)
    # Expected data shape: non-spatial axis bins (reversed), then the pixel axis.
    npix = int(np.max(geom.npix))
    axis_bins = [ax.nbin for ax in axes] if axes else []
    expected_shape = tuple(axis_bins[::-1]) + (npix,)
    values = np.random.uniform(0, 1, expected_shape)
    empty_map = HpxNDMap(geom)
    assert empty_map.data.shape == values.shape
    filled_map = HpxNDMap(geom, values)
    assert_allclose(filled_map.data, values)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_create(nside, nested, frame, region, axes):
    # Smoke test: map construction must not raise for any test geometry.
    create_map(nside, nested, frame, region, axes)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_read_write(tmp_path, nside, nested, frame, region, axes):
    """Round-trip a map through FITS (sparse and dense) and compare contents.

    Reads back through ``HpxNDMap.read``, ``HpxMap.read`` and the generic
    ``Map.read`` entry points and checks the data agree with the original.
    """
    path = tmp_path / "tmp.fits"
    m = create_map(nside, nested, frame, region, axes)
    m.write(path, sparse=True, overwrite=True)
    m2 = HpxNDMap.read(path)
    m4 = Map.read(path, map_type="hpx")
    msk = np.ones_like(m2.data[...], dtype=bool)
    assert_allclose(m.data[...][msk], m2.data[...][msk])
    assert_allclose(m.data[...][msk], m4.data[...][msk])
    m.write(path, overwrite=True)
    m2 = HpxNDMap.read(path)
    m3 = HpxMap.read(path, map_type="hpx")
    m4 = Map.read(path, map_type="hpx")
    assert_allclose(m.data[...][msk], m2.data[...][msk])
    assert_allclose(m.data[...][msk], m3.data[...][msk])
    assert_allclose(m.data[...][msk], m4.data[...][msk])
    # Specify alternate HDU name for IMAGE and BANDS table; reading must still
    # succeed through all three entry points. The results were unused before,
    # so call without binding them (fixes dead-assignment lint).
    m.write(path, sparse=True, hdu="IMAGE", hdu_bands="TEST", overwrite=True)
    HpxNDMap.read(path)
    Map.read(path)
    Map.read(path, map_type="hpx")
def test_hpxmap_read_write_fgst(tmp_path):
    """Write maps in Fermi-LAT ("fgst") conventions and verify FITS structure.

    Checks the HDU names, HPX convention keyword and first column name for
    both the counts-cube and the template conventions, then re-reads the file.
    """
    path = tmp_path / "tmp.fits"
    axis = MapAxis.from_bounds(100.0, 1000.0, 4, name="energy", unit="MeV")
    # Test Counts Cube
    m = create_map(8, False, "galactic", None, [axis])
    m.write(path, format="fgst-ccube", overwrite=True)
    with fits.open(path, memmap=False) as hdulist:
        assert "SKYMAP" in hdulist
        assert "EBOUNDS" in hdulist
        assert hdulist["SKYMAP"].header["HPX_CONV"] == "FGST-CCUBE"
        assert hdulist["SKYMAP"].header["TTYPE1"] == "CHANNEL1"
    # Reading back must not raise; the result was never used, so don't bind it.
    Map.read(path)
    # Test Model Cube
    m.write(path, format="fgst-template", overwrite=True)
    with fits.open(path, memmap=False) as hdulist:
        assert "SKYMAP" in hdulist
        assert "ENERGIES" in hdulist
        assert hdulist["SKYMAP"].header["HPX_CONV"] == "FGST-TEMPLATE"
        assert hdulist["SKYMAP"].header["TTYPE1"] == "ENERGY1"
    Map.read(path)
@requires_data()
def test_read_fgst_exposure():
    """A Fermi-LAT HPX exposure cube must read with the expected axis and unit."""
    exposure = Map.read("$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
    assert exposure.geom.axes["energy_true"].node_type == "center"
    assert exposure.unit == "cm2 s"
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_set_get_by_pix(nside, nested, frame, region, axes):
    """Values written with ``set_by_pix`` must be read back by ``get_by_pix``."""
    hpx_map = create_map(nside, nested, frame, region, axes)
    coords = hpx_map.geom.get_coord(flat=True)
    pix = hpx_map.geom.get_idx(flat=True)
    # Use the longitude coordinate as an arbitrary per-pixel test value.
    hpx_map.set_by_pix(pix, coords[0])
    assert_allclose(coords[0], hpx_map.get_by_pix(pix))
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_set_get_by_coord(nside, nested, frame, region, axes):
    """``set_by_coord``/``get_by_coord`` round-trip with tuples and SkyCoords."""
    hpx_map = create_map(nside, nested, frame, region, axes)
    coords = hpx_map.geom.get_coord(flat=True)
    hpx_map.set_by_coord(coords, coords[0])
    assert_allclose(coords[0], hpx_map.get_by_coord(coords))
    # Repeat with SkyCoord positions; frame conversions must be transparent.
    hpx_map = create_map(nside, nested, frame, region, axes)
    coords = hpx_map.geom.get_coord(flat=True)
    skydir = SkyCoord(coords[0], coords[1], unit="deg", frame=hpx_map.geom.frame)
    skydir_cel = skydir.transform_to("icrs")
    skydir_gal = skydir.transform_to("galactic")
    extra = tuple(coords[2:])
    hpx_map.set_by_coord((skydir_gal,) + extra, coords[0])
    assert_allclose(coords[0], hpx_map.get_by_coord(coords))
    # Looking up the same positions in ICRS and Galactic must agree.
    assert_allclose(
        hpx_map.get_by_coord((skydir_cel,) + extra),
        hpx_map.get_by_coord((skydir_gal,) + extra),
    )
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_interp_by_coord(nside, nested, frame, region, axes):
    """Linear interpolation at pixel centers must reproduce the stored values."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    coords = m.geom.get_coord(flat=True)
    m.set_by_coord(coords, coords[1])
    assert_allclose(m.get_by_coord(coords), m.interp_by_coord(coords, method="linear"))
def test_hpxmap_interp_by_coord_quantities():
    """Interpolation must honour astropy units on the energy coordinate."""
    energy_axis = MapAxis(
        np.logspace(0.0, 3.0, 3), interp="log", name="energy", unit="TeV"
    )
    m = HpxNDMap(geom=HpxGeom(nside=1, axes=[energy_axis]))
    coords = m.geom.get_coord(flat=True)
    m.set_by_coord(coords, coords["lat"])
    # Query at lat=42 with the energy given as a Quantity in TeV.
    query = {"lon": 99, "lat": 42, "energy": 1 * u.TeV}
    val = m.interp_by_coord(query)
    assert_allclose(val, 42, rtol=1e-2)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_fill_by_coord(nside, nested, frame, region, axes):
    """``fill_by_coord`` accumulates: filling twice doubles the stored value."""
    hpx_map = create_map(nside, nested, frame, region, axes)
    coords = hpx_map.geom.get_coord(flat=True)
    for _ in range(2):
        hpx_map.fill_by_coord(coords, coords[1])
    assert_allclose(hpx_map.get_by_coord(coords), 2.0 * coords[1])
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_to_wcs(nside, nested, frame, region, axes):
    """Smoke test: HPX to WCS reprojection must not raise, with/without summing."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    m.to_wcs(sum_bands=False, oversample=2, normalize=False)
    m.to_wcs(sum_bands=True, oversample=2, normalize=False)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_swap_scheme(nside, nested, frame, region, axes):
    """Swapping RING/NESTED ordering must preserve values at every coordinate."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    m.data = np.arange(m.data.size).reshape(m.geom.data_shape)
    m2 = m.to_swapped()
    coords = m.geom.get_coord(flat=True)
    assert_allclose(m.get_by_coord(coords), m2.get_by_coord(coords))
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_ud_grade(nside, nested, frame, region, axes):
    """Smoke test: up/down-grading the resolution to nside=4 must not raise."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    m.to_ud_graded(4)
@pytest.mark.parametrize(
    ("nside", "nested", "frame", "region", "axes"), hpx_test_partialsky_geoms
)
def test_hpxmap_pad(nside, nested, frame, region, axes):
    """Padding a partial-sky map fills the new pixels with the constant value."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    m.set_by_pix(m.geom.get_idx(flat=True), 1.0)
    cval = 2.2
    m_pad = m.pad(1, mode="constant", cval=cval)
    coords_pad = m_pad.geom.get_coord(flat=True)
    msk = m.geom.contains(coords_pad)
    # Pixels outside the original geometry take the pad constant ...
    coords_out = tuple([c[~msk] for c in coords_pad])
    assert_allclose(m_pad.get_by_coord(coords_out), cval * np.ones_like(coords_out[0]))
    # ... while the original pixels keep their value of 1.
    coords_in = tuple([c[msk] for c in coords_pad])
    assert_allclose(m_pad.get_by_coord(coords_in), np.ones_like(coords_in[0]))
@pytest.mark.parametrize(
    ("nside", "nested", "frame", "region", "axes"), hpx_test_partialsky_geoms
)
def test_hpxmap_crop(nside, nested, frame, region, axes):
    """Smoke test: cropping one pixel off a partial-sky map must not raise."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    m.crop(1)
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_upsample(nside, nested, frame, region, axes):
    """Upsampling preserves the total when requested, and always keeps the unit."""
    geom = HpxGeom(nside=nside, nest=nested, frame=frame, region=region, axes=axes)
    m = HpxNDMap(geom, unit="m2")
    m.set_by_pix(m.geom.get_idx(flat=True), 1.0)
    # preserve_counts=True keeps the integral unchanged ...
    m_up = m.upsample(2, preserve_counts=True)
    assert_allclose(np.nansum(m.data), np.nansum(m_up.data))
    # ... while preserve_counts=False keeps the values, so the sum scales by 4.
    m_up = m.upsample(2, preserve_counts=False)
    assert_allclose(4.0 * np.nansum(m.data), np.nansum(m_up.data))
    assert m.unit == m_up.unit
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_downsample(nside, nested, frame, region, axes):
    """Downsampling with ``preserve_counts=True`` keeps total and unit intact."""
    geom = HpxGeom(nside=nside, nest=nested, frame=frame, region=region, axes=axes)
    m = HpxNDMap(geom, unit="m2")
    m.set_by_pix(m.geom.get_idx(flat=True), 1.0)
    m_down = m.downsample(2, preserve_counts=True)
    assert_allclose(np.nansum(m.data), np.nansum(m_down.data))
    assert m.unit == m_down.unit
@pytest.mark.parametrize(("nside", "nested", "frame", "region", "axes"), hpx_test_geoms)
def test_hpxmap_sum_over_axes(nside, nested, frame, region, axes):
    """Summing over non-spatial axes preserves the total for regular geometries."""
    # Use the shared create_map helper, consistent with the other tests here.
    m = create_map(nside, nested, frame, region, axes)
    coords = m.geom.get_coord(flat=True)
    m.fill_by_coord(coords, coords[0])
    msum = m.sum_over_axes()
    if m.geom.is_regular:
        assert_allclose(np.nansum(m.data), np.nansum(msum.data))
def test_coadd_unit():
    """Weighted fills and ``coadd`` must convert between compatible units."""
    geom = HpxGeom.create(nside=128)
    m1 = HpxNDMap(geom, unit="m2")
    m2 = HpxNDMap(geom, unit="cm2")
    idx = geom.get_idx()
    # Filling the m2 map with 1 cm2 weights adds 1e-4 per pixel.
    m1.fill_by_idx(idx, weights=u.Quantity(np.ones_like(idx[0]), unit="cm2"))
    assert_allclose(m1.data, 0.0001)
    # Adding 1 m2 weights brings each pixel to 1.0001 ...
    m1.fill_by_idx(idx, weights=u.Quantity(np.ones_like(idx[0]), unit="m2"))
    # ... and coadding the empty cm2 map must leave the values unchanged.
    m1.coadd(m2)
    assert_allclose(m1.data, 1.0001)
@requires_dependency("matplotlib")
def test_plot():
    """Smoke test: plotting an all-sky HPX map must not raise."""
    hpx_map = HpxNDMap.create(binsz=10)
    with mpl_plot_check():
        hpx_map.plot()
@requires_dependency("matplotlib")
def test_plot_grid():
    """Smoke test: plotting a grid over a non-spatial axis must not raise."""
    axis = MapAxis([0, 1, 2], node_type="edges")
    hpx_map = HpxNDMap.create(binsz=0.1 * u.deg, width=1, axes=[axis])
    with mpl_plot_check():
        hpx_map.plot_grid()
@requires_dependency("matplotlib")
def test_plot_poly():
    """Smoke test: the polygon plotting backend must not raise."""
    hpx_map = HpxNDMap.create(binsz=10)
    with mpl_plot_check():
        hpx_map.plot(method="poly")
def test_hpxndmap_resample_axis():
    """Resampling a non-spatial axis sums bins and handles partial coverage."""
    axis_1 = MapAxis.from_edges([1, 2, 3, 4, 5], name="test-1")
    axis_2 = MapAxis.from_edges([1, 2, 3, 4], name="test-2")
    m = HpxNDMap(HpxGeom.create(nside=16, axes=[axis_1, axis_2]), unit="m2")
    m.data += 1
    # New edges [2, 3, 5]: the first bin merges one source bin, the second two.
    m2 = m.resample_axis(axis=MapAxis.from_edges([2, 3, 5], name="test-1"))
    assert m2.data.shape == (3, 2, 3072)
    assert_allclose(m2.data[0, :, 0], [1, 2])
    # Edges [1.7, 4] do not cover all source bins; uncovered bins are dropped.
    m3 = m.resample_axis(axis=MapAxis.from_edges([1.7, 4], name="test-1"))
    assert m3.data.shape == (3, 1, 3072)
    assert_allclose(m3.data, 2)
| [
"numpy.logspace",
"numpy.arange",
"pytest.mark.parametrize",
"gammapy.utils.testing.requires_dependency",
"gammapy.maps.HpxGeom",
"gammapy.maps.MapAxis.from_bounds",
"gammapy.maps.HpxNDMap",
"numpy.max",
"numpy.testing.assert_allclose",
"gammapy.maps.HpxGeom.create",
"numpy.nansum",
"numpy.one... | [((390, 419), 'pytest.importorskip', 'pytest.importorskip', (['"""healpy"""'], {}), "('healpy')\n", (409, 419), False, 'import pytest\n'), ((1282, 1373), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (1305, 1373), False, 'import pytest\n'), ((1797, 1888), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (1820, 1888), False, 'import pytest\n'), ((1999, 2090), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (2022, 2090), False, 'import pytest\n'), ((4014, 4029), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (4027, 4029), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((4286, 4377), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (4309, 4377), False, 'import pytest\n'), ((4660, 4751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (4683, 4751), False, 'import pytest\n'), ((5568, 5659), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (5591, 5659), False, 'import pytest\n'), ((6469, 6560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 
'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (6492, 6560), False, 'import pytest\n'), ((6862, 6953), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (6885, 6953), False, 'import pytest\n'), ((7239, 7330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (7262, 7330), False, 'import pytest\n'), ((7697, 7788), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (7720, 7788), False, 'import pytest\n'), ((7977, 8079), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_partialsky_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_partialsky_geoms)\n", (8000, 8079), False, 'import pytest\n'), ((8720, 8822), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_partialsky_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_partialsky_geoms)\n", (8743, 8822), False, 'import pytest\n'), ((9005, 9096), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (9028, 9096), False, 'import pytest\n'), ((9586, 9677), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (9609, 9677), False, 
'import pytest\n'), ((10062, 10153), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nside', 'nested', 'frame', 'region', 'axes')", 'hpx_test_geoms'], {}), "(('nside', 'nested', 'frame', 'region', 'axes'),\n hpx_test_geoms)\n", (10085, 10153), False, 'import pytest\n'), ((10978, 11011), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (10997, 11011), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((11109, 11142), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (11128, 11142), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((11331, 11364), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (11350, 11364), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((1439, 1511), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (1446, 1511), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((1639, 1669), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (1656, 1669), True, 'import numpy as np\n'), ((1678, 1692), 'gammapy.maps.HpxNDMap', 'HpxNDMap', (['geom'], {}), '(geom)\n', (1686, 1692), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((1739, 1759), 'gammapy.maps.HpxNDMap', 'HpxNDMap', (['geom', 'data'], {}), '(geom, data)\n', (1747, 1759), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((1764, 1793), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data', 'data'], {}), '(m.data, data)\n', (1779, 1793), False, 'from 
numpy.testing import assert_allclose\n'), ((2307, 2326), 'gammapy.maps.HpxNDMap.read', 'HpxNDMap.read', (['path'], {}), '(path)\n', (2320, 2326), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((2336, 2366), 'gammapy.maps.Map.read', 'Map.read', (['path'], {'map_type': '"""hpx"""'}), "(path, map_type='hpx')\n", (2344, 2366), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((2377, 2415), 'numpy.ones_like', 'np.ones_like', (['m2.data[...]'], {'dtype': 'bool'}), '(m2.data[...], dtype=bool)\n', (2389, 2415), True, 'import numpy as np\n'), ((2421, 2473), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data[...][msk]', 'm2.data[...][msk]'], {}), '(m.data[...][msk], m2.data[...][msk])\n', (2436, 2473), False, 'from numpy.testing import assert_allclose\n'), ((2478, 2530), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data[...][msk]', 'm4.data[...][msk]'], {}), '(m.data[...][msk], m4.data[...][msk])\n', (2493, 2530), False, 'from numpy.testing import assert_allclose\n'), ((2575, 2594), 'gammapy.maps.HpxNDMap.read', 'HpxNDMap.read', (['path'], {}), '(path)\n', (2588, 2594), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((2604, 2637), 'gammapy.maps.HpxMap.read', 'HpxMap.read', (['path'], {'map_type': '"""hpx"""'}), "(path, map_type='hpx')\n", (2615, 2637), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((2647, 2677), 'gammapy.maps.Map.read', 'Map.read', (['path'], {'map_type': '"""hpx"""'}), "(path, map_type='hpx')\n", (2655, 2677), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((2682, 2734), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data[...][msk]', 'm2.data[...][msk]'], {}), '(m.data[...][msk], m2.data[...][msk])\n', (2697, 2734), False, 'from numpy.testing import assert_allclose\n'), ((2739, 2791), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data[...][msk]', 
'm3.data[...][msk]'], {}), '(m.data[...][msk], m3.data[...][msk])\n', (2754, 2791), False, 'from numpy.testing import assert_allclose\n'), ((2796, 2848), 'numpy.testing.assert_allclose', 'assert_allclose', (['m.data[...][msk]', 'm4.data[...][msk]'], {}), '(m.data[...][msk], m4.data[...][msk])\n', (2811, 2848), False, 'from numpy.testing import assert_allclose\n'), ((2996, 3015), 'gammapy.maps.HpxNDMap.read', 'HpxNDMap.read', (['path'], {}), '(path)\n', (3009, 3015), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3025, 3039), 'gammapy.maps.Map.read', 'Map.read', (['path'], {}), '(path)\n', (3033, 3039), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3049, 3079), 'gammapy.maps.Map.read', 'Map.read', (['path'], {'map_type': '"""hpx"""'}), "(path, map_type='hpx')\n", (3057, 3079), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3170, 3234), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(100.0)', '(1000.0)', '(4)'], {'name': '"""energy"""', 'unit': '"""MeV"""'}), "(100.0, 1000.0, 4, name='energy', unit='MeV')\n", (3189, 3234), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3633, 3647), 'gammapy.maps.Map.read', 'Map.read', (['path'], {}), '(path)\n', (3641, 3647), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3996, 4010), 'gammapy.maps.Map.read', 'Map.read', (['path'], {}), '(path)\n', (4004, 4010), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((4076, 4149), 'gammapy.maps.Map.read', 'Map.read', (['"""$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"""'], {}), "('$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz')\n", (4084, 4149), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((5143, 5205), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['coords[0]', 'coords[1]'], {'unit': '"""deg"""', 'frame': 
'm.geom.frame'}), "(coords[0], coords[1], unit='deg', frame=m.geom.frame)\n", (5151, 5205), False, 'from astropy.coordinates import SkyCoord\n'), ((6140, 6167), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': '(1)', 'axes': '[ax]'}), '(nside=1, axes=[ax])\n', (6147, 6167), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((6176, 6195), 'gammapy.maps.HpxNDMap', 'HpxNDMap', ([], {'geom': 'geom'}), '(geom=geom)\n', (6184, 6195), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((6430, 6465), 'numpy.testing.assert_allclose', 'assert_allclose', (['val', '(42)'], {'rtol': '(0.01)'}), '(val, 42, rtol=0.01)\n', (6445, 6465), False, 'from numpy.testing import assert_allclose\n'), ((10559, 10584), 'gammapy.maps.HpxGeom.create', 'HpxGeom.create', ([], {'nside': '(128)'}), '(nside=128)\n', (10573, 10584), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((10594, 10619), 'gammapy.maps.HpxNDMap', 'HpxNDMap', (['geom'], {'unit': '"""m2"""'}), "(geom, unit='m2')\n", (10602, 10619), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((10629, 10655), 'gammapy.maps.HpxNDMap', 'HpxNDMap', (['geom'], {'unit': '"""cm2"""'}), "(geom, unit='cm2')\n", (10637, 10655), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((10787, 10819), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', '(0.0001)'], {}), '(m1.data, 0.0001)\n', (10802, 10819), False, 'from numpy.testing import assert_allclose\n'), ((10942, 10974), 'numpy.testing.assert_allclose', 'assert_allclose', (['m1.data', '(1.0001)'], {}), '(m1.data, 1.0001)\n', (10957, 10974), False, 'from numpy.testing import assert_allclose\n'), ((11037, 11062), 'gammapy.maps.HpxNDMap.create', 'HpxNDMap.create', ([], {'binsz': '(10)'}), '(binsz=10)\n', (11052, 11062), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11176, 11213), 'gammapy.maps.MapAxis', 'MapAxis', 
(['[0, 1, 2]'], {'node_type': '"""edges"""'}), "([0, 1, 2], node_type='edges')\n", (11183, 11213), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11222, 11278), 'gammapy.maps.HpxNDMap.create', 'HpxNDMap.create', ([], {'binsz': '(0.1 * u.deg)', 'width': '(1)', 'axes': '[axis]'}), '(binsz=0.1 * u.deg, width=1, axes=[axis])\n', (11237, 11278), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11395, 11420), 'gammapy.maps.HpxNDMap.create', 'HpxNDMap.create', ([], {'binsz': '(10)'}), '(binsz=10)\n', (11410, 11420), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11528, 11578), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[1, 2, 3, 4, 5]'], {'name': '"""test-1"""'}), "([1, 2, 3, 4, 5], name='test-1')\n", (11546, 11578), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11592, 11639), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[1, 2, 3, 4]'], {'name': '"""test-2"""'}), "([1, 2, 3, 4], name='test-2')\n", (11610, 11639), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11652, 11699), 'gammapy.maps.HpxGeom.create', 'HpxGeom.create', ([], {'nside': '(16)', 'axes': '[axis_1, axis_2]'}), '(nside=16, axes=[axis_1, axis_2])\n', (11666, 11699), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11708, 11733), 'gammapy.maps.HpxNDMap', 'HpxNDMap', (['geom'], {'unit': '"""m2"""'}), "(geom, unit='m2')\n", (11716, 11733), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11766, 11810), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[2, 3, 5]'], {'name': '"""test-1"""'}), "([2, 3, 5], name='test-1')\n", (11784, 11810), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((11896, 11937), 'numpy.testing.assert_allclose', 'assert_allclose', (['m2.data[0, :, 0]', '[1, 2]'], {}), '(m2.data[0, :, 0], [1, 2])\n', 
(11911, 11937), False, 'from numpy.testing import assert_allclose\n'), ((11994, 12037), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[1.7, 4]'], {'name': '"""test-1"""'}), "([1.7, 4], name='test-1')\n", (12012, 12037), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((12123, 12150), 'numpy.testing.assert_allclose', 'assert_allclose', (['m3.data', '(2)'], {}), '(m3.data, 2)\n', (12138, 12150), False, 'from numpy.testing import assert_allclose\n'), ((438, 462), 'numpy.logspace', 'np.logspace', (['(0.0)', '(3.0)', '(3)'], {}), '(0.0, 3.0, 3)\n', (449, 462), True, 'import numpy as np\n'), ((1200, 1272), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (1207, 1272), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((3378, 3407), 'astropy.io.fits.open', 'fits.open', (['path'], {'memmap': '(False)'}), '(path, memmap=False)\n', (3387, 3407), False, 'from astropy.io import fits\n'), ((3738, 3767), 'astropy.io.fits.open', 'fits.open', (['path'], {'memmap': '(False)'}), '(path, memmap=False)\n', (3747, 3767), False, 'from astropy.io import fits\n'), ((5751, 5823), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (5758, 5823), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((6062, 6086), 'numpy.logspace', 'np.logspace', (['(0.0)', '(3.0)', '(3)'], {}), '(0.0, 3.0, 3)\n', (6073, 6086), True, 'import numpy as np\n'), ((7036, 7108), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (7043, 7108), False, 'from gammapy.maps 
import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((7418, 7490), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (7425, 7490), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((7873, 7945), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (7880, 7945), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((8165, 8237), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (8172, 8237), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((8689, 8715), 'numpy.ones_like', 'np.ones_like', (['coords_in[0]'], {}), '(coords_in[0])\n', (8701, 8715), True, 'import numpy as np\n'), ((8909, 8981), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (8916, 8981), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((9181, 9253), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (9188, 9253), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((9396, 9413), 'numpy.nansum', 'np.nansum', (['m.data'], {}), '(m.data)\n', (9405, 9413), True, 'import numpy as np\n'), ((9415, 9435), 'numpy.nansum', 'np.nansum', (['m_up.data'], {}), '(m_up.data)\n', (9424, 9435), True, 'import numpy as np\n'), 
((9530, 9550), 'numpy.nansum', 'np.nansum', (['m_up.data'], {}), '(m_up.data)\n', (9539, 9550), True, 'import numpy as np\n'), ((9764, 9836), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (9771, 9836), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((9983, 10000), 'numpy.nansum', 'np.nansum', (['m.data'], {}), '(m.data)\n', (9992, 10000), True, 'import numpy as np\n'), ((10002, 10024), 'numpy.nansum', 'np.nansum', (['m_down.data'], {}), '(m_down.data)\n', (10011, 10024), True, 'import numpy as np\n'), ((10243, 10315), 'gammapy.maps.HpxGeom', 'HpxGeom', ([], {'nside': 'nside', 'nest': 'nested', 'frame': 'frame', 'region': 'region', 'axes': 'axes'}), '(nside=nside, nest=nested, frame=frame, region=region, axes=axes)\n', (10250, 10315), False, 'from gammapy.maps import HpxGeom, HpxMap, HpxNDMap, Map, MapAxis\n'), ((10708, 10728), 'numpy.ones_like', 'np.ones_like', (['idx[0]'], {}), '(idx[0])\n', (10720, 10728), True, 'import numpy as np\n'), ((10846, 10866), 'numpy.ones_like', 'np.ones_like', (['idx[0]'], {}), '(idx[0])\n', (10858, 10866), True, 'import numpy as np\n'), ((11072, 11088), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (11086, 11088), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((11288, 11304), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (11302, 11304), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((11430, 11446), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (11444, 11446), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((1529, 1546), 'numpy.max', 'np.max', (['geom.npix'], {}), '(geom.npix)\n', (1535, 1546), True, 'import 
numpy as np\n'), ((7510, 7532), 'numpy.arange', 'np.arange', (['m.data.size'], {}), '(m.data.size)\n', (7519, 7532), True, 'import numpy as np\n'), ((8557, 8584), 'numpy.ones_like', 'np.ones_like', (['coords_out[0]'], {}), '(coords_out[0])\n', (8569, 8584), True, 'import numpy as np\n'), ((9511, 9528), 'numpy.nansum', 'np.nansum', (['m.data'], {}), '(m.data)\n', (9520, 9528), True, 'import numpy as np\n'), ((10482, 10499), 'numpy.nansum', 'np.nansum', (['m.data'], {}), '(m.data)\n', (10491, 10499), True, 'import numpy as np\n'), ((10501, 10521), 'numpy.nansum', 'np.nansum', (['msum.data'], {}), '(msum.data)\n', (10510, 10521), True, 'import numpy as np\n'), ((786, 810), 'numpy.logspace', 'np.logspace', (['(0.0)', '(3.0)', '(4)'], {}), '(0.0, 3.0, 4)\n', (797, 810), True, 'import numpy as np\n'), ((927, 951), 'numpy.logspace', 'np.logspace', (['(0.0)', '(3.0)', '(4)'], {}), '(0.0, 3.0, 4)\n', (938, 951), True, 'import numpy as np\n'), ((988, 1012), 'numpy.logspace', 'np.logspace', (['(0.0)', '(2.0)', '(3)'], {}), '(0.0, 2.0, 3)\n', (999, 1012), True, 'import numpy as np\n')] |
from config import get_config
from Learner import face_learner
import argparse
import random
import glob
import os
import json
import gen_params, transforms
from data.online_dataset import OnlineFontDataset
from data.hook_dataloader import HookDataLoader
from pathlib import Path
# python train.py -net mobilefacenet -b 200 -w 4
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters, build a synthetic font-rendering
    # dataset, and launch metric-learning training via Learner.face_learner.
    # Example (from the comment above): python train.py -net mobilefacenet -b 200 -w 4
    parser = argparse.ArgumentParser(description='for common image metric learning')
    # --- training schedule / network architecture ---
    parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
    parser.add_argument("-net", "--net_mode", help="which network, [ir, ir_se, mobilefacenet]", default='ir_se',
                        type=str)
    parser.add_argument("--embedding_size", help="embedding_size", default=512, type=int)
    parser.add_argument("-depth", "--net_depth", help="how many layers [50,100,152]", default=50, type=int)
    parser.add_argument('-lr', '--lr', help='learning rate', default=1e-3, type=float)
    parser.add_argument("-b", "--batch_size", help="batch_size", default=96, type=int)
    parser.add_argument("-w", "--num_workers", help="workers number", default=3, type=int)
    # --- data-loader / validation options ---
    parser.add_argument("-d", "--data_mode", help="use which database, [vgg, ms1m, emore, concat]", default='common',
                        type=str)
    parser.add_argument("--max_positive_cnt", default=1000, type=int)
    parser.add_argument('--pin_memory', default=False, action="store_true")
    parser.add_argument("--val_batch_size", default=256, type=int)
    parser.add_argument("--val_pin_memory", default=False, action='store_true')
    parser.add_argument("--not_use_pos", default=False, action='store_true')
    parser.add_argument("--not_use_neg", default=False, action='store_true')
    # NOTE(review): --work_path defaults to None but is converted with Path()
    # below, which would raise TypeError — it appears to be effectively required.
    parser.add_argument('--work_path', type=str, default=None, required=False)
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--benchmark', default=False, action="store_true")
    # --- font / background resources consumed by the synthetic dataset ---
    parser.add_argument('--font_dir', type=str, default=None, required=False)
    parser.add_argument('-f', '--font_list', type=str, default='./db/train_font_list.json', required=False)
    parser.add_argument('--bg_dir', type=str, default='resource/train_bg', required=False)
    parser.add_argument('--val_img_dirs', type=str,
                        default='{"mix":"","kr":"","eng":"","num":""}',
                        required=False)
    # Factory-function names resolved with getattr() on the gen_params /
    # transforms modules further below.
    parser.add_argument('--train_dataset_param_func', type=str, default='get_train_params',  # get_params_noraml
                        required=False)  # func or gen_param.json
    parser.add_argument('--train_transform_func_name', type=str, default='get_train_transforms',
                        # get_train_transforms_normal
                        required=False)  # func or gen_param.json
    parser.add_argument('--val_transform_func_name', type=str, default='get_test_transforms',
                        # get_train_transforms_normal
                        required=False)  # func or gen_param.json
    # --- synthetic text-image generation knobs (counts, sizes, augmentations) ---
    parser.add_argument('--num_sample_each_class', type=int, default=1000)
    parser.add_argument('--min_num_chars', type=int, default=1)
    parser.add_argument('--max_num_chars', type=int, default=10)
    parser.add_argument('--input_size', type=int, default=112)
    parser.add_argument('--use_random_crop', default=False, action="store_true")
    parser.add_argument('--use_gray', default=False, action="store_true")
    parser.add_argument('--use_same_random_crop_in_batch', default=False, action="store_true")
    parser.add_argument('--same_text_in_batch_prob', default=1., type=float)
    parser.add_argument('--same_font_size_in_batch_prob', default=1., type=float)
    parser.add_argument('--same_text_params_in_batch_prob', default=1., type=float)
    parser.add_argument('--use_text_persp_trans_prob', default=0.1, type=float)
    parser.add_argument('--use_img_persp_trans_prob', default=0.3, type=float)
    # Unicode pools and per-script sampling probabilities for generated text.
    parser.add_argument('--han_unicode_file', type=str, default="db/union_korean_unicodes.json")
    parser.add_argument('--eng_unicode_file', type=str, default="db/eng_unicodes.json")
    parser.add_argument('--num_unicode_file', type=str, default="db/number_unicodes.json")
    parser.add_argument('--han_prob', type=float, default=0.4)
    parser.add_argument('--eng_prob', type=float, default=0.3)
    parser.add_argument('--num_prob', type=float, default=0.3)
    parser.add_argument('--mix_prob', type=float, default=0.5)
    parser.add_argument('--simple_img_prob', type=float, default=0.3)
    parser.add_argument('--font_size_range', type=str, default='10,220')
    parser.add_argument('--dataset_debug', default=False, action="store_true")
    parser.add_argument('--only_use_pixel_transform', default=False, action="store_true")
    parser.add_argument('--use_blur', default=False, action="store_true")
    parser.add_argument('--use_flip', default=False, action="store_true")
    # --- optimizer / loss-head configuration ---
    parser.add_argument('--optimizer', default='sgd', type=str)
    parser.add_argument('--pooling', default='GeM', type=str)
    parser.add_argument('--last_fc_dropout', type=float, default=0.0)
    parser.add_argument('--pretrained', default=False, action="store_true")
    parser.add_argument('--loss_module', default='arcface', type=str)
    parser.add_argument('--s', type=float, default=30.0)
    parser.add_argument('--margin', type=float, default=0.3)
    parser.add_argument('--ls_eps', type=float, default=0.0)
    parser.add_argument('--theta_zero', type=float, default=1.25)
    parser.add_argument('--wd', type=float, default=1e-5)
    # --- checkpoint restore / fine-tuning ---
    parser.add_argument('--restore_suffix', default=None, type=str)
    parser.add_argument('--train', default=False, action="store_true")
    parser.add_argument('--ft_model_path', default=None, type=str)
    parser.add_argument('--no_strict', default=False, action="store_true")

    args = parser.parse_args()

    # Mirror every CLI option onto the shared config object so downstream code
    # (Learner, datasets) reads a single namespace; also echo them for the log.
    conf = get_config()
    for arg in vars(args):
        print(arg, getattr(args, arg))
        setattr(conf, arg, getattr(args, arg))

    # Derive output directories under the work path (models / logs / snapshots).
    conf.work_path = Path(conf.work_path)
    conf.model_path = conf.work_path / 'models'
    conf.log_path = conf.work_path / 'log'
    conf.save_path = conf.work_path / 'save'

    if args.net_mode == 'mobilefacenet':
        conf.use_mobilfacenet = True
    else:
        conf.net_mode = args.net_mode
        conf.net_depth = args.net_depth

    # Seed all RNGs for reproducibility when --seed is given.
    if args.seed is not None:
        import numpy as np
        import torch

        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    # cudnn autotuner; NOTE(review): benchmark=True and deterministic=True are
    # normally mutually exclusive goals — confirm this combination is intended.
    if args.benchmark:
        import torch.backends.cudnn as cudnn

        cudnn.benchmark = True
        cudnn.deterministic = True

    # Background images composited behind the rendered text.
    bg_list = glob.glob(os.path.join(args.bg_dir, "*"))

    # Resolve the generation-parameter factory by name and call it.
    generation_params = getattr(gen_params, args.train_dataset_param_func)()

    # Resolve the training transform factory; fall back to the simple pipeline
    # when the requested name does not exist on the transforms module.
    if hasattr(transforms, args.train_transform_func_name):
        train_transform_func = getattr(transforms, args.train_transform_func_name)
        train_transforms = train_transform_func(args.input_size, args.use_random_crop, args.use_gray,
                                               use_same_random_crop_in_batch=args.use_same_random_crop_in_batch,
                                               only_use_pixel_transform=args.only_use_pixel_transform,
                                               use_flip=args.use_flip, use_blur=args.use_blur
                                               )
    else:
        train_transforms = transforms.get_simple_transforms(input_size=args.input_size,
                                                            use_random_crop=args.use_random_crop,
                                                            use_same_random_crop_in_batch=args.use_same_random_crop_in_batch,
                                                            use_gray=args.use_gray)

    # Character pools (lists of unicode code points) for text generation.
    han_unicodes = json.load(open(args.han_unicode_file))
    eng_unicodes = json.load(open(args.eng_unicode_file))
    num_unicodes = json.load(open(args.num_unicode_file))

    # "min,max" string -> inclusive list of candidate font sizes.
    font_size_range = args.font_size_range.split(",")
    font_size_range = list(range(int(font_size_range[0]), int(font_size_range[1]) + 1))

    # Each font file is one class; sorting keeps class indices stable across runs.
    font_list = [os.path.join(args.font_dir, font_name) for font_name in json.load(open(args.font_list))]
    font_list.sort()
    num_classes = len(font_list)
    conf.num_classes = num_classes

    dataset = OnlineFontDataset(font_list, transform=train_transforms, generation_params=generation_params,
                              bg_list=bg_list,
                              num_sample_each_class=args.num_sample_each_class,
                              min_chars=args.min_num_chars, max_chars=args.max_num_chars,
                              hangul_unicodes=han_unicodes, eng_unicodes=eng_unicodes,
                              number_unicodes=num_unicodes,
                              hangul_prob=args.han_prob, eng_prob=args.eng_prob,
                              num_prob=args.num_prob, mix_prob=args.mix_prob,
                              simple_img_prob=args.simple_img_prob,
                              font_size_range=font_size_range,
                              same_text_in_batch_prob=args.same_text_in_batch_prob,
                              same_font_size_in_batch_prob=args.same_font_size_in_batch_prob,
                              same_text_params_in_batch_prob=args.same_text_params_in_batch_prob,
                              use_text_persp_trans_prob=args.use_text_persp_trans_prob,
                              use_img_persp_trans_prob=args.use_img_persp_trans_prob,
                              skip_exception=True,
                              input_size=args.input_size,
                              use_same_random_crop_in_batch=args.use_same_random_crop_in_batch,
                              use_debug=args.dataset_debug
                              )

    train_loader = HookDataLoader(dataset, num_workers=args.num_workers,
                                 pin_memory=args.pin_memory,
                                 batch_size=args.batch_size)

    # Validation transforms are resolved unconditionally (no hasattr fallback).
    val_transform_func = getattr(transforms, args.val_transform_func_name)
    val_transforms = val_transform_func(input_size=args.input_size, use_gray=args.use_gray)

    learner = face_learner(conf, val_transforms=val_transforms, train_loader=train_loader)
    learner.train_font(conf, args.epochs)
| [
"data.online_dataset.OnlineFontDataset",
"numpy.random.seed",
"argparse.ArgumentParser",
"transforms.get_simple_transforms",
"Learner.face_learner",
"torch.manual_seed",
"data.hook_dataloader.HookDataLoader",
"torch.cuda.manual_seed",
"pathlib.Path",
"random.seed",
"config.get_config",
"os.pat... | [((371, 442), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""for common image metric learning"""'}), "(description='for common image metric learning')\n", (394, 442), False, 'import argparse\n'), ((5896, 5908), 'config.get_config', 'get_config', ([], {}), '()\n', (5906, 5908), False, 'from config import get_config\n'), ((6045, 6065), 'pathlib.Path', 'Path', (['conf.work_path'], {}), '(conf.work_path)\n', (6049, 6065), False, 'from pathlib import Path\n'), ((8407, 9387), 'data.online_dataset.OnlineFontDataset', 'OnlineFontDataset', (['font_list'], {'transform': 'train_transforms', 'generation_params': 'generation_params', 'bg_list': 'bg_list', 'num_sample_each_class': 'args.num_sample_each_class', 'min_chars': 'args.min_num_chars', 'max_chars': 'args.max_num_chars', 'hangul_unicodes': 'han_unicodes', 'eng_unicodes': 'eng_unicodes', 'number_unicodes': 'num_unicodes', 'hangul_prob': 'args.han_prob', 'eng_prob': 'args.eng_prob', 'num_prob': 'args.num_prob', 'mix_prob': 'args.mix_prob', 'simple_img_prob': 'args.simple_img_prob', 'font_size_range': 'font_size_range', 'same_text_in_batch_prob': 'args.same_text_in_batch_prob', 'same_font_size_in_batch_prob': 'args.same_font_size_in_batch_prob', 'same_text_params_in_batch_prob': 'args.same_text_params_in_batch_prob', 'use_text_persp_trans_prob': 'args.use_text_persp_trans_prob', 'use_img_persp_trans_prob': 'args.use_img_persp_trans_prob', 'skip_exception': '(True)', 'input_size': 'args.input_size', 'use_same_random_crop_in_batch': 'args.use_same_random_crop_in_batch', 'use_debug': 'args.dataset_debug'}), '(font_list, transform=train_transforms, generation_params=\n generation_params, bg_list=bg_list, num_sample_each_class=args.\n num_sample_each_class, min_chars=args.min_num_chars, max_chars=args.\n max_num_chars, hangul_unicodes=han_unicodes, eng_unicodes=eng_unicodes,\n number_unicodes=num_unicodes, hangul_prob=args.han_prob, eng_prob=args.\n eng_prob, num_prob=args.num_prob, 
mix_prob=args.mix_prob,\n simple_img_prob=args.simple_img_prob, font_size_range=font_size_range,\n same_text_in_batch_prob=args.same_text_in_batch_prob,\n same_font_size_in_batch_prob=args.same_font_size_in_batch_prob,\n same_text_params_in_batch_prob=args.same_text_params_in_batch_prob,\n use_text_persp_trans_prob=args.use_text_persp_trans_prob,\n use_img_persp_trans_prob=args.use_img_persp_trans_prob, skip_exception=\n True, input_size=args.input_size, use_same_random_crop_in_batch=args.\n use_same_random_crop_in_batch, use_debug=args.dataset_debug)\n', (8424, 9387), False, 'from data.online_dataset import OnlineFontDataset\n'), ((9959, 10073), 'data.hook_dataloader.HookDataLoader', 'HookDataLoader', (['dataset'], {'num_workers': 'args.num_workers', 'pin_memory': 'args.pin_memory', 'batch_size': 'args.batch_size'}), '(dataset, num_workers=args.num_workers, pin_memory=args.\n pin_memory, batch_size=args.batch_size)\n', (9973, 10073), False, 'from data.hook_dataloader import HookDataLoader\n'), ((10319, 10395), 'Learner.face_learner', 'face_learner', (['conf'], {'val_transforms': 'val_transforms', 'train_loader': 'train_loader'}), '(conf, val_transforms=val_transforms, train_loader=train_loader)\n', (10331, 10395), False, 'from Learner import face_learner\n'), ((6457, 6479), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (6468, 6479), False, 'import random\n'), ((6488, 6513), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6502, 6513), True, 'import numpy as np\n'), ((6522, 6550), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6539, 6550), False, 'import torch\n'), ((6559, 6592), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6581, 6592), False, 'import torch\n'), ((6754, 6784), 'os.path.join', 'os.path.join', (['args.bg_dir', '"""*"""'], {}), "(args.bg_dir, '*')\n", (6766, 6784), False, 'import os\n'), ((7510, 7707), 
'transforms.get_simple_transforms', 'transforms.get_simple_transforms', ([], {'input_size': 'args.input_size', 'use_random_crop': 'args.use_random_crop', 'use_same_random_crop_in_batch': 'args.use_same_random_crop_in_batch', 'use_gray': 'args.use_gray'}), '(input_size=args.input_size,\n use_random_crop=args.use_random_crop, use_same_random_crop_in_batch=\n args.use_same_random_crop_in_batch, use_gray=args.use_gray)\n', (7542, 7707), False, 'import gen_params, transforms\n'), ((8214, 8252), 'os.path.join', 'os.path.join', (['args.font_dir', 'font_name'], {}), '(args.font_dir, font_name)\n', (8226, 8252), False, 'import os\n')] |
import numpy as np
# https://stats.stackexchange.com/questions/179835/how-to-build-a-confusion-matrix-for-a-multiclass-classifier
def calculate_miou(confusion_matrix):
    """Return the mean intersection-over-union of a confusion matrix.

    Per-class IoU is TP / (GT + Pred - TP); classes that never occur
    yield NaN and are excluded from the mean by np.nanmean.
    """
    true_positives = np.diag(confusion_matrix)
    union = (confusion_matrix.sum(axis=1) +
             confusion_matrix.sum(axis=0) - true_positives)
    per_class_iou = np.divide(true_positives, union)
    return np.nanmean(per_class_iou)
class Evaluator(object):
    """Accumulates a confusion matrix over batches of dense predictions
    (e.g. semantic segmentation) and derives pixel/IoU metrics from it.

    Layout: rows are ground-truth classes, columns are predicted classes.
    Fix: the per-class IoU formula was copy-pasted in three methods; it is
    now factored into the single helper ``_mean_iou`` (behavior unchanged).
    """

    # Fixed class-index subset reported by Mean_Intersection_over_Union_20.
    _SUBSET_20 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                           13, 15, 23, 27, 32, 33, 35, 38])

    def __init__(self, num_class):
        # Silence 0/0 warnings: classes absent from both GT and prediction
        # produce NaN IoU entries, which np.nanmean later skips.
        # NOTE(review): np.seterr changes numpy's *global* error state.
        np.seterr(divide='ignore', invalid='ignore')
        self.num_class = num_class
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

    @staticmethod
    def _mean_iou(confusion_matrix):
        """Mean over classes of IoU = TP / (GT + Pred - TP), ignoring NaNs."""
        tp = np.diag(confusion_matrix)
        union = (np.sum(confusion_matrix, axis=1) +
                 np.sum(confusion_matrix, axis=0) - tp)
        return np.nanmean(np.divide(tp, union))

    def Pixel_Accuracy(self):
        """Overall accuracy: correctly classified pixels / all pixels."""
        return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()

    def Pixel_Accuracy_Class(self):
        """Mean per-class recall (diagonal / row sums), NaN classes ignored."""
        per_class_acc = np.divide(np.diag(self.confusion_matrix),
                                  self.confusion_matrix.sum(axis=1))
        return np.nanmean(per_class_acc)

    def Mean_Intersection_over_Union(self):
        """MIoU over all classes accumulated so far."""
        return self._mean_iou(self.confusion_matrix)

    def Mean_Intersection_over_Union_20(self):
        """MIoU restricted to the fixed 20-class subset.

        Returns 0 when the evaluator tracks 20 classes or fewer (the subset
        indices would be out of range).
        """
        if self.num_class <= 20:
            return 0
        sub = self._SUBSET_20
        return self._mean_iou(self.confusion_matrix[sub[:, None], sub])

    def Frequency_Weighted_Intersection_over_Union(self):
        """IoU weighted by each class's ground-truth pixel frequency."""
        freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
        tp = np.diag(self.confusion_matrix)
        union = (np.sum(self.confusion_matrix, axis=1) +
                 np.sum(self.confusion_matrix, axis=0) - tp)
        iu = np.divide(tp, union)
        # Only classes that actually appear in the ground truth contribute.
        return (freq[freq > 0] * iu[freq > 0]).sum()

    def _generate_matrix(self, gt_image, pre_image):
        """Confusion matrix for one batch; GT labels outside [0, num_class) are ignored."""
        mask = (gt_image >= 0) & (gt_image < self.num_class)
        # Encode each (gt, pred) pair as a single index, then histogram.
        label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
        count = np.bincount(label, minlength=self.num_class**2)
        return count.reshape(self.num_class, self.num_class)

    def add_batch(self, gt_image, pre_image, return_miou=False):
        """Accumulate one batch; optionally return that batch's own MIoU.

        Args:
            gt_image: integer ground-truth label array.
            pre_image: predicted label array, same shape as ``gt_image``.
            return_miou: if True, return the MIoU of this batch alone.
        """
        assert gt_image.shape == pre_image.shape
        confusion_matrix = self._generate_matrix(gt_image, pre_image)
        self.confusion_matrix += confusion_matrix
        if return_miou:
            return self._mean_iou(confusion_matrix)

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.num_class,) * 2)

    def dump_matrix(self, path):
        """Persist the accumulated confusion matrix as a .npy file."""
        np.save(path, self.confusion_matrix)
| [
"numpy.save",
"numpy.sum",
"numpy.seterr",
"numpy.zeros",
"numpy.array",
"numpy.diag",
"numpy.bincount",
"numpy.nanmean"
] | [((345, 361), 'numpy.nanmean', 'np.nanmean', (['MIoU'], {}), '(MIoU)\n', (355, 361), True, 'import numpy as np\n'), ((191, 216), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (198, 216), True, 'import numpy as np\n'), ((448, 492), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (457, 492), True, 'import numpy as np\n'), ((560, 591), 'numpy.zeros', 'np.zeros', (['((self.num_class,) * 2)'], {}), '((self.num_class,) * 2)\n', (568, 591), True, 'import numpy as np\n'), ((847, 862), 'numpy.nanmean', 'np.nanmean', (['Acc'], {}), '(Acc)\n', (857, 862), True, 'import numpy as np\n'), ((1138, 1154), 'numpy.nanmean', 'np.nanmean', (['MIoU'], {}), '(MIoU)\n', (1148, 1154), True, 'import numpy as np\n'), ((2330, 2379), 'numpy.bincount', 'np.bincount', (['label'], {'minlength': '(self.num_class ** 2)'}), '(label, minlength=self.num_class ** 2)\n', (2341, 2379), True, 'import numpy as np\n'), ((2848, 2879), 'numpy.zeros', 'np.zeros', (['((self.num_class,) * 2)'], {}), '((self.num_class,) * 2)\n', (2856, 2879), True, 'import numpy as np\n'), ((2922, 2958), 'numpy.save', 'np.save', (['path', 'self.confusion_matrix'], {}), '(path, self.confusion_matrix)\n', (2929, 2958), True, 'import numpy as np\n'), ((306, 331), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (313, 331), True, 'import numpy as np\n'), ((766, 796), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (773, 796), True, 'import numpy as np\n'), ((952, 982), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (959, 982), True, 'import numpy as np\n'), ((1296, 1381), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 23, 27, 32, 33, 35, 38]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 23, 27, 32, 33, 35, 38]\n )\n', (1304, 1381), True, 'import numpy as np\n'), ((1668, 
1684), 'numpy.nanmean', 'np.nanmean', (['MIoU'], {}), '(MIoU)\n', (1678, 1684), True, 'import numpy as np\n'), ((1779, 1816), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {'axis': '(1)'}), '(self.confusion_matrix, axis=1)\n', (1785, 1816), True, 'import numpy as np\n'), ((1819, 1848), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (1825, 1848), True, 'import numpy as np\n'), ((1872, 1902), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (1879, 1902), True, 'import numpy as np\n'), ((228, 260), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (234, 260), True, 'import numpy as np\n'), ((263, 295), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(0)'}), '(confusion_matrix, axis=0)\n', (269, 295), True, 'import numpy as np\n'), ((1090, 1120), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (1097, 1120), True, 'import numpy as np\n'), ((1490, 1515), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1497, 1515), True, 'import numpy as np\n'), ((2010, 2040), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (2017, 2040), True, 'import numpy as np\n'), ((638, 668), 'numpy.diag', 'np.diag', (['self.confusion_matrix'], {}), '(self.confusion_matrix)\n', (645, 668), True, 'import numpy as np\n'), ((998, 1035), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {'axis': '(1)'}), '(self.confusion_matrix, axis=1)\n', (1004, 1035), True, 'import numpy as np\n'), ((1038, 1075), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {'axis': '(0)'}), '(self.confusion_matrix, axis=0)\n', (1044, 1075), True, 'import numpy as np\n'), ((1621, 1646), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1628, 1646), True, 'import numpy as np\n'), ((1918, 1955), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {'axis': '(1)'}), 
'(self.confusion_matrix, axis=1)\n', (1924, 1955), True, 'import numpy as np\n'), ((1958, 1995), 'numpy.sum', 'np.sum', (['self.confusion_matrix'], {'axis': '(0)'}), '(self.confusion_matrix, axis=0)\n', (1964, 1995), True, 'import numpy as np\n'), ((1535, 1567), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (1541, 1567), True, 'import numpy as np\n'), ((1570, 1602), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(0)'}), '(confusion_matrix, axis=0)\n', (1576, 1602), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
# Ref:
# J. Chem. Phys. 117, 7433
#
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import rks
from pyscf.dft import numint
from pyscf.scf import cphf
from pyscf.grad import rks as rks_grad
from pyscf.grad import tdrhf
#
# Given Y = 0, TDDFT gradients (XAX+XBY+YBX+YAY)^1 turn to TDA gradients (XAX)^1
#
def grad_elec(td_grad, x_y, singlet=True, atmlst=None,
              max_memory=2000, verbose=logger.INFO):
    '''
    Electronic part of TDA, TDDFT nuclear gradients

    Args:
        td_grad : grad.tdrhf.Gradients or grad.tdrks.Gradients object.
        x_y : a two-element list of numpy arrays
            TDDFT X and Y amplitudes. If Y is set to 0, this function computes
            TDA energy gradients.

    Returns:
        A (len(atmlst), 3) array holding the electronic contribution to the
        nuclear gradients (all atoms when atmlst is None).
    '''
    log = logger.new_logger(td_grad, verbose)
    # time.clock() was removed in Python 3.8; process_time() is the
    # equivalent CPU-time counter expected by log.timer().
    time0 = time.process_time(), time.time()

    mol = td_grad.mol
    mf = td_grad.base._scf
    mo_coeff = mf.mo_coeff
    mo_energy = mf.mo_energy
    mo_occ = mf.mo_occ
    nao, nmo = mo_coeff.shape
    nocc = (mo_occ>0).sum()
    nvir = nmo - nocc
    x, y = x_y
    # (X+Y) and (X-Y) amplitude combinations reshaped to (nvir, nocc).
    xpy = (x+y).reshape(nocc,nvir).T
    xmy = (x-y).reshape(nocc,nvir).T
    orbv = mo_coeff[:,nocc:]
    orbo = mo_coeff[:,:nocc]

    # Unrelaxed difference-density blocks (vir-vir, occ-occ) in the MO basis,
    # then transition/difference densities transformed to the AO basis.
    dvv = numpy.einsum('ai,bi->ab', xpy, xpy) + numpy.einsum('ai,bi->ab', xmy, xmy)
    doo =-numpy.einsum('ai,aj->ij', xpy, xpy) - numpy.einsum('ai,aj->ij', xmy, xmy)
    dmxpy = reduce(numpy.dot, (orbv, xpy, orbo.T))
    dmxmy = reduce(numpy.dot, (orbv, xmy, orbo.T))
    dmzoo = reduce(numpy.dot, (orbo, doo, orbo.T))
    dmzoo+= reduce(numpy.dot, (orbv, dvv, orbv.T))

    mem_now = lib.current_memory()[0]
    max_memory = max(2000, td_grad.max_memory*.9-mem_now)

    ni = mf._numint
    ni.libxc.test_deriv_order(mf.xc, 3, raise_error=True)
    omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
    # dm0 = mf.make_rdm1(mo_coeff, mo_occ), but it is not used when computing
    # fxc since rho0 is passed to fxc function.
    dm0 = None
    rho0, vxc, fxc = ni.cache_xc_kernel(mf.mol, mf.grids, mf.xc,
                                        [mo_coeff]*2, [mo_occ*.5]*2, spin=1)
    # fxc (and optionally vxc/kxc) contracted with the transition density
    # dmxpy and relaxed density dmzoo; each result is (4, nao, nao):
    # value plus its d/dx, d/dy, d/dz grid derivatives.
    f1vo, f1oo, vxc1, k1ao = \
            _contract_xc_kernel(td_grad, mf.xc, dmxpy,
                                dmzoo, True, True, singlet, max_memory)

    if abs(hyb) > 1e-10:
        # Hybrid / range-separated functional: HF exchange contributes.
        dm = (dmzoo, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
        vj, vk = mf.get_jk(mol, dm, hermi=0)
        vk *= hyb
        if abs(omega) > 1e-10:
            vk += mf.get_k(mol, dm, hermi=0, omega=omega) * (alpha-hyb)
        veff0doo = vj[0] * 2 - vk[0] + f1oo[0] + k1ao[0] * 2
        wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
        if singlet:
            veff = vj[1] * 2 - vk[1] + f1vo[0] * 2
        else:
            veff = -vk[1] + f1vo[0] * 2
        veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
        wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
        wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
        veff = -vk[2]
        veff0mom = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
        wvo -= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy) * 2
        wvo += numpy.einsum('ac,ai->ci', veff0mom[nocc:,nocc:], xmy) * 2
    else:
        # Pure functional: only Coulomb terms, no HF exchange.
        vj = mf.get_j(mol, (dmzoo, dmxpy+dmxpy.T), hermi=1)
        veff0doo = vj[0] * 2 + f1oo[0] + k1ao[0] * 2
        wvo = reduce(numpy.dot, (orbv.T, veff0doo, orbo)) * 2
        if singlet:
            veff = vj[1] * 2 + f1vo[0] * 2
        else:
            veff = f1vo[0] * 2
        veff0mop = reduce(numpy.dot, (mo_coeff.T, veff, mo_coeff))
        wvo -= numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy) * 2
        wvo += numpy.einsum('ac,ai->ci', veff0mop[nocc:,nocc:], xpy) * 2
        veff0mom = numpy.zeros((nmo,nmo))

    # set singlet=None, generate function for CPHF type response kernel
    vresp = mf.gen_response(singlet=None, hermi=1)
    def fvind(x):
        # Orbital-response kernel used by the Z-vector (CPHF) solver.
        dm = reduce(numpy.dot, (orbv, x.reshape(nvir,nocc)*2, orbo.T))
        v1ao = vresp(dm+dm.T)
        return reduce(numpy.dot, (orbv.T, v1ao, orbo)).ravel()
    # Solve the Z-vector equations for the orbital relaxation amplitudes z1.
    z1 = cphf.solve(fvind, mo_energy, mo_occ, wvo,
                    max_cycle=td_grad.cphf_max_cycle,
                    tol=td_grad.cphf_conv_tol)[0]
    z1 = z1.reshape(nvir,nocc)
    time1 = log.timer('Z-vector using CPHF solver', *time0)

    z1ao = reduce(numpy.dot, (orbv, z1, orbo.T))
    veff = vresp(z1ao+z1ao.T)

    # im0: intermediate that is contracted with the overlap derivatives s1
    # in the atom loop below (energy-weighted-density-like term).
    im0 = numpy.zeros((nmo,nmo))
    im0[:nocc,:nocc] = reduce(numpy.dot, (orbo.T, veff0doo+veff, orbo))
    im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mop[nocc:,:nocc], xpy)
    im0[:nocc,:nocc]+= numpy.einsum('ak,ai->ki', veff0mom[nocc:,:nocc], xmy)
    im0[nocc:,nocc:] = numpy.einsum('ci,ai->ac', veff0mop[nocc:,:nocc], xpy)
    im0[nocc:,nocc:]+= numpy.einsum('ci,ai->ac', veff0mom[nocc:,:nocc], xmy)
    im0[nocc:,:nocc] = numpy.einsum('ki,ai->ak', veff0mop[:nocc,:nocc], xpy)*2
    im0[nocc:,:nocc]+= numpy.einsum('ki,ai->ak', veff0mom[:nocc,:nocc], xmy)*2

    zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
    zeta[nocc:,:nocc] = mo_energy[:nocc]
    zeta[:nocc,nocc:] = mo_energy[nocc:]
    dm1 = numpy.zeros((nmo,nmo))
    dm1[:nocc,:nocc] = doo
    dm1[nocc:,nocc:] = dvv
    dm1[nocc:,:nocc] = z1
    dm1[:nocc,:nocc] += numpy.eye(nocc)*2 # for ground state
    im0 = reduce(numpy.dot, (mo_coeff, im0+zeta*dm1, mo_coeff.T))

    # Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. QM/MM, solvent) modifies the SCF object only.
    mf_grad = td_grad.base._scf.nuc_grad_method()
    hcore_deriv = mf_grad.hcore_generator(mol)
    s1 = mf_grad.get_ovlp(mol)

    dmz1doo = z1ao + dmzoo
    oo0 = reduce(numpy.dot, (orbo, orbo.T))
    if abs(hyb) > 1e-10:
        dm = (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T, dmxmy-dmxmy.T)
        vj, vk = td_grad.get_jk(mol, dm)
        vk *= hyb
        if abs(omega) > 1e-10:
            with mol.with_range_coulomb(omega):
                vk += td_grad.get_k(mol, dm) * (alpha-hyb)
        vj = vj.reshape(-1,3,nao,nao)
        vk = vk.reshape(-1,3,nao,nao)
        if singlet:
            veff1 = vj * 2 - vk
        else:
            veff1 = numpy.vstack((vj[:2]*2-vk[:2], -vk[2:]))
    else:
        vj = td_grad.get_j(mol, (oo0, dmz1doo+dmz1doo.T, dmxpy+dmxpy.T))
        vj = vj.reshape(-1,3,nao,nao)
        veff1 = numpy.zeros((4,3,nao,nao))
        if singlet:
            veff1[:3] = vj * 2
        else:
            veff1[:2] = vj[:2] * 2
    # xc-kernel response of the relaxation density z1ao (derivatives only).
    fxcz1 = _contract_xc_kernel(td_grad, mf.xc, z1ao, None,
                                False, False, True, max_memory)[0]
    veff1[0] += vxc1[1:]
    veff1[1] +=(f1oo[1:] + fxcz1[1:] + k1ao[1:]*2)*2 # *2 for dmz1doo+dmz1oo.T
    veff1[2] += f1vo[1:] * 2
    time1 = log.timer('2e AO integral derivatives', *time1)

    if atmlst is None:
        atmlst = range(mol.natm)
    offsetdic = mol.offset_nr_by_atom()
    de = numpy.zeros((len(atmlst),3))
    # Assemble the per-atom gradient from the derivative integrals.
    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = offsetdic[ia]

        # Ground state gradients
        h1ao = hcore_deriv(ia)
        h1ao[:,p0:p1] += veff1[0,:,p0:p1]
        h1ao[:,:,p0:p1] += veff1[0,:,p0:p1].transpose(0,2,1)
        # oo0*2 for doubly occupied orbitals
        e1 = numpy.einsum('xpq,pq->x', h1ao, oo0) * 2

        e1 += numpy.einsum('xpq,pq->x', h1ao, dmz1doo)
        e1 -= numpy.einsum('xpq,pq->x', s1[:,p0:p1], im0[p0:p1])
        e1 -= numpy.einsum('xqp,pq->x', s1[:,p0:p1], im0[:,p0:p1])

        e1 += numpy.einsum('xij,ij->x', veff1[1,:,p0:p1], oo0[p0:p1])
        e1 += numpy.einsum('xij,ij->x', veff1[2,:,p0:p1], dmxpy[p0:p1,:]) * 2
        e1 += numpy.einsum('xij,ij->x', veff1[3,:,p0:p1], dmxmy[p0:p1,:]) * 2
        e1 += numpy.einsum('xji,ij->x', veff1[2,:,p0:p1], dmxpy[:,p0:p1]) * 2
        e1 -= numpy.einsum('xji,ij->x', veff1[3,:,p0:p1], dmxmy[:,p0:p1]) * 2

        de[k] = e1

    log.timer('TDDFT nuclear gradients', *time0)
    return de
# dmvo, dmoo in AO-representation
# Note spin-trace is applied for fxc, kxc
#TODO: to include the response of grids
def _contract_xc_kernel(td_grad, xc_code, dmvo, dmoo=None, with_vxc=True,
                        with_kxc=True, singlet=True, max_memory=2000):
    '''Contract the xc kernel with AO density matrices on the DFT grid.

    Args:
        td_grad : Gradients object; provides mol and the underlying SCF.
        xc_code : xc functional string.
        dmvo : AO transition density; symmetrized before use.
        dmoo : optional second AO density to contract with fxc.
        with_vxc / with_kxc : also accumulate vxc / kxc contractions.
        singlet : only the singlet case is implemented; triplet raises.
        max_memory : memory budget forwarded to the grid block loop.

    Returns:
        (f1vo, f1oo, v1ao, k1ao). Each is a (4, nao, nao) array: index 0 is
        the plain matrix, indices 1-3 its d/dx, d/dy, d/dz grid derivatives
        (sign-flipped at the end). Entries are None when the corresponding
        input/flag was not given.
    '''
    mol = td_grad.mol
    mf = td_grad.base._scf
    grids = mf.grids

    ni = mf._numint
    xctype = ni._xc_type(xc_code)

    mo_coeff = mf.mo_coeff
    mo_occ = mf.mo_occ
    nao, nmo = mo_coeff.shape
    shls_slice = (0, mol.nbas)
    ao_loc = mol.ao_loc_nr()

    # dmvo ~ reduce(numpy.dot, (orbv, Xai, orbo.T))
    dmvo = (dmvo + dmvo.T) * .5 # because K_{ia,jb} == K_{ia,jb}

    f1vo = numpy.zeros((4,nao,nao)) # 0th-order, d/dx, d/dy, d/dz
    # deriv: order of xc derivatives requested from eval_xc (2 -> fxc,
    # raised to 3 below when kxc is also needed).
    deriv = 2
    if dmoo is not None:
        f1oo = numpy.zeros((4,nao,nao))
    else:
        f1oo = None
    if with_vxc:
        v1ao = numpy.zeros((4,nao,nao))
    else:
        v1ao = None
    if with_kxc:
        k1ao = numpy.zeros((4,nao,nao))
        deriv = 3
    else:
        k1ao = None

    if xctype == 'LDA':
        # ao_deriv=1: AO values plus first derivatives are needed on the grid.
        ao_deriv = 1
        if singlet:
            for ao, mask, weight, coords \
                    in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
                rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
                vxc, fxc, kxc = ni.eval_xc(xc_code, rho, 0, deriv=deriv)[1:]

                wfxc = fxc[0] * weight * 2  # *2 for alpha+beta
                rho1 = ni.eval_rho(mol, ao[0], dmvo, mask, 'LDA')
                # Accumulate value (k=0) and x,y,z derivative components.
                aow = numpy.einsum('pi,p,p->pi', ao[0], wfxc, rho1)
                for k in range(4):
                    f1vo[k] += numint._dot_ao_ao(mol, ao[k], aow, mask, shls_slice, ao_loc)
                if dmoo is not None:
                    rho2 = ni.eval_rho(mol, ao[0], dmoo, mask, 'LDA')
                    aow = numpy.einsum('pi,p,p->pi', ao[0], wfxc, rho2)
                    for k in range(4):
                        f1oo[k] += numint._dot_ao_ao(mol, ao[k], aow, mask, shls_slice, ao_loc)
                if with_vxc:
                    aow = numpy.einsum('pi,p,p->pi', ao[0], vxc[0], weight)
                    for k in range(4):
                        v1ao[k] += numint._dot_ao_ao(mol, ao[k], aow, mask, shls_slice, ao_loc)
                if with_kxc:
                    aow = numpy.einsum('pi,p,p,p->pi', ao[0], kxc[0], weight, rho1**2)
                    for k in range(4):
                        k1ao[k] += numint._dot_ao_ao(mol, ao[k], aow, mask, shls_slice, ao_loc)
                vxc = fxc = kxc = aow = rho = rho1 = rho2 = None
            if with_kxc:  # for (rho1*2)^2, *2 for alpha+beta in singlet
                k1ao *= 4

        else:
            raise NotImplementedError('LDA triplet')

    elif xctype == 'GGA':
        if singlet:
            def gga_sum_(vmat, ao, wv, mask):
                # Accumulate the GGA value block (symmetrized) and its
                # x,y,z derivative blocks into vmat.
                aow = numpy.einsum('pi,p->pi', ao[0], wv[0])
                aow += numpy.einsum('npi,np->pi', ao[1:4], wv[1:])
                tmp = numint._dot_ao_ao(mol, ao[0], aow, mask, shls_slice, ao_loc)
                vmat[0] += tmp + tmp.T
                rks_grad._gga_grad_sum_(vmat[1:], mol, ao, wv, mask, ao_loc)
            # ao_deriv=2: GGA gradient terms need second AO derivatives.
            ao_deriv = 2
            for ao, mask, weight, coords \
                    in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
                rho = ni.eval_rho2(mol, ao, mo_coeff, mo_occ, mask, 'GGA')
                vxc, fxc, kxc = ni.eval_xc(xc_code, rho, 0, deriv=deriv)[1:]

                rho1 = ni.eval_rho(mol, ao, dmvo, mask, 'GGA') * 2  # *2 for alpha + beta
                wv = numint._rks_gga_wv1(rho, rho1, vxc, fxc, weight)
                gga_sum_(f1vo, ao, wv, mask)

                if dmoo is not None:
                    rho2 = ni.eval_rho(mol, ao, dmoo, mask, 'GGA') * 2
                    wv = numint._rks_gga_wv1(rho, rho2, vxc, fxc, weight)
                    gga_sum_(f1oo, ao, wv, mask)
                if with_vxc:
                    wv = numint._rks_gga_wv0(rho, vxc, weight)
                    gga_sum_(v1ao, ao, wv, mask)
                if with_kxc:
                    wv = numint._rks_gga_wv2(rho, rho1, fxc, kxc, weight)
                    gga_sum_(k1ao, ao, wv, mask)
                vxc = fxc = kxc = rho = rho1 = None

        else:
            raise NotImplementedError('GGA triplet')

    else:
        raise NotImplementedError('meta-GGA')

    # Sign convention for the derivative components.
    f1vo[1:] *= -1
    if f1oo is not None: f1oo[1:] *= -1
    if v1ao is not None: v1ao[1:] *= -1
    if k1ao is not None: k1ao[1:] *= -1
    return f1vo, f1oo, v1ao, k1ao
class Gradients(tdrhf.Gradients):
    # Analytical nuclear gradients for TD-RKS excited states; only the
    # electronic part differs from the TD-RHF base class.
    @lib.with_doc(grad_elec.__doc__)
    def grad_elec(self, xy, singlet, atmlst=None):
        # Delegate to the module-level grad_elec, forwarding the memory
        # budget and verbosity stored on this Gradients object.
        return grad_elec(self, xy, singlet, atmlst, self.max_memory, self.verbose)
Grad = Gradients  # backward-compatible alias

from pyscf import tdscf
# Register this class so `td.Gradients()` on a TDA/TDDFT object returns a
# gradients object bound to that td instance.
tdscf.rks.TDA.Gradients = tdscf.rks.TDDFT.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
    # Smoke tests: each section computes an excited-state gradient and then
    # cross-checks it against central finite differences of the scanner energy.
    from pyscf import gto
    from pyscf import dft
    from pyscf import tddft
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H' , (0. , 0. , 1.804)],
        ['F' , (0. , 0. , 0.)], ]
    mol.unit = 'B'
    mol.basis = '631g'
    mol.build()

    # --- Singlet TDDFT gradients, pure LDA functional ---
    mf = dft.RKS(mol)
    mf.xc = 'LDA,'
    mf.grids.prune = False
    mf.conv_tol = 1e-14
#    mf.grids.level = 6
    mf.scf()

    td = tddft.TDDFT(mf)
    td.nstates = 3
    e, z = td.kernel()
    tdg = td.Gradients()
    g1 = tdg.kernel(state=3)
    print(g1)
# [[ 0  0  -1.31315477e-01]
#  [ 0  0   1.31319442e-01]]
    td_solver = td.as_scanner()
    e1 = td_solver(mol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
    e2 = td_solver(mol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
    print(abs((e1[2]-e2[2])/.002 - g1[0,2]).max())

    # --- Singlet TDA gradients, hybrid B3LYP (xcfun backend) ---
    mol.set_geom_('H 0 0 1.804; F 0 0 0', unit='B')
    mf = dft.RKS(mol)
    mf.xc = 'b3lyp'
    mf._numint.libxc = dft.xcfun
    mf.grids.prune = False
    mf.conv_tol = 1e-14
    mf.scf()

    td = tddft.TDA(mf)
    td.nstates = 3
    e, z = td.kernel()
    tdg = td.Gradients()
    g1 = tdg.kernel(state=3)
    print(g1)
# [[ 0  0  -1.21504524e-01]
#  [ 0  0   1.21505341e-01]]
    td_solver = td.as_scanner()
    e1 = td_solver(mol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
    e2 = td_solver(mol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
    print(abs((e1[2]-e2[2])/.002 - g1[0,2]).max())

    # --- Triplet TDA gradients, pure LDA ---
    mol.set_geom_('H 0 0 1.804; F 0 0 0', unit='B')
    mf = dft.RKS(mol)
    mf.xc = 'lda,'
    mf.conv_tol = 1e-14
    mf.kernel()

    td = tddft.TDA(mf)
    td.nstates = 3
    td.singlet = False
    e, z = td.kernel()
    tdg = Gradients(td)
    g1 = tdg.kernel(state=2)
    print(g1)
# [[ 0  0  -0.3633334]
#  [ 0  0   0.3633334]]
    td_solver = td.as_scanner()
    e1 = td_solver(mol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
    e2 = td_solver(mol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
    print(abs((e1[2]-e2[2])/.002 - g1[0,2]).max())

    # --- Triplet TDA gradients, hybrid B3LYP ---
    mf = dft.RKS(mol)
    mf.xc = 'b3lyp'
    mf.conv_tol = 1e-14
    mf.kernel()

    td = tddft.TDA(mf)
    td.nstates = 3
    td.singlet = False
    e, z = td.kernel()
    tdg = td.Gradients()
    g1 = tdg.kernel(state=2)
    print(g1)
# [[ 0  0  -0.3633334]
#  [ 0  0   0.3633334]]
    td_solver = td.as_scanner()
    e1 = td_solver(mol.set_geom_('H 0 0 1.805; F 0 0 0', unit='B'))
    e2 = td_solver(mol.set_geom_('H 0 0 1.803; F 0 0 0', unit='B'))
    print(abs((e1[2]-e2[2])/.002 - g1[0,2]).max())
| [
"numpy.einsum",
"pyscf.dft.numint._dot_ao_ao",
"pyscf.lib.current_memory",
"pyscf.dft.RKS",
"pyscf.dft.numint._rks_gga_wv2",
"time.clock",
"pyscf.grad.rks._gga_grad_sum_",
"pyscf.tddft.TDDFT",
"pyscf.scf.cphf.solve",
"pyscf.dft.numint._rks_gga_wv1",
"numpy.vstack",
"pyscf.lib.logger.new_logger... | [((13423, 13453), 'pyscf.lib.class_as_method', 'lib.class_as_method', (['Gradients'], {}), '(Gradients)\n', (13442, 13453), False, 'from pyscf import lib\n'), ((1474, 1509), 'pyscf.lib.logger.new_logger', 'logger.new_logger', (['td_grad', 'verbose'], {}), '(td_grad, verbose)\n', (1491, 1509), False, 'from pyscf.lib import logger\n'), ((2085, 2123), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv, xpy, orbo.T)'], {}), '(numpy.dot, (orbv, xpy, orbo.T))\n', (2091, 2123), False, 'from functools import reduce\n'), ((2136, 2174), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv, xmy, orbo.T)'], {}), '(numpy.dot, (orbv, xmy, orbo.T))\n', (2142, 2174), False, 'from functools import reduce\n'), ((2187, 2225), 'functools.reduce', 'reduce', (['numpy.dot', '(orbo, doo, orbo.T)'], {}), '(numpy.dot, (orbo, doo, orbo.T))\n', (2193, 2225), False, 'from functools import reduce\n'), ((2238, 2276), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv, dvv, orbv.T)'], {}), '(numpy.dot, (orbv, dvv, orbv.T))\n', (2244, 2276), False, 'from functools import reduce\n'), ((5011, 5048), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv, z1, orbo.T)'], {}), '(numpy.dot, (orbv, z1, orbo.T))\n', (5017, 5048), False, 'from functools import reduce\n'), ((5090, 5113), 'numpy.zeros', 'numpy.zeros', (['(nmo, nmo)'], {}), '((nmo, nmo))\n', (5101, 5113), False, 'import numpy\n'), ((5136, 5186), 'functools.reduce', 'reduce', (['numpy.dot', '(orbo.T, veff0doo + veff, orbo)'], {}), '(numpy.dot, (orbo.T, veff0doo + veff, orbo))\n', (5142, 5186), False, 'from functools import reduce\n'), ((5208, 5262), 'numpy.einsum', 'numpy.einsum', (['"""ak,ai->ki"""', 'veff0mop[nocc:, :nocc]', 'xpy'], {}), "('ak,ai->ki', veff0mop[nocc:, :nocc], xpy)\n", (5220, 5262), False, 'import numpy\n'), ((5285, 5339), 'numpy.einsum', 'numpy.einsum', (['"""ak,ai->ki"""', 'veff0mom[nocc:, :nocc]', 'xmy'], {}), "('ak,ai->ki', veff0mom[nocc:, :nocc], xmy)\n", (5297, 5339), False, 'import 
numpy\n'), ((5362, 5416), 'numpy.einsum', 'numpy.einsum', (['"""ci,ai->ac"""', 'veff0mop[nocc:, :nocc]', 'xpy'], {}), "('ci,ai->ac', veff0mop[nocc:, :nocc], xpy)\n", (5374, 5416), False, 'import numpy\n'), ((5439, 5493), 'numpy.einsum', 'numpy.einsum', (['"""ci,ai->ac"""', 'veff0mom[nocc:, :nocc]', 'xmy'], {}), "('ci,ai->ac', veff0mom[nocc:, :nocc], xmy)\n", (5451, 5493), False, 'import numpy\n'), ((5808, 5831), 'numpy.zeros', 'numpy.zeros', (['(nmo, nmo)'], {}), '((nmo, nmo))\n', (5819, 5831), False, 'import numpy\n'), ((5982, 6041), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff, im0 + zeta * dm1, mo_coeff.T)'], {}), '(numpy.dot, (mo_coeff, im0 + zeta * dm1, mo_coeff.T))\n', (5988, 6041), False, 'from functools import reduce\n'), ((6347, 6380), 'functools.reduce', 'reduce', (['numpy.dot', '(orbo, orbo.T)'], {}), '(numpy.dot, (orbo, orbo.T))\n', (6353, 6380), False, 'from functools import reduce\n'), ((9258, 9284), 'numpy.zeros', 'numpy.zeros', (['(4, nao, nao)'], {}), '((4, nao, nao))\n', (9269, 9284), False, 'import numpy\n'), ((13160, 13191), 'pyscf.lib.with_doc', 'lib.with_doc', (['grad_elec.__doc__'], {}), '(grad_elec.__doc__)\n', (13172, 13191), False, 'from pyscf import lib\n'), ((13573, 13583), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (13581, 13583), False, 'from pyscf import gto\n'), ((13781, 13793), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (13788, 13793), False, 'from pyscf import dft\n'), ((13911, 13926), 'pyscf.tddft.TDDFT', 'tddft.TDDFT', (['mf'], {}), '(mf)\n', (13922, 13926), False, 'from pyscf import tddft\n'), ((14375, 14387), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (14382, 14387), False, 'from pyscf import dft\n'), ((14515, 14528), 'pyscf.tddft.TDA', 'tddft.TDA', (['mf'], {}), '(mf)\n', (14524, 14528), False, 'from pyscf import tddft\n'), ((14977, 14989), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (14984, 14989), False, 'from pyscf import dft\n'), ((15058, 15071), 'pyscf.tddft.TDA', 
'tddft.TDA', (['mf'], {}), '(mf)\n', (15067, 15071), False, 'from pyscf import tddft\n'), ((15480, 15492), 'pyscf.dft.RKS', 'dft.RKS', (['mol'], {}), '(mol)\n', (15487, 15492), False, 'from pyscf import dft\n'), ((15562, 15575), 'pyscf.tddft.TDA', 'tddft.TDA', (['mf'], {}), '(mf)\n', (15571, 15575), False, 'from pyscf import tddft\n'), ((1522, 1534), 'time.clock', 'time.clock', ([], {}), '()\n', (1532, 1534), False, 'import time\n'), ((1536, 1547), 'time.time', 'time.time', ([], {}), '()\n', (1545, 1547), False, 'import time\n'), ((1915, 1950), 'numpy.einsum', 'numpy.einsum', (['"""ai,bi->ab"""', 'xpy', 'xpy'], {}), "('ai,bi->ab', xpy, xpy)\n", (1927, 1950), False, 'import numpy\n'), ((1953, 1988), 'numpy.einsum', 'numpy.einsum', (['"""ai,bi->ab"""', 'xmy', 'xmy'], {}), "('ai,bi->ab', xmy, xmy)\n", (1965, 1988), False, 'import numpy\n'), ((2037, 2072), 'numpy.einsum', 'numpy.einsum', (['"""ai,aj->ij"""', 'xmy', 'xmy'], {}), "('ai,aj->ij', xmy, xmy)\n", (2049, 2072), False, 'import numpy\n'), ((2292, 2312), 'pyscf.lib.current_memory', 'lib.current_memory', ([], {}), '()\n', (2310, 2312), False, 'from pyscf import lib\n'), ((3469, 3516), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff.T, veff, mo_coeff)'], {}), '(numpy.dot, (mo_coeff.T, veff, mo_coeff))\n', (3475, 3516), False, 'from functools import reduce\n'), ((3704, 3751), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff.T, veff, mo_coeff)'], {}), '(numpy.dot, (mo_coeff.T, veff, mo_coeff))\n', (3710, 3751), False, 'from functools import reduce\n'), ((4210, 4257), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff.T, veff, mo_coeff)'], {}), '(numpy.dot, (mo_coeff.T, veff, mo_coeff))\n', (4216, 4257), False, 'from functools import reduce\n'), ((4423, 4446), 'numpy.zeros', 'numpy.zeros', (['(nmo, nmo)'], {}), '((nmo, nmo))\n', (4434, 4446), False, 'import numpy\n'), ((4761, 4867), 'pyscf.scf.cphf.solve', 'cphf.solve', (['fvind', 'mo_energy', 'mo_occ', 'wvo'], {'max_cycle': 
'td_grad.cphf_max_cycle', 'tol': 'td_grad.cphf_conv_tol'}), '(fvind, mo_energy, mo_occ, wvo, max_cycle=td_grad.cphf_max_cycle,\n tol=td_grad.cphf_conv_tol)\n', (4771, 4867), False, 'from pyscf.scf import cphf\n'), ((5516, 5570), 'numpy.einsum', 'numpy.einsum', (['"""ki,ai->ak"""', 'veff0mop[:nocc, :nocc]', 'xpy'], {}), "('ki,ai->ak', veff0mop[:nocc, :nocc], xpy)\n", (5528, 5570), False, 'import numpy\n'), ((5595, 5649), 'numpy.einsum', 'numpy.einsum', (['"""ki,ai->ak"""', 'veff0mom[:nocc, :nocc]', 'xmy'], {}), "('ki,ai->ak', veff0mom[:nocc, :nocc], xmy)\n", (5607, 5649), False, 'import numpy\n'), ((5663, 5710), 'pyscf.lib.direct_sum', 'lib.direct_sum', (['"""i+j->ij"""', 'mo_energy', 'mo_energy'], {}), "('i+j->ij', mo_energy, mo_energy)\n", (5677, 5710), False, 'from pyscf import lib\n'), ((5935, 5950), 'numpy.eye', 'numpy.eye', (['nocc'], {}), '(nocc)\n', (5944, 5950), False, 'import numpy\n'), ((7011, 7040), 'numpy.zeros', 'numpy.zeros', (['(4, 3, nao, nao)'], {}), '((4, 3, nao, nao))\n', (7022, 7040), False, 'import numpy\n'), ((7959, 7999), 'numpy.einsum', 'numpy.einsum', (['"""xpq,pq->x"""', 'h1ao', 'dmz1doo'], {}), "('xpq,pq->x', h1ao, dmz1doo)\n", (7971, 7999), False, 'import numpy\n'), ((8014, 8065), 'numpy.einsum', 'numpy.einsum', (['"""xpq,pq->x"""', 's1[:, p0:p1]', 'im0[p0:p1]'], {}), "('xpq,pq->x', s1[:, p0:p1], im0[p0:p1])\n", (8026, 8065), False, 'import numpy\n'), ((8079, 8133), 'numpy.einsum', 'numpy.einsum', (['"""xqp,pq->x"""', 's1[:, p0:p1]', 'im0[:, p0:p1]'], {}), "('xqp,pq->x', s1[:, p0:p1], im0[:, p0:p1])\n", (8091, 8133), False, 'import numpy\n'), ((8147, 8204), 'numpy.einsum', 'numpy.einsum', (['"""xij,ij->x"""', 'veff1[1, :, p0:p1]', 'oo0[p0:p1]'], {}), "('xij,ij->x', veff1[1, :, p0:p1], oo0[p0:p1])\n", (8159, 8204), False, 'import numpy\n'), ((9368, 9394), 'numpy.zeros', 'numpy.zeros', (['(4, nao, nao)'], {}), '((4, nao, nao))\n', (9379, 9394), False, 'import numpy\n'), ((9455, 9481), 'numpy.zeros', 'numpy.zeros', (['(4, nao, nao)'], {}), 
'((4, nao, nao))\n', (9466, 9481), False, 'import numpy\n'), ((9542, 9568), 'numpy.zeros', 'numpy.zeros', (['(4, nao, nao)'], {}), '((4, nao, nao))\n', (9553, 9568), False, 'import numpy\n'), ((1999, 2034), 'numpy.einsum', 'numpy.einsum', (['"""ai,aj->ij"""', 'xpy', 'xpy'], {}), "('ai,aj->ij', xpy, xpy)\n", (2011, 2034), False, 'import numpy\n'), ((3277, 3320), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv.T, veff0doo, orbo)'], {}), '(numpy.dot, (orbv.T, veff0doo, orbo))\n', (3283, 3320), False, 'from functools import reduce\n'), ((3532, 3586), 'numpy.einsum', 'numpy.einsum', (['"""ki,ai->ak"""', 'veff0mop[:nocc, :nocc]', 'xpy'], {}), "('ki,ai->ak', veff0mop[:nocc, :nocc], xpy)\n", (3544, 3586), False, 'import numpy\n'), ((3605, 3659), 'numpy.einsum', 'numpy.einsum', (['"""ac,ai->ci"""', 'veff0mop[nocc:, nocc:]', 'xpy'], {}), "('ac,ai->ci', veff0mop[nocc:, nocc:], xpy)\n", (3617, 3659), False, 'import numpy\n'), ((3767, 3821), 'numpy.einsum', 'numpy.einsum', (['"""ki,ai->ak"""', 'veff0mom[:nocc, :nocc]', 'xmy'], {}), "('ki,ai->ak', veff0mom[:nocc, :nocc], xmy)\n", (3779, 3821), False, 'import numpy\n'), ((3840, 3894), 'numpy.einsum', 'numpy.einsum', (['"""ac,ai->ci"""', 'veff0mom[nocc:, nocc:]', 'xmy'], {}), "('ac,ai->ci', veff0mom[nocc:, nocc:], xmy)\n", (3852, 3894), False, 'import numpy\n'), ((4035, 4078), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv.T, veff0doo, orbo)'], {}), '(numpy.dot, (orbv.T, veff0doo, orbo))\n', (4041, 4078), False, 'from functools import reduce\n'), ((4273, 4327), 'numpy.einsum', 'numpy.einsum', (['"""ki,ai->ak"""', 'veff0mop[:nocc, :nocc]', 'xpy'], {}), "('ki,ai->ak', veff0mop[:nocc, :nocc], xpy)\n", (4285, 4327), False, 'import numpy\n'), ((4346, 4400), 'numpy.einsum', 'numpy.einsum', (['"""ac,ai->ci"""', 'veff0mop[nocc:, nocc:]', 'xpy'], {}), "('ac,ai->ci', veff0mop[nocc:, nocc:], xpy)\n", (4358, 4400), False, 'import numpy\n'), ((6833, 6877), 'numpy.vstack', 'numpy.vstack', (['(vj[:2] * 2 - vk[:2], -vk[2:])'], {}), 
'((vj[:2] * 2 - vk[:2], -vk[2:]))\n', (6845, 6877), False, 'import numpy\n'), ((7903, 7939), 'numpy.einsum', 'numpy.einsum', (['"""xpq,pq->x"""', 'h1ao', 'oo0'], {}), "('xpq,pq->x', h1ao, oo0)\n", (7915, 7939), False, 'import numpy\n'), ((8217, 8279), 'numpy.einsum', 'numpy.einsum', (['"""xij,ij->x"""', 'veff1[2, :, p0:p1]', 'dmxpy[p0:p1, :]'], {}), "('xij,ij->x', veff1[2, :, p0:p1], dmxpy[p0:p1, :])\n", (8229, 8279), False, 'import numpy\n'), ((8295, 8357), 'numpy.einsum', 'numpy.einsum', (['"""xij,ij->x"""', 'veff1[3, :, p0:p1]', 'dmxmy[p0:p1, :]'], {}), "('xij,ij->x', veff1[3, :, p0:p1], dmxmy[p0:p1, :])\n", (8307, 8357), False, 'import numpy\n'), ((8373, 8435), 'numpy.einsum', 'numpy.einsum', (['"""xji,ij->x"""', 'veff1[2, :, p0:p1]', 'dmxpy[:, p0:p1]'], {}), "('xji,ij->x', veff1[2, :, p0:p1], dmxpy[:, p0:p1])\n", (8385, 8435), False, 'import numpy\n'), ((8451, 8513), 'numpy.einsum', 'numpy.einsum', (['"""xji,ij->x"""', 'veff1[3, :, p0:p1]', 'dmxmy[:, p0:p1]'], {}), "('xji,ij->x', veff1[3, :, p0:p1], dmxmy[:, p0:p1])\n", (8463, 8513), False, 'import numpy\n'), ((4704, 4743), 'functools.reduce', 'reduce', (['numpy.dot', '(orbv.T, v1ao, orbo)'], {}), '(numpy.dot, (orbv.T, v1ao, orbo))\n', (4710, 4743), False, 'from functools import reduce\n'), ((10109, 10154), 'numpy.einsum', 'numpy.einsum', (['"""pi,p,p->pi"""', 'ao[0]', 'wfxc', 'rho1'], {}), "('pi,p,p->pi', ao[0], wfxc, rho1)\n", (10121, 10154), False, 'import numpy\n'), ((10221, 10281), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', (['mol', 'ao[k]', 'aow', 'mask', 'shls_slice', 'ao_loc'], {}), '(mol, ao[k], aow, mask, shls_slice, ao_loc)\n', (10238, 10281), False, 'from pyscf.dft import numint\n'), ((10415, 10460), 'numpy.einsum', 'numpy.einsum', (['"""pi,p,p->pi"""', 'ao[0]', 'wfxc', 'rho2'], {}), "('pi,p,p->pi', ao[0], wfxc, rho2)\n", (10427, 10460), False, 'import numpy\n'), ((10651, 10700), 'numpy.einsum', 'numpy.einsum', (['"""pi,p,p->pi"""', 'ao[0]', 'vxc[0]', 'weight'], {}), "('pi,p,p->pi', ao[0], 
vxc[0], weight)\n", (10663, 10700), False, 'import numpy\n'), ((10891, 10953), 'numpy.einsum', 'numpy.einsum', (['"""pi,p,p,p->pi"""', 'ao[0]', 'kxc[0]', 'weight', '(rho1 ** 2)'], {}), "('pi,p,p,p->pi', ao[0], kxc[0], weight, rho1 ** 2)\n", (10903, 10953), False, 'import numpy\n'), ((11435, 11473), 'numpy.einsum', 'numpy.einsum', (['"""pi,p->pi"""', 'ao[0]', 'wv[0]'], {}), "('pi,p->pi', ao[0], wv[0])\n", (11447, 11473), False, 'import numpy\n'), ((11497, 11540), 'numpy.einsum', 'numpy.einsum', (['"""npi,np->pi"""', 'ao[1:4]', 'wv[1:]'], {}), "('npi,np->pi', ao[1:4], wv[1:])\n", (11509, 11540), False, 'import numpy\n'), ((11563, 11623), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', (['mol', 'ao[0]', 'aow', 'mask', 'shls_slice', 'ao_loc'], {}), '(mol, ao[0], aow, mask, shls_slice, ao_loc)\n', (11580, 11623), False, 'from pyscf.dft import numint\n'), ((11679, 11739), 'pyscf.grad.rks._gga_grad_sum_', 'rks_grad._gga_grad_sum_', (['vmat[1:]', 'mol', 'ao', 'wv', 'mask', 'ao_loc'], {}), '(vmat[1:], mol, ao, wv, mask, ao_loc)\n', (11702, 11739), True, 'from pyscf.grad import rks as rks_grad\n'), ((12149, 12197), 'pyscf.dft.numint._rks_gga_wv1', 'numint._rks_gga_wv1', (['rho', 'rho1', 'vxc', 'fxc', 'weight'], {}), '(rho, rho1, vxc, fxc, weight)\n', (12168, 12197), False, 'from pyscf.dft import numint\n'), ((10535, 10595), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', (['mol', 'ao[k]', 'aow', 'mask', 'shls_slice', 'ao_loc'], {}), '(mol, ao[k], aow, mask, shls_slice, ao_loc)\n', (10552, 10595), False, 'from pyscf.dft import numint\n'), ((10775, 10835), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', (['mol', 'ao[k]', 'aow', 'mask', 'shls_slice', 'ao_loc'], {}), '(mol, ao[k], aow, mask, shls_slice, ao_loc)\n', (10792, 10835), False, 'from pyscf.dft import numint\n'), ((11026, 11086), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', (['mol', 'ao[k]', 'aow', 'mask', 'shls_slice', 'ao_loc'], {}), '(mol, ao[k], aow, mask, shls_slice, ao_loc)\n', (11043, 11086), 
False, 'from pyscf.dft import numint\n'), ((12377, 12425), 'pyscf.dft.numint._rks_gga_wv1', 'numint._rks_gga_wv1', (['rho', 'rho2', 'vxc', 'fxc', 'weight'], {}), '(rho, rho2, vxc, fxc, weight)\n', (12396, 12425), False, 'from pyscf.dft import numint\n'), ((12529, 12566), 'pyscf.dft.numint._rks_gga_wv0', 'numint._rks_gga_wv0', (['rho', 'vxc', 'weight'], {}), '(rho, vxc, weight)\n', (12548, 12566), False, 'from pyscf.dft import numint\n'), ((12670, 12718), 'pyscf.dft.numint._rks_gga_wv2', 'numint._rks_gga_wv2', (['rho', 'rho1', 'fxc', 'kxc', 'weight'], {}), '(rho, rho1, fxc, kxc, weight)\n', (12689, 12718), False, 'from pyscf.dft import numint\n')] |
from collections import OrderedDict

import numpy as np

# Seed NumPy's global RNG so random weight initialisation (and therefore
# every run's results) is reproducible.
np.random.seed(123)
def softmax(z):
    """Row-wise numerically stable softmax.

    Each row's maximum is subtracted before exponentiating so np.exp never
    overflows; the result is unchanged because softmax is shift-invariant.
    """
    shifted = z - np.max(z, axis=1, keepdims=True)
    exp_z = np.exp(shifted)
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
def convolution2d(x, kernel, stride):
    """
    Convolution 2D : convolve 'x' with filter 'kernel' using step 'stride'.

    [Input]
        x: 2D data (e.g. image)
            - Shape : (Height, Width)
        kernel : 2D convolution filter
            - Shape : (Kernel size, Kernel size)
        stride : Stride size
            - dtype : int

    [Output]
        conv_out : convolution result
            - Shape : (Conv_Height, Conv_Width)
            - Conv_Height = (Height - Kernel size) // stride + 1
            - Conv_Width  = (Width  - Kernel size) // stride + 1

    Bug fix: the original sized the output for stride=1 and stepped the
    OUTPUT indices by `stride`, leaving zero rows/columns and returning the
    wrong shape whenever stride > 1. The stride now moves the receptive
    field over the input; stride=1 results are unchanged.
    """
    height, width = x.shape
    kernel_size = kernel.shape[0]
    # Output size must account for the stride.
    out_h = (height - kernel_size) // stride + 1
    out_w = (width - kernel_size) // stride + 1
    conv_out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            h0 = i * stride
            w0 = j * stride
            # Element-wise product of the receptive field with the kernel.
            conv_out[i, j] = np.sum(x[h0:h0 + kernel_size, w0:w0 + kernel_size] * kernel)
    return conv_out
class ReLU:
    """
    ReLU Function. ReLU(x) = max(0, x)

    Forward & backward path of ReLU.
    ReLU(x) = x if x > 0, 0 otherwise (strict '>', so the gradient at
    exactly 0 is masked out).

    Fixes vs. the original: the element-by-element loops (which also
    contained duplicated statements) only supported 1-D and 2-D inputs;
    the vectorised version works for arrays of any shape. A shape mismatch
    in backward() now raises instead of printing and returning None.
    """
    def __init__(self):
        # True (1) where the forward input satisfied z <= 0; those positions
        # receive zero gradient in backward().
        self.zero_mask = None

    def forward(self, z):
        """
        ReLU Forward.
        ReLU(x) = max(0, x)

        z --> (ReLU) --> out

        [Inputs]
            z : ReLU input in any shape.

        [Outputs]
            out : ReLU(z), same shape as z.
        """
        # Remember which entries were clipped; backward() relies on it.
        self.zero_mask = (z <= 0)
        out = np.where(self.zero_mask, 0, z)
        return out

    def backward(self, d_prev):
        """
        ReLU Backward.

        z --> (ReLU) --> out
        dz <-- (dReLU) <-- d_prev(dL/dout)

        [Inputs]
            d_prev : Gradients until now.
                d_prev = dL/dk, where k = ReLU(z).

        [Outputs]
            dz : Gradients w.r.t. ReLU input z.

        Raises:
            ValueError : if d_prev's shape differs from the forward input's.
        """
        if np.shape(self.zero_mask) != np.shape(d_prev):
            raise ValueError(
                'd_prev shape %s does not match forward input shape %s'
                % (np.shape(d_prev), np.shape(self.zero_mask)))
        # Gradient passes through where z > 0 and is zeroed where z <= 0.
        dz = np.where(self.zero_mask, 0, d_prev)
        return dz

    def update(self, learning_rate):
        # ReLU has no trainable parameters; kept for a uniform layer API.
        pass

    def summary(self):
        return 'ReLU Activation'
################################################################################################################
# ** ConvolutionLayer ** #
# Single Convolution Layer. #
# #
# Given input images, #
# 'Convolution Layer' do convolution on input with kernels and convolution options (stride, pad ...). #
# #
# You need to implement forward and backward pass of single convolution layer. #
# (This is NOT an entire CNN model.) #
# #
# ** ConvolutionLayer ** #
# 단일 합성곱 계층 #
# #
# 이미지 입력을 받아서, 합성곱 계층은 주어진 세팅과 kernel(self.W) 및 bias(self.b)를 활용하여# #
# 입력에 대하여 합성곱을 수행한다. #
# #
# 합성곱 계층의 Forward, Backward 함수를 구현하시오 #
# (CNN 모델 전체가 아닙니다.) #
################################################################################################################
class ConvolutionLayer:
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad=0):
        """
        Single 2-D convolution layer.

        [Input]
            in_channels : number of input channels expected by forward().
            out_channels : number of filters, i.e. output channels.
            kernel_size : spatial size of each (square) filter.
            stride : step of the filter while sliding (default 1).
            pad : zero-padding added around the input (default 0).
        """
        # Weights: one (in_channels, kernel_size, kernel_size) filter per
        # output channel, drawn from a standard normal distribution.
        self.W = np.random.randn(out_channels, in_channels, kernel_size, kernel_size)
        # One scalar bias per output channel.
        self.b = np.zeros(out_channels, dtype=np.float32)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad = pad
    def forward(self, x):
        """
        Convolution Layer Forward.

        [Input]
            x: 4-D input batch data
                - Shape : (Batch size, In Channel, Height, Width)

        [Output]
            conv_out : convolution result
                - Shape : (Conv_Height, Conv_Width)
                - Conv_Height & Conv_Width can be calculated using 'Height', 'Width', 'Kernel size', 'Stride'

        No implementation is required here; the work happens in
        self.convolution.
        """
        # Keep the input and output shape around; presumably backward()
        # uses them -- TODO confirm once backward is implemented.
        self.x = x
        batch_size, in_channel, _, _ = x.shape
        conv = self.convolution(x, self.W, self.b, self.stride, self.pad)
        self.output_shape = conv.shape
        return conv
def convolution(self, x, kernel, bias=None, stride=1, pad=0):
"""
Convolution Operation.
Add bias if bias is not none
Use
variables --> self.W, self.b, self.stride, self.pad, self.kernel_size
function --> convolution2d (what you already implemented above.)
위 변수와 함수를 활용하여 구현하세요.
bias는 None이 아닐 때, 더해집니다.
[Input]
x: 4-D input batch data
- Shape : (Batch size, In Channel, Height, Width)
kernel: 4-D convolution filter
- Shape : (Out Channel, In Channel, Kernel size, Kernel size)
bias: 1-D bias
- Shape : (Out Channel)
- default : None
stride : Stride size
- dtype : int
- default : 1
pad: pad value, how much to pad
- dtype : int
- default : 0
[Output]
conv_out : convolution result
- Shape : (Conv_Height, Conv_Width)
- Conv_Height & Conv_Width can be calculated using 'Height', 'Width', 'Kernel size', 'Stride'
"""
batch_size, in_channel, _, _ = x.shape
if pad > 0:
x = self.zero_pad(x, pad)
_, _, height, width = x.shape
out_channel, _, kernel_size, _ = kernel.shape
assert x.shape[1] == kernel.shape[1]
conv = None
# =============================== EDIT HERE ===============================
if bias is not None:
# Add bias
print('bias:', bias)
pass
#print(kernel)
conv = np.zeros((2,1,8,8))
print(x.shape, conv.shape, kernel.shape)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
conv[i][j] = convolution2d(x[i][j], kernel[i][j], 1)
print(conv.shape)
conv = self.zero_pad(conv, kernel_size - 2)
print(conv.shape)
# =========================================================================
return conv
def backward(self, d_prev):
"""
Convolution Layer Backward.
Compute derivatives w.r.t x, W, b (self.x, self.W, self.b)
x, W, b에 대한 gradient (dx, dW, db)를 구하시오.
** [HINT] **
See lecture notes.
"convolution2d" & "self.convolution" functions might be useful I guess...
강의 노트를 보세요. (강의 노트에는 1 input channel, 1 output channel간의 gradient 계산)
"convolution2d" & "self.convolution" 함수가 유용할지도 모릅니다...
[Input]
d_prev: Gradients value so far in back-propagation process.
[Output]
self.dx : Gradient values of input x (self.x)
- Shape : (Batch size, channel, Heigth, Width)
"""
batch_size, in_channel, height, width = self.x.shape
out_channel, _, kernel_size, _ = self.W.shape
if len(d_prev.shape) < 3:
d_prev = d_prev.reshape(*self.output_shape)
self.dW = np.zeros_like(self.W, dtype=np.float64)
self.db = np.zeros_like(self.b, dtype=np.float64)
dx = np.zeros_like(self.x, dtype=np.float64)
# =============================== EDIT HERE ===============================
# dW
# db
# dx
# =========================================================================
return dx
def zero_pad(self, x, pad):
"""
Zero padding
Given x and pad value, pad input 'x' around height & width.
입력 x에 대하여 좌우상하 'pad'만큼을 '0'으로 padding 하시오.
[Input]
x: 4-D input batch data
- Shape : (Batch size, In Channel, Height, Width)
pad: pad value. how much to pad on one side.
e.g. pad=2 => pad 2 zeros on left, right, up & down.
[Output]
padded_x : padded x
- Shape : (Batch size, In Channel, Padded_Height, Padded_Width)
"""
padded_x = None
batch_size, in_channel, height, width = x.shape
# =============================== EDIT HERE ===============================
padded_x = np.zeros((batch_size, in_channel, height+2, width+2))
for i in range(batch_size):
for j in range(in_channel):
padded_x [i][j]= np.pad(x[i][j], pad, 'constant', constant_values=(0))
# =========================================================================
return padded_x
def update(self, learning_rate):
# Update weights
self.W -= self.dW * learning_rate
self.b -= self.db * learning_rate
def summary(self):
return 'Filter Size : ' + str(self.W.shape) + \
' Stride : %d, Zero padding: %d' % (self.stride, self.pad)
################################################################################################################
# ** Max-Pooling Layer ** #
# Single Max-Pooling Layer. #
# #
# Given input images, #
# 'Max-Pooling Layer' max_pool (or subsample) maximum value in certain region of input #
# #
# You need to implement forward and backward pass of single max-pooling layer. #
# (This is NOT an entire model.) #
# #
# 이미지를 입력으로 받아, #
# 'Max-Pooling Layer'는 stride를 해가며 해당하는 구역에서 가장 큰 값을 뽑는다. #
# #
# Max-pooling layer의 Forward와 Backward를 구현하시오. #
################################################################################################################
class MaxPoolingLayer:
    """Single max-pooling layer: takes the maximum over each kernel window
    while striding across the input. Windows that would run past the image
    edge are skipped (no implicit padding), so a 4x4 image with a 3x3 pool
    and stride 2 yields a 1x1 output.
    """
    def __init__(self, kernel_size, stride=2):
        self.kernel_size = kernel_size
        self.stride = stride

    def forward(self, x):
        """Max-Pooling Layer Forward.

        [Input]
            x: 4-D input batch data
               - Shape : (Batch size, In Channel, Height, Width)
        [Output]
            max_pool : pooled result
               - Shape : (Batch size, Channel, Pool_Height, Pool_Width)
        """
        batch_size, channel, height, width = x.shape
        # Keep the input so backward can locate each window's argmax.
        self.x = x
        # 1 where a value was pooled from, 0 otherwise (kept for inspection).
        self.mask = np.zeros_like(x)
        out_h = (height - self.kernel_size) // self.stride + 1
        out_w = (width - self.kernel_size) // self.stride + 1
        max_pool = np.zeros((batch_size, channel, out_h, out_w))
        for b in range(batch_size):
            for c in range(channel):
                for i in range(out_h):
                    hs = i * self.stride
                    for j in range(out_w):
                        ws = j * self.stride
                        window = x[b, c, hs:hs + self.kernel_size, ws:ws + self.kernel_size]
                        max_pool[b, c, i, j] = np.max(window)
                        r, s = np.unravel_index(np.argmax(window), window.shape)
                        self.mask[b, c, hs + r, ws + s] = 1
        self.output_shape = max_pool.shape
        return max_pool

    def backward(self, d_prev=1):
        """Max-Pooling Layer Backward.

        Routes each upstream gradient entry back to the position that won
        the corresponding max in the forward pass (gradients from
        overlapping windows accumulate).

        [Input]
            d_prev: Gradients value so far in back-propagation process.
                    (Batch_size, Channel, Height, Width) or (Batch_size, FC Dim)
        [Output]
            d_max : gradients w.r.t. input x — same shape as x
        """
        if len(d_prev.shape) < 3:
            # Gradient arrived flattened from an FC layer; restore 4-D shape.
            d_prev = d_prev.reshape(*self.output_shape)
        batch, channel, out_h, out_w = d_prev.shape
        d_max = np.zeros_like(self.x, dtype=np.float64)
        for b in range(batch):
            for c in range(channel):
                for i in range(out_h):
                    hs = i * self.stride
                    for j in range(out_w):
                        ws = j * self.stride
                        window = self.x[b, c, hs:hs + self.kernel_size, ws:ws + self.kernel_size]
                        r, s = np.unravel_index(np.argmax(window), window.shape)
                        d_max[b, c, hs + r, ws + s] += d_prev[b, c, i, j]
        return d_max

    def update(self, learning_rate):
        # NOT USED IN MAX-POOL (no trainable parameters)
        pass

    def summary(self):
        return 'Pooling Size : ' + str((self.kernel_size, self.kernel_size)) + \
               ' Stride : %d' % (self.stride)
################################################################################################################
# ** Fully-Connected Layer ** #
# Single Fully-Connected Layer. #
# #
# Given input features, #
# FC layer linearly transform input with weights (self.W) & bias (self.b) #
# #
# 입력 특징을 받아, #
# FC Layer는 weight와 bias를 활용하여 특징을 변형한다. #
# #
# You need to implement forward and backward pass #
# This FC Layer works same as one in HW-4, so you can copy your codes if you need any. #
# 이 FC Layer는 HW-4의 Hidden Layer와 동일 혹은 비슷하게 동장하므로 가져올 코드가 있다면 참조해도 좋다. #
# #
################################################################################################################
class FCLayer:
    """Single fully-connected (affine) layer: out = x @ W + b.

    4-D convolutional feature maps are flattened per example before the
    affine transform.
    """
    def __init__(self, input_dim, output_dim):
        # He-style weight initialization; bias starts at zero.
        self.W = np.random.randn(input_dim, output_dim) / np.sqrt(input_dim / 2)
        self.b = np.zeros(output_dim)

    def forward(self, x):
        """FC Layer Forward.

        [Input]
            x: (Batch size, In Channel, Height, Width) or (Batch size, input_dim)
        [Output]
            self.out : (Batch size, output_dim)
        """
        if len(x.shape) > 2:
            # Flatten conv feature maps to (batch, features).
            batch_size = x.shape[0]
            x = x.reshape(batch_size, -1)
        self.x = x
        self.out = np.dot(self.x, self.W) + self.b
        return self.out

    def backward(self, d_prev):
        """FC Layer Backward.

        [Input]
            d_prev: Gradients value so far in back-propagation process.
        [Output]
            dx : (batch_size, input_dim) — same shape as (flattened) input x
        """
        self.dW = np.dot(self.x.T, d_prev)    # Gradient w.r.t. weight (self.W)
        self.db = np.sum(d_prev, axis=0)      # Gradient w.r.t. bias (self.b)
        dx = np.dot(d_prev, self.W.T)         # Gradient w.r.t. input x
        return dx

    def update(self, learning_rate):
        # Vanilla SGD step using the gradients computed in backward().
        self.W -= self.dW * learning_rate
        self.b -= self.db * learning_rate

    def summary(self):
        return 'Input -> Hidden : %d -> %d ' % (self.W.shape[0], self.W.shape[1])
################################################################################################################
# ** Softmax Layer ** #
# Softmax Layer applies softmax (WITHOUT any weights or bias) #
# Softmax Layer는 softmax만을 적용한다. (weights나 bias가 전혀 없이!!!) #
# #
# Given an score, #
# 'Softmax Layer' applies softmax to make probability distribution. (Not log softmax or else...) #
# 'Softmax Layer' softmax를 적용하여 class에 대한 확률분포를 만든다. (log softmax 나 다른 것이 아닌 softmax) #
# #
# BE CAREFUL!!!!! #
# This is different from 'SoftmaxOutputLayer' in HW-4 #
# This layer doesn't have any weights or bias. It only applies softmax!!! #
# (HW4 'SoftmaxOutputLayer' is separated into 'FCLayer' & 'SoftmaxLayer' in HW5) #
# #
# 중요!!!!! #
# 이 Layer는 HW-4의 SoftmaxOutputLayer와 다릅니다. #
# 이 Layer는 Weight나 Bias없이 단순 softmax를 적용하는 layer입니다. #
# (간단히 말하면 HW-4의 'SoftmaxOutputLayer'가 HW-5에서 'FC Layer'와 'SoftmaxLayer'로 분리되었습니다.) #
# #
# You need to implement forward and backward pass #
# (This is NOT an entire model.) #
################################################################################################################
class SoftmaxLayer:
    """Softmax + cross-entropy head. Applies softmax only (no weights or
    bias); ce_loss computes cross-entropy against one-hot labels and
    backward returns the gradient of the loss w.r.t. the input scores.
    """
    def __init__(self):
        # No parameters
        pass

    def forward(self, x):
        """Softmax Layer Forward.

        Applies softmax along axis 1, shifting by the row max first for
        numerical stability (same result as plain softmax).

        [Input]
            x: Score to apply softmax — Shape: (N, C)
        [Output]
            self.y_hat: Softmax probability distribution — Shape: (N, C)
        """
        shifted = x - np.max(x, axis=1, keepdims=True)
        exps = np.exp(shifted)
        self.y_hat = exps / np.sum(exps, axis=1, keepdims=True)
        return self.y_hat

    def backward(self, d_prev=1):
        """Softmax Layer Backward.

        Gradient of the mean cross-entropy loss w.r.t. the input score:
        dx = (y_hat - y) / batch_size. Requires ce_loss to have been
        called first (it stores self.y_hat and self.y).
        """
        batch_size = self.y.shape[0]
        dx = (self.y_hat - self.y) / batch_size
        return dx

    def ce_loss(self, y_hat, y):
        """Compute cross-entropy loss, averaged over the batch.

        eps is added inside the log for numerical stability.
        NOTE(review): mean-over-batch matches the 1/batch_size factor in
        backward(); confirm against the HW5 loss definition.

        [Input]
            y_hat: Probability after softmax — (Batch_size, # of class)
            y: One-hot true label — (Batch_size, # of class)
        [Output]
            self.loss : cross-entropy loss (single float)
        """
        eps = 1e-10
        self.y_hat = y_hat
        self.y = y
        self.loss = -np.sum(y * np.log(y_hat + eps)) / y.shape[0]
        return self.loss

    def update(self, learning_rate):
        # Not used in softmax layer (no parameters).
        pass

    def summary(self):
        return 'Softmax layer'
################################################################################################################
# ** CNN Classifier ** #
# This is an class for entire CNN classifier. #
# All the functions and variables are already implemented. #
# Look at the codes below and see how the codes work. #
# #
# ** CNN Classifier ** #
# 이 코드들은 CNN Classifier 통합 모델을 위한 코드입니다. #
# 필요한 모든 변수와 함수들은 이미 구현이 되어있습니다. #
# 아래 코드를 보면서 모델이 어떻게 동작하는지 확인하세요. #
# #
# <<< DO NOT CHANGE ANY THING HERE>>> #
# #
################################################################################################################
class CNN_Classifier:
    """Container that chains named layers (in insertion order) into a full
    CNN classifier with a separate softmax + cross-entropy head.

    The source marks this class "DO NOT CHANGE ANY THING HERE"; only
    documentation has been added.
    """
    def __init__(self):
        # OrderedDict preserves insertion order, so forward/backward walk
        # the layers in the order add_layer() was called.
        self.layers = OrderedDict()
        self.softmax_layer = None
        self.loss = None
        self.pred = None
    def predict(self, x):
        # Outputs model softmax score
        for name, layer in self.layers.items():
            x = layer.forward(x)
        x = self.softmax_layer.forward(x)
        return x
    def forward(self, x, y):
        # Predicts and Compute CE Loss
        self.pred = self.predict(x)
        self.loss = self.softmax_layer.ce_loss(self.pred, y)
        return self.loss
    def backward(self):
        # Back-propagation
        d_prev = 1
        d_prev = self.softmax_layer.backward(d_prev)
        # Walk the layers in reverse insertion order.
        for name, layer in list(self.layers.items())[::-1]:
            d_prev = layer.backward(d_prev)
    def update(self, learning_rate):
        # Update weights in every layer with dW, db
        for name, layer in self.layers.items():
            layer.update(learning_rate)
    def add_layer(self, name, layer):
        # Add Neural Net layer with name.
        if isinstance(layer, SoftmaxLayer):
            # Only a single softmax head is allowed.
            if self.softmax_layer is None:
                self.softmax_layer = layer
            else:
                raise ValueError('Softmax Layer already exists!')
        else:
            self.layers[name] = layer
    def summary(self):
        # Print model architecture.
        print('======= Model Summary =======')
        for name, layer in self.layers.items():
            print('[%s] ' % name + layer.summary())
        print('[Softmax Layer] ' + self.softmax_layer.summary())
print() | [
"numpy.pad",
"numpy.zeros_like",
"numpy.sum",
"numpy.random.seed",
"numpy.random.randn",
"numpy.zeros",
"numpy.shape",
"numpy.max",
"numpy.exp",
"collections.OrderedDict",
"numpy.sqrt"
] | [((59, 78), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (73, 78), True, 'import numpy as np\n'), ((212, 221), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (218, 221), True, 'import numpy as np\n'), ((234, 269), 'numpy.sum', 'np.sum', (['_exp'], {'axis': '(1)', 'keepdims': '(True)'}), '(_exp, axis=1, keepdims=True)\n', (240, 269), True, 'import numpy as np\n'), ((1128, 1189), 'numpy.zeros', 'np.zeros', (['(height - kernel_size + 1, width - kernel_size + 1)'], {}), '((height - kernel_size + 1, width - kernel_size + 1))\n', (1136, 1189), True, 'import numpy as np\n'), ((167, 199), 'numpy.max', 'np.max', (['z'], {'axis': '(1)', 'keepdims': '(True)'}), '(z, axis=1, keepdims=True)\n', (173, 199), True, 'import numpy as np\n'), ((2331, 2347), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (2344, 2347), True, 'import numpy as np\n'), ((2374, 2390), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (2387, 2390), True, 'import numpy as np\n'), ((2408, 2424), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (2421, 2424), True, 'import numpy as np\n'), ((2451, 2467), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (2464, 2467), True, 'import numpy as np\n'), ((3570, 3591), 'numpy.zeros_like', 'np.zeros_like', (['d_prev'], {}), '(d_prev)\n', (3583, 3591), True, 'import numpy as np\n'), ((3609, 3633), 'numpy.shape', 'np.shape', (['self.zero_mask'], {}), '(self.zero_mask)\n', (3617, 3633), True, 'import numpy as np\n'), ((6757, 6825), 'numpy.random.randn', 'np.random.randn', (['out_channels', 'in_channels', 'kernel_size', 'kernel_size'], {}), '(out_channels, in_channels, kernel_size, kernel_size)\n', (6772, 6825), True, 'import numpy as np\n'), ((6844, 6884), 'numpy.zeros', 'np.zeros', (['out_channels'], {'dtype': 'np.float32'}), '(out_channels, dtype=np.float32)\n', (6852, 6884), True, 'import numpy as np\n'), ((9310, 9332), 'numpy.zeros', 'np.zeros', (['(2, 1, 8, 8)'], {}), '((2, 1, 8, 8))\n', (9318, 9332), 
True, 'import numpy as np\n'), ((10720, 10759), 'numpy.zeros_like', 'np.zeros_like', (['self.W'], {'dtype': 'np.float64'}), '(self.W, dtype=np.float64)\n', (10733, 10759), True, 'import numpy as np\n'), ((10779, 10818), 'numpy.zeros_like', 'np.zeros_like', (['self.b'], {'dtype': 'np.float64'}), '(self.b, dtype=np.float64)\n', (10792, 10818), True, 'import numpy as np\n'), ((10833, 10872), 'numpy.zeros_like', 'np.zeros_like', (['self.x'], {'dtype': 'np.float64'}), '(self.x, dtype=np.float64)\n', (10846, 10872), True, 'import numpy as np\n'), ((11874, 11931), 'numpy.zeros', 'np.zeros', (['(batch_size, in_channel, height + 2, width + 2)'], {}), '((batch_size, in_channel, height + 2, width + 2))\n', (11882, 11931), True, 'import numpy as np\n'), ((15592, 15608), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (15605, 15608), True, 'import numpy as np\n'), ((18980, 19000), 'numpy.zeros', 'np.zeros', (['output_dim'], {}), '(output_dim)\n', (18988, 19000), True, 'import numpy as np\n'), ((20086, 20125), 'numpy.zeros_like', 'np.zeros_like', (['self.W'], {'dtype': 'np.float64'}), '(self.W, dtype=np.float64)\n', (20099, 20125), True, 'import numpy as np\n'), ((20181, 20220), 'numpy.zeros_like', 'np.zeros_like', (['self.b'], {'dtype': 'np.float64'}), '(self.b, dtype=np.float64)\n', (20194, 20220), True, 'import numpy as np\n'), ((20269, 20308), 'numpy.zeros_like', 'np.zeros_like', (['self.x'], {'dtype': 'np.float64'}), '(self.x, dtype=np.float64)\n', (20282, 20308), True, 'import numpy as np\n'), ((27193, 27206), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27204, 27206), False, 'from collections import OrderedDict\n'), ((3646, 3670), 'numpy.shape', 'np.shape', (['self.zero_mask'], {}), '(self.zero_mask)\n', (3654, 3670), True, 'import numpy as np\n'), ((3674, 3690), 'numpy.shape', 'np.shape', (['d_prev'], {}), '(d_prev)\n', (3682, 3690), True, 'import numpy as np\n'), ((18898, 18936), 'numpy.random.randn', 'np.random.randn', (['input_dim', 
'output_dim'], {}), '(input_dim, output_dim)\n', (18913, 18936), True, 'import numpy as np\n'), ((18939, 18961), 'numpy.sqrt', 'np.sqrt', (['(input_dim / 2)'], {}), '(input_dim / 2)\n', (18946, 18961), True, 'import numpy as np\n'), ((12040, 12091), 'numpy.pad', 'np.pad', (['x[i][j]', 'pad', '"""constant"""'], {'constant_values': '(0)'}), "(x[i][j], pad, 'constant', constant_values=0)\n", (12046, 12091), True, 'import numpy as np\n')] |
from utils import calculate_variance
import numpy as np
from math import sqrt
def display_as_percentage(val):
    """Format a decimal fraction as a percentage string with one decimal
    place, e.g. 0.105 -> '10.5%'."""
    percentage = val * 100
    return '{:.1f}%'.format(percentage)
# Five periods of historical returns for each stock (decimal fractions).
returns_disney = [0.10, 0.12, 0.15, 0.05, 0.04]
returns_cbs = [-0.80, -0.15, 0.31, -0.06, -0.29]
# Baseline standard deviations via NumPy; the custom helper below
# recomputes and overwrites these values.
stddev_disney = np.std(returns_disney)
stddev_cbs = np.std(returns_cbs)
# Sample dataset — appears unused in the lines that follow.
dataset = [10, 8, 9, 10, 12]
def calculate_stddev(dataset):
    """Return the standard deviation of *dataset*: the square root of the
    variance computed by utils.calculate_variance."""
    return sqrt(calculate_variance(dataset))
# Recompute both standard deviations with the custom helper (overwriting
# the NumPy baselines above) and report them as percentages.
stddev_disney = calculate_stddev(returns_disney)
stddev_cbs = calculate_stddev(returns_cbs)
print('The standard deviation of Disney stock returns is', display_as_percentage(stddev_disney))
print('The standard deviation of CBS stock returns is', display_as_percentage(stddev_cbs))
| [
"numpy.std",
"utils.calculate_variance",
"math.sqrt"
] | [((275, 297), 'numpy.std', 'np.std', (['returns_disney'], {}), '(returns_disney)\n', (281, 297), True, 'import numpy as np\n'), ((312, 331), 'numpy.std', 'np.std', (['returns_cbs'], {}), '(returns_cbs)\n', (318, 331), True, 'import numpy as np\n'), ((412, 439), 'utils.calculate_variance', 'calculate_variance', (['dataset'], {}), '(dataset)\n', (430, 439), False, 'from utils import calculate_variance\n'), ((452, 466), 'math.sqrt', 'sqrt', (['variance'], {}), '(variance)\n', (456, 466), False, 'from math import sqrt\n')] |
"""Fast-Weights Q&A Network
Multi-layered bi-directional implementation of the Fast-Weights RNN using
the cell from lnfw_rnn_cell.py.
The framework for this implementation (including sentence and question encoders)
is based on the end-to-end memory network, but the actual memory network module
is replaced with a Fast-Weights RNN.
"""
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
from six.moves import range
from . import FastWeightsLSTMCell
def position_encoding(sentence_size, embedding_size):
"""
Position Encoding described in section 4.1 of End to end Memory Networks
"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (embedding_size+1)/2) * (j - (sentence_size+1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
# Make position encoding of time words identity to avoid modifying them
encoding[:, -1] = 1.0
return np.transpose(encoding)
def zero_nil_slot(t, name=None):
"""
Overwrites the nil_slot (first row) of the input Tensor with zeros.
The nil_slot is a dummy slot and should not be trained and influence
the training algorithm.
"""
with tf.op_scope([t], name, "zero_nil_slot") as name:
t = tf.convert_to_tensor(t, name="t")
s = tf.shape(t)[1]
z = tf.zeros(tf.stack([1, s]))
return tf.concat(axis=0, values=[z, tf.slice(t, [1, 0], [-1, -1])], name=name)
def add_gradient_noise(t, stddev=1e-3, name=None):
"""
Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks [2].
"""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
class FWQA_DeepBiLSTM(object):
"""Fast Weights Q&A network."""
def __init__(self, batch_size, vocab_size, sentence_size, memory_size, embedding_size, num_hidden_units,
S=1,
S_Q=0,
num_layers=1,
tied_output=False,
max_grad_norm=40.0,
max_pooling=True,
layer_norm=True,
forget_bias=1.0,
nonlin=None,
initializer=tf.random_normal_initializer(stddev=0.1),
encoding=position_encoding,
log_dir='./results/',
session=tf.Session(),
name='FWQA_LSTM'):
"""Creates a Fast-Weights Q&A network
Args:
batch_size: The size of the batch.
vocab_size: The size of the vocabulary (should include the nil word). The nil word
one-hot encoding should be 0.
sentence_size: The max size of a sentence in the data. All sentences should be padded
to this length. If padding is required it should be done with nil one-hot encoding (0).
memory_size: The max size of the memory. Since Tensorflow currently does not support jagged arrays
all memories must be padded to this length. If padding is required, the extra memories should be
empty memories; memories filled with the nil word ([0, 0, 0, ......, 0]).
embedding_size: The size of the word embedding.
max_grad_norm: Maximum L2 norm clipping value. Defaults to `40.0`.
nonlin: Non-linearity. Defaults to `None`.
initializer: Weight initializer. Defaults to `tf.random_normal_initializer(stddev=0.1)`.
optimizer: Optimizer algorithm used for SGD. Defaults to `tf.train.AdamOptimizer(learning_rate=1e-2)`.
encoding: A function returning a 2D Tensor (sentence_size, embedding_size). Defaults to `position_encoding`.
session: Tensorflow Session the model is run with. Defaults to `tf.Session()`.
name: Name of the End-To-End Memory Network. Defaults to `MemN2N`.
"""
self._batch_size = batch_size
self._vocab_size = vocab_size
self._sentence_size = sentence_size
self._memory_size = memory_size
self._embedding_size = embedding_size
self._num_hidden_units = num_hidden_units
self._num_layers = num_layers
self._tied_output = tied_output
self._max_pooling = max_pooling
self._layer_norm = layer_norm
self._forget_bias = forget_bias
self._max_grad_norm = max_grad_norm
self._nonlin = nonlin
self._init = initializer
self._log_dir = log_dir
self._name = name
self._S = S
self._S_Q = S_Q
self._build_inputs()
self._build_vars()
self._opt = tf.train.AdamOptimizer(learning_rate=self._lr)
self._encoding = tf.constant(encoding(self._sentence_size, self._embedding_size), name="encoding")
# cross entropy
logits = self._inference(self._stories_wq, self._sq_lens) # (batch_size, vocab_size)
correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), tf.cast(tf.argmax(self._answers, 1), tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self._answers, tf.float32), name="cross_entropy")
cross_entropy_sum = tf.reduce_sum(cross_entropy, name="cross_entropy_sum")
# loss op
loss_op = cross_entropy_sum
tf.summary.scalar('cross_entropy_sum', loss_op)
# gradient pipeline
grads_and_vars = self._opt.compute_gradients(loss_op)
grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
nil_grads_and_vars = []
for g, v in grads_and_vars:
if v.name in self._nil_vars:
nil_grads_and_vars.append((zero_nil_slot(g), v))
else:
nil_grads_and_vars.append((g, v))
train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
# predict ops
predict_op = tf.argmax(logits, 1, name="predict_op")
predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
predict_log_proba_op = tf.log(predict_proba_op, name="predict_log_proba_op")
# assign ops
self.loss_op = loss_op
self.predict_op = predict_op
self.predict_proba_op = predict_proba_op
self.predict_log_proba_op = predict_log_proba_op
self.train_op = train_op
self.merged_train_summaries = tf.summary.merge_all()
self.all_correct_predictions = tf.equal(self.all_answers_pred, self.all_answers_true)
self.mean_accuracy_all = tf.reduce_mean(tf.cast(self.all_correct_predictions, tf.float32))
self.mean_acc_all_summary = tf.summary.scalar('mean_epoch_accuracy', self.mean_accuracy_all)
self.train_writer = tf.summary.FileWriter(self._log_dir + 'train', session.graph)
self.val_writer = tf.summary.FileWriter(self._log_dir + 'val')
self.test_writer = tf.summary.FileWriter(self._log_dir + 'test')
init_op = tf.global_variables_initializer()
self._sess = session
self._sess.run(init_op)
def _build_inputs(self):
self._stories_wq = tf.placeholder(tf.int32, [None, self._memory_size, self._sentence_size], name="stories_queries")
self._sq_lens = tf.placeholder(tf.int32, [None], name="sq_lens")
self._answers = tf.placeholder(tf.int32, [None, self._vocab_size], name="answers")
self._lr = tf.placeholder(tf.float32, [], name="learning_rate")
self._l_1 = tf.placeholder(tf.float32, [], name="decay_lambda_l1") # Decay lambda
self._e_1 = tf.placeholder(tf.float32, [], name="FW_learning_rate_l1") # fast weights learning rate (eta)
self._l_2 = tf.placeholder(tf.float32, [], name="decay_lambda_l2") # Decay lambda
self._e_2 = tf.placeholder(tf.float32, [], name="FW_learning_rate_l2") # fast weights learning rate (eta)
self._l_3 = tf.placeholder(tf.float32, [], name="decay_lambda_l3") # Decay lambda
self._e_3 = tf.placeholder(tf.float32, [], name="FW_learning_rate_l3") # fast weights learning rate (eta)
self._keep_prob = tf.placeholder_with_default(1.0, [], name="keep_prob")
self.etas = [self._e_1, self._e_2, self._e_3]
self.lambdas = [self._l_1, self._l_2, self._l_3]
# For the summary:
self.all_answers_pred = tf.placeholder(tf.int32, [None], name="all_answers_pred")
self.all_answers_true = tf.placeholder(tf.int32, [None], name="all_answers_true")
def _build_vars(self):
with tf.variable_scope(self._name):
nil_word_slot = tf.zeros([1, self._embedding_size])
LUT = tf.concat(axis=0, values=[nil_word_slot, self._init([self._vocab_size-1, self._embedding_size])])
self.LUT = tf.Variable(LUT, name="LUT")
self.fw_cells_list = []
self.bw_cells_list = []
with tf.variable_scope("fast_weights_rnn"):
for layernum in range(self._num_layers):
if layernum == 0:
W_ifoj_shape = [self._embedding_size + self._num_hidden_units, self._num_hidden_units * 4]
else:
W_ifoj_shape = [self._num_hidden_units * 4, self._num_hidden_units * 4]
for scope in ["fw/multi_rnn_cell/cell_{}".format(layernum), "bw/multi_rnn_cell/cell_{}".format(layernum)]:
with tf.variable_scope(scope):
# input weights (proper initialization)
self.W_ifoj = tf.get_variable(name="W_ifoj",
# shape=W_ifoj_shape,
initializer=tf.random_uniform(W_ifoj_shape,
-np.sqrt(2.0/W_ifoj_shape[0]), np.sqrt(2.0/W_ifoj_shape[0])),
dtype=tf.float32)
self.b_ifoj = tf.get_variable(name="b_ifoj",
# shape=[self._num_hidden_units],
initializer=tf.zeros([self._num_hidden_units * 4]),
dtype=tf.float32)
# # hidden weights (See Hinton's video @ 21:20)
# self.W_h = tf.get_variable(name="W_h",
# # shape=[self._num_hidden_units, self._num_hidden_units],
# initializer=tf.constant(0.05 * np.identity(self._num_hidden_units), dtype=tf.float32),
# dtype=tf.float32)
# scale and shift for new_c layernorm
self.gain_state = tf.get_variable(name='gain_state',
# shape=[self._num_hidden_units],
initializer=tf.ones([self._num_hidden_units]),
dtype=tf.float32)
self.bias_state = tf.get_variable(name='bias_state',
# shape=[self._num_hidden_units],
initializer=tf.zeros([self._num_hidden_units]),
dtype=tf.float32)
# scale and shift for ifoj layernorm
self.gain_ifoj = tf.get_variable(name='gain_ifoj',
# shape=[self._num_hidden_units],
initializer=tf.ones([self._num_hidden_units * 4]),
dtype=tf.float32)
self.bias_ifoj = tf.get_variable(name='bias_ifoj',
# shape=[self._num_hidden_units],
initializer=tf.zeros([self._num_hidden_units * 4]),
dtype=tf.float32)
self.fw_cells_list.append(FastWeightsLSTMCell(num_hidden_units=self._num_hidden_units,
batch_size=self._batch_size, loop_steps=self._S,
forget_bias=self._forget_bias, layer_norm=self._layer_norm,
decay_rate=self.lambdas[layernum], eta=self.etas[layernum],
dropout_keep_prob=self._keep_prob))
self.bw_cells_list.append(FastWeightsLSTMCell(num_hidden_units=self._num_hidden_units,
batch_size=self._batch_size, loop_steps=self._S,
forget_bias=self._forget_bias, layer_norm=self._layer_norm,
decay_rate=self.lambdas[layernum], eta=self.etas[layernum],
dropout_keep_prob=self._keep_prob))
self.fw_cells = tf.nn.rnn_cell.MultiRNNCell(self.fw_cells_list)
self.bw_cells = tf.nn.rnn_cell.MultiRNNCell(self.bw_cells_list)
with tf.variable_scope(self._name):
# Final output layer weights
if not self._tied_output:
self.W_softmax = tf.Variable(tf.random_uniform(
[2 * self._num_hidden_units, self._vocab_size],
-np.sqrt(2.0/self._vocab_size),
np.sqrt(2.0/self._vocab_size)),
dtype=tf.float32, name="W_softmax")
self.b_softmax = tf.Variable(tf.zeros(
[self._vocab_size]),
dtype=tf.float32, name="b_softmax")
self._nil_vars = set([self.LUT.name])
def _inference(self, stories_wq, sq_lens):
with tf.variable_scope(self._name):
# Use LUT for all word embeddings
sq_emb = tf.nn.embedding_lookup(self.LUT, stories_wq)
input_seq = tf.reduce_sum(sq_emb * self._encoding, axis=2)
with tf.variable_scope("fast_weights_rnn", reuse=True) as scope:
outputs, states = tf.nn.bidirectional_dynamic_rnn(self.fw_cells, self.bw_cells,
input_seq, sequence_length=sq_lens,
initial_state_fw=self.fw_cells.zero_state(batch_size=self._batch_size, dtype=tf.float32),
initial_state_bw=self.bw_cells.zero_state(batch_size=self._batch_size, dtype=tf.float32),
time_major=False, scope=scope)
fw_states, bw_states = states
fw_outputs, bw_outputs = outputs
if self._max_pooling:
pooled_fw_state = tf.layers.max_pooling1d(fw_outputs, self._memory_size, (1,))
pooled_bw_state = tf.layers.max_pooling1d(bw_outputs, self._memory_size, (1,))
final_fw_state = tf.squeeze(pooled_fw_state)
final_bw_state = tf.squeeze(pooled_bw_state)
else:
final_fw_state = fw_states[-1][0]
final_bw_state = bw_states[-1][0]
if self._tied_output:
with tf.variable_scope("fast_weights_rnn/fw/multi_rnn_cell/cell_0", reuse=True):
fw_emb = tf.matmul(final_fw_state, tf.transpose(self.W_x, [1, 0]))
with tf.variable_scope("fast_weights_rnn/bw/multi_rnn_cell/cell_0", reuse=True):
bw_emb = tf.matmul(final_bw_state, tf.transpose(self.W_x, [1, 0]))
with tf.variable_scope(self._name):
logits_fw = tf.matmul(fw_emb, tf.transpose(self.LUT, [1, 0]))
logits_bw = tf.matmul(bw_emb, tf.transpose(self.LUT, [1, 0]))
logits = tf.maximum(logits_fw, logits_bw)
else:
with tf.variable_scope(self._name):
final_output = tf.concat([final_fw_state, final_bw_state], 1)
logits = tf.matmul(final_output, self.W_softmax) + self.b_softmax
return logits
def batch_fit(self, stories_wq, sq_lens, answers, learning_rate, etas, decay_lambdas, keep_prob):
"""Runs the training algorithm over the passed batch
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
answers: Tensor (None, vocab_size)
Returns:
loss: floating-point number, the loss computed for the batch
"""
feed_dict = {self._stories_wq: stories_wq, self._sq_lens: sq_lens,
self._answers: answers, self._lr: learning_rate,
self._e_1: etas[0], self._l_1: decay_lambdas[0],
self._e_2: etas[1], self._l_2: decay_lambdas[1],
self._e_3: etas[2], self._l_3: decay_lambdas[2],
self._keep_prob: keep_prob}
summary, loss, _ = self._sess.run([self.merged_train_summaries, self.loss_op, self.train_op], feed_dict=feed_dict)
return summary, loss
def predict(self, stories_wq, sq_lens, answers, etas, decay_lambdas):
"""Predicts answers as one-hot encoding.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories_wq: stories_wq, self._sq_lens: sq_lens, self._answers: answers,
self._e_1: etas[0], self._l_1: decay_lambdas[0],
self._e_2: etas[1], self._l_2: decay_lambdas[1],
self._e_3: etas[2], self._l_3: decay_lambdas[2]}
summary, loss = self._sess.run([self.merged_train_summaries, self.predict_op], feed_dict=feed_dict)
return summary, loss
def predict_proba(self, stories_wq, sq_lens, answers, etas, decay_lambdas):
"""Predicts probabilities of answers.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories_wq: stories_wq, self._sq_lens: sq_lens, self._answers: answers,
self._e_1: etas[0], self._l_1: decay_lambdas[0],
self._e_2: etas[1], self._l_2: decay_lambdas[1],
self._e_3: etas[2], self._l_3: decay_lambdas[2]}
return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)
def predict_log_proba(self, stories_wq, sq_lens, answers, etas, decay_lambdas):
"""Predicts log probabilities of answers.
Args:
stories: Tensor (None, memory_size, sentence_size)
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._stories_wq: stories_wq, self._sq_lens: sq_lens, self._answers: answers,
self._e_1: etas[0], self._l_1: decay_lambdas[0],
self._e_2: etas[1], self._l_2: decay_lambdas[1],
self._e_3: etas[2], self._l_3: decay_lambdas[2]}
return self._sess.run(self.predict_log_proba_op, feed_dict=feed_dict)
def compute_mean_accuracy(self, answers_pred, answers_true):
"""Computes the mean accuracy of all batches for summary writer
Args:
answers_pred:
answers_true:
"""
feed_dict = {self.all_answers_pred: answers_pred, self.all_answers_true: answers_true}
return self._sess.run([self.mean_acc_all_summary, self.mean_accuracy_all], feed_dict=feed_dict)
| [
"tensorflow.op_scope",
"tensorflow.reduce_sum",
"tensorflow.maximum",
"numpy.ones",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.layers.max_pooling1d",
"tensorflow.nn.softmax",
"six.moves.range",
"tensorflow.placeholder_with_default",
"numpy.transpose",
"tensorflow.variable_scope",
... | [((680, 738), 'numpy.ones', 'np.ones', (['(embedding_size, sentence_size)'], {'dtype': 'np.float32'}), '((embedding_size, sentence_size), dtype=np.float32)\n', (687, 738), True, 'import numpy as np\n'), ((803, 815), 'six.moves.range', 'range', (['(1)', 'le'], {}), '(1, le)\n', (808, 815), False, 'from six.moves import range\n'), ((1116, 1138), 'numpy.transpose', 'np.transpose', (['encoding'], {}), '(encoding)\n', (1128, 1138), True, 'import numpy as np\n'), ((834, 846), 'six.moves.range', 'range', (['(1)', 'ls'], {}), '(1, ls)\n', (839, 846), False, 'from six.moves import range\n'), ((1372, 1411), 'tensorflow.op_scope', 'tf.op_scope', (['[t]', 'name', '"""zero_nil_slot"""'], {}), "([t], name, 'zero_nil_slot')\n", (1383, 1411), True, 'import tensorflow as tf\n'), ((1433, 1466), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (1453, 1466), True, 'import tensorflow as tf\n'), ((1938, 1990), 'tensorflow.op_scope', 'tf.op_scope', (['[t, stddev]', 'name', '"""add_gradient_noise"""'], {}), "([t, stddev], name, 'add_gradient_noise')\n", (1949, 1990), True, 'import tensorflow as tf\n'), ((2012, 2045), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['t'], {'name': '"""t"""'}), "(t, name='t')\n", (2032, 2045), True, 'import tensorflow as tf\n'), ((2119, 2143), 'tensorflow.add', 'tf.add', (['t', 'gn'], {'name': 'name'}), '(t, gn, name=name)\n', (2125, 2143), True, 'import tensorflow as tf\n'), ((2543, 2583), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (2571, 2583), True, 'import tensorflow as tf\n'), ((2667, 2679), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2677, 2679), True, 'import tensorflow as tf\n'), ((4923, 4969), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self._lr'}), '(learning_rate=self._lr)\n', (4945, 4969), True, 'import tensorflow as tf\n'), ((5403, 5442), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (5420, 5442), True, 'import tensorflow as tf\n'), ((5616, 5670), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_entropy'], {'name': '"""cross_entropy_sum"""'}), "(cross_entropy, name='cross_entropy_sum')\n", (5629, 5670), True, 'import tensorflow as tf\n'), ((5734, 5781), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy_sum"""', 'loss_op'], {}), "('cross_entropy_sum', loss_op)\n", (5751, 5781), True, 'import tensorflow as tf\n'), ((6436, 6475), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {'name': '"""predict_op"""'}), "(logits, 1, name='predict_op')\n", (6445, 6475), True, 'import tensorflow as tf\n'), ((6503, 6549), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""predict_proba_op"""'}), "(logits, name='predict_proba_op')\n", (6516, 6549), True, 'import tensorflow as tf\n'), ((6581, 6634), 'tensorflow.log', 'tf.log', (['predict_proba_op'], {'name': '"""predict_log_proba_op"""'}), "(predict_proba_op, name='predict_log_proba_op')\n", (6587, 6634), True, 'import tensorflow as tf\n'), ((6903, 6925), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (6923, 6925), True, 'import tensorflow as tf\n'), ((6966, 7020), 'tensorflow.equal', 'tf.equal', (['self.all_answers_pred', 'self.all_answers_true'], {}), '(self.all_answers_pred, self.all_answers_true)\n', (6974, 7020), True, 'import tensorflow as tf\n'), ((7156, 7220), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_epoch_accuracy"""', 'self.mean_accuracy_all'], {}), "('mean_epoch_accuracy', self.mean_accuracy_all)\n", (7173, 7220), True, 'import tensorflow as tf\n'), ((7250, 7311), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(self._log_dir + 'train')", 'session.graph'], {}), "(self._log_dir + 'train', session.graph)\n", (7271, 7311), True, 'import tensorflow as tf\n'), ((7338, 7382), 
'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(self._log_dir + 'val')"], {}), "(self._log_dir + 'val')\n", (7359, 7382), True, 'import tensorflow as tf\n'), ((7410, 7455), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(self._log_dir + 'test')"], {}), "(self._log_dir + 'test')\n", (7431, 7455), True, 'import tensorflow as tf\n'), ((7475, 7508), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7506, 7508), True, 'import tensorflow as tf\n'), ((7627, 7727), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self._memory_size, self._sentence_size]'], {'name': '"""stories_queries"""'}), "(tf.int32, [None, self._memory_size, self._sentence_size],\n name='stories_queries')\n", (7641, 7727), True, 'import tensorflow as tf\n'), ((7748, 7796), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""sq_lens"""'}), "(tf.int32, [None], name='sq_lens')\n", (7762, 7796), True, 'import tensorflow as tf\n'), ((7821, 7887), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self._vocab_size]'], {'name': '"""answers"""'}), "(tf.int32, [None, self._vocab_size], name='answers')\n", (7835, 7887), True, 'import tensorflow as tf\n'), ((7907, 7959), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""learning_rate"""'}), "(tf.float32, [], name='learning_rate')\n", (7921, 7959), True, 'import tensorflow as tf\n'), ((7980, 8034), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""decay_lambda_l1"""'}), "(tf.float32, [], name='decay_lambda_l1')\n", (7994, 8034), True, 'import tensorflow as tf\n'), ((8070, 8128), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""FW_learning_rate_l1"""'}), "(tf.float32, [], name='FW_learning_rate_l1')\n", (8084, 8128), True, 'import tensorflow as tf\n'), ((8184, 8238), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': 
'"""decay_lambda_l2"""'}), "(tf.float32, [], name='decay_lambda_l2')\n", (8198, 8238), True, 'import tensorflow as tf\n'), ((8274, 8332), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""FW_learning_rate_l2"""'}), "(tf.float32, [], name='FW_learning_rate_l2')\n", (8288, 8332), True, 'import tensorflow as tf\n'), ((8388, 8442), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""decay_lambda_l3"""'}), "(tf.float32, [], name='decay_lambda_l3')\n", (8402, 8442), True, 'import tensorflow as tf\n'), ((8478, 8536), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""FW_learning_rate_l3"""'}), "(tf.float32, [], name='FW_learning_rate_l3')\n", (8492, 8536), True, 'import tensorflow as tf\n'), ((8598, 8652), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)', '[]'], {'name': '"""keep_prob"""'}), "(1.0, [], name='keep_prob')\n", (8625, 8652), True, 'import tensorflow as tf\n'), ((8834, 8891), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""all_answers_pred"""'}), "(tf.int32, [None], name='all_answers_pred')\n", (8848, 8891), True, 'import tensorflow as tf\n'), ((8924, 8981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""all_answers_true"""'}), "(tf.int32, [None], name='all_answers_true')\n", (8938, 8981), True, 'import tensorflow as tf\n'), ((1479, 1490), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (1487, 1490), True, 'import tensorflow as tf\n'), ((1515, 1531), 'tensorflow.stack', 'tf.stack', (['[1, s]'], {}), '([1, s])\n', (1523, 1531), True, 'import tensorflow as tf\n'), ((2076, 2087), 'tensorflow.shape', 'tf.shape', (['t'], {}), '(t)\n', (2084, 2087), True, 'import tensorflow as tf\n'), ((5354, 5393), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (5361, 5393), True, 'import tensorflow as tf\n'), ((7069, 7118), 
'tensorflow.cast', 'tf.cast', (['self.all_correct_predictions', 'tf.float32'], {}), '(self.all_correct_predictions, tf.float32)\n', (7076, 7118), True, 'import tensorflow as tf\n'), ((9023, 9052), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._name'], {}), '(self._name)\n', (9040, 9052), True, 'import tensorflow as tf\n'), ((9082, 9117), 'tensorflow.zeros', 'tf.zeros', (['[1, self._embedding_size]'], {}), '([1, self._embedding_size])\n', (9090, 9117), True, 'import tensorflow as tf\n'), ((9257, 9285), 'tensorflow.Variable', 'tf.Variable', (['LUT'], {'name': '"""LUT"""'}), "(LUT, name='LUT')\n", (9268, 9285), True, 'import tensorflow as tf\n'), ((9365, 9402), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fast_weights_rnn"""'], {}), "('fast_weights_rnn')\n", (9382, 9402), True, 'import tensorflow as tf\n'), ((9432, 9455), 'six.moves.range', 'range', (['self._num_layers'], {}), '(self._num_layers)\n', (9437, 9455), False, 'from six.moves import range\n'), ((13359, 13406), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['self.fw_cells_list'], {}), '(self.fw_cells_list)\n', (13386, 13406), True, 'import tensorflow as tf\n'), ((13435, 13482), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['self.bw_cells_list'], {}), '(self.bw_cells_list)\n', (13462, 13482), True, 'import tensorflow as tf\n'), ((13498, 13527), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._name'], {}), '(self._name)\n', (13515, 13527), True, 'import tensorflow as tf\n'), ((14161, 14190), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._name'], {}), '(self._name)\n', (14178, 14190), True, 'import tensorflow as tf\n'), ((14259, 14303), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.LUT', 'stories_wq'], {}), '(self.LUT, stories_wq)\n', (14281, 14303), True, 'import tensorflow as tf\n'), ((14328, 14374), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(sq_emb * self._encoding)'], {'axis': '(2)'}), 
'(sq_emb * self._encoding, axis=2)\n', (14341, 14374), True, 'import tensorflow as tf\n'), ((14389, 14438), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fast_weights_rnn"""'], {'reuse': '(True)'}), "('fast_weights_rnn', reuse=True)\n", (14406, 14438), True, 'import tensorflow as tf\n'), ((5245, 5266), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (5258, 5266), True, 'import tensorflow as tf\n'), ((5280, 5307), 'tensorflow.argmax', 'tf.argmax', (['self._answers', '(1)'], {}), '(self._answers, 1)\n', (5289, 5307), True, 'import tensorflow as tf\n'), ((5530, 5564), 'tensorflow.cast', 'tf.cast', (['self._answers', 'tf.float32'], {}), '(self._answers, tf.float32)\n', (5537, 5564), True, 'import tensorflow as tf\n'), ((5900, 5939), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['g', 'self._max_grad_norm'], {}), '(g, self._max_grad_norm)\n', (5915, 5939), True, 'import tensorflow as tf\n'), ((15092, 15152), 'tensorflow.layers.max_pooling1d', 'tf.layers.max_pooling1d', (['fw_outputs', 'self._memory_size', '(1,)'], {}), '(fw_outputs, self._memory_size, (1,))\n', (15115, 15152), True, 'import tensorflow as tf\n'), ((15187, 15247), 'tensorflow.layers.max_pooling1d', 'tf.layers.max_pooling1d', (['bw_outputs', 'self._memory_size', '(1,)'], {}), '(bw_outputs, self._memory_size, (1,))\n', (15210, 15247), True, 'import tensorflow as tf\n'), ((15282, 15309), 'tensorflow.squeeze', 'tf.squeeze', (['pooled_fw_state'], {}), '(pooled_fw_state)\n', (15292, 15309), True, 'import tensorflow as tf\n'), ((15343, 15370), 'tensorflow.squeeze', 'tf.squeeze', (['pooled_bw_state'], {}), '(pooled_bw_state)\n', (15353, 15370), True, 'import tensorflow as tf\n'), ((15538, 15612), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fast_weights_rnn/fw/multi_rnn_cell/cell_0"""'], {'reuse': '(True)'}), "('fast_weights_rnn/fw/multi_rnn_cell/cell_0', reuse=True)\n", (15555, 15612), True, 'import tensorflow as tf\n'), ((15715, 15789), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""fast_weights_rnn/bw/multi_rnn_cell/cell_0"""'], {'reuse': '(True)'}), "('fast_weights_rnn/bw/multi_rnn_cell/cell_0', reuse=True)\n", (15732, 15789), True, 'import tensorflow as tf\n'), ((15904, 15933), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._name'], {}), '(self._name)\n', (15921, 15933), True, 'import tensorflow as tf\n'), ((16117, 16149), 'tensorflow.maximum', 'tf.maximum', (['logits_fw', 'logits_bw'], {}), '(logits_fw, logits_bw)\n', (16127, 16149), True, 'import tensorflow as tf\n'), ((16182, 16211), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._name'], {}), '(self._name)\n', (16199, 16211), True, 'import tensorflow as tf\n'), ((16244, 16290), 'tensorflow.concat', 'tf.concat', (['[final_fw_state, final_bw_state]', '(1)'], {}), '([final_fw_state, final_bw_state], 1)\n', (16253, 16290), True, 'import tensorflow as tf\n'), ((1577, 1606), 'tensorflow.slice', 'tf.slice', (['t', '[1, 0]', '[-1, -1]'], {}), '(t, [1, 0], [-1, -1])\n', (1585, 1606), True, 'import tensorflow as tf\n'), ((13946, 13974), 'tensorflow.zeros', 'tf.zeros', (['[self._vocab_size]'], {}), '([self._vocab_size])\n', (13954, 13974), True, 'import tensorflow as tf\n'), ((15665, 15695), 'tensorflow.transpose', 'tf.transpose', (['self.W_x', '[1, 0]'], {}), '(self.W_x, [1, 0])\n', (15677, 15695), True, 'import tensorflow as tf\n'), ((15842, 15872), 'tensorflow.transpose', 'tf.transpose', (['self.W_x', '[1, 0]'], {}), '(self.W_x, [1, 0])\n', (15854, 15872), True, 'import tensorflow as tf\n'), ((15981, 16011), 'tensorflow.transpose', 'tf.transpose', (['self.LUT', '[1, 0]'], {}), '(self.LUT, [1, 0])\n', (15993, 16011), True, 'import tensorflow as tf\n'), ((16059, 16089), 'tensorflow.transpose', 'tf.transpose', (['self.LUT', '[1, 0]'], {}), '(self.LUT, [1, 0])\n', (16071, 16089), True, 'import tensorflow as tf\n'), ((16316, 16355), 'tensorflow.matmul', 'tf.matmul', (['final_output', 'self.W_softmax'], {}), '(final_output, 
self.W_softmax)\n', (16325, 16355), True, 'import tensorflow as tf\n'), ((9865, 9889), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (9882, 9889), True, 'import tensorflow as tf\n'), ((13812, 13843), 'numpy.sqrt', 'np.sqrt', (['(2.0 / self._vocab_size)'], {}), '(2.0 / self._vocab_size)\n', (13819, 13843), True, 'import numpy as np\n'), ((13761, 13792), 'numpy.sqrt', 'np.sqrt', (['(2.0 / self._vocab_size)'], {}), '(2.0 / self._vocab_size)\n', (13768, 13792), True, 'import numpy as np\n'), ((10545, 10583), 'tensorflow.zeros', 'tf.zeros', (['[self._num_hidden_units * 4]'], {}), '([self._num_hidden_units * 4])\n', (10553, 10583), True, 'import tensorflow as tf\n'), ((11272, 11305), 'tensorflow.ones', 'tf.ones', (['[self._num_hidden_units]'], {}), '([self._num_hidden_units])\n', (11279, 11305), True, 'import tensorflow as tf\n'), ((11533, 11567), 'tensorflow.zeros', 'tf.zeros', (['[self._num_hidden_units]'], {}), '([self._num_hidden_units])\n', (11541, 11567), True, 'import tensorflow as tf\n'), ((11854, 11891), 'tensorflow.ones', 'tf.ones', (['[self._num_hidden_units * 4]'], {}), '([self._num_hidden_units * 4])\n', (11861, 11891), True, 'import tensorflow as tf\n'), ((12117, 12155), 'tensorflow.zeros', 'tf.zeros', (['[self._num_hidden_units * 4]'], {}), '([self._num_hidden_units * 4])\n', (12125, 12155), True, 'import tensorflow as tf\n'), ((10268, 10298), 'numpy.sqrt', 'np.sqrt', (['(2.0 / W_ifoj_shape[0])'], {}), '(2.0 / W_ifoj_shape[0])\n', (10275, 10298), True, 'import numpy as np\n'), ((10238, 10268), 'numpy.sqrt', 'np.sqrt', (['(2.0 / W_ifoj_shape[0])'], {}), '(2.0 / W_ifoj_shape[0])\n', (10245, 10268), True, 'import numpy as np\n')] |
import torch
import numpy as np
import pandas as pd
import math
import analysis.distances as distances
def sim_check(
    model, in_set, batch_set,
    in_dis_measure='hamming',
    spk_hidden_measure='euclidean',
    msg_dis_measure='edit',
    lis_hidden_measure='euclidean',
    corr_measure='pearson',
    label_mode=False
):
    """Measure topographic similarity between input, hidden, and message spaces.

    For every pair of examples, computes pairwise distances in four spaces
    (input, speaker hidden, message, listener hidden) and correlates the
    input distances against each of the others.

    Args:
        model: trained model exposing the reproduce_* methods used below.
        in_set: raw input strings, one per batch.
        batch_set: batches corresponding one-to-one to `in_set`.
        in_dis_measure/spk_hidden_measure/msg_dis_measure/lis_hidden_measure:
            distance measures for the respective spaces.
        corr_measure: correlation method passed to DataFrame.corr.
        label_mode: if True, parse inputs as labels instead of instructions.

    Returns:
        (mean_spkh_corr, mean_msg_corr, mean_lish_corr, spkh_lish_corr)
    """
    to_array = distances.label2np_array if label_mode else distances.instr2np_array
    in_np_set = [to_array(in_str) for in_str in in_set]
    spk_hset = reproduce_spk_hidden_set(model, batch_set)
    msg_set = reproduce_msg_set(model, batch_set)
    lis_hset = reproduce_lis_hidden_set(model, batch_set)
    assert len(in_set) == len(batch_set)
    mean_distances = []
    spk_h_distances = []
    msg_distances = []
    lis_h_distances = []
    # All unordered pairs (i, j), i < j.
    for i in range(len(in_set) - 1):
        for j in range(i + 1, len(in_set)):
            mean_distances.append(get_in_dis(in_np_set[i], in_np_set[j], measure=in_dis_measure))
            spk_h_distances.append(get_hidden_dis(spk_hset[i], spk_hset[j], measure=spk_hidden_measure))
            msg_distances.append(get_msg_dis(msg_set[i], msg_set[j], measure=msg_dis_measure))
            lis_h_distances.append(get_hidden_dis(lis_hset[i], lis_hset[j], measure=lis_hidden_measure))
    mean_distances = np.asarray(mean_distances)
    # Perturb all-zero distance vectors so the correlation is defined.
    spk_h_distances = check_distance_set(np.asarray(spk_h_distances, dtype=float))
    msg_distances = check_distance_set(np.asarray(msg_distances, dtype=float))
    lis_h_distances = check_distance_set(np.asarray(lis_h_distances, dtype=float))
    dis_table = pd.DataFrame(
        {
            'MD': mean_distances,
            'SHD': spk_h_distances,
            'MSD': msg_distances,
            'LHD': lis_h_distances
        }
    )
    # Compute the correlation matrix once instead of four times.
    corr = dis_table.corr(corr_measure)
    mean_spkh_corr = corr['MD']['SHD']
    mean_msg_corr = corr['MD']['MSD']
    mean_lish_corr = corr['MD']['LHD']
    spkh_lish_corr = corr['SHD']['LHD']
    return mean_spkh_corr, mean_msg_corr, mean_lish_corr, spkh_lish_corr
def check_distance_set(distances):
    """Ensure a distance vector is not identically zero.

    An all-zero vector has zero variance, which makes correlation
    coefficients undefined; nudge such a vector slightly in place.
    Returns the (possibly modified) input array.
    """
    if distances.sum() != 0:
        return distances
    # Shift every entry up, then break the tie on the last one.
    distances += 0.1
    distances[-1] -= 0.01
    return distances
def reproduce_spk_hidden_set(model, batch_set):
    """Collect the speaker hidden state for every batch as a numpy array."""
    return [
        model.reproduce_speaker_hidden(batch).squeeze().detach().cpu().numpy()
        for batch in batch_set
    ]
def reproduce_msg_set(model, batch_set):
    """Collect each batch's message as an array of argmax symbol indices."""
    msg_set = []
    for batch in batch_set:
        raw = model.reproduce_message(batch).squeeze().detach().cpu().numpy()
        # One symbol per message position: argmax over that row's scores.
        symbols = [np.argmax(raw[row]) for row in range(raw.shape[0])]
        msg_set.append(np.asarray(symbols))
    return msg_set
def reproduce_lis_hidden_set(model, batch_set):
    """Collect the listener hidden state for every batch as a numpy array."""
    return [
        model.reproduce_listener_hidden(batch).squeeze().detach().cpu().numpy()
        for batch in batch_set
    ]
def get_hidden_dis(h1, h2, measure='euclidean'):
    """Distance between two hidden-state vectors.

    Only the 'euclidean' measure is supported; anything else raises
    NotImplementedError.
    """
    if measure != 'euclidean':
        raise NotImplementedError
    return distances.euclid_dis(h1, h2)
def get_in_dis(in1, in2, measure='hamming'):
    """Distance between two input arrays under the given measure.

    Supported measures: 'hamming', 'edit', 'euclidean'. Any other value
    raises NotImplementedError.
    """
    dispatch = {
        'hamming': distances.in_ham_dis,
        'edit': distances.in_edit_dis,
        'euclidean': distances.euclid_dis,
    }
    if measure not in dispatch:
        raise NotImplementedError
    return dispatch[measure](in1, in2)
def get_msg_dis(m1, m2, measure='edit'):
    """Distance between two message symbol arrays under the given measure.

    Supported measures: 'edit', 'hamming', 'euclidean'. Any other value
    raises NotImplementedError.
    """
    dispatch = {
        'edit': distances.editdistance.eval,
        'hamming': distances.msg_ham_dis,
        'euclidean': distances.euclid_dis,
    }
    if measure not in dispatch:
        raise NotImplementedError
    return dispatch[measure](m1, m2)
| [
"pandas.DataFrame",
"analysis.distances.in_edit_dis",
"numpy.argmax",
"analysis.distances.msg_ham_dis",
"numpy.asarray",
"analysis.distances.in_ham_dis",
"analysis.distances.sum",
"analysis.distances.label2np_array",
"analysis.distances.instr2np_array",
"analysis.distances.editdistance.eval",
"a... | [((1356, 1382), 'numpy.asarray', 'np.asarray', (['mean_distances'], {}), '(mean_distances)\n', (1366, 1382), True, 'import numpy as np\n'), ((1405, 1445), 'numpy.asarray', 'np.asarray', (['spk_h_distances'], {'dtype': 'float'}), '(spk_h_distances, dtype=float)\n', (1415, 1445), True, 'import numpy as np\n'), ((1466, 1504), 'numpy.asarray', 'np.asarray', (['msg_distances'], {'dtype': 'float'}), '(msg_distances, dtype=float)\n', (1476, 1504), True, 'import numpy as np\n'), ((1527, 1567), 'numpy.asarray', 'np.asarray', (['lis_h_distances'], {'dtype': 'float'}), '(lis_h_distances, dtype=float)\n', (1537, 1567), True, 'import numpy as np\n'), ((1756, 1866), 'pandas.DataFrame', 'pd.DataFrame', (["{'MD': mean_distances, 'SHD': spk_h_distances, 'MSD': msg_distances, 'LHD':\n lis_h_distances}"], {}), "({'MD': mean_distances, 'SHD': spk_h_distances, 'MSD':\n msg_distances, 'LHD': lis_h_distances})\n", (1768, 1866), True, 'import pandas as pd\n'), ((2339, 2354), 'analysis.distances.sum', 'distances.sum', ([], {}), '()\n', (2352, 2354), True, 'import analysis.distances as distances\n'), ((3434, 3462), 'analysis.distances.euclid_dis', 'distances.euclid_dis', (['h1', 'h2'], {}), '(h1, h2)\n', (3454, 3462), True, 'import analysis.distances as distances\n'), ((3598, 3628), 'analysis.distances.in_ham_dis', 'distances.in_ham_dis', (['in1', 'in2'], {}), '(in1, in2)\n', (3618, 3628), True, 'import analysis.distances as distances\n'), ((3911, 3946), 'analysis.distances.editdistance.eval', 'distances.editdistance.eval', (['m1', 'm2'], {}), '(m1, m2)\n', (3938, 3946), True, 'import analysis.distances as distances\n'), ((2966, 2991), 'numpy.argmax', 'np.argmax', (['message[r_idx]'], {}), '(message[r_idx])\n', (2975, 2991), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.asarray', 'np.asarray', (['msg_array'], {}), '(msg_array)\n', (3061, 3072), True, 'import numpy as np\n'), ((3672, 3703), 'analysis.distances.in_edit_dis', 'distances.in_edit_dis', (['in1', 'in2'], {}), '(in1, 
in2)\n', (3693, 3703), True, 'import analysis.distances as distances\n'), ((3993, 4022), 'analysis.distances.msg_ham_dis', 'distances.msg_ham_dis', (['m1', 'm2'], {}), '(m1, m2)\n', (4014, 4022), True, 'import analysis.distances as distances\n'), ((428, 460), 'analysis.distances.label2np_array', 'distances.label2np_array', (['in_str'], {}), '(in_str)\n', (452, 460), True, 'import analysis.distances as distances\n'), ((505, 537), 'analysis.distances.instr2np_array', 'distances.instr2np_array', (['in_str'], {}), '(in_str)\n', (529, 537), True, 'import analysis.distances as distances\n'), ((3752, 3782), 'analysis.distances.euclid_dis', 'distances.euclid_dis', (['in1', 'in2'], {}), '(in1, in2)\n', (3772, 3782), True, 'import analysis.distances as distances\n'), ((4071, 4099), 'analysis.distances.euclid_dis', 'distances.euclid_dis', (['m1', 'm2'], {}), '(m1, m2)\n', (4091, 4099), True, 'import analysis.distances as distances\n')] |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests for code in C{modcost}.
"""
from numpy import array
from pypower.totcost import totcost
from pypower.modcost import modcost
from pypower.t.t_begin import t_begin
from pypower.t.t_is import t_is
from pypower.t.t_end import t_end
def t_modcost(quiet=False):
    """Tests for code in C{modcost}.
    @author: <NAME> (PSERC Cornell)
    """
    n_tests = 80

    t_begin(n_tests, quiet)

    ## generator cost data
    #    1    startup    shutdown    n    x1    y1    ...    xn    yn
    #    2    startup    shutdown    n    c(n-1)    ...    c0
    gencost0 = array([
        [2, 0, 0, 3, 0.01,   0.1,   1,     0,    0,     0,  0,    0],
        [2, 0, 0, 5, 0.0006, 0.005, 0.04,  0.3,  2,     0,  0,    0],
        [1, 0, 0, 4, 0,      0,     10,    200, 20,   600, 30, 1200],
        [1, 0, 0, 4, -30,    -2400, -20, -1800, -10, -1000,  0,    0]
    ])

    # Dispatch/expected pairs are shared by all four modification modes:
    # each mode's cost function undoes the modification before comparing,
    # so the expected total costs are those of the unmodified gencost0.
    groups = [
        ('quadratic', [
            ([0, 0, 0, 0], [1, 2, 0, 0]),
            ([1, 0, 0, 0], [1.11, 2, 0, 0]),
            ([2, 0, 0, 0], [1.24, 2, 0, 0]),
        ]),
        ('4th order polynomial', [
            ([0, 0, 0, 0], [1, 2, 0, 0]),
            ([0, 1, 0, 0], [1, 2.3456, 0, 0]),
            ([0, 2, 0, 0], [1, 2.8096, 0, 0]),
        ]),
        ('pwl (gen)', [
            ([0, 0, 5, 0],  [1, 2, 100, 0]),
            ([0, 0, 10, 0], [1, 2, 200, 0]),
            ([0, 0, 15, 0], [1, 2, 400, 0]),
            ([0, 0, 20, 0], [1, 2, 600, 0]),
            ([0, 0, 25, 0], [1, 2, 900, 0]),
            ([0, 0, 30, 0], [1, 2, 1200, 0]),
            ([0, 0, 35, 0], [1, 2, 1500, 0]),
        ]),
        ('pwl (load)', [
            ([0, 0, 0, -5],  [1, 2, 0, -500]),
            ([0, 0, 0, -10], [1, 2, 0, -1000]),
            ([0, 0, 0, -15], [1, 2, 0, -1400]),
            ([0, 0, 0, -20], [1, 2, 0, -1800]),
            ([0, 0, 0, -25], [1, 2, 0, -2100]),
            ([0, 0, 0, -30], [1, 2, 0, -2400]),
            ([0, 0, 0, -35], [1, 2, 0, -2700]),
        ]),
    ]

    # (mode name, modification parameter, inverse-modified cost function).
    # SCALE_F scales cost by 5 -> divide cost by 5; SCALE_X scales dispatch
    # by 2 -> double the dispatch; SHIFT_F shifts cost by 3 -> subtract 3;
    # SHIFT_X shifts dispatch by -4 -> evaluate at pg - 4.
    modes = [
        ('SCALE_F', 5, lambda gc, pg: totcost(gc, array(pg)) / 5),
        ('SCALE_X', 2, lambda gc, pg: totcost(gc, array(pg) * 2)),
        ('SHIFT_F', 3, lambda gc, pg: totcost(gc, array(pg)) - 3),
        ('SHIFT_X', -4, lambda gc, pg: totcost(gc, array(pg) - 4)),
    ]

    for mode, alpha, cost_fn in modes:
        gencost = modcost(gencost0, alpha, mode)
        for label, cases in groups:
            t = 'modcost %s - %s' % (mode, label)
            for pg, expected in cases:
                t_is(cost_fn(gencost, pg), expected, 8, t)

    t_end()
# Run the modcost tests directly when executed as a script.
if __name__ == '__main__':
    t_modcost(quiet=False)
| [
"pypower.t.t_end.t_end",
"pypower.t.t_begin.t_begin",
"numpy.array",
"pypower.modcost.modcost"
] | [((531, 554), 'pypower.t.t_begin.t_begin', 't_begin', (['n_tests', 'quiet'], {}), '(n_tests, quiet)\n', (538, 554), False, 'from pypower.t.t_begin import t_begin\n'), ((730, 945), 'numpy.array', 'array', (['[[2, 0, 0, 3, 0.01, 0.1, 1, 0, 0, 0, 0, 0], [2, 0, 0, 5, 0.0006, 0.005, \n 0.04, 0.3, 2, 0, 0, 0], [1, 0, 0, 4, 0, 0, 10, 200, 20, 600, 30, 1200],\n [1, 0, 0, 4, -30, -2400, -20, -1800, -10, -1000, 0, 0]]'], {}), '([[2, 0, 0, 3, 0.01, 0.1, 1, 0, 0, 0, 0, 0], [2, 0, 0, 5, 0.0006, \n 0.005, 0.04, 0.3, 2, 0, 0, 0], [1, 0, 0, 4, 0, 0, 10, 200, 20, 600, 30,\n 1200], [1, 0, 0, 4, -30, -2400, -20, -1800, -10, -1000, 0, 0]])\n', (735, 945), False, 'from numpy import array\n'), ((1039, 1070), 'pypower.modcost.modcost', 'modcost', (['gencost0', '(5)', '"""SCALE_F"""'], {}), "(gencost0, 5, 'SCALE_F')\n", (1046, 1070), False, 'from pypower.modcost import modcost\n'), ((2810, 2841), 'pypower.modcost.modcost', 'modcost', (['gencost0', '(2)', '"""SCALE_X"""'], {}), "(gencost0, 2, 'SCALE_X')\n", (2817, 2841), False, 'from pypower.modcost import modcost\n'), ((4551, 4582), 'pypower.modcost.modcost', 'modcost', (['gencost0', '(3)', '"""SHIFT_F"""'], {}), "(gencost0, 3, 'SHIFT_F')\n", (4558, 4582), False, 'from pypower.modcost import modcost\n'), ((6295, 6327), 'pypower.modcost.modcost', 'modcost', (['gencost0', '(-4)', '"""SHIFT_X"""'], {}), "(gencost0, -4, 'SHIFT_X')\n", (6302, 6327), False, 'from pypower.modcost import modcost\n'), ((8029, 8036), 'pypower.t.t_end.t_end', 't_end', ([], {}), '()\n', (8034, 8036), False, 'from pypower.t.t_end import t_end\n'), ((1166, 1185), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1171, 1185), False, 'from numpy import array\n'), ((1238, 1257), 'numpy.array', 'array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (1243, 1257), False, 'from numpy import array\n'), ((1313, 1332), 'numpy.array', 'array', (['[2, 0, 0, 0]'], {}), '([2, 0, 0, 0])\n', (1318, 1332), False, 'from numpy import array\n'), ((1438, 1457), 
'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1443, 1457), False, 'from numpy import array\n'), ((1515, 1534), 'numpy.array', 'array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (1520, 1534), False, 'from numpy import array\n'), ((1592, 1611), 'numpy.array', 'array', (['[0, 2, 0, 0]'], {}), '([0, 2, 0, 0])\n', (1597, 1611), False, 'from numpy import array\n'), ((1708, 1727), 'numpy.array', 'array', (['[0, 0, 5, 0]'], {}), '([0, 0, 5, 0])\n', (1713, 1727), False, 'from numpy import array\n'), ((1783, 1803), 'numpy.array', 'array', (['[0, 0, 10, 0]'], {}), '([0, 0, 10, 0])\n', (1788, 1803), False, 'from numpy import array\n'), ((1858, 1878), 'numpy.array', 'array', (['[0, 0, 15, 0]'], {}), '([0, 0, 15, 0])\n', (1863, 1878), False, 'from numpy import array\n'), ((1933, 1953), 'numpy.array', 'array', (['[0, 0, 20, 0]'], {}), '([0, 0, 20, 0])\n', (1938, 1953), False, 'from numpy import array\n'), ((2008, 2028), 'numpy.array', 'array', (['[0, 0, 25, 0]'], {}), '([0, 0, 25, 0])\n', (2013, 2028), False, 'from numpy import array\n'), ((2083, 2103), 'numpy.array', 'array', (['[0, 0, 30, 0]'], {}), '([0, 0, 30, 0])\n', (2088, 2103), False, 'from numpy import array\n'), ((2159, 2179), 'numpy.array', 'array', (['[0, 0, 35, 0]'], {}), '([0, 0, 35, 0])\n', (2164, 2179), False, 'from numpy import array\n'), ((2275, 2295), 'numpy.array', 'array', (['[0, 0, 0, -5]'], {}), '([0, 0, 0, -5])\n', (2280, 2295), False, 'from numpy import array\n'), ((2352, 2373), 'numpy.array', 'array', (['[0, 0, 0, -10]'], {}), '([0, 0, 0, -10])\n', (2357, 2373), False, 'from numpy import array\n'), ((2430, 2451), 'numpy.array', 'array', (['[0, 0, 0, -15]'], {}), '([0, 0, 0, -15])\n', (2435, 2451), False, 'from numpy import array\n'), ((2508, 2529), 'numpy.array', 'array', (['[0, 0, 0, -20]'], {}), '([0, 0, 0, -20])\n', (2513, 2529), False, 'from numpy import array\n'), ((2586, 2607), 'numpy.array', 'array', (['[0, 0, 0, -25]'], {}), '([0, 0, 0, -25])\n', (2591, 2607), False, 
'from numpy import array\n'), ((2664, 2685), 'numpy.array', 'array', (['[0, 0, 0, -30]'], {}), '([0, 0, 0, -30])\n', (2669, 2685), False, 'from numpy import array\n'), ((2742, 2763), 'numpy.array', 'array', (['[0, 0, 0, -35]'], {}), '([0, 0, 0, -35])\n', (2747, 2763), False, 'from numpy import array\n'), ((2907, 2926), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2912, 2926), False, 'from numpy import array\n'), ((2979, 2998), 'numpy.array', 'array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (2984, 2998), False, 'from numpy import array\n'), ((3054, 3073), 'numpy.array', 'array', (['[2, 0, 0, 0]'], {}), '([2, 0, 0, 0])\n', (3059, 3073), False, 'from numpy import array\n'), ((3179, 3198), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (3184, 3198), False, 'from numpy import array\n'), ((3256, 3275), 'numpy.array', 'array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (3261, 3275), False, 'from numpy import array\n'), ((3333, 3352), 'numpy.array', 'array', (['[0, 2, 0, 0]'], {}), '([0, 2, 0, 0])\n', (3338, 3352), False, 'from numpy import array\n'), ((3449, 3468), 'numpy.array', 'array', (['[0, 0, 5, 0]'], {}), '([0, 0, 5, 0])\n', (3454, 3468), False, 'from numpy import array\n'), ((3524, 3544), 'numpy.array', 'array', (['[0, 0, 10, 0]'], {}), '([0, 0, 10, 0])\n', (3529, 3544), False, 'from numpy import array\n'), ((3599, 3619), 'numpy.array', 'array', (['[0, 0, 15, 0]'], {}), '([0, 0, 15, 0])\n', (3604, 3619), False, 'from numpy import array\n'), ((3674, 3694), 'numpy.array', 'array', (['[0, 0, 20, 0]'], {}), '([0, 0, 20, 0])\n', (3679, 3694), False, 'from numpy import array\n'), ((3749, 3769), 'numpy.array', 'array', (['[0, 0, 25, 0]'], {}), '([0, 0, 25, 0])\n', (3754, 3769), False, 'from numpy import array\n'), ((3824, 3844), 'numpy.array', 'array', (['[0, 0, 30, 0]'], {}), '([0, 0, 30, 0])\n', (3829, 3844), False, 'from numpy import array\n'), ((3900, 3920), 'numpy.array', 'array', (['[0, 0, 35, 0]'], {}), '([0, 0, 
35, 0])\n', (3905, 3920), False, 'from numpy import array\n'), ((4016, 4036), 'numpy.array', 'array', (['[0, 0, 0, -5]'], {}), '([0, 0, 0, -5])\n', (4021, 4036), False, 'from numpy import array\n'), ((4093, 4114), 'numpy.array', 'array', (['[0, 0, 0, -10]'], {}), '([0, 0, 0, -10])\n', (4098, 4114), False, 'from numpy import array\n'), ((4171, 4192), 'numpy.array', 'array', (['[0, 0, 0, -15]'], {}), '([0, 0, 0, -15])\n', (4176, 4192), False, 'from numpy import array\n'), ((4249, 4270), 'numpy.array', 'array', (['[0, 0, 0, -20]'], {}), '([0, 0, 0, -20])\n', (4254, 4270), False, 'from numpy import array\n'), ((4327, 4348), 'numpy.array', 'array', (['[0, 0, 0, -25]'], {}), '([0, 0, 0, -25])\n', (4332, 4348), False, 'from numpy import array\n'), ((4405, 4426), 'numpy.array', 'array', (['[0, 0, 0, -30]'], {}), '([0, 0, 0, -30])\n', (4410, 4426), False, 'from numpy import array\n'), ((4483, 4504), 'numpy.array', 'array', (['[0, 0, 0, -35]'], {}), '([0, 0, 0, -35])\n', (4488, 4504), False, 'from numpy import array\n'), ((4648, 4667), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (4653, 4667), False, 'from numpy import array\n'), ((4723, 4742), 'numpy.array', 'array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (4728, 4742), False, 'from numpy import array\n'), ((4798, 4817), 'numpy.array', 'array', (['[2, 0, 0, 0]'], {}), '([2, 0, 0, 0])\n', (4803, 4817), False, 'from numpy import array\n'), ((4923, 4942), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (4928, 4942), False, 'from numpy import array\n'), ((5000, 5019), 'numpy.array', 'array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (5005, 5019), False, 'from numpy import array\n'), ((5077, 5096), 'numpy.array', 'array', (['[0, 2, 0, 0]'], {}), '([0, 2, 0, 0])\n', (5082, 5096), False, 'from numpy import array\n'), ((5193, 5212), 'numpy.array', 'array', (['[0, 0, 5, 0]'], {}), '([0, 0, 5, 0])\n', (5198, 5212), False, 'from numpy import array\n'), ((5268, 5288), 'numpy.array', 
'array', (['[0, 0, 10, 0]'], {}), '([0, 0, 10, 0])\n', (5273, 5288), False, 'from numpy import array\n'), ((5343, 5363), 'numpy.array', 'array', (['[0, 0, 15, 0]'], {}), '([0, 0, 15, 0])\n', (5348, 5363), False, 'from numpy import array\n'), ((5418, 5438), 'numpy.array', 'array', (['[0, 0, 20, 0]'], {}), '([0, 0, 20, 0])\n', (5423, 5438), False, 'from numpy import array\n'), ((5493, 5513), 'numpy.array', 'array', (['[0, 0, 25, 0]'], {}), '([0, 0, 25, 0])\n', (5498, 5513), False, 'from numpy import array\n'), ((5568, 5588), 'numpy.array', 'array', (['[0, 0, 30, 0]'], {}), '([0, 0, 30, 0])\n', (5573, 5588), False, 'from numpy import array\n'), ((5644, 5664), 'numpy.array', 'array', (['[0, 0, 35, 0]'], {}), '([0, 0, 35, 0])\n', (5649, 5664), False, 'from numpy import array\n'), ((5760, 5780), 'numpy.array', 'array', (['[0, 0, 0, -5]'], {}), '([0, 0, 0, -5])\n', (5765, 5780), False, 'from numpy import array\n'), ((5837, 5858), 'numpy.array', 'array', (['[0, 0, 0, -10]'], {}), '([0, 0, 0, -10])\n', (5842, 5858), False, 'from numpy import array\n'), ((5915, 5936), 'numpy.array', 'array', (['[0, 0, 0, -15]'], {}), '([0, 0, 0, -15])\n', (5920, 5936), False, 'from numpy import array\n'), ((5993, 6014), 'numpy.array', 'array', (['[0, 0, 0, -20]'], {}), '([0, 0, 0, -20])\n', (5998, 6014), False, 'from numpy import array\n'), ((6071, 6092), 'numpy.array', 'array', (['[0, 0, 0, -25]'], {}), '([0, 0, 0, -25])\n', (6076, 6092), False, 'from numpy import array\n'), ((6149, 6170), 'numpy.array', 'array', (['[0, 0, 0, -30]'], {}), '([0, 0, 0, -30])\n', (6154, 6170), False, 'from numpy import array\n'), ((6227, 6248), 'numpy.array', 'array', (['[0, 0, 0, -35]'], {}), '([0, 0, 0, -35])\n', (6232, 6248), False, 'from numpy import array\n'), ((6393, 6412), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6398, 6412), False, 'from numpy import array\n'), ((6468, 6487), 'numpy.array', 'array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (6473, 6487), False, 'from numpy 
import array\n'), ((6543, 6562), 'numpy.array', 'array', (['[2, 0, 0, 0]'], {}), '([2, 0, 0, 0])\n', (6548, 6562), False, 'from numpy import array\n'), ((6668, 6687), 'numpy.array', 'array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6673, 6687), False, 'from numpy import array\n'), ((6745, 6764), 'numpy.array', 'array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (6750, 6764), False, 'from numpy import array\n'), ((6822, 6841), 'numpy.array', 'array', (['[0, 2, 0, 0]'], {}), '([0, 2, 0, 0])\n', (6827, 6841), False, 'from numpy import array\n'), ((6938, 6957), 'numpy.array', 'array', (['[0, 0, 5, 0]'], {}), '([0, 0, 5, 0])\n', (6943, 6957), False, 'from numpy import array\n'), ((7013, 7033), 'numpy.array', 'array', (['[0, 0, 10, 0]'], {}), '([0, 0, 10, 0])\n', (7018, 7033), False, 'from numpy import array\n'), ((7088, 7108), 'numpy.array', 'array', (['[0, 0, 15, 0]'], {}), '([0, 0, 15, 0])\n', (7093, 7108), False, 'from numpy import array\n'), ((7163, 7183), 'numpy.array', 'array', (['[0, 0, 20, 0]'], {}), '([0, 0, 20, 0])\n', (7168, 7183), False, 'from numpy import array\n'), ((7238, 7258), 'numpy.array', 'array', (['[0, 0, 25, 0]'], {}), '([0, 0, 25, 0])\n', (7243, 7258), False, 'from numpy import array\n'), ((7313, 7333), 'numpy.array', 'array', (['[0, 0, 30, 0]'], {}), '([0, 0, 30, 0])\n', (7318, 7333), False, 'from numpy import array\n'), ((7389, 7409), 'numpy.array', 'array', (['[0, 0, 35, 0]'], {}), '([0, 0, 35, 0])\n', (7394, 7409), False, 'from numpy import array\n'), ((7505, 7525), 'numpy.array', 'array', (['[0, 0, 0, -5]'], {}), '([0, 0, 0, -5])\n', (7510, 7525), False, 'from numpy import array\n'), ((7582, 7603), 'numpy.array', 'array', (['[0, 0, 0, -10]'], {}), '([0, 0, 0, -10])\n', (7587, 7603), False, 'from numpy import array\n'), ((7660, 7681), 'numpy.array', 'array', (['[0, 0, 0, -15]'], {}), '([0, 0, 0, -15])\n', (7665, 7681), False, 'from numpy import array\n'), ((7738, 7759), 'numpy.array', 'array', (['[0, 0, 0, -20]'], {}), '([0, 0, 0, 
-20])\n', (7743, 7759), False, 'from numpy import array\n'), ((7816, 7837), 'numpy.array', 'array', (['[0, 0, 0, -25]'], {}), '([0, 0, 0, -25])\n', (7821, 7837), False, 'from numpy import array\n'), ((7894, 7915), 'numpy.array', 'array', (['[0, 0, 0, -30]'], {}), '([0, 0, 0, -30])\n', (7899, 7915), False, 'from numpy import array\n'), ((7972, 7993), 'numpy.array', 'array', (['[0, 0, 0, -35]'], {}), '([0, 0, 0, -35])\n', (7977, 7993), False, 'from numpy import array\n')] |
# Anatomical ontology management
# Author: <NAME>, 2019
"""Handle ontology lookup.
"""
import os
from collections import OrderedDict
from enum import Enum
import json
import numpy as np
import pandas as pd
from magmap.settings import config
from magmap.io import libmag
# Keys used in reverse-lookup dictionary entries
NODE = "node"
PARENT_IDS = "parent_ids"
MIRRORED = "mirrored"
# Suffixes appended to region names to denote hemisphere
RIGHT_SUFFIX = " (R)"
LEFT_SUFFIX = " (L)"
class LabelColumns(Enum):
    """Label data frame columns enumeration."""
    # column of labels to convert from (see ``replace_labels``)
    FROM_LABEL = "FromLabel"
    # column of labels to convert to
    TO_LABEL = "ToLabel"
def load_labels_ref(path):
    """Load labels from a reference JSON or CSV file.

    Args:
        path (str): Path to labels reference.

    Returns:
        Union[dict, :class:`pandas.DataFrame`]: A JSON decoded object
        (eg dictionary) if the path has a JSON extension, or a data frame.
    """
    ext = os.path.splitext(path)[1]
    if ext != ".json":
        # non-JSON references are assumed to be CSV
        return pd.read_csv(path)
    with open(path, "r") as ref_file:
        return json.load(ref_file)
def convert_itksnap_to_df(path):
    """Convert an ITK-SNAP labels description file to a CSV file
    compatible with MagellanMapper.

    Args:
        path: Path to description file.

    Returns:
        Pandas data frame of the description file.
    """
    # load description file, treating contiguous whitespace as separators,
    # stripping "#" comments, and naming columns after the ITK-SNAP keys;
    # raw string avoids the invalid "\s" escape warning (SyntaxWarning
    # from Python 3.12 onward)
    df = pd.read_csv(
        path, sep=r"\s+", comment="#",
        names=[e.value for e in config.ItkSnapLabels])
    return df
def get_node(nested_dict, key, value, key_children):
    """Find the first node in a nested dictionary whose ``key`` entry
    equals ``value``, descending depth-first through children.

    Args:
        nested_dict: Dictionary whose ``key_children`` entry holds a list
            of further dictionaries of the same form (possibly empty).
        key: Key whose entry is compared against ``value``.
        value: Value to find, assumed to be unique for the given key.
        key_children: Name of the children key.

    Returns:
        The node matching the key-value pair, or None if not found.
    """
    try:
        if nested_dict[key] == value:
            return nested_dict
        for child in nested_dict[key_children]:
            match = get_node(child, key, value, key_children)
            if match is not None:
                return match
    except KeyError as e:
        # missing key or children entry; report and treat as not found
        print(e)
    return None
def create_aba_reverse_lookup(labels_ref):
    """Create a reverse lookup dictionary for Allen Brain Atlas style
    ontology files.

    Args:
        labels_ref: The ontology file as a parsed JSON dictionary.

    Returns:
        Reverse lookup dictionary as output by
        :func:`ontology.create_reverse_lookup`.
    """
    # ABA ontologies nest the root node under the first "msg" entry
    root = labels_ref["msg"][0]
    return create_reverse_lookup(
        root, config.ABAKeys.ABA_ID.value, config.ABAKeys.CHILDREN.value)
def create_reverse_lookup(nested_dict, key, key_children, id_dict=None,
                          parent_list=None):
    """Create a reverse lookup dictionary with the values of the original
    dictionary as the keys of the new dictionary.

    Each value of the new dictionary is another dictionary that contains
    "node", the dictionary with the given key-value pair, and "parent_ids",
    a list of all the parents of the given node. This entry can be used to
    track all superseding dictionaries, and the node can be used to find
    all its children.

    Args:
        nested_dict (dict): A dictionary that contains a list of
            dictionaries in the key_children entry.
        key (Any): Key that contains the values to use as keys in the new
            dictionary. The values of this key should be unique throughout
            the entire nested_dict and thus serve as IDs.
        key_children (Any): Name of the children key, which contains a list
            of further dictionaries but can be empty.
        id_dict (OrderedDict): The output dictionary as an OrderedDict to
            preserve key order (though not hierarchical structure) so that
            children will come after their parents. Defaults to None to
            create an empty `OrderedDict`.
        parent_list (list[Any]): List of values for the given key in all
            parent dictionaries.

    Returns:
        OrderedDict: A dictionary with the original values as the keys,
        which each map to another dictionary containing an entry with the
        dictionary holding the given value and another entry with a list of
        all parent dictionary values for the given key.
    """
    if id_dict is None:
        # fresh ordered mapping so parents precede their children
        id_dict = OrderedDict()
    node_id = nested_dict[key]
    entry = {NODE: nested_dict}
    if parent_list is not None:
        entry[PARENT_IDS] = parent_list
    id_dict[node_id] = entry
    try:
        children = nested_dict[key_children]
        # copy the ancestor list so siblings do not share mutations
        ancestors = [] if parent_list is None else list(parent_list)
        ancestors.append(node_id)
        for child in children:
            create_reverse_lookup(
                child, key, key_children, id_dict, ancestors)
    except KeyError as e:
        # node lacks a children (or key) entry; report and stop descending
        print(e)
    return id_dict
def create_lookup_pd(df):
    """Create a lookup dictionary from a Pandas data frame.

    Args:
        df: Pandas data frame, assumed to have at least columns
            corresponding to :const:``config.ABAKeys.ABA_ID`` and
            :const:``config.ABAKeys.ABA_NAME``.

    Returns:
        Dictionary similar to that generated from
        :meth:``create_reverse_lookup``, with IDs as keys and values
        corresponding of another dictionary with :const:``NODE`` and
        :const:``PARENT_IDS`` as keys. :const:``NODE`` in turn
        contains a dictionary with entries for each Enum in
        :const:``config.ABAKeys``.
    """
    lookup = OrderedDict()
    id_col = config.ABAKeys.ABA_ID.value
    for region_id in df[id_col]:
        # take the first matching row as this region's record
        record = df[df[id_col] == region_id].to_dict("records")[0]
        if config.ABAKeys.NAME.value not in record:
            # fill in minimal defaults for frames lacking the full columns
            record[config.ABAKeys.NAME.value] = str(region_id)
            record[config.ABAKeys.LEVEL.value] = 1
            record[config.ABAKeys.CHILDREN.value] = []
            record[config.ABAKeys.ACRONYM.value] = ""
        lookup[region_id] = {NODE: record, PARENT_IDS: []}
    return lookup
def _get_children(labels_ref_lookup, label_id, children_all=None):
    """Recursively get the children of a given non-negative atlas ID.

    Used as a helper function to :func:``get_children_from_id``.

    Args:
        labels_ref_lookup: The labels reference lookup, assumed to be
            generated by :func:`create_reverse_lookup` to look up by ID.
        label_id: ID of the label to find, assumed to be >= 0 since
            IDs in ``labels_ref_lookup`` are generally non-negative.
        children_all: List of all children of this ID, used recursively;
            defaults to None, treated as an empty list. To include the ID
            itself, pass in a list with this ID alone.

    Returns:
        A list of all children of the given ID, in order from highest
        (numerically lowest) level to lowest.
    """
    if children_all is None:
        # avoid a mutable default argument, which would silently
        # accumulate children across separate top-level calls
        children_all = []
    label = labels_ref_lookup.get(label_id)
    if label:
        # recursively gather the children of the label
        children = label[NODE][config.ABAKeys.CHILDREN.value]
        for child in children:
            child_id = child[config.ABAKeys.ABA_ID.value]
            children_all.append(child_id)
            _get_children(labels_ref_lookup, child_id, children_all)
    return children_all
def _mirror_label_ids(label_ids, combine=False):
    """Mirror label IDs, assuming that a "mirrored" ID is the negative
    of the given ID.

    Args:
        label_ids (Union[int, List[int]]): Single ID or sequence of IDs.
        combine (bool): True to return a list of ``label_ids`` along with
            their mirrored IDs; defaults to False to return only the
            mirrored IDs.

    Returns:
        Union[int, List[int]]: A single mirrored ID if ``label_ids`` is
        one ID and ``combine`` is False, or a list of IDs.
    """
    if libmag.is_seq(label_ids):
        mirrored = [-1 * n for n in label_ids]
        if combine:
            # concatenate the originals with the mirrored IDs; previously
            # used list.extend, which mutates in place and returns None
            mirrored = list(label_ids) + mirrored
    else:
        mirrored = -1 * label_ids
        if combine:
            mirrored = [label_ids, mirrored]
    return mirrored
def get_children_from_id(labels_ref_lookup, label_id, incl_parent=True,
                         both_sides=False):
    """Get the children of a given atlas ID.

    Args:
        labels_ref_lookup: The labels reference lookup, assumed to be
            generated by :func:`create_reverse_lookup` to look up by ID.
        label_id: ID of the label to find, which can be negative.
        incl_parent: True to include ``label_id`` itself in the list of
            children; defaults to True.
        both_sides: True to include both sides, ie positive and negative
            values of each ID. Defaults to False.

    Returns:
        A list of all children of the given ID, in order from highest
        (numerically lowest) level to lowest.
    """
    pos_id = abs(label_id)
    seed = [pos_id] if incl_parent else []
    region_ids = _get_children(labels_ref_lookup, pos_id, seed)
    if both_sides:
        # append the negated counterpart of every gathered ID
        region_ids.extend(_mirror_label_ids(region_ids))
    elif label_id < 0:
        # requested label was on the mirrored side; negate all IDs
        region_ids = _mirror_label_ids(region_ids)
    return region_ids
def labels_to_parent(labels_ref_lookup, level=None,
                     allow_parent_same_level=False):
    """Generate a dictionary mapping label IDs to parent IDs at a given level.

    Parents are considered to be "below" (numerically lower level) their
    children, or at least at the same level if ``allow_parent_same_level``
    is True.

    Args:
        labels_ref_lookup (dict): The labels reference lookup, assumed to be an
            OrderedDict generated by :func:`ontology.create_reverse_lookup`
            to look up by ID while preserving key order to ensure that
            parents of any child will be reached prior to the child.
        level (int): Level at which to find parent for each label; defaults to
            None to get the parent immediately below the given label.
        allow_parent_same_level (bool): True to allow selecting a parent at
            the same level as the label; False to require the parent to be
            at least one level below. Defaults to False.

    Returns:
        dict: Dictionary of label IDs to parent IDs at the given level. Labels
        at the given level will be assigned to their own ID, and labels below
        or without a parent at the level will be given a default level of 0.
    """
    # similar to volumes_dict_level_grouping but without checking for neg
    # keys or grouping values
    label_parents = {}
    for label_id in labels_ref_lookup.keys():
        parent_at_level = 0
        label = labels_ref_lookup[label_id]
        # find ancestor above (numerically below) label's level
        label_level = label[NODE][config.ABAKeys.LEVEL.value]
        target_level = label_level - 1 if level is None else level
        if label_level == target_level:
            # use label's own ID if at target level
            parent_at_level = label_id
        elif label_level > target_level:
            parents = label.get(PARENT_IDS)
            if parents:
                for parent in parents[::-1]:
                    # assume that parents are ordered by decreasing
                    # (numerically higher) level
                    parent_level = labels_ref_lookup[
                        parent][NODE][config.ABAKeys.LEVEL.value]
                    if (parent_level <= target_level
                            or allow_parent_same_level
                            and parent_level == label_level):
                        # use first parent below (or at least at) target level
                        parent_at_level = parent
                        break
            else:
                print("No parents at level", label_level, "for label", label_id)
        parent_ref = label[NODE][config.ABAKeys.PARENT_ID.value]
        if parent_ref != parent_at_level:
            # warn on discrepancies between the parent listed in the ontology
            # file and the parent derived from the parsed parent IDs; a plain
            # comparison replaces the prior assert, which is stripped under -O,
            # and the message now includes both values (the old format string
            # had 4 args for 3 placeholders and a missing word separator)
            print("Parent at level {} or lower for label {} does not match "
                  "parent listed in reference file: {} vs {}"
                  .format(target_level, label_id, parent_at_level, parent_ref))
        label_parents[label_id] = parent_at_level
    return label_parents
def get_label_item(label, item_key, key=NODE):
    """Convenience function to get the item from the sub-label.

    Args:
        label (dict): The label dictionary. Assumes that ``label`` is a
            nested dictionary.
        item_key (str): Key for item to retrieve from within ``label[key]``.
        key (str): First level key; defaults to :const:`NODE`.

    Returns:
        The label item, or None if not found.
    """
    try:
        if label is None:
            return None
        sub = label[key]
        if sub is None:
            return None
        return sub[item_key]
    except KeyError as e:
        # either level's key may be absent; report and fall through
        print(e, item_key)
    return None
def get_label_name(label, side=False):
    """Get the atlas region name from the label.

    Args:
        label (dict): The label dictionary.
        side (bool): True to append a hemisphere suffix based on the
            label's :const:`MIRRORED` entry; defaults to False.

    Returns:
        The atlas region name, or None if not found.
    """
    name = None
    try:
        if label is not None:
            node = label[NODE]
            if node is not None:
                name = node[config.ABAKeys.NAME.value]
                # removed leftover debug print, which dumped every name to
                # the console and accessed label[MIRRORED] unconditionally,
                # raising a spurious KeyError even when side was False
                if side:
                    if label[MIRRORED]:
                        name += LEFT_SUFFIX
                    else:
                        name += RIGHT_SUFFIX
    except KeyError as e:
        print(e, name)
    return name
def get_label_side(label_id):
    """Convert label IDs into side strings.

    The convention used here is that positive values = right, negative
    values = left.

    TODO: consider making pos/neg side correspondence configurable.

    Args:
        label_id (int, List[int]): Label ID or sequence of IDs to convert,
            where all positive labels are considered right, all negative
            are left, and any mix of pos, neg, or zero gives both.

    Returns:
        :str: Value of corresponding :class:`config.HemSides` enum.
    """
    if np.all(np.greater(label_id, 0)):
        return config.HemSides.RIGHT.value
    elif np.all(np.less(label_id, 0)):
        return config.HemSides.LEFT.value
    return config.HemSides.BOTH.value
def scale_coords(coord, scaling=None, clip_shape=None):
    """Get the atlas label IDs for the given coordinates.

    Args:
        coord (:class:`numpy.ndarray`): Coordinates of experiment image in
            ``z,y,x`` order. Can be an ``[n, 3]`` array of coordinates.
        scaling (Sequence[int]): Scaling factor for the labels image size
            compared with the experiment image as ``z,y,x``; defaults to None.
        clip_shape (Sequence[int]): Max image shape as ``z,y,x``, used to
            round coordinates for extra precision. For simplicity, scaled
            values are simply floored. Repeated scaling such as upsampling
            after downsampling can lead to errors. If this parameter is given,
            values will instead by rounded to minimize errors while giving
            ints. Rounded values will be clipped to this shape minus 1 to
            stay within bounds.

    Returns:
        :class:`numpy.ndarray`: An scaled array of the same shape as ``coord``.
    """
    libmag.printv(
        "getting label IDs from coordinates using scaling", scaling)
    coord_scaled = coord
    if scaling is not None:
        # scale coordinates to atlas image size
        coord_scaled = np.multiply(coord, scaling)
    if clip_shape is not None:
        # round when extra precision is necessary, such as during reverse
        # scaling, which requires clipping so coordinates don't exceed labels
        # image shape; use builtin int since the deprecated np.int alias was
        # removed in NumPy 1.24
        coord_scaled = np.around(coord_scaled).astype(int)
        coord_scaled = np.clip(
            coord_scaled, None, np.subtract(clip_shape, 1))
    else:
        # typically don't round to stay within bounds
        coord_scaled = coord_scaled.astype(int)
    return coord_scaled
def get_label_ids_from_position(coord_scaled, labels_img):
    """Get the atlas label IDs for the given coordinates.

    Args:
        coord_scaled (:class:`numpy.ndarray`): 2D array of coordinates in
            ``[[z,y,x], ...]`` format, or a single row as a 1D array.
        labels_img (:class:`numpy.ndarray`): Labeled image from which to
            extract labels at coordinates in ``coord_scaled``.

    Returns:
        :class:`numpy.ndarray`: An array of label IDs corresponding to
        ``coord``, or a scalar of one ID if only one coordinate is given.
    """
    # convert coordinate rows into per-axis index arrays, then pull the
    # corresponding label IDs by integer array indexing
    index_arrays = libmag.coords_for_indexing(coord_scaled)
    return labels_img[tuple(index_arrays)][0]
def get_label(coord, labels_img, labels_ref, scaling, level=None,
              rounding=False):
    """Get the atlas label for the given coordinates.

    Args:
        coord: Coordinates of experiment image in (z, y, x) order.
        labels_img: The registered image whose intensity values correspond to
            label IDs.
        labels_ref: The labels reference lookup, assumed to be generated by
            :func:`ontology.create_reverse_lookup` to look up by ID.
        scaling: Scaling factor for the labels image size compared with the
            experiment image.
        level: The ontology level as an integer to target; defaults to None.
            If None, level will be ignored, and the exact matching label
            to the given coordinates will be returned. If a level is given,
            the label at the highest (numerically lowest) level encompassing
            this region will be returned.
        rounding: True to round coordinates after scaling (see
            :func:``get_label_ids_from_position``); defaults to False.

    Returns:
        The label dictionary at those coordinates, or None if no label is
        found.
    """
    # scale the coordinates into labels image space and read the ID there
    coord_scaled = scale_coords(
        coord, scaling, labels_img.shape if rounding else None)
    label_id = get_label_ids_from_position(coord_scaled, labels_img)
    libmag.printv("found label_id: {}".format(label_id))
    # negative IDs denote mirrored labels; look up by the positive ID
    mirrored = label_id < 0
    if mirrored:
        label_id = -1 * label_id
    label = None
    try:
        label = labels_ref[label_id]
        if level is not None and label[
                NODE][config.ABAKeys.LEVEL.value] > level:
            # search for parent at "higher" (numerically lower) level
            # that matches the target level
            parents = label[PARENT_IDS]
            label = None
            if label_id < 0:
                # NOTE(review): label_id was negated to positive above when
                # mirrored, so this branch appears unreachable — confirm
                parents = np.multiply(parents, -1)
            for parent in parents:
                parent_label = labels_ref[parent]
                if parent_label[NODE][config.ABAKeys.LEVEL.value] == level:
                    label = parent_label
                    break
        if label is not None:
            # record which hemisphere the original (signed) ID came from
            label[MIRRORED] = mirrored
            libmag.printv(
                "label ID at level {}: {}".format(level, label_id))
    except KeyError as e:
        libmag.printv(
            "could not find label id {} or its parent (error {})"
            .format(label_id, e))
    return label
def get_region_middle(labels_ref_lookup, label_id, labels_img, scaling,
                      both_sides=False, incl_children=True):
    """Approximate the middle position of a region by taking the middle
    value of its sorted list of coordinates.

    The region's coordinate sorting prioritizes z, followed by y, etc, meaning
    that the middle value will be closest to the middle of z but may fall
    be slightly away from midline in the other axes if this z does not
    contain y/x's around midline. Getting the coordinate at the middle
    of this list rather than another coordinate midway between other values
    in the region ensures that the returned coordinate will reside within
    the region itself, including non-contingous regions that may be
    intermixed with coordinates not part of the region.

    Args:
        labels_ref_lookup (Dict[int, Dict]): The labels reference lookup,
            assumed to be generated by :func:`ontology.create_reverse_lookup`
            to look up by ID.
        label_id (int, List[int]): ID of the label to find, or sequence of IDs.
        labels_img (:obj:`np.ndarray`): The registered image whose intensity
            values correspond to label IDs.
        scaling (:obj:`np.ndarray`): Scaling factors as a Numpy array in z,y,x
            for the labels image size compared with the experiment image.
        both_sides (bool, List[bool]): True to include both sides, or
            sequence of booleans corresponding to ``label_id``; defaults
            to False.
        incl_children (bool): True to include children of ``label_id``,
            False to include only ``label_id``; defaults to True.

    Returns:
        List[int], :obj:`np.ndarray`, List[int]: ``coord``, the middle value
        of a list of all coordinates in the region at the given ID;
        ``img_region``, a boolean mask of the region within ``labels_img``;
        and ``region_ids``, a list of the IDs included in the region.
        If ``labels_ref_lookup`` is None, all values are None.
    """
    if not labels_ref_lookup:
        return None, None, None
    # gather IDs for label, including children and opposite sides
    if not libmag.is_seq(label_id):
        label_id = [label_id]
    if not libmag.is_seq(both_sides):
        both_sides = [both_sides]
    region_ids = []
    for region_id, both in zip(label_id, both_sides):
        if incl_children:
            # add children of the label +/- both sides
            region_ids.extend(get_children_from_id(
                labels_ref_lookup, region_id, incl_parent=True,
                both_sides=both))
        else:
            # add the label +/- its mirrored version
            region_ids.append(region_id)
            if both:
                region_ids.append(_mirror_label_ids(region_id))

    # get a list of all the region's coordinates to sort
    img_region = np.isin(labels_img, region_ids)
    region_coords = np.where(img_region)

    def get_middle(coords):
        # recursively get value at middle of list for each axis
        sort_ind = np.lexsort(coords[::-1])  # last axis is primary key
        num_coords = len(sort_ind)
        if num_coords > 0:
            mid_ind = sort_ind[int(num_coords / 2)]
            mid = coords[0][mid_ind]
            if len(coords) > 1:
                # shift to next axis in tuple of coords
                mask = coords[0] == mid
                coords = tuple(c[mask] for c in coords[1:])
                return (mid, *get_middle(coords))
            return (mid, )
        return None

    coord = None
    coord_labels = get_middle(region_coords)
    if coord_labels:
        print("coord_labels (unscaled): {}".format(coord_labels))
        print("ID at middle coord: {} (in region? {})"
              .format(labels_img[coord_labels], img_region[coord_labels]))
        # use builtin int since the deprecated np.int alias was removed
        # in NumPy 1.24
        coord = tuple(np.around(coord_labels / scaling).astype(int))
        print("coord at middle: {}".format(coord))
    return coord, img_region, region_ids
def rel_to_abs_ages(rel_ages, gestation=19):
    """Convert relative age labels to absolute ages in days.

    Args:
        rel_ages (List[str]): Labels of the form ``[stage][days]``, where
            the stage prefix is "E" (embryonic, days after conception) or
            "P" (postnatal, days after birth), e.g. "E3.5" or "P10".
        gestation (int): Gestation length in days; added to postnatal ages
            so all results share the same (conception-based) origin.

    Returns:
        Dictionary of ``{label: age_in_days}``.
    """
    return {
        label: float(label[1:]) + (gestation if label[0].lower() == "p" else 0)
        for label in rel_ages
    }
def replace_labels(labels_img, df, clear=False, ref=None, combine_sides=False):
    """Rewrite label values in-place according to a from->to mapping table.

    Args:
        labels_img (:class:`numpy.ndarray`): Labels image whose values are
            converted in-place.
        df (:class:`pandas.DataFrame`): Table whose source and target label
            columns are named by :class:`LabelColumns` values.
        clear (bool): True to zero out the image first so that only mapped
            labels survive; matching is then done against a pristine copy.
        ref (dict): Labels reference; when given, each source label is
            expanded to all of its children; defaults to None.
        combine_sides (bool): True to also convert the mirrored (negative)
            counterpart of each source label; defaults to False.

    Returns:
        :class:`numpy.ndarray`: The same ``labels_img`` array, modified
        in-place.
    """
    reference_img = labels_img
    if clear:
        # blank the live image but keep an untouched copy to match against
        reference_img = np.copy(labels_img)
        labels_img[:] = 0
    src_col = df[LabelColumns.FROM_LABEL.value]
    dst_col = df[LabelColumns.TO_LABEL.value]
    for target in dst_col.unique():
        # all source labels mapped onto this target label
        sources = src_col.loc[dst_col == target]
        if ref:
            expanded = []
            for src in sources:
                # expand each source label to its whole subtree of children
                expanded.extend(get_children_from_id(
                    ref, src, both_sides=combine_sides))
        else:
            expanded = sources.values
        print("Converting labels from {} to {}"
              .format(expanded, target))
        labels_img[np.isin(reference_img, expanded)] = target
    print("Converted image labels:", np.unique(labels_img))
    return labels_img
| [
"numpy.isin",
"json.load",
"numpy.multiply",
"numpy.subtract",
"numpy.copy",
"numpy.less",
"pandas.read_csv",
"numpy.lexsort",
"magmap.io.libmag.printv",
"numpy.greater",
"magmap.io.libmag.coords_for_indexing",
"numpy.around",
"numpy.where",
"os.path.splitext",
"collections.OrderedDict",... | [((846, 868), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (862, 868), False, 'import os\n'), ((1437, 1531), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\\\\s+"""', 'comment': '"""#"""', 'names': '[e.value for e in config.ItkSnapLabels]'}), "(path, sep='\\\\s+', comment='#', names=[e.value for e in config.\n ItkSnapLabels])\n", (1448, 1531), True, 'import pandas as pd\n'), ((6165, 6178), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6176, 6178), False, 'from collections import OrderedDict\n'), ((8596, 8620), 'magmap.io.libmag.is_seq', 'libmag.is_seq', (['label_ids'], {}), '(label_ids)\n', (8609, 8620), False, 'from magmap.io import libmag\n'), ((16472, 16546), 'magmap.io.libmag.printv', 'libmag.printv', (['"""getting label IDs from coordinates using scaling"""', 'scaling'], {}), "('getting label IDs from coordinates using scaling', scaling)\n", (16485, 16546), False, 'from magmap.io import libmag\n'), ((17915, 17955), 'magmap.io.libmag.coords_for_indexing', 'libmag.coords_for_indexing', (['coord_scaled'], {}), '(coord_scaled)\n', (17941, 17955), False, 'from magmap.io import libmag\n'), ((23441, 23472), 'numpy.isin', 'np.isin', (['labels_img', 'region_ids'], {}), '(labels_img, region_ids)\n', (23448, 23472), True, 'import numpy as np\n'), ((23493, 23513), 'numpy.where', 'np.where', (['img_region'], {}), '(img_region)\n', (23501, 23513), True, 'import numpy as np\n'), ((1006, 1023), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1017, 1023), True, 'import pandas as pd\n'), ((4903, 4916), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4914, 4916), False, 'from collections import OrderedDict\n'), ((15255, 15278), 'numpy.greater', 'np.greater', (['label_id', '(0)'], {}), '(label_id, 0)\n', (15265, 15278), True, 'import numpy as np\n'), ((16680, 16707), 'numpy.multiply', 'np.multiply', (['coord', 'scaling'], {}), '(coord, scaling)\n', (16691, 16707), True, 
'import numpy as np\n'), ((22737, 22760), 'magmap.io.libmag.is_seq', 'libmag.is_seq', (['label_id'], {}), '(label_id)\n', (22750, 22760), False, 'from magmap.io import libmag\n'), ((22803, 22828), 'magmap.io.libmag.is_seq', 'libmag.is_seq', (['both_sides'], {}), '(both_sides)\n', (22816, 22828), False, 'from magmap.io import libmag\n'), ((23685, 23709), 'numpy.lexsort', 'np.lexsort', (['coords[::-1]'], {}), '(coords[::-1])\n', (23695, 23709), True, 'import numpy as np\n'), ((26271, 26290), 'numpy.copy', 'np.copy', (['labels_img'], {}), '(labels_img)\n', (26278, 26290), True, 'import numpy as np\n'), ((27094, 27115), 'numpy.unique', 'np.unique', (['labels_img'], {}), '(labels_img)\n', (27103, 27115), True, 'import numpy as np\n'), ((962, 974), 'json.load', 'json.load', (['f'], {}), '(f)\n', (971, 974), False, 'import json\n'), ((15340, 15360), 'numpy.less', 'np.less', (['label_id', '(0)'], {}), '(label_id, 0)\n', (15347, 15360), True, 'import numpy as np\n'), ((17041, 17067), 'numpy.subtract', 'np.subtract', (['clip_shape', '(1)'], {}), '(clip_shape, 1)\n', (17052, 17067), True, 'import numpy as np\n'), ((27004, 27044), 'numpy.isin', 'np.isin', (['labels_img_orig', 'to_convert_all'], {}), '(labels_img_orig, to_convert_all)\n', (27011, 27044), True, 'import numpy as np\n'), ((16938, 16961), 'numpy.around', 'np.around', (['coord_scaled'], {}), '(coord_scaled)\n', (16947, 16961), True, 'import numpy as np\n'), ((19921, 19945), 'numpy.multiply', 'np.multiply', (['parents', '(-1)'], {}), '(parents, -1)\n', (19932, 19945), True, 'import numpy as np\n'), ((24480, 24513), 'numpy.around', 'np.around', (['(coord_labels / scaling)'], {}), '(coord_labels / scaling)\n', (24489, 24513), True, 'import numpy as np\n')] |
from copy import deepcopy
import numpy as np
from numpy.random import randint
from numpy.random import random
def GenerateRandomTree(functions, terminals, max_height, curr_height=0):
    """Grow a random expression tree up to ``max_height``.

    At the height limit a terminal is always chosen.  Below the root,
    a fair coin decides between stopping at a terminal and expanding a
    function node; the root itself is always a function node so the tree
    never degenerates to a single node.
    """
    if curr_height == max_height:
        # forced terminal at the depth limit
        return deepcopy(terminals[randint(len(terminals))])
    # coin flip only below the root (guarantees at least two nodes)
    if curr_height > 0 and random() < 0.5:
        return deepcopy(terminals[randint(len(terminals))])
    node = deepcopy(functions[randint(len(functions))])
    for _ in range(node.arity):
        child = GenerateRandomTree(
            functions, terminals, max_height, curr_height=curr_height + 1)
        # AppendChild also registers node as the child's parent
        node.AppendChild(child)
    return node
def SubtreeMutation( individual, functions, terminals, max_height=4 ):
    """Replace one uniformly-depth-sampled subtree of ``individual``
    with a freshly generated random tree and return the mutant."""
    replacement = GenerateRandomTree( functions, terminals, max_height )
    candidates = __GetCandidateNodesAtUniformRandomDepth( individual.GetSubtree() )
    target = candidates[randint(len(candidates))]
    parent = target.parent
    if not parent:
        # the root itself was picked: the whole individual is discarded
        return replacement
    slot = parent.DetachChild(target)
    parent.InsertChildAtPosition(slot, replacement)
    return individual
def SubtreeCrossover( individual, donor ):
    """One-child subtree crossover: graft a copied subtree of ``donor``
    onto a uniformly-depth-sampled cut point of ``individual``."""
    recv_nodes = __GetCandidateNodesAtUniformRandomDepth( individual.GetSubtree() )
    donor_nodes = __GetCandidateNodesAtUniformRandomDepth( donor.GetSubtree() )
    cut_point = recv_nodes[randint(len(recv_nodes))]
    # deep copy only the donated subtree, not the whole donor
    graft = deepcopy( donor_nodes[randint(len(donor_nodes))] )
    attach = cut_point.parent
    if not attach:
        # cut at the root: the child is the donated subtree itself
        return graft
    pos = attach.DetachChild(cut_point)
    attach.InsertChildAtPosition(pos, graft)
    return individual
def __GetCandidateNodesAtUniformRandomDepth( nodes ):
    """Pick one depth uniformly at random among the depths present in
    ``nodes`` and return every node at that depth."""
    distinct_depths = sorted({node.GetDepth() for node in nodes})
    target_depth = distinct_depths[randint(len(distinct_depths))]
    return [node for node in nodes if node.GetDepth() == target_depth]
| [
"copy.deepcopy",
"numpy.random.random"
] | [((255, 279), 'copy.deepcopy', 'deepcopy', (['terminals[idx]'], {}), '(terminals[idx])\n', (263, 279), False, 'from copy import deepcopy\n'), ((499, 523), 'copy.deepcopy', 'deepcopy', (['functions[idx]'], {}), '(functions[idx])\n', (507, 523), False, 'from copy import deepcopy\n'), ((379, 387), 'numpy.random.random', 'random', ([], {}), '()\n', (385, 387), False, 'from numpy.random import random\n')] |
from __future__ import absolute_import, division, print_function
import math
import six
# we want a round function which does the same on Python 2.7 and 3.x
if six.PY2:
from numpy import around as round
def format_float_with_standard_uncertainty(value, standard_uncertainty, minimum=1e-12):
    """Format a float in concise parenthesis notation with its uncertainty.

    Parameters
    ----------
    value : float
    standard_uncertainty : float
    minimum : float
        Uncertainties at or below this are treated as negligible and the
        value is printed alone.

    Returns
    -------
    str

    Examples
    --------
    >>> format_float_with_standard_uncertainty(5e-3, 1e-3)
    '0.0050(10)'
    >>> format_float_with_standard_uncertainty(5e-3, 1e-6)
    '0.0050000(10)'
    """
    if standard_uncertainty <= minimum:
        # negligible uncertainty: print the value at the precision floor
        decimals = -int(math.log10(minimum))
        return str(round(value, decimals))
    precision = -int(round(math.log10(standard_uncertainty)))
    if precision <= -1:
        # uncertainty >= 10: both numbers rendered as integers
        precision += 1
        int_su = int(round(standard_uncertainty, precision))
        return "%.0f(%i)" % (round(value, precision), int_su)
    scaled = standard_uncertainty * math.pow(10, precision)
    if round(scaled, 1) < 2:
        # keep two significant digits inside the parentheses
        scaled *= 10
        precision += 1
    return "{value:.{precision}f}({irsu})".format(
        value=value, precision=precision, irsu=int(round(scaled))
    )
def show_mask_info(expt_list):
    """Print, for each imageset in ``expt_list``, the detector description
    followed by the masked-pixel count of every detector module."""
    for imageset in expt_list.imagesets():
        detector = imageset.get_detector()
        module_masks = imageset.get_mask(0)
        print("---- ----")
        print(detector)
        for module_idx, module_mask in enumerate(module_masks):
            # a False entry in the mask marks a masked (excluded) pixel
            print(
                "Module {} has {} masked pixels of {}".format(
                    module_idx, module_mask.count(False), module_mask.size()
                )
            )
| [
"numpy.around",
"math.log10",
"math.pow"
] | [((792, 808), 'numpy.around', 'round', (['value', 'dp'], {}), '(value, dp)\n', (797, 808), True, 'from numpy import around as round\n'), ((931, 954), 'math.pow', 'math.pow', (['(10)', 'precision'], {}), '(10, precision)\n', (939, 954), False, 'import math\n'), ((966, 978), 'numpy.around', 'round', (['su', '(1)'], {}), '(su, 1)\n', (971, 978), True, 'from numpy import around as round\n'), ((1213, 1251), 'numpy.around', 'round', (['standard_uncertainty', 'precision'], {}), '(standard_uncertainty, precision)\n', (1218, 1251), True, 'from numpy import around as round\n'), ((752, 771), 'math.log10', 'math.log10', (['minimum'], {}), '(minimum)\n', (762, 771), False, 'import math\n'), ((837, 869), 'math.log10', 'math.log10', (['standard_uncertainty'], {}), '(standard_uncertainty)\n', (847, 869), False, 'import math\n'), ((1308, 1331), 'numpy.around', 'round', (['value', 'precision'], {}), '(value, precision)\n', (1313, 1331), True, 'from numpy import around as round\n'), ((1142, 1151), 'numpy.around', 'round', (['su'], {}), '(su)\n', (1147, 1151), True, 'from numpy import around as round\n')] |
import numpy as np
import function
import random
"""
RNN化バージョン0509作成s-PFCのための層内結合ありのネットワーク
rnn_neuronの複製0519
"""
# ニューロン単独
# このプログラムでは行列で定義されたニューロンの出力結果を返す
# ユニットの定義は上層で行う
# 引数として上から結合加重Wとバイアスbを受け取る
# forによる繰り返しは行わないあくまで一回のみ計算
"""
基本パラメーター
**************************
"""
Tau = 10
dt = 0.01
random_del = 0.5
class neuron:
    """Single leaky-integration neuron layer for the s-PFC RNN.

    Computes one update per ``forward`` call from an upstream activity
    vector; the Euler integration and sigmoid come from the project-level
    ``function.function`` helper, which is kept in sync with this layer's
    simulation constants.
    """

    def __init__(self):
        # default simulation constants; override via def_parameter()
        self.random_del = 1   # noise amplitude
        self.dt = 0.01        # integration time step
        self.Tau = 10         # membrane time constant
        self.V = None         # membrane-potential state (managed by caller)
        self.S = None         # firing-rate state (managed by caller)
        # shared math helper, configured with the same constants
        self.function = function.function()
        self.function.def_parameter(
            self.Tau, self.dt, self.random_del)

    def def_parameter(self, Tau, dt, random_del):
        """Update the simulation constants and propagate them to the helper."""
        self.random_del = random_del
        self.dt = dt
        self.Tau = Tau
        self.function.def_parameter(
            self.Tau, self.dt, self.random_del)

    def forward(self, v, si, W, b, mode="None"):
        """One Euler-integrated step; returns (v, s), plus RI when mode == "RI"."""
        jitter = self.random_del * (random.random()-0.5)
        # recurrent input: noise + weighted input (incl. lateral weights) + bias
        RI = -jitter + np.dot(si, W) + b
        v = self.function.eular_V(v, RI)
        s = self.function.sigmoid(v)
        if mode == "RI":
            return v, s, RI
        return v, s

    def forward_dry(self, v, si, W, b, mode="None"):
        """Like forward() but adds RI directly to v, skipping the Tau/Euler
        dynamics.  NOTE: ``v += RI`` mutates ``v`` in place for ndarrays."""
        jitter = self.random_del * (random.random()-0.5)
        RI = -jitter + np.dot(si, W) + b
        v += RI
        s = self.function.sigmoid(v)
        if mode == "RI":
            return v, s, RI
        return v, s
| [
"numpy.dot",
"function.function",
"random.random"
] | [((552, 571), 'function.function', 'function.function', ([], {}), '()\n', (569, 571), False, 'import function\n'), ((1034, 1049), 'random.random', 'random.random', ([], {}), '()\n', (1047, 1049), False, 'import random\n'), ((1101, 1114), 'numpy.dot', 'np.dot', (['si', 'W'], {}), '(si, W)\n', (1107, 1114), True, 'import numpy as np\n'), ((1367, 1382), 'random.random', 'random.random', ([], {}), '()\n', (1380, 1382), False, 'import random\n'), ((1460, 1473), 'numpy.dot', 'np.dot', (['si', 'W'], {}), '(si, W)\n', (1466, 1473), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Import Libraries
from zipfile import ZipFile
import cv2
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from skimage import util, exposure
from sklearn.utils import shuffle
from tensorflow.keras import models, layers
# Training hyper-parameters
batch_size = 32
learning_rate = 0.0003
epochs = 10
# Extract the recorded driving data archive into ./data
with ZipFile('data.zip', 'r') as zipObj:
    zipObj.extractall()
    print('Finish extracting')
# Load the driving log; only the image paths and steering angle are used
log_data = pd.read_csv('./data/driving_log.csv')
log_data.drop(columns=['throttle','brake','speed'],inplace=True)
'''
Gets side cameras to improve driving correction
'''
def get_side_cameras(log_line,correction = 0.25):
    """Load the left/right camera frames for one log row and derive
    corrected steering targets for them.

    The left camera sees the road shifted right, so its steering target is
    increased by ``correction``; the right camera's is decreased.

    Returns:
        (left_image, left_steering, right_image, right_steering)
    """
    def load_rgb(rel_path):
        # simulator frames are stored BGR; the model expects RGB
        img = cv2.imread('./data/' + rel_path.strip())
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    image_left = load_rgb(log_line.left)
    image_right = load_rgb(log_line.right)
    return (image_left, log_line.steering + correction,
            image_right, log_line.steering - correction)
'''
Loads a batch of images from the log and preprocess the images
'''
def batch_loader(log_select):
    """Load and preprocess one batch of samples from the driving log.

    For every row: the center frame (with random gamma augmentation on
    roughly half of them) plus both side-camera frames with corrected
    steering targets.

    Returns:
        (images, measurements) as numpy arrays; three entries per log row.
    """
    images = []
    measurements = []
    for row in log_select.itertuples():
        center = cv2.imread('./data/' + row.center)
        # Fix for udacity simulator: convert BGR -> RGB
        center = cv2.cvtColor(center, cv2.COLOR_BGR2RGB)
        # random gamma augmentation on ~half of the center frames
        if np.random.rand() > .5:
            center = exposure.adjust_gamma(center, np.random.uniform(0.75, 1.10))
        images.append(center)
        measurements.append(row.steering)
        # side cameras with steering correction
        left_img, left_measure, right_img, right_measure = get_side_cameras(row)
        images.append(left_img)
        measurements.append(left_measure)
        images.append(right_img)
        measurements.append(right_measure)
    return np.asarray(images), np.asarray(measurements)
'''
Split the data in train, test and validation
'''
def train_test_val(log_data_frame, train_ratio, test_ratio, val_ratio):
    """Shuffle the log and split it into train/test/validation parts.

    Args:
        log_data_frame: pandas DataFrame of driving-log rows.
        train_ratio, test_ratio, val_ratio: fractions that must sum to 1.

    Returns:
        (train, test, validation) DataFrame slices.

    Raises:
        AssertionError: if the three ratios do not sum to 1.
    """
    # Tolerance-based check: exact float equality (sum(...) == 1.0) spuriously
    # fails for valid splits such as 0.1 + 0.2 + 0.7.
    assert abs(train_ratio + test_ratio + val_ratio - 1.0) < 1e-9
    log_data_frame = shuffle(log_data_frame)
    data_size = len(log_data_frame)
    id_train = int(round(data_size * train_ratio))
    id_test = int(round(data_size * (train_ratio + test_ratio)))
    train = log_data_frame[:id_train]
    test = log_data_frame[id_train:id_test]
    validation = log_data_frame[id_test:]
    return train, test, validation
'''
Flip images and measure to help with the data distribution to one side
'''
def flip(in_images,in_labels):
    """Horizontally mirror every image and negate its steering measure.

    Zero measures stay exactly 0.0 (avoids emitting -0.0).

    Args:
        in_images: sequence of image arrays.
        in_labels: matching sequence of steering measurements.

    Returns:
        (mirrored_images, negated_measures) as numpy arrays.
    """
    mirrored = []
    negated = []
    for idx, img in enumerate(in_images):
        mirrored.append(np.fliplr(img))
        label = in_labels[idx]
        negated.append(-label if label != 0.0 else 0.0)
    mirrored = np.asarray(mirrored)
    negated = np.asarray(negated)
    assert len(in_images) == len(mirrored)
    return mirrored, negated
'''
Function to load a batch at a time in memory
'''
def image_generator(logs, batch_s, training=True):
    """Endlessly yield preprocessed (images, labels) batches from the log.

    In training mode each batch is doubled with horizontally-flipped
    copies to balance the left/right steering distribution.

    Args:
        logs: DataFrame of driving-log rows.
        batch_s: number of log rows per batch (each row expands to 3
            camera images, 6 in training mode after flipping).
        training: True to append flipped augmentations.

    Yields:
        (images, labels) numpy arrays.
    """
    # Bug fix: the window previously advanced by the module-level global
    # ``batch_size`` instead of the ``batch_s`` parameter, so callers using a
    # different batch size (e.g. 128 for validation) got overlapping or
    # skipped windows.
    while True:
        start = 0
        while start < len(logs):
            selected = logs[start:start + batch_s]
            images, labels = batch_loader(selected)
            if training:
                flip_img, flip_l = flip(images, labels)
                images = np.vstack((flip_img, images))
                labels = np.hstack((flip_l, labels))
            yield images, labels
            start += batch_s
# 60/20/20 split of the driving log into train/test/validation
train_log, test_log, validation_log = train_test_val(log_data,0.6,0.2,0.2)
# number of generator batches needed to cover each split once per epoch
steps_per_epoch = math.ceil(len(train_log)/batch_size)
validation_steps_per_epoch = math.ceil(len(validation_log)/batch_size)
test_steps_per_epoch = math.ceil(len(test_log)/batch_size)
# ## Neural network architectures
''' Testing keras functionality '''
def simple_net(input_shape):
    """Minimal sanity-check network: normalize, crop, one conv block,
    then two dense layers regressing the steering angle."""
    # NOTE(review): Convolution2D(24,5,5,...) is Keras-1 positional style —
    # in tf.keras the third argument is strides=5; confirm this is intended.
    return models.Sequential([
        layers.Lambda(lambda x: x/127.5-1., input_shape=input_shape),
        layers.Cropping2D(cropping=((50,20), (0,0))),
        layers.Convolution2D(24,5,5,activation='relu'),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dense(120),
        layers.Dense(1),
    ])
''' LeNet5 Architecture '''
def le_net_5(input_shape,dropout):
    """LeNet-style architecture: two conv/pool blocks with dropout,
    followed by dense layers regressing the steering angle."""
    # NOTE(review): Convolution2D(64,5,5,...) is Keras-1 positional style —
    # in tf.keras the third argument is strides=5; confirm this is intended.
    return models.Sequential([
        layers.Lambda(lambda x: x/127.5 - 1., input_shape=input_shape),
        layers.Convolution2D(64,5,5,activation='relu'),
        layers.MaxPooling2D((2,2)),
        layers.Dropout(dropout),
        layers.Convolution2D(36,5,5,activation='relu'),
        layers.MaxPooling2D((2,2)),
        layers.Flatten(),
        layers.Dense(120),
        layers.Dropout(dropout),
        layers.Dense(84),
        layers.Dense(1),
    ])
''' Nvidia proposed network with dropouts '''
def nvidia_model(input_shape,dropout):
    """NVIDIA end-to-end self-driving architecture with added dropout:
    normalize, crop sky/hood, five conv layers, then dense regression
    down to a single steering output."""
    return models.Sequential([
        layers.Lambda(lambda x: x/255.0-0.5, input_shape=input_shape),
        layers.Cropping2D(cropping=((70,25), (0,0))),
        layers.Convolution2D(24,5,2,activation='relu'),
        layers.Convolution2D(36,5,2,activation='relu'),
        layers.Convolution2D(48,5,2,activation='relu'),
        layers.Dropout(dropout),
        layers.Convolution2D(64,3,activation='relu'),
        layers.Convolution2D(64,3,activation='relu'),
        layers.Flatten(),
        layers.Dense(100),
        layers.Dropout(dropout),
        layers.Dense(50),
        layers.Dense(10),
        layers.Dense(1),
    ])
# Network training: build, compile, fit on the generator, then evaluate.
# model = simple_net(img_shape,0.001)
# model = LeNet5(img_shape,0.25,0.0003)
model = nvidia_model((160, 320, 3),0.25)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate),metrics=['accuracy'])
# NOTE(review): the validation/test generators yield batches of 128 rows while
# the step counts were computed with batch_size (32) — confirm this mismatch
# is intended.
history = model.fit(image_generator(train_log,batch_size),
          epochs=epochs,
          steps_per_epoch=steps_per_epoch,
          validation_data=image_generator(validation_log,128,training=False),
          validation_steps=validation_steps_per_epoch,
          shuffle=True,
          verbose=1)
scores = model.evaluate(image_generator(test_log,128,training=False),
               verbose=1,
               steps=test_steps_per_epoch)
print(scores)
#model.save('model.h5')
| [
"tensorflow.keras.layers.Cropping2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"cv2.cvtColor",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"numpy.a... | [((508, 545), 'pandas.read_csv', 'pd.read_csv', (['"""./data/driving_log.csv"""'], {}), "('./data/driving_log.csv')\n", (519, 545), True, 'import pandas as pd\n'), ((393, 417), 'zipfile.ZipFile', 'ZipFile', (['"""data.zip"""', '"""r"""'], {}), "('data.zip', 'r')\n", (400, 417), False, 'from zipfile import ZipFile\n'), ((834, 855), 'cv2.imread', 'cv2.imread', (['path_left'], {}), '(path_left)\n', (844, 855), False, 'import cv2\n'), ((873, 916), 'cv2.cvtColor', 'cv2.cvtColor', (['image_left', 'cv2.COLOR_BGR2RGB'], {}), '(image_left, cv2.COLOR_BGR2RGB)\n', (885, 916), False, 'import cv2\n'), ((1037, 1059), 'cv2.imread', 'cv2.imread', (['path_right'], {}), '(path_right)\n', (1047, 1059), False, 'import cv2\n'), ((1078, 1122), 'cv2.cvtColor', 'cv2.cvtColor', (['image_right', 'cv2.COLOR_BGR2RGB'], {}), '(image_right, cv2.COLOR_BGR2RGB)\n', (1090, 1122), False, 'import cv2\n'), ((2244, 2267), 'sklearn.utils.shuffle', 'shuffle', (['log_data_frame'], {}), '(log_data_frame)\n', (2251, 2267), False, 'from sklearn.utils import shuffle\n'), ((3060, 3085), 'numpy.asarray', 'np.asarray', (['result_images'], {}), '(result_images)\n', (3070, 3085), True, 'import numpy as np\n'), ((3108, 3135), 'numpy.asarray', 'np.asarray', (['result_measures'], {}), '(result_measures)\n', (3118, 3135), True, 'import numpy as np\n'), ((4161, 4180), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (4178, 4180), False, 'from tensorflow.keras import models, layers\n'), ((4569, 4588), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (4586, 4588), False, 'from tensorflow.keras import models, layers\n'), ((5144, 5163), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (5161, 5163), False, 'from tensorflow.keras import models, layers\n'), ((1413, 1429), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1423, 1429), False, 'import cv2\n'), ((1482, 1520), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 
'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1494, 1520), False, 'import cv2\n'), ((1995, 2013), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (2005, 2013), True, 'import numpy as np\n'), ((2014, 2038), 'numpy.asarray', 'np.asarray', (['measurements'], {}), '(measurements)\n', (2024, 2038), True, 'import numpy as np\n'), ((4191, 4256), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': 'input_shape'}), '(lambda x: x / 127.5 - 1.0, input_shape=input_shape)\n', (4204, 4256), False, 'from tensorflow.keras import models, layers\n'), ((4262, 4308), 'tensorflow.keras.layers.Cropping2D', 'layers.Cropping2D', ([], {'cropping': '((50, 20), (0, 0))'}), '(cropping=((50, 20), (0, 0)))\n', (4279, 4308), False, 'from tensorflow.keras import models, layers\n'), ((4319, 4368), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(24)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(24, 5, 5, activation='relu')\n", (4339, 4368), False, 'from tensorflow.keras import models, layers\n'), ((4377, 4398), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (4396, 4398), False, 'from tensorflow.keras import models, layers\n'), ((4410, 4426), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4424, 4426), False, 'from tensorflow.keras import models, layers\n'), ((4438, 4455), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(120)'], {}), '(120)\n', (4450, 4455), False, 'from tensorflow.keras import models, layers\n'), ((4467, 4482), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4479, 4482), False, 'from tensorflow.keras import models, layers\n'), ((4599, 4664), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': 'input_shape'}), '(lambda x: x / 127.5 - 1.0, input_shape=input_shape)\n', (4612, 4664), False, 'from tensorflow.keras import models, layers\n'), ((4673, 
4722), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(64)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, 5, activation='relu')\n", (4693, 4722), False, 'from tensorflow.keras import models, layers\n'), ((4731, 4758), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (4750, 4758), False, 'from tensorflow.keras import models, layers\n'), ((4769, 4792), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), '(dropout)\n', (4783, 4792), False, 'from tensorflow.keras import models, layers\n'), ((4804, 4853), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(36)', '(5)', '(5)'], {'activation': '"""relu"""'}), "(36, 5, 5, activation='relu')\n", (4824, 4853), False, 'from tensorflow.keras import models, layers\n'), ((4862, 4889), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (4881, 4889), False, 'from tensorflow.keras import models, layers\n'), ((4900, 4916), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4914, 4916), False, 'from tensorflow.keras import models, layers\n'), ((4928, 4945), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(120)'], {}), '(120)\n', (4940, 4945), False, 'from tensorflow.keras import models, layers\n'), ((4957, 4980), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), '(dropout)\n', (4971, 4980), False, 'from tensorflow.keras import models, layers\n'), ((4992, 5008), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(84)'], {}), '(84)\n', (5004, 5008), False, 'from tensorflow.keras import models, layers\n'), ((5020, 5035), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (5032, 5035), False, 'from tensorflow.keras import models, layers\n'), ((5174, 5239), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'input_shape'}), '(lambda x: x / 255.0 - 0.5, 
input_shape=input_shape)\n', (5187, 5239), False, 'from tensorflow.keras import models, layers\n'), ((5246, 5292), 'tensorflow.keras.layers.Cropping2D', 'layers.Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (5263, 5292), False, 'from tensorflow.keras import models, layers\n'), ((5302, 5351), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""'}), "(24, 5, 2, activation='relu')\n", (5322, 5351), False, 'from tensorflow.keras import models, layers\n'), ((5360, 5409), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""'}), "(36, 5, 2, activation='relu')\n", (5380, 5409), False, 'from tensorflow.keras import models, layers\n'), ((5418, 5467), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""'}), "(48, 5, 2, activation='relu')\n", (5438, 5467), False, 'from tensorflow.keras import models, layers\n'), ((5476, 5499), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), '(dropout)\n', (5490, 5499), False, 'from tensorflow.keras import models, layers\n'), ((5511, 5557), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(64)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, activation='relu')\n", (5531, 5557), False, 'from tensorflow.keras import models, layers\n'), ((5567, 5613), 'tensorflow.keras.layers.Convolution2D', 'layers.Convolution2D', (['(64)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, activation='relu')\n", (5587, 5613), False, 'from tensorflow.keras import models, layers\n'), ((5623, 5639), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (5637, 5639), False, 'from tensorflow.keras import models, layers\n'), ((5651, 5668), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(100)'], {}), '(100)\n', (5663, 5668), False, 'from tensorflow.keras import models, 
layers\n'), ((5680, 5703), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), '(dropout)\n', (5694, 5703), False, 'from tensorflow.keras import models, layers\n'), ((5715, 5731), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(50)'], {}), '(50)\n', (5727, 5731), False, 'from tensorflow.keras import models, layers\n'), ((5743, 5759), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (5755, 5759), False, 'from tensorflow.keras import models, layers\n'), ((5771, 5786), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (5783, 5786), False, 'from tensorflow.keras import models, layers\n'), ((5979, 6018), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (6003, 6018), True, 'import tensorflow as tf\n'), ((1531, 1547), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1545, 1547), True, 'import numpy as np\n'), ((2869, 2883), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (2878, 2883), True, 'import numpy as np\n'), ((1603, 1631), 'numpy.random.uniform', 'np.random.uniform', (['(0.75)', '(1.1)'], {}), '(0.75, 1.1)\n', (1620, 1631), True, 'import numpy as np\n'), ((3613, 3642), 'numpy.vstack', 'np.vstack', (['(flip_img, images)'], {}), '((flip_img, images))\n', (3622, 3642), True, 'import numpy as np\n'), ((3667, 3694), 'numpy.hstack', 'np.hstack', (['(flip_l, labels)'], {}), '((flip_l, labels))\n', (3676, 3694), True, 'import numpy as np\n')] |
import numpy as np
import copy
def loss_func(af0, af1, vf0, vf1, confi):
    """Weighted matching loss between one audio and one video candidate.

    Feature 0 is the (normalized) hit intensity and feature 1 the predicted
    hit-edge index.  Base weights put both features on a comparable scale;
    a piecewise multiplier rewards small intensity gaps and heavily
    penalizes large ones.

    Args:
        af0, af1: audio intensity / hit-edge features.
        vf0, vf1: video intensity / hit-edge features.
        confi: confidence of the video hit-edge prediction.

    Returns:
        Combined scalar loss (assumes |af0 - vf0| <= 1 after normalization).
    """
    intensity_weight = 3  # base weight of the intensity feature
    edge_weight = 1       # base weight of the hit-edge feature
    # row 0: intensity-difference bins, row 1: extra multiplier per bin
    piecewise = np.array([[0, 0.05, 0.1, 0.5, 0.8, 1], [0.4, 0.6, 0.8, 1, 3, 100]])
    d0 = abs(af0 - vf0)
    bins, mults = piecewise[0], piecewise[1]
    for lo, hi, mult in zip(bins[:-1], bins[1:], mults[:-1]):
        if lo <= d0 <= hi:
            loss0 = d0 * intensity_weight * mult
            break
    # edge distance wraps at 2; unknown edges keep a small floor weight
    unknown_weight = 0.1
    d1 = abs(af1 - vf1) % 2
    loss1 = d1 * edge_weight * max(confi, unknown_weight)
    return loss0 + loss1
def find_best_match(audio_feat_mat, video_feat_mat, video_confi):
    """Find the single lowest-loss audio/video pairing.

    Intended to be called repeatedly, removing each matched pair, until one
    of the candidate pools is exhausted.

    Args:
        audio_feat_mat: audio feature matrix (one row per candidate).
        video_feat_mat: video feature matrix (one row per candidate).
        video_confi: per-video hit-edge confidence values.

    Returns:
        dict: ``{audio_index: video_index}`` for the best pair, or a
        sentinel ``{'all_empty'|'no_audio'|'no_video'|'loss_too_big': 'null'}``.
    """
    if audio_feat_mat.shape[0] == 0:
        if video_feat_mat.shape[0] == 0:
            return {'all_empty': 'null'}
        return {'no_audio': 'null'}
    if video_feat_mat.shape[0] == 0:
        return {'no_video': 'null'}
    # exhaustive search over every remaining audio/video pair
    thres = 3
    best_loss = None
    best_pair = None
    for a_idx in range(audio_feat_mat.shape[0]):
        for v_idx in range(video_feat_mat.shape[0]):
            cur_loss = loss_func(
                audio_feat_mat[a_idx, 0], audio_feat_mat[a_idx, 1],
                video_feat_mat[v_idx, 0], video_feat_mat[v_idx, 1],
                video_confi[v_idx])
            # strict '<' keeps the earliest pair on ties, as before
            if best_loss is None or cur_loss < best_loss:
                best_loss = cur_loss
                best_pair = {a_idx: v_idx}
    if best_loss < thres:
        return best_pair
    return {'loss_too_big': 'null'}
def set_within_class_match(video_feat_dict, audio_feat_dict):
    '''Greedily match audio clips to video clips of one object class.

    Two features are compared per clip:
      feature 0 -- (video max inter-frame displacement, audio max amplitude)
      feature 1 -- (video predicted hit edge, audio predicted hit edge)

    Args:
        video_feat_dict (dict): {'video folder name':
            [max displacement, [hit-edge id at max speed, confidence]]}
        audio_feat_dict (dict): {'audio name':
            [max short-time power, hit-edge id of the clearest hit]}

    Returns:
        dict mapping 'audio name' -> matched video index (two trailing
        digits of the folder name), with -1 for audio clips left unmatched;
        None when there is no audio to match.
    '''
    max_intense = 1  # normalization target for feature 0
    # If either side is empty, return immediately.
    if not bool(video_feat_dict):  # no video
        if not bool(audio_feat_dict):  # no audio either
            all_result = None
        else:
            # Every audio clip stays unmatched.
            all_result = {}
            for audio_name in audio_feat_dict.keys():
                all_result.update({audio_name: -1})
        return all_result
    if not bool(audio_feat_dict):  # no audio
        return None
    # Convert to matrix form, dropping the file names.
    video_feat_list = []
    hit_confidence = []
    for video_feat in video_feat_dict.values():
        video_feat_list.append([video_feat[0], video_feat[1][0]])
        hit_confidence.append(video_feat[1][1])  # hit-edge confidence
    audio_feat_list = []
    for audio_feat in audio_feat_dict.values():
        audio_feat_list.append(audio_feat)
    video_feat_mat = np.array(video_feat_list, dtype='float64')
    audio_feat_mat = np.array(audio_feat_list, dtype='float64')
    # All file names, in the same order as the matrix rows.
    all_video_name = list(video_feat_dict.keys())
    all_audio_name = list(audio_feat_dict.keys())
    # Normalize feature 0 to at most max_intense (the max(., 1) keeps the
    # divisor from being zero or inflating already-small features).
    video_1_max = max(video_feat_mat[:, 0].max(), 1)
    video_feat_mat[:, 0] = video_feat_mat[:, 0]/video_1_max*max_intense
    audio_1_max = max(audio_feat_mat[:, 0].max(), 1)
    audio_feat_mat[:, 0] = audio_feat_mat[:, 0]/audio_1_max*max_intense
    # Greedy matching over shrinking candidate sets.
    match_result = {}  # all matched pairs: {audio name: video name}
    unmatch_result = {}  # all unmatched audio: {audio name: -1}
    audio_feat_elimin = copy.deepcopy(audio_feat_mat)
    video_feat_elimin = copy.deepcopy(video_feat_mat)
    video_confi_elimin = copy.deepcopy(hit_confidence)
    audio_name_elimin = copy.deepcopy(all_audio_name)
    video_name_elimin = copy.deepcopy(all_video_name)
    # Start matching.
    while True:
        best_match = find_best_match(audio_feat_elimin,
                                     video_feat_elimin, video_confi_elimin)
        match_label = list(best_match.keys())[0]  # status of this round
        if match_label == 'all_empty':  # everything matched
            break
        elif match_label == 'no_audio':  # all audio matched
            break
        # All video matched, or the best remaining loss is too large:
        # the leftover audio clips stay unmatched.
        elif (match_label == 'no_video') or (match_label == 'loss_too_big'):
            for i in range(0, audio_feat_elimin.shape[0]):
                unmatch_format = {audio_name_elimin[i]: -1}
                unmatch_result.update(unmatch_format)
            break
        else:  # found one more matching pair
            audio_idx = match_label
            video_idx = best_match[match_label]
            match_audio_name = audio_name_elimin[audio_idx]
            match_video_name = video_name_elimin[video_idx]
            match_format = {match_audio_name: match_video_name}
            match_result.update(match_format)
            # Remove the matched pair from the candidate sets.
            audio_feat_elimin = np.delete(audio_feat_elimin, audio_idx, axis=0)
            video_feat_elimin = np.delete(video_feat_elimin, video_idx, axis=0)
            video_confi_elimin = np.delete(video_confi_elimin, video_idx, axis=0)
            del audio_name_elimin[audio_idx]
            del video_name_elimin[video_idx]
    # Shorten video names to their trailing two-digit index.
    for audio_name, video_name in match_result.items():
        video_idx = int(video_name[-2]+video_name[-1])
        match_result[audio_name] = video_idx
    # Merge matched and unmatched results.
    all_result = {**match_result, **unmatch_result}
    return all_result
def main():
    """Run the within-class matcher on a small hand-crafted example."""
    demo_video_feats = {
        'video00': [0, [1, 1]], 'video01': [1, [0, 1]], 'video02': [1, [2, 1]]}
    demo_audio_feats = {
        'audio0': [2, 0], 'audio1': [1, 1], 'audio2': [0, 1], 'audio3': [2, 1]}
    set_within_class_match(demo_video_feats, demo_audio_feats)


if __name__ == '__main__':
    main()
| [
"copy.deepcopy",
"numpy.array",
"numpy.delete"
] | [((270, 337), 'numpy.array', 'np.array', (['[[0, 0.05, 0.1, 0.5, 0.8, 1], [0.4, 0.6, 0.8, 1, 3, 100]]'], {}), '([[0, 0.05, 0.1, 0.5, 0.8, 1], [0.4, 0.6, 0.8, 1, 3, 100]])\n', (278, 337), True, 'import numpy as np\n'), ((3465, 3507), 'numpy.array', 'np.array', (['video_feat_list'], {'dtype': '"""float64"""'}), "(video_feat_list, dtype='float64')\n", (3473, 3507), True, 'import numpy as np\n'), ((3529, 3571), 'numpy.array', 'np.array', (['audio_feat_list'], {'dtype': '"""float64"""'}), "(audio_feat_list, dtype='float64')\n", (3537, 3571), True, 'import numpy as np\n'), ((4089, 4118), 'copy.deepcopy', 'copy.deepcopy', (['audio_feat_mat'], {}), '(audio_feat_mat)\n', (4102, 4118), False, 'import copy\n'), ((4143, 4172), 'copy.deepcopy', 'copy.deepcopy', (['video_feat_mat'], {}), '(video_feat_mat)\n', (4156, 4172), False, 'import copy\n'), ((4198, 4227), 'copy.deepcopy', 'copy.deepcopy', (['hit_confidence'], {}), '(hit_confidence)\n', (4211, 4227), False, 'import copy\n'), ((4252, 4281), 'copy.deepcopy', 'copy.deepcopy', (['all_audio_name'], {}), '(all_audio_name)\n', (4265, 4281), False, 'import copy\n'), ((4306, 4335), 'copy.deepcopy', 'copy.deepcopy', (['all_video_name'], {}), '(all_video_name)\n', (4319, 4335), False, 'import copy\n'), ((5376, 5423), 'numpy.delete', 'np.delete', (['audio_feat_elimin', 'audio_idx'], {'axis': '(0)'}), '(audio_feat_elimin, audio_idx, axis=0)\n', (5385, 5423), True, 'import numpy as np\n'), ((5456, 5503), 'numpy.delete', 'np.delete', (['video_feat_elimin', 'video_idx'], {'axis': '(0)'}), '(video_feat_elimin, video_idx, axis=0)\n', (5465, 5503), True, 'import numpy as np\n'), ((5537, 5585), 'numpy.delete', 'np.delete', (['video_confi_elimin', 'video_idx'], {'axis': '(0)'}), '(video_confi_elimin, video_idx, axis=0)\n', (5546, 5585), True, 'import numpy as np\n')] |
import cv2
import math
import numpy as np
"""
#图像编码模块:
run_length_encode: 将mask图像转换为rle编码。</br>
run_length_decode: 将rle编码还原为mask图像。</br>
"""
def run_length_encode(mask):
    """Convert a flattened binary mask into a run-length encoding.

    Args
    -----
    mask: 1-D array of pixel values; pixels equal to 255 belong to the mask.

    Returns
    --------
    rle: list of [start, length] pairs with 1-based start positions, or
         None when the image contains no mask pixels.
    """
    signals = np.where(mask == 255)[0]  # positions of mask pixels
    # Branch 0: no mask in image.  np.where never returns None, so the
    # original `signals is None` check could not fire and signals[0] below
    # raised IndexError on empty masks; test the size instead.
    if signals.size == 0:
        print("Check input image, there isn't mask.")
        return None
    head = signals[0]  # first pixel of the current run
    prev = signals[0]  # previous mask position seen
    rle = [[head + 1, 1]]
    for dot in signals:
        if dot - prev > 1:  # gap: close the current run, open a new one
            rle[-1][1] = prev - head + 1
            rle.append([dot + 1, 1])
            head = dot
        # BUGFIX: track the previous position on every iteration.  The
        # original only updated it when a gap occurred, so adjacent runs
        # longer than one pixel were split into spurious length-1 runs.
        prev = dot
    rle[-1][1] = signals[-1] - head + 1  # close the final run
    return rle
def run_length_decode(shape, rle):
    """Rebuild a mask image from its run-length encoding.

    Args
    ----
    shape: size of the original image.
    rle: list of [start, length] pairs with 1-based start positions.

    Returns
    -------
    mask: decoded mask image (transposed relative to `shape`).
    """
    flat = np.zeros(shape).flatten()
    for start, run_len in rle:  # `run_len` avoids shadowing builtin range
        flat[start - 1:start + run_len - 1] = 255
    return np.reshape(flat, shape).T
def test_mask():
    """
    Round-trip test for the mask module.
    ------------------------------------
    Encodes a sample mask, decodes it back, and re-encodes the decoded
    image so the two encodings can be compared by eye.
    """
    # Import moudles (plotting + project paths are only needed here).
    import matplotlib.pyplot as plt
    from utils.common import ROOT_DIR
    # Sample mask shipped with the training data.
    sample_path = ROOT_DIR + "/data/train/0a7d30b252359a10fd298b638b90cb9ada3acced4e0c0e5a3692013f432ee4e9/masks/39acbc082528c996d9fc3a3181938580f6c48461ee9eea91ff273e9d2c4499d0.png"
    # Test run_length_encode and run_length_decode.
    gray = np.array(cv2.imread(sample_path, cv2.IMREAD_GRAYSCALE))
    original_shape = gray.shape
    encoded = run_length_encode(gray.T.flatten())
    print(encoded)
    decoded = run_length_decode(original_shape, encoded)
    plt.imshow(decoded)
    reencoded = run_length_encode(decoded.T.flatten())
    print(reencoded)
    plt.show()
plt.show() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"cv2.imread",
"numpy.where",
"numpy.reshape"
] | [((1631, 1646), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1639, 1646), True, 'import numpy as np\n'), ((2458, 2473), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2468, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2550, 2552), True, 'import matplotlib.pyplot as plt\n'), ((469, 490), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (477, 490), True, 'import numpy as np\n'), ((1755, 1779), 'numpy.reshape', 'np.reshape', (['image', 'shape'], {}), '(image, shape)\n', (1765, 1779), True, 'import numpy as np\n'), ((2259, 2302), 'cv2.imread', 'cv2.imread', (['test_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(test_file, cv2.IMREAD_GRAYSCALE)\n', (2269, 2302), False, 'import cv2\n')] |
from blocks.bricks import (
Initializable, Linear, Random)
from blocks.bricks.base import lazy, application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.parallel import Fork
from blocks.bricks.recurrent import GatedRecurrent, Bidirectional
from blocks.roles import add_role, INITIAL_STATE, PARAMETER
from blocks.utils import shared_floatx_zeros, dict_union
from blocks.bricks import Brick
import numpy
import theano
from theano import tensor, function
import sys
sys.path.insert(1, '.')
sys.path.insert(1, './sampleRNN')
from models.conditional import three_tier
floatX = theano.config.floatX
def _simple_norm(x, eps=1e-5):
    """Normalize x over its last axis: subtract mean, divide by std + eps."""
    centered = x - tensor.shape_padright(x.mean(-1))
    scale = eps + tensor.shape_padright(x.std(-1))
    return centered / scale
def _apply_norm(x, layer_norm=True):
    """Apply the simple layer normalization when it is enabled."""
    return _simple_norm(x) if layer_norm else x
def logsumexp(x, axis=None):
    """Numerically stable log(sum(exp(x))) along `axis`."""
    x_max = tensor.max(x, axis=axis, keepdims=True)
    # Shift by the max before exponentiating to avoid overflow.
    shifted_sum = tensor.sum(tensor.exp(x - x_max), axis=axis, keepdims=True)
    z = tensor.log(shifted_sum) + x_max
    return z.sum(axis=axis)
def predict(probs, axis=-1):
    """Index of the most probable entry along `axis`."""
    best = tensor.argmax(probs, axis=axis)
    return best
# https://gist.github.com/benanne/2300591
def one_hot(t, r=None):
    """Compute one hot encoding.

    Given a tensor t of dimension d with integer values from range(r),
    return a new tensor of dimension d + 1 with 0/1 values, where the last
    dimension is a one-hot representation of the values in t.  When r is
    not given it defaults to max(t) + 1.
    """
    if r is None:
        r = tensor.max(t) + 1
    class_range = tensor.shape_padleft(tensor.arange(r), t.ndim)
    return tensor.eq(class_range, tensor.shape_padright(t, 1))
def cost_gmm(y, mu, sig, weight):
    """Gaussian mixture model negative log-likelihood.

    Computes the cost.

    Parameters
    ----------
    y : target tensor; its last axis is the feature dimension.
    mu : component means; last axis packs dim * k values.
    sig : component standard deviations, same layout as `mu`.
    weight : mixture weights; last axis of size k.
    """
    n_dim = y.ndim
    shape_y = y.shape
    k = weight.shape[-1]
    # Flatten all leading axes and broadcast y against the k components.
    y = y.reshape((-1, shape_y[-1]))
    y = tensor.shape_padright(y)
    mu = mu.reshape((-1, shape_y[-1], k))
    sig = sig.reshape((-1, shape_y[-1], k))
    weight = weight.reshape((-1, k))
    # Diagonal Gaussian log-density, summed over the feature axis (-2).
    diff = tensor.sqr(y - mu)
    inner = -0.5 * tensor.sum(
        diff / sig**2 +
        2 * tensor.log(sig) + tensor.log(2 * numpy.pi), axis=-2)
    # Log-sum-exp over the mixture components.
    nll = -logsumexp(tensor.log(weight) + inner, axis=-1)
    return nll.reshape(shape_y[:-1], ndim=n_dim - 1)
def sample_gmm(mu, sigma, weight, theano_rng):
    """Draw one sample per row from a diagonal Gaussian mixture.

    A component is picked per row with a multinomial draw over `weight`,
    then a sample is taken from that Gaussian by reparameterization.
    """
    k = weight.shape[-1]
    dim = mu.shape[-1] / k  # feature dimension per component
    shape_result = weight.shape
    shape_result = tensor.set_subtensor(shape_result[-1], dim)
    ndim_result = weight.ndim
    mu = mu.reshape((-1, dim, k))
    sigma = sigma.reshape((-1, dim, k))
    weight = weight.reshape((-1, k))
    # One-hot multinomial pick of a mixture component per row.
    sample_weight = theano_rng.multinomial(pvals=weight, dtype=weight.dtype)
    idx = predict(sample_weight, axis=-1)
    # Select the chosen component's parameters for every row.
    mu = mu[tensor.arange(mu.shape[0]), :, idx]
    sigma = sigma[tensor.arange(sigma.shape[0]), :, idx]
    # Reparameterization: mu + sigma * standard normal noise.
    epsilon = theano_rng.normal(
        size=mu.shape, avg=0., std=1., dtype=mu.dtype)
    result = mu + sigma * epsilon
    return result.reshape(shape_result, ndim=ndim_result)
class SampleRnn(Brick):
    """Brick wrapper exposing the three_tier sampleRNN implementation.

    The wrapped three_tier graph owns the actual computation; this brick
    registers its parameters so blocks can train and serialize them.
    """
    def __init__(self, **kwargs):
        super(SampleRnn, self).__init__(**kwargs)
        # Build the three_tier graph once just to collect its parameters.
        _, _, self.parameters, _, _, _, _ = three_tier.compute_cost(*self.raw_inputs())
        for p in self.parameters:
            add_role(p, PARAMETER)
        self.N_RNN = three_tier.N_RNN
    def raw_inputs(self):
        # Fresh symbolic placeholders matching three_tier.compute_cost.
        seq = tensor.imatrix('rseq')
        feat = tensor.tensor3('rfeat')
        h0_ = tensor.tensor3('rh0')
        big_h0_ = tensor.tensor3('rbigh0')
        res_ = tensor.scalar('rscalar')
        mask_ = tensor.matrix('rmask')
        return seq, feat, h0_, big_h0_, res_, mask_
    @application
    def apply(self, sequences, features, h0, big_h0, reset, mask):
        # Delegates entirely to the three_tier cost graph.
        cost, ip_cost, all_params, ip_params, other_params, new_h0, new_big_h0 = \
            three_tier.compute_cost(sequences, features, h0, big_h0, reset, mask)
        return cost, ip_cost, all_params, ip_params, other_params, new_h0, new_big_h0
    def initial_states(self, batch_size):
        # Persistent shared state carried between minibatches.
        big_h0_shape = (batch_size, three_tier.N_RNN, three_tier.H0_MULT*three_tier.BIG_DIM)
        last_big_h0 = shared_floatx_zeros(big_h0_shape)
        h0_shape = (batch_size, three_tier.N_RNN, three_tier.H0_MULT*three_tier.DIM)
        last_h0 = shared_floatx_zeros(h0_shape)
        return last_h0, last_big_h0
    def sample_raw(self, test_feats, features_length, tag, path_to_save):
        # Generation path: compile the three sampling functions and write
        # the generated audio under `path_to_save`.
        seq, feat, h0_, big_h0_, res_, mask_ = self.raw_inputs()
        big_frame_gen, frame_gen, sample_gen = three_tier.getting_generation_functions(
            seq, h0_, big_h0_, res_, feat)
        three_tier.generate_and_save_samples(
            tag,
            path_to_save=path_to_save,
            features=test_feats,
            features_length=features_length,
            noise_level=0.,
            big_frame_level_generate_fn=big_frame_gen,
            frame_level_generate_fn=frame_gen,
            sample_level_generate_fn=sample_gen,
            npy_address=None)
class RecurrentWithFork(Initializable):
    """Wrap a recurrent application with a Fork projecting its inputs.

    The fork creates one projection of `input_` per sequence expected by
    the wrapped recurrent (all except the mask), so an arbitrary input
    dimension can drive the transition.
    """
    # Obtained from Dima's code. @rizar
    # https://github.com/rizar/attention-lvcsr/blob/master/lvsr/bricks/__init__.py
    @lazy(allocation=['input_dim'])
    def __init__(self, recurrent, input_dim, **kwargs):
        super(RecurrentWithFork, self).__init__(**kwargs)
        self.recurrent = recurrent
        self.input_dim = input_dim
        self.fork = Fork(
            [name for name in self.recurrent.sequences
             if name != 'mask'], prototype=Linear())
        self.children = [recurrent.brick, self.fork]
    def _push_allocation_config(self):
        # Each fork output must match the dimension the recurrent expects.
        self.fork.input_dim = self.input_dim
        self.fork.output_dims = [self.recurrent.brick.get_dim(name)
                                 for name in self.fork.output_names]
    @application(inputs=['input_', 'mask'])
    def apply(self, input_, mask=None, **kwargs):
        # Feed the forked projections to the recurrent as keyword inputs.
        return self.recurrent(
            mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
                                    kwargs))
    @apply.property('outputs')
    def apply_outputs(self):
        return self.recurrent.states
class Encoder(Initializable):
    """Label encoder: identity passthrough or bidirectional RNN encoder.

    With encoder_type None the labels pass through unchanged (then
    num_characters must equal input_dim); with 'bidirectional' the labels
    are embedded and run through a bidirectional gated recurrent encoder.
    """
    def __init__(
            self,
            encoder_type,
            num_characters,
            input_dim,
            encoder_dim,
            **kwargs):
        assert encoder_type in [None, 'bidirectional']
        self.encoder_type = encoder_type
        super(Encoder, self).__init__(**kwargs)
        self.children = []
        if encoder_type in ['lookup', 'bidirectional']:
            self.embed_label = LookupTable(
                num_characters,
                input_dim,
                name='embed_label')
            self.children += [
                self.embed_label]
        else:
            # If there is no encoder.
            assert num_characters == input_dim
        if encoder_type == 'bidirectional':
            transition = RecurrentWithFork(
                GatedRecurrent(dim=encoder_dim).apply,
                input_dim, name='encoder_transition')
            self.encoder = Bidirectional(transition, name='encoder')
            self.children.append(self.encoder)
    @application
    def apply(self, x, x_mask=None):
        # Identity when no encoder is configured.
        if self.encoder_type is None:
            return x
        if self.encoder_type in ['lookup', 'bidirectional']:
            embed_x = self.embed_label.apply(x)
        if self.encoder_type == 'lookup':
            encoded_x = embed_x
        if self.encoder_type == 'bidirectional':
            encoded_x = self.encoder.apply(embed_x, x_mask)
        return encoded_x
class Parrot(Initializable, Random):
    def __init__(
            self,
            input_dim=420,  # Dimension of the text labels
            output_dim=63,  # Dimension of vocoder frames
            rnn_h_dim=1024,  # Size of rnn hidden state
            readouts_dim=1024,  # Size of readouts (summary of rnn)
            weak_feedback=False,  # Feedback to the top rnn layer
            full_feedback=False,  # Feedback to all rnn layers
            feedback_noise_level=None,  # Amount of noise in feedback
            layer_norm=False,  # Use simple normalization?
            use_speaker=False,  # Condition on the speaker id?
            num_speakers=21,  # How many speakers there are?
            speaker_dim=128,  # Size of speaker embedding
            which_cost='MSE',  # Train with MSE or GMM
            k_gmm=20,  # How many components in the GMM
            sampling_bias=0,  # Make samples more likely (Graves13)
            epsilon=1e-5,  # Numerical stabilities
            num_characters=43,  # how many chars in the labels
            attention_type='graves',  # graves or softmax
            attention_size=10,  # number of gaussians in the attention
            attention_alignment=1.,  # audio steps per letter at initialization
            sharpening_coeff=1.,
            timing_coeff=1.,
            encoder_type=None,
            encoder_dim=128,
            raw_output=False,
            **kwargs):
        """Wire up all bricks of the 3-layer attention-RNN vocoder model.

        Builds the label encoder, the three stacked gated-recurrent layers
        with their input/readout projections, the Graves-style attention
        head, and the optional speaker-conditioning, feedback and raw
        sampleRNN output paths.
        """
        super(Parrot, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.rnn_h_dim = rnn_h_dim
        self.readouts_dim = readouts_dim
        self.layer_norm = layer_norm
        self.which_cost = which_cost
        self.use_speaker = use_speaker
        self.full_feedback = full_feedback
        self.feedback_noise_level = feedback_noise_level
        self.epsilon = epsilon
        self.num_characters = num_characters
        self.attention_type = attention_type
        self.attention_alignment = attention_alignment
        self.attention_size = attention_size
        self.sharpening_coeff = sharpening_coeff
        self.timing_coeff = timing_coeff
        self.encoder_type = encoder_type
        self.encoder_dim = encoder_dim
        self.encoded_input_dim = input_dim
        self.raw_output = raw_output
        # A bidirectional encoder concatenates both directions.
        if self.encoder_type == 'bidirectional':
            self.encoded_input_dim = 2 * encoder_dim
        if self.feedback_noise_level is not None:
            self.noise_level_var = tensor.scalar('feedback_noise_level')
        # Three stacked recurrent layers.
        self.rnn1 = GatedRecurrent(dim=rnn_h_dim, name='rnn1')
        self.rnn2 = GatedRecurrent(dim=rnn_h_dim, name='rnn2')
        self.rnn3 = GatedRecurrent(dim=rnn_h_dim, name='rnn3')
        # Each layer contributes to the shared readout.
        self.h1_to_readout = Linear(
            input_dim=rnn_h_dim,
            output_dim=readouts_dim,
            name='h1_to_readout')
        self.h2_to_readout = Linear(
            input_dim=rnn_h_dim,
            output_dim=readouts_dim,
            name='h2_to_readout')
        self.h3_to_readout = Linear(
            input_dim=rnn_h_dim,
            output_dim=readouts_dim,
            name='h3_to_readout')
        # Skip connections between the recurrent layers.
        self.h1_to_h2 = Fork(
            output_names=['rnn2_inputs', 'rnn2_gates'],
            input_dim=rnn_h_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='h1_to_h2')
        self.h1_to_h3 = Fork(
            output_names=['rnn3_inputs', 'rnn3_gates'],
            input_dim=rnn_h_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='h1_to_h3')
        self.h2_to_h3 = Fork(
            output_names=['rnn3_inputs', 'rnn3_gates'],
            input_dim=rnn_h_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='h2_to_h3')
        # Output head: plain regression for MSE, (mu, sigma, coeff) for GMM.
        if which_cost == 'MSE':
            self.readout_to_output = Linear(
                input_dim=readouts_dim,
                output_dim=output_dim,
                name='readout_to_output')
        elif which_cost == 'GMM':
            self.sampling_bias = sampling_bias
            self.k_gmm = k_gmm
            self.readout_to_output = Fork(
                output_names=['gmm_mu', 'gmm_sigma', 'gmm_coeff'],
                input_dim=readouts_dim,
                output_dims=[output_dim * k_gmm, output_dim * k_gmm, k_gmm],
                name='readout_to_output')
        self.encoder = Encoder(
            encoder_type,
            num_characters,
            input_dim,
            encoder_dim,
            name='encoder')
        self.children = [
            self.encoder,
            self.rnn1,
            self.rnn2,
            self.rnn3,
            self.h1_to_readout,
            self.h2_to_readout,
            self.h3_to_readout,
            self.h1_to_h2,
            self.h1_to_h3,
            self.h2_to_h3,
            self.readout_to_output]
        # Attention window feeds each recurrent layer.
        self.inp_to_h1 = Fork(
            output_names=['rnn1_inputs', 'rnn1_gates'],
            input_dim=self.encoded_input_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='inp_to_h1')
        self.inp_to_h2 = Fork(
            output_names=['rnn2_inputs', 'rnn2_gates'],
            input_dim=self.encoded_input_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='inp_to_h2')
        self.inp_to_h3 = Fork(
            output_names=['rnn3_inputs', 'rnn3_gates'],
            input_dim=self.encoded_input_dim,
            output_dims=[rnn_h_dim, 2 * rnn_h_dim],
            name='inp_to_h3')
        self.children += [
            self.inp_to_h1,
            self.inp_to_h2,
            self.inp_to_h3]
        # Attention parameters (alpha, beta, kappa) come from layer 1.
        self.h1_to_att = Fork(
            output_names=['alpha', 'beta', 'kappa'],
            input_dim=rnn_h_dim,
            output_dims=[attention_size] * 3,
            name='h1_to_att')
        self.att_to_readout = Linear(
            input_dim=self.encoded_input_dim,
            output_dim=readouts_dim,
            name='att_to_readout')
        self.children += [
            self.h1_to_att,
            self.att_to_readout]
        # Optional speaker conditioning injected at every layer and output.
        if use_speaker:
            self.num_speakers = num_speakers
            self.speaker_dim = speaker_dim
            self.embed_speaker = LookupTable(num_speakers, speaker_dim)
            self.speaker_to_h1 = Fork(
                output_names=['rnn1_inputs', 'rnn1_gates'],
                input_dim=speaker_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='speaker_to_h1')
            self.speaker_to_h2 = Fork(
                output_names=['rnn2_inputs', 'rnn2_gates'],
                input_dim=speaker_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='speaker_to_h2')
            self.speaker_to_h3 = Fork(
                output_names=['rnn3_inputs', 'rnn3_gates'],
                input_dim=speaker_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='speaker_to_h3')
            self.speaker_to_readout = Linear(
                input_dim=speaker_dim,
                output_dim=readouts_dim,
                name='speaker_to_readout')
            if which_cost == 'MSE':
                self.speaker_to_output = Linear(
                    input_dim=speaker_dim,
                    output_dim=output_dim,
                    name='speaker_to_output')
            elif which_cost == 'GMM':
                self.speaker_to_output = Fork(
                    output_names=['gmm_mu', 'gmm_sigma', 'gmm_coeff'],
                    input_dim=speaker_dim,
                    output_dims=[
                        output_dim * k_gmm, output_dim * k_gmm, k_gmm],
                    name='speaker_to_output')
            self.children += [
                self.embed_speaker,
                self.speaker_to_h1,
                self.speaker_to_h2,
                self.speaker_to_h3,
                self.speaker_to_readout,
                self.speaker_to_output]
        if full_feedback:
            self.out_to_h2 = Fork(
                output_names=['rnn2_inputs', 'rnn2_gates'],
                input_dim=output_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='out_to_h2')
            self.out_to_h3 = Fork(
                output_names=['rnn3_inputs', 'rnn3_gates'],
                input_dim=output_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='out_to_h3')
            self.children += [
                self.out_to_h2,
                self.out_to_h3]
            # Full feedback implies weak feedback to layer 1.
            weak_feedback = True
        self.weak_feedback = weak_feedback
        if weak_feedback:
            self.out_to_h1 = Fork(
                output_names=['rnn1_inputs', 'rnn1_gates'],
                input_dim=output_dim,
                output_dims=[rnn_h_dim, 2 * rnn_h_dim],
                name='out_to_h1')
            self.children += [
                self.out_to_h1]
        # Optional raw-waveform head driven by sampleRNN.
        if self.raw_output:
            self.sampleRnn = SampleRnn()
            self.children += [self.sampleRnn]
    def _allocate(self):
        # Trainable initial value for the attention window vector `w`;
        # it is repeated per batch element in initial_states().
        self.initial_w = shared_floatx_zeros(
            (self.encoded_input_dim,), name="initial_w")
        add_role(self.initial_w, INITIAL_STATE)
def symbolic_input_variables(self):
features = tensor.tensor3('features')
features_mask = tensor.matrix('features_mask')
labels = tensor.imatrix('labels')
labels_mask = tensor.matrix('labels_mask')
start_flag = tensor.scalar('start_flag')
if self.use_speaker:
speaker = tensor.imatrix('speaker_index')
else:
speaker = None
if self.raw_output:
raw_sequence = tensor.itensor3('raw_audio')
else:
raw_sequence = None
return features, features_mask, labels, labels_mask, \
speaker, start_flag, raw_sequence
    def initial_states(self, batch_size):
        """Return (initial, last) pairs for every recurrent/attention state.

        `initial_*` are the values used at the start of a new utterance;
        `last_*` are shared variables holding the state carried over from
        the previous minibatch.
        """
        initial_h1 = self.rnn1.initial_states(batch_size)
        initial_h2 = self.rnn2.initial_states(batch_size)
        initial_h3 = self.rnn3.initial_states(batch_size)
        last_h1 = shared_floatx_zeros((batch_size, self.rnn_h_dim))
        last_h2 = shared_floatx_zeros((batch_size, self.rnn_h_dim))
        last_h3 = shared_floatx_zeros((batch_size, self.rnn_h_dim))
        # Defining for all
        initial_k = tensor.zeros(
            (batch_size, self.attention_size), dtype=floatX)
        last_k = shared_floatx_zeros((batch_size, self.attention_size))
        # Trainable initial state for w. Why not for k?
        initial_w = tensor.repeat(self.initial_w[None, :], batch_size, 0)
        last_w = shared_floatx_zeros((batch_size, self.encoded_input_dim))
        return initial_h1, last_h1, initial_h2, last_h2, initial_h3, last_h3, \
            initial_w, last_w, initial_k, last_k
    @application
    def compute_cost(
            self, features, features_mask, labels, labels_mask,
            speaker, start_flag, batch_size, raw_audio=None):
        """Build the symbolic training cost and state updates for a batch.

        Returns (cost, updates, attention_vars, cost_raw): `updates` carries
        the recurrent/attention state between minibatches, `attention_vars`
        holds monitoring values [next_x, k, w, coeff, phi, pi_att], and
        `cost_raw` is the sampleRNN cost (None unless raw_output is on).
        """
        if speaker is None:
            assert not self.use_speaker
        # Predict frame t+1 from everything up to frame t.
        target_features = features[1:]
        mask = features_mask[1:]
        cell_shape = (mask.shape[0], batch_size, self.rnn_h_dim)
        gat_shape = (mask.shape[0], batch_size, 2 * self.rnn_h_dim)
        # Per-layer accumulators for the input and gate pre-activations.
        cell_h1 = tensor.zeros(cell_shape, dtype=floatX)
        cell_h2 = tensor.zeros(cell_shape, dtype=floatX)
        cell_h3 = tensor.zeros(cell_shape, dtype=floatX)
        gat_h1 = tensor.zeros(gat_shape, dtype=floatX)
        gat_h2 = tensor.zeros(gat_shape, dtype=floatX)
        gat_h3 = tensor.zeros(gat_shape, dtype=floatX)
        # Feedback of the previous output frame into layer 1.
        if self.weak_feedback:
            input_features = features[:-1]
            if self.feedback_noise_level:
                noise = self.theano_rng.normal(
                    size=input_features.shape,
                    avg=0., std=1.)
                input_features += self.noise_level_var * noise
            out_cell_h1, out_gat_h1 = self.out_to_h1.apply(input_features)
            to_normalize = [
                out_cell_h1, out_gat_h1]
            out_cell_h1, out_gat_h1 = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            cell_h1 += out_cell_h1
            gat_h1 += out_gat_h1
        # Feedback into layers 2 and 3 as well.
        if self.full_feedback:
            assert self.weak_feedback
            out_cell_h2, out_gat_h2 = self.out_to_h2.apply(input_features)
            out_cell_h3, out_gat_h3 = self.out_to_h3.apply(input_features)
            to_normalize = [
                out_cell_h2, out_gat_h2, out_cell_h3, out_gat_h3]
            out_cell_h2, out_gat_h2, out_cell_h3, out_gat_h3 = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            cell_h2 += out_cell_h2
            gat_h2 += out_gat_h2
            cell_h3 += out_cell_h3
            gat_h3 += out_gat_h3
        # Speaker conditioning added to every layer's pre-activations.
        if self.use_speaker:
            speaker = speaker[:, 0]
            emb_speaker = self.embed_speaker.apply(speaker)
            emb_speaker = tensor.shape_padleft(emb_speaker)
            spk_cell_h1, spk_gat_h1 = self.speaker_to_h1.apply(emb_speaker)
            spk_cell_h2, spk_gat_h2 = self.speaker_to_h2.apply(emb_speaker)
            spk_cell_h3, spk_gat_h3 = self.speaker_to_h3.apply(emb_speaker)
            to_normalize = [
                spk_cell_h1, spk_gat_h1, spk_cell_h2, spk_gat_h2,
                spk_cell_h3, spk_gat_h3]
            spk_cell_h1, spk_gat_h1, spk_cell_h2, spk_gat_h2, \
                spk_cell_h3, spk_gat_h3, = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            cell_h1 = spk_cell_h1 + cell_h1
            cell_h2 = spk_cell_h2 + cell_h2
            cell_h3 = spk_cell_h3 + cell_h3
            gat_h1 = spk_gat_h1 + gat_h1
            gat_h2 = spk_gat_h2 + gat_h2
            gat_h3 = spk_gat_h3 + gat_h3
        initial_h1, last_h1, initial_h2, last_h2, initial_h3, last_h3, \
            initial_w, last_w, initial_k, last_k = \
            self.initial_states(batch_size)
        # If it's a new example, use initial states.
        input_h1 = tensor.switch(
            start_flag, initial_h1, last_h1)
        input_h2 = tensor.switch(
            start_flag, initial_h2, last_h2)
        input_h3 = tensor.switch(
            start_flag, initial_h3, last_h3)
        input_w = tensor.switch(
            start_flag, initial_w, last_w)
        input_k = tensor.switch(
            start_flag, initial_k, last_k)
        # Encoded labels, masked; u indexes letter positions for attention.
        context_oh = self.encoder.apply(labels) * \
            tensor.shape_padright(labels_mask)
        u = tensor.shape_padleft(
            tensor.arange(labels.shape[1], dtype=floatX), 2)
        def step(
                inp_h1_t, gat_h1_t, inp_h2_t, gat_h2_t, inp_h3_t, gat_h3_t,
                h1_tm1, h2_tm1, h3_tm1, k_tm1, w_tm1, context_oh):
            # One time step: layer 1 runs first and drives the attention.
            attinp_h1, attgat_h1 = self.inp_to_h1.apply(w_tm1)
            inp_h1_t += attinp_h1
            gat_h1_t += attgat_h1
            h1_t = self.rnn1.apply(
                inp_h1_t,
                gat_h1_t,
                h1_tm1, iterate=False)
            # Attention parameters: amplitude a, width b, position k.
            a_t, b_t, k_t = self.h1_to_att.apply(h1_t)
            if self.attention_type == "softmax":
                a_t = tensor.nnet.softmax(a_t) + self.epsilon
            else:
                a_t = tensor.exp(a_t) + self.epsilon
                b_t = tensor.exp(b_t) + self.epsilon
                # Monotonic position update (Graves-style attention).
                k_t = k_tm1 + self.attention_alignment * tensor.exp(k_t)
            a_t_ = a_t
            a_t = tensor.shape_padright(a_t)
            b_t = tensor.shape_padright(b_t)
            k_t_ = tensor.shape_padright(k_t)
            # batch size X att size X len context
            if self.attention_type == "softmax":
                # numpy.sqrt(1/(2*numpy.pi)) is the weird number
                phi_t = 0.3989422917366028 * tensor.sum(
                    a_t * tensor.sqrt(b_t) *
                    tensor.exp(-0.5 * b_t * (k_t_ - u)**2), axis=1)
            else:
                phi_t = tensor.sum(
                    a_t * tensor.exp(-b_t * (k_t_ - u)**2), axis=1)
            # batch size X len context X num letters
            w_t = (tensor.shape_padright(phi_t) * context_oh).sum(axis=1)
            # Attention window feeds layers 2 and 3.
            attinp_h2, attgat_h2 = self.inp_to_h2.apply(w_t)
            attinp_h3, attgat_h3 = self.inp_to_h3.apply(w_t)
            inp_h2_t += attinp_h2
            gat_h2_t += attgat_h2
            inp_h3_t += attinp_h3
            gat_h3_t += attgat_h3
            h1inp_h2, h1gat_h2 = self.h1_to_h2.apply(h1_t)
            h1inp_h3, h1gat_h3 = self.h1_to_h3.apply(h1_t)
            to_normalize = [
                h1inp_h2, h1gat_h2, h1inp_h3, h1gat_h3]
            h1inp_h2, h1gat_h2, h1inp_h3, h1gat_h3 = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            h2_t = self.rnn2.apply(
                inp_h2_t + h1inp_h2,
                gat_h2_t + h1gat_h2,
                h2_tm1, iterate=False)
            h2inp_h3, h2gat_h3 = self.h2_to_h3.apply(h2_t)
            to_normalize = [
                h2inp_h3, h2gat_h3]
            h2inp_h3, h2gat_h3 = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            h3_t = self.rnn3.apply(
                inp_h3_t + h1inp_h3 + h2inp_h3,
                gat_h3_t + h1gat_h3 + h2gat_h3,
                h3_tm1, iterate=False)
            return h1_t, h2_t, h3_t, k_t, w_t, phi_t, a_t_
        (h1, h2, h3, k, w, phi, pi_att), scan_updates = theano.scan(
            fn=step,
            sequences=[cell_h1, gat_h1, cell_h2, gat_h2, cell_h3, gat_h3],
            non_sequences=[context_oh],
            outputs_info=[
                input_h1,
                input_h2,
                input_h3,
                input_k,
                input_w,
                None,
                None])
        # Readout sums contributions from all layers (+speaker, +attention).
        h1_out = self.h1_to_readout.apply(h1)
        h2_out = self.h2_to_readout.apply(h2)
        h3_out = self.h3_to_readout.apply(h3)
        to_normalize = [
            h1_out, h2_out, h3_out]
        h1_out, h2_out, h3_out = \
            [_apply_norm(x, self.layer_norm) for x in to_normalize]
        readouts = h1_out + h2_out + h3_out
        if self.use_speaker:
            readouts += self.speaker_to_readout.apply(emb_speaker)
        readouts += self.att_to_readout.apply(w)
        predicted = self.readout_to_output.apply(readouts)
        if self.which_cost == 'MSE':
            if self.use_speaker:
                predicted += self.speaker_to_output.apply(emb_speaker)
            cost = tensor.sum((predicted - target_features) ** 2, axis=-1)
            next_x = predicted
            # Dummy value for coeff
            coeff = predicted
        elif self.which_cost == 'GMM':
            mu, sigma, coeff = predicted
            if self.use_speaker:
                spk_to_out = self.speaker_to_output.apply(emb_speaker)
                mu += spk_to_out[0]
                sigma += spk_to_out[1]
                coeff += spk_to_out[2]
            # When training there should not be sampling_bias
            sigma = tensor.exp(sigma) + self.epsilon
            coeff = tensor.nnet.softmax(
                coeff.reshape(
                    (-1, self.k_gmm))).reshape(
                        coeff.shape) + self.epsilon
            cost = cost_gmm(target_features, mu, sigma, coeff)
            next_x = sample_gmm(mu, sigma, coeff, self.theano_rng)
        # Masked mean; the 0.*start_flag keeps start_flag in the graph.
        cost = (cost * mask).sum() / (mask.sum() + 1e-5) + 0. * start_flag
        # Carry the final states over to the next minibatch.
        updates = []
        updates.append((last_h1, h1[-1]))
        updates.append((last_h2, h2[-1]))
        updates.append((last_h3, h3[-1]))
        updates.append((last_k, k[-1]))
        updates.append((last_w, w[-1]))
        cost_raw = None
        if self.raw_output:
            # 80 raw audio samples per vocoder frame (upsampled mask).
            raw_mask = tensor.extra_ops.repeat(features_mask, 80, axis=0)
            raw_mask = raw_mask.dimshuffle(1, 0)
            # breakpointOp = PdbBreakpoint("Raw mask breakpoint")
            # condition = tensor.gt(raw_mask.shape[0], 0)
            # raw_mask = breakpointOp(condition, raw_mask)
            predicted_transposed = predicted.dimshuffle(1, 0, 2)
            last_h0, last_big_h0 = self.sampleRnn.initial_states(batch_size)
            raw_audio_reshaped = raw_audio.dimshuffle(1, 0, 2)
            raw_audio_reshaped = raw_audio_reshaped.reshape((raw_audio_reshaped.shape[0], -1))
            cost_raw, ip_cost, all_params, ip_params, other_params, new_h0, new_big_h0 =\
                self.sampleRnn.apply(raw_audio_reshaped, predicted_transposed, last_h0, last_big_h0, start_flag, raw_mask)
            if self.sampleRnn.N_RNN == 1:
                new_h0 = tensor.unbroadcast(new_h0, 1)
                new_big_h0 = tensor.unbroadcast(new_big_h0, 1)
            updates.append((last_h0, new_h0))
            updates.append((last_big_h0, new_big_h0))
            # cost = cost + 80.*cost_raw
            alpha_ = numpy.float32(0.)
            beta_ = numpy.float32(1.)
            cost = alpha_*cost + beta_*cost_raw
        attention_vars = [next_x, k, w, coeff, phi, pi_att]
        return cost, scan_updates + updates, attention_vars, cost_raw
@application
def sample_model_fun(
        self, labels, labels_mask, speaker, num_samples, seq_size):
    """Build the symbolic graph that samples ``seq_size`` output frames.

    labels / labels_mask : symbolic label indices and their mask; the
        encoder output over them is attended at every step.
    speaker : symbolic speaker indices, only read when
        ``self.use_speaker`` is set.
    num_samples : number of sequences generated in parallel.
    seq_size : number of time steps to unroll.

    Returns ``(sample_x, k, w, pi, phi, pi_att, updates)``: the sampled
    frames, the per-step attention internals, and the scan updates.
    """
    initial_h1, last_h1, initial_h2, last_h2, initial_h3, last_h3, \
        initial_w, last_w, initial_k, last_k = \
        self.initial_states(num_samples)
    # The first output frame fed back into the network is all zeros.
    initial_x = numpy.zeros(
        (num_samples, self.output_dim), dtype=floatX)
    cell_shape = (seq_size, num_samples, self.rnn_h_dim)
    gat_shape = (seq_size, num_samples, 2 * self.rnn_h_dim)
    # Per-step input/gate biases for the three recurrent layers; they
    # stay zero unless the speaker embedding adds a contribution below.
    cell_h1 = tensor.zeros(cell_shape, dtype=floatX)
    cell_h2 = tensor.zeros(cell_shape, dtype=floatX)
    cell_h3 = tensor.zeros(cell_shape, dtype=floatX)
    gat_h1 = tensor.zeros(gat_shape, dtype=floatX)
    gat_h2 = tensor.zeros(gat_shape, dtype=floatX)
    gat_h3 = tensor.zeros(gat_shape, dtype=floatX)
    if self.use_speaker:
        speaker = speaker[:, 0]
        emb_speaker = self.embed_speaker.apply(speaker)
        # Applied before the broadcast.
        spk_readout = self.speaker_to_readout.apply(emb_speaker)
        spk_output = self.speaker_to_output.apply(emb_speaker)
        # Add dimension to repeat with time.
        emb_speaker = tensor.shape_padleft(emb_speaker)
        spk_cell_h1, spk_gat_h1 = self.speaker_to_h1.apply(emb_speaker)
        spk_cell_h2, spk_gat_h2 = self.speaker_to_h2.apply(emb_speaker)
        spk_cell_h3, spk_gat_h3 = self.speaker_to_h3.apply(emb_speaker)
        to_normalize = [
            spk_cell_h1, spk_gat_h1, spk_cell_h2, spk_gat_h2,
            spk_cell_h3, spk_gat_h3]
        spk_cell_h1, spk_gat_h1, spk_cell_h2, spk_gat_h2, \
            spk_cell_h3, spk_gat_h3, = \
            [_apply_norm(x, self.layer_norm) for x in to_normalize]
        cell_h1 += spk_cell_h1
        cell_h2 += spk_cell_h2
        cell_h3 += spk_cell_h3
        gat_h1 += spk_gat_h1
        gat_h2 += spk_gat_h2
        gat_h3 += spk_gat_h3
    # Encoded label context with padded positions zeroed by the mask.
    context_oh = self.encoder.apply(labels) * \
        tensor.shape_padright(labels_mask)
    # u enumerates label positions 0..len-1 for the attention window.
    u = tensor.shape_padleft(
        tensor.arange(labels.shape[1], dtype=floatX), 2)

    def sample_step(
            inp_cell_h1_t, inp_gat_h1_t, inp_cell_h2_t, inp_gat_h2_t,
            inp_cell_h3_t, inp_gat_h3_t, x_tm1, h1_tm1, h2_tm1, h3_tm1,
            k_tm1, w_tm1):
        # One decoding step: consume the previous sample x_tm1, the
        # previous hidden states and attention state (k_tm1, w_tm1),
        # and emit the next sample plus the updated states.
        cell_h1_t = inp_cell_h1_t
        cell_h2_t = inp_cell_h2_t
        cell_h3_t = inp_cell_h3_t
        gat_h1_t = inp_gat_h1_t
        gat_h2_t = inp_gat_h2_t
        gat_h3_t = inp_gat_h3_t
        # The previous attention window conditions layer 1.
        attinp_h1, attgat_h1 = self.inp_to_h1.apply(w_tm1)
        cell_h1_t += attinp_h1
        gat_h1_t += attgat_h1
        if self.weak_feedback:
            # Feed the previous output sample into layer 1 only.
            out_cell_h1_t, out_gat_h1_t = self.out_to_h1.apply(x_tm1)
            to_normalize = [
                out_cell_h1_t, out_gat_h1_t]
            out_cell_h1_t, out_gat_h1_t = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            cell_h1_t += out_cell_h1_t
            gat_h1_t += out_gat_h1_t
        if self.full_feedback:
            # Feed the previous output sample into layers 2 and 3 too.
            out_cell_h2_t, out_gat_h2_t = self.out_to_h2.apply(x_tm1)
            out_cell_h3_t, out_gat_h3_t = self.out_to_h3.apply(x_tm1)
            to_normalize = [
                out_cell_h2_t, out_gat_h2_t,
                out_cell_h3_t, out_gat_h3_t]
            out_cell_h2_t, out_gat_h2_t, \
                out_cell_h3_t, out_gat_h3_t = \
                [_apply_norm(x, self.layer_norm) for x in to_normalize]
            cell_h2_t += out_cell_h2_t
            cell_h3_t += out_cell_h3_t
            gat_h2_t += out_gat_h2_t
            gat_h3_t += out_gat_h3_t
        h1_t = self.rnn1.apply(
            cell_h1_t,
            gat_h1_t,
            h1_tm1, iterate=False)
        # Attention parameters predicted from layer 1's state:
        # amplitudes a, widths b, window locations k.
        a_t, b_t, k_t = self.h1_to_att.apply(h1_t)
        if self.attention_type == "softmax":
            a_t = tensor.nnet.softmax(a_t) + self.epsilon
        else:
            a_t = tensor.exp(a_t) + self.epsilon
        b_t = tensor.exp(b_t) * self.sharpening_coeff + self.epsilon
        # Locations only ever move forward (monotonic alignment): the
        # increment is a strictly positive exp(.) term.
        k_t = k_tm1 + self.attention_alignment * \
            tensor.exp(k_t) / self.timing_coeff
        a_t_ = a_t
        a_t = tensor.shape_padright(a_t)
        b_t = tensor.shape_padright(b_t)
        k_t_ = tensor.shape_padright(k_t)
        # batch size X att size X len context
        if self.attention_type == "softmax":
            # 0.3989422917366028 == numpy.sqrt(1 / (2 * numpy.pi)),
            # the Gaussian normalization constant.
            phi_t = 0.3989422917366028 * tensor.sum(
                a_t * tensor.sqrt(b_t) *
                tensor.exp(-0.5 * b_t * (k_t_ - u)**2), axis=1)
        else:
            phi_t = tensor.sum(
                a_t * tensor.exp(-b_t * (k_t_ - u)**2), axis=1)
        # batch size X len context X num letters
        w_t = (tensor.shape_padright(phi_t) * context_oh).sum(axis=1)
        # The fresh attention window conditions layers 2 and 3.
        attinp_h2, attgat_h2 = self.inp_to_h2.apply(w_t)
        attinp_h3, attgat_h3 = self.inp_to_h3.apply(w_t)
        cell_h2_t += attinp_h2
        gat_h2_t += attgat_h2
        cell_h3_t += attinp_h3
        gat_h3_t += attgat_h3
        # Skip connections h1 -> h2, h1 -> h3 and h2 -> h3.
        h1inp_h2, h1gat_h2 = self.h1_to_h2.apply(h1_t)
        h1inp_h3, h1gat_h3 = self.h1_to_h3.apply(h1_t)
        to_normalize = [
            h1inp_h2, h1gat_h2, h1inp_h3, h1gat_h3]
        h1inp_h2, h1gat_h2, h1inp_h3, h1gat_h3 = \
            [_apply_norm(x, self.layer_norm) for x in to_normalize]
        h2_t = self.rnn2.apply(
            cell_h2_t + h1inp_h2,
            gat_h2_t + h1gat_h2,
            h2_tm1, iterate=False)
        h2inp_h3, h2gat_h3 = self.h2_to_h3.apply(h2_t)
        to_normalize = [
            h2inp_h3, h2gat_h3]
        h2inp_h3, h2gat_h3 = \
            [_apply_norm(x, self.layer_norm) for x in to_normalize]
        h3_t = self.rnn3.apply(
            cell_h3_t + h1inp_h3 + h2inp_h3,
            gat_h3_t + h1gat_h3 + h2gat_h3,
            h3_tm1, iterate=False)
        # Readout sums the (normalized) contributions of all three
        # layers, the attention window and, optionally, the speaker.
        h1_out_t = self.h1_to_readout.apply(h1_t)
        h2_out_t = self.h2_to_readout.apply(h2_t)
        h3_out_t = self.h3_to_readout.apply(h3_t)
        to_normalize = [
            h1_out_t, h2_out_t, h3_out_t]
        h1_out_t, h2_out_t, h3_out_t = \
            [_apply_norm(x, self.layer_norm) for x in to_normalize]
        readout_t = h1_out_t + h2_out_t + h3_out_t
        readout_t += self.att_to_readout.apply(w_t)
        if self.use_speaker:
            readout_t += spk_readout
        output_t = self.readout_to_output.apply(readout_t)
        if self.which_cost == 'MSE':
            predicted_x_t = output_t
            if self.use_speaker:
                predicted_x_t += spk_output
            # Dummy value for coeff_t
            coeff_t = predicted_x_t
        elif self.which_cost == "GMM":
            mu_t, sigma_t, coeff_t = output_t
            if self.use_speaker:
                mu_t += spk_output[0]
                sigma_t += spk_output[1]
                coeff_t += spk_output[2]
            # sampling_bias sharpens the mixture at generation time:
            # smaller sigmas and more peaked mixture weights.
            sigma_t = tensor.exp(sigma_t - self.sampling_bias) + \
                self.epsilon
            coeff_t = tensor.nnet.softmax(
                coeff_t.reshape(
                    (-1, self.k_gmm)) * (1. + self.sampling_bias)).reshape(
                    coeff_t.shape) + self.epsilon
            predicted_x_t = sample_gmm(
                mu_t, sigma_t, coeff_t, self.theano_rng)
        return predicted_x_t, h1_t, h2_t, h3_t, \
            k_t, w_t, coeff_t, phi_t, a_t_

    (sample_x, h1, h2, h3, k, w, pi, phi, pi_att), updates = theano.scan(
        fn=sample_step,
        sequences=[
            cell_h1,
            gat_h1,
            cell_h2,
            gat_h2,
            cell_h3,
            gat_h3],
        non_sequences=[],
        outputs_info=[
            initial_x,
            initial_h1,
            initial_h2,
            initial_h3,
            initial_k,
            initial_w,
            None,
            None,
            None])
    return sample_x, k, w, pi, phi, pi_att, updates
def sample_model(
        self, labels_tr, labels_mask_tr, features_mask_tr,
        speaker_tr, num_samples, num_steps):
    """Compile the sampling graph and run it on concrete inputs.

    labels_tr / labels_mask_tr : numpy label indices and mask.
    features_mask_tr : accepted for interface compatibility; not used
        by the sampling graph.
    speaker_tr : numpy speaker indices, fed only when
        ``self.use_speaker`` is set.
    num_samples / num_steps : number of parallel sequences and time
        steps to generate.

    Returns the list ``[sample_x, k, w, pi, phi, pi_att]`` of numpy
    outputs produced by the compiled theano function.
    """
    (_features, _features_mask, labels, labels_mask, speaker,
     _start_flag, _raw_sequence) = self.symbolic_input_variables()
    sample_x, k, w, pi, phi, pi_att, updates = self.sample_model_fun(
        labels, labels_mask, speaker, num_samples, num_steps)
    # Pair each symbolic input with its concrete value, in order.
    graph_inputs = [labels, labels_mask]
    concrete_values = [labels_tr, labels_mask_tr]
    if self.use_speaker:
        graph_inputs.append(speaker)
        concrete_values.append(speaker_tr)
    sampler = function(
        graph_inputs,
        [sample_x, k, w, pi, phi, pi_att],
        updates=updates)
    return sampler(*concrete_values)
def sample_using_input(self, data_tr, num_samples):
    """Predict the attention/sample outputs for concrete dataset inputs.

    data_tr : dict mapping symbolic-variable names (e.g. ``features``,
        ``labels``) to the numpy arrays to feed them with; only the
        keys present are wired into the compiled function.
    num_samples : forwarded to ``self.compute_cost``.

    Returns the list ``[sample_x, k, w, pi, phi, pi_att]`` of numpy
    outputs produced by the compiled theano function.
    """
    (features, features_mask, labels, labels_mask, speaker,
     start_flag, raw_sequence) = self.symbolic_input_variables()
    # compute_cost returns (cost, updates, attention_vars, cost_raw);
    # the previous 3-way unpack raised ValueError on that 4-tuple.
    # Slicing keeps this working even if the trailing cost_raw term
    # is dropped again.
    cost, updates, attention_vars = self.compute_cost(
        features, features_mask, labels, labels_mask,
        speaker, start_flag, num_samples)[:3]
    sample_x, k, w, pi, phi, pi_att = attention_vars
    # Map variable names to the symbolic inputs that actually exist
    # (speaker may be None when the model has no speaker conditioning).
    candidate_vars = [
        features, features_mask, labels, labels_mask, speaker, start_flag]
    theano_vars = {x.name: x for x in candidate_vars if x is not None}
    theano_inputs = []
    numpy_inputs = []
    for key, value in data_tr.items():
        theano_inputs.append(theano_vars[key])
        numpy_inputs.append(value)
    return function(
        theano_inputs, [sample_x, k, w, pi, phi, pi_att],
        updates=updates)(*numpy_inputs)
| [
"theano.tensor.tensor3",
"blocks.bricks.Linear",
"theano.tensor.extra_ops.repeat",
"theano.tensor.sqrt",
"theano.tensor.shape_padleft",
"theano.tensor.max",
"theano.tensor.scalar",
"models.conditional.three_tier.generate_and_save_samples",
"theano.tensor.log",
"blocks.bricks.parallel.Fork",
"mod... | [((489, 512), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""."""'], {}), "(1, '.')\n", (504, 512), False, 'import sys\n'), ((513, 546), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./sampleRNN"""'], {}), "(1, './sampleRNN')\n", (528, 546), False, 'import sys\n'), ((936, 975), 'theano.tensor.max', 'tensor.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (946, 975), False, 'from theano import tensor, function\n'), ((1143, 1174), 'theano.tensor.argmax', 'tensor.argmax', (['probs'], {'axis': 'axis'}), '(probs, axis=axis)\n', (1156, 1174), False, 'from theano import tensor, function\n'), ((1947, 1971), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['y'], {}), '(y)\n', (1968, 1971), False, 'from theano import tensor, function\n'), ((2108, 2126), 'theano.tensor.sqr', 'tensor.sqr', (['(y - mu)'], {}), '(y - mu)\n', (2118, 2126), False, 'from theano import tensor, function\n'), ((2515, 2558), 'theano.tensor.set_subtensor', 'tensor.set_subtensor', (['shape_result[-1]', 'dim'], {}), '(shape_result[-1], dim)\n', (2535, 2558), False, 'from theano import tensor, function\n'), ((5284, 5314), 'blocks.bricks.base.lazy', 'lazy', ([], {'allocation': "['input_dim']"}), "(allocation=['input_dim'])\n", (5288, 5314), False, 'from blocks.bricks.base import lazy, application\n'), ((5914, 5952), 'blocks.bricks.base.application', 'application', ([], {'inputs': "['input_', 'mask']"}), "(inputs=['input_', 'mask'])\n", (5925, 5952), False, 'from blocks.bricks.base import lazy, application\n'), ((1626, 1642), 'theano.tensor.arange', 'tensor.arange', (['r'], {}), '(r)\n', (1639, 1642), False, 'from theano import tensor, function\n'), ((1681, 1708), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['t', '(1)'], {}), '(t, 1)\n', (1702, 1708), False, 'from theano import tensor, function\n'), ((3456, 3478), 'theano.tensor.imatrix', 'tensor.imatrix', (['"""rseq"""'], {}), "('rseq')\n", (3470, 3478), False, 'from theano 
import tensor, function\n'), ((3494, 3517), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""rfeat"""'], {}), "('rfeat')\n", (3508, 3517), False, 'from theano import tensor, function\n'), ((3532, 3553), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""rh0"""'], {}), "('rh0')\n", (3546, 3553), False, 'from theano import tensor, function\n'), ((3572, 3596), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""rbigh0"""'], {}), "('rbigh0')\n", (3586, 3596), False, 'from theano import tensor, function\n'), ((3612, 3636), 'theano.tensor.scalar', 'tensor.scalar', (['"""rscalar"""'], {}), "('rscalar')\n", (3625, 3636), False, 'from theano import tensor, function\n'), ((3653, 3675), 'theano.tensor.matrix', 'tensor.matrix', (['"""rmask"""'], {}), "('rmask')\n", (3666, 3675), False, 'from theano import tensor, function\n'), ((3909, 3978), 'models.conditional.three_tier.compute_cost', 'three_tier.compute_cost', (['sequences', 'features', 'h0', 'big_h0', 'reset', 'mask'], {}), '(sequences, features, h0, big_h0, reset, mask)\n', (3932, 3978), False, 'from models.conditional import three_tier\n'), ((4224, 4257), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['big_h0_shape'], {}), '(big_h0_shape)\n', (4243, 4257), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((4362, 4391), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['h0_shape'], {}), '(h0_shape)\n', (4381, 4391), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((4616, 4686), 'models.conditional.three_tier.getting_generation_functions', 'three_tier.getting_generation_functions', (['seq', 'h0_', 'big_h0_', 'res_', 'feat'], {}), '(seq, h0_, big_h0_, res_, feat)\n', (4655, 4686), False, 'from models.conditional import three_tier\n'), ((4733, 5018), 'models.conditional.three_tier.generate_and_save_samples', 'three_tier.generate_and_save_samples', (['tag'], {'path_to_save': 'path_to_save', 'features': 'test_feats', 'features_length': 'features_length', 
'noise_level': '(0.0)', 'big_frame_level_generate_fn': 'big_frame_gen', 'frame_level_generate_fn': 'frame_gen', 'sample_level_generate_fn': 'sample_gen', 'npy_address': 'None'}), '(tag, path_to_save=path_to_save,\n features=test_feats, features_length=features_length, noise_level=0.0,\n big_frame_level_generate_fn=big_frame_gen, frame_level_generate_fn=\n frame_gen, sample_level_generate_fn=sample_gen, npy_address=None)\n', (4769, 5018), False, 'from models.conditional import three_tier\n'), ((10290, 10332), 'blocks.bricks.recurrent.GatedRecurrent', 'GatedRecurrent', ([], {'dim': 'rnn_h_dim', 'name': '"""rnn1"""'}), "(dim=rnn_h_dim, name='rnn1')\n", (10304, 10332), False, 'from blocks.bricks.recurrent import GatedRecurrent, Bidirectional\n'), ((10353, 10395), 'blocks.bricks.recurrent.GatedRecurrent', 'GatedRecurrent', ([], {'dim': 'rnn_h_dim', 'name': '"""rnn2"""'}), "(dim=rnn_h_dim, name='rnn2')\n", (10367, 10395), False, 'from blocks.bricks.recurrent import GatedRecurrent, Bidirectional\n'), ((10416, 10458), 'blocks.bricks.recurrent.GatedRecurrent', 'GatedRecurrent', ([], {'dim': 'rnn_h_dim', 'name': '"""rnn3"""'}), "(dim=rnn_h_dim, name='rnn3')\n", (10430, 10458), False, 'from blocks.bricks.recurrent import GatedRecurrent, Bidirectional\n'), ((10489, 10563), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'rnn_h_dim', 'output_dim': 'readouts_dim', 'name': '"""h1_to_readout"""'}), "(input_dim=rnn_h_dim, output_dim=readouts_dim, name='h1_to_readout')\n", (10495, 10563), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((10631, 10705), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'rnn_h_dim', 'output_dim': 'readouts_dim', 'name': '"""h2_to_readout"""'}), "(input_dim=rnn_h_dim, output_dim=readouts_dim, name='h2_to_readout')\n", (10637, 10705), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((10773, 10847), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'rnn_h_dim', 'output_dim': 'readouts_dim', 'name': 
'"""h3_to_readout"""'}), "(input_dim=rnn_h_dim, output_dim=readouts_dim, name='h3_to_readout')\n", (10779, 10847), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((10910, 11040), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn2_inputs', 'rnn2_gates']", 'input_dim': 'rnn_h_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""h1_to_h2"""'}), "(output_names=['rnn2_inputs', 'rnn2_gates'], input_dim=rnn_h_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='h1_to_h2')\n", (10914, 11040), False, 'from blocks.bricks.parallel import Fork\n'), ((11111, 11241), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn3_inputs', 'rnn3_gates']", 'input_dim': 'rnn_h_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""h1_to_h3"""'}), "(output_names=['rnn3_inputs', 'rnn3_gates'], input_dim=rnn_h_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='h1_to_h3')\n", (11115, 11241), False, 'from blocks.bricks.parallel import Fork\n'), ((11312, 11442), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn3_inputs', 'rnn3_gates']", 'input_dim': 'rnn_h_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""h2_to_h3"""'}), "(output_names=['rnn3_inputs', 'rnn3_gates'], input_dim=rnn_h_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='h2_to_h3')\n", (11316, 11442), False, 'from blocks.bricks.parallel import Fork\n'), ((12592, 12742), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn1_inputs', 'rnn1_gates']", 'input_dim': 'self.encoded_input_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""inp_to_h1"""'}), "(output_names=['rnn1_inputs', 'rnn1_gates'], input_dim=self.\n encoded_input_dim, output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='inp_to_h1'\n )\n", (12596, 12742), False, 'from blocks.bricks.parallel import Fork\n'), ((12808, 12958), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn2_inputs', 'rnn2_gates']", 'input_dim': 
'self.encoded_input_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""inp_to_h2"""'}), "(output_names=['rnn2_inputs', 'rnn2_gates'], input_dim=self.\n encoded_input_dim, output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='inp_to_h2'\n )\n", (12812, 12958), False, 'from blocks.bricks.parallel import Fork\n'), ((13024, 13174), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn3_inputs', 'rnn3_gates']", 'input_dim': 'self.encoded_input_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""inp_to_h3"""'}), "(output_names=['rnn3_inputs', 'rnn3_gates'], input_dim=self.\n encoded_input_dim, output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='inp_to_h3'\n )\n", (13028, 13174), False, 'from blocks.bricks.parallel import Fork\n'), ((13352, 13474), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['alpha', 'beta', 'kappa']", 'input_dim': 'rnn_h_dim', 'output_dims': '([attention_size] * 3)', 'name': '"""h1_to_att"""'}), "(output_names=['alpha', 'beta', 'kappa'], input_dim=rnn_h_dim,\n output_dims=[attention_size] * 3, name='h1_to_att')\n", (13356, 13474), False, 'from blocks.bricks.parallel import Fork\n'), ((13551, 13644), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'self.encoded_input_dim', 'output_dim': 'readouts_dim', 'name': '"""att_to_readout"""'}), "(input_dim=self.encoded_input_dim, output_dim=readouts_dim, name=\n 'att_to_readout')\n", (13557, 13644), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((16772, 16836), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(self.encoded_input_dim,)'], {'name': '"""initial_w"""'}), "((self.encoded_input_dim,), name='initial_w')\n", (16791, 16836), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((16859, 16898), 'blocks.roles.add_role', 'add_role', (['self.initial_w', 'INITIAL_STATE'], {}), '(self.initial_w, INITIAL_STATE)\n', (16867, 16898), False, 'from blocks.roles import add_role, INITIAL_STATE, PARAMETER\n'), ((16959, 
16985), 'theano.tensor.tensor3', 'tensor.tensor3', (['"""features"""'], {}), "('features')\n", (16973, 16985), False, 'from theano import tensor, function\n'), ((17010, 17040), 'theano.tensor.matrix', 'tensor.matrix', (['"""features_mask"""'], {}), "('features_mask')\n", (17023, 17040), False, 'from theano import tensor, function\n'), ((17058, 17082), 'theano.tensor.imatrix', 'tensor.imatrix', (['"""labels"""'], {}), "('labels')\n", (17072, 17082), False, 'from theano import tensor, function\n'), ((17105, 17133), 'theano.tensor.matrix', 'tensor.matrix', (['"""labels_mask"""'], {}), "('labels_mask')\n", (17118, 17133), False, 'from theano import tensor, function\n'), ((17156, 17183), 'theano.tensor.scalar', 'tensor.scalar', (['"""start_flag"""'], {}), "('start_flag')\n", (17169, 17183), False, 'from theano import tensor, function\n'), ((17786, 17835), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(batch_size, self.rnn_h_dim)'], {}), '((batch_size, self.rnn_h_dim))\n', (17805, 17835), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((17854, 17903), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(batch_size, self.rnn_h_dim)'], {}), '((batch_size, self.rnn_h_dim))\n', (17873, 17903), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((17922, 17971), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(batch_size, self.rnn_h_dim)'], {}), '((batch_size, self.rnn_h_dim))\n', (17941, 17971), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((18020, 18081), 'theano.tensor.zeros', 'tensor.zeros', (['(batch_size, self.attention_size)'], {'dtype': 'floatX'}), '((batch_size, self.attention_size), dtype=floatX)\n', (18032, 18081), False, 'from theano import tensor, function\n'), ((18112, 18166), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(batch_size, self.attention_size)'], {}), '((batch_size, self.attention_size))\n', (18131, 18166), False, 'from 
blocks.utils import shared_floatx_zeros, dict_union\n'), ((18244, 18297), 'theano.tensor.repeat', 'tensor.repeat', (['self.initial_w[None, :]', 'batch_size', '(0)'], {}), '(self.initial_w[None, :], batch_size, 0)\n', (18257, 18297), False, 'from theano import tensor, function\n'), ((18316, 18373), 'blocks.utils.shared_floatx_zeros', 'shared_floatx_zeros', (['(batch_size, self.encoded_input_dim)'], {}), '((batch_size, self.encoded_input_dim))\n', (18335, 18373), False, 'from blocks.utils import shared_floatx_zeros, dict_union\n'), ((18964, 19002), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (18976, 19002), False, 'from theano import tensor, function\n'), ((19021, 19059), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (19033, 19059), False, 'from theano import tensor, function\n'), ((19078, 19116), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (19090, 19116), False, 'from theano import tensor, function\n'), ((19134, 19171), 'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (19146, 19171), False, 'from theano import tensor, function\n'), ((19189, 19226), 'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (19201, 19226), False, 'from theano import tensor, function\n'), ((19244, 19281), 'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (19256, 19281), False, 'from theano import tensor, function\n'), ((21746, 21792), 'theano.tensor.switch', 'tensor.switch', (['start_flag', 'initial_h1', 'last_h1'], {}), '(start_flag, initial_h1, last_h1)\n', (21759, 21792), False, 'from theano import tensor, function\n'), ((21825, 21871), 'theano.tensor.switch', 'tensor.switch', (['start_flag', 'initial_h2', 'last_h2'], {}), 
'(start_flag, initial_h2, last_h2)\n', (21838, 21871), False, 'from theano import tensor, function\n'), ((21904, 21950), 'theano.tensor.switch', 'tensor.switch', (['start_flag', 'initial_h3', 'last_h3'], {}), '(start_flag, initial_h3, last_h3)\n', (21917, 21950), False, 'from theano import tensor, function\n'), ((21982, 22026), 'theano.tensor.switch', 'tensor.switch', (['start_flag', 'initial_w', 'last_w'], {}), '(start_flag, initial_w, last_w)\n', (21995, 22026), False, 'from theano import tensor, function\n'), ((22058, 22102), 'theano.tensor.switch', 'tensor.switch', (['start_flag', 'initial_k', 'last_k'], {}), '(start_flag, initial_k, last_k)\n', (22071, 22102), False, 'from theano import tensor, function\n'), ((25100, 25294), 'theano.scan', 'theano.scan', ([], {'fn': 'step', 'sequences': '[cell_h1, gat_h1, cell_h2, gat_h2, cell_h3, gat_h3]', 'non_sequences': '[context_oh]', 'outputs_info': '[input_h1, input_h2, input_h3, input_k, input_w, None, None]'}), '(fn=step, sequences=[cell_h1, gat_h1, cell_h2, gat_h2, cell_h3,\n gat_h3], non_sequences=[context_oh], outputs_info=[input_h1, input_h2,\n input_h3, input_k, input_w, None, None])\n', (25111, 25294), False, 'import theano\n'), ((29087, 29144), 'numpy.zeros', 'numpy.zeros', (['(num_samples, self.output_dim)'], {'dtype': 'floatX'}), '((num_samples, self.output_dim), dtype=floatX)\n', (29098, 29144), False, 'import numpy\n'), ((29302, 29340), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (29314, 29340), False, 'from theano import tensor, function\n'), ((29359, 29397), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (29371, 29397), False, 'from theano import tensor, function\n'), ((29416, 29454), 'theano.tensor.zeros', 'tensor.zeros', (['cell_shape'], {'dtype': 'floatX'}), '(cell_shape, dtype=floatX)\n', (29428, 29454), False, 'from theano import tensor, function\n'), ((29472, 29509), 
'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (29484, 29509), False, 'from theano import tensor, function\n'), ((29527, 29564), 'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (29539, 29564), False, 'from theano import tensor, function\n'), ((29582, 29619), 'theano.tensor.zeros', 'tensor.zeros', (['gat_shape'], {'dtype': 'floatX'}), '(gat_shape, dtype=floatX)\n', (29594, 29619), False, 'from theano import tensor, function\n'), ((36779, 36997), 'theano.scan', 'theano.scan', ([], {'fn': 'sample_step', 'sequences': '[cell_h1, gat_h1, cell_h2, gat_h2, cell_h3, gat_h3]', 'non_sequences': '[]', 'outputs_info': '[initial_x, initial_h1, initial_h2, initial_h3, initial_k, initial_w, None,\n None, None]'}), '(fn=sample_step, sequences=[cell_h1, gat_h1, cell_h2, gat_h2,\n cell_h3, gat_h3], non_sequences=[], outputs_info=[initial_x, initial_h1,\n initial_h2, initial_h3, initial_k, initial_w, None, None, None])\n', (36790, 36997), False, 'import theano\n'), ((1573, 1586), 'theano.tensor.max', 'tensor.max', (['t'], {}), '(t)\n', (1583, 1586), False, 'from theano import tensor, function\n'), ((2834, 2860), 'theano.tensor.arange', 'tensor.arange', (['mu.shape[0]'], {}), '(mu.shape[0])\n', (2847, 2860), False, 'from theano import tensor, function\n'), ((2888, 2917), 'theano.tensor.arange', 'tensor.arange', (['sigma.shape[0]'], {}), '(sigma.shape[0])\n', (2901, 2917), False, 'from theano import tensor, function\n'), ((3354, 3376), 'blocks.roles.add_role', 'add_role', (['p', 'PARAMETER'], {}), '(p, PARAMETER)\n', (3362, 3376), False, 'from blocks.roles import add_role, INITIAL_STATE, PARAMETER\n'), ((6705, 6763), 'blocks.bricks.lookup.LookupTable', 'LookupTable', (['num_characters', 'input_dim'], {'name': '"""embed_label"""'}), "(num_characters, input_dim, name='embed_label')\n", (6716, 6763), False, 'from blocks.bricks.lookup import LookupTable\n'), ((7202, 
7243), 'blocks.bricks.recurrent.Bidirectional', 'Bidirectional', (['transition'], {'name': '"""encoder"""'}), "(transition, name='encoder')\n", (7215, 7243), False, 'from blocks.bricks.recurrent import GatedRecurrent, Bidirectional\n'), ((10231, 10268), 'theano.tensor.scalar', 'tensor.scalar', (['"""feedback_noise_level"""'], {}), "('feedback_noise_level')\n", (10244, 10268), False, 'from theano import tensor, function\n'), ((11558, 11637), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'readouts_dim', 'output_dim': 'output_dim', 'name': '"""readout_to_output"""'}), "(input_dim=readouts_dim, output_dim=output_dim, name='readout_to_output')\n", (11564, 11637), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((13912, 13950), 'blocks.bricks.lookup.LookupTable', 'LookupTable', (['num_speakers', 'speaker_dim'], {}), '(num_speakers, speaker_dim)\n', (13923, 13950), False, 'from blocks.bricks.lookup import LookupTable\n'), ((13985, 14122), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn1_inputs', 'rnn1_gates']", 'input_dim': 'speaker_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""speaker_to_h1"""'}), "(output_names=['rnn1_inputs', 'rnn1_gates'], input_dim=speaker_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='speaker_to_h1')\n", (13989, 14122), False, 'from blocks.bricks.parallel import Fork\n'), ((14218, 14355), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn2_inputs', 'rnn2_gates']", 'input_dim': 'speaker_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""speaker_to_h2"""'}), "(output_names=['rnn2_inputs', 'rnn2_gates'], input_dim=speaker_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='speaker_to_h2')\n", (14222, 14355), False, 'from blocks.bricks.parallel import Fork\n'), ((14451, 14588), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn3_inputs', 'rnn3_gates']", 'input_dim': 'speaker_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': 
'"""speaker_to_h3"""'}), "(output_names=['rnn3_inputs', 'rnn3_gates'], input_dim=speaker_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='speaker_to_h3')\n", (14455, 14588), False, 'from blocks.bricks.parallel import Fork\n'), ((14689, 14775), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'speaker_dim', 'output_dim': 'readouts_dim', 'name': '"""speaker_to_readout"""'}), "(input_dim=speaker_dim, output_dim=readouts_dim, name=\n 'speaker_to_readout')\n", (14695, 14775), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((15702, 15834), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn2_inputs', 'rnn2_gates']", 'input_dim': 'output_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""out_to_h2"""'}), "(output_names=['rnn2_inputs', 'rnn2_gates'], input_dim=output_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='out_to_h2')\n", (15706, 15834), False, 'from blocks.bricks.parallel import Fork\n'), ((15926, 16058), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn3_inputs', 'rnn3_gates']", 'input_dim': 'output_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""out_to_h3"""'}), "(output_names=['rnn3_inputs', 'rnn3_gates'], input_dim=output_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='out_to_h3')\n", (15930, 16058), False, 'from blocks.bricks.parallel import Fork\n'), ((16348, 16480), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['rnn1_inputs', 'rnn1_gates']", 'input_dim': 'output_dim', 'output_dims': '[rnn_h_dim, 2 * rnn_h_dim]', 'name': '"""out_to_h1"""'}), "(output_names=['rnn1_inputs', 'rnn1_gates'], input_dim=output_dim,\n output_dims=[rnn_h_dim, 2 * rnn_h_dim], name='out_to_h1')\n", (16352, 16480), False, 'from blocks.bricks.parallel import Fork\n'), ((17236, 17267), 'theano.tensor.imatrix', 'tensor.imatrix', (['"""speaker_index"""'], {}), "('speaker_index')\n", (17250, 17267), False, 'from theano import tensor, function\n'), ((17365, 17393), 
'theano.tensor.itensor3', 'tensor.itensor3', (['"""raw_audio"""'], {}), "('raw_audio')\n", (17380, 17393), False, 'from theano import tensor, function\n'), ((20664, 20697), 'theano.tensor.shape_padleft', 'tensor.shape_padleft', (['emb_speaker'], {}), '(emb_speaker)\n', (20684, 20697), False, 'from theano import tensor, function\n'), ((22181, 22215), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['labels_mask'], {}), '(labels_mask)\n', (22202, 22215), False, 'from theano import tensor, function\n'), ((22263, 22307), 'theano.tensor.arange', 'tensor.arange', (['labels.shape[1]'], {'dtype': 'floatX'}), '(labels.shape[1], dtype=floatX)\n', (22276, 22307), False, 'from theano import tensor, function\n'), ((23134, 23160), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['a_t'], {}), '(a_t)\n', (23155, 23160), False, 'from theano import tensor, function\n'), ((23179, 23205), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['b_t'], {}), '(b_t)\n', (23200, 23205), False, 'from theano import tensor, function\n'), ((23225, 23251), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['k_t'], {}), '(k_t)\n', (23246, 23251), False, 'from theano import tensor, function\n'), ((26166, 26221), 'theano.tensor.sum', 'tensor.sum', (['((predicted - target_features) ** 2)'], {'axis': '(-1)'}), '((predicted - target_features) ** 2, axis=-1)\n', (26176, 26221), False, 'from theano import tensor, function\n'), ((27418, 27468), 'theano.tensor.extra_ops.repeat', 'tensor.extra_ops.repeat', (['features_mask', '(80)'], {'axis': '(0)'}), '(features_mask, 80, axis=0)\n', (27441, 27468), False, 'from theano import tensor, function\n'), ((28542, 28560), 'numpy.float32', 'numpy.float32', (['(0.0)'], {}), '(0.0)\n', (28555, 28560), False, 'import numpy\n'), ((28580, 28598), 'numpy.float32', 'numpy.float32', (['(1.0)'], {}), '(1.0)\n', (28593, 28598), False, 'import numpy\n'), ((30003, 30036), 'theano.tensor.shape_padleft', 'tensor.shape_padleft', (['emb_speaker'], 
{}), '(emb_speaker)\n', (30023, 30036), False, 'from theano import tensor, function\n'), ((30855, 30889), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['labels_mask'], {}), '(labels_mask)\n', (30876, 30889), False, 'from theano import tensor, function\n'), ((30937, 30981), 'theano.tensor.arange', 'tensor.arange', (['labels.shape[1]'], {'dtype': 'floatX'}), '(labels.shape[1], dtype=floatX)\n', (30950, 30981), False, 'from theano import tensor, function\n'), ((33203, 33229), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['a_t'], {}), '(a_t)\n', (33224, 33229), False, 'from theano import tensor, function\n'), ((33248, 33274), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['b_t'], {}), '(b_t)\n', (33269, 33274), False, 'from theano import tensor, function\n'), ((33294, 33320), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['k_t'], {}), '(k_t)\n', (33315, 33320), False, 'from theano import tensor, function\n'), ((38010, 38085), 'theano.function', 'function', (['theano_inputs', '[sample_x, k, w, pi, phi, pi_att]'], {'updates': 'updates'}), '(theano_inputs, [sample_x, k, w, pi, phi, pi_att], updates=updates)\n', (38018, 38085), False, 'from theano import tensor, function\n'), ((39079, 39154), 'theano.function', 'function', (['theano_inputs', '[sample_x, k, w, pi, phi, pi_att]'], {'updates': 'updates'}), '(theano_inputs, [sample_x, k, w, pi, phi, pi_att], updates=updates)\n', (39087, 39154), False, 'from theano import tensor, function\n'), ((1015, 1036), 'theano.tensor.exp', 'tensor.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (1025, 1036), False, 'from theano import tensor, function\n'), ((2213, 2237), 'theano.tensor.log', 'tensor.log', (['(2 * numpy.pi)'], {}), '(2 * numpy.pi)\n', (2223, 2237), False, 'from theano import tensor, function\n'), ((2270, 2288), 'theano.tensor.log', 'tensor.log', (['weight'], {}), '(weight)\n', (2280, 2288), False, 'from theano import tensor, function\n'), ((5623, 5631), 
'blocks.bricks.Linear', 'Linear', ([], {}), '()\n', (5629, 5631), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((11836, 12011), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['gmm_mu', 'gmm_sigma', 'gmm_coeff']", 'input_dim': 'readouts_dim', 'output_dims': '[output_dim * k_gmm, output_dim * k_gmm, k_gmm]', 'name': '"""readout_to_output"""'}), "(output_names=['gmm_mu', 'gmm_sigma', 'gmm_coeff'], input_dim=\n readouts_dim, output_dims=[output_dim * k_gmm, output_dim * k_gmm,\n k_gmm], name='readout_to_output')\n", (11840, 12011), False, 'from blocks.bricks.parallel import Fork\n'), ((14898, 14976), 'blocks.bricks.Linear', 'Linear', ([], {'input_dim': 'speaker_dim', 'output_dim': 'output_dim', 'name': '"""speaker_to_output"""'}), "(input_dim=speaker_dim, output_dim=output_dim, name='speaker_to_output')\n", (14904, 14976), False, 'from blocks.bricks import Initializable, Linear, Random\n'), ((22992, 23007), 'theano.tensor.exp', 'tensor.exp', (['b_t'], {}), '(b_t)\n', (23002, 23007), False, 'from theano import tensor, function\n'), ((28285, 28314), 'theano.tensor.unbroadcast', 'tensor.unbroadcast', (['new_h0', '(1)'], {}), '(new_h0, 1)\n', (28303, 28314), False, 'from theano import tensor, function\n'), ((28344, 28377), 'theano.tensor.unbroadcast', 'tensor.unbroadcast', (['new_big_h0', '(1)'], {}), '(new_big_h0, 1)\n', (28362, 28377), False, 'from theano import tensor, function\n'), ((7082, 7113), 'blocks.bricks.recurrent.GatedRecurrent', 'GatedRecurrent', ([], {'dim': 'encoder_dim'}), '(dim=encoder_dim)\n', (7096, 7113), False, 'from blocks.bricks.recurrent import GatedRecurrent, Bidirectional\n'), ((15117, 15292), 'blocks.bricks.parallel.Fork', 'Fork', ([], {'output_names': "['gmm_mu', 'gmm_sigma', 'gmm_coeff']", 'input_dim': 'speaker_dim', 'output_dims': '[output_dim * k_gmm, output_dim * k_gmm, k_gmm]', 'name': '"""speaker_to_output"""'}), "(output_names=['gmm_mu', 'gmm_sigma', 'gmm_coeff'], input_dim=\n speaker_dim, 
output_dims=[output_dim * k_gmm, output_dim * k_gmm, k_gmm\n ], name='speaker_to_output')\n", (15121, 15292), False, 'from blocks.bricks.parallel import Fork\n'), ((22862, 22886), 'theano.tensor.nnet.softmax', 'tensor.nnet.softmax', (['a_t'], {}), '(a_t)\n', (22881, 22886), False, 'from theano import tensor, function\n'), ((22942, 22957), 'theano.tensor.exp', 'tensor.exp', (['a_t'], {}), '(a_t)\n', (22952, 22957), False, 'from theano import tensor, function\n'), ((23076, 23091), 'theano.tensor.exp', 'tensor.exp', (['k_t'], {}), '(k_t)\n', (23086, 23091), False, 'from theano import tensor, function\n'), ((26701, 26718), 'theano.tensor.exp', 'tensor.exp', (['sigma'], {}), '(sigma)\n', (26711, 26718), False, 'from theano import tensor, function\n'), ((32869, 32893), 'theano.tensor.nnet.softmax', 'tensor.nnet.softmax', (['a_t'], {}), '(a_t)\n', (32888, 32893), False, 'from theano import tensor, function\n'), ((32949, 32964), 'theano.tensor.exp', 'tensor.exp', (['a_t'], {}), '(a_t)\n', (32959, 32964), False, 'from theano import tensor, function\n'), ((32999, 33014), 'theano.tensor.exp', 'tensor.exp', (['b_t'], {}), '(b_t)\n', (33009, 33014), False, 'from theano import tensor, function\n'), ((2195, 2210), 'theano.tensor.log', 'tensor.log', (['sig'], {}), '(sig)\n', (2205, 2210), False, 'from theano import tensor, function\n'), ((23667, 23701), 'theano.tensor.exp', 'tensor.exp', (['(-b_t * (k_t_ - u) ** 2)'], {}), '(-b_t * (k_t_ - u) ** 2)\n', (23677, 23701), False, 'from theano import tensor, function\n'), ((23782, 23810), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['phi_t'], {}), '(phi_t)\n', (23803, 23810), False, 'from theano import tensor, function\n'), ((33125, 33140), 'theano.tensor.exp', 'tensor.exp', (['k_t'], {}), '(k_t)\n', (33135, 33140), False, 'from theano import tensor, function\n'), ((33736, 33770), 'theano.tensor.exp', 'tensor.exp', (['(-b_t * (k_t_ - u) ** 2)'], {}), '(-b_t * (k_t_ - u) ** 2)\n', (33746, 33770), False, 'from theano import 
tensor, function\n'), ((33851, 33879), 'theano.tensor.shape_padright', 'tensor.shape_padright', (['phi_t'], {}), '(phi_t)\n', (33872, 33879), False, 'from theano import tensor, function\n'), ((36204, 36244), 'theano.tensor.exp', 'tensor.exp', (['(sigma_t - self.sampling_bias)'], {}), '(sigma_t - self.sampling_bias)\n', (36214, 36244), False, 'from theano import tensor, function\n'), ((23539, 23579), 'theano.tensor.exp', 'tensor.exp', (['(-0.5 * b_t * (k_t_ - u) ** 2)'], {}), '(-0.5 * b_t * (k_t_ - u) ** 2)\n', (23549, 23579), False, 'from theano import tensor, function\n'), ((33608, 33648), 'theano.tensor.exp', 'tensor.exp', (['(-0.5 * b_t * (k_t_ - u) ** 2)'], {}), '(-0.5 * b_t * (k_t_ - u) ** 2)\n', (33618, 33648), False, 'from theano import tensor, function\n'), ((23500, 23516), 'theano.tensor.sqrt', 'tensor.sqrt', (['b_t'], {}), '(b_t)\n', (23511, 23516), False, 'from theano import tensor, function\n'), ((33569, 33585), 'theano.tensor.sqrt', 'tensor.sqrt', (['b_t'], {}), '(b_t)\n', (33580, 33585), False, 'from theano import tensor, function\n')] |
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import sys
import tempfile
from distutils.version import LooseVersion
import matplotlib as mpl # isort:skip
mpl.use("Agg") # noqa: E402
import matplotlib.pyplot as pl
import numpy as np
import pytest
from wradlib import georef, util, vis
from . import requires_data, requires_secrets
cartopy = util.import_optional("cartopy")
class TestPolarPlot:
    """Smoke tests for wradlib's polar plotting helpers (PPI/RHI).

    Exercises many argument combinations of ``vis.plot_ppi``/``vis.plot_rhi``
    (plain axes, curvelinear grids, cartopy projections, xarray accessor)
    plus the documented error and warning paths.
    """

    # Synthetic 360-ray x 10-bin field with a few recognizable features.
    img = np.zeros((360, 10), dtype=np.float32)
    img[2, 2] = 10  # isolated pixel
    img[5, 6:8] = 10  # line
    img[20, :] = 5  # spike
    img[60:120, 2:7] = 11  # precip field

    # Polar coordinates: ranges, azimuths, elevations (az1 is a fixed
    # 225-degree azimuth used for the RHI test data).
    r = np.arange(0, 100000, 10000)
    az = np.arange(0, 360)
    el = np.arange(0, 90)
    th = np.zeros_like(az)
    az1 = np.ones_like(el) * 225
    img = img
    proj = georef.create_osr("dwd-radolan")
    # Georeferenced DataArrays consumed by the xarray-accessor tests below.
    da_ppi = georef.create_xarray_dataarray(img, r, az, th)
    da_ppi = georef.georeference_dataset(da_ppi, proj=None)
    da_rhi = georef.create_xarray_dataarray(img[0:90], r, az1, el)
    da_rhi = georef.georeference_dataset(da_rhi, proj=None)

    def test_plot_ppi(self):
        """plot_ppi/plot_ppi_crosshair argument combinations and error paths."""
        ax, pm = vis.plot_ppi(self.img, re=6371000.0, ke=(4.0 / 3.0))
        ax, pm = vis.plot_ppi(self.img, self.r, self.az, re=6371000.0, ke=(4.0 / 3.0))
        ax, pm = vis.plot_ppi(
            self.img, self.r, self.az, re=6371000.0, ke=(4.0 / 3.0), ax=ax
        )
        # ax may also be given as a subplot spec (212)
        ax, pm = vis.plot_ppi(
            self.img, self.r, self.az, re=6371000.0, ke=(4.0 / 3.0), ax=212
        )
        ax, pm = vis.plot_ppi(self.img)
        vis.plot_ppi_crosshair(site=(0, 0, 0), ranges=[2, 4, 8])
        vis.plot_ppi_crosshair(
            site=(0, 0, 0),
            ranges=[2, 4, 8],
            angles=[0, 45, 90, 180, 270],
            line=dict(color="white", linestyle="solid"),
        )
        ax, pm = vis.plot_ppi(self.img, self.r, site=(10.0, 45.0, 0.0), proj=self.proj)
        vis.plot_ppi_crosshair(
            site=(10.0, 45.0, 0.0),
            ranges=[2, 4, 8],
            angles=[0, 45, 90, 180, 270],
            proj=self.proj,
            line=dict(color="white", linestyle="solid"),
        )
        ax, pm = vis.plot_ppi(self.img, func="contour")
        ax, pm = vis.plot_ppi(self.img, func="contourf")
        ax, pm = vis.plot_ppi(self.img, self.r, self.az, proj=self.proj, site=(0, 0, 0))
        # Inconsistent proj/site combinations must warn ...
        with pytest.warns(UserWarning):
            ax, pm = vis.plot_ppi(self.img, site=(10.0, 45.0, 0.0), proj=self.proj)
        with pytest.warns(UserWarning):
            ax, pm = vis.plot_ppi(self.img, proj=None, site=(0, 0, 0))
        # ... while missing/malformed site arguments must raise.
        with pytest.raises(TypeError):
            ax, pm = vis.plot_ppi(self.img, proj=self.proj)
        with pytest.raises(ValueError):
            ax, pm = vis.plot_ppi(self.img, site=(0, 0), proj=self.proj)
        with pytest.raises(ValueError):
            vis.plot_ppi_crosshair(site=(0, 0), ranges=[2, 4, 8])

    def test_plot_ppi_xarray(self):
        """PPI plotting through the DataArray ``.wradlib`` accessor."""
        self.da_ppi.wradlib.rays
        self.da_ppi.wradlib.plot()
        self.da_ppi.wradlib.plot_ppi()
        self.da_ppi.wradlib.contour()
        self.da_ppi.wradlib.contourf()
        self.da_ppi.wradlib.pcolormesh()
        self.da_ppi.wradlib.plot(proj="cg")
        self.da_ppi.wradlib.plot_ppi(proj="cg")
        self.da_ppi.wradlib.contour(proj="cg")
        self.da_ppi.wradlib.contourf(proj="cg")
        self.da_ppi.wradlib.pcolormesh(proj="cg")
        # Unsupported proj values must raise TypeError.
        with pytest.raises(TypeError):
            self.da_ppi.wradlib.pcolormesh(proj=self.proj)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        with pytest.raises(TypeError):
            self.da_ppi.wradlib.pcolormesh(proj={"rot": 0, "scale": 1}, ax=ax)

    @pytest.mark.skipif("cartopy" not in sys.modules, reason="without Cartopy")
    def test_plot_ppi_cartopy(self):
        """PPI plotting onto a cartopy GeoAxes (skipped without cartopy)."""
        if cartopy:
            # Known-bad version combination, see upstream issue trackers.
            if (LooseVersion(cartopy.__version__) < LooseVersion("0.18.0")) and (
                LooseVersion(mpl.__version__) >= LooseVersion("3.3.0")
            ):
                pytest.skip("fails for cartopy < 0.18.0 and matplotlib >= 3.3.0")
            site = (7, 45, 0.0)
            map_proj = cartopy.crs.Mercator(central_longitude=site[1])
            ax, pm = vis.plot_ppi(self.img, self.r, self.az, proj=map_proj)
            assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxes)
            fig = pl.figure(figsize=(10, 10))
            ax = fig.add_subplot(111, projection=map_proj)
            self.da_ppi.wradlib.plot_ppi(ax=ax)
            ax.gridlines(draw_labels=True)

    def test_plot_rhi(self):
        """plot_rhi argument combinations (only the first 90 rays are used)."""
        ax, pm = vis.plot_rhi(self.img[0:90, :])
        ax, pm = vis.plot_rhi(self.img[0:90, :], th_res=0.5)
        ax, pm = vis.plot_rhi(self.img[0:90, :], th_res=0.5, ax=212)
        ax, pm = vis.plot_rhi(self.img[0:90, :], r=np.arange(10), th=np.arange(90))
        ax, pm = vis.plot_rhi(self.img[0:90, :], func="contour")
        ax, pm = vis.plot_rhi(self.img[0:90, :], func="contourf")
        ax, pm = vis.plot_rhi(
            self.img[0:90, :],
            r=np.arange(10),
            th=np.arange(90),
            proj=self.proj,
            site=(0, 0, 0),
        )

    def test_plot_rhi_xarray(self):
        """RHI plotting through the DataArray ``.wradlib`` accessor."""
        # The accessor repr should match the underlying DataArray repr
        # (everything after the first line, which carries the type name).
        assert (
            repr(self.da_rhi.wradlib).split("\n", 1)[1]
            == repr(self.da_rhi).split("\n", 1)[1]
        )
        self.da_rhi.wradlib.rays
        self.da_rhi.wradlib.plot()
        self.da_rhi.wradlib.plot_rhi()
        self.da_rhi.wradlib.contour()
        self.da_rhi.wradlib.contourf()
        self.da_rhi.wradlib.pcolormesh()
        self.da_rhi.wradlib.plot(proj="cg")
        self.da_rhi.wradlib.plot_rhi(proj="cg")
        self.da_rhi.wradlib.contour(proj="cg")
        self.da_rhi.wradlib.contourf(proj="cg")
        self.da_rhi.wradlib.pcolormesh(proj="cg")

    def test_plot_cg_ppi(self):
        """PPI plotting on curvelinear (cg) grids, including bad-axes errors."""
        cgax, pm = vis.plot_ppi(self.img, elev=2.0, proj="cg")
        cgax, pm = vis.plot_ppi(self.img, elev=2.0, proj="cg", site=(0, 0, 0))
        cgax, pm = vis.plot_ppi(self.img, elev=2.0, proj="cg", ax=cgax)
        fig, ax = pl.subplots(2, 2)
        # A plain Axes instance is not accepted for cg plots.
        with pytest.raises(TypeError):
            vis.plot_ppi(self.img, elev=2.0, proj="cg", ax=ax[0, 0])
        cgax, pm = vis.plot_ppi(self.img, elev=2.0, proj="cg", ax=111)
        cgax, pm = vis.plot_ppi(self.img, elev=2.0, proj="cg", ax=121)
        cgax, pm = vis.plot_ppi(self.img, proj="cg")
        cgax, pm = vis.plot_ppi(self.img, func="contour", proj="cg")
        cgax, pm = vis.plot_ppi(self.img, func="contourf", proj="cg")
        cgax, pm = vis.plot_ppi(self.img, func="contourf", proj="cg")

    def test_plot_cg_rhi(self):
        """RHI plotting on curvelinear (cg) grids, including bad-axes errors."""
        cgax, pm = vis.plot_rhi(self.img[0:90, :], proj="cg")
        cgax, pm = vis.plot_rhi(self.img[0:90, :], proj="cg", ax=cgax)
        fig, ax = pl.subplots(2, 2)
        # A plain Axes instance is not accepted for cg plots.
        with pytest.raises(TypeError):
            vis.plot_rhi(self.img[0:90, :], proj="cg", ax=ax[0, 0])
        cgax, pm = vis.plot_rhi(self.img[0:90, :], th_res=0.5, proj="cg")
        cgax, pm = vis.plot_rhi(self.img[0:90, :], proj="cg")
        cgax, pm = vis.plot_rhi(
            self.img[0:90, :], r=np.arange(10), th=np.arange(90), proj="cg"
        )
        cgax, pm = vis.plot_rhi(self.img[0:90, :], func="contour", proj="cg")
        cgax, pm = vis.plot_rhi(self.img[0:90, :], func="contourf", proj="cg")

    def test_create_cg(self):
        """create_cg returns the cg/cartesian/polar axes triple."""
        cgax, caax, paax = vis.create_cg()
        cgax, caax, paax = vis.create_cg(subplot=121)
class TestMiscPlot:
    """Smoke tests for scan-strategy, plan/vert and line/patch helpers."""

    @requires_data
    def test_plot_scan_strategy(self):
        """Plot a scan strategy in plain and curvelinear form."""
        rng = np.arange(0, 10000, 100)
        elevations = np.arange(1, 30, 3)
        radar_site = (7.0, 53.0, 100.0)
        vis.plot_scan_strategy(rng, elevations, radar_site)
        vis.plot_scan_strategy(rng, elevations, radar_site, cg=True)

    @requires_data
    @requires_secrets
    def test_plot_scan_strategy_terrain(self):
        """Scan-strategy plots with an underlying terrain profile."""
        rng = np.arange(0, 10000, 100)
        elevations = np.arange(1, 30, 3)
        radar_site = (7.0, 53.0, 100.0)
        vis.plot_scan_strategy(rng, elevations, radar_site, terrain=True)
        vis.plot_scan_strategy(rng, elevations, radar_site, cg=True, terrain=True)

    def test_plot_plan_and_vert(self):
        """Plan-plus-vertical-cut plotting, including the save-to-file path."""
        xs, ys, zs = np.arange(0, 10), np.arange(0, 10), np.arange(0, 5)
        plane_xy = np.zeros((xs.size, ys.size))
        cut_zx = np.zeros((zs.size, xs.size))
        cut_zy = np.zeros((zs.size, ys.size))
        volume = np.zeros((zs.size, ys.size, xs.size))
        vis.plot_plan_and_vert(xs, ys, zs, plane_xy, cut_zx, cut_zy)
        vis.plot_plan_and_vert(xs, ys, zs, plane_xy, cut_zx, cut_zy, title="Test")
        outfile = tempfile.NamedTemporaryFile(mode="w+b").name
        vis.plot_plan_and_vert(xs, ys, zs, plane_xy, cut_zx, cut_zy, saveto=outfile)
        vis.plot_max_plan_and_vert(xs, ys, zs, volume)

    def test_add_lines(self):
        """add_lines accepts both a single line array and a stack of them."""
        fig, axis = pl.subplots()
        coords = np.dstack((np.arange(0, 10), np.arange(0, 10)))
        vis.add_lines(axis, coords)
        vis.add_lines(axis, np.array([coords]))

    def test_add_patches(self):
        """add_patches accepts both a single patch array and a stack of them."""
        fig, axis = pl.subplots()
        coords = np.dstack((np.arange(0, 10), np.arange(0, 10)))
        vis.add_patches(axis, coords)
        vis.add_patches(axis, np.array([coords]))
| [
"wradlib.georef.create_osr",
"wradlib.vis.plot_max_plan_and_vert",
"wradlib.vis.plot_ppi_crosshair",
"wradlib.vis.plot_rhi",
"matplotlib.pyplot.figure",
"pytest.mark.skipif",
"numpy.arange",
"numpy.zeros_like",
"wradlib.vis.add_patches",
"pytest.warns",
"pytest.raises",
"matplotlib.pyplot.subp... | [((249, 263), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (256, 263), True, 'import matplotlib as mpl\n'), ((439, 470), 'wradlib.util.import_optional', 'util.import_optional', (['"""cartopy"""'], {}), "('cartopy')\n", (459, 470), False, 'from wradlib import georef, util, vis\n'), ((504, 541), 'numpy.zeros', 'np.zeros', (['(360, 10)'], {'dtype': 'np.float32'}), '((360, 10), dtype=np.float32)\n', (512, 541), True, 'import numpy as np\n'), ((686, 713), 'numpy.arange', 'np.arange', (['(0)', '(100000)', '(10000)'], {}), '(0, 100000, 10000)\n', (695, 713), True, 'import numpy as np\n'), ((723, 740), 'numpy.arange', 'np.arange', (['(0)', '(360)'], {}), '(0, 360)\n', (732, 740), True, 'import numpy as np\n'), ((750, 766), 'numpy.arange', 'np.arange', (['(0)', '(90)'], {}), '(0, 90)\n', (759, 766), True, 'import numpy as np\n'), ((776, 793), 'numpy.zeros_like', 'np.zeros_like', (['az'], {}), '(az)\n', (789, 793), True, 'import numpy as np\n'), ((852, 884), 'wradlib.georef.create_osr', 'georef.create_osr', (['"""dwd-radolan"""'], {}), "('dwd-radolan')\n", (869, 884), False, 'from wradlib import georef, util, vis\n'), ((899, 945), 'wradlib.georef.create_xarray_dataarray', 'georef.create_xarray_dataarray', (['img', 'r', 'az', 'th'], {}), '(img, r, az, th)\n', (929, 945), False, 'from wradlib import georef, util, vis\n'), ((959, 1005), 'wradlib.georef.georeference_dataset', 'georef.georeference_dataset', (['da_ppi'], {'proj': 'None'}), '(da_ppi, proj=None)\n', (986, 1005), False, 'from wradlib import georef, util, vis\n'), ((1019, 1072), 'wradlib.georef.create_xarray_dataarray', 'georef.create_xarray_dataarray', (['img[0:90]', 'r', 'az1', 'el'], {}), '(img[0:90], r, az1, el)\n', (1049, 1072), False, 'from wradlib import georef, util, vis\n'), ((1086, 1132), 'wradlib.georef.georeference_dataset', 'georef.georeference_dataset', (['da_rhi'], {'proj': 'None'}), '(da_rhi, proj=None)\n', (1113, 1132), False, 'from wradlib import georef, 
util, vis\n'), ((3716, 3790), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('cartopy' not in sys.modules)"], {'reason': '"""without Cartopy"""'}), "('cartopy' not in sys.modules, reason='without Cartopy')\n", (3734, 3790), False, 'import pytest\n'), ((804, 820), 'numpy.ones_like', 'np.ones_like', (['el'], {}), '(el)\n', (816, 820), True, 'import numpy as np\n'), ((1180, 1230), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'re': '(6371000.0)', 'ke': '(4.0 / 3.0)'}), '(self.img, re=6371000.0, ke=4.0 / 3.0)\n', (1192, 1230), False, 'from wradlib import georef, util, vis\n'), ((1250, 1317), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r', 'self.az'], {'re': '(6371000.0)', 'ke': '(4.0 / 3.0)'}), '(self.img, self.r, self.az, re=6371000.0, ke=4.0 / 3.0)\n', (1262, 1317), False, 'from wradlib import georef, util, vis\n'), ((1337, 1411), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r', 'self.az'], {'re': '(6371000.0)', 'ke': '(4.0 / 3.0)', 'ax': 'ax'}), '(self.img, self.r, self.az, re=6371000.0, ke=4.0 / 3.0, ax=ax)\n', (1349, 1411), False, 'from wradlib import georef, util, vis\n'), ((1453, 1528), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r', 'self.az'], {'re': '(6371000.0)', 'ke': '(4.0 / 3.0)', 'ax': '(212)'}), '(self.img, self.r, self.az, re=6371000.0, ke=4.0 / 3.0, ax=212)\n', (1465, 1528), False, 'from wradlib import georef, util, vis\n'), ((1570, 1592), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {}), '(self.img)\n', (1582, 1592), False, 'from wradlib import georef, util, vis\n'), ((1601, 1657), 'wradlib.vis.plot_ppi_crosshair', 'vis.plot_ppi_crosshair', ([], {'site': '(0, 0, 0)', 'ranges': '[2, 4, 8]'}), '(site=(0, 0, 0), ranges=[2, 4, 8])\n', (1623, 1657), False, 'from wradlib import georef, util, vis\n'), ((1874, 1944), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r'], {'site': '(10.0, 45.0, 0.0)', 'proj': 'self.proj'}), '(self.img, self.r, site=(10.0, 45.0, 0.0), 
proj=self.proj)\n', (1886, 1944), False, 'from wradlib import georef, util, vis\n'), ((2197, 2235), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'func': '"""contour"""'}), "(self.img, func='contour')\n", (2209, 2235), False, 'from wradlib import georef, util, vis\n'), ((2253, 2292), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'func': '"""contourf"""'}), "(self.img, func='contourf')\n", (2265, 2292), False, 'from wradlib import georef, util, vis\n'), ((2310, 2381), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r', 'self.az'], {'proj': 'self.proj', 'site': '(0, 0, 0)'}), '(self.img, self.r, self.az, proj=self.proj, site=(0, 0, 0))\n', (2322, 2381), False, 'from wradlib import georef, util, vis\n'), ((3546, 3557), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (3555, 3557), True, 'import matplotlib.pyplot as pl\n'), ((4583, 4614), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {}), '(self.img[0:90, :])\n', (4595, 4614), False, 'from wradlib import georef, util, vis\n'), ((4632, 4675), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'th_res': '(0.5)'}), '(self.img[0:90, :], th_res=0.5)\n', (4644, 4675), False, 'from wradlib import georef, util, vis\n'), ((4693, 4744), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'th_res': '(0.5)', 'ax': '(212)'}), '(self.img[0:90, :], th_res=0.5, ax=212)\n', (4705, 4744), False, 'from wradlib import georef, util, vis\n'), ((4846, 4893), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'func': '"""contour"""'}), "(self.img[0:90, :], func='contour')\n", (4858, 4893), False, 'from wradlib import georef, util, vis\n'), ((4911, 4959), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'func': '"""contourf"""'}), "(self.img[0:90, :], func='contourf')\n", (4923, 4959), False, 'from wradlib import georef, util, vis\n'), ((5832, 5875), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 
'proj': '"""cg"""'}), "(self.img, elev=2.0, proj='cg')\n", (5844, 5875), False, 'from wradlib import georef, util, vis\n'), ((5895, 5954), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 'proj': '"""cg"""', 'site': '(0, 0, 0)'}), "(self.img, elev=2.0, proj='cg', site=(0, 0, 0))\n", (5907, 5954), False, 'from wradlib import georef, util, vis\n'), ((5974, 6026), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 'proj': '"""cg"""', 'ax': 'cgax'}), "(self.img, elev=2.0, proj='cg', ax=cgax)\n", (5986, 6026), False, 'from wradlib import georef, util, vis\n'), ((6045, 6062), 'matplotlib.pyplot.subplots', 'pl.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (6056, 6062), True, 'import matplotlib.pyplot as pl\n'), ((6190, 6241), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 'proj': '"""cg"""', 'ax': '(111)'}), "(self.img, elev=2.0, proj='cg', ax=111)\n", (6202, 6241), False, 'from wradlib import georef, util, vis\n'), ((6261, 6312), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 'proj': '"""cg"""', 'ax': '(121)'}), "(self.img, elev=2.0, proj='cg', ax=121)\n", (6273, 6312), False, 'from wradlib import georef, util, vis\n'), ((6332, 6365), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'proj': '"""cg"""'}), "(self.img, proj='cg')\n", (6344, 6365), False, 'from wradlib import georef, util, vis\n'), ((6385, 6434), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'func': '"""contour"""', 'proj': '"""cg"""'}), "(self.img, func='contour', proj='cg')\n", (6397, 6434), False, 'from wradlib import georef, util, vis\n'), ((6454, 6504), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'func': '"""contourf"""', 'proj': '"""cg"""'}), "(self.img, func='contourf', proj='cg')\n", (6466, 6504), False, 'from wradlib import georef, util, vis\n'), ((6524, 6574), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'func': '"""contourf"""', 'proj': '"""cg"""'}), "(self.img, 
func='contourf', proj='cg')\n", (6536, 6574), False, 'from wradlib import georef, util, vis\n'), ((6627, 6669), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'proj': '"""cg"""'}), "(self.img[0:90, :], proj='cg')\n", (6639, 6669), False, 'from wradlib import georef, util, vis\n'), ((6689, 6740), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'proj': '"""cg"""', 'ax': 'cgax'}), "(self.img[0:90, :], proj='cg', ax=cgax)\n", (6701, 6740), False, 'from wradlib import georef, util, vis\n'), ((6759, 6776), 'matplotlib.pyplot.subplots', 'pl.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (6770, 6776), True, 'import matplotlib.pyplot as pl\n'), ((6903, 6957), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'th_res': '(0.5)', 'proj': '"""cg"""'}), "(self.img[0:90, :], th_res=0.5, proj='cg')\n", (6915, 6957), False, 'from wradlib import georef, util, vis\n'), ((6977, 7019), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'proj': '"""cg"""'}), "(self.img[0:90, :], proj='cg')\n", (6989, 7019), False, 'from wradlib import georef, util, vis\n'), ((7158, 7216), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'func': '"""contour"""', 'proj': '"""cg"""'}), "(self.img[0:90, :], func='contour', proj='cg')\n", (7170, 7216), False, 'from wradlib import georef, util, vis\n'), ((7236, 7295), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'func': '"""contourf"""', 'proj': '"""cg"""'}), "(self.img[0:90, :], func='contourf', proj='cg')\n", (7248, 7295), False, 'from wradlib import georef, util, vis\n'), ((7354, 7369), 'wradlib.vis.create_cg', 'vis.create_cg', ([], {}), '()\n', (7367, 7369), False, 'from wradlib import georef, util, vis\n'), ((7397, 7423), 'wradlib.vis.create_cg', 'vis.create_cg', ([], {'subplot': '(121)'}), '(subplot=121)\n', (7410, 7423), False, 'from wradlib import georef, util, vis\n'), ((7521, 7545), 'numpy.arange', 'np.arange', (['(0)', '(10000)', '(100)'], {}), '(0, 
10000, 100)\n', (7530, 7545), True, 'import numpy as np\n'), ((7562, 7581), 'numpy.arange', 'np.arange', (['(1)', '(30)', '(3)'], {}), '(1, 30, 3)\n', (7571, 7581), True, 'import numpy as np\n'), ((7624, 7667), 'wradlib.vis.plot_scan_strategy', 'vis.plot_scan_strategy', (['ranges', 'elevs', 'site'], {}), '(ranges, elevs, site)\n', (7646, 7667), False, 'from wradlib import georef, util, vis\n'), ((7676, 7728), 'wradlib.vis.plot_scan_strategy', 'vis.plot_scan_strategy', (['ranges', 'elevs', 'site'], {'cg': '(True)'}), '(ranges, elevs, site, cg=True)\n', (7698, 7728), False, 'from wradlib import georef, util, vis\n'), ((7835, 7859), 'numpy.arange', 'np.arange', (['(0)', '(10000)', '(100)'], {}), '(0, 10000, 100)\n', (7844, 7859), True, 'import numpy as np\n'), ((7876, 7895), 'numpy.arange', 'np.arange', (['(1)', '(30)', '(3)'], {}), '(1, 30, 3)\n', (7885, 7895), True, 'import numpy as np\n'), ((7938, 7995), 'wradlib.vis.plot_scan_strategy', 'vis.plot_scan_strategy', (['ranges', 'elevs', 'site'], {'terrain': '(True)'}), '(ranges, elevs, site, terrain=True)\n', (7960, 7995), False, 'from wradlib import georef, util, vis\n'), ((8004, 8070), 'wradlib.vis.plot_scan_strategy', 'vis.plot_scan_strategy', (['ranges', 'elevs', 'site'], {'cg': '(True)', 'terrain': '(True)'}), '(ranges, elevs, site, cg=True, terrain=True)\n', (8026, 8070), False, 'from wradlib import georef, util, vis\n'), ((8123, 8139), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (8132, 8139), True, 'import numpy as np\n'), ((8152, 8168), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (8161, 8168), True, 'import numpy as np\n'), ((8181, 8196), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (8190, 8196), True, 'import numpy as np\n'), ((8386, 8441), 'wradlib.vis.plot_plan_and_vert', 'vis.plot_plan_and_vert', (['x', 'y', 'z', 'dataxy', 'datazx', 'datazy'], {}), '(x, y, z, dataxy, datazx, datazy)\n', (8408, 8441), False, 'from wradlib import georef, util, 
vis\n'), ((8450, 8519), 'wradlib.vis.plot_plan_and_vert', 'vis.plot_plan_and_vert', (['x', 'y', 'z', 'dataxy', 'datazx', 'datazy'], {'title': '"""Test"""'}), "(x, y, z, dataxy, datazx, datazy, title='Test')\n", (8472, 8519), False, 'from wradlib import georef, util, vis\n'), ((8587, 8654), 'wradlib.vis.plot_plan_and_vert', 'vis.plot_plan_and_vert', (['x', 'y', 'z', 'dataxy', 'datazx', 'datazy'], {'saveto': 'tmp'}), '(x, y, z, dataxy, datazx, datazy, saveto=tmp)\n', (8609, 8654), False, 'from wradlib import georef, util, vis\n'), ((8663, 8703), 'wradlib.vis.plot_max_plan_and_vert', 'vis.plot_max_plan_and_vert', (['x', 'y', 'z', 'vol'], {}), '(x, y, z, vol)\n', (8689, 8703), False, 'from wradlib import georef, util, vis\n'), ((8753, 8766), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {}), '()\n', (8764, 8766), True, 'import matplotlib.pyplot as pl\n'), ((8779, 8795), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (8788, 8795), True, 'import numpy as np\n'), ((8808, 8824), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (8817, 8824), True, 'import numpy as np\n'), ((8838, 8855), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (8847, 8855), True, 'import numpy as np\n'), ((8864, 8885), 'wradlib.vis.add_lines', 'vis.add_lines', (['ax', 'xy'], {}), '(ax, xy)\n', (8877, 8885), False, 'from wradlib import georef, util, vis\n'), ((8979, 8992), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {}), '()\n', (8990, 8992), True, 'import matplotlib.pyplot as pl\n'), ((9005, 9021), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (9014, 9021), True, 'import numpy as np\n'), ((9034, 9050), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (9043, 9050), True, 'import numpy as np\n'), ((9064, 9081), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (9073, 9081), True, 'import numpy as np\n'), ((9090, 9113), 'wradlib.vis.add_patches', 'vis.add_patches', (['ax', 'xy'], {}), '(ax, xy)\n', 
(9105, 9113), False, 'from wradlib import georef, util, vis\n'), ((2395, 2420), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2407, 2420), False, 'import pytest\n'), ((2443, 2505), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'site': '(10.0, 45.0, 0.0)', 'proj': 'self.proj'}), '(self.img, site=(10.0, 45.0, 0.0), proj=self.proj)\n', (2455, 2505), False, 'from wradlib import georef, util, vis\n'), ((2519, 2544), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2531, 2544), False, 'import pytest\n'), ((2567, 2616), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'proj': 'None', 'site': '(0, 0, 0)'}), '(self.img, proj=None, site=(0, 0, 0))\n', (2579, 2616), False, 'from wradlib import georef, util, vis\n'), ((2630, 2654), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2643, 2654), False, 'import pytest\n'), ((2677, 2715), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'proj': 'self.proj'}), '(self.img, proj=self.proj)\n', (2689, 2715), False, 'from wradlib import georef, util, vis\n'), ((2729, 2754), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2742, 2754), False, 'import pytest\n'), ((2777, 2828), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'site': '(0, 0)', 'proj': 'self.proj'}), '(self.img, site=(0, 0), proj=self.proj)\n', (2789, 2828), False, 'from wradlib import georef, util, vis\n'), ((2842, 2867), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2855, 2867), False, 'import pytest\n'), ((2881, 2934), 'wradlib.vis.plot_ppi_crosshair', 'vis.plot_ppi_crosshair', ([], {'site': '(0, 0)', 'ranges': '[2, 4, 8]'}), '(site=(0, 0), ranges=[2, 4, 8])\n', (2903, 2934), False, 'from wradlib import georef, util, vis\n'), ((3447, 3471), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3460, 3471), False, 'import pytest\n'), ((3605, 3629), 'pytest.raises', 'pytest.raises', 
(['TypeError'], {}), '(TypeError)\n', (3618, 3629), False, 'import pytest\n'), ((4222, 4276), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img', 'self.r', 'self.az'], {'proj': 'map_proj'}), '(self.img, self.r, self.az, proj=map_proj)\n', (4234, 4276), False, 'from wradlib import georef, util, vis\n'), ((4358, 4385), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4367, 4385), True, 'import matplotlib.pyplot as pl\n'), ((6076, 6100), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6089, 6100), False, 'import pytest\n'), ((6114, 6170), 'wradlib.vis.plot_ppi', 'vis.plot_ppi', (['self.img'], {'elev': '(2.0)', 'proj': '"""cg"""', 'ax': 'ax[0, 0]'}), "(self.img, elev=2.0, proj='cg', ax=ax[0, 0])\n", (6126, 6170), False, 'from wradlib import georef, util, vis\n'), ((6790, 6814), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6803, 6814), False, 'import pytest\n'), ((6828, 6883), 'wradlib.vis.plot_rhi', 'vis.plot_rhi', (['self.img[0:90, :]'], {'proj': '"""cg"""', 'ax': 'ax[0, 0]'}), "(self.img[0:90, :], proj='cg', ax=ax[0, 0])\n", (6840, 6883), False, 'from wradlib import georef, util, vis\n'), ((8534, 8573), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+b"""'}), "(mode='w+b')\n", (8561, 8573), False, 'import tempfile\n'), ((8912, 8926), 'numpy.array', 'np.array', (['[xy]'], {}), '([xy])\n', (8920, 8926), True, 'import numpy as np\n'), ((9142, 9156), 'numpy.array', 'np.array', (['[xy]'], {}), '([xy])\n', (9150, 9156), True, 'import numpy as np\n'), ((4032, 4097), 'pytest.skip', 'pytest.skip', (['"""fails for cartopy < 0.18.0 and matplotlib >= 3.3.0"""'], {}), "('fails for cartopy < 0.18.0 and matplotlib >= 3.3.0')\n", (4043, 4097), False, 'import pytest\n'), ((4796, 4809), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4805, 4809), True, 'import numpy as np\n'), ((4814, 4827), 'numpy.arange', 'np.arange', (['(90)'], {}), 
'(90)\n', (4823, 4827), True, 'import numpy as np\n'), ((5036, 5049), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5045, 5049), True, 'import numpy as np\n'), ((5066, 5079), 'numpy.arange', 'np.arange', (['(90)'], {}), '(90)\n', (5075, 5079), True, 'import numpy as np\n'), ((7086, 7099), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7095, 7099), True, 'import numpy as np\n'), ((7104, 7117), 'numpy.arange', 'np.arange', (['(90)'], {}), '(90)\n', (7113, 7117), True, 'import numpy as np\n'), ((3864, 3897), 'distutils.version.LooseVersion', 'LooseVersion', (['cartopy.__version__'], {}), '(cartopy.__version__)\n', (3876, 3897), False, 'from distutils.version import LooseVersion\n'), ((3900, 3922), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.18.0"""'], {}), "('0.18.0')\n", (3912, 3922), False, 'from distutils.version import LooseVersion\n'), ((3946, 3975), 'distutils.version.LooseVersion', 'LooseVersion', (['mpl.__version__'], {}), '(mpl.__version__)\n', (3958, 3975), False, 'from distutils.version import LooseVersion\n'), ((3979, 4000), 'distutils.version.LooseVersion', 'LooseVersion', (['"""3.3.0"""'], {}), "('3.3.0')\n", (3991, 4000), False, 'from distutils.version import LooseVersion\n')] |
import os
import torch
import torchvision
import glob
import numpy as np
import torch.optim as optim
import cv2
import matplotlib
import matplotlib.pyplot as plt
from skimage import io, transform
from skimage.transform import resize
from torch.autograd import Variable
import d_net
import config
import time
import vanilla_gan
import vanilla_gan.vanilla_gan
import vanilla_gan.video_gan
import data_loader
import loss_funs
import torch.nn as nn
import torch.nn.functional as F
import config
dtype = config.dtype
class Discriminator(nn.Module):
    """Four-layer convolutional GAN discriminator.

    Downsamples a 3-channel image through three stride-2, 4x4 convolutions
    (3 -> 32 -> 64 -> 128 channels, each followed by batch norm + ReLU),
    then maps to a single-channel score map with a final stride-1
    convolution.  A sigmoid squashes the score to (0, 1) unless
    ``config.use_wgan_loss`` is set, in which case the raw critic score
    is returned (WGAN critics are unbounded).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # NOTE(review): the layers are named "*_depthwise" but use
        # groups=1, i.e. they are ordinary convolutions; the names are
        # kept so existing checkpoints/state dicts keep loading.
        self.conv1_depthwise = self._make_conv(3, 32, stride=2)
        self.bn1 = nn.BatchNorm2d(32).type(dtype)
        self.conv2_depthwise = self._make_conv(32, 64, stride=2)
        self.bn2 = nn.BatchNorm2d(64).type(dtype)
        self.conv3_depthwise = self._make_conv(64, 128, stride=2)
        self.bn3 = nn.BatchNorm2d(128).type(dtype)
        self.conv4_depthwise = self._make_conv(128, 1, stride=1)
        self.sigmoid = nn.Sigmoid()

    @staticmethod
    def _make_conv(nin, nout, stride):
        """Build a 4x4 Conv2d with Xavier-normal initialized weights."""
        conv = nn.Conv2d(nin, nout, 4, stride=stride, padding=1, groups=1).type(dtype)
        # xavier_normal_ is the in-place initializer; nn.init.xavier_normal
        # is a deprecated alias with identical behavior.
        nn.init.xavier_normal_(conv.weight)
        return conv

    def forward(self, x):
        """Score a batch of images.

        Args:
            x: image batch of shape (N, 3, H, W).

        Returns:
            Score map of shape (N, 1, H', W'); a probability map for the
            vanilla GAN, a raw critic score under WGAN loss.
        """
        out = x.type(dtype)
        out = F.relu(self.bn1(self.conv1_depthwise(out)))
        out = F.relu(self.bn2(self.conv2_depthwise(out)))
        out = F.relu(self.bn3(self.conv3_depthwise(out)))
        out = self.conv4_depthwise(out)
        if not config.use_wgan_loss:
            out = self.sigmoid(out)
        return out
class Generator(nn.Module):
    """DCGAN-style generator: a (B, 100, 1, 1) noise vector is upsampled by
    four transposed convolutions to a 3-channel image in [-1, 1].

    NOTE(review): a second ``Generator`` class is defined later in this file
    and shadows this one at import time.
    """
    def __init__(self):
        super(Generator, self).__init__()
        # 100 -> 128 channels; kernel 4 / stride 4 expands 1x1 input to 4x4.
        self.deconv1 = nn.ConvTranspose2d(100, 128, 4, stride=4, padding=0).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        # Each remaining stride-2 deconv doubles the spatial size while
        # halving the channel count, ending at 3 output channels.
        self.deconv2 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(64).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(32).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 4, stride=2, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
    def forward(self, x):
        # Cast the input before the first (dtype-typed) layer.
        out = self.deconv1(x.type(dtype))
        # TODO: Investigate putting Batch Norm before versus after the RELU layer
        # Resources:
        # https://www.reddit.com/r/MachineLearning/comments/67gonq/d_batch_normalization_before_or_after_relu/
        # https://www.youtube.com/watch?v=Xogn6veSyxA&feature=youtu.be&t=325
        out = self.bn1(out)
        out = F.relu(out)
        out = self.deconv2(out)
        out = self.bn2(out)
        out = F.relu(out)
        out = self.deconv3(out)
        out = self.bn3(out)
        out = F.relu(out)
        out = self.deconv4(out)
        # tanh maps the output into [-1, 1] (normalized image range).
        out = torch.tanh(out)
        return out
class GeneratorSkipConnections(nn.Module):
    """Upsampling generator with a small residual ("resblock") refinement
    after each upsampling stage, built from depthwise + pointwise
    transposed convolutions.

    Assumes a (B, 100, 1, 1) noise input (stage 1 uses kernel 4 / stride 4,
    the rest stride 2) — TODO confirm against the caller.
    """
    def make_resblock(self, map_size):
        """Build the 5 layers of a residual block (two depthwise/pointwise
        transposed-conv pairs with a BatchNorm between them) as an
        nn.ModuleList so the parameters are registered on the module."""
        conv1_depthwise = nn.ConvTranspose2d(
            map_size, map_size, 3, stride=1, padding=1, groups=map_size
        ).type(dtype)
        conv1_pointwise = nn.ConvTranspose2d(map_size, map_size, 1).type(dtype)
        nn.init.xavier_normal(conv1_depthwise.weight)
        nn.init.xavier_normal(conv1_pointwise.weight)
        bn = nn.BatchNorm2d(map_size).type(dtype)
        conv2_depthwise = nn.ConvTranspose2d(
            map_size, map_size, 3, stride=1, padding=1, groups=map_size
        ).type(dtype)
        conv2_pointwise = nn.ConvTranspose2d(map_size, map_size, 1).type(dtype)
        nn.init.xavier_normal(conv2_depthwise.weight)
        nn.init.xavier_normal(conv2_pointwise.weight)
        resblock = nn.ModuleList()
        resblock.append(conv1_depthwise)
        resblock.append(conv1_pointwise)
        resblock.append(bn)
        resblock.append(conv2_depthwise)
        resblock.append(conv2_pointwise)
        return resblock
    def apply_resblock(self, out, resblock):
        """Run `out` through the resblock layers (ReLU after the BatchNorm)."""
        out = resblock[0](out)
        out = resblock[1](out)
        out = resblock[2](out)
        out = F.relu(out)
        out = resblock[3](out)
        out = resblock[4](out)
        return out
    def __init__(self):
        super(GeneratorSkipConnections, self).__init__()
        # TODO: Change convolutions to DepthWise Seperable convolutions
        # TODO: Need to fix Mode Collapse that is occuring in the GAN
        # More info: https://www.quora.com/What-does-it-mean-if-all-produced-images-of-a-GAN-look-the-same
        # Upsampling layer
        nin, nout = 100, 128
        self.deconv1_depthwise = nn.ConvTranspose2d(
            nin, nin, 4, stride=4, padding=0, groups=nin
        ).type(dtype)
        self.deconv1_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.deconv1_depthwise.weight)
        nn.init.xavier_normal(self.deconv1_pointwise.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        # Resnet block
        self.resblock1A = self.make_resblock(128)
        # Upsampling layer
        nin, nout = 128, 64
        self.deconv2_depthwise = nn.ConvTranspose2d(
            nin, nin, 4, stride=2, padding=1, groups=nin
        ).type(dtype)
        self.deconv2_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.deconv2_depthwise.weight)
        nn.init.xavier_normal(self.deconv2_pointwise.weight)
        self.bn2 = nn.BatchNorm2d(64).type(dtype)
        # Resnet block
        self.resblock2A = self.make_resblock(64)
        # Upsampling layer 3
        nin, nout = 64, 32
        self.deconv3_depthwise = nn.ConvTranspose2d(
            nin, nin, 4, stride=2, padding=1, groups=nin
        ).type(dtype)
        self.deconv3_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.deconv3_depthwise.weight)
        nn.init.xavier_normal(self.deconv3_pointwise.weight)
        self.bn3 = nn.BatchNorm2d(32).type(dtype)
        # Resnet block
        self.resblock3A = self.make_resblock(32)
        # Upsampling layer 4
        nin, nout = 32, 3
        self.deconv4_depthwise = nn.ConvTranspose2d(
            nin, nin, 4, stride=2, padding=1, groups=nin
        ).type(dtype)
        self.deconv4_pointwise = nn.ConvTranspose2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.deconv4_depthwise.weight)
        nn.init.xavier_normal(self.deconv4_pointwise.weight)
        # Resnet block
        self.resblock4A = self.make_resblock(3)
    def forward(self, x):
        x = x.type(dtype)
        out = x
        # Multi scale image generation seems quite similar to using ResNet skip connections
        # In this case, we only use a single Resnet block instead of the entire Generator so the network is small enough to run on my laptop
        #
        # Upsample 1
        out = self.deconv1_depthwise(out)
        out = self.deconv1_pointwise(out)
        out = self.bn1(out)
        # NOTE(review): `upsampled` is assigned at each stage but never read.
        out = upsampled = F.relu(out)
        # Resnet block 1
        out += self.apply_resblock(out.clone(), self.resblock1A)
        # Upsample 2
        out = self.deconv2_depthwise(out)
        out = self.deconv2_pointwise(out)
        out = self.bn2(out)
        out = upsampled = F.relu(out)
        # Resnet block 2
        out += self.apply_resblock(out.clone(), self.resblock2A)
        # Upsample 3
        out = self.deconv3_depthwise(out)
        out = self.deconv3_pointwise(out)
        out = self.bn3(out)
        out = upsampled = F.relu(out)
        # Resnet block 3
        out += self.apply_resblock(out.clone(), self.resblock3A)
        # Upsample 4
        out = self.deconv4_depthwise(out)
        out = self.deconv4_pointwise(out)
        # Resnet block 4
        out += self.apply_resblock(out.clone(), self.resblock4A)
        # tanh maps the output into [-1, 1].
        out = torch.tanh(out)
        return out
class Discriminator(nn.Module):
    """Four strided-conv discriminator: (B, 3, H, W) -> 1-channel score map.

    NOTE(review): this is a byte-for-byte duplicate of a ``Discriminator``
    class defined earlier in this file; being later, THIS definition is the
    one in effect at import time. Consider deleting one of the two.
    """
    def __init__(self):
        super(Discriminator, self).__init__()
        # Layer 1: 3 -> 32 channels; stride 2 halves the spatial size.
        # groups=1 makes these plain convolutions despite the "_depthwise"
        # names; the pointwise halves are commented out throughout.
        nin, nout = 3, 32
        self.conv1_depthwise = nn.Conv2d(
            nin, nout, 4, stride=2, padding=1, groups=1
        ).type(dtype)
        # self.conv1_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)
        # nn.init.xavier_normal is the deprecated alias of xavier_normal_.
        nn.init.xavier_normal(self.conv1_depthwise.weight)
        # nn.init.xavier_normal(self.conv1_pointwise.weight)
        self.bn1 = nn.BatchNorm2d(32).type(dtype)
        # Layer 2: 32 -> 64 channels, stride 2.
        nin, nout = 32, 64
        self.conv2_depthwise = nn.Conv2d(
            nin, nout, 4, stride=2, padding=1, groups=1
        ).type(dtype)
        # self.conv2_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.conv2_depthwise.weight)
        # nn.init.xavier_normal(self.conv2_pointwise.weight)
        self.bn2 = nn.BatchNorm2d(64).type(dtype)
        # Layer 3: 64 -> 128 channels, stride 2.
        nin, nout = 64, 128
        self.conv3_depthwise = nn.Conv2d(
            nin, nout, 4, stride=2, padding=1, groups=1
        ).type(dtype)
        # self.conv3_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.conv3_depthwise.weight)
        # nn.init.xavier_normal(self.conv3_pointwise.weight)
        self.bn3 = nn.BatchNorm2d(128).type(dtype)
        # Layer 4: 128 -> 1 channel score map, stride 1.
        nin, nout = 128, 1
        self.conv4_depthwise = nn.Conv2d(
            nin, nout, 4, stride=1, padding=1, groups=1
        ).type(dtype)
        # self.conv4_pointwise = nn.Conv2d(nin, nout, 1).type(dtype)
        nn.init.xavier_normal(self.conv4_depthwise.weight)
        # nn.init.xavier_normal(self.conv4_pointwise.weight)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        """Score a batch of images; the final sigmoid is skipped under WGAN
        loss (WGAN critics output unbounded scores)."""
        x = x.type(dtype)
        # Conv 1
        out = self.conv1_depthwise(x)
        # out = self.conv1_pointwise(out)
        out = self.bn1(out)
        out = F.relu(out)
        # Conv 2
        out = self.conv2_depthwise(out)
        # out = self.conv2_pointwise(out)
        out = self.bn2(out)
        out = F.relu(out)
        # Conv 3
        out = self.conv3_depthwise(out)
        # out = self.conv3_pointwise(out)
        out = self.bn3(out)
        out = F.relu(out)
        # Conv 4
        out = self.conv4_depthwise(out)
        # out = self.conv4_pointwise(out)
        if not config.use_wgan_loss:
            out = self.sigmoid(out)
        return out
class Generator(nn.Module):
    """Four-layer transposed-conv generator that preserves spatial size
    (every layer uses kernel 3, stride 1, padding 1): a 12-channel input
    maps to a 3-channel image in [-1, 1].

    NOTE(review): this definition shadows the earlier ``Generator`` class
    in this file; being later, this one is in effect at import time.
    """
    def __init__(self):
        super(Generator, self).__init__()
        self.deconv1 = nn.ConvTranspose2d(12, 128, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(64).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(32).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
    def forward(self, x):
        # BUG FIX: cast the *input* to dtype before the first layer. The
        # original applied .type(dtype) to the conv output, so an input of a
        # different tensor type fed to the dtype-typed weights would fail.
        # This also matches the first Generator definition in this file.
        out = self.deconv1(x.type(dtype))
        out = self.bn1(out)
        out = F.relu(out)
        out = self.deconv2(out)
        out = self.bn2(out)
        out = F.relu(out)
        out = self.deconv3(out)
        out = self.bn3(out)
        out = F.relu(out)
        out = self.deconv4(out)
        # tanh maps the output into [-1, 1] (normalized image range).
        out = torch.tanh(out)
        return out
class Gen1(nn.Module):
    """Coarsest-scale sub-generator: 12-channel input -> 3-channel output,
    spatial size preserved (all layers kernel 3, stride 1, padding 1).
    Layers are also appended to an nn.ModuleList and applied in order."""
    def __init__(self):
        super(Gen1, self).__init__()
        # Generator #1
        self.g1 = nn.ModuleList()
        self.deconv1 = nn.Conv2d(12, 128, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        self.deconv2 = nn.Conv2d(128, 256, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(256).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(128).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(128, 3, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
        # Register the execution order: conv -> BN -> ReLU, x3, then the
        # final conv (no activation; the caller applies tanh).
        self.g1.append(self.deconv1)
        self.g1.append(self.bn1)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv2)
        self.g1.append(self.bn2)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv3)
        self.g1.append(self.bn3)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv4)
    def forward(self, x):
        # Apply every registered layer sequentially.
        out = x.type(dtype)
        for layer in self.g1:
            out = layer(out)
        return out
class Gen2(nn.Module):
    """Second-scale sub-generator: 15-channel input (presumably the
    12-channel input concatenated with the 3-channel upsampled previous
    prediction — see VideoGANGenerator.forward) -> 3-channel output.
    Spatial size is preserved; 5x5 kernels at the outer layers."""
    def __init__(self):
        super(Gen2, self).__init__()
        # Generator #2
        self.g1 = nn.ModuleList()
        self.deconv1 = nn.Conv2d(15, 128, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        self.deconv2 = nn.Conv2d(128, 256, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(256).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(128).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(128, 3, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
        # Execution order: conv -> BN -> ReLU, x3, then the final conv.
        self.g1.append(self.deconv1)
        self.g1.append(self.bn1)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv2)
        self.g1.append(self.bn2)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv3)
        self.g1.append(self.bn3)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv4)
    def forward(self, x):
        # Apply every registered layer sequentially.
        out = x.type(dtype)
        for layer in self.g1:
            out = layer(out)
        return out
class Gen3(nn.Module):
    """Third-scale sub-generator: 15-channel input -> 3-channel output,
    six conv layers, spatial size preserved (5x5 kernels at the outer
    layers, 3x3 inside). Layers run in registration order via forward."""
    def __init__(self):
        super(Gen3, self).__init__()
        # Generator #3
        self.g1 = nn.ModuleList()
        self.deconv1 = nn.Conv2d(15, 128, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        self.deconv2 = nn.Conv2d(128, 256, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(256).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(256, 512, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(512).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
        self.bn4 = nn.BatchNorm2d(256).type(dtype)
        self.deconv5 = nn.ConvTranspose2d(256, 128, 3, stride=1, padding=1).type(dtype)
        nn.init.xavier_normal(self.deconv5.weight)
        self.bn5 = nn.BatchNorm2d(128).type(dtype)
        self.deconv6 = nn.ConvTranspose2d(128, 3, 5, stride=1, padding=2).type(dtype)
        # BUG FIX: the original re-initialized self.deconv4.weight here
        # (copy-paste), leaving deconv6 at its default initialization.
        nn.init.xavier_normal(self.deconv6.weight)
        # Execution order: conv -> BN -> ReLU, x5, then the final conv.
        self.g1.append(self.deconv1)
        self.g1.append(self.bn1)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv2)
        self.g1.append(self.bn2)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv3)
        self.g1.append(self.bn3)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv4)
        self.g1.append(self.bn4)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv5)
        self.g1.append(self.bn5)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv6)
    def forward(self, x):
        # Apply every registered layer sequentially.
        out = x.type(dtype)
        for layer in self.g1:
            out = layer(out)
        return out
class Gen4(nn.Module):
    """Finest-scale sub-generator: 15-channel input -> 3-channel output,
    six conv layers, spatial size preserved (7x7 kernels at the outer
    layers, 5x5 inside). Layers run in registration order via forward."""
    def __init__(self):
        super(Gen4, self).__init__()
        # Generator #4
        self.g1 = nn.ModuleList()
        self.deconv1 = nn.Conv2d(15, 128, 7, stride=1, padding=3).type(dtype)
        nn.init.xavier_normal(self.deconv1.weight)
        self.bn1 = nn.BatchNorm2d(128).type(dtype)
        self.deconv2 = nn.Conv2d(128, 256, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv2.weight)
        self.bn2 = nn.BatchNorm2d(256).type(dtype)
        self.deconv3 = nn.ConvTranspose2d(256, 512, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv3.weight)
        self.bn3 = nn.BatchNorm2d(512).type(dtype)
        self.deconv4 = nn.ConvTranspose2d(512, 256, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv4.weight)
        self.bn4 = nn.BatchNorm2d(256).type(dtype)
        self.deconv5 = nn.ConvTranspose2d(256, 128, 5, stride=1, padding=2).type(dtype)
        nn.init.xavier_normal(self.deconv5.weight)
        self.bn5 = nn.BatchNorm2d(128).type(dtype)
        self.deconv6 = nn.ConvTranspose2d(128, 3, 7, stride=1, padding=3).type(dtype)
        # BUG FIX: the original re-initialized self.deconv4.weight here
        # (copy-paste), leaving deconv6 at its default initialization.
        nn.init.xavier_normal(self.deconv6.weight)
        # Execution order: conv -> BN -> ReLU, x5, then the final conv.
        self.g1.append(self.deconv1)
        self.g1.append(self.bn1)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv2)
        self.g1.append(self.bn2)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv3)
        self.g1.append(self.bn3)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv4)
        self.g1.append(self.bn4)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv5)
        self.g1.append(self.bn5)
        self.g1.append(nn.ReLU())
        self.g1.append(self.deconv6)
    def forward(self, x):
        # Apply every registered layer sequentially.
        out = x.type(dtype)
        for layer in self.g1:
            out = layer(out)
        return out
class VideoGANGenerator(nn.Module):
    """Multi-scale VideoGAN generator.

    Four sub-generators (Gen1..Gen4) run at increasing resolutions
    (4x4, 8x8, 16x16, input size). Each coarser prediction is upsampled 2x
    by a learned transposed conv (up1..up3), concatenated channel-wise with
    the input downsampled to the next scale, and refined residually.
    (Earlier docstring called this a placeholder copy of the Vanilla GAN
    generator; the body is now the multi-scale composition below.)
    """
    def __init__(self):
        super(VideoGANGenerator, self).__init__()
        # Learned 2x upsamplers (3 -> 3 channels) used between scales.
        self.up1 = nn.ConvTranspose2d(
            3, 3, 3, stride=2, padding=1, output_padding=1
        ).type(dtype)
        self.up2 = nn.ConvTranspose2d(
            3, 3, 3, stride=2, padding=1, output_padding=1
        ).type(dtype)
        self.up3 = nn.ConvTranspose2d(
            3, 3, 3, stride=2, padding=1, output_padding=1
        ).type(dtype)
        # Generator #1
        self.g1 = Gen1()
        self.g2 = Gen2()
        self.g3 = Gen3()
        self.g4 = Gen4()
    def forward(self, x):
        out = x.type(dtype)
        # TODO: Change the image size
        # Downsample the input to each coarser working scale.
        img1 = F.interpolate(out, size=(4, 4))
        img2 = F.interpolate(out, size=(8, 8))
        img3 = F.interpolate(out, size=(16, 16))
        img4 = out
        # Coarsest prediction first, then refine scale by scale: each
        # sub-generator sees the scale's input concatenated with the
        # upsampled previous prediction and adds a residual correction.
        out = self.g1(img1)
        upsample1 = self.up1(out)
        out = upsample1 + self.g2(torch.cat([img2, upsample1], dim=1))
        upsample2 = self.up2(out)
        out = upsample2 + self.g3(torch.cat([img3, upsample2], dim=1))
        upsample3 = self.up3(out)
        out = upsample3 + self.g4(torch.cat([img4, upsample3], dim=1))
        # Apply tanh at the end
        out = torch.tanh(out)
        return out
# Training-mode switches: exactly one of the two flavors is active, and
# VANILLA_GAN is defined as the complement of VIDEO_GAN.
VIDEO_GAN = True
VANILLA_GAN = not VIDEO_GAN
def save_samples(generated_images, iteration, prefix):
    """Tile a batch of images into a near-square grid and save it as
    output/<prefix>_<iteration>.jpg (creating output/ if needed).

    Args:
        generated_images: tensor of shape (N, C, H, W); values used as-is.
        iteration: integer embedded (zero-padded to 5 digits) in the name.
        prefix: file-name prefix, e.g. "video_real" / "video_fake".
    """
    import scipy

    generated_images = generated_images.data.cpu().numpy()
    num_images, channels, cell_h, cell_w = generated_images.shape
    # Near-square grid: ncols = floor(sqrt(N)), nrows = N // ncols.
    # FIX: was int(np.math.floor(num_images / float(ncols))); np.math was
    # removed in NumPy 2.0 and floor-division is equivalent here.
    ncols = int(np.sqrt(num_images))
    nrows = num_images // ncols
    result = np.zeros(
        (cell_h * nrows, cell_w * ncols, channels), dtype=generated_images.dtype
    )
    # Copy each CHW image into its HWC grid cell.
    for i in range(0, nrows):
        for j in range(0, ncols):
            result[
                i * cell_h : (i + 1) * cell_h, j * cell_w : (j + 1) * cell_w, :
            ] = generated_images[i * ncols + j].transpose(1, 2, 0)
    grid = result
    if not os.path.exists("output"):
        os.makedirs("output")
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; when
    # upgrading SciPy, migrate to imageio.imwrite (or plt.imsave).
    scipy.misc.imsave("output/{}_{:05d}.jpg".format(prefix, iteration), grid)
def sample_noise(batch_size, dim):
    """Return uniform noise in [-1, 1) shaped (batch_size, dim, 1, 1).

    torch.rand samples [0, 1); scaling by 2 and shifting by -1 gives
    [-1, 1). The deprecated torch.autograd.Variable wrapper was removed:
    since PyTorch 0.4 plain tensors carry autograd state, so wrapping was
    a no-op.
    """
    result = torch.rand(batch_size, dim) * 2 - 1
    return result.unsqueeze(2).unsqueeze(3)
def get_emoji_loader(emoji_type):
    """Build train/test DataLoaders over image folders under ./emojis.

    Args:
        emoji_type: sub-folder name (e.g. "Windows"); test images are read
            from the sibling folder "Test_<emoji_type>".
    Returns:
        (train_dloader, test_dloader): batches of 16 images resized to 32px
        and normalized channel-wise with mean/std 0.5 (roughly [-1, 1]).
    """
    from torchvision import datasets
    from torchvision import transforms
    from torch.utils.data import DataLoader

    num_workers = 1
    batch_size = 16
    image_size = 32
    transform = transforms.Compose(
        [
            # FIX: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the drop-in replacement.
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    train_path = os.path.join("./emojis", emoji_type)
    test_path = os.path.join("./emojis", "Test_{}".format(emoji_type))
    train_dataset = datasets.ImageFolder(train_path, transform)
    test_dataset = datasets.ImageFolder(test_path, transform)
    # Shuffle only the training split.
    train_dloader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )
    test_dloader = DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
    )
    return train_dloader, test_dloader
def main():
    """Train the VideoGAN discriminator/generator pair.

    Appends loss records to losses.csv, periodically saves sample grids
    under output/, and checkpoints the generator to generator_net.pth
    (with .tmp snapshots during training).
    """
    # Per-scale discriminator hyperparameters handed to
    # d_net.DiscriminatorModel: conv feature-map sizes, kernel sizes, and
    # fully-connected layer sizes for each of the four scales.
    SCALE_CONV_FSM_D = [
        [3, 64],
        [3, 64, 128, 128],
        [3, 128, 256, 256],
        [3, 128, 256, 512, 128],
    ]
    SCALE_KERNEL_SIZES_D = [[3], [3, 3, 3], [5, 5, 5], [7, 7, 5, 5]]
    SCALE_FC_LAYER_SIZES_D = [
        [512, 256, 1],
        [1024, 512, 1],
        [1024, 512, 1],
        [1024, 512, 1],
    ]
    # NOTE(review): records written below contain no trailing newline, so
    # all rows of losses.csv run together on one line.
    loss_fp = open("losses.csv", "w")
    if VIDEO_GAN:
        # TODO: Remove logic.
        if False:  # dead branch kept from an earlier experiment
            video_d_net = vanilla_gan.video_gan.Discriminator()
            video_d_net.type(dtype)
            video_g_net = vanilla_gan.video_gan.Generator()
            video_g_net.type(dtype)
        else:
            video_d_net = d_net.DiscriminatorModel(
                kernel_sizes_list=SCALE_KERNEL_SIZES_D,
                conv_layer_fms_list=SCALE_CONV_FSM_D,
                scale_fc_layer_sizes_list=SCALE_FC_LAYER_SIZES_D,
            )
            video_d_net.type(dtype)
            video_g_net = vanilla_gan.video_gan.VideoGANGenerator()
            video_g_net.type(dtype)
        video_d_optimizer = optim.Adam(video_d_net.parameters(), lr=0.0001)
        video_g_optimizer = optim.Adam(video_g_net.parameters(), lr=0.0001)
        # Load Pacman dataset
        max_size = len(os.listdir("train"))
        pacman_dataloader = data_loader.DataLoader(
            "train", min(max_size, 500000), 16, 32, 32, 4
        )
    # Load emojis
    train_dataloader, _ = get_emoji_loader("Windows")
    count = 0
    for i in range(1, 5000):
        # NOTE(review): `batch` is never used below; the emoji loader only
        # paces the loop while actual training clips come from
        # pacman_dataloader.
        for batch in train_dataloader:
            if VIDEO_GAN:
                # Clips arrive channels-last; rollaxis moves the channel
                # axis to position 1 for PyTorch's NCHW convention.
                clips_x, clips_y = pacman_dataloader.get_train_batch()
                clips_x = torch.tensor(np.rollaxis(clips_x, 3, 1)).type(dtype)
                clips_y = torch.tensor(np.rollaxis(clips_y, 3, 1)).type(dtype)
            if VIDEO_GAN:
                video_d_optimizer.zero_grad()
                video_g_optimizer.zero_grad()
            # batch_size x noise_size x 1 x 1
            # NOTE(review): sampled_noise is computed here (and again below)
            # but never fed to either network.
            batch_size = 16
            noise_size = 100
            sampled_noise = sample_noise(batch_size, noise_size)
            # WGAN loss
            # https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
            if VIDEO_GAN:
                video_images = video_g_net(clips_x)
                # TESTING: Vanilla Video Gan
                # NOTE(review): these two LSGAN-style losses are overwritten
                # by the adv_loss computations just below.
                video_d_loss_real = (video_d_net(clips_y) - 1).pow(2).mean()
                video_d_loss_fake = (video_d_net(video_images)).pow(2).mean()
                # Fake batch
                labels = torch.zeros(batch_size, 4).t().unsqueeze(2).type(dtype)
                video_d_loss_fake = loss_funs.adv_loss(
                    video_d_net(video_images), labels
                ) # TODO: Validate if it's right.
                video_d_optimizer.zero_grad()
                video_d_loss_fake.backward()
                video_d_optimizer.step()
                # Real batch
                labels = torch.ones(batch_size, 4).t().unsqueeze(2).type(dtype)
                video_d_loss_real = loss_funs.adv_loss(
                    video_d_net(clips_y), labels
                ) # TODO: Validate if it's right.
                video_d_optimizer.zero_grad()
                video_d_loss_real.backward()
                video_d_optimizer.step()
                # video_d_loss.backward()
                # video_d_optimizer.step()
                # video_d_loss_real.backward()
                # batch_size x noise_size x 1 x 1
                batch_size = 16
                noise_size = 100
                sampled_noise = sample_noise(batch_size, noise_size)
                # print('G_Time:', end - start)
                # TESTING: Vanilla Video Gan
                # Generator update: regenerate after the D steps above.
                video_images = video_g_net(clips_x)
                # NOTE(review): video_g_loss_fake is computed but unused.
                video_g_loss_fake = (video_d_net(video_images) - 1).pow(2).mean()
                d_preds = video_d_net(video_images).type(
                    dtype
                ) # TODO: Make sure this is working.
                gt_frames = clips_y.type(
                    dtype
                ) # TODO: make clips_y at different scales.
                gen_frames = video_images.type(
                    dtype
                ) # TODO: make the generated frames multi scale.
                video_g_loss = loss_funs.combined_loss(gen_frames, gt_frames, d_preds)
                video_g_loss.backward()
                video_g_optimizer.step()
            # Every 20 iterations: dump sample grids, log losses, and
            # checkpoint the generator.
            if count % 20 == 0:
                if VIDEO_GAN:
                    save_samples(clips_y, count, "video_real")
                    save_samples(video_images, count, "video_fake")
                loss_fp.write(
                    "{},{},{},{}".format(
                        count, video_d_loss_real, video_d_loss_fake, video_g_loss
                    )
                )
                torch.save(video_g_net.state_dict(), "generator_net.pth.tmp")
            count += 1
    loss_fp.close()
    # Final Generator save.
    torch.save(video_g_net.state_dict(), "generator_net.pth")
# Script entry point: train only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"torch.cat",
"torchvision.transforms.Normalize",
"numpy.sqrt",
"os.path.join",
"torch.nn.init.xavier_normal",
"torch.ones",
"torch.utils.data.DataLoader",
"torchvision.transforms.Scale",
"loss_funs.combined_loss",
"os.path.exists",
"torch.nn.functional.relu",
"numpy.rollaxis",
"torch.zeros",... | [((21354, 21441), 'numpy.zeros', 'np.zeros', (['(cell_h * nrows, cell_w * ncols, channels)'], {'dtype': 'generated_images.dtype'}), '((cell_h * nrows, cell_w * ncols, channels), dtype=generated_images\n .dtype)\n', (21362, 21441), True, 'import numpy as np\n'), ((22450, 22486), 'os.path.join', 'os.path.join', (['"""./emojis"""', 'emoji_type'], {}), "('./emojis', emoji_type)\n", (22462, 22486), False, 'import os\n'), ((22579, 22622), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_path', 'transform'], {}), '(train_path, transform)\n', (22599, 22622), False, 'from torchvision import datasets\n'), ((22642, 22684), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_path', 'transform'], {}), '(test_path, transform)\n', (22662, 22684), False, 'from torchvision import datasets\n'), ((22706, 22805), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n', (22716, 22805), False, 'from torch.utils.data import DataLoader\n'), ((22860, 22959), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=num_workers)\n', (22870, 22959), False, 'from torch.utils.data import DataLoader\n'), ((846, 896), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv1_depthwise.weight'], {}), '(self.conv1_depthwise.weight)\n', (867, 896), True, 'import torch.nn as nn\n'), ((1233, 1283), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv2_depthwise.weight'], {}), '(self.conv2_depthwise.weight)\n', (1254, 1283), True, 'import torch.nn as nn\n'), ((1621, 1671), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', 
(['self.conv3_depthwise.weight'], {}), '(self.conv3_depthwise.weight)\n', (1642, 1671), True, 'import torch.nn as nn\n'), ((2009, 2059), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv4_depthwise.weight'], {}), '(self.conv4_depthwise.weight)\n', (2030, 2059), True, 'import torch.nn as nn\n'), ((2145, 2157), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2155, 2157), True, 'import torch.nn as nn\n'), ((2350, 2361), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (2356, 2361), True, 'import torch.nn.functional as F\n'), ((2504, 2515), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (2510, 2515), True, 'import torch.nn.functional as F\n'), ((2658, 2669), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (2664, 2669), True, 'import torch.nn.functional as F\n'), ((3056, 3098), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (3077, 3098), True, 'import torch.nn as nn\n'), ((3246, 3288), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (3267, 3288), True, 'import torch.nn as nn\n'), ((3434, 3476), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (3455, 3476), True, 'import torch.nn as nn\n'), ((3621, 3663), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (3642, 3663), True, 'import torch.nn as nn\n'), ((4066, 4077), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (4072, 4077), True, 'import torch.nn.functional as F\n'), ((4153, 4164), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (4159, 4164), True, 'import torch.nn.functional as F\n'), ((4240, 4251), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (4246, 4251), True, 'import torch.nn.functional as F\n'), ((4299, 4314), 'torch.tanh', 
'torch.tanh', (['out'], {}), '(out)\n', (4309, 4314), False, 'import torch\n'), ((4647, 4692), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['conv1_depthwise.weight'], {}), '(conv1_depthwise.weight)\n', (4668, 4692), True, 'import torch.nn as nn\n'), ((4701, 4746), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['conv1_pointwise.weight'], {}), '(conv1_pointwise.weight)\n', (4722, 4746), True, 'import torch.nn as nn\n'), ((5025, 5070), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['conv2_depthwise.weight'], {}), '(conv2_depthwise.weight)\n', (5046, 5070), True, 'import torch.nn as nn\n'), ((5079, 5124), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['conv2_pointwise.weight'], {}), '(conv2_pointwise.weight)\n', (5100, 5124), True, 'import torch.nn as nn\n'), ((5145, 5160), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5158, 5160), True, 'import torch.nn as nn\n'), ((5531, 5542), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (5537, 5542), True, 'import torch.nn.functional as F\n'), ((6232, 6284), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1_depthwise.weight'], {}), '(self.deconv1_depthwise.weight)\n', (6253, 6284), True, 'import torch.nn as nn\n'), ((6293, 6345), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1_pointwise.weight'], {}), '(self.deconv1_pointwise.weight)\n', (6314, 6345), True, 'import torch.nn as nn\n'), ((6745, 6797), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2_depthwise.weight'], {}), '(self.deconv2_depthwise.weight)\n', (6766, 6797), True, 'import torch.nn as nn\n'), ((6806, 6858), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2_pointwise.weight'], {}), '(self.deconv2_pointwise.weight)\n', (6827, 6858), True, 'import torch.nn as nn\n'), ((7257, 7309), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3_depthwise.weight'], {}), 
'(self.deconv3_depthwise.weight)\n', (7278, 7309), True, 'import torch.nn as nn\n'), ((7318, 7370), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3_pointwise.weight'], {}), '(self.deconv3_pointwise.weight)\n', (7339, 7370), True, 'import torch.nn as nn\n'), ((7768, 7820), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4_depthwise.weight'], {}), '(self.deconv4_depthwise.weight)\n', (7789, 7820), True, 'import torch.nn as nn\n'), ((7829, 7881), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4_pointwise.weight'], {}), '(self.deconv4_pointwise.weight)\n', (7850, 7881), True, 'import torch.nn as nn\n'), ((8426, 8437), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (8432, 8437), True, 'import torch.nn.functional as F\n'), ((8689, 8700), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (8695, 8700), True, 'import torch.nn.functional as F\n'), ((8951, 8962), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (8957, 8962), True, 'import torch.nn.functional as F\n'), ((9265, 9280), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (9275, 9280), False, 'import torch\n'), ((9629, 9679), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv1_depthwise.weight'], {}), '(self.conv1_depthwise.weight)\n', (9650, 9679), True, 'import torch.nn as nn\n'), ((10016, 10066), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv2_depthwise.weight'], {}), '(self.conv2_depthwise.weight)\n', (10037, 10066), True, 'import torch.nn as nn\n'), ((10404, 10454), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv3_depthwise.weight'], {}), '(self.conv3_depthwise.weight)\n', (10425, 10454), True, 'import torch.nn as nn\n'), ((10792, 10842), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.conv4_depthwise.weight'], {}), '(self.conv4_depthwise.weight)\n', (10813, 10842), True, 'import torch.nn as nn\n'), ((10928, 10940), 
'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10938, 10940), True, 'import torch.nn as nn\n'), ((11133, 11144), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (11139, 11144), True, 'import torch.nn.functional as F\n'), ((11287, 11298), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (11293, 11298), True, 'import torch.nn.functional as F\n'), ((11441, 11452), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (11447, 11452), True, 'import torch.nn.functional as F\n'), ((11838, 11880), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (11859, 11880), True, 'import torch.nn as nn\n'), ((12028, 12070), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (12049, 12070), True, 'import torch.nn as nn\n'), ((12216, 12258), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (12237, 12258), True, 'import torch.nn as nn\n'), ((12403, 12445), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (12424, 12445), True, 'import torch.nn as nn\n'), ((12848, 12859), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (12854, 12859), True, 'import torch.nn.functional as F\n'), ((12935, 12946), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (12941, 12946), True, 'import torch.nn.functional as F\n'), ((13022, 13033), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (13028, 13033), True, 'import torch.nn.functional as F\n'), ((13081, 13096), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (13091, 13096), False, 'import torch\n'), ((13245, 13260), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (13258, 13260), True, 'import torch.nn as nn\n'), ((13347, 13389), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', 
(['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (13368, 13389), True, 'import torch.nn as nn\n'), ((13529, 13571), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (13550, 13571), True, 'import torch.nn as nn\n'), ((13720, 13762), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (13741, 13762), True, 'import torch.nn as nn\n'), ((13909, 13951), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (13930, 13951), True, 'import torch.nn as nn\n'), ((14563, 14578), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (14576, 14578), True, 'import torch.nn as nn\n'), ((14665, 14707), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (14686, 14707), True, 'import torch.nn as nn\n'), ((14847, 14889), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (14868, 14889), True, 'import torch.nn as nn\n'), ((15038, 15080), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (15059, 15080), True, 'import torch.nn as nn\n'), ((15227, 15269), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (15248, 15269), True, 'import torch.nn as nn\n'), ((15881, 15896), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15894, 15896), True, 'import torch.nn as nn\n'), ((15983, 16025), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (16004, 16025), True, 'import torch.nn as nn\n'), ((16165, 16207), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (16186, 16207), True, 'import torch.nn as nn\n'), ((16356, 
16398), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (16377, 16398), True, 'import torch.nn as nn\n'), ((16547, 16589), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (16568, 16589), True, 'import torch.nn as nn\n'), ((16738, 16780), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv5.weight'], {}), '(self.deconv5.weight)\n', (16759, 16780), True, 'import torch.nn as nn\n'), ((16927, 16969), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (16948, 16969), True, 'import torch.nn as nn\n'), ((17789, 17804), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (17802, 17804), True, 'import torch.nn as nn\n'), ((17891, 17933), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv1.weight'], {}), '(self.deconv1.weight)\n', (17912, 17933), True, 'import torch.nn as nn\n'), ((18073, 18115), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv2.weight'], {}), '(self.deconv2.weight)\n', (18094, 18115), True, 'import torch.nn as nn\n'), ((18264, 18306), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv3.weight'], {}), '(self.deconv3.weight)\n', (18285, 18306), True, 'import torch.nn as nn\n'), ((18455, 18497), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (18476, 18497), True, 'import torch.nn as nn\n'), ((18646, 18688), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv5.weight'], {}), '(self.deconv5.weight)\n', (18667, 18688), True, 'import torch.nn as nn\n'), ((18835, 18877), 'torch.nn.init.xavier_normal', 'nn.init.xavier_normal', (['self.deconv4.weight'], {}), '(self.deconv4.weight)\n', (18856, 18877), True, 'import torch.nn as nn\n'), ((20424, 20455), 'torch.nn.functional.interpolate', 'F.interpolate', 
(['out'], {'size': '(4, 4)'}), '(out, size=(4, 4))\n', (20437, 20455), True, 'import torch.nn.functional as F\n'), ((20471, 20502), 'torch.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': '(8, 8)'}), '(out, size=(8, 8))\n', (20484, 20502), True, 'import torch.nn.functional as F\n'), ((20518, 20551), 'torch.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': '(16, 16)'}), '(out, size=(16, 16))\n', (20531, 20551), True, 'import torch.nn.functional as F\n'), ((20962, 20977), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (20972, 20977), False, 'import torch\n'), ((21262, 21281), 'numpy.sqrt', 'np.sqrt', (['num_images'], {}), '(num_images)\n', (21269, 21281), True, 'import numpy as np\n'), ((21712, 21736), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (21726, 21736), False, 'import os\n'), ((21746, 21767), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (21757, 21767), False, 'import os\n'), ((24285, 24304), 'os.listdir', 'os.listdir', (['"""train"""'], {}), "('train')\n", (24295, 24304), False, 'import os\n'), ((14046, 14055), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14053, 14055), True, 'import torch.nn as nn\n'), ((14150, 14159), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14157, 14159), True, 'import torch.nn as nn\n'), ((14254, 14263), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14261, 14263), True, 'import torch.nn as nn\n'), ((15364, 15373), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15371, 15373), True, 'import torch.nn as nn\n'), ((15468, 15477), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15475, 15477), True, 'import torch.nn as nn\n'), ((15572, 15581), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15579, 15581), True, 'import torch.nn as nn\n'), ((17064, 17073), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17071, 17073), True, 'import torch.nn as nn\n'), ((17168, 17177), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17175, 17177), True, 'import 
torch.nn as nn\n'), ((17272, 17281), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17279, 17281), True, 'import torch.nn as nn\n'), ((17376, 17385), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17383, 17385), True, 'import torch.nn as nn\n'), ((17480, 17489), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17487, 17489), True, 'import torch.nn as nn\n'), ((18972, 18981), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (18979, 18981), True, 'import torch.nn as nn\n'), ((19076, 19085), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (19083, 19085), True, 'import torch.nn as nn\n'), ((19180, 19189), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (19187, 19189), True, 'import torch.nn as nn\n'), ((19284, 19293), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (19291, 19293), True, 'import torch.nn as nn\n'), ((19388, 19397), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (19395, 19397), True, 'import torch.nn as nn\n'), ((21896, 21923), 'torch.rand', 'torch.rand', (['batch_size', 'dim'], {}), '(batch_size, dim)\n', (21906, 21923), False, 'import torch\n'), ((22283, 22311), 'torchvision.transforms.Scale', 'transforms.Scale', (['image_size'], {}), '(image_size)\n', (22299, 22311), False, 'from torchvision import transforms\n'), ((22325, 22346), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (22344, 22346), False, 'from torchvision import transforms\n'), ((22360, 22414), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (22380, 22414), False, 'from torchvision import transforms\n'), ((23518, 23555), 'vanilla_gan.video_gan.Discriminator', 'vanilla_gan.video_gan.Discriminator', ([], {}), '()\n', (23553, 23555), False, 'import vanilla_gan\n'), ((23619, 23652), 'vanilla_gan.video_gan.Generator', 'vanilla_gan.video_gan.Generator', ([], {}), '()\n', (23650, 23652), False, 'import vanilla_gan\n'), ((23729, 23890), 'd_net.DiscriminatorModel', 
'd_net.DiscriminatorModel', ([], {'kernel_sizes_list': 'SCALE_KERNEL_SIZES_D', 'conv_layer_fms_list': 'SCALE_CONV_FSM_D', 'scale_fc_layer_sizes_list': 'SCALE_FC_LAYER_SIZES_D'}), '(kernel_sizes_list=SCALE_KERNEL_SIZES_D,\n conv_layer_fms_list=SCALE_CONV_FSM_D, scale_fc_layer_sizes_list=\n SCALE_FC_LAYER_SIZES_D)\n', (23753, 23890), False, 'import d_net\n'), ((24008, 24049), 'vanilla_gan.video_gan.VideoGANGenerator', 'vanilla_gan.video_gan.VideoGANGenerator', ([], {}), '()\n', (24047, 24049), False, 'import vanilla_gan\n'), ((680, 734), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (689, 734), True, 'import torch.nn as nn\n'), ((977, 995), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (991, 995), True, 'import torch.nn as nn\n'), ((1067, 1121), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (1076, 1121), True, 'import torch.nn as nn\n'), ((1364, 1382), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1378, 1382), True, 'import torch.nn as nn\n'), ((1455, 1509), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (1464, 1509), True, 'import torch.nn as nn\n'), ((1752, 1771), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1766, 1771), True, 'import torch.nn as nn\n'), ((1843, 1897), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(1)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=1, padding=1, groups=1)\n', (1852, 1897), True, 'import torch.nn as nn\n'), ((2983, 3035), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(100)', '(128)', '(4)'], {'stride': '(4)', 'padding': '(0)'}), '(100, 128, 4, stride=4, padding=0)\n', (3001, 
3035), True, 'import torch.nn as nn\n'), ((3118, 3137), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3132, 3137), True, 'import torch.nn as nn\n'), ((3174, 3225), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(128, 64, 4, stride=2, padding=1)\n', (3192, 3225), True, 'import torch.nn as nn\n'), ((3308, 3326), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3322, 3326), True, 'import torch.nn as nn\n'), ((3363, 3413), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(64, 32, 4, stride=2, padding=1)\n', (3381, 3413), True, 'import torch.nn as nn\n'), ((3496, 3514), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (3510, 3514), True, 'import torch.nn as nn\n'), ((3551, 3600), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(3)', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(32, 3, 4, stride=2, padding=1)\n', (3569, 3600), True, 'import torch.nn as nn\n'), ((4445, 4524), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['map_size', 'map_size', '(3)'], {'stride': '(1)', 'padding': '(1)', 'groups': 'map_size'}), '(map_size, map_size, 3, stride=1, padding=1, groups=map_size)\n', (4463, 4524), True, 'import torch.nn as nn\n'), ((4585, 4626), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['map_size', 'map_size', '(1)'], {}), '(map_size, map_size, 1)\n', (4603, 4626), True, 'import torch.nn as nn\n'), ((4760, 4784), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['map_size'], {}), '(map_size)\n', (4774, 4784), True, 'import torch.nn as nn\n'), ((4823, 4902), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['map_size', 'map_size', '(3)'], {'stride': '(1)', 'padding': '(1)', 'groups': 'map_size'}), '(map_size, map_size, 3, stride=1, padding=1, groups=map_size)\n', (4841, 4902), True, 'import torch.nn as nn\n'), ((4963, 5004), 'torch.nn.ConvTranspose2d', 
'nn.ConvTranspose2d', (['map_size', 'map_size', '(1)'], {}), '(map_size, map_size, 1)\n', (4981, 5004), True, 'import torch.nn as nn\n'), ((6047, 6111), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nin', '(4)'], {'stride': '(4)', 'padding': '(0)', 'groups': 'nin'}), '(nin, nin, 4, stride=4, padding=0, groups=nin)\n', (6065, 6111), True, 'import torch.nn as nn\n'), ((6179, 6211), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nout', '(1)'], {}), '(nin, nout, 1)\n', (6197, 6211), True, 'import torch.nn as nn\n'), ((6365, 6384), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (6379, 6384), True, 'import torch.nn as nn\n'), ((6560, 6624), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nin', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': 'nin'}), '(nin, nin, 4, stride=2, padding=1, groups=nin)\n', (6578, 6624), True, 'import torch.nn as nn\n'), ((6692, 6724), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nout', '(1)'], {}), '(nin, nout, 1)\n', (6710, 6724), True, 'import torch.nn as nn\n'), ((6878, 6896), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6892, 6896), True, 'import torch.nn as nn\n'), ((7072, 7136), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nin', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': 'nin'}), '(nin, nin, 4, stride=2, padding=1, groups=nin)\n', (7090, 7136), True, 'import torch.nn as nn\n'), ((7204, 7236), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nout', '(1)'], {}), '(nin, nout, 1)\n', (7222, 7236), True, 'import torch.nn as nn\n'), ((7390, 7408), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (7404, 7408), True, 'import torch.nn as nn\n'), ((7583, 7647), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nin', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': 'nin'}), '(nin, nin, 4, stride=2, padding=1, groups=nin)\n', (7601, 7647), True, 'import torch.nn as nn\n'), 
((7715, 7747), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nin', 'nout', '(1)'], {}), '(nin, nout, 1)\n', (7733, 7747), True, 'import torch.nn as nn\n'), ((9463, 9517), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (9472, 9517), True, 'import torch.nn as nn\n'), ((9760, 9778), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (9774, 9778), True, 'import torch.nn as nn\n'), ((9850, 9904), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (9859, 9904), True, 'import torch.nn as nn\n'), ((10147, 10165), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (10161, 10165), True, 'import torch.nn as nn\n'), ((10238, 10292), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(2)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=2, padding=1, groups=1)\n', (10247, 10292), True, 'import torch.nn as nn\n'), ((10535, 10554), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (10549, 10554), True, 'import torch.nn as nn\n'), ((10626, 10680), 'torch.nn.Conv2d', 'nn.Conv2d', (['nin', 'nout', '(4)'], {'stride': '(1)', 'padding': '(1)', 'groups': '(1)'}), '(nin, nout, 4, stride=1, padding=1, groups=1)\n', (10635, 10680), True, 'import torch.nn as nn\n'), ((11766, 11817), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(12)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(12, 128, 3, stride=1, padding=1)\n', (11784, 11817), True, 'import torch.nn as nn\n'), ((11900, 11919), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (11914, 11919), True, 'import torch.nn as nn\n'), ((11956, 12007), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, 
padding=1)\n', (11974, 12007), True, 'import torch.nn as nn\n'), ((12090, 12108), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (12104, 12108), True, 'import torch.nn as nn\n'), ((12145, 12195), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 32, 3, stride=1, padding=1)\n', (12163, 12195), True, 'import torch.nn as nn\n'), ((12278, 12296), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (12292, 12296), True, 'import torch.nn as nn\n'), ((12333, 12382), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(3)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(32, 3, 3, stride=1, padding=1)\n', (12351, 12382), True, 'import torch.nn as nn\n'), ((13284, 13326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(12)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(12, 128, 3, stride=1, padding=1)\n', (13293, 13326), True, 'import torch.nn as nn\n'), ((13409, 13428), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (13423, 13428), True, 'import torch.nn as nn\n'), ((13465, 13508), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 256, 3, stride=1, padding=1)\n', (13474, 13508), True, 'import torch.nn as nn\n'), ((13591, 13610), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (13605, 13610), True, 'import torch.nn as nn\n'), ((13647, 13699), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (13665, 13699), True, 'import torch.nn as nn\n'), ((13782, 13801), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (13796, 13801), True, 'import torch.nn as nn\n'), ((13838, 13888), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(3)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 3, 3, stride=1, padding=1)\n', (13856, 13888), 
True, 'import torch.nn as nn\n'), ((14602, 14644), 'torch.nn.Conv2d', 'nn.Conv2d', (['(15)', '(128)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(15, 128, 5, stride=1, padding=2)\n', (14611, 14644), True, 'import torch.nn as nn\n'), ((14727, 14746), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (14741, 14746), True, 'import torch.nn as nn\n'), ((14783, 14826), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 256, 3, stride=1, padding=1)\n', (14792, 14826), True, 'import torch.nn as nn\n'), ((14909, 14928), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (14923, 14928), True, 'import torch.nn as nn\n'), ((14965, 15017), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (14983, 15017), True, 'import torch.nn as nn\n'), ((15100, 15119), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (15114, 15119), True, 'import torch.nn as nn\n'), ((15156, 15206), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(3)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(128, 3, 5, stride=1, padding=2)\n', (15174, 15206), True, 'import torch.nn as nn\n'), ((15920, 15962), 'torch.nn.Conv2d', 'nn.Conv2d', (['(15)', '(128)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(15, 128, 5, stride=1, padding=2)\n', (15929, 15962), True, 'import torch.nn as nn\n'), ((16045, 16064), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (16059, 16064), True, 'import torch.nn as nn\n'), ((16101, 16144), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 256, 3, stride=1, padding=1)\n', (16110, 16144), True, 'import torch.nn as nn\n'), ((16227, 16246), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (16241, 16246), True, 'import torch.nn as nn\n'), ((16283, 16335), 
'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(512)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 512, 3, stride=1, padding=1)\n', (16301, 16335), True, 'import torch.nn as nn\n'), ((16418, 16437), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (16432, 16437), True, 'import torch.nn as nn\n'), ((16474, 16526), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(512, 256, 3, stride=1, padding=1)\n', (16492, 16526), True, 'import torch.nn as nn\n'), ((16609, 16628), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (16623, 16628), True, 'import torch.nn as nn\n'), ((16665, 16717), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (16683, 16717), True, 'import torch.nn as nn\n'), ((16800, 16819), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (16814, 16819), True, 'import torch.nn as nn\n'), ((16856, 16906), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(3)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(128, 3, 5, stride=1, padding=2)\n', (16874, 16906), True, 'import torch.nn as nn\n'), ((17828, 17870), 'torch.nn.Conv2d', 'nn.Conv2d', (['(15)', '(128)', '(7)'], {'stride': '(1)', 'padding': '(3)'}), '(15, 128, 7, stride=1, padding=3)\n', (17837, 17870), True, 'import torch.nn as nn\n'), ((17953, 17972), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (17967, 17972), True, 'import torch.nn as nn\n'), ((18009, 18052), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(128, 256, 5, stride=1, padding=2)\n', (18018, 18052), True, 'import torch.nn as nn\n'), ((18135, 18154), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (18149, 18154), True, 'import torch.nn as nn\n'), ((18191, 18243), 
'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(512)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(256, 512, 5, stride=1, padding=2)\n', (18209, 18243), True, 'import torch.nn as nn\n'), ((18326, 18345), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (18340, 18345), True, 'import torch.nn as nn\n'), ((18382, 18434), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512)', '(256)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(512, 256, 5, stride=1, padding=2)\n', (18400, 18434), True, 'import torch.nn as nn\n'), ((18517, 18536), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (18531, 18536), True, 'import torch.nn as nn\n'), ((18573, 18625), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(256, 128, 5, stride=1, padding=2)\n', (18591, 18625), True, 'import torch.nn as nn\n'), ((18708, 18727), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (18722, 18727), True, 'import torch.nn as nn\n'), ((18764, 18814), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(3)', '(7)'], {'stride': '(1)', 'padding': '(3)'}), '(128, 3, 7, stride=1, padding=3)\n', (18782, 18814), True, 'import torch.nn as nn\n'), ((19850, 19916), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(3)', '(3)', '(3)'], {'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(3, 3, 3, stride=2, padding=1, output_padding=1)\n', (19868, 19916), True, 'import torch.nn as nn\n'), ((19970, 20036), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(3)', '(3)', '(3)'], {'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(3, 3, 3, stride=2, padding=1, output_padding=1)\n', (19988, 20036), True, 'import torch.nn as nn\n'), ((20090, 20156), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(3)', '(3)', '(3)'], {'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(3, 3, 3, stride=2, padding=1, 
output_padding=1)\n', (20108, 20156), True, 'import torch.nn as nn\n'), ((20668, 20703), 'torch.cat', 'torch.cat', (['[img2, upsample1]'], {'dim': '(1)'}), '([img2, upsample1], dim=1)\n', (20677, 20703), False, 'import torch\n'), ((20773, 20808), 'torch.cat', 'torch.cat', (['[img3, upsample2]'], {'dim': '(1)'}), '([img3, upsample2], dim=1)\n', (20782, 20808), False, 'import torch\n'), ((20878, 20913), 'torch.cat', 'torch.cat', (['[img4, upsample3]'], {'dim': '(1)'}), '([img4, upsample3], dim=1)\n', (20887, 20913), False, 'import torch\n'), ((27300, 27355), 'loss_funs.combined_loss', 'loss_funs.combined_loss', (['gen_frames', 'gt_frames', 'd_preds'], {}), '(gen_frames, gt_frames, d_preds)\n', (27323, 27355), False, 'import loss_funs\n'), ((21945, 21961), 'torch.autograd.Variable', 'Variable', (['result'], {}), '(result)\n', (21953, 21961), False, 'from torch.autograd import Variable\n'), ((24706, 24732), 'numpy.rollaxis', 'np.rollaxis', (['clips_x', '(3)', '(1)'], {}), '(clips_x, 3, 1)\n', (24717, 24732), True, 'import numpy as np\n'), ((24785, 24811), 'numpy.rollaxis', 'np.rollaxis', (['clips_y', '(3)', '(1)'], {}), '(clips_y, 3, 1)\n', (24796, 24811), True, 'import numpy as np\n'), ((25568, 25594), 'torch.zeros', 'torch.zeros', (['batch_size', '(4)'], {}), '(batch_size, 4)\n', (25579, 25594), False, 'import torch\n'), ((25972, 25997), 'torch.ones', 'torch.ones', (['batch_size', '(4)'], {}), '(batch_size, 4)\n', (25982, 25997), False, 'import torch\n')] |
import numpy as np
import pytest
from stable_baselines import PPO, TD3
from stable_baselines.common.noise import NormalActionNoise
action_noise = NormalActionNoise(np.zeros(1), 0.1 * np.ones(1))
def test_td3():
model = TD3(
'MlpPolicy',
'Pendulum-v0',
policy_kwargs=dict(net_arch=[64, 64]),
seed=0,
learning_starts=100,
verbose=1,
create_eval_env=True,
action_noise=action_noise,
)
model.learn(total_timesteps=10000, eval_freq=5000)
# model.save("test_save")
# model.load("test_save")
# os.remove("test_save.zip")
@pytest.mark.parametrize("model_class", [PPO])
@pytest.mark.parametrize("env_id", ['CartPole-v1', 'Pendulum-v0'])
def test_onpolicy(model_class, env_id):
model = model_class(
'MlpPolicy',
env_id,
policy_kwargs=dict(net_arch=[16]),
verbose=1,
create_eval_env=True,
)
model.learn(total_timesteps=1000, eval_freq=500)
# model.save("test_save")
# model.load("test_save")
# os.remove("test_save.zip")
| [
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.ones"
] | [((608, 653), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_class"""', '[PPO]'], {}), "('model_class', [PPO])\n", (631, 653), False, 'import pytest\n'), ((655, 720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""env_id"""', "['CartPole-v1', 'Pendulum-v0']"], {}), "('env_id', ['CartPole-v1', 'Pendulum-v0'])\n", (678, 720), False, 'import pytest\n'), ((165, 176), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (173, 176), True, 'import numpy as np\n'), ((184, 194), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (191, 194), True, 'import numpy as np\n')] |
from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse
import numpy as np
from pandapower.plotting.plotting_toolbox import _rotate_dim2, get_color_list, get_angle_list, \
get_linewidth_list
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def node_patches(node_coords, size, patch_type, colors=None, **kwargs):
"""
Creates node patches from coordinates translating the patch type into patches.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param size: size of the patch (can be interpreted differently, depending on the patch type)
:type size: float
:param patch_type: type of patches to create - can be one of
- "circle" or "ellipse" for an ellipse (cirlces are just ellipses with the same width \
+ height)\
- "rect" or "rectangle" for a rectangle\
- "poly<n>" for a polygon with n edges
:type patch_type: str
:param colors: colors or color of the patches
:type colors: iterable, float
:param kwargs: additional keyword arguments to pass to the patch initialization \
(might contain "width", "height", "angle" depending on the patch type)
:type kwargs: dict
:return: patches - list of rectangle patches for the nodes
"""
if patch_type.lower() == 'ellipse' or patch_type.lower() == 'circle':
# circles are just ellipses
if patch_type.lower() == "circle" and len(set(kwargs.keys()) & {"width", "height"}) == 1:
wh = kwargs["width"] if "width" in kwargs else kwargs["height"]
width = wh
height = wh
else:
width = kwargs.pop("width", 2 * size)
height = kwargs.pop("height", 2 * size)
angle = kwargs.pop('angle', 0)
return ellipse_patches(node_coords, width, height, angle, color=colors, **kwargs)
elif patch_type.lower() == "rect" or patch_type.lower() == "rectangle":
width = kwargs.pop("width", 2 * size)
height = kwargs.pop("height", 2 * size)
return rectangle_patches(node_coords, width, height, color=colors, **kwargs)
elif patch_type.lower().startswith("poly"):
edges = int(patch_type[4:])
return polygon_patches(node_coords, size, edges, color=colors, **kwargs)
else:
logger.error("Wrong patchtype. Please choose a correct patch type.")
raise ValueError("Wrong patchtype")
def ellipse_patches(node_coords, width, height, angle=0, color=None, **kwargs):
"""
Function to create a list of ellipse patches from node coordinates.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param width: width of the ellipse (described by an exterior rectangle)
:type width: float
:param height: height of the ellipse (described by an exterior rectangle)
:type height: float
:param angle: angle by which to rotate the ellipse
:type angle: float
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Ellipse initialization
:type kwargs: dict
:return: patches - list of ellipse patches for the nodes
"""
patches = list()
angles = get_angle_list(angle, len(node_coords))
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col, ang in zip(node_coords, colors, angles):
patches.append(Ellipse((x, y), width, height, angle=ang, color=col, **kwargs))
else:
for (x, y), ang in zip(node_coords, angles):
patches.append(Ellipse((x, y), width, height, angle=ang, **kwargs))
return patches
def rectangle_patches(node_coords, width, height, color=None, **kwargs):
"""
Function to create a list of rectangle patches from node coordinates.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param width: width of the rectangle
:type width: float
:param height: height of the rectangle
:type height: float
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Rectangle initialization
:type kwargs: dict
:return: patches - list of rectangle patches for the nodes
"""
patches = list()
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col in zip(node_coords, colors):
patches.append(Rectangle((x - width / 2, y - height / 2), width, height, color=color,
**kwargs))
else:
for x, y in node_coords:
patches.append(Rectangle((x - width / 2, y - height / 2), width, height, **kwargs))
return patches
def polygon_patches(node_coords, radius, num_edges, color=None, **kwargs):
"""
Function to create a list of polygon patches from node coordinates. The number of edges for the
polygon can be defined.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param radius: radius for the polygon (from centroid to edges)
:type radius: float
:param num_edges: number of edges of the polygon
:type num_edges: int
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Polygon initialization
:type kwargs: dict
:return: patches - list of rectangle patches for the nodes
"""
patches = list()
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col in zip(node_coords, colors):
patches.append(RegularPolygon([x, y], numVertices=num_edges, radius=radius, color=color,
**kwargs))
else:
for x, y in node_coords:
patches.append(RegularPolygon([x, y], numVertices=num_edges, radius=radius, **kwargs))
return patches
def load_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for loads.
:param node_coords: coordinates of the nodes that the loads belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to load patches\
- polys (list of RegularPolygon) - list containing the load patches\
- keywords (set) - set of keywords removed from kwargs
"""
offset = kwargs.get("offset", 1.2 * size)
all_angles = get_angle_list(angles, len(node_coords))
edgecolor = kwargs.get("patch_edgecolor", "w")
facecolor = kwargs.get("patch_facecolor", "w")
edgecolors = get_color_list(edgecolor, len(node_coords))
facecolors = get_color_list(facecolor, len(node_coords))
polys, lines = list(), list()
for i, node_geo in enumerate(node_coords):
p2 = node_geo + _rotate_dim2(np.array([0, offset + size]), all_angles[i])
p3 = node_geo + _rotate_dim2(np.array([0, offset + size / 2]), all_angles[i])
polys.append(RegularPolygon(p2, numVertices=3, radius=size, orientation=-all_angles[i],
fc=facecolors[i], ec=edgecolors[i]))
lines.append((node_geo, p3))
return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def gen_patches(node_coords, size, angles, **kwargs):
    """
    Build the patches that represent generators.

    :param node_coords: coordinates of the nodes the generators are attached to
    :type node_coords: iterable
    :param size: patch size
    :type size: float
    :param angles: rotation angle(s) for the patches (in radians)
    :type angles: iterable(float), float
    :param kwargs: may contain the parameters "offset", "patch_edgecolor" and "patch_facecolor"
    :type kwargs:
    :return: Return values are: \
        - lines (list) - coordinates of the connector lines leading to the generator patches\
        - polys (list) - the generator patches (circle plus two arcs each)\
        - keywords (set) - keywords consumed from kwargs
    """
    lines = []
    polys = []
    n = len(node_coords)
    dist = kwargs.get("offset", 2. * size)
    rotations = get_angle_list(angles, n)
    ecs = get_color_list(kwargs.get("patch_edgecolor", "k"), n)
    fcs = get_color_list(kwargs.get("patch_facecolor", (1, 0, 0, 0)), n)
    for idx, coord in enumerate(node_coords):
        rot = rotations[idx]
        center = coord + _rotate_dim2(np.array([0, size + dist]), rot)
        polys.append(Circle(center, size, fc=fcs[idx], ec=ecs[idx]))
        # The two arcs form the stylized "~" inside the generator circle.
        polys.append(Arc(center + np.array([-size / 6.2, -size / 2.6]), size / 2, size,
                         theta1=65, theta2=120, ec=ecs[idx]))
        polys.append(Arc(center + np.array([size / 6.2, size / 2.6]), size / 2, size,
                         theta1=245, theta2=300, ec=ecs[idx]))
        lines.append((coord, center + _rotate_dim2(np.array([0, size]), -rot)))
    return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def sgen_patches(node_coords, size, angles, **kwargs):
    """
    Creation function of patches for static generators.

    :param node_coords: coordinates of the nodes that the static generators belong to.
    :type node_coords: iterable
    :param size: size of the patch
    :type size: float
    :param angles: angles by which to rotate the patches (in radians); a single float is\
        applied to all patches
    :type angles: iterable(float), float
    :param kwargs: additional keyword arguments (might contain parameters "offset",\
        "r_triangles", "patch_edgecolor" and "patch_facecolor")
    :type kwargs:
    :return: Return values are: \
        - lines (list) - list of coordinates for lines leading to static generator patches\
        - polys (list) - list containing the static generator patches\
        - keywords (set) - set of keywords removed from kwargs
    """
    polys, lines = list(), list()
    offset = kwargs.get("offset", 2 * size)
    r_triangle = kwargs.get("r_triangles", size * 0.4)
    # Normalize angles so that a scalar is broadcast to every node
    # (consistent with load_patches / gen_patches / ext_grid_patches;
    # previously a scalar angle raised a TypeError on angles[i]).
    all_angles = get_angle_list(angles, len(node_coords))
    edgecolor = kwargs.get("patch_edgecolor", "w")
    facecolor = kwargs.get("patch_facecolor", "w")
    edgecolors = get_color_list(edgecolor, len(node_coords))
    facecolors = get_color_list(facecolor, len(node_coords))
    for i, node_geo in enumerate(node_coords):
        angle = all_angles[i]
        mid_circ = node_geo + _rotate_dim2(np.array([0, offset + size]), angle)
        circ_edge = node_geo + _rotate_dim2(np.array([0, offset]), angle)
        mid_tri1 = mid_circ + _rotate_dim2(np.array([r_triangle, -r_triangle / 4]), angle)
        mid_tri2 = mid_circ + _rotate_dim2(np.array([-r_triangle, r_triangle / 4]), angle)
        # dropped perpendicular foot of triangle1
        perp_foot1 = mid_tri1 + _rotate_dim2(np.array([0, -r_triangle / 2]), angle)
        line_end1 = perp_foot1 + _rotate_dim2(np.array([-2.5 * r_triangle, 0]), angle)
        perp_foot2 = mid_tri2 + _rotate_dim2(np.array([0, r_triangle / 2]), angle)
        line_end2 = perp_foot2 + _rotate_dim2(np.array([2.5 * r_triangle, 0]), angle)
        polys.append(Circle(mid_circ, size, fc=facecolors[i], ec=edgecolors[i]))
        polys.append(RegularPolygon(mid_tri1, numVertices=3, radius=r_triangle,
                                    orientation=-angle, fc=facecolors[i], ec=edgecolors[i]))
        polys.append(RegularPolygon(mid_tri2, numVertices=3, radius=r_triangle,
                                    orientation=np.pi - angle, fc=facecolors[i],
                                    ec=edgecolors[i]))
        lines.append((node_geo, circ_edge))
        lines.append((perp_foot1, line_end1))
        lines.append((perp_foot2, line_end2))
    # NOTE(review): the set says "r_triangle" although the kwarg read above is
    # "r_triangles"; kept as-is for backward compatibility with callers that
    # strip these keys from kwargs.
    return lines, polys, {"offset", "r_triangle", "patch_edgecolor", "patch_facecolor"}
def storage_patches(node_coords, size, angles, **kwargs):
    """
    Creation function of patches for storage systems.

    :param node_coords: coordinates of the nodes that the storage system belong to.
    :type node_coords: iterable
    :param size: size of the patch
    :type size: float
    :param angles: angles by which to rotate the patches (in radians); a single float is\
        applied to all patches
    :type angles: iterable(float), float
    :param kwargs: additional keyword arguments (might contain parameters "offset",\
        "r_triangles", "patch_edgecolor" and "patch_facecolor")
    :type kwargs:
    :return: Return values are: \
        - lines (list) - list of coordinates for lines forming the storage symbols\
        - polys (list) - always empty here; the storage symbol is drawn with lines only\
        - keywords (set) - set of keywords removed from kwargs
    """
    polys, lines = list(), list()
    offset = kwargs.get("offset", 1 * size)
    r_triangle = kwargs.get("r_triangles", size * 0.4)
    # Normalize angles so that a scalar is broadcast to every node
    # (consistent with load_patches / gen_patches / ext_grid_patches;
    # previously a scalar angle raised a TypeError on angles[i]).
    all_angles = get_angle_list(angles, len(node_coords))
    for i, node_geo in enumerate(node_coords):
        angle = all_angles[i]
        mid_circ = node_geo + _rotate_dim2(np.array([0, offset + r_triangle * 2.]), angle)
        circ_edge = node_geo + _rotate_dim2(np.array([0, offset]), angle)
        mid_tri1 = mid_circ + _rotate_dim2(np.array([-r_triangle, -r_triangle]), angle)
        # dropped perpendicular foot of triangle1
        perp_foot1 = mid_tri1 + _rotate_dim2(np.array([r_triangle * 0.5, -r_triangle/4]), angle)
        line_end1 = perp_foot1 + _rotate_dim2(np.array([1 * r_triangle, 0]), angle)
        perp_foot2 = mid_tri1 + _rotate_dim2(np.array([0, -r_triangle]), angle)
        line_end2 = perp_foot2 + _rotate_dim2(np.array([2. * r_triangle, 0]), angle)
        lines.append((node_geo, circ_edge))
        lines.append((perp_foot1, line_end1))
        lines.append((perp_foot2, line_end2))
    return lines, polys, {"offset", "r_triangle", "patch_edgecolor", "patch_facecolor"}
def ext_grid_patches(node_coords, size, angles, **kwargs):
    """
    Build the patches that represent external grids.

    :param node_coords: coordinates of the nodes the external grids are attached to
    :type node_coords: iterable
    :param size: patch size
    :type size: float
    :param angles: rotation angle(s) for the patches (in radians)
    :type angles: iterable(float), float
    :param kwargs: may contain the parameters "offset", "patch_edgecolor" and "patch_facecolor"
    :type kwargs:
    :return: Return values are: \
        - lines (list) - coordinates of the connector lines leading to the external grid patches\
        - polys (list of Rectangle) - the hatched external grid squares\
        - keywords (set) - keywords consumed from kwargs
    """
    n = len(node_coords)
    dist = kwargs.get("offset", 2 * size)
    rotations = get_angle_list(angles, n)
    ecs = get_color_list(kwargs.get("patch_edgecolor", "w"), n)
    fcs = get_color_list(kwargs.get("patch_facecolor", "w"), n)
    lines = []
    polys = []
    for idx, coord in enumerate(node_coords):
        rot = rotations[idx]
        # Anchor is the bottom-center of the square; the rectangle needs its
        # lower-left corner, so shift left by one half-width in rotated frame.
        anchor = coord + _rotate_dim2(np.array([0, dist]), rot)
        lower_left = anchor + _rotate_dim2(np.array([-size, 0]), rot)
        polys.append(Rectangle(lower_left, 2 * size, 2 * size,
                               angle=(-rot / np.pi * 180),
                               fc=fcs[idx], ec=ecs[idx], hatch="XXX"))
        lines.append((coord, anchor))
    return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def trafo_patches(coords, size, **kwargs):
    """
    Build line coordinates and circle patches representing transformers, each
    connecting two nodes.

    :param coords: list of connecting node coordinates (usually should be \
        `[((x11, y11), (x12, y12)), ((x21, y21), (x22, y22)), ...]`)
    :type coords: (N, (2, 2)) shaped iterable
    :param size: size of the trafo patches; if None, sized from the span length
    :type size: float
    :param kwargs: may contain the parameters "patch_edgecolor", "patch_facecolor"\
        and "linewidths"
    :type kwargs:
    :return: Return values are: \
        - lines (list) - coordinates of the stubs connecting nodes and trafo patches\
        - circles (list of Circle) - the transformer rings\
        - keywords (set) - keywords consumed from kwargs
    """
    n = len(coords)
    ecs = get_color_list(kwargs.get("patch_edgecolor", "w"), n)
    fcs = get_color_list(kwargs.get("patch_facecolor", (1, 0, 0, 0)), n)
    lws = get_linewidth_list(kwargs.get("linewidths", 2.), n, name_entries="trafos")
    circles = []
    lines = []
    for idx, (a, b) in enumerate(coords):
        a = np.array(a)
        b = np.array(b)
        if np.all(a == b):
            # Degenerate trafo (both ends on the same point) - nothing to draw.
            continue
        d = np.sqrt(np.sum((a - b) ** 2))
        ring_radius = np.sqrt(d) / 5 if size is None else size
        off = ring_radius * 0.35
        # Two overlapping rings centred around the midpoint of the connection.
        ring1 = (0.5 - off / d) * (a - b) + b
        ring2 = (0.5 + off / d) * (a - b) + b
        circles.append(Circle(ring1, ring_radius, fc=fcs[idx], ec=ecs[idx], lw=lws[idx]))
        circles.append(Circle(ring2, ring_radius, fc=fcs[idx], ec=ecs[idx], lw=lws[idx]))
        # Connector stubs from each node up to the edge of the nearer ring.
        stop1 = (0.5 - off / d - ring_radius / d) * (b - a) + a
        stop2 = (0.5 - off / d - ring_radius / d) * (a - b) + b
        lines.append([a, stop1])
        lines.append([b, stop2])
    return lines, circles, {"patch_edgecolor", "patch_facecolor"}
| [
"matplotlib.patches.RegularPolygon",
"numpy.sum",
"matplotlib.patches.Rectangle",
"logging.getLogger",
"matplotlib.patches.Circle",
"numpy.array",
"matplotlib.patches.Ellipse",
"numpy.all",
"numpy.sqrt"
] | [((301, 328), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (318, 328), False, 'import logging\n'), ((17143, 17155), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (17151, 17155), True, 'import numpy as np\n'), ((17169, 17181), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (17177, 17181), True, 'import numpy as np\n'), ((17193, 17209), 'numpy.all', 'np.all', (['(p1 == p2)'], {}), '(p1 == p2)\n', (17199, 17209), True, 'import numpy as np\n'), ((7453, 7567), 'matplotlib.patches.RegularPolygon', 'RegularPolygon', (['p2'], {'numVertices': '(3)', 'radius': 'size', 'orientation': '(-all_angles[i])', 'fc': 'facecolors[i]', 'ec': 'edgecolors[i]'}), '(p2, numVertices=3, radius=size, orientation=-all_angles[i],\n fc=facecolors[i], ec=edgecolors[i])\n', (7467, 7567), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((9050, 9102), 'matplotlib.patches.Circle', 'Circle', (['p2', 'size'], {'fc': 'facecolors[i]', 'ec': 'edgecolors[i]'}), '(p2, size, fc=facecolors[i], ec=edgecolors[i])\n', (9056, 9102), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((11624, 11682), 'matplotlib.patches.Circle', 'Circle', (['mid_circ', 'size'], {'fc': 'facecolors[i]', 'ec': 'edgecolors[i]'}), '(mid_circ, size, fc=facecolors[i], ec=edgecolors[i])\n', (11630, 11682), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((11705, 11828), 'matplotlib.patches.RegularPolygon', 'RegularPolygon', (['mid_tri1'], {'numVertices': '(3)', 'radius': 'r_triangle', 'orientation': '(-angles[i])', 'fc': 'facecolors[i]', 'ec': 'edgecolors[i]'}), '(mid_tri1, numVertices=3, radius=r_triangle, orientation=-\n angles[i], fc=facecolors[i], ec=edgecolors[i])\n', (11719, 11828), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((11882, 12012), 'matplotlib.patches.RegularPolygon', 'RegularPolygon', (['mid_tri2'], 
{'numVertices': '(3)', 'radius': 'r_triangle', 'orientation': '(np.pi - angles[i])', 'fc': 'facecolors[i]', 'ec': 'edgecolors[i]'}), '(mid_tri2, numVertices=3, radius=r_triangle, orientation=np.\n pi - angles[i], fc=facecolors[i], ec=edgecolors[i])\n', (11896, 12012), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((15649, 15774), 'matplotlib.patches.Rectangle', 'Rectangle', (['p_ll', '(2 * size)', '(2 * size)'], {'angle': '(-all_angles[i] / np.pi * 180)', 'fc': 'facecolors[i]', 'ec': 'edgecolors[i]', 'hatch': '"""XXX"""'}), "(p_ll, 2 * size, 2 * size, angle=-all_angles[i] / np.pi * 180, fc=\n facecolors[i], ec=edgecolors[i], hatch='XXX')\n", (15658, 15774), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((17252, 17274), 'numpy.sum', 'np.sum', (['((p1 - p2) ** 2)'], {}), '((p1 - p2) ** 2)\n', (17258, 17274), True, 'import numpy as np\n'), ((17535, 17613), 'matplotlib.patches.Circle', 'Circle', (['circ1', 'size_this'], {'fc': 'facecolors[i]', 'ec': 'edgecolors[i]', 'lw': 'linewidths[i]'}), '(circ1, size_this, fc=facecolors[i], ec=edgecolors[i], lw=linewidths[i])\n', (17541, 17613), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((17668, 17746), 'matplotlib.patches.Circle', 'Circle', (['circ2', 'size_this'], {'fc': 'facecolors[i]', 'ec': 'edgecolors[i]', 'lw': 'linewidths[i]'}), '(circ2, size_this, fc=facecolors[i], ec=edgecolors[i], lw=linewidths[i])\n', (17674, 17746), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((3521, 3583), 'matplotlib.patches.Ellipse', 'Ellipse', (['(x, y)', 'width', 'height'], {'angle': 'ang', 'color': 'col'}), '((x, y), width, height, angle=ang, color=col, **kwargs)\n', (3528, 3583), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((3675, 3726), 'matplotlib.patches.Ellipse', 'Ellipse', (['(x, y)', 'width', 'height'], 
{'angle': 'ang'}), '((x, y), width, height, angle=ang, **kwargs)\n', (3682, 3726), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((4573, 4658), 'matplotlib.patches.Rectangle', 'Rectangle', (['(x - width / 2, y - height / 2)', 'width', 'height'], {'color': 'color'}), '((x - width / 2, y - height / 2), width, height, color=color, **kwargs\n )\n', (4582, 4658), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((4762, 4829), 'matplotlib.patches.Rectangle', 'Rectangle', (['(x - width / 2, y - height / 2)', 'width', 'height'], {}), '((x - width / 2, y - height / 2), width, height, **kwargs)\n', (4771, 4829), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((5768, 5855), 'matplotlib.patches.RegularPolygon', 'RegularPolygon', (['[x, y]'], {'numVertices': 'num_edges', 'radius': 'radius', 'color': 'color'}), '([x, y], numVertices=num_edges, radius=radius, color=color,\n **kwargs)\n', (5782, 5855), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((5965, 6035), 'matplotlib.patches.RegularPolygon', 'RegularPolygon', (['[x, y]'], {'numVertices': 'num_edges', 'radius': 'radius'}), '([x, y], numVertices=num_edges, radius=radius, **kwargs)\n', (5979, 6035), False, 'from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse\n'), ((7301, 7329), 'numpy.array', 'np.array', (['[0, offset + size]'], {}), '([0, offset + size])\n', (7309, 7329), True, 'import numpy as np\n'), ((7383, 7415), 'numpy.array', 'np.array', (['[0, offset + size / 2]'], {}), '([0, offset + size / 2])\n', (7391, 7415), True, 'import numpy as np\n'), ((8984, 9012), 'numpy.array', 'np.array', (['[0, size + offset]'], {}), '([0, size + offset])\n', (8992, 9012), True, 'import numpy as np\n'), ((10884, 10912), 'numpy.array', 'np.array', (['[0, offset + size]'], {}), '([0, offset + size])\n', (10892, 10912), True, 'import 
numpy as np\n'), ((10969, 10990), 'numpy.array', 'np.array', (['[0, offset]'], {}), '([0, offset])\n', (10977, 10990), True, 'import numpy as np\n'), ((11046, 11085), 'numpy.array', 'np.array', (['[r_triangle, -r_triangle / 4]'], {}), '([r_triangle, -r_triangle / 4])\n', (11054, 11085), True, 'import numpy as np\n'), ((11141, 11180), 'numpy.array', 'np.array', (['[-r_triangle, r_triangle / 4]'], {}), '([-r_triangle, r_triangle / 4])\n', (11149, 11180), True, 'import numpy as np\n'), ((11288, 11318), 'numpy.array', 'np.array', (['[0, -r_triangle / 2]'], {}), '([0, -r_triangle / 2])\n', (11296, 11318), True, 'import numpy as np\n'), ((11469, 11498), 'numpy.array', 'np.array', (['[0, r_triangle / 2]'], {}), '([0, r_triangle / 2])\n', (11477, 11498), True, 'import numpy as np\n'), ((13369, 13409), 'numpy.array', 'np.array', (['[0, offset + r_triangle * 2.0]'], {}), '([0, offset + r_triangle * 2.0])\n', (13377, 13409), True, 'import numpy as np\n'), ((13465, 13486), 'numpy.array', 'np.array', (['[0, offset]'], {}), '([0, offset])\n', (13473, 13486), True, 'import numpy as np\n'), ((13542, 13578), 'numpy.array', 'np.array', (['[-r_triangle, -r_triangle]'], {}), '([-r_triangle, -r_triangle])\n', (13550, 13578), True, 'import numpy as np\n'), ((13687, 13732), 'numpy.array', 'np.array', (['[r_triangle * 0.5, -r_triangle / 4]'], {}), '([r_triangle * 0.5, -r_triangle / 4])\n', (13695, 13732), True, 'import numpy as np\n'), ((13789, 13818), 'numpy.array', 'np.array', (['[1 * r_triangle, 0]'], {}), '([1 * r_triangle, 0])\n', (13797, 13818), True, 'import numpy as np\n'), ((13877, 13903), 'numpy.array', 'np.array', (['[0, -r_triangle]'], {}), '([0, -r_triangle])\n', (13885, 13903), True, 'import numpy as np\n'), ((13962, 13993), 'numpy.array', 'np.array', (['[2.0 * r_triangle, 0]'], {}), '([2.0 * r_triangle, 0])\n', (13970, 13993), True, 'import numpy as np\n'), ((15520, 15541), 'numpy.array', 'np.array', (['[0, offset]'], {}), '([0, offset])\n', (15528, 15541), True, 'import 
numpy as np\n'), ((15591, 15611), 'numpy.array', 'np.array', (['[-size, 0]'], {}), '([-size, 0])\n', (15599, 15611), True, 'import numpy as np\n'), ((17325, 17335), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (17332, 17335), True, 'import numpy as np\n'), ((9147, 9183), 'numpy.array', 'np.array', (['[-size / 6.2, -size / 2.6]'], {}), '([-size / 6.2, -size / 2.6])\n', (9155, 9183), True, 'import numpy as np\n'), ((9302, 9336), 'numpy.array', 'np.array', (['[size / 6.2, size / 2.6]'], {}), '([size / 6.2, size / 2.6])\n', (9310, 9336), True, 'import numpy as np\n'), ((11379, 11411), 'numpy.array', 'np.array', (['[-2.5 * r_triangle, 0]'], {}), '([-2.5 * r_triangle, 0])\n', (11387, 11411), True, 'import numpy as np\n'), ((11559, 11590), 'numpy.array', 'np.array', (['[2.5 * r_triangle, 0]'], {}), '([2.5 * r_triangle, 0])\n', (11567, 11590), True, 'import numpy as np\n'), ((9463, 9482), 'numpy.array', 'np.array', (['[0, size]'], {}), '([0, size])\n', (9471, 9482), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
from datetime import datetime as dt
from datetime import timedelta
import glob
from scipy.stats import gamma
import sys
sys.path.insert(0,'model') # I hate this too but it allows everything to use the same helper functions.
from helper_functions import read_in_NNDSS
#Code taken from read_in_cases from Reff_functions. Preprocessing was not helpful for this situation.
def read_cases_lambda(case_file_date):
    """
    Read the NNDSS line list and keep only the columns needed downstream.

    :param case_file_date: date token identifying which NNDSS file to read
    :return: DataFrame with columns date_inferred, STATE, imported, local
    """
    full_df = read_in_NNDSS(case_file_date)
    return full_df[['date_inferred', 'STATE', 'imported', 'local']]
def tidy_cases_lambda(interim_data, remove_territories=True):
    """
    Tidy the interim NNDSS case data into line-list form.

    Drops rows without an inferred notification date, optionally removes the
    territories (NT, ACT), melts the imported/local columns into a single
    SOURCE/n_cases pair and drops zero-count rows.

    :param interim_data: DataFrame with columns date_inferred, STATE, imported, local
    :param remove_territories: if True, drop NT and ACT rows
    :return: line-list DataFrame with columns date_inferred, STATE, SOURCE, n_cases
    """
    # Remove non-existent notification dates
    interim_data = interim_data[~np.isnat(interim_data.date_inferred)]
    # Filter out territories if requested. Previously df_linel was only bound
    # inside the if-branch, so remove_territories=False raised a NameError.
    if remove_territories:
        df_linel = interim_data[(interim_data['STATE'] != 'NT') &
                                (interim_data['STATE'] != 'ACT')]
    else:
        df_linel = interim_data
    # Melt down so that imported and local are no longer columns. Allows
    # multiple draws for infection date, i.e. creates line-list data.
    df_linel = df_linel.melt(id_vars=['date_inferred', 'STATE'],
                             var_name='SOURCE', value_name='n_cases')
    # Drop zero-count rows; reset index or the later joining doesn't work.
    df_linel = df_linel[df_linel.n_cases != 0]
    df_linel = df_linel.reset_index(drop=True)
    return df_linel
##gamma draws take arguments (shape, scale)
def draw_inf_dates(df_linelist, shape_rd=2.77, scale_rd=3.17, offset_rd=0,
                   shape_inc=5.807, scale_inc=0.948, offset_inc=1, nreplicates=1):
    """
    Sample infection dates for every notification in the line list.

    For each case an incubation period (gamma, parameters per Lauer et al.
    2020) and a reporting delay (gamma) are drawn; the infection date is the
    notification date minus the ceiled sum of the two. Each replicate becomes
    a new column named "0", "1", ... appended to the input frame.

    :param df_linelist: line-list DataFrame with a 'date_inferred' column
    :param shape_rd: reporting-delay gamma shape
    :param scale_rd: reporting-delay gamma scale
    :param offset_rd: constant added to every reporting-delay draw
    :param shape_inc: incubation-period gamma shape
    :param scale_inc: incubation-period gamma scale
    :param offset_inc: constant added to every incubation-period draw
    :param nreplicates: number of infection-date samples per case
    :return: df_linelist with nreplicates extra infection-date columns
    """
    notification_dates = df_linelist['date_inferred']
    n_cases = notification_dates.shape[0]
    n_draws = n_cases * nreplicates
    # Draw incubation periods, then reporting delays (long flat vectors).
    incubation = offset_inc + np.random.gamma(shape_inc, scale_inc, size=n_draws)
    report_delay = offset_rd + np.random.gamma(shape_rd, scale_rd, size=n_draws)
    # Total lag between infection and notification. Take the ceiling because
    # the data has no intra-day times: a notification day spans 0000-2359.
    total_lag = np.ceil(incubation + report_delay)
    lag_matrix = total_lag.astype('timedelta64[D]').reshape((n_cases, nreplicates))
    # Repeat the notification dates as one column per replicate, then shift
    # each column back by the sampled lags.
    notif_matrix = np.tile(notification_dates, (nreplicates, 1)).T
    infection_dates = notif_matrix - lag_matrix
    replicate_names = [str(k) for k in range(nreplicates)]
    infdates_df = pd.DataFrame(infection_dates, columns=replicate_names)
    # Column-wise join; verify_integrity guards against duplicated indices.
    return pd.concat([df_linelist, infdates_df], axis=1, verify_integrity=True)
def index_by_infection_date(infections_wide):
    """
    Pivot the wide sampled-infection-date frame into a long frame indexed by
    (STATE, INFECTION_DATE, SOURCE), with one case-count column per draw.

    Days with zero infections are re-inserted with value 0, so that every
    state spans the same date range: from the earliest infection date overall
    to the latest infection date observed in any state.

    :param infections_wide: output of draw_inf_dates - the line list plus one
        sampled infection-date column per replicate (columns 5 onwards)
    :return: DataFrame indexed by (STATE, INFECTION_DATE, SOURCE) with one
        column per replicate holding the number of cases infected that day
    """
    # Replicate columns start after the 4 line-list columns
    # (date_inferred, STATE, SOURCE, n_cases).
    datecolnames = [*infections_wide.columns[4:]]
    df_combined = infections_wide[['STATE','SOURCE',datecolnames[0],'n_cases']].groupby(['STATE', datecolnames[0],'SOURCE']).sum()
    # For each column (cn=column number): concatenate each sample as a column.
    for cn in range(1,len(datecolnames)):
        df_addin = infections_wide[['STATE','SOURCE',datecolnames[cn],'n_cases']].groupby(['STATE', datecolnames[cn],'SOURCE']).sum()
        df_combined = pd.concat([df_combined,df_addin], axis=1, ignore_index = True)
    # NaNs are inserted for missing values when concatenating. If it's
    # missing, there were zero infections on that day for that draw.
    df_combined[np.isnan(df_combined)]=0
    # Rename the index levels.
    df_combined.index.set_names(["STATE","INFECTION_DATE","SOURCE"], inplace=True)
    #return(df_combined)
    ## INCLUDE ALL DAYS WITH ZERO INFECTIONS IN THE INDEX AS WELL.
    # Reindex to include days with zero total infections.
    local_infs = df_combined.xs('local',level='SOURCE')
    imported_infs = df_combined.xs('imported',level='SOURCE')
    statelist = [*df_combined.index.get_level_values('STATE').unique()]
    # Should all states have the same start date? Current code starts every
    # state from the overall first infection date (common start date).
    local_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
    imported_statedict = dict(zip(statelist, np.repeat(None, len(statelist))))
    # Determine start date as the first infection date across all states.
    #start_date = np.datetime64("2020-02-01")
    start_date = df_combined.index.get_level_values('INFECTION_DATE').min()
    # Determine the common end date as the last infection date observed.
    index_only = df_combined.index.to_frame()
    index_only = index_only.reset_index(drop=True)
    maxdates = index_only['INFECTION_DATE'].max()
    for aus_state in statelist:
        state_data = local_infs.xs(aus_state, level='STATE')
        #start_date = state_data.index.min()
        #dftest.index=dftest.reindex(alldates, fill_value=0)
        alldates = pd.date_range(start_date, maxdates) # All days from start_date to the last infection day.
        local_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
    for aus_state in statelist:
        state_data = imported_infs.xs(aus_state, level='STATE')
        alldates = pd.date_range(start_date, maxdates)
        imported_statedict[aus_state] = state_data.reindex(alldates, fill_value=0)
    # Convert the per-state dictionaries back to data frames.
    df_local_inc_zeros = pd.concat(local_statedict)
    df_local_inc_zeros['SOURCE']='local'
    df_imp_inc_zeros = pd.concat(imported_statedict)
    df_imp_inc_zeros['SOURCE']='imported'
    # Merge local and imported frames and rebuild the 3-level index
    # (pd.concat of dicts names the levels level_0/level_1).
    df_inc_zeros = pd.concat([df_local_inc_zeros, df_imp_inc_zeros])
    df_inc_zeros = df_inc_zeros.reset_index()
    df_inc_zeros= df_inc_zeros.groupby(['level_0',"level_1","SOURCE"]).sum()
    df_inc_zeros.index = df_inc_zeros.index.rename(['STATE','INFECTION_DATE',"SOURCE"])
    return(df_inc_zeros)
def generate_lambda(infection_dates, shape_gen=3.64/3.07, scale_gen=3.07,
                   trunc_days=21,shift=0, offset=1):
    """
    Compute the force of infection Lambda_t from sampled infection counts.

    Given an array of infection counts (N_dates by N_samples), each sample
    column is convolved with a discretised gamma generation-interval
    distribution, truncated at trunc_days. Default generation interval
    parameters are taken from Ganyani et al. 2020.

    :param infection_dates: (N_dates, N_samples) array of infection counts
    :param shape_gen: gamma shape of the generation interval
    :param scale_gen: gamma scale of the generation interval
    :param trunc_days: truncation length of the generation interval
    :param shift: shift applied to the discretisation midpoints
    :param offset: number of leading days forced to zero weight
    :return: (N_dates - trunc_days + 1, N_samples) array of Lambda_t values
    """
    from scipy.stats import gamma
    # Midpoints at which the continuous gamma pdf is evaluated.
    midpoints = [shift + x for x in range(trunc_days + 1)]
    pdf_vals = gamma.pdf(midpoints, a=shape_gen, scale=scale_gen)
    # Renormalise into a discrete probability mass function.
    disc_gamma = pdf_vals / sum(pdf_vals)
    # Shift the weights right by `offset` days, zero-filling the front.
    kernel = np.zeros(trunc_days)
    kernel[offset:] = disc_gamma[:trunc_days - offset]
    n_rows = infection_dates.shape[0] - trunc_days + 1
    n_cols = infection_dates.shape[1]
    lambda_t = np.zeros(shape=(n_rows, n_cols))
    for col in range(n_cols):
        lambda_t[:, col] = np.convolve(infection_dates[:, col], kernel, mode='valid')
    return lambda_t
def lambda_all_states(df_infection, **kwargs):
    """
    Run generate_lambda for every state present in the infection-date index.

    :param df_infection: DataFrame indexed by (STATE, INFECTION_DATE, SOURCE)
        with one column of infection counts per sample
    :param kwargs: forwarded to generate_lambda
    :return: dict mapping state name to its Lambda_t array
    """
    statelist = [*df_infection.index.get_level_values('STATE').unique()]
    # Aggregate imported + local counts once. This does not depend on the
    # state, so it is hoisted out of the loop (it was recomputed per state).
    df_total_infections = df_infection.groupby(['STATE','INFECTION_DATE']).agg(sum)
    lambda_dict = {}
    for state in statelist:
        lambda_dict[state] = generate_lambda(
            df_total_infections.loc[state].values,
            **kwargs
        )
    return lambda_dict
def Reff_from_case(cases_by_infection, lamb, prior_a=1, prior_b=5, tau=7, samples=1000):
    r"""
    Estimate the effective reproduction number following Cori et al. 2013.

    Given case incidence by date of infection and the force of infection
    :math:`\Lambda_t` on day t, compute the gamma posterior of the effective
    reproduction number at time t with smoothing parameter :math:`\tau`.
    (The docstring is a raw string: previously ``\t`` in ``\tau`` was parsed
    as a tab and ``\Lambda`` was an invalid escape sequence.)

    cases_by_infection: a T by N array, for T days and N samples
    lamb: a T by N array, for T days and N samples
    prior_a, prior_b: shape and scale of the gamma prior on R
    tau: smoothing window in days
    samples: unused; kept for backward compatibility of the signature

    :return: (a, b, R) - posterior gamma shape, posterior gamma scale, and one
        posterior draw of R, each of shape (T - 20 - tau, N)
    """
    csum_incidence = np.cumsum(cases_by_infection, axis=0)
    # Remove the first few incidences to align with the size of lambda
    # (generation interval length 20).
    csum_incidence = csum_incidence[20:, :]
    csum_lambda = np.cumsum(lamb, axis=0)
    # Rolling tau-day sums via differences of cumulative sums.
    roll_sum_incidence = csum_incidence[tau:, :] - csum_incidence[:-tau, :]
    roll_sum_lambda = csum_lambda[tau:, :] - csum_lambda[:-tau, :]
    # Conjugate gamma posterior update (shape a, scale b).
    a = prior_a + roll_sum_incidence
    b = 1 / (1 / prior_b + roll_sum_lambda)
    # One posterior draw per (day, sample); inputs are T-tau by N, and the
    # output has the same shape.
    R = np.random.gamma(a, b)  # shape, scale
    return a, b, R
def generate_summary(samples, dates_by='rows'):
    """
    Summarise an array of posterior samples with moments and quantiles.

    :param samples: 2d array of samples (T by N when rows index the dates)
    :param dates_by: 'rows' if rows index dates (summarise across columns);
        any other value summarises across rows instead
    :return: dict with keys mean, std, bottom, lower, median, upper, top,
        where bottom/lower/median/upper/top are the 5/25/50/75/95% quantiles
    """
    # Reduce over the sample dimension: columns when dates are rows.
    axis = 1 if dates_by == 'rows' else 0
    bottom, lower, median, upper, top = np.quantile(
        samples, (0.05, 0.25, 0.5, 0.75, 0.95), axis=axis)
    return {
        'mean': np.mean(samples, axis=axis),
        'std': np.std(samples, axis=axis),
        'bottom': bottom,
        'lower': lower,
        'median': median,
        'upper': upper,
        'top': top,
    }
def plot_Reff(Reff:dict, dates=None, ax_arg=None, truncate=None, **kwargs):
    """
    Plot the Reff distribution over time from its summary statistics.

    :param Reff: dict of summary statistics with keys 'mean', 'lower',
        'upper', 'bottom' and 'top' (as produced by generate_summary)
    :param dates: x-axis values; defaults to 0..len(Reff['mean'])-1
    :param ax_arg: optional (fig, ax) tuple to draw onto; when None a new
        figure and axes are created
    :param truncate: optional (start, end) index pair selecting the slice of
        the series to plot
    :param kwargs: forwarded to ax.plot (e.g. label)
    :return: (fig, ax)
    """
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-poster')
    from datetime import datetime as dt
    if ax_arg is None:
        fig, ax = plt.subplots(figsize=(12,9))
    else:
        fig, ax = ax_arg
    # NOTE(review): _get_lines.prop_cycler is a private matplotlib API used to
    # pull the next colour off the axes' cycle - confirm it still exists in
    # the pinned matplotlib version.
    color_cycle = ax._get_lines.prop_cycler
    curr_color = next(color_cycle)['color']
    if dates is None:
        dates = range(len(Reff['mean']))
    if truncate is None:
        # Mean line plus two shaded bands (lower-upper and bottom-top,
        # presumably the 50% and 90% intervals from generate_summary).
        ax.plot(dates, Reff['mean'], color= curr_color, **kwargs)
        ax.fill_between(dates, Reff['lower'],Reff['upper'], alpha=0.4, color = curr_color)
        ax.fill_between(dates, Reff['bottom'],Reff['top'], alpha=0.4, color= curr_color)
    else:
        # Same plot restricted to the truncate[0]:truncate[1] slice.
        ax.plot(dates[truncate[0]:truncate[1]], Reff['mean'][truncate[0]:truncate[1]], color= curr_color, **kwargs)
        ax.fill_between(dates[truncate[0]:truncate[1]], Reff['lower'][truncate[0]:truncate[1]],
                        Reff['upper'][truncate[0]:truncate[1]],
                        alpha=0.4, color = curr_color)
        ax.fill_between(dates[truncate[0]:truncate[1]], Reff['bottom'][truncate[0]:truncate[1]],
                        Reff['top'][truncate[0]:truncate[1]],
                        alpha=0.4, color= curr_color)
    #plt.legend()
    # Dashed grid line at R_eff = 1 (set as a minor tick so it can be styled
    # independently of the labelled major ticks at 0, 2 and 3).
    ax.set_yticks([1],minor=True,)
    ax.set_yticks([0,2,3],minor=False)
    ax.set_yticklabels([0,2,3],minor=False)
    ax.yaxis.grid(which='minor',linestyle='--',color='black',linewidth=2)
    ax.tick_params(axis='x', rotation = 90)
    return fig, ax
def plot_all_states(R_summ_states,df_interim, dates,
                   start='2020-03-01',end='2020-08-01',save=True, date =None, tau = 7,
                   nowcast_truncation=-10):
    """
    Plot Reff results over time for all jurisdictions (2x3 grid, NT and ACT
    excluded), with local/imported case bars on a secondary axis behind each
    Reff curve.

    :param R_summ_states: dict mapping state name to an Reff summary dict
        (as consumed by plot_Reff)
    :param df_interim: interim NNDSS frame with date_inferred, STATE,
        imported and local columns (used for the case bars)
    :param dates: dictionary of (region, date) pairs where date holds the
        relevant dates for plotting cases by inferred symptom-onset
    :param start: x-axis lower limit (date string)
    :param end: x-axis upper limit (date string)
    :param save: if True, write the figure to figs/EpyReff/
    :param date: date string used in the saved file name
    :param tau: smoothing window, used only in the saved file name
    :param nowcast_truncation: index at which the "Our Model" segment ends
        and the "Nowcast" segment begins
    :return: (fig, ax)
    """
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    states = df_interim.STATE.unique().tolist()
    states.remove('NT')
    states.remove('ACT')
    # Retained for the commented-out LSHTM comparison further below.
    date_filter = pd.date_range(start=start,end=end)
    # Prepare NNDSS case counts per day and state.
    df_cases = df_interim.groupby(['date_inferred','STATE']).agg(sum)
    df_cases = df_cases.reset_index()
    fig, ax = plt.subplots(nrows=2, ncols=3,
                        sharex=True, sharey=True,
                        figsize=(15,12)
                    )
    for i,state in enumerate(states):
        # Map the flat state index onto the 2x3 grid.
        row = i//3
        col = i%3
        R_summary = R_summ_states[state]
        #a,b,R = Reff_from_case(df_state_I.values,lambda_state,prior_a=1, prior_b=2, tau=tau)
        #R_summary = generate_summary(R)
        # First segment: the established model estimate; second segment: the
        # nowcast tail after the truncation point.
        fig, ax[row,col] = plot_Reff(R_summary,
                                    dates=dates[state],
                                    ax_arg=(fig, ax[row,col]),
                                    truncate=(0,nowcast_truncation),
                                    label='Our Model')
        fig, ax[row,col] = plot_Reff(R_summary,
                                    dates=dates[state],
                                    ax_arg=(fig, ax[row,col]),
                                    truncate=(nowcast_truncation,None),
                                    label='Nowcast')
        # Plot formatting.
        ax[row,col].set_title(state)
        ax[row,col].set_ylim((0,4))
        ax[row,col].set_xlim((pd.to_datetime(start),pd.to_datetime(end)))
        # Plot case bars behind on a secondary y-axis: total (local +
        # imported) faint, local on top darker.
        ax2 = ax[row,col].twinx()
        ax2.bar(df_cases.loc[df_cases.STATE==state,'date_inferred'],
            df_cases.loc[df_cases.STATE==state,'local']+df_cases.loc[df_cases.STATE==state,'imported'],
            color='grey',
            alpha=0.3
            )
        ax2.bar(df_cases.loc[df_cases.STATE==state,'date_inferred'],
            df_cases.loc[df_cases.STATE==state,'local'],
            color='grey',
            alpha=0.8
            )
    # Set common axis labels for the whole figure.
    fig.text(0.5, 0.01, 'Date', ha='center', va='center',
            fontsize=20)
    fig.text(0.08, 0.5,
            'Effective \nReproduction Number',
            ha='center', va='center', rotation='vertical',
            fontsize=20)
    fig.text(0.95, 0.5, 'Local Cases', ha='center', va='center',
            rotation=270,
            fontsize=20)
    # Plot old LSHTM estimates (kept for reference, currently disabled).
    #df_june = df_L_R.loc[(df_L_R.date_of_analysis=='2020-07-27')&(df_L_R.state==state)]
    #df = df_june.loc[(df_june.date.isin(date_filter))]
    #ax[row,col].plot(df.date, df['median'], label='Old LSHTM',color='C1')
    #ax[row,col].fill_between(df.date, df['bottom'], df['top'],color='C1', alpha=0.3)
    #ax[row,col].fill_between(df.date, df['lower'], df['upper'],color='C1', alpha=0.3)
    if save:
        import os
        os.makedirs("figs/EpyReff/", exist_ok=True)
        plt.savefig("figs/EpyReff/Reff_tau_"+str(tau)+"_"+date+".pdf",format='pdf')
    return fig, ax
| [
"helper_functions.read_in_NNDSS",
"numpy.isnat",
"numpy.isnan",
"numpy.random.gamma",
"matplotlib.pyplot.style.use",
"numpy.mean",
"numpy.tile",
"numpy.convolve",
"pandas.DataFrame",
"numpy.std",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"pandas.concat",
"scipy.stats.gamma.pdf",
"pan... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((112, 143), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-poster"""'], {}), "('seaborn-poster')\n", (125, 143), True, 'import matplotlib.pyplot as plt\n'), ((270, 297), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""model"""'], {}), "(0, 'model')\n", (285, 297), False, 'import sys\n'), ((613, 642), 'helper_functions.read_in_NNDSS', 'read_in_NNDSS', (['case_file_date'], {}), '(case_file_date)\n', (626, 642), False, 'from helper_functions import read_in_NNDSS\n'), ((2710, 2729), 'numpy.ceil', 'np.ceil', (['id_nd_diff'], {}), '(id_nd_diff)\n', (2717, 2729), True, 'import numpy as np\n'), ((3307, 3358), 'pandas.DataFrame', 'pd.DataFrame', (['infection_dates'], {'columns': 'datecolnames'}), '(infection_dates, columns=datecolnames)\n', (3319, 3358), True, 'import pandas as pd\n'), ((3528, 3596), 'pandas.concat', 'pd.concat', (['[df_linelist, infdates_df]'], {'axis': '(1)', 'verify_integrity': '(True)'}), '([df_linelist, infdates_df], axis=1, verify_integrity=True)\n', (3537, 3596), True, 'import pandas as pd\n'), ((6159, 6185), 'pandas.concat', 'pd.concat', (['local_statedict'], {}), '(local_statedict)\n', (6168, 6185), True, 'import pandas as pd\n'), ((6250, 6279), 'pandas.concat', 'pd.concat', (['imported_statedict'], {}), '(imported_statedict)\n', (6259, 6279), True, 'import pandas as pd\n'), ((6378, 6427), 'pandas.concat', 'pd.concat', (['[df_local_inc_zeros, df_imp_inc_zeros]'], {}), '([df_local_inc_zeros, df_imp_inc_zeros])\n', (6387, 6427), True, 'import pandas as pd\n'), ((7313, 7359), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['xmids'], {'a': 'shape_gen', 'scale': 'scale_gen'}), '(xmids, a=shape_gen, scale=scale_gen)\n', (7322, 7359), False, 'from scipy.stats import gamma\n'), ((7608, 7698), 'numpy.zeros', 'np.zeros', ([], {'shape': '(infection_dates.shape[0] - trunc_days + 1, infection_dates.shape[1])'}), 
'(shape=(infection_dates.shape[0] - trunc_days + 1, infection_dates.\n shape[1]))\n', (7616, 7698), True, 'import numpy as np\n'), ((8779, 8816), 'numpy.cumsum', 'np.cumsum', (['cases_by_infection'], {'axis': '(0)'}), '(cases_by_infection, axis=0)\n', (8788, 8816), True, 'import numpy as np\n'), ((8978, 9001), 'numpy.cumsum', 'np.cumsum', (['lamb'], {'axis': '(0)'}), '(lamb, axis=0)\n', (8987, 9001), True, 'import numpy as np\n'), ((9239, 9260), 'numpy.random.gamma', 'np.random.gamma', (['a', 'b'], {}), '(a, b)\n', (9254, 9260), True, 'import numpy as np\n'), ((9785, 9810), 'numpy.mean', 'np.mean', (['samples'], {'axis': 'ax'}), '(samples, axis=ax)\n', (9792, 9810), True, 'import numpy as np\n'), ((9853, 9913), 'numpy.quantile', 'np.quantile', (['samples', '(0.05, 0.25, 0.5, 0.75, 0.95)'], {'axis': 'ax'}), '(samples, (0.05, 0.25, 0.5, 0.75, 0.95), axis=ax)\n', (9864, 9913), True, 'import numpy as np\n'), ((10031, 10055), 'numpy.std', 'np.std', (['samples'], {'axis': 'ax'}), '(samples, axis=ax)\n', (10037, 10055), True, 'import numpy as np\n'), ((10486, 10517), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-poster"""'], {}), "('seaborn-poster')\n", (10499, 10517), True, 'import matplotlib.pyplot as plt\n'), ((12525, 12560), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start', 'end': 'end'}), '(start=start, end=end)\n', (12538, 12560), True, 'import pandas as pd\n'), ((12710, 12784), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(15, 12)'}), '(nrows=2, ncols=3, sharex=True, sharey=True, figsize=(15, 12))\n', (12722, 12784), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2332), 'numpy.random.gamma', 'np.random.gamma', (['shape_inc', 'scale_inc'], {'size': '(nsamples * nreplicates)'}), '(shape_inc, scale_inc, size=nsamples * nreplicates)\n', (2281, 2332), True, 'import numpy as np\n'), ((2361, 2425), 'numpy.random.gamma', 'np.random.gamma', 
(['shape_rd', 'scale_rd'], {'size': '(nsamples * nreplicates)'}), '(shape_rd, scale_rd, size=nsamples * nreplicates)\n', (2376, 2425), True, 'import numpy as np\n'), ((3026, 3071), 'numpy.tile', 'np.tile', (['notification_dates', '(nreplicates, 1)'], {}), '(notification_dates, (nreplicates, 1))\n', (3033, 3071), True, 'import numpy as np\n'), ((4122, 4183), 'pandas.concat', 'pd.concat', (['[df_combined, df_addin]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_combined, df_addin], axis=1, ignore_index=True)\n', (4131, 4183), True, 'import pandas as pd\n'), ((4308, 4329), 'numpy.isnan', 'np.isnan', (['df_combined'], {}), '(df_combined)\n', (4316, 4329), True, 'import numpy as np\n'), ((5688, 5723), 'pandas.date_range', 'pd.date_range', (['start_date', 'maxdates'], {}), '(start_date, maxdates)\n', (5701, 5723), True, 'import pandas as pd\n'), ((5973, 6008), 'pandas.date_range', 'pd.date_range', (['start_date', 'maxdates'], {}), '(start_date, maxdates)\n', (5986, 6008), True, 'import pandas as pd\n'), ((7760, 7812), 'numpy.convolve', 'np.convolve', (['infection_dates[:, n]', 'ws'], {'mode': '"""valid"""'}), "(infection_dates[:, n], ws, mode='valid')\n", (7771, 7812), True, 'import numpy as np\n'), ((10604, 10633), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (10616, 10633), True, 'import matplotlib.pyplot as plt\n'), ((15296, 15339), 'os.makedirs', 'os.makedirs', (['"""figs/EpyReff/"""'], {'exist_ok': '(True)'}), "('figs/EpyReff/', exist_ok=True)\n", (15307, 15339), False, 'import os\n'), ((880, 916), 'numpy.isnat', 'np.isnat', (['interim_data.date_inferred'], {}), '(interim_data.date_inferred)\n', (888, 916), True, 'import numpy as np\n'), ((13856, 13877), 'pandas.to_datetime', 'pd.to_datetime', (['start'], {}), '(start)\n', (13870, 13877), True, 'import pandas as pd\n'), ((13878, 13897), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {}), '(end)\n', (13892, 13897), True, 'import pandas as pd\n')] |
import gym
from datetime import datetime
from collections import deque
import numpy as np
class Monitor:
    """Drives training, periodic evaluation, and testing of an RL agent.

    Wires together an environment, an agent (with model and replay memory),
    a logger, and optional visdom-based visualization. All hyperparameters
    come from the ``monitor_param`` namespace passed to ``__init__``.
    """

    def __init__(
        self,
        monitor_param,
        agent_prototype,
        model_prototype,
        memory_prototype,
        env_prototype,
    ):
        """Build the env and agent from their prototypes and reset logs.

        Args:
            monitor_param: config namespace (logger, visualization flags,
                episode budgets, eval/report frequencies, seed, env/agent
                params, output paths).
            agent_prototype: callable producing the agent.
            model_prototype: model class, forwarded to the agent.
            memory_prototype: replay-memory class, forwarded to the agent.
            env_prototype: callable producing the environment.
        """
        self.logger = monitor_param.logger
        self.logger.info("-----------------------------[ Monitor ]------------------")
        self.visualize = monitor_param.visualize
        self.env_render = monitor_param.env_render
        # NOTE(review): refs/visdom and imsave/img_dir are only bound when the
        # corresponding flag is set here; test_agent() later forces
        # env_render = True, which would hit a missing self.imsave if
        # env_render was False at construction -- confirm intended usage.
        if self.visualize:
            self.refs = monitor_param.refs
            self.visdom = monitor_param.vis
        if self.env_render:
            self.imsave = monitor_param.imsave
            self.img_dir = monitor_param.img_dir
        self.testing_at_end_training = monitor_param.testing
        self.train_n_episodes = monitor_param.train_n_episodes
        self.test_n_episodes = monitor_param.test_n_episodes
        self.max_steps_in_episode = monitor_param.max_steps_in_episode
        self.eval_during_training = monitor_param.eval_during_training
        self.eval_freq = monitor_param.eval_freq_by_episodes
        self.eval_steps = monitor_param.eval_steps
        self.seed = monitor_param.seed
        self.report_freq = monitor_param.report_freq_by_episodes
        self.reward_solved_criteria = monitor_param.reward_solved_criteria
        self.logger.info("-----------------------------[ Env ]------------------")
        self.logger.info(
            f"Creating {{{monitor_param.env_type} | {monitor_param.game}}} w/ seed {self.seed}"
        )
        self.env = env_prototype(monitor_param.env_params)
        state_shape = self.env.get_state_shape()
        action_size = self.env.get_action_size()
        self.output_filename = monitor_param.output_filename
        self.agent = agent_prototype(
            agent_params=monitor_param.agent_params,
            state_shape=state_shape,
            action_size=action_size,
            model_prototype=model_prototype,
            memory_prototype=memory_prototype,
        )
        self.actions_legend = monitor_param.actions_legend
        self._reset_log()

    def _reset_log(self):
        """Initialize the summaries dict (one entry per visdom panel).

        Each summary is either a "line" series (list of [x, y] points) or a
        "text" panel (plain string). Also resets the global step counter.
        """
        self.summaries = {}
        for summary in [
            "eval_steps_avg",
            "eval_reward_avg",
            "eval_n_episodes_solved",
            "training_rolling_reward_avg",
            "training_rolling_loss",
            "training_epsilon",
            "training_rolling_steps_avg",
            "text_elapsed_time",
            "eval_state_values",
        ]:
            if "text" in summary:
                self.summaries[summary] = {"log": "", "type": "text"}
            else:
                self.summaries[summary] = {"log": [], "type": "line"}
        self.counter_steps = 0

    def _train_on_episode(self):
        """Run one training episode.

        Returns:
            (episode_reward, episode_steps, mean_loss) -- mean over the last
            100 recorded losses.  NOTE(review): if the agent never learns
            during the episode, ``losses`` is empty and np.mean returns nan
            (with a RuntimeWarning).
        """
        state = self.env.reset()
        episode_steps = 0
        episode_reward = 0.0
        losses = deque(maxlen=100)
        for t in range(self.max_steps_in_episode):
            action = self.agent.act(state)
            next_state, reward, done = self.env.step(action)
            self.agent.step(state, action, reward, next_state, done)
            # agent.t_step wraps around; 0 marks a "time to learn" tick.
            if self.agent.t_step == 0:
                loss = self.agent.learn()
                if loss is not None:
                    losses.append(loss)
            state = next_state
            episode_reward += reward
            episode_steps += 1
            self.counter_steps += 1
            if done:
                break
        return episode_reward, episode_steps, np.mean(losses)

    def _when_resolved(self, rewards_window, i_episode, start_time, steps_window, loss):
        """Handle the solved condition: final report, save, eval, optional test."""
        self._report_log_visual(
            i_episode, True, start_time, rewards_window, steps_window, loss
        )
        self.logger.info(f"+-+-+-+-+-+-+-+ Saving model ... +-+-+-+-+-+-+-+")
        self.agent.save(self.output_filename)
        self.logger.warning(
            f"nununununununununununununu Evaluating @ Step {self.counter_steps} nununununununununununununu"
        )
        self.eval_agent()
        if self.visualize:
            self._visual()
        if self.testing_at_end_training:
            self.logger.warning(
                f"nununununununununununununu Testing Agent nununununununununununununu"
            )
            self.test_agent()

    def train(self):
        """Main training loop over episodes.

        Tracks 100-episode rolling windows of reward/steps; stops early when
        the rolling mean reward reaches ``reward_solved_criteria``.
        Periodically reports and (optionally) evaluates the agent.
        """
        self.agent.training = True
        self.env.training = True
        self.logger.warning(
            "nununununununununununununu Training ... nununununununununununununu"
        )
        start_time = datetime.now()
        rewards_window = deque(maxlen=100)
        steps_window = deque(maxlen=100)
        for i_episode in range(1, self.train_n_episodes + 1):
            episode_reward, episode_steps, loss = self._train_on_episode()
            self.agent.update_epsilon()
            rewards_window.append(episode_reward)
            steps_window.append(episode_steps)
            # If resolved
            if np.mean(rewards_window) >= self.reward_solved_criteria:
                self._when_resolved(
                    rewards_window, i_episode, start_time, steps_window, loss
                )
                break
            if i_episode % self.report_freq == 0:
                self._report_log_visual(
                    i_episode, False, start_time, rewards_window, steps_window, loss
                )
            # evaluation & checkpointing
            if self.eval_during_training and i_episode % self.eval_freq == 0:
                self.logger.warning(
                    f"nununununununununununununu Evaluating @ Step {self.counter_steps} nununununununununununununu"
                )
                self.eval_agent()
                # eval_agent flips training off; restore it for the next episode.
                self.agent.training = True
                self.logger.warning(
                    f"nununununununununununununu Resume Training @ Step {self.counter_steps} nununununununununununununu"
                )
            if self.visualize:
                self._visual()

    def _report_log_visual(
        self, i_episode, resolved, start_time, rewards_window, steps_window, loss
    ):
        """Log current training stats and append them to the visdom summaries."""
        self.logger.info(
            f"\033[1m Reporting @ Episode {i_episode} | @ Step {self.counter_steps}"
        )
        if resolved:
            self.logger.warning(f"Environment solved in {i_episode} episodes!")
        self.logger.info(f"Training Stats: elapsed time:\t{ datetime.now()-start_time}")
        self.logger.info(f"Training Stats: epsilon:\t{self.agent.eps}")
        self.logger.info(f"Training Stats: avg reward:\t{np.mean(rewards_window)}")
        self.logger.info(
            f"Training Stats: avg steps by episode:\t{np.mean(steps_window)}"
        )
        self.logger.info(f"Training Stats: last loss:\t{loss}")
        if self.visualize:
            self.summaries["training_epsilon"]["log"].append(
                [i_episode, self.agent.eps]
            )
            self.summaries["training_rolling_reward_avg"]["log"].append(
                [i_episode, np.mean(rewards_window)]
            )
            self.summaries["training_rolling_steps_avg"]["log"].append(
                [i_episode, np.mean(steps_window)]
            )
            if loss is not None:
                self.summaries["training_rolling_loss"]["log"].append(
                    [i_episode, float(loss)]
                )
            self.summaries["text_elapsed_time"][
                "log"
            ] = f"Elapsed time \t{datetime.now()-start_time}"
            self._visual()

    def eval_agent(self):
        """Evaluate the greedy agent for ``eval_steps`` environment steps.

        Accumulates per-episode steps/rewards and per-step mean Q-values into
        the summaries, then logs the latest value of each eval series.
        Side effect: switches agent/env out of training mode.
        """
        self.agent.training = False
        self.env.training = False
        eval_step = 0
        eval_nepisodes_solved = 0
        eval_episode_steps = 0
        eval_episode_reward = 0
        eval_episode_reward_log = []
        eval_episode_steps_log = []
        eval_state_value_log = []
        state = self.env.reset()
        while eval_step < self.eval_steps:
            state_processed = self.agent.memory.get_recent_states(state).flatten()
            eval_action, q_values = self.agent.get_raw_actions(state_processed)
            next_state, reward, done = self.env.step(eval_action)
            self.agent.memory.append_recent(state, done)
            self._render(eval_step, "eval")
            self._show_values(q_values)
            eval_state_value_log.append([eval_step, np.mean(q_values)])
            eval_episode_reward += reward
            eval_episode_steps += 1
            state = next_state
            if done:
                eval_nepisodes_solved += 1
                eval_episode_steps_log.append([eval_episode_steps])
                eval_episode_reward_log.append([eval_episode_reward])
                eval_episode_steps = 0
                eval_episode_reward = 0
                state = self.env.reset()
            eval_step += 1
        self.summaries["eval_steps_avg"]["log"].append(
            [self.counter_steps, np.mean(eval_episode_steps_log)]
        )
        del eval_episode_steps_log
        self.summaries["eval_reward_avg"]["log"].append(
            [self.counter_steps, np.mean(eval_episode_reward_log)]
        )
        del eval_episode_reward_log
        self.summaries["eval_n_episodes_solved"]["log"].append(
            [self.counter_steps, eval_nepisodes_solved]
        )
        self.summaries["eval_state_values"]["log"] = eval_state_value_log
        for key in self.summaries.keys():
            if self.summaries[key]["type"] == "line" and "eval" in key:
                self.logger.info(
                    f"@ Step {self.counter_steps}; {key}: {self.summaries[key]['log'][-1][1]}"
                )

    def test_agent(self, checkpoint=""):
        """Load a checkpoint and run ``test_n_episodes`` greedy episodes.

        Forces env rendering on; frames are written under the ``test/``
        subdirectory by _render.
        """
        self.agent.training = False
        self.env.training = False
        self.agent.load(checkpoint)
        self.env_render = True
        step = 0
        for i in range(self.test_n_episodes):
            state = self.env.reset()
            done = False
            while not done:
                action = self.agent.act(state)
                next_state, reward, done = self.env.step(action)
                self._render(step, "test")
                state = next_state
                step += 1

    def _render(self, frame_ind, subdir):
        """Save the current env frame to disk and/or push it to visdom."""
        if self.env_render:
            frame = self.env.render()
            if frame is not None:
                frame_name = self.img_dir + f"{subdir}/{frame_ind:05d}.jpg"
                self.imsave(frame_name, frame)
        if self.visualize:
            frame = self.env.render()
            if frame is not None:
                # visdom expects channel-first (C, H, W) images.
                self.visdom.image(
                    np.transpose(frame, (2, 0, 1)),
                    env=self.refs,
                    win="state",
                    opts=dict(title="render"),
                )

    def _show_values(self, values):
        """Push the current Q-values as a bar chart to visdom (if enabled)."""
        if self.visualize:
            self.visdom.bar(
                values.T,
                env=self.refs,
                win="q_values",
                opts=dict(title="q_values", legend=self.actions_legend),
            )

    def _visual(self):
        """Flush all summaries to their visdom windows (line or text panels)."""
        for key in self.summaries.keys():
            if self.summaries[key]["type"] == "line":
                data = np.array(self.summaries[key]["log"])
                # Skip series that have no points yet.
                if data.ndim < 2:
                    continue
                self.visdom.line(
                    X=data[:, 0],
                    Y=data[:, 1],
                    env=self.refs,
                    win=f"win_{key}",
                    opts=dict(title=key),
                )
            elif self.summaries[key]["type"] == "text":
                self.visdom.text(
                    self.summaries[key]["log"],
                    env=self.refs,
                    win=f"win_{key}",
                    opts=dict(title=key),
                )
| [
"numpy.transpose",
"numpy.mean",
"numpy.array",
"datetime.datetime.now",
"collections.deque"
] | [((2922, 2939), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (2927, 2939), False, 'from collections import deque\n'), ((4570, 4584), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4582, 4584), False, 'from datetime import datetime\n'), ((4611, 4628), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4616, 4628), False, 'from collections import deque\n'), ((4652, 4669), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4657, 4669), False, 'from collections import deque\n'), ((3552, 3567), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3559, 3567), True, 'import numpy as np\n'), ((4989, 5012), 'numpy.mean', 'np.mean', (['rewards_window'], {}), '(rewards_window)\n', (4996, 5012), True, 'import numpy as np\n'), ((8928, 8959), 'numpy.mean', 'np.mean', (['eval_episode_steps_log'], {}), '(eval_episode_steps_log)\n', (8935, 8959), True, 'import numpy as np\n'), ((9096, 9128), 'numpy.mean', 'np.mean', (['eval_episode_reward_log'], {}), '(eval_episode_reward_log)\n', (9103, 9128), True, 'import numpy as np\n'), ((11190, 11226), 'numpy.array', 'np.array', (["self.summaries[key]['log']"], {}), "(self.summaries[key]['log'])\n", (11198, 11226), True, 'import numpy as np\n'), ((6572, 6595), 'numpy.mean', 'np.mean', (['rewards_window'], {}), '(rewards_window)\n', (6579, 6595), True, 'import numpy as np\n'), ((6679, 6700), 'numpy.mean', 'np.mean', (['steps_window'], {}), '(steps_window)\n', (6686, 6700), True, 'import numpy as np\n'), ((7026, 7049), 'numpy.mean', 'np.mean', (['rewards_window'], {}), '(rewards_window)\n', (7033, 7049), True, 'import numpy as np\n'), ((7165, 7186), 'numpy.mean', 'np.mean', (['steps_window'], {}), '(steps_window)\n', (7172, 7186), True, 'import numpy as np\n'), ((8357, 8374), 'numpy.mean', 'np.mean', (['q_values'], {}), '(q_values)\n', (8364, 8374), True, 'import numpy as np\n'), ((10613, 10643), 'numpy.transpose', 'np.transpose', (['frame', 
'(2, 0, 1)'], {}), '(frame, (2, 0, 1))\n', (10625, 10643), True, 'import numpy as np\n'), ((6414, 6428), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6426, 6428), False, 'from datetime import datetime\n'), ((7475, 7489), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7487, 7489), False, 'from datetime import datetime\n')] |
from __future__ import division, print_function, absolute_import
import argparse
import functools
import json
import logging
import math
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import tqdm
# Best-effort: silence TensorFlow's deprecation/info chatter. Some TF builds
# lack the compat.v1 shim, in which case we keep the default verbosity.
# Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
try:
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
except Exception:
    pass
import examples.cnn3d.model as model
import examples.cnn3d.feature_qm9 as feature_qm9
import examples.cnn3d.subgrid_gen as subgrid_gen
import examples.cnn3d.util as util
import dotenv as de
# Load environment variables (QM9_* dataset paths, MODEL_DIR) from the
# nearest .env file, searching upward from the current working directory.
de.load_dotenv(de.find_dotenv(usecwd=True))
def compute_global_correlations(results):
    """Correlate true vs. predicted labels over the whole result set.

    Args:
        results: table with 'true' and 'pred' columns (pandas Series access).

    Returns:
        dict mapping 'all_pearson' / 'all_kendall' / 'all_spearman' to the
        corresponding correlation coefficient.
    """
    truth = results['true'].astype(float)
    predicted = results['pred'].astype(float)
    return {
        'all_%s' % method: truth.corr(predicted, method=method)
        for method in ('pearson', 'kendall', 'spearman')
    }
def compute_mean_absolute_error(results):
    """Return the mean absolute error between the 'true' and 'pred' columns.

    Args:
        results: table with 'true' and 'pred' columns (pandas Series access).
    """
    truth = results['true'].astype(float)
    predicted = results['pred'].astype(float)
    return (truth - predicted).abs().mean()
# Construct model and loss
def conv_model(feature, target, is_training, conv_drop_rate, fc_drop_rate,
               top_nn_drop_rate, args):
    """Build the 3D-CNN regression graph and its MSE loss.

    Filter counts double per conv layer starting at 32; max-pooling is
    applied on every other layer.
    NOTE(review): with an odd args.num_conv, max_pool_positions has
    num_conv + 1 entries (one more than layers) -- confirm model.single_model
    tolerates the extra entry.

    Args:
        feature: input grid tensor (batch, D, H, W, C).
        target: regression target tensor (batch, 1).
        is_training: bool placeholder toggling train-time behavior.
        conv_drop_rate, fc_drop_rate, top_nn_drop_rate: dropout placeholders.
        args: parsed CLI args (num_conv, use_batch_norm, no_dropout,
            top_nn_activation).

    Returns:
        (predict, loss): prediction op (named 'predict') and scalar MSE loss.
    """
    num_conv = args.num_conv
    conv_filters = [32 * (2**n) for n in range(num_conv)]
    conv_kernel_size = 3
    max_pool_positions = [0, 1]*int((num_conv+1)/2)
    max_pool_sizes = [2]*num_conv
    max_pool_strides = [2]*num_conv
    fc_units = [512]
    # Debug: print the input shape once on first use of the graph.
    feature = tf.Print(feature, [tf.shape(feature)],
                       message="feature: ",
                       first_n=1, summarize=10)
    output = model.single_model(
        feature,
        is_training,
        conv_drop_rate,
        fc_drop_rate,
        top_nn_drop_rate,
        conv_filters, conv_kernel_size,
        max_pool_positions,
        max_pool_sizes, max_pool_strides,
        fc_units,
        batch_norm=args.use_batch_norm,
        dropout=not args.no_dropout,
        top_nn_activation=args.top_nn_activation)
    # Prediction
    predict = tf.identity(output, name='predict')
    # Loss
    loss = tf.losses.mean_squared_error(target, predict)
    return predict, loss
def batch_dataset_generator(gen, args, is_testing=False):
    """Wrap a (name, grid, label) generator in a batched tf.data pipeline.

    Args:
        gen: zero-arg callable yielding (structure_name, feature_grid, label).
        args: parsed CLI args (grid_config, shuffle, batch_size).
        is_testing: if True, iterate the generator once without repeating.

    Returns:
        (dataset, next_element): the tf.data.Dataset and its one-shot
        iterator's get_next() op.
    """
    grid_size = subgrid_gen.grid_size(args.grid_config)
    channel_size = subgrid_gen.num_channels(args.grid_config)
    dataset = tf.data.Dataset.from_generator(
        gen,
        output_types=(tf.string, tf.float32, tf.float32),
        output_shapes=((), (grid_size, grid_size, grid_size, channel_size), (1,))
        )
    # Shuffle dataset
    if not is_testing:
        # NOTE(review): when args.shuffle is set, shuffling is presumably
        # done inside the generator itself (dataset_generator receives
        # shuffle=args.shuffle), so only repeat here; otherwise shuffle at
        # the dataset level -- confirm this inversion is intentional.
        if args.shuffle:
            dataset = dataset.repeat(count=None)
        else:
            dataset = dataset.apply(
                tf.contrib.data.shuffle_and_repeat(buffer_size=1000))
    dataset = dataset.batch(args.batch_size)
    dataset = dataset.prefetch(8)
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    return dataset, next_element
def train_model(sess, args):
    """Train, validate, checkpoint, and finally test the 3D-CNN on QM9.

    Builds the TF1 graph (placeholders, model, loss, optimizer), then either
    runs epochs of train/val with checkpointing (unless args.test_only), and
    finishes with a test pass whose per-structure predictions and global
    correlations/MAE are reported and pickled under args.output_dir.

    Args:
        sess: an active tf.Session.
        args: parsed CLI namespace (see create_train_parser) plus grid_config.
    """
    # tf Graph input
    # Subgrid maps for each residue in a protein
    logging.debug('Create input placeholder...')
    grid_size = subgrid_gen.grid_size(args.grid_config)
    channel_size = subgrid_gen.num_channels(args.grid_config)
    feature_placeholder = tf.placeholder(
        tf.float32,
        [None, grid_size, grid_size, grid_size, channel_size],
        name='main_input')
    label_placeholder = tf.placeholder(tf.float32, [None, 1], 'label')
    # Placeholder for model parameters
    training_placeholder = tf.placeholder(tf.bool, shape=[], name='is_training')
    conv_drop_rate_placeholder = tf.placeholder(tf.float32, name='conv_drop_rate')
    fc_drop_rate_placeholder = tf.placeholder(tf.float32, name='fc_drop_rate')
    top_nn_drop_rate_placeholder = tf.placeholder(tf.float32, name='top_nn_drop_rate')
    # Define loss and optimizer
    logging.debug('Define loss and optimizer...')
    predict_op, loss_op = conv_model(
        feature_placeholder, label_placeholder, training_placeholder,
        conv_drop_rate_placeholder, fc_drop_rate_placeholder,
        top_nn_drop_rate_placeholder, args)
    logging.debug('Generate training ops...')
    train_op = model.training(loss_op, args.learning_rate)
    # Initialize the variables (i.e. assign their default value)
    logging.debug('Initializing global variables...')
    init = tf.global_variables_initializer()
    # Create saver and summaries.
    logging.debug('Initializing saver...')
    saver = tf.train.Saver(max_to_keep=100000)
    logging.debug('Finished initializing saver...')
    if args.resume_training:
        # Resume from the best checkpoint in run_info.json, or from an
        # explicitly numbered checkpoint.
        if args.use_ckpt_num == None:
            with open(os.path.join(args.model_dir, 'run_info.json')) as f:
                run_info = json.load(f)
                to_use = run_info['best_ckpt']
        else:
            to_use = os.path.join(
                args.model_dir, 'model-ckpt-{:}'.format(args.use_ckpt_num))
        saver = tf.train.import_meta_graph(to_use + '.meta')

    def __loop(generator, mode, num_iters):
        """Run one pass over `generator` in the given mode.

        mode is 'train' / 'val' / 'test'; dropout and the is_training flag
        are only active in 'train'. Returns (structs, preds, labels, losses,
        epoch_loss) where epoch_loss is a running batch-mean of the loss.
        NOTE(review): train_op is run in every mode, so weight updates occur
        during val/test too -- likely unintended; confirm.
        """
        tf_dataset, next_element = batch_dataset_generator(
            generator, args, is_testing=(mode=='test'))
        structs, losses, preds, labels = [], [], [], []
        epoch_loss = 0
        progress_format = mode + ' loss: {:6.6f}'
        # Loop over all batches (one batch is all feature for 1 protein)
        num_batches = int(math.ceil(float(num_iters)/args.batch_size))
        #print('Running {:} -> {:} iters in {:} batches (batch size: {:})'.format(
        #    mode, num_iters, num_batches, args.batch_size))
        with tqdm.tqdm(total=num_batches, desc=progress_format.format(0)) as t:
            for i in range(num_batches):
                try:
                    struct_, feature_, label_ = sess.run(next_element)
                    _, pred, loss = sess.run(
                        [train_op, predict_op, loss_op],
                        feed_dict={feature_placeholder: feature_,
                                   label_placeholder: label_,
                                   training_placeholder: (mode == 'train'),
                                   conv_drop_rate_placeholder:
                                       args.conv_drop_rate if mode == 'train' else 0.0,
                                   fc_drop_rate_placeholder:
                                       args.fc_drop_rate if mode == 'train' else 0.0,
                                   top_nn_drop_rate_placeholder:
                                       args.top_nn_drop_rate if mode == 'train' else 0.0})
                    # Incremental mean: epoch_loss tracks the average batch loss.
                    epoch_loss += (np.mean(loss) - epoch_loss) / (i + 1)
                    structs.extend(struct_)
                    losses.append(loss)
                    preds.extend(pred)
                    labels.extend(label_)
                    t.set_description(progress_format.format(epoch_loss))
                    t.update(1)
                except StopIteration:
                    logging.info("\nEnd of dataset at iteration {:}".format(i))
                    break

        def __concatenate(array):
            """Concatenate if possible; otherwise return the input unchanged."""
            try:
                array = np.concatenate(array)
                return array
            except:
                return array
        structs = __concatenate(structs)
        preds = __concatenate(preds)
        labels = __concatenate(labels)
        losses = __concatenate(losses)
        return structs, preds, labels, losses, epoch_loss

    # Run the initializer
    logging.debug('Running initializer...')
    sess.run(init)
    logging.debug('Finished running initializer...')
    ##### Training + validation
    if not args.test_only:
        prev_val_loss, best_val_loss = float("inf"), float("inf")
        # Determine epoch sizes: either capped by --max_mols_* or counted
        # from the unique structures in the HDF5 files.
        if (args.max_mols_train == None):
            df = pd.read_hdf(args.train_data_filename, 'structures')
            train_num_structs = len(df.structure.unique())
        else:
            train_num_structs = args.max_mols_train
        if (args.max_mols_val == None):
            df = pd.read_hdf(args.val_data_filename, 'structures')
            val_num_structs = len(df.structure.unique())
        else:
            val_num_structs = args.max_mols_val
        logging.info("Start training with {:} structs for train and {:} structs for val per epoch".format(
            train_num_structs, val_num_structs))

        def _save():
            """Checkpoint the session; closes over `epoch` as global_step."""
            ckpt = saver.save(sess, os.path.join(args.output_dir, 'model-ckpt'),
                              global_step=epoch)
            return ckpt
        run_info_filename = os.path.join(args.output_dir, 'run_info.json')
        run_info = {}

        def __update_and_write_run_info(key, val):
            """Update run_info and rewrite run_info.json on disk."""
            run_info[key] = val
            with open(run_info_filename, 'w') as f:
                json.dump(run_info, f, indent=4)
        per_epoch_val_losses = []
        for epoch in range(1, args.num_epochs+1):
            random_seed = args.random_seed
            logging.info('Epoch {:} - random_seed: {:}'.format(epoch, args.random_seed))
            logging.debug('Creating train generator...')
            train_generator_callable = functools.partial(
                feature_qm9.dataset_generator,
                args.train_data_filename,
                args.train_labels_filename,
                args.grid_config,
                label_type=args.label_type,
                shuffle=args.shuffle,
                repeat=1,
                max_mols=args.max_mols_train,
                random_seed=random_seed)
            logging.debug('Creating val generator...')
            val_generator_callable = functools.partial(
                feature_qm9.dataset_generator,
                args.val_data_filename,
                args.val_labels_filename,
                args.grid_config,
                label_type=args.label_type,
                shuffle=args.shuffle,
                repeat=1,
                max_mols=args.max_mols_val,
                random_seed=random_seed)
            # Training
            train_structs, train_preds, train_labels, _, curr_train_loss = __loop(
                train_generator_callable, 'train', num_iters=train_num_structs)
            # Validation
            val_structs, val_preds, val_labels, _, curr_val_loss = __loop(
                val_generator_callable, 'val', num_iters=val_num_structs)
            per_epoch_val_losses.append(curr_val_loss)
            __update_and_write_run_info('val_losses', per_epoch_val_losses)
            if args.use_best or args.early_stopping:
                if curr_val_loss < best_val_loss:
                    # Found new best epoch.
                    best_val_loss = curr_val_loss
                    ckpt = _save()
                    __update_and_write_run_info('val_best_loss', best_val_loss)
                    __update_and_write_run_info('best_ckpt', ckpt)
                    logging.info("New best {:}".format(ckpt))
            if (epoch == args.num_epochs - 1 and not args.use_best):
                # At end and just using final checkpoint.
                # NOTE(review): the loop runs up to num_epochs inclusive, so
                # this fires one epoch before the last -- confirm intended.
                ckpt = _save()
                __update_and_write_run_info('best_ckpt', ckpt)
                logging.info("Last checkpoint {:}".format(ckpt))
            if args.save_all_ckpts:
                # Save at every checkpoint
                ckpt = _save()
                logging.info("Saving checkpoint {:}".format(ckpt))
            if args.early_stopping and curr_val_loss >= prev_val_loss:
                logging.info("Validation loss stopped decreasing, stopping...")
                break
            else:
                prev_val_loss = curr_val_loss
        logging.info("Finished training")
        ## Save last train and val results
        logging.info("Saving train and val results")
        train_df = pd.DataFrame(
            np.array([train_structs, train_labels, train_preds]).T,
            columns=['structure', 'true', 'pred'],
            )
        train_df.to_pickle(os.path.join(args.output_dir, 'train_result.pkl'))
        val_df = pd.DataFrame(
            np.array([val_structs, val_labels, val_preds]).T,
            columns=['structure', 'true', 'pred'],
            )
        val_df.to_pickle(os.path.join(args.output_dir, 'val_result.pkl'))
    ##### Testing
    logging.debug("Run testing")
    if not args.test_only:
        to_use = run_info['best_ckpt'] if args.use_best else ckpt
    else:
        if args.use_ckpt_num == None:
            with open(os.path.join(args.model_dir, 'run_info.json')) as f:
                run_info = json.load(f)
                to_use = run_info['best_ckpt']
        else:
            to_use = os.path.join(
                args.model_dir, 'model-ckpt-{:}'.format(args.use_ckpt_num))
        saver = tf.train.import_meta_graph(to_use + '.meta')
    test_generator_callable = functools.partial(
        feature_qm9.dataset_generator,
        args.test_data_filename,
        args.test_labels_filename,
        args.grid_config,
        label_type=args.label_type,
        shuffle=args.shuffle,
        repeat=1,
        max_mols=args.max_mols_test,
        random_seed=args.random_seed)
    if (args.max_mols_test == None):
        df = pd.read_hdf(args.test_data_filename, 'structures')
        test_num_structs = len(df.structure.unique())
    else:
        test_num_structs = args.max_mols_test
    logging.info("Start testing with {:} structs".format(test_num_structs))
    test_structs, test_preds, test_labels, _, test_loss = __loop(
        test_generator_callable, 'test', num_iters=test_num_structs)
    logging.info("Finished testing")
    test_df = pd.DataFrame(
        np.array([test_structs, test_labels, test_preds]).T,
        columns=['structure', 'true', 'pred'],
        )
    test_df.to_pickle(os.path.join(args.output_dir, 'test_result.pkl'))
    # Compute global correlations
    res = compute_global_correlations(test_df)
    logging.info(
        '\nCorrelations (Pearson, Kendall, Spearman)\n'
        '    all averaged: ({:.3f}, {:.3f}, {:.3f})'.format(
            float(res["all_pearson"]),
            float(res["all_kendall"]),
            float(res["all_spearman"])))
    # Compute mean absolute error
    mae = compute_mean_absolute_error(test_df)
    logging.info('Mean absolute error: {:.4f}'.format(mae))
def create_train_parser():
    """Build the argparse parser for 3D-CNN QM9 training/testing.

    Dataset, label, and output paths default to the corresponding
    environment variables (QM9_*_FILENAME, MODEL_DIR), which are read at
    call time.

    Returns:
        argparse.ArgumentParser ready for parse_args().
    """
    parser = argparse.ArgumentParser()
    # Dataset / output paths (defaults come from the environment / .env file).
    parser.add_argument(
        '--train_data_filename', type=str,
        default=os.environ['QM9_TRAIN_DATA_FILENAME'])
    parser.add_argument(
        '--train_labels_filename', type=str,
        default=os.environ['QM9_TRAIN_LABELS_FILENAME'])
    parser.add_argument(
        '--val_data_filename', type=str,
        default=os.environ['QM9_VAL_DATA_FILENAME'])
    parser.add_argument(
        '--val_labels_filename', type=str,
        default=os.environ['QM9_VAL_LABELS_FILENAME'])
    parser.add_argument(
        '--test_data_filename', type=str,
        default=os.environ['QM9_TEST_DATA_FILENAME'])
    parser.add_argument(
        '--test_labels_filename', type=str,
        default=os.environ['QM9_TEST_LABELS_FILENAME'])
    parser.add_argument(
        '--output_dir', type=str,
        default=os.environ['MODEL_DIR'])
    # Training parameters
    parser.add_argument('--max_mols_train', type=int, default=None)
    parser.add_argument('--max_mols_val', type=int, default=None)
    parser.add_argument('--max_mols_test', type=int, default=None)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--conv_drop_rate', type=float, default=0.1)
    parser.add_argument('--fc_drop_rate', type=float, default=0.25)
    parser.add_argument('--top_nn_drop_rate', type=float, default=0.5)
    parser.add_argument('--top_nn_activation', type=str, default=None)
    parser.add_argument('--num_epochs', type=int, default=5)
    parser.add_argument('--num_conv', type=int, default=4)
    parser.add_argument('--use_batch_norm', action='store_true', default=False)
    parser.add_argument('--no_dropout', action='store_true', default=False)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--shuffle', action='store_true', default=False)
    parser.add_argument('--early_stopping', action='store_true', default=False)
    parser.add_argument('--use_best', action='store_true', default=False)
    # FIX: the upper bound must be an int -- the original passed 10e6 (a
    # float), which random.randint/randrange rejects with TypeError on
    # Python >= 3.11. 10**7 == 10e6, so behavior is unchanged where it ran.
    parser.add_argument('--random_seed', type=int, default=random.randint(1, 10**7))
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--unobserved', action='store_true', default=False)
    parser.add_argument('--save_all_ckpts', action='store_true', default=False)
    # Model parameters
    parser.add_argument('--label_type', type=str, default='alpha')
    # Resume training
    parser.add_argument('--resume_training', action='store_true', default=False)
    # Test only
    parser.add_argument('--test_only', action='store_true', default=False)
    parser.add_argument('--model_dir', type=str, default=None)
    parser.add_argument('--use_ckpt_num', type=int, default=None)
    return parser
def main():
    """CLI entry point: parse args, prepare the output dir, run train_model.

    When resuming or testing, the run's saved config.json replaces the parsed
    args wholesale, except for a handful of flags preserved from the command
    line (test_only, resume_training, unobserved, num_epochs, model_dir,
    use_ckpt_num, and batch_size in test-only mode).
    """
    parser = create_train_parser()
    args = parser.parse_args()
    args.__dict__['grid_config'] = feature_qm9.grid_config
    if args.test_only or args.resume_training:
        # Stash CLI values that must survive the config.json overwrite below.
        test_only = args.test_only
        resume_training = args.resume_training
        batch_size = args.batch_size
        unobserved = args.unobserved
        num_epochs = args.num_epochs
        model_dir = args.model_dir
        use_ckpt_num = args.use_ckpt_num
        with open(os.path.join(args.model_dir, 'config.json')) as f:
            model_config = json.load(f)
        args.__dict__ = model_config
        if 'grid_config' in model_config:
            args.__dict__['grid_config'] = util.dotdict(
                model_config['grid_config'])
        args.resume_training = resume_training
        args.test_only = test_only
        args.unobserved = unobserved
        args.num_epochs = num_epochs
        args.model_dir = model_dir
        args.use_ckpt_num = use_ckpt_num
        if args.test_only:
            args.batch_size = batch_size
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    if args.debug:
        logging.basicConfig(level=logging.DEBUG, format=log_fmt)
    else:
        logging.basicConfig(level=logging.INFO, format=log_fmt)
    logging.info("Running 3D CNN QM9 training...")
    if args.unobserved:
        # Unobserved runs all share the fixed 'None' subdirectory.
        args.output_dir = os.path.join(args.output_dir, 'None')
        os.makedirs(args.output_dir, exist_ok=True)
    else:
        # Find the first free numbered subdirectory; mkdir may race with a
        # concurrent run, in which case we retry with the next number.
        num = 0
        while True:
            dirpath = os.path.join(args.output_dir, str(num))
            if os.path.exists(dirpath):
                num += 1
            else:
                try:
                    args.output_dir = dirpath
                    logging.info('Creating output directory {:}'.format(args.output_dir))
                    os.mkdir(args.output_dir)
                    break
                except:
                    pass
    logging.info("\n" + str(json.dumps(args.__dict__, indent=4)) + "\n")
    # Save config
    with open(os.path.join(args.output_dir, 'config.json'), 'w') as f:
        json.dump(args.__dict__, f, indent=4)
    logging.info("Writing all output to {:}".format(args.output_dir))
    with tf.Session() as sess:
        tf.set_random_seed(args.random_seed)
        train_model(sess, args)
# Standard script entry point.
if __name__ == '__main__':
    main()
| [
"os.mkdir",
"numpy.abs",
"argparse.ArgumentParser",
"dotenv.find_dotenv",
"tensorflow.identity",
"json.dumps",
"numpy.mean",
"os.path.join",
"pandas.read_hdf",
"random.randint",
"os.path.exists",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"functools.partial",
"json.dump",
... | [((248, 310), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (282, 310), True, 'import tensorflow as tf\n'), ((535, 562), 'dotenv.find_dotenv', 'de.find_dotenv', ([], {'usecwd': '(True)'}), '(usecwd=True)\n', (549, 562), True, 'import dotenv as de\n'), ((1652, 1957), 'examples.cnn3d.model.single_model', 'model.single_model', (['feature', 'is_training', 'conv_drop_rate', 'fc_drop_rate', 'top_nn_drop_rate', 'conv_filters', 'conv_kernel_size', 'max_pool_positions', 'max_pool_sizes', 'max_pool_strides', 'fc_units'], {'batch_norm': 'args.use_batch_norm', 'dropout': '(not args.no_dropout)', 'top_nn_activation': 'args.top_nn_activation'}), '(feature, is_training, conv_drop_rate, fc_drop_rate,\n top_nn_drop_rate, conv_filters, conv_kernel_size, max_pool_positions,\n max_pool_sizes, max_pool_strides, fc_units, batch_norm=args.\n use_batch_norm, dropout=not args.no_dropout, top_nn_activation=args.\n top_nn_activation)\n', (1670, 1957), True, 'import examples.cnn3d.model as model\n'), ((2069, 2104), 'tensorflow.identity', 'tf.identity', (['output'], {'name': '"""predict"""'}), "(output, name='predict')\n", (2080, 2104), True, 'import tensorflow as tf\n'), ((2127, 2172), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['target', 'predict'], {}), '(target, predict)\n', (2155, 2172), True, 'import tensorflow as tf\n'), ((2274, 2313), 'examples.cnn3d.subgrid_gen.grid_size', 'subgrid_gen.grid_size', (['args.grid_config'], {}), '(args.grid_config)\n', (2295, 2313), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((2333, 2375), 'examples.cnn3d.subgrid_gen.num_channels', 'subgrid_gen.num_channels', (['args.grid_config'], {}), '(args.grid_config)\n', (2357, 2375), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((2390, 2559), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen'], 
{'output_types': '(tf.string, tf.float32, tf.float32)', 'output_shapes': '((), (grid_size, grid_size, grid_size, channel_size), (1,))'}), '(gen, output_types=(tf.string, tf.float32, tf\n .float32), output_shapes=((), (grid_size, grid_size, grid_size,\n channel_size), (1,)))\n', (2420, 2559), True, 'import tensorflow as tf\n'), ((3132, 3176), 'logging.debug', 'logging.debug', (['"""Create input placeholder..."""'], {}), "('Create input placeholder...')\n", (3145, 3176), False, 'import logging\n'), ((3193, 3232), 'examples.cnn3d.subgrid_gen.grid_size', 'subgrid_gen.grid_size', (['args.grid_config'], {}), '(args.grid_config)\n', (3214, 3232), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((3252, 3294), 'examples.cnn3d.subgrid_gen.num_channels', 'subgrid_gen.num_channels', (['args.grid_config'], {}), '(args.grid_config)\n', (3276, 3294), True, 'import examples.cnn3d.subgrid_gen as subgrid_gen\n'), ((3321, 3425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, grid_size, grid_size, grid_size, channel_size]'], {'name': '"""main_input"""'}), "(tf.float32, [None, grid_size, grid_size, grid_size,\n channel_size], name='main_input')\n", (3335, 3425), True, 'import tensorflow as tf\n'), ((3471, 3517), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""label"""'], {}), "(tf.float32, [None, 1], 'label')\n", (3485, 3517), True, 'import tensorflow as tf\n'), ((3585, 3638), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]', 'name': '"""is_training"""'}), "(tf.bool, shape=[], name='is_training')\n", (3599, 3638), True, 'import tensorflow as tf\n'), ((3672, 3721), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""conv_drop_rate"""'}), "(tf.float32, name='conv_drop_rate')\n", (3686, 3721), True, 'import tensorflow as tf\n'), ((3753, 3800), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""fc_drop_rate"""'}), "(tf.float32, name='fc_drop_rate')\n", (3767, 
3800), True, 'import tensorflow as tf\n'), ((3836, 3887), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""top_nn_drop_rate"""'}), "(tf.float32, name='top_nn_drop_rate')\n", (3850, 3887), True, 'import tensorflow as tf\n'), ((3925, 3970), 'logging.debug', 'logging.debug', (['"""Define loss and optimizer..."""'], {}), "('Define loss and optimizer...')\n", (3938, 3970), False, 'import logging\n'), ((4189, 4230), 'logging.debug', 'logging.debug', (['"""Generate training ops..."""'], {}), "('Generate training ops...')\n", (4202, 4230), False, 'import logging\n'), ((4246, 4289), 'examples.cnn3d.model.training', 'model.training', (['loss_op', 'args.learning_rate'], {}), '(loss_op, args.learning_rate)\n', (4260, 4289), True, 'import examples.cnn3d.model as model\n'), ((4360, 4409), 'logging.debug', 'logging.debug', (['"""Initializing global variables..."""'], {}), "('Initializing global variables...')\n", (4373, 4409), False, 'import logging\n'), ((4421, 4454), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4452, 4454), True, 'import tensorflow as tf\n'), ((4494, 4532), 'logging.debug', 'logging.debug', (['"""Initializing saver..."""'], {}), "('Initializing saver...')\n", (4507, 4532), False, 'import logging\n'), ((4545, 4579), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100000)'}), '(max_to_keep=100000)\n', (4559, 4579), True, 'import tensorflow as tf\n'), ((4584, 4631), 'logging.debug', 'logging.debug', (['"""Finished initializing saver..."""'], {}), "('Finished initializing saver...')\n", (4597, 4631), False, 'import logging\n'), ((7510, 7549), 'logging.debug', 'logging.debug', (['"""Running initializer..."""'], {}), "('Running initializer...')\n", (7523, 7549), False, 'import logging\n'), ((7573, 7621), 'logging.debug', 'logging.debug', (['"""Finished running initializer..."""'], {}), "('Finished running initializer...')\n", (7586, 7621), False, 'import logging\n'), 
((12270, 12298), 'logging.debug', 'logging.debug', (['"""Run testing"""'], {}), "('Run testing')\n", (12283, 12298), False, 'import logging\n'), ((12815, 13064), 'functools.partial', 'functools.partial', (['feature_qm9.dataset_generator', 'args.test_data_filename', 'args.test_labels_filename', 'args.grid_config'], {'label_type': 'args.label_type', 'shuffle': 'args.shuffle', 'repeat': '(1)', 'max_mols': 'args.max_mols_test', 'random_seed': 'args.random_seed'}), '(feature_qm9.dataset_generator, args.test_data_filename,\n args.test_labels_filename, args.grid_config, label_type=args.label_type,\n shuffle=args.shuffle, repeat=1, max_mols=args.max_mols_test,\n random_seed=args.random_seed)\n', (12832, 13064), False, 'import functools\n'), ((13555, 13587), 'logging.info', 'logging.info', (['"""Finished testing"""'], {}), "('Finished testing')\n", (13567, 13587), False, 'import logging\n'), ((14315, 14340), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14338, 14340), False, 'import argparse\n'), ((18368, 18414), 'logging.info', 'logging.info', (['"""Running 3D CNN QM9 training..."""'], {}), "('Running 3D CNN QM9 training...')\n", (18380, 18414), False, 'import logging\n'), ((4999, 5043), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(to_use + '.meta')"], {}), "(to_use + '.meta')\n", (5025, 5043), True, 'import tensorflow as tf\n'), ((8575, 8621), 'os.path.join', 'os.path.join', (['args.output_dir', '"""run_info.json"""'], {}), "(args.output_dir, 'run_info.json')\n", (8587, 8621), False, 'import os\n'), ((11638, 11671), 'logging.info', 'logging.info', (['"""Finished training"""'], {}), "('Finished training')\n", (11650, 11671), False, 'import logging\n'), ((11724, 11768), 'logging.info', 'logging.info', (['"""Saving train and val results"""'], {}), "('Saving train and val results')\n", (11736, 11768), False, 'import logging\n'), ((12739, 12783), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(to_use 
+ '.meta')"], {}), "(to_use + '.meta')\n", (12765, 12783), True, 'import tensorflow as tf\n'), ((13177, 13227), 'pandas.read_hdf', 'pd.read_hdf', (['args.test_data_filename', '"""structures"""'], {}), "(args.test_data_filename, 'structures')\n", (13188, 13227), True, 'import pandas as pd\n'), ((13757, 13805), 'os.path.join', 'os.path.join', (['args.output_dir', '"""test_result.pkl"""'], {}), "(args.output_dir, 'test_result.pkl')\n", (13769, 13805), False, 'import os\n'), ((18233, 18289), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'log_fmt'}), '(level=logging.DEBUG, format=log_fmt)\n', (18252, 18289), False, 'import logging\n'), ((18308, 18363), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (18327, 18363), False, 'import logging\n'), ((18466, 18503), 'os.path.join', 'os.path.join', (['args.output_dir', '"""None"""'], {}), "(args.output_dir, 'None')\n", (18478, 18503), False, 'import os\n'), ((18512, 18555), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (18523, 18555), False, 'import os\n'), ((19197, 19234), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(4)'}), '(args.__dict__, f, indent=4)\n', (19206, 19234), False, 'import json\n'), ((19315, 19327), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (19325, 19327), True, 'import tensorflow as tf\n'), ((19345, 19381), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (19363, 19381), True, 'import tensorflow as tf\n'), ((1073, 1100), 'numpy.abs', 'np.abs', (['(all_true - all_pred)'], {}), '(all_true - all_pred)\n', (1079, 1100), True, 'import numpy as np\n'), ((1554, 1571), 'tensorflow.shape', 'tf.shape', (['feature'], {}), '(feature)\n', (1562, 1571), True, 'import tensorflow as tf\n'), ((7808, 7859), 'pandas.read_hdf', 'pd.read_hdf', 
(['args.train_data_filename', '"""structures"""'], {}), "(args.train_data_filename, 'structures')\n", (7819, 7859), True, 'import pandas as pd\n'), ((8043, 8092), 'pandas.read_hdf', 'pd.read_hdf', (['args.val_data_filename', '"""structures"""'], {}), "(args.val_data_filename, 'structures')\n", (8054, 8092), True, 'import pandas as pd\n'), ((9058, 9102), 'logging.debug', 'logging.debug', (['"""Creating train generator..."""'], {}), "('Creating train generator...')\n", (9071, 9102), False, 'import logging\n'), ((9142, 9391), 'functools.partial', 'functools.partial', (['feature_qm9.dataset_generator', 'args.train_data_filename', 'args.train_labels_filename', 'args.grid_config'], {'label_type': 'args.label_type', 'shuffle': 'args.shuffle', 'repeat': '(1)', 'max_mols': 'args.max_mols_train', 'random_seed': 'random_seed'}), '(feature_qm9.dataset_generator, args.train_data_filename,\n args.train_labels_filename, args.grid_config, label_type=args.\n label_type, shuffle=args.shuffle, repeat=1, max_mols=args.\n max_mols_train, random_seed=random_seed)\n', (9159, 9391), False, 'import functools\n'), ((9536, 9578), 'logging.debug', 'logging.debug', (['"""Creating val generator..."""'], {}), "('Creating val generator...')\n", (9549, 9578), False, 'import logging\n'), ((9616, 9858), 'functools.partial', 'functools.partial', (['feature_qm9.dataset_generator', 'args.val_data_filename', 'args.val_labels_filename', 'args.grid_config'], {'label_type': 'args.label_type', 'shuffle': 'args.shuffle', 'repeat': '(1)', 'max_mols': 'args.max_mols_val', 'random_seed': 'random_seed'}), '(feature_qm9.dataset_generator, args.val_data_filename,\n args.val_labels_filename, args.grid_config, label_type=args.label_type,\n shuffle=args.shuffle, repeat=1, max_mols=args.max_mols_val, random_seed\n =random_seed)\n', (9633, 9858), False, 'import functools\n'), ((11962, 12011), 'os.path.join', 'os.path.join', (['args.output_dir', '"""train_result.pkl"""'], {}), "(args.output_dir, 'train_result.pkl')\n", 
(11974, 12011), False, 'import os\n'), ((12197, 12244), 'os.path.join', 'os.path.join', (['args.output_dir', '"""val_result.pkl"""'], {}), "(args.output_dir, 'val_result.pkl')\n", (12209, 12244), False, 'import os\n'), ((13625, 13674), 'numpy.array', 'np.array', (['[test_structs, test_labels, test_preds]'], {}), '([test_structs, test_labels, test_preds])\n', (13633, 13674), True, 'import numpy as np\n'), ((16386, 16415), 'random.randint', 'random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (16400, 16415), False, 'import random\n'), ((17625, 17637), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17634, 17637), False, 'import json\n'), ((18679, 18702), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (18693, 18702), False, 'import os\n'), ((19132, 19176), 'os.path.join', 'os.path.join', (['args.output_dir', '"""config.json"""'], {}), "(args.output_dir, 'config.json')\n", (19144, 19176), False, 'import os\n'), ((2772, 2824), 'tensorflow.contrib.data.shuffle_and_repeat', 'tf.contrib.data.shuffle_and_repeat', ([], {'buffer_size': '(1000)'}), '(buffer_size=1000)\n', (2806, 2824), True, 'import tensorflow as tf\n'), ((4802, 4814), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4811, 4814), False, 'import json\n'), ((7164, 7185), 'numpy.concatenate', 'np.concatenate', (['array'], {}), '(array)\n', (7178, 7185), True, 'import numpy as np\n'), ((8428, 8471), 'os.path.join', 'os.path.join', (['args.output_dir', '"""model-ckpt"""'], {}), "(args.output_dir, 'model-ckpt')\n", (8440, 8471), False, 'import os\n'), ((8795, 8827), 'json.dump', 'json.dump', (['run_info', 'f'], {'indent': '(4)'}), '(run_info, f, indent=4)\n', (8804, 8827), False, 'import json\n'), ((11479, 11542), 'logging.info', 'logging.info', (['"""Validation loss stopped decreasing, stopping..."""'], {}), "('Validation loss stopped decreasing, stopping...')\n", (11491, 11542), False, 'import logging\n'), ((11814, 11866), 'numpy.array', 'np.array', (['[train_structs, 
train_labels, train_preds]'], {}), '([train_structs, train_labels, train_preds])\n', (11822, 11866), True, 'import numpy as np\n'), ((12057, 12103), 'numpy.array', 'np.array', (['[val_structs, val_labels, val_preds]'], {}), '([val_structs, val_labels, val_preds])\n', (12065, 12103), True, 'import numpy as np\n'), ((12542, 12554), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12551, 12554), False, 'import json\n'), ((17547, 17590), 'os.path.join', 'os.path.join', (['args.model_dir', '"""config.json"""'], {}), "(args.model_dir, 'config.json')\n", (17559, 17590), False, 'import os\n'), ((17772, 17813), 'examples.cnn3d.util.dotdict', 'util.dotdict', (["model_config['grid_config']"], {}), "(model_config['grid_config'])\n", (17784, 17813), True, 'import examples.cnn3d.util as util\n'), ((4722, 4767), 'os.path.join', 'os.path.join', (['args.model_dir', '"""run_info.json"""'], {}), "(args.model_dir, 'run_info.json')\n", (4734, 4767), False, 'import os\n'), ((12462, 12507), 'os.path.join', 'os.path.join', (['args.model_dir', '"""run_info.json"""'], {}), "(args.model_dir, 'run_info.json')\n", (12474, 12507), False, 'import os\n'), ((18924, 18949), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (18932, 18949), False, 'import os\n'), ((19054, 19089), 'json.dumps', 'json.dumps', (['args.__dict__'], {'indent': '(4)'}), '(args.__dict__, indent=4)\n', (19064, 19089), False, 'import json\n'), ((6634, 6647), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6641, 6647), True, 'import numpy as np\n')] |
import dgl
import torch as th
import numpy as np
import itertools
import time
from collections import *
# Immutable bundle describing one batched Transformer graph:
#   g         -- the batched dgl graph
#   src / tgt -- (token tensor, position tensor) pairs for encoder / decoder
#   tgt_y     -- gold decoder output tokens (None for inference graphs)
#   nids      -- node-id tensors keyed by 'enc' / 'dec'
#   eids      -- edge-id tensors keyed by 'ee' / 'ed' / 'dd' attention type
#   nid_arr   -- per-sample lists of node-id tensors, keyed by 'enc' / 'dec'
#   n_nodes, n_edges, n_tokens -- batch-level counters
Graph = namedtuple('Graph',
                   ['g', 'src', 'tgt', 'tgt_y', 'nids', 'eids', 'nid_arr', 'n_nodes', 'n_edges', 'n_tokens'])
def graph_to_device(g, device):
    """Move every tensor inside a batched ``Graph`` onto ``device``.

    Parameters
    ----------
    g : Graph
        Namedtuple produced by :class:`GraphPool`; ``g.tgt_y`` may be
        ``None`` for inference graphs built by :meth:`GraphPool.beam`.
    device : str or torch.device
        Target device, e.g. ``'cpu'`` or ``'cuda:0'``.

    Returns
    -------
    Graph
        A new ``Graph`` whose tensors live on ``device``.  The plain-int
        counters (``n_nodes`` / ``n_edges`` / ``n_tokens``) are copied
        through unchanged.
    """
    # The original code duplicated the entire Graph construction in two
    # branches that differed only in tgt_y (and moved tgt_y to the device
    # twice in the first branch); collapse that into a single construction.
    tgt_y = g.tgt_y.to(device) if g.tgt_y is not None else None
    return Graph(
        g=g.g.to(device),
        src=(g.src[0].to(device), g.src[1].to(device)),
        tgt=(g.tgt[0].to(device), g.tgt[1].to(device)),
        tgt_y=tgt_y,
        nids={'enc': g.nids['enc'].to(device), 'dec': g.nids['dec'].to(device)},
        eids={'ee': g.eids['ee'].to(device), 'ed': g.eids['ed'].to(device), 'dd': g.eids['dd'].to(device)},
        nid_arr={'enc': [x.to(device) for x in g.nid_arr['enc']],
                 'dec': [x.to(device) for x in g.nid_arr['dec']]},
        n_nodes=g.n_nodes,
        n_edges=g.n_edges,
        n_tokens=g.n_tokens
    )
class GraphPool:
    "Create a graph pool in advance to accelerate graph building phase in Transformer."
    def __init__(self, n=50, m=50):
        '''
        args:
            n: maximum length of input sequence.
            m: maximum length of output sequence.
        '''
        print('start creating graph pool...')
        tic = time.time()
        self.n, self.m = n, m
        # g_pool[i][j] is the pre-built graph for a (src_len=i+1, tgt_len=j+1)
        # pair: the first i+1 nodes are encoder positions, the next j+1 are
        # decoder positions.
        g_pool = [[dgl.DGLGraph() for _ in range(m)] for _ in range(n)]
        # Edge counts per (i, j), split by attention type:
        #   ee: enc->enc (encoder self-attention, fully connected)
        #   ed: enc->dec (cross-attention, fully connected)
        #   dd: dec->dec (decoder self-attention, causal)
        num_edges = {
            'ee': np.zeros((n, m)).astype(int),
            'ed': np.zeros((n, m)).astype(int),
            'dd': np.zeros((n, m)).astype(int)
        }
        for i, j in itertools.product(range(n), range(m)):
            src_length = i + 1
            tgt_length = j + 1
            g_pool[i][j].add_nodes(src_length + tgt_length)
            enc_nodes = th.arange(src_length, dtype=th.long)
            # decoder node ids are offset past the encoder nodes
            dec_nodes = th.arange(tgt_length, dtype=th.long) + src_length
            # enc -> enc: full bipartite product of encoder nodes
            us = enc_nodes.unsqueeze(-1).repeat(1, src_length).view(-1)
            vs = enc_nodes.repeat(src_length)
            g_pool[i][j].add_edges(us, vs)
            num_edges['ee'][i][j] = len(us)
            # enc -> dec: every encoder node connects to every decoder node
            us = enc_nodes.unsqueeze(-1).repeat(1, tgt_length).view(-1)
            vs = dec_nodes.repeat(src_length)
            g_pool[i][j].add_edges(us, vs)
            num_edges['ed'][i][j] = len(us)
            # dec -> dec: upper-triangular mask keeps pairs (u, v) with
            # u <= v, i.e. edges run from each decoder position to itself
            # and all later positions (causal attention pattern).
            indices = th.triu(th.ones(tgt_length, tgt_length)) == 1
            us = dec_nodes.unsqueeze(-1).repeat(1, tgt_length)[indices]
            vs = dec_nodes.unsqueeze(0).repeat(tgt_length, 1)[indices]
            g_pool[i][j].add_edges(us, vs)
            num_edges['dd'][i][j] = len(us)
        print('successfully created graph pool, time: {0:0.3f}s'.format(time.time() - tic))
        self.g_pool = g_pool
        self.num_edges = num_edges
    def beam(self, src_buf, start_sym, max_len, k, device='cpu'):
        '''
        Return a batched graph for beam search during inference of Transformer.
        args:
            src_buf: a list of input sequence
            start_sym: the index of start-of-sequence symbol
            max_len: maximum length for decoding
            k: beam size
            device: 'cpu' or 'cuda:*'
        '''
        g_list = []
        src_lens = [len(_) for _ in src_buf]
        # every beam decodes up to max_len target positions
        tgt_lens = [max_len] * len(src_buf)
        num_edges = {'ee': [], 'ed': [], 'dd': []}
        for src_len, tgt_len in zip(src_lens, tgt_lens):
            i, j = src_len - 1, tgt_len - 1
            # replicate each sample's graph k times, one per beam
            for _ in range(k):
                g_list.append(self.g_pool[i][j])
                for key in ['ee', 'ed', 'dd']:
                    num_edges[key].append(int(self.num_edges[key][i][j]))
        g = dgl.batch(g_list)
        src, tgt = [], []
        src_pos, tgt_pos = [], []
        enc_ids, dec_ids = [], []
        e2e_eids, e2d_eids, d2d_eids = [], [], []
        # running offsets into the batched graph's node/edge id spaces;
        # the increments below must mirror dgl.batch's concatenation order
        # (per replicated graph: enc nodes, then dec nodes; ee, ed, dd edges).
        n_nodes, n_edges, n_tokens = 0, 0, 0
        for src_sample, n, n_ee, n_ed, n_dd in zip(src_buf, src_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
            for _ in range(k):
                src.append(th.tensor(src_sample, dtype=th.long, device=device))
                src_pos.append(th.arange(n, dtype=th.long, device=device))
                enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
                n_nodes += n
                e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
                n_edges += n_ee
                # decoder input starts as all padding with the start symbol
                # in position 0; later positions are filled during decoding
                tgt_seq = th.zeros(max_len, dtype=th.long, device=device)
                tgt_seq[0] = start_sym
                tgt.append(tgt_seq)
                tgt_pos.append(th.arange(max_len, dtype=th.long, device=device))
                dec_ids.append(th.arange(n_nodes, n_nodes + max_len, dtype=th.long, device=device))
                n_nodes += max_len
                e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
                n_edges += n_ed
                d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
                n_edges += n_dd
        g.set_n_initializer(dgl.init.zero_initializer)
        g.set_e_initializer(dgl.init.zero_initializer)
        # tgt_y is None: there is no gold output during beam search
        return Graph(g=g,
                     src=(th.cat(src), th.cat(src_pos)),
                     tgt=(th.cat(tgt), th.cat(tgt_pos)),
                     tgt_y=None,
                     nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
                     eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
                     nid_arr = {'enc': enc_ids, 'dec': dec_ids},
                     n_nodes=n_nodes,
                     n_edges=n_edges,
                     n_tokens=n_tokens)
    def __call__(self, src_buf, tgt_buf, device='cpu'):
        '''
        Return a batched graph for the training phase of Transformer.
        args:
            src_buf: a set of input sequence arrays.
            tgt_buf: a set of output sequence arrays.
            device: 'cpu' or 'cuda:*'
        '''
        g_list = []
        src_lens = [len(_) for _ in src_buf]
        # teacher forcing: the decoder consumes tgt[:-1] and predicts
        # tgt[1:], hence the length is len(tgt) - 1
        tgt_lens = [len(_) - 1 for _ in tgt_buf]
        num_edges = {'ee': [], 'ed': [], 'dd': []}
        for src_len, tgt_len in zip(src_lens, tgt_lens):
            i, j = src_len - 1, tgt_len - 1
            g_list.append(self.g_pool[i][j])
            for key in ['ee', 'ed', 'dd']:
                num_edges[key].append(int(self.num_edges[key][i][j]))
        g = dgl.batch(g_list)
        src, tgt, tgt_y = [], [], []
        src_pos, tgt_pos = [], []
        enc_ids, dec_ids = [], []
        e2e_eids, d2d_eids, e2d_eids = [], [], []
        # running offsets into the batched graph's node/edge id spaces;
        # increments must mirror dgl.batch's concatenation order.
        n_nodes, n_edges, n_tokens = 0, 0, 0
        for src_sample, tgt_sample, n, m, n_ee, n_ed, n_dd in zip(src_buf, tgt_buf, src_lens, tgt_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
            src.append(th.tensor(src_sample, dtype=th.long, device=device))
            # shifted decoder input / gold output pair for teacher forcing
            tgt.append(th.tensor(tgt_sample[:-1], dtype=th.long, device=device))
            tgt_y.append(th.tensor(tgt_sample[1:], dtype=th.long, device=device))
            src_pos.append(th.arange(n, dtype=th.long, device=device))
            tgt_pos.append(th.arange(m, dtype=th.long, device=device))
            enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
            n_nodes += n
            dec_ids.append(th.arange(n_nodes, n_nodes + m, dtype=th.long, device=device))
            n_nodes += m
            e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
            n_edges += n_ee
            e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
            n_edges += n_ed
            d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
            n_edges += n_dd
            # n_tokens counts decoder positions, used for loss normalization
            n_tokens += m
        g.set_n_initializer(dgl.init.zero_initializer)
        g.set_e_initializer(dgl.init.zero_initializer)
        return Graph(g=g,
                     src=(th.cat(src), th.cat(src_pos)),
                     tgt=(th.cat(tgt), th.cat(tgt_pos)),
                     tgt_y=th.cat(tgt_y),
                     nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
                     eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
                     nid_arr = {'enc': enc_ids, 'dec': dec_ids},
                     n_nodes=n_nodes,
                     n_edges=n_edges,
                     n_tokens=n_tokens)
| [
"torch.ones",
"dgl.batch",
"numpy.zeros",
"torch.cat",
"dgl.DGLGraph",
"time.time",
"torch.arange",
"torch.zeros",
"torch.tensor"
] | [((1968, 1979), 'time.time', 'time.time', ([], {}), '()\n', (1977, 1979), False, 'import time\n'), ((4387, 4404), 'dgl.batch', 'dgl.batch', (['g_list'], {}), '(g_list)\n', (4396, 4404), False, 'import dgl\n'), ((7174, 7191), 'dgl.batch', 'dgl.batch', (['g_list'], {}), '(g_list)\n', (7183, 7191), False, 'import dgl\n'), ((2463, 2499), 'torch.arange', 'th.arange', (['src_length'], {'dtype': 'th.long'}), '(src_length, dtype=th.long)\n', (2472, 2499), True, 'import torch as th\n'), ((2029, 2043), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (2041, 2043), False, 'import dgl\n'), ((2524, 2560), 'torch.arange', 'th.arange', (['tgt_length'], {'dtype': 'th.long'}), '(tgt_length, dtype=th.long)\n', (2533, 2560), True, 'import torch as th\n'), ((5181, 5228), 'torch.zeros', 'th.zeros', (['max_len'], {'dtype': 'th.long', 'device': 'device'}), '(max_len, dtype=th.long, device=device)\n', (5189, 5228), True, 'import torch as th\n'), ((7571, 7622), 'torch.tensor', 'th.tensor', (['src_sample'], {'dtype': 'th.long', 'device': 'device'}), '(src_sample, dtype=th.long, device=device)\n', (7580, 7622), True, 'import torch as th\n'), ((7647, 7703), 'torch.tensor', 'th.tensor', (['tgt_sample[:-1]'], {'dtype': 'th.long', 'device': 'device'}), '(tgt_sample[:-1], dtype=th.long, device=device)\n', (7656, 7703), True, 'import torch as th\n'), ((7730, 7785), 'torch.tensor', 'th.tensor', (['tgt_sample[1:]'], {'dtype': 'th.long', 'device': 'device'}), '(tgt_sample[1:], dtype=th.long, device=device)\n', (7739, 7785), True, 'import torch as th\n'), ((7814, 7856), 'torch.arange', 'th.arange', (['n'], {'dtype': 'th.long', 'device': 'device'}), '(n, dtype=th.long, device=device)\n', (7823, 7856), True, 'import torch as th\n'), ((7885, 7927), 'torch.arange', 'th.arange', (['m'], {'dtype': 'th.long', 'device': 'device'}), '(m, dtype=th.long, device=device)\n', (7894, 7927), True, 'import torch as th\n'), ((7956, 8017), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + n)'], {'dtype': 
'th.long', 'device': 'device'}), '(n_nodes, n_nodes + n, dtype=th.long, device=device)\n', (7965, 8017), True, 'import torch as th\n'), ((8071, 8132), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + m)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + m, dtype=th.long, device=device)\n', (8080, 8132), True, 'import torch as th\n'), ((8187, 8251), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ee)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ee, dtype=th.long, device=device)\n', (8196, 8251), True, 'import torch as th\n'), ((8309, 8373), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ed)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ed, dtype=th.long, device=device)\n', (8318, 8373), True, 'import torch as th\n'), ((8431, 8495), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_dd)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_dd, dtype=th.long, device=device)\n', (8440, 8495), True, 'import torch as th\n'), ((8831, 8844), 'torch.cat', 'th.cat', (['tgt_y'], {}), '(tgt_y)\n', (8837, 8844), True, 'import torch as th\n'), ((2122, 2138), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (2130, 2138), True, 'import numpy as np\n'), ((2170, 2186), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (2178, 2186), True, 'import numpy as np\n'), ((2218, 2234), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (2226, 2234), True, 'import numpy as np\n'), ((3090, 3121), 'torch.ones', 'th.ones', (['tgt_length', 'tgt_length'], {}), '(tgt_length, tgt_length)\n', (3097, 3121), True, 'import torch as th\n'), ((3431, 3442), 'time.time', 'time.time', ([], {}), '()\n', (3440, 3442), False, 'import time\n'), ((4774, 4825), 'torch.tensor', 'th.tensor', (['src_sample'], {'dtype': 'th.long', 'device': 'device'}), '(src_sample, dtype=th.long, device=device)\n', (4783, 4825), True, 'import torch as th\n'), ((4858, 4900), 'torch.arange', 'th.arange', 
(['n'], {'dtype': 'th.long', 'device': 'device'}), '(n, dtype=th.long, device=device)\n', (4867, 4900), True, 'import torch as th\n'), ((4933, 4994), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + n)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + n, dtype=th.long, device=device)\n', (4942, 4994), True, 'import torch as th\n'), ((5057, 5121), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ee)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ee, dtype=th.long, device=device)\n', (5066, 5121), True, 'import torch as th\n'), ((5335, 5383), 'torch.arange', 'th.arange', (['max_len'], {'dtype': 'th.long', 'device': 'device'}), '(max_len, dtype=th.long, device=device)\n', (5344, 5383), True, 'import torch as th\n'), ((5417, 5484), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + max_len)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + max_len, dtype=th.long, device=device)\n', (5426, 5484), True, 'import torch as th\n'), ((5553, 5617), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ed)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ed, dtype=th.long, device=device)\n', (5562, 5617), True, 'import torch as th\n'), ((5683, 5747), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_dd)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_dd, dtype=th.long, device=device)\n', (5692, 5747), True, 'import torch as th\n'), ((5945, 5956), 'torch.cat', 'th.cat', (['src'], {}), '(src)\n', (5951, 5956), True, 'import torch as th\n'), ((5958, 5973), 'torch.cat', 'th.cat', (['src_pos'], {}), '(src_pos)\n', (5964, 5973), True, 'import torch as th\n'), ((6002, 6013), 'torch.cat', 'th.cat', (['tgt'], {}), '(tgt)\n', (6008, 6013), True, 'import torch as th\n'), ((6015, 6030), 'torch.cat', 'th.cat', (['tgt_pos'], {}), '(tgt_pos)\n', (6021, 6030), True, 'import torch as th\n'), ((6102, 6117), 'torch.cat', 'th.cat', (['enc_ids'], {}), '(enc_ids)\n', (6108, 6117), 
True, 'import torch as th\n'), ((6126, 6141), 'torch.cat', 'th.cat', (['dec_ids'], {}), '(dec_ids)\n', (6132, 6141), True, 'import torch as th\n'), ((6179, 6195), 'torch.cat', 'th.cat', (['e2e_eids'], {}), '(e2e_eids)\n', (6185, 6195), True, 'import torch as th\n'), ((6203, 6219), 'torch.cat', 'th.cat', (['e2d_eids'], {}), '(e2d_eids)\n', (6209, 6219), True, 'import torch as th\n'), ((6227, 6243), 'torch.cat', 'th.cat', (['d2d_eids'], {}), '(d2d_eids)\n', (6233, 6243), True, 'import torch as th\n'), ((8716, 8727), 'torch.cat', 'th.cat', (['src'], {}), '(src)\n', (8722, 8727), True, 'import torch as th\n'), ((8729, 8744), 'torch.cat', 'th.cat', (['src_pos'], {}), '(src_pos)\n', (8735, 8744), True, 'import torch as th\n'), ((8773, 8784), 'torch.cat', 'th.cat', (['tgt'], {}), '(tgt)\n', (8779, 8784), True, 'import torch as th\n'), ((8786, 8801), 'torch.cat', 'th.cat', (['tgt_pos'], {}), '(tgt_pos)\n', (8792, 8801), True, 'import torch as th\n'), ((8882, 8897), 'torch.cat', 'th.cat', (['enc_ids'], {}), '(enc_ids)\n', (8888, 8897), True, 'import torch as th\n'), ((8906, 8921), 'torch.cat', 'th.cat', (['dec_ids'], {}), '(dec_ids)\n', (8912, 8921), True, 'import torch as th\n'), ((8959, 8975), 'torch.cat', 'th.cat', (['e2e_eids'], {}), '(e2e_eids)\n', (8965, 8975), True, 'import torch as th\n'), ((8983, 8999), 'torch.cat', 'th.cat', (['e2d_eids'], {}), '(e2d_eids)\n', (8989, 8999), True, 'import torch as th\n'), ((9007, 9023), 'torch.cat', 'th.cat', (['d2d_eids'], {}), '(d2d_eids)\n', (9013, 9023), True, 'import torch as th\n')] |
import tempfile
import numpy as np
from sklearn.base import TransformerMixin
from biom.util import biom_open
from skbio.stats.composition import clr
from skbio.stats import subsample_counts
from skbio.stats.ordination import pcoa
from friendly_guacamole.utils import as_dense
import pandas as pd
from unifrac import ssu
class AsDense(TransformerMixin):
    """Densify a ``biom.Table`` into a ``pd.DataFrame``.
    Stateless scikit-learn style transformer: ``fit`` records nothing and
    ``transform`` simply delegates to :func:`as_dense`.
    """
    def fit(self, X, y=None):
        """No-op fit, kept for pipeline compatibility.
        Parameters
        ----------
        X : biom.Table
            feature table
        y : None
            ignored
        Returns
        -------
        self : object
            Fitted transformer
        """
        return self
    def transform(self, X, y=None):
        """Return the dense dataframe representation of ``X``.
        Parameters
        ----------
        X : biom.Table
            feature table
        y : None
            ignored
        Returns
        -------
        X_new : pd.DataFrame
            Transformed data
        """
        dense = as_dense(X)
        return dense
class UniFrac(TransformerMixin):
    """
    computes the UniFrac distance on a biom.Table
    Parameters
    ----------
    tree_path : string
        Path to a phylogeny containing all IDs in the candidate tables
    unifrac_method : string
        UniFrac method to use. See `unifrac` package.
    """
    def __init__(self, tree_path, unifrac_method='unweighted'):
        self.tree_path = tree_path
        self.unifrac_method = unifrac_method
        # reference table captured at fit() time; transform() merges new
        # samples with it so distances to the fitted samples can be read out
        self.table = None
    def fit(self, X, y=None):
        """
        X : biom.Table
            feature table
        y : None
            ignored
        Returns
        -------
        self : object
            fitted transformer
        """
        self.table = X
        return self
    def transform(self, X):
        """
        X : biom.Table
            feature table
        y : None
            ignored
        Returns
        -------
        X_new : pd.DataFrame
            Transformed data
        """
        sub_dm = self._get_distances(X)
        return sub_dm
    def _get_distances(self, X):
        # compute the full merged distance matrix, then keep only the
        # (X samples) x (fitted samples) sub-block
        dm = self._get_distance_matrix(X)
        sub_dm = self._extract_sub_matrix(X, dm)
        return sub_dm
    def _extract_sub_matrix(self, X, dm):
        """Slice out the (new samples) x (fitted samples) distances."""
        # get indices of test ID's
        X_idx = [dm.index(name) for name in X.ids('sample')]
        # get indices of table ID's
        ref_idx = [dm.index(name) for name in self.table.ids('sample')]
        # extract sub-distance matrix
        # np.ix_ builds the open mesh so dm.data[idxs] has shape
        # (len(X_idx), len(ref_idx))
        idxs = np.ix_(X_idx, ref_idx)
        sub_dm = dm.data[idxs]
        return sub_dm
    def _get_distance_matrix(self, X):
        """
        computes UniFrac distances with the fitted samples
        Parameters
        ----------
        X : biom.Table
            new samples
        Returns
        -------
        dm : DistanceMatrix
            distances from old samples to new samples
        """
        # TODO one problem with this approach is that
        # if any samples in X overlap self.table, the counts will
        # be doubled
        merged_table = self.table.merge(X)
        # ssu reads the table from disk, so write the merged table to a
        # temporary biom file first.
        # NOTE(review): NamedTemporaryFile is re-opened by name while still
        # open here; this works on POSIX but not on Windows — confirm the
        # supported platforms.
        with tempfile.NamedTemporaryFile() as f:
            with biom_open(f.name, 'w') as b:
                merged_table.to_hdf5(b, "merged")
            dm = ssu(f.name, self.tree_path,
                     unifrac_method=self.unifrac_method,
                     variance_adjust=False,
                     alpha=1.0,
                     bypass_tips=False,
                     threads=1,
                     )
        return dm
class RarefactionBIOM(TransformerMixin):
    """
    rarefies a biom.Table
    Parameters
    ----------
    depth : int
        rarefaction depth
    replace : bool, optional
        indicates whether sampling should
        with replacement. default=False.
    """
    def __init__(self, depth, replace=False):
        self.depth = depth
        self.replace = replace
        # rarefied copy of the fitted table; its counts are reused verbatim
        # for samples seen at fit time so transform() is reproducible for them
        self.index = None
        # observation (feature) ids retained by the fit-time rarefaction
        self.features = None
    def fit(self, X, y=None):
        """
        X : biom.Table
            feature table
        y : None
            ignored
        Returns
        -------
        self : object
            fitted transformer
        """
        self.index = X.subsample(n=self.depth,
                                 with_replacement=self.replace)
        self.features = self.index.ids('observation')
        return self
    def transform(self, X):
        """ rarefies a biom.Table
        Parameters
        ----------
        X : biom.Table
            feature table
        Returns
        -------
        X_new : biom.Table
            rarefied table
        """
        # TODO There is an unaccounted for edge case here,
        # when samples have fewer counts than the depth
        # keep only the features that survived the fit-time rarefaction
        X = X.filter(ids_to_keep=self.features, axis='observation',
                     inplace=False)
        # split incoming samples into those already rarefied during fit
        # (reuse their stored counts) and genuinely new ones (rarefy now)
        index_ids = set(self.index.ids('sample'))
        known_ids = [id_ for id_ in X.ids('sample') if id_ in index_ids]
        unknown_ids = [id_ for id_ in X.ids('sample') if id_ not in index_ids]
        known_counts = self.index.filter(ids_to_keep=known_ids, axis='sample',
                                         inplace=False
                                         )
        unknown_counts = X.filter(ids_to_keep=unknown_ids, axis='sample',
                                  inplace=False
                                  )
        unknown_counts = unknown_counts.subsample(
            n=self.depth,
            with_replacement=self.replace
        )
        # TODO arghhh this really needs unit tests
        # NOTE(review): the return value of this merge is discarded — if the
        # intent was to grow self.index with the new samples it likely needs
        # `self.index = self.index.merge(unknown_counts)`; confirm against
        # biom's Table.merge semantics.
        self.index.merge(unknown_counts)
        all_counts = known_counts.merge(unknown_counts)
        # restore the caller's sample ordering before returning
        all_counts.sort_order(X.ids('sample'), axis='sample')
        return all_counts
class Rarefaction(TransformerMixin):
    """
    Rarefies an array-like feature table.

    Parameters
    ----------
    depth : int
        rarefaction depth
    replace : bool, optional
        indicates whether sampling should be done
        with replacement. default=False.
    """
    def __init__(self, depth, replace=False):
        self.depth = depth
        self.replace = replace
        # positions of all-zero columns found at fit time
        self.idx = None
    def fit(self, X, y=None):
        """Fit the transformer on a feature table.

        X : array-like
            feature table
        y : None
            ignored
        Returns
        -------
        self : object
            fitted transformer
        """
        X, self.idx = self._find_nonzero_idx(X)
        return self
    def transform(self, X, y=None):
        """Rarefy a feature table.

        Caution: this will return different results for the same sample
        because subsampling is random.
        Parameters
        ----------
        X : array-like
            feature table
        Returns
        -------
        X_new : array-like
            rarefied table
        """
        if isinstance(X, pd.DataFrame):
            # keep every column except the ones flagged at fit time
            keep = np.array([True] * len(X.columns))
            keep[self.idx[:, 1]] = False
            X = X.loc[:, keep]
        else:
            X = np.delete(X, self.idx, axis=1)
        return self._subsample(X)
    def _find_nonzero_idx(self, X):
        # subsample once, then locate columns whose total count is zero
        rarefied = self._subsample(X)
        column_totals = rarefied.sum(axis=0, keepdims=True)
        zero_positions = np.argwhere(column_totals == 0)
        return rarefied, zero_positions
    def _subsample(self, X):
        # subsample each row of X down to self.depth counts
        X = X.astype(int)
        rows = X.values if isinstance(X, pd.DataFrame) else X
        resampled = [
            subsample_counts(row, n=self.depth, replace=self.replace)
            for row in rows
        ]
        return np.vstack(resampled)
class CLR(TransformerMixin):
    """
    performs the center log ratio transform with pseudo-count
    Parameters
    ----------
    pseudocount : int, optional
        Count to add to every entry of the table
    """
    def __init__(self, pseudocount=1):
        self.pseudocount = pseudocount
    def fit(self, X, y=None):
        """No-op fit; the transform is stateless.

        X : array-like
            feature table
        y : None
            ignored
        Returns
        -------
        self : object
            fitted transformer
        """
        return self
    def transform(self, X, y=None):
        """Apply the CLR transform after adding the pseudo-count.

        Parameters
        ----------
        X : pd.DataFrame
            feature table
        y : None
            ignored
        Returns
        -------
        X_new : pd.DataFrame
            Transformed data
        """
        shifted = X + self.pseudocount
        result = clr(shifted)
        # a single-row input collapses to 1-D; restore the 2-D shape
        if X.shape[0] == 1:
            result = result.reshape(1, -1)
        return result
class PCoA(TransformerMixin):
    def __init__(self, metric='precomputed'):
        """Performs a PCoA on the data.

        Parameters
        ----------
        metric : str, default='precomputed'
            metric to compute PCoA on. If 'precomputed', a distance matrix is
            expected
        """
        self.metric = metric
        self.embedding_ = None
    def fit(self, X, y=None):
        """Fit the ordination.

        Parameters
        ----------
        X : array-like
            Feature table or distance matrix
        y : None
            ignored
        Returns
        -------
        self
            fitted pcoa
        """
        # only precomputed distance matrices are supported for now
        if self.metric != 'precomputed':
            raise NotImplementedError()
        self.embedding_ = pcoa(X).samples
        return self
    def fit_transform(self, X, y=None):
        """Fit and return the sample embeddings.

        Parameters
        ----------
        X : array-like
            Feature table or distance matrix
        Returns
        -------
        array-like
            embeddings of the samples
        """
        self.fit(X, y)
        return self.embedding_
class FilterSamples(TransformerMixin):
    def __init__(self, min_count=0):
        """
        Filters samples.

        Parameters
        ----------
        min_count : int
            Minimum number of feature counts needed to retain a sample
        """
        self.min_count = min_count
    def fit(self, X, y=None):
        """No-op fit; filtering needs no learned state.

        Parameters
        ----------
        X : biom.Table
        Returns
        -------
        self
            fitted transformer
        """
        return self
    def transform(self, X, y=None):
        """Drop samples whose total count falls below min_count.

        Parameters
        ----------
        X : biom.Table
        Returns
        -------
        biom.Table
            The filtered table.
        """
        totals = X.sum(axis='sample')
        too_small = totals < self.min_count
        drop_ids = set(X.ids('sample')[too_small])
        # invert=True keeps every sample NOT in drop_ids
        return X.filter(drop_ids, invert=True, inplace=False)
| [
"tempfile.NamedTemporaryFile",
"skbio.stats.composition.clr",
"biom.util.biom_open",
"numpy.ix_",
"skbio.stats.subsample_counts",
"unifrac.ssu",
"friendly_guacamole.utils.as_dense",
"numpy.argwhere",
"skbio.stats.ordination.pcoa",
"numpy.delete",
"numpy.vstack"
] | [((996, 1007), 'friendly_guacamole.utils.as_dense', 'as_dense', (['X'], {}), '(X)\n', (1004, 1007), False, 'from friendly_guacamole.utils import as_dense\n'), ((2501, 2523), 'numpy.ix_', 'np.ix_', (['X_idx', 'ref_idx'], {}), '(X_idx, ref_idx)\n', (2507, 2523), True, 'import numpy as np\n'), ((7218, 7244), 'numpy.argwhere', 'np.argwhere', (['(row_sums == 0)'], {}), '(row_sums == 0)\n', (7229, 7244), True, 'import numpy as np\n'), ((7563, 7579), 'numpy.vstack', 'np.vstack', (['X_out'], {}), '(X_out)\n', (7572, 7579), True, 'import numpy as np\n'), ((8453, 8478), 'skbio.stats.composition.clr', 'clr', (['(X + self.pseudocount)'], {}), '(X + self.pseudocount)\n', (8456, 8478), False, 'from skbio.stats.composition import clr\n'), ((3101, 3130), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3128, 3130), False, 'import tempfile\n'), ((3251, 3382), 'unifrac.ssu', 'ssu', (['f.name', 'self.tree_path'], {'unifrac_method': 'self.unifrac_method', 'variance_adjust': '(False)', 'alpha': '(1.0)', 'bypass_tips': '(False)', 'threads': '(1)'}), '(f.name, self.tree_path, unifrac_method=self.unifrac_method,\n variance_adjust=False, alpha=1.0, bypass_tips=False, threads=1)\n', (3254, 3382), False, 'from unifrac import ssu\n'), ((6966, 6996), 'numpy.delete', 'np.delete', (['X', 'self.idx'], {'axis': '(1)'}), '(X, self.idx, axis=1)\n', (6975, 6996), True, 'import numpy as np\n'), ((7461, 7518), 'skbio.stats.subsample_counts', 'subsample_counts', (['row'], {'n': 'self.depth', 'replace': 'self.replace'}), '(row, n=self.depth, replace=self.replace)\n', (7477, 7518), False, 'from skbio.stats import subsample_counts\n'), ((3154, 3176), 'biom.util.biom_open', 'biom_open', (['f.name', '"""w"""'], {}), "(f.name, 'w')\n", (3163, 3176), False, 'from biom.util import biom_open\n'), ((9314, 9321), 'skbio.stats.ordination.pcoa', 'pcoa', (['X'], {}), '(X)\n', (9318, 9321), False, 'from skbio.stats.ordination import pcoa\n')] |
import numpy as np
from gym.spaces import Box
from sequential_inference.envs.sawyer.asset_path_utils import full_v2_path_for
from sequential_inference.envs.sawyer.mujoco.sawyer_xyz.sawyer_xyz_env import (
SawyerXYZEnv,
_assert_task_is_set,
)
from sequential_inference.envs.sawyer.mujoco.utils.rotation import euler2quat
from pyquaternion import Quaternion
class SawyerHammerEnv(SawyerXYZEnv):
    # Sawyer "hammer the nail" manipulation environment (MuJoCo-backed).
    # The agent must grab the hammer and drive the nail toward the target
    # site; reward is shaped on hammer-head/nail and nail/target distances.
    def __init__(
        self,
        xml_path=None,
        goal_site_name=None,
        action_mode="joint_position",
        reward_scaling=1.0,
    ):
        # NOTE(review): xml_path, goal_site_name, action_mode and
        # reward_scaling are accepted but never used in this __init__.
        liftThresh = 0.12
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        # obj_low = (-0.1, 0.4, 0.0)
        # obj_high = (0.1, 0.5, 0.0)
        goal_low = (-0.3, 0.85, 0.0)
        goal_high = (0.3, 0.85, 0.0)
        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )
        self.init_config = {
            "hammer_init_pos": np.array([0.07, 0.4, 0.2]),
            "hand_init_pos": np.array([0, 0.4, 0.2]),
        }
        self.goal = np.array([-0.3, 0.85, 0.0])
        self.hammer_init_pos = self.init_config["hammer_init_pos"]
        self.hand_init_pos = self.init_config["hand_init_pos"]
        self.liftThresh = liftThresh
        self.max_path_length = 200
        # goals are sampled along a line at y=0.85 (goal_low/goal_high above)
        self._random_reset_space = Box(np.array(goal_low), np.array(goal_high))
        self.goal_space = self._random_reset_space
        self.max_nail_dist = None
        self.max_hammer_dist = None
        self.maxHammerDist = 0.2
        # TODO only fixed. rotz works under MPPI motion planning
        # rotMode fixed, rotz, quat, euler
        # rotMode selects the end-effector action parameterization and the
        # matching action-space bounds below.
        self.rotMode = "fixed"
        if self.rotMode == "fixed":
            # 3 xyz deltas + 1 gripper command
            self.action_space = Box(
                np.array([-1, -1, -1, -1]),
                np.array([1, 1, 1, 1]),
            )
        elif self.rotMode == "rotz":
            # 3 xyz deltas + 1 z-rotation + 1 gripper command
            self.action_rot_scale = 1.0 / 50
            self.action_space = Box(
                np.array([-1, -1, -1, -np.pi, -1]),
                np.array([1, 1, 1, np.pi, 1]),
            )
        elif self.rotMode == "quat":
            # 3 xyz deltas + 4 quaternion components + 1 gripper command
            self.action_space = Box(
                np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
                np.array([1, 1, 1, 2 * np.pi, 1, 1, 1, 1]),
            )
        elif self.rotMode == "euler":
            # 3 xyz deltas + 3 euler angles + 1 gripper command
            self.action_space = Box(
                np.array([-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1]),
                np.array([1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1]),
            )
        else:
            raise NotImplementedError
    def get_obs_dim(self):
        # Size of the flat observation vector.
        return len(self._get_obs())
    @property
    def model_name(self):
        # Path to the MuJoCo XML model for this task.
        return full_v2_path_for("sawyer_xyz/sawyer_hammer.xml")
    def step(self, action):
        # Dispatch the action according to the configured rotation mode,
        # then advance the simulation and compute the shaped reward.
        if self.rotMode == "euler":
            action_ = np.zeros(7)
            action_[:3] = action[:3]
            action_[3:] = euler2quat(action[3:6])
            self.set_xyz_action_rot(action_)
        elif self.rotMode == "fixed":
            self.set_xyz_action(action[:3])
        elif self.rotMode == "rotz":
            self.set_xyz_action_rotz(action[:4])
        else:
            self.set_xyz_action_rot(action[:7])
        # last action element drives the two gripper fingers symmetrically
        self.do_simulation([action[-1], -action[-1]])
        ob = self._get_obs()
        reward, _, reachDist, pickRew, _, _, screwDist = self.compute_reward(action, ob)
        # self.curr_path_length += 1
        info = {
            "reachDist": reachDist,
            "pickRew": pickRew,
            "epRew": reward,
            "goalDist": screwDist,
            # success when the nail is within 5 cm of the target along y
            "success": float(screwDist <= 0.05),
        }
        # episode termination is handled externally (done is always False)
        return ob, reward, False, info
    def _get_pos_objects(self):
        # Concatenated world positions of the hammer and the nail link.
        return np.hstack(
            (self.get_body_com("hammer").copy(), self.get_body_com("nail_link").copy())
        )
    def _set_hammer_xyz(self, pos):
        # Teleport the hammer to `pos` and zero its velocity.
        # NOTE(review): qpos[9:12]/qvel[9:15] assume the hammer free joint
        # occupies these indices in the model — confirm against the XML.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9:12] = pos.copy()
        qvel[9:15] = 0
        self.set_state(qpos, qvel)
    def reset_model(self):
        self._reset_hand()
        # Set position of box & nail (these are not randomized)
        self.sim.model.body_pos[self.model.body_name2id("box")] = self.goal
        # Update _target_pos
        self._target_pos = self._get_site_pos("goal")
        # Randomize hammer position
        self.hammer_init_pos = self.init_config["hammer_init_pos"]
        self._set_hammer_xyz(self.hammer_init_pos)
        # Update heights (for use in reward function)
        self.hammerHeight = self.get_body_com("hammer").copy()[2]
        self.heightTarget = self.hammerHeight + self.liftThresh
        # Update distances (for use in reward function)
        nail_init_pos = self._get_site_pos("nailHead")
        self.max_nail_dist = (self._target_pos - nail_init_pos)[1]
        self.max_hammer_dist = np.linalg.norm(
            np.array(
                [self.hammer_init_pos[0], self.hammer_init_pos[1], self.heightTarget]
            )
            - nail_init_pos
            + self.heightTarget
            + np.abs(self.max_nail_dist)
        )
        return self._get_obs()
    def _reset_hand(self):
        super()._reset_hand()
        # grasp bookkeeping flag, reset each episode
        self.pickCompleted = False
    def compute_reward(self, actions, obs):
        # Shaped reward combining (1) a drop penalty, (2) hammer-head to
        # nail proximity, and (3) nail progress toward the target.
        hammerPos = obs[3:6]
        nail_pos = self.get_body_com("nail_link").copy()
        hammerHeadPos = self.data.get_geom_xpos("hammer_head").copy()
        hammerHandlePos = self.data.get_geom_xpos("hammer_handle").copy()
        objPos = self.data.site_xpos[self.model.site_name2id("nailHead")]
        rightFinger, leftFinger = (
            self._get_site_pos("rightEndEffector"),
            self._get_site_pos("leftEndEffector"),
        )
        fingerCOM = (rightFinger + leftFinger) / 2
        heightTarget = self.heightTarget
        hammerDist = np.linalg.norm(objPos - hammerHeadPos)
        # remaining nail travel, measured along y only
        screwDist = np.abs(nail_pos[1] - self._target_pos[1])
        reachDist = np.linalg.norm(hammerHandlePos - fingerCOM)
        rewards = 0
        # penalty for dropping the hammer
        drop_thresh = 0.03
        if hammerPos[2] < objPos[2] - drop_thresh:
            rewards -= 10
        # bounded (0, 1] shaping terms via tanh of the distances
        hammer_nail_dist_reward = 1 - np.tanh(hammerDist)
        rewards += hammer_nail_dist_reward
        nail_strike_reward = 1 - np.tanh(screwDist)
        # nail_strike_reward = np.tanh(objPos[1]- self.original_pos[1])
        nail_striike_weight = 100
        rewards += nail_striike_weight * nail_strike_reward
        # tuple layout matches the unpacking in step(); pickRew slots are 0
        return [rewards, 0, reachDist, 0, 0, hammerDist, screwDist]
    def viewer_setup(self):
        # side view
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.elevation = -75
        self.viewer.cam.azimuth = 90
        self.viewer.cam.distance = 2.0
        self.viewer.cam.lookat[1] = 0.5
        self.viewer.cam.lookat[0] = 0.0
        self.viewer.cam.lookat[2] = 0.02
    def render(self, mode="human", width=500, height=500):
        # rgb_array returns a pixel buffer; human opens the viewer window.
        if mode == "rgb_array":
            # self._get_viewer(mode='rgb_array').render()
            # window size used for old mujoco-py:
            # width, height = 500, 500
            self._get_viewer(mode="rgb_array").render(width, height)
            data = self._get_viewer(mode="rgb_array").read_pixels(
                width, height, depth=False
            )
            # flip vertically: OpenGL's origin is bottom-left
            return np.flipud(data)
        elif mode == "human":
            self._get_viewer().render()
class SawyerHammerEnvGoal(SawyerHammerEnv):
    """
    This env is the multi-task version of sawyer with different position of the goal
    """
    def __init__(
        self,
        is_eval_env=False,
        reward_scaling=1.0,
        xml_path=None,
        goal_site_name=None,
        action_mode="joint_position",
        mode="ee",
        level="easy",
    ):
        # NOTE(review): only is_eval_env is used here; the remaining
        # keyword arguments are accepted but ignored.
        # goals vary along x on the line y=0.85, z=0
        self.goal_range = Box(
            low=np.array([-0.3, 0.85, 0.0]), high=np.array([0.3, 0.85, 0.0])
        )
        # pre-sample a fixed pool of 30 goals for this env instance
        self.task_list = self.generate_list_of_tasks(
            num_tasks=30, is_eval_env=is_eval_env
        )
        super().__init__()
    def reset_task(self, task=None):
        # Pick a task by index, or uniformly at random when task is None.
        if task is None:
            task_idx = np.random.randint(len(self.task_list))
        else:
            task_idx = task
        self.set_task(self.task_list[task_idx])
    def reset(self):
        # original mujoco reset
        self.sim.reset()
        ob = self.reset_model()
        return ob
    def generate_list_of_tasks(self, num_tasks, is_eval_env):
        """To be called externally to obtain samples from the task distribution"""
        # fixed seeds make train/eval task pools disjoint and reproducible
        if is_eval_env:
            np.random.seed(100)  # pick eval tasks as random from diff seed
        else:
            np.random.seed(101)
        possible_goals = [self.goal_range.sample() for _ in range(num_tasks)]
        # re-seed from entropy so later randomness is not deterministic
        np.random.seed()
        return possible_goals
    def set_task(self, goal):
        """To be called externally to set the task for this environment"""
        # move the box body to the goal so reset_model() places the target
        self.sim.model.body_pos[self.model.body_name2id("box")] = goal
        self.goal = goal
    def get_task(self):
        # Current goal position (the active task).
        return self.goal
    def step(self, action):
        # Delegate to the base env, then tag the info dict with the task.
        obs, rew, done, info = super().step(action)
        info["task"] = self.get_task()
        return obs, rew, done, info
| [
"numpy.abs",
"numpy.random.seed",
"numpy.tanh",
"numpy.zeros",
"sequential_inference.envs.sawyer.mujoco.utils.rotation.euler2quat",
"numpy.flipud",
"numpy.linalg.norm",
"numpy.array",
"sequential_inference.envs.sawyer.asset_path_utils.full_v2_path_for"
] | [((1113, 1140), 'numpy.array', 'np.array', (['[-0.3, 0.85, 0.0]'], {}), '([-0.3, 0.85, 0.0])\n', (1121, 1140), True, 'import numpy as np\n'), ((2736, 2784), 'sequential_inference.envs.sawyer.asset_path_utils.full_v2_path_for', 'full_v2_path_for', (['"""sawyer_xyz/sawyer_hammer.xml"""'], {}), "('sawyer_xyz/sawyer_hammer.xml')\n", (2752, 2784), False, 'from sequential_inference.envs.sawyer.asset_path_utils import full_v2_path_for\n'), ((5891, 5929), 'numpy.linalg.norm', 'np.linalg.norm', (['(objPos - hammerHeadPos)'], {}), '(objPos - hammerHeadPos)\n', (5905, 5929), True, 'import numpy as np\n'), ((5950, 5991), 'numpy.abs', 'np.abs', (['(nail_pos[1] - self._target_pos[1])'], {}), '(nail_pos[1] - self._target_pos[1])\n', (5956, 5991), True, 'import numpy as np\n'), ((6012, 6055), 'numpy.linalg.norm', 'np.linalg.norm', (['(hammerHandlePos - fingerCOM)'], {}), '(hammerHandlePos - fingerCOM)\n', (6026, 6055), True, 'import numpy as np\n'), ((8835, 8851), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (8849, 8851), True, 'import numpy as np\n'), ((1000, 1026), 'numpy.array', 'np.array', (['[0.07, 0.4, 0.2]'], {}), '([0.07, 0.4, 0.2])\n', (1008, 1026), True, 'import numpy as np\n'), ((1057, 1080), 'numpy.array', 'np.array', (['[0, 0.4, 0.2]'], {}), '([0, 0.4, 0.2])\n', (1065, 1080), True, 'import numpy as np\n'), ((1384, 1402), 'numpy.array', 'np.array', (['goal_low'], {}), '(goal_low)\n', (1392, 1402), True, 'import numpy as np\n'), ((1404, 1423), 'numpy.array', 'np.array', (['goal_high'], {}), '(goal_high)\n', (1412, 1423), True, 'import numpy as np\n'), ((2872, 2883), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (2880, 2883), True, 'import numpy as np\n'), ((2947, 2970), 'sequential_inference.envs.sawyer.mujoco.utils.rotation.euler2quat', 'euler2quat', (['action[3:6]'], {}), '(action[3:6])\n', (2957, 2970), False, 'from sequential_inference.envs.sawyer.mujoco.utils.rotation import euler2quat\n'), ((6263, 6282), 'numpy.tanh', 'np.tanh', 
(['hammerDist'], {}), '(hammerDist)\n', (6270, 6282), True, 'import numpy as np\n'), ((6360, 6378), 'numpy.tanh', 'np.tanh', (['screwDist'], {}), '(screwDist)\n', (6367, 6378), True, 'import numpy as np\n'), ((7392, 7407), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (7401, 7407), True, 'import numpy as np\n'), ((8638, 8657), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (8652, 8657), True, 'import numpy as np\n'), ((8728, 8747), 'numpy.random.seed', 'np.random.seed', (['(101)'], {}), '(101)\n', (8742, 8747), True, 'import numpy as np\n'), ((1810, 1836), 'numpy.array', 'np.array', (['[-1, -1, -1, -1]'], {}), '([-1, -1, -1, -1])\n', (1818, 1836), True, 'import numpy as np\n'), ((1854, 1876), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1862, 1876), True, 'import numpy as np\n'), ((5114, 5140), 'numpy.abs', 'np.abs', (['self.max_nail_dist'], {}), '(self.max_nail_dist)\n', (5120, 5140), True, 'import numpy as np\n'), ((7898, 7925), 'numpy.array', 'np.array', (['[-0.3, 0.85, 0.0]'], {}), '([-0.3, 0.85, 0.0])\n', (7906, 7925), True, 'import numpy as np\n'), ((7932, 7958), 'numpy.array', 'np.array', (['[0.3, 0.85, 0.0]'], {}), '([0.3, 0.85, 0.0])\n', (7940, 7958), True, 'import numpy as np\n'), ((2027, 2061), 'numpy.array', 'np.array', (['[-1, -1, -1, -np.pi, -1]'], {}), '([-1, -1, -1, -np.pi, -1])\n', (2035, 2061), True, 'import numpy as np\n'), ((2079, 2108), 'numpy.array', 'np.array', (['[1, 1, 1, np.pi, 1]'], {}), '([1, 1, 1, np.pi, 1])\n', (2087, 2108), True, 'import numpy as np\n'), ((2214, 2255), 'numpy.array', 'np.array', (['[-1, -1, -1, 0, -1, -1, -1, -1]'], {}), '([-1, -1, -1, 0, -1, -1, -1, -1])\n', (2222, 2255), True, 'import numpy as np\n'), ((2273, 2315), 'numpy.array', 'np.array', (['[1, 1, 1, 2 * np.pi, 1, 1, 1, 1]'], {}), '([1, 1, 1, 2 * np.pi, 1, 1, 1, 1])\n', (2281, 2315), True, 'import numpy as np\n'), ((4930, 5009), 'numpy.array', 'np.array', (['[self.hammer_init_pos[0], 
self.hammer_init_pos[1], self.heightTarget]'], {}), '([self.hammer_init_pos[0], self.hammer_init_pos[1], self.heightTarget])\n', (4938, 5009), True, 'import numpy as np\n'), ((2422, 2475), 'numpy.array', 'np.array', (['[-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1]'], {}), '([-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1])\n', (2430, 2475), True, 'import numpy as np\n'), ((2493, 2548), 'numpy.array', 'np.array', (['[1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1]'], {}), '([1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1])\n', (2501, 2548), True, 'import numpy as np\n')] |
import os
import numpy as np
from tqdm import trange
from pynvml import *
from nibabel import load as load_nii
import nibabel as nib
from utils.patch_dataloader import *
def test_scan_uncertainty(model, test_x_data, scan, options, intermediate=None, save_nifti=False, uncertainty=True, candidate_mask=None, T=20):
    """
    Test data based on one model
    Input:
    - test_x_data: a nested dictionary containing training image paths:
        train_x_data['scan_name']['modality'] = path_to_image_modality
    - scan: scan identifier used as the key for test_x_data
    - intermediate: if not None, also save per-voxel mean and variance maps
    - T: number of stochastic forward passes for Monte Carlo uncertainty
    - save_nifti: save image segmentation
    - candidate_mask: a binary masks containing voxels to classify
    Output:
    - test_scan = Output image containing the probability output segmentation
    - If save_nifti --> Saves a nii file at specified location options['test_folder']/['test_scan']
    """
    # query total GPU memory (MB) via NVML and size batches accordingly
    nvmlInit()
    handle = nvmlDeviceGetHandleByIndex(0)
    info = nvmlDeviceGetMemoryInfo(handle)
    bsize = info.total/1024/1024
    # print "total GPU memory available: %d MB" % (bsize)
    if bsize < 2000:
        batch_size = 384
        print("reducing batch_size to : {}".format(batch_size))
        options['batch_size'] = 100352
    else:
        # host-specific tuning; options['batch_size'] stays unchanged on
        # non-"hamlet" hosts
        if options['hostname'].startswith("hamlet"):
            # batch_size = 2200
            batch_size = 3000
            options['batch_size'] = 350000
        else:
            # batch_size = 2800
            batch_size = 2000
    # get_scan name and create an empty nii image to store segmentation
    tmp = {}
    tmp[scan] = test_x_data
    test_x_data = tmp
    scans = test_x_data.keys()
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0]).get_data()
    # reuse the FLAIR geometry (affine/header) for all output volumes
    header = load_nii(flair_scans[0]).header
    affine = header.get_qform()
    seg_image = np.zeros_like(flair_image)
    var_image = np.zeros_like(flair_image)
    thresh_image = np.zeros_like(flair_image)
    # get test paths
    _, scan = os.path.split(flair_scans[0])
    test_folder = os.path.join('/host/silius/local_raid/ravnoor/01_Projects/55_Bayesian_DeepLesion_LoSo/data/predictions', options['experiment'])
    # test_folder = '/host/silius/local_raid/ravnoor/01_Projects/06_DeepLesion_LoSo/data/predictions
    if not os.path.exists(test_folder):
        # os.path.join(test_folder, options['experiment'])
        os.mkdir(test_folder)
    print('-'*60)
    print(str.replace(scan, '_flair.nii.gz', ''))
    print('-'*60)
    # compute lesion segmentation in batches of size options['batch_size']
    for batch, centers in load_test_patches(test_x_data, options, options['patch_size'], options['batch_size'], options['min_th'], candidate_mask):
        print("predicting uncertainty")
        # MC-dropout: mean and variance over T stochastic passes
        y_pred, y_pred_var = predict_uncertainty(model, batch, batch_size=batch_size, T=T)
        # scatter the per-patch class-1 probabilities back to voxel space
        [x, y, z] = np.stack(centers, axis=1)
        seg_image[x, y, z] = y_pred[:, 1]
        var_image[x, y, z] = y_pred_var[:, 1]
    if intermediate is not None:
        # also persist the raw mean and variance volumes
        # test_folder = str.replace(test_folder, 'brain', 'predictions')
        if not os.path.exists(test_folder):
            os.mkdir(test_folder)
        # out_scan = nib.Nifti1Image(seg_image, np.eye(4))
        out_scan = nib.Nifti1Image(seg_image, affine, header)
        test_name = str.replace(scan, '_flair.nii.gz', '') + '_out_pred_mean_0.nii.gz'
        out_scan.to_filename(os.path.join(test_folder, test_name))
        out_scan = nib.Nifti1Image(var_image, affine, header)
        test_name = str.replace(scan, '_flair.nii.gz', '') + '_out_pred_var_0.nii.gz'
        out_scan.to_filename(os.path.join(test_folder, test_name))
    # test_folder = str.replace(test_folder, 'brain', 'predictions')
    if not os.path.exists(os.path.join(test_folder, options['experiment'])):
        os.mkdir(os.path.join(test_folder, options['experiment']))
    out_scan = nib.Nifti1Image(seg_image, affine, header)
    #out_scan.to_filename(os.path.join(options['test_folder'], options['test_scan'], options['experiment'], options['test_name']))
    test_name = str.replace(scan, '_flair.nii.gz', '') + '_out_pred_0.nii.gz'
    out_scan.to_filename(os.path.join(test_folder, test_name))
    # NOTE(review): despite the name, no thresholding is applied here;
    # the raw probability map is returned
    thresh_image = seg_image.copy()
    return thresh_image
def select_voxels_from_previous_model(model, train_x_data, options):
    """
    Select training voxels from image segmentation masks
    """
    threshold = options['th_dnn_train_2']
    # scan identifiers from the training dictionary
    scans = list(train_x_data.keys())
    print(dict(train_x_data[scans[0]]))
    # one boolean mask per scan: voxels whose predicted mean probability
    # exceeds the threshold
    mask = []
    for s in trange(len(scans), desc='sel_vox_prev_model_pred_mean'):
        pred = test_scan_uncertainty(model, dict(train_x_data[scans[s]]),
                                     scans[s], options,
                                     intermediate=1, uncertainty=True)
        mask.append(pred[0] > threshold)
    return mask
def predict_uncertainty(model, data, batch_size, T=10):
    """Monte Carlo dropout prediction.

    Runs T stochastic forward passes through the model (learning phase is
    fed as 1 when the function is called, so stochastic layers such as
    dropout stay active) and returns the per-output mean and variance
    across the T passes.

    Parameters
    ----------
    model : keras model
    data : input batch passed to each stochastic forward pass
    batch_size : int
        batch size used inside predict_stochastic
    T : int, optional
        number of Monte Carlo samples (default 10)

    Returns
    -------
    (MC_pred, MC_pred_var) : mean and variance over the T passes
    """
    # renamed from `input`/`output` to avoid shadowing builtins
    model_input = model.layers[0].input
    model_output = model.layers[-1].output
    f_stochastic = K.function([model_input, K.learning_phase()], model_output) # instantiates a Keras function.
    K.set_image_dim_ordering('th')
    K.set_image_data_format('channels_first')
    # BUG FIX: `xrange` is Python-2-only; `range` works on both 2 and 3
    Yt_hat = np.array([predict_stochastic(f_stochastic, data, batch_size=batch_size)
                       for _ in tqdm(range(T), ascii=True, desc="predict_stochastic")])
    MC_pred = np.mean(Yt_hat, 0)
    MC_pred_var = np.var(Yt_hat, 0)
    return MC_pred, MC_pred_var
def predict_stochastic(f, ins, batch_size=128, verbose=0):
    '''
    Loop over `ins` in batches, calling f([batch, 1]) on each slice and
    assembling the per-batch outputs into full-size arrays. Returns the
    first output array.
    '''
    n_samples = len(ins)
    collected = []
    if verbose == 1:
        progbar = Progbar(target=n_samples)
    all_indices = np.arange(n_samples)
    for k, (start, stop) in enumerate(make_batches(n_samples, batch_size)):
        chunk = slice_X(ins, all_indices[start:stop])
        # learning phase flag 1 keeps stochastic layers active
        result = f([chunk, 1])
        if type(result) != list:
            result = [result]
        if k == 0:
            # allocate full-size output buffers on the first batch
            for part in result:
                collected.append(np.zeros((n_samples,) + part.shape[1:]))
        for i, part in enumerate(result):
            collected[i][start:stop] = part
        if verbose == 1:
            progbar.update(stop)
    return collected[0]
| [
"numpy.stack",
"os.mkdir",
"nibabel.Nifti1Image",
"numpy.zeros_like",
"nibabel.load",
"os.path.exists",
"numpy.zeros",
"numpy.var",
"numpy.mean",
"numpy.arange",
"os.path.split",
"os.path.join"
] | [((1781, 1807), 'numpy.zeros_like', 'np.zeros_like', (['flair_image'], {}), '(flair_image)\n', (1794, 1807), True, 'import numpy as np\n'), ((1824, 1850), 'numpy.zeros_like', 'np.zeros_like', (['flair_image'], {}), '(flair_image)\n', (1837, 1850), True, 'import numpy as np\n'), ((1870, 1896), 'numpy.zeros_like', 'np.zeros_like', (['flair_image'], {}), '(flair_image)\n', (1883, 1896), True, 'import numpy as np\n'), ((1933, 1962), 'os.path.split', 'os.path.split', (['flair_scans[0]'], {}), '(flair_scans[0])\n', (1946, 1962), False, 'import os\n'), ((1981, 2118), 'os.path.join', 'os.path.join', (['"""/host/silius/local_raid/ravnoor/01_Projects/55_Bayesian_DeepLesion_LoSo/data/predictions"""', "options['experiment']"], {}), "(\n '/host/silius/local_raid/ravnoor/01_Projects/55_Bayesian_DeepLesion_LoSo/data/predictions'\n , options['experiment'])\n", (1993, 2118), False, 'import os\n'), ((5435, 5453), 'numpy.mean', 'np.mean', (['Yt_hat', '(0)'], {}), '(Yt_hat, 0)\n', (5442, 5453), True, 'import numpy as np\n'), ((5472, 5489), 'numpy.var', 'np.var', (['Yt_hat', '(0)'], {}), '(Yt_hat, 0)\n', (5478, 5489), True, 'import numpy as np\n'), ((5831, 5851), 'numpy.arange', 'np.arange', (['nb_sample'], {}), '(nb_sample)\n', (5840, 5851), True, 'import numpy as np\n'), ((1701, 1725), 'nibabel.load', 'load_nii', (['flair_scans[0]'], {}), '(flair_scans[0])\n', (1709, 1725), True, 'from nibabel import load as load_nii\n'), ((2221, 2248), 'os.path.exists', 'os.path.exists', (['test_folder'], {}), '(test_folder)\n', (2235, 2248), False, 'import os\n'), ((2317, 2338), 'os.mkdir', 'os.mkdir', (['test_folder'], {}), '(test_folder)\n', (2325, 2338), False, 'import os\n'), ((2800, 2825), 'numpy.stack', 'np.stack', (['centers'], {'axis': '(1)'}), '(centers, axis=1)\n', (2808, 2825), True, 'import numpy as np\n'), ((3177, 3219), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['seg_image', 'affine', 'header'], {}), '(seg_image, affine, header)\n', (3192, 3219), True, 'import nibabel as nib\n'), 
((3394, 3436), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['var_image', 'affine', 'header'], {}), '(var_image, affine, header)\n', (3409, 3436), True, 'import nibabel as nib\n'), ((3836, 3878), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['seg_image', 'affine', 'header'], {}), '(seg_image, affine, header)\n', (3851, 3878), True, 'import nibabel as nib\n'), ((1652, 1676), 'nibabel.load', 'load_nii', (['flair_scans[0]'], {}), '(flair_scans[0])\n', (1660, 1676), True, 'from nibabel import load as load_nii\n'), ((3036, 3063), 'os.path.exists', 'os.path.exists', (['test_folder'], {}), '(test_folder)\n', (3050, 3063), False, 'import os\n'), ((3077, 3098), 'os.mkdir', 'os.mkdir', (['test_folder'], {}), '(test_folder)\n', (3085, 3098), False, 'import os\n'), ((3336, 3372), 'os.path.join', 'os.path.join', (['test_folder', 'test_name'], {}), '(test_folder, test_name)\n', (3348, 3372), False, 'import os\n'), ((3552, 3588), 'os.path.join', 'os.path.join', (['test_folder', 'test_name'], {}), '(test_folder, test_name)\n', (3564, 3588), False, 'import os\n'), ((4125, 4161), 'os.path.join', 'os.path.join', (['test_folder', 'test_name'], {}), '(test_folder, test_name)\n', (4137, 4161), False, 'import os\n'), ((3694, 3742), 'os.path.join', 'os.path.join', (['test_folder', "options['experiment']"], {}), "(test_folder, options['experiment'])\n", (3706, 3742), False, 'import os\n'), ((3766, 3814), 'os.path.join', 'os.path.join', (['test_folder', "options['experiment']"], {}), "(test_folder, options['experiment'])\n", (3778, 3814), False, 'import os\n'), ((6292, 6307), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (6300, 6307), True, 'import numpy as np\n')] |
from tqdm import tqdm
from interne.constants import *
from interne.Awele import Awele
from interne.CPUPlayer import CPUPlayer
from interne.algo_base import est_terminale
import numpy as np
def cpu_vs_cpu(niv_sud: int, niv_nord: int):
    """Play one full Awele game between two CPU players.

    Parameters
    ----------
    niv_sud, niv_nord : int
        search levels for the south and north CPU players

    Returns
    -------
    (winner, margin) : tuple[str, int]
        winner is "SUD", "NORD" or "DRAW"; margin is the winner's score
        advantage (0 for a draw)
    """
    position = Awele(TAILLE)
    sud = CPUPlayer(niv_sud, JOEUR_SUD)
    nord = CPUPlayer(niv_nord, JOEUR_NORD)
    while not est_terminale(position):
        coup = nord.jouer(position) if position.trait == JOEUR_NORD else sud.jouer(position)
        # a player with no legal move ends the game
        if not coup:
            break
        position.jouer(coup)
    if position.butin[JOEUR_SUD] > position.butin[JOEUR_NORD]:
        return "SUD", position.butin[JOEUR_SUD] - position.butin[JOEUR_NORD]
    elif position.butin[JOEUR_SUD] < position.butin[JOEUR_NORD]:
        # BUG FIX: margin was computed as NORD - NORD (always 0);
        # the winner's margin is NORD - SUD.
        return "NORD", position.butin[JOEUR_NORD] - position.butin[JOEUR_SUD]
    return "DRAW", 0
def results(n_partie, niv_sud, niv_nord):
    """Play n_partie CPU-vs-CPU games and print the win counts per side
    and the average score margin for the winner."""
    winners = []
    total_margin = 0
    for _ in tqdm(range(n_partie)):
        try:
            winner, margin = cpu_vs_cpu(niv_sud, niv_nord)
            winners.append(winner)
            total_margin += margin
        except KeyboardInterrupt:
            # allow aborting a long run without losing the tally
            break
    names, counts = np.unique(winners, return_counts=True)
    print("parties gagnees : ", names, counts)
    print("ecart moyen pour le gagnant :", total_margin/n_partie)
if __name__ == '__main__':
    # search levels for the south and north CPU players
    niv_sud = 2
    niv_nord = 3
    # play 30 games and report outcome statistics
    results(30, niv_sud, niv_nord)
"interne.algo_base.est_terminale",
"interne.CPUPlayer.CPUPlayer",
"numpy.unique",
"interne.Awele.Awele"
] | [((247, 260), 'interne.Awele.Awele', 'Awele', (['TAILLE'], {}), '(TAILLE)\n', (252, 260), False, 'from interne.Awele import Awele\n'), ((268, 297), 'interne.CPUPlayer.CPUPlayer', 'CPUPlayer', (['niv_sud', 'JOEUR_SUD'], {}), '(niv_sud, JOEUR_SUD)\n', (277, 297), False, 'from interne.CPUPlayer import CPUPlayer\n'), ((306, 337), 'interne.CPUPlayer.CPUPlayer', 'CPUPlayer', (['niv_nord', 'JOEUR_NORD'], {}), '(niv_nord, JOEUR_NORD)\n', (315, 337), False, 'from interne.CPUPlayer import CPUPlayer\n'), ((1036, 1074), 'numpy.unique', 'np.unique', (['winners'], {'return_counts': '(True)'}), '(winners, return_counts=True)\n', (1045, 1074), True, 'import numpy as np\n'), ((350, 373), 'interne.algo_base.est_terminale', 'est_terminale', (['position'], {}), '(position)\n', (363, 373), False, 'from interne.algo_base import est_terminale\n')] |
import numpy as np
def scalling(scallingFactor):
    """Return the 2x2 uniform scaling matrix with the given factor."""
    return np.identity(2) * scallingFactor
def rotation(angle):
    """Return the 2x2 rotation matrix for `angle` (radians):
    [[cos, -sin], [sin, cos]]."""
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s], [s, c]])
def projection(angle):
    """Return the 2x2 matrix projecting onto the line through the origin
    at `angle` radians: [[c*c, c*s], [c*s, s*s]]."""
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c * c, c * s], [c * s, s * s]])
def reflection(angle):
    """Return the 2x2 matrix reflecting across the line through the origin
    at `angle` radians.

    Computed as R = 2*P - I, where P is the projection onto the line
    (see `projection`): [[2c^2 - 1, 2sc], [2sc, 2s^2 - 1]] with
    c = cos(angle), s = sin(angle).
    """
    c = np.cos(angle)
    s = np.sin(angle)
    arr = np.empty([2, 2])
    # BUG FIX: the original wrote (2*c)-1 / (2*s)-1 instead of the squared
    # terms, assigned arr[1][0] twice, and never set arr[1][1] — leaving it
    # as uninitialized np.empty memory.
    arr[0][0] = 2 * c * c - 1
    arr[0][1] = 2 * s * c
    arr[1][0] = 2 * s * c
    arr[1][1] = 2 * s * s - 1
    return arr
| [
"numpy.empty",
"numpy.sin",
"numpy.identity",
"numpy.cos"
] | [((153, 169), 'numpy.empty', 'np.empty', (['[2, 2]'], {}), '([2, 2])\n', (161, 169), True, 'import numpy as np\n'), ((178, 191), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (184, 191), True, 'import numpy as np\n'), ((200, 213), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (206, 213), True, 'import numpy as np\n'), ((365, 381), 'numpy.empty', 'np.empty', (['[2, 2]'], {}), '([2, 2])\n', (373, 381), True, 'import numpy as np\n'), ((390, 403), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (396, 403), True, 'import numpy as np\n'), ((412, 425), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (418, 425), True, 'import numpy as np\n'), ((556, 572), 'numpy.empty', 'np.empty', (['[2, 2]'], {}), '([2, 2])\n', (564, 572), True, 'import numpy as np\n'), ((581, 594), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (587, 594), True, 'import numpy as np\n'), ((603, 616), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (609, 616), True, 'import numpy as np\n'), ((79, 93), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (90, 93), True, 'import numpy as np\n')] |
import tensorflow as tf
import time,logging,glob
import numpy as np
import h5py
logger = logging.getLogger(__name__)
config = None
def get_datasets(config_file):
global config
config = config_file
train = simple_dataset_from_glob(config['data']['train_glob'])
valid = simple_dataset_from_glob(config['data']['valid_glob'])
return train,valid
def simple_dataset_from_glob(glob_string):
    """Build a batched, shuffled, repeating tf.data.Dataset from the files
    matching `glob_string`, using settings in the module-level `config`."""
    # Glob for the input files.
    files = tf.data.Dataset.list_files(glob_string)
    # Shuffle and repeat at the input-file level.
    files = files.apply(
        tf.contrib.data.shuffle_and_repeat(buffer_size=10000,
                                           count=config['training']['epochs']))
    # Read and preprocess files in parallel.
    dataset = files.map(load_file_and_preprocess,
                        num_parallel_calls=config['data']['num_parallel_readers'])
    # Flatten the per-file tensors across file boundaries.
    dataset = dataset.flat_map(lambda *x: tf.data.Dataset.from_tensor_slices(x))
    dataset = dataset.batch(config['data']['batch_size'])
    # Prefetch so input preparation overlaps with training.
    # NOTE(review): 'prefectch_buffer_size' looks like a typo, but it must
    # match the key used in existing config files, so it is kept as-is.
    dataset = dataset.prefetch(buffer_size=config['data']['prefectch_buffer_size'])
    return dataset
def load_file_and_preprocess(path):
    """Wrap `wrapped_loader` as a TF op yielding (float32 images, int32 labels)."""
    return tf.py_func(wrapped_loader, [path], (tf.float32, tf.int32))
#def load_file_and_preprocess(path):
def wrapped_loader(path):
    """Read one HDF5 file and return a shuffled (images, labels) pair.

    `path` is the filename handed over by tf.py_func. Images come from the
    'raw' dataset as float32 and labels from the 'truth' dataset as int32;
    both are shuffled with the same permutation so pairs stay aligned.
    """
    # Bug fix: the file handle was never closed; a context manager releases
    # it as soon as the arrays have been materialized.
    with h5py.File(path, 'r') as hf:
        images = np.float32(hf['raw'])
        labels = np.int32(hf['truth'])
    shuffle_in_unison(images, labels)
    # could do some preprocessing here
    return (images, labels)
def shuffle_in_unison(a, b):
    """Shuffle arrays `a` and `b` in place with the same permutation.

    Snapshots the global RNG state before shuffling `a` and restores it
    before shuffling `b`, so both shuffles draw identical random numbers.
    """
    saved_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(saved_state)
    np.random.shuffle(b)
| [
"tensorflow.contrib.data.shuffle_and_repeat",
"h5py.File",
"numpy.random.shuffle",
"tensorflow.py_func",
"numpy.random.get_state",
"numpy.float32",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.random.set_state",
"tensorflow.data.Dataset.list_files",
"numpy.int32",
"logging.getLogger"
] | [((89, 116), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (106, 116), False, 'import time, logging, glob\n'), ((452, 491), 'tensorflow.data.Dataset.list_files', 'tf.data.Dataset.list_files', (['glob_string'], {}), '(glob_string)\n', (478, 491), True, 'import tensorflow as tf\n'), ((1201, 1259), 'tensorflow.py_func', 'tf.py_func', (['wrapped_loader', '[path]', '(tf.float32, tf.int32)'], {}), '(wrapped_loader, [path], (tf.float32, tf.int32))\n', (1211, 1259), True, 'import tensorflow as tf\n'), ((1348, 1368), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (1357, 1368), False, 'import h5py\n'), ((1402, 1420), 'numpy.float32', 'np.float32', (['images'], {}), '(images)\n', (1412, 1420), True, 'import numpy as np\n'), ((1457, 1473), 'numpy.int32', 'np.int32', (['labels'], {}), '(labels)\n', (1465, 1473), True, 'import numpy as np\n'), ((1627, 1648), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1646, 1648), True, 'import numpy as np\n'), ((1653, 1673), 'numpy.random.shuffle', 'np.random.shuffle', (['a'], {}), '(a)\n', (1670, 1673), True, 'import numpy as np\n'), ((1678, 1708), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (1697, 1708), True, 'import numpy as np\n'), ((1713, 1733), 'numpy.random.shuffle', 'np.random.shuffle', (['b'], {}), '(b)\n', (1730, 1733), True, 'import numpy as np\n'), ((569, 663), 'tensorflow.contrib.data.shuffle_and_repeat', 'tf.contrib.data.shuffle_and_repeat', ([], {'buffer_size': '(10000)', 'count': "config['training']['epochs']"}), "(buffer_size=10000, count=config[\n 'training']['epochs'])\n", (603, 663), True, 'import tensorflow as tf\n'), ((881, 918), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['x'], {}), '(x)\n', (915, 918), True, 'import tensorflow as tf\n')] |
import os
import sys
import pickle
import copy
from malpi.ui.DriveFormat import DriveFormat
from collections import defaultdict
import numpy as np
import json
import re
from donkeycar.parts.datastore import Tub
def atoi(text):
    """Return `text` as an int if it is all digits, otherwise unchanged."""
    return int(text) if text.isdigit() else text

def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    # Raw string fixes the invalid '\d' escape sequence, which raises a
    # DeprecationWarning (SyntaxWarning on newer Pythons).
    return [ atoi(c) for c in re.split(r'(\d+)', text) ]
class TubFormat(DriveFormat):
    """ A class to represent a DonkeyCar Tub drive on disc.
    Current assumptions:
        Tub records are 1 indexed and sequential with no gaps.
        We only care about editing steering and throttle.
        Steering and throttle should be clipped to -1/1.
    TODO: Change actions to be a dictionary of dictionaries, with the outer key being a record's real index.
          Images would need to be included in that (which is how the Tub class does it).
    """
    def __init__( self, path ):
        """Open the Tub at `path`. Raises IOError if `path` does not exist
        or is not a directory."""
        DriveFormat.__init__(self)
        if not os.path.exists(path):
            raise IOError( "TubFormat directory does not exist: {}".format( path ) )
        if not os.path.isdir(path):
            raise IOError( "TubFormat path is not a directory: {}".format( path ) )
        self.path = path
        self.tub = Tub(path)
        self.meta = self.tub.meta
        self.edit_list = set()  # real (1-based) indexes of records modified since last save
        self.shape = None  # image shape, discovered from the first record in _load()
        self.auxMeta = {}
        self.aux_clean = True  # False once auxiliary metadata needs writing back
        #(self.images, self.actions) = self._load(path)
    def _load( self, path, image_norm=True, progress=None ):
        """Read every record from the Tub into self.records / self.indexes.

        Auxiliary-data metadata is taken from an explicit 'auxiliary' entry
        in the Tub meta, or inferred when a 'sim/info' input is present.
        """
        records = {}
        indexes = self.tub.get_index(shuffled=False)
        for idx in indexes:
            rec = self.tub.get_record(idx)
            if self.shape is None:
                self.shape = rec['cam/image_array'].shape
            records[idx] = rec
        self.records = records
        self.indexes = indexes
        if 'auxiliary' in self.tub.meta:
            self.auxMeta = self.tub.meta['auxiliary']
        # "sim/info": {"done": true, "pos": [32.82384, 5.567082, -9.720116], "reward": -1.0, "hit": "none", "cte": 2.948259, "speed": 9.52644}
        if 'sim/info' in self.tub.meta['inputs']:
            self.auxMeta['done'] = { "name": "done", "type": "categorical", "categories": ["True", "False"]}
            self.auxMeta['reward'] = { "name": "reward", "type": "continuous"}
    def load( self, progress=None ):
        """Load all records from disc and mark the drive clean."""
        self._load(self.path, progress=progress)
        self.setClean()
    def save( self ):
        """Write pending changes back to disc: auxiliary metadata (if dirty),
        the exclude set, and every record listed in self.edit_list."""
        if not self.aux_clean:
            # update meta with new aux meta and write it out
            for name, aux in self.auxMeta.items():
                if name not in self.tub.meta['inputs']:
                    self.tub.meta['inputs'].append(name)
                    if 'continuous' == aux['type']:
                        aux_type = 'float'
                    elif 'categorical' == aux['type']:
                        aux_type = 'int'
                    else:
                        raise ValueError( "Unknown auxiliary data type: {}".format( aux['type'] ) )
                    self.tub.meta['types'].append(aux_type)
            self.tub.meta['auxiliary'] = self.auxMeta
            with open(self.tub.meta_path, 'w') as f:
                json.dump(self.tub.meta, f)
            self.aux_clean = True
        if self.isClean():
            return
        self.tub.write_exclude()
        for ix in self.edit_list:
            rec = self.records[ix]
            path = self.tub.get_json_record_path(ix)
            # Re-read the on-disc record so unrelated keys are preserved.
            try:
                with open(path, 'r') as fp:
                    old_rec = json.load(fp)
            except TypeError:
                print('troubles with record:', path)
            except FileNotFoundError:
                raise
            except:
                print("Unexpected error:", sys.exc_info()[0])
                raise
            # Copy over only the keys we might have modified
            chg_keys = ['user/angle', 'user/throttle', 'orig/angle', 'orig/throttle']
            for key in chg_keys:
                if key in rec:
                    old_rec[key] = rec[key]
            # Now do any auxiliary data
            for key in self.auxMeta.keys():
                if key in rec:
                    if rec[key] is None:
                        # None means "remove this auxiliary value".
                        old_rec.pop(key,None)
                    else:
                        #if self.auxMeta[key]['type'] == "categorical":
                        #    val = self.auxMeta[key]['categories'].index(rec[key])
                        #else:
                        #    val = rec[key]
                        old_rec[key] = rec[key]
            try:
                with open(path, 'w') as fp:
                    json.dump(old_rec, fp)
            except TypeError:
                print('troubles with record:', path)
            except FileNotFoundError:
                raise
            except:
                print("Unexpected error:", sys.exc_info()[0])
                raise
        self.edit_list.clear()
        self.setClean()
    def count( self ):
        """Return the number of loaded records."""
        return len(self.records)
    def imageForIndex( self, index ):
        """Return the camera image for 0-based `index`; excluded records are
        returned as a grayscale (channel-replicated) version."""
        idx = self.indexes[index]
        img = self.records[idx]['cam/image_array']
        # Tub exclusion is keyed by the 1-based record index.
        if self.tub.excluded(index + 1):
            # This grayed out image ends up looking ugly, can't figure out why
            tmp = img.mean(axis=-1,dtype=img.dtype,keepdims=False)
            tmp = np.repeat( tmp[:,:,np.newaxis], 3, axis=2 )
            return tmp
        return img
    def actionForIndex( self, index ):
        """Return [angle, throttle] for the record at 0-based `index`."""
        idx = self.indexes[index]
        rec = self.records[idx]
        angle, throttle = Tub.get_angle_throttle(rec)
        return [angle, throttle]
    def setActionForIndex( self, new_action, index ):
        """Store [angle, throttle] `new_action` on the record at `index`,
        preserving the original values under orig/* the first time."""
        idx = self.indexes[index]
        rec = self.records[idx]
        angle, throttle = Tub.get_angle_throttle(rec)
        old_action = [angle, throttle]
        if not np.array_equal( old_action, new_action ):
            if (rec["user/angle"] != new_action[0]) or (rec["user/throttle"] != new_action[1]):
                # Save the original values if not already done
                if "orig/angle" not in rec:
                    rec["orig/angle"] = rec["user/angle"]
                if "orig/throttle" not in rec:
                    rec["orig/throttle"] = rec["user/throttle"]
                rec["user/angle"] = new_action[0]
                rec["user/throttle"] = new_action[1]
                self.edit_list.add(idx)
                self.setDirty()
    def actionForKey(self,keybind,oldAction=None):
        """Map a WASD-style keypress to a new [angle, throttle] action,
        clipped to [-1, 1]; returns None for unknown keys."""
        oldAction = copy.copy(oldAction)
        if keybind == 'w':
            oldAction[1] += 0.1
        elif keybind == 'x':
            oldAction[1] -= 0.1
        elif keybind == 'a':
            oldAction[0] -= 0.1
        elif keybind == 'd':
            oldAction[0] += 0.1
        elif keybind == 's':
            oldAction[0] = 0.0
            oldAction[1] = 0.0
        else:
            return None
        return np.clip(oldAction, -1.0, 1.0)
    def deleteIndex( self, index ):
        """Toggle the exclude flag of the record at 0-based `index`."""
        if index >= 0 and index < self.count():
            index += 1
            if self.tub.excluded(index):
                self.tub.include_index(index)
            else:
                self.tub.exclude_index(index)
            self.setDirty()
    def isIndexDeleted(self, index):
        """Return True if the record at 0-based `index` is excluded."""
        if index >= 0 and index < self.count():
            index += 1
            return self.tub.excluded(index)
        return False
    def metaString(self):
        """Return the Tub metadata as one 'key: value' line per entry."""
        #{"inputs": ["cam/image_array", "user/angle", "user/throttle", "user/mode"], "start": 1550950724.8622544, "types": ["image_array", "float", "float", "str"]}
        ret = ""
        for k, v in self.tub.meta.items():
            ret += "{}: {}\n".format( k, v )
        return ret
    def actionStats(self):
        """Return Min/Max/Mean/StdDev over all actions as a defaultdict."""
        stats = defaultdict(int)
        if self.count() > 0:
            actions = []
            for i in range(self.count()):
                act = self.actionForIndex( i )
                actions.append(act)
            stats["Min"] = np.min(actions)
            stats["Max"] = np.max(actions)
            stats["Mean"] = np.mean(actions)
            stats["StdDev"] = np.std(actions)
        return stats
    def supportsAuxData(self):
        return True
    def getAuxMeta(self):
        return self.auxMeta
    def addAuxData(self, meta):
        """Register new auxiliary-data metadata; marks aux meta dirty."""
        # TODO Check to make sure the meta data is all the same
        if meta["name"] not in self.auxMeta:
            self.auxMeta[meta["name"]] = meta
        self.aux_clean = False
    def auxDataAtIndex(self, auxName, index):
        """Return the auxiliary value `auxName` for the record at `index`,
        mapping categorical indexes back to their category names; falls
        back to the record's 'sim/info' sub-dict when present."""
        if not auxName in self.auxMeta:
            return None
        idx = self.indexes[index]
        rec = self.records[idx]
        if auxName in rec:
            if rec[auxName] is not None and self.auxMeta[auxName]['type'] == "categorical":
                return self.auxMeta[auxName]['categories'][rec[auxName]]
            return rec[auxName]
        elif 'sim/info' in rec and auxName in rec['sim/info']:
            rec = rec['sim/info']
            if rec[auxName] is not None and self.auxMeta[auxName]['type'] == "categorical":
                return str(rec[auxName])
            return rec[auxName]
        return None
    def setAuxDataAtIndex(self, auxName, auxData, index):
        """Set auxiliary value `auxName` on the record at `index`; categorical
        values are stored as category indexes. Returns False for unknown names."""
        if not auxName in self.auxMeta:
            return False
        idx = self.indexes[index]
        rec = self.records[idx]
        if auxName not in rec or rec[auxName] != auxData:
            if auxData is not None and self.auxMeta[auxName]['type'] == "categorical":
                auxData = self.auxMeta[auxName]['categories'].index(auxData)
            rec[auxName] = auxData
            self.edit_list.add(idx)
            self.setDirty()
        return True
    @classmethod
    def canOpenFile( cls, path ):
        """Return True if `path` looks like a Tub directory (has meta.json)."""
        if not os.path.exists(path):
            return False
        if not os.path.isdir(path):
            return False
        meta_file = os.path.join( path, "meta.json" )
        if not os.path.exists(meta_file):
            return False
        #if os.path.splitext(path)[1] == ".tub":
        #    return True
        return True
    @staticmethod
    def defaultInputTypes():
        return [{"name":"Images", "type":"numpy image", "shape":(120,160,3)}]
    def inputTypes(self):
        """Input description, with the shape taken from loaded data if known."""
        res = TubFormat.defaultInputTypes()
        if self.shape is not None:
            res[0]["shape"] = self.shape
        return res
    @staticmethod
    def defaultOutputTypes():
        return [{"name":"Actions", "type":"continuous", "range":(-1.0,1.0)}]
    def outputTypes(self):
        """Output description: one continuous [-1,1] entry per action channel."""
        res = []
        for act in ["user/angle", "user/throttle"]:
            display_name = act.split("/")[1]
            res.append( {"name":display_name, "type":"continuous", "range":(-1.0,1.0)} )
        return res
""" Register this format with the base class """
DriveFormat.registerFormat( "TubFormat", TubFormat )
def runTests(args):
    """Run the DriveFormat self-test on TubFormat, then dump its I/O types."""
    tub_path = args.file[0]
    DriveFormat.testFormat( TubFormat, tub_path, [2.0, -5.5] )
    drive = TubFormat(tub_path)
    print( drive.inputTypes() )
    print( drive.outputTypes() )
def getOptions():
    """Parse the command-line options for the Tub-format self test."""
    import argparse
    ap = argparse.ArgumentParser(
        description='Test Tub file format.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('--file', nargs=1, metavar="File", default="test.tub",
                    help='Recorded Tub data file to open')
    ap.add_argument('--test_only', action="store_true", default=True,
                    help='run tests, then exit')
    return ap.parse_args()
if __name__ == '__main__':
    args = getOptions()
    # --test_only defaults to True, so running this module directly always
    # executes the self-tests and exits.
    if args.test_only:
        runTests(args)
        exit()
| [
"malpi.ui.DriveFormat.DriveFormat.registerFormat",
"argparse.ArgumentParser",
"numpy.clip",
"collections.defaultdict",
"numpy.mean",
"sys.exc_info",
"os.path.join",
"donkeycar.parts.datastore.Tub",
"numpy.std",
"os.path.exists",
"numpy.max",
"numpy.repeat",
"json.dump",
"re.split",
"donk... | [((11052, 11102), 'malpi.ui.DriveFormat.DriveFormat.registerFormat', 'DriveFormat.registerFormat', (['"""TubFormat"""', 'TubFormat'], {}), "('TubFormat', TubFormat)\n", (11078, 11102), False, 'from malpi.ui.DriveFormat import DriveFormat\n'), ((11130, 11190), 'malpi.ui.DriveFormat.DriveFormat.testFormat', 'DriveFormat.testFormat', (['TubFormat', 'args.file[0]', '[2.0, -5.5]'], {}), '(TubFormat, args.file[0], [2.0, -5.5])\n', (11152, 11190), False, 'from malpi.ui.DriveFormat import DriveFormat\n'), ((11336, 11456), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Tub file format."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Test Tub file format.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (11359, 11456), False, 'import argparse\n'), ((1109, 1135), 'malpi.ui.DriveFormat.DriveFormat.__init__', 'DriveFormat.__init__', (['self'], {}), '(self)\n', (1129, 1135), False, 'from malpi.ui.DriveFormat import DriveFormat\n'), ((1424, 1433), 'donkeycar.parts.datastore.Tub', 'Tub', (['path'], {}), '(path)\n', (1427, 1433), False, 'from donkeycar.parts.datastore import Tub\n'), ((5792, 5819), 'donkeycar.parts.datastore.Tub.get_angle_throttle', 'Tub.get_angle_throttle', (['rec'], {}), '(rec)\n', (5814, 5819), False, 'from donkeycar.parts.datastore import Tub\n'), ((6000, 6027), 'donkeycar.parts.datastore.Tub.get_angle_throttle', 'Tub.get_angle_throttle', (['rec'], {}), '(rec)\n', (6022, 6027), False, 'from donkeycar.parts.datastore import Tub\n'), ((6744, 6764), 'copy.copy', 'copy.copy', (['oldAction'], {}), '(oldAction)\n', (6753, 6764), False, 'import copy\n'), ((7151, 7180), 'numpy.clip', 'np.clip', (['oldAction', '(-1.0)', '(1.0)'], {}), '(oldAction, -1.0, 1.0)\n', (7158, 7180), True, 'import numpy as np\n'), ((8002, 8018), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8013, 8018), False, 'from collections import defaultdict\n'), ((10137, 10168), 
'os.path.join', 'os.path.join', (['path', '"""meta.json"""'], {}), "(path, 'meta.json')\n", (10149, 10168), False, 'import os\n'), ((512, 536), 're.split', 're.split', (['"""(\\\\d+)"""', 'text'], {}), "('(\\\\d+)', text)\n", (520, 536), False, 'import re\n'), ((1152, 1172), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1166, 1172), False, 'import os\n'), ((1274, 1293), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1287, 1293), False, 'import os\n'), ((5574, 5617), 'numpy.repeat', 'np.repeat', (['tmp[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(tmp[:, :, np.newaxis], 3, axis=2)\n', (5583, 5617), True, 'import numpy as np\n'), ((6082, 6120), 'numpy.array_equal', 'np.array_equal', (['old_action', 'new_action'], {}), '(old_action, new_action)\n', (6096, 6120), True, 'import numpy as np\n'), ((8225, 8240), 'numpy.min', 'np.min', (['actions'], {}), '(actions)\n', (8231, 8240), True, 'import numpy as np\n'), ((8268, 8283), 'numpy.max', 'np.max', (['actions'], {}), '(actions)\n', (8274, 8283), True, 'import numpy as np\n'), ((8312, 8328), 'numpy.mean', 'np.mean', (['actions'], {}), '(actions)\n', (8319, 8328), True, 'import numpy as np\n'), ((8359, 8374), 'numpy.std', 'np.std', (['actions'], {}), '(actions)\n', (8365, 8374), True, 'import numpy as np\n'), ((10008, 10028), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10022, 10028), False, 'import os\n'), ((10070, 10089), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (10083, 10089), False, 'import os\n'), ((10186, 10211), 'os.path.exists', 'os.path.exists', (['meta_file'], {}), '(meta_file)\n', (10200, 10211), False, 'import os\n'), ((3392, 3419), 'json.dump', 'json.dump', (['self.tub.meta', 'f'], {}), '(self.tub.meta, f)\n', (3401, 3419), False, 'import json\n'), ((3749, 3762), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3758, 3762), False, 'import json\n'), ((4874, 4896), 'json.dump', 'json.dump', (['old_rec', 'fp'], {}), '(old_rec, fp)\n', 
(4883, 4896), False, 'import json\n'), ((3969, 3983), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3981, 3983), False, 'import sys\n'), ((5103, 5117), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5115, 5117), False, 'import sys\n')] |
import math
import textwrap
import sys
import pytest
import threading
import traceback
import time
import numpy as np
from numpy.testing import IS_PYPY
from . import util
class TestF77Callback(util.F2PyTest):
    """Tests of Fortran-77 callback handling in the f2py-generated wrapper
    built from callback/foo.f (module exposed as self.module by F2PyTest)."""
    sources = [util.getpath("tests", "src", "callback", "foo.f")]
    @pytest.mark.parametrize("name", "t,t2".split(","))
    def test_all(self, name):
        self.check_function(name)
    @pytest.mark.xfail(IS_PYPY,
                       reason="PyPy cannot modify tp_doc after PyType_Ready")
    def test_docstring(self):
        # The expected text must match the wrapper's generated docstring exactly.
        expected = textwrap.dedent("""\
        a = t(fun,[fun_extra_args])
        Wrapper for ``t``.
        Parameters
        ----------
        fun : call-back function
        Other Parameters
        ----------------
        fun_extra_args : input tuple, optional
            Default: ()
        Returns
        -------
        a : int
        Notes
        -----
        Call-back functions::
            def fun(): return a
        Return objects:
            a : int
        """)
        assert self.module.t.__doc__ == expected
    def check_function(self, name):
        """Exercise wrapper `name` with lambdas, bound/unbound functions,
        fortran-compiled callbacks, C pointers, and callable objects."""
        t = getattr(self.module, name)
        r = t(lambda: 4)
        assert r == 4
        r = t(lambda a: 5, fun_extra_args=(6, ))
        assert r == 5
        r = t(lambda a: a, fun_extra_args=(6, ))
        assert r == 6
        r = t(lambda a: 5 + a, fun_extra_args=(7, ))
        assert r == 12
        r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
        assert r == 180
        r = t(math.degrees, fun_extra_args=(math.pi, ))
        assert r == 180
        r = t(self.module.func, fun_extra_args=(6, ))
        assert r == 17
        r = t(self.module.func0)
        assert r == 11
        r = t(self.module.func0._cpointer)
        assert r == 11
        class A:
            def __call__(self):
                return 7
            def mth(self):
                return 9
        a = A()
        r = t(a)
        assert r == 7
        r = t(a.mth)
        assert r == 9
    @pytest.mark.skipif(sys.platform == "win32",
                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
    def test_string_callback(self):
        def callback(code):
            if code == "r":
                return 0
            else:
                return 1
        f = getattr(self.module, "string_callback")
        r = f(callback)
        assert r == 0
    @pytest.mark.skipif(sys.platform == "win32",
                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
    def test_string_callback_array(self):
        # See gh-10027
        cu = np.zeros((1, 8), "S1")
        def callback(cu, lencu):
            if cu.shape != (lencu, 8):
                return 1
            if cu.dtype != "S1":
                return 2
            if not np.all(cu == b""):
                return 3
            return 0
        f = getattr(self.module, "string_callback_array")
        res = f(callback, cu, len(cu))
        assert res == 0
    def test_threadsafety(self):
        # Segfaults if the callback handling is not threadsafe
        errors = []
        def cb():
            # Sleep here to make it more likely for another thread
            # to call their callback at the same time.
            time.sleep(1e-3)
            # Check reentrancy
            r = self.module.t(lambda: 123)
            assert r == 123
            return 42
        def runner(name):
            try:
                for j in range(50):
                    r = self.module.t(cb)
                    assert r == 42
                    self.check_function(name)
            except Exception:
                errors.append(traceback.format_exc())
        threads = [
            threading.Thread(target=runner, args=(arg, ))
            for arg in ("t", "t2") for n in range(20)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        errors = "\n\n".join(errors)
        if errors:
            raise AssertionError(errors)
    def test_hidden_callback(self):
        # Callbacks named global_f are resolved through a module attribute;
        # calling before it is defined must raise.
        try:
            self.module.hidden_callback(2)
        except Exception as msg:
            assert str(msg).startswith("Callback global_f not defined")
        try:
            self.module.hidden_callback2(2)
        except Exception as msg:
            assert str(msg).startswith("cb: Callback global_f not defined")
        self.module.global_f = lambda x: x + 1
        r = self.module.hidden_callback(2)
        assert r == 3
        self.module.global_f = lambda x: x + 2
        r = self.module.hidden_callback(2)
        assert r == 4
        del self.module.global_f
        try:
            self.module.hidden_callback(2)
        except Exception as msg:
            assert str(msg).startswith("Callback global_f not defined")
        self.module.global_f = lambda x=0: x + 3
        r = self.module.hidden_callback(2)
        assert r == 5
        # reproducer of gh18341
        r = self.module.hidden_callback2(2)
        assert r == 3
class TestF77CallbackPythonTLS(TestF77Callback):
    """
    Re-run the F77 callback tests using Python thread-local storage for the
    callback pointer instead of the compiler-provided TLS.
    """
    options = ["-DF2PY_USE_PYTHON_TLS"]
class TestF90Callback(util.F2PyTest):
    """Regression test for gh-17797: F90 callback taking an array argument."""
    sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]
    def test_gh17797(self):
        def add_123(x):
            return x + 123
        values = np.array([1, 2, 3], dtype=np.int64)
        result = self.module.gh17797(add_123, values)
        assert result == 123 + 1 + 2 + 3
class TestGH18335(util.F2PyTest):
    """The reproduction of the reported issue requires specific input that
    extensions may break the issue conditions, so the reproducer is
    implemented as a separate test class. Do not extend this test with
    other tests!
    """
    sources = [util.getpath("tests", "src", "callback", "gh18335.f90")]
    def test_gh18335(self):
        def bump_first(x):
            x[0] += 1
        buf = np.array([1, 2, 3], dtype=np.int8)
        result = self.module.gh18335(bump_first)
        assert result == 123 + 1
| [
"textwrap.dedent",
"threading.Thread",
"numpy.zeros",
"time.sleep",
"pytest.mark.skipif",
"numpy.array",
"traceback.format_exc",
"math.degrees",
"numpy.all",
"pytest.mark.xfail"
] | [((405, 491), 'pytest.mark.xfail', 'pytest.mark.xfail', (['IS_PYPY'], {'reason': '"""PyPy cannot modify tp_doc after PyType_Ready"""'}), "(IS_PYPY, reason=\n 'PyPy cannot modify tp_doc after PyType_Ready')\n", (422, 491), False, 'import pytest\n'), ((2040, 2140), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform == 'win32')"], {'reason': '"""Fails with MinGW64 Gfortran (Issue #9673)"""'}), "(sys.platform == 'win32', reason=\n 'Fails with MinGW64 Gfortran (Issue #9673)')\n", (2058, 2140), False, 'import pytest\n'), ((2425, 2525), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform == 'win32')"], {'reason': '"""Fails with MinGW64 Gfortran (Issue #9673)"""'}), "(sys.platform == 'win32', reason=\n 'Fails with MinGW64 Gfortran (Issue #9673)')\n", (2443, 2525), False, 'import pytest\n'), ((559, 1051), 'textwrap.dedent', 'textwrap.dedent', (['""" a = t(fun,[fun_extra_args])\n\n Wrapper for ``t``.\n\n Parameters\n ----------\n fun : call-back function\n\n Other Parameters\n ----------------\n fun_extra_args : input tuple, optional\n Default: ()\n\n Returns\n -------\n a : int\n\n Notes\n -----\n Call-back functions::\n\n def fun(): return a\n Return objects:\n a : int\n """'], {}), '(\n """ a = t(fun,[fun_extra_args])\n\n Wrapper for ``t``.\n\n Parameters\n ----------\n fun : call-back function\n\n Other Parameters\n ----------------\n fun_extra_args : input tuple, optional\n Default: ()\n\n Returns\n -------\n a : int\n\n Notes\n -----\n Call-back functions::\n\n def fun(): return a\n Return objects:\n a : int\n """\n )\n', (574, 1051), False, 'import textwrap\n'), ((2623, 2645), 'numpy.zeros', 'np.zeros', (['(1, 8)', '"""S1"""'], {}), "((1, 8), 'S1')\n", (2631, 2645), True, 'import numpy as np\n'), ((5443, 5478), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int64'}), '([1, 2, 3], dtype=np.int64)\n', (5451, 5478), True, 'import numpy as np\n'), ((5988, 6022), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int8'}), '([1, 2, 3], 
dtype=np.int8)\n', (5996, 6022), True, 'import numpy as np\n'), ((3279, 3296), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (3289, 3296), False, 'import time\n'), ((3742, 3786), 'threading.Thread', 'threading.Thread', ([], {'target': 'runner', 'args': '(arg,)'}), '(target=runner, args=(arg,))\n', (3758, 3786), False, 'import threading\n'), ((1458, 1473), 'math.degrees', 'math.degrees', (['a'], {}), '(a)\n', (1470, 1473), False, 'import math\n'), ((2821, 2838), 'numpy.all', 'np.all', (["(cu == b'')"], {}), "(cu == b'')\n", (2827, 2838), True, 'import numpy as np\n'), ((3685, 3707), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3705, 3707), False, 'import traceback\n')] |
import numpy as np
import cv2
import matplotlib.image as mpimg
def perspect_transform(img, src, dst):
    """Warp `img` with the perspective transform mapping `src` points to
    `dst` points; output keeps the input's width and height."""
    transform = cv2.getPerspectiveTransform(src, dst)
    height, width = img.shape[0], img.shape[1]
    return cv2.warpPerspective(img, transform, (width, height))
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Return a single-channel binary mask of `img`: 1 where every RGB
    channel is strictly above the matching `rgb_thresh` value, else 0."""
    mask = np.zeros_like(img[:, :, 0])
    bright = ((img[:, :, 0] > rgb_thresh[0])
              & (img[:, :, 1] > rgb_thresh[1])
              & (img[:, :, 2] > rgb_thresh[2]))
    mask[bright] = 1
    return mask
# Sample frame used only to size the destination grid below.
image = mpimg.imread('sample.jpg')
# Define calibration box in source (actual) and destination (desired) coordinates
# These source and destination points are defined to warp the image
# to a grid where each 10x10 pixel square represents 1 square meter
dst_size = 5
# Set a bottom offset to account for the fact that the bottom of the image
# is not the position of the rover but a bit in front of it
bottom_offset = 6
# source: the four corners of the calibration square as seen in the camera image.
source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
# destination: the same square mapped to a 2*dst_size pixel box centered
# horizontally, sitting bottom_offset pixels above the image bottom.
destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
                  [image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
                  [image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset],
                  [image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset],
                  ])
| [
"matplotlib.image.imread",
"cv2.warpPerspective",
"numpy.zeros_like",
"cv2.getPerspectiveTransform",
"numpy.float32"
] | [((1073, 1099), 'matplotlib.image.imread', 'mpimg.imread', (['"""sample.jpg"""'], {}), "('sample.jpg')\n", (1085, 1099), True, 'import matplotlib.image as mpimg\n'), ((1495, 1552), 'numpy.float32', 'np.float32', (['[[14, 140], [301, 140], [200, 96], [118, 96]]'], {}), '([[14, 140], [301, 140], [200, 96], [118, 96]])\n', (1505, 1552), True, 'import numpy as np\n'), ((1566, 1886), 'numpy.float32', 'np.float32', (['[[image.shape[1] / 2 - dst_size, image.shape[0] - bottom_offset], [image.\n shape[1] / 2 + dst_size, image.shape[0] - bottom_offset], [image.shape[\n 1] / 2 + dst_size, image.shape[0] - 2 * dst_size - bottom_offset], [\n image.shape[1] / 2 - dst_size, image.shape[0] - 2 * dst_size -\n bottom_offset]]'], {}), '([[image.shape[1] / 2 - dst_size, image.shape[0] - bottom_offset],\n [image.shape[1] / 2 + dst_size, image.shape[0] - bottom_offset], [image\n .shape[1] / 2 + dst_size, image.shape[0] - 2 * dst_size - bottom_offset\n ], [image.shape[1] / 2 - dst_size, image.shape[0] - 2 * dst_size -\n bottom_offset]])\n', (1576, 1886), True, 'import numpy as np\n'), ((174, 211), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (201, 211), False, 'import cv2\n'), ((306, 363), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(img.shape[1], img.shape[0])'], {}), '(img, M, (img.shape[1], img.shape[0]))\n', (325, 363), False, 'import cv2\n'), ((566, 593), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (579, 593), True, 'import numpy as np\n')] |
import numpy as np
from numpy.random import seed
from sklearn.model_selection import train_test_split
import pandas as pd
# pca_dims = 50
# TESTING_DATASET_SIZE = 0.3
# the path of HAPT_Data_Set dir
ROOT = "Acti-tracker Data Set/"
# config path and intermediate files
# REDUCED_FEATURE_FILE = str(pca_dims) + "reduced_features.txt"
REDUCED_FEATURE_FILE = "reduced_features" #.txt"
NORMALIZED_FEATURE_FILE = "normalized_features" #.txt"
LABEL_FILE = "labels.txt"
MY_LABELS = ['Walking', 'Jogging', 'Sitting', 'Standing', 'Upstairs', 'Downstairs']
def main(pca_dims, rate, test_data_ratio):
    """Load the actitracker feature file and split it into train/test sets.

    Parameters
    ----------
    pca_dims, rate : unused here; kept so existing callers that configure
        the wider pipeline keep working.
    test_data_ratio : float
        Fraction of samples reserved for the test split.

    Returns
    -------
    X_train, X_test, y_train, y_test
    """
    # Bug fix: dropped the unused local `moderated = str(int(20 // rate))`,
    # which also raised ZeroDivisionError for rate == 0, and removed the
    # large commented-out legacy loader.
    print("start...............................")
    datafile = 'actitracker-new.txt'
    data = pd.read_csv(datafile, sep=" ", header=None)
    # Column 0 holds the activity label; the remaining columns are features.
    without_labels = data.iloc[:, 1:]  # remove user id and experiment id
    labels = data.iloc[:, 0]
    seed(2020)  # make the (global-RNG based) split reproducible
    # split data
    X_train, X_test, y_train, y_test = train_test_split(
        without_labels, labels, test_size=test_data_ratio)
    # plot_label_distribution(y_train)
    return X_train, X_test, y_train, y_test
#
# if __name__ == '__main__':
# X_train, X_test, y_train, y_test = main(30, 1, 0.3)
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.random.seed"
] | [((1446, 1489), 'pandas.read_csv', 'pd.read_csv', (['datafile'], {'sep': '""" """', 'header': 'None'}), "(datafile, sep=' ', header=None)\n", (1457, 1489), True, 'import pandas as pd\n'), ((1643, 1653), 'numpy.random.seed', 'seed', (['(2020)'], {}), '(2020)\n', (1647, 1653), False, 'from numpy.random import seed\n'), ((1712, 1779), 'sklearn.model_selection.train_test_split', 'train_test_split', (['without_labels', 'labels'], {'test_size': 'test_data_ratio'}), '(without_labels, labels, test_size=test_data_ratio)\n', (1728, 1779), False, 'from sklearn.model_selection import train_test_split\n')] |
#!/usr/bin/env python3
# Copy the first `-n` feature records from one streamed .npy file to another.
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
parser.add_argument('-n', type=int, default=10)
args = parser.parse_args()
with open(args.input, 'rb') as input_file, open(args.output, 'wb') as output_file:
    # The stream starts with a (count, dim) header array, followed by one
    # feature array per record.
    n, dim = np.load(input_file)
    n = min(args.n, n)  # never copy more records than the file holds
    np.save(output_file, (n, dim))
    for _ in range(n):
        feats = np.load(input_file)
        np.save(output_file, feats)
| [
"numpy.load",
"numpy.save",
"argparse.ArgumentParser"
] | [((68, 93), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (91, 93), False, 'import argparse\n'), ((326, 345), 'numpy.load', 'np.load', (['input_file'], {}), '(input_file)\n', (333, 345), True, 'import numpy as np\n'), ((373, 403), 'numpy.save', 'np.save', (['output_file', '(n, dim)'], {}), '(output_file, (n, dim))\n', (380, 403), True, 'import numpy as np\n'), ((443, 462), 'numpy.load', 'np.load', (['input_file'], {}), '(input_file)\n', (450, 462), True, 'import numpy as np\n'), ((471, 498), 'numpy.save', 'np.save', (['output_file', 'feats'], {}), '(output_file, feats)\n', (478, 498), True, 'import numpy as np\n')] |
# uniform content loss + adaptive threshold + per_class_input + recursive G
# improvement upon cqf37
# Use learning to see in the dark CNN
# test BIT Fu Ying noise model, who claims that synthetic noise can outperform real noise
from __future__ import division
import os, time, scipy.io
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import rawpy
import glob
# Ground-truth (long-exposure) Sony raw files from the SID dataset.
gt_dir = '../../data/dataset/learnDark/Sony/Sony/long/'
checkpoint_dir = '../../workplace/sid_bit_test/'
result_dir = '../../workplace/sid_bit_test/'
# get train IDs
train_fns = glob.glob(gt_dir + '0*.ARW')
# The first five characters of each filename encode the scene id.
train_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]
ps = 512  # patch size for training
save_freq = 50
DEBUG = 0
if DEBUG == 1:
    # Tiny run for debugging: save often, train on only five scenes.
    save_freq = 2
    train_ids = train_ids[0:5]
def seeraw(some_patch):
    """Convert a packed Bayer batch of shape (1, H, W, 4) to a viewable (H, W, 3) image.

    Channel layout of the packed input is (R, G1, B, G2); the two green
    planes are averaged into a single green channel.
    """
    packed = some_patch[0, :, :, :]
    height = packed.shape[0]
    width = packed.shape[1]
    rgb = np.zeros([height, width, 3])
    rgb[:, :, 0] = packed[:, :, 0]                           # red
    rgb[:, :, 1] = (packed[:, :, 1] + packed[:, :, 3]) / 2.  # mean of the two greens
    rgb[:, :, 2] = packed[:, :, 2]                           # blue
    return rgb
def add_noise(img):
    """Synthesize sensor noise on a clean packed raw image (values roughly in [0, 1]).

    Three noise components are summed onto the input:
      1. shot noise: Poisson on the 8-bit-scaled signal, amplified by a gain K
         with log K ~ Uniform(0.8, 2.6);
      2. read noise: Tukey-lambda distributed, log-scale linearly tied to
         log K plus a small uniform jitter;
      3. row noise: one random offset per image row, deviation tied to log K.
    NOTE(review): `zmf` is a local helper module not visible in this file; the
    semantics of zmf.uniform/poisson/tukeylambda/normal are assumed from their
    names — confirm against that module's source.
    """
    im01 = img
    import zmf
    import math
    # 1_Poisson
    logK = zmf.uniform(0.8,2.6)
    K = math.exp(logK)
    # print("POISSON: K =",K)
    # Poisson sampling is done in 0..255 units, recentred, rescaled, then gained.
    noise_1 = (zmf.poisson(im01*255,im01.shape)-im01*255) / 255. * K
    # zmf.imshow(noise_1+im01)
    # 2_Tukey_Lambda
    lam = -0.06
    mean = 0.
    logscale = (5./6.) * logK + (0.6-5./6.*1.4) + zmf.uniform(-0.05,0.05)
    scale = math.exp(logscale)
    # print("TL: lam = -0.26, mean = 0, scale =",scale)
    noise_2 = zmf.tukeylambda(lam,mean,scale/20.,im01.shape) # divide by 20 is not in the paper, but if not, the result noise is too strong
    # zmf.imshow(noise_1 + noise_2 + im01)
    # 3_Row
    noise_3 = np.zeros(im01.shape)
    for rgbg in range(noise_3.shape[3]):
        for row in range(noise_3.shape[1]):
            logdev = 0.75 * logK - 2.2 + zmf.uniform(-0.375,0.375)
            dev = math.exp(logdev)
            row_shift = zmf.normal(0,dev)
            # print(row_shift)
            noise_3[:,row,:,rgbg] = row_shift/10. # divide by 10 is not in the paper, but if not, the result noise is too strong
    # show
    # print(logK, logscale)
    # print(logK, logdev)
    # zmf.imshow(noise_1 + noise_2 + noise_3 + im01)
    return noise_1 + noise_2 + noise_3 + im01
def lrelu(x):
    """Leaky ReLU activation: identity for x >= 0, slope 0.2 for x < 0."""
    leak = 0.2
    return tf.maximum(leak * x, x)
def upsample_and_concat(x1, x2, output_channels, in_channels):
    """Upsample x1 by 2x with a learned transposed conv and concatenate x2.

    Implements a U-Net skip connection: x1 (decoder feature, `in_channels`)
    is upsampled to x2's spatial size, then concatenated with x2 (encoder
    feature, `output_channels`) along the channel axis, giving
    2 * output_channels channels.
    """
    pool_size = 2
    # 2x2 learned upsampling kernel, one per call (not shared between stages).
    deconv_filter = tf.Variable(tf.truncated_normal([pool_size, pool_size, output_channels, in_channels], stddev=0.02))
    # Output spatial shape is taken dynamically from the skip tensor x2.
    deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2), strides=[1, pool_size, pool_size, 1])
    deconv_output = tf.concat([deconv, x2], 3)
    # Pin the static channel count so later conv layers can build their kernels.
    deconv_output.set_shape([None, None, None, output_channels * 2])
    return deconv_output
def network(input):
    """U-Net style encoder/decoder for raw-to-RGB restoration.

    Encoder: five 2-conv blocks (32 -> 512 channels) with 2x2 max-pooling
    between them.  Decoder: four upsample_and_concat stages mirroring the
    encoder with skip connections.  A final 1x1 conv to 12 channels plus
    depth_to_space(2) produces a 3-channel image at twice the input
    resolution.  Note: the parameter name `input` shadows the builtin.
    """
    conv1 = slim.conv2d(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')
    conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')
    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')
    conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')
    conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')
    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')
    conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')
    conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')
    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')
    conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')
    conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')
    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')
    conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')
    conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')
    # Decoder with skip connections back to the matching encoder block.
    up6 = upsample_and_concat(conv5, conv4, 256, 512)
    conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')
    conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')
    up7 = upsample_and_concat(conv6, conv3, 128, 256)
    conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')
    conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')
    up8 = upsample_and_concat(conv7, conv2, 64, 128)
    conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_1')
    conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')
    up9 = upsample_and_concat(conv8, conv1, 32, 64)
    conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')
    conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')
    # 12 channels -> depth_to_space(2) gives 3 channels at 2x spatial size.
    conv10 = slim.conv2d(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')
    out = tf.depth_to_space(conv10, 2)
    return out
def pack_raw(raw):
    """Pack a Bayer mosaic from a rawpy object into a 4-channel float array.

    Subtracts the sensor black level (512), normalizes against the 14-bit
    white level (16383), and regroups every 2x2 Bayer cell into four
    channels, producing an (H/2, W/2, 4) array.
    """
    bayer = raw.raw_image_visible.astype(np.float32)
    # Remove the black level and scale into [0, 1].
    bayer = np.maximum(bayer - 512, 0) / (16383 - 512)
    bayer = np.expand_dims(bayer, axis=2)
    h = bayer.shape[0]
    w = bayer.shape[1]
    # One plane per position in the 2x2 Bayer cell.
    planes = (
        bayer[0:h:2, 0:w:2, :],
        bayer[0:h:2, 1:w:2, :],
        bayer[1:h:2, 1:w:2, :],
        bayer[1:h:2, 0:w:2, :],
    )
    return np.concatenate(planes, axis=2)
# ---- Build the TF graph: placeholders, U-Net, L1 loss, Adam optimizer ----
sess = tf.Session()
in_image = tf.placeholder(tf.float32, [None, None, None, 4])
gt_image = tf.placeholder(tf.float32, [None, None, None, 3])
out_image = network(in_image)
G_loss = tf.reduce_mean(tf.abs(out_image - gt_image))
t_vars = tf.trainable_variables()
lr = tf.placeholder(tf.float32)
G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# Resume from the latest checkpoint if one exists.
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt:
    print('loaded ' + ckpt.model_checkpoint_path)
    saver.restore(sess, ckpt.model_checkpoint_path)
# Raw data takes long time to load. Keep them in memory after loaded.
gt_images = [None] * 6000
input_images = {}
input_images['300'] = [None] * len(train_ids)
input_images['250'] = [None] * len(train_ids)
input_images['100'] = [None] * len(train_ids)
g_loss = np.zeros((5000, 1))
# Resume epoch counting from the last result folder written (names end in a
# 4-digit epoch number).
allfolders = glob.glob(result_dir + '*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))
learning_rate = 1e-4
for epoch in range(lastepoch, 4001):
    # Skip epochs whose output folder already exists (already trained).
    if os.path.isdir(result_dir + '%04d' % epoch):
        continue
    cnt = 0
    if epoch > 2000:
        learning_rate = 1e-5
    for ind in np.random.permutation(len(train_ids)):
        # get the path from image id
        train_id = train_ids[ind]
        in_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)
        in_path = in_files[0]
        in_fn = os.path.basename(in_path)
        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)
        gt_path = gt_files[0]
        gt_fn = os.path.basename(gt_path)
        # in_exposure = float(in_fn[9:-5])
        # gt_exposure = float(gt_fn[9:-5])
        ratio = 1. #min(gt_exposure / in_exposure, 300)
        st = time.time()
        cnt += 1
        # Lazy-load each scene once: the noisy input is synthesized from the
        # clean long-exposure raw (this run tests the synthetic noise model).
        if input_images['300'][ind] is None:
            gt_raw = rawpy.imread(gt_path)
            orig_im = np.expand_dims(pack_raw(gt_raw), axis=0)
            # add noise
            noisy_im = add_noise(orig_im)
            # print(noisy_im.shape) (1,1424,2128,4)
            # print(np.mean(noisy_im)) 0.23
            # done
            input_images['300'][ind] = noisy_im
            im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
            gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)
        # crop
        # Random ps x ps patch from the packed input; the demosaiced ground
        # truth is at 2x resolution, so its crop window is doubled.
        H = input_images['300'][ind].shape[1]
        W = input_images['300'][ind].shape[2]
        xx = np.random.randint(0, W - ps)
        yy = np.random.randint(0, H - ps)
        input_patch = input_images['300'][ind][:, yy:yy + ps, xx:xx + ps, :]
        gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]
        if np.random.randint(2, size=1)[0] == 1:  # random flip
            input_patch = np.flip(input_patch, axis=1)
            gt_patch = np.flip(gt_patch, axis=1)
        if np.random.randint(2, size=1)[0] == 1:
            input_patch = np.flip(input_patch, axis=2)
            gt_patch = np.flip(gt_patch, axis=2)
        if np.random.randint(2, size=1)[0] == 1:  # random transpose
            input_patch = np.transpose(input_patch, (0, 2, 1, 3))
            gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
        # Synthetic noise can push values above 1; clip before feeding.
        input_patch = np.minimum(input_patch, 1.0)
        _, G_current, output = sess.run([G_opt, G_loss, out_image],
                                        feed_dict={in_image: input_patch, gt_image: gt_patch, lr: learning_rate})
        output = np.minimum(np.maximum(output, 0), 1)
        g_loss[ind] = G_current
        # Report the running mean over scenes that have been visited so far.
        print("%d %d Loss=%.3f Time=%.3f" % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), time.time() - st))
        # Periodically save a side-by-side preview: input | ground truth | output.
        if epoch % save_freq == 0:
            if not os.path.isdir(result_dir + '%04d' % epoch):
                os.makedirs(result_dir + '%04d' % epoch)
            input2 = np.concatenate((seeraw(input_patch),seeraw(input_patch)),axis=0)
            #print(input2.shape)
            #print(gt_patch[0,:,:,:].shape)
            temp = np.concatenate((input2,gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)
            scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(
                result_dir + '%04d/%05d_00_train_input_gt_output.jpg' % (epoch, train_id))
    saver.save(sess, checkpoint_dir + 'model.ckpt')
| [
"numpy.maximum",
"tensorflow.trainable_variables",
"tensorflow.maximum",
"zmf.tukeylambda",
"numpy.random.randint",
"glob.glob",
"tensorflow.truncated_normal",
"tensorflow.contrib.slim.conv2d",
"zmf.poisson",
"tensorflow.abs",
"zmf.normal",
"numpy.transpose",
"tensorflow.concat",
"tensorfl... | [((575, 603), 'glob.glob', 'glob.glob', (["(gt_dir + '0*.ARW')"], {}), "(gt_dir + '0*.ARW')\n", (584, 603), False, 'import glob\n'), ((5678, 5690), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5688, 5690), True, 'import tensorflow as tf\n'), ((5702, 5751), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 4]'], {}), '(tf.float32, [None, None, None, 4])\n', (5716, 5751), True, 'import tensorflow as tf\n'), ((5763, 5812), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 3]'], {}), '(tf.float32, [None, None, None, 3])\n', (5777, 5812), True, 'import tensorflow as tf\n'), ((5908, 5932), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5930, 5932), True, 'import tensorflow as tf\n'), ((5938, 5964), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5952, 5964), True, 'import tensorflow as tf\n'), ((6040, 6056), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6054, 6056), True, 'import tensorflow as tf\n'), ((6108, 6153), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6137, 6153), True, 'import tensorflow as tf\n'), ((6528, 6547), 'numpy.zeros', 'np.zeros', (['(5000, 1)'], {}), '((5000, 1))\n', (6536, 6547), True, 'import numpy as np\n'), ((6562, 6590), 'glob.glob', 'glob.glob', (["(result_dir + '*0')"], {}), "(result_dir + '*0')\n", (6571, 6590), False, 'import glob\n'), ((1033, 1052), 'numpy.zeros', 'np.zeros', (['[H, W, 3]'], {}), '([H, W, 3])\n', (1041, 1052), True, 'import numpy as np\n'), ((1230, 1251), 'zmf.uniform', 'zmf.uniform', (['(0.8)', '(2.6)'], {}), '(0.8, 2.6)\n', (1241, 1251), False, 'import zmf\n'), ((1259, 1273), 'math.exp', 'math.exp', (['logK'], {}), '(logK)\n', (1267, 1273), False, 'import math\n'), ((1542, 1560), 'math.exp', 'math.exp', (['logscale'], {}), '(logscale)\n', (1550, 1560), False, 'import math\n'), 
((1631, 1683), 'zmf.tukeylambda', 'zmf.tukeylambda', (['lam', 'mean', '(scale / 20.0)', 'im01.shape'], {}), '(lam, mean, scale / 20.0, im01.shape)\n', (1646, 1683), False, 'import zmf\n'), ((1827, 1847), 'numpy.zeros', 'np.zeros', (['im01.shape'], {}), '(im01.shape)\n', (1835, 1847), True, 'import numpy as np\n'), ((2428, 2450), 'tensorflow.maximum', 'tf.maximum', (['(x * 0.2)', 'x'], {}), '(x * 0.2, x)\n', (2438, 2450), True, 'import tensorflow as tf\n'), ((2782, 2808), 'tensorflow.concat', 'tf.concat', (['[deconv, x2]', '(3)'], {}), '([deconv, x2], 3)\n', (2791, 2808), True, 'import tensorflow as tf\n'), ((2938, 3016), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['input', '(32)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv1_1"""'}), "(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')\n", (2949, 3016), True, 'import tensorflow.contrib.slim as slim\n'), ((3029, 3107), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv1', '(32)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv1_2"""'}), "(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')\n", (3040, 3107), True, 'import tensorflow.contrib.slim as slim\n'), ((3120, 3166), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['conv1', '[2, 2]'], {'padding': '"""SAME"""'}), "(conv1, [2, 2], padding='SAME')\n", (3135, 3166), True, 'import tensorflow.contrib.slim as slim\n'), ((3180, 3258), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['pool1', '(64)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv2_1"""'}), "(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')\n", (3191, 3258), True, 'import tensorflow.contrib.slim as slim\n'), ((3271, 3349), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv2', '(64)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv2_2"""'}), "(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')\n", (3282, 
3349), True, 'import tensorflow.contrib.slim as slim\n'), ((3362, 3408), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['conv2', '[2, 2]'], {'padding': '"""SAME"""'}), "(conv2, [2, 2], padding='SAME')\n", (3377, 3408), True, 'import tensorflow.contrib.slim as slim\n'), ((3422, 3501), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['pool2', '(128)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv3_1"""'}), "(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')\n", (3433, 3501), True, 'import tensorflow.contrib.slim as slim\n'), ((3514, 3593), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv3', '(128)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv3_2"""'}), "(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')\n", (3525, 3593), True, 'import tensorflow.contrib.slim as slim\n'), ((3606, 3652), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['conv3', '[2, 2]'], {'padding': '"""SAME"""'}), "(conv3, [2, 2], padding='SAME')\n", (3621, 3652), True, 'import tensorflow.contrib.slim as slim\n'), ((3666, 3745), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['pool3', '(256)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv4_1"""'}), "(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')\n", (3677, 3745), True, 'import tensorflow.contrib.slim as slim\n'), ((3758, 3837), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv4', '(256)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv4_2"""'}), "(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')\n", (3769, 3837), True, 'import tensorflow.contrib.slim as slim\n'), ((3850, 3896), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['conv4', '[2, 2]'], {'padding': '"""SAME"""'}), "(conv4, [2, 2], padding='SAME')\n", (3865, 3896), True, 'import tensorflow.contrib.slim as slim\n'), ((3910, 3989), 
'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['pool4', '(512)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv5_1"""'}), "(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')\n", (3921, 3989), True, 'import tensorflow.contrib.slim as slim\n'), ((4002, 4081), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv5', '(512)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv5_2"""'}), "(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')\n", (4013, 4081), True, 'import tensorflow.contrib.slim as slim\n'), ((4149, 4226), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['up6', '(256)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv6_1"""'}), "(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')\n", (4160, 4226), True, 'import tensorflow.contrib.slim as slim\n'), ((4239, 4318), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv6', '(256)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv6_2"""'}), "(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')\n", (4250, 4318), True, 'import tensorflow.contrib.slim as slim\n'), ((4386, 4463), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['up7', '(128)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv7_1"""'}), "(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')\n", (4397, 4463), True, 'import tensorflow.contrib.slim as slim\n'), ((4476, 4555), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv7', '(128)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv7_2"""'}), "(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')\n", (4487, 4555), True, 'import tensorflow.contrib.slim as slim\n'), ((4622, 4698), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['up8', '(64)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv8_1"""'}), "(up8, 64, [3, 3], 
rate=1, activation_fn=lrelu, scope='g_conv8_1')\n", (4633, 4698), True, 'import tensorflow.contrib.slim as slim\n'), ((4711, 4789), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv8', '(64)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv8_2"""'}), "(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')\n", (4722, 4789), True, 'import tensorflow.contrib.slim as slim\n'), ((4855, 4931), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['up9', '(32)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv9_1"""'}), "(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')\n", (4866, 4931), True, 'import tensorflow.contrib.slim as slim\n'), ((4944, 5022), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv9', '(32)', '[3, 3]'], {'rate': '(1)', 'activation_fn': 'lrelu', 'scope': '"""g_conv9_2"""'}), "(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')\n", (4955, 5022), True, 'import tensorflow.contrib.slim as slim\n'), ((5037, 5113), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv9', '(12)', '[1, 1]'], {'rate': '(1)', 'activation_fn': 'None', 'scope': '"""g_conv10"""'}), "(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')\n", (5048, 5113), True, 'import tensorflow.contrib.slim as slim\n'), ((5124, 5152), 'tensorflow.depth_to_space', 'tf.depth_to_space', (['conv10', '(2)'], {}), '(conv10, 2)\n', (5141, 5152), True, 'import tensorflow as tf\n'), ((5363, 5389), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (5377, 5389), True, 'import numpy as np\n'), ((5468, 5580), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (5482, 5580), True, 'import numpy as np\n'), ((5868, 5896), 'tensorflow.abs', 'tf.abs', (['(out_image - 
gt_image)'], {}), '(out_image - gt_image)\n', (5874, 5896), True, 'import tensorflow as tf\n'), ((6066, 6099), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6097, 6099), True, 'import tensorflow as tf\n'), ((6753, 6795), 'os.path.isdir', 'os.path.isdir', (["(result_dir + '%04d' % epoch)"], {}), "(result_dir + '%04d' % epoch)\n", (6766, 6795), False, 'import os, time, scipy.io\n'), ((1506, 1530), 'zmf.uniform', 'zmf.uniform', (['(-0.05)', '(0.05)'], {}), '(-0.05, 0.05)\n', (1517, 1530), False, 'import zmf\n'), ((2566, 2656), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[pool_size, pool_size, output_channels, in_channels]'], {'stddev': '(0.02)'}), '([pool_size, pool_size, output_channels, in_channels],\n stddev=0.02)\n', (2585, 2656), True, 'import tensorflow as tf\n'), ((2709, 2721), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (2717, 2721), True, 'import tensorflow as tf\n'), ((5285, 5308), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (5295, 5308), True, 'import numpy as np\n'), ((5973, 6013), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (5995, 6013), True, 'import tensorflow as tf\n'), ((7020, 7065), 'glob.glob', 'glob.glob', (["(gt_dir + '%05d_00*.ARW' % train_id)"], {}), "(gt_dir + '%05d_00*.ARW' % train_id)\n", (7029, 7065), False, 'import glob\n'), ((7112, 7137), 'os.path.basename', 'os.path.basename', (['in_path'], {}), '(in_path)\n', (7128, 7137), False, 'import os, time, scipy.io\n'), ((7158, 7203), 'glob.glob', 'glob.glob', (["(gt_dir + '%05d_00*.ARW' % train_id)"], {}), "(gt_dir + '%05d_00*.ARW' % train_id)\n", (7167, 7203), False, 'import glob\n'), ((7250, 7275), 'os.path.basename', 'os.path.basename', (['gt_path'], {}), '(gt_path)\n', (7266, 7275), False, 'import os, time, scipy.io\n'), ((7432, 7443), 'time.time', 'time.time', ([], {}), '()\n', (7441, 7443), False, 'import os, 
time, scipy.io\n'), ((8152, 8180), 'numpy.random.randint', 'np.random.randint', (['(0)', '(W - ps)'], {}), '(0, W - ps)\n', (8169, 8180), True, 'import numpy as np\n'), ((8194, 8222), 'numpy.random.randint', 'np.random.randint', (['(0)', '(H - ps)'], {}), '(0, H - ps)\n', (8211, 8222), True, 'import numpy as np\n'), ((8928, 8956), 'numpy.minimum', 'np.minimum', (['input_patch', '(1.0)'], {}), '(input_patch, 1.0)\n', (8938, 8956), True, 'import numpy as np\n'), ((621, 647), 'os.path.basename', 'os.path.basename', (['train_fn'], {}), '(train_fn)\n', (637, 647), False, 'import os, time, scipy.io\n'), ((2018, 2034), 'math.exp', 'math.exp', (['logdev'], {}), '(logdev)\n', (2026, 2034), False, 'import math\n'), ((2059, 2077), 'zmf.normal', 'zmf.normal', (['(0)', 'dev'], {}), '(0, dev)\n', (2069, 2077), False, 'import zmf\n'), ((7528, 7549), 'rawpy.imread', 'rawpy.imread', (['gt_path'], {}), '(gt_path)\n', (7540, 7549), False, 'import rawpy\n'), ((8479, 8507), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(1)'}), '(input_patch, axis=1)\n', (8486, 8507), True, 'import numpy as np\n'), ((8531, 8556), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(1)'}), '(gt_patch, axis=1)\n', (8538, 8556), True, 'import numpy as np\n'), ((8632, 8660), 'numpy.flip', 'np.flip', (['input_patch'], {'axis': '(2)'}), '(input_patch, axis=2)\n', (8639, 8660), True, 'import numpy as np\n'), ((8684, 8709), 'numpy.flip', 'np.flip', (['gt_patch'], {'axis': '(2)'}), '(gt_patch, axis=2)\n', (8691, 8709), True, 'import numpy as np\n'), ((8805, 8844), 'numpy.transpose', 'np.transpose', (['input_patch', '(0, 2, 1, 3)'], {}), '(input_patch, (0, 2, 1, 3))\n', (8817, 8844), True, 'import numpy as np\n'), ((8868, 8904), 'numpy.transpose', 'np.transpose', (['gt_patch', '(0, 2, 1, 3)'], {}), '(gt_patch, (0, 2, 1, 3))\n', (8880, 8904), True, 'import numpy as np\n'), ((9168, 9189), 'numpy.maximum', 'np.maximum', (['output', '(0)'], {}), '(output, 0)\n', (9178, 9189), True, 'import numpy as np\n'), 
((9676, 9750), 'numpy.concatenate', 'np.concatenate', (['(input2, gt_patch[0, :, :, :], output[0, :, :, :])'], {'axis': '(1)'}), '((input2, gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)\n', (9690, 9750), True, 'import numpy as np\n'), ((1319, 1354), 'zmf.poisson', 'zmf.poisson', (['(im01 * 255)', 'im01.shape'], {}), '(im01 * 255, im01.shape)\n', (1330, 1354), False, 'import zmf\n'), ((1974, 2000), 'zmf.uniform', 'zmf.uniform', (['(-0.375)', '(0.375)'], {}), '(-0.375, 0.375)\n', (1985, 2000), False, 'import zmf\n'), ((7996, 8020), 'numpy.float32', 'np.float32', (['(im / 65535.0)'], {}), '(im / 65535.0)\n', (8006, 8020), True, 'import numpy as np\n'), ((8400, 8428), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (8417, 8428), True, 'import numpy as np\n'), ((8568, 8596), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (8585, 8596), True, 'import numpy as np\n'), ((8721, 8749), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(1)'}), '(2, size=1)\n', (8738, 8749), True, 'import numpy as np\n'), ((9393, 9435), 'os.path.isdir', 'os.path.isdir', (["(result_dir + '%04d' % epoch)"], {}), "(result_dir + '%04d' % epoch)\n", (9406, 9435), False, 'import os, time, scipy.io\n'), ((9453, 9493), 'os.makedirs', 'os.makedirs', (["(result_dir + '%04d' % epoch)"], {}), "(result_dir + '%04d' % epoch)\n", (9464, 9493), False, 'import os, time, scipy.io\n'), ((9319, 9330), 'time.time', 'time.time', ([], {}), '()\n', (9328, 9330), False, 'import os, time, scipy.io\n'), ((9299, 9315), 'numpy.where', 'np.where', (['g_loss'], {}), '(g_loss)\n', (9307, 9315), True, 'import numpy as np\n')] |
"""
Collection of Numpy linear algebra functions, wrapped to fit Ivy syntax and signature.
"""
# global
import numpy as _np
import ivy as _ivy
from typing import Union, Tuple
def matrix_norm(x, p=2, axes=None, keepdims=False):
    """Compute the matrix norm of x over a pair of axes via numpy.linalg.norm.

    :param x: input array (a matrix or a batch of matrices)
    :param p: norm order passed to numpy.linalg.norm (default 2)
    :param axes: length-2 sequence of axes spanning the matrices;
                 defaults to the trailing two axes (-2, -1)
    :param keepdims: whether the reduced axes are kept with size 1
    :return: the norm; a scalar result is returned as a 1-element array
    :raises Exception: if axes is a single int rather than a pair
    """
    if axes is None:
        axes = (-2, -1)
    if isinstance(axes, int):
        raise Exception('if specified, axes must be a length-2 sequence of ints,'
                        'but found {} of type {}'.format(axes, type(axes)))
    if isinstance(axes, list):
        axes = tuple(axes)
    result = _np.array(_np.linalg.norm(x, p, axes, keepdims))
    return _np.expand_dims(result, 0) if result.shape == () else result
# Cholesky factorization: NumPy's own function already matches the wrapper
# signature used in this file, so it is re-exported unchanged.
cholesky = _np.linalg.cholesky
def vector_to_skew_symmetric_matrix(vector):
    """Build the skew-symmetric cross-product matrix [a]_x for each 3-vector.

    For a = (a1, a2, a3) the result is

        [[  0, -a3,  a2],
         [ a3,   0, -a1],
         [-a2,  a1,   0]]

    :param vector: array of shape [..., 3]
    :return: array of shape [..., 3, 3]
    """
    batch_shape = list(vector.shape[:-1])
    # [..., 3, 1]
    expanded = _np.expand_dims(vector, -1)
    # [..., 1, 1] slices of each component
    a1 = expanded[..., 0:1, :]
    a2 = expanded[..., 1:2, :]
    a3 = expanded[..., 2:3, :]
    zero = _np.zeros(batch_shape + [1, 1])
    # [..., 1, 3] rows, stacked along the second-to-last axis
    rows = (
        _np.concatenate((zero, -a3, a2), -1),
        _np.concatenate((a3, zero, -a1), -1),
        _np.concatenate((-a2, a1, zero), -1),
    )
    return _np.concatenate(rows, -2)
def qr(x, mode):
    """QR factorization, delegating directly to NumPy.

    :param x: input matrix (or stack of matrices)
    :param mode: NumPy qr mode, e.g. 'reduced', 'complete', 'r', 'raw'
    :return: whatever numpy.linalg.qr returns for the given mode
    """
    factorization = _np.linalg.qr(x, mode=mode)
    return factorization
| [
"numpy.linalg.qr",
"numpy.zeros",
"numpy.expand_dims",
"numpy.linalg.norm",
"numpy.concatenate"
] | [((822, 849), 'numpy.expand_dims', '_np.expand_dims', (['vector', '(-1)'], {}), '(vector, -1)\n', (837, 849), True, 'import numpy as _np\n'), ((1010, 1041), 'numpy.zeros', '_np.zeros', (['(batch_shape + [1, 1])'], {}), '(batch_shape + [1, 1])\n', (1019, 1041), True, 'import numpy as _np\n'), ((1070, 1106), 'numpy.concatenate', '_np.concatenate', (['(zs, -a3s, a2s)', '(-1)'], {}), '((zs, -a3s, a2s), -1)\n', (1085, 1106), True, 'import numpy as _np\n'), ((1118, 1154), 'numpy.concatenate', '_np.concatenate', (['(a3s, zs, -a1s)', '(-1)'], {}), '((a3s, zs, -a1s), -1)\n', (1133, 1154), True, 'import numpy as _np\n'), ((1166, 1202), 'numpy.concatenate', '_np.concatenate', (['(-a2s, a1s, zs)', '(-1)'], {}), '((-a2s, a1s, zs), -1)\n', (1181, 1202), True, 'import numpy as _np\n'), ((1231, 1270), 'numpy.concatenate', '_np.concatenate', (['(row1, row2, row3)', '(-2)'], {}), '((row1, row2, row3), -2)\n', (1246, 1270), True, 'import numpy as _np\n'), ((1300, 1327), 'numpy.linalg.qr', '_np.linalg.qr', (['x'], {'mode': 'mode'}), '(x, mode=mode)\n', (1313, 1327), True, 'import numpy as _np\n'), ((545, 582), 'numpy.linalg.norm', '_np.linalg.norm', (['x', 'p', 'axes', 'keepdims'], {}), '(x, p, axes, keepdims)\n', (560, 582), True, 'import numpy as _np\n'), ((623, 646), 'numpy.expand_dims', '_np.expand_dims', (['ret', '(0)'], {}), '(ret, 0)\n', (638, 646), True, 'import numpy as _np\n')] |
import json
import numpy as np
# Returns the Euclidean distance score between user1 and user2
def euclidean_score(dataset, user1, user2):
    """Euclidean-distance similarity between two users' movie ratings.

    Returns a score in (0, 1]: 1 means identical ratings on every shared
    movie, values near 0 mean very different ratings, and exactly 0 means
    the users have no movie in common.

    :param dataset: mapping user -> {movie: rating}
    :raises TypeError: if either user is missing from the dataset
    """
    for user in (user1, user2):
        if user not in dataset:
            raise TypeError('User ' + user + ' not present in the dataset')
    # Movies rated by both users
    shared = {title for title in dataset[user1] if title in dataset[user2]}
    # If there are no common movies, the score is 0
    if not shared:
        return 0
    squared_diffs = [
        np.square(dataset[user1][title] - dataset[user2][title])
        for title in dataset[user1]
        if title in dataset[user2]
    ]
    return 1 / (1 + np.sqrt(np.sum(squared_diffs)))
if __name__=='__main__':
    # Demo: load the ratings database and score one pair of users.
    data_file = 'movie_ratings.json'
    with open(data_file, 'r') as f:
        data = json.loads(f.read())
    # NOTE(review): the user names were replaced by '<NAME>' placeholders
    # (apparently by an anonymization pass); they must match keys present in
    # movie_ratings.json for the lookup to succeed.
    user1 = '<NAME>'
    user2 = '<NAME>'
    print("\nEuclidean score:")
    print(euclidean_score(data, user1, user2) )
| [
"numpy.square",
"numpy.sum"
] | [((779, 833), 'numpy.square', 'np.square', (['(dataset[user1][item] - dataset[user2][item])'], {}), '(dataset[user1][item] - dataset[user2][item])\n', (788, 833), True, 'import numpy as np\n'), ((872, 899), 'numpy.sum', 'np.sum', (['squared_differences'], {}), '(squared_differences)\n', (878, 899), True, 'import numpy as np\n')] |
import random
import numpy as np
def play(board, column, mark, config):
    """Drop `mark` into `column` of the flat board, mutating it in place.

    The board is a row-major list of config.rows * config.columns cells
    with 0 meaning empty; the piece lands on the lowest (largest row
    index) empty cell of the column.
    """
    EMPTY = 0
    width = config.columns
    empty_rows = [r for r in range(config.rows) if board[column + r * width] == EMPTY]
    landing_row = max(empty_rows)
    board[column + landing_row * width] = mark
def is_win(board, column, mark, config, has_played=True):
    """Return True if the piece of `mark` in `column` makes config.inarow in a row.

    If has_played is True, the cell examined is the topmost `mark` piece in
    the column (the one just dropped); otherwise it is the cell where a piece
    WOULD land (the lowest empty cell), letting callers probe a move before
    making it.

    Bug fix: the original body computed `row` and then fell through, so the
    function always returned None and no win was ever detected; the standard
    four-direction check is restored here.
    """
    EMPTY = 0
    columns = config.columns
    rows = config.rows
    inarow = config.inarow - 1  # pieces needed beyond the anchor cell itself
    row = (
        min([r for r in range(rows) if board[column + (r * columns)] == mark])
        if has_played
        else max([r for r in range(rows) if board[column + (r * columns)] == EMPTY])
    )

    def count(offset_row, offset_column):
        # Consecutive `mark` cells walking away from (row, column) in one direction.
        for i in range(1, inarow + 1):
            r = row + offset_row * i
            c = column + offset_column * i
            if (
                r < 0
                or r >= rows
                or c < 0
                or c >= columns
                or board[c + (r * columns)] != mark
            ):
                return i - 1
        return inarow

    return (
        count(1, 0) >= inarow                        # vertical
        or (count(0, 1) + count(0, -1)) >= inarow    # horizontal
        or (count(-1, 1) + count(1, -1)) >= inarow   # anti-diagonal
        or (count(-1, -1) + count(1, 1)) >= inarow   # main diagonal
    )
def negamax_agent(obs, config):
    """ConnectX agent: pick a column via depth-limited negamax search.

    Relies on the module-level helpers `play` (apply a move) and `is_win`
    (win detection).  Scores are from the perspective of the player to move
    and negated at each ply; earlier wins score higher via (size+1-moves)/2.
    Returns a column index; falls back to a random valid column if the
    search yields none.
    """
    columns = config.columns
    rows = config.rows
    size = rows * columns
    from random import choice # connect the library for working with random numbers
    # Due to compute/time constraints the tree depth must be limited.
    max_depth = 4
    EMPTY = 0
    def negamax(board, mark, depth):
        # Returns (score, best_column) for the player `mark` to move.
        moves = sum(1 if cell != EMPTY else 0 for cell in board)
        # Tie Game
        if moves == size:
            return (0, None)
        # Can win next.
        for column in range(columns):
            if board[column] == EMPTY and is_win(board, column, mark, config, False):
                return ((size + 1 - moves) / 2, column)
        # Recursively check all columns.
        best_score = -size
        best_column = None
        for column in range(columns):
            if board[column] == EMPTY:
                # Max depth reached. Score based on cell proximity for a clustering effect.
                if depth <= 0:
                    row = max(
                        [
                            r
                            for r in range(rows)
                            if board[column + (r * columns)] == EMPTY
                        ]
                    )
                    score = (size + 1 - moves) / 2
                    # +1 for each orthogonally adjacent friendly piece.
                    if column > 0 and board[row * columns + column - 1] == mark:
                        score += 1
                    if (
                        column < columns - 1
                        and board[row * columns + column + 1] == mark
                    ):
                        score += 1
                    if row > 0 and board[(row - 1) * columns + column] == mark:
                        score += 1
                    if row < rows - 2 and board[(row + 1) * columns + column] == mark:
                        score += 1
                else:
                    next_board = board[:]
                    play(next_board, column, mark, config)
                    # Opponent's best score, negated (negamax recursion).
                    (score, _) = negamax(next_board,
                                         1 if mark == 2 else 2, depth - 1)
                    score = score * -1
                # Random tie-breaking between equally scored columns.
                if score > best_score or (score == best_score and choice([True, False])):
                    best_score = score
                    best_column = column
        return (best_score, best_column)
    _, column = negamax(obs.board[:], obs.mark, max_depth)
    if column == None:
        column = choice([c for c in range(columns) if obs.board[c] == EMPTY])
    return column
'''
Helper Functions:
- score_move_a: calculates score if agent drops piece in selected column
- score_move_b: calculates score if opponent drops piece in selected column
- drop_piece: return grid status after player drops a piece
- get_heuristic: calculates value of heuristic for grid
- get_heuristic_optimised: calculates value of heuristic optimised
- check_window: checks if window satisfies heuristic conditions
- count_windows: counts number of windows satisfying specified heuristic conditions
- count_windows_optimised: counts number of windows satisfying specified heuristic optimised conditions
'''
# Calculates score if agent drops piece in selected column
def score_move_a(grid, col, mark, config, start_score, n_steps):
    """Minimax lookahead score if OUR agent (`mark`) drops a piece in `col`.

    Recurses into score_move_b (the opponent's reply), taking the minimum
    over the opponent's options.  Recursion stops when the board is full,
    `n_steps` reaches 0, or this move already wins (+inf heuristic).
    Depends on the sibling helpers drop_piece and get_heuristic_optimised.
    """
    next_grid, pos = drop_piece(grid, col, mark, config)
    row, col = pos
    score = get_heuristic_optimised(grid,next_grid,mark,config, row, col,start_score)
    # A column is a valid move while its top cell is empty.
    valid_moves = [col for col in range (config.columns) if next_grid[0][col]==0]
    '''Since we have just dropped our piece there is only the possibility of us getting 4 in a row and not the opponent.
    Thus score can only be +infinity'''
    scores = []
    if len(valid_moves)==0 or n_steps ==0 or score == float("inf"):
        return score
    else :
        for col in valid_moves:
            current = score_move_b(next_grid,col,mark,config,score,n_steps-1)
            scores.append(current)
        score = min(scores)
    return score
# calculates score if opponent drops piece in selected column
def score_move_b(grid, col, mark, config, start_score, n_steps):
    """Score the position reached when the opponent drops a piece in `col`.

    Counterpart of `score_move_a`: the opponent (mark % 2 + 1) has just
    moved, so the agent replies next and is assumed to maximise.
    """
    next_grid, (row, landed_col) = drop_piece(grid, col, (mark % 2) + 1, config)
    score = get_heuristic_optimised(grid, next_grid, mark, config,
                                    row, landed_col, start_score)
    # Since only an opponent piece was just placed, a terminal score here
    # can only be -infinity (opponent completed four in a row).
    if n_steps == 0 or score == float("-inf"):
        return score
    open_cols = [c for c in range(config.columns) if next_grid[0][c] == 0]
    if not open_cols:
        return score
    # We move next and pick the reply that is best for us.
    return max(score_move_a(next_grid, c, mark, config, score, n_steps - 1)
               for c in open_cols)
# Gets board at next step if agent drops piece in selected column
def drop_piece(grid, col, mark, config):
    """Return (new_grid, (row, col)) after `mark` is dropped into `col`.

    The piece falls to the lowest empty cell of the column.  The input grid
    is not modified; a copy is returned.  The caller is expected to drop
    only into columns that still have a free cell.
    """
    landing = grid.copy()
    row = config.rows - 1
    # Walk upwards from the bottom row until the first empty cell.
    while row > 0 and landing[row][col] != 0:
        row -= 1
    landing[row][col] = mark
    return landing, (row, col)
# calculates value of heuristic for grid
def get_heuristic(grid, mark, config):
    """Evaluate `grid` from the point of view of player `mark`.

    A window holding k of our pieces (and none of the opponent's) adds
    4**(k-1); an opponent-only window with k pieces subtracts 2**(2*(k-1)+1).
    A completed in-a-row window short-circuits to +/-infinity.
    """
    own = count_windows(grid, mark, config)
    if own[config.inarow] >= 1:
        return float("inf")
    value = 0
    for k in range(config.inarow):
        value += (4 ** k) * own[k + 1]
    opp = count_windows(grid, mark % 2 + 1, config)
    if opp[config.inarow] >= 1:
        return float("-inf")
    for k in range(config.inarow):
        value -= (2 ** (2 * k + 1)) * opp[k + 1]
    return value
# calculates value of heuristic optimised
def get_heuristic_optimised(grid, next_grid, mark, config, row, col, start_score):
    """Incrementally update `start_score` after a piece landed at (row, col).

    Only windows passing through (row, col) can have changed, so the delta
    between the window counts before (`grid`) and after (`next_grid`) the
    drop is sufficient.  A newly completed in-a-row window short-circuits
    to +infinity for our mark and -infinity for the opponent's.
    """
    before = count_windows_optimised(grid, mark, config, row, col)
    after = count_windows_optimised(next_grid, mark, config, row, col)
    if after[config.inarow] - before[config.inarow] >= 1:
        return float("inf")
    delta = 0
    for k in range(config.inarow):
        delta += (4 ** k) * (after[k + 1] - before[k + 1])
    before_opp = count_windows_optimised(grid, mark % 2 + 1, config, row, col)
    after_opp = count_windows_optimised(next_grid, mark % 2 + 1, config, row, col)
    if after_opp[config.inarow] - before_opp[config.inarow] >= 1:
        return float("-inf")
    for k in range(config.inarow):
        delta -= (2 ** (2 * k + 1)) * (after_opp[k + 1] - before_opp[k + 1])
    return delta + start_score
# checks if window satisfies heuristic conditions
def check_window(window, piece, config):
    """Classify a window for player `piece`.

    Returns the number of `piece` cells when the window holds no opponent
    pieces (i.e. it is still winnable by `piece`), and -1 otherwise.
    """
    opponent = (piece % 2) + 1
    return -1 if opponent in window else window.count(piece)
# counts number of windows satisfying specified heuristic conditions
def count_windows(grid, piece, config):
    """Count, per piece-count, every still-winnable window for `piece`.

    Returns an array `num` where num[k] is the number of length-`inarow`
    windows (horizontal, vertical and both diagonals) containing exactly k
    pieces of `piece` and none of the opponent's.  Blocked windows are
    ignored (see `check_window`).
    """
    span = config.inarow
    counts = np.zeros(span + 1)

    def tally(window):
        # Bucket the window by its piece count, skipping blocked windows.
        kind = check_window(window, piece, config)
        if kind != -1:
            counts[kind] += 1

    # horizontal windows
    for r in range(config.rows):
        for c in range(config.columns - span + 1):
            tally(list(grid[r, c:c + span]))
    # vertical windows
    for r in range(config.rows - span + 1):
        for c in range(config.columns):
            tally(list(grid[r:r + span, c]))
    # down-right ("positive") diagonals
    for r in range(config.rows - span + 1):
        for c in range(config.columns - span + 1):
            tally([grid[r + d, c + d] for d in range(span)])
    # up-right ("negative") diagonals
    for r in range(span - 1, config.rows):
        for c in range(config.columns - span + 1):
            tally([grid[r - d, c + d] for d in range(span)])
    return counts
# counts number of windows satisfying specified heuristic optimised conditions
def count_windows_optimised(grid, piece, config, row, col):
    """Count windows for `piece`, restricted to windows through (row, col).

    Same output semantics as `count_windows` (num[k] = number of windows
    with exactly k `piece` cells and no opponent cells), but only the
    length-`inarow` windows containing the cell (row, col) are examined —
    sufficient when scoring the incremental effect of a single drop.
    """
    num_windows = np.zeros(config.inarow+1)
    # horizontal
    # `acol` is the left end of the window; the range is clamped so the
    # window stays on the board AND still contains column `col`.
    for acol in range(max(0,col-(config.inarow-1)),min(col+1,(config.columns-(config.inarow-1)))):
        window = list(grid[row, acol:acol+config.inarow])
        type_window = check_window(window, piece, config)
        if type_window != -1:
            num_windows[type_window] += 1
    # vertical
    # `arow` is the top end of the window; clamped analogously to above.
    for arow in range(max(0,row-(config.inarow-1)),min(row+1,(config.rows-(config.inarow-1)))):
        window = list(grid[arow:arow+config.inarow, col])
        type_window = check_window(window, piece, config)
        if type_window != -1:
            num_windows[type_window] += 1
    # positive diagonal
    # (arow, acol) iterates over all down-right window starts that pass
    # through (row, col); the explicit bounds check keeps them on-board.
    for arow, acol in zip(range(row-(config.inarow-1),row+1),range(col-(config.inarow-1),col+1)):
        if (arow>=0 and acol>=0 and arow<=(config.rows-config.inarow) and acol<=(config.columns-config.inarow)):
            window = list(grid[range(arow, arow+config.inarow), range(acol, acol+config.inarow)])
            type_window = check_window(window, piece, config)
            if type_window != -1:
                num_windows[type_window] += 1
    # negative diagonal
    # Window starts for the up-right direction: row increases while the
    # column decreases; again bounds-checked before extraction.
    for arow,acol in zip(range(row,row+config.inarow),range(col,col-config.inarow,-1)):
        if (arow >= (config.inarow-1) and acol >=0 and arow <= (config.rows-1) and acol <= (config.columns-config.inarow)):
            window = list(grid[range(arow, arow-config.inarow, -1), range(acol, acol+config.inarow)])
            type_window = check_window(window, piece, config)
            if type_window != -1:
                num_windows[type_window] += 1
    return num_windows
# main function of our agent
def n_ahead_fast_agent(n):
    """Build a kaggle-environments ConnectX agent searching `n` plies ahead.

    Parameters
    ----------
    n : int
        Look-ahead depth handed to the score_move_a/score_move_b recursion.

    Returns
    -------
    callable
        An ``agent(obs, config)`` function returning the chosen column.
    """
    def agent(obs, config):
        grid = np.asarray(obs.board).reshape(config.rows, config.columns)
        valid_moves = [c for c in range(config.columns) if grid[0][c] == 0]
        # Heuristic of the current position, updated incrementally per move.
        start_score = get_heuristic(grid, obs.mark, config)
        scores = {}
        for col in valid_moves:
            scores[col] = score_move_a(grid, col, obs.mark, config, start_score, n)
        # FIX: compute max() once instead of once per column inside the
        # comprehension (the original tie-break was accidentally O(m**2)).
        best = max(scores.values())
        max_cols = [col for col, s in scores.items() if s == best]
        # Break ties randomly so the agent is not fully deterministic.
        return random.choice(max_cols)
    return agent
def ddqn_as_agent(ddqn, flip=False):
    """Wrap a trained DDQN model as a kaggle-environments agent.

    Returns an ``(agent, agent_name)`` pair where ``agent(obs, config)``
    asks the model for a greedy (train=False) action.
    """
    net = ddqn

    def agent(obs, config):
        # Greedy action; `flip` lets the same network play as either side.
        return net.select_action(obs, train=False, flip=flip)

    return agent, net.name
def get_agent_map():
    """Return the mapping from agent name to agent implementation.

    "random" is resolved by kaggle-environments itself (string built-in);
    the remaining entries are local callables.
    """
    lookup = {
        "random": "random",
        "negamax": negamax_agent,
    }
    for depth in range(1, 5):
        lookup[f"{depth}_ahead"] = n_ahead_fast_agent(depth)
    return lookup
| [
"numpy.asarray",
"numpy.zeros",
"random.choice"
] | [((7769, 7796), 'numpy.zeros', 'np.zeros', (['(config.inarow + 1)'], {}), '(config.inarow + 1)\n', (7777, 7796), True, 'import numpy as np\n'), ((9351, 9378), 'numpy.zeros', 'np.zeros', (['(config.inarow + 1)'], {}), '(config.inarow + 1)\n', (9359, 9378), True, 'import numpy as np\n'), ((11492, 11515), 'random.choice', 'random.choice', (['max_cols'], {}), '(max_cols)\n', (11505, 11515), False, 'import random\n'), ((11058, 11079), 'numpy.asarray', 'np.asarray', (['obs.board'], {}), '(obs.board)\n', (11068, 11079), True, 'import numpy as np\n'), ((2815, 2836), 'random.choice', 'choice', (['[True, False]'], {}), '([True, False])\n', (2821, 2836), False, 'from random import choice\n')] |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.multitest import multipletests
from sklearn.ensemble import IsolationForest
from sklearn.svm import OneClassSVM
from scipy import stats
from sklearn.metrics import mean_squared_error, mean_absolute_error
from scripts.python.pheno.datasets.filter import filter_pheno
from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict
from scripts.python.routines.plot.scatter import add_scatter_trace
import plotly.graph_objects as go
import pathlib
from scripts.python.routines.manifest import get_manifest
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.layout import add_layout
from sklearn.neighbors import LocalOutlierFactor
from sklearn.decomposition import PCA
from scipy.stats import chi2
from pyod.models.abod import ABOD
from pyod.models.knn import KNN
from pyod.models.copod import COPOD
from pyod.models.ecod import ECOD
from pyod.models.sos import SOS
from pyod.models.suod import SUOD
def mahalanobis(x=None, data=None, cov=None):
    """Compute the squared Mahalanobis distance of each row of `x` to `data`.

    Parameters
    ----------
    x : DataFrame or ndarray
        Observations, one per row, with p columns.
    data : DataFrame
        Distribution the distances are measured against (provides the mean,
        and the covariance when `cov` is not given).
    cov : ndarray, optional
        Covariance matrix (p x p).  Estimated from `data` when omitted.

    Returns
    -------
    ndarray
        Squared Mahalanobis distance of each row of `x`.
    """
    # Column-wise mean; `axis=0` keeps this correct for plain ndarrays too
    # (np.mean(df) already averages per column for a DataFrame).
    x_mu = x - np.mean(data, axis=0)
    # FIX: the original `if not cov:` raised "truth value of an array is
    # ambiguous" whenever a covariance matrix was actually supplied; test
    # for identity with None, not truthiness.
    if cov is None:
        cov = np.cov(data.values.T)
    inv_covmat = np.linalg.inv(cov)
    left = np.dot(x_mu, inv_covmat)
    mahal = np.dot(left, x_mu.T)
    return mahal.diagonal()
def calc_metrics(model, X, y, comment, params):
    """Predict with `model` on X, record R2/RMSE/MAE in `params`, return preds.

    Metric keys are prefixed with `comment` (e.g. "Train R2"); `params` is
    mutated in place.
    """
    predictions = model.predict(X)
    params[f'{comment} R2'] = model.score(X, y)
    params[f'{comment} RMSE'] = np.sqrt(mean_squared_error(y, predictions))
    params[f'{comment} MAE'] = mean_absolute_error(y, predictions)
    return predictions
def analyze_outliers(ctrl, features, data_type):
    """Detect outlier subjects in the control cohort and save PCA plots.

    Mutates `ctrl` in place: adds per-method inlier flags (column names
    suffixed with `data_type`), Mahalanobis distances/p-values and PCA
    coordinates, and writes scatter plots under
    ``{path_save}/outliers/{data_type}``.

    NOTE(review): relies on the module-level global `path_save`; it must be
    defined before this function is called.

    Parameters
    ----------
    ctrl : pandas.DataFrame
        Control subjects; must contain the `features` columns plus 'Age',
        'Source' and 'PassedByImmunoAgeDiff'.
    features : list of str
        Feature column names to analyse.
    data_type : str
        Suffix for the created columns/plots ('origin' or 'scaled').
    """
    # Correlate every feature with Age; only age-associated (aa) features
    # surviving BH-FDR correction at 0.01 are used downstream.
    corr_df = pd.DataFrame(data=np.zeros((len(features), 2)), index=features, columns=['pearson_corr', 'pearson_pval'])
    for f in features:
        corr, pval = stats.pearsonr(ctrl.loc[:, f].values, ctrl.loc[:, 'Age'].values)
        corr_df.at[f, 'pearson_corr'] = corr
        corr_df.at[f, 'pearson_pval'] = pval
    _, corr_df['pearson_pval_fdr_bh'], _, _ = multipletests(corr_df.loc[:, 'pearson_pval'].values, 0.05, method='fdr_bh')
    aa_features = corr_df.index[corr_df['pearson_pval_fdr_bh'] < 0.01].values
    features = aa_features
    # Tukey fences: flag, per feature, whether each subject falls inside
    # [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    iqr_feature = []
    for f in features:
        q1 = ctrl[f].quantile(0.25)
        q3 = ctrl[f].quantile(0.75)
        iqr = q3 - q1
        filter = (ctrl[f] >= q1 - 1.5 * iqr) & (ctrl[f] <= q3 + 1.5 * iqr)
        iqr_feature.append(f"{f}_IsIQR")
        ctrl[f"{f}_IsIQR"] = filter
    # Number of features on which the subject is an IQR outlier; subjects
    # with zero such features pass this filter.
    ctrl[f"NumIQR_{data_type}"] = len(aa_features) - ctrl[iqr_feature].sum(axis=1)
    ctrl[f"PassedByNumIQR_{data_type}"] = ctrl[f"NumIQR_{data_type}"] < 1
    X_ctrl = ctrl.loc[:, features].to_numpy()
    # sklearn detectors return +1 (inlier) / -1 (outlier); remap to booleans.
    lof = LocalOutlierFactor()
    ctrl[f"LocalOutlierFactor_{data_type}"] = lof.fit_predict(X_ctrl)
    ctrl[f"LocalOutlierFactor_{data_type}"].replace({1: True, -1: False}, inplace=True)
    iso = IsolationForest()
    ctrl[f"IsolationForest_{data_type}"] = iso.fit_predict(X_ctrl)
    ctrl[f"IsolationForest_{data_type}"].replace({1: True, -1: False}, inplace=True)
    ee = OneClassSVM()
    ctrl[f"OneClassSVM_{data_type}"] = ee.fit_predict(X_ctrl)
    ctrl[f"OneClassSVM_{data_type}"].replace({1: True, -1: False}, inplace=True)
    # Squared Mahalanobis distance of each subject to the control cohort;
    # NOTE(review): the chi2 survival test uses 3 degrees of freedom
    # regardless of the number of features — confirm this is intentional.
    ctrl[f'mahalanobis_d_{data_type}'] = mahalanobis(x=ctrl[features], data=ctrl[features])
    ctrl[f'mahalanobis_p_{data_type}'] = 1 - chi2.cdf(ctrl[f'mahalanobis_d_{data_type}'], 3)
    ctrl[f"PassedByMahalanobis_{data_type}"] = ctrl[f'mahalanobis_p_{data_type}'] <= 0.05
    # PyOD detectors, each assuming 25% contamination; they return
    # 0 (inlier) / 1 (outlier), remapped to booleans below.
    outlier_fraction = 0.25
    classifiers = {
        'ABOD': ABOD(contamination=outlier_fraction),
        'KNN': KNN(contamination=outlier_fraction),
        'COPOD': COPOD(contamination=outlier_fraction),
        'ECOD': ECOD(contamination=outlier_fraction),
        'SOS': SOS(contamination=outlier_fraction),
        'SUOD': SUOD(contamination=outlier_fraction)
    }
    outlier_types = ["PassedByImmunoAgeDiff", f"PassedByNumIQR_{data_type}"]
    for i, (clf_name, clf) in enumerate(classifiers.items()):
        clf.fit(X_ctrl)
        ctrl[f"{clf_name}_{data_type}"] = clf.predict(X_ctrl)
        ctrl[f"{clf_name}_{data_type}"].replace({0: True, 1: False}, inplace=True)
        outlier_types.append(f"{clf_name}_{data_type}")
    outlier_types += [f"{x}_{data_type}" for x in ["LocalOutlierFactor", "IsolationForest", "OneClassSVM", "PassedByMahalanobis"]]
    # Fraction of aa-features on which the subject is a |z| > 3 outlier.
    ctrl[f"OutInPartOfAAFeatures_{data_type}"] = np.sum((np.abs(stats.zscore(ctrl[aa_features])) > 3), axis=1) / len(aa_features)
    ctrl[f"PassedByOutInPartOfAAFeatures_{data_type}"] = ctrl[f"OutInPartOfAAFeatures_{data_type}"] < 0.01
    outlier_types += [f"PassedByOutInPartOfAAFeatures_{data_type}"]
    # 2-component PCA for visualisation; the "_log" variants compress the
    # dynamic range while preserving sign.
    pca = PCA(n_components=2)
    pcs = pca.fit_transform(ctrl[features])
    for pc_id in range(pcs.shape[1]):
        ctrl[f"PC_{pc_id + 1}_{data_type}"] = pcs[:, pc_id]
        ctrl[f"PC_{pc_id + 1}_{data_type}_log"] = np.sign(pcs[:, pc_id]) * np.log10(1 + np.abs(pcs[:, pc_id]))
    pc_plot_list = [
        {'x_col': f"PC_1_{data_type}", 'y_col': f"PC_2_{data_type}", 'x_name': f"PC1", 'y_name': f"PC2",
         'path': f"{path_save}/outliers/{data_type}", 'name': '0_PC'},
        {'x_col': f"PC_1_{data_type}_log", 'y_col': f"PC_2_{data_type}_log", 'x_name': f"sign(PC1) log(1 + |PC1|)",
         'y_name': f"sign(PC2) log(1 + |PC2|)", 'path': f"{path_save}/outliers/{data_type}", 'name': '0_PC_log'}
    ]
    # Baseline PCA scatter coloured by data source (batch 1+2 vs 3+4).
    for pc_plot in pc_plot_list:
        fig = go.Figure()
        add_scatter_trace(fig, ctrl.loc[ctrl['Source'] == 1, pc_plot['x_col']].values, ctrl.loc[ctrl['Source'] == 1, pc_plot['y_col']].values, f"First and Second")
        add_scatter_trace(fig, ctrl.loc[ctrl['Source'] == 2, pc_plot['x_col']].values, ctrl.loc[ctrl['Source'] == 2, pc_plot['y_col']].values, f"Third and Fourth")
        add_layout(fig, pc_plot['x_name'], pc_plot['y_name'], f"")
        fig.update_layout({'colorway': ['red', 'blue']})
        fig.update_layout(legend_font_size=20)
        fig.update_layout(
            margin=go.layout.Margin(
                l=110,
                r=20,
                b=75,
                t=45,
                pad=0
            )
        )
        save_figure(fig, f"{pc_plot['path']}/{pc_plot['name']}")
    # One inlier/outlier PCA scatter per detection method, plus its overlap
    # with the PassedByImmunoAgeDiff reference filter printed to stdout.
    for ot_id, ot in enumerate(outlier_types):
        n_total = ctrl.loc[(ctrl[ot] == True), :].shape[0]
        n_in_intersection = ctrl.loc[(ctrl[ot] == True) & (ctrl['PassedByImmunoAgeDiff'] == True),:].shape[0]
        print(f"Number of common subject of {ot} with PassedByImmunoAgeDiff: {n_in_intersection} from {n_total}")
        pc_plot_list[0]['name'] = f"{ot_id + 1}_{ot}"
        pc_plot_list[1]['name'] = f"{ot_id + 1}_{ot}_log"
        for pc_plot in pc_plot_list:
            fig = go.Figure()
            add_scatter_trace(fig, ctrl.loc[ctrl[ot] == True, pc_plot['x_col']].values, ctrl.loc[ctrl[ot] == True, pc_plot['y_col']].values, f"Inlier")
            add_scatter_trace(fig, ctrl.loc[ctrl[ot] == False, pc_plot['x_col']].values, ctrl.loc[ctrl[ot] == False, pc_plot['y_col']].values, f"Outlier")
            add_layout(fig, pc_plot['x_name'], pc_plot['y_name'], f"{ot}")
            fig.update_layout({'colorway': ['blue', 'red']})
            fig.update_layout(legend_font_size=20)
            fig.update_layout(
                margin=go.layout.Margin(
                    l=110,
                    r=20,
                    b=75,
                    t=85,
                    pad=0
                )
            )
            save_figure(fig, f"{pc_plot['path']}/{pc_plot['name']}")
# ---------------------------------------------------------------------------
# Script body: load the GSEUNN cohort, merge in the additional control batch,
# flag subjects by |ImmunoAge - Age|, then run outlier detection on the raw
# and standardised immunology features.
# ---------------------------------------------------------------------------
# Maximum allowed |ImmunoAge - Age| (years) for the reference "passed" flag.
thld_abs_diff = 16
dataset = "GSEUNN"
# NOTE(review): absolute Windows path — works only on the author's machine.
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
# Dataset-specific column names, with spaces normalised to underscores.
status_col = get_column_name(dataset, 'Status').replace(' ','_')
age_col = get_column_name(dataset, 'Age').replace(' ','_')
sex_col = get_column_name(dataset, 'Sex').replace(' ','_')
status_dict = get_status_dict(dataset)
status_passed_fields = status_dict['Control'] + status_dict['Case']
sex_dict = get_sex_dict(dataset)
continuous_vars = {}
categorical_vars = {status_col: [x.column for x in status_passed_fields], sex_col: list(sex_dict.values())}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
pheno['Source'] = 1
# Additional control batch (parts 3 and 4); drop technical Q*/H* samples.
part_3_4 = pd.read_excel(f"{path}/{platform}/{dataset}/data/immuno/part3_part4_with_age_sex.xlsx", index_col='ID')
part_3_4 = part_3_4[~part_3_4.index.str.startswith(('Q', 'H'))]
part_3_4['Group'] = 'Control'
part_3_4['Source'] = 2
pheno.set_index('ID', inplace=True)
df = pheno.append(part_3_4, verify_integrity=True)
# Reference filter: immunological age within thld_abs_diff years of
# chronological age.
df[f'ImmunoAgeDiff'] = df[f'ImmunoAge'] - df[f'Age']
df[f"PassedByImmunoAgeDiff"] = abs(df[f'ImmunoAgeDiff']) <= thld_abs_diff
df[f"PassedAll"] = True
ctrl = df.loc[df['Group'] == 'Control']
# NOTE(review): `esrd` is not used in the visible part of this script.
esrd = df.loc[df['Group'] == 'ESRD']
path_save = f"{path}/{platform}/{dataset}/special/017_outlier_detection_in_controls/"
pathlib.Path(f"{path_save}/outliers/origin").mkdir(parents=True, exist_ok=True)
pathlib.Path(f"{path_save}/outliers/scaled").mkdir(parents=True, exist_ok=True)
with open(f'{path}/{platform}/{dataset}/features/immuno.txt') as f:
    features = f.read().splitlines()
# Standardise every immunology feature (fit on controls only) so the
# detectors can also be run in the scaled space.
scalers = {}
features_scaled = []
for f in features:
    scaler = StandardScaler()
    scaler.fit(ctrl.loc[:, f].values.reshape(-1, 1))
    scalers[f] = scaler
    features_scaled.append(f"{f}_scaled")
    ctrl[f"{f}_scaled"] = scaler.transform(ctrl.loc[:, f].values.reshape(-1, 1))
analyze_outliers(ctrl, features, 'origin')
analyze_outliers(ctrl, features_scaled, 'scaled')
ctrl.to_excel(f'{path_save}/ctrl.xlsx', index=True)
| [
"scripts.python.pheno.datasets.filter.filter_pheno",
"pyod.models.suod.SUOD",
"sklearn.preprocessing.StandardScaler",
"numpy.abs",
"pyod.models.copod.COPOD",
"sklearn.neighbors.LocalOutlierFactor",
"sklearn.metrics.mean_absolute_error",
"pathlib.Path",
"numpy.mean",
"scripts.python.routines.plot.s... | [((8060, 8119), 'pandas.read_excel', 'pd.read_excel', (['f"""{path}/datasets.xlsx"""'], {'index_col': '"""dataset"""'}), "(f'{path}/datasets.xlsx', index_col='dataset')\n", (8073, 8119), True, 'import pandas as pd\n'), ((8181, 8203), 'scripts.python.routines.manifest.get_manifest', 'get_manifest', (['platform'], {}), '(platform)\n', (8193, 8203), False, 'from scripts.python.routines.manifest import get_manifest\n'), ((8401, 8425), 'scripts.python.pheno.datasets.features.get_status_dict', 'get_status_dict', (['dataset'], {}), '(dataset)\n', (8416, 8425), False, 'from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict\n'), ((8505, 8526), 'scripts.python.pheno.datasets.features.get_sex_dict', 'get_sex_dict', (['dataset'], {}), '(dataset)\n', (8517, 8526), False, 'from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict\n'), ((8664, 8724), 'pandas.read_pickle', 'pd.read_pickle', (['f"""{path}/{platform}/{dataset}/pheno_xtd.pkl"""'], {}), "(f'{path}/{platform}/{dataset}/pheno_xtd.pkl')\n", (8678, 8724), True, 'import pandas as pd\n'), ((8733, 8796), 'scripts.python.pheno.datasets.filter.filter_pheno', 'filter_pheno', (['dataset', 'pheno', 'continuous_vars', 'categorical_vars'], {}), '(dataset, pheno, continuous_vars, categorical_vars)\n', (8745, 8796), False, 'from scripts.python.pheno.datasets.filter import filter_pheno\n'), ((8829, 8941), 'pandas.read_excel', 'pd.read_excel', (['f"""{path}/{platform}/{dataset}/data/immuno/part3_part4_with_age_sex.xlsx"""'], {'index_col': '"""ID"""'}), "(\n f'{path}/{platform}/{dataset}/data/immuno/part3_part4_with_age_sex.xlsx',\n index_col='ID')\n", (8842, 8941), True, 'import pandas as pd\n'), ((1581, 1599), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (1594, 1599), True, 'import numpy as np\n'), ((1611, 1635), 'numpy.dot', 'np.dot', (['x_mu', 'inv_covmat'], {}), '(x_mu, inv_covmat)\n', (1617, 
1635), True, 'import numpy as np\n'), ((1648, 1668), 'numpy.dot', 'np.dot', (['left', 'x_mu.T'], {}), '(left, x_mu.T)\n', (1654, 1668), True, 'import numpy as np\n'), ((1867, 1897), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1886, 1897), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((2440, 2515), 'statsmodels.stats.multitest.multipletests', 'multipletests', (["corr_df.loc[:, 'pearson_pval'].values", '(0.05)'], {'method': '"""fdr_bh"""'}), "(corr_df.loc[:, 'pearson_pval'].values, 0.05, method='fdr_bh')\n", (2453, 2515), False, 'from statsmodels.stats.multitest import multipletests\n'), ((3128, 3148), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {}), '()\n', (3146, 3148), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((3317, 3334), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {}), '()\n', (3332, 3334), False, 'from sklearn.ensemble import IsolationForest\n'), ((3496, 3509), 'sklearn.svm.OneClassSVM', 'OneClassSVM', ([], {}), '()\n', (3507, 3509), False, 'from sklearn.svm import OneClassSVM\n'), ((5120, 5139), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5123, 5139), False, 'from sklearn.decomposition import PCA\n'), ((9787, 9803), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9801, 9803), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1498, 1511), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1505, 1511), True, 'import numpy as np\n'), ((1542, 1563), 'numpy.cov', 'np.cov', (['data.values.T'], {}), '(data.values.T)\n', (1548, 1563), True, 'import numpy as np\n'), ((1826, 1855), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'y_pred'], {}), '(y, y_pred)\n', (1844, 1855), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((2239, 2303), 'scipy.stats.pearsonr', 
'stats.pearsonr', (['ctrl.loc[:, f].values', "ctrl.loc[:, 'Age'].values"], {}), "(ctrl.loc[:, f].values, ctrl.loc[:, 'Age'].values)\n", (2253, 2303), False, 'from scipy import stats\n'), ((3790, 3837), 'scipy.stats.chi2.cdf', 'chi2.cdf', (["ctrl[f'mahalanobis_d_{data_type}']", '(3)'], {}), "(ctrl[f'mahalanobis_d_{data_type}'], 3)\n", (3798, 3837), False, 'from scipy.stats import chi2\n'), ((3993, 4029), 'pyod.models.abod.ABOD', 'ABOD', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (3997, 4029), False, 'from pyod.models.abod import ABOD\n'), ((4046, 4081), 'pyod.models.knn.KNN', 'KNN', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (4049, 4081), False, 'from pyod.models.knn import KNN\n'), ((4100, 4137), 'pyod.models.copod.COPOD', 'COPOD', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (4105, 4137), False, 'from pyod.models.copod import COPOD\n'), ((4155, 4191), 'pyod.models.ecod.ECOD', 'ECOD', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (4159, 4191), False, 'from pyod.models.ecod import ECOD\n'), ((4208, 4243), 'pyod.models.sos.SOS', 'SOS', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (4211, 4243), False, 'from pyod.models.sos import SOS\n'), ((4261, 4297), 'pyod.models.suod.SUOD', 'SUOD', ([], {'contamination': 'outlier_fraction'}), '(contamination=outlier_fraction)\n', (4265, 4297), False, 'from pyod.models.suod import SUOD\n'), ((5873, 5884), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (5882, 5884), True, 'import plotly.graph_objects as go\n'), ((5893, 6057), 'scripts.python.routines.plot.scatter.add_scatter_trace', 'add_scatter_trace', (['fig', "ctrl.loc[ctrl['Source'] == 1, pc_plot['x_col']].values", "ctrl.loc[ctrl['Source'] == 1, pc_plot['y_col']].values", 'f"""First and Second"""'], {}), "(fig, ctrl.loc[ctrl['Source'] == 1, pc_plot['x_col']].\n values, 
ctrl.loc[ctrl['Source'] == 1, pc_plot['y_col']].values,\n f'First and Second')\n", (5910, 6057), False, 'from scripts.python.routines.plot.scatter import add_scatter_trace\n'), ((6057, 6221), 'scripts.python.routines.plot.scatter.add_scatter_trace', 'add_scatter_trace', (['fig', "ctrl.loc[ctrl['Source'] == 2, pc_plot['x_col']].values", "ctrl.loc[ctrl['Source'] == 2, pc_plot['y_col']].values", 'f"""Third and Fourth"""'], {}), "(fig, ctrl.loc[ctrl['Source'] == 2, pc_plot['x_col']].\n values, ctrl.loc[ctrl['Source'] == 2, pc_plot['y_col']].values,\n f'Third and Fourth')\n", (6074, 6221), False, 'from scripts.python.routines.plot.scatter import add_scatter_trace\n'), ((6221, 6279), 'scripts.python.routines.plot.layout.add_layout', 'add_layout', (['fig', "pc_plot['x_name']", "pc_plot['y_name']", 'f""""""'], {}), "(fig, pc_plot['x_name'], pc_plot['y_name'], f'')\n", (6231, 6279), False, 'from scripts.python.routines.plot.layout import add_layout\n'), ((6591, 6647), 'scripts.python.routines.plot.save.save_figure', 'save_figure', (['fig', 'f"""{pc_plot[\'path\']}/{pc_plot[\'name\']}"""'], {}), '(fig, f"{pc_plot[\'path\']}/{pc_plot[\'name\']}")\n', (6602, 6647), False, 'from scripts.python.routines.plot.save import save_figure\n'), ((8217, 8251), 'scripts.python.pheno.datasets.features.get_column_name', 'get_column_name', (['dataset', '"""Status"""'], {}), "(dataset, 'Status')\n", (8232, 8251), False, 'from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict\n'), ((8279, 8310), 'scripts.python.pheno.datasets.features.get_column_name', 'get_column_name', (['dataset', '"""Age"""'], {}), "(dataset, 'Age')\n", (8294, 8310), False, 'from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict\n'), ((8338, 8369), 'scripts.python.pheno.datasets.features.get_column_name', 'get_column_name', (['dataset', '"""Sex"""'], {}), "(dataset, 'Sex')\n", (8353, 8369), False, 'from 
scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict\n'), ((9454, 9498), 'pathlib.Path', 'pathlib.Path', (['f"""{path_save}/outliers/origin"""'], {}), "(f'{path_save}/outliers/origin')\n", (9466, 9498), False, 'import pathlib\n'), ((9534, 9578), 'pathlib.Path', 'pathlib.Path', (['f"""{path_save}/outliers/scaled"""'], {}), "(f'{path_save}/outliers/scaled')\n", (9546, 9578), False, 'import pathlib\n'), ((5332, 5354), 'numpy.sign', 'np.sign', (['pcs[:, pc_id]'], {}), '(pcs[:, pc_id])\n', (5339, 5354), True, 'import numpy as np\n'), ((7146, 7157), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (7155, 7157), True, 'import plotly.graph_objects as go\n'), ((7170, 7313), 'scripts.python.routines.plot.scatter.add_scatter_trace', 'add_scatter_trace', (['fig', "ctrl.loc[ctrl[ot] == True, pc_plot['x_col']].values", "ctrl.loc[ctrl[ot] == True, pc_plot['y_col']].values", 'f"""Inlier"""'], {}), "(fig, ctrl.loc[ctrl[ot] == True, pc_plot['x_col']].values,\n ctrl.loc[ctrl[ot] == True, pc_plot['y_col']].values, f'Inlier')\n", (7187, 7313), False, 'from scripts.python.routines.plot.scatter import add_scatter_trace\n'), ((7322, 7468), 'scripts.python.routines.plot.scatter.add_scatter_trace', 'add_scatter_trace', (['fig', "ctrl.loc[ctrl[ot] == False, pc_plot['x_col']].values", "ctrl.loc[ctrl[ot] == False, pc_plot['y_col']].values", 'f"""Outlier"""'], {}), "(fig, ctrl.loc[ctrl[ot] == False, pc_plot['x_col']].values,\n ctrl.loc[ctrl[ot] == False, pc_plot['y_col']].values, f'Outlier')\n", (7339, 7468), False, 'from scripts.python.routines.plot.scatter import add_scatter_trace\n'), ((7477, 7539), 'scripts.python.routines.plot.layout.add_layout', 'add_layout', (['fig', "pc_plot['x_name']", "pc_plot['y_name']", 'f"""{ot}"""'], {}), "(fig, pc_plot['x_name'], pc_plot['y_name'], f'{ot}')\n", (7487, 7539), False, 'from scripts.python.routines.plot.layout import add_layout\n'), ((7899, 7955), 'scripts.python.routines.plot.save.save_figure', 
'save_figure', (['fig', 'f"""{pc_plot[\'path\']}/{pc_plot[\'name\']}"""'], {}), '(fig, f"{pc_plot[\'path\']}/{pc_plot[\'name\']}")\n', (7910, 7955), False, 'from scripts.python.routines.plot.save import save_figure\n'), ((6430, 6478), 'plotly.graph_objects.layout.Margin', 'go.layout.Margin', ([], {'l': '(110)', 'r': '(20)', 'b': '(75)', 't': '(45)', 'pad': '(0)'}), '(l=110, r=20, b=75, t=45, pad=0)\n', (6446, 6478), True, 'import plotly.graph_objects as go\n'), ((4866, 4897), 'scipy.stats.zscore', 'stats.zscore', (['ctrl[aa_features]'], {}), '(ctrl[aa_features])\n', (4878, 4897), False, 'from scipy import stats\n'), ((5370, 5391), 'numpy.abs', 'np.abs', (['pcs[:, pc_id]'], {}), '(pcs[:, pc_id])\n', (5376, 5391), True, 'import numpy as np\n'), ((7706, 7754), 'plotly.graph_objects.layout.Margin', 'go.layout.Margin', ([], {'l': '(110)', 'r': '(20)', 'b': '(75)', 't': '(85)', 'pad': '(0)'}), '(l=110, r=20, b=75, t=85, pad=0)\n', (7722, 7754), True, 'import plotly.graph_objects as go\n')] |
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from pytest import raises as assert_raises
from scipy.linalg import solve_sylvester
from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
from scipy.linalg import solve_continuous_are, solve_discrete_are
from scipy.linalg import block_diag, solve, LinAlgError
def _load_data(name):
    """Load the npz file ``data/<name>`` next to this test module.

    The archive is read eagerly into a plain dict so the npz file handle is
    closed before returning (no lazy-loading surprises).
    """
    here = os.path.abspath(os.path.dirname(__file__))
    archive_path = os.path.join(here, 'data', name)
    with np.load(archive_path) as archive:
        return dict(archive.items())
class TestSolveLyapunov(object):
    """Tests for solve_continuous_lyapunov / solve_discrete_lyapunov.

    Each entry of `cases` is an (a, q) pair; both solvers are checked by
    substituting the computed x back into the defining equation.
    """

    cases = [
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        # a, q all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a real; q complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; q real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # An example from Kitagawa, 1977
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        # Companion matrix example. a complex; q real; a.shape[0] = 11
        (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
                    0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
                    0.010+0.j],
                   [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j]]),
         np.eye(11)),
        # https://github.com/scipy/scipy/issues/4176
        (np.matrix([[0, 1], [-1/2, -1]]),
         (np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
        # https://github.com/scipy/scipy/issues/4176
        (np.matrix([[0, 1], [-1/2, -1]]),
         (np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
        ]

    def test_continuous_squareness_and_shape(self):
        # Non-square inputs and shape mismatches must raise ValueError.
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))

    def check_continuous_case(self, a, q):
        # Verify a x + x a^H == q for the returned solution.
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(
                          np.dot(a, x) + np.dot(x, a.conj().transpose()), q)

    def check_discrete_case(self, a, q, method=None):
        # Verify a x a^H - x == -q for the returned solution.
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal(
                      np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)

    def test_cases(self):
        # Run every case through the continuous solver and through both
        # discrete solver methods (plus the default method selection).
        for case in self.cases:
            self.check_continuous_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1], method='direct')
            self.check_discrete_case(case[0], case[1], method='bilinear')
def test_solve_continuous_are():
    """Residual check of solve_continuous_are over the CAREX benchmark set.

    For each case the residual X A + A' X - X B R^{-1} B' X + Q is computed
    from the returned solution and compared to zero to a per-case number of
    decimals; known-inaccurate cases are marked xfail via ``knownfailure``.
    """
    # The larger benchmark matrices are shipped alongside the tests as
    # compressed .npz archives and loaded via the module-level helper.
    mat6 = _load_data('carex_6_data.npz')
    mat15 = _load_data('carex_15_data.npz')
    mat18 = _load_data('carex_18_data.npz')
    mat19 = _load_data('carex_19_data.npz')
    mat20 = _load_data('carex_20_data.npz')
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P. Benner, A.J. Laub, V. Mehrmann: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
                  ).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
            [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
             [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
             [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
             [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
             [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
             [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
             [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
             [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
             [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
            np.diag([1e-6, 1, 1e6])).dot(
            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
            block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    # (None entries line up with knownfailure cases, which xfail before
    # the decimal count is ever used.)
    #
    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
                   None, 9, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_continuous_are(a, b, q, r)
        # Residual of the continuous-time algebraic Riccati equation.
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        # np.atleast_2d allows scalar r entries in the case table.
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_solve_discrete_are():
    """Residual check of solve_discrete_are over the DAREX benchmark set.

    For each case the residual A' X A - X - (A' X B)(R + B' X B)^{-1}
    (B' X A) + Q is compared to zero to a per-case number of decimals;
    known-inaccurate cases are marked xfail via ``knownfailure``.  Finally
    an infeasible problem is checked to raise LinAlgError.
    """
    cases = [
        # Darex examples taken from (with default parameters):
        # [1] P. Benner, A.J. Laub, V. Mehrmann: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        # [2] T. Gudmundsson, C. Kenney, A.J. Laub: 'Scaling of the
        #     Discrete-Time Algebraic Riccati Equation to Enhance Stability
        #     of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # TEST CASE 0 : Complex a; real b, q, r
        (np.array([[2, 1-2j], [0, -3j]]),
         np.array([[0], [1]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 1 :Real a, q, r; complex b
        (np.array([[2, 1], [0, -1]]),
         np.array([[-2j], [1j]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 2 : Real a, b; complex q, r
        (np.array([[3, 1], [0, -1]]),
         np.array([[1, 2], [1, 3]]),
         np.array([[1, 1+1j], [1-1j, 2]]),
         np.array([[2, -2j], [2j, 3]]),
         None),
        # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
        (np.array([[0.63399379, 0.54906824, 0.76253406],
                   [0.5404729, 0.53745766, 0.08731853],
                   [0.27524045, 0.84922129, 0.4681622]]),
         np.array([[0.96861695], [0.05532739], [0.78934047]]),
         np.eye(3),
         np.eye(1),
         None),
        # TEST CASE 4 : darex #1
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 5 : darex #2
        (np.array([[0.9512, 0], [0, 0.9048]]),
         np.array([[4.877, 4.877], [-1.1895, 3.569]]),
         np.array([[0.005, 0], [0, 0.02]]),
         np.array([[1/3, 0], [0, 3]]),
         None),
        # TEST CASE 6 : darex #3
        (np.array([[2, -1], [1, 0]]),
         np.array([[1], [0]]),
         np.array([[0, 0], [0, 1]]),
         np.array([[0]]),
         None),
        # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
        (np.array([[0, 1], [0, -1]]),
         np.array([[1, 0], [2, 1]]),
         np.array([[-4, -4], [-4, 7]]) * (1/11),
         np.array([[9, 3], [3, 1]]),
         None),
        # TEST CASE 8 : darex #5
        (np.array([[0, 1], [0, 0]]),
         np.array([[0], [1]]),
         np.array([[1, 2], [2, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 9 : darex #6
        (np.array([[0.998, 0.067, 0, 0],
                   [-.067, 0.998, 0, 0],
                   [0, 0, 0.998, 0.153],
                   [0, 0, -.153, 0.998]]),
         np.array([[0.0033, 0.0200],
                   [0.1000, -.0007],
                   [0.0400, 0.0073],
                   [-.0028, 0.1000]]),
         np.array([[1.87, 0, 0, -0.244],
                   [0, 0.744, 0.205, 0],
                   [0, 0.205, 0.589, 0],
                   [-0.244, 0, 0, 1.048]]),
         np.eye(2),
         None),
        # TEST CASE 10 : darex #7
        (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
                   [0.041588, 0.998990, -.0358550, 0.0126840],
                   [-.546620, 0.044916, -.3299100, 0.1931800],
                   [2.662400, -.100450, -.9245500, -.2632500]]),
         np.array([[0.0037112, 0.0007361],
                   [-.0870510, 9.3411e-6],
                   [-1.198440, -4.1378e-4],
                   [-3.192700, 9.2535e-4]]),
         np.eye(4)*1e-2,
         np.eye(2),
         None),
        # TEST CASE 11 : darex #8
        (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
                   [1.0000000, 0.6000000, 0.8000000, 3.3999820],
                   [0.0000000, 1.0000000, 1.8000000, 3.7999820],
                   [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
         np.array([[1.0, -1.0, -1.0, -1.0],
                   [0.0, 1.0, -1.0, -1.0],
                   [0.0, 0.0, 1.0, -1.0],
                   [0.0, 0.0, 0.0, 1.0]]),
         np.array([[2, 1, 3, 6],
                   [1, 2, 2, 5],
                   [3, 2, 6, 11],
                   [6, 5, 11, 22]]),
         np.eye(4),
         None),
        # TEST CASE 12 : darex #9
        (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
                   [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
                   [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
                   [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
                   [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
         np.array([[0.0434, -0.0122],
                   [2.6606, -1.0453],
                   [3.7530, -5.5100],
                   [3.6076, -6.6000],
                   [0.4617, -0.9148]]) * 0.01,
         np.eye(5),
         np.eye(2),
         None),
        # TEST CASE 13 : darex #10
        (np.kron(np.eye(2), np.diag([1, 1], k=1)),
         np.kron(np.eye(2), np.array([[0], [0], [1]])),
         np.array([[1, 1, 0, 0, 0, 0],
                   [1, 1, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 1, -1, 0],
                   [0, 0, 0, -1, 1, 0],
                   [0, 0, 0, 0, 0, 0]]),
         np.array([[3, 0], [0, 1]]),
         None),
        # TEST CASE 14 : darex #11
        (0.001 * np.array(
            [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
             [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
             [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
             [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
             [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
             [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
             [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
             [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
             [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
         np.array([[4.7600, -0.5701, -83.6800],
                   [0.8790, -4.7730, -2.7300],
                   [1.4820, -13.1200, 8.8760],
                   [3.8920, -35.1300, 24.8000],
                   [10.3400, -92.7500, 66.8000],
                   [7.2030, -61.5900, 38.3400],
                   [4.4540, -36.8300, 20.2900],
                   [1.9710, -15.5400, 6.9370],
                   [3.7730, -30.2800, 14.6900]]) * 0.001,
         np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
         np.eye(3),
         None),
        # TEST CASE 15 : darex #12 - numerically least accurate example
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1]]),
         np.eye(2),
         np.array([[1]]),
         None),
        # TEST CASE 16 : darex #13
        (np.array([[16, 10, -2],
                   [10, 13, -8],
                   [-2, -8, 7]]) * (1/9),
         np.eye(3),
         1e6 * np.eye(3),
         1e6 * np.eye(3),
         None),
        # TEST CASE 17 : darex #14
        (np.array([[1 - 1/1e8, 0, 0, 0],
                   [1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0]]),
         np.array([[1e-08], [0], [0], [0]]),
         np.diag([0, 0, 0, 1]),
         np.array([[0.25]]),
         None),
        # TEST CASE 18 : darex #15
        (np.eye(100, k=1),
         np.flipud(np.eye(100, 1)),
         np.eye(100),
         np.array([[1]]),
         None)
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13,
                   14, 13, 13, 14, 12, 2, 5, 6, 10)

    def _test_factory(case, dec):
        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_discrete_are(a, b, q, r)
        # Residual of the discrete-time algebraic Riccati equation.
        res = a.conj().T.dot(x.dot(a)) - x + q
        res -= a.conj().T.dot(x.dot(b)).dot(
                    solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
                    )
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])

    # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
    A = np.triu(np.ones((3, 3)))
    A[0, 1] = -1
    B = np.array([[1, 1, 0], [0, 0, 1]]).T
    Q = -2*np.ones_like(A) + np.diag([8, -1, -1.9])
    R = np.diag([-10, 0.1])
    assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
def test_solve_generalized_continuous_are():
    """Residual check of the generalized CARE solver on two random examples.

    The two cases share (a, b, q, r, e) and differ only in the cross term s
    (zeros vs. ones), since no demanding examples exist in the literature.
    """
    cases = [
        # Two random examples differ by s term
        # in the absence of any literature for demanding examples.
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.zeros((3, 2)),
         None),
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.ones((3, 2)),
         None)
        ]
    min_decimal = (10, 10)

    def _check_residual(case, dec):
        """Verify 0 = A'XE + E'XA - (E'XB+S) R^{-1} (E'XB+S)' + Q."""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        sol = solve_continuous_are(a, b, q, r, e, s)
        residual = a.conj().T.dot(sol.dot(e)) + e.conj().T.dot(sol.dot(a)) + q
        gain_term = e.conj().T.dot(sol).dot(b) + s
        residual -= gain_term.dot(solve(np.atleast_2d(r), gain_term.conj().T))
        assert_array_almost_equal(residual, np.zeros_like(residual),
                                  decimal=dec)

    for case, dec in zip(cases, min_decimal):
        _check_residual(case, dec)
def test_solve_generalized_discrete_are():
    """Residual check of the generalized DARE solver.

    Covers two random (a, b, q, r, e, s) examples differing only in s, plus
    a user-reported regression where e is None but s is given.
    """
    # Regression data for the e-is-None case is shipped as an .npz file.
    mat20170120 = _load_data('gendare_20170120_data.npz')
    cases = [
        # Two random examples differ by s term
        # in the absence of any literature for demanding examples.
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.zeros((3, 2)),
         None),
        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
         np.array([[3.815585e-01, 1.868726e-01],
                   [7.655168e-01, 4.897644e-01],
                   [7.951999e-01, 4.455862e-01]]),
         np.eye(3),
         np.eye(2),
         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
         np.ones((3, 2)),
         None),
        # user-reported (under PR-6616) 20-Jan-2017
        # tests against the case where E is None but S is provided
        (mat20170120['A'],
         mat20170120['B'],
         mat20170120['Q'],
         mat20170120['R'],
         None,
         mat20170120['S'],
         None),
        ]
    min_decimal = (11, 11, 16)

    def _test_factory(case, dec):
        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_discrete_are(a, b, q, r, e, s)
        # Substitute the documented defaults so the residual formula below
        # is uniform: e defaults to identity, s to zeros.
        if e is None:
            e = np.eye(a.shape[0])
        if s is None:
            s = np.zeros_like(b)
        # Residual of the generalized discrete-time Riccati equation.
        res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
        res -= (a.conj().T.dot(x.dot(b)) + s).dot(
                    solve(r+b.conj().T.dot(x.dot(b)),
                          (b.conj().T.dot(x.dot(a)) + s.conj().T)
                          )
                    )
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_are_validate_args():
    """Input validation of solve_continuous_are / solve_discrete_are.

    Each nested helper exercises one class of invalid input (non-square,
    incompatible sizes, non-symmetric q/r, numerically singular e/r,
    non-finite entries).  The helpers are invoked explicitly at the end:
    a nested ``def`` is never collected or executed by pytest on its own,
    so without the calls none of these checks would actually run.
    """

    def test_square_shape():
        # a, q, r (and e, when given) must be square matrices.
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nsq, 1, 1, 1)
            assert_raises(ValueError, x, sq, sq, nsq, 1)
            assert_raises(ValueError, x, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, sq, sq, nsq)

    def test_compatible_sizes():
        # b, q, r, e, s must be dimensionally consistent with a.
        nsq = np.ones((3, 2))
        sq = np.eye(4)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, nsq, 1, 1)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
            assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
            assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))

    def test_symmetry():
        # q and r are required to be (Hermitian) symmetric.
        nsym = np.arange(9).reshape(3, 3)
        sym = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sym, sym, nsym, sym)
            assert_raises(ValueError, x, sym, sym, sym, nsym)

    def test_singularity():
        # Numerically singular e (and, for the continuous case, r)
        # matrices are rejected.
        sing = 1e12 * np.ones((3, 3))
        sing[2, 2] -= 1
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, sq, sq, sq, sing)

        assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)

    def test_finiteness():
        # NaN/inf entries in any argument must be rejected.
        nm = np.ones((2, 2)) * np.nan
        sq = np.eye(2)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nm, sq, sq, sq)
            assert_raises(ValueError, x, sq, nm, sq, sq)
            assert_raises(ValueError, x, sq, sq, nm, sq)
            assert_raises(ValueError, x, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)

    # BUG FIX: previously the nested functions were only defined, never
    # called, so none of the validation checks above actually executed.
    test_square_shape()
    test_compatible_sizes()
    test_symmetry()
    test_singularity()
    test_finiteness()
class TestSolveSylvester(object):
    """Residual tests for scipy.linalg.solve_sylvester (A X + X B = C)."""

    cases = [
        # a, b, c all real.
        (np.array([[1, 2], [0, 4]]),
         np.array([[5, 6], [0, 8]]),
         np.array([[9, 10], [11, 12]])),
        # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
        # quasi-triangular form.
        (np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 2.0, 0.0],
                   [0, 0, 3.0, -4],
                   [0, 0, 2, 5]]),
         np.array([[2.0, 0, 0, 1.0],
                   [0, 1.0, 0.0, 0.0],
                   [0, 0, 1.0, -1],
                   [0, 0, 1, 1]]),
         np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 0, 0],
                   [0, 0, 1.0, 0],
                   [0, 0, 0, 1.0]])),
        # a, b, c all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 2j], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and b real; c complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and c complex; b real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; b and c real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # not square matrices, real
        (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5]]),
         np.array([[1, 2], [3, 4], [5, 6]])),
        # not square matrices, complex
        (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5-1j]]),
         np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
    ]

    def check_case(self, a, b, c):
        """Solve A X + X B = C and verify the equation is satisfied."""
        sol = solve_sylvester(a, b, c)
        assert_array_almost_equal(a.dot(sol) + sol.dot(b), c)

    def test_cases(self):
        """Exercise every stored (a, b, c) triple."""
        for a, b, c in self.cases:
            self.check_case(a, b, c)

    def test_trivial(self):
        """With A = I and B = [[1]], the solution is X = C / 2."""
        a = np.eye(2)
        b = np.array([[1.0]])
        c = np.full((2, 1), 2.0)
        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(x, np.full((2, 1), 1.0))
| [
"numpy.load",
"numpy.ones",
"pytest.xfail",
"numpy.arange",
"scipy.linalg.solve_sylvester",
"numpy.diag",
"numpy.atleast_2d",
"numpy.zeros_like",
"os.path.dirname",
"scipy.linalg.solve_continuous_lyapunov",
"scipy.linalg.solve_discrete_are",
"pytest.raises",
"numpy.ones_like",
"scipy.linal... | [((21939, 21958), 'numpy.diag', 'np.diag', (['[-10, 0.1]'], {}), '([-10, 0.1])\n', (21946, 21958), True, 'import numpy as np\n'), ((21963, 22023), 'pytest.raises', 'assert_raises', (['LinAlgError', 'solve_continuous_are', 'A', 'B', 'Q', 'R'], {}), '(LinAlgError, solve_continuous_are, A, B, Q, R)\n', (21976, 22023), True, 'from pytest import raises as assert_raises\n'), ((714, 731), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (721, 731), True, 'import numpy as np\n'), ((4073, 4088), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (4080, 4088), True, 'import numpy as np\n'), ((4102, 4111), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4108, 4111), True, 'import numpy as np\n'), ((4120, 4181), 'pytest.raises', 'assert_raises', (['ValueError', 'solve_continuous_lyapunov', 'nsq', 'sq'], {}), '(ValueError, solve_continuous_lyapunov, nsq, sq)\n', (4133, 4181), True, 'from pytest import raises as assert_raises\n'), ((4190, 4251), 'pytest.raises', 'assert_raises', (['ValueError', 'solve_continuous_lyapunov', 'sq', 'nsq'], {}), '(ValueError, solve_continuous_lyapunov, sq, nsq)\n', (4203, 4251), True, 'from pytest import raises as assert_raises\n'), ((4384, 4415), 'scipy.linalg.solve_continuous_lyapunov', 'solve_continuous_lyapunov', (['a', 'q'], {}), '(a, q)\n', (4409, 4415), False, 'from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov\n'), ((4595, 4639), 'scipy.linalg.solve_discrete_lyapunov', 'solve_discrete_lyapunov', (['a', 'q'], {'method': 'method'}), '(a, q, method=method)\n', (4618, 4639), False, 'from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov\n'), ((12601, 12633), 'scipy.linalg.solve_continuous_are', 'solve_continuous_are', (['a', 'b', 'q', 'r'], {}), '(a, b, q, r)\n', (12621, 12633), False, 'from scipy.linalg import solve_continuous_are, solve_discrete_are\n'), ((21328, 21358), 'scipy.linalg.solve_discrete_are', 'solve_discrete_are', (['a', 'b', 'q', 'r'], {}), '(a, b, 
q, r)\n', (21346, 21358), False, 'from scipy.linalg import solve_continuous_are, solve_discrete_are\n'), ((21802, 21817), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (21809, 21817), True, 'import numpy as np\n'), ((21844, 21876), 'numpy.array', 'np.array', (['[[1, 1, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [0, 0, 1]])\n', (21852, 21876), True, 'import numpy as np\n'), ((21908, 21930), 'numpy.diag', 'np.diag', (['[8, -1, -1.9]'], {}), '([8, -1, -1.9])\n', (21915, 21930), True, 'import numpy as np\n'), ((23696, 23734), 'scipy.linalg.solve_continuous_are', 'solve_continuous_are', (['a', 'b', 'q', 'r', 'e', 's'], {}), '(a, b, q, r, e, s)\n', (23716, 23734), False, 'from scipy.linalg import solve_continuous_are, solve_discrete_are\n'), ((26102, 26138), 'scipy.linalg.solve_discrete_are', 'solve_discrete_are', (['a', 'b', 'q', 'r', 'e', 's'], {}), '(a, b, q, r, e, s)\n', (26120, 26138), False, 'from scipy.linalg import solve_continuous_are, solve_discrete_are\n'), ((26772, 26787), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (26779, 26787), True, 'import numpy as np\n'), ((26801, 26810), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (26807, 26810), True, 'import numpy as np\n'), ((27152, 27167), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (27159, 27167), True, 'import numpy as np\n'), ((27181, 27190), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (27187, 27190), True, 'import numpy as np\n'), ((27652, 27661), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (27658, 27661), True, 'import numpy as np\n'), ((27951, 27960), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (27957, 27960), True, 'import numpy as np\n'), ((28094, 28159), 'pytest.raises', 'assert_raises', (['ValueError', 'solve_continuous_are', 'sq', 'sq', 'sq', 'sing'], {}), '(ValueError, solve_continuous_are, sq, sq, sq, sing)\n', (28107, 28159), True, 'from pytest import raises as assert_raises\n'), ((28239, 28248), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (28245, 28248), 
True, 'import numpy as np\n'), ((30551, 30575), 'scipy.linalg.solve_sylvester', 'solve_sylvester', (['a', 'b', 'c'], {}), '(a, b, c)\n', (30566, 30575), False, 'from scipy.linalg import solve_sylvester\n'), ((30797, 30831), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (30805, 30831), True, 'import numpy as np\n'), ((30844, 30861), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (30852, 30861), True, 'import numpy as np\n'), ((30922, 30946), 'scipy.linalg.solve_sylvester', 'solve_sylvester', (['a', 'b', 'c'], {}), '(a, b, c)\n', (30937, 30946), False, 'from scipy.linalg import solve_sylvester\n'), ((635, 660), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (650, 660), False, 'import os\n'), ((828, 854), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (836, 854), True, 'import numpy as np\n'), ((865, 894), 'numpy.array', 'np.array', (['[[9, 10], [11, 12]]'], {}), '([[9, 10], [11, 12]])\n', (873, 894), True, 'import numpy as np\n'), ((934, 982), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]]'], {}), '([[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]])\n', (942, 982), True, 'import numpy as np\n'), ((987, 1043), 'numpy.array', 'np.array', (['[[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]]'], {}), '([[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]])\n', (995, 1043), True, 'import numpy as np\n'), ((1072, 1106), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 5.0]]'], {}), '([[1.0, 2.0], [3.0, 5.0]])\n', (1080, 1106), True, 'import numpy as np\n'), ((1117, 1173), 'numpy.array', 'np.array', (['[[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]]'], {}), '([[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]])\n', (1125, 1173), True, 'import numpy as np\n'), ((1202, 1250), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]]'], {}), '([[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]])\n', (1210, 1250), True, 'import numpy as np\n'), ((1255, 1290), 
'numpy.array', 'np.array', (['[[2.0, 2.0], [-1.0, 2.0]]'], {}), '([[2.0, 2.0], [-1.0, 2.0]])\n', (1263, 1290), True, 'import numpy as np\n'), ((1343, 1443), 'numpy.array', 'np.array', (['[[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], [1, 5, 2, 0, 7], [5, 3,\n 3, 1, 5]]'], {}), '([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], [1, 5, 2, 0, 7\n ], [5, 3, 3, 1, 5]])\n', (1351, 1443), True, 'import numpy as np\n'), ((1468, 1568), 'numpy.array', 'np.array', (['[[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], [0, 2, 0, 1, 0], [1, 0,\n 3, 0, 4]]'], {}), '([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], [0, 2, 0, 1, 0\n ], [1, 0, 3, 0, 4]])\n', (1476, 1568), True, 'import numpy as np\n'), ((1665, 3252), 'numpy.array', 'np.array', (['[[0.1 + 0.0j, 0.091 + 0.0j, 0.082 + 0.0j, 0.073 + 0.0j, 0.064 + 0.0j, 0.055 +\n 0.0j, 0.046 + 0.0j, 0.037 + 0.0j, 0.028 + 0.0j, 0.019 + 0.0j, 0.01 + \n 0.0j], [1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 
0.0 + 0.0j], [0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 1.0 + 0.0j, 0.0 + 0.0j]]'], {}), '([[0.1 + 0.0j, 0.091 + 0.0j, 0.082 + 0.0j, 0.073 + 0.0j, 0.064 + \n 0.0j, 0.055 + 0.0j, 0.046 + 0.0j, 0.037 + 0.0j, 0.028 + 0.0j, 0.019 + \n 0.0j, 0.01 + 0.0j], [1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, \n 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j,\n 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + \n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j], [0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 +\n 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j]])\n', (1673, 3252), True, 'import numpy as np\n'), ((3669, 3679), 'numpy.eye', 'np.eye', (['(11)'], {}), '(11)\n', (3675, 3679), True, 'import numpy as np\n'), 
((3744, 3777), 'numpy.matrix', 'np.matrix', (['[[0, 1], [-1 / 2, -1]]'], {}), '([[0, 1], [-1 / 2, -1]])\n', (3753, 3777), True, 'import numpy as np\n'), ((3896, 3929), 'numpy.matrix', 'np.matrix', (['[[0, 1], [-1 / 2, -1]]'], {}), '([[0, 1], [-1 / 2, -1]])\n', (3905, 3929), True, 'import numpy as np\n'), ((4317, 4326), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4323, 4326), True, 'import numpy as np\n'), ((5922, 5939), 'numpy.diag', 'np.diag', (['[1.0]', '(1)'], {}), '([1.0], 1)\n', (5929, 5939), True, 'import numpy as np\n'), ((5949, 5969), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (5957, 5969), True, 'import numpy as np\n'), ((5980, 6000), 'scipy.linalg.block_diag', 'block_diag', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (5990, 6000), False, 'from scipy.linalg import block_diag, solve, LinAlgError\n'), ((6069, 6101), 'numpy.array', 'np.array', (['[[4, 3], [-4.5, -3.5]]'], {}), '([[4, 3], [-4.5, -3.5]])\n', (6077, 6101), True, 'import numpy as np\n'), ((6112, 6133), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (6120, 6133), True, 'import numpy as np\n'), ((6144, 6172), 'numpy.array', 'np.array', (['[[9, 6], [6, 4.0]]'], {}), '([[9, 6], [6, 4.0]])\n', (6152, 6172), True, 'import numpy as np\n'), ((6242, 6353), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, -1.89, 0.39, -5.53], [0, -0.034, -2.98, 2.43], [0.034, -\n 0.0011, -0.99, -0.21]]'], {}), '([[0, 1, 0, 0], [0, -1.89, 0.39, -5.53], [0, -0.034, -2.98, 2.43],\n [0.034, -0.0011, -0.99, -0.21]])\n', (6250, 6353), True, 'import numpy as np\n'), ((6417, 6477), 'numpy.array', 'np.array', (['[[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]'], {}), '([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]])\n', (6425, 6477), True, 'import numpy as np\n'), ((6488, 6623), 'numpy.array', 'np.array', (['[[2.313, 2.727, 0.688, 0.023], [2.727, 4.271, 1.148, 0.323], [0.688, 1.148,\n 0.313, 0.102], [0.023, 0.323, 0.102, 0.083]]'], {}), '([[2.313, 2.727, 0.688, 0.023], [2.727, 
4.271, 1.148, 0.323], [\n 0.688, 1.148, 0.313, 0.102], [0.023, 0.323, 0.102, 0.083]])\n', (6496, 6623), True, 'import numpy as np\n'), ((6686, 6695), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6692, 6695), True, 'import numpy as np\n'), ((6754, 7085), 'numpy.array', 'np.array', (['[[-0.991, 0.529, 0, 0, 0, 0, 0, 0], [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],\n [0, 0.522, -1.118, 0.596, 0, 0, 0, 0], [0, 0, 0.522, -1.548, 0.718, 0, \n 0, 0], [0, 0, 0, 0.922, -1.64, 0.799, 0, 0], [0, 0, 0, 0, 0.922, -1.721,\n 0.901, 0], [0, 0, 0, 0, 0, 0.922, -1.823, 1.021], [0, 0, 0, 0, 0, 0, \n 0.922, -1.943]]'], {}), '([[-0.991, 0.529, 0, 0, 0, 0, 0, 0], [0.522, -1.051, 0.596, 0, 0, 0,\n 0, 0], [0, 0.522, -1.118, 0.596, 0, 0, 0, 0], [0, 0, 0.522, -1.548, \n 0.718, 0, 0, 0], [0, 0, 0, 0.922, -1.64, 0.799, 0, 0], [0, 0, 0, 0, \n 0.922, -1.721, 0.901, 0], [0, 0, 0, 0, 0, 0.922, -1.823, 1.021], [0, 0,\n 0, 0, 0, 0, 0.922, -1.943]])\n', (6762, 7085), True, 'import numpy as np\n'), ((7389, 7754), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1], [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, \n 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0], [0.0, 0.0, 0.0, \n 1.0, 0.0, 0.0, 0.0, 0.0], [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.1, 0.0], [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1], [0.0, 1.0, 0.0, 0.0, \n 0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0], [0.0, \n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0,\n 0.0], [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.1, 0.0], [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]])\n', (7397, 7754), True, 'import numpy as np\n'), ((7879, 7888), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7885, 7888), True, 'import numpy as np\n'), ((7947, 8441), 'numpy.array', 'np.array', (['[[-4.019, 5.12, 0.0, 0.0, -2.082, 0.0, 0.0, 0.0, 0.87], [-0.346, 0.986, 
0.0,\n 0.0, -2.34, 0.0, 0.0, 0.0, 0.97], [-7.909, 15.407, -4.069, 0.0, -6.45, \n 0.0, 0.0, 0.0, 2.68], [-21.816, 35.606, -0.339, -3.87, -17.8, 0.0, 0.0,\n 0.0, 7.39], [-60.196, 98.188, -7.907, 0.34, -53.008, 0.0, 0.0, 0.0, \n 20.4], [0, 0, 0, 0, 94.0, -147.2, 0.0, 53.2, 0.0], [0, 0, 0, 0, 0, 94.0,\n -147.2, 0, 0], [0, 0, 0, 0, 0, 12.8, 0.0, -31.6, 0], [0, 0, 0, 0, 12.8,\n 0.0, 0.0, 18.8, -31.6]]'], {}), '([[-4.019, 5.12, 0.0, 0.0, -2.082, 0.0, 0.0, 0.0, 0.87], [-0.346, \n 0.986, 0.0, 0.0, -2.34, 0.0, 0.0, 0.0, 0.97], [-7.909, 15.407, -4.069, \n 0.0, -6.45, 0.0, 0.0, 0.0, 2.68], [-21.816, 35.606, -0.339, -3.87, -\n 17.8, 0.0, 0.0, 0.0, 7.39], [-60.196, 98.188, -7.907, 0.34, -53.008, \n 0.0, 0.0, 0.0, 20.4], [0, 0, 0, 0, 94.0, -147.2, 0.0, 53.2, 0.0], [0, 0,\n 0, 0, 0, 94.0, -147.2, 0, 0], [0, 0, 0, 0, 0, 12.8, 0.0, -31.6, 0], [0,\n 0, 0, 0, 12.8, 0.0, 0.0, 18.8, -31.6]])\n', (7955, 8441), True, 'import numpy as np\n'), ((8540, 8740), 'numpy.array', 'np.array', (['[[0.01, -0.011, -0.151], [0.003, -0.021, 0.0], [0.009, -0.059, 0.0], [0.024,\n -0.162, 0.0], [0.068, -0.445, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[0.01, -0.011, -0.151], [0.003, -0.021, 0.0], [0.009, -0.059, 0.0\n ], [0.024, -0.162, 0.0], [0.068, -0.445, 0.0], [0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (8548, 8740), True, 'import numpy as np\n'), ((8926, 8935), 'numpy.eye', 'np.eye', (['(9)'], {}), '(9)\n', (8932, 8935), True, 'import numpy as np\n'), ((8946, 8955), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8952, 8955), True, 'import numpy as np\n'), ((9106, 9135), 'numpy.array', 'np.array', (['[[1, 0], [0, -2.0]]'], {}), '([[1, 0], [0, -2.0]])\n', (9114, 9135), True, 'import numpy as np\n'), ((9145, 9169), 'numpy.array', 'np.array', (['[[1e-06], [0]]'], {}), '([[1e-06], [0]])\n', (9153, 9169), True, 'import numpy as np\n'), ((9179, 9194), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9186, 9194), True, 
'import numpy as np\n'), ((9285, 9308), 'scipy.linalg.block_diag', 'block_diag', (['(-0.1)', '(-0.02)'], {}), '(-0.1, -0.02)\n', (9295, 9308), False, 'from scipy.linalg import block_diag, solve, LinAlgError\n'), ((9319, 9356), 'numpy.array', 'np.array', (['[[0.1, 0.0], [0.001, 0.01]]'], {}), '([[0.1, 0.0], [0.001, 0.01]])\n', (9327, 9356), True, 'import numpy as np\n'), ((9372, 9410), 'numpy.array', 'np.array', (['[[100, 1000], [1000, 10000]]'], {}), '([[100, 1000], [1000, 10000]])\n', (9380, 9410), True, 'import numpy as np\n'), ((9517, 9551), 'numpy.array', 'np.array', (['[[0, 1000000.0], [0, 0]]'], {}), '([[0, 1000000.0], [0, 0]])\n', (9525, 9551), True, 'import numpy as np\n'), ((9556, 9578), 'numpy.array', 'np.array', (['[[0], [1.0]]'], {}), '([[0], [1.0]])\n', (9564, 9578), True, 'import numpy as np\n'), ((9588, 9597), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (9594, 9597), True, 'import numpy as np\n'), ((9670, 9714), 'numpy.array', 'np.array', (['[[1.0000001, 1], [1.0, 1.0000001]]'], {}), '([[1.0000001, 1], [1.0, 1.0000001]])\n', (9678, 9714), True, 'import numpy as np\n'), ((9724, 9733), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (9730, 9733), True, 'import numpy as np\n'), ((9744, 9753), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (9750, 9753), True, 'import numpy as np\n'), ((9764, 9773), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (9770, 9773), True, 'import numpy as np\n'), ((9834, 9862), 'numpy.array', 'np.array', (['[[3, 1.0], [4, 2]]'], {}), '([[3, 1.0], [4, 2]])\n', (9842, 9862), True, 'import numpy as np\n'), ((9872, 9892), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (9880, 9892), True, 'import numpy as np\n'), ((9903, 9936), 'numpy.array', 'np.array', (['[[-11, -5], [-5, -2.0]]'], {}), '([[-11, -5], [-5, -2.0]])\n', (9911, 9936), True, 'import numpy as np\n'), ((10159, 10168), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10165, 10168), True, 'import numpy as np\n'), ((10470, 10579), 'numpy.array', 
'np.array', (['[[0, 0.4, 0, 0], [0, 0, 0.345, 0], [0, -524000.0, -465000.0, 262000.0], [0,\n 0, 0, -1000000.0]]'], {}), '([[0, 0.4, 0, 0], [0, 0, 0.345, 0], [0, -524000.0, -465000.0, \n 262000.0], [0, 0, 0, -1000000.0]])\n', (10478, 10579), True, 'import numpy as np\n'), ((10672, 10693), 'numpy.diag', 'np.diag', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (10679, 10693), True, 'import numpy as np\n'), ((10767, 10858), 'numpy.array', 'np.array', (['[[-1e-06, 1, 0, 0], [-1, -1e-06, 0, 0], [0, 0, 1e-06, 1], [0, 0, -1, 1e-06]]'], {}), '([[-1e-06, 1, 0, 0], [-1, -1e-06, 0, 0], [0, 0, 1e-06, 1], [0, 0, -\n 1, 1e-06]])\n', (10775, 10858), True, 'import numpy as np\n'), ((10917, 10932), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (10924, 10932), True, 'import numpy as np\n'), ((10943, 10958), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (10950, 10958), True, 'import numpy as np\n'), ((11273, 11283), 'numpy.eye', 'np.eye', (['(64)'], {}), '(64)\n', (11279, 11283), True, 'import numpy as np\n'), ((11294, 11304), 'numpy.eye', 'np.eye', (['(64)'], {}), '(64)\n', (11300, 11304), True, 'import numpy as np\n'), ((11315, 11325), 'numpy.eye', 'np.eye', (['(64)'], {}), '(64)\n', (11321, 11325), True, 'import numpy as np\n'), ((12554, 12587), 'pytest.xfail', 'pytest.xfail', ([], {'reason': 'knownfailure'}), '(reason=knownfailure)\n', (12566, 12587), False, 'import pytest\n'), ((12820, 12838), 'numpy.zeros_like', 'np.zeros_like', (['res'], {}), '(res)\n', (12833, 12838), True, 'import numpy as np\n'), ((13781, 13818), 'numpy.array', 'np.array', (['[[2, 1 - 2.0j], [0, -3.0j]]'], {}), '([[2, 1 - 2.0j], [0, -3.0j]])\n', (13789, 13818), True, 'import numpy as np\n'), ((13823, 13843), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (13831, 13843), True, 'import numpy as np\n'), ((13854, 13880), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (13862, 13880), True, 'import numpy as np\n'), ((13891, 13906), 
'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (13899, 13906), True, 'import numpy as np\n'), ((13980, 14007), 'numpy.array', 'np.array', (['[[2, 1], [0, -1]]'], {}), '([[2, 1], [0, -1]])\n', (13988, 14007), True, 'import numpy as np\n'), ((14018, 14045), 'numpy.array', 'np.array', (['[[-2.0j], [1.0j]]'], {}), '([[-2.0j], [1.0j]])\n', (14026, 14045), True, 'import numpy as np\n'), ((14052, 14078), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (14060, 14078), True, 'import numpy as np\n'), ((14089, 14104), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14097, 14104), True, 'import numpy as np\n'), ((14179, 14206), 'numpy.array', 'np.array', (['[[3, 1], [0, -1]]'], {}), '([[3, 1], [0, -1]])\n', (14187, 14206), True, 'import numpy as np\n'), ((14217, 14243), 'numpy.array', 'np.array', (['[[1, 2], [1, 3]]'], {}), '([[1, 2], [1, 3]])\n', (14225, 14243), True, 'import numpy as np\n'), ((14254, 14294), 'numpy.array', 'np.array', (['[[1, 1 + 1.0j], [1 - 1.0j, 2]]'], {}), '([[1, 1 + 1.0j], [1 - 1.0j, 2]])\n', (14262, 14294), True, 'import numpy as np\n'), ((14297, 14330), 'numpy.array', 'np.array', (['[[2, -2.0j], [2.0j, 3]]'], {}), '([[2, -2.0j], [2.0j, 3]])\n', (14305, 14330), True, 'import numpy as np\n'), ((14412, 14539), 'numpy.array', 'np.array', (['[[0.63399379, 0.54906824, 0.76253406], [0.5404729, 0.53745766, 0.08731853],\n [0.27524045, 0.84922129, 0.4681622]]'], {}), '([[0.63399379, 0.54906824, 0.76253406], [0.5404729, 0.53745766, \n 0.08731853], [0.27524045, 0.84922129, 0.4681622]])\n', (14420, 14539), True, 'import numpy as np\n'), ((14583, 14635), 'numpy.array', 'np.array', (['[[0.96861695], [0.05532739], [0.78934047]]'], {}), '([[0.96861695], [0.05532739], [0.78934047]])\n', (14591, 14635), True, 'import numpy as np\n'), ((14646, 14655), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14652, 14655), True, 'import numpy as np\n'), ((14666, 14675), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (14672, 14675), 
True, 'import numpy as np\n'), ((14735, 14767), 'numpy.array', 'np.array', (['[[4, 3], [-4.5, -3.5]]'], {}), '([[4, 3], [-4.5, -3.5]])\n', (14743, 14767), True, 'import numpy as np\n'), ((14778, 14799), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (14786, 14799), True, 'import numpy as np\n'), ((14810, 14836), 'numpy.array', 'np.array', (['[[9, 6], [6, 4]]'], {}), '([[9, 6], [6, 4]])\n', (14818, 14836), True, 'import numpy as np\n'), ((14847, 14862), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (14855, 14862), True, 'import numpy as np\n'), ((14922, 14958), 'numpy.array', 'np.array', (['[[0.9512, 0], [0, 0.9048]]'], {}), '([[0.9512, 0], [0, 0.9048]])\n', (14930, 14958), True, 'import numpy as np\n'), ((14969, 15013), 'numpy.array', 'np.array', (['[[4.877, 4.877], [-1.1895, 3.569]]'], {}), '([[4.877, 4.877], [-1.1895, 3.569]])\n', (14977, 15013), True, 'import numpy as np\n'), ((15024, 15057), 'numpy.array', 'np.array', (['[[0.005, 0], [0, 0.02]]'], {}), '([[0.005, 0], [0, 0.02]])\n', (15032, 15057), True, 'import numpy as np\n'), ((15068, 15098), 'numpy.array', 'np.array', (['[[1 / 3, 0], [0, 3]]'], {}), '([[1 / 3, 0], [0, 3]])\n', (15076, 15098), True, 'import numpy as np\n'), ((15156, 15183), 'numpy.array', 'np.array', (['[[2, -1], [1, 0]]'], {}), '([[2, -1], [1, 0]])\n', (15164, 15183), True, 'import numpy as np\n'), ((15194, 15214), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (15202, 15214), True, 'import numpy as np\n'), ((15225, 15251), 'numpy.array', 'np.array', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (15233, 15251), True, 'import numpy as np\n'), ((15262, 15277), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (15270, 15277), True, 'import numpy as np\n'), ((15368, 15395), 'numpy.array', 'np.array', (['[[0, 1], [0, -1]]'], {}), '([[0, 1], [0, -1]])\n', (15376, 15395), True, 'import numpy as np\n'), ((15406, 15432), 'numpy.array', 'np.array', (['[[1, 0], [2, 1]]'], {}), '([[1, 
0], [2, 1]])\n', (15414, 15432), True, 'import numpy as np\n'), ((15492, 15518), 'numpy.array', 'np.array', (['[[9, 3], [3, 1]]'], {}), '([[9, 3], [3, 1]])\n', (15500, 15518), True, 'import numpy as np\n'), ((15578, 15604), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (15586, 15604), True, 'import numpy as np\n'), ((15615, 15635), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (15623, 15635), True, 'import numpy as np\n'), ((15646, 15672), 'numpy.array', 'np.array', (['[[1, 2], [2, 4]]'], {}), '([[1, 2], [2, 4]])\n', (15654, 15672), True, 'import numpy as np\n'), ((15683, 15698), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (15691, 15698), True, 'import numpy as np\n'), ((15758, 15862), 'numpy.array', 'np.array', (['[[0.998, 0.067, 0, 0], [-0.067, 0.998, 0, 0], [0, 0, 0.998, 0.153], [0, 0, \n -0.153, 0.998]]'], {}), '([[0.998, 0.067, 0, 0], [-0.067, 0.998, 0, 0], [0, 0, 0.998, 0.153],\n [0, 0, -0.153, 0.998]])\n', (15766, 15862), True, 'import numpy as np\n'), ((15924, 15998), 'numpy.array', 'np.array', (['[[0.0033, 0.02], [0.1, -0.0007], [0.04, 0.0073], [-0.0028, 0.1]]'], {}), '([[0.0033, 0.02], [0.1, -0.0007], [0.04, 0.0073], [-0.0028, 0.1]])\n', (15932, 15998), True, 'import numpy as np\n'), ((16074, 16177), 'numpy.array', 'np.array', (['[[1.87, 0, 0, -0.244], [0, 0.744, 0.205, 0], [0, 0.205, 0.589, 0], [-0.244,\n 0, 0, 1.048]]'], {}), '([[1.87, 0, 0, -0.244], [0, 0.744, 0.205, 0], [0, 0.205, 0.589, 0],\n [-0.244, 0, 0, 1.048]])\n', (16082, 16177), True, 'import numpy as np\n'), ((16241, 16250), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (16247, 16250), True, 'import numpy as np\n'), ((16311, 16498), 'numpy.array', 'np.array', (['[[0.98475, -0.079903, 0.0009054, -0.0010765], [0.041588, 0.99899, -0.035855,\n 0.012684], [-0.54662, 0.044916, -0.32991, 0.19318], [2.6624, -0.10045, \n -0.92455, -0.26325]]'], {}), '([[0.98475, -0.079903, 0.0009054, -0.0010765], [0.041588, 0.99899, \n -0.035855, 
0.012684], [-0.54662, 0.044916, -0.32991, 0.19318], [2.6624,\n -0.10045, -0.92455, -0.26325]])\n', (16319, 16498), True, 'import numpy as np\n'), ((16565, 16677), 'numpy.array', 'np.array', (['[[0.0037112, 0.0007361], [-0.087051, 9.3411e-06], [-1.19844, -0.00041378],\n [-3.1927, 0.00092535]]'], {}), '([[0.0037112, 0.0007361], [-0.087051, 9.3411e-06], [-1.19844, -\n 0.00041378], [-3.1927, 0.00092535]])\n', (16573, 16677), True, 'import numpy as np\n'), ((16765, 16774), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (16771, 16774), True, 'import numpy as np\n'), ((16835, 16963), 'numpy.array', 'np.array', (['[[-0.6, -2.2, -3.6, -5.400018], [1.0, 0.6, 0.8, 3.399982], [0.0, 1.0, 1.8, \n 3.799982], [0.0, 0.0, 0.0, -0.999982]]'], {}), '([[-0.6, -2.2, -3.6, -5.400018], [1.0, 0.6, 0.8, 3.399982], [0.0, \n 1.0, 1.8, 3.799982], [0.0, 0.0, 0.0, -0.999982]])\n', (16843, 16963), True, 'import numpy as np\n'), ((17102, 17211), 'numpy.array', 'np.array', (['[[1.0, -1.0, -1.0, -1.0], [0.0, 1.0, -1.0, -1.0], [0.0, 0.0, 1.0, -1.0], [\n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[1.0, -1.0, -1.0, -1.0], [0.0, 1.0, -1.0, -1.0], [0.0, 0.0, 1.0, \n -1.0], [0.0, 0.0, 0.0, 1.0]])\n', (17110, 17211), True, 'import numpy as np\n'), ((17274, 17343), 'numpy.array', 'np.array', (['[[2, 1, 3, 6], [1, 2, 2, 5], [3, 2, 6, 11], [6, 5, 11, 22]]'], {}), '([[2, 1, 3, 6], [1, 2, 2, 5], [3, 2, 6, 11], [6, 5, 11, 22]])\n', (17282, 17343), True, 'import numpy as np\n'), ((17411, 17420), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (17417, 17420), True, 'import numpy as np\n'), ((18008, 18017), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (18014, 18017), True, 'import numpy as np\n'), ((18028, 18037), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (18034, 18037), True, 'import numpy as np\n'), ((18206, 18342), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, -\n 1, 0], [0, 0, 0, -1, 1, 0], [0, 0, 0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 
0], [0, 0, 0, 0, 0, 0], [0, 0,\n 0, 1, -1, 0], [0, 0, 0, -1, 1, 0], [0, 0, 0, 0, 0, 0]])\n', (18214, 18342), True, 'import numpy as np\n'), ((18444, 18470), 'numpy.array', 'np.array', (['[[3, 0], [0, 1]]'], {}), '([[3, 0], [0, 1]])\n', (18452, 18470), True, 'import numpy as np\n'), ((19659, 19697), 'numpy.diag', 'np.diag', (['[50, 0, 0, 0, 50, 0, 0, 0, 0]'], {}), '([50, 0, 0, 0, 50, 0, 0, 0, 0])\n', (19666, 19697), True, 'import numpy as np\n'), ((19708, 19717), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (19714, 19717), True, 'import numpy as np\n'), ((19816, 19850), 'numpy.array', 'np.array', (['[[0, 1000000.0], [0, 0]]'], {}), '([[0, 1000000.0], [0, 0]])\n', (19824, 19850), True, 'import numpy as np\n'), ((19855, 19875), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (19863, 19875), True, 'import numpy as np\n'), ((19886, 19895), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (19892, 19895), True, 'import numpy as np\n'), ((19906, 19921), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (19914, 19921), True, 'import numpy as np\n'), ((20089, 20098), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (20095, 20098), True, 'import numpy as np\n'), ((20212, 20300), 'numpy.array', 'np.array', (['[[1 - 1 / 100000000.0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]'], {}), '([[1 - 1 / 100000000.0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0,\n 1, 0]])\n', (20220, 20300), True, 'import numpy as np\n'), ((20351, 20385), 'numpy.array', 'np.array', (['[[1e-08], [0], [0], [0]]'], {}), '([[1e-08], [0], [0], [0]])\n', (20359, 20385), True, 'import numpy as np\n'), ((20396, 20417), 'numpy.diag', 'np.diag', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (20403, 20417), True, 'import numpy as np\n'), ((20428, 20446), 'numpy.array', 'np.array', (['[[0.25]]'], {}), '([[0.25]])\n', (20436, 20446), True, 'import numpy as np\n'), ((20508, 20524), 'numpy.eye', 'np.eye', (['(100)'], {'k': '(1)'}), '(100, k=1)\n', (20514, 20524), True, 'import numpy as 
np\n'), ((20571, 20582), 'numpy.eye', 'np.eye', (['(100)'], {}), '(100)\n', (20577, 20582), True, 'import numpy as np\n'), ((20593, 20608), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (20601, 20608), True, 'import numpy as np\n'), ((21281, 21314), 'pytest.xfail', 'pytest.xfail', ([], {'reason': 'knownfailure'}), '(reason=knownfailure)\n', (21293, 21314), False, 'import pytest\n'), ((21592, 21610), 'numpy.zeros_like', 'np.zeros_like', (['res'], {}), '(res)\n', (21605, 21610), True, 'import numpy as np\n'), ((21890, 21905), 'numpy.ones_like', 'np.ones_like', (['A'], {}), '(A)\n', (21902, 21905), True, 'import numpy as np\n'), ((22208, 22329), 'numpy.array', 'np.array', (['[[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, 0.03444608], [\n 0.09713178, 0.3170995, 0.4387444]]'], {}), '([[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, \n 0.03444608], [0.09713178, 0.3170995, 0.4387444]])\n', (22216, 22329), True, 'import numpy as np\n'), ((22399, 22486), 'numpy.array', 'np.array', (['[[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]'], {}), '([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, \n 0.4455862]])\n', (22407, 22486), True, 'import numpy as np\n'), ((22548, 22557), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (22554, 22557), True, 'import numpy as np\n'), ((22568, 22577), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (22574, 22577), True, 'import numpy as np\n'), ((22588, 22706), 'numpy.array', 'np.array', (['[[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, 0.1189977], [\n 0.7546867, 0.655098, 0.4983641]]'], {}), '([[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, \n 0.1189977], [0.7546867, 0.655098, 0.4983641]])\n', (22596, 22706), True, 'import numpy as np\n'), ((22779, 22795), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (22787, 22795), True, 'import numpy as np\n'), ((22822, 22943), 'numpy.array', 'np.array', (['[[0.276923, 0.8234578, 0.950222], [0.04617139, 
0.6948286, 0.03444608], [\n 0.09713178, 0.3170995, 0.4387444]]'], {}), '([[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, \n 0.03444608], [0.09713178, 0.3170995, 0.4387444]])\n', (22830, 22943), True, 'import numpy as np\n'), ((23013, 23100), 'numpy.array', 'np.array', (['[[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]'], {}), '([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, \n 0.4455862]])\n', (23021, 23100), True, 'import numpy as np\n'), ((23162, 23171), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (23168, 23171), True, 'import numpy as np\n'), ((23182, 23191), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (23188, 23191), True, 'import numpy as np\n'), ((23202, 23320), 'numpy.array', 'np.array', (['[[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, 0.1189977], [\n 0.7546867, 0.655098, 0.4983641]]'], {}), '([[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, \n 0.1189977], [0.7546867, 0.655098, 0.4983641]])\n', (23210, 23320), True, 'import numpy as np\n'), ((23393, 23408), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (23400, 23408), True, 'import numpy as np\n'), ((23649, 23682), 'pytest.xfail', 'pytest.xfail', ([], {'reason': 'knownfailure'}), '(reason=knownfailure)\n', (23661, 23682), False, 'import pytest\n'), ((23964, 23982), 'numpy.zeros_like', 'np.zeros_like', (['res'], {}), '(res)\n', (23977, 23982), True, 'import numpy as np\n'), ((24324, 24445), 'numpy.array', 'np.array', (['[[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, 0.03444608], [\n 0.09713178, 0.3170995, 0.4387444]]'], {}), '([[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, \n 0.03444608], [0.09713178, 0.3170995, 0.4387444]])\n', (24332, 24445), True, 'import numpy as np\n'), ((24515, 24602), 'numpy.array', 'np.array', (['[[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]'], {}), '([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, \n 0.4455862]])\n', (24523, 24602), 
True, 'import numpy as np\n'), ((24664, 24673), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (24670, 24673), True, 'import numpy as np\n'), ((24684, 24693), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (24690, 24693), True, 'import numpy as np\n'), ((24704, 24822), 'numpy.array', 'np.array', (['[[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, 0.1189977], [\n 0.7546867, 0.655098, 0.4983641]]'], {}), '([[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, \n 0.1189977], [0.7546867, 0.655098, 0.4983641]])\n', (24712, 24822), True, 'import numpy as np\n'), ((24895, 24911), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (24903, 24911), True, 'import numpy as np\n'), ((24938, 25059), 'numpy.array', 'np.array', (['[[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, 0.03444608], [\n 0.09713178, 0.3170995, 0.4387444]]'], {}), '([[0.276923, 0.8234578, 0.950222], [0.04617139, 0.6948286, \n 0.03444608], [0.09713178, 0.3170995, 0.4387444]])\n', (24946, 25059), True, 'import numpy as np\n'), ((25129, 25216), 'numpy.array', 'np.array', (['[[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]'], {}), '([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, \n 0.4455862]])\n', (25137, 25216), True, 'import numpy as np\n'), ((25278, 25287), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (25284, 25287), True, 'import numpy as np\n'), ((25298, 25307), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (25304, 25307), True, 'import numpy as np\n'), ((25318, 25436), 'numpy.array', 'np.array', (['[[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, 0.1189977], [\n 0.7546867, 0.655098, 0.4983641]]'], {}), '([[0.646313, 0.2760251, 0.1626117], [0.7093648, 0.6797027, \n 0.1189977], [0.7546867, 0.655098, 0.4983641]])\n', (25326, 25436), True, 'import numpy as np\n'), ((25509, 25524), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (25516, 25524), True, 'import numpy as np\n'), ((26055, 26088), 'pytest.xfail', 
'pytest.xfail', ([], {'reason': 'knownfailure'}), '(reason=knownfailure)\n', (26067, 26088), False, 'import pytest\n'), ((26177, 26195), 'numpy.eye', 'np.eye', (['a.shape[0]'], {}), '(a.shape[0])\n', (26183, 26195), True, 'import numpy as np\n'), ((26234, 26250), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (26247, 26250), True, 'import numpy as np\n'), ((26577, 26595), 'numpy.zeros_like', 'np.zeros_like', (['res'], {}), '(res)\n', (26590, 26595), True, 'import numpy as np\n'), ((26884, 26926), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'nsq', '(1)', '(1)', '(1)'], {}), '(ValueError, x, nsq, 1, 1, 1)\n', (26897, 26926), True, 'from pytest import raises as assert_raises\n'), ((26939, 26983), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'nsq', '(1)'], {}), '(ValueError, x, sq, sq, nsq, 1)\n', (26952, 26983), True, 'from pytest import raises as assert_raises\n'), ((26996, 27041), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'nsq'], {}), '(ValueError, x, sq, sq, sq, nsq)\n', (27009, 27041), True, 'from pytest import raises as assert_raises\n'), ((27054, 27103), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'sq', 'nsq'], {}), '(ValueError, x, sq, sq, sq, sq, nsq)\n', (27067, 27103), True, 'from pytest import raises as assert_raises\n'), ((27264, 27307), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'nsq', '(1)', '(1)'], {}), '(ValueError, x, sq, nsq, 1, 1)\n', (27277, 27307), True, 'from pytest import raises as assert_raises\n'), ((27320, 27373), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'sq', 'sq', 'nsq'], {}), '(ValueError, x, sq, sq, sq, sq, sq, nsq)\n', (27333, 27373), True, 'from pytest import raises as assert_raises\n'), ((27735, 27784), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sym', 'sym', 'nsym', 'sym'], {}), '(ValueError, x, sym, sym, nsym, sym)\n', (27748, 27784), True, 'from pytest import raises as 
assert_raises\n'), ((27797, 27846), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sym', 'sym', 'sym', 'nsym'], {}), '(ValueError, x, sym, sym, sym, nsym)\n', (27810, 27846), True, 'from pytest import raises as assert_raises\n'), ((27898, 27913), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (27905, 27913), True, 'import numpy as np\n'), ((28034, 28084), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'sq', 'sing'], {}), '(ValueError, x, sq, sq, sq, sq, sing)\n', (28047, 28084), True, 'from pytest import raises as assert_raises\n'), ((28201, 28216), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (28208, 28216), True, 'import numpy as np\n'), ((28322, 28366), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'nm', 'sq', 'sq', 'sq'], {}), '(ValueError, x, nm, sq, sq, sq)\n', (28335, 28366), True, 'from pytest import raises as assert_raises\n'), ((28379, 28423), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'nm', 'sq', 'sq'], {}), '(ValueError, x, sq, nm, sq, sq)\n', (28392, 28423), True, 'from pytest import raises as assert_raises\n'), ((28436, 28480), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'nm', 'sq'], {}), '(ValueError, x, sq, sq, nm, sq)\n', (28449, 28480), True, 'from pytest import raises as assert_raises\n'), ((28493, 28537), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'nm'], {}), '(ValueError, x, sq, sq, sq, nm)\n', (28506, 28537), True, 'from pytest import raises as assert_raises\n'), ((28550, 28598), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'sq', 'nm'], {}), '(ValueError, x, sq, sq, sq, sq, nm)\n', (28563, 28598), True, 'from pytest import raises as assert_raises\n'), ((28611, 28663), 'pytest.raises', 'assert_raises', (['ValueError', 'x', 'sq', 'sq', 'sq', 'sq', 'sq', 'nm'], {}), '(ValueError, x, sq, sq, sq, sq, sq, nm)\n', (28624, 28663), True, 'from pytest import raises as assert_raises\n'), 
((28752, 28778), 'numpy.array', 'np.array', (['[[1, 2], [0, 4]]'], {}), '([[1, 2], [0, 4]])\n', (28760, 28778), True, 'import numpy as np\n'), ((28789, 28815), 'numpy.array', 'np.array', (['[[5, 6], [0, 8]]'], {}), '([[5, 6], [0, 8]])\n', (28797, 28815), True, 'import numpy as np\n'), ((28826, 28855), 'numpy.array', 'np.array', (['[[9, 10], [11, 12]]'], {}), '([[9, 10], [11, 12]])\n', (28834, 28855), True, 'import numpy as np\n'), ((28977, 29054), 'numpy.array', 'np.array', (['[[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]'], {}), '([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]])\n', (28985, 29054), True, 'import numpy as np\n'), ((29122, 29201), 'numpy.array', 'np.array', (['[[2.0, 0, 0, 1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]'], {}), '([[2.0, 0, 0, 1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]])\n', (29130, 29201), True, 'import numpy as np\n'), ((29269, 29343), 'numpy.array', 'np.array', (['[[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]'], {}), '([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])\n', (29277, 29343), True, 'import numpy as np\n'), ((29443, 29491), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]]'], {}), '([[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]])\n', (29451, 29491), True, 'import numpy as np\n'), ((29496, 29532), 'numpy.array', 'np.array', (['[[-1.0, 2.0j], [3.0, 4.0]]'], {}), '([[-1.0, 2.0j], [3.0, 4.0]])\n', (29504, 29532), True, 'import numpy as np\n'), ((29541, 29597), 'numpy.array', 'np.array', (['[[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]]'], {}), '([[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]])\n', (29549, 29597), True, 'import numpy as np\n'), ((29632, 29666), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 5.0]]'], {}), '([[1.0, 2.0], [3.0, 5.0]])\n', (29640, 29666), True, 'import numpy as np\n'), ((29677, 29710), 'numpy.array', 'np.array', (['[[-1.0, 0], [3.0, 4.0]]'], {}), '([[-1.0, 0], [3.0, 4.0]])\n', (29685, 
29710), True, 'import numpy as np\n'), ((29721, 29777), 'numpy.array', 'np.array', (['[[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]]'], {}), '([[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]])\n', (29729, 29777), True, 'import numpy as np\n'), ((29812, 29860), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]]'], {}), '([[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]])\n', (29820, 29860), True, 'import numpy as np\n'), ((29865, 29898), 'numpy.array', 'np.array', (['[[-1.0, 0], [3.0, 4.0]]'], {}), '([[-1.0, 0], [3.0, 4.0]])\n', (29873, 29898), True, 'import numpy as np\n'), ((29909, 29965), 'numpy.array', 'np.array', (['[[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]]'], {}), '([[2.0 - 2.0j, 2.0 + 2.0j], [-1.0 - 1.0j, 2.0]])\n', (29917, 29965), True, 'import numpy as np\n'), ((30000, 30048), 'numpy.array', 'np.array', (['[[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]]'], {}), '([[1.0 + 1.0j, 2.0], [3.0 - 4.0j, 5.0]])\n', (30008, 30048), True, 'import numpy as np\n'), ((30053, 30086), 'numpy.array', 'np.array', (['[[-1.0, 0], [3.0, 4.0]]'], {}), '([[-1.0, 0], [3.0, 4.0]])\n', (30061, 30086), True, 'import numpy as np\n'), ((30097, 30132), 'numpy.array', 'np.array', (['[[2.0, 2.0], [-1.0, 2.0]]'], {}), '([[2.0, 2.0], [-1.0, 2.0]])\n', (30105, 30132), True, 'import numpy as np\n'), ((30180, 30223), 'numpy.array', 'np.array', (['[[8, 1, 6], [3, 5, 7], [4, 9, 2]]'], {}), '([[8, 1, 6], [3, 5, 7], [4, 9, 2]])\n', (30188, 30223), True, 'import numpy as np\n'), ((30234, 30260), 'numpy.array', 'np.array', (['[[2, 3], [4, 5]]'], {}), '([[2, 3], [4, 5]])\n', (30242, 30260), True, 'import numpy as np\n'), ((30271, 30305), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (30279, 30305), True, 'import numpy as np\n'), ((30356, 30409), 'numpy.array', 'np.array', (['[[8, 1.0j, 6 + 2.0j], [3, 5, 7], [4, 9, 2]]'], {}), '([[8, 1.0j, 6 + 2.0j], [3, 5, 7], [4, 9, 2]])\n', (30364, 30409), True, 'import numpy as np\n'), ((30414, 30447), 
'numpy.array', 'np.array', (['[[2, 3], [4, 5 - 1.0j]]'], {}), '([[2, 3], [4, 5 - 1.0j]])\n', (30422, 30447), True, 'import numpy as np\n'), ((30454, 30504), 'numpy.array', 'np.array', (['[[1, 2.0j], [3, 4.0j], [5.0j, 6 + 7.0j]]'], {}), '([[1, 2.0j], [3, 4.0j], [5.0j, 6 + 7.0j]])\n', (30462, 30504), True, 'import numpy as np\n'), ((4477, 4489), 'numpy.dot', 'np.dot', (['a', 'x'], {}), '(a, x)\n', (4483, 4489), True, 'import numpy as np\n'), ((9421, 9436), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9428, 9436), True, 'import numpy as np\n'), ((9439, 9459), 'scipy.linalg.block_diag', 'block_diag', (['(1e-06)', '(0)'], {}), '(1e-06, 0)\n', (9449, 9459), False, 'from scipy.linalg import block_diag, solve, LinAlgError\n'), ((10009, 10119), 'numpy.array', 'np.array', (['[[7000000.0, 2000000.0, -0.0], [2000000.0, 6000000.0, -2000000.0], [0.0, -\n 2000000.0, 5000000.0]]'], {}), '([[7000000.0, 2000000.0, -0.0], [2000000.0, 6000000.0, -2000000.0],\n [0.0, -2000000.0, 5000000.0]])\n', (10017, 10119), True, 'import numpy as np\n'), ((10375, 10384), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10381, 10384), True, 'import numpy as np\n'), ((10633, 10665), 'numpy.array', 'np.array', (['[[0, 0, 0, 1000000.0]]'], {}), '([[0, 0, 0, 1000000.0]])\n', (10641, 10665), True, 'import numpy as np\n'), ((11243, 11262), 'numpy.eye', 'np.eye', (['(64)', '(64)'], {'k': '(1)'}), '(64, 64, k=1)\n', (11249, 11262), True, 'import numpy as np\n'), ((11394, 11408), 'numpy.ones', 'np.ones', (['(20,)'], {}), '((20,))\n', (11401, 11408), True, 'import numpy as np\n'), ((11434, 11447), 'numpy.eye', 'np.eye', (['(21)', '(1)'], {}), '(21, 1)\n', (11440, 11447), True, 'import numpy as np\n'), ((11459, 11472), 'numpy.eye', 'np.eye', (['(21)', '(1)'], {}), '(21, 1)\n', (11465, 11472), True, 'import numpy as np\n'), ((12743, 12759), 'numpy.atleast_2d', 'np.atleast_2d', (['r'], {}), '(r)\n', (12756, 12759), True, 'import numpy as np\n'), ((15443, 15472), 'numpy.array', 'np.array', (['[[-4, 
-4], [-4, 7]]'], {}), '([[-4, -4], [-4, 7]])\n', (15451, 15472), True, 'import numpy as np\n'), ((16740, 16749), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (16746, 16749), True, 'import numpy as np\n'), ((17481, 17706), 'numpy.array', 'np.array', (['[[95.407, 1.9643, 0.3597, 0.0673, 0.019], [40.849, 41.317, 16.084, 4.4679, \n 1.1971], [12.217, 26.326, 36.149, 15.93, 12.383], [4.1118, 12.858, \n 27.209, 21.442, 40.976], [0.1305, 0.5808, 1.875, 3.6162, 94.28]]'], {}), '([[95.407, 1.9643, 0.3597, 0.0673, 0.019], [40.849, 41.317, 16.084,\n 4.4679, 1.1971], [12.217, 26.326, 36.149, 15.93, 12.383], [4.1118, \n 12.858, 27.209, 21.442, 40.976], [0.1305, 0.5808, 1.875, 3.6162, 94.28]])\n', (17489, 17706), True, 'import numpy as np\n'), ((17809, 17913), 'numpy.array', 'np.array', (['[[0.0434, -0.0122], [2.6606, -1.0453], [3.753, -5.51], [3.6076, -6.6], [\n 0.4617, -0.9148]]'], {}), '([[0.0434, -0.0122], [2.6606, -1.0453], [3.753, -5.51], [3.6076, -\n 6.6], [0.4617, -0.9148]])\n', (17817, 17913), True, 'import numpy as np\n'), ((18107, 18116), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (18113, 18116), True, 'import numpy as np\n'), ((18118, 18138), 'numpy.diag', 'np.diag', (['[1, 1]'], {'k': '(1)'}), '([1, 1], k=1)\n', (18125, 18138), True, 'import numpy as np\n'), ((18158, 18167), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (18164, 18167), True, 'import numpy as np\n'), ((18169, 18194), 'numpy.array', 'np.array', (['[[0], [0], [1]]'], {}), '([[0], [0], [1]])\n', (18177, 18194), True, 'import numpy as np\n'), ((18540, 19162), 'numpy.array', 'np.array', (['[[870.1, 135.0, 11.59, 0.5014, -37.22, 0.3484, 0, 4.242, 7.249], [76.55, \n 897.4, 12.72, 0.5504, -40.16, 0.3743, 0, 4.53, 7.499], [-127.2, 357.5, \n 817, 1.455, -102.8, 0.987, 0, 11.85, 18.72], [-363.5, 633.9, 74.91, \n 796.6, -273.5, 2.653, 0, 31.72, 48.82], [-960, 1645.9, -128.9, -5.597, \n 71.42, 7.108, 0, 84.52, 125.9], [-664.4, 112.96, -88.89, -3.854, 84.47,\n 13.6, 0, 144.3, 101.6], [-410.2, 693, -54.71, 
-2.371, 66.49, 12.49, \n 0.1063, 99.97, 69.67], [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0,\n 213.9, 35.54], [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, \n 215.2]]'], {}), '([[870.1, 135.0, 11.59, 0.5014, -37.22, 0.3484, 0, 4.242, 7.249], [\n 76.55, 897.4, 12.72, 0.5504, -40.16, 0.3743, 0, 4.53, 7.499], [-127.2, \n 357.5, 817, 1.455, -102.8, 0.987, 0, 11.85, 18.72], [-363.5, 633.9, \n 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82], [-960, 1645.9, -128.9, -\n 5.597, 71.42, 7.108, 0, 84.52, 125.9], [-664.4, 112.96, -88.89, -3.854,\n 84.47, 13.6, 0, 144.3, 101.6], [-410.2, 693, -54.71, -2.371, 66.49, \n 12.49, 0.1063, 99.97, 69.67], [-179.9, 301.7, -23.93, -1.035, 60.59, \n 22.16, 0, 213.9, 35.54], [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, \n 0, 219.1, 215.2]])\n', (18548, 19162), True, 'import numpy as np\n'), ((19219, 19458), 'numpy.array', 'np.array', (['[[4.76, -0.5701, -83.68], [0.879, -4.773, -2.73], [1.482, -13.12, 8.876], [\n 3.892, -35.13, 24.8], [10.34, -92.75, 66.8], [7.203, -61.59, 38.34], [\n 4.454, -36.83, 20.29], [1.971, -15.54, 6.937], [3.773, -30.28, 14.69]]'], {}), '([[4.76, -0.5701, -83.68], [0.879, -4.773, -2.73], [1.482, -13.12, \n 8.876], [3.892, -35.13, 24.8], [10.34, -92.75, 66.8], [7.203, -61.59, \n 38.34], [4.454, -36.83, 20.29], [1.971, -15.54, 6.937], [3.773, -30.28,\n 14.69]])\n', (19227, 19458), True, 'import numpy as np\n'), ((19983, 20034), 'numpy.array', 'np.array', (['[[16, 10, -2], [10, 13, -8], [-2, -8, 7]]'], {}), '([[16, 10, -2], [10, 13, -8], [-2, -8, 7]])\n', (19991, 20034), True, 'import numpy as np\n'), ((20115, 20124), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (20121, 20124), True, 'import numpy as np\n'), ((20141, 20150), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (20147, 20150), True, 'import numpy as np\n'), ((20545, 20559), 'numpy.eye', 'np.eye', (['(100)', '(1)'], {}), '(100, 1)\n', (20551, 20559), True, 'import numpy as np\n'), ((23887, 23903), 'numpy.atleast_2d', 'np.atleast_2d', (['r'], 
{}), '(r)\n', (23900, 23903), True, 'import numpy as np\n'), ((27423, 27432), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (27429, 27432), True, 'import numpy as np\n'), ((27491, 27500), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (27497, 27500), True, 'import numpy as np\n'), ((27559, 27568), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (27565, 27568), True, 'import numpy as np\n'), ((27611, 27623), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (27620, 27623), True, 'import numpy as np\n'), ((30610, 30622), 'numpy.dot', 'np.dot', (['a', 'x'], {}), '(a, x)\n', (30616, 30622), True, 'import numpy as np\n'), ((30625, 30637), 'numpy.dot', 'np.dot', (['x', 'b'], {}), '(x, b)\n', (30631, 30637), True, 'import numpy as np\n'), ((30874, 30894), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (30882, 30894), True, 'import numpy as np\n'), ((3787, 3804), 'numpy.matrix', 'np.matrix', (['[0, 3]'], {}), '([0, 3])\n', (3796, 3804), True, 'import numpy as np\n'), ((4704, 4716), 'numpy.dot', 'np.dot', (['a', 'x'], {}), '(a, x)\n', (4710, 4716), True, 'import numpy as np\n'), ((7211, 7331), 'numpy.array', 'np.array', (['[[3.84, 4.0, 37.6, 3.08, 2.36, 2.88, 3.08, 3.0], [-2.88, -3.04, -2.8, -2.32,\n -3.32, -3.82, -4.12, -3.96]]'], {}), '([[3.84, 4.0, 37.6, 3.08, 2.36, 2.88, 3.08, 3.0], [-2.88, -3.04, -\n 2.8, -2.32, -3.32, -3.82, -4.12, -3.96]])\n', (7219, 7331), True, 'import numpy as np\n'), ((10301, 10368), 'numpy.array', 'np.array', (['[[1.0, -2.0, -2.0], [-2.0, 1.0, -2.0], [-2.0, -2.0, 1.0]]'], {}), '([[1.0, -2.0, -2.0], [-2.0, 1.0, -2.0], [-2.0, -2.0, 1.0]])\n', (10309, 10368), True, 'import numpy as np\n'), ((11475, 11488), 'numpy.eye', 'np.eye', (['(21)', '(1)'], {}), '(21, 1)\n', (11481, 11488), True, 'import numpy as np\n'), ((30984, 31004), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (30992, 31004), True, 'import numpy as np\n'), ((3809, 3826), 'numpy.matrix', 'np.matrix', (['[0, 3]'], {}), '([0, 3])\n', (3818, 
3826), True, 'import numpy as np\n'), ((3948, 3965), 'numpy.matrix', 'np.matrix', (['[0, 3]'], {}), '([0, 3])\n', (3957, 3965), True, 'import numpy as np\n'), ((11130, 11150), 'numpy.eye', 'np.eye', (['(64)', '(64)'], {'k': '(-1)'}), '(64, 64, k=-1)\n', (11136, 11150), True, 'import numpy as np\n'), ((3970, 3987), 'numpy.matrix', 'np.matrix', (['[0, 3]'], {}), '([0, 3])\n', (3979, 3987), True, 'import numpy as np\n'), ((10259, 10289), 'numpy.diag', 'np.diag', (['[1e-06, 1, 1000000.0]'], {}), '([1e-06, 1, 1000000.0])\n', (10266, 10289), True, 'import numpy as np\n'), ((11153, 11167), 'numpy.eye', 'np.eye', (['(64)', '(64)'], {}), '(64, 64)\n', (11159, 11167), True, 'import numpy as np\n'), ((11217, 11235), 'numpy.zeros', 'np.zeros', (['(62, 62)'], {}), '((62, 62))\n', (11225, 11235), True, 'import numpy as np\n'), ((10179, 10246), 'numpy.array', 'np.array', (['[[1.0, -2.0, -2.0], [-2.0, 1.0, -2.0], [-2.0, -2.0, 1.0]]'], {}), '([[1.0, -2.0, -2.0], [-2.0, 1.0, -2.0], [-2.0, -2.0, 1.0]])\n', (10187, 10246), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from ...module_base import ModuleBase
from ..loss_base import TRACK_LOSSES
from .utils import SafeLog
eps = np.finfo(np.float32).tiny
@TRACK_LOSSES.register
class SigmoidCrossEntropyCenterness(ModuleBase):
default_hyper_params = dict(
name="centerness",
background=0,
ignore_label=-1,
weight=1.0,
)
def __init__(self, background=0, ignore_label=-1):
super(SigmoidCrossEntropyCenterness, self).__init__()
self.safelog = SafeLog()
self.register_buffer("t_one", torch.tensor(1., requires_grad=False))
def update_params(self, ):
self.background = self._hyper_params["background"]
self.ignore_label = self._hyper_params["ignore_label"]
self.weight = self._hyper_params["weight"]
def forward(self, pred_data, target_data):
r"""
Center-ness loss
Computation technique originated from this implementation:
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Arguments
---------
pred: torch.Tensor
center-ness logits (BEFORE Sigmoid)
format: (B, HW)
label: torch.Tensor
training label
format: (B, HW)
Returns
-------
torch.Tensor
scalar loss
format: (,)
"""
pred = pred_data["ctr_pred"]
label = target_data["ctr_gt"]
mask = (~(label == self.background)).type(torch.Tensor).to(pred.device)
not_neg_mask = (pred >= 0).type(torch.Tensor).to(pred.device)
loss = (pred * not_neg_mask - pred * label +
self.safelog(1. + torch.exp(-torch.abs(pred)))) * mask
loss_residual = (-label * self.safelog(label) -
(1 - label) * self.safelog(1 - label)
) * mask # suppress loss residual (original vers.)
loss = loss - loss_residual.detach()
loss = loss.sum() / torch.max(mask.sum(),
self.t_one) * self._hyper_params["weight"]
extra = dict()
return loss, extra
if __name__ == '__main__':
B = 16
HW = 17 * 17
pred_cls = pred_ctr = torch.tensor(
np.random.rand(B, HW, 1).astype(np.float32))
pred_reg = torch.tensor(np.random.rand(B, HW, 4).astype(np.float32))
gt_cls = torch.tensor(np.random.randint(2, size=(B, HW, 1)),
dtype=torch.int8)
gt_ctr = torch.tensor(np.random.rand(B, HW, 1).astype(np.float32))
gt_reg = torch.tensor(np.random.rand(B, HW, 4).astype(np.float32))
criterion_cls = SigmoidCrossEntropyRetina()
loss_cls = criterion_cls(pred_cls, gt_cls)
criterion_ctr = SigmoidCrossEntropyCenterness()
loss_ctr = criterion_ctr(pred_ctr, gt_ctr, gt_cls)
criterion_reg = IOULoss()
loss_reg = criterion_reg(pred_reg, gt_reg, gt_cls)
from IPython import embed
embed()
| [
"IPython.embed",
"numpy.finfo",
"numpy.random.randint",
"numpy.random.rand",
"torch.abs",
"torch.tensor"
] | [((219, 239), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (227, 239), True, 'import numpy as np\n'), ((3044, 3051), 'IPython.embed', 'embed', ([], {}), '()\n', (3049, 3051), False, 'from IPython import embed\n'), ((2494, 2531), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(B, HW, 1)'}), '(2, size=(B, HW, 1))\n', (2511, 2531), True, 'import numpy as np\n'), ((642, 680), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'requires_grad': '(False)'}), '(1.0, requires_grad=False)\n', (654, 680), False, 'import torch\n'), ((2349, 2373), 'numpy.random.rand', 'np.random.rand', (['B', 'HW', '(1)'], {}), '(B, HW, 1)\n', (2363, 2373), True, 'import numpy as np\n'), ((2422, 2446), 'numpy.random.rand', 'np.random.rand', (['B', 'HW', '(4)'], {}), '(B, HW, 4)\n', (2436, 2446), True, 'import numpy as np\n'), ((2603, 2627), 'numpy.random.rand', 'np.random.rand', (['B', 'HW', '(1)'], {}), '(B, HW, 1)\n', (2617, 2627), True, 'import numpy as np\n'), ((2674, 2698), 'numpy.random.rand', 'np.random.rand', (['B', 'HW', '(4)'], {}), '(B, HW, 4)\n', (2688, 2698), True, 'import numpy as np\n'), ((1794, 1809), 'torch.abs', 'torch.abs', (['pred'], {}), '(pred)\n', (1803, 1809), False, 'import torch\n')] |
import warnings
from typing import Any, Callable, Iterable, List, Optional, Union
import numpy as np
import sympy
from openfermion import SymbolicOperator
from .circuits import Circuit
from .estimation import (
estimate_expectation_values_by_averaging,
evaluate_estimation_circuits,
)
from .gradients import finite_differences_gradient
from .interfaces.ansatz import Ansatz
from .interfaces.ansatz_utils import combine_ansatz_params
from .interfaces.backend import QuantumBackend
from .interfaces.cost_function import CostFunction, ParameterPreprocessor
from .interfaces.estimation import (
EstimateExpectationValues,
EstimationPreprocessor,
EstimationTask,
EstimationTasksFactory,
)
from .interfaces.functions import (
FunctionWithGradient,
FunctionWithGradientStoringArtifacts,
StoreArtifact,
function_with_gradient,
)
from .measurement import (
ExpectationValues,
concatenate_expectation_values,
expectation_values_to_real,
)
from .typing import SupportsLessThan
from .utils import ValueEstimate, create_symbols_map
GradientFactory = Callable[[Callable], Callable[[np.ndarray], np.ndarray]]
SymbolsSortKey = Callable[[sympy.Symbol], SupportsLessThan]
def _get_sorted_set_of_circuit_symbols(
estimation_tasks: List[EstimationTask], key: SymbolsSortKey = str
) -> List[sympy.Symbol]:
return sorted(
list(
{param for task in estimation_tasks for param in task.circuit.free_symbols}
),
key=key,
)
_by_averaging = estimate_expectation_values_by_averaging
def get_ground_state_cost_function(
target_operator: SymbolicOperator,
parametrized_circuit: Circuit,
backend: QuantumBackend,
estimation_method: EstimateExpectationValues = _by_averaging,
estimation_preprocessors: List[EstimationPreprocessor] = None,
fixed_parameters: Optional[np.ndarray] = None,
parameter_precision: Optional[float] = None,
parameter_precision_seed: Optional[int] = None,
gradient_function: Callable = finite_differences_gradient,
symbols_sort_key: SymbolsSortKey = str,
) -> Union[FunctionWithGradient, FunctionWithGradientStoringArtifacts]:
"""Returns a function that returns the estimated expectation value of the input
target operator with respect to the state prepared by the parameterized quantum
circuit when evaluated to the input parameters. The function also has a .gradient
method when returns the gradient with respect the input parameters.
Args:
target_operator: operator to be evaluated and find the ground state of
parametrized_circuit: parameterized circuit to prepare quantum states
backend: backend used for evaluation
estimation_method: estimation_method used to compute expectation value of target
operator
estimation_preprocessors: A list of callable functions that adhere to the
EstimationPreprocessor protocol and are used to create the estimation tasks.
fixed_parameters: values for the circuit parameters that should be fixed.
parameter_precision: the standard deviation of the Gaussian noise to add to each
parameter, if any.
parameter_precision_seed: seed for randomly generating parameter deviation if
using parameter_precision
gradient_function: a function which returns a function used to compute the
gradient of the cost function (see
zquantum.core.gradients.finite_differences_gradient for reference)
symbols_sort_key: key defining ordering on parametrized_circuits free symbols.
If s1,...,sN are all free symbols in parametrized_circuit, and cost function
is called with `parameters` then the following binding occurs:
parameters[i] -> sorted([s1,...,sN], key=symbols_sort_key)[i]
Returns:
Callable
"""
warnings.warn(
DeprecationWarning(
"""get_ground_state_cost_function is deprecated. Use create_cost_function with
expectation_value_estimation_tasks_factory instead.
"""
)
)
estimation_tasks = [
EstimationTask(
operator=target_operator,
circuit=parametrized_circuit,
number_of_shots=None,
)
]
if estimation_preprocessors is None:
estimation_preprocessors = []
for estimation_preprocessor in estimation_preprocessors:
estimation_tasks = estimation_preprocessor(estimation_tasks)
circuit_symbols = _get_sorted_set_of_circuit_symbols(
estimation_tasks, symbols_sort_key
)
def ground_state_cost_function(
parameters: np.ndarray, store_artifact: StoreArtifact = None
) -> ValueEstimate:
"""Evaluates the expectation value of the op
Args:
parameters: parameters for the parameterized quantum circuit
Returns:
value: estimated energy of the target operator with respect to the circuit
"""
nonlocal estimation_tasks
parameters = parameters.copy()
if fixed_parameters is not None:
parameters = combine_ansatz_params(fixed_parameters, parameters)
if parameter_precision is not None:
rng = np.random.default_rng(parameter_precision_seed)
noise_array = rng.normal(0.0, parameter_precision, len(parameters))
parameters += noise_array
symbols_map = create_symbols_map(circuit_symbols, parameters)
current_estimation_tasks = evaluate_estimation_circuits(
estimation_tasks, [symbols_map for _ in estimation_tasks]
)
expectation_values_list = estimation_method(backend, current_estimation_tasks)
partial_sums: List[Any] = [
np.sum(expectation_values.values)
for expectation_values in expectation_values_list
]
summed_values = np.sum(partial_sums)
if isinstance(summed_values, float):
return ValueEstimate(summed_values)
else:
raise ValueError(f"Result {summed_values} is not a float.")
return function_with_gradient(
ground_state_cost_function, gradient_function(ground_state_cost_function)
)
def sum_expectation_values(expectation_values: ExpectationValues) -> ValueEstimate:
"""Compute the sum of expectation values.
If correlations are available, the precision of the sum is computed as
\\epsilon = \\sqrt{\\sum_k \\sigma^2_k}
where the sum runs over frames and \\sigma^2_k is the estimated variance of
the estimated contribution of frame k to the total. This is calculated as
\\sigma^2_k = \\sum_{i,j} Cov(o_{k,i}, o_{k, j})
where Cov(o_{k,i}, o_{k, j}) is the estimated covariance in the estimated
expectation values of operators i and j of frame k.
Args:
expectation_values: The expectation values to sum.
Returns:
The value of the sum, including a precision if the expectation values
included covariances.
"""
value = np.sum(expectation_values.values)
precision = None
if expectation_values.estimator_covariances:
estimator_variance = 0.0
for frame_covariance in expectation_values.estimator_covariances:
estimator_variance += float(np.sum(frame_covariance, (0, 1)))
precision = np.sqrt(estimator_variance)
return ValueEstimate(value, precision)
class AnsatzBasedCostFunction:
"""Cost function used for evaluating given operator using given ansatz.
Args:
target_operator: operator to be evaluated
ansatz: ansatz used to evaluate cost function
backend: backend used for evaluation
estimation_method: estimation_method used to compute expectation value of target
operator
estimation_preprocessors: A list of callable functions that adhere to the
EstimationPreprocessor protocol and are used to create the estimation tasks.
fixed_parameters: values for the circuit parameters that should be fixed.
parameter_precision: the standard deviation of the Gaussian noise to add to each
parameter, if any.
parameter_precision_seed: seed for randomly generating parameter deviation if
using parameter_precision
Params:
backend: see Args
estimation_method: see Args
fixed_parameters (np.ndarray): see Args
parameter_precision: see Args
parameter_precision_seed: see Args
estimation_tasks: A list of EstimationTask objects with circuits to run and
operators to measure
circuit_symbols: A list of all symbolic parameters used in any estimation task
"""
def __init__(
self,
target_operator: SymbolicOperator,
ansatz: Ansatz,
backend: QuantumBackend,
estimation_method: EstimateExpectationValues = _by_averaging,
estimation_preprocessors: List[EstimationPreprocessor] = None,
fixed_parameters: Optional[np.ndarray] = None,
parameter_precision: Optional[float] = None,
parameter_precision_seed: Optional[int] = None,
):
warnings.warn(
DeprecationWarning(
"""AnsatzBasedCostFunction is deprecated. Use create_cost_function
instead."""
)
)
self.backend = backend
self.fixed_parameters = fixed_parameters
self.parameter_precision = parameter_precision
self.parameter_precision_seed = parameter_precision_seed
self.estimation_method: EstimateExpectationValues
if estimation_method is None:
self.estimation_method = estimate_expectation_values_by_averaging
else:
self.estimation_method = estimation_method
if estimation_preprocessors is None:
estimation_preprocessors = []
self.estimation_tasks = [
EstimationTask(
operator=target_operator,
circuit=ansatz.parametrized_circuit,
number_of_shots=None,
)
]
for estimation_preprocessor in estimation_preprocessors:
self.estimation_tasks = estimation_preprocessor(self.estimation_tasks)
self.circuit_symbols = _get_sorted_set_of_circuit_symbols(
self.estimation_tasks, ansatz.symbols_sort_key
)
def __call__(self, parameters: np.ndarray) -> ValueEstimate:
"""Evaluates the value of the cost function for given parameters.
Args:
parameters: parameters for which the evaluation should occur.
Returns:
value: cost function value for given parameters.
"""
full_parameters = parameters.copy()
if self.fixed_parameters is not None:
full_parameters = combine_ansatz_params(self.fixed_parameters, parameters)
if self.parameter_precision is not None:
rng = np.random.default_rng(self.parameter_precision_seed)
noise_array = rng.normal(
0.0, self.parameter_precision, len(full_parameters)
)
full_parameters += noise_array
symbols_map = create_symbols_map(self.circuit_symbols, full_parameters)
estimation_tasks = evaluate_estimation_circuits(
self.estimation_tasks, [symbols_map for _ in self.estimation_tasks]
)
expectation_values_list = self.estimation_method(self.backend, estimation_tasks)
combined_expectation_values = expectation_values_to_real(
concatenate_expectation_values(expectation_values_list)
)
return sum_expectation_values(combined_expectation_values)
def fix_parameters(fixed_parameters: np.ndarray) -> ParameterPreprocessor:
"""Preprocessor appending fixed parameters.
Args:
fixed_parameters: parameters to be appended to the ones being preprocessed.
Returns:
preprocessor
"""
def _preprocess(parameters: np.ndarray) -> np.ndarray:
return combine_ansatz_params(fixed_parameters, parameters)
return _preprocess
def add_normal_noise(
parameter_precision, parameter_precision_seed
) -> ParameterPreprocessor:
"""Preprocessor adding noise to the parameters.
The added noise is iid normal with mean=0.0 and stdev=`parameter_precision`.
Args:
parameter_precision: stddev of the noise distribution
parameter_precision_seed: seed for random number generator. The generator
is seeded during preprocessor creation (not during each preprocessor call).
Returns:
preprocessor
"""
rng = np.random.default_rng(parameter_precision_seed)
def _preprocess(parameters: np.ndarray) -> np.ndarray:
noise = rng.normal(0.0, parameter_precision, len(parameters))
return parameters + noise
return _preprocess
def create_cost_function(
backend: QuantumBackend,
estimation_tasks_factory: EstimationTasksFactory,
estimation_method: EstimateExpectationValues = _by_averaging,
parameter_preprocessors: Iterable[ParameterPreprocessor] = None,
gradient_function: GradientFactory = finite_differences_gradient,
) -> CostFunction:
"""This function can be used to generate callable cost functions for parametric
circuits. This function is the main entry to use other functions in this module.
Args:
backend: quantum backend used for evaluation.
estimation_tasks_factory: function that produces estimation tasks from
parameters. See example use case below for clarification.
estimation_method: the estimator used to compute expectation value of target
operator.
parameter_preprocessors: a list of callable functions that are applied to
parameters prior to estimation task evaluation. These functions have to
adhere to the ParameterPreprocessor protocol.
gradient_function: a function which returns a function used to compute the
gradient of the cost function (see
zquantum.core.gradients.finite_differences_gradient for reference)
Returns:
A callable CostFunction object.
Example use case:
target_operator = ...
ansatz = ...
estimation_factory = substitution_based_estimation_tasks_factory(
target_operator, ansatz
)
noise_preprocessor = add_normal_noise(1e-5, seed=1234)
cost_function = create_cost_function(
backend,
estimation_factory,
parameter_preprocessors=[noise_preprocessor]
)
optimizer = ...
initial_params = ...
opt_results = optimizer.minimize(cost_function, initial_params)
"""
def _cost_function(parameters: np.ndarray) -> Union[float, ValueEstimate]:
for preprocessor in (
[] if parameter_preprocessors is None else parameter_preprocessors
):
parameters = preprocessor(parameters)
estimation_tasks = estimation_tasks_factory(parameters)
expectation_values_list = estimation_method(backend, estimation_tasks)
combined_expectation_values = expectation_values_to_real(
concatenate_expectation_values(expectation_values_list)
)
return sum_expectation_values(combined_expectation_values)
return function_with_gradient(_cost_function, gradient_function(_cost_function))
def expectation_value_estimation_tasks_factory(
target_operator: SymbolicOperator,
parametrized_circuit: Circuit,
estimation_preprocessors: List[EstimationPreprocessor] = None,
symbols_sort_key: SymbolsSortKey = str,
) -> EstimationTasksFactory:
"""Creates a EstimationTasksFactory object that can be used to create
estimation tasks that returns the estimated expectation value of the input
target operator with respect to the state prepared by the parameterized
quantum circuit when evaluated to the input parameters.
To be used with `create_cost_function` to create ground state cost functions.
See `create_cost_function` docstring for an example use case.
Args:
target_operator: operator to be evaluated
parametrized_circuit: parameterized circuit to prepare quantum states
estimation_preprocessors: A list of callable functions used to create the
estimation tasks. Each function must adhere to the EstimationPreprocessor
protocol.
symbols_sort_key: key defining ordering on parametrized_circuits free symbols.
If s1,...,sN are all free symbols in parametrized_circuit, and cost function
is called with `parameters` then the following binding occurs:
parameters[i] -> sorted([s1,...,sN], key=symbols_sort_key)[i]
Returns:
An EstimationTasksFactory object.
"""
if estimation_preprocessors is None:
estimation_preprocessors = []
estimation_tasks = [
EstimationTask(
operator=target_operator,
circuit=parametrized_circuit,
number_of_shots=None,
)
]
for preprocessor in estimation_preprocessors:
estimation_tasks = preprocessor(estimation_tasks)
circuit_symbols = _get_sorted_set_of_circuit_symbols(
estimation_tasks, symbols_sort_key
)
def _tasks_factory(parameters: np.ndarray) -> List[EstimationTask]:
symbols_map = create_symbols_map(circuit_symbols, parameters)
return evaluate_estimation_circuits(
estimation_tasks, [symbols_map for _ in estimation_tasks]
)
return _tasks_factory
def substitution_based_estimation_tasks_factory(
target_operator: SymbolicOperator,
ansatz: Ansatz,
estimation_preprocessors: List[EstimationPreprocessor] = None,
) -> EstimationTasksFactory:
"""Creates a EstimationTasksFactory object that can be used to create
estimation tasks dynamically with parameters provided on the fly. These
tasks will evaluate the parametric circuit of an ansatz, using a symbol-
parameter map. Wow, a factory for factories! This is so meta.
To be used with `create_cost_function`. See `create_cost_function` docstring
for an example use case.
Args:
target_operator: operator to be evaluated
ansatz: ansatz used to evaluate cost function
estimation_preprocessors: A list of callable functions used to create the
estimation tasks. Each function must adhere to the EstimationPreprocessor
protocol.
Returns:
An EstimationTasksFactory object.
"""
return expectation_value_estimation_tasks_factory(
target_operator,
ansatz.parametrized_circuit,
estimation_preprocessors,
ansatz.symbols_sort_key,
)
def dynamic_circuit_estimation_tasks_factory(
target_operator: SymbolicOperator,
ansatz: Ansatz,
estimation_preprocessors: List[EstimationPreprocessor] = None,
) -> EstimationTasksFactory:
"""Creates a EstimationTasksFactory object that can be used to create
estimation tasks dynamically with parameters provided on the fly. These
tasks will evaluate the parametric circuit of an ansatz, without using
a symbol-parameter map. Wow, a factory for factories!
To be used with `create_cost_function`. See `create_cost_function` docstring
for an example use case.
Args:
target_operator: operator to be evaluated
ansatz: ansatz used to evaluate cost function
estimation_preprocessors: A list of callable functions used to create the
estimation tasks. Each function must adhere to the EstimationPreprocessor
protocol.
Returns:
An EstimationTasksFactory object.
"""
def _tasks_factory(parameters: np.ndarray) -> List[EstimationTask]:
# TODO: In some ansatzes, `ansatz._generate_circuit(parameters)` does not
# produce an executable circuit, but rather, they ignore the parameters and
# returns a parametrized circuit with sympy symbols.
# (Ex. see ansatzes in z-quantum-qaoa)
#
# Combined with how this is a private method, we will probably have to somewhat
# refactor the ansatz class.
circuit = ansatz._generate_circuit(parameters)
estimation_tasks = [
EstimationTask(
operator=target_operator, circuit=circuit, number_of_shots=None
)
]
for preprocessor in (
[] if estimation_preprocessors is None else estimation_preprocessors
):
estimation_tasks = preprocessor(estimation_tasks)
return estimation_tasks
return _tasks_factory
| [
"numpy.random.default_rng",
"numpy.sum",
"numpy.sqrt"
] | [((7047, 7080), 'numpy.sum', 'np.sum', (['expectation_values.values'], {}), '(expectation_values.values)\n', (7053, 7080), True, 'import numpy as np\n'), ((12640, 12687), 'numpy.random.default_rng', 'np.random.default_rng', (['parameter_precision_seed'], {}), '(parameter_precision_seed)\n', (12661, 12687), True, 'import numpy as np\n'), ((5905, 5925), 'numpy.sum', 'np.sum', (['partial_sums'], {}), '(partial_sums)\n', (5911, 5925), True, 'import numpy as np\n'), ((7354, 7381), 'numpy.sqrt', 'np.sqrt', (['estimator_variance'], {}), '(estimator_variance)\n', (7361, 7381), True, 'import numpy as np\n'), ((5257, 5304), 'numpy.random.default_rng', 'np.random.default_rng', (['parameter_precision_seed'], {}), '(parameter_precision_seed)\n', (5278, 5304), True, 'import numpy as np\n'), ((5775, 5808), 'numpy.sum', 'np.sum', (['expectation_values.values'], {}), '(expectation_values.values)\n', (5781, 5808), True, 'import numpy as np\n'), ((10953, 11005), 'numpy.random.default_rng', 'np.random.default_rng', (['self.parameter_precision_seed'], {}), '(self.parameter_precision_seed)\n', (10974, 11005), True, 'import numpy as np\n'), ((7300, 7332), 'numpy.sum', 'np.sum', (['frame_covariance', '(0, 1)'], {}), '(frame_covariance, (0, 1))\n', (7306, 7332), True, 'import numpy as np\n')] |
import keras.backend as K
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from attack_utils import gen_adv_loss
from keras.models import save_model
import time
import sys
FLAGS = flags.FLAGS
EVAL_FREQUENCY = 1000
BATCH_SIZE = 64
BATCH_EVAL_NUM = 100
def batch_eval(tf_inputs, tf_outputs, numpy_inputs):
"""
A helper function that computes a tensor on numpy inputs by batches.
From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py
"""
n = len(numpy_inputs)
assert n > 0
assert n == len(tf_inputs)
m = numpy_inputs[0].shape[0]
for i in range(1, n):
assert numpy_inputs[i].shape[0] == m
out = []
for _ in tf_outputs:
out.append([])
for start in range(0, m, BATCH_SIZE):
batch = start // BATCH_SIZE
# Compute batch start and end indices
start = batch * BATCH_SIZE
end = start + BATCH_SIZE
numpy_input_batches = [numpy_input[start:end]
for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= BATCH_SIZE
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size
feed_dict = dict(zip(tf_inputs, numpy_input_batches))
feed_dict[K.learning_phase()] = 0
numpy_output_batches = K.get_session().run(tf_outputs,
feed_dict=feed_dict)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape
for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch)
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape
return out
def tf_train(x, y, model, X_train, Y_train, generator, x_advs=None, benign = None, cross_lip=None):
old_vars = set(tf.global_variables())
train_size = Y_train.shape[0]
# Generate cross-entropy loss for training
logits = model(x)
#print(K.int_shape(logits))
preds = K.softmax(logits)
l1 = gen_adv_loss(logits, y, mean=True)
# add adversarial training loss
if x_advs is not None:
idx = tf.placeholder(dtype=np.int32)
logits_adv = model(tf.stack(x_advs)[idx])
l2 = gen_adv_loss(logits_adv, y, mean=True)
if benign == 0:
loss = l2
elif benign == 1:
loss = 0.5*(l1+l2)
else:
l2 = tf.constant(0)
loss = l1
optimizer = tf.train.AdamOptimizer().minimize(loss)
saver = tf.train.Saver(set(tf.global_variables()) - old_vars)
# Run all the initializers to prepare the trainable parameters.
K.get_session().run(tf.initialize_variables(
set(tf.global_variables()) - old_vars))
start_time = time.time()
print('Initialized!')
# Loop through training steps.
num_steps = int(FLAGS.NUM_EPOCHS * train_size + BATCH_SIZE - 1) // BATCH_SIZE
step = 0
training_loss = 0
epoch_count = 0
step_old = 0
for (batch_data, batch_labels) \
in generator.flow(X_train, Y_train, batch_size=BATCH_SIZE):
if len(batch_data) < BATCH_SIZE:
k = BATCH_SIZE - len(batch_data)
batch_data = np.concatenate([batch_data, X_train[0:k]])
batch_labels = np.concatenate([batch_labels, Y_train[0:k]])
feed_dict = {x: batch_data,
y: batch_labels,
K.learning_phase(): 1}
# choose source of adversarial examples at random
# (for ensemble adversarial training)
if x_advs is not None:
feed_dict[idx] = np.random.randint(len(x_advs))
# Run the graph
_, curr_loss, curr_l1, curr_l2, curr_preds, _ = \
K.get_session().run([optimizer, loss, l1, l2, preds]
+ [model.updates],
feed_dict=feed_dict)
training_loss += curr_loss
epoch = float(step) * BATCH_SIZE / train_size
if epoch >= epoch_count:
epoch_count += 1
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.2f s' %
(step, float(step) * BATCH_SIZE / train_size,
elapsed_time))
print('Training loss: %.3f' % (training_loss/(step - step_old)))
training_loss = 0
step_old = step
print('Minibatch loss: %.3f (%.3f, %.3f)' % (curr_loss, curr_l1, curr_l2))
_, _, minibatch_error = error_rate(curr_preds, batch_labels)
print('Minibatch error: %.1f%%' % minibatch_error)
# if epoch % 10 == 0 or (step == (num_steps-1)):
# save_path = saver.save(K.get_session(), "/tmp/model.ckpt")
# save_model(model, 'tmp/model.ckpt')
# print("Model saved in file: %s" % 'model.ckpt')
sys.stdout.flush()
step += 1
if step == num_steps:
break
def tf_test_error_rate(model, x, X_test, y_test):
"""
Compute test error.
"""
assert len(X_test) == len(y_test)
# Predictions for the test set
eval_prediction = K.softmax(model(x))
predictions = batch_eval([x], [eval_prediction], [X_test])[0]
return error_rate(predictions, y_test)
def error_rate(predictions, labels):
    """
    Return the error rate in percent.

    ``predictions`` and ``labels`` are (n_samples, n_classes) score /
    one-hot matrices; the arg-max over axis 1 selects the class.

    Returns ``(predicted_classes, true_classes, error_percent)``.
    """
    assert len(predictions) == len(labels)
    predicted_classes = np.argmax(predictions, 1)
    true_classes = np.argmax(labels, 1)
    # Accuracy in percent, then flipped into an error rate.
    n_correct = np.sum(predicted_classes == true_classes)
    percent_wrong = 100.0 - (100.0 * n_correct / predictions.shape[0])
    return predicted_classes, true_classes, percent_wrong
| [
"numpy.sum",
"keras.backend.learning_phase",
"numpy.argmax",
"keras.backend.get_session",
"attack_utils.gen_adv_loss",
"time.time",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.global_variables",
"keras.backend.softmax",
"sys.stdout.flush",
"tensorflow.stack",
"tensorflow.tra... | [((2134, 2151), 'keras.backend.softmax', 'K.softmax', (['logits'], {}), '(logits)\n', (2143, 2151), True, 'import keras.backend as K\n'), ((2161, 2195), 'attack_utils.gen_adv_loss', 'gen_adv_loss', (['logits', 'y'], {'mean': '(True)'}), '(logits, y, mean=True)\n', (2173, 2195), False, 'from attack_utils import gen_adv_loss\n'), ((2873, 2884), 'time.time', 'time.time', ([], {}), '()\n', (2882, 2884), False, 'import time\n'), ((5558, 5583), 'numpy.argmax', 'np.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (5567, 5583), True, 'import numpy as np\n'), ((5596, 5616), 'numpy.argmax', 'np.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (5605, 5616), True, 'import numpy as np\n'), ((1729, 1754), 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1743, 1754), True, 'import numpy as np\n'), ((1963, 1984), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1982, 1984), True, 'import tensorflow as tf\n'), ((2274, 2304), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (2288, 2304), True, 'import tensorflow as tf\n'), ((2368, 2406), 'attack_utils.gen_adv_loss', 'gen_adv_loss', (['logits_adv', 'y'], {'mean': '(True)'}), '(logits_adv, y, mean=True)\n', (2380, 2406), False, 'from attack_utils import gen_adv_loss\n'), ((2533, 2547), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (2544, 2547), True, 'import tensorflow as tf\n'), ((5003, 5021), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5019, 5021), False, 'import sys\n'), ((1338, 1356), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (1354, 1356), True, 'import keras.backend as K\n'), ((2583, 2607), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (2605, 2607), True, 'import tensorflow as tf\n'), ((2763, 2778), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2776, 2778), True, 'import keras.backend 
as K\n'), ((3323, 3365), 'numpy.concatenate', 'np.concatenate', (['[batch_data, X_train[0:k]]'], {}), '([batch_data, X_train[0:k]])\n', (3337, 3365), True, 'import numpy as np\n'), ((3393, 3437), 'numpy.concatenate', 'np.concatenate', (['[batch_labels, Y_train[0:k]]'], {}), '([batch_labels, Y_train[0:k]])\n', (3407, 3437), True, 'import numpy as np\n'), ((3534, 3552), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (3550, 3552), True, 'import keras.backend as K\n'), ((4234, 4245), 'time.time', 'time.time', ([], {}), '()\n', (4243, 4245), False, 'import time\n'), ((1393, 1408), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (1406, 1408), True, 'import keras.backend as K\n'), ((2332, 2348), 'tensorflow.stack', 'tf.stack', (['x_advs'], {}), '(x_advs)\n', (2340, 2348), True, 'import tensorflow as tf\n'), ((2655, 2676), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2674, 2676), True, 'import tensorflow as tf\n'), ((3848, 3863), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (3861, 3863), True, 'import keras.backend as K\n'), ((4184, 4195), 'time.time', 'time.time', ([], {}), '()\n', (4193, 4195), False, 'import time\n'), ((5652, 5673), 'numpy.sum', 'np.sum', (['(preds == orig)'], {}), '(preds == orig)\n', (5658, 5673), True, 'import numpy as np\n'), ((2820, 2841), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2839, 2841), True, 'import tensorflow as tf\n')] |
from __future__ import annotations
__all__ = ["ComponentNumberInvalidError",
"BaseDistribution",
"normal_frequency",
"NormalDistribution",
"weibull_frequency",
"WeibullDistribution",
"skew_normal_frequency",
"SkewNormalDistribution"]
import typing
from abc import ABC, abstractproperty, abstractstaticmethod
from threading import Lock
import numpy as np
from QGrain import DistributionType
from scipy.optimize import minimize
from scipy.stats import norm, skewnorm, weibull_min
class ComponentNumberInvalidError(Exception):
    """
    Raised when the requested number of mixture components is invalid,
    i.e. rejected by ``BaseDistribution.check_n_components`` (not an
    ``int``, or less than 1).
    """
    pass
class BaseDistribution(ABC):
    """Abstract base class of the mixture distribution models.

    A concrete subclass describes one family of component distributions
    (Normal, Weibull, Skew Normal).  An instance is bound to a fixed
    number of mixture components and provides the parameter bounds,
    default values, optimisation constraint and dynamically generated
    mixture function used by the fitting code.
    """
    # Small positive epsilon: used as the lower bound for strictly
    # positive parameters and as slack in the fraction-sum constraint.
    _INFINITESIMAL = 1e-2
    # Cache of constructed instances keyed by (distribution_type,
    # n_components); guarded by _CACHE_LOCK.
    _CACHE_LOCK = Lock()
    _CACHE = {}
    def __init__(self, n_components: int):
        # Fail fast on an invalid component count.
        if not BaseDistribution.check_n_components(n_components):
            raise ComponentNumberInvalidError(n_components)
        self.__n_components = n_components
    @abstractstaticmethod
    def get_name() -> str:
        """Human readable name of this distribution family."""
        pass
    @abstractstaticmethod
    def get_type() -> DistributionType:
        """Corresponding ``DistributionType`` enum member."""
        pass
    @abstractstaticmethod
    def get_parameter_names() -> typing.Tuple[str]:
        """Ordered parameter names of a single component."""
        pass
    @abstractstaticmethod
    def get_parameter_bounds() -> \
        typing.Tuple[
            typing.Tuple[
                typing.Union[None, int, float],
                typing.Union[None, int, float]]]:
        """(low, high) bound per component parameter; ``None`` = unbounded."""
        pass
    @abstractstaticmethod
    def get_reference_parameters(mean: float, std: float, skewness: float) -> np.ndarray:
        """Parameters of a single component matching the given moments."""
        pass
    @abstractstaticmethod
    def get_moments(*args) -> dict:
        """dict(mean=..., std=..., skewness=..., kurtosis=...) of one component."""
        pass
    @property
    def bounds(self) -> \
        typing.Tuple[
            typing.Tuple[
                typing.Union[None, int, float],
                typing.Union[None, int, float]]]:
        """Bounds of the full flattened parameter vector.

        Layout: the per-component parameter bounds repeated
        ``n_components`` times, then ``n_components - 1`` fraction bounds
        in [0, 1] (the last fraction is implicit: 1 minus the others).
        """
        bounds = []
        param_bounds = self.get_parameter_bounds()
        for i in range(self.n_components):
            for bound in param_bounds:
                bounds.append(bound)
        for i in range(self.n_components-1):
            bounds.append((0.0, 1.0))
        return tuple(bounds)
    @abstractproperty
    def defaults(self) -> typing.Tuple[float]:
        """Default flattened parameter vector used to start the fit."""
        pass
    @property
    def constrains(self) -> typing.Tuple[typing.Dict]:
        """Inequality constraint keeping the explicit fractions' sum < 1.

        NOTE(review): the name looks like a typo for "constraints" but is
        kept for interface compatibility.  For more than one component a
        single dict (not a tuple of dicts) is returned — presumably fed to
        ``scipy.optimize.minimize``, which accepts either; confirm at the
        call site.
        """
        if self.__n_components == 1:
            return ()
        else:
            return ({'type': 'ineq', 'fun': lambda args: 1 - sum(args[1-self.__n_components:]) + BaseDistribution._INFINITESIMAL})
    @abstractproperty
    def single_function(self) -> typing.Callable:
        """Frequency function of a single component."""
        pass
    @abstractproperty
    def mixed_function(self) -> typing.Callable:
        """Mixture frequency function taking a flat parameter list."""
        pass
    @property
    def parameter_count(self) -> int:
        # Number of parameters of one component.
        return len(self.get_parameter_names())
    @property
    def n_components(self) -> int:
        return self.__n_components
    @property
    def total_parameter_count(self) -> int:
        # Per-component parameters plus one fraction each, minus the
        # implicit last fraction.
        return (self.parameter_count + 1) * self.n_components - 1
    @staticmethod
    def check_n_components(n_components: int) -> bool:
        """Return True iff ``n_components`` is a positive ``int``."""
        if not isinstance(n_components, int):
            return False
        if n_components < 1:
            return False
        return True
    def unpack_parameters(self, fitted_parameters) -> typing.Tuple[typing.Tuple, float]:
        """Split the flat parameter vector into per-component pieces.

        Returns a tuple of ``(component_parameters, fraction)`` pairs; the
        last component's fraction is reconstructed as 1 minus the sum of
        the explicit fractions.
        """
        assert len(fitted_parameters) == self.total_parameter_count
        if self.n_components == 1:
            return ((tuple(fitted_parameters), 1.0),)
        else:
            expanded = list(fitted_parameters) + [1.0-sum(fitted_parameters[self.n_components*self.parameter_count:])]
            return tuple(((tuple(expanded[i*self.parameter_count:(i+1)*self.parameter_count]), expanded[self.n_components*self.parameter_count+i]) for i in range(self.n_components)))
    @staticmethod
    def get_lambda_string(base_function_name: str, n_components: int, parameter_names: typing.List[str]) -> str:
        """Build the source string of a lambda evaluating the whole mixture.

        The generated lambda takes the sample grid ``x``, the flattened
        component parameters, and the first ``n_components - 1`` fractions;
        subclasses compile it with ``exec`` in their ``mixed_function``.
        """
        if n_components == 1:
            parameter_string = ", ".join(["x"] + list(parameter_names))
            return f"lambda {parameter_string}: {base_function_name}({parameter_string})"
        else:
            parameter_string = ", ".join(["x"] + [f"{name}{i+1}" for i in range(n_components) for name in parameter_names] + [f"f{i+1}" for i in range(n_components-1)])
            # " + " to connect each sub-function
            # the previous sub-function str list means the m-1 sub-functions with n params `fj * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
            # the last sub-function str which represents `(1-f_1-...-f_j-...-f_m-1) * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
            previous_sub_function_strings = [f"f{i+1} * {base_function_name}(x, {', '.join([f'{name}{i+1}' for name in parameter_names])})" for i in range(n_components-1)]
            last_sub_function_string = f"({'-'.join(['1']+[f'f{i+1}' for i in range(n_components-1)])}) * {base_function_name}(x, {', '.join([f'{name}{n_components}' for name in parameter_names])})"
            lambda_string = f"lambda {parameter_string}: {' + '.join(previous_sub_function_strings + [last_sub_function_string])}"
            return lambda_string
    @staticmethod
    def get_initial_guess(distribution_type: DistributionType, reference: typing.Iterable[typing.Dict], fractions=None):
        """Build a flattened initial parameter vector from moment references.

        ``reference`` is a sequence of dicts accepted as keyword arguments
        by the subclass's ``get_reference_parameters``; fractions default
        to equal shares.
        """
        parameters = []
        n_components = len(reference)
        distribution_class = None
        for sub_class in BaseDistribution.__subclasses__():
            if sub_class.get_type() == distribution_type:
                distribution_class = sub_class
        if distribution_class is None:
            raise ValueError("There is no corresponding sub-class of this distribution type.")
        for component_ref in reference:
            component_params = distribution_class.get_reference_parameters(**component_ref)
            parameters.extend(component_params)
        if fractions is None:
            for i in range(n_components-1):
                parameters.append(1/n_components)
        else:
            assert len(fractions) == n_components
            parameters.extend(fractions[:-1])
        return np.array(parameters)
    @staticmethod
    def get_distribution(distribution_type: DistributionType, n_components: int) -> BaseDistribution:
        """Return a (cached) distribution instance of the requested type.

        Lookup and insertion happen while holding ``_CACHE_LOCK``.
        """
        key = (distribution_type, n_components)
        distribution = None
        BaseDistribution._CACHE_LOCK.acquire()
        if key in BaseDistribution._CACHE.keys():
            distribution = BaseDistribution._CACHE[key]
        else:
            for sub_class in BaseDistribution.__subclasses__():
                if sub_class.get_type() == distribution_type:
                    distribution = sub_class(n_components)
                    BaseDistribution._CACHE[key] = distribution
        BaseDistribution._CACHE_LOCK.release()
        if distribution is None:
            raise ValueError("There is no corresponding sub-class of this distribution type.")
        else:
            return distribution
def normal_frequency(classes_φ, loc, scale):
    """Discrete frequency curve of a Normal component over the φ grid.

    Converts the continuous pdf to bin frequencies by multiplying with the
    bin width (assumes ``classes_φ`` is uniformly spaced — the width is
    derived from the endpoints only).
    """
    bin_width = abs((classes_φ[0] - classes_φ[-1]) / (len(classes_φ) - 1))
    return norm.pdf(classes_φ, loc=loc, scale=scale) * bin_width
class NormalDistribution(BaseDistribution):
    """Mixture model whose components are Normal (Gaussian) distributions."""
    @staticmethod
    def get_name() -> str:
        return "Normal"

    @staticmethod
    def get_type() -> DistributionType:
        return DistributionType.Normal

    @staticmethod
    def get_parameter_names() -> typing.Tuple[str]:
        return ("loc", "scale")

    @staticmethod
    def get_parameter_bounds() -> \
        typing.Tuple[
            typing.Tuple[
                typing.Union[None, int, float],
                typing.Union[None, int, float]]]:
        # loc is unbounded; scale must stay strictly positive.
        return ((None, None),
                (BaseDistribution._INFINITESIMAL, None))

    @property
    def defaults(self) -> typing.Tuple[float]:
        """Spread-out initial (loc, scale) pairs plus equal fractions."""
        params = []
        for i in range(1, self.n_components + 1):
            params.extend((5 + i, 1 + 0.1 * i))  # loc, scale
        fractions = [1.0 / self.n_components] * (self.n_components - 1)
        return tuple(params + fractions)

    @property
    def single_function(self) -> typing.Callable:
        return normal_frequency

    @property
    def mixed_function(self) -> typing.Callable:
        """Dynamically compiled mixture function with a flat parameter list."""
        source = self.get_lambda_string("normal_frequency", self.n_components, self.get_parameter_names())
        namespace = {"mixed_function": None}
        exec(f"mixed_function = {source}", None, namespace)
        return namespace["mixed_function"]

    @staticmethod
    def get_moments(*args) -> dict:
        assert len(args) == len(NormalDistribution.get_parameter_names())
        mean, variance, skewness, kurtosis = norm.stats(*args, moments="mvsk")
        return dict(mean=mean, std=np.sqrt(variance), skewness=skewness, kurtosis=kurtosis)

    @staticmethod
    def get_reference_parameters(mean: float, std: float, skewness: float=0.0) -> np.ndarray:
        # A Normal is symmetric, so the skewness target is ignored.
        return np.array([mean, std])
def weibull_frequency(classes_φ, shape, loc, scale):
    """Discrete frequency curve of a Weibull component over the φ grid.

    The continuous pdf is scaled by the bin width (assumes a uniformly
    spaced grid — the width is derived from the endpoints only).
    """
    bin_width = abs((classes_φ[0] - classes_φ[-1]) / (len(classes_φ) - 1))
    return weibull_min.pdf(classes_φ, shape, loc=loc, scale=scale) * bin_width
class WeibullDistribution(BaseDistribution):
    """Mixture model whose components are (3-parameter) Weibull distributions."""
    @staticmethod
    def get_name() -> str:
        return "Weibull"
    @staticmethod
    def get_type() -> DistributionType:
        return DistributionType.Weibull
    @staticmethod
    def get_parameter_names() -> typing.Tuple[str]:
        return ("shape", "loc", "scale")
    @staticmethod
    def get_parameter_bounds() -> \
        typing.Tuple[
            typing.Tuple[
                typing.Union[None, int, float],
                typing.Union[None, int, float]]]:
        # shape and scale strictly positive; loc unbounded.
        return ((BaseDistribution._INFINITESIMAL, None),
                (None, None),
                (BaseDistribution._INFINITESIMAL, None))
    @property
    def defaults(self) -> typing.Tuple[float]:
        """Initial (shape, loc, scale) per component plus equal fractions."""
        defaults = []
        for i in range(1, self.n_components+1):
            defaults.append(3.60234942) # shape while skewness is 0
            defaults.append(5.0 + i)
            defaults.append(1.0 + 0.1*i)
        for i in range(self.n_components-1):
            defaults.append(1.0 / self.n_components)
        return tuple(defaults)
    @property
    def single_function(self) -> typing.Callable:
        return weibull_frequency
    @property
    def mixed_function(self) -> typing.Callable:
        """Dynamically compiled mixture function with a flat parameter list."""
        lambda_string = self.get_lambda_string("weibull_frequency", self.n_components, self.get_parameter_names())
        local_params = {"mixed_function": None}
        exec(f"mixed_function = {lambda_string}", None, local_params)
        return local_params["mixed_function"]
    @staticmethod
    def get_moments(*args) -> dict:
        """Moments of one component; args are (shape, loc, scale)."""
        assert len(args) == len(WeibullDistribution.get_parameter_names())
        m, v, s, k = weibull_min.stats(*args, moments="mvsk")
        std = np.sqrt(v)
        moments = dict(mean=m, std=std, skewness=s, kurtosis=k)
        return moments
    @staticmethod
    def get_reference_parameters(mean: float, std: float, skewness: float) -> np.ndarray:
        """Find (shape, loc, scale) whose moments match the targets via SLSQP.

        The start point uses the shape at which the Weibull skewness is
        (approximately) zero, per the comment in ``defaults``.
        """
        shape_skew_0 = 3.60234942
        x0 = [shape_skew_0, 0.0, 1.0]
        target = {"mean": mean, "std": std, "skewness": skewness}
        def closure(args):
            # Sum of squared moment errors against the targets.
            current = WeibullDistribution.get_moments(*args)
            errors = sum([(current[key]-target[key])**2 for key in target.keys()])
            return errors
        res = minimize(closure, x0=x0,
                       bounds=WeibullDistribution.get_parameter_bounds(),
                       method="SLSQP",
                       options={"maxiter": 100, "ftol": 1e-6, "disp": False})
        return res.x
def skew_normal_frequency(classes_φ, shape, loc, scale):
    """Discrete frequency curve of a Skew Normal component over the φ grid.

    The continuous pdf is scaled by the bin width (assumes a uniformly
    spaced grid — the width is derived from the endpoints only).
    """
    bin_width = abs((classes_φ[0] - classes_φ[-1]) / (len(classes_φ) - 1))
    return skewnorm.pdf(classes_φ, shape, loc=loc, scale=scale) * bin_width
class SkewNormalDistribution(BaseDistribution):
    """Mixture model whose components are Skew Normal distributions."""
    @staticmethod
    def get_name() -> str:
        return "Skew Normal"
    @staticmethod
    def get_type() -> DistributionType:
        return DistributionType.SkewNormal
    @staticmethod
    def get_parameter_names() -> typing.Tuple[str]:
        return ("shape", "loc", "scale")
    @staticmethod
    def get_parameter_bounds() -> \
        typing.Tuple[
            typing.Tuple[
                typing.Union[None, int, float],
                typing.Union[None, int, float]]]:
        # shape (skew) and loc unbounded; scale strictly positive.
        return ((None, None),
                (None, None),
                (BaseDistribution._INFINITESIMAL, None))
    @property
    def defaults(self) -> typing.Tuple[float]:
        """Initial (shape, loc, scale) per component plus equal fractions."""
        defaults = []
        for i in range(1, self.n_components+1):
            defaults.append(0.0) # shape while skewness is 0
            defaults.append(5 + i) # loc
            defaults.append(1 + 0.1*i) # scale
        for i in range(self.n_components-1):
            defaults.append(1.0 / self.n_components) # fraction
        return tuple(defaults)
    @property
    def single_function(self) -> typing.Callable:
        return skew_normal_frequency
    @property
    def mixed_function(self) -> typing.Callable:
        """Dynamically compiled mixture function with a flat parameter list."""
        lambda_string = self.get_lambda_string("skew_normal_frequency", self.n_components, self.get_parameter_names())
        local_params = {"mixed_function": None}
        exec(f"mixed_function = {lambda_string}", None, local_params)
        return local_params["mixed_function"]
    @staticmethod
    def get_moments(*args) -> dict:
        """Moments of one component; args are (shape, loc, scale)."""
        assert len(args) == len(SkewNormalDistribution.get_parameter_names())
        m, v, s, k = skewnorm.stats(*args, moments="mvsk")
        std = np.sqrt(v)
        moments = dict(mean=m, std=std, skewness=s, kurtosis=k)
        return moments
    @staticmethod
    def get_reference_parameters(mean: float, std: float, skewness: float=0.0) -> np.ndarray:
        """Find (shape, loc, scale) whose moments match the targets via SLSQP.

        NOTE(review): the starting shape is a small random perturbation
        (``np.random.rand()*0.1``), so repeated calls may return slightly
        different parameters — presumably deliberate to break the
        symmetry at shape 0; confirm before making this deterministic.
        """
        x0 = [np.random.rand()*0.1, mean, std]
        target = {"mean": mean, "std": std, "skewness": skewness}
        def closure(args):
            # Sum of squared moment errors against the targets.
            current = SkewNormalDistribution.get_moments(*args)
            errors = sum([(current[key]-target[key])**2 for key in target.keys()])
            return errors
        res = minimize(closure, x0=x0,
                       bounds=SkewNormalDistribution.get_parameter_bounds(),
                       method="SLSQP",
                       options={"maxiter": 100, "ftol": 1e-6, "disp": False})
        return res.x
def log10MSE_distance(values: np.ndarray, targets: np.ndarray) -> float:
    """Base-10 logarithm of the mean squared error between the two arrays."""
    residuals = values - targets
    return np.log10(np.mean(residuals ** 2))
def MSE_distance(values: np.ndarray, targets: np.ndarray) -> float:
    """Mean squared error between the two arrays."""
    residuals = values - targets
    return np.mean(residuals * residuals)
def p_norm(values: np.ndarray, targets: np.ndarray, p=2) -> float:
    """The p-norm of the element-wise difference (Euclidean for p=2)."""
    abs_residuals = np.abs(values - targets)
    return np.sum(abs_residuals ** p) ** (1.0 / p)
def cosine_distance(values: np.ndarray, targets: np.ndarray) -> float:
    """Absolute cosine of the angle between the two vectors.

    NOTE(review): despite the name this returns a *similarity* (1.0 for
    parallel vectors); callers appear to rely on that, so it is kept.
    All-zero input on either side is defined to return 1.0.
    """
    if np.all(np.equal(values, 0.0)) or np.all(np.equal(targets, 0.0)):
        return 1.0
    dot = np.sum(values * targets)
    norm_v = np.sqrt(np.sum(np.square(values)))
    norm_t = np.sqrt(np.sum(np.square(targets)))
    return abs(dot / (norm_v * norm_t))
def angular_distance(values: np.ndarray, targets: np.ndarray) -> float:
    """Angle-based distance in [0, 1], derived from ``cosine_distance``."""
    similarity = cosine_distance(values, targets)
    return 2 * np.arccos(similarity) / np.pi
def get_distance_func_by_name(distance: str):
    """Resolve a distance-function name to a callable ``f(values, targets)``.

    Supported names: ``"<p>norm"`` (e.g. ``"2norm"``; only a single-digit
    p is parsed, as before), ``"MSE"``, ``"log10MSE"``, ``"cosine"`` and
    ``"angular"``.

    Raises:
        NotImplementedError: for any unrecognised name.
    """
    if distance.endswith("norm"):
        # e.g. "2norm" -> p_norm with p=2; binds p in the returned closure.
        p = int(distance[0])
        return lambda x, y: p_norm(x, y, p)
    if distance == "MSE":
        return MSE_distance
    if distance == "log10MSE":
        return log10MSE_distance
    if distance == "cosine":
        return cosine_distance
    if distance == "angular":
        return angular_distance
    raise NotImplementedError(distance)
| [
"scipy.stats.skewnorm.pdf",
"numpy.sum",
"numpy.abs",
"scipy.stats.weibull_min.stats",
"numpy.square",
"scipy.stats.weibull_min.pdf",
"scipy.stats.norm.pdf",
"numpy.equal",
"threading.Lock",
"scipy.stats.skewnorm.stats",
"numpy.array",
"scipy.stats.norm.stats",
"numpy.random.rand",
"numpy.... | [((755, 761), 'threading.Lock', 'Lock', ([], {}), '()\n', (759, 761), False, 'from threading import Lock\n'), ((7108, 7149), 'scipy.stats.norm.pdf', 'norm.pdf', (['classes_φ'], {'loc': 'loc', 'scale': 'scale'}), '(classes_φ, loc=loc, scale=scale)\n', (7116, 7149), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((9184, 9239), 'scipy.stats.weibull_min.pdf', 'weibull_min.pdf', (['classes_φ', 'shape'], {'loc': 'loc', 'scale': 'scale'}), '(classes_φ, shape, loc=loc, scale=scale)\n', (9199, 9239), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((11951, 12003), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['classes_φ', 'shape'], {'loc': 'loc', 'scale': 'scale'}), '(classes_φ, shape, loc=loc, scale=scale)\n', (11963, 12003), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((6126, 6146), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (6134, 6146), True, 'import numpy as np\n'), ((8754, 8787), 'scipy.stats.norm.stats', 'norm.stats', (['*args'], {'moments': '"""mvsk"""'}), "(*args, moments='mvsk')\n", (8764, 8787), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((8802, 8812), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (8809, 8812), True, 'import numpy as np\n'), ((9028, 9049), 'numpy.array', 'np.array', (['[mean, std]'], {}), '([mean, std])\n', (9036, 9049), True, 'import numpy as np\n'), ((10963, 11003), 'scipy.stats.weibull_min.stats', 'weibull_min.stats', (['*args'], {'moments': '"""mvsk"""'}), "(*args, moments='mvsk')\n", (10980, 11003), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((11018, 11028), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (11025, 11028), True, 'import numpy as np\n'), ((13735, 13772), 'scipy.stats.skewnorm.stats', 'skewnorm.stats', (['*args'], {'moments': '"""mvsk"""'}), "(*args, moments='mvsk')\n", (13749, 13772), False, 'from scipy.stats import norm, skewnorm, weibull_min\n'), ((13787, 13797), 'numpy.sqrt', 'np.sqrt', 
(['v'], {}), '(v)\n', (13794, 13797), True, 'import numpy as np\n'), ((14788, 14815), 'numpy.square', 'np.square', (['(values - targets)'], {}), '(values - targets)\n', (14797, 14815), True, 'import numpy as np\n'), ((15121, 15145), 'numpy.sum', 'np.sum', (['(values * targets)'], {}), '(values * targets)\n', (15127, 15145), True, 'import numpy as np\n'), ((14670, 14697), 'numpy.square', 'np.square', (['(values - targets)'], {}), '(values - targets)\n', (14679, 14697), True, 'import numpy as np\n'), ((15031, 15052), 'numpy.equal', 'np.equal', (['values', '(0.0)'], {}), '(values, 0.0)\n', (15039, 15052), True, 'import numpy as np\n'), ((15064, 15086), 'numpy.equal', 'np.equal', (['targets', '(0.0)'], {}), '(targets, 0.0)\n', (15072, 15086), True, 'import numpy as np\n'), ((15383, 15400), 'numpy.arccos', 'np.arccos', (['cosine'], {}), '(cosine)\n', (15392, 15400), True, 'import numpy as np\n'), ((14012, 14028), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14026, 14028), True, 'import numpy as np\n'), ((14903, 14927), 'numpy.abs', 'np.abs', (['(values - targets)'], {}), '(values - targets)\n', (14909, 14927), True, 'import numpy as np\n'), ((15164, 15181), 'numpy.square', 'np.square', (['values'], {}), '(values)\n', (15173, 15181), True, 'import numpy as np\n'), ((15201, 15219), 'numpy.square', 'np.square', (['targets'], {}), '(targets)\n', (15210, 15219), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(0)
import time
import matplotlib.pyplot as plt
from gpflow.likelihoods import Gaussian
from gpflow.kernels import RBF, White
from gpflow.mean_functions import Constant
from gpflow.models.sgpr import SGPR, GPRFITC
from gpflow.models.svgp import SVGP
from gpflow.models.gpr import GPR
from gpflow.training import AdamOptimizer, ScipyOptimizer, NatGradOptimizer
from gpflow.actions import Action, Loop
from scipy.cluster.vq import kmeans2
from scipy.stats import norm
from scipy.special import logsumexp
from doubly_stochastic_dgp.dgp import DGP
from datasets import Datasets
# Load the kin8nm regression split.  Y_std is presumably the target
# standard deviation used later to report metrics in the original data
# units — confirm against the datasets module.
datasets = Datasets(data_path='/data/')
data = datasets.all_datasets['kin8nm'].get_data()
X, Y, Xs, Ys, Y_std = [data[_] for _ in ['X', 'Y', 'Xs', 'Ys', 'Y_std']]
print('N: {}, D: {}, Ns: {}'.format(X.shape[0], X.shape[1], Xs.shape[0]))
def make_single_layer_models(X, Y, Z):
    """Build the three single-layer sparse GP baselines (SGPR, SVGP, FITC).

    Each model gets its own RBF kernel over the input dimension and a copy
    of the inducing points ``Z``.  Returns ``(models, names)``.
    """
    input_dim = X.shape[1]
    models = [
        SGPR(X, Y, RBF(input_dim), Z.copy()),
        SVGP(X, Y, RBF(input_dim), Gaussian(), Z.copy()),
        GPRFITC(X, Y, RBF(input_dim), Z.copy()),
    ]
    # Start from a small observation noise variance.
    for model in models:
        model.likelihood.variance = 0.01
    names = ['{} {}'.format(n, len(Z)) for n in ['SGPR', 'SVGP', 'FITC']]
    return models, names
# 100 inducing points initialised by k-means on the training inputs.
Z_100 = kmeans2(X, 100, minit='points')[0]
models_single_layer, names_single_layer = make_single_layer_models(X, Y, Z_100)
# ## DGP models
#
# We'll include a DGP with a single layer here for comparision. We've used a largish minibatch size of $\text{min}(1000, N)$, but it works fine for smaller batches too
#
# In the paper we used 1 sample. Here we'll go up to 5 in celebration of the new implementation (which is much more efficient)
# In[5]:
def make_dgp_models(X, Y, Z):
    """Build DGP models with 1, 2 and 3 layers sharing inducing points Z.

    Returns ``(models, names)`` where each name encodes the depth and the
    number of inducing points.
    """
    models, names = [], []
    for L in range(1, 4):
        D = X.shape[1]
        # the layer shapes are defined by the kernel dims, so here all
        # hidden layers are D dimensional
        kernels = [RBF(D) for _ in range(L)]
        # between layer noise (doesn't actually make much difference but we
        # include it anyway).
        # BUG FIX: the original `for kernel in kernels[:-1]: kernel += White(...)`
        # only rebound the loop variable — kernel `+` builds a new Sum kernel,
        # so the list entries were never modified and the White noise was
        # silently dropped.  Update the list entries instead.
        for i in range(L - 1):
            kernels[i] = kernels[i] + White(D, variance=1e-5)
        # Minibatch only when the dataset is large enough to warrant it.
        mb = 1000 if X.shape[0] > 1000 else None
        model = DGP(X, Y, Z, kernels, Gaussian(), num_samples=5, minibatch_size=mb)
        # start the inner layers almost deterministically
        for layer in model.layers[:-1]:
            layer.q_sqrt = layer.q_sqrt.value * 1e-5
        models.append(model)
        names.append('DGP{} {}'.format(L, len(Z)))
    return models, names
models_dgp, names_dgp = make_dgp_models(X, Y, Z_100)
# ## Prediction
#
# We'll calculate test rmse and likelihood in batches (so the larger datasets don't cause memory problems)
#
# For the DGP models we need to take an average over the samples for the rmse. The `predict_density` function already does this internally
#
# In[6]:
def batch_assess(model, assess_model, X, Y):
    """Run ``assess_model`` over (X, Y) in batches of roughly 1000 rows.

    ``assess_model(model, X_batch, Y_batch)`` must return per-point
    ``(log_likelihood, squared_error)`` arrays.  Returns the mean
    log-likelihood and the RMSE over the whole set.
    """
    n_batches = max(int(X.shape[0] / 1000.), 1)
    lik_parts, sq_parts = [], []
    for X_batch, Y_batch in zip(np.array_split(X, n_batches), np.array_split(Y, n_batches)):
        batch_lik, batch_sq = assess_model(model, X_batch, Y_batch)
        lik_parts.append(batch_lik)
        sq_parts.append(batch_sq)
    all_lik = np.concatenate(lik_parts, 0)
    all_sq = np.array(np.concatenate(sq_parts, 0), dtype=float)
    return np.average(all_lik), np.average(all_sq) ** 0.5
def assess_single_layer(model, X_batch, Y_batch):
    """Per-point log-likelihood and squared error for a single-layer GP.

    Predictions are made in normalised space; the module-level ``Y_std``
    rescales both metrics back to the original data units.
    """
    mean, var = model.predict_y(X_batch)
    log_density = norm.logpdf(Y_batch * Y_std, loc=mean * Y_std, scale=Y_std * var ** 0.5)
    lik = np.sum(log_density, 1)
    sq_diff = Y_std ** 2 * ((mean - Y_batch) ** 2)
    return lik, sq_diff
S = 100  # posterior samples per test point when evaluating sampled (DGP) models
def assess_sampled(model, X_batch, Y_batch):
    """Per-point log-likelihood and squared error for a sampled (DGP) model.

    Draws ``S`` posterior samples, averages the per-sample densities with
    a log-sum-exp, and uses the sample-mean prediction for the squared
    error.  ``Y_std`` (module-level) rescales to the original data units.
    """
    mean_samples, var_samples = model.predict_y(X_batch, S)
    per_sample_lik = np.sum(
        norm.logpdf(Y_batch * Y_std, loc=mean_samples * Y_std, scale=Y_std * var_samples ** 0.5), 2)
    lik = logsumexp(per_sample_lik, 0, b=1 / float(S))
    mean = np.average(mean_samples, 0)
    sq_diff = Y_std ** 2 * ((mean - Y_batch) ** 2)
    return lik, sq_diff
# ## Training
#
# We'll optimize single layer models and using LFBGS and the dgp models with Adam. It will be interesting to compare the result of `m_svgp` compared to `m_dgp1`: if there is a difference it will be down to the optimizer.
#
# We'll show here also the reuslt of using a small and large number of iterations.
# In[7]:
# Short and long training budgets compared below.
iterations_few = 100
iterations_many = 5000
# Row template: model name, test log-likelihood, test RMSE.
s = '{:<16} lik: {:.4f}, rmse: {:.4f}'
# In[8]:
# Train the single-layer baselines with the scipy optimizer and report
# held-out likelihood / RMSE after a short and a long run.
# NOTE(review): the same model objects are optimised again in the second
# pass, so the "after 5000 iterations" numbers are cumulative.
for iterations in [iterations_few, iterations_many]:
    print('after {} iterations'.format(iterations))
    for m, name in zip(models_single_layer, names_single_layer):
        ScipyOptimizer().minimize(m, maxiter=iterations)
        lik, rmse = batch_assess(m, assess_single_layer, Xs, Ys)
        print(s.format(name, lik, rmse))
# Now for the DGP models. First we use Adam for all parameters (as in the Doubly Stochastic VI for DGPs paper)
# In[9]:
# Train the DGP models with Adam (learning rate 0.01) on all parameters.
for iterations in [iterations_few, iterations_many]:
    print('after {} iterations'.format(iterations))
    for m, name in zip(models_dgp, names_dgp):
        AdamOptimizer(0.01).minimize(m, maxiter=iterations)
        lik, rmse = batch_assess(m, assess_sampled, Xs, Ys)
        print(s.format(name, lik, rmse))
# We can also use natural gradients for the final layer, which can help considerably.
# In[10]:
# Hybrid training: natural gradients on the final-layer variational
# parameters (q_mu, q_sqrt) and Adam on everything else.
for iterations in [iterations_few, iterations_many]:
    print('after {} iterations'.format(iterations))
    for m, name in zip(models_dgp, names_dgp):
        ng_vars = [[m.layers[-1].q_mu, m.layers[-1].q_sqrt]]
        # Exclude the nat-grad variables from Adam's trainable set.
        for v in ng_vars[0]:
            v.set_trainable(False)
        ng_action = NatGradOptimizer(gamma=0.1).make_optimize_action(m, var_list=ng_vars)
        adam_action = AdamOptimizer(0.01).make_optimize_action(m)
        # Alternate one nat-grad step and one Adam step per iteration.
        Loop([ng_action, adam_action], stop=iterations)()
        lik, rmse = batch_assess(m, assess_sampled, Xs, Ys)
        print(s.format(name, lik, rmse))
# Note that even after 100 iterations we get a good result, which is not the case using ordinary gradients.
| [
"gpflow.likelihoods.Gaussian",
"numpy.average",
"gpflow.kernels.White",
"scipy.stats.norm.logpdf",
"tensorflow.logging.set_verbosity",
"gpflow.actions.Loop",
"gpflow.training.AdamOptimizer",
"scipy.cluster.vq.kmeans2",
"gpflow.kernels.RBF",
"gpflow.training.NatGradOptimizer",
"numpy.array_split"... | [((43, 70), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['(0)'], {}), '(0)\n', (67, 70), True, 'import tensorflow as tf\n'), ((657, 685), 'datasets.Datasets', 'Datasets', ([], {'data_path': '"""/data/"""'}), "(data_path='/data/')\n", (665, 685), False, 'from datasets import Datasets\n'), ((1268, 1299), 'scipy.cluster.vq.kmeans2', 'kmeans2', (['X', '(100)'], {'minit': '"""points"""'}), "(X, 100, minit='points')\n", (1275, 1299), False, 'from scipy.cluster.vq import kmeans2\n'), ((3248, 3270), 'numpy.concatenate', 'np.concatenate', (['lik', '(0)'], {}), '(lik, 0)\n', (3262, 3270), True, 'import numpy as np\n'), ((3860, 3876), 'numpy.average', 'np.average', (['m', '(0)'], {}), '(m, 0)\n', (3870, 3876), True, 'import numpy as np\n'), ((969, 975), 'gpflow.kernels.RBF', 'RBF', (['D'], {}), '(D)\n', (972, 975), False, 'from gpflow.kernels import RBF, White\n'), ((1011, 1017), 'gpflow.kernels.RBF', 'RBF', (['D'], {}), '(D)\n', (1014, 1017), False, 'from gpflow.kernels import RBF, White\n'), ((1019, 1029), 'gpflow.likelihoods.Gaussian', 'Gaussian', ([], {}), '()\n', (1027, 1029), False, 'from gpflow.likelihoods import Gaussian\n'), ((1068, 1074), 'gpflow.kernels.RBF', 'RBF', (['D'], {}), '(D)\n', (1071, 1074), False, 'from gpflow.kernels import RBF, White\n'), ((3074, 3102), 'numpy.array_split', 'np.array_split', (['X', 'n_batches'], {}), '(X, n_batches)\n', (3088, 3102), True, 'import numpy as np\n'), ((3104, 3132), 'numpy.array_split', 'np.array_split', (['Y', 'n_batches'], {}), '(Y, n_batches)\n', (3118, 3132), True, 'import numpy as np\n'), ((3294, 3320), 'numpy.concatenate', 'np.concatenate', (['sq_diff', '(0)'], {}), '(sq_diff, 0)\n', (3308, 3320), True, 'import numpy as np\n'), ((3346, 3361), 'numpy.average', 'np.average', (['lik'], {}), '(lik)\n', (3356, 3361), True, 'import numpy as np\n'), ((3492, 3559), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['(Y_batch * Y_std)'], {'loc': '(m * Y_std)', 'scale': '(Y_std * v ** 
0.5)'}), '(Y_batch * Y_std, loc=m * Y_std, scale=Y_std * v ** 0.5)\n', (3503, 3559), False, 'from scipy.stats import norm\n'), ((3736, 3803), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['(Y_batch * Y_std)'], {'loc': '(m * Y_std)', 'scale': '(Y_std * v ** 0.5)'}), '(Y_batch * Y_std, loc=m * Y_std, scale=Y_std * v ** 0.5)\n', (3747, 3803), False, 'from scipy.stats import norm\n'), ((2162, 2186), 'gpflow.kernels.White', 'White', (['D'], {'variance': '(1e-05)'}), '(D, variance=1e-05)\n', (2167, 2186), False, 'from gpflow.kernels import RBF, White\n'), ((2276, 2286), 'gpflow.likelihoods.Gaussian', 'Gaussian', ([], {}), '()\n', (2284, 2286), False, 'from gpflow.likelihoods import Gaussian\n'), ((3363, 3382), 'numpy.average', 'np.average', (['sq_diff'], {}), '(sq_diff)\n', (3373, 3382), True, 'import numpy as np\n'), ((5702, 5749), 'gpflow.actions.Loop', 'Loop', (['[ng_action, adam_action]'], {'stop': 'iterations'}), '([ng_action, adam_action], stop=iterations)\n', (5706, 5749), False, 'from gpflow.actions import Action, Loop\n'), ((2000, 2006), 'gpflow.kernels.RBF', 'RBF', (['D'], {}), '(D)\n', (2003, 2006), False, 'from gpflow.kernels import RBF, White\n'), ((4561, 4577), 'gpflow.training.ScipyOptimizer', 'ScipyOptimizer', ([], {}), '()\n', (4575, 4577), False, 'from gpflow.training import AdamOptimizer, ScipyOptimizer, NatGradOptimizer\n'), ((5001, 5020), 'gpflow.training.AdamOptimizer', 'AdamOptimizer', (['(0.01)'], {}), '(0.01)\n', (5014, 5020), False, 'from gpflow.training import AdamOptimizer, ScipyOptimizer, NatGradOptimizer\n'), ((5557, 5584), 'gpflow.training.NatGradOptimizer', 'NatGradOptimizer', ([], {'gamma': '(0.1)'}), '(gamma=0.1)\n', (5573, 5584), False, 'from gpflow.training import AdamOptimizer, ScipyOptimizer, NatGradOptimizer\n'), ((5649, 5668), 'gpflow.training.AdamOptimizer', 'AdamOptimizer', (['(0.01)'], {}), '(0.01)\n', (5662, 5668), False, 'from gpflow.training import AdamOptimizer, ScipyOptimizer, NatGradOptimizer\n')] |
"""Generate plot of the mean vowel sample spectra
"""
import torch
import wavetorch
from torch.utils.data import TensorDataset, DataLoader
import argparse
import yaml
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
import pandas as pd
import librosa
try:
from helpers.plot import mpl_set_latex
mpl_set_latex()
except ImportError:
import warnings
warnings.warn('The helpers package is unavailable', ImportWarning)
n_fft = 2048
sr = 10000
vowels = ['ae', 'ei', 'iy']
colors = ['#fcaf3e', '#ad7fa8', '#ef2929']
# vowels = ['ae', 'eh', 'ih', 'oo', 'ah', 'ei', 'iy', 'uh', 'aw', 'er', 'oa', 'uw']
gender = 'both'
fig,ax=plt.subplots(1,1,constrained_layout=True, figsize=(3.5,2.75))
for i, vowel in enumerate(vowels):
X, _, _ = wavetorch.data.load_all_vowels([vowel], gender=gender, sr=sr)
X_ft = [np.abs(librosa.core.stft(Xi.numpy(),n_fft=n_fft)) for Xi in X]
X_ft_int = np.vstack([Xi.sum(axis=1) for Xi in X_ft])
X_ft_mean = np.mean(X_ft_int,axis=0)
X_ft_std = np.std(X_ft_int,axis=0)
ax.fill_between(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
X_ft_mean,
alpha=0.30, color=colors[i], edgecolor="none", zorder=i ,lw=0)
ax.plot(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
X_ft_mean,
color=colors[i],zorder=i, label=vowel + ' vowel class', lw=1.0)
# ax.plot(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
# X_ft_std, '-',
# label=vowel + ' vowel class', color=colors[i], lw=1, zorder=i)
# ax.set_xlim([0,5000])
# ax.set_ylim([0,13])
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel("Mean energy spectrum (a.u.)")
ax.legend()
plt.show(block=False)
| [
"matplotlib.pyplot.show",
"numpy.std",
"librosa.core.fft_frequencies",
"wavetorch.data.load_all_vowels",
"helpers.plot.mpl_set_latex",
"numpy.mean",
"warnings.warn",
"matplotlib.pyplot.subplots"
] | [((732, 796), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)', 'figsize': '(3.5, 2.75)'}), '(1, 1, constrained_layout=True, figsize=(3.5, 2.75))\n', (744, 796), True, 'import matplotlib.pyplot as plt\n'), ((1811, 1832), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1819, 1832), True, 'import matplotlib.pyplot as plt\n'), ((399, 414), 'helpers.plot.mpl_set_latex', 'mpl_set_latex', ([], {}), '()\n', (412, 414), False, 'from helpers.plot import mpl_set_latex\n'), ((844, 905), 'wavetorch.data.load_all_vowels', 'wavetorch.data.load_all_vowels', (['[vowel]'], {'gender': 'gender', 'sr': 'sr'}), '([vowel], gender=gender, sr=sr)\n', (874, 905), False, 'import wavetorch\n'), ((1057, 1082), 'numpy.mean', 'np.mean', (['X_ft_int'], {'axis': '(0)'}), '(X_ft_int, axis=0)\n', (1064, 1082), True, 'import numpy as np\n'), ((1097, 1121), 'numpy.std', 'np.std', (['X_ft_int'], {'axis': '(0)'}), '(X_ft_int, axis=0)\n', (1103, 1121), True, 'import numpy as np\n'), ((459, 525), 'warnings.warn', 'warnings.warn', (['"""The helpers package is unavailable"""', 'ImportWarning'], {}), "('The helpers package is unavailable', ImportWarning)\n", (472, 525), False, 'import warnings\n'), ((1142, 1190), 'librosa.core.fft_frequencies', 'librosa.core.fft_frequencies', ([], {'sr': 'sr', 'n_fft': 'n_fft'}), '(sr=sr, n_fft=n_fft)\n', (1170, 1190), False, 'import librosa\n'), ((1320, 1368), 'librosa.core.fft_frequencies', 'librosa.core.fft_frequencies', ([], {'sr': 'sr', 'n_fft': 'n_fft'}), '(sr=sr, n_fft=n_fft)\n', (1348, 1368), False, 'import librosa\n')] |
# --------------------------------------------------------
# Relation Networks for Object Detection
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>
# --------------------------------------------------------
"""
Nms Multi-thresh Target Operator selects foreground and background roi,
and assigns label, bbox_transform to them.
"""
import mxnet as mx
import numpy as np
from bbox.bbox_transform import bbox_overlaps
class NmsMultiTargetOp(mx.operator.CustomOp):
def __init__(self, target_thresh):
super(NmsMultiTargetOp, self).__init__()
self._target_thresh = target_thresh
self._num_thresh = len(target_thresh)
def forward(self, is_train, req, in_data, out_data, aux):
# bbox, [first_n, num_fg_classes, 4]
bbox = in_data[0].asnumpy()
num_boxes = bbox.shape[0]
num_fg_classes = bbox.shape[1]
gt_box = in_data[1].asnumpy()
# score, [first_n, num_fg_classes]
score = in_data[2].asnumpy()
batch_image, num_gt, code_size = gt_box.shape
assert batch_image == 1, 'only support batch_image=1, but receive %d' % num_gt
assert code_size == 5, 'code_size of gt should be 5, but receive %d' % code_size
assert len(score.shape) == 2, 'shape of score is %d instead of 2.' % len(score.shape)
assert score.shape[1] == num_fg_classes, 'number of fg classes should be same for boxes and scores'
output_list = []
for cls_idx in range(0, num_fg_classes):
valid_gt_mask = (gt_box[0, :, -1].astype(np.int32) == (cls_idx + 1))
valid_gt_box = gt_box[0, valid_gt_mask, :]
num_valid_gt = len(valid_gt_box)
if num_valid_gt == 0:
output = np.zeros(shape=(num_boxes, self._num_thresh), dtype=np.float32)
output_list.append(output)
else:
bbox_per_class = bbox[:, cls_idx, :]
score_per_class = score[:, cls_idx:cls_idx + 1]
overlap_mat = bbox_overlaps(bbox_per_class.astype(np.float),
valid_gt_box[:, :-1].astype(np.float))
eye_matrix = np.eye(num_valid_gt)
output_list_per_class = []
for thresh in self._target_thresh:
# following mAP metric
overlap_mask = (overlap_mat > thresh)
valid_bbox_indices = np.where(overlap_mask)[0]
# require score be 2-dim
overlap_score = np.tile(score_per_class, (1, num_valid_gt))
overlap_score *= overlap_mask
max_overlap_indices = np.argmax(overlap_mat, axis=1)
max_overlap_mask = eye_matrix[max_overlap_indices]
overlap_score *= max_overlap_mask
max_score_indices = np.argmax(overlap_score, axis=0)
output = np.zeros((num_boxes,))
output[np.intersect1d(max_score_indices, valid_bbox_indices)] = 1
output_list_per_class.append(output)
output_per_class = np.stack(output_list_per_class, axis=-1)
output_list.append(output_per_class)
blob = np.stack(output_list, axis=1).astype(np.float32, copy=False)
self.assign(out_data[0], req[0], blob)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
self.assign(in_grad[2], req[2], 0)
@mx.operator.register("nms_multi_target")
class NmsMultiTargetProp(mx.operator.CustomOpProp):
def __init__(self, target_thresh):
super(NmsMultiTargetProp, self).__init__(need_top_grad=False)
self._target_thresh = np.fromstring(target_thresh[1:-1], dtype=float, sep=' ')
self._num_thresh = len(self._target_thresh)
def list_arguments(self):
return ['bbox', 'gt_bbox', 'score']
def list_outputs(self):
return ['nms_multi_target']
def infer_shape(self, in_shape):
bbox_shape = in_shape[0]
# gt_box_shape = in_shape[1]
score_shape = in_shape[2]
assert bbox_shape[0] == score_shape[0], 'ROI number should be same for bbox and score'
num_boxes = bbox_shape[0]
num_fg_classes = bbox_shape[1]
output_shape = (num_boxes, num_fg_classes, self._num_thresh)
return in_shape, [output_shape]
def create_operator(self, ctx, shapes, dtypes):
return NmsMultiTargetOp(self._target_thresh)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
| [
"numpy.stack",
"numpy.argmax",
"numpy.zeros",
"mxnet.operator.register",
"numpy.where",
"numpy.tile",
"numpy.eye",
"numpy.intersect1d",
"numpy.fromstring"
] | [((3614, 3654), 'mxnet.operator.register', 'mx.operator.register', (['"""nms_multi_target"""'], {}), "('nms_multi_target')\n", (3634, 3654), True, 'import mxnet as mx\n'), ((3846, 3902), 'numpy.fromstring', 'np.fromstring', (['target_thresh[1:-1]'], {'dtype': 'float', 'sep': '""" """'}), "(target_thresh[1:-1], dtype=float, sep=' ')\n", (3859, 3902), True, 'import numpy as np\n'), ((1800, 1863), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_boxes, self._num_thresh)', 'dtype': 'np.float32'}), '(shape=(num_boxes, self._num_thresh), dtype=np.float32)\n', (1808, 1863), True, 'import numpy as np\n'), ((2232, 2252), 'numpy.eye', 'np.eye', (['num_valid_gt'], {}), '(num_valid_gt)\n', (2238, 2252), True, 'import numpy as np\n'), ((3192, 3232), 'numpy.stack', 'np.stack', (['output_list_per_class'], {'axis': '(-1)'}), '(output_list_per_class, axis=-1)\n', (3200, 3232), True, 'import numpy as np\n'), ((3301, 3330), 'numpy.stack', 'np.stack', (['output_list'], {'axis': '(1)'}), '(output_list, axis=1)\n', (3309, 3330), True, 'import numpy as np\n'), ((2597, 2640), 'numpy.tile', 'np.tile', (['score_per_class', '(1, num_valid_gt)'], {}), '(score_per_class, (1, num_valid_gt))\n', (2604, 2640), True, 'import numpy as np\n'), ((2733, 2763), 'numpy.argmax', 'np.argmax', (['overlap_mat'], {'axis': '(1)'}), '(overlap_mat, axis=1)\n', (2742, 2763), True, 'import numpy as np\n'), ((2929, 2961), 'numpy.argmax', 'np.argmax', (['overlap_score'], {'axis': '(0)'}), '(overlap_score, axis=0)\n', (2938, 2961), True, 'import numpy as np\n'), ((2991, 3013), 'numpy.zeros', 'np.zeros', (['(num_boxes,)'], {}), '((num_boxes,))\n', (2999, 3013), True, 'import numpy as np\n'), ((2490, 2512), 'numpy.where', 'np.where', (['overlap_mask'], {}), '(overlap_mask)\n', (2498, 2512), True, 'import numpy as np\n'), ((3041, 3094), 'numpy.intersect1d', 'np.intersect1d', (['max_score_indices', 'valid_bbox_indices'], {}), '(max_score_indices, valid_bbox_indices)\n', (3055, 3094), True, 'import numpy as np\n')] |
"""
Geometric methods to compute volumes, areas, distances of the mesh entities
"""
# Geoemtric Module
# Created by <NAME>, <NAME> and <NAME>.
import numpy as np
from pymoab import topo_util, types, rng
def normal_vec_2d(coords0, coords1):
vec = coords1 - coords0
vec_norm = np.linalg.norm(vec, axis = 1)
return np.array([vec[:,1], -vec[:,0], vec[:,2] ]).T / vec_norm[:,np.newaxis]
def normal_vec(coords1, coords2, coords3):
vec1 = coords1 - coords3
vec2 = coords2 - coords3
cross_product = np.cross(vec1, vec2)
norm_cross = np.power(np.linalg.norm(cross_product,axis=1),-1)
cross_product[:, 0] = norm_cross * cross_product[:, 0]
cross_product[:, 1] = norm_cross * cross_product[:, 1]
cross_product[:, 2] = norm_cross * cross_product[:, 2]
return cross_product
def point_distance(coords_1, coords_2):
dist_vector = coords_1 - coords_2
distance = np.sqrt(np.dot(dist_vector, dist_vector))
return distance
def get_average(coords_list):
N = len(coords_list)
return sum(coords_list)*(1/N)
def cross_product_3d(u, v):
w = np.zeros(3)
w[0] = u[1]*v[2] - u[2]*v[1]
w[1] = u[2]*v[0] - u[0]*v[2]
w[2] = u[0]*v[1] - u[1]*v[0]
return w
def triangle_area(v1, v2, v3):
w = cross_product_3d(v1 - v2, v1 - v3)
area = 0.5*np.linalg.norm(w)
return area
def polygon_area(moab_core, polygon):
"""
Calculate the area of a polygon by triangulation.
"""
# Retrieve vertices handles and coordinates from face handle.
vertices = moab_core.get_adjacencies(polygon, 0)
vert_coords = moab_core.get_coords(vertices).reshape(len(vertices), 3)
vertices_dict = dict(zip(vertices, vert_coords))
# If the polygon is a triangle, then just compute the area by
# definition.
if moab_core.type_from_handle(polygon) == types.MBTRI or vertices.size() == 3:
return triangle_area(vert_coords[0], vert_coords[1], vert_coords[2])
# Else, compute a triangulation for this shape.
mtu = topo_util.MeshTopoUtil(moab_core)
# Choose a vertex to start and compute its neighbors, a.k.a, the vertices
# sharing an edge with it.
v0 = vertices[0]
v0_neighbors = rng.intersect(vertices, mtu.get_bridge_adjacencies(v0, 1, 0))
v0_coords = vertices_dict[v0]
# vi is the vertice currently being visited, and vj is its neighbor not
# visited yet. At each iteration, we compute the area of the triangle (v0, vi, vj).
vi, vj = v0_neighbors[0], None
# At each iteration, we store the vertices that already took part in
# a triangle to avoid recalculating the same triangle.
visited_verts = rng.Range([v0, vi])
# While there still vertices to be visited, compute a new triangle.
area = 0
while visited_verts.size() < vertices.size():
vi_neighbors = rng.intersect(vertices, mtu.get_bridge_adjacencies(vi, 1, 0))
vj = rng.subtract(vi_neighbors, visited_verts)[0]
vi_coords, vj_coords = vertices_dict[vi], vertices_dict[vj]
area += triangle_area(v0_coords, vi_coords, vj_coords)
visited_verts.insert(vj)
vi = vj
return area
def pyramid_volume(moab_core, face, v):
"""
Compute the volume of a pyramid.
moab_core: a PyMOAB core instance.
face: a PyMOAB EntityHandle representing the base of the pyramid.
v: a NumPy array representing the coordinates of the top vertex.
"""
# Get three vertices from the base.
vertices_handles = moab_core.get_connectivity(face)[0:3]
p1, p2, p3 = moab_core.get_coords(vertices_handles).reshape((3,3))
# Compute the area of the base.
A = polygon_area(moab_core, face)
# Compute the distance from v to the base plane.
u = cross_product_3d(p2 - p1, p3 - p1)
n = u / np.linalg.norm(u)
h = np.abs(n.dot(v - p1))
return (A*h) / 3
def polyhedron_volume(moab_core, polyhedron, center):
"""
Computes the volume of a convex polyhedron.
moab_core: a PyMOAB core instance.
face: a PyMOAB EntityHandle representing the polyhedron.
center: a NumPy array containing the coordinates of the
centroid.
"""
faces = moab_core.get_adjacencies(polyhedron, 2)
vertices = moab_core.get_adjacencies(polyhedron, 0)
# If the polyhedron is a pyramid or a tetrahedron, then compute
# its volume straight ahead.
if moab_core.type_from_handle(polyhedron) == types.MBTET:
base = faces[0]
base_vertices = moab_core.get_adjacencies(base, 0)
top_vertex = rng.subtract(vertices, base_vertices)[0]
top_vertex_coords = moab_core.get_coords(top_vertex)
volume = pyramid_volume(moab_core, base, top_vertex_coords)
elif moab_core.type_from_handle(polyhedron) == types.MBPYRAMID:
base = [face for face in faces if moab_core.type_from_handle(face) != types.MBTRI][0]
base_vertices = moab_core.get_adjacencies(base, 0)
top_vertex = rng.subtract(vertices, base_vertices)[0]
top_vertex_coords = moab_core.get_coords(top_vertex)
volume = pyramid_volume(moab_core, base, top_vertex_coords)
# Otherwise, compute the volume by splitting the polyhedron
# into pyramids, each face acting like the base and the centroid
# acting like the top vertex.
else:
volume = sum([pyramid_volume(moab_core, face, center) for face in faces])
return volume
| [
"pymoab.topo_util.MeshTopoUtil",
"pymoab.rng.Range",
"numpy.zeros",
"numpy.cross",
"pymoab.rng.subtract",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot"
] | [((285, 312), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {'axis': '(1)'}), '(vec, axis=1)\n', (299, 312), True, 'import numpy as np\n'), ((518, 538), 'numpy.cross', 'np.cross', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (526, 538), True, 'import numpy as np\n'), ((1091, 1102), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1099, 1102), True, 'import numpy as np\n'), ((2007, 2040), 'pymoab.topo_util.MeshTopoUtil', 'topo_util.MeshTopoUtil', (['moab_core'], {}), '(moab_core)\n', (2029, 2040), False, 'from pymoab import topo_util, types, rng\n'), ((2640, 2659), 'pymoab.rng.Range', 'rng.Range', (['[v0, vi]'], {}), '([v0, vi])\n', (2649, 2659), False, 'from pymoab import topo_util, types, rng\n'), ((565, 602), 'numpy.linalg.norm', 'np.linalg.norm', (['cross_product'], {'axis': '(1)'}), '(cross_product, axis=1)\n', (579, 602), True, 'import numpy as np\n'), ((910, 942), 'numpy.dot', 'np.dot', (['dist_vector', 'dist_vector'], {}), '(dist_vector, dist_vector)\n', (916, 942), True, 'import numpy as np\n'), ((1305, 1322), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (1319, 1322), True, 'import numpy as np\n'), ((3773, 3790), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (3787, 3790), True, 'import numpy as np\n'), ((326, 370), 'numpy.array', 'np.array', (['[vec[:, 1], -vec[:, 0], vec[:, 2]]'], {}), '([vec[:, 1], -vec[:, 0], vec[:, 2]])\n', (334, 370), True, 'import numpy as np\n'), ((2894, 2935), 'pymoab.rng.subtract', 'rng.subtract', (['vi_neighbors', 'visited_verts'], {}), '(vi_neighbors, visited_verts)\n', (2906, 2935), False, 'from pymoab import topo_util, types, rng\n'), ((4522, 4559), 'pymoab.rng.subtract', 'rng.subtract', (['vertices', 'base_vertices'], {}), '(vertices, base_vertices)\n', (4534, 4559), False, 'from pymoab import topo_util, types, rng\n'), ((4934, 4971), 'pymoab.rng.subtract', 'rng.subtract', (['vertices', 'base_vertices'], {}), '(vertices, base_vertices)\n', (4946, 4971), False, 'from pymoab import 
topo_util, types, rng\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 12:22:58 2020
This code computes average maps from individual synaptic maps
AMPLITUDE way : inputs are amplitude (pA) based maps
@author: ludov
"""
#----------------------------Check this info-----------------------------------
#General directory to find the data
dataDir = 'C:/Users/klab/Documents/SpaethBahugunaData'
#Where to save datas/figures
outputDir = 'C:/Users/klab/Desktop/testOutput'
#-----------------------------------------------------------------------------------------------------
#--------------------------------------the code-------------------------------------------------------
import numpy as np
from numpy import genfromtxt as gen
from matplotlib import pyplot as plt
import os
import pandas as pd
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
inputDir = '{}//ProcessedData/Development_Dataset'.format(dataDir)
#The groups to analyse
conditions = ['P9P10','P12P13','P14P18','P30P40']
colors = ['lightskyblue','skyblue','deepskyblue','royalblue']
#The sheetnames in excel file
sheets=['P9P10','P12P13','P14P18','P30P40']
#To constrict 1D plots
ylim = 20
binForMedian = 20 #In % of P1- : 10 is the last value used
left,right = 210,210 #Borders in %P1-
zscoreLimit =2.0 #Limit of significance for zscore
vmin, vmax = -150,-10 #For maps plot to align every conditions
minz, maxz = 3.09,20
#Interpolation ?
interpolationType = 'sinc'
#Do we save anything ?
saveFig = True #For figures
saveData = True
#------------------------------------FUNCTIONS---------------------------------------------------------
#---------------------------------DO NOT MODIFY--------------------------------------------------------
def MAD(a,axis=None):
'''
Computes median absolute deviation of an array along given axis
'''
#Median along given axis but keep reduced axis so that result can still broadcast along a
med = np.nanmedian(a, axis=axis, keepdims=True)
mad = np.median(np.abs(a-med),axis=axis) #MAD along the given axis
return mad
def stack_lines(_list):
stacked_map = np.vstack((_list[0],
_list[1],
_list[2],
_list[3]))
return stacked_map
#Iterate for each condition
for condition,sheet,i in zip(conditions,sheets,range(len(conditions))):
parentDir = '{}/{}'.format(inputDir,condition)
print(parentDir)
listOfExperiments = os.listdir(parentDir)
#Matrix to append maps and positions
H = 4 #Map height in sites
L = 32 #Map width in sites
N = len(listOfExperiments)
_mat = np.zeros((H,L,N,3)) #[0] for map, [1] for position and [2] for Zscore
#Iterate on each experiment
for experiment,idx in zip(listOfExperiments,range(N)):
print (experiment)
manipPath = '{}/{}'.format(parentDir,experiment)
#Load map in Matrix
_mat[:,:,idx,0]=gen('{}/{}_Amp_2D_OK.csv'.format(manipPath,experiment),delimiter=',')
#Get the positions
pos = gen('{}/{}_Positions_cp_centered_OK.csv'.format(manipPath,experiment),delimiter=',')
pos_2D = (pos,pos,pos,pos)
_mat[:,:,idx,1]=np.reshape(pos_2D,(H,L))
#And now the 2D Zscore
_mat[:,:,idx,2]=gen('{}/{}_Amp_zscore_2D_OK.csv'.format(manipPath,experiment),delimiter=',')
#Get 1D Zscore
zscore_1D_for_plot = np.nanmax(_mat[:,:,idx,2], axis=0)
#FOR 2D ANALYSIS--------------------------------------------------------------------------------------------
fig, ax = plt.subplots(H,1, figsize=(7,9),sharex=True,sharey=True)
plt.suptitle('{} 2D maps line by line'.format(condition))
_MEDIAN_ZSCORE_2D, _AVERAGE_AMP_2D, _COUNT_2D, _POSITIONS_2D, _SUM_2D = [], [],[],[],[]
for j in range(H):
for y in range(N):
#Create basis for concatenation at first loop
if y == 0 :
POSITIONS_2D = _mat[j,:,y,1]
ZSCORES_2D = _mat[j,:,y,2]
AMPS_2D = _mat[j,:,y,0]
#Concatenate patterns for next loops
else :
POSITIONS_2D = np.concatenate((POSITIONS_2D,_mat[j,:,y,1]),axis=0)
ZSCORES_2D = np.concatenate((ZSCORES_2D,_mat[j,:,y,2]),axis=0)
AMPS_2D = np.concatenate((AMPS_2D,_mat[j,:,y,0]), axis=0)
#SORT AMPLS AND ZSCORE ACCORDING TO POSITIONS
SORTED_2D_AMPS = [x for _, x in sorted(zip(POSITIONS_2D,AMPS_2D))]
SORTED_2D_ZSCORES = [x for _, x in sorted(zip(POSITIONS_2D,ZSCORES_2D))]
ax[j].plot(sorted(POSITIONS_2D),SORTED_2D_ZSCORES,color=colors[i])
ax[j].plot(sorted(POSITIONS_2D),np.ones(len(sorted(POSITIONS_2D)))*zscoreLimit,linestyle='--')
label_line = j+1
ax[j].set_ylabel('Zscore line {}'.format(label_line))
if j == H-1:
ax[j].set_xlabel('Distance (P1- norm)')
#BINNING FOR MEDIAN CALCUL
step = binForMedian #In % of P1- : 10 is the last used
binning = np.arange(-left,right+step,step)
_MEDS, _MADS, _POS, _COUNTS, _AMPS, _SUM = [],[],[],[],[],[]
for y in range(len(binning)):
if y == len(binning)-1:
break
start, stop = binning[y],binning[y+1]
_meds, _mads, _pos, _count, _amps, _sum = [],[],[],[],[],[]
#print ('Bin %s to %s'%(start, stop))
SORTED_POSITIONS = sorted(POSITIONS_2D)
for j in range(len(SORTED_POSITIONS)):
if start < SORTED_POSITIONS[j] <= stop:
if np.isnan(SORTED_2D_ZSCORES[j])==False:
_meds.append(SORTED_2D_ZSCORES[j])
_pos.append(SORTED_POSITIONS[j])
_amps.append(SORTED_2D_AMPS[j])
_sum.append(SORTED_2D_AMPS[j])
_MEDS.append(np.nanmedian(_meds))
_COUNTS.append(np.count_nonzero(_meds))
_POS.append(np.nanmedian(_pos))
_MADS.append(MAD(_meds, axis=0))
_AMPS.append(np.nanmean(_amps,axis=0))
_SUM.append(np.nansum(_sum,axis=0))
_MEDIAN_ZSCORE_2D.append(np.asarray(_MEDS))
_AVERAGE_AMP_2D.append(np.asarray(_AMPS))
_COUNT_2D.append(np.asarray(_COUNTS))
_POSITIONS_2D.append(np.asarray(_POS))
_SUM_2D.append(np.asarray(_SUM))
if saveFig == True:
plt.savefig('{}/{}_2D_raw_sorting.pdf'.format(outputDir,condition))
plt.savefig('{}/{}_2D_raw_sorting.png'.format(outputDir,condition))
fig, ax = plt.subplots(2,1,figsize=(14,5))
plt.suptitle('{} 2D maps'.format(condition))
ax[0].set_title('Median Zscore')
median_zscore_2d = ax[0].imshow(stack_lines(_MEDIAN_ZSCORE_2D),interpolation=interpolationType, cmap='magma',vmin=minz,vmax=maxz,aspect='auto')
fig.colorbar(median_zscore_2d, ax=ax[0])
ax[1].set_title('Average Amplitude')
ax[1].set_xticks(np.arange(0,len(_POSITIONS_2D[0]),1))
ax[1].set_xticklabels(_POSITIONS_2D[0].astype(int),rotation=-90)
mean_amplitude_2d = ax[1].imshow(stack_lines(_AVERAGE_AMP_2D),interpolation=interpolationType, cmap= 'magma_r',vmax=vmax,vmin=vmin,aspect='auto')
fig.colorbar(mean_amplitude_2d,ax=ax[1])
if saveFig == True:
plt.savefig('{}/{}_2D_MedianZscore_and_AvgAmplitude.pdf'.format(outputDir,condition))
plt.savefig('{}/{}_2D_MedianZscore_and_AvgAmplitude.png'.format(outputDir,condition))
if saveData == True:
np.savetxt('{}/{}_2D_MedianZscore.csv'.format(outputDir,condition),
stack_lines(_MEDIAN_ZSCORE_2D),delimiter=',')
np.savetxt('{}/{}_2D_AvgAmplitude.csv'.format(outputDir,condition),
stack_lines(_AVERAGE_AMP_2D),delimiter=',')
np.savetxt('{}/{}_POSITIONAL_ARRAY.csv'.format(outputDir,condition),
binning,delimiter=',')
| [
"numpy.nansum",
"numpy.abs",
"numpy.concatenate",
"numpy.nanmedian",
"numpy.count_nonzero",
"numpy.nanmax",
"numpy.asarray",
"numpy.zeros",
"numpy.isnan",
"numpy.arange",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"os.listdir",
"numpy.vstack",
"numpy.nanmean"
] | [((2002, 2043), 'numpy.nanmedian', 'np.nanmedian', (['a'], {'axis': 'axis', 'keepdims': '(True)'}), '(a, axis=axis, keepdims=True)\n', (2014, 2043), True, 'import numpy as np\n'), ((2191, 2242), 'numpy.vstack', 'np.vstack', (['(_list[0], _list[1], _list[2], _list[3])'], {}), '((_list[0], _list[1], _list[2], _list[3]))\n', (2200, 2242), True, 'import numpy as np\n'), ((2578, 2599), 'os.listdir', 'os.listdir', (['parentDir'], {}), '(parentDir)\n', (2588, 2599), False, 'import os\n'), ((2787, 2809), 'numpy.zeros', 'np.zeros', (['(H, L, N, 3)'], {}), '((H, L, N, 3))\n', (2795, 2809), True, 'import numpy as np\n'), ((3835, 3895), 'matplotlib.pyplot.subplots', 'plt.subplots', (['H', '(1)'], {'figsize': '(7, 9)', 'sharex': '(True)', 'sharey': '(True)'}), '(H, 1, figsize=(7, 9), sharex=True, sharey=True)\n', (3847, 3895), True, 'from matplotlib import pyplot as plt\n'), ((7089, 7124), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(14, 5)'}), '(2, 1, figsize=(14, 5))\n', (7101, 7124), True, 'from matplotlib import pyplot as plt\n'), ((2065, 2080), 'numpy.abs', 'np.abs', (['(a - med)'], {}), '(a - med)\n', (2071, 2080), True, 'import numpy as np\n'), ((3405, 3431), 'numpy.reshape', 'np.reshape', (['pos_2D', '(H, L)'], {}), '(pos_2D, (H, L))\n', (3415, 3431), True, 'import numpy as np\n'), ((3634, 3671), 'numpy.nanmax', 'np.nanmax', (['_mat[:, :, idx, 2]'], {'axis': '(0)'}), '(_mat[:, :, idx, 2], axis=0)\n', (3643, 3671), True, 'import numpy as np\n'), ((5391, 5427), 'numpy.arange', 'np.arange', (['(-left)', '(right + step)', 'step'], {}), '(-left, right + step, step)\n', (5400, 5427), True, 'import numpy as np\n'), ((6672, 6689), 'numpy.asarray', 'np.asarray', (['_MEDS'], {}), '(_MEDS)\n', (6682, 6689), True, 'import numpy as np\n'), ((6723, 6740), 'numpy.asarray', 'np.asarray', (['_AMPS'], {}), '(_AMPS)\n', (6733, 6740), True, 'import numpy as np\n'), ((6768, 6787), 'numpy.asarray', 'np.asarray', (['_COUNTS'], {}), '(_COUNTS)\n', (6778, 
6787), True, 'import numpy as np\n'), ((6819, 6835), 'numpy.asarray', 'np.asarray', (['_POS'], {}), '(_POS)\n', (6829, 6835), True, 'import numpy as np\n'), ((6861, 6877), 'numpy.asarray', 'np.asarray', (['_SUM'], {}), '(_SUM)\n', (6871, 6877), True, 'import numpy as np\n'), ((4445, 4501), 'numpy.concatenate', 'np.concatenate', (['(POSITIONS_2D, _mat[j, :, y, 1])'], {'axis': '(0)'}), '((POSITIONS_2D, _mat[j, :, y, 1]), axis=0)\n', (4459, 4501), True, 'import numpy as np\n'), ((4527, 4581), 'numpy.concatenate', 'np.concatenate', (['(ZSCORES_2D, _mat[j, :, y, 2])'], {'axis': '(0)'}), '((ZSCORES_2D, _mat[j, :, y, 2]), axis=0)\n', (4541, 4581), True, 'import numpy as np\n'), ((4604, 4655), 'numpy.concatenate', 'np.concatenate', (['(AMPS_2D, _mat[j, :, y, 0])'], {'axis': '(0)'}), '((AMPS_2D, _mat[j, :, y, 0]), axis=0)\n', (4618, 4655), True, 'import numpy as np\n'), ((6362, 6381), 'numpy.nanmedian', 'np.nanmedian', (['_meds'], {}), '(_meds)\n', (6374, 6381), True, 'import numpy as np\n'), ((6411, 6434), 'numpy.count_nonzero', 'np.count_nonzero', (['_meds'], {}), '(_meds)\n', (6427, 6434), True, 'import numpy as np\n'), ((6461, 6479), 'numpy.nanmedian', 'np.nanmedian', (['_pos'], {}), '(_pos)\n', (6473, 6479), True, 'import numpy as np\n'), ((6553, 6578), 'numpy.nanmean', 'np.nanmean', (['_amps'], {'axis': '(0)'}), '(_amps, axis=0)\n', (6563, 6578), True, 'import numpy as np\n'), ((6604, 6627), 'numpy.nansum', 'np.nansum', (['_sum'], {'axis': '(0)'}), '(_sum, axis=0)\n', (6613, 6627), True, 'import numpy as np\n'), ((6044, 6074), 'numpy.isnan', 'np.isnan', (['SORTED_2D_ZSCORES[j]'], {}), '(SORTED_2D_ZSCORES[j])\n', (6052, 6074), True, 'import numpy as np\n')] |
from causal_world.intervention_actors.base_actor import \
BaseInterventionActorPolicy
import numpy as np
class JointsInterventionActorPolicy(BaseInterventionActorPolicy):
def __init__(self, **kwargs):
"""
This class indicates the joint intervention actor which intervenes on
the joints of the robot in a random fashion.
:param kwargs:
"""
super(JointsInterventionActorPolicy, self).__init__()
self.task_intervention_space = None
self._inverse_kinemetics_func = None
self._stage_bb = None
def initialize(self, env):
"""
This functions allows the intervention actor to query things from the env, such
as intervention spaces or to have access to sampling funcs for goals..etc
:param env: (causal_world.env.CausalWorld) the environment used for the
intervention actor to query
different methods from it.
:return:
"""
self.task_intervention_space = env.get_variable_space_used()
self._inverse_kinemetics_func = env.get_robot().inverse_kinematics
self._stage_bb = env.get_stage().get_stage_bb()
return
def _act(self, variables_dict):
"""
:param variables_dict:
:return:
"""
interventions_dict = dict()
desired_tip_positions = np.random.uniform(self._stage_bb[0],
self._stage_bb[1],
size=[3, 3]).flatten()
interventions_dict['joint_positions'] = \
self._inverse_kinemetics_func(desired_tip_positions,
rest_pose=np.zeros(9,).tolist())
return interventions_dict
def get_params(self):
"""
returns parameters that could be used in recreating this intervention
actor.
:return: (dict) specifying paramters to create this intervention actor
again.
"""
return {'joints_actor': dict()}
| [
"numpy.random.uniform",
"numpy.zeros"
] | [((1447, 1515), 'numpy.random.uniform', 'np.random.uniform', (['self._stage_bb[0]', 'self._stage_bb[1]'], {'size': '[3, 3]'}), '(self._stage_bb[0], self._stage_bb[1], size=[3, 3])\n', (1464, 1515), True, 'import numpy as np\n'), ((1794, 1805), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (1802, 1805), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
convert_harmonics.py
Written by <NAME> (12/2021)
Converts a file from the spatial domain into the spherical harmonic domain
CALLING SEQUENCE:
python convert_harmonics.py -F 2 --lmax 60 -U 1 infile outfile
COMMAND LINE OPTIONS:
--help: list the command line options
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-n X, --love X: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
--reference X: Reference frame for load love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
-U X, --units X: input units
1: cm of water thickness (cmwe)
2: Gigatonnes (Gt)
3: mm of water thickness kg/m^2
-S X, --spacing X: spatial resolution of input data (dlon,dlat)
-I X, --interval X: input grid interval
1: (0:360, 90:-90)
2: (degree spacing/2)
-f X, --fill-value X: set fill_value for input spatial fields
--header X: number of header rows to skip in input ascii files
-F X, --format X: input and output data format
ascii
netCDF4
HDF5
-V, --verbose: verbose output of processing run
-M X, --mode X: Permissions mode of the files created
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
(https://unidata.github.io/netcdf4-python/netCDF4/index.html)
h5py: Pythonic interface to the HDF5 binary data format.
http://www.h5py.org/
future: Compatibility layer between Python 2 and Python 3
http://python-future.org/
PROGRAM DEPENDENCIES:
read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995)
gen_stokes.py: converts a spatial field into a series of spherical harmonics
plm_holmes.py: Computes fully normalized associated Legendre polynomials
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
spatial.py: spatial data class for reading, writing and processing data
ncdf_read.py: reads input spatial data from netCDF4 files
hdf5_read.py: reads input spatial data from HDF5 files
ncdf_write.py: writes output spatial data to netCDF4
hdf5_write.py: writes output spatial data to HDF5
units.py: class for converting GRACE/GRACE-FO Level-2 data to specific units
utilities.py: download and management utilities for files
UPDATE HISTORY:
Updated 12/2021: can use variable loglevels for verbose output
Updated 10/2021: using python logging for handling verbose output
Updated 09/2021: fix to use fill values for input ascii files
use functions for converting to and from GRACE months
Updated 08/2021: fix spherical harmonic orders if not set
Updated 06/2021: can use input files to define command line arguments
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 01/2021: harmonics object output from gen_stokes.py
Updated 12/2020: added more love number options
Updated 10/2020: use argparse to set command line parameters
Updated 08/2020: use utilities to define path to load love numbers file
Updated 04/2020: updates to reading load love numbers
Written 10/2019
"""
from __future__ import print_function
import sys
import os
import re
import logging
import argparse
import traceback
import numpy as np
import gravity_toolkit.utilities as utilities
from gravity_toolkit.read_love_numbers import read_love_numbers
from gravity_toolkit.gen_stokes import gen_stokes
from gravity_toolkit.plm_holmes import plm_holmes
from gravity_toolkit.harmonics import harmonics
from gravity_toolkit.spatial import spatial
from gravity_toolkit.time import calendar_to_grace
#-- PURPOSE: keep track of threads
def info(args):
    """Log identifying information about the running process.

    Parameters
    ----------
    args : object
        Parsed command line arguments (or any object); logged verbatim.
    """
    #-- lazy %-style arguments defer string formatting until the logging
    #-- framework knows the message will actually be emitted
    logging.info(os.path.basename(sys.argv[0]))
    logging.info(args)
    logging.info('module name: %s', __name__)
    #-- os.getppid is not available on every platform
    if hasattr(os, 'getppid'):
        logging.info('parent process: %d', os.getppid())
    logging.info('process id: %d', os.getpid())
#-- PURPOSE: read load love numbers for the range of spherical harmonic degrees
def load_love_numbers(LMAX, LOVE_NUMBERS=0, REFERENCE='CF'):
    """
    Reads PREM load Love numbers for the range of spherical harmonic degrees
    and applies isomorphic parameters

    Arguments
    ---------
    LMAX: maximum spherical harmonic degree

    Keyword arguments
    -----------------
    LOVE_NUMBERS: Load Love numbers dataset
        0: Han and Wahr (1995) values from PREM
        1: Gegout (2005) values from PREM
        2: Wang et al. (2012) values from PREM
    REFERENCE: Reference frame for calculating degree 1 love numbers
        CF: Center of Surface Figure (default)
        CM: Center of Mass of Earth System
        CE: Center of Mass of Solid Earth

    Returns
    -------
    hl: Love number of Vertical Displacement
    kl: Love number of Gravitational Potential
    ll: Love number of Horizontal Displacement

    Raises
    ------
    ValueError: if LOVE_NUMBERS is not one of 0, 1 or 2
    """
    #-- load love numbers file
    if (LOVE_NUMBERS == 0):
        #-- PREM outputs from Han and Wahr (1995)
        #-- https://doi.org/10.1111/j.1365-246X.1995.tb01819.x
        love_numbers_file = utilities.get_data_path(
            ['data','love_numbers'])
        header = 2
        columns = ['l','hl','kl','ll']
    elif (LOVE_NUMBERS == 1):
        #-- PREM outputs from Gegout (2005)
        #-- http://gemini.gsfc.nasa.gov/aplo/
        love_numbers_file = utilities.get_data_path(
            ['data','Load_Love2_CE.dat'])
        header = 3
        columns = ['l','hl','ll','kl']
    elif (LOVE_NUMBERS == 2):
        #-- PREM outputs from Wang et al. (2012)
        #-- https://doi.org/10.1016/j.cageo.2012.06.022
        love_numbers_file = utilities.get_data_path(
            ['data','PREM-LLNs-truncated.dat'])
        header = 1
        columns = ['l','hl','ll','kl','nl','nk']
    else:
        #-- fail with a clear message instead of the NameError the original
        #-- code produced for unrecognized datasets
        raise ValueError(
            'Unknown LOVE_NUMBERS value: {0}'.format(LOVE_NUMBERS))
    #-- LMAX of load love numbers from Han and Wahr (1995) is 696.
    #-- from Wahr (2007) linearly interpolating kl works
    #-- however, as we are linearly extrapolating out, do not make
    #-- LMAX too much larger than 696
    #-- read arrays of kl, hl, and ll Love Numbers
    hl,kl,ll = read_love_numbers(love_numbers_file, LMAX=LMAX, HEADER=header,
        COLUMNS=columns, REFERENCE=REFERENCE, FORMAT='tuple')
    #-- return a tuple of load love numbers
    return (hl,kl,ll)
#-- PURPOSE: converts from the spatial domain into the spherical harmonic domain
def convert_harmonics(INPUT_FILE, OUTPUT_FILE,
    LMAX=None,
    MMAX=None,
    UNITS=None,
    LOVE_NUMBERS=0,
    REFERENCE=None,
    DDEG=None,
    INTERVAL=None,
    FILL_VALUE=None,
    HEADER=None,
    DATAFORM=None,
    MODE=0o775):
    """
    Convert a gridded spatial data file into spherical harmonic coefficients.

    Parameters
    ----------
    INPUT_FILE: full path to the input spatial file
    OUTPUT_FILE: full path for the output harmonic file
    LMAX: maximum spherical harmonic degree
    MMAX: maximum spherical harmonic order (defaults to LMAX)
    UNITS: input data units flag passed through to gen_stokes
    LOVE_NUMBERS: Load Love numbers dataset (0: Han and Wahr, 1: Gegout,
        2: Wang et al.)
    REFERENCE: reference frame for degree 1 love numbers (CF, CM or CE)
    DDEG: grid spacing in degrees; scalar or (dlon, dlat) pair
    INTERVAL: 1 for (0:360, 90:-90) grids, 2 for degree-spacing/2 grids
    FILL_VALUE: invalid-data value for input ascii fields
    HEADER: number of header rows to skip in input ascii files
    DATAFORM: input and output format ('ascii', 'netCDF4' or 'HDF5')
    MODE: permissions mode of the output file (default 0o775)
    """
    #-- verify that output directory exists
    DIRECTORY = os.path.abspath(os.path.dirname(OUTPUT_FILE))
    if not os.access(DIRECTORY, os.F_OK):
        os.makedirs(DIRECTORY,MODE,exist_ok=True)
    #-- Grid spacing
    #-- DDEG may be a scalar (square pixels) or a (dlon, dlat) pair
    dlon,dlat = (DDEG,DDEG) if (np.ndim(DDEG) == 0) else (DDEG[0],DDEG[1])
    #-- Grid dimensions
    #-- NOTE(review): no else branch -- an INTERVAL other than 1 or 2 leaves
    #-- nlon/nlat undefined and fails below
    if (INTERVAL == 1):#-- (0:360, 90:-90)
        nlon = np.int64((360.0/dlon)+1.0)
        nlat = np.int64((180.0/dlat)+1.0)
    elif (INTERVAL == 2):#-- degree spacing/2
        nlon = np.int64((360.0/dlon))
        nlat = np.int64((180.0/dlat))
    #-- read spatial file in data format
    #-- expand dimensions
    if (DATAFORM == 'ascii'):
        #-- ascii (.txt)
        input_spatial = spatial(spacing=[dlon,dlat],nlat=nlat,
            nlon=nlon,fill_value=FILL_VALUE).from_ascii(INPUT_FILE,
            header=HEADER).expand_dims()
    elif (DATAFORM == 'netCDF4'):
        #-- netcdf (.nc)
        input_spatial = spatial().from_netCDF4(INPUT_FILE).expand_dims()
    elif (DATAFORM == 'HDF5'):
        #-- HDF5 (.H5)
        input_spatial = spatial().from_HDF5(INPUT_FILE).expand_dims()
    #-- convert missing values to zero
    input_spatial.replace_invalid(0.0)
    #-- input data shape (overwrites the nlat/nlon computed above with the
    #-- dimensions actually read from file)
    nlat,nlon,nt = input_spatial.shape
    #-- read arrays of kl, hl, and ll Love Numbers
    LOVE = load_love_numbers(LMAX, LOVE_NUMBERS=LOVE_NUMBERS,
        REFERENCE=REFERENCE)
    #-- upper bound of spherical harmonic orders (default = LMAX)
    if MMAX is None:
        MMAX = np.copy(LMAX)
    #-- calculate associated Legendre polynomials
    #-- th is colatitude in radians derived from the input latitudes
    th = (90.0 - input_spatial.lat)*np.pi/180.0
    PLM,dPLM = plm_holmes(LMAX,np.cos(th))
    #-- create list of harmonics objects
    Ylms_list = []
    for i,t in enumerate(input_spatial.time):
        #-- convert spatial field to spherical harmonics
        output_Ylms = gen_stokes(input_spatial.data[:,:,i].T,
            input_spatial.lon, input_spatial.lat, UNITS=UNITS,
            LMIN=0, LMAX=LMAX, MMAX=MMAX, PLM=PLM, LOVE=LOVE)
        output_Ylms.time = np.copy(t)
        output_Ylms.month = calendar_to_grace(t)
        #-- append to list
        Ylms_list.append(output_Ylms)
    #-- convert Ylms list for output spherical harmonics
    Ylms = harmonics().from_list(Ylms_list)
    Ylms_list = None
    #-- outputting data to file
    if (DATAFORM == 'ascii'):
        #-- ascii (.txt)
        Ylms.to_ascii(OUTPUT_FILE)
    elif (DATAFORM == 'netCDF4'):
        #-- netCDF4 (.nc)
        Ylms.to_netCDF4(OUTPUT_FILE)
    elif (DATAFORM == 'HDF5'):
        #-- HDF5 (.H5)
        Ylms.to_HDF5(OUTPUT_FILE)
    #-- change output permissions level to MODE
    os.chmod(OUTPUT_FILE,MODE)
#-- This is the main part of the program that calls the individual modules
def main():
    """Parse the command line and run convert_harmonics with its options."""
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Converts a file from the spatial domain into the
        spherical harmonic domain
        """,
        fromfile_prefix_chars="@"
    )
    parser.convert_arg_line_to_args = utilities.convert_arg_line_to_args
    #-- command line parameters
    #-- input and output file
    parser.add_argument('infile',
        type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
        help='Input spatial file')
    parser.add_argument('outfile',
        type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
        help='Output harmonic file')
    #-- maximum spherical harmonic degree and order
    parser.add_argument('--lmax','-l',
        type=int, default=60,
        help='Maximum spherical harmonic degree')
    parser.add_argument('--mmax','-m',
        type=int, default=None,
        help='Maximum spherical harmonic order')
    #-- different treatments of the load Love numbers
    #-- 0: Han and Wahr (1995) values from PREM
    #-- 1: Gegout (2005) values from PREM
    #-- 2: Wang et al. (2012) values from PREM
    parser.add_argument('--love','-n',
        type=int, default=0, choices=[0,1,2],
        help='Treatment of the Load Love numbers')
    #-- option for setting reference frame for gravitational load love number
    #-- reference frame options (CF, CM, CE)
    parser.add_argument('--reference',
        type=str.upper, default='CF', choices=['CF','CM','CE'],
        help='Reference frame for load Love numbers')
    #-- output units
    parser.add_argument('--units','-U',
        type=int, default=1, choices=[1,2,3],
        help='Output units')
    #-- output grid parameters
    parser.add_argument('--spacing','-S',
        type=float, nargs='+', default=[0.5,0.5], metavar=('dlon','dlat'),
        help='Spatial resolution of output data')
    parser.add_argument('--interval','-I',
        type=int, default=2, choices=[1,2],
        help='Input grid interval (1: global, 2: centered global)')
    #-- fill value for ascii
    parser.add_argument('--fill-value','-f',
        type=float,
        help='Set fill_value for input spatial fields')
    #-- ascii parameters
    parser.add_argument('--header',
        type=int,
        help='Number of header rows to skip in input ascii files')
    #-- input and output data format (ascii, netCDF4, HDF5)
    parser.add_argument('--format','-F',
        type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5'],
        help='Input and output data format')
    #-- print information about each input and output file
    parser.add_argument('--verbose','-V',
        action='count', default=0,
        help='Verbose output of run')
    #-- permissions mode of the output files (octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='permissions mode of output files')
    args,_ = parser.parse_known_args()
    #-- create logger
    #-- verbosity 0 -> CRITICAL only, 1 -> INFO, 2 -> DEBUG
    #-- NOTE(review): -VVV or more would raise IndexError here
    loglevels = [logging.CRITICAL,logging.INFO,logging.DEBUG]
    logging.basicConfig(level=loglevels[args.verbose])
    #-- run program with parameters
    try:
        info(args)
        convert_harmonics(args.infile, args.outfile,
            LMAX=args.lmax,
            MMAX=args.mmax,
            LOVE_NUMBERS=args.love,
            REFERENCE=args.reference,
            UNITS=args.units,
            DDEG=args.spacing,
            INTERVAL=args.interval,
            FILL_VALUE=args.fill_value,
            HEADER=args.header,
            DATAFORM=args.format,
            MODE=args.mode)
    except Exception as e:
        #-- if there has been an error exception
        #-- print the type, value, and stack trace of the
        #-- current exception being handled
        logging.critical('process id {0:d} failed'.format(os.getpid()))
        logging.error(traceback.format_exc())
#-- run main program only when executed as a script (not on import)
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"gravity_toolkit.gen_stokes.gen_stokes",
"gravity_toolkit.utilities.get_data_path",
"os.getppid",
"gravity_toolkit.harmonics.harmonics",
"numpy.copy",
"gravity_toolkit.read_love_numbers.read_love_numbers",
"os.path.dirname",
"numpy.ndim",
"gravity_toolkit.time.calendar_t... | [((4527, 4545), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (4539, 4545), False, 'import logging\n'), ((6885, 7006), 'gravity_toolkit.read_love_numbers.read_love_numbers', 'read_love_numbers', (['love_numbers_file'], {'LMAX': 'LMAX', 'HEADER': 'header', 'COLUMNS': 'columns', 'REFERENCE': 'REFERENCE', 'FORMAT': '"""tuple"""'}), "(love_numbers_file, LMAX=LMAX, HEADER=header, COLUMNS=\n columns, REFERENCE=REFERENCE, FORMAT='tuple')\n", (6902, 7006), False, 'from gravity_toolkit.read_love_numbers import read_love_numbers\n'), ((10048, 10075), 'os.chmod', 'os.chmod', (['OUTPUT_FILE', 'MODE'], {}), '(OUTPUT_FILE, MODE)\n', (10056, 10075), False, 'import os\n'), ((10235, 10414), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts a file from the spatial domain into the\n spherical harmonic domain\n """', 'fromfile_prefix_chars': '"""@"""'}), '(description=\n """Converts a file from the spatial domain into the\n spherical harmonic domain\n """\n , fromfile_prefix_chars=\'@\')\n', (10258, 10414), False, 'import argparse\n'), ((13249, 13299), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'loglevels[args.verbose]'}), '(level=loglevels[args.verbose])\n', (13268, 13299), False, 'import logging\n'), ((4492, 4521), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (4508, 4521), False, 'import os\n'), ((5893, 5942), 'gravity_toolkit.utilities.get_data_path', 'utilities.get_data_path', (["['data', 'love_numbers']"], {}), "(['data', 'love_numbers'])\n", (5916, 5942), True, 'import gravity_toolkit.utilities as utilities\n'), ((7476, 7504), 'os.path.dirname', 'os.path.dirname', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (7491, 7504), False, 'import os\n'), ((7517, 7546), 'os.access', 'os.access', (['DIRECTORY', 'os.F_OK'], {}), '(DIRECTORY, os.F_OK)\n', (7526, 7546), False, 'import os\n'), ((7556, 7599), 'os.makedirs', 'os.makedirs', (['DIRECTORY', 
'MODE'], {'exist_ok': '(True)'}), '(DIRECTORY, MODE, exist_ok=True)\n', (7567, 7599), False, 'import os\n'), ((7777, 7805), 'numpy.int64', 'np.int64', (['(360.0 / dlon + 1.0)'], {}), '(360.0 / dlon + 1.0)\n', (7785, 7805), True, 'import numpy as np\n'), ((7819, 7847), 'numpy.int64', 'np.int64', (['(180.0 / dlat + 1.0)'], {}), '(180.0 / dlat + 1.0)\n', (7827, 7847), True, 'import numpy as np\n'), ((8907, 8920), 'numpy.copy', 'np.copy', (['LMAX'], {}), '(LMAX)\n', (8914, 8920), True, 'import numpy as np\n'), ((9051, 9061), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (9057, 9061), True, 'import numpy as np\n'), ((9249, 9396), 'gravity_toolkit.gen_stokes.gen_stokes', 'gen_stokes', (['input_spatial.data[:, :, i].T', 'input_spatial.lon', 'input_spatial.lat'], {'UNITS': 'UNITS', 'LMIN': '(0)', 'LMAX': 'LMAX', 'MMAX': 'MMAX', 'PLM': 'PLM', 'LOVE': 'LOVE'}), '(input_spatial.data[:, :, i].T, input_spatial.lon, input_spatial.\n lat, UNITS=UNITS, LMIN=0, LMAX=LMAX, MMAX=MMAX, PLM=PLM, LOVE=LOVE)\n', (9259, 9396), False, 'from gravity_toolkit.gen_stokes import gen_stokes\n'), ((9441, 9451), 'numpy.copy', 'np.copy', (['t'], {}), '(t)\n', (9448, 9451), True, 'import numpy as np\n'), ((9480, 9500), 'gravity_toolkit.time.calendar_to_grace', 'calendar_to_grace', (['t'], {}), '(t)\n', (9497, 9500), False, 'from gravity_toolkit.time import calendar_to_grace\n'), ((4742, 4753), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4751, 4753), False, 'import os\n'), ((6161, 6215), 'gravity_toolkit.utilities.get_data_path', 'utilities.get_data_path', (["['data', 'Load_Love2_CE.dat']"], {}), "(['data', 'Load_Love2_CE.dat'])\n", (6184, 6215), True, 'import gravity_toolkit.utilities as utilities\n'), ((7652, 7665), 'numpy.ndim', 'np.ndim', (['DDEG'], {}), '(DDEG)\n', (7659, 7665), True, 'import numpy as np\n'), ((7907, 7929), 'numpy.int64', 'np.int64', (['(360.0 / dlon)'], {}), '(360.0 / dlon)\n', (7915, 7929), True, 'import numpy as np\n'), ((7945, 7967), 'numpy.int64', 'np.int64', (['(180.0 / 
dlat)'], {}), '(180.0 / dlat)\n', (7953, 7967), True, 'import numpy as np\n'), ((9634, 9645), 'gravity_toolkit.harmonics.harmonics', 'harmonics', ([], {}), '()\n', (9643, 9645), False, 'from gravity_toolkit.harmonics import harmonics\n'), ((4683, 4695), 'os.getppid', 'os.getppid', ([], {}), '()\n', (4693, 4695), False, 'import os\n'), ((6449, 6509), 'gravity_toolkit.utilities.get_data_path', 'utilities.get_data_path', (["['data', 'PREM-LLNs-truncated.dat']"], {}), "(['data', 'PREM-LLNs-truncated.dat'])\n", (6472, 6509), True, 'import gravity_toolkit.utilities as utilities\n'), ((14051, 14073), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14071, 14073), False, 'import traceback\n'), ((10635, 10656), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (10653, 10656), False, 'import os\n'), ((10779, 10800), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (10797, 10800), False, 'import os\n'), ((14015, 14026), 'os.getpid', 'os.getpid', ([], {}), '()\n', (14024, 14026), False, 'import os\n'), ((8115, 8189), 'gravity_toolkit.spatial.spatial', 'spatial', ([], {'spacing': '[dlon, dlat]', 'nlat': 'nlat', 'nlon': 'nlon', 'fill_value': 'FILL_VALUE'}), '(spacing=[dlon, dlat], nlat=nlat, nlon=nlon, fill_value=FILL_VALUE)\n', (8122, 8189), False, 'from gravity_toolkit.spatial import spatial\n'), ((8346, 8355), 'gravity_toolkit.spatial.spatial', 'spatial', ([], {}), '()\n', (8353, 8355), False, 'from gravity_toolkit.spatial import spatial\n'), ((8473, 8482), 'gravity_toolkit.spatial.spatial', 'spatial', ([], {}), '()\n', (8480, 8482), False, 'from gravity_toolkit.spatial import spatial\n')] |
#!/usr/bin/env python3
import argparse, os, glob, math
import subprocess
import numpy as np

#-- Disable GROMACS backup files for every gmx call launched by this script.
#-- Setting the variable in this process' environment is inherited by all
#-- child shells; the original os.system('export GMX_MAXBACKUP=-1') only
#-- exported it inside a short-lived child shell and therefore had no effect.
os.environ['GMX_MAXBACKUP'] = '-1'
def parseArguments(argv=None):
    """Parse the command line options of the soup builder.

    Parameters
    ----------
    argv : list of str, optional
        Explicit argument list to parse; when None (the default, and the
        behavior of the original zero-argument call) argparse falls back to
        sys.argv[1:].  Accepting a list keeps the function testable.

    Returns
    -------
    argparse.Namespace with attribute ``gmx_path`` (path to the gmx binary,
    default "gmx").
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-gmx", "--gmx_path", type=str, default="gmx")
    args = parser.parse_args(argv)
    return args
#-- box / droplet construction parameters ------------------------------------
box_size_initial = 35              # initial cubic box edge, nm
box_size_step = 2                  # box growth step when packing fails, nm
droplet_thickness_initial = 0.6    # starting water-shell thickness, nm
droplet_shell_precision = 2        # allowed error on added waters, percent
crowder_total = 13                 # number of distinct crowder species
biomolecular_fraction = 0.30       # target biomolecular mass fraction
ionic_strength = 0.150             # target KCl concentration, M

#-- stage switches -----------------------------------------------------------
make_droplets = False
make_box = False
make_topology = True

#-- paths and input files ----------------------------------------------------
path_mdp = 'mdp'
path_output = 'soup'
mdp_min = 'min.mdp'

#-- number of copies of each crowder species in the soup
_copy_counts = {
    1: 6, 2: 7, 3: 2, 4: 1, 5: 3, 6: 1, 7: 1,
    8: 1, 9: 5, 10: 1436, 11: 144, 12: 225, 13: 255,
}
crowders = {c: {'copies': _copy_counts[c]} for c in range(1, crowder_total + 1)}
def grp_list( gro ):
    """Return the index-group names that ``gmx make_ndx`` reports for *gro*.

    Runs ``gmx make_ndx`` non-interactively (answering "q") so that it writes
    a scratch index file, then parses the names out of the ``[ group ]``
    headers.  Relies on the module-level ``gmx_path`` set in ``__main__``.
    """
    os.system('echo "q""\\n" | %s make_ndx -f %s -o temporary.ndx >/dev/null 2>/dev/null' % (gmx_path, gro) )
    with open('temporary.ndx', 'r') as infile:
        lines = infile.readlines()
    grps = [x.split()[1] for x in lines if x[0]=='[']
    #-- remove the scratch index file *before* returning; in the original the
    #-- os.remove() call sat after the return statement and never executed,
    #-- leaking temporary.ndx on every call
    os.remove('temporary.ndx')
    return grps
def crowder_type( group_list ):
    """Classify a crowder from its gmx index-group names.

    Returns a ``(group, description)`` pair: the index group to select with
    gmx and a human-readable class name.  Priority order matches the original
    if/elif chain: Protein, then RNA, then Other; anything else is Unknown.
    """
    classification = (
        ('Protein', 'protein'),
        ('RNA', 'nucleic acid'),
        ('Other', 'metabolite'),
    )
    for group, label in classification:
        if group in group_list:
            return group, label
    return 'Unknown', 'unknown'
def crowder_mass( crowder_id ):
    """Total mass (Da) of crowder *crowder_id*.

    Parses ``crowder_n<id>/crowder_n<id>.itp`` and sums the mass column (4th
    field from the end) of every atom line.  Atom lines are identified by the
    trailing '; qtot ...' annotation; lines that are pure comments (first
    token ';') are skipped, exactly as in the original one-liner.
    """
    itp_path = 'crowder_n%d/crowder_n%d.itp' % (crowder_id, crowder_id)
    masses = []
    with open(itp_path, 'r') as infile:
        for line in infile.readlines():
            words = line.split()
            if ('qtot' in words) and (words[0] != ';'):
                masses.append(float(words[-4]))
    return np.array(masses).sum()
def count_waters( gro_file ):
    """Count water molecules in a .gro file.

    A water is counted once per oxygen: a line whose first field ends in
    'SOL' (residue) and whose second field starts with 'OW' (atom name).
    Short lines (title, atom count, box vectors) are skipped with an explicit
    length check instead of the original bare ``except``, which silently
    swallowed *every* exception including KeyboardInterrupt.
    """
    waters = 0
    with open(gro_file, 'r') as infile:
        for line in infile:
            words = line.split()
            #-- len(words) >= 2 replaces the IndexError the bare except used
            #-- to absorb on header/box-vector lines
            if len(words) >= 2 and words[0].endswith('SOL') and words[1].startswith('OW'):
                waters += 1
    return waters
#-- Entry point: build an E. coli cytoplasm "soup" model with GROMACS.
#-- Stage 1 always runs (gather type/mass data per crowder); stages 2-4 are
#-- gated by the module-level switches make_droplets, make_box, make_topology.
if __name__=='__main__':
    args = parseArguments()
    gmx_path = args.gmx_path
    print('Gathering data about each crowder')
    for crowder_id in range(1, crowder_total+1):
        # Creates a temporary index file to check what type of molecule is the crowder
        grps = grp_list('crowder_n'+str(crowder_id)+'/npt.1.gro')
        crowders[crowder_id]['type'], crowders[crowder_id]['class'] = crowder_type(grps)
        crowders[crowder_id]['mass'] = crowder_mass(crowder_id)
        print('\tCrowder %d is a %s with mass = %.2f Da' % (crowder_id, crowders[crowder_id]['class'], crowders[crowder_id]['mass']) )
    #-- total biomolecular mass = sum over crowders of copies * mass
    biomolecular_mass=0
    for crowder_id in crowders:
        biomolecular_mass += crowders[crowder_id]['copies'] * crowders[crowder_id]['mass']
    print('')
    print('\tTotal biomolecular mass = %.2f Da' % (biomolecular_mass))
    print('\tCalculating the number of waters necessary to achieve the biomolecular fraction of %f %%' % (biomolecular_fraction*100))
    #-- waters needed so biomolecules make up the requested mass fraction
    #-- (each water counted as 18 Da)
    water_mass = (biomolecular_mass/biomolecular_fraction) - biomolecular_mass
    water_molecules = int(water_mass / 18)
    print('\t%d water molecules are necessary to reach the biomolecular fraction of %.1f %%' % (water_molecules, biomolecular_fraction*100))
    print('')
    print('')
    #-- heaviest crowders first; they are inserted into the box first below
    crowders_sorted = sorted( crowders.items() , key= lambda tup: tup[1]['mass'], reverse=True)
    crowders_order = [x[0] for x in crowders_sorted]
    if make_droplets:
        #-- Stage 2: wrap each crowder in a water shell ("droplet") and tune
        #-- the shell thickness until the droplets together hold the target
        #-- number of water molecules
        print('Making droplets for each crowder')
        print('\tFinding the optimum droplet shell thickness which comprises a total of %d water molecules for all crowders\n' % water_molecules)
        print('\tThe maximum difference that is allowed between the number of water molecules inside the droplets and the number of molecules we need is %d %%' % droplet_shell_precision)
        droplet_thickness_step=0.1 # nm
        droplet_thickness=droplet_thickness_initial # nm
        droplet_thickness_prev=droplet_thickness
        while True:
            water_added_to_the_soup = 0
            for crowder_id in range(1,crowder_total+1):
                print('\t\tMaking droplet for crowder %d ... ' % crowder_id, end='')
                os.chdir('crowder_n%d' % crowder_id)
                os.system('echo "System" | %s trjconv -f npt.1.gro -s npt.1.tpr -pbc atom -ur compact -o eq_pbc.gro >/dev/null 2>/dev/null' % args.gmx_path)
                grps = grp_list('npt.1.gro')
                if 'MG' in grps:
                    #-- locate the index of the first group named MG so that
                    #-- nearby magnesium ions are kept with the droplet
                    grp_mg = 0
                    grp_count = 0
                    for grp in grps:
                        if (grp == 'MG') and (grp_mg == 0):
                            grp_mg = grp_count
                        grp_count += 1
                    os.system('%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) ) or (group %d and within %s of group %s )" -s npt.1.tpr >/dev/null 2>/dev/null' % (args.gmx_path, crowders[crowder_id]['type'], droplet_thickness, crowders[crowder_id]['type'], grp_mg, droplet_thickness, crowders[crowder_id]['type']))
                else:
                    os.system('%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) )" -s npt.1.tpr >/dev/null 2>/dev/null' % (args.gmx_path, crowders[crowder_id]['type'], droplet_thickness, crowders[crowder_id]['type']))
                os.system('%s trjconv -f eq_pbc.gro -s npt.1.tpr -o droplet.gro -n index_droplet.ndx >/dev/null 2>/dev/null' % args.gmx_path)
                water_added_by_one_droplet = count_waters('droplet.gro')
                water_added_by_this_crowder = water_added_by_one_droplet * crowders[crowder_id]['copies']
                water_added_to_the_soup += water_added_by_this_crowder
                os.chdir('..')
                print('ok')
            water_added_perc = float(water_added_to_the_soup/water_molecules)*100
            error_perc = np.absolute(water_added_perc - 100)
            current_biomolecular_fraction = biomolecular_mass / (biomolecular_mass + (water_added_to_the_soup*18.0))
            print('\t\t\tDroplet thickness = %f' % droplet_thickness)
            print('\t\t\tCurrent biomolecular fraction = %f' % current_biomolecular_fraction)
            print('\t\t\t%% of water added = %f' % water_added_perc)
            print('\t\t\t%% error = %f' % error_perc)
            if error_perc <= droplet_shell_precision:
                break
            else:
                #-- bisection-style refinement: grow the shell while under
                #-- target, back off and shrink the step after overshooting
                if water_added_perc < 100 :
                    droplet_thickness_prev = droplet_thickness
                else:
                    droplet_thickness = droplet_thickness_prev
                    droplet_thickness_step = droplet_thickness_step/10
                droplet_thickness = droplet_thickness + droplet_thickness_step
        print('')
        print('\tDroplet shell thickness converged to %.1f nm.' % droplet_thickness)
        print('\t%d water molecules will be added to the box.' % water_added_to_the_soup)
        print('\tThis is %.2f %% of the precise number required' % water_added_perc)
        print('\tThat\'s within the requested error margin of %.2f %%' % droplet_shell_precision)
        print('')
        print('')
    if make_box:
        #-- Stage 3: pack all droplets into the smallest cubic box that fits,
        #-- growing the box by box_size_step whenever insertion fails
        print('Finding the smallest cubic box in which all components of the soup fit')
        print('\tSorting the crowders by mass')
        for crowder_id in crowders_order:
            print('\t\tCrowder %d, mass = %d' % (crowder_id, crowders[crowder_id]['mass']) )
        print('')
        try:
            os.mkdir(path_output)
        except:
            pass
        box_size = box_size_initial
        ok = False
        while True:
            #-- start from an empty box of the current trial size
            with open('%s/box.gro' % (path_output), 'w') as outfile:
                outfile.write('Cytoplasm model\n')
                outfile.write('0\n')
                outfile.write('%.4f\t%.4f\t%.4f\n' % (box_size, box_size, box_size))
            print('\tSide lenght = %.4f nm' % box_size)
            crowder_count = 1
            for crowder_id in crowders_order:
                print('\tTrying to add %d copies of crowder number %d ... ' % (crowders[crowder_id]['copies'], crowder_id), end='')
                os.system('%s insert-molecules -f %s/box.gro -ci crowder_n%d/droplet.gro -o %s/box.gro -nmol %d -try 10 >out 2>err' % (gmx_path, path_output, crowder_id, path_output, crowders[crowder_id]['copies']))
                #-- gmx reports "Added N molecules" on stderr; read it back
                with open('err', 'r') as infile:
                    for line in infile.readlines():
                        try:
                            if line.split()[0]=='Added':
                                number_added = int(line.split()[1])
                        except:
                            pass
                os.remove("err")
                os.remove("out")
                print('added %d ...' % number_added, end='')
                if number_added == crowders[crowder_id]['copies']:
                    print('ok')
                    if crowder_count == crowder_total:
                        ok = True
                        break
                    else:
                        crowder_count += 1
                else:
                    #-- not everything fit: enlarge the box and restart
                    print('didn\'t fit')
                    box_size = box_size + box_size_step
                    break
            if ok:
                break
    if make_topology:
        #-- Stage 4: reorder the .gro contents (biomolecules, SOL, MG), write
        #-- the matching GROMACS topology, then neutralize and salt the box
        print('')
        print('Writing the .gro file for the soup ... ', end='')
        sections = ['title', 'atoms', 'biomol', 'sol', 'mg', 'box']
        gro = {}
        for grp in sections:
            gro[grp] = []
        with open('%s/box.gro' % path_output , 'r') as infile:
            lines = infile.readlines()
        gro['title'].append(lines[0])
        gro['atoms'].append(lines[1])
        gro['box'].append(lines[-1])
        del lines[-1]
        del lines[:2]
        for line in lines:
            words = line.split()
            if words[0][-3:] == 'SOL':
                gro['sol'].append(line)
            elif words[0][-2:] == 'MG' and words[1][:2]=='MG':
                gro['mg'].append(line)
            else:
                gro['biomol'].append(line)
        with open('%s/box_ordered.gro' % path_output, 'w') as outfile:
            for grp in sections:
                for line in gro[grp]:
                    outfile.write(line)
        print('ok')
        print('Writing the topology file for the soup ... ', end='')
        #-- TIP4P-style water: 4 atom lines per SOL molecule
        num_sol = int(len(gro['sol'])/4)
        num_mg = int(len(gro['mg']))
        with open('%s/topology.top' % path_output, 'w') as outfile:
            outfile.write('; Include forcefield parameters\n' )
            outfile.write('#include "../amber99sbws-dangK.ff/forcefield.itp"\n')
            outfile.write('#include "../gaff-types.itp"\n')
            outfile.write('\n')
            outfile.write('; Include chain topologies\n')
            for crowder_id in crowders_order:
                outfile.write('#include "crowder_n%d.itp"\n' % crowder_id)
                os.system('cp crowder_n%d/*.itp %s/' % (crowder_id, path_output) )
            outfile.write('\n')
            outfile.write('; Include water topology\n')
            outfile.write('#include "../amber99sbws-dangK.ff/tip4p2005s.itp"\n')
            outfile.write('\n')
            outfile.write('; Include topology for ions\n' )
            outfile.write('#include "../amber99sbws-dangK.ff/ions.itp"\n')
            outfile.write('\n')
            outfile.write('[ system ]\n')
            outfile.write('; Name\n')
            outfile.write('E.coli K12 cytoplasm model\n')
            outfile.write('\n')
            outfile.write('[ molecules ]\n' )
            outfile.write('; Compound #mols\n' )
            for crowder_id in crowders_order:
                outfile.write('crowder_n%d\t%d\n' % (crowder_id, crowders[crowder_id]['copies']) )
            outfile.write('SOL\t%d\n' % num_sol )
            outfile.write('MG\t%d\n' % num_mg )
        print('ok')
        print('')
        print('')
        #-- NOTE(review): the ionization steps below are placed inside the
        #-- make_topology stage because they depend on num_sol and on
        #-- topology.top written just above -- confirm against the original
        #-- indentation
        print('Neutralizing charges in the box with K+ or Cl- ... ', end='')
        os.chdir(path_output)
        os.system('%s grompp -f ../mdp/%s -p topology.top -c box_ordered.gro -o ion_neutralize.tpr -maxwarn 1 >/dev/null 2>/dev/null' % (args.gmx_path, mdp_min))
        os.system('echo SOL | %s genion -s ion_neutralize.tpr -p topology.top -o ion_neutralize.gro -pname K -nname CL -pq 1 -nq -1 -neutral >/dev/null 2>/dev/null' % args.gmx_path)
        os.remove('mdout.mdp')
        print('ok')
        print('Adding KCl to reach the ionic strength of %f mol/L ... ' % ionic_strength , end='' )
        #-- 55.555 mol of water per liter, scaled to the SOL count present
        num_kcl = int(ionic_strength * (num_sol/55.555))
        os.system('%s grompp -f ../mdp/%s -p topology.top -c ion_neutralize.gro -o ion_ionicstrength.tpr -maxwarn 1 >/dev/null 2>/dev/null' % (args.gmx_path, mdp_min))
        os.system('echo SOL | %s genion -s ion_ionicstrength.tpr -p topology.top -o ion_ionicstrength.gro -pname K -nname CL -pq 1 -nq -1 -np %d -nn %d >/dev/null 2>/dev/null' % (args.gmx_path, num_kcl, num_kcl))
        os.remove('mdout.mdp')
        print('ok')
| [
"numpy.absolute",
"os.remove",
"os.mkdir",
"argparse.ArgumentParser",
"os.system",
"os.chdir"
] | [((94, 130), 'os.system', 'os.system', (['"""export GMX_MAXBACKUP=-1"""'], {}), "('export GMX_MAXBACKUP=-1')\n", (103, 130), False, 'import argparse, os, glob, math\n'), ((165, 190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (188, 190), False, 'import argparse, os, glob, math\n'), ((1399, 1513), 'os.system', 'os.system', (['(\'echo "q""\\\\n" | %s make_ndx -f %s -o temporary.ndx >/dev/null 2>/dev/null\' %\n (gmx_path, gro))'], {}), '(\n \'echo "q""\\\\n" | %s make_ndx -f %s -o temporary.ndx >/dev/null 2>/dev/null\'\n % (gmx_path, gro))\n', (1408, 1513), False, 'import argparse, os, glob, math\n'), ((1646, 1672), 'os.remove', 'os.remove', (['"""temporary.ndx"""'], {}), "('temporary.ndx')\n", (1655, 1672), False, 'import argparse, os, glob, math\n'), ((11134, 11155), 'os.chdir', 'os.chdir', (['path_output'], {}), '(path_output)\n', (11142, 11155), False, 'import argparse, os, glob, math\n'), ((11157, 11320), 'os.system', 'os.system', (["('%s grompp -f ../mdp/%s -p topology.top -c box_ordered.gro -o ion_neutralize.tpr -maxwarn 1 >/dev/null 2>/dev/null'\n % (args.gmx_path, mdp_min))"], {}), "(\n '%s grompp -f ../mdp/%s -p topology.top -c box_ordered.gro -o ion_neutralize.tpr -maxwarn 1 >/dev/null 2>/dev/null'\n % (args.gmx_path, mdp_min))\n", (11166, 11320), False, 'import argparse, os, glob, math\n'), ((11312, 11495), 'os.system', 'os.system', (["('echo SOL | %s genion -s ion_neutralize.tpr -p topology.top -o ion_neutralize.gro -pname K -nname CL -pq 1 -nq -1 -neutral >/dev/null 2>/dev/null'\n % args.gmx_path)"], {}), "(\n 'echo SOL | %s genion -s ion_neutralize.tpr -p topology.top -o ion_neutralize.gro -pname K -nname CL -pq 1 -nq -1 -neutral >/dev/null 2>/dev/null'\n % args.gmx_path)\n", (11321, 11495), False, 'import argparse, os, glob, math\n'), ((11487, 11509), 'os.remove', 'os.remove', (['"""mdout.mdp"""'], {}), "('mdout.mdp')\n", (11496, 11509), False, 'import argparse, os, glob, math\n'), ((11671, 11840), 'os.system', 
'os.system', (["('%s grompp -f ../mdp/%s -p topology.top -c ion_neutralize.gro -o ion_ionicstrength.tpr -maxwarn 1 >/dev/null 2>/dev/null'\n % (args.gmx_path, mdp_min))"], {}), "(\n '%s grompp -f ../mdp/%s -p topology.top -c ion_neutralize.gro -o ion_ionicstrength.tpr -maxwarn 1 >/dev/null 2>/dev/null'\n % (args.gmx_path, mdp_min))\n", (11680, 11840), False, 'import argparse, os, glob, math\n'), ((11832, 12046), 'os.system', 'os.system', (["('echo SOL | %s genion -s ion_ionicstrength.tpr -p topology.top -o ion_ionicstrength.gro -pname K -nname CL -pq 1 -nq -1 -np %d -nn %d >/dev/null 2>/dev/null'\n % (args.gmx_path, num_kcl, num_kcl))"], {}), "(\n 'echo SOL | %s genion -s ion_ionicstrength.tpr -p topology.top -o ion_ionicstrength.gro -pname K -nname CL -pq 1 -nq -1 -np %d -nn %d >/dev/null 2>/dev/null'\n % (args.gmx_path, num_kcl, num_kcl))\n", (11841, 12046), False, 'import argparse, os, glob, math\n'), ((12038, 12060), 'os.remove', 'os.remove', (['"""mdout.mdp"""'], {}), "('mdout.mdp')\n", (12047, 12060), False, 'import argparse, os, glob, math\n'), ((6097, 6132), 'numpy.absolute', 'np.absolute', (['(water_added_perc - 100)'], {}), '(water_added_perc - 100)\n', (6108, 6132), True, 'import numpy as np\n'), ((7499, 7520), 'os.mkdir', 'os.mkdir', (['path_output'], {}), '(path_output)\n', (7507, 7520), False, 'import argparse, os, glob, math\n'), ((10239, 10304), 'os.system', 'os.system', (["('cp crowder_n%d/*.itp %s/' % (crowder_id, path_output))"], {}), "('cp crowder_n%d/*.itp %s/' % (crowder_id, path_output))\n", (10248, 10304), False, 'import argparse, os, glob, math\n'), ((4521, 4557), 'os.chdir', 'os.chdir', (["('crowder_n%d' % crowder_id)"], {}), "('crowder_n%d' % crowder_id)\n", (4529, 4557), False, 'import argparse, os, glob, math\n'), ((4567, 4717), 'os.system', 'os.system', (['(\'echo "System" | %s trjconv -f npt.1.gro -s npt.1.tpr -pbc atom -ur compact -o eq_pbc.gro >/dev/null 2>/dev/null\'\n % args.gmx_path)'], {}), '(\n \'echo "System" | %s trjconv -f 
npt.1.gro -s npt.1.tpr -pbc atom -ur compact -o eq_pbc.gro >/dev/null 2>/dev/null\'\n % args.gmx_path)\n', (4576, 4717), False, 'import argparse, os, glob, math\n'), ((5604, 5739), 'os.system', 'os.system', (["('%s trjconv -f eq_pbc.gro -s npt.1.tpr -o droplet.gro -n index_droplet.ndx >/dev/null 2>/dev/null'\n % args.gmx_path)"], {}), "(\n '%s trjconv -f eq_pbc.gro -s npt.1.tpr -o droplet.gro -n index_droplet.ndx >/dev/null 2>/dev/null'\n % args.gmx_path)\n", (5613, 5739), False, 'import argparse, os, glob, math\n'), ((5973, 5987), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5981, 5987), False, 'import argparse, os, glob, math\n'), ((8037, 8251), 'os.system', 'os.system', (["('%s insert-molecules -f %s/box.gro -ci crowder_n%d/droplet.gro -o %s/box.gro -nmol %d -try 10 >out 2>err'\n % (gmx_path, path_output, crowder_id, path_output, crowders[crowder_id\n ]['copies']))"], {}), "(\n '%s insert-molecules -f %s/box.gro -ci crowder_n%d/droplet.gro -o %s/box.gro -nmol %d -try 10 >out 2>err'\n % (gmx_path, path_output, crowder_id, path_output, crowders[crowder_id\n ]['copies']))\n", (8046, 8251), False, 'import argparse, os, glob, math\n'), ((8432, 8448), 'os.remove', 'os.remove', (['"""err"""'], {}), "('err')\n", (8441, 8448), False, 'import argparse, os, glob, math\n'), ((8453, 8469), 'os.remove', 'os.remove', (['"""out"""'], {}), "('out')\n", (8462, 8469), False, 'import argparse, os, glob, math\n'), ((4930, 5322), 'os.system', 'os.system', (['(\'%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) ) or (group %d and within %s of group %s )" -s npt.1.tpr >/dev/null 2>/dev/null\'\n % (args.gmx_path, crowders[crowder_id][\'type\'], droplet_thickness,\n crowders[crowder_id][\'type\'], grp_mg, droplet_thickness, crowders[\n crowder_id][\'type\']))'], {}), '(\n \'%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) ) 
or (group %d and within %s of group %s )" -s npt.1.tpr >/dev/null 2>/dev/null\'\n % (args.gmx_path, crowders[crowder_id][\'type\'], droplet_thickness,\n crowders[crowder_id][\'type\'], grp_mg, droplet_thickness, crowders[\n crowder_id][\'type\']))\n', (4939, 5322), False, 'import argparse, os, glob, math\n'), ((5319, 5608), 'os.system', 'os.system', (['(\'%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) )" -s npt.1.tpr >/dev/null 2>/dev/null\'\n % (args.gmx_path, crowders[crowder_id][\'type\'], droplet_thickness,\n crowders[crowder_id][\'type\']))'], {}), '(\n \'%s select -f eq_pbc.gro -on index_droplet.ndx -select "(group %s ) or (same residue as (group "SOL" and within %f of group %s) )" -s npt.1.tpr >/dev/null 2>/dev/null\'\n % (args.gmx_path, crowders[crowder_id][\'type\'], droplet_thickness,\n crowders[crowder_id][\'type\']))\n', (5328, 5608), False, 'import argparse, os, glob, math\n')] |
"""
pyart.io.nexrad_level2
======================
.. autosummary::
:toctree: generated/
:template: dev_template.rst
NEXRADLevel2File
.. autosummary::
:toctree: generated/
_decompress_records
_get_record_from_buf
_get_msg31_data_block
_structure_size
_unpack_from_buf
_unpack_structure
"""
# This file is part of the Py-ART, the Python ARM Radar Toolkit
# https://github.com/ARM-DOE/pyart
# Care has been taken to keep this file free from extraneous dependencies
# so that it can be used by other projects with no/minimal modification.
# Please feel free to use this file in other projects provided the license
# below is followed.  Keeping the above comment lines would also be helpful
# to direct others back to the Py-ART project and the source of this file.
LICENSE = """
Copyright (c) 2013, UChicago Argonne, LLC
All rights reserved.
Copyright 2013 UChicago Argonne, LLC. This software was produced under U.S.
Government contract DE-AC02-06CH11357 for Argonne National Laboratory (ANL),
which is operated by UChicago Argonne, LLC for the U.S. Department of Energy.
The U.S. Government has rights to use, reproduce, and distribute this
software. NEITHER THE GOVERNMENT NOR UCHICAGO ARGONNE, LLC MAKES ANY
WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS
SOFTWARE. If software is modified to produce derivative works, such modified
software should be clearly marked, so as not to confuse it with the version
available from ANL.
Additionally, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of UChicago Argonne, LLC, Argonne National
Laboratory, ANL, the U.S. Government, nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY UCHICAGO ARGONNE, LLC AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL UCHICAGO ARGONNE, LLC OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import bz2
import struct
from datetime import datetime, timedelta
import numpy as np
class NEXRADLevel2File(object):
    """
    Class for accessing data in a NEXRAD (WSR-88D) Level II file.
    NEXRAD Level II files [1]_, also known as NEXRAD Archive Level II or
    WSR-88D Archive level 2, are available from the NOAA National Climate Data
    Center [2]_ as well as on the UCAR THREDDS Data Server [3]_. Files with
    uncompressed messages and compressed messages are supported. Currently
    only "message 31" type files are supported, "message 1" file cannot be
    read using this class.
    Parameters
    ----------
    filename : str
        Filename of Archive II file to read.
    Attributes
    ----------
    msg31s : list
        Message 31 message in the file.
    nscans : int
        Number of scans in the file.
    scan_msgs : list of arrays
        Each element specifies the indices of the message in msg31s which
        belong to a given scan.
    volume_header : dict
        Volume header.
    vcp : dict
        VCP information dictionary.
    _records : list
        A list of all records (message) in the file.
    _fh : file-like
        File like object from which data is read.
    References
    ----------
    .. [1] http://www.roc.noaa.gov/WSR88D/Level_II/Level2Info.aspx
    .. [2] http://www.ncdc.noaa.gov/
    .. [3] http://thredds.ucar.edu/thredds/catalog.html
    """
    def __init__(self, filename):
        """ Initialize the object. """
        # read in the volume header and compression_record
        if hasattr(filename, 'read'):
            fh = filename
        else:
            fh = open(filename, 'rb')
        size = _structure_size(VOLUME_HEADER)
        self.volume_header = _unpack_structure(fh.read(size), VOLUME_HEADER)
        compression_record = fh.read(COMPRESSION_RECORD_SIZE)
        # read the records in the file, decompressing as needed
        # 'BZ' after the first control word marks a bzip2-compressed file
        s = slice(CONTROL_WORD_SIZE, CONTROL_WORD_SIZE + 2)
        if compression_record[s] == b'BZ':
            buf = _decompress_records(fh)
        elif compression_record[s] == b'\x00\x00':
            buf = fh.read()
        else:
            # NOTE(review): when this class opened the file itself, fh is
            # not closed before raising here -- consider closing it.
            raise IOError('unknown compression record')
        self._fh = fh
        # read the records from the buffer
        self._records = []
        buf_length = len(buf)
        pos = 0
        while pos < buf_length:
            pos, dic = _get_record_from_buf(buf, pos)
            self._records.append(dic)
        # pull out msg31 records which contain the moment data.
        self.msg31s = [r for r in self._records if r['header']['type'] == 31]
        if len(self.msg31s) == 0:
            raise ValueError('No MSG31 records found, cannot read file')
        # elevation_number is 1-based; group message indices by scan
        elev_nums = np.array([m['msg31_header']['elevation_number']
                              for m in self.msg31s])
        self.scan_msgs = [np.where(elev_nums == i + 1)[0]
                          for i in range(elev_nums.max())]
        self.nscans = len(self.scan_msgs)
        # pull out the vcp record (message type 5)
        self.vcp = [r for r in self._records if r['header']['type'] == 5][0]
        return
    def close(self):
        """ Close the file. """
        self._fh.close()
    def location(self):
        """
        Find the location of the radar.
        Returns
        -------
        latitude: float
            Latitude of the radar in degrees.
        longitude: float
            Longitude of the radar in degrees.
        height : int
            Height of radar and feedhorn in meters above mean sea level.
        """
        dic = self.msg31s[0]['VOL']
        return dic['lat'], dic['lon'], dic['height'] + dic['feedhorn_height']
    def scan_info(self):
        """
        Return a list of dictionaries with scan information.
        Returns
        -------
        scan_info : list
            A list of the scan performed with a dictionary with keys
            'moments', 'ngates', and 'nrays' for each scan. The 'moments'
            and 'ngates' keys are lists of the NEXRAD moments and number
            of gates for that moment collected during the specific scan.
            The 'nrays' key provides the number of radials collected in the
            given scan.
        """
        info = []
        for scan in range(self.nscans):
            nrays = self.get_nrays(scan)
            # inspect the first message of the scan for available moments
            msg31_number = self.scan_msgs[scan][0]
            msg = self.msg31s[msg31_number]
            nexrad_moments = ['REF', 'VEL', 'SW', 'ZDR', 'PHI', 'RHO']
            moments = [f for f in nexrad_moments if f in msg]
            ngates = [msg[f]['ngates'] for f in moments]
            info.append({
                'nrays': nrays,
                'ngates': ngates,
                'moments': moments})
        return info
    def get_nrays(self, scan):
        """
        Return the number of rays in a given scan.
        Parameters
        ----------
        scan : int
            Scan of interest (0 based)
        Returns
        -------
        nrays : int
            Number of rays (radials) in the scan.
        """
        return len(self.scan_msgs[scan])
    def get_range(self, scan_num, moment):
        """
        Return an array of gate ranges for a given scan and moment.
        Parameters
        ----------
        scan_num : int
            Scan number (0 based).
        moment : 'REF', 'VEL', 'SW', 'ZDR', 'PHI', or 'RHO'
            Moment of interest.
        Returns
        -------
        range : ndarray
            Range in meters from the antenna to the center of gate (bin).
        """
        dic = self.msg31s[self.scan_msgs[scan_num][0]][moment]
        ngates = dic['ngates']
        first_gate = dic['first_gate']
        gate_spacing = dic['gate_spacing']
        return np.arange(ngates) * gate_spacing + first_gate
    # helper functions for looping over scans
    def _msg_nums(self, scans):
        """ Find the all message number for a list of scans. """
        return np.concatenate([self.scan_msgs[i] for i in scans])
    def _msg31_array(self, scans, key):
        """
        Return an array of msg31 header elements for all rays in scans.
        """
        msg_nums = self._msg_nums(scans)
        t = [self.msg31s[i]['msg31_header'][key] for i in msg_nums]
        return np.array(t)
    def _msg31_rad_array(self, scans, key):
        """
        Return an array of msg31 RAD elements for all rays in scans.
        """
        msg_nums = self._msg_nums(scans)
        t = [self.msg31s[i]['RAD'][key] for i in msg_nums]
        return np.array(t)
    def get_times(self, scans=None):
        """
        Retrieve the times at which the rays were collected.
        Parameters
        ----------
        scans : list or None
            Scans (0-based) to retrieve ray (radial) collection times from.
            None (the default) will return the times for all scans in the
            volume.
        Returns
        -------
        time_start : Datetime
            Initial time.
        time : ndarray
            Offset in seconds from the initial time at which the rays
            in the requested scans were collected.
        """
        if scans is None:
            scans = range(self.nscans)
        days = self._msg31_array(scans, 'collect_date')
        secs = self._msg31_array(scans, 'collect_ms') / 1000.
        # collect_date counts days with 1970-01-01 as day 1, hence the -1
        offset = timedelta(days=int(days[0]) - 1, seconds=int(secs[0]))
        time_start = datetime(1970, 1, 1) + offset
        time = secs - int(secs[0]) + (days - days[0]) * 86400
        return time_start, time
    def get_azimuth_angles(self, scans=None):
        """
        Retrieve the azimuth angles of all rays in the requested scans.
        Parameters
        ----------
        scans : list ot None
            Scans (0 based) for which ray (radial) azimuth angles will be
            retrieved. None (the default) will return the angles for all
            scans in the volume.
        Returns
        -------
        angles : ndarray
            Azimuth angles in degrees for all rays in the requested scans.
        """
        if scans is None:
            scans = range(self.nscans)
        return self._msg31_array(scans, 'azimuth_angle')
    def get_elevation_angles(self, scans=None):
        """
        Retrieve the elevation angles of all rays in the requested scans.
        Parameters
        ----------
        scans : list or None
            Scans (0 based) for which ray (radial) azimuth angles will be
            retrieved. None (the default) will return the angles for
            all scans in the volume.
        Returns
        -------
        angles : ndarray
            Elevation angles in degrees for all rays in the requested scans.
        """
        if scans is None:
            scans = range(self.nscans)
        return self._msg31_array(scans, 'elevation_angle')
    def get_target_angles(self, scans=None):
        """
        Retrieve the target elevation angle of the requested scans.
        Parameters
        ----------
        scans : list or None
            Scans (0 based) for which the target elevation angles will be
            retrieved. None (the default) will return the angles for all
            scans in the volume.
        Returns
        -------
        angles : ndarray
            Target elevation angles in degrees for the requested scans.
        """
        if scans is None:
            scans = range(self.nscans)
        cp = self.vcp['cut_parameters']
        # VCP elevation angles are scaled by 360/65536 (see MSG_5_ELEV)
        scale = 360. / 65536.
        return np.array([cp[i]['elevation_angle'] * scale for i in scans],
                        dtype='float32')
    def get_nyquist_vel(self, scans=None):
        """
        Retrieve the Nyquist velocities of the requested scans.
        Parameters
        ----------
        scans : list or None
            Scans (0 based) for which the Nyquist velocities will be
            retrieved. None (the default) will return the velocities for all
            scans in the volume.
        Returns
        -------
        velocities : ndarray
            Nyquist velocities (in m/s) for the requested scans.
        """
        if scans is None:
            scans = range(self.nscans)
        # stored in units of 0.01 m/s
        return self._msg31_rad_array(scans, 'nyquist_vel') * 0.01
    def get_unambigous_range(self, scans=None):
        """
        Retrieve the unambiguous range of the requested scans.
        Parameters
        ----------
        scans : list or None
            Scans (0 based) for which the unambiguous range will be retrieved.
            None (the default) will return the range for all scans in the
            volume.
        Returns
        -------
        unambiguous_range : ndarray
            Unambiguous range (in meters) for the requested scans.
        """
        if scans is None:
            scans = range(self.nscans)
        # stored in units of 0.1 m (hence the scaling below)
        return self._msg31_rad_array(scans, 'unambig_range') * 0.1
    def get_data(self, moment, max_ngates, scans=None, raw_data=False):
        """
        Retrieve moment data for a given set of scans.
        Masked points indicate that the data was not collected, below
        threshold or is range folded.
        Parameters
        ----------
        moment : 'REF', 'VEL', 'SW', 'ZDR', 'PHI', or 'RHO'
            Moment for which to retrieve data.
        max_ngates : int
            Maximum number of gates (bins) in any ray.
        raw_data : bool
            True to return the raw data, False to perform masking as well as
            applying the appropriate scale and offset to the data. When
            raw_data is True values of 1 in the data likely indicate that
            the gate was not present in the sweep, in some cases in will
            indicate range folded data.
        scans : list or None.
            Scans to retrieve data from (0 based). None (the default) will
            get the data for all scans in the volume.
        Returns
        -------
        data : ndarray
        """
        if scans is None:
            scans = range(self.nscans)
        # determine the number of rays
        msg_nums = self._msg_nums(scans)
        nrays = len(msg_nums)
        # extract the data; PHI is stored as 16-bit words, others as bytes
        if moment != 'PHI':
            data = np.ones((nrays, max_ngates), dtype='u1')
        else:
            data = np.ones((nrays, max_ngates), dtype='u2')
        for i, msg_num in enumerate(msg_nums):
            msg = self.msg31s[msg_num]
            if moment not in msg.keys():
                continue
            ngates = msg[moment]['ngates']
            data[i, :ngates] = msg[moment]['data']
        # return raw data if requested
        if raw_data:
            return data
        # mask, scan and offset, assume that the offset and scale
        # are the same in all scans/gates
        for scan in scans:  # find a scan which contains the moment
            msg_num = self.scan_msgs[scan][0]
            msg = self.msg31s[msg_num]
            if moment in msg.keys():
                offset = np.float32(msg[moment]['offset'])
                scale = np.float32(msg[moment]['scale'])
                return (np.ma.masked_less_equal(data, 1) - offset) / (scale)
        # moment is not present in any scan, mask all values
        return np.ma.masked_less_equal(data, 1)
def _decompress_records(file_handler):
    """
    Decompress the records from a BZ2 compressed Archive 2 file.

    The file is read in full, then decompressed chunk by chunk: each
    compressed block is preceded by a control word which is skipped.
    The leading compression record is stripped from the returned bytes.
    """
    file_handler.seek(0)
    raw = file_handler.read()
    # skip the volume header and the first control word
    start = _structure_size(VOLUME_HEADER) + CONTROL_WORD_SIZE
    pieces = []
    remaining = raw[start:]
    while remaining:
        decompressor = bz2.BZ2Decompressor()
        pieces.append(decompressor.decompress(remaining))
        leftover = decompressor.unused_data
        if not leftover:
            break
        # the next compressed block starts after another control word
        remaining = leftover[CONTROL_WORD_SIZE:]
    buf = b''.join(pieces)
    return buf[COMPRESSION_RECORD_SIZE:]
def _get_record_from_buf(buf, pos):
    """ Retrieve and unpack a NEXRAD record from a buffer. """
    header = _unpack_from_buf(buf, pos, MSG_HEADER)
    record = {'header': header}
    header_size = _structure_size(MSG_HEADER)
    if header['type'] == 31:
        # message 31: radial data, variable size with typed data blocks
        payload_size = header['size'] * 2 - 4
        new_pos = pos + header_size + payload_size
        mbuf = buf[pos + header_size:new_pos]
        msg_31_header = _unpack_from_buf(mbuf, 0, MSG_31)
        for key, ptr in msg_31_header.items():
            if not key.startswith('block_pointer') or ptr <= 0:
                continue
            name, block = _get_msg31_data_block(mbuf, ptr)
            record[name] = block
        record['msg31_header'] = msg_31_header
    elif header['type'] == 5:
        # message 5: volume coverage pattern (VCP) information
        msg5_size = _structure_size(MSG_5)
        elev_size = _structure_size(MSG_5_ELEV)
        record['msg5_header'] = _unpack_from_buf(buf, pos + header_size,
                                                 MSG_5)
        cuts = []
        for i in range(record['msg5_header']['num_cuts']):
            cut_offset = pos + header_size + msg5_size + elev_size * i
            cuts.append(_unpack_from_buf(buf, cut_offset, MSG_5_ELEV))
        record['cut_parameters'] = cuts
        new_pos = pos + RECORD_SIZE
    else:  # not message 31 or 5, no decoding performed
        new_pos = pos + RECORD_SIZE
    return new_pos, record
def _get_msg31_data_block(buf, ptr):
    """ Unpack a msg_31 data block into a dictionary.

    Parameters
    ----------
    buf : bytes
        Buffer holding a complete message 31.
    ptr : int
        Offset of the data block within buf.

    Returns
    -------
    block_name : str
        Three letter block name ('VOL', 'ELV', 'RAD' or a moment name).
    dic : dict
        Unpacked contents of the block; empty dict for unknown block names.
    """
    block_name = buf[ptr + 1: ptr + 4].decode('ascii').strip()
    if block_name == 'VOL':
        dic = _unpack_from_buf(buf, ptr, VOLUME_DATA_BLOCK)
    elif block_name == 'ELV':
        dic = _unpack_from_buf(buf, ptr, ELEVATION_DATA_BLOCK)
    elif block_name == 'RAD':
        dic = _unpack_from_buf(buf, ptr, RADIAL_DATA_BLOCK)
    elif block_name in ['REF', 'VEL', 'SW', 'ZDR', 'PHI', 'RHO']:
        dic = _unpack_from_buf(buf, ptr, GENERIC_DATA_BLOCK)
        ngates = dic['ngates']
        ptr2 = ptr + _structure_size(GENERIC_DATA_BLOCK)
        # np.frombuffer replaces the deprecated np.fromstring (removed in
        # NumPy 2.0).  PHI data is stored as 16-bit words, others as bytes.
        if block_name == 'PHI':
            data = np.frombuffer(buf[ptr2: ptr2 + ngates * 2], '>u2')
        else:
            data = np.frombuffer(buf[ptr2: ptr2 + ngates], '>u1')
        dic['data'] = data
    else:
        dic = {}
    return block_name, dic
def _structure_size(structure):
""" Find the size of a structure in bytes. """
return struct.calcsize('>' + ''.join([i[1] for i in structure]))
def _unpack_from_buf(buf, pos, structure):
    """ Unpack a structure from a buffer starting at a given offset. """
    nbytes = _structure_size(structure)
    chunk = buf[pos:pos + nbytes]
    return _unpack_structure(chunk, structure)
def _unpack_structure(string, structure):
""" Unpack a structure from a string """
fmt = '>' + ''.join([i[1] for i in structure]) # NEXRAD is big-endian
l = struct.unpack(fmt, string)
return dict(zip([i[0] for i in structure], l))
# NEXRAD Level II file structures and sizes
# The details on these structures are documented in:
# "Interface Control Document for the Archive II/User" RPG Build 12.0
# Document Number 2620010E
# and
# "Interface Control Document for the RDA/RPG" Open Build 13.0
# Document Number 2620002M
# Tables and page number refer to those in the second document unless
# otherwise noted.
# Each structure below is a tuple of (field_name, struct_format) pairs;
# formats are combined big-endian by _structure_size/_unpack_structure.
RECORD_SIZE = 2432
COMPRESSION_RECORD_SIZE = 12
CONTROL_WORD_SIZE = 4
# format of structure elements (struct module format characters)
# section 3.2.1, page 3-2
CODE1 = 'B'
CODE2 = 'H'
INT1 = 'B'
INT2 = 'H'
INT4 = 'I'
REAL4 = 'f'
REAL8 = 'd'
SINT1 = 'b'
SINT2 = 'h'
SINT4 = 'i'
# Figure 1 in Interface Control Document for the Archive II/User
# page 7-2
VOLUME_HEADER = (
    ('tape', '9s'),
    ('extension', '3s'),
    ('date', 'I'),
    ('time', 'I'),
    ('icao', '4s')
)
# Table II Message Header Data
# page 3-7
MSG_HEADER = (
    ('size', INT2),                 # size of data, no including header
    ('channels', INT1),
    ('type', INT1),
    ('seq_id', INT2),
    ('date', INT2),
    ('ms', INT4),
    ('segments', INT2),
    ('seg_num', INT2),
)
# Table XVII Digital Radar Generic Format Blocks (Message Type 31)
# pages 3-87 to 3-89
MSG_31 = (
    ('id', '4s'),                   # 0-3
    ('collect_ms', INT4),           # 4-7
    ('collect_date', INT2),         # 8-9
    ('azimuth_number', INT2),       # 10-11
    ('azimuth_angle', REAL4),       # 12-15
    ('compress_flag', CODE1),       # 16
    ('spare_0', INT1),              # 17
    ('radial_length', INT2),        # 18-19
    ('azimuth_resolution', CODE1),  # 20
    ('radial_spacing', CODE1),      # 21
    ('elevation_number', INT1),     # 22
    ('cut_sector', INT1),           # 23
    ('elevation_angle', REAL4),     # 24-27
    ('radial_blanking', CODE1),     # 28
    ('azimuth_mode', SINT1),        # 29
    ('block_count', INT2),          # 30-31
    ('block_pointer_1', INT4),      # 32-35  Volume Data Constant XVII-E
    ('block_pointer_2', INT4),      # 36-39  Elevation Data Constant XVII-F
    ('block_pointer_3', INT4),      # 40-43  Radial Data Constant XVII-H
    ('block_pointer_4', INT4),      # 44-47  Moment "REF" XVII-{B/I}
    ('block_pointer_5', INT4),      # 48-51  Moment "VEL"
    ('block_pointer_6', INT4),      # 52-55  Moment "SW"
    ('block_pointer_7', INT4),      # 56-59  Moment "ZDR"
    ('block_pointer_8', INT4),      # 60-63  Moment "PHI"
    ('block_pointer_9', INT4),      # 64-67  Moment "RHO"
)
# Table XI Volume Coverage Pattern Data (Message Type 5 & 7)
# pages 3-51 to 3-54
MSG_5 = (
    ('msg_size', INT2),
    ('pattern_type', CODE2),
    ('pattern_number', INT2),
    ('num_cuts', INT2),
    ('clutter_map_group', INT2),
    ('doppler_vel_res', CODE1),     # 2: 0.5 degrees, 4: 1.0 degrees
    ('pulse_width', CODE1),         # 2: short, 4: long
    ('spare', '10s')                # halfwords 7-11 (10 bytes, 5 halfwords)
)
MSG_5_ELEV = (
    ('elevation_angle', CODE2),  # scaled by 360/65536 for value in degrees.
    ('channel_config', CODE1),
    ('waveform_type', CODE1),
    ('super_resolution', CODE1),
    ('prf_number', INT1),
    ('prf_pulse_count', INT2),
    ('azimuth_rate', CODE2),
    ('ref_thresh', SINT2),
    ('vel_thresh', SINT2),
    ('sw_thresh', SINT2),
    ('zdr_thres', SINT2),
    ('phi_thres', SINT2),
    ('rho_thres', SINT2),
    ('edge_angle_1', CODE2),
    ('dop_prf_num_1', INT2),
    ('dop_prf_pulse_count_1', INT2),
    ('spare_1', '2s'),
    ('edge_angle_2', CODE2),
    ('dop_prf_num_2', INT2),
    ('dop_prf_pulse_count_2', INT2),
    ('spare_2', '2s'),
    ('edge_angle_3', CODE2),
    ('dop_prf_num_3', INT2),
    ('dop_prf_pulse_count_3', INT2),
    ('spare_3', '2s'),
)
# Table XVII-B Data Block (Descriptor of Generic Data Moment Type)
# pages 3-90 and 3-91
GENERIC_DATA_BLOCK = (
    ('block_type', '1s'),
    ('data_name', '3s'),        # VEL, REF, SW, RHO, PHI, ZDR
    ('reserved', INT4),
    ('ngates', INT2),
    ('first_gate', SINT2),
    ('gate_spacing', SINT2),
    ('thresh', SINT2),
    ('snr_thres', SINT2),
    ('flags', CODE1),
    ('word_size', INT1),
    ('scale', REAL4),
    ('offset', REAL4),
    # then data
)
# Table XVII-E Data Block (Volume Data Constant Type)
# page 3-92
VOLUME_DATA_BLOCK = (
    ('block_type', '1s'),
    ('data_name', '3s'),
    ('lrtup', INT2),
    ('version_major', INT1),
    ('version_minor', INT1),
    ('lat', REAL4),
    ('lon', REAL4),
    ('height', SINT2),
    ('feedhorn_height', INT2),
    ('refl_calib', REAL4),
    ('power_h', REAL4),
    ('power_v', REAL4),
    ('diff_refl_calib', REAL4),
    ('init_phase', REAL4),
    ('vcp', INT2),
    ('spare', '2s'),
)
# Table XVII-F Data Block (Elevation Data Constant Type)
# page 3-93
ELEVATION_DATA_BLOCK = (
    ('block_type', '1s'),
    ('data_name', '3s'),
    ('lrtup', INT2),
    ('atmos', SINT2),
    ('refl_calib', REAL4),
)
# Table XVII-H Data Block (Radial Data Constant Type)
# pages 3-93
RADIAL_DATA_BLOCK = (
    ('block_type', '1s'),
    ('data_name', '3s'),
    ('lrtup', INT2),
    ('unambig_range', SINT2),
    ('noise_h', REAL4),
    ('noise_v', REAL4),
    ('nyquist_vel', SINT2),
    ('spare', '2s')
)
| [
"bz2.BZ2Decompressor",
"numpy.float32",
"numpy.ma.masked_less_equal",
"struct.unpack",
"numpy.ones",
"datetime.datetime",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.fromstring",
"numpy.concatenate"
] | [((16597, 16618), 'bz2.BZ2Decompressor', 'bz2.BZ2Decompressor', ([], {}), '()\n', (16616, 16618), False, 'import bz2\n'), ((19926, 19952), 'struct.unpack', 'struct.unpack', (['fmt', 'string'], {}), '(fmt, string)\n', (19939, 19952), False, 'import struct\n'), ((5800, 5870), 'numpy.array', 'np.array', (["[m['msg31_header']['elevation_number'] for m in self.msg31s]"], {}), "([m['msg31_header']['elevation_number'] for m in self.msg31s])\n", (5808, 5870), True, 'import numpy as np\n'), ((9036, 9086), 'numpy.concatenate', 'np.concatenate', (['[self.scan_msgs[i] for i in scans]'], {}), '([self.scan_msgs[i] for i in scans])\n', (9050, 9086), True, 'import numpy as np\n'), ((9348, 9359), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (9356, 9359), True, 'import numpy as np\n'), ((9613, 9624), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (9621, 9624), True, 'import numpy as np\n'), ((12590, 12668), 'numpy.array', 'np.array', (["[(cp[i]['elevation_angle'] * scale) for i in scans]"], {'dtype': '"""float32"""'}), "([(cp[i]['elevation_angle'] * scale) for i in scans], dtype='float32')\n", (12598, 12668), True, 'import numpy as np\n'), ((16331, 16363), 'numpy.ma.masked_less_equal', 'np.ma.masked_less_equal', (['data', '(1)'], {}), '(data, 1)\n', (16354, 16363), True, 'import numpy as np\n'), ((16832, 16853), 'bz2.BZ2Decompressor', 'bz2.BZ2Decompressor', ([], {}), '()\n', (16851, 16853), False, 'import bz2\n'), ((10496, 10516), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (10504, 10516), False, 'from datetime import datetime, timedelta\n'), ((15316, 15356), 'numpy.ones', 'np.ones', (['(nrays, max_ngates)'], {'dtype': '"""u1"""'}), "((nrays, max_ngates), dtype='u1')\n", (15323, 15356), True, 'import numpy as np\n'), ((15390, 15430), 'numpy.ones', 'np.ones', (['(nrays, max_ngates)'], {'dtype': '"""u2"""'}), "((nrays, max_ngates), dtype='u2')\n", (15397, 15430), True, 'import numpy as np\n'), ((5926, 5954), 'numpy.where', 'np.where', 
(['(elev_nums == i + 1)'], {}), '(elev_nums == i + 1)\n', (5934, 5954), True, 'import numpy as np\n'), ((8831, 8848), 'numpy.arange', 'np.arange', (['ngates'], {}), '(ngates)\n', (8840, 8848), True, 'import numpy as np\n'), ((16086, 16119), 'numpy.float32', 'np.float32', (["msg[moment]['offset']"], {}), "(msg[moment]['offset'])\n", (16096, 16119), True, 'import numpy as np\n'), ((16144, 16176), 'numpy.float32', 'np.float32', (["msg[moment]['scale']"], {}), "(msg[moment]['scale'])\n", (16154, 16176), True, 'import numpy as np\n'), ((16201, 16233), 'numpy.ma.masked_less_equal', 'np.ma.masked_less_equal', (['data', '(1)'], {}), '(data, 1)\n', (16224, 16233), True, 'import numpy as np\n'), ((19198, 19247), 'numpy.fromstring', 'np.fromstring', (['buf[ptr2:ptr2 + ngates * 2]', '""">u2"""'], {}), "(buf[ptr2:ptr2 + ngates * 2], '>u2')\n", (19211, 19247), True, 'import numpy as np\n'), ((19282, 19327), 'numpy.fromstring', 'np.fromstring', (['buf[ptr2:ptr2 + ngates]', '""">u1"""'], {}), "(buf[ptr2:ptr2 + ngates], '>u1')\n", (19295, 19327), True, 'import numpy as np\n')] |
import numpy as np
from .base import BASE
# Inherits from the BASE configuration class
class DETECTION(BASE):
    """Configuration container for detection tasks, built on BASE defaults."""
    # Only the initializer is overridden.
    def __init__(self, db_config):
        """Populate detection defaults, then apply overrides from db_config."""
        super(DETECTION, self).__init__()
        # Configs for training
        # Training parameters [categories, scales, min scale, max scale, step]
        self._configs["categories"] = 10
        self._configs["rand_scales"] = [1]
        self._configs["rand_scale_min"] = 0.8
        self._configs["rand_scale_max"] = 1.4
        self._configs["rand_scale_step"] = 0.2
        # Configs for both training and testing
        # [input size, output sizes]
        self._configs["input_size"] = [383, 383]
        self._configs["output_sizes"] = [[96, 96], [48, 48], [24, 24], [12, 12]]
        # [thresholds]
        self._configs["score_threshold"] = 0.05
        self._configs["nms_threshold"] = 0.7
        self._configs["max_per_set"] = 40
        self._configs["max_per_image"] = 100
        self._configs["top_k"] = 20
        self._configs["ae_threshold"] = 1
        self._configs["nms_kernel"] = 3
        self._configs["num_dets"] = 1000
        self._configs["nms_algorithm"] = "exp_soft_nms"
        self._configs["weight_exp"] = 8
        self._configs["merge_bbox"] = False
        # self._configs["merge_bbox"] = True
        self._configs["data_aug"] = True
        self._configs["lighting"] = True
        self._configs["border"] = 64
        self._configs["gaussian_bump"] = False
        self._configs["gaussian_iou"] = 0.7
        self._configs["gaussian_radius"] = -1
        self._configs["rand_crop"] = False
        self._configs["rand_color"] = False
        self._configs["rand_center"] = True
        self._configs["init_sizes"] = [192, 255]
        self._configs["view_sizes"] = []
        self._configs["min_scale"] = 16
        self._configs["max_scale"] = 32
        self._configs["att_sizes"] = [[16, 16], [32, 32], [64, 64]]
        self._configs["att_ranges"] = [[96, 256], [32, 96], [0, 32]]
        self._configs["att_ratios"] = [16, 8, 4]
        self._configs["att_scales"] = [1, 1.5, 2]
        self._configs["att_thresholds"] = [0.3, 0.3, 0.3, 0.3]
        self._configs["att_nms_ks"] = [3, 3, 3]
        self._configs["att_max_crops"] = 8
        self._configs["ref_dets"] = True
        # Configs for testing
        self._configs["test_scales"] = [1]
        self._configs["test_flipped"] = True
        # apply caller-supplied overrides before deriving dependent values
        self.update_config(db_config)
        if self._configs["rand_scales"] is None:
            # derive the scale list from the min/max/step settings
            self._configs["rand_scales"] = np.arange(
                self._configs["rand_scale_min"],
                self._configs["rand_scale_max"],
                self._configs["rand_scale_step"]
            )
| [
"numpy.arange"
] | [((2632, 2745), 'numpy.arange', 'np.arange', (["self._configs['rand_scale_min']", "self._configs['rand_scale_max']", "self._configs['rand_scale_step']"], {}), "(self._configs['rand_scale_min'], self._configs['rand_scale_max'],\n self._configs['rand_scale_step'])\n", (2641, 2745), True, 'import numpy as np\n')] |
import pickle
import numpy as np

filename = 'Full_os_results.pickle'
with open(filename, 'rb') as f:
    results = pickle.load(f)

# Map each display label to the key holding its results in the pickle;
# 'ASVI' results are stored under the 'PE' key.
_METHOD_KEYS = {"ASVI": "PE",
                "ADVI (MF)": "ADVI (MF)",
                "ADVI (MN)": "ADVI (MN)",
                "NN": "NN"}

# For each method report the mean RMSE and its standard error
# (std / sqrt(#repetitions)) over the stored squared errors.
MSE = {}
for label, key in _METHOD_KEYS.items():
    rmse = [np.sqrt(error) for error in results[key]["MSE"]]
    MSE[label] = (np.mean(rmse), np.std(rmse) / np.sqrt(len(rmse)))

for key, val in MSE.items():
    print(key + ": {} +- {}".format(val[0], val[1]))
"pickle.load",
"numpy.sqrt"
] | [((111, 125), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (122, 125), False, 'import pickle\n'), ((152, 166), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (159, 166), True, 'import numpy as np\n'), ((305, 319), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (312, 319), True, 'import numpy as np\n'), ((479, 493), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (486, 493), True, 'import numpy as np\n'), ((646, 660), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (653, 660), True, 'import numpy as np\n'), ((206, 220), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (213, 220), True, 'import numpy as np\n'), ((366, 380), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (373, 380), True, 'import numpy as np\n'), ((540, 554), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (547, 554), True, 'import numpy as np\n'), ((700, 714), 'numpy.sqrt', 'np.sqrt', (['error'], {}), '(error)\n', (707, 714), True, 'import numpy as np\n')] |
import os, sys
import numpy as np
from astropy.io import fits
sys.path.append('../')
import diskdictionary as disk

targets = ['RULup', 'Elias20', 'Sz129', 'GWLup', 'Elias24', 'AS209']

# directory holding the JvM-corrected deep images (loop invariant)
idir = '../CSD_modeling/data/'

for target in targets:
    # beam solid angle of a Gaussian beam: pi * bmaj * bmin / (4 ln 2)
    with fits.open(idir + 'deep_' + target + '_data.JvMcorr.fits') as hdu:
        hd = hdu[0].header
    bmaj, bmin = hd['BMAJ'] * 3600, hd['BMIN'] * 3600
    beam_area = np.pi * bmaj * bmin / (4 * np.log(2))
    # search annulus solid angle for each gap: pi * ((r+w)^2 - (r-w)^2)
    rgap = disk.disk[target]['rgap']
    wgap = disk.disk[target]['wgap']
    for ir, (rg, wg) in enumerate(zip(rgap, wgap)):
        search_area = np.pi * ((rg + wg)**2 - (rg - wg)**2)
        # report the number of beams contained in the search annulus
        print('%10a gap%1i %4f' % (target, ir, search_area / beam_area))
| [
"sys.path.append",
"numpy.log",
"astropy.io.fits.open"
] | [((62, 84), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (77, 84), False, 'import os, sys\n'), ((286, 347), 'astropy.io.fits.open', 'fits.open', (["(idir + 'deep_' + targets[i] + '_data.JvMcorr.fits')"], {}), "(idir + 'deep_' + targets[i] + '_data.JvMcorr.fits')\n", (295, 347), False, 'from astropy.io import fits\n'), ((478, 487), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (484, 487), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Default SchedulerPipeline for QuanlseSchedulerSuperconduct.
"""
from numpy import array
from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, \
leftAlignSingleGates, delayFirstGate, findMaxT, addToJob
from Quanlse.Scheduler import Scheduler
from Quanlse.QWaveform import QJob
def _gatesBeforeMulti(layer, scheduler):
"""
Find gate number before the first multi-qubit gate
"""
flag = [True] * scheduler.subSysNum
gateNumber = [len(layer)] * scheduler.subSysNum
for i in range(len(layer[0])):
for j in range(len(layer)):
if layer[j][i] is not None:
if len(layer[j][i].qRegIndexList) >= 2 and flag[i] is True:
gateNumber[i] = j
flag[i] = False
return gateNumber
def _centerAlignedCore(job: QJob, scheduler: Scheduler) -> QJob:
    """
    The center-aligned strategy for scheduling: single-qubit gates are
    left-aligned within their layers, then the start of each qubit's
    sequence is delayed up to its first multi-qubit gate.
    :param job: job to be returned
    :param scheduler: Scheduler object containing the circuit information
    :return: the returned QJob object
    """
    # Initialize one (empty) gate list per qubit
    layer = []
    for i in range(scheduler.subSysNum):
        layer.append([])
    # First convert gates in Scheduler to layers
    reverseToLayer(layer, scheduler)
    reverseFillUp(layer)
    # Left-align the single-qubit gates
    leftAlignSingleGates(layer)
    # Transpose: per-qubit lists -> per-layer lists
    layer = array(layer).T.tolist()
    # Find gate number before the first multi-qubit gate on each qubit
    gatesBeforeMultiQubitGate = _gatesBeforeMulti(layer, scheduler)
    # Start the first gate as late as possible
    delayFirstGate(layer, gatesBeforeMultiQubitGate)
    # Clear waves and cache before rebuilding the job
    job.clearWaves()
    job.clearCache()
    # Find max time for each layer
    maxi = findMaxT(layer, scheduler)
    # Add waves to the job according to the scheduled layers
    addToJob(layer=layer, scheduler=scheduler, maxT=maxi, job=job)
    job.buildWaveCache()
    return job
# Ready-made scheduling step wrapping the center-aligned strategy above.
centerAligned = SchedulerProcess("CenterAligned", _centerAlignedCore)
"""
A SchedulerProcess instance containing the center-aligned scheduling strategy.
"""
| [
"Quanlse.Scheduler.SchedulerPipeline.findMaxT",
"Quanlse.Scheduler.SchedulerPipeline.SchedulerProcess",
"Quanlse.Scheduler.SchedulerPipeline.addToJob",
"Quanlse.Scheduler.SchedulerPipeline.reverseFillUp",
"Quanlse.Scheduler.SchedulerPipeline.leftAlignSingleGates",
"numpy.array",
"Quanlse.Scheduler.Sched... | [((2617, 2670), 'Quanlse.Scheduler.SchedulerPipeline.SchedulerProcess', 'SchedulerProcess', (['"""CenterAligned"""', '_centerAlignedCore'], {}), "('CenterAligned', _centerAlignedCore)\n", (2633, 2670), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((1901, 1933), 'Quanlse.Scheduler.SchedulerPipeline.reverseToLayer', 'reverseToLayer', (['layer', 'scheduler'], {}), '(layer, scheduler)\n', (1915, 1933), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((1938, 1958), 'Quanlse.Scheduler.SchedulerPipeline.reverseFillUp', 'reverseFillUp', (['layer'], {}), '(layer)\n', (1951, 1958), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((2006, 2033), 'Quanlse.Scheduler.SchedulerPipeline.leftAlignSingleGates', 'leftAlignSingleGates', (['layer'], {}), '(layer)\n', (2026, 2033), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((2273, 2321), 'Quanlse.Scheduler.SchedulerPipeline.delayFirstGate', 'delayFirstGate', (['layer', 'gatesBeforeMultiQubitGate'], {}), '(layer, gatesBeforeMultiQubitGate)\n', (2287, 2321), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((2439, 2465), 'Quanlse.Scheduler.SchedulerPipeline.findMaxT', 'findMaxT', (['layer', 'scheduler'], {}), '(layer, scheduler)\n', (2447, 2465), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((2494, 
2556), 'Quanlse.Scheduler.SchedulerPipeline.addToJob', 'addToJob', ([], {'layer': 'layer', 'scheduler': 'scheduler', 'maxT': 'maxi', 'job': 'job'}), '(layer=layer, scheduler=scheduler, maxT=maxi, job=job)\n', (2502, 2556), False, 'from Quanlse.Scheduler.SchedulerPipeline import SchedulerProcess, reverseToLayer, reverseFillUp, leftAlignSingleGates, delayFirstGate, findMaxT, addToJob\n'), ((2070, 2082), 'numpy.array', 'array', (['layer'], {}), '(layer)\n', (2075, 2082), False, 'from numpy import array\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.statistical_inference.inference."""
from unittest import mock
from absl.testing import parameterized
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import datasets
from sklearn import model_selection
from absl.testing import absltest
from gps_building_blocks.ml.statistical_inference import data_preparation
class InferenceTest(parameterized.TestCase):
_missing_data = pd.DataFrame(
data=[[np.nan, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, np.nan]],
columns=['first', 'second'])
def test_missing_value_emits_warning_twice(self):
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
def test_check_data_raises_exception_on_missing_data(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(data_preparation.MissingValueError):
inference_data.data_check(raise_on_error=True)
def test_invalid_target_column_raise_exception(self):
with self.assertRaises(KeyError):
data_preparation.InferenceData(
initial_data=self._missing_data,
target_column='non_ci_sono')
def test_impute_missing_values_replaced_with_mean(self):
inference_data = data_preparation.InferenceData(self._missing_data)
expected_result = pd.DataFrame(
data=[[0.4000, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, 1.0000]],
columns=['first', 'second'])
result = inference_data.impute_missing_values(strategy='mean')
pd.testing.assert_frame_equal(result, expected_result)
def test_fixed_effect_raise_exception_on_categorical_covariate(self):
data = pd.DataFrame(
data=[['0', 0.0, '1', 3.0],
['1', 0.0, '2', 2.0],
['1', 1.0, '3', 2.0],
['1', 1.0, '4', 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
inference_data = data_preparation.InferenceData(data)
with self.assertRaises(data_preparation.CategoricalCovariateError):
inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
def test_fixed_effect_demeaning_subtract_mean_in_groups(self):
data = pd.DataFrame(
data=[['0', 0.0, 1, 3.0],
['1', 0.0, 2, 2.0],
['1', 1.0, 3, 2.0],
['1', 1.0, 4, 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
expected_result = pd.DataFrame(
data=[['0', 0.0, 2.5, 2.0],
['1', 0.0, 2.5, 2.0],
['1', 1.0, 2.0, 2.5],
['1', 1.0, 3.0, 1.5]],
columns=data.columns,
index=data.index).set_index(['control_1', 'control_2'], append=True)
inference_data = data_preparation.InferenceData(data)
result = inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
pd.testing.assert_frame_equal(result, expected_result)
def test_address_low_variance_removes_column(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0],
[0.0, 1.0, 0.0, 10.0],
[1.0, 1.0, 0.0, 5.00],
[1.0, 0.0, 0.0, 0.00]],
columns=['control', 'variable', 'variable_1', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0],
[0.0, 1.0, 10.0],
[1.0, 1.0, 5.00],
[1.0, 0.0, 0.00]],
columns=['control', 'variable', 'outcome'])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.address_low_variance(drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_raises_error_on_singular_correlation_matrix(self):
singular_correlation_matrix_df = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0],
[1.0, 2.0, 3.0, 4.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
singular_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_raises_error_on_ill_conditioned_correlation_matrix(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
with self.assertRaises(data_preparation.SingularDataError):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_error_has_correct_message(self):
ill_conditioned_correlation_matrix_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0],
[0.0, 2.0, 3.0, 0.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
ill_conditioned_correlation_matrix_df, target_column='outcome')
expected_message = (
'Inference Data has a singular or nearly singular correlation matrix. '
'This could be caused by extremely correlated or collinear columns. '
'The three pairs of columns with the highest absolute correlation '
'coefficients are: (control,variable_3): 0.970, (variable_1,variable_3)'
': -0.700, (control,variable_1): -0.577. This could also be caused by '
'columns with extremiely low variance. Recommend running the '
'address_low_variance() method before VIF. Alternatively, consider '
'running address_collinearity_with_vif() with '
'use_correlation_matrix_inversion=False to avoid this error.'
)
with self.assertRaises(
data_preparation.SingularDataError, msg=expected_message):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=False)
def test_vif_noise_injection_catches_perfect_correlation(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_correlated_column'] = iris_data['petal length (cm)']
expected_result = iris_data.drop(
columns=['petal length (cm)', 'perfectly_correlated_column'])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_catches_perfect_collinearity(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
iris_data['perfectly_collinear_column'] = iris_data[
'petal length (cm)'] + iris_data['petal width (cm)']
expected_result = iris_data.drop(columns=[
'petal length (cm)', 'petal width (cm)', 'perfectly_collinear_column'
])
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='quick',
drop=True,
handle_singular_data_errors_automatically=True,
vif_threshold=50.0)
pd.testing.assert_frame_equal(result, expected_result)
def test_vif_noise_injection_fails_correctly_when_too_few_samples(self):
too_few_samples_df = pd.DataFrame(
data=[[1.0, 2.0, 3.0, 4.0, 1.0],
[0.0, 2.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 5.0, 1.0]],
columns=['control', 'variable_1', 'variable_2', 'variable_3',
'outcome'])
inference_data = data_preparation.InferenceData(
too_few_samples_df, target_column='outcome')
expected_regex = (
'Automatic attempt to resolve SingularDataError by '
'injecting artifical noise to the data has failed. This '
'probably means the dataset has too many features relative '
'to the number of samples.')
with self.assertRaisesRegex(data_preparation.SingularDataError,
expected_regex):
inference_data.address_collinearity_with_vif(
handle_singular_data_errors_automatically=True)
def test_vif_method_fails_correctly_with_unknown_value(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(ValueError):
inference_data.address_collinearity_with_vif(
vif_method='incorrect_value')
@parameterized.named_parameters({
'testcase_name': 'scale_10',
'scaling': 10,
}, {
'testcase_name': 'scale_50',
'scaling': 50,
}, {
'testcase_name': 'scale_-50',
'scaling': -50,
})
def test_minmaxscaling_drops_appropriate_variables(self, scaling):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'outcome'])
data = data * scaling
expected_result = data[['variable_1', 'outcome']]
inference_data = data_preparation.InferenceData(
data)
result = inference_data.address_low_variance(
threshold=.15,
drop=True,
minmax_scaling=True,
)
pd.testing.assert_frame_equal(result, expected_result)
def test_zscored_input_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
data = data.apply(stats.zscore).fillna(0)
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance()
def test_minmaxscaling_with_invalid_threshold_raises_warning(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0],
[0.1, 1.0, 0.0, 5.00], [0.2, 0.0, 0.0, 0.00]],
columns=['variable_0', 'variable_1', 'variable_2', 'variable_3'])
inference_data = data_preparation.InferenceData(data)
with self.assertWarns(Warning):
_ = inference_data.address_low_variance(minmax_scaling=True, threshold=.5)
def test_address_collinearity_with_vif_removes_column(self):
iris = datasets.load_iris()
iris_data = pd.DataFrame(
data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names'] + ['target'])
expected_result = iris_data.drop(columns='petal length (cm)')
inference_data = data_preparation.InferenceData(
iris_data, target_column='target')
result = inference_data.address_collinearity_with_vif(
vif_method='sequential',
drop=True)
pd.testing.assert_frame_equal(result, expected_result)
def test_encode_categorical_covariate_dummy_variable_2(self):
data = pd.DataFrame(
data=[[0.0, 1.0, 'a', 10.0],
[0.0, 1.0, 'b', 10.0],
[1.0, 1.0, 'c', 5.00],
[1.0, 0.0, 'a', 0.00]],
columns=['control', 'variable_1', 'variable_2', 'outcome'])
expected_result = pd.DataFrame(
data=[[0.0, 1.0, 10.0, 1, 0, 0],
[0.0, 1.0, 10.0, 0, 1, 0],
[1.0, 1.0, 5.00, 0, 0, 1],
[1.0, 0.0, 0.00, 1, 0, 0]],
columns=[
'control', 'variable_1', 'outcome', 'variable_2_a', 'variable_2_b',
'variable_2_c'
])
inference_data = data_preparation.InferenceData(
data, target_column='outcome')
result = inference_data.encode_categorical_covariates(
columns=['variable_2'])
pd.testing.assert_frame_equal(result, expected_result)
@parameterized.named_parameters(
('single_selections', ['1', '2', '3'], ['1', '2', '3']),
('double_selection', ['1,2', '3'], ['1', '2', '3']),
('early_stopping', ['1', ''], ['1']),
('all_at_once', ['1,2,3'], ['1', '2', '3']),
)
def test_address_collinearity_with_vif_interactive(
self, user_inputs, expected_dropped):
dataframe = pd.DataFrame(
data=[[1.1, 2.1, 3.1, 4.1, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 0],
[1.0, 2.0, 3.0, 4.0, 1]],
columns=['1', '2', '3', '4', 'target'])
data = data_preparation.InferenceData(dataframe, target_column='target')
with mock.patch.object(data_preparation, '_input_mock') as input_mock:
# Avoid Colab\Notebook prints in tests output
with mock.patch.object(data_preparation, '_print_mock') as _:
user_inputs = list(reversed(user_inputs))
input_mock.side_effect = lambda x: user_inputs.pop()
result = data.address_collinearity_with_vif(
vif_method='interactive',
drop=True,
use_correlation_matrix_inversion=False
)
pd.testing.assert_frame_equal(
result,
dataframe.drop(expected_dropped, axis=1))
@parameterized.named_parameters(
('onehot_returns_expected_bins', False, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.02, 4.0]', 'variable_(4.0, 8.0]',
'variable_(8.0, 12.0]', 'variable_(12.0, 16.0]',
'variable_(16.0, 20.0]'])),
('equal_sized_onehot_returns_expected_bins', True, False, pd.DataFrame(
[[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]],
columns=['variable_(-0.001, 2.0]', 'variable_(2.0, 4.0]',
'variable_(4.0, 6.0]', 'variable_(6.0, 8.0]',
'variable_(8.0, 20.0]'])),
('scalar_numeric_returns_expected_bins', False, True, pd.DataFrame(
[0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4], columns=['variable'])),
('equal_sized_numeric_expected_bins', True, True, pd.DataFrame(
[0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4], columns=['variable'])),
)
def test_descretize(self, equal_sized_bins, numeric, expected_result):
data = data_preparation.InferenceData(pd.DataFrame(
data=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20],
columns=['variable']))
result = data.discretize_numeric_covariate(
'variable', equal_sized_bins=equal_sized_bins, bins=5, numeric=numeric)
pd.testing.assert_frame_equal(result, expected_result, check_dtype=False)
@parameterized.named_parameters(
('with_groups_kfold_as_int',
3,
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
('with_groups_kfold_as_object',
model_selection.GroupKFold(n_splits=3),
np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 3]),
[pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5]}),
pd.DataFrame({'variable': [2, 3, 6, 7, 8, 9]},
index=[2, 3, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5, 6, 7, 8, 9]},
index=[0, 1, 4, 5, 6, 7, 8, 9])],
[pd.DataFrame({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5]),
pd.DataFrame({'variable': [2, 3]}, index=[2, 3])]),
)
def test_split_with_groups_yields_expected_folds_with_non_overlaping_groups(
self,
cross_validation,
groups,
expected_trains,
expected_tests):
data = data_preparation.InferenceData(
pd.DataFrame({
'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}))
iterator = zip(data.split(cross_validation=cross_validation, groups=groups),
expected_trains,
expected_tests)
for (train_data, test_data), expected_train, expected_test in iterator:
train_groups = set(groups[train_data.data.index.tolist()])
test_groups = set(groups[test_data.data.index.tolist()])
pd.testing.assert_frame_equal(
train_data.data, expected_train, check_dtype=False)
pd.testing.assert_frame_equal(
test_data.data, expected_test, check_dtype=False)
self.assertEmpty(train_groups.intersection(test_groups))
@parameterized.named_parameters(
('without_groups_kfold_as_int', 3,
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]),
pd.DataFrame({'variable': [4, 5, 6]}, index=[4, 5, 6]),
pd.DataFrame({'variable': [7, 8, 9]}, index=[7, 8, 9])]),
('without_groups_kfold_as_object',
model_selection.KFold(n_splits=3),
[pd.DataFrame({'variable': [4, 5, 6, 7, 8, 9]},
index=[4, 5, 6, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 7, 8, 9]},
index=[0, 1, 2, 3, 7, 8, 9]),
pd.DataFrame({'variable': [0, 1, 2, 3, 4, 5, 6]},
index=[0, 1, 2, 3, 4, 5, 6])],
[pd.DataFrame({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3]),
pd.DataFrame({'variable': [4, 5, 6]}, index=[4, 5, 6]),
pd.DataFrame({'variable': [7, 8, 9]}, index=[7, 8, 9])]),
)
def test_split_without_groups_yields_expected_folds(self,
cross_validation,
expected_trains,
expected_tests):
data = data_preparation.InferenceData(
pd.DataFrame({
'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}))
iterator = zip(data.split(cross_validation=cross_validation),
expected_trains,
expected_tests)
for (train_data, test_data), expected_train, expected_test in iterator:
pd.testing.assert_frame_equal(
train_data.data, expected_train, check_dtype=False)
pd.testing.assert_frame_equal(
test_data.data, expected_test, check_dtype=False)
def test_split_can_use_non_integer_indices(self):
expected_trains = [
pd.DataFrame(data={'variable': [4, 5, 6, 7, 8, 9]},
index=['4', '5', '6', '7', '8', '9']),
pd.DataFrame(data={'variable': [0, 1, 2, 3, 7, 8, 9]},
index=['0', '1', '2', '3', '7', '8', '9']),
pd.DataFrame(data={'variable': [0, 1, 2, 3, 4, 5, 6]},
index=['0', '1', '2', '3', '4', '5', '6'])]
expected_tests = [
pd.DataFrame({'variable': [0, 1, 2, 3]}, index=['0', '1', '2', '3']),
pd.DataFrame({'variable': [4, 5, 6]}, index=['4', '5', '6']),
pd.DataFrame({'variable': [7, 8, 9]}, index=['7', '8', '9'])]
data = data_preparation.InferenceData(
pd.DataFrame(data={'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],},
index=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']))
iterator = zip(data.split(cross_validation=3),
expected_trains,
expected_tests)
for (train_data, test_data), expected_train, expected_test in iterator:
pd.testing.assert_frame_equal(
train_data.data, expected_train, check_dtype=False)
pd.testing.assert_frame_equal(
test_data.data, expected_test, check_dtype=False)
if __name__ == '__main__':
absltest.main()
| [
"pandas.DataFrame",
"absl.testing.absltest.main",
"pandas.testing.assert_frame_equal",
"sklearn.datasets.load_iris",
"gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData",
"unittest.mock.patch.object",
"sklearn.model_selection.KFold",
"numpy.array",
"sklearn.model_selection.G... | [((1017, 1123), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[np.nan, 0.0], [0.6, 0.0], [0.4, 3.0], [0.2, np.nan]]', 'columns': "['first', 'second']"}), "(data=[[np.nan, 0.0], [0.6, 0.0], [0.4, 3.0], [0.2, np.nan]],\n columns=['first', 'second'])\n", (1029, 1123), True, 'import pandas as pd\n'), ((10291, 10469), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["{'testcase_name': 'scale_10', 'scaling': 10}", "{'testcase_name': 'scale_50', 'scaling': 50}", "{'testcase_name': 'scale_-50', 'scaling': -50}"], {}), "({'testcase_name': 'scale_10', 'scaling': 10},\n {'testcase_name': 'scale_50', 'scaling': 50}, {'testcase_name':\n 'scale_-50', 'scaling': -50})\n", (10321, 10469), False, 'from absl.testing import parameterized\n'), ((13515, 13751), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('single_selections', ['1', '2', '3'], ['1', '2', '3'])", "('double_selection', ['1,2', '3'], ['1', '2', '3'])", "('early_stopping', ['1', ''], ['1'])", "('all_at_once', ['1,2,3'], ['1', '2', '3'])"], {}), "(('single_selections', ['1', '2', '3'], ['1',\n '2', '3']), ('double_selection', ['1,2', '3'], ['1', '2', '3']), (\n 'early_stopping', ['1', ''], ['1']), ('all_at_once', ['1,2,3'], ['1',\n '2', '3']))\n", (13545, 13751), False, 'from absl.testing import parameterized\n'), ((22116, 22131), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (22129, 22131), False, 'from absl.testing import absltest\n'), ((1568, 1618), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['self._missing_data'], {}), '(self._missing_data)\n', (1598, 1618), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((2033, 2083), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['self._missing_data'], {}), 
'(self._missing_data)\n', (2063, 2083), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((2106, 2207), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.4, 0.0], [0.6, 0.0], [0.4, 3.0], [0.2, 1.0]]', 'columns': "['first', 'second']"}), "(data=[[0.4, 0.0], [0.6, 0.0], [0.4, 3.0], [0.2, 1.0]], columns\n =['first', 'second'])\n", (2118, 2207), True, 'import pandas as pd\n'), ((2359, 2413), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2388, 2413), True, 'import pandas as pd\n'), ((2498, 2730), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[['0', 0.0, '1', 3.0], ['1', 0.0, '2', 2.0], ['1', 1.0, '3', 2.0], ['1', \n 1.0, '4', 1.0]]", 'columns': "['control_1', 'control_2', 'variable_1', 'variable_2']", 'index': "['group1', 'group2', 'group3', 'group3']"}), "(data=[['0', 0.0, '1', 3.0], ['1', 0.0, '2', 2.0], ['1', 1.0,\n '3', 2.0], ['1', 1.0, '4', 1.0]], columns=['control_1', 'control_2',\n 'variable_1', 'variable_2'], index=['group1', 'group2', 'group3', 'group3']\n )\n", (2510, 2730), True, 'import pandas as pd\n'), ((2806, 2842), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {}), '(data)\n', (2836, 2842), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((3150, 3375), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[['0', 0.0, 1, 3.0], ['1', 0.0, 2, 2.0], ['1', 1.0, 3, 2.0], ['1', 1.0, 4, 1.0]\n ]", 'columns': "['control_1', 'control_2', 'variable_1', 'variable_2']", 'index': "['group1', 'group2', 'group3', 'group3']"}), "(data=[['0', 0.0, 1, 3.0], ['1', 0.0, 2, 2.0], ['1', 1.0, 3, \n 2.0], ['1', 1.0, 4, 1.0]], columns=['control_1', 'control_2',\n 'variable_1', 'variable_2'], index=['group1', 'group2', 'group3', 'group3']\n )\n", (3162, 3375), True, 'import pandas as pd\n'), ((3739, 3775), 
'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {}), '(data)\n', (3769, 3775), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((3939, 3993), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (3968, 3993), True, 'import pandas as pd\n'), ((4060, 4235), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 0.0, 10.0], [0.0, 1.0, 0.0, 10.0], [1.0, 1.0, 0.0, 5.0], [1.0, \n 0.0, 0.0, 0.0]]', 'columns': "['control', 'variable', 'variable_1', 'outcome']"}), "(data=[[0.0, 1.0, 0.0, 10.0], [0.0, 1.0, 0.0, 10.0], [1.0, 1.0,\n 0.0, 5.0], [1.0, 0.0, 0.0, 0.0]], columns=['control', 'variable',\n 'variable_1', 'outcome'])\n", (4072, 4235), True, 'import pandas as pd\n'), ((4311, 4449), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 10.0], [0.0, 1.0, 10.0], [1.0, 1.0, 5.0], [1.0, 0.0, 0.0]]', 'columns': "['control', 'variable', 'outcome']"}), "(data=[[0.0, 1.0, 10.0], [0.0, 1.0, 10.0], [1.0, 1.0, 5.0], [\n 1.0, 0.0, 0.0]], columns=['control', 'variable', 'outcome'])\n", (4323, 4449), True, 'import pandas as pd\n'), ((4528, 4589), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {'target_column': '"""outcome"""'}), "(data, target_column='outcome')\n", (4558, 4589), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((4664, 4718), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (4693, 4718), True, 'import pandas as pd\n'), ((4823, 5034), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1.1, 2.1, 3.1, 4.1, 1.0], [1.0, 2.0, 3.0, 4.0, 1.0], [1.0, 2.0, 3.0, 4.0,\n 1.0], [1.0, 2.0, 3.0, 4.0, 1.0]]', 'columns': "['control', 'variable_1', 'variable_2', 
'variable_3', 'outcome']"}), "(data=[[1.1, 2.1, 3.1, 4.1, 1.0], [1.0, 2.0, 3.0, 4.0, 1.0], [\n 1.0, 2.0, 3.0, 4.0, 1.0], [1.0, 2.0, 3.0, 4.0, 1.0]], columns=[\n 'control', 'variable_1', 'variable_2', 'variable_3', 'outcome'])\n", (4835, 5034), True, 'import pandas as pd\n'), ((5122, 5213), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['singular_correlation_matrix_df'], {'target_column': '"""outcome"""'}), "(singular_correlation_matrix_df,\n target_column='outcome')\n", (5152, 5213), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((5513, 5724), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [1.0, 1.0, 2.0, 5.0,\n 1.0], [0.0, 2.0, 3.0, 0.0, 1.0]]', 'columns': "['control', 'variable_1', 'variable_2', 'variable_3', 'outcome']"}), "(data=[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [\n 1.0, 1.0, 2.0, 5.0, 1.0], [0.0, 2.0, 3.0, 0.0, 1.0]], columns=[\n 'control', 'variable_1', 'variable_2', 'variable_3', 'outcome'])\n", (5525, 5724), True, 'import pandas as pd\n'), ((5812, 5910), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['ill_conditioned_correlation_matrix_df'], {'target_column': '"""outcome"""'}), "(ill_conditioned_correlation_matrix_df,\n target_column='outcome')\n", (5842, 5910), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((6185, 6396), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [1.0, 1.0, 2.0, 5.0,\n 1.0], [0.0, 2.0, 3.0, 0.0, 1.0]]', 'columns': "['control', 'variable_1', 'variable_2', 'variable_3', 'outcome']"}), "(data=[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [\n 1.0, 1.0, 2.0, 5.0, 1.0], [0.0, 2.0, 3.0, 0.0, 1.0]], columns=[\n 'control', 'variable_1', 'variable_2', 'variable_3', 'outcome'])\n", 
(6197, 6396), True, 'import pandas as pd\n'), ((6484, 6582), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['ill_conditioned_correlation_matrix_df'], {'target_column': '"""outcome"""'}), "(ill_conditioned_correlation_matrix_df,\n target_column='outcome')\n", (6514, 6582), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((7573, 7593), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (7591, 7593), False, 'from sklearn import datasets\n'), ((7610, 7713), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "np.c_[iris['data'], iris['target']]", 'columns': "(iris['feature_names'] + ['target'])"}), "(data=np.c_[iris['data'], iris['target']], columns=iris[\n 'feature_names'] + ['target'])\n", (7622, 7713), True, 'import pandas as pd\n'), ((7934, 7999), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['iris_data'], {'target_column': '"""target"""'}), "(iris_data, target_column='target')\n", (7964, 7999), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((8205, 8259), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (8234, 8259), True, 'import pandas as pd\n'), ((8339, 8359), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (8357, 8359), False, 'from sklearn import datasets\n'), ((8376, 8479), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "np.c_[iris['data'], iris['target']]", 'columns': "(iris['feature_names'] + ['target'])"}), "(data=np.c_[iris['data'], iris['target']], columns=iris[\n 'feature_names'] + ['target'])\n", (8388, 8479), True, 'import pandas as pd\n'), ((8764, 8829), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['iris_data'], {'target_column': 
'"""target"""'}), "(iris_data, target_column='target')\n", (8794, 8829), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((9035, 9089), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (9064, 9089), True, 'import pandas as pd\n'), ((9191, 9374), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [1.0, 1.0, 2.0, 5.0,\n 1.0]]', 'columns': "['control', 'variable_1', 'variable_2', 'variable_3', 'outcome']"}), "(data=[[1.0, 2.0, 3.0, 4.0, 1.0], [0.0, 2.0, 0.0, 1.0, 1.0], [\n 1.0, 1.0, 2.0, 5.0, 1.0]], columns=['control', 'variable_1',\n 'variable_2', 'variable_3', 'outcome'])\n", (9203, 9374), True, 'import pandas as pd\n'), ((9449, 9524), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['too_few_samples_df'], {'target_column': '"""outcome"""'}), "(too_few_samples_df, target_column='outcome')\n", (9479, 9524), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((10104, 10154), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['self._missing_data'], {}), '(self._missing_data)\n', (10134, 10154), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((10593, 10774), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0, 0.0, 5.0], [0.2,\n 0.0, 0.0, 0.0]]', 'columns': "['variable_0', 'variable_1', 'variable_2', 'outcome']"}), "(data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0,\n 0.0, 5.0], [0.2, 0.0, 0.0, 0.0]], columns=['variable_0', 'variable_1',\n 'variable_2', 'outcome'])\n", (10605, 10774), True, 'import pandas as pd\n'), ((10902, 10938), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 
'data_preparation.InferenceData', (['data'], {}), '(data)\n', (10932, 10938), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((11080, 11134), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (11109, 11134), True, 'import pandas as pd\n'), ((11194, 11378), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0, 0.0, 5.0], [0.2,\n 0.0, 0.0, 0.0]]', 'columns': "['variable_0', 'variable_1', 'variable_2', 'variable_3']"}), "(data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0,\n 0.0, 5.0], [0.2, 0.0, 0.0, 0.0]], columns=['variable_0', 'variable_1',\n 'variable_2', 'variable_3'])\n", (11206, 11378), True, 'import pandas as pd\n'), ((11472, 11508), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {}), '(data)\n', (11502, 11508), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((11675, 11859), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0, 0.0, 5.0], [0.2,\n 0.0, 0.0, 0.0]]', 'columns': "['variable_0', 'variable_1', 'variable_2', 'variable_3']"}), "(data=[[0.0, 1.0, 0.0, 10.0], [-0.5, 1.0, 0.0, 10.0], [0.1, 1.0,\n 0.0, 5.0], [0.2, 0.0, 0.0, 0.0]], columns=['variable_0', 'variable_1',\n 'variable_2', 'variable_3'])\n", (11687, 11859), True, 'import pandas as pd\n'), ((11907, 11943), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {}), '(data)\n', (11937, 11943), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((12136, 12156), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (12154, 12156), False, 'from sklearn import datasets\n'), ((12173, 12276), 'pandas.DataFrame', 
'pd.DataFrame', ([], {'data': "np.c_[iris['data'], iris['target']]", 'columns': "(iris['feature_names'] + ['target'])"}), "(data=np.c_[iris['data'], iris['target']], columns=iris[\n 'feature_names'] + ['target'])\n", (12185, 12276), True, 'import pandas as pd\n'), ((12377, 12442), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['iris_data'], {'target_column': '"""target"""'}), "(iris_data, target_column='target')\n", (12407, 12442), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((12568, 12622), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (12597, 12622), True, 'import pandas as pd\n'), ((12699, 12876), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[[0.0, 1.0, 'a', 10.0], [0.0, 1.0, 'b', 10.0], [1.0, 1.0, 'c', 5.0], [1.0, \n 0.0, 'a', 0.0]]", 'columns': "['control', 'variable_1', 'variable_2', 'outcome']"}), "(data=[[0.0, 1.0, 'a', 10.0], [0.0, 1.0, 'b', 10.0], [1.0, 1.0,\n 'c', 5.0], [1.0, 0.0, 'a', 0.0]], columns=['control', 'variable_1',\n 'variable_2', 'outcome'])\n", (12711, 12876), True, 'import pandas as pd\n'), ((12952, 13180), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0.0, 1.0, 10.0, 1, 0, 0], [0.0, 1.0, 10.0, 0, 1, 0], [1.0, 1.0, 5.0, 0, 0,\n 1], [1.0, 0.0, 0.0, 1, 0, 0]]', 'columns': "['control', 'variable_1', 'outcome', 'variable_2_a', 'variable_2_b',\n 'variable_2_c']"}), "(data=[[0.0, 1.0, 10.0, 1, 0, 0], [0.0, 1.0, 10.0, 0, 1, 0], [\n 1.0, 1.0, 5.0, 0, 0, 1], [1.0, 0.0, 0.0, 1, 0, 0]], columns=['control',\n 'variable_1', 'outcome', 'variable_2_a', 'variable_2_b', 'variable_2_c'])\n", (12964, 13180), True, 'import pandas as pd\n'), ((13289, 13350), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['data'], {'target_column': '"""outcome"""'}), "(data, 
target_column='outcome')\n", (13319, 13350), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((13456, 13510), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (13485, 13510), True, 'import pandas as pd\n'), ((13882, 14050), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1.1, 2.1, 3.1, 4.1, 0], [1.0, 2.0, 3.0, 4.0, 0], [1.0, 2.0, 3.0, 4.0, 0],\n [1.0, 2.0, 3.0, 4.0, 1]]', 'columns': "['1', '2', '3', '4', 'target']"}), "(data=[[1.1, 2.1, 3.1, 4.1, 0], [1.0, 2.0, 3.0, 4.0, 0], [1.0, \n 2.0, 3.0, 4.0, 0], [1.0, 2.0, 3.0, 4.0, 1]], columns=['1', '2', '3',\n '4', 'target'])\n", (13894, 14050), True, 'import pandas as pd\n'), ((14112, 14177), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['dataframe'], {'target_column': '"""target"""'}), "(dataframe, target_column='target')\n", (14142, 14177), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((16556, 16629), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected_result'], {'check_dtype': '(False)'}), '(result, expected_result, check_dtype=False)\n', (16585, 16629), True, 'import pandas as pd\n'), ((1311, 1361), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['self._missing_data'], {}), '(self._missing_data)\n', (1341, 1361), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((1433, 1483), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 'data_preparation.InferenceData', (['self._missing_data'], {}), '(self._missing_data)\n', (1463, 1483), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((1838, 1934), 'gps_building_blocks.ml.statistical_inference.data_preparation.InferenceData', 
'data_preparation.InferenceData', ([], {'initial_data': 'self._missing_data', 'target_column': '"""non_ci_sono"""'}), "(initial_data=self._missing_data,\n target_column='non_ci_sono')\n", (1868, 1934), False, 'from gps_building_blocks.ml.statistical_inference import data_preparation\n'), ((14188, 14238), 'unittest.mock.patch.object', 'mock.patch.object', (['data_preparation', '"""_input_mock"""'], {}), "(data_preparation, '_input_mock')\n", (14205, 14238), False, 'from unittest import mock\n'), ((16328, 16403), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20]', 'columns': "['variable']"}), "(data=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20], columns=['variable'])\n", (16340, 16403), True, 'import pandas as pd\n'), ((14856, 15208), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0,\n 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, \n 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]]'], {'columns': "['variable_(-0.02, 4.0]', 'variable_(4.0, 8.0]', 'variable_(8.0, 12.0]',\n 'variable_(12.0, 16.0]', 'variable_(16.0, 20.0]']"}), "([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, \n 0, 0], [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, \n 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]], columns=[\n 'variable_(-0.02, 4.0]', 'variable_(4.0, 8.0]', 'variable_(8.0, 12.0]',\n 'variable_(12.0, 16.0]', 'variable_(16.0, 20.0]'])\n", (14868, 15208), True, 'import pandas as pd\n'), ((15425, 15774), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, \n 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1]]'], {'columns': "['variable_(-0.001, 2.0]', 'variable_(2.0, 4.0]', 'variable_(4.0, 6.0]',\n 'variable_(6.0, 8.0]', 'variable_(8.0, 20.0]']"}), "([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 1, 0, \n 0, 0], [0, 
1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, \n 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1]], columns=[\n 'variable_(-0.001, 2.0]', 'variable_(2.0, 4.0]', 'variable_(4.0, 6.0]',\n 'variable_(6.0, 8.0]', 'variable_(8.0, 20.0]'])\n", (15437, 15774), True, 'import pandas as pd\n'), ((15987, 16056), 'pandas.DataFrame', 'pd.DataFrame', (['[0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4]'], {'columns': "['variable']"}), "([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4], columns=['variable'])\n", (15999, 16056), True, 'import pandas as pd\n'), ((16126, 16195), 'pandas.DataFrame', 'pd.DataFrame', (['[0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4]'], {'columns': "['variable']"}), "([0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4], columns=['variable'])\n", (16138, 16195), True, 'import pandas as pd\n'), ((18074, 18132), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}"], {}), "({'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})\n", (18086, 18132), True, 'import pandas as pd\n'), ((18521, 18607), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['train_data.data', 'expected_train'], {'check_dtype': '(False)'}), '(train_data.data, expected_train, check_dtype=\n False)\n', (18550, 18607), True, 'import pandas as pd\n'), ((18620, 18699), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['test_data.data', 'expected_test'], {'check_dtype': '(False)'}), '(test_data.data, expected_test, check_dtype=False)\n', (18649, 18699), True, 'import pandas as pd\n'), ((16718, 16758), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2, 3, 3, 3, 3]'], {}), '([0, 0, 1, 1, 2, 2, 3, 3, 3, 3])\n', (16726, 16758), True, 'import numpy as np\n'), ((17280, 17318), 'sklearn.model_selection.GroupKFold', 'model_selection.GroupKFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (17306, 17318), False, 'from sklearn import model_selection\n'), ((17327, 17367), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2, 3, 3, 3, 3]'], {}), '([0, 0, 1, 1, 2, 2, 3, 3, 3, 3])\n', 
(17335, 17367), True, 'import numpy as np\n'), ((20307, 20365), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}"], {}), "({'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})\n", (20319, 20365), True, 'import pandas as pd\n'), ((20610, 20696), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['train_data.data', 'expected_train'], {'check_dtype': '(False)'}), '(train_data.data, expected_train, check_dtype=\n False)\n', (20639, 20696), True, 'import pandas as pd\n'), ((20709, 20788), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['test_data.data', 'expected_test'], {'check_dtype': '(False)'}), '(test_data.data, expected_test, check_dtype=False)\n', (20738, 20788), True, 'import pandas as pd\n'), ((19421, 19454), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (19442, 19454), False, 'from sklearn import model_selection\n'), ((20885, 20978), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'variable': [4, 5, 6, 7, 8, 9]}", 'index': "['4', '5', '6', '7', '8', '9']"}), "(data={'variable': [4, 5, 6, 7, 8, 9]}, index=['4', '5', '6',\n '7', '8', '9'])\n", (20897, 20978), True, 'import pandas as pd\n'), ((21005, 21106), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'variable': [0, 1, 2, 3, 7, 8, 9]}", 'index': "['0', '1', '2', '3', '7', '8', '9']"}), "(data={'variable': [0, 1, 2, 3, 7, 8, 9]}, index=['0', '1', '2',\n '3', '7', '8', '9'])\n", (21017, 21106), True, 'import pandas as pd\n'), ((21133, 21234), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'variable': [0, 1, 2, 3, 4, 5, 6]}", 'index': "['0', '1', '2', '3', '4', '5', '6']"}), "(data={'variable': [0, 1, 2, 3, 4, 5, 6]}, index=['0', '1', '2',\n '3', '4', '5', '6'])\n", (21145, 21234), True, 'import pandas as pd\n'), ((21284, 21352), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3]}"], {'index': "['0', '1', '2', '3']"}), "({'variable': [0, 1, 2, 3]}, 
index=['0', '1', '2', '3'])\n", (21296, 21352), True, 'import pandas as pd\n'), ((21362, 21422), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [4, 5, 6]}"], {'index': "['4', '5', '6']"}), "({'variable': [4, 5, 6]}, index=['4', '5', '6'])\n", (21374, 21422), True, 'import pandas as pd\n'), ((21432, 21492), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [7, 8, 9]}"], {'index': "['7', '8', '9']"}), "({'variable': [7, 8, 9]}, index=['7', '8', '9'])\n", (21444, 21492), True, 'import pandas as pd\n'), ((21545, 21670), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}", 'index': "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']"}), "(data={'variable': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=['0',\n '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", (21557, 21670), True, 'import pandas as pd\n'), ((21895, 21981), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['train_data.data', 'expected_train'], {'check_dtype': '(False)'}), '(train_data.data, expected_train, check_dtype=\n False)\n', (21924, 21981), True, 'import pandas as pd\n'), ((21994, 22073), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['test_data.data', 'expected_test'], {'check_dtype': '(False)'}), '(test_data.data, expected_test, check_dtype=False)\n', (22023, 22073), True, 'import pandas as pd\n'), ((3451, 3603), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[['0', 0.0, 2.5, 2.0], ['1', 0.0, 2.5, 2.0], ['1', 1.0, 2.0, 2.5], ['1', \n 1.0, 3.0, 1.5]]", 'columns': 'data.columns', 'index': 'data.index'}), "(data=[['0', 0.0, 2.5, 2.0], ['1', 0.0, 2.5, 2.0], ['1', 1.0, \n 2.0, 2.5], ['1', 1.0, 3.0, 1.5]], columns=data.columns, index=data.index)\n", (3463, 3603), True, 'import pandas as pd\n'), ((14317, 14367), 'unittest.mock.patch.object', 'mock.patch.object', (['data_preparation', '"""_print_mock"""'], {}), "(data_preparation, '_print_mock')\n", (14334, 14367), False, 'from unittest import mock\n'), 
((16768, 16814), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5]}"], {}), "({'variable': [0, 1, 2, 3, 4, 5]})\n", (16780, 16814), True, 'import pandas as pd\n'), ((16824, 16896), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [2, 3, 6, 7, 8, 9]}"], {'index': '[2, 3, 6, 7, 8, 9]'}), "({'variable': [2, 3, 6, 7, 8, 9]}, index=[2, 3, 6, 7, 8, 9])\n", (16836, 16896), True, 'import pandas as pd\n'), ((16927, 17016), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 4, 5, 6, 7, 8, 9]}"], {'index': '[0, 1, 4, 5, 6, 7, 8, 9]'}), "({'variable': [0, 1, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 4, 5, 6, \n 7, 8, 9])\n", (16939, 17016), True, 'import pandas as pd\n'), ((17043, 17103), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [6, 7, 8, 9]}"], {'index': '[6, 7, 8, 9]'}), "({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9])\n", (17055, 17103), True, 'import pandas as pd\n'), ((17113, 17173), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 4, 5]}"], {'index': '[0, 1, 4, 5]'}), "({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5])\n", (17125, 17173), True, 'import pandas as pd\n'), ((17183, 17231), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [2, 3]}"], {'index': '[2, 3]'}), "({'variable': [2, 3]}, index=[2, 3])\n", (17195, 17231), True, 'import pandas as pd\n'), ((17377, 17423), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5]}"], {}), "({'variable': [0, 1, 2, 3, 4, 5]})\n", (17389, 17423), True, 'import pandas as pd\n'), ((17433, 17505), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [2, 3, 6, 7, 8, 9]}"], {'index': '[2, 3, 6, 7, 8, 9]'}), "({'variable': [2, 3, 6, 7, 8, 9]}, index=[2, 3, 6, 7, 8, 9])\n", (17445, 17505), True, 'import pandas as pd\n'), ((17536, 17625), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 4, 5, 6, 7, 8, 9]}"], {'index': '[0, 1, 4, 5, 6, 7, 8, 9]'}), "({'variable': [0, 1, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 4, 5, 6, \n 7, 8, 9])\n", (17548, 17625), True, 'import pandas as 
pd\n'), ((17652, 17712), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [6, 7, 8, 9]}"], {'index': '[6, 7, 8, 9]'}), "({'variable': [6, 7, 8, 9]}, index=[6, 7, 8, 9])\n", (17664, 17712), True, 'import pandas as pd\n'), ((17722, 17782), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 4, 5]}"], {'index': '[0, 1, 4, 5]'}), "({'variable': [0, 1, 4, 5]}, index=[0, 1, 4, 5])\n", (17734, 17782), True, 'import pandas as pd\n'), ((17792, 17840), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [2, 3]}"], {'index': '[2, 3]'}), "({'variable': [2, 3]}, index=[2, 3])\n", (17804, 17840), True, 'import pandas as pd\n'), ((18859, 18931), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [4, 5, 6, 7, 8, 9]}"], {'index': '[4, 5, 6, 7, 8, 9]'}), "({'variable': [4, 5, 6, 7, 8, 9]}, index=[4, 5, 6, 7, 8, 9])\n", (18871, 18931), True, 'import pandas as pd\n'), ((18962, 19040), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 7, 8, 9]}"], {'index': '[0, 1, 2, 3, 7, 8, 9]'}), "({'variable': [0, 1, 2, 3, 7, 8, 9]}, index=[0, 1, 2, 3, 7, 8, 9])\n", (18974, 19040), True, 'import pandas as pd\n'), ((19071, 19149), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5, 6]}"], {'index': '[0, 1, 2, 3, 4, 5, 6]'}), "({'variable': [0, 1, 2, 3, 4, 5, 6]}, index=[0, 1, 2, 3, 4, 5, 6])\n", (19083, 19149), True, 'import pandas as pd\n'), ((19181, 19241), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3]}"], {'index': '[0, 1, 2, 3]'}), "({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3])\n", (19193, 19241), True, 'import pandas as pd\n'), ((19251, 19305), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [4, 5, 6]}"], {'index': '[4, 5, 6]'}), "({'variable': [4, 5, 6]}, index=[4, 5, 6])\n", (19263, 19305), True, 'import pandas as pd\n'), ((19315, 19369), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [7, 8, 9]}"], {'index': '[7, 8, 9]'}), "({'variable': [7, 8, 9]}, index=[7, 8, 9])\n", (19327, 19369), True, 'import pandas as pd\n'), 
((19464, 19536), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [4, 5, 6, 7, 8, 9]}"], {'index': '[4, 5, 6, 7, 8, 9]'}), "({'variable': [4, 5, 6, 7, 8, 9]}, index=[4, 5, 6, 7, 8, 9])\n", (19476, 19536), True, 'import pandas as pd\n'), ((19567, 19645), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 7, 8, 9]}"], {'index': '[0, 1, 2, 3, 7, 8, 9]'}), "({'variable': [0, 1, 2, 3, 7, 8, 9]}, index=[0, 1, 2, 3, 7, 8, 9])\n", (19579, 19645), True, 'import pandas as pd\n'), ((19676, 19754), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3, 4, 5, 6]}"], {'index': '[0, 1, 2, 3, 4, 5, 6]'}), "({'variable': [0, 1, 2, 3, 4, 5, 6]}, index=[0, 1, 2, 3, 4, 5, 6])\n", (19688, 19754), True, 'import pandas as pd\n'), ((19786, 19846), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [0, 1, 2, 3]}"], {'index': '[0, 1, 2, 3]'}), "({'variable': [0, 1, 2, 3]}, index=[0, 1, 2, 3])\n", (19798, 19846), True, 'import pandas as pd\n'), ((19856, 19910), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [4, 5, 6]}"], {'index': '[4, 5, 6]'}), "({'variable': [4, 5, 6]}, index=[4, 5, 6])\n", (19868, 19910), True, 'import pandas as pd\n'), ((19920, 19974), 'pandas.DataFrame', 'pd.DataFrame', (["{'variable': [7, 8, 9]}"], {'index': '[7, 8, 9]'}), "({'variable': [7, 8, 9]}, index=[7, 8, 9])\n", (19932, 19974), True, 'import pandas as pd\n')] |
"""Evaluate the density (defined as ratio between zero and non-zero values) in matrices."""
import os
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import scipy.signal as sig
from tqdm import tqdm
import common as com_xp
def convolution(x, y):
    """Return the first element of the 'valid'-mode convolution of x with y."""
    valid_part = np.convolve(x, y, mode='valid')
    return valid_part[0]
def pearson_correlation(x, y):
    """Return the absolute value of the Pearson correlation coefficient between x and y."""
    coefficient, _pvalue = stats.pearsonr(x, y)
    return np.abs(coefficient)
def cross_corr(x, y):
    """Return the absolute 'valid'-mode cross-correlation of z-scored x and y.

    Both inputs are standardized (zero mean, unit standard deviation) before
    correlating, so for equal-length inputs the result is sum(zx * zy).

    Fixes two defects in the original:
    - ``y += y.std()`` added the standard deviation instead of dividing by it,
      so y was never actually standardized.
    - x and y were modified in place (``-=``/``/=``), mutating the caller's arrays.
    """
    # Copy so the caller's arrays are not mutated by the standardization below.
    x = np.array(x, dtype=float)
    y = np.array(y, dtype=float)
    x = (x - x.mean()) / x.std()
    y = (y - y.mean()) / y.std()  # fix: divide by std (was `y += y.std()`)
    xcorr = sig.correlate(x, y, mode='valid')
    return np.abs(xcorr[0])
def cross_correlation(x, y):
    """Return normalized cross-correlation, best offset, and the correlation at that offset.

    Assuming x and y to be same-size arrays, in full mode we get a
    cross-correlation array of size x.size + y.size - 1 = 2 * n - 1
    (with n = x.size). The lag axis runs from 1 - n to n - 1, so the offset
    of the strongest correlation is ``argmax - (n - 1)`` (0 means the signals
    align best with no shift).

    Normalization: x is scaled by 1 / (std * n) and y by 1 / std, so for
    perfectly correlated signals the peak value is 1.0.

    Fixes the original, which computed ``_xcorr`` but then returned three
    undefined names (``xcorr, offset, max_corr_at_offset`` -> NameError).
    """
    assert x.size == y.size
    n = x.size
    x = (x - np.mean(x)) / (np.std(x) * n)
    y = (y - np.mean(y)) / np.std(y)
    xcorr = np.correlate(x, y, mode='full')
    lags = np.arange(1 - n, n)  # lag value for each element of the full correlation
    best_index = int(xcorr.argmax())
    offset = int(lags[best_index])
    max_corr_at_offset = xcorr[best_index]
    return xcorr, offset, max_corr_at_offset
def binize(data, bin_size):
    """Reshape a 1-D numpy array into rows of length bin_size, discarding the remainder."""
    usable_length = (data.size // bin_size) * bin_size
    return data[:usable_length].reshape(-1, bin_size)
if __name__ == '__main__':
    # Range of SVD dimensions (columns) to load from each aligned model
    START = 0
    END = 10000
    #SVD_DIRPATH = '/home/kabbach/entropix/models/frontiers/aligned/'
    SVD_DIRPATH = '/Users/akb/Github/entropix/models/frontiers/aligned/'
    #MODEL_NAMES = ['enwiki07', 'oanc', 'enwiki2', 'acl', 'enwiki4', 'bnc']
    MODEL_NAMES = ['enwiki07', 'oanc']
    EPSILON = 1e-4  # NOTE(review): unused in the active code path
    BIN_SIZE = 30  # NOTE(review): only used by the commented-out binning code below
    # test_array = np.random.rand(1,30)[0]
    # print(test_array)
    # prod = itertools.permutations(test_array)
    # print(sum(1 for i in prod))
    # test_arr = np.array([[1, 2, 0, 3], [3, 9, 0, 4]])
    # print(np.count_nonzero(test_arr==0, axis=1))
    models = com_xp.load_aligned_models(MODEL_NAMES, SVD_DIRPATH, START, END)
    # Compare every unordered pair of loaded models
    for tuple1, tuple2 in itertools.combinations(models, 2):
        name1 = tuple1[0]
        model1 = tuple1[1]
        name2 = tuple2[0]
        model2 = tuple2[1]
        print('Processing models {} and {}'.format(name1, name2))
        # with open('enwiki07.dim1000.txt', 'w', encoding='utf-8') as enwiki:
        # for row in model1[:, 1000]:
        # print(row, file=enwiki)
        # with open('oanc.dim1000.txt', 'w', encoding='utf-8') as oanc:
        # for row in model2[:, 1000]:
        # print(row, file=oanc)
        # plt.plot(model2[:, 1000])
        # plt.show()
        xcorrx = []
        # Correlate the two models column by column (dimension by dimension)
        for col1, col2 in tqdm(zip(model1.T, model2.T), total=model1.shape[1]):
            # NOTE(review): confirm cross_correlation returns a scalar-like value,
            # since xcorrx is converted to an array and plotted directly below
            xcorr = cross_correlation(col1, col2)
            #xcorr = pearson_correlation(col1, col2)
            xcorrx.append(xcorr)
        xcorrx = np.array(xcorrx)
        # avgs = binize(xcorrx, BIN_SIZE).mean(axis=1)
        #zeros = np.count_nonzero(binize(xcorrx, BIN_SIZE) == 0, axis=1)
        # plt.plot(avgs)
        #print(xcorrx)
        #print(zeros)
        plt.plot(xcorrx[1000:])
        plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.signal.correlate",
"numpy.std",
"common.load_aligned_models",
"scipy.stats.pearsonr",
"itertools.combinations",
"numpy.mean",
"numpy.array",
"numpy.correlate",
"numpy.convolve"
] | [((665, 698), 'scipy.signal.correlate', 'sig.correlate', (['x', 'y'], {'mode': '"""valid"""'}), "(x, y, mode='valid')\n", (678, 698), True, 'import scipy.signal as sig\n'), ((710, 726), 'numpy.abs', 'np.abs', (['xcorr[0]'], {}), '(xcorr[0])\n', (716, 726), True, 'import numpy as np\n'), ((1198, 1229), 'numpy.correlate', 'np.correlate', (['x', 'y'], {'mode': '"""full"""'}), "(x, y, mode='full')\n", (1210, 1229), True, 'import numpy as np\n'), ((2037, 2101), 'common.load_aligned_models', 'com_xp.load_aligned_models', (['MODEL_NAMES', 'SVD_DIRPATH', 'START', 'END'], {}), '(MODEL_NAMES, SVD_DIRPATH, START, END)\n', (2063, 2101), True, 'import common as com_xp\n'), ((2128, 2161), 'itertools.combinations', 'itertools.combinations', (['models', '(2)'], {}), '(models, 2)\n', (2150, 2161), False, 'import itertools\n'), ((309, 340), 'numpy.convolve', 'np.convolve', (['x', 'y'], {'mode': '"""valid"""'}), "(x, y, mode='valid')\n", (320, 340), True, 'import numpy as np\n'), ((1174, 1183), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1180, 1183), True, 'import numpy as np\n'), ((2961, 2977), 'numpy.array', 'np.array', (['xcorrx'], {}), '(xcorrx)\n', (2969, 2977), True, 'import numpy as np\n'), ((3184, 3207), 'matplotlib.pyplot.plot', 'plt.plot', (['xcorrx[1000:]'], {}), '(xcorrx[1000:])\n', (3192, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3224, 3226), True, 'import matplotlib.pyplot as plt\n'), ((472, 492), 'scipy.stats.pearsonr', 'stats.pearsonr', (['x', 'y'], {}), '(x, y)\n', (486, 492), True, 'import scipy.stats as stats\n'), ((1111, 1121), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1118, 1121), True, 'import numpy as np\n'), ((1126, 1135), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1132, 1135), True, 'import numpy as np\n'), ((1159, 1169), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1166, 1169), True, 'import numpy as np\n')] |
import csv
import os
import logging
import random
import shelve
import numpy
import arrow
import matplotlib as mpl
from matplotlib import pyplot as plt
from platypus import NSGAII, OMOPSO, EpsNSGAII, SMPSO, GDE3, SPEA2, nondominated
from eflows_optimization import settings
from belleflopt import models
from belleflopt import optimize
from belleflopt import comet
# Module-level logger shared by all functions in this file
log = logging.getLogger("eflows.optimization.support")
# Downstream names that mark a terminal segment (water leaves the modeled network)
NO_DOWNSTREAM = ("OCEAN", "MEXICO", "CLOSED_BASIN")
# See https://github.com/matplotlib/matplotlib/issues/5907 - solves an issue with plotting *lots* of points on a figure
mpl.rcParams['agg.path.chunksize'] = 10000
def day_of_water_year(year, month, day):
	"""
	Return the 1-indexed day of the water year for a calendar date.

	Water years start on October 1, so October 1 is day 1 and
	September 30 of the following calendar year is the last day
	(365, or 366 when a leap day falls in the water year).

	Improvement: this is pure date arithmetic, so it now uses the standard
	library's ``datetime.date`` instead of the third-party ``arrow`` package.
	Behavior is unchanged.

	:param year: calendar year of the date
	:param month: calendar month (1-12)
	:param day: calendar day of month
	:return: int day of water year, starting at 1
	"""
	import datetime  # stdlib; avoids the arrow dependency for simple date math
	eval_date = datetime.date(year, month, day)
	if month >= 10:
		eval_year = year
	else:
		eval_year = year - 1  # Jan-Sep belong to the water year that began the previous October
	water_year_start = datetime.date(eval_year, 10, 1)
	return (eval_date - water_year_start).days + 1  # add one because otherwise everything would be zero-indexed
def water_year(year, month):
	"""
	Given a year and a month, returns the water year
	:param year:
	:param month:
	:return:
	"""
	# Water years start in October, so Oct-Dec dates belong to the next year's water year.
	return year + 1 if month >= 10 else year
def run_optimize_new(algorithm=NSGAII,
                     NFE=1000,
                     popsize=25,
                     starting_water_price=800,
                     economic_water_proportion = 0.9,
                     seed=20200224,
                     model_run_name="upper_cosumnes_subset_2010",
                     use_comet=True,
                     show_plots=True,
                     run_problem=True,
                     min_proportion=0,
                     checkpoint_interval=True,
                     simplified=False,
                     plot_all=False,
                     plot_best=False):
	"""
	Runs a single optimization run, defaulting to 1000 NFE using NSGAII. Won't output plots to screen
	by default. Outputs tables and figures to the data/results folder.

	:param algorithm: a platypus Algorithm object (not the instance, but the actual item imported from platypus)
		defaults to NSGAII.
	:param NFE: How many times should the objective function be run?
	:param popsize: The size of the population to use
	:param starting_water_price: initial per-unit price fed to the StreamNetworkProblem's benefit calculator
	:param economic_water_proportion: fraction of total water targeted for economic extraction
	:param seed: Random seed to start
	:param model_run_name: name of the ModelRun database record providing segments and water year
	:param use_comet: when True (and run_problem is True), logs parameters and results to comet.ml
	:param show_plots: Whether plots should be output to the screen
	:param run_problem: When True, runs it, when False, just sets it up and returns it. Lets us have a consistent problem
		set up in many contexts
	:param min_proportion: What is the minimum proportion of flow that we can allocate to any single segment? Raising
		this value (min 0, max 0.999999999) prevents the model from extracting all its water in one spot.
	:param checkpoint_interval: How many NFE should elapse before this writes out plots and shelf results. Then writes
		those out every NFE interval until more than NFE. If True instead of a number, then defaults to int(NFE/10).
		If NFE is not evenly divisible by checkpoint_interval, then runs to the largest multiple of checkpoint_interval
		less than NFE.
	:param simplified: passed through to the problem and plotting code for the simplified allocation scheme
	:param plot_all: Makes a hydrograph/component plot for every segment and population member in the final solution set.
	:param plot_best: when True, creates the output folder up front so improved results can be plotted as encountered.
	:return: dict of {"problem": problem instance, "solution": algorithm instance}
	"""

	if use_comet and run_problem:
		experiment = comet.new_experiment()
		experiment.log_parameters({"algorithm": algorithm,
		                           "NFE": NFE,
		                           "popsize": popsize,
		                           "seed": seed,
		                           "starting_water_price": starting_water_price,
		                           "economic_water_proportion": economic_water_proportion,
		                           "model_name": model_run_name,
		                           "min_eflows_proportion": min_proportion,
		                           })
	else:
		experiment = None

	# fix: was `random.seed = seed`, which rebinds the seed function to an int
	# instead of seeding the random number generator
	random.seed(seed)

	model_run = models.ModelRun.objects.get(name=model_run_name)

	if plot_best:
		output_folder = get_output_folder(NFE=NFE,
		                                  algorithm=algorithm,
		                                  model_run_name=model_run_name,
		                                  popsize=popsize,
		                                  seed=seed)
	else:
		output_folder = None

	stream_network = optimize.StreamNetwork(model_run.segments, model_run.water_year, model_run)
	problem = optimize.StreamNetworkProblem(stream_network,
	                                        starting_water_price=starting_water_price,
	                                        total_units_needed_factor=economic_water_proportion,
	                                        min_proportion=min_proportion,
	                                        simplified=simplified,
	                                        plot_output_folder=output_folder)
	log.info("Looking for {} CFS of water to extract".format(problem.stream_network.economic_benefit_calculator.total_units_needed))

	eflows_opt = algorithm(problem, generator=optimize.InitialFlowsGenerator(), population_size=popsize)

	if run_problem:
		# Resolve the checkpoint interval: True means "pick a sensible default",
		# False/None means "no checkpoints" (one full run).
		if checkpoint_interval is True:
			if NFE > 1000:
				checkpoint_interval = int(NFE/10)
			else:
				checkpoint_interval = int(NFE/2)
		if checkpoint_interval is False or checkpoint_interval is None:
			checkpoint_interval = NFE

		# TODO: This construction means the comet.ml metric logging is duplicated, but whatever right now.
		for total_nfe in range(checkpoint_interval, NFE+1, checkpoint_interval):
			eflows_opt.run(checkpoint_interval)
			make_plots(eflows_opt, problem, total_nfe, algorithm, seed, popsize, model_run_name, experiment, show_plots, plot_all=plot_all, simplified=simplified)

		log.info("Completed at {}".format(arrow.utcnow()))

		if use_comet:
			experiment.end()

	return {"problem": problem, "solution": eflows_opt}
def incremental_maximums(values, nfe, seed=1):
	"""
	Generator yielding (nfe, value) pairs only when a new running maximum is seen,
	so convergence plots can show just the improving values.

	:param values: sequence of objective values in evaluation order
	:param nfe: parallel sequence of NFE counts for each value
	:param seed: starting threshold; values must exceed this before any pair is yielded
	:return: yields (nfe[i], values[i]) tuples for strictly improving values
	"""
	running_max = seed
	for index, current in enumerate(values):
		if current > running_max:
			running_max = current
			yield nfe[index], current
def get_best_items_for_convergence(NFE, objective_values):
	"""Return (nfe_values, best_objective_values) containing only the strictly improving results."""
	improving_pairs = list(incremental_maximums(objective_values, NFE))  # list of (nfe, value) tuples
	return zip(*improving_pairs)  # unzip into two parallel tuples
def write_variables_as_shelf(model_run, output_folder):
	"""Persist the nondominated solutions' decision variables, objectives, and raw results to a shelf file."""
	log.info("Writing out variables and objectives to shelf")
	pareto_solutions = nondominated(model_run.result)
	shelf_path = os.path.join(output_folder, "variables.shelf")
	with shelve.open(shelf_path) as shelf:
		shelf["variables"] = [solution.variables for solution in pareto_solutions]
		shelf["objectives"] = [solution.objectives for solution in pareto_solutions]
		shelf["result"] = model_run.result
		shelf.sync()
def plot_all_solutions(solution, problem, simplified, segment_name, output_folder, show_plots):
	"""Plot hydrograph/component figures for every segment of every nondominated solution."""
	for solution_index, member in enumerate(nondominated(solution.result)):
		# Apply this member's flow allocations to the network before plotting.
		problem.stream_network.set_segment_allocations(member.variables, simplified=simplified)
		output_segment_name = "{}_sol_{}".format(segment_name, solution_index)
		for segment in problem.stream_network.stream_segments.values():
			segment.plot_results_with_components(screen=show_plots, output_folder=output_folder, name_prefix=output_segment_name)
def replot_from_shelf():
	"""Stub: intended to rebuild plots from a saved variables.shelf file. Not yet implemented."""
	pass
def make_plots(model_run, problem, NFE, algorithm, seed, popsize, name, experiment=None, show_plots=False, plot_all=False, simplified=False):
	"""
	Write all post-run outputs for one optimization run into the results folder:
	a variables shelf, a Pareto-front plot, four convergence plots (full history and
	best-only, for each objective), and per-segment hydrograph/component plots.

	:param model_run: completed platypus algorithm instance (provides .result)
	:param problem: the optimized problem (provides .iterations, .objective_1, .objective_2)
	:param NFE: number of function evaluations completed, used in folder/file names
	:param algorithm: the platypus algorithm class, used for naming outputs
	:param seed: random seed of the run, used for naming outputs
	:param popsize: population size of the run, used for naming outputs
	:param name: model run name, used for folder and segment-plot naming
	:param experiment: optional comet.ml experiment to log figures to
	:param show_plots: when True, also display figures on screen
	:param plot_all: when True, plot every segment for every nondominated solution
	:param simplified: passed through to allocation/plotting when plot_all is True
	"""
	output_folder = get_output_folder(NFE, algorithm, name, popsize, seed)
	os.makedirs(output_folder, exist_ok=True)

	write_variables_as_shelf(model_run, output_folder)

	# Pareto front of the two objectives (_plot is defined elsewhere in this module)
	_plot(model_run, "Pareto Front: {} NFE, PopSize: {}".format(NFE, popsize),
			experiment=experiment,
			show=show_plots,
			filename=os.path.join(output_folder,
					"pareto_{}_seed{}_nfe{}_popsize{}.png".format(algorithm.__name__, str(seed), str(NFE),
					                                              str(popsize)))
			)
	# Convergence plots can fail with OverflowError when there are too many points
	# (see the matplotlib chunksize note at the top of the module)
	try:
		_plot_convergence(problem.iterations, problem.objective_1,
						"Environmental Benefit v NFE. Alg: {}, PS: {}, Seed: {}".format(algorithm.__name__, str(popsize),
						                                                                str(seed)),
						experiment=experiment,
						show=show_plots,
						filename=os.path.join(output_folder,
								"convergence_obj1_{}_seed{}_nfe{}_popsize{}.png".format(algorithm.__name__,
								                                                        str(seed), str(NFE),
								                                                        str(popsize)))
						)
		_plot_convergence(problem.iterations, problem.objective_2,
						"Economic Benefit v NFE Alg: {}, PS: {}, Seed: {}".format(algorithm.__name__, str(popsize),
						                                                          str(seed)),
						experiment=experiment,
						show=show_plots,
						filename=os.path.join(output_folder,
								"convergence_obj2_{}_seed{}_nfe{}_popsize{}.png".format(algorithm.__name__,
								                                                        str(seed),
								                                                        str(NFE),
								                                                        str(popsize)))
						)
		# Same two plots, but reduced to only the strictly improving values
		_plot_convergence(*get_best_items_for_convergence(problem.iterations, problem.objective_1),
						title="Environmental Benefit v NFE. Alg: {}, PS: {}, Seed: {}".format(algorithm.__name__, str(popsize), str(seed)),
						experiment=experiment,
						show=show_plots,
						filename=os.path.join(output_folder,
								"best_convergence_obj1_{}_seed{}_nfe{}_popsize{}.png".format(
										algorithm.__name__,
										str(seed), str(NFE),
										str(popsize)))
						)
		_plot_convergence(*get_best_items_for_convergence(problem.iterations, problem.objective_2),
						title="Economic Benefit v NFE Alg: {}, PS: {}, Seed: {}".format(algorithm.__name__, str(popsize), str(seed)),
						experiment=experiment,
						show=show_plots,
						filename=os.path.join(output_folder,
								"best_convergence_obj2_{}_seed{}_nfe{}_popsize{}.png".format(
										algorithm.__name__,
										str(seed),
										str(NFE),
										str(popsize)))
						)
	except OverflowError:
		log.error("Couldn't outplot convergence plot - too many points. Continuing anyway, but you may wish to stop"
				"this run if it's not outputting convergence plots anymore!")

	segment_name = "scplot_m{}_{}_s{}_nfe{}_ps{}".format(name, algorithm.__name__,
	                                                     str(seed),
	                                                     str(NFE),
	                                                     str(popsize))
	if plot_all:
		plot_all_solutions(solution=model_run,
							problem=problem,
							simplified=simplified,
							segment_name=segment_name,
							output_folder=output_folder,
							show_plots=show_plots)
	else:
		# just plot the last one done - not necessarily the most optimal in *any* sense
		for segment in problem.stream_network.stream_segments.values():
			segment.plot_results_with_components(screen=show_plots, output_folder=output_folder, name_prefix=segment_name)
def get_output_folder(NFE, algorithm, model_run_name, popsize, seed):
	"""Build the results path: <BASE_DIR>/data/results/<model>/<NFE>/<algorithm>/<seed>/<popsize>."""
	run_parts = [model_run_name, str(NFE), algorithm.__name__, str(seed), str(popsize)]
	return os.path.join(settings.BASE_DIR, "data", "results", *run_parts)
def run_experimenter(NFE=50000,
                     popsizes=(100, 50),
                     algorithms=(NSGAII, SPEA2, SMPSO, GDE3),
                     seeds=(19991201, 18000408, 31915071, 20200224),
                     output_shelf=None,
                     problem_from_shelf=False,
                     resume=False,
                     model_run_names=("upper_cosumnes_subset_2010", "upper_cosumnes_subset_2011"),
                     starting_water_price=800,
                     economic_water_proportion=0.8, ):
	"""
	Run the full experiment grid: for every model run, algorithm, seed, and population
	size, set up the shared problem once per model run, then optimize for NFE evaluations,
	logging each combination to its own comet.ml experiment and writing plots/shelves.

	:param NFE: function evaluations per combination
	:param popsizes: population sizes to sweep
	:param algorithms: platypus algorithm classes, or (class, kwargs_dict) tuples for
		algorithms that need extra constructor arguments
	:param seeds: random seeds to sweep
	:param output_shelf: unused (kept for backward compatibility with earlier resume support)
	:param problem_from_shelf: unused (kept for backward compatibility)
	:param resume: unused (kept for backward compatibility)
	:param model_run_names: ModelRun names to evaluate
	:param starting_water_price: passed through to run_optimize_new
	:param economic_water_proportion: passed through to run_optimize_new
	"""
	for model_run_name in model_run_names:
		# Build the problem once per model run and reset it between combinations below
		problem = run_optimize_new(model_run_name=model_run_name,
		                           starting_water_price=starting_water_price,
		                           economic_water_proportion=economic_water_proportion,
		                           use_comet=False,
		                           run_problem=False)["problem"]
		for algorithm in algorithms:
			if isinstance(algorithm, tuple):  # if the algorithm has arguments, split them out so we can send them in
				algorithm_args = algorithm[1]
				algorithm = algorithm[0]
			else:
				algorithm_args = {}
			for seed in seeds:
				# fix: was `random.seed = seed`, which rebinds the seed function to an int
				# instead of seeding the random number generator
				random.seed(seed)
				for popsize in popsizes:
					log.info("{}, {}, {}".format(algorithm.__name__, seed, popsize))
					experiment = comet.new_experiment()
					experiment.log_parameters({"algorithm": algorithm,
					                           "NFE": NFE,
					                           "popsize": popsize,
					                           "seed": seed,
					                           "starting_water_price": starting_water_price,
					                           "economic_water_proportion": economic_water_proportion,
					                           "model_name": model_run_name
					                           })
					problem.reset()
					eflows_opt = algorithm(problem, generator=optimize.InitialFlowsGenerator(), population_size=popsize, **algorithm_args)
					eflows_opt.run(NFE)
					make_plots(eflows_opt, problem, NFE, algorithm, seed, popsize, model_run_name, experiment=experiment, show_plots=False)
					experiment.end()
def validate_flow_methods(model_run_name="upper_cosumnes_subset_2010", show_plot=True):
	"""Sweep fixed flow proportions in [0, 1] and record the benefit of each.

	Each proportion is evaluated by running one NFE with the initial
	population pinned to that proportion; the resulting benefit curve
	is plotted, saved under the results folder, and returned.

	:return: dict with "x" (iterations) and "y" (objective 1 values)
	"""
	problem = run_optimize_new(run_problem=False, model_run_name=model_run_name)["problem"]
	for proportion in numpy.linspace(0, 1, 101):
		log.info(proportion)
		generator = optimize.SimpleInitialFlowsGenerator(proportion)
		# algorithm choice is irrelevant here - we only evaluate a single NFE
		single_step = NSGAII(problem, generator=generator, population_size=1)
		single_step.run(1)  # 1 NFE just to see what these initial flows do
	plt.plot(problem.iterations, problem.objective_1)
	plt.xlabel("Percent of Available Flow")
	plt.ylabel("Environmental Benefit")
	plt.savefig(os.path.join(settings.BASE_DIR, "data", "results", "validation_plot_{}.png".format(model_run_name)), dpi=300)
	if show_plot:
		plt.show()
	plt.close()
	return {"x": problem.iterations, "y": problem.objective_1}
def validation_plot_thesis(show_plot=True, results_2010=None, results_2011=None, model_run="cosumnes_michigan_bar"):
	"""
	Side-by-side validation curves for water years 2010 and 2011.

	Hardcoded items because they're for my thesis, not meant for more general use.
	:param show_plot: when True, displays the figure interactively
	:param results_2010: optional precomputed {"x", "y"} dict; computed when None
	:param results_2011: optional precomputed {"x", "y"} dict; computed when None
	:param model_run: base model run name; "_2010"/"_2011" suffixes are appended
	:return: (results_2010, results_2011)
	"""
	if results_2010 is None:
		results_2010 = validate_flow_methods("{}_2010".format(model_run), show_plot=False)
	if results_2011 is None:
		results_2011 = validate_flow_methods("{}_2011".format(model_run), show_plot=False)
	# Creates two subplots and unpacks the output array immediately
	fig = plt.figure()
	plt.margins(0)
	# invisible full-size axes used only to carry the shared axis labels
	full_plot = fig.add_subplot(1, 1, 1)  # The big subplot
	full_plot.set_xlabel("Percent of Available Flow")
	full_plot.set_ylabel("Environmental Benefit", labelpad=20)  # move it off the tick values
	# Turn off axis lines and ticks of the big subplot
	full_plot.spines['top'].set_color('none')
	full_plot.spines['bottom'].set_color('none')
	full_plot.spines['left'].set_color('none')
	full_plot.spines['right'].set_color('none')
	full_plot.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
	left_plot = fig.add_subplot(1, 2, 1)  # left panel: water year 2010
	left_plot.plot(results_2010["x"], results_2010["y"])
	left_plot.set_title('2010')
	right_plot = fig.add_subplot(1, 2, 2, sharey=left_plot)  # right panel shares the y axis
	right_plot.plot(results_2011["x"], results_2011["y"])
	right_plot.set_title('2011')
	# remove the axis values on the left to make space
	right_plot.tick_params(left=True, labelleft=False, )
	plt.savefig(os.path.join(settings.BASE_DIR, "data", "results", "validation_plot_thesis.png"), dpi=300)
	if show_plot:
		plt.show()
	plt.close()
	return results_2010, results_2011
def _plot(optimizer, title, experiment=None, filename=None, show=False):
	"""Scatter the nondominated front of an optimizer run.

	Environmental benefit on x, economic benefit on y; optionally logs to a
	comet experiment, saves to file, and/or shows on screen.
	"""
	front = nondominated(optimizer.result)
	env_values = [solution.objectives[0] for solution in front]
	econ_values = [solution.objectives[1] for solution in front]
	if experiment is not None:
		# push the raw objective values up to comet for later inspection
		comet.log_metric("EnvironmentalBenefit", env_values, experiment=experiment)
		comet.log_metric("EconomicBenefit", econ_values, experiment=experiment)
	log.debug("X: {}".format(env_values))
	log.debug("Y: {}".format(econ_values))
	plt.scatter(env_values, econ_values)
	plt.xlabel("Environmental Flow Benefit")
	plt.ylabel("Economic Benefit")
	plt.title(title)
	if experiment is not None:
		experiment.log_figure(title)
	if filename:
		plt.savefig(fname=filename, dpi=300)
	if show:
		plt.show()
	plt.close()
def _plot_convergence(i, objective, title, experiment=None, filename=None, show=False):
	"""Line plot of an objective's value against NFE.

	Optionally logs the figure to a comet experiment, saves it, and/or shows it.
	"""
	plt.plot(i, objective, color='steelblue', linewidth=1)
	plt.xlabel("NFE")
	plt.ylabel("Objective Value")
	plt.title(title)
	if experiment:
		experiment.log_figure(title)
	if filename:
		plt.savefig(fname=filename, dpi=300)
	if show:
		plt.show()
	plt.close()
def output_table(hucs, output_path=os.path.join(settings.BASE_DIR, "data", "results.csv")):
	"""Write a per-HUC CSV summarizing water allocations and unmet species needs.

	For every HUC, records its id, initially available water, final allocation,
	species assemblage, and which species' minimum flow needs the allocation
	fails to meet (with the shortfall expressed as a percentage).
	"""
	rows = []
	for huc in hucs:
		assemblage = huc.assemblage.all()
		unmet = []
		for species in assemblage:
			min_need = models.SpeciesComponent.objects.get(species=species, component__name="min_flow").value
			if min_need > huc.flow_allocation:
				if huc.flow_allocation == 0:
					pct = "No Flow"
				else:
					pct = round((min_need / huc.flow_allocation) * 100)
				unmet.append("{} ({}%)".format(species.common_name, pct))
		row = {
			"HUC_12": huc.huc_id,
			"initial_available": huc.initial_available_water,
			"allocation": huc.flow_allocation,
			"assemblage": ", ".join([species.common_name for species in assemblage]),
			"unmet_needs": ", ".join(unmet),
			"unmet_count": len(unmet),
			"richness": huc.assemblage.count(),
		}
		row["unmet_proportion"] = row["unmet_count"] / row["richness"]
		rows.append(row)
	fields = ["HUC_12", "initial_available", "allocation", "assemblage", "unmet_needs",
			  "unmet_count", "richness", "unmet_proportion"]
	with open(output_path, 'w', newline="\n") as output_file:
		writer = csv.DictWriter(output_file, fieldnames=fields)
		writer.writeheader()
		writer.writerows(rows)
def run_optimize_many():
	"""
	Runs through many algorithms and many seeds - outputs results for all.

	All sweep parameters are hardcoded; every combination of algorithm,
	population size, and seed is passed to run_optimize.
	:return:
	"""
	nfe = 800
	for algorithm in (NSGAII, SMPSO, SPEA2, GDE3):
		for pop in (25, 50, 100):
			for seed in (20181214, 236598, 12958):
				run_optimize(algorithm, NFE=nfe, popsize=pop, seed=seed)
def _segment_plot_helper(function, segment_id, component_id, screen, output_path, **kwargs):
	"""
	Look up a segment/component benefit object and invoke one of its plot methods.

	:param function: name of a plotting method on the benefit object; it must
		accept a "screen" parameter plus any extra keyword arguments
	:param segment_id: An NHDPlus COMID
	:param component_id: A CEFF Flow Component ID
	:param screen: when True, displays the plot on the screen
	:param output_path: when specified, saves the figure there at 300 dpi
	:param kwargs: any other keyword arguments for the plotting method
	:return: None - plots and/or saves figure as specified
	"""
	segment_component = models.SegmentComponent.objects.get(component__ceff_id=component_id,
														stream_segment__com_id=segment_id)
	segment_component.make_benefit()
	plotter = getattr(segment_component.benefit, function)
	plot = plotter(screen=screen, **kwargs)
	if output_path is not None:
		# newer seaborn returns a subplot - pull the enclosing figure off it
		figure = plot.get_figure()
		figure.savefig(output_path, dpi=300)
	plt.close()
def plot_segment_component_annual_benefit(segment_id, component_id, screen=True, output_path=None):
	"""
	Plot the annual benefit surface for one segment's flow component.

	Thin wrapper around _segment_plot_helper selecting the benefit object's
	"plot_annual_benefit" method.
	:param segment_id: An NHDPlus COMID
	:param component_id: A CEFF Flow Component ID
	:param screen: when True, displays the plot on the screen
	:param output_path: When specified, saves the plot to this location
	:return: None - plots to screen and/or file as specified and closes plot
	"""
	_segment_plot_helper(function="plot_annual_benefit", segment_id=segment_id,
						component_id=component_id, screen=screen, output_path=output_path)
def plot_segment_component_day_benefit(segment_id, component_id, day=100, screen=True, output_path=None):
	"""
	Plot the flow benefit of one segment's component for a single water-year day.

	Thin wrapper around _segment_plot_helper selecting the benefit object's
	"plot_flow_benefit" method with the requested day.
	:param segment_id: An NHDPlus COMID
	:param component_id: A CEFF Flow Component ID
	:param day: the day of year to make the plot for
	:param screen: when True, displays the plot on the screen
	:param output_path: When specified, saves the plot to this location
	:return: None - plots to screen and/or file as specified and closes plot
	"""
	_segment_plot_helper(function="plot_flow_benefit", segment_id=segment_id,
						component_id=component_id, screen=screen,
						output_path=output_path, day_of_year=day)
| [
"matplotlib.pyplot.title",
"belleflopt.comet.log_metric",
"matplotlib.pyplot.savefig",
"belleflopt.models.SegmentComponent.objects.get",
"belleflopt.optimize.StreamNetworkProblem",
"matplotlib.pyplot.margins",
"platypus.nondominated",
"belleflopt.models.ModelRun.objects.get",
"belleflopt.optimize.St... | [((374, 422), 'logging.getLogger', 'logging.getLogger', (['"""eflows.optimization.support"""'], {}), "('eflows.optimization.support')\n", (391, 422), False, 'import logging\n'), ((696, 725), 'arrow.Arrow', 'arrow.Arrow', (['year', 'month', 'day'], {}), '(year, month, day)\n', (707, 725), False, 'import arrow\n'), ((880, 909), 'arrow.Arrow', 'arrow.Arrow', (['eval_year', '(10)', '(1)'], {}), '(eval_year, 10, 1)\n', (891, 909), False, 'import arrow\n'), ((3961, 4009), 'belleflopt.models.ModelRun.objects.get', 'models.ModelRun.objects.get', ([], {'name': 'model_run_name'}), '(name=model_run_name)\n', (3988, 4009), False, 'from belleflopt import models\n'), ((4344, 4419), 'belleflopt.optimize.StreamNetwork', 'optimize.StreamNetwork', (['model_run.segments', 'model_run.water_year', 'model_run'], {}), '(model_run.segments, model_run.water_year, model_run)\n', (4366, 4419), False, 'from belleflopt import optimize\n'), ((4431, 4675), 'belleflopt.optimize.StreamNetworkProblem', 'optimize.StreamNetworkProblem', (['stream_network'], {'starting_water_price': 'starting_water_price', 'total_units_needed_factor': 'economic_water_proportion', 'min_proportion': 'min_proportion', 'simplified': 'simplified', 'plot_output_folder': 'output_folder'}), '(stream_network, starting_water_price=\n starting_water_price, total_units_needed_factor=\n economic_water_proportion, min_proportion=min_proportion, simplified=\n simplified, plot_output_folder=output_folder)\n', (4460, 4675), False, 'from belleflopt import optimize\n'), ((6918, 6948), 'platypus.nondominated', 'nondominated', (['model_run.result'], {}), '(model_run.result)\n', (6930, 6948), False, 'from platypus import NSGAII, OMOPSO, EpsNSGAII, SMPSO, GDE3, SPEA2, nondominated\n'), ((7986, 8027), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (7997, 8027), False, 'import os\n'), ((15970, 15995), 'numpy.linspace', 'numpy.linspace', (['(0)', 
'(1)', '(101)'], {}), '(0, 1, 101)\n', (15984, 15995), False, 'import numpy\n'), ((16333, 16382), 'matplotlib.pyplot.plot', 'plt.plot', (['problem.iterations', 'problem.objective_1'], {}), '(problem.iterations, problem.objective_1)\n', (16341, 16382), True, 'from matplotlib import pyplot as plt\n'), ((16385, 16424), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Percent of Available Flow"""'], {}), "('Percent of Available Flow')\n", (16395, 16424), True, 'from matplotlib import pyplot as plt\n'), ((16426, 16461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Environmental Benefit"""'], {}), "('Environmental Benefit')\n", (16436, 16461), True, 'from matplotlib import pyplot as plt\n'), ((16617, 16628), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16626, 16628), True, 'from matplotlib import pyplot as plt\n'), ((17206, 17218), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17216, 17218), True, 'from matplotlib import pyplot as plt\n'), ((17220, 17234), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)'], {}), '(0)\n', (17231, 17234), True, 'from matplotlib import pyplot as plt\n'), ((18301, 18312), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18310, 18312), True, 'from matplotlib import pyplot as plt\n'), ((18435, 18465), 'platypus.nondominated', 'nondominated', (['optimizer.result'], {}), '(optimizer.result)\n', (18447, 18465), False, 'from platypus import NSGAII, OMOPSO, EpsNSGAII, SMPSO, GDE3, SPEA2, nondominated\n'), ((18798, 18815), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (18809, 18815), True, 'from matplotlib import pyplot as plt\n'), ((18817, 18857), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Environmental Flow Benefit"""'], {}), "('Environmental Flow Benefit')\n", (18827, 18857), True, 'from matplotlib import pyplot as plt\n'), ((18859, 18889), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Economic Benefit"""'], {}), "('Economic Benefit')\n", (18869, 18889), True, 'from 
matplotlib import pyplot as plt\n'), ((18891, 18907), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (18900, 18907), True, 'from matplotlib import pyplot as plt\n'), ((19047, 19058), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19056, 19058), True, 'from matplotlib import pyplot as plt\n'), ((19172, 19218), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""steelblue"""', 'linewidth': '(1)'}), "(x, y, color='steelblue', linewidth=1)\n", (19180, 19218), True, 'from matplotlib import pyplot as plt\n'), ((19294, 19311), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""NFE"""'], {}), "('NFE')\n", (19304, 19311), True, 'from matplotlib import pyplot as plt\n'), ((19313, 19342), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Objective Value"""'], {}), "('Objective Value')\n", (19323, 19342), True, 'from matplotlib import pyplot as plt\n'), ((19344, 19360), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (19353, 19360), True, 'from matplotlib import pyplot as plt\n'), ((19488, 19499), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19497, 19499), True, 'from matplotlib import pyplot as plt\n'), ((19537, 19591), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""data"""', '"""results.csv"""'], {}), "(settings.BASE_DIR, 'data', 'results.csv')\n", (19549, 19591), False, 'import os\n'), ((21898, 22005), 'belleflopt.models.SegmentComponent.objects.get', 'models.SegmentComponent.objects.get', ([], {'component__ceff_id': 'component_id', 'stream_segment__com_id': 'segment_id'}), '(component__ceff_id=component_id,\n stream_segment__com_id=segment_id)\n', (21933, 22005), False, 'from belleflopt import models\n'), ((22370, 22381), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22379, 22381), True, 'from matplotlib import pyplot as plt\n'), ((3383, 3405), 'belleflopt.comet.new_experiment', 'comet.new_experiment', ([], {}), '()\n', (3403, 3405), False, 'from belleflopt import 
comet\n'), ((7364, 7393), 'platypus.nondominated', 'nondominated', (['solution.result'], {}), '(solution.result)\n', (7376, 7393), False, 'from platypus import NSGAII, OMOPSO, EpsNSGAII, SMPSO, GDE3, SPEA2, nondominated\n'), ((16072, 16121), 'belleflopt.optimize.SimpleInitialFlowsGenerator', 'optimize.SimpleInitialFlowsGenerator', (['measurement'], {}), '(measurement)\n', (16108, 16121), False, 'from belleflopt import optimize\n'), ((16134, 16193), 'platypus.NSGAII', 'NSGAII', (['problem'], {'generator': 'initial_flows', 'population_size': '(1)'}), '(problem, generator=initial_flows, population_size=1)\n', (16140, 16193), False, 'from platypus import NSGAII, OMOPSO, EpsNSGAII, SMPSO, GDE3, SPEA2, nondominated\n'), ((16604, 16614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16612, 16614), True, 'from matplotlib import pyplot as plt\n'), ((18179, 18264), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""data"""', '"""results"""', '"""validation_plot_thesis.png"""'], {}), "(settings.BASE_DIR, 'data', 'results', 'validation_plot_thesis.png'\n )\n", (18191, 18264), False, 'import os\n'), ((18288, 18298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18296, 18298), True, 'from matplotlib import pyplot as plt\n'), ((18577, 18643), 'belleflopt.comet.log_metric', 'comet.log_metric', (['"""EnvironmentalBenefit"""', 'x'], {'experiment': 'experiment'}), "('EnvironmentalBenefit', x, experiment=experiment)\n", (18593, 18643), False, 'from belleflopt import comet\n'), ((18674, 18735), 'belleflopt.comet.log_metric', 'comet.log_metric', (['"""EconomicBenefit"""', 'y'], {'experiment': 'experiment'}), "('EconomicBenefit', y, experiment=experiment)\n", (18690, 18735), False, 'from belleflopt import comet\n'), ((18985, 19021), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'filename', 'dpi': '(300)'}), '(fname=filename, dpi=300)\n', (18996, 19021), True, 'from matplotlib import pyplot as plt\n'), ((19034, 19044), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (19042, 19044), True, 'from matplotlib import pyplot as plt\n'), ((19426, 19462), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'filename', 'dpi': '(300)'}), '(fname=filename, dpi=300)\n', (19437, 19462), True, 'from matplotlib import pyplot as plt\n'), ((19475, 19485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19483, 19485), True, 'from matplotlib import pyplot as plt\n'), ((20746, 20792), 'csv.DictWriter', 'csv.DictWriter', (['output_file'], {'fieldnames': 'fields'}), '(output_file, fieldnames=fields)\n', (20760, 20792), False, 'import csv\n'), ((5041, 5073), 'belleflopt.optimize.InitialFlowsGenerator', 'optimize.InitialFlowsGenerator', ([], {}), '()\n', (5071, 5073), False, 'from belleflopt import optimize\n'), ((7057, 7103), 'os.path.join', 'os.path.join', (['output_folder', '"""variables.shelf"""'], {}), "(output_folder, 'variables.shelf')\n", (7069, 7103), False, 'import os\n'), ((5775, 5789), 'arrow.utcnow', 'arrow.utcnow', ([], {}), '()\n', (5787, 5789), False, 'import arrow\n'), ((19967, 20052), 'belleflopt.models.SpeciesComponent.objects.get', 'models.SpeciesComponent.objects.get', ([], {'species': 'species', 'component__name': '"""min_flow"""'}), "(species=species, component__name='min_flow'\n )\n", (20002, 20052), False, 'from belleflopt import models\n'), ((14535, 14557), 'belleflopt.comet.new_experiment', 'comet.new_experiment', ([], {}), '()\n', (14555, 14557), False, 'from belleflopt import comet\n'), ((15087, 15119), 'belleflopt.optimize.InitialFlowsGenerator', 'optimize.InitialFlowsGenerator', ([], {}), '()\n', (15117, 15119), False, 'from belleflopt import optimize\n')] |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
import time
from collections import OrderedDict
from ... import promise
from ...config import options
from ...utils import parse_readable_size, log_unhandled, readable_size, tokenize
from ...errors import SpillNotConfigured, SpillSizeExceeded, NoDataToSpill, PinDataKeyFailed
from ..utils import WorkerActor
from .core import DataStorageDevice
from .manager import StorageManagerActor
logger = logging.getLogger(__name__)
class ObjectHolderActor(WorkerActor):
	"""Base actor tracking data objects held on one storage device of a worker.

	Maintains an LRU-ordered mapping of (session_id, data_key) -> object
	alongside per-key sizes, pin tokens, and spill bookkeeping.  Subclasses
	bind a concrete storage device and implement cache-status reporting and
	post-deletion cleanup.
	"""
	# storage device this holder manages; bound by subclasses
	_storage_device = None
	# devices data may be spilled to when this device fills up; bound by subclasses
	_spill_devices = None
	def __init__(self, size_limit=0):
		super().__init__()
		self._size_limit = size_limit
		self._data_holder = OrderedDict()  # (session_id, data_key) -> object, LRU order
		self._data_sizes = dict()  # (session_id, data_key) -> size in bytes
		self._total_hold = 0  # total bytes currently held
		self._pinned_counter = dict()  # (session_id, data_key) -> set of pin tokens
		self._spill_pending_keys = set()  # keys currently being spilled
		self._total_spill = 0
		self._min_spill_size = 0
		self._max_spill_size = 0
		self._dispatch_ref = None
		self._status_ref = None
		self._storage_handler = None
	def post_create(self):
		"""Resolve peer actors and compute spill thresholds after actor creation."""
		from ..dispatcher import DispatchActor
		from ..status import StatusActor
		super().post_create()
		self.register_actors_down_handler()
		self._dispatch_ref = self.promise_ref(DispatchActor.default_uid())
		# min/max spill sizes may be configured as absolute bytes or as a
		# percentage of the holder's size limit
		parse_num, is_percent = parse_readable_size(options.worker.min_spill_size)
		self._min_spill_size = int(self._size_limit * parse_num if is_percent else parse_num)
		parse_num, is_percent = parse_readable_size(options.worker.max_spill_size)
		self._max_spill_size = int(self._size_limit * parse_num if is_percent else parse_num)
		status_ref = self.ctx.actor_ref(StatusActor.default_uid())
		self._status_ref = status_ref if self.ctx.has_actor(status_ref) else None
		self._storage_handler = self.storage_client.get_storage_handler(
			self._storage_device.build_location(self.proc_id))
		self.ref().update_cache_status(_tell=True)
	def pre_destroy(self):
		# drop references to held objects before the actor is destroyed
		for k in self._data_holder:
			self._data_holder[k] = None
	def update_cache_status(self):
		"""Report current cache occupancy; implemented by subclasses."""
		raise NotImplementedError
	def post_delete(self, session_id, data_keys):
		"""Device-specific cleanup after keys are removed; implemented by subclasses."""
		raise NotImplementedError
	def get_size_limit(self):
		return self._size_limit
	@promise.reject_on_exception
	@log_unhandled
	def spill_size(self, size, multiplier=1, callback=None):
		"""Try to free ``size * multiplier`` bytes by spilling unpinned keys.

		Walks held keys in LRU order, copies enough of them to the configured
		spill devices, then removes them locally.  The promise callback is
		resolved when spilling completes (or immediately when no spill is
		needed), and rejected when nothing can be spilled.

		:param size: base number of bytes requested
		:param multiplier: multiplier applied to ``size``
		:param callback: promise callback to resolve or reject
		"""
		if not self._spill_devices:  # pragma: no cover
			raise SpillNotConfigured
		request_size = int(size * multiplier)
		# clamp the request between configured min and max spill sizes
		request_size = max(request_size, self._min_spill_size)
		if request_size > self._size_limit:
			raise SpillSizeExceeded
		request_size = min(request_size, self._max_spill_size)
		spill_ref_key = tokenize((time.time(), size, multiplier))
		logger.debug('Start spilling %d(x%d) bytes in %s. ref_key==%s.',
					 request_size, multiplier, self.uid, spill_ref_key)
		if request_size + self._total_hold > self._size_limit:
			# collect unpinned, not-already-spilling keys in LRU order until
			# freeing them would bring us back under the size limit
			acc_free = 0
			free_keys = []
			for k in self._data_holder.keys():
				if k in self._pinned_counter or k in self._spill_pending_keys:
					continue
				acc_free += self._data_sizes[k]
				free_keys.append(k)
				self._spill_pending_keys.add(k)
				if request_size + self._total_hold - acc_free <= self._size_limit:
					break
			if not free_keys:
				logger.warning('Cannot spill further in %s. Rejected. request=%d ref_key=%s',
							   self.uid, request_size, spill_ref_key)
				raise NoDataToSpill
			logger.debug('Decide to spill %d data keys %r in %s. request=%d ref_key=%s',
						 len(free_keys), free_keys, self.uid, request_size, spill_ref_key)
			@log_unhandled
			def _release_spill_allocations(key):
				# once a key is safely copied to a spill device, drop it here
				logger.debug('Removing reference of data %s from %s when spilling. ref_key=%s',
							 key, self.uid, spill_ref_key)
				self.delete_objects(key[0], [key[1]])
			@log_unhandled
			def _handle_spill_reject(*exc, **kwargs):
				# spilling a key failed: clear its pending mark and re-raise
				key = kwargs['session_data_key']
				self._remove_spill_pending(*key)
				raise exc[1].with_traceback(exc[2])
			@log_unhandled
			def _spill_key(key):
				if key in self._pinned_counter or key not in self._data_holder:
					# key was pinned or deleted in the meantime - skip it
					self._remove_spill_pending(*key)
					return
				logger.debug('Spilling key %s in %s. ref_key=%s', key, self.uid, spill_ref_key)
				return self.storage_client.copy_to(key[0], [key[1]], self._spill_devices) \
					.then(lambda *_: _release_spill_allocations(key),
						  functools.partial(_handle_spill_reject, session_data_key=key))
			@log_unhandled
			def _finalize_spill(*_):
				logger.debug('Finish spilling %d data keys in %s. ref_key=%s',
							 len(free_keys), self.uid, spill_ref_key)
				self._plasma_client.evict(request_size)
				if callback:
					self.tell_promise(callback)
				self.update_cache_status()
			promise.all_(_spill_key(k) for k in free_keys).then(_finalize_spill) \
				.catch(lambda *exc: self.tell_promise(callback, *exc, _accept=False))
		else:
			# enough room already - just ask the store to evict and resolve
			logger.debug('No need to spill in %s. request=%d ref_key=%s',
						 self.uid, request_size, spill_ref_key)
			self._plasma_client.evict(request_size)
			if callback:
				self.tell_promise(callback)
	@log_unhandled
	def _internal_put_object(self, session_id, data_key, obj, size):
		"""Register one object under its key, replacing any existing entry."""
		try:
			session_data_key = (session_id, data_key)
			if session_data_key in self._data_holder:
				# replacing: remove the old entry's size before re-inserting,
				# which also moves the key to the MRU end
				self._total_hold -= self._data_sizes[session_data_key]
				del self._data_holder[session_data_key]
			self._data_holder[session_data_key] = obj
			self._data_sizes[session_data_key] = size
			self._total_hold += size
		finally:
			# drop our local reference even when registration fails
			del obj
	def _finish_put_objects(self, _session_id, data_keys):
		# log a deduplicated key summary only when debug logging is enabled
		if logger.getEffectiveLevel() <= logging.DEBUG:  # pragma: no cover
			simplified_keys = sorted(set(k[0] if isinstance(k, tuple) else k for k in data_keys))
			logger.debug('Data %r registered in %s. total_hold=%d', simplified_keys,
						 self.uid, self._total_hold)
		self.update_cache_status()
	def _remove_spill_pending(self, session_id, data_key):
		# best-effort removal: the key may not be marked as spill-pending
		try:
			self._spill_pending_keys.remove((session_id, data_key))
			logger.debug('Spill-pending key (%s, %s) removed in %s', session_id, data_key, self.uid)
		except KeyError:
			pass
	@log_unhandled
	def delete_objects(self, session_id, data_keys):
		"""Remove the given keys from this holder, clearing pins and spill marks."""
		actual_removed = []
		for data_key in data_keys:
			session_data_key = (session_id, data_key)
			self._remove_spill_pending(session_id, data_key)
			try:
				del self._pinned_counter[session_data_key]
			except KeyError:
				pass
			if session_data_key in self._data_holder:
				actual_removed.append(data_key)
				data_size = self._data_sizes[session_data_key]
				self._total_hold -= data_size
				del self._data_holder[session_data_key]
				del self._data_sizes[session_data_key]
		self.post_delete(session_id, actual_removed)
		if actual_removed:
			logger.debug('Data %s unregistered in %s. total_hold=%d', actual_removed, self.uid, self._total_hold)
		self.update_cache_status()
	def lift_data_keys(self, session_id, data_keys, last=True):
		# move keys to the MRU end (or LRU end when last=False)
		for k in data_keys:
			self._data_holder.move_to_end((session_id, k), last)
	@log_unhandled
	def pin_data_keys(self, session_id, data_keys, token):
		"""Pin keys under ``token`` so they cannot be spilled.

		:return: the keys that were actually pinned (held by this actor)
		:raises PinDataKeyFailed: when any requested key is being spilled
		"""
		spilling_keys = list(k for k in data_keys if (session_id, k) in self._spill_pending_keys)
		if spilling_keys:
			logger.warning('Cannot pin data key %r: under spilling', spilling_keys)
			raise PinDataKeyFailed
		pinned = []
		for k in data_keys:
			session_k = (session_id, k)
			if session_k not in self._data_holder:
				continue
			if session_k not in self._pinned_counter:
				self._pinned_counter[session_k] = set()
			self._pinned_counter[session_k].add(token)
			pinned.append(k)
		logger.debug('Data keys %r pinned in %s', pinned, self.uid)
		return pinned
	@log_unhandled
	def unpin_data_keys(self, session_id, data_keys, token):
		"""Remove ``token``'s pins from keys.

		:return: the keys whose pin sets became empty (now spillable again)
		"""
		unpinned = []
		for k in data_keys:
			session_k = (session_id, k)
			try:
				self._pinned_counter[session_k].remove(token)
				if not self._pinned_counter[session_k]:
					del self._pinned_counter[session_k]
					unpinned.append(k)
			except KeyError:
				continue
		if unpinned:
			logger.debug('Data keys %r unpinned in %s', unpinned, self.uid)
		return unpinned
	def dump_keys(self):  # pragma: no cover
		# debugging helper: snapshot of all held (session_id, data_key) pairs
		return list(self._data_holder.keys())
class SimpleObjectHolderActor(ObjectHolderActor):
	"""Object holder storing plain references with no cache-status reporting.

	Used for storage devices where no global cache accounting is needed and
	deletion requires no extra cleanup.
	"""
	def post_create(self):
		super().post_create()
		# announce this holder to the storage manager so lookups can find it
		manager_ref = self.ctx.actor_ref(StorageManagerActor.default_uid())
		manager_ref.register_process_holder(
			self.proc_id, self._storage_device, self.ref())
	def put_objects(self, session_id, data_keys, data_objs, data_sizes, pin_token=None):
		"""Register several objects at once, optionally pinning them."""
		try:
			for key, value, nbytes in zip(data_keys, data_objs, data_sizes):
				self._internal_put_object(session_id, key, value, nbytes)
			if pin_token:
				self.pin_data_keys(session_id, data_keys, pin_token)
			self._finish_put_objects(session_id, data_keys)
		finally:
			# release our extra reference to the objects as early as possible
			del data_objs
	def get_object(self, session_id, data_key):
		return self._data_holder[(session_id, data_key)]
	def get_objects(self, session_id, data_keys):
		result = []
		for key in data_keys:
			result.append(self._data_holder[(session_id, key)])
		return result
	def update_cache_status(self):
		pass  # nothing to report for simple holders
	def post_delete(self, session_id, data_keys):
		pass  # nothing extra to clean up
class SharedHolderActor(ObjectHolderActor):
	"""Holder actor for data in worker-shared storage (plasma or vineyard)."""
	if options.vineyard.socket:
		_storage_device = DataStorageDevice.VINEYARD # pragma: no cover
	else:
		_storage_device = DataStorageDevice.SHARED_MEMORY
	_spill_devices = (DataStorageDevice.DISK,)
	def post_create(self):
		super().post_create()
		# the shared store may deliver less capacity than configured - probe it
		self._size_limit = self._shared_store.get_actual_capacity(self._size_limit)
		logger.info('Detected actual plasma store size: %s', readable_size(self._size_limit))
	def update_cache_status(self):
		# push hold/total numbers to the status actor, fire-and-forget
		if self._status_ref:
			self._status_ref.set_cache_allocations(
				dict(hold=self._total_hold, total=self._size_limit), _tell=True, _wait=False)
	def post_delete(self, session_id, data_keys):
		# remove buffers from the shared store and drop registration records
		self._shared_store.batch_delete(session_id, data_keys)
		self._storage_handler.unregister_data(session_id, data_keys)
	def put_objects_by_keys(self, session_id, data_keys, shapes=None, pin_token=None):
		"""Track already-written shared buffers under this holder and register them.

		:param session_id: session the data belongs to
		:param data_keys: keys whose buffers already exist in the shared store
		:param shapes: optional shapes registered alongside the byte sizes
		:param pin_token: when given, pin the keys with this token
		"""
		sizes = []
		for data_key in data_keys:
			buf = None
			try:
				buf = self._shared_store.get_buffer(session_id, data_key)
				size = len(buf)
				self._internal_put_object(session_id, data_key, buf, size)
			finally:
				# drop the local buffer reference promptly
				del buf
			sizes.append(size)
		if pin_token:
			self.pin_data_keys(session_id, data_keys, pin_token)
		self.storage_client.register_data(
			session_id, data_keys, (0, self._storage_device), sizes, shapes=shapes)
class InProcHolderActor(SimpleObjectHolderActor):
	"""Simple holder keeping objects in this worker process's private memory."""
	_storage_device = DataStorageDevice.PROC_MEMORY
class CudaHolderActor(SimpleObjectHolderActor):
	"""Simple holder keeping objects in GPU memory, spilling to shared memory/disk."""
	_storage_device = DataStorageDevice.CUDA
	if options.vineyard.socket:
		shared_memory_device = DataStorageDevice.VINEYARD # pragma: no cover
	else:
		shared_memory_device = DataStorageDevice.SHARED_MEMORY
	_spill_devices = [shared_memory_device, DataStorageDevice.DISK]
	def __init__(self, size_limit=0, device_id=None):
		super().__init__(size_limit=size_limit)
		if device_id is not None:
			# restrict this process to the assigned GPU
			os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
		# warm up cupy (first CUDA call is slow; tolerate missing package)
		try:
			import cupy
			cupy.zeros((10, 10)).sum()
		except ImportError:
			pass
		# warm up cudf (tolerate missing package)
		try:
			import cudf
			import numpy as np
			import pandas as pd
			cudf.from_pandas(pd.DataFrame(np.zeros((10, 10))))
		except ImportError:
			pass
| [
"functools.partial",
"cupy.zeros",
"numpy.zeros",
"time.time",
"collections.OrderedDict",
"logging.getLogger"
] | [((1035, 1062), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1052, 1062), False, 'import logging\n'), ((1289, 1302), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1300, 1302), False, 'from collections import OrderedDict\n'), ((3424, 3435), 'time.time', 'time.time', ([], {}), '()\n', (3433, 3435), False, 'import time\n'), ((5549, 5610), 'functools.partial', 'functools.partial', (['_handle_spill_reject'], {'session_data_key': 'key'}), '(_handle_spill_reject, session_data_key=key)\n', (5566, 5610), False, 'import functools\n'), ((13550, 13570), 'cupy.zeros', 'cupy.zeros', (['(10, 10)'], {}), '((10, 10))\n', (13560, 13570), False, 'import cupy\n'), ((13787, 13805), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (13795, 13805), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 15:33:38 2020
@author: enzo
"""
import matplotlib.pyplot as plt
import numpy as np
import cv2
print(f"CV2 Version: {cv2.__version__}")
from pathlib import Path
#%% Detector1
def calculate_keypoints(img, method, single_channel, graphics=False):
	"""
	Compute keypoints and descriptors on a single-channel rendering of ``img``.

	``single_channel`` selects the preprocessing ('gray', 'laplacian', 'color',
	'HSV', 'hog' or 'mixed'); ``method`` selects the detector/descriptor pair
	('sift', 'orb', 'fast' or 'star').  Returns (keypoints, descriptors), or 0
	when the method is not recognized.

	https://pysource.com/2018/03/21/feature-detection-sift-surf-obr-opencv-3-4-with-python-3-tutorial-25/
	"""
	if single_channel=='gray':
		channel_img = single_channel_gray(img)
	elif single_channel=='laplacian':
		channel_img = compute_laplac(img)
	elif single_channel=='color':
		channel_img = clahe(img)
	elif single_channel=='HSV':
		channel_img = HSV(img)
	elif single_channel=='hog':
		channel_img = hog(img)
	elif single_channel=='mixed':
		channel_img = mixed(img)
	print(channel_img.shape, type(channel_img), channel_img.dtype)
	def _show(image, title):
		# pop a matplotlib window with the annotated keypoints
		plt.figure(), plt.imshow(image), plt.title(title), plt.show()
	if method=='sift':
		# SIFT (Scale-Invariant Feature Transform)
		detector = cv2.SIFT_create(edgeThreshold = 21, sigma = 1.2)
		keypoints, descriptors = detector.detectAndCompute(channel_img, None)
		annotated = cv2.drawKeypoints(channel_img, keypoints, None, color=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
		if graphics == True:
			_show(annotated, "SIFT")
		return keypoints, descriptors
	elif method=='orb':
		# ORB (Oriented FAST and Rotated BRIEF)
		detector = cv2.ORB_create(nfeatures=3000)
		keypoints, descriptors = detector.detectAndCompute(channel_img, None)
		annotated = cv2.drawKeypoints(channel_img, keypoints, None, color=(0, 255, 0), flags=0)
		if graphics == True:
			_show(annotated, "ORB")
		return keypoints, descriptors
	elif method=='fast':
		# FAST corner detector + BRIEF descriptor extractor
		detector = cv2.FastFeatureDetector_create()
		extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
		raw_keypoints = detector.detect(channel_img, None)
		keypoints, descriptors = extractor.compute(channel_img, raw_keypoints)
		print(len(raw_keypoints), len(keypoints))
		if graphics == True:
			_show(cv2.drawKeypoints(channel_img, raw_keypoints, None, color=(255, 0, 0)), "Detected FAST keypoints")
			_show(cv2.drawKeypoints(channel_img, keypoints, None, color=(255, 0, 0)), "Detected BRIEF keypoints")
		return keypoints, descriptors
	elif method=='star':
		# STAR keypoint detector (feature only) + BRIEF descriptor extractor
		detector = cv2.xfeatures2d.StarDetector_create()
		extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
		raw_keypoints = detector.detect(channel_img, None)
		keypoints, descriptors = extractor.compute(channel_img, raw_keypoints)
		print(len(raw_keypoints), len(keypoints))
		if graphics == True:
			_show(cv2.drawKeypoints(channel_img, raw_keypoints, None, color=(255, 0, 0)), "Detected STAR keypoints")
			_show(cv2.drawKeypoints(channel_img, keypoints, None, color=(255, 0, 0)), "Detected BRIEF keypoints")
		return keypoints, descriptors
	return 0
def feature_matching(descriptions_query, descriptions_train, matching_type, lowe_threshold = 0.75):
    """Match two descriptor sets and return the list of "good" cv2.DMatch.

    matching_type:
        'flann'      -- FLANN KD-tree kNN matching + Lowe's ratio test.
        'bfhamm'     -- brute force, Hamming norm (binary descriptors such
                        as ORB/BRIEF) + consecutive-distance filter.
        'bruteforce' -- brute force, L1 norm + consecutive-distance filter.

    lowe_threshold: ratio used by the filters (default 0.75).
    An unrecognised matching_type returns an empty list (previously this
    raised NameError because good_matches was never initialised).
    """
    good_matches = []
    if matching_type == 'flann':
        # FLANN works on float32 descriptors.
        descriptions_query = descriptions_query.astype('float32')
        descriptions_train = descriptions_train.astype('float32')
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 7) #5
        search_params = dict(checks = 70) # 50
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(descriptions_query, descriptions_train, k=2)
        # Lowe's ratio test: keep m only if clearly better than the 2nd best.
        for m, n in matches:
            if m.distance < lowe_threshold * n.distance:
                good_matches.append(m)
    elif matching_type == 'bfhamm':
        # BUGFIX: cv2.NORM_HAMMING requires binary (uint8) descriptors; the
        # previous float32 cast made BFMatcher raise. Cast/keep uint8 instead.
        descriptions_query = descriptions_query.astype('uint8')
        descriptions_train = descriptions_train.astype('uint8')
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) #brute force NORM_HAMMING NORM_L1
        matches = bf.match(descriptions_query, descriptions_train)
        good_matches = _filter_consecutive(matches, lowe_threshold)
    elif matching_type == 'bruteforce':
        # NORM_L2 suits SIFT/SURF, NORM_HAMMING suits ORB/FAST/BRIEF; L1 here.
        bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True) #brute force NORM_HAMMING NORM_L1
        matches = bf.match(descriptions_query, descriptions_train)
        good_matches = _filter_consecutive(matches, lowe_threshold)
    return good_matches


def _filter_consecutive(matches, lowe_threshold):
    # Ratio-style filter on a flat bf.match() list: keep a match when its
    # distance is below lowe_threshold times the NEXT match's distance.
    # (Preserved from the original; note matches are not sorted beforehand.)
    kept = []
    for i, m in enumerate(matches):
        if i < len(matches) - 1 and m.distance < lowe_threshold * matches[i + 1].distance:
            kept.append(m)
    return kept
#%% Extract Transform
def extract_matrix(good_matches, kp1, kp2, img1, img2): #
    """
    If enough matches are found, extract the locations of matched keypoints
    in both images and estimate the perspective transformation (homography)
    between the two planes with RANSAC.

    Returns (matrix, M, mask):
        matrix -- homography mapping query (kp1) points onto train (kp2)
        M      -- the reverse homography (train -> query)
        mask   -- RANSAC inlier mask for `matrix`
    All three are None when fewer than MIN_MATCH_COUNT matches are given
    (BUGFIX: `mask` was previously left undefined on that path, so the
    return statement raised NameError).
    img1/img2 are accepted for interface compatibility but unused here.
    """
    MIN_MATCH_COUNT = 10  # at least this many matches to fit a homography
    matrix = M = mask = None
    if len(good_matches) > MIN_MATCH_COUNT:
        # Matched keypoint coordinates, shaped (N, 1, 2) as findHomography expects.
        src_points = np.float32([ kp1[m.queryIdx].pt for m in good_matches ]).reshape(-1,1,2)
        dst_points = np.float32([ kp2[m.trainIdx].pt for m in good_matches ]).reshape(-1,1,2)
        # Perspective transformation between the two planes
        # (RANSAC with a 5-pixel reprojection threshold).
        matrix, mask = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        M, mask_reverse = cv2.findHomography(dst_points, src_points, cv2.RANSAC, 5.0)
    else:
        print(f"Not enough matches are found - {len(good_matches)} / {MIN_MATCH_COUNT}")
    return matrix, M, mask
#%% Draw Matches
def draw_inliers(mask, img1,kp1,img2,kp2,good):
    """
    Visualise the matches between two images: draw only the inliers flagged
    in *mask* (green lines) and return the composite image.
    """
    options = {
        'matchColor': (0, 255, 0),               # draw matches in green
        'singlePointColor': None,
        # ravel() flattens the RANSAC mask into the list drawMatches expects
        'matchesMask': mask.ravel().tolist(),
        'flags': 2,                              # draw only matched keypoints
    }
    return cv2.drawMatches(img1, kp1, img2, kp2, good, None, **options)
#%% Pre process
def compute_laplac(input_image):
    """Return a denoised Laplacian (edge) map of *input_image*.

    The image is reduced to an equalized gray channel, Gaussian-blurred,
    passed through the Laplacian operator and blurred once more.
    """
    gray = single_channel_gray(input_image)
    # Gaussian smoothing before edge extraction
    blurred = cv2.GaussianBlur(gray, (7, 7), 5)
    edge_map = cv2.Laplacian(blurred, cv2.CV_64F).astype('uint8')
    # smooth the edge map as well before returning it
    return cv2.GaussianBlur(edge_map, (7, 7), 5)
def single_channel_gray(BRG_input_image):
    """Convert a BGR image to a CLAHE contrast-equalized gray channel."""
    grayscale = cv2.cvtColor(BRG_input_image, cv2.COLOR_BGR2GRAY)
    # CLAHE instead of plain equalizeHist: local, clip-limited equalization
    equalizer = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(4, 3))
    return equalizer.apply(grayscale)
def clahe(img):
    """Apply CLAHE to the lightness channel in LAB space; return a BGR image."""
    lab_image = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    channels = cv2.split(lab_image)
    equalizer = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(3, 7))
    # equalize only L; a and b (colour) channels are left untouched
    channels[0] = equalizer.apply(channels[0])
    return cv2.cvtColor(cv2.merge(channels), cv2.COLOR_LAB2BGR)
def HSV(img):
    """Return the CLAHE-equalized V (value) channel of *img* in HSV space."""
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    equalizer = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(7, 3))
    return equalizer.apply(val)
def mixed(img_rgb):
    """Blend equalized V (HSV), green (BGR) and S (HSV) channels into one uint8 image.

    Weights: 1/3 * V + 1 * g + 1/3 * S, each channel CLAHE-equalized and
    expanded back to three channels before the weighted sum.
    """
    _, sat, val = cv2.split(cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV))
    _, green, _ = cv2.split(img_rgb)
    equalizer = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(7, 7))
    val_bgr = cv2.cvtColor(equalizer.apply(val), cv2.COLOR_GRAY2BGR)
    green_bgr = cv2.cvtColor(equalizer.apply(green), cv2.COLOR_GRAY2BGR)
    sat_bgr = cv2.cvtColor(equalizer.apply(sat), cv2.COLOR_GRAY2BGR)
    blend = (1 / 3) * val_bgr + 1 * green_bgr + (1 / 3) * sat_bgr
    return blend.astype('uint8')
#plt.imshow(mixed(img)), plt.show()
#queryImage
def hog (img):
    """Overlay a HOG visualisation on the equalized V channel of *img*.

    Computes HOG on the equalized gray image, rescales the visualisation to
    uint8 and alpha-blends it (0.3) over the HSV value channel (0.7).
    """
    # local imports, aliased so they do not shadow this function's name
    from skimage.feature import hog as hog_descriptor
    from skimage import exposure
    gray = single_channel_gray(img)
    _, hog_vis = hog_descriptor(gray, orientations=9,
                                pixels_per_cell=(16, 16),
                                cells_per_block=(1, 1),
                                visualize=True)
    rescaled = exposure.rescale_intensity(hog_vis, in_range=(0, 256))
    overlay_channel = rescaled.astype('uint8')
    return cv2.addWeighted(HSV(img), 0.7, overlay_channel, 0.3, 0)
def feture_extraction(img):
    """Detect FAST keypoints and compute BRIEF descriptors for *img*.

    Returns the (keypoints, descriptors) pair from BRIEF's compute step.
    """
    gray = single_channel_gray(img)
    detector = cv2.FastFeatureDetector_create()        # FAST corner detector
    # BRIEF only describes: it needs keypoints supplied by a detector
    extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
    found = detector.detect(gray, None)
    return extractor.compute(gray, found)
def resize_image(img, scale_percent = 70): # percent of original size
    """Uniformly rescale *img* to scale_percent percent of its original size."""
    target = (int(img.shape[1] * scale_percent / 100),   # width
              int(img.shape[0] * scale_percent / 100))   # height
    # INTER_AREA: recommended interpolation for shrinking images
    return cv2.resize(img, target, interpolation=cv2.INTER_AREA)
# %%
def clear_all():
    """Clears all the variables from the workspace of the application."""
    # iterate a snapshot so deleting from globals() is safe
    for name, value in list(globals().items()):
        if name.startswith('_'):
            continue
        text = str(value)
        # skip functions and modules (same string-based heuristic as before)
        if 'func' in text or 'module' in text:
            continue
        del globals()[name]
def load_file(image_file_name, data_dir):
    """Read image `image_file_name` from directory `data_dir` with OpenCV.

    Raises AssertionError when the file does not exist.
    BUGFIX: previously joined against the stale module-level `path` variable
    (a plain string, so `/` raised TypeError) and ignored `data_dir` entirely.
    """
    from pathlib import Path  # local import: Path may not be in scope at module level here
    image_file = Path(data_dir) / image_file_name
    assert image_file.is_file()
    return cv2.imread(str(image_file))
def crop_extract_pipeline(queryImage, keypoints_query, descriptions_query, trainImage, mask_query_circ, image_name='test'):
    """Register trainImage against queryImage and extract per-well features.

    Matches SIFT features between the two images, estimates the homography,
    writes diagnostic images under OUTPUT_FOLDER, then for every well circle
    in the grid crops the corresponding region from trainImage, saves the
    crops and their feature dictionaries, and returns an 8x12 (rows A-H,
    columns 1-12) DataFrame of per-well feature dicts.

    NOTE(review): depends on module-level globals OUTPUT_FOLDER and df_out
    being initialised before this is called — confirm at call sites.
    """
    # Compare features between input image and its laplacian
    keypoints_train, descriptions_train = calculate_keypoints(trainImage, 'sift', 'gray', graphics=False)
    #feature matching
    matches = feature_matching(descriptions_query, descriptions_train, matching_type='flann', lowe_threshold=0.75)
    # Distances of (at most) the first 1000 matches, summarised via print below.
    array_dist=[x.distance for x in matches[:1000]]
    matrix, M, matrix_mask = extract_matrix(matches, keypoints_query, keypoints_train, queryImage, trainImage)
    h,w= queryImage.shape[0:2]
    # Corners of the query image, shaped for cv2.perspectiveTransform.
    pts_image = np.array([[0,0],[0,h-1],[w-1,h-1],[w-1,0]], dtype='float32')
    pts_image = np.array([pts_image])
    dst_image = cv2.perspectiveTransform(pts_image,matrix) # applying perspective algorithm : destination points
    # Warp the train image back into the query image frame.
    corrected_img = cv2.warpPerspective(trainImage, M, (queryImage.shape[1], queryImage.shape[0]), cv2.WARP_INVERSE_MAP)
    # Outline of the registered plate drawn onto the train image (red, 10px).
    homography = cv2.polylines(trainImage,[np.int32(dst_image)], True, (0,0,255), 10, cv2.LINE_AA)
    #plt.imshow(homography) , plt.show()
    #plt.imshow(corrected_img), plt.show()
    target_filename = image_name + '_holo' + '.jpg'
    target_path = OUTPUT_FOLDER / target_filename
    cv2.imwrite(str(target_path), homography)
    target_filename = image_name + '_cropped' + '.jpg'
    target_path = OUTPUT_FOLDER / target_filename
    cv2.imwrite(str(target_path), corrected_img)
    # Side-by-side visualisation of the inlier matches.
    compare_image = draw_inliers(matrix_mask, queryImage, keypoints_query, trainImage, keypoints_train, matches)
    #plt.imshow(compare_image), plt.show()
    target_filename = image_name + '_cro2cmp' + '.jpg'
    target_path = OUTPUT_FOLDER / target_filename
    cv2.imwrite(str(target_path), compare_image)
    print(np.sum(array_dist), np.mean(array_dist), np.std(array_dist))
    # Project the query-side well mask into train-image coordinates.
    corrected_mask = cv2.warpPerspective(mask_query_circ, matrix, (trainImage.shape[1], trainImage.shape[0]), cv2.WARP_INVERSE_MAP)
    plt.imshow(corrected_mask), plt.show()
    height, width = queryImage.shape[0:2]
    from well_plate_project.data_exp.well_plate import extract_features, uniform_center_region
    # Result grid: rows A..H (chr 65..72), columns 1..12.
    well_plate = pd.DataFrame(index = map(chr, range(65, 73)), columns=list(range(1,13)))
    #well_plate = pd.DataFrame().reindex_like(df_out)
    #well_plate = well_plate.fillna({})
    out_image = OUTPUT_FOLDER / image_name
    out_image.mkdir(parents=True, exist_ok=True)
    import json
    for key, values in df_out.iterrows():
        for i, well in enumerate(values):
            print(key+str(i+1))
            #print(well)
            # well = well.astype()
            # Single-well circular mask in query coordinates;
            # well is presumably an (x, y, r) triple — TODO confirm against df_out.
            mask = np.zeros((height,width), np.uint8)
            x=int(well[0]); y= int(well[1]); R=int(well[2])
            single_maks = cv2.circle(mask,(x,y),R,(255,255,255),-1)
            corrected_mask = cv2.warpPerspective(single_maks, matrix, (trainImage.shape[1], trainImage.shape[0]), cv2.WARP_INVERSE_MAP)
            #plt.imshow(corrected_mask), plt.show()
            # Bounding box of the warped mask, then crop the masked well region.
            result = np.where(corrected_mask == np.amax(corrected_mask))
            minx=min(result[0]); maxx=max(result[0])
            miny=min(result[1]); maxy=max(result[1])
            res = cv2.bitwise_and(trainImage,trainImage,mask = corrected_mask)
            cropped = res[minx:maxx, miny:maxy]
            #plt.imshow(res), plt.show()
            #plt.imshow(cropped), plt.show()
            target_filename = image_name + '_' + key+str(i+1) + '_crop' + '.png'
            target_path = out_image / target_filename
            cv2.imwrite(str(target_path), cropped)
            reduced, mask_redu = uniform_center_region(cropped)
            target_filename = image_name + '_' + key+str(i+1) + '_reduc' + '.png'
            target_path = out_image / target_filename
            cv2.imwrite(str(target_path), reduced)
            target_filename = image_name + '_' + key+str(i+1) + '_maskreduc' + '.png'
            target_path = out_image / target_filename
            cv2.imwrite(str(target_path), mask_redu)
            # Features for both the full crop and its uniform centre region.
            feature_dict = {}
            feature_dict ['full'] = extract_features(cropped)
            feature_dict ['reduced'] = extract_features(reduced)
            file_name=image_name + key+str(i+1) +"_dict.json"
            target_path = out_image / file_name
            with open(str(target_path),"w+") as file:
                json.dump(feature_dict, file)
                #file.write(str(feature_dict))
            well_plate.at[key, i+1] = feature_dict
    return well_plate
# %% Afer functions
# Import of the query image
path_query = '../../data/data/raw/Match/'
# Reference ("query") image of the well plate used for registration.
queryImage = cv2.imread(path_query+'aswana_cropped.jpg') #aswana_cropped_2 aswana_cropped
plt.imshow(queryImage); plt.show()
# queryImage = resize_image(queryImage, 100)
# Compare features between input image and its laplacian
keypoints_query, descriptions_query = calculate_keypoints(queryImage, 'sift', 'gray', graphics=False)
# Import of the train image
path_train = '../../data/data/raw/EXPERIMENTS/' #foto_tel1 EXPERIMENTS foto_tel1
OUTPUT_FOLDER = Path('../../data/data/raw/EXPERIMENTS_Crp/') # EXPERIMENTS_Crp foto_tel1_crp
trainImage = cv2.imread(path_train+'20a.jpg') # IMG_20201118_080910 IMG_20201118_082825
#trainImage = resize_image(trainImage, 50)
#corrected_img, compare_image, array_dist = crop_pipeline(queryImage, keypoints_query, descriptions_query, trainImage)
#%%
# Detect the well circles on the query image: take the LAB b-channel,
# CLAHE-equalise it, then run a Hough circle transform on it.
l, a, b = cv2.split(cv2.cvtColor(queryImage, cv2.COLOR_BGR2LAB))
image = b
clahe_query = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(4,3))
image = clahe_query.apply(image)
# minRadius/maxRadius 40-45 px: presumably tuned to the well size in this
# particular query image — TODO confirm if the query image changes.
circles = cv2.HoughCircles(image.astype('uint8'), cv2.HOUGH_GRADIENT, 2.21, 90,
                        param1=30,param2=90,minRadius=40,maxRadius=45)
circles = np.uint16(np.around(circles))
# Draw the detected circles for visual inspection.
for i in circles[0,:]:
    cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
plt.figure(figsize=(10,10))
plt.imshow(image)
plt.xticks([]), plt.yticks([])
plt.show()
# Order the detected circles into the 8x12 plate grid:
# first sort by y (row order), then within each row sort by x (column order).
ind = np.argsort(circles[0,:, 1])
ordered_circles=circles[0,ind]
matrix=ordered_circles.T.reshape((3, 8, 12))
#TODO improve code
sidx = matrix[0,:,:].argsort(axis=1)
out=np.zeros(matrix.shape)
out[0,:,:] = np.take_along_axis(matrix[0,:,:], sidx, axis=1)
out[1,:,:] = np.take_along_axis(matrix[1,:,:], sidx, axis=1)
out[2,:,:] = np.take_along_axis(matrix[2,:,:], sidx, axis=1)
# NOTE(review): the (3, 8, 12) sort above is dead code — `out` is re-zeroed
# here and the (8, 12, 3) reshape below is the one actually used.
out=np.zeros(matrix.shape)
matrix=ordered_circles.reshape((8, 12, 3))
sidx = matrix[:,:,0].argsort(axis=1)
out=np.zeros(matrix.shape)
out[:,:,0] = np.take_along_axis(matrix[:,:,0], sidx, axis=1)
out[:,:,1]= np.take_along_axis(matrix[:,:,1], sidx, axis=1)
out[:,:,2] = np.take_along_axis(matrix[:,:,2], sidx, axis=1)
import numpy.lib.recfunctions as nlr
# Pack each (x, y, r)-like triple into one structured element so a whole
# circle fits in a single DataFrame cell.
circles_tuple = nlr.unstructured_to_structured(out).astype('O')
import pandas as pd
# df_out: rows A..H (chr 65..72), columns 1..12 — one circle per well.
df_out = pd.DataFrame(circles_tuple, index = map(chr, range(65, 73)), columns=list(range(1,13)))
height, width = image.shape
# Combined mask of all well circles, in query-image coordinates.
mask_query_circ = np.zeros((height,width), np.uint8)
circles = np.uint16(np.around(circles))
counter = 0
# Fill mask_query_circ with every detected well circle and preview the result.
# NOTE(review): loop extent inferred from context (indentation was lost);
# masked_data appears to be recomputed on every iteration.
for i in circles[0,:]:
    # Draw on mask
    cv2.circle(mask_query_circ,(i[0],i[1]),i[2],(255,255,255),-1)
    masked_data = cv2.bitwise_and(image, image, mask=mask_query_circ)
    counter +=1
    print(counter)
plt.imshow(masked_data);plt.show()
plt.imshow(mask_query_circ);plt.show()
# x, y, r = circles[0,:][0]
# rows, cols = image.shape
# for i in range(cols):
#     for j in range(rows):
#         if np.linalg.norm([i-x, j-y]) > r:
#             image[j,i] = 0
# #cv2.imwrite("iris.jpg",image)
# plt.imshow(image, cmap = 'gray', interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([])
# plt.show()
#%%
#well_plate = crop_extract_pipeline(queryImage, keypoints_query, descriptions_query, trainImage, mask_query_circ, '20a')
#%%
#from pathlib import Path
#OUTPUT_FOLDER = Path(OUTPUT_FOLDER)
import pickle
from well_plate_project.data_etl._0_plot_hists import plot_hist_bgr
from collections import defaultdict
# Nested mapping: first file-name token -> {second token -> well-plate DataFrame}.
all_well_plates = defaultdict(dict)
path_train = Path(path_train)
# Process every experiment photo: register it on the query plate, extract
# per-well features, and persist both the DataFrame and a colour histogram.
for jpg in path_train.glob('*.jpg'):
    print(f'Processing {jpg}', end='\n')
    trainImage = cv2.imread(str(jpg))
    image_name = jpg.stem
    well_plate = crop_extract_pipeline(queryImage, keypoints_query, descriptions_query, trainImage, mask_query_circ, image_name )
    target_filename = image_name + '_df' + '.pkl'
    target_path = OUTPUT_FOLDER / target_filename
    with open(str(target_path),"wb+") as file:
        pickle.dump(well_plate, file)
    # File names are expected to contain exactly one underscore
    # (presumably '<experiment>_<replica>.jpg' — TODO confirm).
    items = image_name.split('_')
    assert len(items) == 2
    all_well_plates[items[0]][items[1]] = well_plate
    # corrected_img, compare_image, array_dist = crop_pipeline(queryImage, keypoints_query, descriptions_query, trainImage)
    # target_filename = jpg.stem + '_cropped' + jpg.suffix
    # # target_filename = jpg.stem + '_y_yk3_ragYlab' + jpg.suffix #jpg.name
    # #'rag_lab_b7_' +
    # target_path = OUTPUT_FOLDER / target_filename
    # cv2.imwrite(str(target_path), corrected_img)
    # plt.figure(); plt.imshow(corrected_img); plt.show()
    # print(np.sum(array_dist), np.mean(array_dist), np.std(array_dist))
    # target_filename = jpg.stem + '_cro2cmp' + jpg.suffix
    # target_path = OUTPUT_FOLDER / target_filename
    # cv2.imwrite(str(target_path), compare_image)
    hist = plot_hist_bgr(trainImage)
    target_filename = jpg.stem + '_hist' + jpg.suffix
    target_path = OUTPUT_FOLDER / target_filename
    hist.savefig(str(target_path))
print("Saving...")
# Persist the aggregated dictionary of all processed plates.
target_filename = 'all_wells' + '_dict_df' + '.pkl'
target_path = OUTPUT_FOLDER / target_filename
with open(str(target_path),"wb+") as file:
    pickle.dump(all_well_plates, file)
print("Done")
# for p_id, p_info in all_well_plates.items():
#     print("ID:", p_id)
#     for key in p_info:
#         print(key + ':', p_info[key])
| [
"matplotlib.pyplot.title",
"cv2.GaussianBlur",
"pickle.dump",
"numpy.sum",
"cv2.bitwise_and",
"skimage.exposure.rescale_intensity",
"numpy.argsort",
"collections.defaultdict",
"pathlib.Path",
"matplotlib.pyplot.figure",
"numpy.around",
"numpy.mean",
"numpy.lib.recfunctions.unstructured_to_st... | [((16544, 16589), 'cv2.imread', 'cv2.imread', (["(path_query + 'aswana_cropped.jpg')"], {}), "(path_query + 'aswana_cropped.jpg')\n", (16554, 16589), False, 'import cv2\n'), ((16621, 16643), 'matplotlib.pyplot.imshow', 'plt.imshow', (['queryImage'], {}), '(queryImage)\n', (16631, 16643), True, 'import matplotlib.pyplot as plt\n'), ((16645, 16655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16653, 16655), True, 'import matplotlib.pyplot as plt\n'), ((16991, 17035), 'pathlib.Path', 'Path', (['"""../../data/data/raw/EXPERIMENTS_Crp/"""'], {}), "('../../data/data/raw/EXPERIMENTS_Crp/')\n", (16995, 17035), False, 'from pathlib import Path\n'), ((17084, 17118), 'cv2.imread', 'cv2.imread', (["(path_train + '20a.jpg')"], {}), "(path_train + '20a.jpg')\n", (17094, 17118), False, 'import cv2\n'), ((17416, 17467), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(4, 3)'}), '(clipLimit=1.0, tileGridSize=(4, 3))\n', (17431, 17467), False, 'import cv2\n'), ((17770, 17798), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (17780, 17798), True, 'import matplotlib.pyplot as plt\n'), ((17798, 17815), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (17808, 17815), True, 'import matplotlib.pyplot as plt\n'), ((17847, 17857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17855, 17857), True, 'import matplotlib.pyplot as plt\n'), ((17868, 17896), 'numpy.argsort', 'np.argsort', (['circles[0, :, 1]'], {}), '(circles[0, :, 1])\n', (17878, 17896), True, 'import numpy as np\n'), ((18035, 18057), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (18043, 18057), True, 'import numpy as np\n'), ((18071, 18120), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[0, :, :]', 'sidx'], {'axis': '(1)'}), '(matrix[0, :, :], sidx, axis=1)\n', (18089, 18120), True, 'import numpy as np\n'), ((18132, 
18181), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[1, :, :]', 'sidx'], {'axis': '(1)'}), '(matrix[1, :, :], sidx, axis=1)\n', (18150, 18181), True, 'import numpy as np\n'), ((18193, 18242), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[2, :, :]', 'sidx'], {'axis': '(1)'}), '(matrix[2, :, :], sidx, axis=1)\n', (18211, 18242), True, 'import numpy as np\n'), ((18247, 18269), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (18255, 18269), True, 'import numpy as np\n'), ((18354, 18376), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (18362, 18376), True, 'import numpy as np\n'), ((18390, 18439), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[:, :, 0]', 'sidx'], {'axis': '(1)'}), '(matrix[:, :, 0], sidx, axis=1)\n', (18408, 18439), True, 'import numpy as np\n'), ((18450, 18499), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[:, :, 1]', 'sidx'], {'axis': '(1)'}), '(matrix[:, :, 1], sidx, axis=1)\n', (18468, 18499), True, 'import numpy as np\n'), ((18511, 18560), 'numpy.take_along_axis', 'np.take_along_axis', (['matrix[:, :, 2]', 'sidx'], {'axis': '(1)'}), '(matrix[:, :, 2], sidx, axis=1)\n', (18529, 18560), True, 'import numpy as np\n'), ((18828, 18863), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (18836, 18863), True, 'import numpy as np\n'), ((19142, 19165), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_data'], {}), '(masked_data)\n', (19152, 19165), True, 'import matplotlib.pyplot as plt\n'), ((19166, 19176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19174, 19176), True, 'import matplotlib.pyplot as plt\n'), ((19178, 19205), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask_query_circ'], {}), '(mask_query_circ)\n', (19188, 19205), True, 'import matplotlib.pyplot as plt\n'), ((19206, 19216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19214, 19216), True, 'import matplotlib.pyplot as plt\n'), 
((19887, 19904), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (19898, 19904), False, 'from collections import defaultdict\n'), ((19918, 19934), 'pathlib.Path', 'Path', (['path_train'], {}), '(path_train)\n', (19922, 19934), False, 'from pathlib import Path\n'), ((7915, 7979), 'cv2.drawMatches', 'cv2.drawMatches', (['img1', 'kp1', 'img2', 'kp2', 'good', 'None'], {}), '(img1, kp1, img2, kp2, good, None, **draw_params)\n', (7930, 7979), False, 'import cv2\n'), ((8172, 8212), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['input_image', '(7, 7)', '(5)'], {}), '(input_image, (7, 7), 5)\n', (8188, 8212), False, 'import cv2\n'), ((8308, 8346), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['laplacian', '(7, 7)', '(5)'], {}), '(laplacian, (7, 7), 5)\n', (8324, 8346), False, 'import cv2\n'), ((8552, 8601), 'cv2.cvtColor', 'cv2.cvtColor', (['BRG_input_image', 'cv2.COLOR_BGR2GRAY'], {}), '(BRG_input_image, cv2.COLOR_BGR2GRAY)\n', (8564, 8601), False, 'import cv2\n'), ((8665, 8716), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(4, 3)'}), '(clipLimit=1.0, tileGridSize=(4, 3))\n', (8680, 8716), False, 'import cv2\n'), ((8812, 8848), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2LAB'], {}), '(img, cv2.COLOR_BGR2LAB)\n', (8824, 8848), False, 'import cv2\n'), ((8866, 8880), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (8875, 8880), False, 'import cv2\n'), ((8893, 8944), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(3, 7)'}), '(clipLimit=1.0, tileGridSize=(3, 7))\n', (8908, 8944), False, 'import cv2\n'), ((9000, 9021), 'cv2.merge', 'cv2.merge', (['lab_planes'], {}), '(lab_planes)\n', (9009, 9021), False, 'import cv2\n'), ((9032, 9068), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (9044, 9068), False, 'import cv2\n'), ((9111, 9147), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, 
cv2.COLOR_BGR2HSV)\n', (9123, 9147), False, 'import cv2\n'), ((9161, 9179), 'cv2.split', 'cv2.split', (['img_hsv'], {}), '(img_hsv)\n', (9170, 9179), False, 'import cv2\n'), ((9192, 9243), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(7, 3)'}), '(clipLimit=1.0, tileGridSize=(7, 3))\n', (9207, 9243), False, 'import cv2\n'), ((9311, 9351), 'cv2.cvtColor', 'cv2.cvtColor', (['img_rgb', 'cv2.COLOR_BGR2HSV'], {}), '(img_rgb, cv2.COLOR_BGR2HSV)\n', (9323, 9351), False, 'import cv2\n'), ((9365, 9383), 'cv2.split', 'cv2.split', (['img_hsv'], {}), '(img_hsv)\n', (9374, 9383), False, 'import cv2\n'), ((9398, 9416), 'cv2.split', 'cv2.split', (['img_rgb'], {}), '(img_rgb)\n', (9407, 9416), False, 'import cv2\n'), ((9429, 9480), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(7, 7)'}), '(clipLimit=1.0, tileGridSize=(7, 7))\n', (9444, 9480), False, 'import cv2\n'), ((9955, 10050), 'skimage.feature.hog', 'hog', (['gray'], {'orientations': '(9)', 'pixels_per_cell': '(16, 16)', 'cells_per_block': '(1, 1)', 'visualize': '(True)'}), '(gray, orientations=9, pixels_per_cell=(16, 16), cells_per_block=(1, 1),\n visualize=True)\n', (9958, 10050), False, 'from skimage.feature import hog\n'), ((10197, 10253), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['hog_image'], {'in_range': '(0, 256)'}), '(hog_image, in_range=(0, 256))\n', (10223, 10253), False, 'from skimage import exposure\n'), ((10347, 10396), 'cv2.addWeighted', 'cv2.addWeighted', (['background', '(0.7)', 'overlay', '(0.3)', '(0)'], {}), '(background, 0.7, overlay, 0.3, 0)\n', (10362, 10396), False, 'import cv2\n'), ((10502, 10534), 'cv2.FastFeatureDetector_create', 'cv2.FastFeatureDetector_create', ([], {}), '()\n', (10532, 10534), False, 'import cv2\n'), ((10651, 10700), 'cv2.xfeatures2d.BriefDescriptorExtractor_create', 'cv2.xfeatures2d.BriefDescriptorExtractor_create', ([], {}), '()\n', (10698, 10700), False, 'import cv2\n'), 
((11185, 11235), 'cv2.resize', 'cv2.resize', (['img', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(img, dim, interpolation=cv2.INTER_AREA)\n', (11195, 11235), False, 'import cv2\n'), ((12419, 12494), 'numpy.array', 'np.array', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {'dtype': '"""float32"""'}), "([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]], dtype='float32')\n", (12427, 12494), True, 'import numpy as np\n'), ((12496, 12517), 'numpy.array', 'np.array', (['[pts_image]'], {}), '([pts_image])\n', (12504, 12517), True, 'import numpy as np\n'), ((12534, 12577), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts_image', 'matrix'], {}), '(pts_image, matrix)\n', (12558, 12577), False, 'import cv2\n'), ((12651, 12756), 'cv2.warpPerspective', 'cv2.warpPerspective', (['trainImage', 'M', '(queryImage.shape[1], queryImage.shape[0])', 'cv2.WARP_INVERSE_MAP'], {}), '(trainImage, M, (queryImage.shape[1], queryImage.shape[0\n ]), cv2.WARP_INVERSE_MAP)\n', (12670, 12756), False, 'import cv2\n'), ((13708, 13822), 'cv2.warpPerspective', 'cv2.warpPerspective', (['mask_query_circ', 'matrix', '(trainImage.shape[1], trainImage.shape[0])', 'cv2.WARP_INVERSE_MAP'], {}), '(mask_query_circ, matrix, (trainImage.shape[1],\n trainImage.shape[0]), cv2.WARP_INVERSE_MAP)\n', (13727, 13822), False, 'import cv2\n'), ((17347, 17390), 'cv2.cvtColor', 'cv2.cvtColor', (['queryImage', 'cv2.COLOR_BGR2LAB'], {}), '(queryImage, cv2.COLOR_BGR2LAB)\n', (17359, 17390), False, 'import cv2\n'), ((17675, 17693), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (17684, 17693), True, 'import numpy as np\n'), ((17722, 17775), 'cv2.circle', 'cv2.circle', (['image', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(image, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (17732, 17775), False, 'import cv2\n'), ((17816, 17830), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (17826, 17830), True, 'import matplotlib.pyplot as plt\n'), ((17832, 17846), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (17842, 17846), True, 'import matplotlib.pyplot as plt\n'), ((18885, 18903), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (18894, 18903), True, 'import numpy as np\n'), ((18963, 19031), 'cv2.circle', 'cv2.circle', (['mask_query_circ', '(i[0], i[1])', 'i[2]', '(255, 255, 255)', '(-1)'], {}), '(mask_query_circ, (i[0], i[1]), i[2], (255, 255, 255), -1)\n', (18973, 19031), False, 'import cv2\n'), ((19043, 19094), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask_query_circ'}), '(image, image, mask=mask_query_circ)\n', (19058, 19094), False, 'import cv2\n'), ((21227, 21252), 'well_plate_project.data_etl._0_plot_hists.plot_hist_bgr', 'plot_hist_bgr', (['trainImage'], {}), '(trainImage)\n', (21240, 21252), False, 'from well_plate_project.data_etl._0_plot_hists import plot_hist_bgr\n'), ((21573, 21607), 'pickle.dump', 'pickle.dump', (['all_well_plates', 'file'], {}), '(all_well_plates, file)\n', (21584, 21607), False, 'import pickle\n'), ((1109, 1153), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {'edgeThreshold': '(21)', 'sigma': '(1.2)'}), '(edgeThreshold=21, sigma=1.2)\n', (1124, 1153), False, 'import cv2\n'), ((1343, 1476), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_sift', 'None'], {'color': '(0, 255, 0)', 'flags': 'cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS'}), '(img_single_channel, keypoints_sift, None, color=(0, 255, \n 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n', (1360, 1476), False, 'import cv2\n'), ((4326, 4376), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (4347, 4376), False, 'import cv2\n'), ((7057, 7116), 'cv2.findHomography', 'cv2.findHomography', (['src_points', 'dst_points', 'cv2.RANSAC', '(5.0)'], {}), '(src_points, dst_points, cv2.RANSAC, 5.0)\n', (7075, 7116), False, 'import cv2\n'), ((7142, 7201), 'cv2.findHomography', 
'cv2.findHomography', (['dst_points', 'src_points', 'cv2.RANSAC', '(5.0)'], {}), '(dst_points, src_points, cv2.RANSAC, 5.0)\n', (7160, 7201), False, 'import cv2\n'), ((13621, 13639), 'numpy.sum', 'np.sum', (['array_dist'], {}), '(array_dist)\n', (13627, 13639), True, 'import numpy as np\n'), ((13641, 13660), 'numpy.mean', 'np.mean', (['array_dist'], {}), '(array_dist)\n', (13648, 13660), True, 'import numpy as np\n'), ((13662, 13680), 'numpy.std', 'np.std', (['array_dist'], {}), '(array_dist)\n', (13668, 13680), True, 'import numpy as np\n'), ((13823, 13849), 'matplotlib.pyplot.imshow', 'plt.imshow', (['corrected_mask'], {}), '(corrected_mask)\n', (13833, 13849), True, 'import matplotlib.pyplot as plt\n'), ((13851, 13861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13859, 13861), True, 'import matplotlib.pyplot as plt\n'), ((18614, 18649), 'numpy.lib.recfunctions.unstructured_to_structured', 'nlr.unstructured_to_structured', (['out'], {}), '(out)\n', (18644, 18649), True, 'import numpy.lib.recfunctions as nlr\n'), ((20363, 20392), 'pickle.dump', 'pickle.dump', (['well_plate', 'file'], {}), '(well_plate, file)\n', (20374, 20392), False, 'import pickle\n'), ((1681, 1711), 'cv2.ORB_create', 'cv2.ORB_create', ([], {'nfeatures': '(3000)'}), '(nfeatures=3000)\n', (1695, 1711), False, 'import cv2\n'), ((1818, 1909), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_orb', 'None'], {'color': '(0, 255, 0)', 'flags': '(0)'}), '(img_single_channel, keypoints_orb, None, color=(0, 255, 0\n ), flags=0)\n', (1835, 1909), False, 'import cv2\n'), ((4972, 5020), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (4985, 5020), False, 'import cv2\n'), ((8232, 8267), 'cv2.Laplacian', 'cv2.Laplacian', (['denoised', 'cv2.CV_64F'], {}), '(denoised, cv2.CV_64F)\n', (8245, 8267), False, 'import cv2\n'), ((12815, 12834), 'numpy.int32', 'np.int32', (['dst_image'], {}), 
'(dst_image)\n', (12823, 12834), True, 'import numpy as np\n'), ((14506, 14541), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (14514, 14541), True, 'import numpy as np\n'), ((14627, 14675), 'cv2.circle', 'cv2.circle', (['mask', '(x, y)', 'R', '(255, 255, 255)', '(-1)'], {}), '(mask, (x, y), R, (255, 255, 255), -1)\n', (14637, 14675), False, 'import cv2\n'), ((14698, 14809), 'cv2.warpPerspective', 'cv2.warpPerspective', (['single_maks', 'matrix', '(trainImage.shape[1], trainImage.shape[0])', 'cv2.WARP_INVERSE_MAP'], {}), '(single_maks, matrix, (trainImage.shape[1], trainImage.\n shape[0]), cv2.WARP_INVERSE_MAP)\n', (14717, 14809), False, 'import cv2\n'), ((15054, 15114), 'cv2.bitwise_and', 'cv2.bitwise_and', (['trainImage', 'trainImage'], {'mask': 'corrected_mask'}), '(trainImage, trainImage, mask=corrected_mask)\n', (15069, 15114), False, 'import cv2\n'), ((15485, 15515), 'well_plate_project.data_exp.well_plate.uniform_center_region', 'uniform_center_region', (['cropped'], {}), '(cropped)\n', (15506, 15515), False, 'from well_plate_project.data_exp.well_plate import extract_features, uniform_center_region\n'), ((16010, 16035), 'well_plate_project.data_exp.well_plate.extract_features', 'extract_features', (['cropped'], {}), '(cropped)\n', (16026, 16035), False, 'from well_plate_project.data_exp.well_plate import extract_features, uniform_center_region\n'), ((16075, 16100), 'well_plate_project.data_exp.well_plate.extract_features', 'extract_features', (['reduced'], {}), '(reduced)\n', (16091, 16100), False, 'from well_plate_project.data_exp.well_plate import extract_features, uniform_center_region\n'), ((1513, 1525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1523, 1525), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1547), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_sift'], {}), '(img_sift)\n', (1537, 1547), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1566), 
'matplotlib.pyplot.title', 'plt.title', (['"""SIFT"""'], {}), "('SIFT')\n", (1558, 1566), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1576, 1578), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2144), 'cv2.FastFeatureDetector_create', 'cv2.FastFeatureDetector_create', ([], {}), '()\n', (2142, 2144), False, 'import cv2\n'), ((2199, 2248), 'cv2.xfeatures2d.BriefDescriptorExtractor_create', 'cv2.xfeatures2d.BriefDescriptorExtractor_create', ([], {}), '()\n', (2246, 2248), False, 'import cv2\n'), ((5577, 5620), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_L1'], {'crossCheck': '(True)'}), '(cv2.NORM_L1, crossCheck=True)\n', (5590, 5620), False, 'import cv2\n'), ((6789, 6843), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good_matches]'], {}), '([kp1[m.queryIdx].pt for m in good_matches])\n', (6799, 6843), True, 'import numpy as np\n'), ((6883, 6937), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good_matches]'], {}), '([kp2[m.trainIdx].pt for m in good_matches])\n', (6893, 6937), True, 'import numpy as np\n'), ((16283, 16312), 'json.dump', 'json.dump', (['feature_dict', 'file'], {}), '(feature_dict, file)\n', (16292, 16312), False, 'import json\n'), ((1946, 1958), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1956, 1958), True, 'import matplotlib.pyplot as plt\n'), ((1960, 1979), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_orb'], {}), '(img_orb)\n', (1970, 1979), True, 'import matplotlib.pyplot as plt\n'), ((1981, 1997), 'matplotlib.pyplot.title', 'plt.title', (['"""ORB"""'], {}), "('ORB')\n", (1990, 1997), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2009), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2007, 2009), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2598), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_fast', 'None'], {'color': '(255, 0, 0)'}), '(img_single_channel, 
keypoints_fast, None, color=(255, 0, 0))\n', (2537, 2598), False, 'import cv2\n'), ((2623, 2702), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_brief', 'None'], {'color': '(255, 0, 0)'}), '(img_single_channel, keypoints_brief, None, color=(255, 0, 0))\n', (2640, 2702), False, 'import cv2\n'), ((3018, 3055), 'cv2.xfeatures2d.StarDetector_create', 'cv2.xfeatures2d.StarDetector_create', ([], {}), '()\n', (3053, 3055), False, 'import cv2\n'), ((3088, 3137), 'cv2.xfeatures2d.BriefDescriptorExtractor_create', 'cv2.xfeatures2d.BriefDescriptorExtractor_create', ([], {}), '()\n', (3135, 3137), False, 'import cv2\n'), ((14905, 14928), 'numpy.amax', 'np.amax', (['corrected_mask'], {}), '(corrected_mask)\n', (14912, 14928), True, 'import numpy as np\n'), ((866, 874), 'skimage.feature.hog', 'hog', (['img'], {}), '(img)\n', (869, 874), False, 'from skimage.feature import hog\n'), ((2718, 2730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2728, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2752), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_fast'], {}), '(img_fast)\n', (2742, 2752), True, 'import matplotlib.pyplot as plt\n'), ((2754, 2790), 'matplotlib.pyplot.title', 'plt.title', (['"""Detected FAST keypoints"""'], {}), "('Detected FAST keypoints')\n", (2763, 2790), True, 'import matplotlib.pyplot as plt\n'), ((2792, 2802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2800, 2802), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2825, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2829, 2850), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_brief'], {}), '(img_brief)\n', (2839, 2850), True, 'import matplotlib.pyplot as plt\n'), ((2852, 2889), 'matplotlib.pyplot.title', 'plt.title', (['"""Detected BRIEF keypoints"""'], {}), "('Detected BRIEF keypoints')\n", (2861, 2889), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2901), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2899, 2901), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3511), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_star', 'None'], {'color': '(255, 0, 0)'}), '(img_single_channel, keypoints_star, None, color=(255, 0, 0))\n', (3450, 3511), False, 'import cv2\n'), ((3536, 3615), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['img_single_channel', 'keypoints_brief', 'None'], {'color': '(255, 0, 0)'}), '(img_single_channel, keypoints_brief, None, color=(255, 0, 0))\n', (3553, 3615), False, 'import cv2\n'), ((3632, 3644), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3642, 3644), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3666), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_star'], {}), '(img_star)\n', (3656, 3666), True, 'import matplotlib.pyplot as plt\n'), ((3668, 3704), 'matplotlib.pyplot.title', 'plt.title', (['"""Detected STAR keypoints"""'], {}), "('Detected STAR keypoints')\n", (3677, 3704), True, 'import matplotlib.pyplot as plt\n'), ((3706, 3716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3714, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3729, 3741), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3739, 3741), True, 'import matplotlib.pyplot as plt\n'), ((3743, 3764), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_brief'], {}), '(img_brief)\n', (3753, 3764), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3803), 'matplotlib.pyplot.title', 'plt.title', (['"""Detected BRIEF keypoints"""'], {}), "('Detected BRIEF keypoints')\n", (3775, 3803), True, 'import matplotlib.pyplot as plt\n'), ((3805, 3815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3813, 3815), True, 'import matplotlib.pyplot as plt\n')] |
# <NAME>
import os
import numpy as np
from os import listdir
from scipy.misc import imread, imresize
# Settings: global image geometry used by get_img() and get_dataset().
img_size = 64  # target square edge length in pixels
channel_size = 1 # 1: Grayscale, 3: RGB
def get_img(data_path):
    """Load one image from `data_path` and resize it to the global target size.

    Reads as grayscale when the module-level `channel_size` is 1, otherwise RGB,
    then resizes to (img_size, img_size, channel_size).

    NOTE(review): `scipy.misc.imread`/`imresize` were removed in SciPy >= 1.2;
    this module only runs against old SciPy + PIL — verify the pinned version.

    Args:
        data_path (str): path to the image file.

    Returns:
        numpy.ndarray: resized image array.
    """
    # Getting image array from path:
    img = imread(data_path, flatten= True if channel_size == 1 else False)
    img = imresize(img, (img_size, img_size, channel_size))
    return img
def get_dataset(dataset_path='Data/Train_Data'):
    """Load the image dataset, building and caching it on first use.

    First tries to load a previously cached array from
    'Data/npy_dataset/X.npy'. If that fails, walks `dataset_path` (one
    sub-directory per label), loads every image via :func:`get_img`,
    inverts and scales pixel values to [0, 1] (so the white background
    maps to 0), reshapes to (N, img_size, img_size, channel_size) and
    caches the result for subsequent calls.

    Args:
        dataset_path (str): root directory with one folder per label.

    Returns:
        numpy.ndarray: float32 array of shape
        (N, img_size, img_size, channel_size) with values in [0, 1].
    """
    try:
        X = np.load('Data/npy_dataset/X.npy')
    # Only catch I/O failures (missing/unreadable cache); a bare `except:`
    # would also swallow KeyboardInterrupt and hide real bugs.
    except (IOError, OSError):
        X = []
        for label in listdir(dataset_path):  # one sub-directory per label
            datas_path = os.path.join(dataset_path, label)
            for data in listdir(datas_path):
                X.append(get_img(os.path.join(datas_path, data)))
        # Invert and scale so black strokes -> 1, white background -> 0.
        X = 1 - np.array(X).astype('float32') / 255.
        X = X.reshape(X.shape[0], img_size, img_size, channel_size)
        if not os.path.exists('Data/npy_dataset/'):
            os.makedirs('Data/npy_dataset/')
        np.save('Data/npy_dataset/X.npy', X)
    return X
if __name__ == '__main__':
    # Build (or load from cache) the dataset when run as a script.
    get_dataset()
| [
"numpy.load",
"numpy.save",
"os.makedirs",
"os.path.exists",
"numpy.array",
"scipy.misc.imresize",
"os.listdir",
"scipy.misc.imread"
] | [((242, 305), 'scipy.misc.imread', 'imread', (['data_path'], {'flatten': '(True if channel_size == 1 else False)'}), '(data_path, flatten=True if channel_size == 1 else False)\n', (248, 305), False, 'from scipy.misc import imread, imresize\n'), ((317, 366), 'scipy.misc.imresize', 'imresize', (['img', '(img_size, img_size, channel_size)'], {}), '(img, (img_size, img_size, channel_size))\n', (325, 366), False, 'from scipy.misc import imread, imresize\n'), ((492, 525), 'numpy.load', 'np.load', (['"""Data/npy_dataset/X.npy"""'], {}), "('Data/npy_dataset/X.npy')\n", (499, 525), True, 'import numpy as np\n'), ((555, 576), 'os.listdir', 'listdir', (['dataset_path'], {}), '(dataset_path)\n', (562, 576), False, 'from os import listdir\n'), ((1059, 1095), 'numpy.save', 'np.save', (['"""Data/npy_dataset/X.npy"""', 'X'], {}), "('Data/npy_dataset/X.npy', X)\n", (1066, 1095), True, 'import numpy as np\n'), ((709, 728), 'os.listdir', 'listdir', (['datas_path'], {}), '(datas_path)\n', (716, 728), False, 'from os import listdir\n'), ((969, 1004), 'os.path.exists', 'os.path.exists', (['"""Data/npy_dataset/"""'], {}), "('Data/npy_dataset/')\n", (983, 1004), False, 'import os\n'), ((1018, 1050), 'os.makedirs', 'os.makedirs', (['"""Data/npy_dataset/"""'], {}), "('Data/npy_dataset/')\n", (1029, 1050), False, 'import os\n'), ((851, 862), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (859, 862), True, 'import numpy as np\n')] |
import logging
import numpy as np
from rl_agents.agents.common.factory import safe_deepcopy_env
from rl_agents.agents.tree_search.olop import OLOP, OLOPAgent, OLOPNode
from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound
logger = logging.getLogger(__name__)
class MDPGapE(OLOP):
    """
    Best-Arm Identification MCTS.

    Plans over a fixed horizon H and stops as soon as the best root action is
    identified up to the configured accuracy, or the episode budget runs out.
    """
    def __init__(self, env, config=None):
        super().__init__(env, config)
        # Next observed state; set externally (see MDPGapEAgent.record).
        self.next_observation = None
        # Environment steps consumed by the last plan() call.
        self.budget_used = 0

    @classmethod
    def default_config(cls):
        """Extend the OLOP defaults with MDP-GapE-specific settings."""
        cfg = super().default_config()
        cfg.update(
            {
                "accuracy": 1.0,
                "confidence": 0.9,
                "continuation_type": "uniform",
                "horizon_from_accuracy": False,
                "max_next_states_count": 1,
                "upper_bound": {
                    "type": "kullback-leibler",
                    "time": "global",
                    # Threshold strings are eval()-ed in the nodes with
                    # horizon/actions/confidence/count/time in scope.
                    "threshold": "3*np.log(1 + np.log(count))"
                                 "+ horizon*np.log(actions)"
                                 "+ np.log(1/(1-confidence))",
                    "transition_threshold": "0.1*np.log(time)"
                },
            }
        )
        return cfg

    def reset(self):
        # The horizon may already be fixed in the config; otherwise derive it.
        if "horizon" not in self.config:
            self.allocate_budget()
        self.root = DecisionNode(parent=None, planner=self)

    def allocate_budget(self):
        """
        Allocate the computational budget into tau episodes of fixed horizon H.
        """
        if self.config["horizon_from_accuracy"]:
            # Choose H such that the discounted tail gamma^H/(1-gamma) is
            # below half of the requested accuracy.
            self.config["horizon"] = int(np.ceil(np.log(self.config["accuracy"] * (1 - self.config["gamma"]) / 2) \
                                                 / np.log(self.config["gamma"])))
            self.config["episodes"] = self.config["budget"] // self.config["horizon"]
            assert self.config["episodes"] > 1
            logger.debug("Planning at depth H={}".format(self.config["horizon"]))
        else:
            super().allocate_budget()

    def run(self, state):
        """
        Run an MDP-GapE episode.

        :param state: the initial environment state
        :return: (best, challenger) arm nodes at the root, or (None, None)
                 on the first episode when the root has just been expanded
        """
        # We need randomness
        state.seed(self.np_random.randint(2**30))
        best, challenger = None, None
        if self.root.children:
            logger.debug(" / ".join(["a{} ({}): [{:.3f}, {:.3f}]".format(k, n.count, n.value_lower, n.value_upper)
                                     for k, n in self.root.children.items()]))
        else:
            self.root.expand(state)
        # Follow selection policy, expand tree if needed, collect rewards and update confidence bounds.
        decision_node = self.root
        for h in range(self.config["horizon"]):
            action = decision_node.sampling_rule(n_actions=state.action_space.n)
            # Perform transition
            chance_node, action = decision_node.get_child(action, state)
            observation, reward, done, _ = self.step(state, action)
            decision_node = chance_node.get_child(observation)
            # Update local statistics
            chance_node.update(np.nan, False)
            decision_node.update(reward, done)
        # Backup global statistics
        decision_node.backup_to_root()
        _, best, challenger = self.root.best_arm_identification_selection()
        return best, challenger

    def plan(self, state, observation):
        """Run episodes on deep copies of `state` until the stopping rule fires."""
        done = False
        episode = 0
        while not done:
            best, challenger = self.run(safe_deepcopy_env(state))
            # Stopping rule: best arm separated from the challenger by the
            # required accuracy, or episode budget exhausted.
            done = challenger.value_upper - best.value_lower < self.config["accuracy"] if best is not None else False
            done = done or episode > self.config["episodes"]
            episode += 1
            if episode % 10 == 0:
                logger.debug('Episode {}: delta = {}/{}'.format(episode,
                                                                challenger.value_upper - best.value_lower,
                                                                self.config["accuracy"]))
        self.budget_used = episode * self.config["horizon"]
        return self.get_plan()

    def step_tree(self, actions):
        """
        Update the planner tree when the agent performs an action and observes the next state

        :param actions: a sequence of actions to follow from the root node
        """
        if self.config["step_strategy"] == "reset":
            self.step_by_reset()
        elif self.config["step_strategy"] == "subtree":
            if actions:
                self.step_by_subtree(actions[0])
                self.step_by_subtree(str(self.next_observation))  # Step to the observed next state
            else:
                self.step_by_reset()
        else:
            logger.warning("Unknown step strategy: {}".format(self.config["step_strategy"]))
            self.step_by_reset()

    def get_plan(self):
        """Only return the first action, the rest is conditioned on observations"""
        return [self.root.selection_rule()]
class DecisionNode(OLOPNode):
    """A state node: children are ChanceNodes, one per available action."""
    def __init__(self, parent, planner):
        super().__init__(parent, planner)
        self.depth = 0 if parent is None else parent.depth + 1
        self.mu_lcb = -np.infty
        """ Lower bound of the node mean reward. """
        if self.planner.config["upper_bound"]["type"] == "kullback-leibler":
            self.mu_lcb = 0
        gamma = self.planner.config["gamma"]
        H = self.planner.config["horizon"]
        # Worst-case discounted reward-to-go over the remaining H - depth steps.
        self.value_upper = (1 - gamma ** (H - self.depth)) / (1 - gamma)
        """ Upper bound on the node optimal reward-to-go """
        self.value_lower = 0
        self.gap = -np.infty
        """ Maximum possible gap from this node to its neighbours, based on their value confidence intervals """

    def get_child(self, action, state):
        """Return the (chance node, action) pair, expanding lazily if needed."""
        if not self.children:
            self.expand(state)
        if action not in self.children:  # Default action may not be available
            action = list(self.children.keys())[0]  # Pick first available action instead
        return self.children[action], action

    def expand(self, state):
        """Create one ChanceNode child per available action of `state`."""
        if state is None:
            raise Exception("The state should be set before expanding a node")
        try:
            actions = state.get_available_actions()
        except AttributeError:
            actions = range(state.action_space.n)
        for action in actions:
            self.children[action] = ChanceNode(self, self.planner)

    def selection_rule(self):
        """Pick the action to execute: BAI at the root, conservative elsewhere."""
        # Best arm identification at the root
        if self.planner.root == self:
            _, best_node, _ = self.best_arm_identification_selection()
            return next(best_node.path())
        # Then follow the conservative values
        actions = list(self.children.keys())
        index = self.random_argmax([self.children[a].value_lower for a in actions])
        return actions[index]

    def sampling_rule(self, n_actions):
        """Pick the action to explore during an episode."""
        # Best arm identification at the root
        if self == self.planner.root:  # Run BAI at the root
            selected_child, _, _ = self.best_arm_identification_selection()
            action = next(selected_child.path())
        # Elsewhere, follow the optimistic values
        elif self.children:
            actions = list(self.children.keys())
            index = self.random_argmax([self.children[a].value_upper for a in actions])
            action = actions[index]
        # Break ties at leaves
        else:
            action = self.planner.np_random.randint(n_actions) \
                if self.planner.config["continuation_type"] == "uniform" else 0
        return action

    def compute_reward_ucb(self):
        """Refresh mu_ucb/mu_lcb from the configured KL threshold expression."""
        if self.planner.config["upper_bound"]["type"] == "kullback-leibler":
            # Variables available for threshold evaluation
            horizon = self.planner.config["horizon"]
            actions = self.planner.env.action_space.n
            confidence = self.planner.config["confidence"]
            count = self.count
            time = self.planner.config["episodes"]
            threshold = eval(self.planner.config["upper_bound"]["threshold"])
            self.mu_ucb = kl_upper_bound(self.cumulative_reward, self.count, threshold)
            self.mu_lcb = kl_upper_bound(self.cumulative_reward, self.count, threshold, lower=True)
        else:
            logger.error("Unknown upper-bound type")

    def backup_to_root(self):
        """
        Bellman V(s) = max_a Q(s,a)
        """
        if self.children:
            self.value_upper = np.amax([child.value_upper for child in self.children.values()])
            self.value_lower = np.amax([child.value_lower for child in self.children.values()])
        else:
            assert self.depth == self.planner.config["horizon"]
            self.value_upper = 0  # Maybe count bound over r(H..inf) ?
            self.value_lower = 0  # Maybe count bound over r(H..inf) ?
        if self.parent:
            self.parent.backup_to_root()

    def compute_children_gaps(self):
        """
        For best arm identification: compute for each child how much the other actions are potentially better.
        """
        for child in self.children.values():
            child.gap = -np.infty
            for other in self.children.values():
                if other is not child:
                    child.gap = max(child.gap, other.value_upper - child.value_lower)

    def best_arm_identification_selection(self):
        """
        Run UGapE on the children on this node, based on their value confidence intervals.

        :return: selected arm, best candidate, challenger
        """
        # Best candidate child has the lowest potential gap
        self.compute_children_gaps()
        best = min(self.children.values(), key=lambda c: c.gap)
        # Challenger: not best and highest value upper bound
        challenger = max([c for c in self.children.values() if c is not best], key=lambda c: c.value_upper)
        # Selection: the one with highest uncertainty
        return max([best, challenger], key=lambda n: n.value_upper - n.value_lower), best, challenger
class ChanceNode(OLOPNode):
    """An action node: children are DecisionNodes, one per observed next state."""
    def __init__(self, parent, planner):
        assert parent is not None
        super().__init__(parent, planner)
        self.depth = parent.depth
        gamma = self.planner.config["gamma"]
        self.value_upper = (1 - gamma ** (self.planner.config["horizon"] - self.depth)) / (1 - gamma)
        self.value_lower = 0
        # Empirical / optimistic / pessimistic next-state distributions,
        # filled in by backup_to_root().
        self.p_hat, self.p_plus, self.p_minus = None, None, None
        # Chance nodes carry no reward statistics of their own.
        delattr(self, 'cumulative_reward')
        delattr(self, 'mu_ucb')

    def update(self, reward, done):
        # Only track the visit count; rewards are attributed to DecisionNodes.
        self.count += 1

    def expand(self, state):
        # Generate placeholder nodes
        for i in range(self.planner.config["max_next_states_count"]):
            self.children["placeholder_{}".format(i)] = DecisionNode(self, self.planner)

    def get_child(self, observation, hash=False):
        """Return the child node for `observation`, claiming a placeholder on first sight."""
        if not self.children:
            self.expand(None)
        import hashlib
        state_id = hashlib.sha1(str(observation).encode("UTF-8")).hexdigest()[:5] if hash else str(observation)
        if state_id not in self.children:
            # Assign the first available placeholder to the observation
            for i in range(self.planner.config["max_next_states_count"]):
                if "placeholder_{}".format(i) in self.children:
                    self.children[state_id] = self.children.pop("placeholder_{}".format(i))
                    break
            else:
                raise ValueError("No more placeholder nodes available, we observed more next states than "
                                 "the 'max_next_states_count' config")
        return self.children[state_id]

    def backup_to_root(self):
        """
        Bellman Q(s,a) = r(s,a) + gamma E_s' V(s')
        """
        assert self.children
        assert self.parent
        gamma = self.planner.config["gamma"]
        children = list(self.children.values())
        u_next = np.array([c.mu_ucb + gamma * c.value_upper for c in children])
        l_next = np.array([c.mu_lcb + gamma * c.value_lower for c in children])
        self.p_hat = np.array([child.count for child in children]) / self.count
        threshold = self.transition_threshold() / self.count
        # Optimistic/pessimistic transition distributions within an L1-like
        # confidence set around p_hat.
        self.p_plus = max_expectation_under_constraint(u_next, self.p_hat, threshold)
        self.p_minus = max_expectation_under_constraint(-l_next, self.p_hat, threshold)
        self.value_upper = self.p_plus @ u_next
        self.value_lower = self.p_minus @ l_next
        self.parent.backup_to_root()

    def transition_threshold(self):
        """Evaluate the configured transition-threshold expression."""
        # Variables available to the eval()-ed expression:
        horizon = self.planner.config["horizon"]
        actions = self.planner.env.action_space.n
        confidence = self.planner.config["confidence"]
        count = self.count
        time = self.planner.config["episodes"]
        return eval(self.planner.config["upper_bound"]["transition_threshold"])
class MDPGapEAgent(OLOPAgent):
    """
    An agent that uses best-arm-identification to plan a sequence of actions in an MDP.
    """
    PLANNER_TYPE = MDPGapE

    def step(self, actions):
        """
        Handle receding horizon mechanism with chance nodes

        :param actions: the sequence of actions just executed
        :return: whether a full replanning is required
        """
        replanning_required = (self.remaining_horizon == 0)  # Cannot check remaining actions here
        if replanning_required:
            self.planner.step_by_reset()
            self.remaining_horizon = self.config["receding_horizon"] - 1
        else:
            self.planner.step_tree(actions)
            self.remaining_horizon = self.remaining_horizon - 1

        # Check for remaining children here instead
        root = self.planner.root
        if root.children:
            self.previous_actions.extend(self.planner.get_plan())
            return replanning_required

        # After stepping the transition in the tree, the subtree is empty
        self.planner.step_by_reset()
        return True

    def record(self, state, action, reward, next_state, done, info):
        """Remember the observed next state so the planner can step its tree."""
        self.planner.next_observation = next_state
| [
"rl_agents.agents.common.factory.safe_deepcopy_env",
"numpy.log",
"rl_agents.utils.max_expectation_under_constraint",
"rl_agents.utils.kl_upper_bound",
"numpy.array",
"logging.getLogger"
] | [((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((12150, 12214), 'numpy.array', 'np.array', (['[(c.mu_ucb + gamma * c.value_upper) for c in children]'], {}), '([(c.mu_ucb + gamma * c.value_upper) for c in children])\n', (12158, 12214), True, 'import numpy as np\n'), ((12230, 12294), 'numpy.array', 'np.array', (['[(c.mu_lcb + gamma * c.value_lower) for c in children]'], {}), '([(c.mu_lcb + gamma * c.value_lower) for c in children])\n', (12238, 12294), True, 'import numpy as np\n'), ((12457, 12520), 'rl_agents.utils.max_expectation_under_constraint', 'max_expectation_under_constraint', (['u_next', 'self.p_hat', 'threshold'], {}), '(u_next, self.p_hat, threshold)\n', (12489, 12520), False, 'from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound\n'), ((12544, 12608), 'rl_agents.utils.max_expectation_under_constraint', 'max_expectation_under_constraint', (['(-l_next)', 'self.p_hat', 'threshold'], {}), '(-l_next, self.p_hat, threshold)\n', (12576, 12608), False, 'from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound\n'), ((8269, 8330), 'rl_agents.utils.kl_upper_bound', 'kl_upper_bound', (['self.cumulative_reward', 'self.count', 'threshold'], {}), '(self.cumulative_reward, self.count, threshold)\n', (8283, 8330), False, 'from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound\n'), ((8357, 8430), 'rl_agents.utils.kl_upper_bound', 'kl_upper_bound', (['self.cumulative_reward', 'self.count', 'threshold'], {'lower': '(True)'}), '(self.cumulative_reward, self.count, threshold, lower=True)\n', (8371, 8430), False, 'from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound\n'), ((12314, 12359), 'numpy.array', 'np.array', (['[child.count for child in children]'], {}), '([child.count for child in children])\n', (12322, 12359), True, 'import numpy as np\n'), ((3565, 3589), 
'rl_agents.agents.common.factory.safe_deepcopy_env', 'safe_deepcopy_env', (['state'], {}), '(state)\n', (3582, 3589), False, 'from rl_agents.agents.common.factory import safe_deepcopy_env\n'), ((1661, 1725), 'numpy.log', 'np.log', (["(self.config['accuracy'] * (1 - self.config['gamma']) / 2)"], {}), "(self.config['accuracy'] * (1 - self.config['gamma']) / 2)\n", (1667, 1725), True, 'import numpy as np\n'), ((1767, 1795), 'numpy.log', 'np.log', (["self.config['gamma']"], {}), "(self.config['gamma'])\n", (1773, 1795), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from os.path import join
import numpy as np
from MCEq.misc import theta_rad
from MCEq.misc import info
import mceq_config as config
class EarthsAtmosphere(with_metaclass(ABCMeta)):
    """
    Abstract class containing common methods on atmosphere.
    You have to inherit from this class and implement the virtual method
    :func:`get_density`.

    Note:
      Do not instantiate this class directly.

    Attributes:
      thrad (float): current zenith angle :math:`\\theta` in radiants
      theta_deg (float): current zenith angle :math:`\\theta` in degrees
      max_X (float): Slant depth at the surface according to the geometry
                     defined in the :mod:`MCEq.geometry`
      geometry (object): Can be a custom instance of EarthGeometry
    """
    def __init__(self, *args, **kwargs):
        from MCEq.geometry.geometry import EarthGeometry
        # Allow a custom geometry; fall back to the default Earth geometry.
        self.geom = kwargs.pop('geometry', EarthGeometry())
        self.thrad = None
        self.theta_deg = None
        self._max_den = config.max_density
        self.max_theta = 90.
        self.location = None
        self.season = None

    @abstractmethod
    def get_density(self, h_cm):
        """Abstract method which implementation should return the density in g/cm**3.

        Args:
          h_cm (float): height in cm

        Returns:
          float: density in g/cm**3

        Raises:
          NotImplementedError:
        """
        raise NotImplementedError("Base class called.")

    def calculate_density_spline(self, n_steps=2000):
        """Calculates and stores a spline of :math:`\\rho(X)`.

        Args:
          n_steps (int, optional): number of :math:`X` values
                                   to use for interpolation

        Raises:
            Exception: if :func:`set_theta` was not called before.
        """
        from scipy.integrate import cumtrapz
        from time import time
        from scipy.interpolate import UnivariateSpline
        if self.theta_deg is None:
            raise Exception('zenith angle not set')
        else:
            info(
                5, 'Calculating spline of rho(X) for zenith {0:4.1f} degrees.'.
                format(self.theta_deg))
        thrad = self.thrad
        path_length = self.geom.l(thrad)
        # Density sampled along the slant path at distances delta_l.
        vec_rho_l = np.vectorize(
            lambda delta_l: self.get_density(self.geom.h(delta_l, thrad)))
        dl_vec = np.linspace(0, path_length, n_steps)
        now = time()
        # Calculate integral for each depth point
        X_int = cumtrapz(vec_rho_l(dl_vec), dl_vec)
        dl_vec = dl_vec[1:]
        info(5, '.. took {0:1.2f}s'.format(time() - now))
        # Save depth value at h_obs
        self._max_X = X_int[-1]
        self._max_den = self.get_density(self.geom.h(0, thrad))
        # Interpolate with bi-splines without smoothing
        h_intp = [self.geom.h(dl, thrad) for dl in reversed(dl_vec[1:])]
        X_intp = [X for X in reversed(X_int[1:])]
        # h2X is fitted in log-depth; lX2h is its inverse (log X -> h).
        self._s_h2X = UnivariateSpline(h_intp, np.log(X_intp), k=2, s=0.0)
        self._s_X2rho = UnivariateSpline(X_int, vec_rho_l(dl_vec), k=2, s=0.0)
        self._s_lX2h = UnivariateSpline(np.log(X_intp)[::-1],
                                         h_intp[::-1],
                                         k=2,
                                         s=0.0)

    @property
    def max_X(self):
        """Depth at altitude 0."""
        if not hasattr(self, '_max_X'):
            self.set_theta(0)
        return self._max_X

    @property
    def max_den(self):
        """Density at altitude 0."""
        if not hasattr(self, '_max_den'):
            self.set_theta(0)
        return self._max_den

    @property
    def s_h2X(self):
        """Spline for conversion from altitude to depth."""
        if not hasattr(self, '_s_h2X'):
            self.set_theta(0)
        return self._s_h2X

    @property
    def s_X2rho(self):
        """Spline for conversion from depth to density."""
        if not hasattr(self, '_s_X2rho'):
            self.set_theta(0)
        return self._s_X2rho

    @property
    def s_lX2h(self):
        """Spline for conversion from depth to altitude."""
        if not hasattr(self, '_s_lX2h'):
            self.set_theta(0)
        return self._s_lX2h

    def set_theta(self, theta_deg, force_spline_calc=False):
        """Configures geometry and initiates spline calculation for
        :math:`\\rho(X)`.

        If the option 'use_atm_cache' is enabled in the config, the
        function will check, if a corresponding spline is available
        in the cache and use it. Otherwise it will call
        :func:`calculate_density_spline`, make the function
        :func:`r_X2rho` available to the core code and store the spline
        in the cache.

        Args:
          theta_deg (float): zenith angle :math:`\\theta` at detector
          force_spline_calc (bool): forces (re-)calculation of the
                                    spline for each call
        """
        if theta_deg < 0. or theta_deg > self.max_theta:
            raise Exception('Zenith angle not in allowed range.')
        self.thrad = theta_rad(theta_deg)
        self.theta_deg = theta_deg
        self.calculate_density_spline()

    def r_X2rho(self, X):
        """Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.

        The spline `s_X2rho` is used, which was calculated or retrieved
        from cache during the :func:`set_theta` call.

        Args:
           X (float): slant depth in g/cm**2

        Returns:
           float: :math:`1/\\rho` in cm**3/g
        """
        return 1. / self.s_X2rho(X)

    def h2X(self, h):
        """Returns the depth along path as function of height above
        surface.

        The spline `s_X2rho` is used, which was calculated or retrieved
        from cache during the :func:`set_theta` call.

        Args:
           h (float): vertical height above surface in cm

        Returns:
           float: X slant depth in g/cm**2
        """
        return np.exp(self.s_h2X(h))

    def X2h(self, X):
        """Returns the height above surface as a function of slant depth
        for currently selected zenith angle.

        The spline `s_lX2h` is used, which was calculated or retrieved
        from cache during the :func:`set_theta` call.

        Args:
           X (float): slant depth in g/cm**2

        Returns:
           float h: height above surface in cm
        """
        return self.s_lX2h(np.log(X))

    def X2rho(self, X):
        """Returns the density :math:`\\rho(X)`.

        The spline `s_X2rho` is used, which was calculated or retrieved
        from cache during the :func:`set_theta` call.

        Args:
           X (float): slant depth in g/cm**2

        Returns:
           float: :math:`\\rho` in cm**3/g
        """
        return self.s_X2rho(X)

    def moliere_air(self, h_cm):
        """Returns the Moliere unit of air for US standard atmosphere. """
        return 9.3 / (self.get_density(h_cm) * 100.)

    def nref_rel_air(self, h_cm):
        """Returns the refractive index - 1 in air (density parametrization
        as in CORSIKA).
        """
        return 0.000283 * self.get_density(h_cm) / self.get_density(0)

    def gamma_cherenkov_air(self, h_cm):
        """Returns the Lorentz factor gamma of Cherenkov threshold in air (MeV).
        """
        nrel = self.nref_rel_air(h_cm)
        return (1. + nrel) / np.sqrt(2. * nrel + nrel**2)

    def theta_cherenkov_air(self, h_cm):
        """Returns the Cherenkov angle in air (degrees).
        """
        return np.arccos(1. / (1. + self.nref_rel_air(h_cm))) * 180. / np.pi
class CorsikaAtmosphere(EarthsAtmosphere):
    """Class holding the parameters of a Linsley type parameterization
    similar to the Air-Shower Monte Carlo
    `CORSIKA <https://web.ikp.kit.edu/corsika/>`_.

    The pre-defined parameters are taken from the CORSIKA
    manual. If new sets of parameters are added to :func:`init_parameters`,
    the array _thickl can be calculated using :func:`calc_thickl`.

    Attributes:
      _atm_param (numpy.array): (5x5) Stores 5 atmospheric parameters
                                _aatm, _batm, _catm, _thickl, _hlay
                                for each of the 5 layers

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """
    # Filled by init_parameters(); rows are _aatm, _batm, _catm, _thickl, _hlay.
    _atm_param = None
    def __init__(self, location, season=None):
        """Validate the (location, season) pair and load its parameter set.

        Args:
          location (str): one of the locations listed in :func:`init_parameters`
          season (str, optional): season, where the location requires one
        """
        # Whitelist of supported (location, season) combinations.
        cka_atmospheres = [
            ("USStd", None),
            ("BK_USStd", None),
            ("Karlsruhe", None),
            ("ANTARES/KM3NeT-ORCA", 'Summer'),
            ("ANTARES/KM3NeT-ORCA", 'Winter'),
            ("KM3NeT-ARCA", 'Summer'),
            ("KM3NeT-ARCA", 'Winter'),
            ("KM3NeT",None),
            ('SouthPole','December'),
            ('PL_SouthPole','January'),
            ('PL_SouthPole','August'),
        ]
        assert (location, season) in cka_atmospheres, \
            '{0}/{1} not available for CorsikaAtmsophere'.format(
                location, season
            )
        self.init_parameters(location, season)
        # Accelerated (compiled) CORSIKA atmosphere routines.
        import MCEq.geometry.corsikaatm.corsikaatm as corsika_acc
        self.corsika_acc = corsika_acc
        EarthsAtmosphere.__init__(self)
    def init_parameters(self, location, season):
        """Initializes :attr:`_atm_param`. Parameters from ANTARES/KM3NET
        are based on the work of <NAME>
        (`see this issue <https://github.com/afedynitch/MCEq/issues/12>`_)

        +---------------------+-------------------+------------------------------+
        | location            | CORSIKA Table     | Description/season           |
        +=====================+===================+==============================+
        | "USStd"             | 23                | US Standard atmosphere       |
        +---------------------+-------------------+------------------------------+
        | "BK_USStd"          | 37                | <NAME>'s USStd              |
        +---------------------+-------------------+------------------------------+
        | "Karlsruhe"         | 24                | AT115 / Karlsruhe            |
        +---------------------+-------------------+------------------------------+
        | "SouthPole"         | 26 and 28         | MSIS-90-E for Dec and June   |
        +---------------------+-------------------+------------------------------+
        |"PL_SouthPole"       | 29 and 30         | P. Lipari's Jan and Aug      |
        +---------------------+-------------------+------------------------------+
        |"ANTARES/KM3NeT-ORCA"|    NA             | PhD T. Heid                  |
        +---------------------+-------------------+------------------------------+
        | "KM3NeT-ARCA"       |    NA             | PhD T. Heid                  |
        +---------------------+-------------------+------------------------------+

        Args:
          location (str): see table
          season (str, optional): choice of season for supported locations

        Raises:
          Exception: if parameter set not available
        """
        # Each parameter set: 4 exponential layers (a + b*exp(-h/c)) plus a
        # top layer with linear depth; _thickl are the layer transition
        # depths, _hlay the transition altitudes in cm.
        _aatm, _batm, _catm, _thickl, _hlay = None, None, None, None, None
        if location == "USStd":
            _aatm = np.array([-186.5562, -94.919, 0.61289, 0.0, 0.01128292])
            _batm = np.array([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0])
            _catm = np.array([994186.38, 878153.55, 636143.04, 772170., 1.0e9])
            _thickl = np.array(
                [1036.102549, 631.100309, 271.700230, 3.039494, 0.001280])
            _hlay = np.array([0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
        elif location == "BK_USStd":
            _aatm = np.array([
                -149.801663, -57.932486, 0.63631894, 4.3545369e-4, 0.01128292
            ])
            _batm = np.array([1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0])
            _catm = np.array(
                [954248.34, 800005.34, 629568.93, 737521.77, 1.0e9])
            _thickl = np.array(
                [1033.804941, 418.557770, 216.981635, 4.344861, 0.001280])
            _hlay = np.array([0.0, 7.0e5, 1.14e6, 3.7e6, 1.0e7])
        elif location == "Karlsruhe":
            _aatm = np.array(
                [-118.1277, -154.258, 0.4191499, 5.4094056e-4, 0.01128292])
            _batm = np.array([1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0])
            _catm = np.array([919546., 963267.92, 614315., 739059.6, 1.0e9])
            _thickl = np.array(
                [1055.858707, 641.755364, 272.720974, 2.480633, 0.001280])
            _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
        elif location == "KM3NeT":  # averaged over detector and season
            _aatm = np.array([-141.31449999999998, -8.256029999999999, 0.6132505, -0.025998975, 0.4024275])
            _batm = np.array([1153.0349999999999, 1263.3325, 1257.0724999999998, 404.85974999999996, 1.0])
            _catm = np.array([967990.75, 668591.75, 636790.0, 814070.75, 21426175.0])
            _thickl = np.array([1011.8521512499999, 275.84507575000003, 51.0230705, 2.983134, 0.21927724999999998])
            _hlay = np.array([0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0])
        elif location == "ANTARES/KM3NeT-ORCA":
            if season == 'Summer':
                _aatm = np.array([-158.85, -5.38682, 0.889893, -0.0286665, 0.50035])
                _batm = np.array([1145.62, 1176.79, 1248.92, 415.543, 1.0])
                _catm = np.array([998469.0, 677398.0, 636790.0, 823489.0, 16090500.0])
                _thickl = np.array([986.951713, 306.4668, 40.546793, 4.288721, 0.277182])
                _hlay = np.array([0, 9.0e5, 22.0e5, 38.0e5, 68.2e5])
            elif season == 'Winter':
                _aatm = np.array([-132.16, -2.4787, 0.298031, -0.0220264, 0.348021])
                _batm = np.array([1120.45, 1203.97, 1163.28, 360.027, 1.0])
                _catm = np.array([933697.0, 643957.0, 636790.0, 804486.0, 23109000.0])
                _thickl = np.array([988.431172, 273.033464, 37.185105, 1.162987, 0.192998])
                _hlay = np.array([0, 9.5e5, 22.0e5, 47.0e5, 68.2e5])
        elif location == "KM3NeT-ARCA":
            if season == 'Summer':
                _aatm = np.array([-157.857, -28.7524, 0.790275, -0.0286999, 0.481114])
                _batm = np.array([1190.44, 1171.0, 1344.78, 445.357, 1.0])
                _catm = np.array([1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0])
                _thickl = np.array([1032.679434, 328.978681, 80.601135, 4.420745, 0.264112])
                _hlay = np.array([0, 9.0e5, 18.0e5, 38.0e5, 68.2e5])
            elif season == 'Winter':
                _aatm = np.array([-116.391, 3.5938, 0.474803, -0.0246031, 0.280225])
                _batm = np.array([1155.63, 1501.57, 1271.31, 398.512, 1.0])
                _catm = np.array([933697.0, 594398.0, 636790.0, 810924.0, 29618400.0])
                _thickl = np.array([1039.346286, 194.901358, 45.759249, 2.060083, 0.142817])
                _hlay = np.array([0, 12.25e5, 21.25e5, 43.0e5, 70.5e5])
        elif location == 'SouthPole':
            if season == 'December':
                _aatm = np.array(
                    [-128.601, -39.5548, 1.13088, -0.00264960, 0.00192534])
                _batm = np.array([1139.99, 1073.82, 1052.96, 492.503, 1.0])
                _catm = np.array(
                    [861913., 744955., 675928., 829627., 5.8587010e9])
                _thickl = np.array(
                    [1011.398804, 588.128367, 240.955360, 3.964546, 0.000218])
                _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
            elif season == "June":
                _aatm = np.array(
                    [-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667])
                _batm = np.array([1183.70, 1108.06, 1424.02, 207.595, 1.0])
                _catm = np.array(
                    [875221., 753213., 545846., 793043., 5.9787908e9])
                _thickl = np.array(
                    [1020.370363, 586.143464, 228.374393, 1.338258, 0.000214])
                _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
            else:
                raise Exception('CorsikaAtmosphere(): Season "' + season +
                                '" not parameterized for location SouthPole.')
        elif location == 'PL_SouthPole':
            if season == 'January':
                # NOTE(review): -7930635 looks like a possible typo for
                # -79.30635 — verify against CORSIKA table 29.
                _aatm = np.array(
                    [-113.139, -7930635, -54.3888, -0.0, 0.00421033])
                _batm = np.array([1133.10, 1101.20, 1085.00, 1098.00, 1.0])
                _catm = np.array(
                    [861730., 826340., 790950., 682800., 2.6798156e9])
                _thickl = np.array([
                    1019.966898, 718.071682, 498.659703, 340.222344, 0.000478
                ])
                _hlay = np.array([0.0, 2.67e5, 5.33e5, 8.0e5, 1.0e7])
            elif season == "August":
                _aatm = np.array(
                    [-59.0293, -21.5794, -7.14839, 0.0, 0.000190175])
                _batm = np.array([1079.0, 1071.90, 1182.0, 1647.1, 1.0])
                _catm = np.array(
                    [764170., 699910., 635650., 551010., 59.329575e9])
                _thickl = np.array(
                    [1019.946057, 391.739652, 138.023515, 43.687992, 0.000022])
                _hlay = np.array([0.0, 6.67e5, 13.33e5, 2.0e6, 1.0e7])
            else:
                raise Exception('CorsikaAtmosphere(): Season "' + season +
                                '" not parameterized for location SouthPole.')
        else:
            raise Exception("CorsikaAtmosphere:init_parameters(): Location " +
                            str(location) + " not parameterized.")
        self._atm_param = np.array([_aatm, _batm, _catm, _thickl, _hlay])
        self.location, self.season = location, season
        # Clear cached theta value to force spline recalculation
        self.theta_deg = None
def depth2height(self, x_v):
    """Convert column/vertical depth to height.

    Walks the five CORSIKA layers from the ground up and inverts the
    parameterization of the first layer whose transition depth is
    exceeded. Layers 0-3 are exponential in height; the topmost layer
    is linear in depth.

    Args:
        x_v (float): column depth :math:`X_v` in g/cm**2

    Returns:
        float: height in cm
    """
    _aatm, _batm, _catm, _thickl, _hlay = self._atm_param
    # Exponential layers: X(h) = a + b * exp(-h / c)  =>  invert for h.
    for lidx in range(4):
        if x_v >= _thickl[lidx + 1]:
            return _catm[lidx] * np.log(_batm[lidx] / (x_v - _aatm[lidx]))
    # Above the last transition the profile is linear in depth.
    return (_aatm[4] - x_v) * _catm[4]
def get_density(self, h_cm):
    """Return the density of air in g/cm**3 at height ``h_cm``.

    Delegates to the accelerated module function
    :func:`corsika_get_density` with the current parameter set.

    Args:
        h_cm (float): height in cm

    Returns:
        float: density :math:`\\rho(h_{cm})` in g/cm**3
    """
    atm_par = self._atm_param
    return self.corsika_acc.corsika_get_density(h_cm, *atm_par)
def get_mass_overburden(self, h_cm):
    """Return the mass overburden of the atmosphere in g/cm**2.

    Delegates to the accelerated module function
    :func:`corsika_get_m_overburden` with the current parameter set.

    Args:
        h_cm (float): height in cm

    Returns:
        float: column depth :math:`T(h_{cm})` in g/cm**2
    """
    atm_par = self._atm_param
    return self.corsika_acc.corsika_get_m_overburden(h_cm, *atm_par)
def rho_inv(self, X, cos_theta):
    """Return the reciprocal density in cm**3/g (planar approximation).

    Delegates to the accelerated module function :func:`planar_rho_inv`.

    Args:
        X (float): slant depth in g/cm**2
        cos_theta (float): cosine of the zenith angle

    Returns:
        float: :math:`\\frac{1}{\\rho}(X,\\cos{\\theta})` in cm**3/g
    """
    atm_par = self._atm_param
    return self.corsika_acc.planar_rho_inv(X, cos_theta, *atm_par)
def calc_thickl(self):
    """Calculate the layer transition depths used by :func:`depth2height`.

    The analytical inversion of the CORSIKA parameterization relies on
    knowing the depth :math:`X` at which transitions between
    layers/exponentials occur. Each entry is obtained by integrating the
    density profile from a layer boundary up to the top of the
    atmosphere (112.8 km).

    Example:
        Create a new set of parameters in :func:`init_parameters`
        inserting arbitrary values in the _thickl array::

          $ cor_atm = CorsikaAtmosphere(new_location, new_season)
          $ cor_atm.calc_thickl()

        Replace _thickl values with printout.
    """
    from scipy.integrate import quad
    thickl = [
        '{0:4.6f}'.format(
            quad(self.get_density, h_low, 112.8e5, epsrel=1e-4)[0])
        for h_low in self._atm_param[4]
    ]
    info(5, '_thickl = np.array([' + ', '.join(thickl) + '])')
    return thickl
class IsothermalAtmosphere(EarthsAtmosphere):
    """Isothermal model of the atmosphere.

    This model is widely used in semi-analytical calculations. The
    isothermal approximation is valid in a certain range of altitudes and
    usually one adjusts the parameters to match a more realistic density
    profile at altitudes between 10 - 30 km, where the high energy muon
    production rate peaks. Such parametrizations are given in the book
    "Cosmic Rays and Particle Physics", Gaisser, Engel and Resconi (2016).
    The default values are from <NAME>, <NAME>, and <NAME>,
    Astropart. Physics 5, 309 (1996).

    Args:
      location (str): no effect
      season (str): no effect
      hiso_km (float): isothermal scale height in km
      X0 (float): ground level overburden in g/cm**2
    """

    def __init__(self, location, season, hiso_km=6.3, X0=1300.):
        # Scale height is kept internally in cm.
        self.hiso_cm = 1e5 * hiso_km
        self.X0 = X0
        self.location = location
        self.season = season
        EarthsAtmosphere.__init__(self)

    def get_density(self, h_cm):
        """ Returns the density of air in g/cm**3.

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`\\rho(h_{cm})` in g/cm**3
        """
        # rho(h) = (X0 / h_iso) * exp(-h / h_iso)
        rho_ground = self.X0 / self.hiso_cm
        return rho_ground * np.exp(-h_cm / self.hiso_cm)

    def get_mass_overburden(self, h_cm):
        """ Returns the mass overburden in atmosphere in g/cm**2.

        Args:
          h_cm (float): height in cm

        Returns:
          float: column depth :math:`T(h_{cm})` in g/cm**2
        """
        # T(h) = X0 * exp(-h / h_iso)
        return self.X0 * np.exp(-h_cm / self.hiso_cm)
class MSIS00Atmosphere(EarthsAtmosphere):
    """Wrapper class for a python interface to the NRLMSISE-00 model.

    `NRLMSISE-00 <http://ccmc.gsfc.nasa.gov/modelweb/atmos/nrlmsise00.html>`_
    is an empirical model of the Earth's atmosphere. It is available as
    a FORTRAN 77 code or as a version translated into
    `C by <NAME> <http://www.brodo.de/english/pub/nrlmsise/>`_.
    Here a PYTHON wrapper has been used.

    Attributes:
      _msis : NRLMSISE-00 python wrapper object handler

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self,
                 location,
                 season=None,
                 doy=None,
                 use_loc_altitudes=False):
        from MCEq.geometry.nrlmsise00_mceq import cNRLMSISE00
        # Locations for which the wrapper knows geo coordinates.
        supported_locations = (
            'SouthPole', 'Karlsruhe', 'Geneva', 'Tokyo', 'SanGrasso',
            'TelAviv', 'KSC', 'SoudanMine', 'Tsukuba', 'LynnLake',
            'PeaceRiver', 'FtSumner')
        assert location in supported_locations, \
            '{0} not available for MSIS00Atmosphere'.format(
                location
            )
        self._msis = cNRLMSISE00()
        self.init_parameters(location, season, doy, use_loc_altitudes)
        EarthsAtmosphere.__init__(self)

    def init_parameters(self, location, season, doy, use_loc_altitudes):
        """Sets location and season in :class:`NRLMSISE-00`.

        Translates location and season into day of year
        and geo coordinates.

        Args:
          location (str): Supported are "SouthPole" and "Karlsruhe"
          season (str): months of the year: January, February, etc.
          doy (int): day of year, used when ``season`` is None
          use_loc_altitudes (bool): If to use default altitudes from location
        """
        self._msis.set_location(location)
        # Either a named month or an explicit day of year selects the season.
        if season is None:
            self._msis.set_doy(doy)
        else:
            self._msis.set_season(season)
        self.location, self.season = location, season
        # Invalidate cached zenith angle to force spline recalculation.
        self.theta_deg = None
        if use_loc_altitudes:
            info(0, 'Using loc altitude', self._msis.alt_surface, 'cm')
            self.geom.h_obs = self._msis.alt_surface

    def get_density(self, h_cm):
        """ Returns the density of air in g/cm**3.

        Wraps around ctypes calls to the NRLMSISE-00 C library.

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`\\rho(h_{cm})` in g/cm**3
        """
        return self._msis.get_density(h_cm)

    def set_location(self, location):
        """ Changes the MSIS location by strings defined in _msis_wrapper.

        Args:
          location (str): location as defined in :class:`NRLMSISE-00.`
        """
        self._msis.set_location(location)

    def set_season(self, month):
        """ Changes the MSIS season by month strings defined in _msis_wrapper.

        Args:
          month (str): month as defined in :class:`NRLMSISE-00.`
        """
        self._msis.set_season(month)

    def set_doy(self, day_of_year):
        """ Changes the MSIS season by day of year.

        Args:
          day_of_year (int): 1. Jan.=0, 1.Feb=32
        """
        self._msis.set_doy(day_of_year)

    def get_temperature(self, h_cm):
        """ Returns the temperature of air in K.

        Wraps around ctypes calls to the NRLMSISE-00 C library.

        Args:
          h_cm (float): height in cm

        Returns:
          float: temperature :math:`T(h_{cm})` in K
        """
        return self._msis.get_temperature(h_cm)
class AIRSAtmosphere(EarthsAtmosphere):
    """Interpolation class for tabulated atmospheres.

    This class is intended to read preprocessed AIRS Satellite data.

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self, location, season, extrapolate=True, *args, **kwargs):
        # Preprocessed AIRS tables are only available for the South Pole.
        if location != 'SouthPole':
            raise Exception(self.__class__.__name__ +
                            "(): Only South Pole location supported. " +
                            location)
        # If True, extend the tabulated profiles above their altitude range
        # with the NRLMSISE-00 model (see init_parameters).
        self.extrapolate = extrapolate
        # Day of year of the first day of each month (1-based).
        self.month2doy = {
            'January': 1,
            'February': 32,
            'March': 60,
            'April': 91,
            'May': 121,
            'June': 152,
            'July': 182,
            'August': 213,
            'September': 244,
            'October': 274,
            'November': 305,
            'December': 335
        }
        self.season = season
        self.init_parameters(location, **kwargs)
        EarthsAtmosphere.__init__(self)

    def init_parameters(self, location, **kwargs):
        """Loads tables and prepares interpolation.

        Reads the daily AIRS/AMSU density, temperature and altitude
        tables, optionally blends them with NRLMSISE-00 above the
        tabulated altitude range, and stores per-day interpolation
        tables keyed by (year, day-of-year).

        Args:
          location (str): supported is only "SouthPole"
          table_path (str, optional, via kwargs): base directory of the
            preprocessed tables; defaults to a hard-coded user path.
        """
        # from time import strptime
        from matplotlib.dates import datestr2num, num2date
        from os import path

        def bytespdate2num(b):
            # np.loadtxt hands byte strings to converters; decode and
            # convert to matplotlib date numbers.
            return datestr2num(b.decode('utf-8'))

        # NOTE(review): `join` is not imported in this method; it is
        # presumably os.path.join imported at module level -- confirm.
        data_path = (join(
            path.expanduser('~'),
            'OneDrive/Dokumente/projects/atmospheric_variations/'))
        if 'table_path' in kwargs:
            data_path = kwargs['table_path']
        files = [('dens', 'airs_amsu_dens_180_daily.txt'),
                 ('temp', 'airs_amsu_temp_180_daily.txt'),
                 ('alti', 'airs_amsu_alti_180_daily.txt')]
        data_collection = {}

        # limit SouthPole pressure to <= 600
        min_press_idx = 4

        # Row indices of 1 June 2010/2011 delimiting the IC79 run period.
        IC79_idx_1 = None
        IC79_idx_2 = None
        for d_key, fname in files:
            fname = data_path + 'tables/' + fname
            # tabf = open(fname).read()
            # Column 0 is the date, columns 2..26 hold the per-pressure
            # level values.
            tab = np.loadtxt(fname,
                             converters={0: bytespdate2num},
                             usecols=[0] + list(range(2, 27)))
            # with open(fname, 'r') as f:
            #     comline = f.readline()
            # p_levels = [
            #     float(s.strip()) for s in comline.split(' ')[3:] if s != ''
            # ][min_press_idx:]
            dates = num2date(tab[:, 0])
            for di, date in enumerate(dates):
                if date.month == 6 and date.day == 1:
                    if date.year == 2010:
                        IC79_idx_1 = di
                    elif date.year == 2011:
                        IC79_idx_2 = di
            surf_val = tab[:, 1]
            cols = tab[:, min_press_idx + 2:]
            data_collection[d_key] = (dates, surf_val, cols)

        # Per-day (year, doy) -> (heights, densities/temperatures) tables.
        self.interp_tab_d = {}
        self.interp_tab_t = {}
        self.dates = {}
        dates = data_collection['alti'][0]
        # MSIS model used to extend the profiles above the tabulated range.
        msis = MSIS00Atmosphere(location, 'January')
        for didx, date in enumerate(dates):
            h_vec = np.array(data_collection['alti'][2][didx, :] * 1e2)
            d_vec = np.array(data_collection['dens'][2][didx, :])
            t_vec = np.array(data_collection['temp'][2][didx, :])
            if self.extrapolate:
                # Extrapolate using msis
                h_extra = np.linspace(h_vec[-1], self.geom.h_atm * 1e2, 250)
                msis._msis.set_doy(self._get_y_doy(date)[1] - 1)
                msis_extra_d = np.array([msis.get_density(h) for h in h_extra])
                msis_extra_t = np.array(
                    [msis.get_temperature(h) for h in h_extra])

                # Interpolate last few altitude bins
                ninterp = 5
                # Blend the last `ninterp` tabulated bins with MSIS values
                # using exponential weights to smooth the junction.
                for ni in range(ninterp):
                    cl = (1 - np.exp(-ninterp + ni + 1))
                    ch = (1 - np.exp(-ni))
                    norm = 1. / (cl + ch)
                    d_vec[-ni -
                          1] = (d_vec[-ni - 1] * cl * norm +
                                msis.get_density(h_vec[-ni - 1]) * ch * norm)
                    t_vec[-ni - 1] = (
                        t_vec[-ni - 1] * cl * norm +
                        msis.get_temperature(h_vec[-ni - 1]) * ch * norm)

                # Merge the two datasets
                h_vec = np.hstack([h_vec[:-1], h_extra])
                d_vec = np.hstack([d_vec[:-1], msis_extra_d])
                t_vec = np.hstack([t_vec[:-1], msis_extra_t])

            self.interp_tab_d[self._get_y_doy(date)] = (h_vec, d_vec)
            self.interp_tab_t[self._get_y_doy(date)] = (h_vec, t_vec)
            self.dates[self._get_y_doy(date)] = date

        self.IC79_start = self._get_y_doy(dates[IC79_idx_1])
        self.IC79_end = self._get_y_doy(dates[IC79_idx_2])
        self.IC79_days = (dates[IC79_idx_2] - dates[IC79_idx_1]).days
        self.location = location
        if self.season is None:
            self.set_IC79_day(0)
        else:
            self.set_season(self.season)
        # Clear cached value to force spline recalculation
        self.theta_deg = None

    def set_date(self, year, doy):
        """Selects the tabulated profile for a given year and day of year."""
        self.h, self.dens = self.interp_tab_d[(year, doy)]
        _, self.temp = self.interp_tab_t[(year, doy)]
        self.date = self.dates[(year, doy)]
        # Compatibility with caching
        self.season = self.date

    def _set_doy(self, doy, year=2010):
        # Internal helper: select a profile without touching self.season.
        self.h, self.dens = self.interp_tab_d[(year, doy)]
        _, self.temp = self.interp_tab_t[(year, doy)]
        self.date = self.dates[(year, doy)]

    def set_season(self, month):
        """Selects the profile of the first day of the given month."""
        self.season = month
        self._set_doy(self.month2doy[month])
        self.season = month

    def set_IC79_day(self, IC79_day):
        """Selects the profile for the given day within the IC79 run.

        Args:
          IC79_day (int): days since the start of the IC79 run

        Raises:
          Exception: if IC79_day exceeds the length of the run.
        """
        import datetime
        if IC79_day > self.IC79_days:
            raise Exception(self.__class__.__name__ +
                            "::set_IC79_day(): IC79_day above range.")
        target_day = self._get_y_doy(self.dates[self.IC79_start] +
                                     datetime.timedelta(days=IC79_day))
        info(2, 'setting IC79_day', IC79_day)
        self.h, self.dens = self.interp_tab_d[target_day]
        _, self.temp = self.interp_tab_t[target_day]
        self.date = self.dates[target_day]
        # Compatibility with caching
        self.season = self.date

    def _get_y_doy(self, date):
        # (year, day-of-year) tuple used as table key.
        return date.timetuple().tm_year, date.timetuple().tm_yday

    def get_density(self, h_cm):
        """ Returns the density of air in g/cm**3.

        Interpolates table at requested value for previously set
        year and day of year (doy).

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`\\rho(h_{cm})` in g/cm**3
        """
        # Interpolate in log-space; heights above the table yield NaN.
        ret = np.exp(np.interp(h_cm, self.h, np.log(self.dens)))
        try:
            ret[h_cm > self.h[-1]] = np.nan
        except TypeError:
            # h_cm was a scalar, not an array.
            if h_cm > self.h[-1]:
                return np.nan
        return ret

    def get_temperature(self, h_cm):
        """ Returns the temperature in K.

        Interpolates table at requested value for previously set
        year and day of year (doy).

        Args:
          h_cm (float): height in cm

        Returns:
          float: temperature :math:`T(h_{cm})` in K
        """
        # Interpolate in log-space; heights above the table yield NaN.
        ret = np.exp(np.interp(h_cm, self.h, np.log(self.temp)))
        try:
            ret[h_cm > self.h[-1]] = np.nan
        except TypeError:
            # h_cm was a scalar, not an array.
            if h_cm > self.h[-1]:
                return np.nan
        return ret
class MSIS00IceCubeCentered(MSIS00Atmosphere):
    """Extension of :class:`MSIS00Atmosphere` which couples the latitude
    setting with the zenith angle of the detector.

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self, location, season):
        if location != 'SouthPole':
            info(2, 'location forced to the South Pole')
            location = 'SouthPole'
        MSIS00Atmosphere.__init__(self, location, season)
        # Accept upgoing directions as well.
        self.max_theta = 180.

    def latitude(self, det_zenith_deg):
        """ Returns the geographic latitude of the shower impact point.

        Assumes a spherical earth. The detector is 1948m under the
        surface.

        Credits: geometry fomulae by <NAME>, DESY Zeuthen.

        Args:
          det_zenith_deg (float): zenith angle at detector in degrees

        Returns:
          float: latitude of the impact point in degrees
        """
        r_e = self.geom.r_E
        depth = 1948  # m
        zen_rad = det_zenith_deg / 180. * np.pi
        cos_z = np.cos(zen_rad)
        sin_z = np.sin(zen_rad)
        # Distance from the detector to the surface impact point.
        proj = (r_e - depth) * cos_z
        dist = np.sqrt(2. * r_e * depth + proj**2 - depth**2) - proj
        return -90. + 180. / np.pi * np.arctan2(dist * sin_z,
                                                r_e - depth + dist * cos_z)

    def set_theta(self, theta_deg, force_spline_calc=True):
        """Sets the zenith angle and moves the MSIS latitude to the
        corresponding shower impact point."""
        impact_lat = self.latitude(theta_deg)
        self._msis.set_location_coord(longitude=0., latitude=impact_lat)
        info(
            1, 'latitude = {0:5.2f} for zenith angle = {1:5.2f}'.format(
                impact_lat, theta_deg))
        if theta_deg > 90.:
            # Mirror upgoing directions onto the downgoing hemisphere.
            info(
                1, 'theta = {0:5.2f} below horizon. using theta = {1:5.2f}'.
                format(theta_deg, 180. - theta_deg))
            theta_deg = 180. - theta_deg
        MSIS00Atmosphere.set_theta(self,
                                   theta_deg,
                                   force_spline_calc=force_spline_calc)
class GeneralizedTarget(object):
    """This class provides a way to run MCEq on piece-wise constant
    one-dimensional density profiles.

    The default values for the average density are taken from
    config file variables `len_target`, `env_density` and `env_name`.
    The density profile has to be built by calling subsequently
    :func:`add_material`. The current composition of the target
    can be checked with :func:`draw_materials` or :func:`print_table`.

    Note:
      If the target is not air or hydrogen, the result is approximate,
      since secondary particle yields are provided for nucleon-air or
      proton-proton collisions. Depending on this choice one has to
      adjust the nuclear mass in :mod:`mceq_config`.

    Args:
      len_target (float): total length of the target in cm (the default
        converts ``config.len_target`` given in m)
      env_density (float): density of the default material in g/cm**3
      env_name (str): title for this environment
    """

    def __init__(
            self,
            len_target=config.len_target * 1e2,  # cm
            env_density=config.env_density,  # g/cm3
            env_name=config.env_name):
        self.len_target = len_target
        self.env_density = env_density
        self.env_name = env_name
        self.reset()

    def reset(self):
        """Resets material list to defaults."""
        # Each row is [start_cm, end_cm, density, name].
        self.mat_list = [[
            0., self.len_target, self.env_density, self.env_name
        ]]
        self._update_variables()

    def _update_variables(self):
        """Updates internal variables. Not needed to call by user."""
        # Transpose mat_list into per-column tuples and drop the names.
        self.start_bounds, self.end_bounds, \
            self.densities = list(zip(*self.mat_list))[:-1]
        self.densities = np.array(self.densities)
        self.start_bounds = np.array(self.start_bounds)
        self.end_bounds = np.array(self.end_bounds)
        self._max_den = np.max(self.densities)
        self._integrate()

    def set_length(self, new_length_cm):
        """Updates the total length of the target.

        Args:
          new_length_cm (float): new total length in cm

        Raises:
          Exception: if the new length ends before the start of the
            last material.
        """
        if new_length_cm < self.mat_list[-1][0]:
            raise Exception(
                "GeneralizedTarget::set_length(): " +
                "can not set length below lower boundary of last " +
                "material.")
        self.len_target = new_length_cm
        self.mat_list[-1][1] = new_length_cm
        self._update_variables()

    def add_material(self, start_position_cm, density, name):
        """Adds one additional material to a composite target.

        Args:
          start_position_cm (float): position where the material starts
            counted from target origin l|X = 0 in cm
          density (float): density of material in g/cm**3
          name (str): any user defined name

        Raises:
          Exception: If requested start_position_cm is not properly defined.
        """
        if start_position_cm < 0. or start_position_cm > self.len_target:
            raise Exception("GeneralizedTarget::add_material(): " +
                            "distance exceeds target dimensions.")
        # If the last entry is still the default environment starting at
        # the same position, replace it instead of appending.
        elif (start_position_cm == self.mat_list[-1][0]
              and self.mat_list[-1][-1] == self.env_name):
            self.mat_list[-1] = [
                start_position_cm, self.len_target, density, name
            ]
        elif start_position_cm <= self.mat_list[-1][0]:
            raise Exception("GeneralizedTarget::add_material(): " +
                            "start_position_cm is ahead of previous material.")
        else:
            # Truncate the previous material and append the new one up to
            # the end of the target.
            self.mat_list[-1][1] = start_position_cm
            self.mat_list.append(
                [start_position_cm, self.len_target, density, name])

        info(2,
             ("{0}::add_material(): Material '{1}' added. " +
              "location on path {2} to {3} m").format(self.__class__.__name__,
                                                      name,
                                                      self.mat_list[-1][0],
                                                      self.mat_list[-1][1]))
        self._update_variables()

    def set_theta(self, *args):
        """This method is not defined for the generalized target. The purpose
        is to catch usage errors.

        Raises:
          NotImplementedError: always
        """
        raise NotImplementedError('GeneralizedTarget::set_theta(): Method' +
                                  'not defined for this target class.')

    def _integrate(self):
        """Walks through material list and computes the depth along the
        position (path). Computes the spline for the position-depth relation
        and determines the maximum depth for the material selection.

        Method does not need to be called by the user, instead the class
        calls it when necessary.
        """
        from scipy.interpolate import UnivariateSpline
        self.density_depth = None
        # Piece-wise constant density => depth is piece-wise linear; the
        # knots are the material boundaries.
        self.knots = [0.]
        self.X_int = [0.]
        for start, end, density, _ in self.mat_list:
            self.knots.append(end)
            self.X_int.append(density * (end - start) + self.X_int[-1])
        # k=1, s=0: exact linear interpolation through the knots.
        self._s_X2h = UnivariateSpline(self.X_int, self.knots, k=1, s=0.)
        self._s_h2X = UnivariateSpline(self.knots, self.X_int, k=1, s=0.)
        self._max_X = self.X_int[-1]

    @property
    def s_X2h(self):
        """Spline for depth at distance."""
        if not hasattr(self, '_s_X2h'):
            self._integrate()
        return self._s_X2h

    @property
    def s_h2X(self):
        """Spline for distance at depth."""
        if not hasattr(self, '_s_h2X'):
            self._integrate()
        return self._s_h2X

    @property
    def max_X(self):
        """Maximal depth of target."""
        if not hasattr(self, '_max_X'):
            self._integrate()
        return self._max_X

    def get_density_X(self, X):
        """Returns the density in g/cm**3 as a function of depth X.

        Args:
          X (float): depth in g/cm**2

        Returns:
          float: density in g/cm**3

        Raises:
          Exception: If requested depth exceeds target.
        """
        X = np.atleast_1d(X)
        # allow for some small constant extrapolation for odepack solvers
        if X[-1] > self.max_X and X[-1] < self.max_X * 1.003:
            X[-1] = self.max_X
        if np.min(X) < 0. or np.max(X) > self.max_X:
            # return self.get_density(self.s_X2h(self.max_X))
            info(0, 'Depth {0:4.3f} exceeds target dimensions {1:4.3f}'.format(
                np.max(X), self.max_X
            ))
            raise Exception('Invalid input')
        return self.get_density(self.s_X2h(X))

    def r_X2rho(self, X):
        """Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.

        Args:
          X (float): slant depth in g/cm**2

        Returns:
          float: :math:`1/\\rho` in cm**3/g
        """
        return 1. / self.get_density_X(X)

    def get_density(self, l_cm):
        """Returns the density in g/cm**3 as a function of position l in cm.

        Args:
          l_cm (float): position in target in cm

        Returns:
          float: density in g/cm**3

        Raises:
          Exception: If requested position exceeds target length.
        """
        l_cm = np.atleast_1d(l_cm)
        res = np.zeros_like(l_cm)
        if np.min(l_cm) < 0 or np.max(l_cm) > self.len_target:
            raise Exception("GeneralizedTarget::get_density(): " +
                            "requested position exceeds target legth.")
        for i, li in enumerate(l_cm):
            # Linear scan for the material whose bounds contain li.
            bi = 0
            while not (li >= self.start_bounds[bi]
                       and li <= self.end_bounds[bi]):
                bi += 1
            res[i] = self.densities[bi]
        return res

    def draw_materials(self, axes=None, logx=False):
        """Makes a plot of depth and density profile as a function
        of the target length. The list of materials is printed out, too.

        Args:
          axes (plt.axes, optional): handle for matplotlib axes
          logx (bool, optional): use a logarithmic distance axis
        """
        import matplotlib.pyplot as plt
        if not axes:
            plt.figure(figsize=(5, 2.5))
            axes = plt.gca()
        ymax = np.max(self.X_int) * 1.01
        for _, mat in enumerate(self.mat_list):
            xstart = mat[0]
            xend = mat[1]
            # Shade each material; darker shade = denser material,
            # clamped to [0, 1].
            alpha = 0.188 * mat[2] / max(self.densities) + 0.248
            if alpha > 1:
                alpha = 1.
            elif alpha < 0.:
                alpha = 0.
            axes.fill_between((xstart, xend), (ymax, ymax), (0., 0.),
                              label=mat[2],
                              facecolor='grey',
                              alpha=alpha)
            # axes.text(0.5e-2 * (xstart + xend), 0.5 * ymax, str(nm))
        # Overlay the cumulative depth X(l).
        axes.plot([xl for xl in self.knots], self.X_int, lw=1.7, color='r')
        if logx:
            axes.set_xscale('log', nonposx='clip')
        axes.set_ylim(0., ymax)
        axes.set_xlabel('distance in target (cm)')
        axes.set_ylabel(r'depth X (g/cm$^2)$')
        self.print_table(min_dbg_lev=2)

    def print_table(self, min_dbg_lev=0):
        """Prints table of materials to standard output.

        Args:
          min_dbg_lev (int, optional): minimum debug level at which the
            table is printed (passed through to ``info``)
        """
        templ = '{0:^3} | {1:15} | {2:^9.3g} | {3:^9.3g} | {4:^8.5g}'
        info(
            min_dbg_lev,
            '********************* List of materials ***********************',
            no_caller=True)
        head = '{0:3} | {1:15} | {2:9} | {3:9} | {4:9}'.format(
            'no', 'name', 'start [cm]', 'end [cm]', 'density [g/cm**3]')
        info(min_dbg_lev, '-' * len(head), no_caller=True)
        info(min_dbg_lev, head, no_caller=True)
        info(min_dbg_lev, '-' * len(head), no_caller=True)
        for nm, mat in enumerate(self.mat_list):
            info(min_dbg_lev,
                 templ.format(nm, mat[3], mat[0], mat[1], mat[2]),
                 no_caller=True)
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Demo 1: density vs. depth for all parameterized CORSIKA atmospheres.
    plt.figure(figsize=(5, 4))
    plt.title('CORSIKA atmospheres')
    cka_atmospheres = [
        ("USStd", None),
        ("BK_USStd", None),
        ("Karlsruhe", None),
        ("ANTARES/KM3NeT-ORCA", 'Summer'),
        ("ANTARES/KM3NeT-ORCA", 'Winter'),
        ("KM3NeT-ARCA", 'Summer'),
        ("KM3NeT-ARCA", 'Winter'),
        ("KM3NeT", None),
        ('SouthPole', 'December'),
        ('PL_SouthPole', 'January'),
        ('PL_SouthPole', 'August'),
    ]
    cka_surf_100 = []
    for loc, season in cka_atmospheres:
        atm = CorsikaAtmosphere(loc, season)
        atm.set_theta(0.0)
        depth_grid = np.linspace(0, atm.max_X, 5000)
        if season is not None:
            curve_label = '{0}/{1}'.format(loc, season)
        else:
            curve_label = '{0}'.format(loc)
        plt.plot(depth_grid,
                 1 / atm.r_X2rho(depth_grid),
                 lw=1.5,
                 label=curve_label)
        # Record (total depth, density at X=100) for each atmosphere.
        cka_surf_100.append((atm.max_X, 1. / atm.r_X2rho(100.)))
    print(cka_surf_100)
    plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
    plt.xlabel(r'Depth (g/cm$^2$)')
    plt.legend(loc='upper left')
    plt.tight_layout()

    # Demo 2: the same for NRLMSISE-00 at all supported locations (January).
    plt.figure(figsize=(5, 4))
    plt.title('NRLMSISE-00 atmospheres')
    msis_atmospheres = [(loc, "January") for loc in (
        'SouthPole', 'Karlsruhe', 'Geneva', 'Tokyo', 'SanGrasso', 'TelAviv',
        'KSC', 'SoudanMine', 'Tsukuba', 'LynnLake', 'PeaceRiver', 'FtSumner')]
    msis_surf_100 = []
    for loc, season in msis_atmospheres:
        atm = MSIS00Atmosphere(loc, season)
        atm.set_theta(0.0)
        depth_grid = np.linspace(0, atm.max_X, 5000)
        plt.plot(depth_grid,
                 1 / atm.r_X2rho(depth_grid),
                 lw=1.5,
                 label='{0}'.format(loc))
        msis_surf_100.append((atm.max_X, 1. / atm.r_X2rho(100.)))
    print(msis_surf_100)
    plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
    plt.xlabel(r'Depth (g/cm$^2$)')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.title",
"MCEq.misc.info",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.exp",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"six.with_metaclass",
"numpy.zeros_like",
"MCEq.geometry.nrlmsise00_mceq.cNRLMSISE00",
"MCEq.misc.theta_rad",
"scipy.interpolate.Univari... | [((230, 253), 'six.with_metaclass', 'with_metaclass', (['ABCMeta'], {}), '(ABCMeta)\n', (244, 253), False, 'from six import with_metaclass\n'), ((46292, 46318), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (46302, 46318), True, 'import matplotlib.pyplot as plt\n'), ((46323, 46355), 'matplotlib.pyplot.title', 'plt.title', (['"""CORSIKA atmospheres"""'], {}), "('CORSIKA atmospheres')\n", (46332, 46355), True, 'import matplotlib.pyplot as plt\n'), ((47260, 47300), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density $\\\\rho$ (g/cm$^3$)"""'], {}), "('Density $\\\\rho$ (g/cm$^3$)')\n", (47270, 47300), True, 'import matplotlib.pyplot as plt\n'), ((47305, 47335), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Depth (g/cm$^2$)"""'], {}), "('Depth (g/cm$^2$)')\n", (47315, 47335), True, 'import matplotlib.pyplot as plt\n'), ((47341, 47369), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (47351, 47369), True, 'import matplotlib.pyplot as plt\n'), ((47374, 47392), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (47390, 47392), True, 'import matplotlib.pyplot as plt\n'), ((47398, 47424), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (47408, 47424), True, 'import matplotlib.pyplot as plt\n'), ((47429, 47465), 'matplotlib.pyplot.title', 'plt.title', (['"""NRLMSISE-00 atmospheres"""'], {}), "('NRLMSISE-00 atmospheres')\n", (47438, 47465), True, 'import matplotlib.pyplot as plt\n'), ((48324, 48364), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density $\\\\rho$ (g/cm$^3$)"""'], {}), "('Density $\\\\rho$ (g/cm$^3$)')\n", (48334, 48364), True, 'import matplotlib.pyplot as plt\n'), ((48369, 48399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Depth (g/cm$^2$)"""'], {}), "('Depth (g/cm$^2$)')\n", (48379, 48399), True, 'import matplotlib.pyplot as plt\n'), ((48405, 48433), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (48415, 48433), True, 'import matplotlib.pyplot as plt\n'), ((48438, 48456), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (48454, 48456), True, 'import matplotlib.pyplot as plt\n'), ((48461, 48471), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (48469, 48471), True, 'import matplotlib.pyplot as plt\n'), ((2473, 2509), 'numpy.linspace', 'np.linspace', (['(0)', 'path_length', 'n_steps'], {}), '(0, path_length, n_steps)\n', (2484, 2509), True, 'import numpy as np\n'), ((2525, 2531), 'time.time', 'time', ([], {}), '()\n', (2529, 2531), False, 'from time import time\n'), ((5197, 5217), 'MCEq.misc.theta_rad', 'theta_rad', (['theta_deg'], {}), '(theta_deg)\n', (5206, 5217), False, 'from MCEq.misc import theta_rad\n'), ((17815, 17862), 'numpy.array', 'np.array', (['[_aatm, _batm, _catm, _thickl, _hlay]'], {}), '([_aatm, _batm, _catm, _thickl, _hlay])\n', (17823, 17862), True, 'import numpy as np\n'), ((24024, 24037), 'MCEq.geometry.nrlmsise00_mceq.cNRLMSISE00', 'cNRLMSISE00', ([], {}), '()\n', (24035, 24037), False, 'from MCEq.geometry.nrlmsise00_mceq import cNRLMSISE00\n'), ((32704, 32741), 'MCEq.misc.info', 'info', (['(2)', '"""setting IC79_day"""', 'IC79_day'], {}), "(2, 'setting IC79_day', IC79_day)\n", (32708, 32741), False, 'from MCEq.misc import info\n'), ((37961, 37985), 'numpy.array', 'np.array', (['self.densities'], {}), '(self.densities)\n', (37969, 37985), True, 'import numpy as np\n'), ((38014, 38041), 'numpy.array', 'np.array', (['self.start_bounds'], {}), '(self.start_bounds)\n', (38022, 38041), True, 'import numpy as np\n'), ((38068, 38093), 'numpy.array', 'np.array', (['self.end_bounds'], {}), '(self.end_bounds)\n', (38076, 38093), True, 'import numpy as np\n'), ((38118, 38140), 'numpy.max', 'np.max', (['self.densities'], {}), '(self.densities)\n', (38124, 38140), True, 'import numpy as np\n'), ((41438, 41490), 
'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.X_int', 'self.knots'], {'k': '(1)', 's': '(0.0)'}), '(self.X_int, self.knots, k=1, s=0.0)\n', (41454, 41490), False, 'from scipy.interpolate import UnivariateSpline\n'), ((41512, 41564), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.knots', 'self.X_int'], {'k': '(1)', 's': '(0.0)'}), '(self.knots, self.X_int, k=1, s=0.0)\n', (41528, 41564), False, 'from scipy.interpolate import UnivariateSpline\n'), ((42437, 42453), 'numpy.atleast_1d', 'np.atleast_1d', (['X'], {}), '(X)\n', (42450, 42453), True, 'import numpy as np\n'), ((43577, 43596), 'numpy.atleast_1d', 'np.atleast_1d', (['l_cm'], {}), '(l_cm)\n', (43590, 43596), True, 'import numpy as np\n'), ((43611, 43630), 'numpy.zeros_like', 'np.zeros_like', (['l_cm'], {}), '(l_cm)\n', (43624, 43630), True, 'import numpy as np\n'), ((45602, 45710), 'MCEq.misc.info', 'info', (['min_dbg_lev', '"""********************* List of materials ***********************"""'], {'no_caller': '(True)'}), "(min_dbg_lev,\n '********************* List of materials ***********************',\n no_caller=True)\n", (45606, 45710), False, 'from MCEq.misc import info\n'), ((45944, 45983), 'MCEq.misc.info', 'info', (['min_dbg_lev', 'head'], {'no_caller': '(True)'}), '(min_dbg_lev, head, no_caller=True)\n', (45948, 45983), False, 'from MCEq.misc import info\n'), ((46913, 46948), 'numpy.linspace', 'np.linspace', (['(0)', 'cka_obj.max_X', '(5000)'], {}), '(0, cka_obj.max_X, 5000)\n', (46924, 46948), True, 'import numpy as np\n'), ((48048, 48084), 'numpy.linspace', 'np.linspace', (['(0)', 'msis_obj.max_X', '(5000)'], {}), '(0, msis_obj.max_X, 5000)\n', (48059, 48084), True, 'import numpy as np\n'), ((988, 1003), 'MCEq.geometry.geometry.EarthGeometry', 'EarthGeometry', ([], {}), '()\n', (1001, 1003), False, 'from MCEq.geometry.geometry import EarthGeometry\n'), ((3086, 3100), 'numpy.log', 'np.log', (['X_intp'], {}), '(X_intp)\n', (3092, 3100), True, 'import numpy as 
np\n'), ((6543, 6552), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (6549, 6552), True, 'import numpy as np\n'), ((7507, 7538), 'numpy.sqrt', 'np.sqrt', (['(2.0 * nrel + nrel ** 2)'], {}), '(2.0 * nrel + nrel ** 2)\n', (7514, 7538), True, 'import numpy as np\n'), ((11344, 11400), 'numpy.array', 'np.array', (['[-186.5562, -94.919, 0.61289, 0.0, 0.01128292]'], {}), '([-186.5562, -94.919, 0.61289, 0.0, 0.01128292])\n', (11352, 11400), True, 'import numpy as np\n'), ((11421, 11479), 'numpy.array', 'np.array', (['[1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0]'], {}), '([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0])\n', (11429, 11479), True, 'import numpy as np\n'), ((11500, 11567), 'numpy.array', 'np.array', (['[994186.38, 878153.55, 636143.04, 772170.0, 1000000000.0]'], {}), '([994186.38, 878153.55, 636143.04, 772170.0, 1000000000.0])\n', (11508, 11567), True, 'import numpy as np\n'), ((11582, 11647), 'numpy.array', 'np.array', (['[1036.102549, 631.100309, 271.70023, 3.039494, 0.00128]'], {}), '([1036.102549, 631.100309, 271.70023, 3.039494, 0.00128])\n', (11590, 11647), True, 'import numpy as np\n'), ((11687, 11744), 'numpy.array', 'np.array', (['[0, 400000.0, 1000000.0, 4000000.0, 10000000.0]'], {}), '([0, 400000.0, 1000000.0, 4000000.0, 10000000.0])\n', (11695, 11744), True, 'import numpy as np\n'), ((22368, 22396), 'numpy.exp', 'np.exp', (['(-h_cm / self.hiso_cm)'], {}), '(-h_cm / self.hiso_cm)\n', (22374, 22396), True, 'import numpy as np\n'), ((22671, 22699), 'numpy.exp', 'np.exp', (['(-h_cm / self.hiso_cm)'], {}), '(-h_cm / self.hiso_cm)\n', (22677, 22699), True, 'import numpy as np\n'), ((24963, 25022), 'MCEq.misc.info', 'info', (['(0)', '"""Using loc altitude"""', 'self._msis.alt_surface', '"""cm"""'], {}), "(0, 'Using loc altitude', self._msis.alt_surface, 'cm')\n", (24967, 25022), False, 'from MCEq.misc import info\n'), ((27990, 28010), 'os.path.expanduser', 'path.expanduser', (['"""~"""'], {}), "('~')\n", (28005, 28010), False, 'from os import 
path\n'), ((29021, 29040), 'matplotlib.dates.num2date', 'num2date', (['tab[:, 0]'], {}), '(tab[:, 0])\n', (29029, 29040), False, 'from matplotlib.dates import datestr2num, num2date\n'), ((29695, 29748), 'numpy.array', 'np.array', (["(data_collection['alti'][2][didx, :] * 100.0)"], {}), "(data_collection['alti'][2][didx, :] * 100.0)\n", (29703, 29748), True, 'import numpy as np\n'), ((29767, 29812), 'numpy.array', 'np.array', (["data_collection['dens'][2][didx, :]"], {}), "(data_collection['dens'][2][didx, :])\n", (29775, 29812), True, 'import numpy as np\n'), ((29833, 29878), 'numpy.array', 'np.array', (["data_collection['temp'][2][didx, :]"], {}), "(data_collection['temp'][2][didx, :])\n", (29841, 29878), True, 'import numpy as np\n'), ((34559, 34603), 'MCEq.misc.info', 'info', (['(2)', '"""location forced to the South Pole"""'], {}), "(2, 'location forced to the South Pole')\n", (34563, 34603), False, 'from MCEq.misc import info\n'), ((44441, 44469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2.5)'}), '(figsize=(5, 2.5))\n', (44451, 44469), True, 'import matplotlib.pyplot as plt\n'), ((44489, 44498), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (44496, 44498), True, 'import matplotlib.pyplot as plt\n'), ((44514, 44532), 'numpy.max', 'np.max', (['self.X_int'], {}), '(self.X_int)\n', (44520, 44532), True, 'import numpy as np\n'), ((3233, 3247), 'numpy.log', 'np.log', (['X_intp'], {}), '(X_intp)\n', (3239, 3247), True, 'import numpy as np\n'), ((11786, 11860), 'numpy.array', 'np.array', (['[-149.801663, -57.932486, 0.63631894, 0.00043545369, 0.01128292]'], {}), '([-149.801663, -57.932486, 0.63631894, 0.00043545369, 0.01128292])\n', (11794, 11860), True, 'import numpy as np\n'), ((11910, 11969), 'numpy.array', 'np.array', (['[1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0]'], {}), '([1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0])\n', (11918, 11969), True, 'import numpy as np\n'), ((11990, 12058), 'numpy.array', 'np.array', 
(['[954248.34, 800005.34, 629568.93, 737521.77, 1000000000.0]'], {}), '([954248.34, 800005.34, 629568.93, 737521.77, 1000000000.0])\n', (11998, 12058), True, 'import numpy as np\n'), ((12091, 12156), 'numpy.array', 'np.array', (['[1033.804941, 418.55777, 216.981635, 4.344861, 0.00128]'], {}), '([1033.804941, 418.55777, 216.981635, 4.344861, 0.00128])\n', (12099, 12156), True, 'import numpy as np\n'), ((12196, 12255), 'numpy.array', 'np.array', (['[0.0, 700000.0, 1140000.0, 3700000.0, 10000000.0]'], {}), '([0.0, 700000.0, 1140000.0, 3700000.0, 10000000.0])\n', (12204, 12255), True, 'import numpy as np\n'), ((18359, 18394), 'numpy.log', 'np.log', (['(_batm[0] / (x_v - _aatm[0]))'], {}), '(_batm[0] / (x_v - _aatm[0]))\n', (18365, 18394), True, 'import numpy as np\n'), ((29980, 30032), 'numpy.linspace', 'np.linspace', (['h_vec[-1]', '(self.geom.h_atm * 100.0)', '(250)'], {}), '(h_vec[-1], self.geom.h_atm * 100.0, 250)\n', (29991, 30032), True, 'import numpy as np\n'), ((30951, 30983), 'numpy.hstack', 'np.hstack', (['[h_vec[:-1], h_extra]'], {}), '([h_vec[:-1], h_extra])\n', (30960, 30983), True, 'import numpy as np\n'), ((31008, 31045), 'numpy.hstack', 'np.hstack', (['[d_vec[:-1], msis_extra_d]'], {}), '([d_vec[:-1], msis_extra_d])\n', (31017, 31045), True, 'import numpy as np\n'), ((31070, 31107), 'numpy.hstack', 'np.hstack', (['[t_vec[:-1], msis_extra_t]'], {}), '([t_vec[:-1], msis_extra_t])\n', (31079, 31107), True, 'import numpy as np\n'), ((32661, 32694), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'IC79_day'}), '(days=IC79_day)\n', (32679, 32694), False, 'import datetime\n'), ((33436, 33453), 'numpy.log', 'np.log', (['self.dens'], {}), '(self.dens)\n', (33442, 33453), True, 'import numpy as np\n'), ((33983, 34000), 'numpy.log', 'np.log', (['self.temp'], {}), '(self.temp)\n', (33989, 34000), True, 'import numpy as np\n'), ((35401, 35418), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (35407, 35418), True, 'import numpy as np\n'), 
((42632, 42641), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (42638, 42641), True, 'import numpy as np\n'), ((42650, 42659), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (42656, 42659), True, 'import numpy as np\n'), ((43643, 43655), 'numpy.min', 'np.min', (['l_cm'], {}), '(l_cm)\n', (43649, 43655), True, 'import numpy as np\n'), ((43663, 43675), 'numpy.max', 'np.max', (['l_cm'], {}), '(l_cm)\n', (43669, 43675), True, 'import numpy as np\n'), ((2710, 2716), 'time.time', 'time', ([], {}), '()\n', (2714, 2716), False, 'from time import time\n'), ((12299, 12368), 'numpy.array', 'np.array', (['[-118.1277, -154.258, 0.4191499, 0.00054094056, 0.01128292]'], {}), '([-118.1277, -154.258, 0.4191499, 0.00054094056, 0.01128292])\n', (12307, 12368), True, 'import numpy as np\n'), ((12405, 12463), 'numpy.array', 'np.array', (['[1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0]'], {}), '([1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0])\n', (12413, 12463), True, 'import numpy as np\n'), ((12484, 12549), 'numpy.array', 'np.array', (['[919546.0, 963267.92, 614315.0, 739059.6, 1000000000.0]'], {}), '([919546.0, 963267.92, 614315.0, 739059.6, 1000000000.0])\n', (12492, 12549), True, 'import numpy as np\n'), ((12563, 12629), 'numpy.array', 'np.array', (['[1055.858707, 641.755364, 272.720974, 2.480633, 0.00128]'], {}), '([1055.858707, 641.755364, 272.720974, 2.480633, 0.00128])\n', (12571, 12629), True, 'import numpy as np\n'), ((12668, 12727), 'numpy.array', 'np.array', (['[0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0]'], {}), '([0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0])\n', (12676, 12727), True, 'import numpy as np\n'), ((18459, 18494), 'numpy.log', 'np.log', (['(_batm[1] / (x_v - _aatm[1]))'], {}), '(_batm[1] / (x_v - _aatm[1]))\n', (18465, 18494), True, 'import numpy as np\n'), ((42832, 42841), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (42838, 42841), True, 'import numpy as np\n'), ((12803, 12894), 'numpy.array', 'np.array', (['[-141.31449999999998, 
-8.256029999999999, 0.6132505, -0.025998975, 0.4024275]'], {}), '([-141.31449999999998, -8.256029999999999, 0.6132505, -0.025998975,\n 0.4024275])\n', (12811, 12894), True, 'import numpy as np\n'), ((12911, 13002), 'numpy.array', 'np.array', (['[1153.0349999999999, 1263.3325, 1257.0724999999998, 404.85974999999996, 1.0]'], {}), '([1153.0349999999999, 1263.3325, 1257.0724999999998, \n 404.85974999999996, 1.0])\n', (12919, 13002), True, 'import numpy as np\n'), ((13018, 13083), 'numpy.array', 'np.array', (['[967990.75, 668591.75, 636790.0, 814070.75, 21426175.0]'], {}), '([967990.75, 668591.75, 636790.0, 814070.75, 21426175.0])\n', (13026, 13083), True, 'import numpy as np\n'), ((13106, 13204), 'numpy.array', 'np.array', (['[1011.8521512499999, 275.84507575000003, 51.0230705, 2.983134, \n 0.21927724999999998]'], {}), '([1011.8521512499999, 275.84507575000003, 51.0230705, 2.983134, \n 0.21927724999999998])\n', (13114, 13204), True, 'import numpy as np\n'), ((13220, 13278), 'numpy.array', 'np.array', (['[0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0]'], {}), '([0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0])\n', (13228, 13278), True, 'import numpy as np\n'), ((18559, 18594), 'numpy.log', 'np.log', (['(_batm[2] / (x_v - _aatm[2]))'], {}), '(_batm[2] / (x_v - _aatm[2]))\n', (18565, 18594), True, 'import numpy as np\n'), ((20936, 20988), 'scipy.integrate.quad', 'quad', (['self.get_density', 'h', '(11280000.0)'], {'epsrel': '(0.0001)'}), '(self.get_density, h, 11280000.0, epsrel=0.0001)\n', (20940, 20988), False, 'from scipy.integrate import quad\n'), ((30436, 30461), 'numpy.exp', 'np.exp', (['(-ninterp + ni + 1)'], {}), '(-ninterp + ni + 1)\n', (30442, 30461), True, 'import numpy as np\n'), ((30493, 30504), 'numpy.exp', 'np.exp', (['(-ni)'], {}), '(-ni)\n', (30499, 30504), True, 'import numpy as np\n'), ((18659, 18694), 'numpy.log', 'np.log', (['(_batm[3] / (x_v - _aatm[3]))'], {}), '(_batm[3] / (x_v - _aatm[3]))\n', (18665, 18694), True, 'import numpy as np\n'), 
((35458, 35475), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (35464, 35475), True, 'import numpy as np\n'), ((13386, 13446), 'numpy.array', 'np.array', (['[-158.85, -5.38682, 0.889893, -0.0286665, 0.50035]'], {}), '([-158.85, -5.38682, 0.889893, -0.0286665, 0.50035])\n', (13394, 13446), True, 'import numpy as np\n'), ((13471, 13522), 'numpy.array', 'np.array', (['[1145.62, 1176.79, 1248.92, 415.543, 1.0]'], {}), '([1145.62, 1176.79, 1248.92, 415.543, 1.0])\n', (13479, 13522), True, 'import numpy as np\n'), ((13547, 13609), 'numpy.array', 'np.array', (['[998469.0, 677398.0, 636790.0, 823489.0, 16090500.0]'], {}), '([998469.0, 677398.0, 636790.0, 823489.0, 16090500.0])\n', (13555, 13609), True, 'import numpy as np\n'), ((13636, 13699), 'numpy.array', 'np.array', (['[986.951713, 306.4668, 40.546793, 4.288721, 0.277182]'], {}), '([986.951713, 306.4668, 40.546793, 4.288721, 0.277182])\n', (13644, 13699), True, 'import numpy as np\n'), ((13724, 13780), 'numpy.array', 'np.array', (['[0, 900000.0, 2200000.0, 3800000.0, 6820000.0]'], {}), '([0, 900000.0, 2200000.0, 3800000.0, 6820000.0])\n', (13732, 13780), True, 'import numpy as np\n'), ((35346, 35363), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (35352, 35363), True, 'import numpy as np\n'), ((35522, 35539), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (35528, 35539), True, 'import numpy as np\n'), ((13830, 13890), 'numpy.array', 'np.array', (['[-132.16, -2.4787, 0.298031, -0.0220264, 0.348021]'], {}), '([-132.16, -2.4787, 0.298031, -0.0220264, 0.348021])\n', (13838, 13890), True, 'import numpy as np\n'), ((13915, 13966), 'numpy.array', 'np.array', (['[1120.45, 1203.97, 1163.28, 360.027, 1.0]'], {}), '([1120.45, 1203.97, 1163.28, 360.027, 1.0])\n', (13923, 13966), True, 'import numpy as np\n'), ((13991, 14053), 'numpy.array', 'np.array', (['[933697.0, 643957.0, 636790.0, 804486.0, 23109000.0]'], {}), '([933697.0, 643957.0, 636790.0, 804486.0, 23109000.0])\n', (13999, 
14053), True, 'import numpy as np\n'), ((14080, 14145), 'numpy.array', 'np.array', (['[988.431172, 273.033464, 37.185105, 1.162987, 0.192998]'], {}), '([988.431172, 273.033464, 37.185105, 1.162987, 0.192998])\n', (14088, 14145), True, 'import numpy as np\n'), ((14170, 14226), 'numpy.array', 'np.array', (['[0, 950000.0, 2200000.0, 4700000.0, 6820000.0]'], {}), '([0, 950000.0, 2200000.0, 4700000.0, 6820000.0])\n', (14178, 14226), True, 'import numpy as np\n'), ((14314, 14376), 'numpy.array', 'np.array', (['[-157.857, -28.7524, 0.790275, -0.0286999, 0.481114]'], {}), '([-157.857, -28.7524, 0.790275, -0.0286999, 0.481114])\n', (14322, 14376), True, 'import numpy as np\n'), ((14401, 14451), 'numpy.array', 'np.array', (['[1190.44, 1171.0, 1344.78, 445.357, 1.0]'], {}), '([1190.44, 1171.0, 1344.78, 445.357, 1.0])\n', (14409, 14451), True, 'import numpy as np\n'), ((14476, 14539), 'numpy.array', 'np.array', (['[1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0]'], {}), '([1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0])\n', (14484, 14539), True, 'import numpy as np\n'), ((14566, 14632), 'numpy.array', 'np.array', (['[1032.679434, 328.978681, 80.601135, 4.420745, 0.264112]'], {}), '([1032.679434, 328.978681, 80.601135, 4.420745, 0.264112])\n', (14574, 14632), True, 'import numpy as np\n'), ((14657, 14713), 'numpy.array', 'np.array', (['[0, 900000.0, 1800000.0, 3800000.0, 6820000.0]'], {}), '([0, 900000.0, 1800000.0, 3800000.0, 6820000.0])\n', (14665, 14713), True, 'import numpy as np\n'), ((14763, 14823), 'numpy.array', 'np.array', (['[-116.391, 3.5938, 0.474803, -0.0246031, 0.280225]'], {}), '([-116.391, 3.5938, 0.474803, -0.0246031, 0.280225])\n', (14771, 14823), True, 'import numpy as np\n'), ((14848, 14899), 'numpy.array', 'np.array', (['[1155.63, 1501.57, 1271.31, 398.512, 1.0]'], {}), '([1155.63, 1501.57, 1271.31, 398.512, 1.0])\n', (14856, 14899), True, 'import numpy as np\n'), ((14924, 14986), 'numpy.array', 'np.array', (['[933697.0, 594398.0, 636790.0, 
810924.0, 29618400.0]'], {}), '([933697.0, 594398.0, 636790.0, 810924.0, 29618400.0])\n', (14932, 14986), True, 'import numpy as np\n'), ((15013, 15079), 'numpy.array', 'np.array', (['[1039.346286, 194.901358, 45.759249, 2.060083, 0.142817]'], {}), '([1039.346286, 194.901358, 45.759249, 2.060083, 0.142817])\n', (15021, 15079), True, 'import numpy as np\n'), ((15104, 15161), 'numpy.array', 'np.array', (['[0, 1225000.0, 2125000.0, 4300000.0, 7050000.0]'], {}), '([0, 1225000.0, 2125000.0, 4300000.0, 7050000.0])\n', (15112, 15161), True, 'import numpy as np\n'), ((15251, 15314), 'numpy.array', 'np.array', (['[-128.601, -39.5548, 1.13088, -0.0026496, 0.00192534]'], {}), '([-128.601, -39.5548, 1.13088, -0.0026496, 0.00192534])\n', (15259, 15314), True, 'import numpy as np\n'), ((15361, 15412), 'numpy.array', 'np.array', (['[1139.99, 1073.82, 1052.96, 492.503, 1.0]'], {}), '([1139.99, 1073.82, 1052.96, 492.503, 1.0])\n', (15369, 15412), True, 'import numpy as np\n'), ((15437, 15501), 'numpy.array', 'np.array', (['[861913.0, 744955.0, 675928.0, 829627.0, 5858701000.0]'], {}), '([861913.0, 744955.0, 675928.0, 829627.0, 5858701000.0])\n', (15445, 15501), True, 'import numpy as np\n'), ((15544, 15610), 'numpy.array', 'np.array', (['[1011.398804, 588.128367, 240.95536, 3.964546, 0.000218]'], {}), '([1011.398804, 588.128367, 240.95536, 3.964546, 0.000218])\n', (15552, 15610), True, 'import numpy as np\n'), ((15657, 15716), 'numpy.array', 'np.array', (['[0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0]'], {}), '([0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0])\n', (15665, 15716), True, 'import numpy as np\n'), ((15760, 15826), 'numpy.array', 'np.array', (['[-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667]'], {}), '([-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667])\n', (15768, 15826), True, 'import numpy as np\n'), ((15872, 15922), 'numpy.array', 'np.array', (['[1183.7, 1108.06, 1424.02, 207.595, 1.0]'], {}), '([1183.7, 1108.06, 1424.02, 207.595, 1.0])\n', 
(15880, 15922), True, 'import numpy as np\n'), ((15948, 16012), 'numpy.array', 'np.array', (['[875221.0, 753213.0, 545846.0, 793043.0, 5978790800.0]'], {}), '([875221.0, 753213.0, 545846.0, 793043.0, 5978790800.0])\n', (15956, 16012), True, 'import numpy as np\n'), ((16055, 16122), 'numpy.array', 'np.array', (['[1020.370363, 586.143464, 228.374393, 1.338258, 0.000214]'], {}), '([1020.370363, 586.143464, 228.374393, 1.338258, 0.000214])\n', (16063, 16122), True, 'import numpy as np\n'), ((16168, 16227), 'numpy.array', 'np.array', (['[0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0]'], {}), '([0.0, 400000.0, 1000000.0, 4000000.0, 10000000.0])\n', (16176, 16227), True, 'import numpy as np\n'), ((16485, 16543), 'numpy.array', 'np.array', (['[-113.139, -7930635, -54.3888, -0.0, 0.00421033]'], {}), '([-113.139, -7930635, -54.3888, -0.0, 0.00421033])\n', (16493, 16543), True, 'import numpy as np\n'), ((16589, 16636), 'numpy.array', 'np.array', (['[1133.1, 1101.2, 1085.0, 1098.0, 1.0]'], {}), '([1133.1, 1101.2, 1085.0, 1098.0, 1.0])\n', (16597, 16636), True, 'import numpy as np\n'), ((16665, 16729), 'numpy.array', 'np.array', (['[861730.0, 826340.0, 790950.0, 682800.0, 2679815600.0]'], {}), '([861730.0, 826340.0, 790950.0, 682800.0, 2679815600.0])\n', (16673, 16729), True, 'import numpy as np\n'), ((16772, 16841), 'numpy.array', 'np.array', (['[1019.966898, 718.071682, 498.659703, 340.222344, 0.000478]'], {}), '([1019.966898, 718.071682, 498.659703, 340.222344, 0.000478])\n', (16780, 16841), True, 'import numpy as np\n'), ((16904, 16961), 'numpy.array', 'np.array', (['[0.0, 267000.0, 533000.0, 800000.0, 10000000.0]'], {}), '([0.0, 267000.0, 533000.0, 800000.0, 10000000.0])\n', (16912, 16961), True, 'import numpy as np\n'), ((17011, 17069), 'numpy.array', 'np.array', (['[-59.0293, -21.5794, -7.14839, 0.0, 0.000190175]'], {}), '([-59.0293, -21.5794, -7.14839, 0.0, 0.000190175])\n', (17019, 17069), True, 'import numpy as np\n'), ((17115, 17162), 'numpy.array', 'np.array', 
(['[1079.0, 1071.9, 1182.0, 1647.1, 1.0]'], {}), '([1079.0, 1071.9, 1182.0, 1647.1, 1.0])\n', (17123, 17162), True, 'import numpy as np\n'), ((17188, 17253), 'numpy.array', 'np.array', (['[764170.0, 699910.0, 635650.0, 551010.0, 59329575000.0]'], {}), '([764170.0, 699910.0, 635650.0, 551010.0, 59329575000.0])\n', (17196, 17253), True, 'import numpy as np\n'), ((17295, 17362), 'numpy.array', 'np.array', (['[1019.946057, 391.739652, 138.023515, 43.687992, 2.2e-05]'], {}), '([1019.946057, 391.739652, 138.023515, 43.687992, 2.2e-05])\n', (17303, 17362), True, 'import numpy as np\n'), ((17409, 17468), 'numpy.array', 'np.array', (['[0.0, 667000.0, 1333000.0, 2000000.0, 10000000.0]'], {}), '([0.0, 667000.0, 1333000.0, 2000000.0, 10000000.0])\n', (17417, 17468), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.