code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import gym
import tensorflow as tf
import numpy as np
# Hyper-parameters for the CartPole policy-gradient agent.
INPUT_SIZE = 4        # length of the CartPole observation vector
HIDDEN_UNIT_NUM = 4   # hidden-layer width of the policy network
OUTPUT_SIZE = 1       # single logit: probability of pushing left
LEARNING_RATE = 0.01  # Adam optimizer step size
DISCOUNT_RATE = 0.95  # reward discount factor (gamma)
def Cartpole_policy():
    """Build the TF1 policy network for CartPole.

    The network maps a 4-D observation to one logit; the sigmoid of that
    logit is the probability of action 0 (push left), and an action is
    sampled from the two-way categorical [p, 1 - p].

    Returns:
        X: float32 placeholder of shape (None, INPUT_SIZE) for observations
        logit: pre-sigmoid network output, shape (None, 1)
        action: sampled action tensor (0 or 1) from tf.multinomial
    """
    initializer = tf.contrib.layers.variance_scaling_initializer()
    X = tf.placeholder(tf.float32, shape=(None, INPUT_SIZE))
    hidden = tf.layers.dense(X, HIDDEN_UNIT_NUM, activation = tf.nn.relu,
                             kernel_initializer = initializer)
    logit = tf.layers.dense(hidden, OUTPUT_SIZE,
                            kernel_initializer = initializer)
    output = tf.nn.sigmoid(logit)
    p_left_right = tf.concat(axis = 1, values = [output, 1 - output])
    # Original comment (translated): "does multinomial sampling break the
    # gradient?" -- it does: tf.multinomial is not differentiable, which is
    # why Get_Gradient differentiates the cross-entropy instead.
    action = tf.multinomial(tf.log(p_left_right), 1)
    return X, logit, action
def Get_Gradient(logit, action):
    """Build the policy-gradient training ops (TF1 graph mode).

    The target y is 1.0 when the sampled action is 0, so the cross-entropy
    gradients point toward making the sampled action more likely; the
    training loop later scales these gradients by the normalized discounted
    rewards and feeds the means back through the placeholders.

    Params:
        logit: pre-sigmoid output of the policy network
        action: sampled action tensor from tf.multinomial
    Returns:
        gradient_placeholders: one placeholder per trainable variable,
            used to feed reward-weighted mean gradients
        gradients: gradient tensors to evaluate during rollouts
        grads_and_vars: raw (gradient, variable) pairs from the optimizer
        training_op: op that applies the fed gradients
    """
    y = 1. - tf.to_float(action)
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels = y,
                                                            logits = logit)
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    grads_and_vars = optimizer.compute_gradients(cross_entropy)
    gradients = [gradient for gradient, variable in grads_and_vars]
    gradient_placeholders = []
    grads_and_vars_feed = []
    for gradient, variable in grads_and_vars:
        # One placeholder per variable so externally-computed (reward
        # scaled) gradients can be applied at training time.
        gradient_placeholder = tf.placeholder(tf.float32,
                                              shape = gradient.get_shape())
        gradient_placeholders.append(gradient_placeholder)
        grads_and_vars_feed.append((gradient_placeholder, variable))
    training_op = optimizer.apply_gradients(grads_and_vars_feed)
    return gradient_placeholders, gradients, grads_and_vars, training_op
def discounted_rewards(rewards, discount_rate):
    """Return the discounted cumulative reward at every time step.

    out[t] = rewards[t] + discount_rate * out[t + 1], computed by a single
    backward sweep over the episode.
    """
    out = np.empty(len(rewards))
    running = 0
    for idx in range(len(rewards) - 1, -1, -1):
        running = discount_rate * running + rewards[idx]
        out[idx] = running
    return out
def discounted_and_normalized_rewards(all_rewards, discount_rate):
    """Discount each game's rewards, then normalize across all games.

    Normalization subtracts the global mean and divides by the global
    standard deviation of the flattened discounted rewards, so that
    better-than-average steps get positive scores.
    """
    per_game = [discounted_rewards(rewards, discount_rate)
                for rewards in all_rewards]
    flat = np.concatenate(per_game)
    mu = np.mean(flat)
    sigma = np.std(flat)
    return [(game - mu) / sigma for game in per_game]
if __name__ == '__main__':
    # REINFORCE-style policy-gradient training loop for CartPole-v0.
    n_iterations = 250        # number of gradient updates
    n_max_steps = 1000        # safety cap on episode length
    n_games_per_update = 10   # episodes collected per gradient update
    save_iterations = 10      # checkpoint frequency (in iterations)
    env = gym.make("CartPole-v0")
    #obs = env.reset()
    X, logit, action = Cartpole_policy()
    gradient_placeholders, gradients, grads_and_vars, training_op = Get_Gradient(logit, action)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init.run()
        for iteration in range(n_iterations):
            all_rewards = []    # per-game lists of raw rewards
            all_gradients = []  # per-game, per-step gradient evaluations
            for game in range(n_games_per_update):
                obs = env.reset()
                current_rewards = []
                current_gradients = []
                for step in range(n_max_steps):
                    # Sample an action and record the gradients that would
                    # make the sampled action more likely.
                    action_val, gradient_val = sess.run(
                        [action, gradients],
                        feed_dict = {X : obs.reshape(1, INPUT_SIZE)}
                    )
                    obs, reward, done, info = env.step(action_val[0][0])
                    current_rewards.append(reward)
                    current_gradients.append(gradient_val)
                    if done:
                        break
                all_rewards.append(current_rewards)
                all_gradients.append(current_gradients)
            # Replace raw rewards with discounted, normalized scores.
            all_rewards = discounted_and_normalized_rewards(all_rewards,
                                                            DISCOUNT_RATE)
            feed_dict = {}
            for var_index, gradient_placeholder in enumerate(gradient_placeholders):
                #all_gradients = [all_gradients[game_index][step][var_index]
                #                 for game_index, rewards in enumerate(all_rewards)
                #                 for step, reward in enumerate(rewards)]
                # Reward-weighted mean gradient over all games and steps.
                mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
                                          for game_index, rewards in enumerate(all_rewards)
                                          for step, reward in enumerate(rewards)], axis = 0)
                feed_dict[gradient_placeholder] = mean_gradients
            sess.run(training_op, feed_dict = feed_dict)
            print("Iteration: %d" % (iteration))
            if iteration % save_iterations == 0:
                saver.save(sess, "./my_policy_net_pg.ckpt")
| [
"numpy.mean",
"tensorflow.to_float",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sigmoid",
"tensorflow.concat",
"numpy.concatenate",... | [((190, 238), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (236, 238), True, 'import tensorflow as tf\n'), ((247, 299), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, INPUT_SIZE)'}), '(tf.float32, shape=(None, INPUT_SIZE))\n', (261, 299), True, 'import tensorflow as tf\n'), ((313, 407), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'HIDDEN_UNIT_NUM'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'initializer'}), '(X, HIDDEN_UNIT_NUM, activation=tf.nn.relu,\n kernel_initializer=initializer)\n', (328, 407), True, 'import tensorflow as tf\n'), ((449, 517), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden', 'OUTPUT_SIZE'], {'kernel_initializer': 'initializer'}), '(hidden, OUTPUT_SIZE, kernel_initializer=initializer)\n', (464, 517), True, 'import tensorflow as tf\n'), ((563, 583), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logit'], {}), '(logit)\n', (576, 583), True, 'import tensorflow as tf\n'), ((603, 649), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[output, 1 - output]'}), '(axis=1, values=[output, 1 - output])\n', (612, 649), True, 'import tensorflow as tf\n'), ((840, 903), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logit'}), '(labels=y, logits=logit)\n', (879, 903), True, 'import tensorflow as tf\n'), ((981, 1018), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LEARNING_RATE'], {}), '(LEARNING_RATE)\n', (1003, 1018), True, 'import tensorflow as tf\n'), ((2202, 2240), 'numpy.concatenate', 'np.concatenate', (['all_discounted_rewards'], {}), '(all_discounted_rewards)\n', (2216, 2240), True, 'import numpy as np\n'), ((2252, 2273), 'numpy.mean', 'np.mean', (['flat_rewards'], {}), '(flat_rewards)\n', (2259, 2273), True, 'import numpy as np\n'), ((2284, 2304), 'numpy.std', 'np.std', 
(['flat_rewards'], {}), '(flat_rewards)\n', (2290, 2304), True, 'import numpy as np\n'), ((2537, 2560), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (2545, 2560), False, 'import gym\n'), ((2737, 2770), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2768, 2770), True, 'import tensorflow as tf\n'), ((2783, 2799), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2797, 2799), True, 'import tensorflow as tf\n'), ((700, 720), 'tensorflow.log', 'tf.log', (['p_left_right'], {}), '(p_left_right)\n', (706, 720), True, 'import tensorflow as tf\n'), ((800, 819), 'tensorflow.to_float', 'tf.to_float', (['action'], {}), '(action)\n', (811, 819), True, 'import tensorflow as tf\n'), ((2809, 2821), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2819, 2821), True, 'import tensorflow as tf\n')] |
""" change images between different color spaces """
import cv2 as cv
import numpy as np
from imagewizard.helpers import helpers
def img2grayscale(img,
                  to_binary: bool = False,
                  to_zero: bool = False,
                  inverted: bool = False,
                  trunc: bool = False,
                  is_gray: bool = True,
                  order: str = 'rgb'):
    """ BGR/RGB to Grayscale conversion
    Params:
        img: (numpy.array, PIL.image, cv2.image)
        to_binary, to_zero, trunc: thresholding modes (first match wins,
            in the order trunc > binary > zero)
        inverted: invert the result (or use the inverse threshold mode)
        is_gray: if False, skip the grayscale conversion entirely
        order: (RGB, BGR) input order of the colors BGR/RGB. Deafult order: RGB
        Note: The output will be a numpy.array of the same order
    Returns:
        numpy.array of the order specified
    """
    # Normalize the input to a BGR array; all processing happens in BGR and
    # the result is converted back to the requested channel order.
    img = helpers.image2BGR(img, order)
    if is_gray:
        gs_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Thresholding modes are mutually exclusive; trunc takes priority,
        # then binary, then zero. `inverted` selects the inverse variant
        # (or a plain bitwise inversion when no threshold is requested).
        if trunc:
            _, gs_img = cv.threshold(gs_img, 127, 255, cv.THRESH_TRUNC)
            if inverted:
                gs_img = cv.bitwise_not(gs_img)
        elif to_binary:
            mode = cv.THRESH_BINARY_INV if inverted else cv.THRESH_BINARY
            _, gs_img = cv.threshold(gs_img, 120, 255, mode)
        elif to_zero:
            mode = cv.THRESH_TOZERO_INV if inverted else cv.THRESH_TOZERO
            _, gs_img = cv.threshold(gs_img, 120, 255, mode)
        elif inverted:
            gs_img = cv.bitwise_not(gs_img)
    else:
        # No grayscale conversion requested; optionally invert the colors.
        gs_img = img
        if inverted:
            gs_img = cv.bitwise_not(gs_img)
    return helpers.format_output_order_input_BGR(gs_img, order)
def luminosity(img, intensity_shift: int, order: str = 'rgb'):
    """ Increase/decrease the brightness of the image
    Params:
        img: (numpy.array, PIL.image, cv2.image)
        intensity_shift: decrease or increase the brightness level
        order: (RGB, BGR) input order of the colors BGR/RGB. Deafult order: RGB
        Note: The output will be a numpy.array of the same order
    Returns:
        numpy.array of the order specified
    """
    # Normalize the input to a BGR array; brightness is adjusted on the V
    # channel in HSV space, then the result is converted back.
    img = helpers.image2BGR(img, order)
    hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    h_ch, s_ch, v_ch = cv.split(hsv_img)
    if intensity_shift > 0:
        # Brighten: saturate pixels that would overflow past 255, then
        # shift the rest up (masked assignment avoids uint8 wrap-around).
        cap = 255 - intensity_shift
        overflow = v_ch > cap
        v_ch[overflow] = 255
        v_ch[~overflow] += intensity_shift
    else:
        # Darken: clamp pixels that would underflow below 0, then shift
        # the rest down by the magnitude of the (negative) shift.
        drop = -(intensity_shift)
        underflow = v_ch < drop
        v_ch[underflow] = 0
        v_ch[~underflow] -= drop
    recombined = cv.merge((h_ch, s_ch, v_ch))
    img = cv.cvtColor(recombined, cv.COLOR_HSV2BGR)
    return helpers.format_output_order_input_BGR(img, order)
def image_segmentation(image, rgb_list, order: str = 'rgb'):
    """ reconstruct an image with only a specified list of colors
    Params:
        img: (numpy.array, PIL.image, cv2.image)
        rgb_list: colors list - a 2 dimensional np array with shape (n,3) 3 being the channel values in order RGB, eg: [[224, 166, 147], [110, 34, 71], [195, 98, 100]]
        order: (RGB, BGR) input order of the colors BGR/RGB. Deafult order: RGB
        Note: The output will be a numpy.array of the same order
    Returns:
        numpy.array of the order specified
    """
    image = helpers.format_image_to_PIL(image, order)
    # Flatten the image into a (n_pixels, 3) uint8 array.
    src_pixels = np.array(image.getdata(), dtype=np.uint8)
    # Accept either a single color triplet or a list of them.
    palette = np.array(rgb_list)
    if palette.ndim == 1:
        palette = np.array([palette])
    if not palette.ndim == 2 or not palette.shape[1] == 3:
        raise ValueError(
            'rgb_list must be a two dimensional array of shape (n, 3)')
    remapped = [None] * len(image.getdata())
    # Replace each pixel with the palette color closest to it.
    for i, px in enumerate(src_pixels):
        best_dist = float('Inf')
        for candidate in palette:
            dist = helpers.calculate_distance(candidate, px)
            if dist < best_dist:
                best_dist = dist
                best_color = candidate
        remapped[i] = best_color
    width, height = image.size
    # Rebuild the (h, w, 3) image array from the remapped pixels.
    remapped = np.asarray(remapped)\
        .astype('uint8')\
        .reshape((height, width, 3))
    return helpers.format_output_order_input_RGB(remapped, order)
| [
"cv2.merge",
"imagewizard.helpers.helpers.format_image_to_PIL",
"imagewizard.helpers.helpers.format_output_order_input_BGR",
"imagewizard.helpers.helpers.format_output_order_input_RGB",
"cv2.threshold",
"numpy.asarray",
"numpy.array",
"imagewizard.helpers.helpers.calculate_distance",
"cv2.split",
... | [((1014, 1043), 'imagewizard.helpers.helpers.image2BGR', 'helpers.image2BGR', (['img', 'order'], {}), '(img, order)\n', (1031, 1043), False, 'from imagewizard.helpers import helpers\n'), ((2143, 2195), 'imagewizard.helpers.helpers.format_output_order_input_BGR', 'helpers.format_output_order_input_BGR', (['gs_img', 'order'], {}), '(gs_img, order)\n', (2180, 2195), False, 'from imagewizard.helpers import helpers\n'), ((2876, 2905), 'imagewizard.helpers.helpers.image2BGR', 'helpers.image2BGR', (['img', 'order'], {}), '(img, order)\n', (2893, 2905), False, 'from imagewizard.helpers import helpers\n'), ((2944, 2978), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2HSV'], {}), '(img, cv.COLOR_BGR2HSV)\n', (2955, 2978), True, 'import cv2 as cv\n'), ((3012, 3025), 'cv2.split', 'cv.split', (['hsv'], {}), '(hsv)\n', (3020, 3025), True, 'import cv2 as cv\n'), ((3671, 3709), 'cv2.merge', 'cv.merge', (['(hue, sat, brightness_value)'], {}), '((hue, sat, brightness_value))\n', (3679, 3709), True, 'import cv2 as cv\n'), ((3792, 3832), 'cv2.cvtColor', 'cv.cvtColor', (['final_hsv', 'cv.COLOR_HSV2BGR'], {}), '(final_hsv, cv.COLOR_HSV2BGR)\n', (3803, 3832), True, 'import cv2 as cv\n'), ((3844, 3893), 'imagewizard.helpers.helpers.format_output_order_input_BGR', 'helpers.format_output_order_input_BGR', (['img', 'order'], {}), '(img, order)\n', (3881, 3893), False, 'from imagewizard.helpers import helpers\n'), ((4505, 4546), 'imagewizard.helpers.helpers.format_image_to_PIL', 'helpers.format_image_to_PIL', (['image', 'order'], {}), '(image, order)\n', (4532, 4546), False, 'from imagewizard.helpers import helpers\n'), ((4718, 4736), 'numpy.array', 'np.array', (['rgb_list'], {}), '(rgb_list)\n', (4726, 4736), True, 'import numpy as np\n'), ((5634, 5690), 'imagewizard.helpers.helpers.format_output_order_input_RGB', 'helpers.format_output_order_input_RGB', (['new_pixels', 'order'], {}), '(new_pixels, order)\n', (5671, 5690), False, 'from imagewizard.helpers import helpers\n'), 
((1112, 1147), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (1123, 1147), True, 'import cv2 as cv\n'), ((4752, 4772), 'numpy.array', 'np.array', (['[rgb_list]'], {}), '([rgb_list])\n', (4760, 4772), True, 'import numpy as np\n'), ((2108, 2130), 'cv2.bitwise_not', 'cv.bitwise_not', (['gs_img'], {}), '(gs_img)\n', (2122, 2130), True, 'import cv2 as cv\n'), ((5265, 5305), 'imagewizard.helpers.helpers.calculate_distance', 'helpers.calculate_distance', (['color', 'pixel'], {}), '(color, pixel)\n', (5291, 5305), False, 'from imagewizard.helpers import helpers\n'), ((1255, 1302), 'cv2.threshold', 'cv.threshold', (['gs_img', '(127)', '(255)', 'cv.THRESH_TRUNC'], {}), '(gs_img, 127, 255, cv.THRESH_TRUNC)\n', (1267, 1302), True, 'import cv2 as cv\n'), ((1328, 1350), 'cv2.bitwise_not', 'cv.bitwise_not', (['gs_img'], {}), '(gs_img)\n', (1342, 1350), True, 'import cv2 as cv\n'), ((1779, 1826), 'cv2.threshold', 'cv.threshold', (['gs_img', '(127)', '(255)', 'cv.THRESH_TRUNC'], {}), '(gs_img, 127, 255, cv.THRESH_TRUNC)\n', (1791, 1826), True, 'import cv2 as cv\n'), ((1407, 1459), 'cv2.threshold', 'cv.threshold', (['gs_img', '(120)', '(255)', 'cv.THRESH_BINARY_INV'], {}), '(gs_img, 120, 255, cv.THRESH_BINARY_INV)\n', (1419, 1459), True, 'import cv2 as cv\n'), ((1883, 1931), 'cv2.threshold', 'cv.threshold', (['gs_img', '(120)', '(255)', 'cv.THRESH_BINARY'], {}), '(gs_img, 120, 255, cv.THRESH_BINARY)\n', (1895, 1931), True, 'import cv2 as cv\n'), ((5543, 5565), 'numpy.asarray', 'np.asarray', (['new_pixels'], {}), '(new_pixels)\n', (5553, 5565), True, 'import numpy as np\n'), ((1555, 1607), 'cv2.threshold', 'cv.threshold', (['gs_img', '(120)', '(255)', 'cv.THRESH_TOZERO_INV'], {}), '(gs_img, 120, 255, cv.THRESH_TOZERO_INV)\n', (1567, 1607), True, 'import cv2 as cv\n'), ((1692, 1714), 'cv2.bitwise_not', 'cv.bitwise_not', (['gs_img'], {}), '(gs_img)\n', (1706, 1714), True, 'import cv2 as cv\n'), ((1986, 2034), 'cv2.threshold', 
'cv.threshold', (['gs_img', '(120)', '(255)', 'cv.THRESH_TOZERO'], {}), '(gs_img, 120, 255, cv.THRESH_TOZERO)\n', (1998, 2034), True, 'import cv2 as cv\n')] |
import torch
import torch.nn as nn
import numpy as np
import scipy.io as scio
import os
import matplotlib.pyplot as plt
# Pin computation to the first GPU; must be set before CUDA is initialized.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Fix RNG seeds so repeated runs generate identical data sets.
torch.manual_seed(1)
np.random.seed(1)
# 5x5 fourth-order finite-difference Laplacian stencil.
# NOTE(review): lapl_op is not referenced below -- apply_laplacian()
# hard-codes the same weights inline; kept here for reference.
lapl_op = [[[[ 0, 0, -1/12, 0, 0],
             [ 0, 0, 4/3, 0, 0],
             [-1/12, 4/3, -5, 4/3, -1/12],
             [ 0, 0, 4/3, 0, 0],
             [ 0, 0, -1/12, 0, 0]]]]
# ============ define relevant functions =============
# https://github.com/benmaier/reaction-diffusion/blob/master/gray_scott.ipynb
def apply_laplacian(mat, dx = 0.01):
    """Apply a fourth-order discrete Laplacian with periodic boundaries.

    Uses the 9-point stencil (-1/12, 4/3, -5, 4/3, -1/12 along each axis);
    periodicity comes from np.roll wrapping at the edges. For more
    information see
    https://en.wikipedia.org/wiki/Discrete_Laplace_operator#Implementation_via_operator_discretization

    Params:
        mat: 2-D array of field values (left unmodified)
        dx: grid spacing; the result is scaled by 1/dx**2
    Returns:
        2-D array of the same shape holding the Laplacian of mat
    """
    # -5 * mat already allocates a fresh array, so the defensive .copy()
    # the original carried was redundant.
    neigh_mat = -5 * mat
    # Stencil weights for each neighbor offset (row, col).
    neighbors = [
        ( 4/3, (-1, 0)),
        ( 4/3, ( 0, -1)),
        ( 4/3, ( 0, 1)),
        ( 4/3, ( 1, 0)),
        (-1/12, (-2, 0)),
        (-1/12, (0, -2)),
        (-1/12, (0, 2)),
        (-1/12, (2, 0)),
    ]
    # Shift the matrix toward each neighbor and accumulate its weighted
    # contribution; np.roll implements the periodic boundary condition.
    for weight, neigh in neighbors:
        neigh_mat += weight * np.roll(mat, neigh, (0, 1))
    return neigh_mat / dx**2
# Define the update formula for chemicals A and B
def update(A, B, DA, DB, f, k, delta_t):
    """Advance the Gray-Scott system one forward-Euler step.

    Note: A and B are updated in place (via +=) and also returned.
    """
    # Right-hand side: diffusion + autocatalytic reaction + feed/kill.
    lap_term_A = DA * apply_laplacian(A)
    lap_term_B = DB * apply_laplacian(B)
    conversion = A * B**2
    dA_dt = lap_term_A - conversion + f * (1 - A)
    dB_dt = lap_term_B + conversion - (k + f) * B
    # Explicit Euler step, applied in place.
    A += dA_dt * delta_t
    B += dB_dt * delta_t
    return A, B
def GetEachTerm(A, B, DA, DB, f, k, delta_t, dx):
    """Return the individual Gray-Scott PDE terms for inspection.

    Returns (diffusion_A, diffusion_B, reaction, feed_A, kill_B).
    Note: delta_t is unused; it is kept for signature compatibility.
    """
    diffusion_A = DA * apply_laplacian(A, dx)
    diffusion_B = DB * apply_laplacian(B, dx)
    # Autocatalytic reaction term shared by both species.
    react = A * B ** 2
    # Linear feed (A) and kill (B) terms.
    feed_A = f * (1 - A)
    kill_B = (k + f) * B
    return diffusion_A, diffusion_B, react, feed_A, kill_B
def update_rk4(A0, B0, DA, DB, f, k, delta_t, dx):
    """Advance the Gray-Scott system one step with classical RK4.

    See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

    The four hand-expanded stages of the original were copies of the same
    right-hand-side computation; they are factored into `_rhs` below with
    the arithmetic order preserved.

    Params:
        A0, B0: current concentration fields (left unmodified)
        DA, DB: diffusion coefficients
        f, k: feed and kill rates
        delta_t: time step
        dx: grid spacing passed to the Laplacian
    Returns:
        (A, B): the new concentration fields
    """
    def _rhs(A, B):
        # Gray-Scott right-hand side: diffusion - reaction + feed for A,
        # diffusion + reaction - kill for B.
        reaction = A * B ** 2
        dA = DA * apply_laplacian(A, dx) - reaction + f * (1 - A)
        dB = DB * apply_laplacian(B, dx) + reaction - (k + f) * B
        return dA, dB

    # Stage 1: slope at the current state.
    K1_a, K1_b = _rhs(A0, B0)
    # Stage 2: slope at the midpoint predicted with K1.
    K2_a, K2_b = _rhs(A0 + K1_a * delta_t / 2.0, B0 + K1_b * delta_t / 2.0)
    # Stage 3: slope at the midpoint predicted with K2.
    K3_a, K3_b = _rhs(A0 + K2_a * delta_t / 2.0, B0 + K2_b * delta_t / 2.0)
    # Stage 4: slope at the full step predicted with K3.
    K4_a, K4_b = _rhs(A0 + K3_a * delta_t, B0 + K3_b * delta_t)
    # Weighted combination of the four slopes.
    A = A0 + delta_t * (K1_a + 2 * K2_a + 2 * K3_a + K4_a) / 6.0
    B = B0 + delta_t * (K1_b + 2 * K2_b + 2 * K3_b + K4_b) / 6.0
    return A, B
def get_initial_A_and_B(N, random_influence = 0.2):
    """Build the initial chemical concentration fields.

    A starts near 1 and B near 0 (plus uniform noise scaled by
    random_influence); two square disturbances seed the pattern.
    Returns (A, B), each an (N, N) array.
    """
    # Homogeneous base concentrations.
    conc_A = (1 - random_influence) * np.ones((N, N))
    conc_B = np.zeros((N, N))
    # Add noise. The draws happen even when random_influence == 0 so the
    # global RNG state advances identically in all configurations.
    conc_A += random_influence * np.random.random((N, N))
    conc_B += random_influence * np.random.random((N, N))
    # Candidate disturbance centers (N3 kept for the commented-out seeds).
    N1, N2, N3 = N // 4 - 4, N // 2, 3 * N // 4
    r = int(N / 10.0)
    # Seed two square disturbances (the remaining four are disabled, as in
    # the original).
    for cx, cy in ((N1, N1), (N2, N2)):
        conc_A[cx - r:cx + r, cy - r:cy + r] = 0.50
        conc_B[cx - r:cx + r, cy - r:cy + r] = 0.25
    return conc_A, conc_B
def postProcess(output, N, xmin, xmax, ymin, ymax, num, batch, save_path):
    ''' Plot the u and v fields at time step `num` side by side and save
    the figure as a png under save_path.
    '''
    # Cell-centered coordinates (drop the duplicated periodic endpoint).
    xs = np.linspace(xmin, xmax, N + 1)[:-1]
    ys = np.linspace(ymin, ymax, N + 1)[:-1]
    grid_x, grid_y = np.meshgrid(xs, ys)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    # Left panel: u field; right panel: v field. Both share the same style.
    panels = ((output[num, 0, :, :], 'u-FDM'),
              (output[num, 1, :, :], 'v-FDM'))
    for axis, (field, title) in zip(axes, panels):
        cf = axis.scatter(grid_x, grid_y, c=field, alpha=0.95,
                          edgecolors='none', cmap='hot', marker='s', s=2)
        axis.axis('square')
        axis.set_xlim([xmin, xmax])
        axis.set_ylim([ymin, ymax])
        cf.cmap.set_under('black')
        cf.cmap.set_over('white')
        axis.set_title(title)
        fig.colorbar(cf, ax=axis, extend='both')
    plt.savefig(save_path + '/uv_[b=%d][t=%d].png' % (batch, num))
    plt.close('all')
if __name__ == '__main__':
    #################### generate data #####################
    # =========== define model parameters ==========
    # dt should be 1/2 of dx
    # Diffusion coefficients
    DA = 0.16 #2*10**-5
    DB = 0.08 #DA/4
    # define birth/death rates
    f = 0.06 #1/25
    k = 0.062 #3/50
    # grid size
    N = 256 # 128
    # update in time
    delta_t = 1.0 #1.0/2
    # spatial step
    dx = 1.0 #1.0 / N
    # intialize the chemical concentrations, random_incluence=0
    A, B = get_initial_A_and_B(N, random_influence = 0.0)
    # Record every 5th state; axis 0 of each record is the snapshot index.
    A_record = A.copy()[None,...]
    B_record = B.copy()[None,...]
    N_simulation_steps = 15000
    for step in range(N_simulation_steps):
        # Runge-kutta scheme
        #A, B = update(A, B, DA, DB, f, k, delta_t)
        A, B = update_rk4(A, B, DA, DB, f, k, delta_t, dx)
        if step%5 ==0:
            print(step)
            A_record = np.concatenate((A_record, A[None,...]), axis=0)
            B_record = np.concatenate((B_record, B[None,...]), axis=0)
    # Stack the two species: UV has shape (2, n_snapshots, N, N).
    UV = np.concatenate((A_record[None,...], B_record[None,...]), axis=0)
    save_path = './2DGS_IC1_2x3001x256x256.mat'
    scio.savemat(save_path, {'uv': UV})
    # Plot the result (transposed to (n_snapshots, 2, N, N) for postProcess).
    output = np.transpose(UV, [1, 0, 2, 3])
    fig_save_path = './2DGS/'
    for i in range(21):
        postProcess(output, N, 0, N*dx, 0, N*dx, num=150*i, batch=1,save_path=fig_save_path)
    # Time history of u at one probe point.
    plt.figure()
    plt.plot(UV[0, :, 50, 50], alpha=0.6, label='rk4, dt=1.0')
    plt.legend()
    # plt.show()
    plt.savefig(fig_save_path + '/fig[x=50,y=50].png')
| [
"torch.manual_seed",
"matplotlib.pyplot.savefig",
"scipy.io.savemat",
"numpy.ones",
"numpy.roll",
"numpy.random.random",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.random.seed",
"numpy.concatenate",
"numpy.meshgr... | [((162, 182), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (179, 182), False, 'import torch\n'), ((184, 201), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (198, 201), True, 'import numpy as np\n'), ((4640, 4656), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4648, 4656), True, 'import numpy as np\n'), ((5732, 5749), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5743, 5749), True, 'import numpy as np\n'), ((5877, 5923), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(6, 3)'}), '(nrows=1, ncols=2, figsize=(6, 3))\n', (5889, 5923), True, 'import matplotlib.pyplot as plt\n'), ((6670, 6732), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + '/uv_[b=%d][t=%d].png' % (batch, num))"], {}), "(save_path + '/uv_[b=%d][t=%d].png' % (batch, num))\n", (6681, 6732), True, 'import matplotlib.pyplot as plt\n'), ((6735, 6751), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6744, 6751), True, 'import matplotlib.pyplot as plt\n'), ((7837, 7903), 'numpy.concatenate', 'np.concatenate', (['(A_record[None, ...], B_record[None, ...])'], {'axis': '(0)'}), '((A_record[None, ...], B_record[None, ...]), axis=0)\n', (7851, 7903), True, 'import numpy as np\n'), ((7954, 7989), 'scipy.io.savemat', 'scio.savemat', (['save_path', "{'uv': UV}"], {}), "(save_path, {'uv': UV})\n", (7966, 7989), True, 'import scipy.io as scio\n'), ((8030, 8060), 'numpy.transpose', 'np.transpose', (['UV', '[1, 0, 2, 3]'], {}), '(UV, [1, 0, 2, 3])\n', (8042, 8060), True, 'import numpy as np\n'), ((8217, 8229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8227, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8234, 8292), 'matplotlib.pyplot.plot', 'plt.plot', (['UV[0, :, 50, 50]'], {'alpha': '(0.6)', 'label': '"""rk4, dt=1.0"""'}), "(UV[0, :, 50, 50], alpha=0.6, label='rk4, dt=1.0')\n", (8242, 8292), True, 'import 
matplotlib.pyplot as plt\n'), ((8297, 8309), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8307, 8309), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8381), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_save_path + '/fig[x=50,y=50].png')"], {}), "(fig_save_path + '/fig[x=50,y=50].png')\n", (8342, 8381), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4632), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (4624, 4632), True, 'import numpy as np\n'), ((4715, 4739), 'numpy.random.random', 'np.random.random', (['(N, N)'], {}), '((N, N))\n', (4731, 4739), True, 'import numpy as np\n'), ((4767, 4791), 'numpy.random.random', 'np.random.random', (['(N, N)'], {}), '((N, N))\n', (4783, 4791), True, 'import numpy as np\n'), ((5635, 5665), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(N + 1)'], {}), '(xmin, xmax, N + 1)\n', (5646, 5665), True, 'import numpy as np\n'), ((5677, 5707), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(N + 1)'], {}), '(ymin, ymax, N + 1)\n', (5688, 5707), True, 'import numpy as np\n'), ((1616, 1643), 'numpy.roll', 'np.roll', (['mat', 'neigh', '(0, 1)'], {}), '(mat, neigh, (0, 1))\n', (1623, 1643), True, 'import numpy as np\n'), ((7704, 7752), 'numpy.concatenate', 'np.concatenate', (['(A_record, A[None, ...])'], {'axis': '(0)'}), '((A_record, A[None, ...]), axis=0)\n', (7718, 7752), True, 'import numpy as np\n'), ((7775, 7823), 'numpy.concatenate', 'np.concatenate', (['(B_record, B[None, ...])'], {'axis': '(0)'}), '((B_record, B[None, ...]), axis=0)\n', (7789, 7823), True, 'import numpy as np\n')] |
###########################################################################
###########################################################################
# SPyH
###########################################################################
###########################################################################
#Authors : <NAME> & <NAME>
#Version : SPyH.0
#Contact : <EMAIL>
###########################################################################
# Some useful imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rc('text', usetex=True)
from matplotlib.patches import Polygon
from matplotlib.collections import PolyCollection
from matplotlib.collections import PatchCollection
import matplotlib.colorbar as cbar
import matplotlib.cm
from src.spyh import *
from src.sphvar import *
def plotPropertiesWithBound(part,partProp,nameBar,bounds,dr,PARTFIG):
    '''
    Display one particle property as coloured squares with a colorbar.

    input :
        -part : list of particles
        -partProp : array of data to display (coloured for FLUID particles)
        -nameBar : legend for the bar color
        -bounds :
            [xMin,xMax,yMin,yMax,propMin, propMax] the bound of the domain and color bar
        -dr : particle spacing (side length of each drawn square)
        -PARTFIG : the ID of the plot
    output :
        -display a png image of the simulation but does not save it
    '''
    def _square_vertices(centers, half_side):
        # Build the 4 corner vertices of an axis-aligned square of side
        # 2*half_side around each center: result shape (Npart, 4, 2).
        # (This construction was duplicated four times in the original.)
        offs = np.ones([4, len(centers), 2])
        offs[0, :, :] = [-half_side, -half_side]
        offs[1, :, :] = [half_side, -half_side]
        offs[2, :, :] = [half_side, half_side]
        offs[3, :, :] = [-half_side, half_side]
        return np.swapaxes(centers + offs, 0, 1)

    if len(bounds) == 6:
        xMin, xMax, yMin, yMax, propMin, propMax = bounds
    else:
        print('Error the bounds should have 6 inputs!!! check plotParticles function')
        return
    # Map property values linearly onto the jet colormap.
    def f_color_map(x):
        Nstep = 100
        jet = matplotlib.cm.get_cmap('jet', Nstep)
        return jet((x - propMin) / (propMax - propMin))
    cmap = plt.cm.jet
    normal = plt.Normalize(propMin, propMax)  # colorbar normalization
    infoTab = part[:, INFO]
    # Square patches for each particle family.
    vrts_fluid = _square_vertices(part[infoTab == FLUID][:, POS], dr / 2)
    vrts_bound = _square_vertices(part[infoTab == BOUND][:, POS], dr / 2)
    vrts_mobilebound = _square_vertices(part[infoTab == MOBILEBOUND][:, POS], dr / 2)
    vrts_mobilesolid = _square_vertices(part[infoTab == MOBILESOLID][:, POS], dr / 2)
    # Fluid particles are coloured by the property; boundaries get fixed colors.
    rgb = f_color_map(partProp[infoTab == FLUID])
    # create the figure (reuse the subplot axes if the figure is already open)
    fig = plt.figure(PARTFIG)
    ax_list = fig.axes
    ax = ax_list[0] if len(ax_list) >= 1 else fig.subplots()
    for verts, colors in ((vrts_fluid, rgb),
                          (vrts_bound, 'gray'),
                          (vrts_mobilebound, 'white'),
                          (vrts_mobilesolid, 'purple')):
        coll = PolyCollection(verts, array=None, facecolors=colors,
                              edgecolor='black', linewidths=0.1)
        ax.add_collection(coll)
    ax.set_aspect('equal')
    plt.xlabel('$x$(m)', fontsize=18)
    plt.ylabel('$y$(m)', fontsize=18)
    plt.xlim(xMin, xMax)
    plt.ylim(yMin, yMax)
    plt.tight_layout()
    ax = plt.gca()
    ax.tick_params(axis = 'both', which = 'major', labelsize = 18)
    ax.xaxis.set_major_locator(plt.MaxNLocator(5))
    ax.yaxis.set_major_locator(plt.MaxNLocator(5))
    # Dedicated colorbar axes built from the normalization above.
    cax, _ = cbar.make_axes(ax)
    cb2 = cbar.ColorbarBase(cax, cmap=cmap, norm=normal)
    cb2.set_label(nameBar, fontsize=18)
    ax = plt.gca()
    ax.tick_params(axis = 'both', which = 'major', labelsize = 18)
    ##fig.savefig(figname,bbox_inches='tight')
    plt.show(block=False)
    plt.draw()
def particleOutline(part,partID,color,dr,PARTFIG):
    # input :
    #     -part : list of particles
    #     -PartID : np.array of the particles to display
    #     -color : the color of the edge to outline
    #     -PARTFIG : the ID of the plot
    # output :
    #     -display a png image of the simulation but does not save it
    # Reuse the axes of the already-open figure PARTFIG.
    fig = plt.figure(PARTFIG)
    ax = fig.axes[0]
    # Corner offsets of a dr-sided square around each selected particle.
    centers = part[partID][:, POS]
    corner_offs = np.ones([4, len(centers), 2])
    corner_offs[0, :, :] = [-dr / 2, -dr / 2]
    corner_offs[1, :, :] = [dr / 2, -dr / 2]
    corner_offs[2, :, :] = [dr / 2, dr / 2]
    corner_offs[3, :, :] = [-dr / 2, dr / 2]
    quads = np.swapaxes(centers + corner_offs, 0, 1)
    # Draw only the square outlines (no fill) on top of the existing plot.
    outline = PolyCollection(quads, array=None, facecolors='none',
                             edgecolor=color, linewidths=3)
    ax.add_collection(outline)
    plt.show(block=False)
    plt.draw()
def plotSpaces(posSpace, color, lspace, PARTFIG):
    """Draw the search-space cells with their index labels on an existing figure.

    input :
      -posSpace : cell-center positions, shape (n, 2)
      -color    : edge color of the cell outlines
      -lspace   : cell edge length
      -PARTFIG  : the ID of the plot (figure must already exist)
    output :
      -display a png image of the simulation but does not save it
    """
    ax = plt.figure(PARTFIG).axes[0]
    half = lspace / 2
    # the four corners of a square cell, relative to its center
    corners = np.array([[-half, -half], [half, -half], [half, half], [-half, half]])
    # broadcast to (4, n, 2), then reorder to cell-major (n, 4, 2)
    verts = np.swapaxes(posSpace + corners[:, None, :], 0, 1)
    ax.add_collection(PolyCollection(verts, array=None, facecolors='none',
                                     edgecolor=color, linewidths=1))
    # annotate each cell with its index at the cell center
    for idx, (x, y) in enumerate(posSpace):
        ax.text(x, y, repr(idx), fontsize=14,
                horizontalalignment='center', verticalalignment='center')
    plt.show(block=False)
    plt.draw()
def spacesOutline(posSpace, color, lspace, PARTFIG):
    """Outline the search-space cells (no labels) on an existing figure.

    input :
      -posSpace : cell-center positions, shape (n, 2)
      -color    : edge color of the cell outlines
      -lspace   : cell edge length
      -PARTFIG  : the ID of the plot (figure must already exist)
    output :
      -display a png image of the simulation but does not save it
    """
    ax = plt.figure(PARTFIG).axes[0]
    half = lspace / 2
    # the four corners of a square cell, relative to its center
    corners = np.array([[-half, -half], [half, -half], [half, half], [-half, half]])
    # broadcast to (4, n, 2), then reorder to cell-major (n, 4, 2)
    verts = np.swapaxes(posSpace + corners[:, None, :], 0, 1)
    ax.add_collection(PolyCollection(verts, array=None, facecolors='none',
                                     edgecolor=color, linewidths=1))
    plt.show(block=False)
    plt.draw()
def quiverPlot(part, sc, PARTFIG):
    """Draw the particle velocity field as arrows.

    input :
      -part    : list of particles (positions read from POS, velocities from VEL)
      -sc      : scale of the vector
      -PARTFIG : the ID of the plot
    output :
      -display a png image of the simulation but does not save it
    """
    fig = plt.figure(PARTFIG)
    # reuse the subplot axes if the figure is already open
    existing = fig.axes
    ax = existing[0] if existing else fig.subplots()
    ax.quiver(part[:, POS[0]], part[:, POS[1]],
              part[:, VEL[0]], part[:, VEL[1]],
              scale=sc, scale_units='inches')
    ##fig.savefig(figname,bbox_inches='tight')
    plt.show(block=False)
    plt.draw()
def plotProperties(part, partProp, nameBar, bounds, dr, PARTFIG):
    '''
    Plot a per-particle scalar property as colored square cells with a colorbar.

    input :
      -part     : list of particles (positions read from the POS columns)
      -partProp : array of data to display (one value per particle)
      -nameBar  : legend for the bar color
      -bounds   :
        [xMin,xMax,yMin,yMax,propMin, propMax] the bound of the domain and color bar
      -dr       : particle spacing (cell edge length)
      -PARTFIG  : the ID of the plot
    output :
      -display a png image of the simulation but does not save it

    Fix: removed the unused local ``infoTab`` (was assigned but never read).
    '''
    if len(bounds) == 6:
        xMin, xMax, yMin, yMax, propMin, propMax = bounds
    else:
        print('Error the bounds should have 6 inputs!!! check plotParticles function')
        return
    # Create a colormap: map property values to 'jet' over [propMin, propMax]
    def f_color_map(x):
        Nstep = 100
        jet = matplotlib.cm.get_cmap('jet', Nstep)
        return jet((x - propMin) / (propMax - propMin))
    cmap = plt.cm.jet
    normal = plt.Normalize(propMin, propMax)  # my numbers from 0-1
    # build one square cell (4 corners) per particle
    cnts = part[:, POS]
    offs = np.ones([4, len(cnts), 2])
    offs[0, :, :] = [-dr/2, -dr/2]
    offs[1, :, :] = [dr/2, -dr/2]
    offs[2, :, :] = [dr/2, dr/2]
    offs[3, :, :] = [-dr/2, dr/2]
    vrts_fluid = cnts + offs
    vrts_fluid = np.swapaxes(vrts_fluid, 0, 1)  # particle-major vertex list
    rgb = f_color_map(partProp)
    # create the figure
    fig = plt.figure(PARTFIG)
    ax_list = fig.axes  # check if the figure is already open else get back the subplot axes
    if len(ax_list) < 1:
        ax = fig.subplots()
    else:
        ax = ax_list[0]
    coll = PolyCollection(vrts_fluid, array=None, facecolors=rgb, edgecolor='black', linewidths=0.1)
    ax.add_collection(coll)
    ax.set_aspect('equal')
    plt.xlabel('$x$(m)', fontsize=18)
    plt.ylabel('$y$(m)', fontsize=18)
    plt.xlim(xMin, xMax)
    plt.ylim(yMin, yMax)
    plt.tight_layout()
    ax = plt.gca()
    ax.tick_params(axis='both', which='major', labelsize=18)
    ax.xaxis.set_major_locator(plt.MaxNLocator(5))
    ax.yaxis.set_major_locator(plt.MaxNLocator(5))
    cax, _ = cbar.make_axes(ax)
    cb2 = cbar.ColorbarBase(cax, cmap=cmap, norm=normal)
    cb2.set_label(nameBar, fontsize=18)
    # NOTE(review): after cbar.make_axes, plt.gca() may refer to the colorbar
    # axis, so this second tick_params styles that axis — kept as in original.
    ax = plt.gca()
    ax.tick_params(axis='both', which='major', labelsize=18)
    ##fig.savefig(figname,bbox_inches='tight')
    plt.show(block=False)
    plt.draw()
| [
"matplotlib.pyplot.draw",
"matplotlib.pyplot.MaxNLocator",
"matplotlib.collections.PolyCollection",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.Normalize",
"matplotlib.pyplot.xlabel",
"matplotlib.colorbar.ColorbarBase",
"numpy.swapaxes",
"matplotlib.pyplot.figure",
"m... | [((614, 648), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (627, 648), False, 'import matplotlib\n'), ((1829, 1860), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['propMin', 'propMax'], {}), '(propMin, propMax)\n', (1842, 1860), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2183), 'numpy.swapaxes', 'np.swapaxes', (['vrts_fluid', '(0)', '(1)'], {}), '(vrts_fluid, 0, 1)\n', (2165, 2183), True, 'import numpy as np\n'), ((2429, 2458), 'numpy.swapaxes', 'np.swapaxes', (['vrts_bound', '(0)', '(1)'], {}), '(vrts_bound, 0, 1)\n', (2440, 2458), True, 'import numpy as np\n'), ((2757, 2792), 'numpy.swapaxes', 'np.swapaxes', (['vrts_mobilebound', '(0)', '(1)'], {}), '(vrts_mobilebound, 0, 1)\n', (2768, 2792), True, 'import numpy as np\n'), ((3087, 3122), 'numpy.swapaxes', 'np.swapaxes', (['vrts_mobilesolid', '(0)', '(1)'], {}), '(vrts_mobilesolid, 0, 1)\n', (3098, 3122), True, 'import numpy as np\n'), ((3210, 3229), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (3220, 3229), True, 'import matplotlib.pyplot as plt\n'), ((3410, 3503), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_fluid'], {'array': 'None', 'facecolors': 'rgb', 'edgecolor': '"""black"""', 'linewidths': '(0.1)'}), "(vrts_fluid, array=None, facecolors=rgb, edgecolor='black',\n linewidths=0.1)\n", (3424, 3503), False, 'from matplotlib.collections import PolyCollection\n'), ((3535, 3631), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_bound'], {'array': 'None', 'facecolors': '"""gray"""', 'edgecolor': '"""black"""', 'linewidths': '(0.1)'}), "(vrts_bound, array=None, facecolors='gray', edgecolor='black',\n linewidths=0.1)\n", (3549, 3631), False, 'from matplotlib.collections import PolyCollection\n'), ((3663, 3767), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_mobilebound'], {'array': 'None', 'facecolors': '"""white"""', 'edgecolor': '"""black"""', 
'linewidths': '(0.1)'}), "(vrts_mobilebound, array=None, facecolors='white', edgecolor=\n 'black', linewidths=0.1)\n", (3677, 3767), False, 'from matplotlib.collections import PolyCollection\n'), ((3798, 3903), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_mobilesolid'], {'array': 'None', 'facecolors': '"""purple"""', 'edgecolor': '"""black"""', 'linewidths': '(0.1)'}), "(vrts_mobilesolid, array=None, facecolors='purple', edgecolor\n ='black', linewidths=0.1)\n", (3812, 3903), False, 'from matplotlib.collections import PolyCollection\n'), ((3955, 3988), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$(m)"""'], {'fontsize': '(18)'}), "('$x$(m)', fontsize=18)\n", (3965, 3988), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4025), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$(m)"""'], {'fontsize': '(18)'}), "('$y$(m)', fontsize=18)\n", (4002, 4025), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4049), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (4037, 4049), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4073), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (4061, 4073), True, 'import matplotlib.pyplot as plt\n'), ((4078, 4096), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4094, 4096), True, 'import matplotlib.pyplot as plt\n'), ((4106, 4115), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4113, 4115), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4316), 'matplotlib.colorbar.make_axes', 'cbar.make_axes', (['ax'], {}), '(ax)\n', (4312, 4316), True, 'import matplotlib.colorbar as cbar\n'), ((4328, 4374), 'matplotlib.colorbar.ColorbarBase', 'cbar.ColorbarBase', (['cax'], {'cmap': 'cmap', 'norm': 'normal'}), '(cax, cmap=cmap, norm=normal)\n', (4345, 4374), True, 'import matplotlib.colorbar as cbar\n'), ((4422, 4431), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4429, 4431), True, 'import matplotlib.pyplot as 
plt\n'), ((4550, 4571), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4558, 4571), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4586), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4584, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4910), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (4901, 4910), True, 'import matplotlib.pyplot as plt\n'), ((5182, 5211), 'numpy.swapaxes', 'np.swapaxes', (['vrts_fluid', '(0)', '(1)'], {}), '(vrts_fluid, 0, 1)\n', (5193, 5211), True, 'import numpy as np\n'), ((5220, 5312), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_fluid'], {'array': 'None', 'facecolors': '"""none"""', 'edgecolor': 'color', 'linewidths': '(3)'}), "(vrts_fluid, array=None, facecolors='none', edgecolor=color,\n linewidths=3)\n", (5234, 5312), False, 'from matplotlib.collections import PolyCollection\n'), ((5331, 5352), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (5339, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5354, 5364), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5362, 5364), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5623), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (5614, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5889, 5918), 'numpy.swapaxes', 'np.swapaxes', (['vrts_space', '(0)', '(1)'], {}), '(vrts_space, 0, 1)\n', (5900, 5918), True, 'import numpy as np\n'), ((5927, 6019), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_space'], {'array': 'None', 'facecolors': '"""none"""', 'edgecolor': 'color', 'linewidths': '(1)'}), "(vrts_space, array=None, facecolors='none', edgecolor=color,\n linewidths=1)\n", (5941, 6019), False, 'from matplotlib.collections import PolyCollection\n'), ((6191, 6212), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (6199, 6212), True, 'import 
matplotlib.pyplot as plt\n'), ((6214, 6224), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6222, 6224), True, 'import matplotlib.pyplot as plt\n'), ((6467, 6486), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (6477, 6486), True, 'import matplotlib.pyplot as plt\n'), ((6752, 6781), 'numpy.swapaxes', 'np.swapaxes', (['vrts_space', '(0)', '(1)'], {}), '(vrts_space, 0, 1)\n', (6763, 6781), True, 'import numpy as np\n'), ((6790, 6882), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_space'], {'array': 'None', 'facecolors': '"""none"""', 'edgecolor': 'color', 'linewidths': '(1)'}), "(vrts_space, array=None, facecolors='none', edgecolor=color,\n linewidths=1)\n", (6804, 6882), False, 'from matplotlib.collections import PolyCollection\n'), ((6901, 6922), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (6909, 6922), True, 'import matplotlib.pyplot as plt\n'), ((6924, 6934), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6932, 6934), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7208), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (7199, 7208), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7587), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7574, 7587), True, 'import matplotlib.pyplot as plt\n'), ((7592, 7602), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7600, 7602), True, 'import matplotlib.pyplot as plt\n'), ((8539, 8570), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['propMin', 'propMax'], {}), '(propMin, propMax)\n', (8552, 8570), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8877), 'numpy.swapaxes', 'np.swapaxes', (['vrts_fluid', '(0)', '(1)'], {}), '(vrts_fluid, 0, 1)\n', (8859, 8877), True, 'import numpy as np\n'), ((8949, 8968), 'matplotlib.pyplot.figure', 'plt.figure', (['PARTFIG'], {}), '(PARTFIG)\n', (8959, 8968), True, 'import matplotlib.pyplot as 
plt\n'), ((9149, 9242), 'matplotlib.collections.PolyCollection', 'PolyCollection', (['vrts_fluid'], {'array': 'None', 'facecolors': 'rgb', 'edgecolor': '"""black"""', 'linewidths': '(0.1)'}), "(vrts_fluid, array=None, facecolors=rgb, edgecolor='black',\n linewidths=0.1)\n", (9163, 9242), False, 'from matplotlib.collections import PolyCollection\n'), ((9295, 9328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$(m)"""'], {'fontsize': '(18)'}), "('$x$(m)', fontsize=18)\n", (9305, 9328), True, 'import matplotlib.pyplot as plt\n'), ((9332, 9365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$(m)"""'], {'fontsize': '(18)'}), "('$y$(m)', fontsize=18)\n", (9342, 9365), True, 'import matplotlib.pyplot as plt\n'), ((9369, 9389), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (9377, 9389), True, 'import matplotlib.pyplot as plt\n'), ((9393, 9413), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (9401, 9413), True, 'import matplotlib.pyplot as plt\n'), ((9418, 9436), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9434, 9436), True, 'import matplotlib.pyplot as plt\n'), ((9446, 9455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9453, 9455), True, 'import matplotlib.pyplot as plt\n'), ((9638, 9656), 'matplotlib.colorbar.make_axes', 'cbar.make_axes', (['ax'], {}), '(ax)\n', (9652, 9656), True, 'import matplotlib.colorbar as cbar\n'), ((9668, 9714), 'matplotlib.colorbar.ColorbarBase', 'cbar.ColorbarBase', (['cax'], {'cmap': 'cmap', 'norm': 'normal'}), '(cax, cmap=cmap, norm=normal)\n', (9685, 9714), True, 'import matplotlib.colorbar as cbar\n'), ((9762, 9771), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9769, 9771), True, 'import matplotlib.pyplot as plt\n'), ((9890, 9911), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (9898, 9911), True, 'import matplotlib.pyplot as plt\n'), ((9916, 9926), 'matplotlib.pyplot.draw', 
'plt.draw', ([], {}), '()\n', (9924, 9926), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1744), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""jet"""', 'Nstep'], {}), "('jet', Nstep)\n", (1730, 1744), False, 'import matplotlib\n'), ((4214, 4232), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(5)'], {}), '(5)\n', (4229, 4232), True, 'import matplotlib.pyplot as plt\n'), ((4265, 4283), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(5)'], {}), '(5)\n', (4280, 4283), True, 'import matplotlib.pyplot as plt\n'), ((8418, 8454), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""jet"""', 'Nstep'], {}), "('jet', Nstep)\n", (8440, 8454), False, 'import matplotlib\n'), ((9554, 9572), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(5)'], {}), '(5)\n', (9569, 9572), True, 'import matplotlib.pyplot as plt\n'), ((9605, 9623), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(5)'], {}), '(5)\n', (9620, 9623), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from PIL import Image
from scipy import special
# PSF functions
def scalar_a(x):
    """Airy-pattern PSF term (J1(2*pi*x)/(pi*x))**2, with limit 1 at x = 0."""
    if x == 0:
        return 1.0
    u = np.pi * x
    return (special.jn(1, 2 * u) / u) ** 2

# elementwise version for array arguments
a = np.vectorize(scalar_a)

def s_b(x, NA=0.8, n=1.33):
    """High-NA correction term (NA/n)^2 * (J2(2*pi*x)/(pi*x))**2; 0 at x = 0."""
    if x == 0:
        return 0
    u = np.pi * x
    return (NA / n) ** 2 * (special.jn(2, 2 * u) / u) ** 2

# elementwise version for array arguments
b = np.vectorize(s_b)

def h00(x, NA=0.8, n=1.33):
    """Zeroth-order component of the PSF."""
    return a(x) + 2 * b(x, NA, n)

def h20(x, NA=0.8, n=1.33):
    """Second-order component of the PSF."""
    return (4 * b(x, NA, n) - a(x)) / np.sqrt(5)
# OTF functions
def myacos(x):
    """arccos(|x/2|) with NaN (i.e. |x| > 2) mapped to 0."""
    return np.nan_to_num(np.arccos(np.abs(0.5 * x)))

def mysqrt(x):
    """|x/2| * sqrt(1 - (x/2)^2) with NaN (i.e. |x| > 2) mapped to 0."""
    h = np.abs(0.5 * x)
    return np.nan_to_num(h * np.sqrt(1 - h ** 2))

def A(x):
    """Autocorrelation of the ideal circular pupil ("chat" function)."""
    return (2 / np.pi) * (myacos(x) - mysqrt(x))

def B(x, NA=0.8, n=1.33):
    """High-NA correction to the pupil autocorrelation."""
    scale = (NA / n) ** 2 / np.pi
    poly = 3.0 - 2.0 * np.abs(0.5 * x) ** 2
    return scale * (myacos(x) - poly * mysqrt(x))

def H00(x, NA=0.8, n=1.33):
    """Zeroth-order component of the OTF (normalised so H00(0) = 1)."""
    return (A(x) + 2 * B(x, NA=NA, n=n)) / (1 + (NA / n) ** 2)

def H20(x, NA=0.8, n=1.33):
    """Second-order component of the OTF."""
    return (4 * B(x, NA=NA, n=n) - A(x)) / (np.sqrt(5) * (1 + (NA / n) ** 2))
# File I/O
def save_tiff(image, filename):
    """Write a numpy image (float32) to *filename* in TIFF format."""
    Image.fromarray(image).save(filename, "TIFF")
def load_tiff(filename):
    """Read the TIFF file at *filename* into a float32 numpy array."""
    return np.array(Image.open(filename, mode='r'), dtype='float32')
def cs(arr):
    """Return the central column of *arr* (column index = arr.shape[0] // 2).

    Fix: replaced ``np.int`` (deprecated in NumPy 1.20, removed in 1.24) with
    integer floor division, which gives the same index for all sizes.
    NOTE(review): the column index is derived from the row count (shape[0]),
    so this is only the true center column for square arrays — kept as-is.
    """
    return arr[:, arr.shape[0] // 2]
# Fourier transform
def myfft(image, pad=1000):
    """Centered 2-D FFT magnitude of *image*, zero-padded by *pad* on each side.

    Returns (xF, |F|): the shifted frequency axis and the magnitude spectrum.
    """
    N = image.shape[0]
    padded = np.pad(image, pad_width=pad, mode='constant')
    # ifftshift before / fftshift after keeps the zero frequency centered
    spectrum = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(padded)))
    freqs = np.fft.fftshift(np.fft.fftfreq(2 * pad + N, 4 / N))
    return freqs, np.abs(spectrum)
| [
"scipy.special.jn",
"numpy.abs",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.sqrt",
"numpy.fft.fftfreq",
"numpy.array",
"numpy.int",
"numpy.fft.ifftshift",
"numpy.pad",
"numpy.vectorize"
] | [((203, 225), 'numpy.vectorize', 'np.vectorize', (['scalar_a'], {}), '(scalar_a)\n', (215, 225), True, 'import numpy as np\n'), ((365, 382), 'numpy.vectorize', 'np.vectorize', (['s_b'], {}), '(s_b)\n', (377, 382), True, 'import numpy as np\n'), ((1124, 1146), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1139, 1146), False, 'from PIL import Image\n'), ((1225, 1255), 'PIL.Image.open', 'Image.open', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (1235, 1255), False, 'from PIL import Image\n'), ((1267, 1299), 'numpy.array', 'np.array', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (1275, 1299), True, 'import numpy as np\n'), ((1447, 1492), 'numpy.pad', 'np.pad', (['image'], {'pad_width': 'pad', 'mode': '"""constant"""'}), "(image, pad_width=pad, mode='constant')\n", (1453, 1492), True, 'import numpy as np\n'), ((506, 516), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (513, 516), True, 'import numpy as np\n'), ((1618, 1652), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(2 * pad + N)', '(4 / N)'], {}), '(2 * pad + N, 4 / N)\n', (1632, 1652), True, 'import numpy as np\n'), ((1665, 1674), 'numpy.abs', 'np.abs', (['F'], {}), '(F)\n', (1671, 1674), True, 'import numpy as np\n'), ((584, 597), 'numpy.abs', 'np.abs', (['(x / 2)'], {}), '(x / 2)\n', (590, 597), True, 'import numpy as np\n'), ((644, 657), 'numpy.abs', 'np.abs', (['(x / 2)'], {}), '(x / 2)\n', (650, 657), True, 'import numpy as np\n'), ((1043, 1053), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1050, 1053), True, 'import numpy as np\n'), ((1332, 1356), 'numpy.int', 'np.int', (['(arr.shape[0] / 2)'], {}), '(arr.shape[0] / 2)\n', (1338, 1356), True, 'import numpy as np\n'), ((1551, 1581), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['padded_image'], {}), '(padded_image)\n', (1567, 1581), True, 'import numpy as np\n'), ((161, 189), 'scipy.special.jn', 'special.jn', (['(1)', '(2 * np.pi * x)'], {}), '(1, 2 * np.pi * x)\n', (171, 189), 
False, 'from scipy import special\n'), ((830, 843), 'numpy.abs', 'np.abs', (['(x / 2)'], {}), '(x / 2)\n', (836, 843), True, 'import numpy as np\n'), ((323, 351), 'scipy.special.jn', 'special.jn', (['(2)', '(2 * np.pi * x)'], {}), '(2, 2 * np.pi * x)\n', (333, 351), False, 'from scipy import special\n'), ((670, 683), 'numpy.abs', 'np.abs', (['(x / 2)'], {}), '(x / 2)\n', (676, 683), True, 'import numpy as np\n')] |
# imports needed for the following examples
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial.distance as distance
import scipy.cluster.hierarchy as hierarchy
# read a local file (path is relative to python's working directory)
# sep, header=True/None
infile = '../data/PO_asof_20150525.txt'
df = pd.read_table(infile, sep="|", thousands=',')
# set column name
df.columns = ['comp_code', 'comp_name', 'vendor_code', 'vendor_name',
              'which_day', 'po', 'amt']
# total amount and PO count per (company, vendor) pair
# (string aggregators avoid the pandas FutureWarning on np.sum)
grouped = df.groupby(['comp_code', 'vendor_code']).agg(
    {'amt': 'sum', 'po': 'count'})
# add two new columns
grouped['vendor_cnt'] = 1
grouped['amt_log'] = np.log2(grouped['amt'])
# clear all index generated by groupby
# prepare for pivot
slim = grouped.reset_index()
# simple version, only (vendor count, log(po count), log(amt)) for clustering
comp_vendor = slim.groupby('comp_code').agg(
    {'amt': 'sum', 'po': 'sum', 'vendor_cnt': 'count'})
comp_vendor['amt_log'] = np.log(comp_vendor['amt'])
comp_vendor['po_log'] = np.log(comp_vendor['po'])
# keep only the derived columns (keyword form; positional axis is deprecated)
comp_vendor = comp_vendor.drop(columns=['po', 'amt'])
comp_vendor = comp_vendor.fillna(0)
# only use top 20 for clustering
# BUGFIX: DataFrame.sort() was removed in pandas 0.20 -- use sort_values()
comp_vendor = comp_vendor.sort_values('amt_log', ascending=False)
comp_vendor = comp_vendor.head(20)
n = comp_vendor.shape[0]
k = 5
# single-linkage hierarchical clustering on pairwise distances
d = distance.pdist(comp_vendor)
Z = hierarchy.linkage(d, method='single')
T = hierarchy.fcluster(Z, k, 'maxclust')
# dendrogram leaf labels: the company codes
labels = [str(code) for code in comp_vendor.index]
# color threshold: the merge height that separates the k clusters
ct = Z[-(k - 1), 2]
# plot
P = hierarchy.dendrogram(Z, labels=labels, color_threshold=ct)
plt.show()
| [
"scipy.cluster.hierarchy.dendrogram",
"scipy.spatial.distance.pdist",
"numpy.log",
"scipy.cluster.hierarchy.linkage",
"pandas.read_table",
"numpy.log2",
"scipy.cluster.hierarchy.fcluster",
"matplotlib.pyplot.show"
] | [((340, 385), 'pandas.read_table', 'pd.read_table', (['infile'], {'sep': '"""|"""', 'thousands': '""","""'}), "(infile, sep='|', thousands=',')\n", (353, 385), True, 'import pandas as pd\n'), ((678, 701), 'numpy.log2', 'np.log2', (["grouped['amt']"], {}), "(grouped['amt'])\n", (685, 701), True, 'import numpy as np\n'), ((998, 1024), 'numpy.log', 'np.log', (["comp_vendor['amt']"], {}), "(comp_vendor['amt'])\n", (1004, 1024), True, 'import numpy as np\n'), ((1049, 1074), 'numpy.log', 'np.log', (["comp_vendor['po']"], {}), "(comp_vendor['po'])\n", (1055, 1074), True, 'import numpy as np\n'), ((1441, 1468), 'scipy.spatial.distance.pdist', 'distance.pdist', (['comp_vendor'], {}), '(comp_vendor)\n', (1455, 1468), True, 'import scipy.spatial.distance as distance\n'), ((1473, 1510), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['d'], {'method': '"""single"""'}), "(d, method='single')\n", (1490, 1510), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((1515, 1551), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['Z', 'k', '"""maxclust"""'], {}), "(Z, k, 'maxclust')\n", (1533, 1551), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((1886, 1944), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['Z'], {'labels': 'labels', 'color_threshold': 'ct'}), '(Z, labels=labels, color_threshold=ct)\n', (1906, 1944), True, 'import scipy.cluster.hierarchy as hierarchy\n'), ((1946, 1956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the decision boundary of a fitted linear classifier in 2-D.

    The boundary satisfies w[0]*x + w[1]*y + intercept = 0; we solve for y
    and plot it over [min_x - 5, max_x + 5] so the line spans the axes.
    """
    w = clf.coef_[0]
    slope = -w[0] / w[1]
    xs = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    ys = slope * xs - clf.intercept_[0] / w[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2-D, fit a one-vs-rest linear SVC, and draw one subplot.

    X, Y      : feature matrix and (n_samples, 2) binary label-indicator matrix
    subplot   : position in the 2x2 grid (1-4)
    title     : subplot title
    transform : 'pca' or 'cca' -- the 2-D projection used for display/fitting
    """
    # reduce to 2 dimensions for plotting; CCA also uses the labels
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    # data extent, used below to pad the axis limits
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    # one binary linear SVC per label (one-vs-rest)
    classif = OneVsRestClassifier(SVC(kernel="linear"))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # samples carrying label 0 / label 1 (a sample may have both or neither)
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c="gray", edgecolors=(0, 0, 0))
    plt.scatter(
        X[zero_class, 0],
        X[zero_class, 1],
        s=160,
        edgecolors="b",
        facecolors="none",
        linewidths=2,
        label="Class 1",
    )
    plt.scatter(
        X[one_class, 0],
        X[one_class, 1],
        s=80,
        edgecolors="orange",
        facecolors="none",
        linewidths=2,
        label="Class 2",
    )
    # decision boundary of each per-label classifier
    plot_hyperplane(
        classif.estimators_[0], min_x, max_x, "k--", "Boundary\nfor class 1"
    )
    plot_hyperplane(
        classif.estimators_[1], min_x, max_x, "k-.", "Boundary\nfor class 2"
    )
    plt.xticks(())
    plt.yticks(())
    # pad the limits by half the data extent
    plt.xlim(min_x - 0.5 * max_x, max_x + 0.5 * max_x)
    plt.ylim(min_y - 0.5 * max_y, max_y + 0.5 * max_y)
    if subplot == 2:
        # annotate axes and show the legend only once, on the upper-right panel
        plt.xlabel("First principal component")
        plt.ylabel("Second principal component")
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))

# top row: a dataset that may contain samples with no label at all
X, Y = make_multilabel_classification(
    n_classes=2, n_labels=1, allow_unlabeled=True, random_state=1
)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

# bottom row: every sample carries at least one label
X, Y = make_multilabel_classification(
    n_classes=2, n_labels=1, allow_unlabeled=False, random_state=1
)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

# same margins as before, spelled out as keywords for readability
plt.subplots_adjust(left=0.04, bottom=0.02, right=0.97, top=0.94,
                    wspace=0.09, hspace=0.2)
plt.show()
| [
"matplotlib.pyplot.ylabel",
"sklearn.cross_decomposition.CCA",
"numpy.where",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.ylim",
"mat... | [((2112, 2138), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2122, 2138), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2245), 'sklearn.datasets.make_multilabel_classification', 'make_multilabel_classification', ([], {'n_classes': '(2)', 'n_labels': '(1)', 'allow_unlabeled': '(True)', 'random_state': '(1)'}), '(n_classes=2, n_labels=1, allow_unlabeled=\n True, random_state=1)\n', (2177, 2245), False, 'from sklearn.datasets import make_multilabel_classification\n'), ((2382, 2481), 'sklearn.datasets.make_multilabel_classification', 'make_multilabel_classification', ([], {'n_classes': '(2)', 'n_labels': '(1)', 'allow_unlabeled': '(False)', 'random_state': '(1)'}), '(n_classes=2, n_labels=1, allow_unlabeled=\n False, random_state=1)\n', (2412, 2481), False, 'from sklearn.datasets import make_multilabel_classification\n'), ((2617, 2671), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', (['(0.04)', '(0.02)', '(0.97)', '(0.94)', '(0.09)', '(0.2)'], {}), '(0.04, 0.02, 0.97, 0.94, 0.09, 0.2)\n', (2636, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2680, 2682), True, 'import matplotlib.pyplot as plt\n'), ((420, 453), 'numpy.linspace', 'np.linspace', (['(min_x - 5)', '(max_x + 5)'], {}), '(min_x - 5, max_x + 5)\n', (431, 453), True, 'import numpy as np\n'), ((540, 580), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', 'linestyle'], {'label': 'label'}), '(xx, yy, linestyle, label=label)\n', (548, 580), True, 'import matplotlib.pyplot as plt\n'), ((844, 859), 'numpy.min', 'np.min', (['X[:, 0]'], {}), '(X[:, 0])\n', (850, 859), True, 'import numpy as np\n'), ((872, 887), 'numpy.max', 'np.max', (['X[:, 0]'], {}), '(X[:, 0])\n', (878, 887), True, 'import numpy as np\n'), ((901, 916), 'numpy.min', 'np.min', (['X[:, 1]'], {}), '(X[:, 1])\n', (907, 916), True, 'import numpy as np\n'), ((929, 944), 'numpy.max', 'np.max', (['X[:, 
1]'], {}), '(X[:, 1])\n', (935, 944), True, 'import numpy as np\n'), ((1029, 1055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', 'subplot'], {}), '(2, 2, subplot)\n', (1040, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1060, 1076), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1069, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1112), 'numpy.where', 'np.where', (['Y[:, 0]'], {}), '(Y[:, 0])\n', (1103, 1112), True, 'import numpy as np\n'), ((1129, 1146), 'numpy.where', 'np.where', (['Y[:, 1]'], {}), '(Y[:, 1])\n', (1137, 1146), True, 'import numpy as np\n'), ((1151, 1218), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(40)', 'c': '"""gray"""', 'edgecolors': '(0, 0, 0)'}), "(X[:, 0], X[:, 1], s=40, c='gray', edgecolors=(0, 0, 0))\n", (1162, 1218), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1347), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[zero_class, 0]', 'X[zero_class, 1]'], {'s': '(160)', 'edgecolors': '"""b"""', 'facecolors': '"""none"""', 'linewidths': '(2)', 'label': '"""Class 1"""'}), "(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',\n facecolors='none', linewidths=2, label='Class 1')\n", (1234, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1411, 1537), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[one_class, 0]', 'X[one_class, 1]'], {'s': '(80)', 'edgecolors': '"""orange"""', 'facecolors': '"""none"""', 'linewidths': '(2)', 'label': '"""Class 2"""'}), "(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',\n facecolors='none', linewidths=2, label='Class 2')\n", (1422, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1810, 1824), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (1820, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1843), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1839, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1899), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(min_x - 0.5 * max_x)', '(max_x + 0.5 * max_x)'], {}), '(min_x - 0.5 * max_x, max_x + 0.5 * max_x)\n', (1857, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1954), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(min_y - 0.5 * max_y)', '(max_y + 0.5 * max_y)'], {}), '(min_y - 0.5 * max_y, max_y + 0.5 * max_y)\n', (1912, 1954), True, 'import matplotlib.pyplot as plt\n'), ((980, 1000), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (983, 1000), False, 'from sklearn.svm import SVC\n'), ((1984, 2023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""First principal component"""'], {}), "('First principal component')\n", (1994, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2072), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Second principal component"""'], {}), "('Second principal component')\n", (2042, 2072), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2109), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2091, 2109), True, 'import matplotlib.pyplot as plt\n'), ((675, 694), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (678, 694), False, 'from sklearn.decomposition import PCA\n'), ((753, 772), 'sklearn.cross_decomposition.CCA', 'CCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (756, 772), False, 'from sklearn.cross_decomposition import CCA\n')] |
#!/usr/bin/env python3
# ver 0.1 - coding python by <NAME> on 2/26/2017
# ver 0.2 - save .npz file for outputfile on 12/2/2017
import argparse
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='block average 1D Profile from np.savetxt file')
## args
parser.add_argument('-i', '--input', nargs='?',
    help='input 1D profile file generated by .txt, .npy, else')
parser.add_argument('-b', '--begin', default=0, nargs='?', type=int,
    help='index of beginning frame [0:N-1]')
parser.add_argument('-e', '--end', default=-1, nargs='?', type=int,
    help='index of end frame [0:N-1]. If negative, use end frame')
parser.add_argument('-tol', '--tol', default=0.0, nargs='?', type=float,
    help='tolerance for block average (> 0 and <= 1). No block average (tol=0). # frames to average (tol>1)')
parser.add_argument('-o', '--output', default='input', nargs='?',
    help='output file of block averaged 1D profile (.avg)')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.2')
# read args
args = parser.parse_args()
# check args
print(" input arguments: {0}".format(args))
## import modules
import hjung
from hjung import *
import numpy as np
# BUGFIX: only derive the output name when -o/--output was left at its
# default ('input'); previously a user-supplied -o value was always
# silently overwritten.
if args.output == 'input':
    args.output = args.input.replace('.npy', '') + '.avg'
## Check arguments for log
print("===============================")
print("input filename = ", args.input)
print("index of beginning frame = ", args.begin)
if args.end != -1:
    print("index of end frame = ", args.end)
else:
    print("Set end frame is the end.")
hjung.blockavg.print_init(args.tol)
print("output filename = ", args.output)
## timer
start_proc, start_prof = hjung.time.init()
## check argument
args.tol = hjung.blockavg.check(args.tol)
## read input file
print("=" * 30)
data_1d_t = hjung.io.read_simple(args.input, 0, -1)
print("Total trajecotry has %d frames." % (len(data_1d_t)))
if args.end >= len(data_1d_t):
    raise ValueError("end frame is beyond trajectory")
# slice the requested frame window (negative end means "to the last frame")
if args.end < 0:
    data_1d_t = data_1d_t[args.begin:]
else:
    data_1d_t = data_1d_t[args.begin:args.end]
print("Done: reading input file")
## block average to get stable volume (or number density)
print("=" * 30)
data_1d_t, block_length = hjung.blockavg.main_1d(data_1d_t, None, args.tol)
## make average and std
# use numpy.mean(array,axis=0) to avoid transpose cost
data_1d_avg = np.mean(data_1d_t, axis=0)
data_1d_std = np.std(data_1d_t, axis=0)
data_1d = np.column_stack((data_1d_avg, data_1d_std))
## save averaged profile (text .avg and binary .avg.npy)
print("=" * 30)
np.savetxt(args.output, data_1d,
    header='begin frame = %d, end frame = %d, generated by Hyuntae python code' % (args.begin, args.end), fmt='%f', comments='# ')
np.save(args.output, data_1d)
print("Finished saving average number density.")
## timer
hjung.time.end_print(start_proc, start_prof)
| [
"numpy.mean",
"hjung.time.end_print",
"hjung.time.init",
"argparse.ArgumentParser",
"hjung.blockavg.print_init",
"numpy.column_stack",
"hjung.io.read_simple",
"numpy.savetxt",
"numpy.std",
"hjung.blockavg.check",
"numpy.save",
"hjung.blockavg.main_1d"
] | [((158, 308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""block average 1D Profile from np.savetxt file"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'block average 1D Profile from np.savetxt file')\n", (181, 308), False, 'import argparse\n'), ((1642, 1677), 'hjung.blockavg.print_init', 'hjung.blockavg.print_init', (['args.tol'], {}), '(args.tol)\n', (1667, 1677), False, 'import hjung\n'), ((1758, 1775), 'hjung.time.init', 'hjung.time.init', ([], {}), '()\n', (1773, 1775), False, 'import hjung\n'), ((1809, 1839), 'hjung.blockavg.check', 'hjung.blockavg.check', (['args.tol'], {}), '(args.tol)\n', (1829, 1839), False, 'import hjung\n'), ((1890, 1929), 'hjung.io.read_simple', 'hjung.io.read_simple', (['args.input', '(0)', '(-1)'], {}), '(args.input, 0, -1)\n', (1910, 1929), False, 'import hjung\n'), ((2369, 2418), 'hjung.blockavg.main_1d', 'hjung.blockavg.main_1d', (['data_1d_t', 'None', 'args.tol'], {}), '(data_1d_t, None, args.tol)\n', (2391, 2418), False, 'import hjung\n'), ((2518, 2544), 'numpy.mean', 'np.mean', (['data_1d_t'], {'axis': '(0)'}), '(data_1d_t, axis=0)\n', (2525, 2544), True, 'import numpy as np\n'), ((2560, 2585), 'numpy.std', 'np.std', (['data_1d_t'], {'axis': '(0)'}), '(data_1d_t, axis=0)\n', (2566, 2585), True, 'import numpy as np\n'), ((2597, 2640), 'numpy.column_stack', 'np.column_stack', (['(data_1d_avg, data_1d_std)'], {}), '((data_1d_avg, data_1d_std))\n', (2612, 2640), True, 'import numpy as np\n'), ((2684, 2852), 'numpy.savetxt', 'np.savetxt', (['args.output', 'data_1d'], {'header': "('begin frame = %d, end frame = %d, generated by Hyuntae python code' % (\n args.begin, args.end))", 'fmt': '"""%f"""', 'comments': '"""# """'}), "(args.output, data_1d, header=\n 'begin frame = %d, end frame = %d, generated by Hyuntae python code' %\n (args.begin, args.end), fmt='%f', comments='# ')\n", (2694, 2852), True, 
'import numpy as np\n'), ((2846, 2875), 'numpy.save', 'np.save', (['args.output', 'data_1d'], {}), '(args.output, data_1d)\n', (2853, 2875), True, 'import numpy as np\n'), ((2939, 2983), 'hjung.time.end_print', 'hjung.time.end_print', (['start_proc', 'start_prof'], {}), '(start_proc, start_prof)\n', (2959, 2983), False, 'import hjung\n')] |
# -*- coding: utf-8 -*-
import numpy as np
class Schrodinger:
    def __init__(self, V0, c, basis_size, basis_function, fxn):
        '''Creates a system to calculate the schrodinger equation

        Args:
            V0 (float): Initial Potential Energy
            c (float): Constant to be used in Schrodinger equation
            basis_size (int): Size of the basis set
            basis_function (int): Determines which basis function to use
                - 0 : Legendre Polynomial
                - 1 : Fourier Series
            fxn (callable): Trial wavefunction, evaluated as fxn(x) on the grid
        '''
        self.V0 = V0
        self.c = c
        self.basis_size = basis_size
        self.basis_function = basis_function
        self.fxn = fxn
        # Spatial grid on [0, 2]; l is the domain length used by both bases
        self.x = np.linspace(0, 2, 2000)
        self.l = abs(self.x[0] - self.x[-1])

    def get_basis(self):
        '''Determines basis set coefficients for the given wavefunction.

        Returns:
            ndarray of coefficients (Legendre or Fourier), or None when
            basis_function is not 0 or 1.
        '''
        if self.basis_function == 0:
            # Least-squares fit of fxn onto the first basis_size Legendre polynomials
            self.basis_set = np.polynomial.legendre.legfit(self.x, self.fxn(self.x), self.basis_size - 1)
        elif self.basis_function == 1:
            self.basis_set = np.array([self._cn(self.fxn, i) for i in range(self.basis_size)])
        else:
            self.basis_set = None
        return self.basis_set

    def _cn(self, fxn, n):
        '''Returns the n-th complex Fourier coefficient of self.fxn on the grid.'''
        l = abs(self.x[0] - self.x[-1])
        c = self.fxn(self.x) * np.exp(-2j * n * np.pi * self.x / l)
        return sum(c) / len(c)

    def apply_hamiltonian(self, basis):
        '''Applies the Hamiltonian to a coefficient vector.

        Returns:
            Transformed coefficient vector, or None when basis_function is
            not 0 or 1.
        '''
        if self.basis_function == 0:
            # Second derivative in Legendre coefficient space, zero-padded
            # back to the original coefficient length
            temp = np.polynomial.legendre.legder(basis, 2)
            d_2 = np.zeros(len(temp) + 2)
            for i in range(len(temp)):
                d_2[i] = temp[i]
            self.converted_basis = -self.c * d_2 + self.V0 * basis * self.l
        elif self.basis_function == 1:
            # Fourier modes diagonalize the kinetic term
            H = np.zeros([self.basis_size, self.basis_size])
            for i in range(self.basis_size):
                H[i][i] = (-4 * (i**2) * (np.pi ** 2) / self.l)
            self.converted_basis = H.dot(basis)
        else:
            self.converted_basis = None
        return self.converted_basis

    def variation(self, iterations = 5000, basis = None):
        '''Greedily minimizes the energy by per-coefficient scaling.

        Bug fix: the loop now stops when improve_energy() reports no further
        beneficial change (self.improvement is all zero). The previous
        condition tested self.basis instead of self.improvement, so the loop
        either never ran (any zero coefficient) or only stopped at the
        iteration cap.

        Args:
            iterations: maximum number of improvement rounds
            basis: starting coefficients; defaults to the last result of
                apply_hamiltonian()
        Returns:
            The (approximately) minimized energy.
        '''
        if basis is None:
            self.basis = self.converted_basis
        else:
            self.basis = basis
        improvements = 0
        self.improvement = np.ones(len(self.basis))
        # Loop until no coefficient change lowers the energy
        while np.any(self.improvement != 0):
            improvements += 1
            self.improve_energy()
            if improvements > iterations:
                print("Did not converge")
                break
        return self.calculate_energy(self.basis)

    def calculate_energy(self, basis):
        '''Given any basis, calculate the energy of it through the expected value formula'''
        return basis.dot(self.apply_hamiltonian(basis)) / basis.dot(basis)

    def improve_energy(self):
        '''Scales each coefficient by +/-5% when doing so does not increase
        the energy, recording the chosen direction in self.improvement
        (1: scale up, -1: scale down, 0: leave unchanged).
        '''
        energy = self.calculate_energy(self.basis)
        self.improvement = np.zeros(len(self.basis))
        # Check if adding minimizes energy
        for i in range(len(self.basis)):
            self.basis[i] *= 1.05
            if self.calculate_energy(self.basis) <= energy:
                self.improvement[i] = 1
            self.basis[i] /= 1.05
        # Check if subtracting minimizes energy
        # (a -1 recorded here overrides a +1 from the loop above)
        for i in range(len(self.basis)):
            self.basis[i] *= 0.95
            if self.calculate_energy(self.basis) <= energy:
                self.improvement[i] = -1
            self.basis[i] /= 0.95
        # Make appropriate changes
        for i in range(len(self.basis)):
            if self.improvement[i] == 1:
                self.basis[i] *= 1.05
                continue
            if self.improvement[i] == -1:
                self.basis[i] *= 0.95
                continue
| [
"numpy.exp",
"numpy.linspace",
"numpy.polynomial.legendre.legder",
"numpy.zeros"
] | [((709, 732), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(2000)'], {}), '(0, 2, 2000)\n', (720, 732), True, 'import numpy as np\n'), ((1256, 1294), 'numpy.exp', 'np.exp', (['(-2.0j * n * np.pi * self.x / l)'], {}), '(-2.0j * n * np.pi * self.x / l)\n', (1262, 1294), True, 'import numpy as np\n'), ((1396, 1435), 'numpy.polynomial.legendre.legder', 'np.polynomial.legendre.legder', (['basis', '(2)'], {}), '(basis, 2)\n', (1425, 1435), True, 'import numpy as np\n'), ((1628, 1672), 'numpy.zeros', 'np.zeros', (['[self.basis_size, self.basis_size]'], {}), '([self.basis_size, self.basis_size])\n', (1636, 1672), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
## MIT License
##
## Copyright (c) 2019 <NAME>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
import struct
import numpy as np
import sys
from rule_handlers import Ruleset
from object_packer import ObjectPacker
_log_last_length=0
def _log(verbose, msg, delete_last=False):
""" Prints log messages according to verbosity
Args:
verbose: integer. message is printed when not zero
msg: message to print, newline char should be included
delete_last: If true, the last message will be deleted from buffer
"""
global _log_last_length
if verbose==0:
return
if delete_last:
sys.stderr.write('\b'*_log_last_length)
sys.stderr.write(msg)
sys.stderr.flush()
_log_last_length=len(msg)
class iSet:
    """ A set of rules which do not intersect each other on field f
    """
    def __init__(self, rule_table, iset_indices, iset_field, total_rules, validation_indices, verbose=0):
        """ Initiate this
        Args:
            rule_table: a reference to the iSet rule-table (uint32)
            iset_indices: a list of the relevant iSet indices within the rule-table
            iset_field: the field of the iSet
            total_rules: total rules in classifier after iset partitioning
            validation_indices: indices of additional rules that must be
                checked during the validation phase of this iSet
            verbose: verbosity of this
        """
        # Fields occupy columns [0:F) (starts) and [F:2F) (ends); the last column is the priority
        F = int(rule_table.shape[1]/2)
        self.F = F
        # Fraction of the classifier's rules that this iSet covers
        self.coverage_value = float(iset_indices.shape[0]) / float(total_rules)
        # The index holds only [start, end] of the indexed field, as float32
        self.index = rule_table[iset_indices].astype(np.float32)
        self.index = self.index[:, [iset_field, iset_field+F]]
        self.indexed_field = int(iset_field)
        self.database = rule_table[iset_indices, :].astype(np.uint32)
        self.verbose = verbose
        # Fix the case when the last interval has end=start
        if self.index[-1,0] == self.index[-1,1]:
            self.index[-1,1] = np.nextafter(self.index[-1,0], 2*self.index[-1,0], dtype=np.float32)
        # Validate non overlapping ranges in index
        test_vector = np.concatenate([self.index[:, 0], [self.index[-1,1]] ])
        test_vector = (np.roll(test_vector, -1) - test_vector) <= 0
        test_vector = test_vector[:-1]
        if np.logical_or.reduce(test_vector):
            raise ValueError('iSet has overlapping ranges!')
        # At this point we wish to create the validation-matrix per indexed rule
        # We need to partition all indexed rules and validation rules to groups
        # A group holds all rules with the same priorty and projection of the iSet field
        # Relevant field indices
        f_start = self.indexed_field
        f_end = f_start + F
        # Copy rules that participate in the validation phase, as exact representation (uint32)
        # Complexity: O(r*log(r)) for r the number of rules
        db_indices = np.unique(np.concatenate([validation_indices, iset_indices])).astype(np.uint32)
        database = rule_table[db_indices, :].astype(np.uint32)
        # At this point 'database' holds both non-overlapping indexed rules
        # and validation rules with sampe projections
        # We sort the rules by their start value - it is promised that
        # rules with the same start value overlap and have the same priority!
        # Complexity: O(r*log(r)) for r the number of rules
        indices = np.lexsort([database[:, f_start]])
        database = database[indices, :]
        # Divide the database to gropus of rules with the same priority and projection value
        # Each group holds: (priority, start-idx (inclusive), end-idx (exclusive))
        # Complexity: O(r) for r the number of rules
        groups=[]
        last_idx = 0
        last_prio = database[0, -1]
        last_start = database[0, f_start]
        N = database.shape[0]
        for rule_idx in range(1, N):
            current_start = database[rule_idx, f_start]
            # If the start value of the current rule differs from the
            # start value of the last rule, they must belong to different
            # groups
            if current_start != last_start:
                groups.append((last_prio, last_idx, rule_idx))
                last_idx = rule_idx
                last_start = current_start
                last_prio = database[rule_idx, -1]
        # Close the final group (it is never closed inside the loop)
        groups.append((database[-1, -1], last_idx, N))
        # Validate that the number of groups equals the number of rules
        if(self.index.shape[0] != len(groups)):
            raise Exception('Number of groups (%d) differ from number of indexed rules (%d)' %
                (len(groups), self.index.shape[0]))
        # Validation phase of rules in this
        # List of validation matrics, each corresponds to a different rule-group (i.e, rules with same priority)
        # Each column is a set of disjunction conditions on the same field
        # Each row is a different validation phase
        # Note: while the binary (& memory) format of NuevoMatch holds matrices with the same size,
        # At this point different matrices may have different sizes
        self.validation_matrices = []
        self.validation_priorities = []
        # For each group (all rules with the same priority)
        for prio, start, stop in groups:
            # Check that all rules in group have the same start-value, end-value, and priority
            start_values = np.unique(database[start:stop, f_start]).shape[0]
            end_values = np.unique(database[start:stop, f_end]).shape[0]
            prio_values = np.unique(database[start:stop, -1]).shape[0]
            if (start_values != 1) or (end_values != 1) or (prio_values != 1):
                _log(self.verbose, 'Number of start-values: %d, end-values: %d, prio-values: %d\n' %
                    (start_values, end_values, prio_values))
                raise Exception('Error in group partitioning.')
            # Get the set of valid ranges in each field.
            # Total complexity: O(r*F) for r number of rules, F number of fields
            valid_ranges=[]
            for f in range(F):
                # Initialize as empty set
                values = set()
                # Get all possible values
                # Complexity: O(r) for r number of rules
                possible_values = database[start:stop, [f, f+F]]
                # Filter only unique values
                for r in range(possible_values.shape[0]):
                    current = (int(possible_values[r,0]), int(possible_values[r,1]))
                    values.add(current)
                valid_ranges.append(values)
            # How many validation phases are required?
            # (the field with the most distinct ranges dictates this)
            phase_num = np.max([len(x) for x in valid_ranges])
            # Allocate memory for validation phase of current rule
            # Note: The allocation invalidates all fields in validation phase
            # by putting [0xffffffff, 0] as range in all fields (invalid range)
            validation_matrix = np.zeros([phase_num, 2*F], dtype=np.uint32)
            validation_matrix[:, 0:F] = np.iinfo(np.uint32).max
            # Build validation phases
            for i in range(phase_num):
                for f in range(F):
                    if len(valid_ranges[f]) != 0:
                        current_range = valid_ranges[f].pop()
                        validation_matrix[i, f] = current_range[0]
                        validation_matrix[i, f+F] = current_range[1]
            self.validation_matrices.append(validation_matrix)
            self.validation_priorities.append(prio)
        assert(len(self.validation_matrices) == self.index.shape[0])
        # Print messages
        _log(self.verbose, 'iSet has %d rules, %d validation phases, and %d columns\n' %
            (self.index.shape[0], self.get_validation_phase_length(), 2*F))
    def __len__(self):
        """ Returns the number of rules in this """
        return self.index.shape[0]
    def __str__(self):
        """ Returns a printable listing of the rules; the indexed field of
        each rule is wrapped in asterisks. """
        output = ''
        for i in range(self.index.shape[0]):
            output += '%d:' % i
            for f in range(self.database.shape[1]):
                if f==self.indexed_field: output += '*'
                output += '%d' % self.database[i,f]
                if f==self.indexed_field: output += '*'
                output+=' '
            else:
                output += '\n'
        return output
    def get_index(self):
        """ Return the index of the iSet """
        return self.index
    def get_field_index(self):
        """ Returns the field index this iSet was created by """
        return self.indexed_field
    def get_validation_phase_length(self):
        """ Returns the number of validation phases of this """
        return int(np.max([x.shape[0] for x in self.validation_matrices]))
    def __bytes__(self):
        """ Packs this to byte array object """
        output = ObjectPacker()
        K = self.get_validation_phase_length()
        F = self.F
        with output as iset_packer:
            # Pack iSet version 1
            iset_packer.append(0)
            iset_packer.append(1)
            # Pack number of validation phases, fields, and field index
            iset_packer.append(K)
            iset_packer.append(F*2)
            iset_packer.append(self.indexed_field)
            # Pack Validation phases
            # Pack format: elements are stored row-wise: E00 E01 E02 ... E10 E11 E12 ...
            # Note: Due to SIMD implementation in interval_set.cpp, in runtime,
            # the data is stored in memory in a transposed format (column-wise)
            # More details in interval_set.cpp
            for i in range(len(self.validation_matrices)):
                # Get the current matrix
                matrix = self.validation_matrices[i]
                for k in range(K):
                    for f in range(2*F):
                        # The default value invalidates match by
                        # assigning MAX_UINT for range start and 0 for range end
                        default_value = int(np.iinfo(np.uint32).max) if (f<F) else 0
                        # In case the matrix does not have a value at the current index,
                        # Store the default value
                        # (matrices may be shorter than K; pad up to K phases)
                        if matrix.shape[0] <= k:
                            iset_packer.append(int(default_value))
                        else:
                            iset_packer.append(int(matrix[k,f]))
                # Pack the rule priority
                iset_packer.append(self.validation_priorities[i])
        # Print messages
        _log(self.verbose, 'iSet total pack size is %d bytes\n' % len(output))
        return bytes(output)
    def coverage(self):
        """ Returns the coverage of this """
        return self.coverage_value
class RemainderSet:
    """ Represents a remainder set: the rules left out of every iSet """

    def __init__(self, rule_table, indices):
        """ Build the remainder set.

        Args:
            rule_table: a reference to the complete rule table
            indices: indices (within rule_table) of the remainder rules
        """
        self._ruleset = Ruleset(rule_table[indices, :])

    def get_db(self):
        """ Returns the rules of this """
        return self._ruleset.get()

    def __bytes__(self):
        """ Packs this to byte-array """
        return bytes(self._ruleset)
class CompatibleIntervalSet:
    """ Holds Compatible Interval Subsets of a rule table
    """
    def __init__(self, rule_table):
        """ Initialize new multiset.
        Args:
            rule_table: Matrix of Nx(2*F+1), where N is the number of rules and F is the number of fields.
                        Rows' format: [field_0_start, field_1_start, ..., filed_0_end, field_1_end, ..., priority]
        Throws:
            ValueError in case rule_table is not in valid format
        """
        if type(rule_table) is not np.ndarray:
            raise ValueError('rule_table argument must by Numpy array')
        self.rule_table = rule_table
        self.N = self.rule_table.shape[0]
        self.F = int(rule_table.shape[1]/2)
        # May shrink in process() when expanded duplicates are removed
        self.total_rules_for_coverage=self.N
        self.subsets = []
        self.subset_field = []
        self.remainder_indx = None
        self.isets = []
        # Store extra validation phases for rules based on their priority
        # Item i is a list of all expanded rule indices for iSet i.
        self.extra_validation_phases=[]
        # Used for iterator
        self.iterator = 0
    def _find_compatible_subset_in_field(self, field, available_rules):
        """ Private method. Extract possible compatible subset from the current rule-set
        Args:
            field: The number of field (0 <= f < F) to extract from
            available_rules: An array of indices, the available rules within the rule_table
        Returns:
            A list of indices (from the rule-set) of the compatible subset
        """
        # Extract the field's intervals from rule table
        intervals = self.rule_table[available_rules, :].astype(np.float32)
        intervals = intervals[:, [field, field+self.F]]
        lengths = intervals[:, 1] - intervals[:, 0]
        # Remove negative lengths (this is possible to split fields with modulo on their 32bit values)
        # NOTE(review): this filtering re-indexes 'intervals', so the indices
        # returned below refer to the filtered array, not to 'available_rules'
        # when any negative-length interval exists — confirm upstream data
        # guarantees no negative lengths.
        negative = (lengths < 0)
        intervals = intervals[~negative]
        lengths = lengths[~negative]
        N = intervals.shape[0]
        # In case there are no valid intervals, return an empty list
        if N == 0:
            return np.empty(shape=[0,1])
        # Apply the greedy algorithm for finding the largest compatible subset
        # Sort based on interval length (large to small) and finish value (small to large)
        # This kind of sorting prefers short intervals on long ones
        sorted_idx = np.lexsort([ -lengths, intervals[:, 1] ])
        sorted_intervals = intervals[sorted_idx]
        subset=[]
        i = 0
        while i<N:
            # Select the first interval
            current_interval = i
            subset.append(current_interval)
            i+=1
            # Ignore intervals that are non compatible with current_interval
            while (i < N) and (sorted_intervals[i, 0] <= sorted_intervals[current_interval, 1]):
                i+=1
        # At this point, subset consists of an optimal set of intervals
        # Return a vector with the subset's indices
        return sorted_idx[subset]
    def process(self, max_subset_count, min_items_per_subset, verbose=0):
        """ Extract optimal compatible sets from the rule-table
        Args:
            max_subset_count: The maximum number of allowed subsets
            min_items_per_subset: The minimum allowed number of items in a subset
            verbose: Verbosity
        """
        # Cannot process empty ruleset
        if self.N == 0:
            return
        available_rules = np.arange(self.N)
        N = available_rules.shape[0]
        for i in range(max_subset_count):
            # Stop in case there are no available rules left
            if available_rules.shape[0]==0:
                break
            # Extract the subset with maximum intervals
            max_subset = np.empty(shape=[0])
            max_field = 0
            for f in range(self.F):
                subset = self._find_compatible_subset_in_field(f, available_rules)
                if subset.shape[0] > max_subset.shape[0]:
                    max_subset = subset
                    max_field = f
            # In case the current subset is smaller than minimum, stop
            if max_subset.shape[0] < min_items_per_subset:
                break
            # Update the subsets of this
            _log(verbose, 'Generated subset %d with %d rules (field index: %d) \n' % (i, max_subset.shape[0], max_field))
            self.subsets.append(available_rules[max_subset])
            self.subset_field.append(max_field)
            self.extra_validation_phases.append([])
            # Remove the iSet rules from the remaining rule-set
            predicat = np.full(N, True)
            predicat[max_subset] = False
            # Update available rules
            available_rules = available_rules[predicat]
            N = available_rules.shape[0]
            # Get the priorities of the rules in current iSet
            # Note: as priorities are unique per compact rules, two rules with the same
            # priority indicates that they both originate from the same compact rule
            # Complexity: O(r*log(r)) for r the number of rules
            iset_priorities = np.unique(self.rule_table[self.subsets[-1], -1])
            # Get the priorities of the rules in the remainder set.
            # Complexity: O(r*log(r)) for r the number of rules
            remainder_priorities = np.unique(self.rule_table[available_rules, -1])
            # Compute the intersection of priorities
            # Complexity: O(r) for r the number of rules
            intersect_priorities = set(np.intersect1d(iset_priorities, remainder_priorities))
            # Get the range in the field by which the last iSet was partitioned,
            # for all rules with priorities in the intersection.
            # Complexity: O(r) for r the number of rules
            value_prio_tuple_set=set()
            for j in self.subsets[-1]:
                # Set membership: O(1)
                if self.rule_table[j, -1] in intersect_priorities:
                    range_start, range_end, value = self.rule_table[j, [max_field, max_field+self.F, -1]]
                    value_prio_tuple_set.add((range_start, range_end, value))
            # Create new predicat to remove rules
            predicat = np.full(N, True)
            counter = 0
            # Go over all rules in the remainder set, in case their range&prio exists in
            # the value_prio_tuple_set, remove the rule
            # Complexity: O(r) for r the number of rules
            for j in range(N):
                idx=available_rules[j]
                range_start, range_end, value = self.rule_table[idx, [max_field, max_field+self.F, -1]]
                if (range_start, range_end, value) in value_prio_tuple_set:
                    predicat[j] = False
                    counter += 1
                    # Update extra validation phases
                    self.extra_validation_phases[i].append(available_rules[j])
            # Update available rules
            available_rules = available_rules[predicat]
            N = available_rules.shape[0]
            # Log
            _log(verbose, 'Removed %d expanded-rules from remainder set \n' % counter)
            # Do not continue to next iteration if remainder is less than minimum
            if available_rules.shape[0] < min_items_per_subset:
                break
        self.remainder_indx = available_rules
        _log(verbose, 'Remainder subset with %d rules \n' % available_rules.shape[0])
        _log(verbose, 'Extra validation phase covers %d rules \n' % sum([len(x) for x in self.extra_validation_phases]))
        # Update the total rules of this (duplicates might have changed this)
        self.total_rules_for_coverage=sum([x.shape[0] for x in self.subsets]) + self.remainder_indx.shape[0]
        _log(verbose, 'Total size after removing expanded rules: %d\n' % self.total_rules_for_coverage)
        # Build all iSet objects of this
        self.isets = [iSet(self.rule_table, self.subsets[key], self.subset_field[key],
            self.total_rules_for_coverage, self.extra_validation_phases[key], verbose) for key in range(len(self.subsets))]
    def __eq__(self, rhs):
        """ Returns true whether two CompatibleIntervalSet equal """
        if type(self) != type(rhs): return False
        if (len(self.subsets) != len(rhs.subsets)) or (len(self.subset_field) != len(rhs.subset_field)):
            return False
        for lhs_subset, rhs_subset in zip(self.subset_field, rhs.subset_field):
            if lhs_subset != rhs_subset:
                return False
        for lhs_subset, rhs_subset in zip(self.subsets, rhs.subsets):
            if not np.all(lhs_subset == rhs_subset):
                return False
        if not np.all(self.remainder_indx == rhs.remainder_indx):
            return False
        if not np.all(self.rule_table == rhs.rule_table):
            return False
        return True
    def __len__(self):
        """ Returns the number of iSets """
        return len(self.subsets)
    def __getitem__(self, key):
        """ Get a specific iSet """
        return self.isets[key]
    def __iter__(self):
        """ Get iterator for this """
        # Bug fix: reset the cursor so this object can be iterated
        # more than once (previously the second iteration was empty)
        self.iterator = 0
        return self
    def __next__(self):
        """ Iterator next """
        if self.iterator == len(self):
            raise StopIteration
        self.iterator += 1
        return self[self.iterator-1]
    def remainder(self):
        """ Returns the remainder set of this """
        return RemainderSet(self.rule_table, self.remainder_indx)
    def remainder_indices(self):
        """ Returns the remainder subset indices of the original rules """
        return self.remainder_indx
    def rule_query(self, rule_priority):
        """ Returns the iSet index that contains the rule
        Args:
            rule_priority: The rule priority to search for
        Returns: list of tuples (X,Y,S,R) where:
            X - indicates the subset index (-1 for the remainder set)
            Y - the position of rule within that subset
            S - the field index by which the subset was is indexed
            R - the range of the filed by which the subset was is indexed
        """
        output = []
        # Check all iSets
        for x,s in enumerate(self.subsets):
            prio_array = self.rule_table[s, -1]
            for y,v in enumerate(prio_array):
                if v == rule_priority:
                    field = self.subset_field[x]
                    field_start, field_end = self.rule_table[s, :][y, [field, field+self.F]]
                    output.append((x, y, field, (int(field_start), int(field_end))))
        # Check the remainder subset
        prio_array = self.rule_table[self.remainder_indx, -1]
        for y,v in enumerate(prio_array):
            if v == rule_priority:
                output.append((-1, y, -1, -1))
        return output
    def coverage(self):
        """ Returns the coverage of this """
        return 1 - len(self.remainder_indx) / self.total_rules_for_coverage
| [
"numpy.intersect1d",
"numpy.roll",
"numpy.unique",
"numpy.arange",
"rule_handlers.Ruleset",
"sys.stderr.flush",
"numpy.iinfo",
"object_packer.ObjectPacker",
"numpy.max",
"sys.stderr.write",
"numpy.lexsort",
"numpy.zeros",
"numpy.empty",
"numpy.nextafter",
"numpy.concatenate",
"numpy.fu... | [((1677, 1698), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (1693, 1698), False, 'import sys\n'), ((1700, 1718), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (1716, 1718), False, 'import sys\n'), ((1636, 1679), 'sys.stderr.write', 'sys.stderr.write', (["('\\x08' * _log_last_length)"], {}), "('\\x08' * _log_last_length)\n", (1652, 1679), False, 'import sys\n'), ((2853, 2908), 'numpy.concatenate', 'np.concatenate', (['[self.index[:, 0], [self.index[-1, 1]]]'], {}), '([self.index[:, 0], [self.index[-1, 1]]])\n', (2867, 2908), True, 'import numpy as np\n'), ((3009, 3042), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['test_vector'], {}), '(test_vector)\n', (3029, 3042), True, 'import numpy as np\n'), ((4029, 4063), 'numpy.lexsort', 'np.lexsort', (['[database[:, f_start]]'], {}), '([database[:, f_start]])\n', (4039, 4063), True, 'import numpy as np\n'), ((8647, 8661), 'object_packer.ObjectPacker', 'ObjectPacker', ([], {}), '()\n', (8659, 8661), False, 'from object_packer import ObjectPacker\n'), ((10414, 10445), 'rule_handlers.Ruleset', 'Ruleset', (['rule_table[indices, :]'], {}), '(rule_table[indices, :])\n', (10421, 10445), False, 'from rule_handlers import Ruleset\n'), ((12762, 12801), 'numpy.lexsort', 'np.lexsort', (['[-lengths, intervals[:, 1]]'], {}), '([-lengths, intervals[:, 1]])\n', (12772, 12801), True, 'import numpy as np\n'), ((13668, 13685), 'numpy.arange', 'np.arange', (['self.N'], {}), '(self.N)\n', (13677, 13685), True, 'import numpy as np\n'), ((17025, 17042), 'numpy.arange', 'np.arange', (['self.N'], {}), '(self.N)\n', (17034, 17042), True, 'import numpy as np\n'), ((2722, 2794), 'numpy.nextafter', 'np.nextafter', (['self.index[-1, 0]', '(2 * self.index[-1, 0])'], {'dtype': 'np.float32'}), '(self.index[-1, 0], 2 * self.index[-1, 0], dtype=np.float32)\n', (2734, 2794), True, 'import numpy as np\n'), ((7086, 7131), 'numpy.zeros', 'np.zeros', (['[phase_num, 2 * F]'], {'dtype': 'np.uint32'}), 
'([phase_num, 2 * F], dtype=np.uint32)\n', (7094, 7131), True, 'import numpy as np\n'), ((8515, 8569), 'numpy.max', 'np.max', (['[x.shape[0] for x in self.validation_matrices]'], {}), '([x.shape[0] for x in self.validation_matrices])\n', (8521, 8569), True, 'import numpy as np\n'), ((12502, 12524), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]'}), '(shape=[0, 1])\n', (12510, 12524), True, 'import numpy as np\n'), ((13913, 13932), 'numpy.empty', 'np.empty', ([], {'shape': '[0]'}), '(shape=[0])\n', (13921, 13932), True, 'import numpy as np\n'), ((14610, 14626), 'numpy.full', 'np.full', (['N', '(True)'], {}), '(N, True)\n', (14617, 14626), True, 'import numpy as np\n'), ((15052, 15100), 'numpy.unique', 'np.unique', (['self.rule_table[self.subsets[-1], -1]'], {}), '(self.rule_table[self.subsets[-1], -1])\n', (15061, 15100), True, 'import numpy as np\n'), ((15242, 15289), 'numpy.unique', 'np.unique', (['self.rule_table[available_rules, -1]'], {}), '(self.rule_table[available_rules, -1])\n', (15251, 15289), True, 'import numpy as np\n'), ((15997, 16013), 'numpy.full', 'np.full', (['N', '(True)'], {}), '(N, True)\n', (16004, 16013), True, 'import numpy as np\n'), ((18175, 18224), 'numpy.all', 'np.all', (['(self.remainder_indx == rhs.remainder_indx)'], {}), '(self.remainder_indx == rhs.remainder_indx)\n', (18181, 18224), True, 'import numpy as np\n'), ((18252, 18293), 'numpy.all', 'np.all', (['(self.rule_table == rhs.rule_table)'], {}), '(self.rule_table == rhs.rule_table)\n', (18258, 18293), True, 'import numpy as np\n'), ((2926, 2950), 'numpy.roll', 'np.roll', (['test_vector', '(-1)'], {}), '(test_vector, -1)\n', (2933, 2950), True, 'import numpy as np\n'), ((7161, 7180), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (7169, 7180), True, 'import numpy as np\n'), ((15413, 15466), 'numpy.intersect1d', 'np.intersect1d', (['iset_priorities', 'remainder_priorities'], {}), '(iset_priorities, remainder_priorities)\n', (15427, 15466), True, 'import numpy as 
np\n'), ((18114, 18146), 'numpy.all', 'np.all', (['(lhs_subset == rhs_subset)'], {}), '(lhs_subset == rhs_subset)\n', (18120, 18146), True, 'import numpy as np\n'), ((3580, 3630), 'numpy.concatenate', 'np.concatenate', (['[validation_indices, iset_indices]'], {}), '([validation_indices, iset_indices])\n', (3594, 3630), True, 'import numpy as np\n'), ((5787, 5827), 'numpy.unique', 'np.unique', (['database[start:stop, f_start]'], {}), '(database[start:stop, f_start])\n', (5796, 5827), True, 'import numpy as np\n'), ((5853, 5891), 'numpy.unique', 'np.unique', (['database[start:stop, f_end]'], {}), '(database[start:stop, f_end])\n', (5862, 5891), True, 'import numpy as np\n'), ((5918, 5953), 'numpy.unique', 'np.unique', (['database[start:stop, -1]'], {}), '(database[start:stop, -1])\n', (5927, 5953), True, 'import numpy as np\n'), ((9573, 9592), 'numpy.iinfo', 'np.iinfo', (['np.uint32'], {}), '(np.uint32)\n', (9581, 9592), True, 'import numpy as np\n')] |
"""
DeCliff filter contributed by Minecraft Forums user "DrRomz"
Originally posted here:
http://www.minecraftforum.net/topic/13807-mcedit-minecraft-world-editor-compatible-with-mc-beta-18/page__st__3940__p__7648793#entry7648793
"""
from numpy import zeros, array
import itertools
from pymclevel import alphaMaterials
# Shorthand for MCEdit's block/material table.
am = alphaMaterials
# Consider below materials when determining terrain height
blocks = [
    am.Stone,
    am.Grass,
    am.Dirt,
    am.Bedrock,
    am.Sand,
    am.Sandstone,
    am.Clay,
    am.Gravel,
    am.GoldOre,
    am.IronOre,
    am.CoalOre,
    am.LapisLazuliOre,
    am.DiamondOre,
    am.RedstoneOre,
    am.RedstoneOreGlowing,
    am.Netherrack,
    am.SoulSand,
    am.Glowstone
]
# Numeric block IDs of the terrain materials listed above.
terrainBlocktypes = [b.ID for b in blocks]
# Boolean lookup table indexed by block ID (0-255): True for terrain.
terrainBlockmask = zeros((256,), dtype='bool')
# Truth table used to calculate terrain height
# trees, leaves, etc. sit on top of terrain
terrainBlockmask[terrainBlocktypes] = True
# MCEdit filter options shown in the tool dialog.
inputs = (
    # Option to limit change to raise_cliff_floor / lower_cliff_top
    # Default is to adjust both and meet somewhere in the middle
    ("Raise/Lower", ("Both", "Lower Only", "Raise Only")),
)
#
# Calculate the maximum adjustment that can be made from
# cliff_pos in direction dir (-1/1) keeping terrain at most
# maxstep blocks away from the previous column
def maxadj(heightmap, slice_no, cliff_pos, dir, pushup, maxstep, slice_width):
    """Total height adjustment available from cliff_pos walking in
    direction dir (-1 or 1) along slice slice_no.

    When pushup is true, sums the raising headroom (non-negative);
    otherwise sums the lowering headroom (non-positive), keeping each
    column within maxstep blocks of its neighbour.  Returns 0 when
    there is no room to walk in the requested direction.
    """
    if dir < 0:
        if cliff_pos < 2:
            return 0
        stop = 0
    else:
        if cliff_pos > slice_width - 2:
            return 0
        stop = slice_width - 1
    total = 0
    for col in range(cliff_pos, stop, dir):
        # signed height change to the next column along the walk
        step = dir * (heightmap[slice_no, col + dir] - heightmap[slice_no, col])
        if pushup:
            total += max(0, maxstep + step)
        else:
            total += min(0, -maxstep - step)
    return total
#
# Raise/lower column at cliff face by adj and decrement change as we move away
# from the face. Each level will be at most maxstep blocks from those beside it.
#
# This function doesn't actually change anything, but just sets array 'new'
# with the desired height.
def adjheight(orig, new, slice_no, cliff_pos, dir, adj, can_adj, maxstep, slice_width):
    """Fill array `new` with the target heights for one side of the cliff.

    Walks from cliff_pos in direction dir (-1/1) along slice slice_no,
    offsetting each column by a share of `adj` that tapers off as the
    available headroom `can_adj` is consumed, so adjacent columns stay
    within maxstep of each other.  Only `new` is written; `orig` is
    read-only here and no blocks are moved.
    """
    cur_adj = adj
    prev = 0
    done_adj = 0
    if dir < 0:
        end = 1
    else:
        end = slice_width - 1
    if adj == 0 or can_adj == 0:
        # nothing to do - copy original heights through unchanged
        for cur_pos in range(cliff_pos, end, dir):
            new[slice_no, cur_pos] = orig[slice_no, cur_pos]
    else:
        for cur_pos in range(cliff_pos, end, dir):
            if adj > 0:
                # raising: accumulate the headroom used by this step
                done_adj = done_adj + \
                    max([0, maxstep - orig[slice_no, cur_pos] +
                         orig[slice_no, cur_pos + dir]])
                # a favourable slope between the two columns absorbs
                # part of the remaining adjustment
                if orig[slice_no, cur_pos] - \
                        orig[slice_no, cur_pos + dir] > 0:
                    cur_adj = max([0, cur_adj - orig[slice_no, cur_pos] +
                                   orig[slice_no, cur_pos + dir]])
                    prev = adj - cur_adj
            else:
                # lowering: mirror image of the branch above
                done_adj = done_adj + \
                    min([0, -maxstep +
                         orig[slice_no, cur_pos] -
                         orig[slice_no, cur_pos + dir]])
                if orig[slice_no, cur_pos] - \
                        orig[slice_no, cur_pos + dir] > 0:
                    cur_adj = min([0, cur_adj + orig[slice_no, cur_pos] - orig[slice_no, cur_pos + dir]])
                    prev = adj - cur_adj
            new[slice_no, cur_pos] = max([0, orig[slice_no, cur_pos] + cur_adj])
            # taper the remaining adjustment in proportion to the
            # fraction of headroom already used (done_adj / can_adj)
            if cur_adj != 0 and \
                    abs(prev) < abs(int(adj * done_adj / can_adj)):
                cur_adj += prev - int(adj * done_adj / can_adj)
                prev = int(adj * done_adj / can_adj)
    new[slice_no, end] = orig[slice_no, end]
def perform(level, box, options):
    """Filter entry point called by MCEdit.

    Finds the dominant cliff in each slice of the selection and smooths
    it by raising the low side and/or lowering the high side (per the
    "Raise/Lower" option), then shifts every block column to its new
    height, preserving bedrock and patching water edges with beaches.

    NOTE(review): written for the Python 2 MCEdit runtime (uses xrange).
    """
    if box.volume > 16000000:
        raise ValueError("Volume too big for this filter method!")
    RLOption = options["Raise/Lower"]
    schema = level.extractSchematic(box)
    schema.removeEntitiesInBox(schema.bounds)
    schema.removeTileEntitiesInBox(schema.bounds)
    # boolean mask of which blocks in the selection count as terrain
    terrainBlocks = terrainBlockmask[schema.Blocks]
    coords = terrainBlocks.nonzero()
    # Swap values around so long edge of selected rectangle is first
    # - the long edge is assumed to run parallel to the cliff face
    # and we want to process slices perpendicular to the face
    # heightmap will have x,z (or z,x) index with highest ground level
    if schema.Width > schema.Length:
        heightmap = zeros((schema.Width, schema.Length), dtype='float32')
        heightmap[coords[0], coords[1]] = coords[2]
        newHeightmap = zeros((schema.Width, schema.Length), dtype='uint16')
        slice_count = schema.Width
        slice_width = schema.Length
    else:
        heightmap = zeros((schema.Length, schema.Width), dtype='float32')
        heightmap[coords[1], coords[0]] = coords[2]
        newHeightmap = zeros((schema.Length, schema.Width), dtype='uint16')
        slice_count = schema.Length
        slice_width = schema.Width
    nonTerrainBlocks = ~terrainBlocks
    nonTerrainBlocks &= schema.Blocks != 0
    for slice_no in range(0, slice_count):
        cliff_height = 0
        # determine pos and height of cliff in this slice
        for cur_pos in range(0, slice_width - 1):
            if abs(heightmap[slice_no, cur_pos] -
                   heightmap[slice_no, cur_pos + 1]) > abs(cliff_height):
                cliff_height = \
                    heightmap[slice_no, cur_pos] - \
                    heightmap[slice_no, cur_pos + 1]
                cliff_pos = cur_pos
        if abs(cliff_height) < 2:
            # nothing to adjust - just copy heightmap to newHeightmap
            adjheight(heightmap, newHeightmap, slice_no, 0, 1, 0, 1, 1, slice_width)
            continue
        # Try to keep adjusted columns within 1 column of their neighbours
        # but ramp up to 4 blocks up/down on each column when needed
        for max_step in range(1, 4):
            can_left = maxadj(heightmap, slice_no, cliff_pos, -1, cliff_height < 0, max_step, slice_width)
            can_right = maxadj(heightmap, slice_no, cliff_pos + 1, 1, cliff_height > 0, max_step, slice_width)
            # honour Raise Only / Lower Only by zeroing the disallowed
            # direction on each side of the cliff
            if can_right < 0 and RLOption == "Raise Only":
                can_right = 0
            if can_right > 0 and RLOption == "Lower Only":
                can_right = 0
            if can_left < 0 and RLOption == "Raise Only":
                can_left = 0
            if can_left > 0 and RLOption == "Lower Only":
                can_left = 0
            if 0 > cliff_height > can_right - can_left:
                if abs(can_left) > abs(can_right):
                    adj_left = -1 * (cliff_height - max([int(cliff_height / 2), can_right]))
                    adj_right = cliff_height + adj_left
                else:
                    adj_right = cliff_height - max([int(cliff_height / 2), -can_left])
                    adj_left = -1 * (cliff_height - adj_right + 1)
            else:
                if 0 < cliff_height < can_right - can_left:
                    if abs(can_left) > abs(can_right):
                        adj_left = -1 * (cliff_height - min([int(cliff_height / 2), can_right]))
                        adj_right = cliff_height + adj_left
                    else:
                        adj_right = cliff_height - min([int(cliff_height / 2), -can_left]) - 1
                        adj_left = -1 * (cliff_height - adj_right)
                else:
                    # not enough headroom at this max_step - try a larger one
                    adj_right = 0
                    adj_left = 0
                    continue
            break
        adjheight(heightmap, newHeightmap, slice_no, cliff_pos, -1, adj_left, can_left, max_step, slice_width)
        adjheight(heightmap, newHeightmap, slice_no, cliff_pos + 1, 1, adj_right, can_right, max_step, slice_width)
    # OK, newHeightmap has new height for each column
    # so it's just a matter of moving everything up/down
    for x, z in itertools.product(xrange(1, schema.Width - 1), xrange(1, schema.Length - 1)):
        if schema.Width > schema.Length:
            oh = heightmap[x, z]
            nh = newHeightmap[x, z]
        else:
            oh = heightmap[z, x]
            nh = newHeightmap[z, x]
        delta = nh - oh
        column = array(schema.Blocks[x, z])
        # Keep bottom 5 blocks, so we don't lose bedrock
        keep = min([5, nh])
        Waterdepth = 0
        # Detect Water on top
        if column[oh + 1:oh + 2] == am.Water.ID or \
                column[oh + 1:oh + 2] == am.Ice.ID:
            for cur_pos in range(int(oh) + 1, schema.Height):
                if column[cur_pos:cur_pos + 1] != am.Water.ID and \
                        column[cur_pos:cur_pos + 1] != am.Ice.ID:
                    break
                Waterdepth += 1
        if delta == 0:
            column[oh:] = schema.Blocks[x, z, oh:]
        if delta < 0:
            # Moving column down
            column[keep:delta] = schema.Blocks[x, z, keep - delta:]
            column[delta:] = am.Air.ID
            if Waterdepth > 0:
                # Avoid stepping small lakes, etc on cliff top
                # replace with dirt 'n grass
                column[nh:nh + 1] = am.Grass.ID
                column[nh + 1:nh + 1 + delta] = am.Air.ID
        if delta > 0:
            # Moving column up
            column[keep + delta:] = schema.Blocks[x, z, keep:-delta]
            # Put stone in gap at the bottom
            column[keep:keep + delta] = am.Stone.ID
            if Waterdepth > 0:
                if Waterdepth > delta:
                    # Retain Ice
                    if column[nh + Waterdepth:nh + Waterdepth + 1] == am.Ice.ID:
                        column[nh + Waterdepth - delta:nh + 1 + Waterdepth - delta] = \
                            am.Ice.ID
                    column[nh + 1 + Waterdepth - delta:nh + 1 + Waterdepth] = am.Air.ID
                else:
                    if Waterdepth < delta - 2:
                        column[nh:nh + 1] = am.Grass.ID
                        column[nh + 1:nh + 1 + Waterdepth] = am.Air.ID
                    else:
                        # Beach at the edge
                        column[nh - 4:nh - 2] = am.Sandstone.ID
                        column[nh - 2:nh + 1] = am.Sand.ID
                        column[nh + 1:nh + 1 + Waterdepth] = am.Air.ID
        schema.Blocks[x, z] = column
    level.copyBlocksFrom(schema, schema.bounds, box.origin)
| [
"numpy.array",
"numpy.zeros"
] | [((780, 807), 'numpy.zeros', 'zeros', (['(256,)'], {'dtype': '"""bool"""'}), "((256,), dtype='bool')\n", (785, 807), False, 'from numpy import zeros, array\n'), ((4745, 4798), 'numpy.zeros', 'zeros', (['(schema.Width, schema.Length)'], {'dtype': '"""float32"""'}), "((schema.Width, schema.Length), dtype='float32')\n", (4750, 4798), False, 'from numpy import zeros, array\n'), ((4874, 4926), 'numpy.zeros', 'zeros', (['(schema.Width, schema.Length)'], {'dtype': '"""uint16"""'}), "((schema.Width, schema.Length), dtype='uint16')\n", (4879, 4926), False, 'from numpy import zeros, array\n'), ((5028, 5081), 'numpy.zeros', 'zeros', (['(schema.Length, schema.Width)'], {'dtype': '"""float32"""'}), "((schema.Length, schema.Width), dtype='float32')\n", (5033, 5081), False, 'from numpy import zeros, array\n'), ((5157, 5209), 'numpy.zeros', 'zeros', (['(schema.Length, schema.Width)'], {'dtype': '"""uint16"""'}), "((schema.Length, schema.Width), dtype='uint16')\n", (5162, 5209), False, 'from numpy import zeros, array\n'), ((8523, 8549), 'numpy.array', 'array', (['schema.Blocks[x, z]'], {}), '(schema.Blocks[x, z])\n', (8528, 8549), False, 'from numpy import zeros, array\n')] |
"""
Dihedral angle effect
=====================
Effect of dihedral on the lift coefficient slope of rectangular wings.
References
----------
.. [1] <NAME>., *Low-Speed Aerodynamics*, 2nd ed, Cambridge University
Press, 2001: figure 12.21
"""
import time
import matplotlib.pyplot as plt
import numpy as np
import ezaero.vlm.steady as vlm
start = time.time()
# dihedral angles to sweep, converted from degrees to radians
deltas = np.array([-45, -30, -15, 0, 15, 30]) * np.pi / 180
# shared mesh and the two flight conditions used for the
# finite-difference slope estimate (0 deg and 1 deg angle of attack)
mesh = vlm.MeshParameters(m=8, n=30)
flcond_0 = vlm.FlightConditions(ui=100.0, aoa=0.0, rho=1.0)
flcond_1 = vlm.FlightConditions(ui=100.0, aoa=np.pi / 180, rho=1.0)


def _lift_slope(delta):
    """Return the lift coefficient slope CL_alpha for dihedral *delta*."""
    # The reference figure uses an aspect ratio of 4 based on the "real"
    # (projected) wingspan, so the planform span is scaled by cos(delta).
    span = 4 * np.cos(delta)
    # rectangular wing (root chord == tip chord) with no sweep
    wing = vlm.WingParameters(
        root_chord=1.0,
        tip_chord=1.0,
        planform_wingspan=span,
        sweep_angle=0,
        dihedral_angle=delta,
    )
    cl_0 = vlm.Simulation(wing=wing, mesh=mesh, flight_conditions=flcond_0).run().cl_wing
    cl_1 = vlm.Simulation(wing=wing, mesh=mesh, flight_conditions=flcond_1).run().cl_wing
    # finite-difference slope, projected with the dihedral angle
    return (cl_1 - cl_0) / (flcond_1.aoa - flcond_0.aoa) * np.cos(delta)


cla_list = [_lift_slope(delta) for delta in deltas]
elapsed = time.time() - start
print("Elapsed time: {} s".format(elapsed))
fig = plt.figure()
deltas_deg = deltas * 180 / np.pi
plt.plot(deltas_deg, cla_list, "o-")
plt.xlabel(r"$\delta$[deg]")
plt.ylabel(r"CL$_\alpha$")
plt.ylim(0, 4)
plt.grid()
plt.xlim(deltas_deg.min(), deltas_deg.max())
plt.show()
| [
"matplotlib.pyplot.grid",
"ezaero.vlm.steady.WingParameters",
"matplotlib.pyplot.ylabel",
"ezaero.vlm.steady.FlightConditions",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"ezaero.vlm.steady.MeshParameters",
"matplotlib.pyplot.figure",
"ezaero.vlm.steady.Simulation",
"nu... | [((353, 364), 'time.time', 'time.time', ([], {}), '()\n', (362, 364), False, 'import time\n'), ((504, 533), 'ezaero.vlm.steady.MeshParameters', 'vlm.MeshParameters', ([], {'m': '(8)', 'n': '(30)'}), '(m=8, n=30)\n', (522, 533), True, 'import ezaero.vlm.steady as vlm\n'), ((610, 658), 'ezaero.vlm.steady.FlightConditions', 'vlm.FlightConditions', ([], {'ui': '(100.0)', 'aoa': '(0.0)', 'rho': '(1.0)'}), '(ui=100.0, aoa=0.0, rho=1.0)\n', (630, 658), True, 'import ezaero.vlm.steady as vlm\n'), ((670, 726), 'ezaero.vlm.steady.FlightConditions', 'vlm.FlightConditions', ([], {'ui': '(100.0)', 'aoa': '(np.pi / 180)', 'rho': '(1.0)'}), '(ui=100.0, aoa=np.pi / 180, rho=1.0)\n', (690, 726), True, 'import ezaero.vlm.steady as vlm\n'), ((1600, 1611), 'time.time', 'time.time', ([], {}), '()\n', (1609, 1611), False, 'import time\n'), ((1686, 1698), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1696, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1745), 'matplotlib.pyplot.plot', 'plt.plot', (['(deltas * 180 / np.pi)', 'cla_list', '"""o-"""'], {}), "(deltas * 180 / np.pi, cla_list, 'o-')\n", (1707, 1745), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1774), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$[deg]"""'], {}), "('$\\\\delta$[deg]')\n", (1756, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CL$_\\\\alpha$"""'], {}), "('CL$_\\\\alpha$')\n", (1785, 1801), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1816), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(4)'], {}), '(0, 4)\n', (1810, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1827), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1825, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1901, 1903), True, 'import matplotlib.pyplot as plt\n'), ((1102, 1214), 'ezaero.vlm.steady.WingParameters', 
'vlm.WingParameters', ([], {'root_chord': '(1.0)', 'tip_chord': '(1.0)', 'planform_wingspan': 'bp', 'sweep_angle': '(0)', 'dihedral_angle': 'delta'}), '(root_chord=1.0, tip_chord=1.0, planform_wingspan=bp,\n sweep_angle=0, dihedral_angle=delta)\n', (1120, 1214), True, 'import ezaero.vlm.steady as vlm\n'), ((398, 434), 'numpy.array', 'np.array', (['[-45, -30, -15, 0, 15, 30]'], {}), '([-45, -30, -15, 0, 15, 30])\n', (406, 434), True, 'import numpy as np\n'), ((1006, 1019), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1012, 1019), True, 'import numpy as np\n'), ((1536, 1549), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1542, 1549), True, 'import numpy as np\n'), ((1270, 1334), 'ezaero.vlm.steady.Simulation', 'vlm.Simulation', ([], {'wing': 'wing', 'mesh': 'mesh', 'flight_conditions': 'flcond_0'}), '(wing=wing, mesh=mesh, flight_conditions=flcond_0)\n', (1284, 1334), True, 'import ezaero.vlm.steady as vlm\n'), ((1353, 1417), 'ezaero.vlm.steady.Simulation', 'vlm.Simulation', ([], {'wing': 'wing', 'mesh': 'mesh', 'flight_conditions': 'flcond_1'}), '(wing=wing, mesh=mesh, flight_conditions=flcond_1)\n', (1367, 1417), True, 'import ezaero.vlm.steady as vlm\n')] |
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import thinplate as tps
from numpy.testing import assert_allclose
def test_pytorch_grid():
c_dst = np.array([
[0., 0],
[1., 0],
[1, 1],
[0, 1],
], dtype=np.float32)
c_src = np.array([
[10., 10],
[20., 10],
[20, 20],
[10, 20],
], dtype=np.float32) / 40.
theta = tps.tps_theta_from_points(c_src, c_dst)
theta_r = tps.tps_theta_from_points(c_src, c_dst, reduced=True)
np_grid = tps.tps_grid(theta, c_dst, (20,20))
np_grid_r = tps.tps_grid(theta_r, c_dst, (20,20))
pth_theta = torch.tensor(theta).unsqueeze(0)
pth_grid = tps.torch.tps_grid(pth_theta, torch.tensor(c_dst), (1, 1, 20, 20)).squeeze().numpy()
pth_grid = (pth_grid + 1) / 2 # convert [-1,1] range to [0,1]
pth_theta_r = torch.tensor(theta_r).unsqueeze(0)
pth_grid_r = tps.torch.tps_grid(pth_theta_r, torch.tensor(c_dst), (1, 1, 20, 20)).squeeze().numpy()
pth_grid_r = (pth_grid_r + 1) / 2 # convert [-1,1] range to [0,1]
assert_allclose(np_grid, pth_grid)
assert_allclose(np_grid_r, pth_grid_r)
assert_allclose(np_grid_r, np_grid) | [
"thinplate.tps_grid",
"numpy.testing.assert_allclose",
"numpy.array",
"torch.tensor",
"thinplate.tps_theta_from_points"
] | [((199, 263), 'numpy.array', 'np.array', (['[[0.0, 0], [1.0, 0], [1, 1], [0, 1]]'], {'dtype': 'np.float32'}), '([[0.0, 0], [1.0, 0], [1, 1], [0, 1]], dtype=np.float32)\n', (207, 263), True, 'import numpy as np\n'), ((456, 495), 'thinplate.tps_theta_from_points', 'tps.tps_theta_from_points', (['c_src', 'c_dst'], {}), '(c_src, c_dst)\n', (481, 495), True, 'import thinplate as tps\n'), ((510, 563), 'thinplate.tps_theta_from_points', 'tps.tps_theta_from_points', (['c_src', 'c_dst'], {'reduced': '(True)'}), '(c_src, c_dst, reduced=True)\n', (535, 563), True, 'import thinplate as tps\n'), ((579, 615), 'thinplate.tps_grid', 'tps.tps_grid', (['theta', 'c_dst', '(20, 20)'], {}), '(theta, c_dst, (20, 20))\n', (591, 615), True, 'import thinplate as tps\n'), ((631, 669), 'thinplate.tps_grid', 'tps.tps_grid', (['theta_r', 'c_dst', '(20, 20)'], {}), '(theta_r, c_dst, (20, 20))\n', (643, 669), True, 'import thinplate as tps\n'), ((1122, 1156), 'numpy.testing.assert_allclose', 'assert_allclose', (['np_grid', 'pth_grid'], {}), '(np_grid, pth_grid)\n', (1137, 1156), False, 'from numpy.testing import assert_allclose\n'), ((1161, 1199), 'numpy.testing.assert_allclose', 'assert_allclose', (['np_grid_r', 'pth_grid_r'], {}), '(np_grid_r, pth_grid_r)\n', (1176, 1199), False, 'from numpy.testing import assert_allclose\n'), ((1204, 1239), 'numpy.testing.assert_allclose', 'assert_allclose', (['np_grid_r', 'np_grid'], {}), '(np_grid_r, np_grid)\n', (1219, 1239), False, 'from numpy.testing import assert_allclose\n'), ((321, 393), 'numpy.array', 'np.array', (['[[10.0, 10], [20.0, 10], [20, 20], [10, 20]]'], {'dtype': 'np.float32'}), '([[10.0, 10], [20.0, 10], [20, 20], [10, 20]], dtype=np.float32)\n', (329, 393), True, 'import numpy as np\n'), ((690, 709), 'torch.tensor', 'torch.tensor', (['theta'], {}), '(theta)\n', (702, 709), False, 'import torch\n'), ((908, 929), 'torch.tensor', 'torch.tensor', (['theta_r'], {}), '(theta_r)\n', (920, 929), False, 'import torch\n'), ((768, 787), 
'torch.tensor', 'torch.tensor', (['c_dst'], {}), '(c_dst)\n', (780, 787), False, 'import torch\n'), ((992, 1011), 'torch.tensor', 'torch.tensor', (['c_dst'], {}), '(c_dst)\n', (1004, 1011), False, 'import torch\n')] |
import numpy as np
import pybullet as p
import pybullet_data as pd
import pybullet_utils.bullet_client as bc
from gym import spaces
try:
from .. import Environment
from .robots import get_robot
from .tasks import get_task
except ImportError:
from karolos.environments import Environment
from karolos.environments.robot_task_environments.robots import get_robot
from karolos.environments.robot_task_environments.tasks import get_task
class RobotTaskEnvironment(Environment):
    """Combines a robot and a task into one gym-style environment backed
    by a shared pybullet client."""

    def __init__(self, task_config, robot_config, render=False,
                 bullet_client=None, **kwargs):
        """Create robot and task and wire up spaces and reward.

        :param task_config: dict passed to get_task (must contain "name")
        :param robot_config: dict passed to get_robot (must contain "name")
        :param render: open a GUI connection instead of a headless one
        :param bullet_client: reuse an existing client; a new one is
            created (and configured) when None
        """
        self.render = render
        self.task_config = task_config
        self.robot_config = robot_config
        # create and configure our own bullet client if none was supplied
        if bullet_client is None:
            connection_mode = p.GUI if render else p.DIRECT
            bullet_client = bc.BulletClient(connection_mode)
            # make pybullet_data assets (e.g. plane.urdf) findable
            bullet_client.setAdditionalSearchPath(pd.getDataPath())
        # fixed-step simulation at 300 Hz, no real-time stepping
        time_step = 1. / 300.
        bullet_client.setTimeStep(time_step)
        bullet_client.setRealTimeSimulation(0)
        bullet_client.loadURDF("plane.urdf")
        self.bullet_client = bullet_client
        self.task = get_task(task_config, self.bullet_client)
        self.robot = get_robot(robot_config, self.bullet_client)
        # expose the robot's action space; observations combine robot and task
        self.action_space = self.robot.action_space
        self.observation_space = spaces.Dict({
            'robot': self.robot.observation_space,
            'task': self.task.observation_space,
        })
        self.reward_function = self.task.reward_function
        self.success_criterion = self.task.success_criterion

    def reset(self, desired_state=None):
        """
        Reset the environment and return new state

        :param desired_state: optional dict with "robot" and "task" entries
            describing the state to reset into; random reset when None
        :return: (state, goal_info) tuple on success
        """
        try:
            if desired_state is not None:
                observation_robot = self.robot.reset(desired_state["robot"])
                observation_task, goal_info, _ = self.task.reset(self.robot,
                                                                 observation_robot,
                                                                 desired_state[
                                                                     "task"])
            else:
                observation_robot = self.robot.reset()
                observation_task, goal_info, _ = self.task.reset(self.robot,
                                                                 observation_robot)
        except AssertionError as e:
            # NOTE(review): an invalid desired state is *returned* as the
            # AssertionError instead of raised -- callers must check the
            # return value; confirm this is intentional.
            return e
        state = {
            'robot': observation_robot,
            'task': observation_task
        }
        return state, goal_info

    def step(self, action):
        """Apply one action to the robot, advance the task, and return
        (state, goal_info, done)."""
        observation_robot = self.robot.step(action)
        observation_task, goal_info, done = self.task.step(observation_robot)
        state = {
            'robot': observation_robot,
            'task': observation_task
        }
        return state, goal_info, done
if __name__ == "__main__":
import time
env_kwargs = {
"render": True,
"task_config": {
"name": "pick_place",
# "max_steps": 25
},
"robot_config": {
"name": "panda",
# "scale": .1,
# "sim_time": .1
}
}
env = RobotTaskEnvironment(**env_kwargs)
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=70,
cameraPitch=-27,
cameraTargetPosition=(0, 0, 0)
)
time_step = p.getPhysicsEngineParameters()["fixedTimeStep"]
while True:
obs = env.reset()
for _ in np.arange(1. / time_step):
action = env.action_space.sample()
time.sleep(time_step)
observation, goal, done = env.step(action)
reward = env.reward_function(False, goal)
| [
"pybullet.resetDebugVisualizerCamera",
"pybullet.getPhysicsEngineParameters",
"pybullet_data.getDataPath",
"karolos.environments.robot_task_environments.tasks.get_task",
"gym.spaces.Dict",
"time.sleep",
"karolos.environments.robot_task_environments.robots.get_robot",
"pybullet_utils.bullet_client.Bull... | [((3306, 3422), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(70)', 'cameraPitch': '(-27)', 'cameraTargetPosition': '(0, 0, 0)'}), '(cameraDistance=1.5, cameraYaw=70, cameraPitch=\n -27, cameraTargetPosition=(0, 0, 0))\n', (3334, 3422), True, 'import pybullet as p\n'), ((1196, 1237), 'karolos.environments.robot_task_environments.tasks.get_task', 'get_task', (['task_config', 'self.bullet_client'], {}), '(task_config, self.bullet_client)\n', (1204, 1237), False, 'from karolos.environments.robot_task_environments.tasks import get_task\n'), ((1260, 1303), 'karolos.environments.robot_task_environments.robots.get_robot', 'get_robot', (['robot_config', 'self.bullet_client'], {}), '(robot_config, self.bullet_client)\n', (1269, 1303), False, 'from karolos.environments.robot_task_environments.robots import get_robot\n'), ((1391, 1485), 'gym.spaces.Dict', 'spaces.Dict', (["{'robot': self.robot.observation_space, 'task': self.task.observation_space}"], {}), "({'robot': self.robot.observation_space, 'task': self.task.\n observation_space})\n", (1402, 1485), False, 'from gym import spaces\n'), ((3567, 3597), 'pybullet.getPhysicsEngineParameters', 'p.getPhysicsEngineParameters', ([], {}), '()\n', (3595, 3597), True, 'import pybullet as p\n'), ((3676, 3702), 'numpy.arange', 'np.arange', (['(1.0 / time_step)'], {}), '(1.0 / time_step)\n', (3685, 3702), True, 'import numpy as np\n'), ((848, 880), 'pybullet_utils.bullet_client.BulletClient', 'bc.BulletClient', (['connection_mode'], {}), '(connection_mode)\n', (863, 880), True, 'import pybullet_utils.bullet_client as bc\n'), ((3763, 3784), 'time.sleep', 'time.sleep', (['time_step'], {}), '(time_step)\n', (3773, 3784), False, 'import time\n'), ((932, 948), 'pybullet_data.getDataPath', 'pd.getDataPath', ([], {}), '()\n', (946, 948), True, 'import pybullet_data as pd\n')] |
# -*- coding: utf-8 -*-
# test_nabsH.py
# This module provides the tests for the nabsH function.
# Copyright 2014 <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
"""This module provides the test class for the nabsH() function.
"""
import unittest
import numpy as np
import deltasigma as ds
from nose.tools import raises
class TestNabsH(unittest.TestCase):
    """Unit tests for the nabsH() function."""

    def setUp(self):
        pass

    def test_nabsH(self):
        """nabsH(w, H) must equal -|H(exp(1j*w))| on a frequency grid."""
        tf = ([1, 2], [2, 0, .25], 1)
        num_points = 129
        omega = np.linspace(0, 2 * np.pi, num=num_points, endpoint=True)
        expected = -np.abs(ds.evalTF(tf, np.exp(1j * omega)))
        actual = ds.nabsH(omega, tf)
        self.assertTrue(np.allclose(expected, actual, atol=1e-8, rtol=1e-5))
| [
"numpy.allclose",
"deltasigma.evalTF",
"numpy.exp",
"numpy.linspace",
"deltasigma.nabsH"
] | [((1001, 1048), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {'num': 'N', 'endpoint': '(True)'}), '(0, 2 * np.pi, num=N, endpoint=True)\n', (1012, 1048), True, 'import numpy as np\n'), ((1059, 1075), 'numpy.exp', 'np.exp', (['(1.0j * w)'], {}), '(1.0j * w)\n', (1065, 1075), True, 'import numpy as np\n'), ((1123, 1137), 'deltasigma.nabsH', 'ds.nabsH', (['w', 'H'], {}), '(w, H)\n', (1131, 1137), True, 'import deltasigma as ds\n'), ((1162, 1205), 'numpy.allclose', 'np.allclose', (['r1', 'r2'], {'atol': '(1e-08)', 'rtol': '(1e-05)'}), '(r1, r2, atol=1e-08, rtol=1e-05)\n', (1173, 1205), True, 'import numpy as np\n'), ((1093, 1108), 'deltasigma.evalTF', 'ds.evalTF', (['H', 'z'], {}), '(H, z)\n', (1102, 1108), True, 'import deltasigma as ds\n')] |
# ======================================================================
# Created by <NAME>, <NAME>, <NAME> 11/2021
# ======================================================================
import numpy as np
from parameters import *
from variables import *
import equations
#=======================================================================
# Objective Function to start VFI (in our case, the value function)
def EV_F(X, kap, n_agt):
    """Objective for the value-function iteration: current utility plus
    the discounted continuation value.

    Removed two large blocks of dead, commented-out code (triple-quoted
    string statements) that obscured the one-line objective.

    Parameters
    ----------
    X : array_like
        Flat vector of all choice variables, indexed through the global
        index map ``I`` (only the "utl" and "val" entries are used here).
    kap : array_like
        Current capital stock.  Unused here; kept so the objective,
        constraint and Jacobian callbacks share a uniform signature.
    n_agt : int
        Number of agents.  Unused here; see ``kap``.

    Returns
    -------
    float
        u + beta * V, with ``beta`` taken from the star-imported
        parameters module.
    """
    return X[I["utl"]] + beta*X[I["val"]]
#=======================================================================
#=======================================================================
# Computation of gradient (first order finite difference) of initial objective function
def EV_GRAD_F(X, kap, n_agt):
    """Finite-difference gradient of EV_F with respect to X.

    Central differences (step h) are used where the variable can move
    down by h without going negative; otherwise a one-sided forward
    difference keeps the evaluation point non-negative.
    """
    h = 1e-4
    grad = np.zeros(len(X), float)
    for i, x_i in enumerate(X):
        probe = np.copy(X)
        probe[i] = x_i + h
        f_plus = EV_F(probe, kap, n_agt)
        if x_i - h >= 0:
            # central difference
            probe[i] = x_i - h
            f_minus = EV_F(probe, kap, n_agt)
            grad[i] = (f_plus - f_minus) / (2.0 * h)
        else:
            # forward difference at the lower boundary
            probe[i] = x_i
            f_base = EV_F(probe, kap, n_agt)
            grad[i] = (f_plus - f_base) / h
    return grad
#=======================================================================
#======================================================================
# Equality constraints for the first time step of the model
def EV_G(X, kap, n_agt):
    """Equality constraints for the first time step of the model.

    Parameters
    ----------
    X : array_like
        Flat vector of all policy variables, indexed via the global
        index map ``I``.
    kap : array_like
        Current capital stock, forwarded to ``equations.f_ctt``.
    n_agt : int
        Number of agents; sets the width of the next-period capital row.

    Returns
    -------
    numpy.ndarray
        Vector of length ``n_ctt`` holding every constraint residual,
        laid out according to the global index map ``I_ctt``.
    """
    M=n_ctt
    G=np.empty(M, float)
    # next-period capital as a 1 x n_agt row, taken from the "knx" slice of X
    s = (1,n_agt)
    kap2 = np.zeros(s)
    kap2[0,:] = X[I["knx"]]
    """ print("should be the same")
    #print(type(X[I["knx"]]))
    print(np.shape(X[I["knx"]]))
    #print(type(kap2))
    print(np.shape(kap2)) """
    # pull in constraints
    e_ctt = equations.f_ctt(X, kap2, 1, kap)
    # apply all constraints with this one loop
    for iter in ctt_key:
        G[I_ctt[iter]] = e_ctt[iter]
    return G
#======================================================================
#======================================================================
# Computation (finite difference) of Jacobian of equality constraints
# for first time step
def EV_JAC_G(X, flag, kap, n_agt):
    """Finite-difference Jacobian of the first-step equality constraints.

    When ``flag`` is true, returns the (dense) sparsity structure as a
    pair of row/column index arrays; otherwise returns the Jacobian
    entries, in the same row-major layout, approximated by forward
    differences with step h.
    """
    N=n_pol
    M=n_ctt
    #print(N, " ",M) #testing testing
    NZ=n_pol*n_ctt  # dense layout: one entry per (constraint, variable) pair
    A=np.empty(NZ, float)
    ACON=np.empty(NZ, int)  # constraint (row) index of each Jacobian entry
    AVAR=np.empty(NZ, int)  # variable (column) index of each Jacobian entry
    # Jacobian matrix structure
    if (flag):
        for ixM in range(M):
            for ixN in range(N):
                ACON[ixN + (ixM)*N]=ixM
                AVAR[ixN + (ixM)*N]=ixN
        return (ACON, AVAR)
    else:
        # Finite Differences
        h=1e-4
        gx1=EV_G(X, kap, n_agt)
        for ixM in range(M):
            for ixN in range(N):
                xAdj=np.copy(X)
                xAdj[ixN]=xAdj[ixN]+h
                gx2=EV_G(xAdj, kap, n_agt)
                A[ixN + ixM*N]=(gx2[ixM] - gx1[ixM])/h
        return A
#======================================================================
#======================================================================
class ipopt_class_inst():
    """
    Optimization problem object passed to cyipopt.

    Wraps the module-level EV_* evaluators for a given initial capital
    stock and number of agents.  A Hessian callback is optional; ipopt
    falls back to a limited-memory approximation when it is absent.
    """
    def __init__(self, X, n_agents, k_init, NELE_JAC, NELE_HESS=None, verbose=False):
        self.x = X
        self.n_agents = n_agents
        self.k_init = k_init
        self.NELE_JAC = NELE_JAC
        self.NELE_HESS = NELE_HESS
        # NOTE(review): gp_old/initial are not parameters here; they must
        # come from the star-imported modules -- confirm they exist there.
        self.gp_old = gp_old
        self.initial = initial
        self.verbose = verbose

    # Thin adapters over the module-level evaluators for this k_init/n_agents.
    def eval_f(self, x):
        """Objective value at x."""
        return EV_F(x, self.k_init, self.n_agents)

    def eval_grad_f(self, x):
        """Gradient of the objective at x."""
        return EV_GRAD_F(x, self.k_init, self.n_agents)

    def eval_g(self, x):
        """Constraint values at x.

        BUGFIX: EV_G takes (X, kap, n_agt); the previous call passed an
        extra gp_old argument, which raised TypeError at runtime.
        """
        return EV_G(x, self.k_init, self.n_agents)

    def eval_jac_g(self, x, flag):
        """Constraint Jacobian values (or sparsity structure when flag).

        BUGFIX: EV_JAC_G takes (X, flag, kap, n_agt); the previous call
        passed an extra gp_old argument, which raised TypeError.
        """
        return EV_JAC_G(x, flag, self.k_init, self.n_agents)

    # --- cyipopt callback interface ---------------------------------------
    def objective(self, x):
        # Returns the scalar value of the objective given x.
        return self.eval_f(x)

    def gradient(self, x):
        # Returns the gradient of the objective with respect to x.
        return self.eval_grad_f(x)

    def constraints(self, x):
        # Returns the constraints
        return self.eval_g(x)

    def jacobian(self, x):
        # Returns the Jacobian of the constraints with respect to x.
        return self.eval_jac_g(x, False)

    def intermediate(self, alg_mod, iter_count, obj_value, inf_pr, inf_du, mu,
                     d_norm, regularization_size, alpha_du, alpha_pr,
                     ls_trials):
        """Prints information at every Ipopt i_pth."""
        if self.verbose:
            msg = "Objective value at i_pth #{:d} is - {:g}"
            print(msg.format(iter_count, obj_value))
| [
"numpy.copy",
"numpy.zeros",
"numpy.empty",
"equations.f_ctt"
] | [((1628, 1646), 'numpy.zeros', 'np.zeros', (['N', 'float'], {}), '(N, float)\n', (1636, 1646), True, 'import numpy as np\n'), ((2530, 2548), 'numpy.empty', 'np.empty', (['M', 'float'], {}), '(M, float)\n', (2538, 2548), True, 'import numpy as np\n'), ((2579, 2590), 'numpy.zeros', 'np.zeros', (['s'], {}), '(s)\n', (2587, 2590), True, 'import numpy as np\n'), ((2812, 2844), 'equations.f_ctt', 'equations.f_ctt', (['X', 'kap2', '(1)', 'kap'], {}), '(X, kap2, 1, kap)\n', (2827, 2844), False, 'import equations\n'), ((3363, 3382), 'numpy.empty', 'np.empty', (['NZ', 'float'], {}), '(NZ, float)\n', (3371, 3382), True, 'import numpy as np\n'), ((3392, 3409), 'numpy.empty', 'np.empty', (['NZ', 'int'], {}), '(NZ, int)\n', (3400, 3409), True, 'import numpy as np\n'), ((3480, 3497), 'numpy.empty', 'np.empty', (['NZ', 'int'], {}), '(NZ, int)\n', (3488, 3497), True, 'import numpy as np\n'), ((1742, 1752), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (1749, 1752), True, 'import numpy as np\n'), ((3933, 3943), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (3940, 3943), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from oolearning.model_wrappers.HyperParamsBase import HyperParamsBase
from oolearning.model_wrappers.ModelExceptions import MissingValueError
from oolearning.model_wrappers.ModelWrapperBase import ModelWrapperBase
from oolearning.model_wrappers.SklearnPredictMixin import SklearnPredictArrayMixin
class LassoRegressorHP(HyperParamsBase):
    """Hyper-parameters for Lasso regression.

    See http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html
    for more information on the tuning parameters.
    """
    # noinspection SpellCheckingInspection
    def __init__(self, alpha: float = 0.5):
        super().__init__()
        self._params_dict = {'alpha': alpha}
class LassoRegressor(SklearnPredictArrayMixin, ModelWrapperBase):
    """
    Fits a Lasso (L1-regularized) linear regression model on the data.
    """
    def __init__(self, fit_intercept: bool = True, seed: int = 42):
        """
        :param fit_intercept: whether the underlying sklearn Lasso model
            should fit an intercept term
        :param seed: random state passed to sklearn for reproducibility
        """
        super().__init__()
        self._fit_intercept = fit_intercept
        self._seed = seed

    @property
    def feature_importance(self):
        # feature importances are not provided for this model
        return None

    def _train(self,
               data_x: pd.DataFrame,
               data_y: np.ndarray,
               hyper_params: LassoRegressorHP = None) -> object:
        """
        Fit sklearn's Lasso on ``data_x``/``data_y``.

        :raises MissingValueError: if features or target contain NaNs,
            since linear regression can't handle missing values
        :return: the fitted ``sklearn.linear_model.Lasso`` object
        """
        assert hyper_params is not None
        assert isinstance(hyper_params, LassoRegressorHP)
        param_dict = hyper_params.params_dict

        # Regression can't handle missing values
        if data_x.isnull().sum().sum() > 0:
            raise MissingValueError()
        if any(np.isnan(data_y)):
            raise MissingValueError()

        # BUGFIX: fit_intercept was hard-coded to True, silently ignoring
        # the value passed to the constructor.  (Local also renamed from
        # the copy-pasted `ridge_reg` to `lasso`.)
        lasso = Lasso(alpha=param_dict['alpha'],
                      fit_intercept=self._fit_intercept,
                      random_state=self._seed)
        lasso.fit(data_x, data_y)

        return lasso
| [
"oolearning.model_wrappers.ModelExceptions.MissingValueError",
"numpy.isnan",
"sklearn.linear_model.Lasso"
] | [((1624, 1701), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': "param_dict['alpha']", 'fit_intercept': '(True)', 'random_state': 'self._seed'}), "(alpha=param_dict['alpha'], fit_intercept=True, random_state=self._seed)\n", (1629, 1701), False, 'from sklearn.linear_model import Lasso\n'), ((1511, 1530), 'oolearning.model_wrappers.ModelExceptions.MissingValueError', 'MissingValueError', ([], {}), '()\n', (1528, 1530), False, 'from oolearning.model_wrappers.ModelExceptions import MissingValueError\n'), ((1547, 1563), 'numpy.isnan', 'np.isnan', (['data_y'], {}), '(data_y)\n', (1555, 1563), True, 'import numpy as np\n'), ((1584, 1603), 'oolearning.model_wrappers.ModelExceptions.MissingValueError', 'MissingValueError', ([], {}), '()\n', (1601, 1603), False, 'from oolearning.model_wrappers.ModelExceptions import MissingValueError\n')] |
"""
This file contains Numba-accelerated functions used in the main detections.
"""
import numpy as np
from numba import jit
__all__ = []
#############################################################################
# NUMBA JIT UTILITY FUNCTIONS
#############################################################################
@jit('float64(float64[:], float64[:])', nopython=True)
def _corr(x, y):
    """Fast Pearson correlation coefficient between two 1D arrays."""
    n = x.size
    x_mean = x.mean()
    y_mean = y.mean()
    # One pass accumulating the cross product and both sums of squared deviations.
    num = 0
    x_ss = 0
    y_ss = 0
    for k in range(n):
        dx = x[k] - x_mean
        dy = y[k] - y_mean
        num += dx * dy
        x_ss += dx ** 2
        y_ss += dy ** 2
    return num / (np.sqrt(x_ss) * np.sqrt(y_ss))
@jit('float64(float64[:], float64[:])', nopython=True)
def _covar(x, y):
    """Fast sample covariance (normalized by n - 1)."""
    n = x.size
    x_mean = x.mean()
    y_mean = y.mean()
    acc = 0
    for k in range(n):
        acc += (x[k] - x_mean) * (y[k] - y_mean)
    return acc / (n - 1)
@jit('float64(float64[:])', nopython=True)
def _rms(x):
    """Fast root mean square of a 1D array."""
    n = x.size
    acc = 0
    for k in range(n):
        acc += x[k] ** 2
    # sqrt of the mean of squares
    acc /= n
    return np.sqrt(acc)
@jit('float64(float64[:], float64[:])', nopython=True)
def _slope_lstsq(x, y):
    """Slope of a 1D least-squares regression of y on x."""
    n_times = x.shape[0]
    sum_x = 0
    sum_y = 0
    sum_xx = 0
    sum_xy = 0
    for k in range(n_times):
        sum_x += x[k]
        sum_y += y[k]
        sum_xx += x[k] ** 2
        sum_xy += x[k] * y[k]
    # Closed-form OLS slope: (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2)
    return (n_times * sum_xy - sum_x * sum_y) / (n_times * sum_xx - sum_x ** 2)
@jit('float64[:](float64[:], float64[:])', nopython=True)
def _detrend(x, y):
    """Fast linear detrending: subtract the least-squares line of y on x."""
    m = _slope_lstsq(x, y)
    c = y.mean() - x.mean() * m
    return y - (x * m + c)
| [
"numba.jit",
"numpy.sqrt"
] | [((330, 383), 'numba.jit', 'jit', (['"""float64(float64[:], float64[:])"""'], {'nopython': '(True)'}), "('float64(float64[:], float64[:])', nopython=True)\n", (333, 383), False, 'from numba import jit\n'), ((758, 811), 'numba.jit', 'jit', (['"""float64(float64[:], float64[:])"""'], {'nopython': '(True)'}), "('float64(float64[:], float64[:])', nopython=True)\n", (761, 811), False, 'from numba import jit\n'), ((1038, 1079), 'numba.jit', 'jit', (['"""float64(float64[:])"""'], {'nopython': '(True)'}), "('float64(float64[:])', nopython=True)\n", (1041, 1079), False, 'from numba import jit\n'), ((1235, 1288), 'numba.jit', 'jit', (['"""float64(float64[:], float64[:])"""'], {'nopython': '(True)'}), "('float64(float64[:], float64[:])', nopython=True)\n", (1238, 1288), False, 'from numba import jit\n'), ((1652, 1708), 'numba.jit', 'jit', (['"""float64[:](float64[:], float64[:])"""'], {'nopython': '(True)'}), "('float64[:](float64[:], float64[:])', nopython=True)\n", (1655, 1708), False, 'from numba import jit\n'), ((667, 680), 'numpy.sqrt', 'np.sqrt', (['xm2s'], {}), '(xm2s)\n', (674, 680), True, 'import numpy as np\n'), ((692, 705), 'numpy.sqrt', 'np.sqrt', (['ym2s'], {}), '(ym2s)\n', (699, 705), True, 'import numpy as np\n'), ((1220, 1231), 'numpy.sqrt', 'np.sqrt', (['ms'], {}), '(ms)\n', (1227, 1231), True, 'import numpy as np\n')] |
import ctypes
import numpy as np
from devito.tools.utils import prod
__all__ = ['numpy_to_ctypes', 'numpy_to_mpitypes', 'numpy_view_offsets']
def numpy_to_ctypes(dtype):
    """Map numpy types to ctypes types."""
    mapper = {
        np.int32: ctypes.c_int,
        np.float32: ctypes.c_float,
        np.int64: ctypes.c_int64,
        np.float64: ctypes.c_double,
    }
    return mapper[dtype]
def numpy_to_mpitypes(dtype):
    """Map numpy types to MPI datatypes."""
    mapper = {
        np.int32: 'MPI_INT',
        np.float32: 'MPI_FLOAT',
        np.int64: 'MPI_LONG',
        np.float64: 'MPI_DOUBLE',
    }
    return mapper[dtype]
def numpy_view_offsets(array, base=None):
"""
Retrieve the offset of a view from its base array along each dimension and side.
:param array: A :class:`numpy.ndarray`.
:param base: The base of ``array``. Most of the times the ``base`` is available
through ``array.base``. However, if this function is to be called
within ``__array_finalize__``, where ``base`` hasn't been set yet,
the ``base`` has to be provided explicitly
"""
if not isinstance(array, np.ndarray):
raise TypeError("Expected a `numpy.ndarray`, got `%s`" % type(array))
if array.base is None:
if base is None:
raise ValueError("Cannot access ``array``'s base.")
else:
base = array.base
start_byte_distance = np.byte_bounds(array)[0] - np.byte_bounds(base)[0]
start_elem_distance = start_byte_distance // array.itemsize
assert start_byte_distance % array.itemsize == 0
end_byte_distance = np.byte_bounds(array)[1] - np.byte_bounds(base)[0]
end_elem_distance = (end_byte_distance // array.itemsize) - 1
assert end_byte_distance % array.itemsize == 0
offsets = []
for i, s in enumerate(base.shape):
hyperplane_size = prod(base.shape[i+1:])
# Start
lofs = start_elem_distance // hyperplane_size
start_elem_distance -= lofs*hyperplane_size
# End
rofs = end_elem_distance // hyperplane_size
end_elem_distance -= rofs*hyperplane_size
offsets.append((lofs, s-rofs-1))
return tuple(offsets)
| [
"devito.tools.utils.prod",
"numpy.byte_bounds"
] | [((1848, 1872), 'devito.tools.utils.prod', 'prod', (['base.shape[i + 1:]'], {}), '(base.shape[i + 1:])\n', (1852, 1872), False, 'from devito.tools.utils import prod\n'), ((1404, 1425), 'numpy.byte_bounds', 'np.byte_bounds', (['array'], {}), '(array)\n', (1418, 1425), True, 'import numpy as np\n'), ((1431, 1451), 'numpy.byte_bounds', 'np.byte_bounds', (['base'], {}), '(base)\n', (1445, 1451), True, 'import numpy as np\n'), ((1597, 1618), 'numpy.byte_bounds', 'np.byte_bounds', (['array'], {}), '(array)\n', (1611, 1618), True, 'import numpy as np\n'), ((1624, 1644), 'numpy.byte_bounds', 'np.byte_bounds', (['base'], {}), '(base)\n', (1638, 1644), True, 'import numpy as np\n')] |
from __future__ import division
import matplotlib.pyplot as plt
import numpy
from . import deck
# create a deck
d = deck.deck()
balanced = []
points = []
num = int(1e4)
steps = num // 10
for i in range(num):
if (i+1) % steps == 0:
print("%d of %d" % (i+1, num))
d.shuffle(7)
d.cut()
h1, h2, h3, h4 = d.deal()
balanced.append([h1.balanced,
h2.balanced,
h3.balanced,
h4.balanced])
points.append([h1.hc_points,
h2.hc_points,
h3.hc_points,
h4.hc_points])
balanced = zip(*balanced)
fig=plt.figure()
ax=fig.add_subplot(111)
ax.hist(balanced[0])
ax.set_title('%d hands' % (num))
ax.set_ylabel('Number of hands')
ax.set_xticks([0., 1.])
ax.set_xticklabels(['Unbalanced', 'balanced'])
1/0
# scatter, if I am balanced is my partner?
# these need ot be counted !!!
cnt = numpy.histogram2d(balanced[0], balanced[2])[0]
fig=plt.figure()
ax=fig.add_subplot(111)
ax.scatter(balanced[0], balanced[2])
ax.set_title('%d hands' % (num))
ax.set_ylabel('Number of hands')
ax.set_xticks([0., 1.])
ax.set_xticklabels(['Unbalanced', 'balanced'])
ax.set_yticks([0., 1.])
ax.set_yticklabels(['Unbalanced', 'balanced'])
1/0
points = numpy.array(points)
fig=plt.figure()
ax=fig.add_subplot(111)
ax.hist(points.flat, range(points.max()-points.min()), normed=True, cumulative=False)
ax.set_title('%d hands' % (num))
ax.set_ylabel('Fractional occurance')
ax.set_xlabel('High card points')
ax.axvline(13, lw=2, color='r')
plt.draw()
fig=plt.figure()
ax=fig.add_subplot(111)
ax.hist(points.flat, range(max(points)-min(points)), normed=True, cumulative=True)
ax.set_title('%d hands' % (num))
ax.set_ylabel('Cumulative occurance')
ax.set_xlabel('High card points')
ax.axvline(13, lw=2, color='r')
plt.draw()
| [
"numpy.histogram2d",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw"
] | [((640, 652), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (650, 652), True, 'import matplotlib.pyplot as plt\n'), ((973, 985), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (983, 985), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1290), 'numpy.array', 'numpy.array', (['points'], {}), '(points)\n', (1282, 1290), False, 'import numpy\n'), ((1296, 1308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1306, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1556, 1566), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1564, 1566), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1584, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1841), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1839, 1841), True, 'import matplotlib.pyplot as plt\n'), ((921, 964), 'numpy.histogram2d', 'numpy.histogram2d', (['balanced[0]', 'balanced[2]'], {}), '(balanced[0], balanced[2])\n', (938, 964), False, 'import numpy\n')] |
from bokeh.plotting import figure
from bokeh.io import output_file, show,export_png
import numpy as np
from scipy.stats import norm
def h(x):
    """P(S|+) as a function of population incidence x (see figure title)."""
    return (750 * x) / (745 * x + 5)
# Plot P(S|+) over a range of incidence rates and mark a few example points.
F = figure(title='P(S|+) as a function of population incidence if 25% false negatives and .5% false positives', toolbar_location=None)
xs = np.linspace(0, 0.2, 100)
F.line(xs, h(xs))
marks = [0.01, 0.03, 0.05, 0.1]
F.circle(marks, [h(m) for m in marks])
export_png(F, filename='../img/covidfn.png')
| [
"bokeh.io.export_png",
"numpy.linspace",
"bokeh.plotting.figure"
] | [((175, 315), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""P(S|+) as a function of population incidence if 25% false negatives and .5% false positives"""', 'toolbar_location': 'None'}), "(title=\n 'P(S|+) as a function of population incidence if 25% false negatives and .5% false positives'\n , toolbar_location=None)\n", (181, 315), False, 'from bokeh.plotting import figure\n'), ((307, 331), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(100)'], {}), '(0, 0.2, 100)\n', (318, 331), True, 'import numpy as np\n'), ((413, 457), 'bokeh.io.export_png', 'export_png', (['F'], {'filename': '"""../img/covidfn.png"""'}), "(F, filename='../img/covidfn.png')\n", (423, 457), False, 'from bokeh.io import output_file, show, export_png\n')] |
''' Implementation of GPHMC - Gaussian Process HMC '''
import numpy
from sklearn.gaussian_process import GaussianProcessRegressor
from pypuffin.decorators import accepts
from pypuffin.numeric.mcmc.base import MCMCBase
from pypuffin.sklearn.gaussian_process import gradient_of_mean, gradient_of_std
from pypuffin.types import Callable
# TODO need different f_construct_hmc methods for exploratory and sampling phases.
# TODO still not implementing all the heuristics for exploration in Rasumussen's paper
class GPHMC(MCMCBase):
    ''' An object to perform GPHMC sampling. Takes as arguments:

        f_target_log_prob: A callable mapping position x -> log of the target distribution.
        regressor: A (non-trained) GaussianProcessRegressor instance, with appropriate kernel etc. This
            will be used to approximate f_target_log_prob.
        f_construct_hmc: A callable to construct an HMC sampler that takes the signature
            (x_0, f_potential, f_grad_potential)
        x_start: Position from which to start GP sampling
    '''

    @accepts(object, Callable, GaussianProcessRegressor, Callable, numpy.ndarray)
    def __init__(self, f_target_log_prob, regressor, f_construct_hmc, x_start):
        self._f_target_log_prob = f_target_log_prob
        self._regressor = regressor
        self._f_construct_hmc = f_construct_hmc
        self._x_start = x_start

        # Record training data for GP regressor; y values are from f_target_log_prob
        self._x_train = []
        self._y_train = []

        # The HMC sampler for using once training is complete.
        self._hmc_sampler = None

    @property
    def _started_sampling(self):
        ''' Return True iff we have started sampling from the non-training distribution '''
        return self._hmc_sampler is not None

    @property
    def dim(self):
        ''' The dimension of the sampling space '''
        return self._x_start.shape[0]

    def _fit_gp(self):
        ''' Perform fitting of the regressor to the current training data, taking into account the empirical mean of
            the training points.

            This follows the procedure given in Rasmussen GPHMC paper, section 4.
        '''
        x_train_array = numpy.asarray(self._x_train)
        y_train_array = numpy.asarray(self._y_train)
        mean = numpy.mean(y_train_array, axis=0)
        # Fit on mean-subtracted targets; predict_gp adds the mean back.
        return self._regressor.fit(x_train_array, y_train_array - mean)

    def predict_gp(self, x, return_std=False):  # pylint: disable=invalid-name
        ''' Perform one or more predictions with the GP regression model, taking into account the training data mean
            which was used to train the current regressor.
        '''
        y_train_array = numpy.asarray(self._y_train)
        mean = numpy.mean(y_train_array, axis=0)

        single_prediction = False
        if len(x.shape) == 1:
            # If it looks like we have been passed a single x value, reshape appropriately
            x = x[numpy.newaxis, :]
            single_prediction = True

        if return_std:
            mus, stds = self._regressor.predict(x, return_std=True)
            if single_prediction:
                return mus[0] + mean, stds[0]
            return mus + mean, stds

        mus = self._regressor.predict(x, return_std=False)
        if single_prediction:
            return mus[0] + mean
        return mus + mean

    def sample_explore(self):
        ''' Perform an exploratory sample. This will perform HMC under the prior distribution, sample a point, and
            update the trained distribution accordingly. The sampled location will be returned.

            This should only be done before real sampling begins.
        '''
        if self._started_sampling:
            raise RuntimeError("Training should only be done prior to beginning real sampling!")

        if not self._x_train:
            # Our very first sampling point will be to evaluate x_start
            self._x_train.append(self._x_start)
            self._y_train.append(self._f_target_log_prob(self._x_start))
            x_1 = self._x_start
        else:
            # Our sampling is going to be for (mean - std), as per Rasmussen. Remember that our potential is -log(prob),
            # and our regressor is fitted to log(prob).
            # Since we want to sample single points, there's a bit of disgustingness in reshaping x
            # from (n_dim,) to (1, n_dim), and then only looking at the first element of the result.
            def f_potential(x):  # pylint: disable=invalid-name
                ''' (mu - std) '''
                mu, std = self.predict_gp(x, return_std=True)
                return -(mu - std)

            def f_grad_potential(x):  # pylint: disable=invalid-name
                ''' Gradient of (mu - std) '''
                reshaped_x = x[numpy.newaxis, :]
                mean_grad = gradient_of_mean(self._regressor, reshaped_x)[0]
                std_grad = gradient_of_std(self._regressor, reshaped_x)[0]
                return -(mean_grad - std_grad)

            # Construct a new sampler based on these operations, and take one sample.
            x_0 = self._x_train[-1]
            x_1 = self._f_construct_hmc(x_0, f_potential, f_grad_potential).sample()

            # Evaluate the target function. If we have proposed the same point again, there is no point in evaluating
            # the function, since it's expensive and we already know the answer. Moreover, adding duplicate points
            # to the training data seems to simply upset the GP, so don't do it.
            # BUG FIX: `x_1 != x_0` on numpy arrays is element-wise; using it in `if`
            # raises "truth value of an array is ambiguous" for dim > 1. Compare the
            # whole arrays instead.
            if not numpy.array_equal(x_1, x_0):
                y_1 = self._f_target_log_prob(x_1)
                self._x_train.append(x_1)
                self._y_train.append(y_1)

        # Refit the distribution using all training data, and return our current location
        self._fit_gp()
        return x_1

    def sample(self):
        ''' Draw a new sample '''
        if not self._started_sampling:
            # We are now entering the sampling phase - construct an HMC sampler with our most up-to-date view of the
            # Gaussian Process. This sampler will be re-used henceforth.
            f_potential = lambda x: -self._regressor.predict(x[numpy.newaxis, :])[0]
            f_grad_potential = lambda x: -gradient_of_mean(self._regressor, x[numpy.newaxis, :])[0]
            self._hmc_sampler = self._f_construct_hmc(self._x_start, f_potential, f_grad_potential)

        return self._hmc_sampler.sample()
| [
"numpy.mean",
"pypuffin.sklearn.gaussian_process.gradient_of_mean",
"pypuffin.sklearn.gaussian_process.gradient_of_std",
"numpy.asarray",
"pypuffin.decorators.accepts"
] | [((1140, 1216), 'pypuffin.decorators.accepts', 'accepts', (['object', 'Callable', 'GaussianProcessRegressor', 'Callable', 'numpy.ndarray'], {}), '(object, Callable, GaussianProcessRegressor, Callable, numpy.ndarray)\n', (1147, 1216), False, 'from pypuffin.decorators import accepts\n'), ((2303, 2331), 'numpy.asarray', 'numpy.asarray', (['self._x_train'], {}), '(self._x_train)\n', (2316, 2331), False, 'import numpy\n'), ((2356, 2384), 'numpy.asarray', 'numpy.asarray', (['self._y_train'], {}), '(self._y_train)\n', (2369, 2384), False, 'import numpy\n'), ((2400, 2433), 'numpy.mean', 'numpy.mean', (['y_train_array'], {'axis': '(0)'}), '(y_train_array, axis=0)\n', (2410, 2433), False, 'import numpy\n'), ((2798, 2826), 'numpy.asarray', 'numpy.asarray', (['self._y_train'], {}), '(self._y_train)\n', (2811, 2826), False, 'import numpy\n'), ((2842, 2875), 'numpy.mean', 'numpy.mean', (['y_train_array'], {'axis': '(0)'}), '(y_train_array, axis=0)\n', (2852, 2875), False, 'import numpy\n'), ((4951, 4996), 'pypuffin.sklearn.gaussian_process.gradient_of_mean', 'gradient_of_mean', (['self._regressor', 'reshaped_x'], {}), '(self._regressor, reshaped_x)\n', (4967, 4996), False, 'from pypuffin.sklearn.gaussian_process import gradient_of_mean, gradient_of_std\n'), ((5027, 5071), 'pypuffin.sklearn.gaussian_process.gradient_of_std', 'gradient_of_std', (['self._regressor', 'reshaped_x'], {}), '(self._regressor, reshaped_x)\n', (5042, 5071), False, 'from pypuffin.sklearn.gaussian_process import gradient_of_mean, gradient_of_std\n'), ((6353, 6407), 'pypuffin.sklearn.gaussian_process.gradient_of_mean', 'gradient_of_mean', (['self._regressor', 'x[numpy.newaxis, :]'], {}), '(self._regressor, x[numpy.newaxis, :])\n', (6369, 6407), False, 'from pypuffin.sklearn.gaussian_process import gradient_of_mean, gradient_of_std\n')] |
from tensorflow import keras
from pathlib import Path
import numpy as np
from training.image_adapter import ImageAdapter
import cv2
from training.model.model_creator import define_composite_model
class ModelSerializer:
    """Persists and restores the four CycleGAN sub-models, rebuilds the two
    composite models on load, and writes side-by-side control images."""

    model_names = ['d_model_A', "d_model_B", "g_model_AtoB", "g_model_BtoA"]
    base_path = './training/generated_models'
    image_shape = (256, 256, 3)

    def serialize(self, models, dataset_name, key):
        # serializes the model and it's associated metadata into a file named file_name
        # these generated_models are going to be stored into the generated_models folder
        target_dir = self.construct_folder_path(dataset_name, key)
        Path(target_dir).mkdir(parents=True, exist_ok=True)
        for name in self.model_names:
            models[name].save(target_dir + f'{name}.h5')

    def deserialize(self, dataset_name, key):
        # Load the four stored networks, then rebuild the (untrained-state)
        # composite models from them.
        models = {name: keras.models.load_model(f"{self.base_path}/{dataset_name}/{key}/{name}.h5")
                  for name in self.model_names}
        models['c_model_AtoB'] = define_composite_model(
            models['g_model_AtoB'],
            models['d_model_B'],
            models['g_model_BtoA'],
            self.image_shape)
        models['c_model_BtoA'] = define_composite_model(
            models['g_model_BtoA'],
            models['d_model_A'],
            models['g_model_AtoB'],
            self.image_shape)
        return models

    def deserialize_specific_network(self, dataset_name, key, network_name):
        return keras.models.load_model(f"{self.base_path}/{dataset_name}/{key}/{network_name}.h5")

    def serialize_control_images(self, generator_to_fake, generator_to_real, normalized_images, dataset_name, key):
        # Write one JPEG per input: [original | fake | round-trip reconstruction].
        adapter = ImageAdapter()
        control_dir = self.construct_folder_path(dataset_name, key) + "control_images/"
        Path(control_dir).mkdir(parents=True, exist_ok=True)
        normalized_fakes = generator_to_fake.predict(normalized_images)
        normalized_generated_reals = generator_to_real.predict(normalized_fakes)
        reals = adapter.to_image(normalized_images)
        fakes = adapter.to_image(normalized_fakes)
        reconstructed = adapter.to_image(normalized_generated_reals)
        for index, (real, fake, generated_real) in enumerate(zip(reals, fakes, reconstructed), start=1):
            strip = np.hstack((real, fake, generated_real))
            cv2.imwrite(f'{control_dir}{index}.jpg', strip)

    def construct_folder_path(self, dataset_name, key):
        return f'{self.base_path}/{dataset_name}/{key}/'
"cv2.imwrite",
"numpy.hstack",
"training.image_adapter.ImageAdapter",
"pathlib.Path",
"tensorflow.keras.models.load_model",
"training.model.model_creator.define_composite_model"
] | [((1214, 1328), 'training.model.model_creator.define_composite_model', 'define_composite_model', (["models['g_model_AtoB']", "models['d_model_B']", "models['g_model_BtoA']", 'self.image_shape'], {}), "(models['g_model_AtoB'], models['d_model_B'], models[\n 'g_model_BtoA'], self.image_shape)\n", (1236, 1328), False, 'from training.model.model_creator import define_composite_model\n'), ((1407, 1521), 'training.model.model_creator.define_composite_model', 'define_composite_model', (["models['g_model_BtoA']", "models['d_model_A']", "models['g_model_AtoB']", 'self.image_shape'], {}), "(models['g_model_BtoA'], models['d_model_A'], models[\n 'g_model_AtoB'], self.image_shape)\n", (1429, 1521), False, 'from training.model.model_creator import define_composite_model\n'), ((1757, 1786), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['path'], {}), '(path)\n', (1780, 1786), False, 'from tensorflow import keras\n'), ((1949, 1963), 'training.image_adapter.ImageAdapter', 'ImageAdapter', ([], {}), '()\n', (1961, 1963), False, 'from training.image_adapter import ImageAdapter\n'), ((1117, 1146), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['path'], {}), '(path)\n', (1140, 1146), False, 'from tensorflow import keras\n'), ((2711, 2750), 'numpy.hstack', 'np.hstack', (['(real, fake, generated_real)'], {}), '((real, fake, generated_real))\n', (2720, 2750), True, 'import numpy as np\n'), ((2816, 2846), 'cv2.imwrite', 'cv2.imwrite', (['image_path', 'image'], {}), '(image_path, image)\n', (2827, 2846), False, 'import cv2\n'), ((683, 700), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (687, 700), False, 'from pathlib import Path\n'), ((2094, 2111), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2098, 2111), False, 'from pathlib import Path\n')] |
# implementing RNN and LSTM
# %%
import pandas as pd
import numpy as np
import nltk
import sklearn
import matplotlib.pyplot as plt
import re
import tqdm
# Load the tweet dataset; missing cells become the string '0'.
twitter_df = pd.read_csv('twitter_train.csv')
twitter_df = twitter_df.fillna('0')
twitter_df_test = pd.read_csv('twitter_test.csv')
twitter_df_test = twitter_df_test.fillna('0')
# The free-form 'location' column is dropped up front.
twitter_df = twitter_df.drop('location', axis = 1)
import json
"""with open('contractions.json', "w") as f:
    json.dump(contractions, f)"""
# Abbreviation -> expansion mapping used later by text_clean.
with open('abbrevations.json') as f:
    abbrevation = json.load(f)
# importing required libraries for RNN
import tensorflow as tf;
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
# nltk.download('wordnet')
print("Tensorflow version", tf.__version__)
# preprocessing include lemmatizing, stemming, stopwords removal
# tokenizing, pad_sequences
# %%
# Shared NLTK helpers used by the cleaning utilities below.
lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words('english')
stemmer = PorterStemmer()
from nltk import TweetTokenizer
wt = TweetTokenizer()
# Cleaning means to extract the useful information from the text data and removing those
# data that does not contribute to the LSTM and RNN learnings.
# The clean_text function lower cases the input text, removes the tags and mentions
# expands the contractions, can deal with emojis, non alphabets.
# It also removes the stop words and Lemmatizes the word to its root word.
def text_clean(text, abbrevations=abbrevation, stemmer=False):
    """Normalize a tweet: lower-case, expand abbreviations, strip URLs,
    mentions, punctuation, digits and emoji, remove stop words, and
    lemmatize (or stem, when a stemmer object is supplied).

    :param text: raw tweet text
    :param abbrevations: mapping of abbreviation -> expansion
    :param stemmer: falsy to lemmatize (default); otherwise an object with a
                    ``stem(word)`` method (e.g. ``PorterStemmer()``)
    """
    # lower casing
    text = text.lower()
    for word in text.split():  # use the abbrevations dictionary to replace
        if word in abbrevations.keys():
            # NOTE(review): str.replace matches substrings, so an abbreviation
            # occurring inside a longer word is expanded too -- confirm intended.
            text = text.replace(word, abbrevations[word])
    # removing URL, tags, non-alphabets, new-lines.
    # BUG FIX: the character-class patterns are now raw strings; the previous
    # '[^\\w\\s]' / '\\d' literals were invalid escape sequences (SyntaxWarning
    # on Python 3.12+), even though they happened to produce the same pattern.
    text = re.sub(r'(https?://\S+|www\.\S+)', '', text)  # removing http links
    text = re.sub(r'@([a-zA-Z0-9:_]+)\s', '', text)  # removing the hash tags and mentions
    text = re.sub(r'[^\w\s]', ' ', text)  # remove anything except words and spaces.
    text = re.sub(r'\d', ' ', text)  # removing digits
    text = re.sub('\n', ' ', text)  # new line phrase
    # Lemmatising and removing Stop_words
    extended_stop_words_re = stop_words + ['&', 'rt', 'th', 'co', 're', 've', 'kim', 'daca', 'p.m.', 'retweet', 'ir']
    if not stemmer:
        text = ' '.join(lemmatizer.lemmatize(word) for word in text.split() if word not in extended_stop_words_re)
    else:
        text = ' '.join(stemmer.stem(word) for word in text.split() if word not in extended_stop_words_re)
    # Strip emoji and pictographs (applied after stop-word removal, as before).
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    text = emoji_pattern.sub(r'', text)
    return text
# further cleaning is needed.
# tokenization of the cleaned text.
cleaned1 = lambda text: text_clean(text)
# Drops short (<= 3 letter) words surrounded by spaces.
# NOTE(review): the pattern is not a raw string; it works, but r'\s[a-z]{,3}\s'
# would avoid invalid-escape warnings on newer Pythons.
cleaned2 = lambda text: re.sub('\s[a-z]{,3}\s', ' ', text)
cleaned3 = lambda row: wt.tokenize(row) # tokenizing the row
# escented characters
# expanding contractions with the words in the contractions dictionary
# stemming, lemmatizing
# negated words can be used to get the sentiment
# extra spaces shouls also be removed
# %%
# Cleaning the input text with help of utility functions
twitter_df['cleaned_text'] = twitter_df['text'].apply(cleaned1)
twitter_df['cleaned_text_'] = twitter_df['cleaned_text'].apply(cleaned2)
twitter_df['tokenized_text'] = twitter_df['cleaned_text_'].apply(cleaned3)
twitter_df_test['cleaned_text'] = twitter_df_test['text'].apply(cleaned1)
twitter_df_test['cleaned_text_'] = twitter_df_test['cleaned_text'].apply(cleaned2)
twitter_df_test['tokenized_text'] = twitter_df_test['cleaned_text_'].apply(cleaned3)
# twitter_df[['id','text','cleaned_text','target']].iloc[50:110]
# NOTE(review): the empty-list assignment below is dead code -- it is
# immediately overwritten by the apply() on the next line.
length = []
length = twitter_df.cleaned_text.apply(lambda x: len(x.split()))
# Notebook-style inspection; the observed maximum motivates max_length below.
np.max(length)
train_text = twitter_df.cleaned_text
train_label = twitter_df.target
test_text = twitter_df_test.cleaned_text
# Tokenizer/padding configuration shared by train and test sequences.
oov_tok = '<oov>'
padding_type = 'post'
trun_type = 'post'
max_length = 25
# %%
# Fit the tokenizer on the training text only (top 10000 words; everything
# else maps to the out-of-vocabulary token).
tokenizer = Tokenizer(num_words = 10000, oov_token = oov_tok)
tokenizer.fit_on_texts(train_text)
word_index = tokenizer.word_index
train_sequences = tokenizer.texts_to_sequences(train_text)
print("train sequences sample", train_sequences[10])
test_sequences = tokenizer.texts_to_sequences(test_text)
print("test sequences sample", test_sequences[1])
# Pad/truncate every sequence to max_length tokens (post-padding).
train_padded = pad_sequences(train_sequences, maxlen=max_length, padding=padding_type, truncating=trun_type)
test_padded = pad_sequences(test_sequences, maxlen=max_length, padding=padding_type, truncating=trun_type)
# +1 because index 0 is reserved for padding.
vocab_size = len(word_index) + 1
#
label_tokenizer = Tokenizer()
# %%
from gensim.models.word2vec import Word2Vec
embedding_index = {}
# for text in twitter_df:
# embedding = model.infer_vector(text)
# %%
# utility function to return the string of a list of numbers.
# this will be the input to the tokenizer methods
def return_str(text):
    """Return a copy of the Series with every element converted to ``str``
    (keras Tokenizer methods expect text, not numeric labels)."""
    return text.apply(str)
label_tokenizer.fit_on_texts(return_str(twitter_df.target))
train_label_seq = np.array(label_tokenizer.texts_to_sequences(return_str(train_label)))
print(train_label_seq.shape)
# NOTE(review): the tokenized label sequence above is discarded here -- the
# model is ultimately trained on the raw 0/1 targets reshaped to a column.
train_label_seq = train_label.values.reshape(-1,1)
# trying to explore the original tweet and tweet after padding
reverse_word_index = dict([(index, word) for word, index in word_index.items()])
def decode_article(text):
    """Map a padded sequence of token ids back to words ('?' for unknown ids)."""
    words = (reverse_word_index.get(token_id, '?') for token_id in text)
    return ' '.join(words)
print(decode_article(train_padded[10]))
print('-----------------')
print(train_text.iloc[10])
# %%
from gensim.models.word2vec import Word2Vec
vector_size = 100
# Train word2vec on the tokenized tweets to seed the embedding layer.
wv = Word2Vec(sentences=twitter_df.tokenized_text,size = vector_size, iter = 50)
from collections import Counter
# Row ``index`` of the matrix holds the word2vec vector for the token with
# that tokenizer index; rows for words unseen by word2vec stay all-zero.
embedding_matrix = np.zeros((vocab_size, vector_size))
# word_index.pop("''")
for word, index in word_index.items():
    try:
        embedding_vector = wv[word]
        if embedding_vector is not None:
            embedding_matrix[index] = embedding_vector
    # BUG FIX: the previous bare ``except:`` also swallowed KeyboardInterrupt
    # and SystemExit; only out-of-vocabulary lookups should be ignored.
    except KeyError:
        pass
# %%
embedding_dim = 100
# building the Model Architecture
from tensorflow.keras import layers # importing Dense, Bidirectional, LSTM, Dropout, Embedding
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam, SGD, Nadam


# Word Embeddings are the inputs to the neural networks.
def create_embeddings(vocab_size, embedding_dim,):
    """Embedding layer initialized from the pre-computed word2vec matrix."""
    return layers.Embedding(vocab_size, embedding_dim, embeddings_initializer='GlorotNormal', weights = [embedding_matrix])


# Utility function to get fully connected dense Layers
def create_dense(in_dim = embedding_dim, activ_in = 'relu'):
    """Fully-connected layer with ``in_dim`` units and the given activation."""
    return layers.Dense(in_dim, activation = activ_in, use_bias = True)


# Create LSTM layers
def get_LSTM(embedding_dim, Bi_directional = True, dropout=0.2):
    """LSTM layer, optionally wrapped in a Bidirectional wrapper.

    BUG FIX: the unidirectional branch referenced an undefined name ``LSTM``
    (a NameError at call time); it now uses ``layers.LSTM`` like the
    bidirectional branch.
    """
    if Bi_directional:
        layer = layers.Bidirectional(layers.LSTM(embedding_dim, recurrent_dropout = 0.3, dropout=dropout))
    else:
        layer = layers.LSTM(embedding_dim, recurrent_dropout = 0.3, dropout=dropout)
    return layer


# Dropout mitigates overfitting by randomly disabling units during training.
def drop_out(dropout_rate = 0.2):
    """Dropout layer with the given rate."""
    return layers.Dropout(dropout_rate)
def get_seq_model(in_dim = embedding_dim,
                  out_class=2,
                  optimizers_ = 'sgd',
                  learning_rate = 0.000083,
                  activ_out = 'softmax'):
    """Assemble and compile the Bi-LSTM sentiment classifier.

    Stack: embedding -> dropout -> Bi-LSTM -> (dense -> dropout) x 2
    -> softmax output, compiled with sparse categorical cross-entropy.
    """
    # frees up GPU memory everytime the code is run fresh
    tf.keras.backend.clear_session()

    stack = [
        create_embeddings(vocab_size, embedding_dim),
        drop_out(0.3),
        get_LSTM(embedding_dim, dropout = 0.3),
        create_dense(embedding_dim),
        drop_out(0.3),
        create_dense(embedding_dim),
        drop_out(0.3),
        layers.Dense(out_class, activation = 'softmax', use_bias = True),
    ]
    model = tf.keras.Sequential(stack)

    # Select the optimizer by name; unknown names fall back to Nadam.
    chosen = optimizers_.lower()
    if chosen == "adam":
        opt = Adam(lr=learning_rate)
    elif chosen == "sgd":
        opt = SGD(lr=learning_rate)
    else:
        opt = Nadam(lr=learning_rate)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
# %%
# creating the model
num_epochs=30
model = get_seq_model(learning_rate=0.01)
# printing model summary to console
print(model.summary())
# callbacks stop the traiing after predefined patience
# NOTE(review): early_stopping is constructed but the callbacks argument in
# model.fit below is commented out, so it currently has no effect.
early_stopping = EarlyStopping(monitor='val_accuracy', patience=2)
# training the model
result = model.fit(train_padded, train_label_seq,
          epochs=num_epochs,
          # callbacks = [early_stopping],
          verbose=2, validation_split=0.2
          )
def plot_graphs(history, string, save = False):
    """Plot a training metric and its validation counterpart over epochs.

    Parameters
    ----------
    history : keras History object (the return value of model.fit).
    string : str
        Metric name, e.g. "accuracy" or "loss".
    save : bool
        When True, also write the figure to disk as "Bi-LSTM <metric>".
    """
    # BUG FIX: the body previously read the module-level global `result`
    # instead of the `history` argument, so passing any other history
    # object had no effect.
    plt.plot(history.history[string])
    plt.plot(history.history['val_'+string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_'+string])
    title = "Bi-LSTM " + string
    if save:
        plt.savefig(title)
    plt.show()
# visualise the training curves for both tracked metrics
plot_graphs(result, "accuracy")
plot_graphs(result, "loss")
# %%
import csv
# ids of the test tweets; used as the key column of the submission file
id_1 = twitter_df_test.id
def save_pred(model, id_ = id_1, name_ = "name_1.csv", vectors_ = test_padded):
    """Predict classes for `vectors_` and write an (id, target) CSV file.

    Parameters
    ----------
    model : fitted Keras model exposing predict_classes.
    id_ : iterable of row ids (defaults to the test-set ids).
    name_ : str, output CSV file name (written to the current folder).
    vectors_ : padded input sequences to predict on.
    """
    predict = model.predict_classes(vectors_)
    # checking if the predictions line up with the ids
    # (generalised from the hard-coded 3263 test-set rows)
    assert len(predict) == len(id_)
    # the file is saved in the current folder
    with open(name_, 'w', newline='\n') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'target'])
        # BUG FIX: the loop variable used to shadow the `id_` parameter,
        # which worked only by accident (zip evaluates its argument first).
        for row_id, target in zip(id_, predict):
            writer.writerow([row_id, target])
# write the Bi-LSTM test-set predictions to a submission CSV
save_pred(model, id_=id_1,name_='rnn_bilstm_1.csv')
# %%
| [
"pandas.read_csv",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"re.compile",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"gensim.models.word2vec.Word2Vec",
"nltk.TweetTokenizer",
"nltk.corpus.stopwords.words",
"tensorflow.ke... | [((168, 200), 'pandas.read_csv', 'pd.read_csv', (['"""twitter_train.csv"""'], {}), "('twitter_train.csv')\n", (179, 200), True, 'import pandas as pd\n'), ((256, 287), 'pandas.read_csv', 'pd.read_csv', (['"""twitter_test.csv"""'], {}), "('twitter_test.csv')\n", (267, 287), True, 'import pandas as pd\n'), ((1030, 1049), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1047, 1049), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1063, 1089), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1078, 1089), False, 'from nltk.corpus import stopwords\n'), ((1100, 1115), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (1113, 1115), False, 'from nltk.stem import PorterStemmer\n'), ((1153, 1169), 'nltk.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (1167, 1169), False, 'from nltk import TweetTokenizer\n'), ((4559, 4573), 'numpy.max', 'np.max', (['length'], {}), '(length)\n', (4565, 4573), True, 'import numpy as np\n'), ((4783, 4828), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': '(10000)', 'oov_token': 'oov_tok'}), '(num_words=10000, oov_token=oov_tok)\n', (4792, 4828), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((5140, 5237), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trun_type'}), '(train_sequences, maxlen=max_length, padding=padding_type,\n truncating=trun_type)\n', (5153, 5237), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5248, 5344), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': 'max_length', 'padding': 'padding_type', 'truncating': 'trun_type'}), '(test_sequences, maxlen=max_length, padding=padding_type,\n truncating=trun_type)\n', (5261, 5344), False, 'from 
tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5395, 5406), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (5404, 5406), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((6374, 6446), 'gensim.models.word2vec.Word2Vec', 'Word2Vec', ([], {'sentences': 'twitter_df.tokenized_text', 'size': 'vector_size', 'iter': '(50)'}), '(sentences=twitter_df.tokenized_text, size=vector_size, iter=50)\n', (6382, 6446), False, 'from gensim.models.word2vec import Word2Vec\n'), ((6504, 6539), 'numpy.zeros', 'np.zeros', (['(vocab_size, vector_size)'], {}), '((vocab_size, vector_size))\n', (6512, 6539), True, 'import numpy as np\n'), ((9701, 9750), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(2)'}), "(monitor='val_accuracy', patience=2)\n", (9714, 9750), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((533, 545), 'json.load', 'json.load', (['f'], {}), '(f)\n', (542, 545), False, 'import json\n'), ((1940, 1986), 're.sub', 're.sub', (['"""(https?://\\\\S+|www\\\\.\\\\S+)"""', '""""""', 'text'], {}), "('(https?://\\\\S+|www\\\\.\\\\S+)', '', text)\n", (1946, 1986), False, 'import re\n'), ((2022, 2062), 're.sub', 're.sub', (['"""@([a-zA-Z0-9:_]+)\\\\s"""', '""""""', 'text'], {}), "('@([a-zA-Z0-9:_]+)\\\\s', '', text)\n", (2028, 2062), False, 'import re\n'), ((2125, 2155), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""" """', 'text'], {}), "('[^\\\\w\\\\s]', ' ', text)\n", (2131, 2155), False, 'import re\n'), ((2228, 2252), 're.sub', 're.sub', (['"""\\\\d"""', '""" """', 'text'], {}), "('\\\\d', ' ', text)\n", (2234, 2252), False, 'import re\n'), ((2406, 2429), 're.sub', 're.sub', (['"""\n"""', '""" """', 'text'], {}), "('\\n', ' ', text)\n", (2412, 2429), False, 'import re\n'), ((2920, 2991), 're.compile', 're.compile', (['"""[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰Ⓜ-🉑]+"""'], {'flags': 're.UNICODE'}), 
"('[😀-🙏🌀-🗿🚀-\\U0001f6ff\\U0001f1e0-🇿✂-➰Ⓜ-🉑]+', flags=re.UNICODE)\n", (2930, 2991), False, 'import re\n'), ((3574, 3610), 're.sub', 're.sub', (['"""\\\\s[a-z]{,3}\\\\s"""', '""" """', 'text'], {}), "('\\\\s[a-z]{,3}\\\\s', ' ', text)\n", (3580, 3610), False, 'import re\n'), ((7155, 7270), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', (['vocab_size', 'embedding_dim'], {'embeddings_initializer': '"""GlorotNormal"""', 'weights': '[embedding_matrix]'}), "(vocab_size, embedding_dim, embeddings_initializer=\n 'GlorotNormal', weights=[embedding_matrix])\n", (7171, 7270), False, 'from tensorflow.keras import layers\n'), ((7396, 7452), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['in_dim'], {'activation': 'activ_in', 'use_bias': '(True)'}), '(in_dim, activation=activ_in, use_bias=True)\n', (7408, 7452), False, 'from tensorflow.keras import layers\n'), ((8031, 8059), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (8045, 8059), False, 'from tensorflow.keras import layers\n'), ((8350, 8382), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (8380, 8382), True, 'import tensorflow as tf\n'), ((10059, 10091), 'matplotlib.pyplot.plot', 'plt.plot', (['result.history[string]'], {}), '(result.history[string])\n', (10067, 10091), True, 'import matplotlib.pyplot as plt\n'), ((10096, 10137), 'matplotlib.pyplot.plot', 'plt.plot', (["result.history['val_' + string]"], {}), "(result.history['val_' + string])\n", (10104, 10137), True, 'import matplotlib.pyplot as plt\n'), ((10140, 10160), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (10150, 10160), True, 'import matplotlib.pyplot as plt\n'), ((10165, 10183), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['string'], {}), '(string)\n', (10175, 10183), True, 'import matplotlib.pyplot as plt\n'), ((10188, 10225), 'matplotlib.pyplot.legend', 'plt.legend', (["[string, 'val_' + string]"], {}), "([string, 
'val_' + string])\n", (10198, 10225), True, 'import matplotlib.pyplot as plt\n'), ((10300, 10310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10308, 10310), True, 'import matplotlib.pyplot as plt\n'), ((9194, 9216), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (9198, 9216), False, 'from tensorflow.keras.optimizers import Adam, SGD, Nadam\n'), ((10277, 10295), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {}), '(title)\n', (10288, 10295), True, 'import matplotlib.pyplot as plt\n'), ((10750, 10763), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (10760, 10763), False, 'import csv\n'), ((7604, 7670), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['embedding_dim'], {'recurrent_dropout': '(0.3)', 'dropout': 'dropout'}), '(embedding_dim, recurrent_dropout=0.3, dropout=dropout)\n', (7615, 7670), False, 'from tensorflow.keras import layers\n'), ((9022, 9082), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['out_class'], {'activation': '"""softmax"""', 'use_bias': '(True)'}), "(out_class, activation='softmax', use_bias=True)\n", (9034, 9082), False, 'from tensorflow.keras import layers\n'), ((9270, 9291), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (9273, 9291), False, 'from tensorflow.keras.optimizers import Adam, SGD, Nadam\n'), ((9316, 9339), 'tensorflow.keras.optimizers.Nadam', 'Nadam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (9321, 9339), False, 'from tensorflow.keras.optimizers import Adam, SGD, Nadam\n')] |
import numpy as np
import pandas as pd
from itertools import islice
import multiprocessing
from multiprocessing.pool import ThreadPool, Pool
N_CPUS = multiprocessing.cpu_count()
def batch_generator(iterable, n=1):
    """Yield successive chunks of `iterable`, each of at most `n` items.

    Sized inputs (anything with __len__) are sliced, so chunks keep the
    input's own type; iterators/generators are consumed via islice and
    yielded as lists. Anything else raises ValueError.
    """
    if hasattr(iterable, '__len__'):
        # sliceable path: step through the sequence n items at a time
        total = len(iterable)
        for start in range(0, total, n):
            yield iterable[start:min(start + n, total)]
    elif hasattr(iterable, '__next__'):
        # iterator path: pull at most n items per chunk until exhausted
        it = iter(iterable)
        while True:
            chunk = list(islice(it, n))
            if not chunk:
                break
            yield chunk
    else:
        raise ValueError('Iterable is not iterable?')
def map_batches_multiproc(func, iterable, chunksize, multiproc_mode='threads',
                          n_threads=None, threads_per_cpu=1.0):
    """Apply `func` to fixed-size batches of `iterable` using a worker pool.

    The pool kind is chosen by `multiproc_mode` (threads or processes);
    worker count defaults to threads_per_cpu * N_CPUS.
    Returns the list of per-batch results.
    """
    workers = int(threads_per_cpu * N_CPUS) if n_threads is None else n_threads
    # short-circuit: a sized input no larger than one chunk needs no pool
    if hasattr(iterable, '__len__') and len(iterable) <= chunksize:
        return [func(iterable)]
    pool_cls = pool_type(multiproc_mode)
    with pool_cls(workers) as pool:
        return list(pool.imap(func, batch_generator(iterable, n=chunksize)))
def pool_type(parallelism_type):
    """Map a parallelism-mode string to a pool class.

    Any string containing 'process' selects multiprocessing.Pool (checked
    first); any string containing 'thread' selects ThreadPool; everything
    else raises ValueError. Matching is case-insensitive.
    """
    kind = parallelism_type.lower()
    if 'process' in kind:
        return Pool
    if 'thread' in kind:
        return ThreadPool
    raise ValueError('Unsupported value for "parallelism_type"')
def parallelize_dataframe(df, func, n_partitions=N_CPUS, parallelism_type='process'):
    """Split `df` row-wise, map `func` over the pieces in parallel, re-concat.

    `func` must accept and return a DataFrame. The pool kind follows
    `parallelism_type` ('process' or 'thread').
    """
    pieces = np.array_split(df, n_partitions)
    with pool_type(parallelism_type)(n_partitions) as pool:
        mapped = pool.map(func, pieces)
    # sort=False keeps the original column order of the pieces
    return pd.concat(mapped, sort=False)
def parallelize_array(arr, func, n_partitions=N_CPUS, parallelism_type='process'):
    """Split `arr`, map `func` over the pieces in parallel, re-concatenate.

    `func` must accept and return an ndarray. The pool kind follows
    `parallelism_type` ('process' or 'thread').
    """
    pieces = np.array_split(arr, n_partitions)
    with pool_type(parallelism_type)(n_partitions) as pool:
        mapped = pool.map(func, pieces)
    return np.concatenate(mapped)
| [
"itertools.islice",
"multiprocessing.cpu_count",
"numpy.array_split",
"numpy.concatenate",
"pandas.concat"
] | [((151, 178), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (176, 178), False, 'import multiprocessing\n'), ((1786, 1818), 'numpy.array_split', 'np.array_split', (['df', 'n_partitions'], {}), '(df, n_partitions)\n', (1800, 1818), True, 'import numpy as np\n'), ((1927, 1953), 'pandas.concat', 'pd.concat', (['res'], {'sort': '(False)'}), '(res, sort=False)\n', (1936, 1953), True, 'import pandas as pd\n'), ((2191, 2224), 'numpy.array_split', 'np.array_split', (['arr', 'n_partitions'], {}), '(arr, n_partitions)\n', (2205, 2224), True, 'import numpy as np\n'), ((2335, 2354), 'numpy.concatenate', 'np.concatenate', (['res'], {}), '(res)\n', (2349, 2354), True, 'import numpy as np\n'), ((671, 683), 'itertools.islice', 'islice', (['i', 'n'], {}), '(i, n)\n', (677, 683), False, 'from itertools import islice\n'), ((755, 767), 'itertools.islice', 'islice', (['i', 'n'], {}), '(i, n)\n', (761, 767), False, 'from itertools import islice\n')] |
from __future__ import absolute_import, print_function, division
import warnings
import numpy as np
import astropy.units as u
__all__ = ["_get_x_in_wavenumbers", "_test_valid_x_range"]
def _get_x_in_wavenumbers(in_x):
    """
    Convert input x to wavenumbers given x has units.
    Otherwise, assume x is in wavenumbers and issue a warning to this effect.

    Parameters
    ----------
    in_x : astropy.quantity or simple floats
        x values

    Returns
    -------
    x : floats
        input x values in wavenumbers w/o units
    """
    # promote a scalar input to a 1D array
    x_arr = np.atleast_1d(in_x)

    # no attached units -> warn that inverse microns will be assumed
    if not isinstance(x_arr, u.Quantity):
        warnings.warn(
            "x has no units, assuming x units are inverse microns", UserWarning
        )

    # convert to wavenumbers (1/micron) when units are present, then strip
    # the units so the polynomial coefficients can stay unitless
    with u.add_enabled_equivalencies(u.spectral()):
        wavenumbers = u.Quantity(x_arr, 1.0 / u.micron, dtype=np.float64)
    return wavenumbers.value
def _test_valid_x_range(x, x_range, outname):
"""
Test if any of the x values are outside of the valid range
Parameters
----------
x : float array
wavenumbers in inverse microns
x_range: 2 floats
allowed min/max of x
outname: str
name of curve for error message
"""
if np.logical_or(np.any(x < x_range[0]), np.any(x > x_range[1])):
raise ValueError(
"Input x outside of range defined for "
+ outname
+ " ["
+ str(x_range[0])
+ " <= x <= "
+ str(x_range[1])
+ ", x has units 1/micron]"
)
| [
"numpy.any",
"astropy.units.spectral",
"warnings.warn",
"astropy.units.Quantity",
"numpy.atleast_1d"
] | [((606, 625), 'numpy.atleast_1d', 'np.atleast_1d', (['in_x'], {}), '(in_x)\n', (619, 625), True, 'import numpy as np\n'), ((743, 829), 'warnings.warn', 'warnings.warn', (['"""x has no units, assuming x units are inverse microns"""', 'UserWarning'], {}), "('x has no units, assuming x units are inverse microns',\n UserWarning)\n", (756, 829), False, 'import warnings\n'), ((1037, 1087), 'astropy.units.Quantity', 'u.Quantity', (['in_x', '(1.0 / u.micron)'], {'dtype': 'np.float64'}), '(in_x, 1.0 / u.micron, dtype=np.float64)\n', (1047, 1087), True, 'import astropy.units as u\n'), ((1559, 1581), 'numpy.any', 'np.any', (['(x < x_range[0])'], {}), '(x < x_range[0])\n', (1565, 1581), True, 'import numpy as np\n'), ((1583, 1605), 'numpy.any', 'np.any', (['(x > x_range[1])'], {}), '(x > x_range[1])\n', (1589, 1605), True, 'import numpy as np\n'), ((1004, 1016), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (1014, 1016), True, 'import astropy.units as u\n')] |
"""
Specific Models
===============
"""
##########################################
# Introduction
# ^^^^^^^^^^^^
# From the algorithm presented in “`ABESS algorithm: details <https://abess.readthedocs.io/en/latest/auto_gallery/1-glm/plot_a2_abess_algorithm_details.html>`__”,
# one of the bottlenecks in the algorithm is the computation of forward and backward sacrifices,
# which requires conducting iterative algorithms or frequently visiting :math:`p` variables.
# To improve computational efficiency,
# we designed specialized strategies for computing forward and backward sacrifices for different models.
# The specialized strategies are roughly divided into two classes: (i) covariance update for (multivariate) linear model;
# (ii) quasi Newton iteration for non-linear model (e.g., logistic regression).
# We are going to specify the two strategies as follows.
#
# Covariance update
# ^^^^^^^^^^^^^^^^^
# Under the linear model, the core bottleneck is computing sacrifices, e.g. the forward sacrifices,
#
# .. math:: \zeta_{j}=\mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}^{\mathcal{A}}}\right)-\mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}}^{\mathcal{A}}+\hat{t}^{\{j\}}\right)=\frac{X_{j}^{\top} X_{j}}{2 n}\left(\frac{\hat{\boldsymbol d}_{j}}{X_{j}^{\top} X_{j} / n}\right)^{2}.
#
# where
# :math:`\hat{t}=\arg \min _{t} \mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}}^{\mathcal{A}}+t^{\{j\}}\right), \hat{\boldsymbol d}_{j}=X_{j}^{\top}(y-X \hat{\boldsymbol{\beta}}) / n`.
# Intuitively, for :math:`j \in \mathcal{A}` (or
# :math:`j \in \mathcal{I}` ), a large :math:`\xi_{j}` (or
# :math:`\zeta_{j}`) implies the :math:`j` th variable is potentially
# important.
#
# It would take a lot of time on calculating :math:`X^T_jy`, :math:`X^T_jX_j` and its inverse.
# To speed up, it is actually no need to recompute these items at each splicing process.
# Instead, they can be stored when first calculated, which is what we call
# "covariance update".
#
# It is easy to enable this feature with an additional argument
# ``covariance_update=True`` for linear model, for example:
import numpy as np
from time import time
from abess.linear import LinearRegression
from abess.datasets import make_glm_data
# fix the seed so the timing comparison runs on identical data
np.random.seed(1)
# simulated regression data: n=10000 samples, p=100 features, 10 of them active
data = make_glm_data(n=10000, p=100, k=10, family='gaussian')
model1 = LinearRegression()
model2 = LinearRegression(covariance_update=True)
# time the fit without covariance update
t1 = time()
model1.fit(data.x, data.y)
t1 = time() - t1
# time the fit with covariance update enabled
t2 = time()
model2.fit(data.x, data.y)
t2 = time() - t2
print(f"No covariance update: {t1}")
print(f"Covariance update: {t2}")
# both settings should select exactly the same coefficients
print(f"Same answer? {(model1.coef_==model2.coef_).all()}")
# %%
# We can see that covariance update improve computation
# when sample size :math:`n` is much larger than dimension :math:`p`.
#
# However, we have to point out that covariance update will cause higher memory usage, especially when :math:`p` is large.
# So, we recommend to enable covariance update for fast computation when sample size is much larger than dimension
# and dimension is moderate (:math:`p \leq 2000`).
# %%
# Quasi Newton iteration
# ^^^^^^^^^^^^^^^^^^^^^^
# In the third step in `Algorithm 2 <https://abess.readthedocs.io/en/latest/auto_gallery/1-glm/plot_a2_abess_algorithm_details.html#algorithm-2-splicing-left-boldsymbol-beta-d-mathcal-a-mathcal-i-k-max-tau-s-right>`__
# , we need to solve a convex optimization problem:
#
# .. math::
# \tilde{\beta} = \arg\min_{\text{supp}(\beta) = \tilde{\mathcal{A}} } l_n(\beta ).
#
#
# But generally, it has no closed-form solution, and has to be solved via iterative algorithm.
# A natural method for solving this problem is the Newton method, i.e.,
# conducting the update:
#
# .. math::
# \beta_{\tilde{\mathcal{A}} }^{m+1} \leftarrow \boldsymbol \beta_{\tilde{\mathcal{A}} }^m - \Big( \left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big)^{-1} \Big( \left.\frac{\partial l_n( \boldsymbol \beta )}{ \partial \boldsymbol \beta_{\tilde{\mathcal{A}}} }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big),
#
#
# until :math:`\| \beta_{\tilde{\mathcal{A}} }^{m+1} - \beta_{\tilde{\mathcal{A}} }^{m}\|_2 \leq \epsilon` or :math:`m \geq k`,
# where :math:`\epsilon, k` are two user-specific parameters.
# Generally, setting :math:`\epsilon = 10^{-6}` and :math:`k = 80` achieves desirable estimation.
# Generally, the inverse of second derivative is computationally intensive, and thus,
# we approximate it with its diagonalized version. Then, the update formulate changes to:
#
# .. math::
# \beta_{\tilde{\mathcal{A}} }^{m+1} \leftarrow \boldsymbol \beta_{\tilde{\mathcal{A}} }^m - \rho D \Big( \left.\frac{\partial l_n( \boldsymbol \beta )}{ \partial \boldsymbol \beta_{\tilde{\mathcal{A}}} }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big),
#
#
# where :math:`D = \textup{diag}( (\left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}_{1}}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} )^{-1}, \ldots, (\left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}}_{|A|}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} )^{-1})`
# and :math:`\rho` is the step size.
# Although using the approximation may increase the iteration time,
# it avoids a large computational complexity when computing the matrix inversion.
# Furthermore, we use a heuristic strategy to reduce the iteration time.
# Observing that a new support obtained by exchanging elements between the
# active set and the inactive set may not reduce the loss function,
# we can early stop the Newton iteration on such supports.
# Specifically, suppose :math:`l_1 = L({\beta}^{m}), l_2 = L({\beta}^{m+1})`;
# if :math:`l_1 - (k - m - 1) \times (l_2 - l_1) > L - \tau`,
# then we can expect that the new support cannot lead to a better loss after :math:`k` iterations,
# and hence, it is no need to conduct the remaining :math:`k - m - 1` times Newton update.
# This heuristic strategy is motivated by the fact that the convergence rate of the Newton method is at least linear.
# |image0|
#
# To enable this feature, you can simply give an additional argument ``approximate_Newton=True``.
# The :math:`\epsilon` and :math:`k` we mentioned before, can be set with ``primary_model_fit_epsilon``
# and ``primary_model_fit_max_iter``, respectively. For example:
import numpy as np
from time import time
from abess.linear import LogisticRegression
from abess.datasets import make_glm_data
# fix the seed so the timing comparison runs on identical data
np.random.seed(1)
# simulated classification data: n=1000 samples, p=100 features, 10 active
data = make_glm_data(n=1000, p=100, k=10, family='binomial')
model1 = LogisticRegression()
# approximate (diagonalized) Newton with loose tolerance and few iterations
model2 = LogisticRegression(approximate_Newton=True,
                            primary_model_fit_epsilon=1e-6,
                            primary_model_fit_max_iter=10)
# time the fit with the exact inner solver
t1 = time()
model1.fit(data.x, data.y)
t1 = time() - t1
# time the fit with the quasi-Newton approximation
t2 = time()
model2.fit(data.x, data.y)
t2 = time() - t2
print(f"No newton: {t1}")
print(f"Newton: {t2}")
# compare the selected supports (non-zero coefficient positions)
print(f"Same answer? {(np.nonzero(model1.coef_)[0]==np.nonzero(model2.coef_)[0]).all()}")
# %%
#
# The ``abess`` R package also supports covariance update and quasi Newton iteration.
# For R tutorial, please view https://abess-team.github.io/abess/articles/v09-fasterSetting.html
#
# .. |image0| image:: ../../Tutorial/figure/convergence_rates.png
| [
"abess.linear.LogisticRegression",
"abess.datasets.make_glm_data",
"numpy.random.seed",
"numpy.nonzero",
"abess.linear.LinearRegression",
"time.time"
] | [((2194, 2211), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2208, 2211), True, 'import numpy as np\n'), ((2219, 2273), 'abess.datasets.make_glm_data', 'make_glm_data', ([], {'n': '(10000)', 'p': '(100)', 'k': '(10)', 'family': '"""gaussian"""'}), "(n=10000, p=100, k=10, family='gaussian')\n", (2232, 2273), False, 'from abess.datasets import make_glm_data\n'), ((2283, 2301), 'abess.linear.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2299, 2301), False, 'from abess.linear import LinearRegression\n'), ((2311, 2351), 'abess.linear.LinearRegression', 'LinearRegression', ([], {'covariance_update': '(True)'}), '(covariance_update=True)\n', (2327, 2351), False, 'from abess.linear import LinearRegression\n'), ((2358, 2364), 'time.time', 'time', ([], {}), '()\n', (2362, 2364), False, 'from time import time\n'), ((2415, 2421), 'time.time', 'time', ([], {}), '()\n', (2419, 2421), False, 'from time import time\n'), ((6542, 6559), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6556, 6559), True, 'import numpy as np\n'), ((6567, 6620), 'abess.datasets.make_glm_data', 'make_glm_data', ([], {'n': '(1000)', 'p': '(100)', 'k': '(10)', 'family': '"""binomial"""'}), "(n=1000, p=100, k=10, family='binomial')\n", (6580, 6620), False, 'from abess.datasets import make_glm_data\n'), ((6630, 6650), 'abess.linear.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6648, 6650), False, 'from abess.linear import LogisticRegression\n'), ((6660, 6771), 'abess.linear.LogisticRegression', 'LogisticRegression', ([], {'approximate_Newton': '(True)', 'primary_model_fit_epsilon': '(1e-06)', 'primary_model_fit_max_iter': '(10)'}), '(approximate_Newton=True, primary_model_fit_epsilon=1e-06,\n primary_model_fit_max_iter=10)\n', (6678, 6771), False, 'from abess.linear import LogisticRegression\n'), ((6829, 6835), 'time.time', 'time', ([], {}), '()\n', (6833, 6835), False, 'from time import time\n'), ((6886, 6892), 'time.time', 'time', ([], {}), 
'()\n', (6890, 6892), False, 'from time import time\n'), ((2397, 2403), 'time.time', 'time', ([], {}), '()\n', (2401, 2403), False, 'from time import time\n'), ((2454, 2460), 'time.time', 'time', ([], {}), '()\n', (2458, 2460), False, 'from time import time\n'), ((6868, 6874), 'time.time', 'time', ([], {}), '()\n', (6872, 6874), False, 'from time import time\n'), ((6925, 6931), 'time.time', 'time', ([], {}), '()\n', (6929, 6931), False, 'from time import time\n'), ((7010, 7034), 'numpy.nonzero', 'np.nonzero', (['model1.coef_'], {}), '(model1.coef_)\n', (7020, 7034), True, 'import numpy as np\n'), ((7039, 7063), 'numpy.nonzero', 'np.nonzero', (['model2.coef_'], {}), '(model2.coef_)\n', (7049, 7063), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author <NAME> <<EMAIL>>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import subprocess
import rospy
import math
import numpy as np
import sys
from geometry_msgs.msg import PoseStamped, Quaternion
from mavros_test_common_uav0 import MavrosTestCommon as UAV0
from mavros_test_common_uav1 import MavrosTestCommon as UAV1
from mavros_test_common_uav2 import MavrosTestCommon as UAV2
from voronoi import Controller
from pymavlink import mavutil
from six.moves import xrange
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
import os
from gazebo_msgs.msg import ModelStates
class MavrosOffboardPosctlTest():
    """
    Tests flying a path in offboard control by sending position setpoints
    via MAVROS for three UAVs pursuing a ground evader.
    For the test to be successful it needs to reach all setpoints in a certain time.
    FIXME: add flight path assertion (needs transformation from ROS frame to NED)
    """
    def __init__(self):
        """Set up setpoint/state holders, MAVROS helpers, ROS pub/sub and the setpoint thread."""
        # target setpoints and acceptance radii (meters) for the three UAVs
        self.pos0 = PoseStamped()
        self.radius0 = 1
        self.pos1 = PoseStamped()
        self.radius1 = 1
        self.pos2 = PoseStamped()
        self.radius2 = 1
        # latest gazebo model states for the evader and the three pursuers
        self.evader = ModelStates()
        self.uav0_state = ModelStates()
        self.uav1_state = ModelStates()
        self.uav2_state = ModelStates()
        # per-vehicle MAVROS test interfaces
        self.uav0 = UAV0()
        self.uav1 = UAV1()
        self.uav2 = UAV2()
        # guards against launching the voronoi controller more than once
        self.voronoi_run_flag = False
        # ROS subscriber
        self.gazebo_model_state_sub = rospy.Subscriber('/gazebo/model_states', ModelStates, self.gazebo_model_state_callback)
        # ROS publishers
        # uav0
        self.pos_setpoint_pub0 = rospy.Publisher('uav0/mavros/setpoint_position/local', PoseStamped, queue_size=1)
        # uav1
        self.pos_setpoint_pub1 = rospy.Publisher('uav1/mavros/setpoint_position/local', PoseStamped, queue_size=1)
        # uav2
        self.pos_setpoint_pub2 = rospy.Publisher('uav2/mavros/setpoint_position/local', PoseStamped, queue_size=1)
        # send setpoints in seperate thread to better prevent failsafe
        self.pos_thread = Thread(target=self.send_pos, args=())
        self.pos_thread.daemon = True
        self.pos_thread.start()
        # self.voronoi_thread = Thread(target=self.voronoi_run, args=())
        # self.voronoi_thread.daemon = True
        # self.voronoi_thread.start()
#
def gazebo_model_state_callback(self, data):
for i in range(len(data.name)):
# rospy.loginfo("data.name = {0}".format(data.name))
if data.name[i] == "husky_alpha":
self.evader.name = data.name[i]
self.evader.pose = data.pose[i]
if data.name[i] == "iris0":
self.uav0_state.name = data.name[i]
self.uav0_state.pose = data.pose[i]
if data.name[i] == "iris1":
self.uav1_state.name = data.name[i]
self.uav1_state.pose = data.pose[i]
if data.name[i] == "iris2":
self.uav2_state.name = data.name[i]
self.uav2_state.pose = data.pose[i]
#
def voronoi_run(self):
rate = rospy.Rate(10) # Hz
while not rospy.is_shutdown():
if self.evader_captured(15) == 1 or self.evader_captured(15) == 2 or self.evader_captured(15) == 3:
if not self.voronoi_run_flag:
self.voronoi_run_flag = True
# subprocess.Popen('python voronoi.py', shell=True, stdout=subprocess.PIPE)
os.system("python voronoi.py")
try:
rate.sleep()
except rospy.ROSInterruptException:
pass
    def send_pos(self):
        """Continuously publish the three position setpoints at 10 Hz.

        Runs in a daemon thread; offboard mode requires a steady setpoint
        stream, otherwise the flight controller triggers its failsafe.
        """
        rate = rospy.Rate(10)  # Hz
        # one header per UAV, each with its own frame id
        self.pos0.header = Header()
        self.pos0.header.frame_id = "base_footprint0"
        self.pos1.header = Header()
        self.pos1.header.frame_id = "base_footprint1"
        self.pos2.header = Header()
        self.pos2.header.frame_id = "base_footprint2"
        while not rospy.is_shutdown():
            # refresh timestamps so the setpoints are not considered stale
            self.pos0.header.stamp = rospy.Time.now()
            self.pos1.header.stamp = rospy.Time.now()
            self.pos2.header.stamp = rospy.Time.now()
            self.pos_setpoint_pub0.publish(self.pos0)
            self.pos_setpoint_pub1.publish(self.pos1)
            self.pos_setpoint_pub2.publish(self.pos2)
            try:  # prevent garbage in console output when thread is killed
                rate.sleep()
            except rospy.ROSInterruptException:
                pass
def is_at_position(self, p0, p1, p2, offset):
"""offset: meters"""
rospy.logdebug(
"current position | x:{0}, y:{1}, z:{2}".format(
self.uav0.local_position.pose.position, self.uav0.local_position.pose.
position, self.uav0.local_position.pose.position))
desired0 = np.array((p0[0], p0[1], p0[2]))
desired1 = np.array((p1[0], p1[1], p1[2]))
desired2 = np.array((p2[0], p2[1], p2[2]))
pos0 = np.array((self.uav0.local_position.pose.position.x,
self.uav0.local_position.pose.position.y,
self.uav0.local_position.pose.position.z))
pos1 = np.array((self.uav1.local_position.pose.position.x,
self.uav1.local_position.pose.position.y,
self.uav1.local_position.pose.position.z))
pos2 = np.array((self.uav2.local_position.pose.position.x,
self.uav2.local_position.pose.position.y,
self.uav2.local_position.pose.position.z))
return (np.linalg.norm(desired0 - pos0) < offset) and (np.linalg.norm(desired1 - pos1) < offset)and(np.linalg.norm(desired2 - pos2) < offset)
def reach_position(self, p0, p1, p2, timeout):
"""timeout(int): seconds"""
# set a position setpoint
self.pos0.pose.position.x = p0[0]
self.pos0.pose.position.y = p0[1]
self.pos0.pose.position.z = p0[2]
self.pos1.pose.position.x = p1[0]
self.pos1.pose.position.y = p1[1]
self.pos1.pose.position.z = p1[2]
self.pos2.pose.position.x = p2[0]
self.pos2.pose.position.y = p2[1]
self.pos2.pose.position.z = p2[2]
# rospy.loginfo("home_position = {0}".format(self.uav0.home_position.position))
# For demo purposes we will lock yaw/heading to north.
yaw_degrees = 0 # North
yaw = math.radians(yaw_degrees)
quaternion = quaternion_from_euler(0, 0, yaw)
self.pos0.pose.orientation = Quaternion(*quaternion)
self.pos1.pose.orientation = Quaternion(*quaternion)
self.pos2.pose.orientation = Quaternion(*quaternion)
# does it reach the position in 'timeout' seconds?
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
reached = False
while True:
if self.is_at_position(p0, p1, p2, self.radius1):
rospy.loginfo("position reached | seconds: of {0}".format(
timeout))
reached = True
break
if self.evader_captured(15) == 1:
self.pos0.pose.position.x = self.evader.pose.position.x -75
self.pos0.pose.position.y = self.evader.pose.position.y -27.5
self.pos0.pose.position.z = 15
if not self.voronoi_run_flag:
self.voronoi_run_flag = True
pid = subprocess.Popen([sys.executable, "voronoi.py"]) # Call subprocess
elif self.evader_captured(15) == 2:
self.pos1.pose.position.x = self.evader.pose.position.x -75
self.pos1.pose.position.y = self.evader.pose.position.y -52.5
self.pos1.pose.position.z = 15
if not self.voronoi_run_flag:
self.voronoi_run_flag = True
pid = subprocess.Popen([sys.executable, "voronoi.py"]) # Call subprocess
elif self.evader_captured(15) == 3:
self.pos2.pose.position.x = self.evader.pose.position.x -75
self.pos2.pose.position.y = self.evader.pose.position.y -77.5
self.pos2.pose.position.z = 15
if not self.voronoi_run_flag:
self.voronoi_run_flag = True
pid = subprocess.Popen([sys.executable, "voronoi.py"]) # Call subprocess
rospy.loginfo(
"attempting to reach position | p1: {0}, p2: {1}, p3: {2} | current position p0: {3}, p1: {4}, p2: {5}".
format(self.pos0.pose.position, self.pos1.pose.position, self.pos2.pose.position, self.uav0.local_position.pose.position,
self.uav1.local_position.pose.position,
self.uav2.local_position.pose.position))
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
#
def evader_captured(self, offset):
pos0 = np.array((self.uav0_state.pose.position.x, self.uav0_state.pose.position.y))
pos1 = np.array((self.uav1_state.pose.position.x, self.uav1_state.pose.position.y))
pos2 = np.array((self.uav2_state.pose.position.x, self.uav2_state.pose.position.y))
pos_evader = np.array((self.evader.pose.position.x, self.evader.pose.position.y))
if np.linalg.norm(pos0 - pos_evader) < offset:
rospy.loginfo("evader captured by uav0!!!")
return 1
elif np.linalg.norm(pos1 - pos_evader) < offset:
rospy.loginfo("evader captured by uav1!!!")
return 2
elif np.linalg.norm(pos2 - pos_evader) < offset:
rospy.loginfo("evader captured by uav2!!!")
return 3
else:
return 0
#
def test_posctl(self):
self.uav0.wait_for_topics(60)
self.uav1.wait_for_topics(60)
self.uav2.wait_for_topics(60)
self.uav0.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)
self.uav1.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)
self.uav2.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)
self.uav0.log_topic_vars()
self.uav1.log_topic_vars()
self.uav2.log_topic_vars()
self.uav0.set_mode("OFFBOARD", 5)
self.uav1.set_mode("OFFBOARD", 5)
self.uav2.set_mode("OFFBOARD", 5)
self.uav0.set_arm(True, 5)
self.uav1.set_arm(True, 5)
self.uav2.set_arm(True, 5)
rospy.loginfo("run mission")
positions0 = ((0, 0, -0.5), (0, 0, 20), (-75, 0, 26), (-75, 0, 0))
positions1 = ((0, 0, -0.3), (0, 0, 20), (-75, 0, 26), (-75, 0, 0))
positions2 = ((0, 0, -0.1), (0, 0, 20), (-75, 0, 26), (-75, 0, 0))
for i in xrange(len(positions0)):
self.reach_position(positions0[i], positions1[i],positions2[i], 30)
self.uav0.set_mode("AUTO.LAND", 5)
self.uav1.set_mode("AUTO.LAND", 5)
self.uav2.set_mode("AUTO.LAND", 5)
self.uav0.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
45, 0)
self.uav0.set_arm(False, 5)
if __name__ == '__main__':
    # rostest is kept for the (currently disabled) rosrun entry point below.
    import rostest
    rospy.init_node('test_node', anonymous=True)
    # rostest.rosrun(PKG, 'mavros_offboard_posctl_test', MavrosOffboardPosctlTest)
    mission = MavrosOffboardPosctlTest()
    mission.test_posctl()
| [
"mavros_test_common_uav0.MavrosTestCommon",
"rospy.init_node",
"gazebo_msgs.msg.ModelStates",
"numpy.array",
"rospy.Rate",
"numpy.linalg.norm",
"subprocess.Popen",
"geometry_msgs.msg.Quaternion",
"os.system",
"rospy.Subscriber",
"mavros_test_common_uav1.MavrosTestCommon",
"math.radians",
"ro... | [((13152, 13196), 'rospy.init_node', 'rospy.init_node', (['"""test_node"""'], {'anonymous': '(True)'}), "('test_node', anonymous=True)\n", (13167, 13196), False, 'import rospy\n'), ((2850, 2863), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2861, 2863), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((2909, 2922), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2920, 2922), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((2968, 2981), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2979, 2981), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((3030, 3043), 'gazebo_msgs.msg.ModelStates', 'ModelStates', ([], {}), '()\n', (3041, 3043), False, 'from gazebo_msgs.msg import ModelStates\n'), ((3070, 3083), 'gazebo_msgs.msg.ModelStates', 'ModelStates', ([], {}), '()\n', (3081, 3083), False, 'from gazebo_msgs.msg import ModelStates\n'), ((3110, 3123), 'gazebo_msgs.msg.ModelStates', 'ModelStates', ([], {}), '()\n', (3121, 3123), False, 'from gazebo_msgs.msg import ModelStates\n'), ((3150, 3163), 'gazebo_msgs.msg.ModelStates', 'ModelStates', ([], {}), '()\n', (3161, 3163), False, 'from gazebo_msgs.msg import ModelStates\n'), ((3185, 3191), 'mavros_test_common_uav0.MavrosTestCommon', 'UAV0', ([], {}), '()\n', (3189, 3191), True, 'from mavros_test_common_uav0 import MavrosTestCommon as UAV0\n'), ((3212, 3218), 'mavros_test_common_uav1.MavrosTestCommon', 'UAV1', ([], {}), '()\n', (3216, 3218), True, 'from mavros_test_common_uav1 import MavrosTestCommon as UAV1\n'), ((3239, 3245), 'mavros_test_common_uav2.MavrosTestCommon', 'UAV2', ([], {}), '()\n', (3243, 3245), True, 'from mavros_test_common_uav2 import MavrosTestCommon as UAV2\n'), ((3346, 3438), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/gazebo/model_states"""', 'ModelStates', 'self.gazebo_model_state_callback'], {}), "('/gazebo/model_states', ModelStates, self.\n gazebo_model_state_callback)\n", 
(3362, 3438), False, 'import rospy\n'), ((3505, 3590), 'rospy.Publisher', 'rospy.Publisher', (['"""uav0/mavros/setpoint_position/local"""', 'PoseStamped'], {'queue_size': '(1)'}), "('uav0/mavros/setpoint_position/local', PoseStamped,\n queue_size=1)\n", (3520, 3590), False, 'import rospy\n'), ((3635, 3720), 'rospy.Publisher', 'rospy.Publisher', (['"""uav1/mavros/setpoint_position/local"""', 'PoseStamped'], {'queue_size': '(1)'}), "('uav1/mavros/setpoint_position/local', PoseStamped,\n queue_size=1)\n", (3650, 3720), False, 'import rospy\n'), ((3765, 3850), 'rospy.Publisher', 'rospy.Publisher', (['"""uav2/mavros/setpoint_position/local"""', 'PoseStamped'], {'queue_size': '(1)'}), "('uav2/mavros/setpoint_position/local', PoseStamped,\n queue_size=1)\n", (3780, 3850), False, 'import rospy\n'), ((3945, 3982), 'threading.Thread', 'Thread', ([], {'target': 'self.send_pos', 'args': '()'}), '(target=self.send_pos, args=())\n', (3951, 3982), False, 'from threading import Thread\n'), ((4992, 5006), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (5002, 5006), False, 'import rospy\n'), ((5561, 5575), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (5571, 5575), False, 'import rospy\n'), ((5610, 5618), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (5616, 5618), False, 'from std_msgs.msg import Header\n'), ((5701, 5709), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (5707, 5709), False, 'from std_msgs.msg import Header\n'), ((5792, 5800), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (5798, 5800), False, 'from std_msgs.msg import Header\n'), ((6731, 6762), 'numpy.array', 'np.array', (['(p0[0], p0[1], p0[2])'], {}), '((p0[0], p0[1], p0[2]))\n', (6739, 6762), True, 'import numpy as np\n'), ((6782, 6813), 'numpy.array', 'np.array', (['(p1[0], p1[1], p1[2])'], {}), '((p1[0], p1[1], p1[2]))\n', (6790, 6813), True, 'import numpy as np\n'), ((6833, 6864), 'numpy.array', 'np.array', (['(p2[0], p2[1], p2[2])'], {}), '((p2[0], p2[1], p2[2]))\n', (6841, 6864), 
True, 'import numpy as np\n'), ((6880, 7021), 'numpy.array', 'np.array', (['(self.uav0.local_position.pose.position.x, self.uav0.local_position.pose.\n position.y, self.uav0.local_position.pose.position.z)'], {}), '((self.uav0.local_position.pose.position.x, self.uav0.\n local_position.pose.position.y, self.uav0.local_position.pose.position.z))\n', (6888, 7021), True, 'import numpy as np\n'), ((7080, 7221), 'numpy.array', 'np.array', (['(self.uav1.local_position.pose.position.x, self.uav1.local_position.pose.\n position.y, self.uav1.local_position.pose.position.z)'], {}), '((self.uav1.local_position.pose.position.x, self.uav1.\n local_position.pose.position.y, self.uav1.local_position.pose.position.z))\n', (7088, 7221), True, 'import numpy as np\n'), ((7280, 7421), 'numpy.array', 'np.array', (['(self.uav2.local_position.pose.position.x, self.uav2.local_position.pose.\n position.y, self.uav2.local_position.pose.position.z)'], {}), '((self.uav2.local_position.pose.position.x, self.uav2.\n local_position.pose.position.y, self.uav2.local_position.pose.position.z))\n', (7288, 7421), True, 'import numpy as np\n'), ((8316, 8341), 'math.radians', 'math.radians', (['yaw_degrees'], {}), '(yaw_degrees)\n', (8328, 8341), False, 'import math\n'), ((8363, 8395), 'tf.transformations.quaternion_from_euler', 'quaternion_from_euler', (['(0)', '(0)', 'yaw'], {}), '(0, 0, yaw)\n', (8384, 8395), False, 'from tf.transformations import quaternion_from_euler\n'), ((8433, 8456), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['*quaternion'], {}), '(*quaternion)\n', (8443, 8456), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((8494, 8517), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['*quaternion'], {}), '(*quaternion)\n', (8504, 8517), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((8555, 8578), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['*quaternion'], {}), '(*quaternion)\n', (8565, 8578), False, 'from geometry_msgs.msg import PoseStamped, 
Quaternion\n'), ((8682, 8703), 'rospy.Rate', 'rospy.Rate', (['loop_freq'], {}), '(loop_freq)\n', (8692, 8703), False, 'import rospy\n'), ((10869, 10945), 'numpy.array', 'np.array', (['(self.uav0_state.pose.position.x, self.uav0_state.pose.position.y)'], {}), '((self.uav0_state.pose.position.x, self.uav0_state.pose.position.y))\n', (10877, 10945), True, 'import numpy as np\n'), ((10961, 11037), 'numpy.array', 'np.array', (['(self.uav1_state.pose.position.x, self.uav1_state.pose.position.y)'], {}), '((self.uav1_state.pose.position.x, self.uav1_state.pose.position.y))\n', (10969, 11037), True, 'import numpy as np\n'), ((11053, 11129), 'numpy.array', 'np.array', (['(self.uav2_state.pose.position.x, self.uav2_state.pose.position.y)'], {}), '((self.uav2_state.pose.position.x, self.uav2_state.pose.position.y))\n', (11061, 11129), True, 'import numpy as np\n'), ((11151, 11219), 'numpy.array', 'np.array', (['(self.evader.pose.position.x, self.evader.pose.position.y)'], {}), '((self.evader.pose.position.x, self.evader.pose.position.y))\n', (11159, 11219), True, 'import numpy as np\n'), ((12428, 12456), 'rospy.loginfo', 'rospy.loginfo', (['"""run mission"""'], {}), "('run mission')\n", (12441, 12456), False, 'import rospy\n'), ((5031, 5050), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5048, 5050), False, 'import rospy\n'), ((5873, 5892), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5890, 5892), False, 'import rospy\n'), ((5931, 5947), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5945, 5947), False, 'import rospy\n'), ((5985, 6001), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5999, 6001), False, 'import rospy\n'), ((6039, 6055), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (6053, 6055), False, 'import rospy\n'), ((11232, 11265), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos0 - pos_evader)'], {}), '(pos0 - pos_evader)\n', (11246, 11265), True, 'import numpy as np\n'), ((11288, 11331), 'rospy.loginfo', 
'rospy.loginfo', (['"""evader captured by uav0!!!"""'], {}), "('evader captured by uav0!!!')\n", (11301, 11331), False, 'import rospy\n'), ((7481, 7512), 'numpy.linalg.norm', 'np.linalg.norm', (['(desired0 - pos0)'], {}), '(desired0 - pos0)\n', (7495, 7512), True, 'import numpy as np\n'), ((7528, 7559), 'numpy.linalg.norm', 'np.linalg.norm', (['(desired1 - pos1)'], {}), '(desired1 - pos1)\n', (7542, 7559), True, 'import numpy as np\n'), ((7573, 7604), 'numpy.linalg.norm', 'np.linalg.norm', (['(desired2 - pos2)'], {}), '(desired2 - pos2)\n', (7587, 7604), True, 'import numpy as np\n'), ((11366, 11399), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos1 - pos_evader)'], {}), '(pos1 - pos_evader)\n', (11380, 11399), True, 'import numpy as np\n'), ((11422, 11465), 'rospy.loginfo', 'rospy.loginfo', (['"""evader captured by uav1!!!"""'], {}), "('evader captured by uav1!!!')\n", (11435, 11465), False, 'import rospy\n'), ((5375, 5405), 'os.system', 'os.system', (['"""python voronoi.py"""'], {}), "('python voronoi.py')\n", (5384, 5405), False, 'import os\n'), ((9336, 9384), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'voronoi.py']"], {}), "([sys.executable, 'voronoi.py'])\n", (9352, 9384), False, 'import subprocess\n'), ((11500, 11533), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos2 - pos_evader)'], {}), '(pos2 - pos_evader)\n', (11514, 11533), True, 'import numpy as np\n'), ((11556, 11599), 'rospy.loginfo', 'rospy.loginfo', (['"""evader captured by uav2!!!"""'], {}), "('evader captured by uav2!!!')\n", (11569, 11599), False, 'import rospy\n'), ((9775, 9823), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'voronoi.py']"], {}), "([sys.executable, 'voronoi.py'])\n", (9791, 9823), False, 'import subprocess\n'), ((10213, 10261), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'voronoi.py']"], {}), "([sys.executable, 'voronoi.py'])\n", (10229, 10261), False, 'import subprocess\n')] |
# Copyright 2020 <NAME>, University of Pittsburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for bounding boxes, box are [ymin, xmin, ymax, xmax]. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
_EPSILON = 1e-10
def flip_left_right(box):
  """Mirrors boxes horizontally (about x = 1).
  Args:
    box: A [i1,...,iN, 4] float tensor.
  Returns:
    flipped_box: A [i1,...,iN, 4] float tensor.
  """
  y0, x0, y1, x1 = tf.unstack(box, axis=-1)
  flipped_xmin = 1.0 - x1
  flipped_xmax = 1.0 - x0
  return tf.stack([y0, flipped_xmin, y1, flipped_xmax], axis=-1)
def center(box):
  """Computes the box center.
  Args:
    box: A [i1,...,iN, 4] float tensor.
  Returns:
    center: Box centers, a [i1,...,iN, 2] tensor denoting [ycenter, xcenter].
  """
  y0, x0, y1, x1 = tf.unstack(box, axis=-1)
  return tf.stack([(y0 + y1) / 2, (x0 + x1) / 2], axis=-1)
def size(box):
  """Computes the box size.
  Args:
    box: A [i1,...,iN, 4] float tensor.
  Returns:
    size: Box sizes, a [i1,...,iN, 2] tensor denoting [height, width].
  """
  y0, x0, y1, x1 = tf.unstack(box, axis=-1)
  return tf.stack([y1 - y0, x1 - x0], axis=-1)
def area(box):
  """Computes the box area (degenerate boxes contribute zero).
  Args:
    box: A [i1,...,iN, 4] float tensor.
  Returns:
    area: Box areas, a [i1,...,iN] float tensor.
  """
  y0, x0, y1, x1 = tf.unstack(box, axis=-1)
  height = tf.maximum(y1 - y0, 0.0)
  width = tf.maximum(x1 - x0, 0.0)
  return height * width
def intersect(box1, box2):
  """Computes the intersect box (may be degenerate if boxes are disjoint).
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    A [i1,...,iN, 4] float tensor.
  """
  y0_a, x0_a, y1_a, x1_a = tf.unstack(box1, axis=-1)
  y0_b, x0_b, y1_b, x1_b = tf.unstack(box2, axis=-1)
  return tf.stack([tf.maximum(y0_a, y0_b),
                   tf.maximum(x0_a, x0_b),
                   tf.minimum(y1_a, y1_b),
                   tf.minimum(x1_a, x1_b)], axis=-1)
def iou(box1, box2):
  """Computes the IoU between box1 and box2.
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    iou: A [i1,...,iN] float tensor.
  """
  overlap = area(intersect(box1, box2))
  union = area(box1) + area(box2) - overlap
  # _EPSILON guards against division by zero for empty unions.
  return tf.math.divide(overlap, tf.maximum(union, _EPSILON))
def x_intersect_len(box1, box2):
  """Computes the length of the x intersect.
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    A [i1,...,iN] float tensor.
  """
  _, x0_a, _, x1_a = tf.unstack(box1, axis=-1)
  _, x0_b, _, x1_b = tf.unstack(box2, axis=-1)
  overlap = tf.minimum(x1_a, x1_b) - tf.maximum(x0_a, x0_b)
  return tf.maximum(overlap, 0.0)
def y_intersect_len(box1, box2):
  """Computes the length of the y intersect.
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    A [i1,...,iN] float tensor.
  """
  y0_a, _, y1_a, _ = tf.unstack(box1, axis=-1)
  y0_b, _, y1_b, _ = tf.unstack(box2, axis=-1)
  overlap = tf.minimum(y1_a, y1_b) - tf.maximum(y0_a, y0_b)
  return tf.maximum(overlap, 0.0)
def x_distance(box1, box2):
  """Computes the x-distance between the two centers (center1 - center2).
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    distance: A [i1,...,iN] float tensor.
  """
  return center(box1)[..., 1] - center(box2)[..., 1]
def y_distance(box1, box2):
  """Computes the y-distance between the two centers (center1 - center2).
  Args:
    box1: A [i1,...,iN, 4] float tensor.
    box2: A [i1,...,iN, 4] float tensor.
  Returns:
    distance: A [i1,...,iN] float tensor.
  """
  return center(box1)[..., 0] - center(box2)[..., 0]
def py_area(box):
  """Computes the box area (NumPy version; degenerate boxes give zero).
  Args:
    box: A [i1,...,iN, 4] float array.
  Returns:
    area: Box areas, a [i1,...,iN] float array.
  """
  height = np.maximum(box[..., 2] - box[..., 0], 0.0)
  width = np.maximum(box[..., 3] - box[..., 1], 0.0)
  return height * width
def py_intersect(box1, box2):
  """Computes the intersect box (NumPy version).
  Args:
    box1: A [i1,...,iN, 4] float array.
    box2: A [i1,...,iN, 4] float array.
  Returns:
    A [i1,...,iN, 4] float array (degenerate if the boxes are disjoint).
  """
  ymin = np.maximum(box1[..., 0], box2[..., 0])
  xmin = np.maximum(box1[..., 1], box2[..., 1])
  ymax = np.minimum(box1[..., 2], box2[..., 2])
  xmax = np.minimum(box1[..., 3], box2[..., 3])
  return np.stack([ymin, xmin, ymax, xmax], axis=-1)
def py_iou(box1, box2):
  """Computes the IoU between box1 and box2 (NumPy version).
  Args:
    box1: A [i1,...,iN, 4] float array.
    box2: A [i1,...,iN, 4] float array.
  Returns:
    iou: A [i1,...,iN] float array.
  """
  overlap = py_area(py_intersect(box1, box2))
  union = py_area(box1) + py_area(box2) - overlap
  # _EPSILON guards against division by zero for empty unions.
  return overlap / np.maximum(union, _EPSILON)
| [
"tensorflow.unstack",
"numpy.minimum",
"numpy.squeeze",
"numpy.stack",
"numpy.split",
"tensorflow.maximum",
"numpy.maximum",
"tensorflow.minimum",
"tensorflow.stack"
] | [((1133, 1157), 'tensorflow.unstack', 'tf.unstack', (['box'], {'axis': '(-1)'}), '(box, axis=-1)\n', (1143, 1157), True, 'import tensorflow as tf\n'), ((1167, 1222), 'tensorflow.stack', 'tf.stack', (['[ymin, 1.0 - xmax, ymax, 1.0 - xmin]'], {'axis': '(-1)'}), '([ymin, 1.0 - xmax, ymax, 1.0 - xmin], axis=-1)\n', (1175, 1222), True, 'import tensorflow as tf\n'), ((1444, 1468), 'tensorflow.unstack', 'tf.unstack', (['box'], {'axis': '(-1)'}), '(box, axis=-1)\n', (1454, 1468), True, 'import tensorflow as tf\n'), ((1538, 1575), 'tensorflow.stack', 'tf.stack', (['[ycenter, xcenter]'], {'axis': '(-1)'}), '([ycenter, xcenter], axis=-1)\n', (1546, 1575), True, 'import tensorflow as tf\n'), ((1787, 1811), 'tensorflow.unstack', 'tf.unstack', (['box'], {'axis': '(-1)'}), '(box, axis=-1)\n', (1797, 1811), True, 'import tensorflow as tf\n'), ((1866, 1900), 'tensorflow.stack', 'tf.stack', (['[height, width]'], {'axis': '(-1)'}), '([height, width], axis=-1)\n', (1874, 1900), True, 'import tensorflow as tf\n'), ((2090, 2114), 'tensorflow.unstack', 'tf.unstack', (['box'], {'axis': '(-1)'}), '(box, axis=-1)\n', (2100, 2114), True, 'import tensorflow as tf\n'), ((2422, 2447), 'tensorflow.unstack', 'tf.unstack', (['box1'], {'axis': '(-1)'}), '(box1, axis=-1)\n', (2432, 2447), True, 'import tensorflow as tf\n'), ((2479, 2504), 'tensorflow.unstack', 'tf.unstack', (['box2'], {'axis': '(-1)'}), '(box2, axis=-1)\n', (2489, 2504), True, 'import tensorflow as tf\n'), ((2515, 2539), 'tensorflow.maximum', 'tf.maximum', (['ymin1', 'ymin2'], {}), '(ymin1, ymin2)\n', (2525, 2539), True, 'import tensorflow as tf\n'), ((2549, 2573), 'tensorflow.maximum', 'tf.maximum', (['xmin1', 'xmin2'], {}), '(xmin1, xmin2)\n', (2559, 2573), True, 'import tensorflow as tf\n'), ((2583, 2607), 'tensorflow.minimum', 'tf.minimum', (['ymax1', 'ymax2'], {}), '(ymax1, ymax2)\n', (2593, 2607), True, 'import tensorflow as tf\n'), ((2617, 2641), 'tensorflow.minimum', 'tf.minimum', (['xmax1', 'xmax2'], {}), '(xmax1, 
xmax2)\n', (2627, 2641), True, 'import tensorflow as tf\n'), ((2652, 2695), 'tensorflow.stack', 'tf.stack', (['[ymin, xmin, ymax, xmax]'], {'axis': '(-1)'}), '([ymin, xmin, ymax, xmax], axis=-1)\n', (2660, 2695), True, 'import tensorflow as tf\n'), ((3338, 3363), 'tensorflow.unstack', 'tf.unstack', (['box1'], {'axis': '(-1)'}), '(box1, axis=-1)\n', (3348, 3363), True, 'import tensorflow as tf\n'), ((3395, 3420), 'tensorflow.unstack', 'tf.unstack', (['box2'], {'axis': '(-1)'}), '(box2, axis=-1)\n', (3405, 3420), True, 'import tensorflow as tf\n'), ((3431, 3455), 'tensorflow.maximum', 'tf.maximum', (['xmin1', 'xmin2'], {}), '(xmin1, xmin2)\n', (3441, 3455), True, 'import tensorflow as tf\n'), ((3465, 3489), 'tensorflow.minimum', 'tf.minimum', (['xmax1', 'xmax2'], {}), '(xmax1, xmax2)\n', (3475, 3489), True, 'import tensorflow as tf\n'), ((3500, 3528), 'tensorflow.maximum', 'tf.maximum', (['(xmax - xmin)', '(0.0)'], {}), '(xmax - xmin, 0.0)\n', (3510, 3528), True, 'import tensorflow as tf\n'), ((3780, 3805), 'tensorflow.unstack', 'tf.unstack', (['box1'], {'axis': '(-1)'}), '(box1, axis=-1)\n', (3790, 3805), True, 'import tensorflow as tf\n'), ((3837, 3862), 'tensorflow.unstack', 'tf.unstack', (['box2'], {'axis': '(-1)'}), '(box2, axis=-1)\n', (3847, 3862), True, 'import tensorflow as tf\n'), ((3873, 3897), 'tensorflow.maximum', 'tf.maximum', (['ymin1', 'ymin2'], {}), '(ymin1, ymin2)\n', (3883, 3897), True, 'import tensorflow as tf\n'), ((3907, 3931), 'tensorflow.minimum', 'tf.minimum', (['ymax1', 'ymax2'], {}), '(ymax1, ymax2)\n', (3917, 3931), True, 'import tensorflow as tf\n'), ((3942, 3970), 'tensorflow.maximum', 'tf.maximum', (['(ymax - ymin)', '(0.0)'], {}), '(ymax - ymin, 0.0)\n', (3952, 3970), True, 'import tensorflow as tf\n'), ((5407, 5431), 'numpy.maximum', 'np.maximum', (['ymin1', 'ymin2'], {}), '(ymin1, ymin2)\n', (5417, 5431), True, 'import numpy as np\n'), ((5441, 5465), 'numpy.maximum', 'np.maximum', (['xmin1', 'xmin2'], {}), '(xmin1, xmin2)\n', (5451, 
5465), True, 'import numpy as np\n'), ((5475, 5499), 'numpy.minimum', 'np.minimum', (['ymax1', 'ymax2'], {}), '(ymax1, ymax2)\n', (5485, 5499), True, 'import numpy as np\n'), ((5509, 5533), 'numpy.minimum', 'np.minimum', (['xmax1', 'xmax2'], {}), '(xmax1, xmax2)\n', (5519, 5533), True, 'import numpy as np\n'), ((5544, 5587), 'numpy.stack', 'np.stack', (['[ymin, xmin, ymax, xmax]'], {'axis': '(-1)'}), '([ymin, xmin, ymax, xmax], axis=-1)\n', (5552, 5587), True, 'import numpy as np\n'), ((2124, 2152), 'tensorflow.maximum', 'tf.maximum', (['(ymax - ymin)', '(0.0)'], {}), '(ymax - ymin, 0.0)\n', (2134, 2152), True, 'import tensorflow as tf\n'), ((2155, 2183), 'tensorflow.maximum', 'tf.maximum', (['(xmax - xmin)', '(0.0)'], {}), '(xmax - xmin, 0.0)\n', (2165, 2183), True, 'import tensorflow as tf\n'), ((3053, 3085), 'tensorflow.maximum', 'tf.maximum', (['area_union', '_EPSILON'], {}), '(area_union, _EPSILON)\n', (3063, 3085), True, 'import tensorflow as tf\n'), ((4843, 4860), 'numpy.squeeze', 'np.squeeze', (['x', '(-1)'], {}), '(x, -1)\n', (4853, 4860), True, 'import numpy as np\n'), ((4917, 4945), 'numpy.maximum', 'np.maximum', (['(ymax - ymin)', '(0.0)'], {}), '(ymax - ymin, 0.0)\n', (4927, 4945), True, 'import numpy as np\n'), ((4948, 4976), 'numpy.maximum', 'np.maximum', (['(xmax - xmin)', '(0.0)'], {}), '(xmax - xmin, 0.0)\n', (4958, 4976), True, 'import numpy as np\n'), ((5226, 5243), 'numpy.squeeze', 'np.squeeze', (['x', '(-1)'], {}), '(x, -1)\n', (5236, 5243), True, 'import numpy as np\n'), ((5331, 5348), 'numpy.squeeze', 'np.squeeze', (['x', '(-1)'], {}), '(x, -1)\n', (5341, 5348), True, 'import numpy as np\n'), ((5955, 5987), 'numpy.maximum', 'np.maximum', (['area_union', '_EPSILON'], {}), '(area_union, _EPSILON)\n', (5965, 5987), True, 'import numpy as np\n'), ((4870, 4903), 'numpy.split', 'np.split', (['box', '[1, 2, 3]'], {'axis': '(-1)'}), '(box, [1, 2, 3], axis=-1)\n', (4878, 4903), True, 'import numpy as np\n'), ((5253, 5287), 'numpy.split', 'np.split', 
(['box1', '[1, 2, 3]'], {'axis': '(-1)'}), '(box1, [1, 2, 3], axis=-1)\n', (5261, 5287), True, 'import numpy as np\n'), ((5358, 5392), 'numpy.split', 'np.split', (['box2', '[1, 2, 3]'], {'axis': '(-1)'}), '(box2, [1, 2, 3], axis=-1)\n', (5366, 5392), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import c3.experiment
import c3.optimizers.c1
import c3.libraries.fidelities as fid
import os
from c3.optimizers.c1 import C1
import c3.libraries.algorithms as algorithms
import c3.libraries.fidelities as fidelities
import examples.single_qubit_blackbox_exp
def rx_matrix_3(theta):
    """3x3 embedding of an RX-style rotation: mixes the first two levels
    with angle `theta` and leaves the third level untouched.

    Returns a complex-valued 3x3 numpy array.
    """
    c = np.cos(theta)
    s = -1j * np.sin(theta)
    return np.array([[c, s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def plot_dynamics(exp, psi_init, seq, goal=-1):
    """
    Plotting code for time-resolved populations.

    Parameters
    ----------
    exp: c3.experiment.Experiment
        Compiled experiment holding the model and the partial
        propagators (dUs).
    psi_init: tf.Tensor
        Initial state or density matrix.
    seq: list
        List of operations to apply to the initial state.
    goal: tf.float64
        Value of the goal function, if used (kept for interface
        compatibility; not referenced in the body).

    Returns
    -------
    tuple
        (ts, populations): the time axis in seconds and a
        [steps, levels] population array. The figure is also saved
        to 'rabi12_2.png'.
    """
    model = exp.pmap.model
    dUs = exp.dUs
    psi_t = psi_init.numpy()
    pop_t = exp.populations(psi_t, model.lindbladian)
    # Propagate through every partial propagator of every gate, recording
    # the populations after each time slice.
    for gate in seq:
        for du in dUs[gate]:
            psi_t = np.matmul(du.numpy(), psi_t)
            pops = exp.populations(psi_t, model.lindbladian)
            pop_t = np.append(pop_t, pops, axis=1)

    fig, axs = plt.subplots(1, 1)
    # Rebuild the time axis from the simulation grid spacing so it matches
    # the number of recorded population samples.
    dt = exp.ts[1] - exp.ts[0]
    ts = np.linspace(0.0, dt * pop_t.shape[1], pop_t.shape[1])
    axs.plot(ts / 1e-9, pop_t.T)
    axs.grid(linestyle="--")
    axs.tick_params(
        direction="in", left=True, right=True, top=True, bottom=True
    )
    axs.set_xlabel('Time [ns]')
    axs.set_ylabel('Population')
    plt.legend(model.state_labels)
    fig.savefig('rabi12_2.png')
    return ts, pop_t.T
def simulate(exp, psi_init, seq):
    """Propagates `psi_init` through the gate sequence `seq` and records
    populations after every time slice (no plotting).

    Parameters
    ----------
    exp: c3.experiment.Experiment
        Compiled experiment holding the model and the partial
        propagators (dUs).
    psi_init: tf.Tensor
        Initial state or density matrix.
    seq: list
        List of operations to apply to the initial state.

    Returns
    -------
    np.ndarray
        Populations after each time slice. Previously this result was
        computed and silently discarded; returning it is backward
        compatible with callers that ignored the (None) return value.
    """
    model = exp.pmap.model
    dUs = exp.dUs
    psi_t = psi_init.numpy()
    pop_t = exp.populations(psi_t, model.lindbladian)
    for gate in seq:
        for du in dUs[gate]:
            psi_t = np.matmul(du.numpy(), psi_t)
            pops = exp.populations(psi_t, model.lindbladian)
            pop_t = np.append(pop_t, pops, axis=1)
    return pop_t
def plot_fidelity(gate: str, channel: str, awg_errortype: str, error_values: np.array ):
    """Sweeps one AWG error parameter and plots the resulting gate infidelity.

    For every value in `error_values` a fresh single-qubit experiment is
    created, the named AWG attribute is set to that value, the RX90p gaussian
    pulse amplitude is re-optimized with the C1 optimizer, and the unitary
    infidelity of the optimized gate is recorded and plotted.

    Parameters
    ----------
    gate: str
        Gate name passed to fid.unitary_infid (e.g. "RX90p").
    channel: str
        Drive channel name; currently unused in the body -- NOTE(review):
        either wire it into the opt map or drop it.
    awg_errortype: str
        Name of the AWG device attribute to perturb (set via setattr).
    error_values: np.array
        Error-parameter values to sweep over.

    Returns
    -------
    dict
        {'errval': error_values, 'fidelity': [infidelity per sweep value]}.
    """
    graph = {'errval': error_values, 'fidelity': []}
    fig, ax = plt.subplots()
    # NOTE(review): Windows-specific log directory; parameterize for portability.
    log_dir = os.path.join('C:\\c3logs')
    opt_gates = ["RX90p"]
    # Only the gaussian pulse amplitude is optimized; the remaining knobs
    # are kept commented out below for quick re-enabling.
    gateset_opt_map = [
        [
            ("RX90p", "d1", "gauss", "amp"),
        ]
    # ,
    # [
    #     ("RX90p", "d1", "gauss", "freq_offset"),
    # ],
    # [
    #     ("RX90p", "d1", "gauss", "xy_angle"),
    # ],
    # [
    #     ("RX90p", "d1", "gauss", "delta"),
    # ]
    # x90p d1 carrier framechange
    ]
    for errval in error_values:
        # Fresh experiment per sweep point so optimizations don't interact.
        exp = examples.single_qubit_blackbox_exp.create_experiment()
        opt = C1(
            dir_path=log_dir,
            fid_func=fidelities.average_infid_set,
            fid_subspace=["Q1"],
            pmap=exp.pmap,
            algorithm=algorithms.lbfgs,
            options={"maxfun": 10},
            run_name="better_RX90"
        )
        exp.pmap.set_opt_map(gateset_opt_map)
        exp.set_opt_gates(opt_gates)
        opt.set_exp(exp)
        # Inject the error into the AWG model before optimizing.
        setattr(exp.pmap.generator.devices['AWG'], awg_errortype, errval)
        opt.optimize_controls()
        unitaries_after_opt = exp.get_gates()
        # NOTE(review): index [0] / dims [3] presumably match the qutrit
        # model of the blackbox experiment -- confirm against c3's API.
        val = fid.unitary_infid(unitaries_after_opt, gate, [0], [3], False).numpy()
        graph['fidelity'].append(val)
        ax.scatter(errval, val)
    ax.plot(graph['errval'], graph['fidelity'])
    plt.legend()
    return graph
def plot_rabi12(pops, amps12):
    """Plots level populations against drive amplitude for a Rabi sweep.

    Parameters
    ----------
    pops: np.ndarray
        Population data; one column per level after the transpose below
        -- assumes shape [amps, levels] on input, TODO confirm with caller.
    amps12: array-like
        Drive amplitudes, used as the x axis.

    The figure is shown and also saved to 'rabi12.png'.
    """
    fig, ax = plt.subplots()
    # One curve per level, labelled by its index. The previous loop
    # ignored the row yielded by enumerate and re-indexed pops[count].
    for level, level_pops in enumerate(pops.transpose()):
        ax.plot(amps12, level_pops, label=level)
    ax.set_xlabel('Amp [V]')
    ax.set_ylabel('Population')
    ax.legend()
    fig.show()
    fig.savefig('rabi12.png')
| [
"os.path.join",
"c3.optimizers.c1.C1",
"numpy.append",
"numpy.linspace",
"c3.libraries.fidelities.unitary_infid",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((1389, 1407), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1401, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1468, 1521), 'numpy.linspace', 'np.linspace', (['(0.0)', '(dt * pop_t.shape[1])', 'pop_t.shape[1]'], {}), '(0.0, dt * pop_t.shape[1], pop_t.shape[1])\n', (1479, 1521), True, 'import numpy as np\n'), ((1779, 1809), 'matplotlib.pyplot.legend', 'plt.legend', (['model.state_labels'], {}), '(model.state_labels)\n', (1789, 1809), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2435), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2433, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2476), 'os.path.join', 'os.path.join', (['"""C:\\\\c3logs"""'], {}), "('C:\\\\c3logs')\n", (2462, 2476), False, 'import os\n'), ((3725, 3737), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3735, 3737), True, 'import matplotlib.pyplot as plt\n'), ((3804, 3818), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3816, 3818), True, 'import matplotlib.pyplot as plt\n'), ((2989, 3170), 'c3.optimizers.c1.C1', 'C1', ([], {'dir_path': 'log_dir', 'fid_func': 'fidelities.average_infid_set', 'fid_subspace': "['Q1']", 'pmap': 'exp.pmap', 'algorithm': 'algorithms.lbfgs', 'options': "{'maxfun': 10}", 'run_name': '"""better_RX90"""'}), "(dir_path=log_dir, fid_func=fidelities.average_infid_set, fid_subspace=[\n 'Q1'], pmap=exp.pmap, algorithm=algorithms.lbfgs, options={'maxfun': 10\n }, run_name='better_RX90')\n", (2991, 3170), False, 'from c3.optimizers.c1 import C1\n'), ((1338, 1368), 'numpy.append', 'np.append', (['pop_t', 'pops'], {'axis': '(1)'}), '(pop_t, pops, axis=1)\n', (1347, 1368), True, 'import numpy as np\n'), ((2231, 2261), 'numpy.append', 'np.append', (['pop_t', 'pops'], {'axis': '(1)'}), '(pop_t, pops, axis=1)\n', (2240, 2261), True, 'import numpy as np\n'), ((380, 393), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (386, 393), True, 'import numpy as np\n'), ((459, 
472), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (465, 472), True, 'import numpy as np\n'), ((3531, 3592), 'c3.libraries.fidelities.unitary_infid', 'fid.unitary_infid', (['unitaries_after_opt', 'gate', '[0]', '[3]', '(False)'], {}), '(unitaries_after_opt, gate, [0], [3], False)\n', (3548, 3592), True, 'import c3.libraries.fidelities as fid\n'), ((396, 409), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (402, 409), True, 'import numpy as np\n'), ((441, 454), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (447, 454), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 00:32:31 2020
@author: <NAME>
based on code by <NAME>
"""
import numpy as np
from sklearn.cross_decomposition import PLSRegression
# OSC
# nicomp is the number of internal components, ncomp is the number of
# components to remove (ncomp=1 recommended)
class OSC:
    """Orthogonal Signal Correction (OSC).

    Removes up to `ncomp` components of variation in X that are orthogonal
    to the response Y, using `nicomp` internal PLS components to estimate
    each removed component (ncomp=1 is recommended).

    Parameters
    ----------
    version : str
        "SWosc" (self-developed NIPALS variant) or "JSosc" (alternative
        algorithm; flagged as giving wrong results at the moment — do not
        use until fixed).
    nicomp : int
        Number of internal PLS components.
    ncomp : int
        Number of orthogonal components to remove.
    epsilon : float
        Convergence tolerance on the relative change of the scores.
    max_iters : int
        Iteration cap for the inner convergence loop.
    """

    def __init__(self, version="SWosc", nicomp=18, ncomp=1, epsilon=10e-6, max_iters=20):
        self.version = version
        self.nicomp = nicomp
        self.ncomp = ncomp
        self.epsilon = epsilon
        # BUG FIX: the cap was hard-coded to 20, silently ignoring the
        # max_iters argument.
        self.max_iters = max_iters

    def fit(self, xx, y):
        """Fit the correction on xx (samples x features) and response y.

        Both arguments must be 2-D numpy arrays (Y shaped (N, 1)).
        Returns (Xc, W, P, mu_x): the corrected X, the weight and loading
        matrices of the removed components, and the column means of X.
        """
        X = xx.copy()
        Y = y.copy()
        if self.version == "SWosc":
            # Uniform sample weights; mu_x / mu_y are weighted column means.
            A = np.identity(n=X.shape[0]) / X.shape[0]
            mu_x = ((A.dot(X)).sum(axis=0)) / A.sum()
            mu_y = ((A.dot(Y)).sum(axis=0)) / A.sum()
            Xc = X - mu_x
            Yc = Y - mu_y
            # Columns hold the weight / loading vector of each removed component.
            W = np.zeros((X.shape[1], self.ncomp))
            P = np.zeros((X.shape[1], self.ncomp))
            # Internal PLS used to re-estimate weights each iteration.
            int_pls_obj = PLSRegression(n_components=self.nicomp, scale=False)
            i = 0
            while i < self.ncomp:
                # PCA (via SVD) provides the starting scores.
                xu, xs, xvt = np.linalg.svd(Xc)
                t = xu[:, 0:1] * xs[0]
                iter_i = 0
                convergence = 10 + self.epsilon
                while convergence > self.epsilon:
                    # Orthogonalize the scores against Y.
                    t_new = (np.identity(Yc.shape[0]) - Yc.dot(np.linalg.pinv(Yc.T.dot(Yc)).dot(Yc.T))).dot(t)
                    # Re-estimate loadings by (NIPALS) PLS.
                    int_pls_obj.fit(Xc, t_new)
                    w = int_pls_obj.coef_
                    t = Xc.dot(w)
                    # Relative change of the scores.
                    convergence = np.linalg.norm(t_new - t, axis=0) / np.linalg.norm(t_new, axis=0)
                    iter_i += 1
                    if iter_i > self.max_iters:
                        convergence = 0
                # After convergence, final loadings from regressing Xc on t.
                p = Xc.T.dot(t) / (t.T.dot(t))
                # Store this component's weight and loading vectors.
                W[:, i] = w[:, 0]
                P[:, i] = p[:, 0]
                # Deflate: remove the component from Xc.
                Xc = Xc - t.dot(p.T)
                i = i + 1
            self.mu_x = mu_x
            self.X_osc = Xc
            # BUG FIX: previously stored only the last component's weight
            # vector (self.W = w); transform() iterates over the columns of
            # self.W, so the full matrix must be kept.
            self.W = W
            self.P = P
            return (Xc, W, P, mu_x)
        # JS osc algorithm, courtesy of another author.
        # WARNING: known to give wrong results at the moment — do not use
        # until fixed. Note it also does not set self.W/self.P/self.mu_x,
        # so transform() cannot be used after fitting with this version.
        elif self.version == "JSosc":
            X = xx.copy()
            Y = y.copy()
            N = X.shape[0]
            # temporarily uniform; placeholder for future sample weights
            A = np.identity(n=N) / N
            mu_x = ((A.dot(X)).sum(axis=0)) / A.sum()
            mu_y = ((A.dot(Y)).sum(axis=0)) / A.sum()
            Xc = X - mu_x
            Yc = Y - mu_y
            W = np.zeros((X.shape[1], self.ncomp))
            P = np.zeros((X.shape[1], self.ncomp))
            TT = np.zeros((X.shape[0], self.ncomp))
            kk = 0
            while kk < self.ncomp:
                # --- first principal component of Xc as the starting point
                xu, xs, xvt = np.linalg.svd(Xc)
                tt_old = xu[:, 0:1] * xs[0]
                p = xvt.T[:, 0:1]
                p = np.multiply(p, np.sign(np.sum(p)))
                iter_i = 0
                convergence = 10 + self.epsilon
                while convergence > self.epsilon:
                    # - calculate scores
                    tt = Xc.dot(p) / (p.T.dot(p))
                    # - orthogonalize scores against Y
                    tt_new = (np.identity(Yc.shape[0]) - Yc.dot(np.linalg.pinv(Yc.T.dot(Yc)).dot(Yc.T))).dot(tt)
                    # - update loadings
                    p_new = Xc.T.dot(tt_new) / (tt_new.T.dot(tt_new))
                    # - relative change of the scores
                    convergence = np.linalg.norm(tt_new - tt_old, axis=0) / np.linalg.norm(tt_new, axis=0)
                    # - update scores and loadings
                    tt_old = tt_new.copy()
                    p = p_new.copy()
                    iter_i += 1
                    # - give up after max_iters iterations
                    if iter_i > self.max_iters:
                        convergence = 0
                # - regress X on t with self.nicomp latent variables
                int_pls_obj = PLSRegression(n_components=self.nicomp, scale=False)
                int_pls_obj.fit(Xc, tt_new)
                w = int_pls_obj.coef_
                w = w / np.linalg.norm(w)
                # - final component that will be removed and stored
                tt = X.dot(w)
                tt = (np.identity(Yc.shape[0]) - Yc.dot(np.linalg.pinv(Yc.T.dot(Yc)).dot(Yc.T))).dot(tt)
                p = Xc.T.dot(tt) / (tt.T.dot(tt))
                Xc = Xc - tt.dot(p.T)
                # - store component
                W[:, kk] = w[:, 0]
                P[:, kk] = p[:, 0]
                TT[:, kk] = tt[:, 0]
                kk += 1
            # --- final transformation of the original data
            xx_new = ((xx - mu_x).dot(np.identity(n=xx.shape[1]) - W.dot(np.linalg.inv(P.T.dot(W)).dot(P.T)))) + mu_x
            return (xx_new, W, P, mu_x)

    def transform(self, X_new, mean="estimate"):
        """Apply the fitted correction (SWosc) to new data.

        mean : "training" centers X_new with the training mean,
        "estimate" re-estimates the mean from X_new itself.
        """
        if mean == "training":
            Xc_new = X_new - self.mu_x
        elif mean == "estimate":
            Xc_new = X_new - np.mean(X_new, axis=0)
        else:
            # ROBUSTNESS: previously an unknown value fell through and
            # raised NameError on the first use of Xc_new below.
            raise ValueError("mean must be 'training' or 'estimate'")
        for comp in range(self.W.shape[1]):
            w = self.W[:, [comp]]
            p = self.P[:, [comp]]
            # Project onto the component and deflate.
            t = Xc_new.dot(w)
            Xc_new = Xc_new - t.dot(p.T)
        return Xc_new
| [
"numpy.identity",
"numpy.mean",
"numpy.sum",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.linalg.svd",
"sklearn.cross_decomposition.PLSRegression"
] | [((1299, 1333), 'numpy.zeros', 'np.zeros', (['(X.shape[1], self.ncomp)'], {}), '((X.shape[1], self.ncomp))\n', (1307, 1333), True, 'import numpy as np\n'), ((1349, 1383), 'numpy.zeros', 'np.zeros', (['(X.shape[1], self.ncomp)'], {}), '((X.shape[1], self.ncomp))\n', (1357, 1383), True, 'import numpy as np\n'), ((1447, 1499), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': 'self.nicomp', 'scale': '(False)'}), '(n_components=self.nicomp, scale=False)\n', (1460, 1499), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((1023, 1048), 'numpy.identity', 'np.identity', ([], {'n': 'X.shape[0]'}), '(n=X.shape[0])\n', (1034, 1048), True, 'import numpy as np\n'), ((1650, 1667), 'numpy.linalg.svd', 'np.linalg.svd', (['Xc'], {}), '(Xc)\n', (1663, 1667), True, 'import numpy as np\n'), ((3656, 3690), 'numpy.zeros', 'np.zeros', (['(X.shape[1], self.ncomp)'], {}), '((X.shape[1], self.ncomp))\n', (3664, 3690), True, 'import numpy as np\n'), ((3706, 3740), 'numpy.zeros', 'np.zeros', (['(X.shape[1], self.ncomp)'], {}), '((X.shape[1], self.ncomp))\n', (3714, 3740), True, 'import numpy as np\n'), ((3757, 3791), 'numpy.zeros', 'np.zeros', (['(X.shape[0], self.ncomp)'], {}), '((X.shape[0], self.ncomp))\n', (3765, 3791), True, 'import numpy as np\n'), ((3374, 3390), 'numpy.identity', 'np.identity', ([], {'n': 'N'}), '(n=N)\n', (3385, 3390), True, 'import numpy as np\n'), ((3941, 3958), 'numpy.linalg.svd', 'np.linalg.svd', (['Xc'], {}), '(Xc)\n', (3954, 3958), True, 'import numpy as np\n'), ((5218, 5270), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': 'self.nicomp', 'scale': '(False)'}), '(n_components=self.nicomp, scale=False)\n', (5231, 5270), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((6323, 6345), 'numpy.mean', 'np.mean', (['X_new'], {'axis': '(0)'}), '(X_new, axis=0)\n', (6330, 6345), True, 'import numpy as np\n'), ((2270, 2303), 'numpy.linalg.norm', 'np.linalg.norm', 
(['(t_new - t)'], {'axis': '(0)'}), '(t_new - t, axis=0)\n', (2284, 2303), True, 'import numpy as np\n'), ((2308, 2337), 'numpy.linalg.norm', 'np.linalg.norm', (['t_new'], {'axis': '(0)'}), '(t_new, axis=0)\n', (2322, 2337), True, 'import numpy as np\n'), ((5372, 5389), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (5386, 5389), True, 'import numpy as np\n'), ((4076, 4085), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (4082, 4085), True, 'import numpy as np\n'), ((4682, 4721), 'numpy.linalg.norm', 'np.linalg.norm', (['(tt_new - tt_old)'], {'axis': '(0)'}), '(tt_new - tt_old, axis=0)\n', (4696, 4721), True, 'import numpy as np\n'), ((4726, 4756), 'numpy.linalg.norm', 'np.linalg.norm', (['tt_new'], {'axis': '(0)'}), '(tt_new, axis=0)\n', (4740, 4756), True, 'import numpy as np\n'), ((6020, 6046), 'numpy.identity', 'np.identity', ([], {'n': 'xx.shape[1]'}), '(n=xx.shape[1])\n', (6031, 6046), True, 'import numpy as np\n'), ((1944, 1968), 'numpy.identity', 'np.identity', (['Yc.shape[0]'], {}), '(Yc.shape[0])\n', (1955, 1968), True, 'import numpy as np\n'), ((5538, 5562), 'numpy.identity', 'np.identity', (['Yc.shape[0]'], {}), '(Yc.shape[0])\n', (5549, 5562), True, 'import numpy as np\n'), ((4412, 4436), 'numpy.identity', 'np.identity', (['Yc.shape[0]'], {}), '(Yc.shape[0])\n', (4423, 4436), True, 'import numpy as np\n')] |
# 5
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.layers import Dropout
import numpy as np
# sinusoidal position encoding
def get_3d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """Sin/cos positional embeddings for a cubic grid of side grid_size.

    Returns an array of shape (grid_size**3, embed_dim); when cls_token is
    True an all-zero row is prepended for the class token.
    """
    axis = np.arange(grid_size)
    ii, jj, kk = np.meshgrid(axis, axis, axis, indexing='ij')
    # Flatten the three coordinate arrays into a (3, grid_size**3) grid.
    grid = np.stack([ii.reshape(-1), jj.reshape(-1), kk.reshape(-1)])
    pos_embed = get_3d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        cls_row = np.zeros([1, embed_dim])
        pos_embed = np.concatenate([cls_row, pos_embed], axis=0)
    return pos_embed
def get_3d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Concatenate per-axis 1-D embeddings; each axis gets embed_dim // 3 dims."""
    per_axis = embed_dim // 3
    parts = [get_1d_sincos_pos_embed_from_grid(per_axis, grid[axis])
             for axis in range(3)]
    return np.concatenate(parts, axis=1)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Classic transformer sin/cos embedding of 1-D positions.

    pos is flattened to (M,); the result has shape (M, embed_dim), with the
    first half sine terms and the second half cosine terms.
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    # Geometric frequency ladder: 1 / 10000**(d / half) for d = 0..half-1.
    freqs = 1.0 / 10000 ** (np.arange(half, dtype=float) / half)
    angles = np.einsum('m,d->md', pos.reshape(-1), freqs)  # (M, half)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
class Patches(layers.Layer):
    """Reshape a dense input tensor into a sequence of num_patches flat patches."""

    def __init__(self, num_patches):
        super(Patches, self).__init__()
        self.num_patches = num_patches

    def get_config(self):
        cfg = super().get_config()
        cfg["num_patches"] = self.num_patches
        return cfg

    def call(self, input_data):
        # Keep the channel depth, fold all spatial positions into one axis.
        depth = input_data.shape[-1]
        return tf.reshape(input_data, [-1, self.num_patches, depth])
class PatchEncoder(layers.Layer):
    """Project each patch to proj_dim and append a learned 3-D position code.

    The 27 patches are assumed to sit on a 3x3x3 grid; each grid cell's
    (i, j, k) coordinates index a small learned embedding table whose
    averaged rows form the positional part of the output.
    """

    def __init__(self, proj_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = 27
        self.positional_embed_dim = 6
        self.projection_dim = proj_dim
        # Dense projection fills all but the positional dimensions.
        self.projection = layers.Dense(units=self.projection_dim - self.positional_embed_dim)
        self.position_embedding = layers.Embedding(
            input_dim=self.num_patches,
            output_dim=self.positional_embed_dim,
            input_shape=(self.num_patches, 3),
        )
        # Integer (i, j, k) coordinates of every cell in the 3x3x3 grid.
        axis = tf.range(0, 3, delta=1)
        gi, gj, gk = tf.meshgrid(axis, axis, axis, indexing='ij')
        coords = tf.stack([
            tf.reshape(gi, [-1]),
            tf.reshape(gj, [-1]),
            tf.reshape(gk, [-1]),
        ])
        self.positions = tf.transpose(coords)
        # Alternative (fixed, non-learned) codes:
        # get_3d_sincos_pos_embed(embed_dim=self.positional_embed_dim, grid_size=3, cls_token=False)

    def call(self, patch):
        # Average the per-coordinate embeddings into one code per patch,
        # then broadcast over the batch.
        pe = tf.reduce_mean(self.position_embedding(self.positions), axis=1)
        pe = tf.repeat(tf.expand_dims(pe, axis=0), len(patch), 0)
        projected = self.projection(patch)
        return tf.concat([projected, tf.cast(pe, tf.float32)], axis=2)

    def get_config(self):
        cfg = super().get_config()
        cfg["projection_dim"] = self.projection_dim
        return cfg
def residual_block(x, filter, kernel_size=(1, 1, 1)):
    """Pre-normalized 3-D convolutional residual block.

    The normalized input is split into a 1x1x1 shortcut projection and a
    bottleneck path (filter -> 4*filter -> gelu -> filter); the two are
    summed at the end.
    """
    x = layers.LayerNormalization()(x)
    shortcut = layers.Conv3D(filter, (1, 1, 1), padding='same')(x)
    y = layers.Conv3D(filter, kernel_size, padding='same')(x)
    y = layers.Conv3D(4 * filter, (1, 1, 1), padding='same')(y)
    y = layers.Activation(tf.nn.gelu)(y)
    y = layers.Conv3D(filter, (1, 1, 1), padding='same')(y)
    return layers.Add()([y, shortcut])
def mlp(x, hidden_units, dropout_rate):
    """Apply a stack of Dense(gelu) + Dropout layers, one per entry of hidden_units."""
    for width in hidden_units:
        dense_out = layers.Dense(width, activation=tf.nn.gelu)(x)
        x = layers.Dropout(dropout_rate)(dense_out)
    return x
def transformer_block(x, projection_dim, num_heads, transformer_units, dropout):
    """Standard pre-norm transformer encoder block: MHSA + MLP, each residual."""
    normed = layers.LayerNormalization(epsilon=1e-6)(x)
    attended = layers.MultiHeadAttention(
        num_heads=num_heads, key_dim=projection_dim, dropout=dropout)(normed, normed)
    residual = layers.Add()([attended, x])
    normed2 = layers.LayerNormalization(epsilon=1e-6)(residual)
    transformed = mlp(normed2, hidden_units=transformer_units, dropout_rate=dropout)
    return layers.Add()([transformed, residual])
def network(grad_directions, projection_dim=128, transformer_layers=4, transformer_units=None,
            num_heads=4, mlp_head_units=[1024], num_output=45, dropout=0.02):
    """Build the full model: 3-D CNN projector -> transformer encoder -> MLP regressor.

    Parameters
    ----------
    grad_directions : int
        Channel depth of the 3x3x3 input volume.
    projection_dim : int
        Width of the transformer token embeddings.
    transformer_layers, transformer_units, num_heads, dropout
        Transformer encoder configuration. transformer_units defaults to
        [4 * 128, 128] (materialized per call — BUG FIX for the
        mutable-default-argument pitfall; same value as before).
    mlp_head_units, num_output
        Regressor head configuration.
    """
    if transformer_units is None:
        transformer_units = [4 * 128, 128]
    # 3D CNN projector
    inputs = keras.Input(shape=(3, 3, 3, grad_directions,), name='diffusion_data')
    encod = residual_block(inputs, 60, kernel_size=(1, 1, 1))
    projector = keras.Model(inputs=inputs, outputs=encod, name="projector")
    # Transformer
    patches = Patches(num_patches=27)(encod)
    encoded_patches = PatchEncoder(projection_dim)(patches)
    for _ in range(transformer_layers):
        encoded_patches = transformer_block(encoded_patches, projection_dim, num_heads, transformer_units, dropout)
    attention_map = layers.LayerNormalization(epsilon=1e-6, name='attention_map')(encoded_patches)
    transformer = keras.Model(inputs=encod, outputs=attention_map, name="transformer")
    # Regressor head
    representation = layers.GlobalAvgPool1D()(attention_map)
    x = mlp(representation, mlp_head_units, dropout_rate=0.05)
    pred = layers.Dense(num_output)(x)
    regressor = keras.Model(inputs=attention_map, outputs=pred, name="mlp_head")
    # Chain the three sub-models into the end-to-end network.
    encoded = projector(inputs)
    features = transformer(encoded)
    out = regressor(features)
    model = keras.Model(inputs=inputs, outputs=out, name="model")
    return model
"tensorflow.keras.layers.Conv3D",
"tensorflow.meshgrid",
"tensorflow.transpose",
"tensorflow.keras.layers.GlobalAvgPool1D",
"tensorflow.keras.layers.Dense",
"numpy.einsum",
"numpy.sin",
"tensorflow.cast",
"numpy.arange",
"numpy.reshape",
"tensorflow.keras.layers.MultiHeadAttention",
"numpy.con... | [((258, 278), 'numpy.arange', 'np.arange', (['grid_size'], {}), '(grid_size)\n', (267, 278), True, 'import numpy as np\n'), ((292, 312), 'numpy.arange', 'np.arange', (['grid_size'], {}), '(grid_size)\n', (301, 312), True, 'import numpy as np\n'), ((326, 346), 'numpy.arange', 'np.arange', (['grid_size'], {}), '(grid_size)\n', (335, 346), True, 'import numpy as np\n'), ((359, 409), 'numpy.meshgrid', 'np.meshgrid', (['grid_w', 'grid_h', 'grid_d'], {'indexing': '"""ij"""'}), "(grid_w, grid_h, grid_d, indexing='ij')\n", (370, 409), True, 'import numpy as np\n'), ((991, 1036), 'numpy.concatenate', 'np.concatenate', (['[emb_h, emb_w, emb_D]'], {'axis': '(1)'}), '([emb_h, emb_w, emb_D], axis=1)\n', (1005, 1036), True, 'import numpy as np\n'), ((1151, 1189), 'numpy.arange', 'np.arange', (['(embed_dim // 2)'], {'dtype': 'float'}), '(embed_dim // 2, dtype=float)\n', (1160, 1189), True, 'import numpy as np\n'), ((1303, 1335), 'numpy.einsum', 'np.einsum', (['"""m,d->md"""', 'pos', 'omega'], {}), "('m,d->md', pos, omega)\n", (1312, 1335), True, 'import numpy as np\n'), ((1353, 1364), 'numpy.sin', 'np.sin', (['out'], {}), '(out)\n', (1359, 1364), True, 'import numpy as np\n'), ((1390, 1401), 'numpy.cos', 'np.cos', (['out'], {}), '(out)\n', (1396, 1401), True, 'import numpy as np\n'), ((1424, 1466), 'numpy.concatenate', 'np.concatenate', (['[emb_sin, emb_cos]'], {'axis': '(1)'}), '([emb_sin, emb_cos], axis=1)\n', (1438, 1466), True, 'import numpy as np\n'), ((4888, 4956), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(3, 3, 3, grad_directions)', 'name': '"""diffusion_data"""'}), "(shape=(3, 3, 3, grad_directions), name='diffusion_data')\n", (4899, 4956), False, 'from tensorflow import keras\n'), ((5034, 5093), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'encod', 'name': '"""projector"""'}), "(inputs=inputs, outputs=encod, name='projector')\n", (5045, 5093), False, 'from tensorflow import keras\n'), ((5506, 5574), 
'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'encod', 'outputs': 'attention_map', 'name': '"""transformer"""'}), "(inputs=encod, outputs=attention_map, name='transformer')\n", (5517, 5574), False, 'from tensorflow import keras\n'), ((5785, 5849), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'attention_map', 'outputs': 'pred', 'name': '"""mlp_head"""'}), "(inputs=attention_map, outputs=pred, name='mlp_head')\n", (5796, 5849), False, 'from tensorflow import keras\n'), ((5965, 6018), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'out', 'name': '"""model"""'}), "(inputs=inputs, outputs=out, name='model')\n", (5976, 6018), False, 'from tensorflow import keras\n'), ((1898, 1956), 'tensorflow.reshape', 'tf.reshape', (['input_data', '[-1, self.num_patches, patch_dims]'], {}), '(input_data, [-1, self.num_patches, patch_dims])\n', (1908, 1956), True, 'import tensorflow as tf\n'), ((2227, 2294), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': '(self.projection_dim - self.positional_embed_dim)'}), '(units=self.projection_dim - self.positional_embed_dim)\n', (2239, 2294), False, 'from tensorflow.keras import layers\n'), ((2336, 2458), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', ([], {'input_dim': 'self.num_patches', 'output_dim': 'self.positional_embed_dim', 'input_shape': '(self.num_patches, 3)'}), '(input_dim=self.num_patches, output_dim=self.\n positional_embed_dim, input_shape=(self.num_patches, 3))\n', (2352, 2458), False, 'from tensorflow.keras import layers\n'), ((2490, 2513), 'tensorflow.range', 'tf.range', (['(0)', '(3)'], {'delta': '(1)'}), '(0, 3, delta=1)\n', (2498, 2513), True, 'import tensorflow as tf\n'), ((2529, 2552), 'tensorflow.range', 'tf.range', (['(0)', '(3)'], {'delta': '(1)'}), '(0, 3, delta=1)\n', (2537, 2552), True, 'import tensorflow as tf\n'), ((2570, 2593), 'tensorflow.range', 'tf.range', (['(0)', '(3)'], {'delta': '(1)'}), '(0, 3, delta=1)\n', (2578, 2593), True, 
'import tensorflow as tf\n'), ((2611, 2657), 'tensorflow.meshgrid', 'tf.meshgrid', (['cols', 'rows', 'slices'], {'indexing': '"""ij"""'}), "(cols, rows, slices, indexing='ij')\n", (2622, 2657), True, 'import tensorflow as tf\n'), ((2810, 2831), 'tensorflow.transpose', 'tf.transpose', (['indices'], {}), '(indices)\n', (2822, 2831), True, 'import tensorflow as tf\n'), ((3592, 3619), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {}), '()\n', (3617, 3619), False, 'from tensorflow.keras import layers\n'), ((3651, 3699), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filter', '(1, 1, 1)'], {'padding': '"""same"""'}), "(filter, (1, 1, 1), padding='same')\n", (3664, 3699), False, 'from tensorflow.keras import layers\n'), ((3720, 3770), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filter', 'kernel_size'], {'padding': '"""same"""'}), "(filter, kernel_size, padding='same')\n", (3733, 3770), False, 'from tensorflow.keras import layers\n'), ((3784, 3836), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['(4 * filter)', '(1, 1, 1)'], {'padding': '"""same"""'}), "(4 * filter, (1, 1, 1), padding='same')\n", (3797, 3836), False, 'from tensorflow.keras import layers\n'), ((3846, 3875), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['tf.nn.gelu'], {}), '(tf.nn.gelu)\n', (3863, 3875), False, 'from tensorflow.keras import layers\n'), ((3887, 3935), 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['filter', '(1, 1, 1)'], {'padding': '"""same"""'}), "(filter, (1, 1, 1), padding='same')\n", (3900, 3935), False, 'from tensorflow.keras import layers\n'), ((3966, 3978), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (3976, 3978), False, 'from tensorflow.keras import layers\n'), ((4286, 4326), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (4311, 4326), False, 'from tensorflow.keras import layers\n'), ((4352, 4443), 
'tensorflow.keras.layers.MultiHeadAttention', 'layers.MultiHeadAttention', ([], {'num_heads': 'num_heads', 'key_dim': 'projection_dim', 'dropout': 'dropout'}), '(num_heads=num_heads, key_dim=projection_dim,\n dropout=dropout)\n', (4377, 4443), False, 'from tensorflow.keras import layers\n'), ((4466, 4478), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (4476, 4478), False, 'from tensorflow.keras import layers\n'), ((4511, 4551), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (4536, 4551), False, 'from tensorflow.keras import layers\n'), ((4634, 4646), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (4644, 4646), False, 'from tensorflow.keras import layers\n'), ((5408, 5470), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)', 'name': '"""attention_map"""'}), "(epsilon=1e-06, name='attention_map')\n", (5433, 5470), False, 'from tensorflow.keras import layers\n'), ((5626, 5650), 'tensorflow.keras.layers.GlobalAvgPool1D', 'layers.GlobalAvgPool1D', ([], {}), '()\n', (5648, 5650), False, 'from tensorflow.keras import layers\n'), ((5741, 5765), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_output'], {}), '(num_output)\n', (5753, 5765), False, 'from tensorflow.keras import layers\n'), ((438, 457), 'numpy.reshape', 'np.reshape', (['i', '[-1]'], {}), '(i, [-1])\n', (448, 457), True, 'import numpy as np\n'), ((463, 482), 'numpy.reshape', 'np.reshape', (['j', '[-1]'], {}), '(j, [-1])\n', (473, 482), True, 'import numpy as np\n'), ((488, 507), 'numpy.reshape', 'np.reshape', (['k', '[-1]'], {}), '(k, [-1])\n', (498, 507), True, 'import numpy as np\n'), ((3149, 3175), 'tensorflow.expand_dims', 'tf.expand_dims', (['pe'], {'axis': '(0)'}), '(pe, axis=0)\n', (3163, 3175), True, 'import tensorflow as tf\n'), ((4091, 4133), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['units'], {'activation': 'tf.nn.gelu'}), 
'(units, activation=tf.nn.gelu)\n', (4103, 4133), False, 'from tensorflow.keras import layers\n'), ((4149, 4177), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (4163, 4177), False, 'from tensorflow.keras import layers\n'), ((638, 662), 'numpy.zeros', 'np.zeros', (['[1, embed_dim]'], {}), '([1, embed_dim])\n', (646, 662), True, 'import numpy as np\n'), ((2695, 2714), 'tensorflow.reshape', 'tf.reshape', (['i', '[-1]'], {}), '(i, [-1])\n', (2705, 2714), True, 'import tensorflow as tf\n'), ((2724, 2743), 'tensorflow.reshape', 'tf.reshape', (['j', '[-1]'], {}), '(j, [-1])\n', (2734, 2743), True, 'import tensorflow as tf\n'), ((2753, 2772), 'tensorflow.reshape', 'tf.reshape', (['k', '[-1]'], {}), '(k, [-1])\n', (2763, 2772), True, 'import tensorflow as tf\n'), ((3291, 3314), 'tensorflow.cast', 'tf.cast', (['pe', 'tf.float32'], {}), '(pe, tf.float32)\n', (3298, 3314), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 3 10:45:39 2022
@author: dgbli
"""
import numpy as np
import matplotlib.pyplot as plt
def return_true(n):
    """Dash coordinates starting 'on': pairs (i, i+1) at even i, None gaps at odd i."""
    coords = []
    for i in range(n):
        coords.extend((i, i + 1) if i % 2 == 0 else (None,))
    return coords
def return_false(n):
    """Dash coordinates starting 'off': None gaps at even i, pairs (i, i+1) at odd i."""
    coords = []
    for i in range(n):
        coords.extend((i, i + 1) if i % 2 == 1 else (None,))
    return coords
class Hitomizashi:
    """Draw a hitomezashi stitch pattern from two boolean sequences.

    Each entry of ``rows`` sets the dash phase of one horizontal line and
    each entry of ``columns`` sets the phase of one vertical line.
    """

    def __init__(self, rows, columns, color='k', linewidth=1.0, capstyle='projecting'):
        self.a = columns   # per-vertical-line phase flags
        self.b = rows      # per-horizontal-line phase flags
        self.Na = len(self.a)
        self.Nb = len(self.b)
        self.color = color
        self.lw = linewidth
        self.cs = capstyle

    def draw(self):
        """Render the pattern on a fresh matplotlib figure (axes hidden)."""
        fig, ax = plt.subplots()
        # Horizontal stitches: one line per entry of self.b, each spanning
        # the Na columns. BUG FIX: the original read the module-level
        # globals a/b and looped over range(self.Na) instead of self.Nb.
        for i in range(self.Nb):
            if self.b[i] == True:
                x = np.array(return_true(self.Na))
            elif self.b[i] == False:
                x = np.array(return_false(self.Na))
            else:
                raise TypeError('Input should be boolean')
            ax.plot(x, i * np.ones(len(x)), color=self.color,
                    linewidth=self.lw, solid_capstyle=self.cs)
        # Vertical stitches: one line per entry of self.a, spanning Nb rows.
        for i in range(self.Na):
            if self.a[i] == True:
                y = np.array(return_true(self.Nb))
            elif self.a[i] == False:
                y = np.array(return_false(self.Nb))
            else:
                raise TypeError('Input should be boolean')
            ax.plot(i * np.ones(len(y)), y, color=self.color,
                    linewidth=self.lw, solid_capstyle=self.cs)
        ax.set_axis_off()
if __name__ == "__main__":
    # Random phase flags for a 25x25 stitch grid: each entry True/False
    # with probability 1/2. NOTE(review): draw() references the
    # module-level names a and b directly, so keep these names as-is.
    a = np.random.rand(25) > 0.5
    b = np.random.rand(25) > 0.5
    hz = Hitomizashi(a,b, linewidth=4, color='dodgerblue', capstyle='round')
    hz.draw()
| [
"numpy.random.rand",
"matplotlib.pyplot.subplots"
] | [((914, 928), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (926, 928), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2090), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (2086, 2090), True, 'import numpy as np\n'), ((2106, 2124), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (2120, 2124), True, 'import numpy as np\n')] |
import numpy as np
from skimage import feature
from sklearn import preprocessing
class LBP:
    """Uniform local-binary-pattern feature extractor.

    p : number of circularly symmetric neighbour points.
    r : radius of the sampling circle.
    """

    def __init__(self, p, r):
        self.p = p
        self.r = r

    def getVecLength(self):
        # Number of distinct raw binary patterns for p sample points.
        return 2 ** self.p

    def getFeature(self, imgMat):
        """Return the normalized 255-bin LBP histogram of one image."""
        feat = feature.local_binary_pattern(
            imgMat, self.p, self.r, method='uniform')
        # BUG FIX: np.histogram's `normed` keyword was deprecated and then
        # removed (NumPy >= 1.24); with these unit-width bins, density=True
        # gives the identical normalization.
        re, _ = np.histogram(feat, bins=range(
            256), density=True)
        return re

    def getFeatVecs(self, imgList, load=0):
        """Compute (or load cached) feature matrix and label column.

        imgList : iterable of (image_matrix, label) pairs; entries whose
            matrix is None are skipped.
        load : when 1, read the previously saved .npy caches instead of
            recomputing.
        """
        if load == 1:
            feats = np.load(r"featVectLbp.npy")
            types = np.load(r"typesLbp.npy")
            return (feats, types)
        feats = None
        types = np.float32([]).reshape((0, 1))
        for mat, label in imgList:
            if mat is None:
                continue
            feat = self.getFeature(mat)
            if feats is None:
                feats = feat.reshape((1, -1))
            else:
                feats = np.append(feats, feat.reshape((1, -1)), axis=0)
            types = np.append(types, np.array(label).reshape((1, 1)))
        # Cache to disk for later load=1 calls.
        np.save(r"featVectLbp.npy", feats)
        np.save(r"typesLbp.npy", types)
        return (feats, types)
class HOG:
    """HOG feature extractor with fixed geometry (16x16 cells, 2x2 blocks)."""

    def getVecLength(self):
        # Fixed descriptor length for the hog() configuration used below.
        return 1764

    def getFeature(self, imgMat):
        """Return the L2-normalized HOG descriptor of one image as a row vector."""
        descriptor = feature.hog(imgMat, orientations=9, pixels_per_cell=(
            16, 16), cells_per_block=(2, 2), block_norm='L2-Hys')
        descriptor = descriptor.reshape((1, -1))
        return preprocessing.normalize(descriptor)

    def getFeatVecs(self, imgList, load=0):
        """Compute (or load cached) HOG feature matrix and label column."""
        if load == 1:
            feats = np.load(r"featVectHog.npy")
            labels = np.load(r"typesHog.npy")
            return (feats, labels)
        feats = None
        labels = np.float32([]).reshape((0, 1))
        for mat, label in imgList:
            row = self.getFeature(mat)
            if feats is None:
                feats = row.copy()
            else:
                feats = np.append(feats, row, axis=0)
            labels = np.append(labels, np.float32([label]).reshape((1, 1)))
        # Cache to disk for later load=1 calls.
        np.save(r"featVectHog.npy", feats)
        np.save(r"typesHog.npy", labels)
        return (feats, labels)
def extractfeature(data, tags):
    """Extract concatenated HOG+LBP feature rows for every (image, tag) pair.

    Returns (feats, types): the stacked feature matrix and the label column.
    """
    print("[feature] start")
    matList = [(data[i], tags[i]) for i in range(len(data))]
    hog = HOG()
    lbp = LBP(8, 1)
    print("[feature]hog")
    featHog, types = hog.getFeatVecs(matList, load=0)
    print("[feature] lbp")
    featLbp, _ = lbp.getFeatVecs(matList, load=0)
    # Concatenate the two descriptor families column-wise.
    feats = np.append(featHog, featLbp, axis=1)
    print("[feature] end")
    return (feats, types)
| [
"numpy.append",
"numpy.array",
"sklearn.preprocessing.normalize",
"skimage.feature.hog",
"numpy.load",
"numpy.float32",
"numpy.save",
"skimage.feature.local_binary_pattern"
] | [((2895, 2930), 'numpy.append', 'np.append', (['featHog', 'featLbp'], {'axis': '(1)'}), '(featHog, featLbp, axis=1)\n', (2904, 2930), True, 'import numpy as np\n'), ((280, 350), 'skimage.feature.local_binary_pattern', 'feature.local_binary_pattern', (['imgMat', 'self.p', 'self.r'], {'method': '"""uniform"""'}), "(imgMat, self.p, self.r, method='uniform')\n", (308, 350), False, 'from skimage import feature\n'), ((1270, 1303), 'numpy.save', 'np.save', (['"""featVectLbp.npy"""', 'feats'], {}), "('featVectLbp.npy', feats)\n", (1277, 1303), True, 'import numpy as np\n'), ((1314, 1344), 'numpy.save', 'np.save', (['"""typesLbp.npy"""', 'types'], {}), "('typesLbp.npy', types)\n", (1321, 1344), True, 'import numpy as np\n'), ((1500, 1610), 'skimage.feature.hog', 'feature.hog', (['imgMat'], {'orientations': '(9)', 'pixels_per_cell': '(16, 16)', 'cells_per_block': '(2, 2)', 'block_norm': '"""L2-Hys"""'}), "(imgMat, orientations=9, pixels_per_cell=(16, 16),\n cells_per_block=(2, 2), block_norm='L2-Hys')\n", (1511, 1610), False, 'from skimage import feature\n'), ((1675, 1704), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['feat'], {}), '(feat)\n', (1698, 1704), False, 'from sklearn import preprocessing\n'), ((2409, 2442), 'numpy.save', 'np.save', (['"""featVectHog.npy"""', 'feats'], {}), "('featVectHog.npy', feats)\n", (2416, 2442), True, 'import numpy as np\n'), ((2453, 2483), 'numpy.save', 'np.save', (['"""typesHog.npy"""', 'types'], {}), "('typesHog.npy', types)\n", (2460, 2483), True, 'import numpy as np\n'), ((555, 581), 'numpy.load', 'np.load', (['"""featVectLbp.npy"""'], {}), "('featVectLbp.npy')\n", (562, 581), True, 'import numpy as np\n'), ((604, 627), 'numpy.load', 'np.load', (['"""typesLbp.npy"""'], {}), "('typesLbp.npy')\n", (611, 627), True, 'import numpy as np\n'), ((1819, 1845), 'numpy.load', 'np.load', (['"""featVectHog.npy"""'], {}), "('featVectHog.npy')\n", (1826, 1845), True, 'import numpy as np\n'), ((1868, 1891), 'numpy.load', 
'np.load', (['"""typesHog.npy"""'], {}), "('typesHog.npy')\n", (1875, 1891), True, 'import numpy as np\n'), ((720, 734), 'numpy.float32', 'np.float32', (['[]'], {}), '([])\n', (730, 734), True, 'import numpy as np\n'), ((1984, 1998), 'numpy.float32', 'np.float32', (['[]'], {}), '([])\n', (1994, 1998), True, 'import numpy as np\n'), ((2293, 2323), 'numpy.append', 'np.append', (['feats', 'feat'], {'axis': '(0)'}), '(feats, feat, axis=0)\n', (2302, 2323), True, 'import numpy as np\n'), ((1229, 1243), 'numpy.array', 'np.array', (['type'], {}), '(type)\n', (1237, 1243), True, 'import numpy as np\n'), ((2364, 2382), 'numpy.float32', 'np.float32', (['[type]'], {}), '([type])\n', (2374, 2382), True, 'import numpy as np\n')] |
#!/usr/bin/env
"""
Read in two extracted light curves (interest band and reference band), split
into segments, compute the power spectra per band and cross spectrum of each
segment, averages cross spectrum of all the segments, and computes frequency
lags between the two bands.
Example call:
python simple_cross_spectra.py ./cygx1_i.lc ./cygx1_ref.lc -o "./cygx1"
Enter python simple_cross_spectra.py -h at the command line for help.
"""
from __future__ import print_function
from astropy.table import Table, Column
from astropy.io import fits
import numpy as np
from scipy import fftpack
import argparse
import subprocess
from datetime import datetime
__author__ = "<NAME> <A.L.Stevens at uva.nl>"
__year__ = "2016"
class Band(object):
    """Per-band accumulator for cross-spectral analysis.

    Holds the raw power array, running mean count rate, rms, and the full
    (positive and negative) Fourier frequency grid for n_bins samples
    spaced dt seconds apart.
    """

    def __init__(self, n_bins=8192, dt=0.0078125):
        self.freq = fftpack.fftfreq(n_bins, d=dt)
        self.power = np.zeros(n_bins, dtype=np.float64)
        self.mean_rate = 0.0
        self.rms = 0.0
################################################################################
def type_power_of_two(num):
    """
    Check if an input is a power of 2 (1 <= num < 2147483648), as an argparse
    type.

    Parameters
    ----------
    num : int or str
        The number in question (argparse passes the raw command-line string).

    Returns
    -------
    n : int
        The number in question, if it's a power of two in range.

    Raises
    ------
    argparse.ArgumentTypeError
        If num isn't a power of two in [1, 2**31). (FIX: non-positive input
        previously tripped a bare `assert`, which argparse cannot report as
        a usage error and which disappears under `python -O`.)
    """
    n = int(num)
    # A positive integer is a power of two iff exactly one bit is set.
    if 0 < n < 2147483648 and n & (n - 1) == 0:
        return n
    raise argparse.ArgumentTypeError("%d is not a power of two." % n)
################################################################################
def get_key_val(fits_file, ext, keyword):
    """
    Get the value of a keyword from a FITS header (keyword lookup is not
    case-sensitive).

    Parameters
    ----------
    fits_file : str
        File name of the FITS file.
    ext : int
        The FITS extension (0-2) in which to look up the keyword.
    keyword : str
        The keyword whose associated value is wanted.

    Returns
    -------
    any type
        Value of the given keyword.

    Raises
    ------
    IOError if the input file isn't actually a FITS file (reported and the
    program exits).
    """
    ext = np.int8(ext)
    assert 0 <= ext <= 2
    keyword = str(keyword)
    try:
        hdulist = fits.open(fits_file)
    except IOError:
        # Report and bail out: without the file there is nothing to do.
        print("\tERROR: File does not exist: %s" % fits_file)
        exit()
    key_value = hdulist[ext].header[keyword]
    hdulist.close()
    return key_value
################################################################################
def raw_to_absrms(power, mean_rate, n_bins, dt, noisy=True):
    """
    Normalize the power spectrum to absolute rms^2 normalization.

    TODO: cite paper.

    Parameters
    ----------
    power : np.array of floats
        The raw power at each Fourier frequency, as a 1-D or 2-D array.
        Size = (n_bins) or (n_bins, detchans).
    mean_rate : float
        The mean count rate for the light curve, in cts/s.
    n_bins : int
        Number of bins per segment of light curve.
    dt : float
        Timestep between bins in n_bins, in seconds.
    noisy : boolean
        True if there is Poisson noise in the power spectrum (i.e., from real
        data), False if there is no noise in the power spectrum (i.e.,
        simulations without Poisson noise). Default is True.

    Returns
    -------
    np.array of floats
        The noise-subtracted power spectrum in absolute rms^2 units, in the
        same size array as the input power.
    """
    ## Poisson noise level in absolute rms units is twice the mean rate.
    noise = 2.0 * mean_rate if noisy else 0.0
    ## BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    ## the builtin float is the documented replacement.
    return power * (2.0 * dt / float(n_bins)) - noise
################################################################################
def var_and_rms(power, df):
    """
    Compute the variance and rms (root mean square) of a power spectrum.

    Assumes the negative-frequency powers have been removed. DOES NOT WORK ON
    2-D POWER ARRAYS! Not sure why.
    TODO: cite textbook or paper.

    Parameters
    ----------
    power : np.array of floats
        1-D array (size = n_bins/2+1) of the raw power at each of the
        *positive* Fourier frequencies.
    df : float
        The step size between Fourier frequencies.

    Returns
    -------
    variance : float
        The variance of the power spectrum (integral of power over frequency).
    rms : float
        The rms of the power spectrum; NaN when the variance is negative.
    """
    variance = (power * df).sum(axis=0)
    rms = np.where(variance >= 0, np.sqrt(variance), np.nan)
    return variance, rms
################################################################################
def cs_out(out_base, meta_dict, cs_avg, ci, ref):
    """
    Write the cross spectrum, CoI power spectrum, and reference-band power
    spectrum (all raw, i.e. un-normalized) plus header metadata to a FITS
    file for make_lags.py to compute cross-spectral lags from.

    Parameters
    ----------
    out_base : str
        Base name of the FITS file to write to; "_cs.fits" is appended.
    meta_dict : dict
        Dictionary of necessary meta-parameters for data analysis.
    cs_avg : np.array of complex numbers
        2-D array of the averaged cross spectrum. Size = (n_bins, detchans).
    ci : ccf_lc.Lightcurve object
        The channel of interest light curve; freq, mean_rate, and power must
        already be assigned.
    ref : ccf_lc.Lightcurve object
        The reference band light curve; mean_rate, rms, and power must
        already be assigned.

    Returns
    -------
    nothing, but writes to the file "*_cs.fits"
    """
    out_file = out_base + "_cs.fits"
    out_dir = out_file[0:out_file.rfind("/") + 1]
    ## Make sure the destination directory exists before writing.
    if len(out_dir) >= 2:
        subprocess.call(['mkdir', '-p', out_dir])
    print("Output sent to: %s" % out_file)

    out_table = Table()
    for column in (Column(data=ci.freq, name='FREQUENCY', unit='Hz'),
                   Column(data=cs_avg, name='CROSS'),
                   Column(data=ci.power, name='POWER_CI'),
                   Column(data=ref.power, name='POWER_REF')):
        out_table.add_column(column)

    ## Header keywords, in the same order as before (update with an ordered
    ## sequence preserves insertion order in the table metadata).
    out_table.meta.update([
            ('TYPE', "Cross spectrum and power spectra, saved for lags."),
            ('DATE', str(datetime.now())),
            ('EVTLIST', " "),
            ('DT', meta_dict['dt']),
            ('DF', meta_dict['df']),
            ('N_BINS', meta_dict['n_bins']),
            ('SEGMENTS', meta_dict['n_seg']),
            ('SEC_SEG', meta_dict['n_seconds']),
            ('EXPOSURE', meta_dict['exposure']),
            ('DETCHANS', meta_dict['detchans']),
            ('RATE_CI', ci.mean_rate),
            ('RATE_REF', ref.mean_rate),
            ('RMS_REF', float(ref.rms)),
            ('NYQUIST', meta_dict['nyquist'])])
    out_table.write(out_file, overwrite=True)
################################################################################
def make_cs(rate_ci, rate_ref, meta_dict):
    """
    Compute the power spectrum of each band and the cross spectrum for one
    segment of the light curve.

    Parameters
    ----------
    rate_ci : np.array of floats
        The channel-of-interest light curve segment, size = (n_bins).
    rate_ref : np.array of floats
        The reference-band light curve segment, size = (n_bins).
    meta_dict : dict
        Dictionary of necessary meta-parameters for data analysis.

    Returns
    -------
    cs_seg : np.array of complex numbers
        The cross spectrum of the channel of interest with the reference
        band for this segment.
    ci_seg : Band object
        The channel of interest band for this segment.
    ref_seg : Band object
        The reference band for this segment.
    """
    n_bins = meta_dict['n_bins']
    assert np.shape(rate_ci) == (n_bins, ), ("ERROR: CoI light curve has "
            "wrong dimensions. Must have size (n_bins, ).")
    assert np.shape(rate_ref) == (n_bins, ), ("ERROR: Reference light curve "
            "has wrong dimensions. Must have size (n_bins, ).")

    ci_seg = Band(n_bins=n_bins, dt=meta_dict['dt'])
    ref_seg = Band(n_bins=n_bins, dt=meta_dict['dt'])

    ## Mean count rate of each band for this segment
    ci_seg.mean_rate = np.mean(rate_ci)
    ref_seg.mean_rate = np.mean(rate_ref)

    ## FFT of the mean-subtracted count rates; SciPy's fftpack is faster
    ## than NumPy or pyFFTW for these array sizes.
    fft_ci = fftpack.fft(rate_ci - ci_seg.mean_rate)
    fft_ref = fftpack.fft(rate_ref - ref_seg.mean_rate)

    ## Raw power spectra (squared Fourier amplitudes)
    ci_seg.power = np.abs(fft_ci) ** 2
    ref_seg.power = np.abs(fft_ref) ** 2

    ## Raw cross spectrum: F_ci * conj(F_ref)
    cs_seg = fft_ci * np.conj(fft_ref)

    return cs_seg, ci_seg, ref_seg
################################################################################
def lc_in(interest_file, ref_file, meta_dict):
    """
    Read the interest-band and reference-band light curves, align their start
    times, step through them in segments of 'n_bins' bins, and accumulate
    per-segment cross spectra and power spectra.

    Parameters
    ----------
    interest_file : str
        File name of the interest-band light curve (read as an astropy Table
        with 'TIME' and 'RATE' columns).
    ref_file : str
        File name of the reference-band light curve (same format).
    meta_dict : dict
        Dictionary of meta-parameters; 'n_bins' and 'dt' are used here.

    Returns
    -------
    cross_spec : np.array of complex numbers
        Running total (not yet averaged) of the cross spectrum, size n_bins.
    interest_band : Band object
        Running totals of power and mean rate for the interest band.
    ref_band : Band object
        Running totals of power and mean rate for the reference band.
    n_seg : int
        The number of segments processed.
    """
    n_seg = 0
    interest_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
    ref_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
    cross_spec = np.zeros(meta_dict['n_bins'], dtype=np.complex128)

    ## Open the light curve files and load the data as astropy tables
    try:
        interest_table = Table.read(interest_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % interest_file)
        exit()
    try:
        ref_table = Table.read(ref_file)
    except IOError:
        print("\tERROR: File does not exist: %s" % ref_file)
        exit()

    start_time_i = interest_table['TIME'][0]
    end_time_i = interest_table['TIME'][-1]
    start_time_r = ref_table['TIME'][0]
    end_time_r = ref_table['TIME'][-1]
    len_i = len(interest_table['TIME'])
    len_r = len(ref_table['TIME'])
    # print("i: %.15f \t %.15f" % (start_time_i, end_time_i))
    # print("r: %.15f \t %.15f" % (start_time_r, end_time_r))
    # print("len i:", len_i)
    # print("len r:", len_r)
    # assert len_i == len_r
    # assert start_time_i == start_time_r
    # assert end_time_i == end_time_r

    ## The following is in case the two files aren't the exact same length:
    ## whichever file starts later keeps its indices at zero, and the other
    ## file's indices are advanced by the start-time offset in bins.
    a = 0  ## start of bin index to make segment of data
    c = 0
    b = meta_dict['n_bins']  ## end of bin index to make segment of data for
                             ## inner for-loop
    d = meta_dict['n_bins']
    if start_time_i > start_time_r:
        bin_diff = int((start_time_i - start_time_r) / meta_dict['dt'])
        assert bin_diff < len_r
        c += bin_diff
        d += bin_diff
    elif start_time_r > start_time_i:
        bin_diff = int((start_time_r - start_time_i) / meta_dict['dt'])
        assert bin_diff < len_i
        a += bin_diff
        b += bin_diff

    ## Loop through segments of the light curves
    while b <= len_i and d <= len_r:
        n_seg += 1

        ## Extract the count rates for each segment
        rate_ci = interest_table["RATE"][a:b]
        rate_ref = ref_table["RATE"][c:d]

        ## Compute the power spectra and cross spectrum for that segment
        cs_seg, ci_seg, ref_seg = make_cs(rate_ci, rate_ref, meta_dict)
        assert int(len(cs_seg)) == meta_dict['n_bins'], "ERROR: "\
                "Something went wrong in make_cs. Length of cross spectrum"\
                " segment != n_bins."

        ## Keep running total (to be made into averages)
        cross_spec += cs_seg
        interest_band.power += ci_seg.power
        ref_band.power += ref_seg.power
        interest_band.mean_rate += ci_seg.mean_rate
        ref_band.mean_rate += ref_seg.mean_rate

        ## NOTE(review): 'test' is neither a parameter nor a local -- it is
        ## the module-level global assigned in the __main__ block, so calling
        ## lc_in() from an importing module raises NameError. Confirm whether
        ## it should be passed in as an argument instead.
        if (test is True) and (n_seg == 1):  ## For testing
            break

        ## Clear loop variables for the next round
        rate_ci = None
        rate_ref = None
        cs_seg = None
        ci_seg = None
        ref_seg = None

        ## Increment the counters and indices
        a = b
        c = d
        b += meta_dict['n_bins']
        d += meta_dict['n_bins']
        ## Since the for-loop goes from i to j-1 (since that's how the range
        ## function works) it's ok that we set i=j here for the next round.
        ## This will not cause double-counting rows or skipping rows.

    return cross_spec, interest_band, ref_band, n_seg
################################################################################
def freq_lag_out(out_base, meta_dict, freq, phase, err_phase, tlag, err_tlag,
        ci_mean_rate, ref_mean_rate):
    """
    Write the lag-frequency spectrum (phase and time lags with their errors)
    plus header metadata to a FITS file.

    Parameters
    ----------
    out_base : str
        Base name of the FITS file to write to; "_lag-freq.fits" is appended.
    meta_dict : dict
        Dictionary of necessary meta-parameters for data analysis.
    freq : np.array of floats
        1-D array of the Fourier frequencies against which the lag-frequency
        spectrum is plotted.
    phase, err_phase : np.array of floats
        The phase and error in phase of the frequency lags, in radians.
    tlag, err_tlag : np.array of floats
        The time and error in time of the frequency lags, in seconds.
    ci_mean_rate : float
        The mean photon count rate of the interest band, in cts/s.
    ref_mean_rate : float
        The mean photon count rate of the reference band, in cts/s.

    Returns
    -------
    nothing, but writes to the file "*_lag-freq.fits"
    """
    out_file = out_base + "_lag-freq.fits"
    out_dir = out_file[0:out_file.rfind("/") + 1]
    ## Make sure the destination directory exists before writing.
    if len(out_dir) >= 2:
        subprocess.call(['mkdir', '-p', out_dir])
    print("Output sent to: %s" % out_file)

    lag_table = Table()
    for column in (Column(data=freq, name='FREQUENCY', unit='Hz'),
                   Column(data=phase, name='PHASE_LAG', unit='radian'),
                   Column(data=err_phase, name='PHASE_LAG_ERR',
                          unit='radian'),
                   Column(data=tlag, name='TIME_LAG', unit='s'),
                   Column(data=err_tlag, name='TIME_LAG_ERR', unit='s')):
        lag_table.add_column(column)

    ## Header keywords, in the same order as before.
    lag_table.meta.update([
            ('TYPE', "Lag-frequency spectrum"),
            ('DATE', str(datetime.now())),
            ('CS_DATA', out_base + "_cs.fits"),
            ('DT', meta_dict['dt']),
            ('DF', meta_dict['df']),
            ('N_BINS', meta_dict['n_bins']),
            ('SEGMENTS', meta_dict['n_seg']),
            ('SEC_SEG', meta_dict['n_seconds']),
            ('EXPOSURE', meta_dict['exposure']),
            ('DETCHANS', meta_dict['detchans']),
            ('RATE_CI', ci_mean_rate),
            ('RATE_REF', ref_mean_rate),
            ('NYQUIST', meta_dict['nyquist'])])
    lag_table.write(out_file, overwrite=True)
################################################################################
def bias_term(ci, ref, meta_dict, n_range):
    """
    Compute the bias term to be subtracted off the cross spectrum when
    computing the covariance spectrum. Equation in footnote 4 (section 2.1.3,
    page 12) of Uttley et al. 2014.

    Assumes the input power spectra are raw (not normalized and not
    Poisson-noise-subtracted).

    Parameters
    ----------
    ci : Band object
        The channel of interest band. ci.power is raw, over the frequencies
        to be averaged, size = 1 (avg over freq) or n_bins/2+1 (avg over
        energy). ci.mean_rate is in cts/s. Only positive (through Nyquist)
        frequencies.
    ref : Band object
        The reference band, same conventions as ci.
    meta_dict : dict
        Dictionary of meta-parameters needed for analysis.
    n_range : int
        Number of bins averaged together for the lags (energy bins for
        frequency lags, frequency bins for energy lags).

    Returns
    -------
    n_squared : float
        The bias term. If it comes out undefined, just set it to zero.
    """
    ## Poisson noise levels in absolute rms units
    noise_ref = 2.0 * ref.mean_rate
    noise_ci = 2.0 * ci.mean_rate

    ## Absolute-rms normalization factor; noise is NOT subtracted here.
    norm = 2.0 * meta_dict['dt'] / float(n_range)
    abs_ci = ci.power * norm
    abs_ref = ref.power * norm

    numerator = ((abs_ref - noise_ref) * noise_ci
                 + (abs_ci - noise_ci) * noise_ref
                 + noise_ref * noise_ci)
    return numerator / (n_range * meta_dict['n_seg'])
################################################################################
def compute_coherence(cross_spec, ci, ref, meta_dict, n_range):
    """
    Compute the raw coherence of the cross spectrum. Coherence equation from
    Uttley et al. 2014 eqn 11; bias term from footnote 4 on the same page.
    If the bias term gets wonky or undefined it is usually tiny, so it can be
    set to zero.

    Parameters
    ----------
    cross_spec : np.array of complex numbers
        1-D array of the raw (un-normalized, noise-included) cross spectrum,
        averaged over the desired energy or frequency range. Size = detchans
        (avg over freq) or n_bins/2+1 (avg over energy). Eqn 9 of Uttley et
        al. 2014.
    ci : Band object
        The channel of interest band; power raw, mean_rate in cts/s, only
        positive (through Nyquist) frequencies.
    ref : Band object
        The reference band, same conventions as ci.
    meta_dict : dict
        Dictionary of meta-parameters needed for analysis.
    n_range : int
        Number of frequency bins averaged per new frequency bin for lags
        (1 for un-rebinned frequency lags). Same as K in Section 2 of Uttley
        et al. 2014.

    Returns
    -------
    coherence : np.array of floats
        The raw coherence of the cross spectrum (Uttley et al. 2014, eqn 11).
        Size = n_bins/2+1 (avg over energy) or detchans (avg over freq).
    """
    bias = bias_term(ci, ref, meta_dict, n_range)
    denominator = ci.power * ref.power
    numerator = cross_spec * np.conj(cross_spec) - bias
    ## Guard against division by zero power; those bins get coherence 0.
    with np.errstate(all='ignore'):
        coherence = np.where(denominator != 0, numerator / denominator, 0)
    return np.real(coherence)
################################################################################
def get_phase_err(cs_avg, ci, ref, meta_dict, n_range):
    """
    Compute the error on the complex phase (in radians) via the coherence.
    Input powers should NOT be Poisson-noise-subtracted or normalized.

    Parameters
    ----------
    cs_avg : np.array of complex numbers
        1-D array of the raw cross spectrum, averaged over Fourier segments
        and energy channels or frequency bins. Size = detchans (avg over
        freq) or n_bins/2+1 (avg over energy). Eqn 9 of Uttley et al. 2014.
    ci : Band object
        The channel of interest band; power raw, mean_rate in cts/s, only
        positive (through Nyquist) frequencies.
    ref : Band object
        The reference band, same conventions as ci.
    meta_dict : dict
        Dictionary of meta-parameters needed for analysis.
    n_range : int
        Number of frequency bins averaged per new frequency bin for lags
        (1 for un-rebinned frequency lags). Same as K in Section 2 of Uttley
        et al. 2014.

    Returns
    -------
    phase_err : np.array of floats
        1-D array of the error on the phase of the lag.
    """
    coherence = compute_coherence(cs_avg, ci, ref, meta_dict, n_range)
    denominator = 2 * coherence * n_range * meta_dict['n_seg']
    ## Bins with zero coherence get zero error; suppress the 0/0 warnings.
    with np.errstate(all='ignore'):
        phase_err = np.sqrt(np.where(coherence != 0,
                                      (1 - coherence) / denominator, 0))
    return phase_err
################################################################################
def phase_to_tlags(phase, f):
    """
    Convert a complex-plane cross-spectrum phase (in radians) to a time lag
    (in seconds): tlag = phase / (2 pi f).

    Parameters
    ----------
    phase : float or np.array of floats
        1-D array of the phase of the lag, in radians.
    f : float or np.array of floats
        1-D array of the Fourier frequency of the cross-spectrum, in Hz.

    Returns
    -------
    tlags : float or np.array of floats
        1-D array of the time of the lag, in seconds; 0 where f == 0.
    """
    if np.shape(phase) != np.shape(f):
        ## Broadcast f along the second axis of phase so shapes match.
        f = np.resize(np.repeat(f, np.shape(phase)[1]), np.shape(phase))
    assert np.shape(phase) == np.shape(f), ("ERROR: Phase array must have "
            "same dimensions as frequency array.")
    ## Zero-frequency bins get a zero lag; suppress the divide warnings.
    with np.errstate(all='ignore'):
        return np.where(f != 0, phase / (2.0 * np.pi * f), 0)
################################################################################
def main(interest_file, ref_file, out_base="./out", n_seconds=64, test=False):
    """
    Read in two extracted light curves (interest band and reference band),
    split into segments, compute the power spectra per band and cross spectrum
    of each segment, average the cross spectrum over all segments, and compute
    frequency lags between the two bands.

    Parameters
    ----------
    interest_file : str
        The name of the .lc extracted light curve for the interest band.
    ref_file : str
        The name of the .lc extracted light curve for the reference band.
    out_base : str, default = "./out"
        The base name to save output to. The extension will be appended to the
        end.
    n_seconds : int, default = 64
        Number of seconds in each Fourier segment. Must be a power of 2,
        positive.
    test : bool, default = False
        If true, only computes one segment of data. If false, runs like
        normal. NOTE(review): this parameter is not forwarded to lc_in, which
        reads the module-level 'test' global instead -- confirm intent.
    """
    assert n_seconds > 0, "ERROR: Number of seconds per segment must be a "\
            "positive integer."

    ## The TIMEDEL keyword may live in extension 0 or 1.
    try:
        t_res = float(get_key_val(interest_file, 0, 'TIMEDEL'))
    except KeyError:
        t_res = float(get_key_val(interest_file, 1, 'TIMEDEL'))
    try:
        t_res_ref = float(get_key_val(ref_file, 0, 'TIMEDEL'))
    except KeyError:
        t_res_ref = float(get_key_val(ref_file, 1, 'TIMEDEL'))

    assert t_res == t_res_ref, "ERROR: Interest band and reference band have "\
            "different time binnings. Code cannot currently cope with that."

    ## BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    ## documented replacement (also changed below for the averages).
    meta_dict = {'dt': t_res,  ## assumes light curves are binned to desired
                               ## resolution already
                 't_res': t_res,
                 'n_seconds': n_seconds,
                 'df': 1.0 / float(n_seconds),
                 'nyquist': 1.0 / (2.0 * t_res),
                 'n_bins': n_seconds * int(1.0 / t_res),
                 'detchans': 1,  ## only using 1 interest band
                 'exposure': 0,  ## will be computed later
                 'n_seg': 0}     ## will be computed later

    ## Read in from the light curve files, compute power spectra and cross
    ## spectrum
    total_cross, total_ci, total_ref, total_seg = lc_in(interest_file,
                                                        ref_file, meta_dict)

    ## Assign n_seg and exposure in meta_dict
    meta_dict['n_seg'] = total_seg
    meta_dict['exposure'] = meta_dict['dt'] * meta_dict['n_bins'] * \
            meta_dict['n_seg']

    ## Turn running totals into averages
    total_cross /= float(meta_dict['n_seg'])
    total_ci.power /= float(meta_dict['n_seg'])
    total_ci.mean_rate /= float(meta_dict['n_seg'])
    total_ref.power /= float(meta_dict['n_seg'])
    total_ref.mean_rate /= float(meta_dict['n_seg'])

    ## Only keeping the parts associated with positive Fourier frequencies.
    ## Slices end at end-1, and we want to include 'nyq_index'; abs() because
    ## the Nyquist frequency comes out negative and we want it positive here.
    ## BUG FIX: floor division. Under Python 3, '/' yields a float and a
    ## float slice index raises TypeError.
    nyq_index = meta_dict['n_bins'] // 2
    total_cross = total_cross[0:nyq_index + 1]
    total_ci.power = total_ci.power[0:nyq_index + 1]
    total_ci.freq = np.abs(total_ci.freq[0:nyq_index + 1])
    total_ref.power = total_ref.power[0:nyq_index + 1]
    total_ref.freq = np.abs(total_ref.freq[0:nyq_index + 1])

    ## Compute the variance and rms of the absolute-rms-normalized reference
    ## band power spectrum
    absrms_ref_pow = raw_to_absrms(total_ref.power, total_ref.mean_rate,
                                   meta_dict['n_bins'], meta_dict['dt'],
                                   noisy=True)
    total_ref.var, total_ref.rms = var_and_rms(absrms_ref_pow, meta_dict['df'])

    ## Save cross spectrum and power spectra to "*_cs.fits"
    cs_out(out_base, meta_dict, total_cross, total_ci, total_ref)

    ## Computing frequency lags
    ## Negative sign is so that a positive lag is a hard energy lag
    phase = -np.arctan2(total_cross.imag, total_cross.real)
    err_phase = get_phase_err(total_cross, total_ci, total_ref, meta_dict, 1)
    tlag = phase_to_tlags(phase, total_ci.freq)
    err_tlag = phase_to_tlags(err_phase, total_ci.freq)

    ## Save lag-frequency spectrum to "*_lag-freq.fits"
    freq_lag_out(out_base, meta_dict, total_ci.freq, phase, err_phase, tlag,
                 err_tlag, total_ci.mean_rate, total_ref.mean_rate)
################################################################################
if __name__ == "__main__":
    #########################################
    ## Build the command-line interface and
    ## dispatch to 'main'.
    #########################################

    cli_parser = argparse.ArgumentParser(
            usage="python simple_cross_spectra.py "
                  "interest_band_file reference_band_file "
                  "[OPTIONAL ARGUMENTS]",
            description=__doc__,
            epilog="For optional arguments, default values are given in "
                   "brackets at end of description.")

    cli_parser.add_argument('interest_band_file',
            help="The .lc background-subtracted extracted light curve for "
                 "the interest band.")
    cli_parser.add_argument('reference_band_file',
            help="The .lc background-subtracted extracted light curve for "
                 "the reference band. Assumes it has the same time binning "
                 "as the interest band.")
    cli_parser.add_argument('-o', '--out', default="./out", dest='outbase',
            help="The base name for output files. Extension will be "
                 "appended. [./out]")
    cli_parser.add_argument('-n', '--n_sec', type=type_power_of_two,
            default=64, dest='n_seconds',
            help="Number of seconds in each Fourier segment. Must be a "
                 "power of 2, positive, integer. [64]")
    cli_parser.add_argument('--test', type=int, default=0, choices={0, 1},
            dest='test',
            help="Int flag: 0 if computing all segments, 1 if only "
                 "computing one segment for testing. [0]")

    cli_args = cli_parser.parse_args()

    ## Map the 0/1 integer flag onto a boolean (module-level 'test' is also
    ## read by lc_in).
    test = (cli_args.test == 1)

    main(cli_args.interest_band_file, cli_args.reference_band_file,
         out_base=cli_args.outbase, n_seconds=cli_args.n_seconds, test=test)
| [
"numpy.sqrt",
"astropy.table.Table",
"scipy.fftpack.fftfreq",
"numpy.arctan2",
"scipy.fftpack.fft",
"astropy.io.fits.open",
"numpy.int8",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.where",
"numpy.subtract",
"numpy.real",
"subprocess.call",
"numpy.abs",
"numpy.conj",
"argparse.Argu... | [((1664, 1699), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['message'], {}), '(message)\n', (1690, 1699), False, 'import argparse\n'), ((2358, 2370), 'numpy.int8', 'np.int8', (['ext'], {}), '(ext)\n', (2365, 2370), True, 'import numpy as np\n'), ((4602, 4628), 'numpy.sum', 'np.sum', (['(power * df)'], {'axis': '(0)'}), '(power * df, axis=0)\n', (4608, 4628), True, 'import numpy as np\n'), ((6064, 6071), 'astropy.table.Table', 'Table', ([], {}), '()\n', (6069, 6071), False, 'from astropy.table import Table, Column\n'), ((8499, 8515), 'numpy.mean', 'np.mean', (['rate_ci'], {}), '(rate_ci)\n', (8506, 8515), True, 'import numpy as np\n'), ((8540, 8557), 'numpy.mean', 'np.mean', (['rate_ref'], {}), '(rate_ref)\n', (8547, 8557), True, 'import numpy as np\n'), ((8635, 8673), 'numpy.subtract', 'np.subtract', (['rate_ci', 'ci_seg.mean_rate'], {}), '(rate_ci, ci_seg.mean_rate)\n', (8646, 8673), True, 'import numpy as np\n'), ((8698, 8738), 'numpy.subtract', 'np.subtract', (['rate_ref', 'ref_seg.mean_rate'], {}), '(rate_ref, ref_seg.mean_rate)\n', (8709, 8738), True, 'import numpy as np\n'), ((8880, 8909), 'scipy.fftpack.fft', 'fftpack.fft', (['rate_sub_mean_ci'], {}), '(rate_sub_mean_ci)\n', (8891, 8909), False, 'from scipy import fftpack\n'), ((8929, 8959), 'scipy.fftpack.fft', 'fftpack.fft', (['rate_sub_mean_ref'], {}), '(rate_sub_mean_ref)\n', (8940, 8959), False, 'from scipy import fftpack\n'), ((9579, 9629), 'numpy.zeros', 'np.zeros', (["meta_dict['n_bins']"], {'dtype': 'np.complex128'}), "(meta_dict['n_bins'], dtype=np.complex128)\n", (9587, 9629), True, 'import numpy as np\n'), ((14326, 14333), 'astropy.table.Table', 'Table', ([], {}), '()\n', (14331, 14333), False, 'from astropy.table import Table, Column\n'), ((20481, 20499), 'numpy.real', 'np.real', (['coherence'], {}), '(coherence)\n', (20488, 20499), True, 'import numpy as np\n'), ((26508, 26536), 'numpy.float', 'np.float', (["meta_dict['n_seg']"], {}), 
"(meta_dict['n_seg'])\n", (26516, 26536), True, 'import numpy as np\n'), ((26559, 26587), 'numpy.float', 'np.float', (["meta_dict['n_seg']"], {}), "(meta_dict['n_seg'])\n", (26567, 26587), True, 'import numpy as np\n'), ((26614, 26642), 'numpy.float', 'np.float', (["meta_dict['n_seg']"], {}), "(meta_dict['n_seg'])\n", (26622, 26642), True, 'import numpy as np\n'), ((26666, 26694), 'numpy.float', 'np.float', (["meta_dict['n_seg']"], {}), "(meta_dict['n_seg'])\n", (26674, 26694), True, 'import numpy as np\n'), ((26722, 26750), 'numpy.float', 'np.float', (["meta_dict['n_seg']"], {}), "(meta_dict['n_seg'])\n", (26730, 26750), True, 'import numpy as np\n'), ((27166, 27204), 'numpy.abs', 'np.abs', (['total_ci.freq[0:nyq_index + 1]'], {}), '(total_ci.freq[0:nyq_index + 1])\n', (27172, 27204), True, 'import numpy as np\n'), ((27281, 27320), 'numpy.abs', 'np.abs', (['total_ref.freq[0:nyq_index + 1]'], {}), '(total_ref.freq[0:nyq_index + 1])\n', (27287, 27320), True, 'import numpy as np\n'), ((28696, 28954), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""python simple_cross_spectra.py interest_band_file reference_band_file [OPTIONAL ARGUMENTS]"""', 'description': '__doc__', 'epilog': '"""For optional arguments, default values are given in brackets at end of description."""'}), "(usage=\n 'python simple_cross_spectra.py interest_band_file reference_band_file [OPTIONAL ARGUMENTS]'\n , description=__doc__, epilog=\n 'For optional arguments, default values are given in brackets at end of description.'\n )\n", (28719, 28954), False, 'import argparse\n'), ((818, 852), 'numpy.zeros', 'np.zeros', (['n_bins'], {'dtype': 'np.float64'}), '(n_bins, dtype=np.float64)\n', (826, 852), True, 'import numpy as np\n'), ((925, 954), 'scipy.fftpack.fftfreq', 'fftpack.fftfreq', (['n_bins'], {'d': 'dt'}), '(n_bins, d=dt)\n', (940, 954), False, 'from scipy import fftpack\n'), ((2461, 2481), 'astropy.io.fits.open', 'fits.open', (['fits_file'], {}), '(fits_file)\n', (2470, 
2481), False, 'from astropy.io import fits\n'), ((4663, 4680), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (4670, 4680), True, 'import numpy as np\n'), ((5962, 6003), 'subprocess.call', 'subprocess.call', (["['mkdir', '-p', out_dir]"], {}), "(['mkdir', '-p', out_dir])\n", (5977, 6003), False, 'import subprocess\n'), ((6097, 6146), 'astropy.table.Column', 'Column', ([], {'data': 'ci.freq', 'name': '"""FREQUENCY"""', 'unit': '"""Hz"""'}), "(data=ci.freq, name='FREQUENCY', unit='Hz')\n", (6103, 6146), False, 'from astropy.table import Table, Column\n'), ((6173, 6206), 'astropy.table.Column', 'Column', ([], {'data': 'cs_avg', 'name': '"""CROSS"""'}), "(data=cs_avg, name='CROSS')\n", (6179, 6206), False, 'from astropy.table import Table, Column\n'), ((6233, 6271), 'astropy.table.Column', 'Column', ([], {'data': 'ci.power', 'name': '"""POWER_CI"""'}), "(data=ci.power, name='POWER_CI')\n", (6239, 6271), False, 'from astropy.table import Table, Column\n'), ((6298, 6338), 'astropy.table.Column', 'Column', ([], {'data': 'ref.power', 'name': '"""POWER_REF"""'}), "(data=ref.power, name='POWER_REF')\n", (6304, 6338), False, 'from astropy.table import Table, Column\n'), ((6455, 6469), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6467, 6469), False, 'from datetime import datetime\n'), ((7998, 8015), 'numpy.shape', 'np.shape', (['rate_ci'], {}), '(rate_ci)\n', (8006, 8015), True, 'import numpy as np\n'), ((8150, 8168), 'numpy.shape', 'np.shape', (['rate_ref'], {}), '(rate_ref)\n', (8158, 8168), True, 'import numpy as np\n'), ((9034, 9058), 'numpy.absolute', 'np.absolute', (['fft_data_ci'], {}), '(fft_data_ci)\n', (9045, 9058), True, 'import numpy as np\n'), ((9084, 9109), 'numpy.absolute', 'np.absolute', (['fft_data_ref'], {}), '(fft_data_ref)\n', (9095, 9109), True, 'import numpy as np\n'), ((9217, 9238), 'numpy.conj', 'np.conj', (['fft_data_ref'], {}), '(fft_data_ref)\n', (9224, 9238), True, 'import numpy as np\n'), ((9735, 9760), 
'astropy.table.Table.read', 'Table.read', (['interest_file'], {}), '(interest_file)\n', (9745, 9760), False, 'from astropy.table import Table, Column\n'), ((9891, 9911), 'astropy.table.Table.read', 'Table.read', (['ref_file'], {}), '(ref_file)\n', (9901, 9911), False, 'from astropy.table import Table, Column\n'), ((14224, 14265), 'subprocess.call', 'subprocess.call', (["['mkdir', '-p', out_dir]"], {}), "(['mkdir', '-p', out_dir])\n", (14239, 14265), False, 'import subprocess\n'), ((14359, 14405), 'astropy.table.Column', 'Column', ([], {'data': 'freq', 'name': '"""FREQUENCY"""', 'unit': '"""Hz"""'}), "(data=freq, name='FREQUENCY', unit='Hz')\n", (14365, 14405), False, 'from astropy.table import Table, Column\n'), ((14432, 14483), 'astropy.table.Column', 'Column', ([], {'data': 'phase', 'name': '"""PHASE_LAG"""', 'unit': '"""radian"""'}), "(data=phase, name='PHASE_LAG', unit='radian')\n", (14438, 14483), False, 'from astropy.table import Table, Column\n'), ((14510, 14569), 'astropy.table.Column', 'Column', ([], {'data': 'err_phase', 'name': '"""PHASE_LAG_ERR"""', 'unit': '"""radian"""'}), "(data=err_phase, name='PHASE_LAG_ERR', unit='radian')\n", (14516, 14569), False, 'from astropy.table import Table, Column\n'), ((14628, 14672), 'astropy.table.Column', 'Column', ([], {'data': 'tlag', 'name': '"""TIME_LAG"""', 'unit': '"""s"""'}), "(data=tlag, name='TIME_LAG', unit='s')\n", (14634, 14672), False, 'from astropy.table import Table, Column\n'), ((14699, 14751), 'astropy.table.Column', 'Column', ([], {'data': 'err_tlag', 'name': '"""TIME_LAG_ERR"""', 'unit': '"""s"""'}), "(data=err_tlag, name='TIME_LAG_ERR', unit='s')\n", (14705, 14751), False, 'from astropy.table import Table, Column\n'), ((14841, 14855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14853, 14855), False, 'from datetime import datetime\n'), ((20304, 20329), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (20315, 20329), True, 'import numpy as np\n'), 
((20351, 20393), 'numpy.where', 'np.where', (['(powers != 0)', '(crosses / powers)', '(0)'], {}), '(powers != 0, crosses / powers, 0)\n', (20359, 20393), True, 'import numpy as np\n'), ((22654, 22679), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (22665, 22679), True, 'import numpy as np\n'), ((23429, 23444), 'numpy.shape', 'np.shape', (['phase'], {}), '(phase)\n', (23437, 23444), True, 'import numpy as np\n'), ((23448, 23459), 'numpy.shape', 'np.shape', (['f'], {}), '(f)\n', (23456, 23459), True, 'import numpy as np\n'), ((23611, 23626), 'numpy.shape', 'np.shape', (['phase'], {}), '(phase)\n', (23619, 23626), True, 'import numpy as np\n'), ((23630, 23641), 'numpy.shape', 'np.shape', (['f'], {}), '(f)\n', (23638, 23641), True, 'import numpy as np\n'), ((23736, 23761), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (23747, 23761), True, 'import numpy as np\n'), ((23779, 23825), 'numpy.where', 'np.where', (['(f != 0)', '(phase / (2.0 * np.pi * f))', '(0)'], {}), '(f != 0, phase / (2.0 * np.pi * f), 0)\n', (23787, 23825), True, 'import numpy as np\n'), ((27941, 27987), 'numpy.arctan2', 'np.arctan2', (['total_cross.imag', 'total_cross.real'], {}), '(total_cross.imag, total_cross.real)\n', (27951, 27987), True, 'import numpy as np\n'), ((20265, 20284), 'numpy.conj', 'np.conj', (['cross_spec'], {}), '(cross_spec)\n', (20272, 20284), True, 'import numpy as np\n'), ((22709, 22806), 'numpy.where', 'np.where', (['(coherence != 0)', "((1 - coherence) / (2 * coherence * n_range * meta_dict['n_seg']))", '(0)'], {}), "(coherence != 0, (1 - coherence) / (2 * coherence * n_range *\n meta_dict['n_seg']), 0)\n", (22717, 22806), True, 'import numpy as np\n'), ((23582, 23597), 'numpy.shape', 'np.shape', (['phase'], {}), '(phase)\n', (23590, 23597), True, 'import numpy as np\n'), ((25706, 25725), 'numpy.float', 'np.float', (['n_seconds'], {}), '(n_seconds)\n', (25714, 25725), True, 'import numpy as np\n'), 
((3825, 3841), 'numpy.float', 'np.float', (['n_bins'], {}), '(n_bins)\n', (3833, 3841), True, 'import numpy as np\n'), ((23561, 23576), 'numpy.shape', 'np.shape', (['phase'], {}), '(phase)\n', (23569, 23576), True, 'import numpy as np\n')] |
import streamlit as st
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from .generic import Tool
# Iris measurements as a plain DataFrame; columns are the 4 numeric feature indices.
iris = pd.DataFrame(load_iris()["data"])
# Demo frame of random values with generated column names 'col 0' .. 'col 19'.
df = pd.DataFrame(
    np.random.randn(50, 20),
    columns=('col %d' % i for i in range(20)))
# st.dataframe(df) # Same as st.write(df)
class Dataframe(Tool):
    """Tool that renders tabular input (e.g. CSV) as a dataframe in the UI."""
    name = "Dataframes"
    description = """
    Render tabular input in formats like [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) to [dataframes](https://databricks.com/glossary/what-are-dataframes).
    """
    # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html.
    def __init__(self):
        # No input yet; filled in later by the UI layer.
        self.text = ""
    def make_examples(self):
        """Return the named example datasets shown to the user."""
        examples = {
            "Example 1": iris,
            "Example 2": pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"),
        }
        return examples
    def make_input(self):
        """No extra input widgets are needed for this tool."""
        pass
    def make_config(self):
        """Collect display options and stash them in the session config."""
        st.session_state.config = dict(
            use_container_width=st.checkbox("Use container width")
        )
    def make_output(self):
        """Render the iris example; the container-width option is read but currently unused."""
        use_container_width = st.session_state.config["use_container_width"]
        st.write(iris)  # , use_container_width=use_container_width)
        # st.write(st.session_state)
# st.write(st.session_state)
| [
"sklearn.datasets.load_iris",
"streamlit.checkbox",
"pandas.read_csv",
"streamlit.write",
"numpy.random.randn"
] | [((194, 217), 'numpy.random.randn', 'np.random.randn', (['(50)', '(20)'], {}), '(50, 20)\n', (209, 217), True, 'import numpy as np\n'), ((150, 161), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (159, 161), False, 'from sklearn.datasets import load_iris\n'), ((1000, 1034), 'streamlit.checkbox', 'st.checkbox', (['"""Use container width"""'], {}), "('Use container width')\n", (1011, 1034), True, 'import streamlit as st\n'), ((1252, 1266), 'streamlit.write', 'st.write', (['iris'], {}), '(iris)\n', (1260, 1266), True, 'import streamlit as st\n'), ((799, 891), 'pandas.read_csv', 'pd.read_csv', (['"""https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"""'], {}), "(\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')\n", (810, 891), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
"""
一些基础的类和函数
注意, import关系需要能够拓扑排序(不要相互调用).
"""
# 加载不应该被COPY的包
import io2 as io
import deap
from deap import algorithms, base, creator, gp, tools
from prettytable import PrettyTable
# COPY #
import copy
import random
import warnings
import sys
import pdb
import inspect
import shutil
import os
import time
import argparse
import datetime
import collections
import traceback
import math
import subprocess
import yaml
import multiprocessing
from itertools import repeat
from functools import partial
from copy import deepcopy
# Disable NumPy's built-in multithreading so that a single process uses at
# most 100% CPU. This MUST run before `import numpy` to take effect.
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
# 加载外部包
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
class EvalInfo:
    """Bundles the information needed to type-check a node: shape, unit and return type."""
    def __init__(self, shape, unit, ret_type):
        """`shape` should be a tuple, while `unit` should be an np.ndarray."""
        self.shape, self.unit, self.ret_type = shape, unit, ret_type
class Individual:
    """A single candidate (individual) in the genetic-programming population."""
    def __init__(self, expr=None, fitness=None, fitness_raw=None, pnl=None, turnover=None):
        self.expr = expr
        self.fitness = fitness
        self.fitness_raw = fitness_raw
        self.pnl = pnl
        self.turnover = turnover
        # Free-form statistics collected about this individual during evolution.
        self.stats = {}
class IllegalSyntex(Exception):
    """Raised when an expression fails the syntax check. (Name kept as-is; 'Syntax' is misspelled historically.)"""
    pass
class Array2d:
    """a symbolic class, only used for STGP."""
    pass
class Array2dNeutralise:
    """Represents the neutralisation input (X_NEUTRALISE): the neutralisation
    parameters depend on the input data, so they must be passed in as an
    argument, represented by this type."""
    pass
class Array2dValid:
    """Same idea as Array2dNeutralise; represents validity masks such as UnivTOP4000.valid."""
    pass
class Array3d:
    """a symbolic class, only used for STGP."""
    pass
class Ephemeral:
    """a class representing ephemeral constants."""
    pass
class FinalResult:
    """a class representing the final result, usually generated from ewma."""
    pass
# 修改 ###################################################################
# 可以在这里添加自定义STGP类, 和需要被COPY的自定义函数.
# 修改 ###################################################################
def check_same_unit(unit_1, unit_2, epsilon=0.001):
    """Check whether two units are numerically similar.

    Compares the Chebyshev (max-absolute) distance between the two unit arrays
    against a tolerance.

    :param unit_1: first unit, array-like of exponents
    :param unit_2: second unit, same shape as `unit_1`
    :param epsilon: tolerance on the Chebyshev distance; defaults to 0.001,
        the previously hard-coded value (backward compatible)
    :return: True if the units agree within tolerance, else False
    """
    return bool(np.max(np.abs(unit_1 - unit_2)) <= epsilon)
def replace_inf(arr):
    """Return a copy of `arr` with +/-inf entries replaced by NaN."""
    cleaned = arr.copy()
    cleaned[np.isinf(cleaned)] = np.nan
    return cleaned
def mask(arr):
    """Return a boolean array that is True wherever `arr` is not NaN."""
    return np.logical_not(np.isnan(arr))
def imposter(arr):
    """Return an array with the same shape and dtype as `arr`, filled with NaN."""
    return np.full(arr.shape, np.nan, dtype=arr.dtype)
def ts_delay(arr, window=1, axis=0):
    """delay by window along an axis. the first/last window rows are filled with nan. """
    ret = arr.copy()
    if window >= 0: # if window == 0, returns exactly the input
        # Shift forward: positions [window:] receive the values of [:-window] ...
        slc1 = [slice(None)] * len(arr.shape)
        slc1[axis] = slice(window, arr.shape[axis])
        slc2 = [slice(None)] * len(arr.shape)
        slc2[axis] = slice(0, arr.shape[axis] - window)
        slc3 = [slice(None)] * len(arr.shape)
        slc3[axis] = slice(0, window)
        ret[tuple(slc1)] = ret[tuple(slc2)]
        # ... and the first `window` rows have no history, so fill them with NaN.
        ret[tuple(slc3)] = np.nan
    else: # delay by negative, fetching future data
        # Shift backward: positions [:window] receive the values of [-window:]
        # (window is negative here) ...
        slc1 = [slice(None)] * len(arr.shape)
        slc1[axis] = slice(-window, arr.shape[axis])
        slc2 = [slice(None)] * len(arr.shape)
        slc2[axis] = slice(0, window)
        slc3 = [slice(None)] * len(arr.shape)
        slc3[axis] = slice(window, arr.shape[axis])
        ret[tuple(slc2)] = ret[tuple(slc1)]
        # ... and the trailing |window| rows have no future data, so NaN them.
        ret[tuple(slc3)] = np.nan
    return ret
def rolling(arr, window, f, axis=0):
    """
    Rolling computation with NumPy and a Python loop. Note: np.nanxxx is much slower than np.xxx.
    :param f: a function which accepts array and axis as the first two arguments, e.g. np.nanstd
    """
    n = arr.shape[axis]
    slc = [slice(None)] * arr.ndim
    pieces = []
    for t in range(n):
        # Window covers [max(t - window + 1, 0), t], i.e. expanding at the start.
        slc[axis] = slice(max(t - window + 1, 0), t + 1)
        pieces.append(f(arr[tuple(slc)], axis))
    return np.stack(pieces, axis=axis)
def rolling_cross(x, y, window, f, axis):
    """
    Rolling computation over two aligned arrays.
    :param f: a function which accepts two arrays and axis as the first three arguments, e.g. cal_pearson_r
    """
    pieces = []
    slc = [slice(None)] * x.ndim
    for t in range(x.shape[axis]):
        # Same expanding-then-sliding window as in rolling().
        slc[axis] = slice(max(t - window + 1, 0), t + 1)
        window_x = x[tuple(slc)]
        window_y = y[tuple(slc)]
        pieces.append(f(window_x, window_y, axis=axis))
    return np.stack(pieces, axis=axis)
def ts_quantile_aux(arr, axis, standardize):
    """Helper for ts_quantile; passed as `f` into rolling().

    Ranks the last row of the window against the whole window. The `axis`
    argument is ignored -- axis 0 is always used.
    """
    last = arr[-1, ...]
    ranks = (last[np.newaxis, ...] > arr).sum(0).astype('float')
    ranks[np.isnan(last)] = np.nan
    if standardize:
        # Normalise by the count of non-NaN observations in the window.
        ranks = ranks / mask(arr).sum(0)
    return ranks
def rank(arr, axis=1, method='average'):
    """Rank along an axis, starting at zero; NaN entries stay NaN."""
    ranked = stats.rankdata(arr, method=method, axis=axis).astype('float')
    # rankdata gives NaNs the largest ranks -- blank them back out.
    ranked[np.isnan(arr)] = np.nan
    ranked -= 1
    return ranked
#def cal_pearson_r(x, y, axis=0):
# """calculate Pearson correlation coefficient along an axis."""
# x = x.copy() # 关键的步骤, 如果不进行会导致对数据进行inplace的修改, 最终nan充满整个数组.
# y = y.copy()
# nanmask = (np.isnan(x) | np.isnan(y)) # make x and y have the same nan values
# x[nanmask] = np.nan
# y[nanmask] = np.nan
# x = x - np.nanmean(x, axis=axis, keepdims=True)
# y = y - np.nanmean(y, axis=axis, keepdims=True)
# result = np.nansum(x * y, axis) / np.sqrt(np.nansum(x ** 2, axis) * np.nansum(y ** 2, axis))
# return result
def cal_pearson_r(x, y, axis=0):
    """Pearson correlation of two NaN-filtered column vectors.

    Rows where either input is NaN are dropped before correlating. Returns
    np.nan when fewer than 2 valid rows remain, and 0.0 when either side has
    zero variance.

    NOTE(review): the row-wise ``np.hstack``/NaN filter assumes 2-D column
    vectors (shape (n, 1)); the commented-out version above handled general
    2-D inputs along an axis. Some call sites (cal_ic_series, rolling_cross)
    pass wider 2-D matrices with axis=1 -- verify they are compatible with
    this implementation.
    """
    xy = np.hstack((x, y))
    isnan = np.isnan(xy).any(axis=1)
    _x = x[np.ix_(~isnan)]
    _y = y[np.ix_(~isnan)]
    if _x.shape[0] < 2:
        return np.nan
    else:
        _x = _x - np.mean(_x, axis=axis, keepdims=True)
        _y = _y - np.mean(_y, axis=axis, keepdims=True)
        if np.allclose(_x, 0) or np.allclose(_y, 0):
            return 0.0  # zero variance on either side -> correlation defined as 0 here
        else:
            res = np.sum(_x*_y) / np.sqrt(np.sum(_x**2, axis)) / np.sqrt(np.sum(_y**2, axis))
            return res[0]
def cal_cov(x, y, axis=0):
    """Covariance along an axis, pairwise-complete: a NaN in either input masks both."""
    # Copy first -- without this the in-place NaN masking below would corrupt
    # the caller's arrays and eventually fill them with NaN.
    x, y = x.copy(), y.copy()
    nanmask = np.isnan(x) | np.isnan(y)
    x[nanmask] = np.nan
    y[nanmask] = np.nan
    x = x - np.nanmean(x, axis=axis, keepdims=True)
    y = y - np.nanmean(y, axis=axis, keepdims=True)
    # Normalise by the count of jointly valid observations.
    return np.nansum(x * y, axis) / (~nanmask).sum(axis, keepdims=True)
#def load_data_2d(fields, f_load_data):
# """读取数据. f_load_data中已经包含了start_date的信息. """
# data = dict()
# for field in fields:
# field_ = field.split('.')[-1]
# data[field_] = f_load_data(field)
# return data
def load_data_2d(fields, f_load_data):
    """Load 2-D data fields into a dict of NumPy arrays.

    Keys are the field names with '.' replaced by '_'; `f_load_data` already
    encodes the start_date.
    """
    return {field.replace('.', '_'): f_load_data(field).to_numpy()
            for field in fields}
def load_tradedate(field, f_load_data):
    """Return the trade-date index of the given field."""
    data = f_load_data(field)
    return data.index
def alpha_to_weights(alpha):
    """Normalise alpha into weights: demean each row, then scale so the
    positive side sums to +1 and the negative side to -1 per row (the
    cross-sectional absolute sum is 2)."""
    alpha = alpha - np.nanmean(alpha, axis=1, keepdims=True)
    mask_pos = (alpha > 0)
    mask_neg = (alpha < 0)
    # Scale the positive side to sum to +1 per row.
    alpha_pos = imposter(alpha)
    alpha_pos[mask_pos] = alpha[mask_pos]
    alpha_pos = alpha_pos / np.nansum(alpha_pos, 1, keepdims=True)
    # Scale the negative side to sum to -1 per row.
    alpha_neg = imposter(alpha)
    alpha_neg[mask_neg] = alpha[mask_neg]
    alpha_neg = -alpha_neg / np.nansum(alpha_neg, 1, keepdims=True)
    alpha[mask_pos] = alpha_pos[mask_pos]
    alpha[mask_neg] = alpha_neg[mask_neg]
    return alpha
# ENDCOPY # 以下为提交时不需要的函数
# 修改 ###################################################################
# 可以在这里添加不应该或不需要被COPY的自定义函数(如可能导致cython编译出现问题)
# 修改 ###################################################################
def get_eval_info(expr, dict_operators, dict_data_eval_info):
    """
    Try to compute the EvalInfo of the root node of `expr`.

    If the expression is legal, returns the root node's EvalInfo; otherwise an
    IllegalSyntex exception is raised somewhere during the recursive walk and
    propagates outward (callers such as check_syntax catch it).

    The original check_syntax had poor code hygiene, so this is a cleaner,
    recursive version that deliberately avoids eval() (which should be avoided
    in any situation). The expression list is a depth-first traversal of the
    tree, so we recurse: for a primitive node, first compute the EvalInfo of
    each of its subtrees, then call the operator's own checking function on
    them. A node with zero arity is a terminal and returns its own EvalInfo
    (data terminals are explicitly defined; an Ephemeral returns its raw
    value, which only matters for operators such as SIGNED_POWER that use it
    when computing units). Syntax errors surface as IllegalSyntex raised by
    the operator's checking function.

    TODO: the code that evaluates alpha values may also need updating
    TODO: some work is recomputed here, so this may be inefficient
    :param expr: list, holds tree expression from DFS
    """
    if expr[0].arity > 0: # primitive
        eval_info_of_subtrees = []
        begin = 1
        while True:
            # Locate the next subtree; this mirrors gp.PrimitiveTree.searchSubtree.
            end = begin + 1
            total = expr[begin].arity
            while total > 0:
                total += expr[end].arity - 1
                end += 1
            # Loop done: [begin, end) is the subtree span, and `end` rests at the
            # start of the next subtree (or at the end of the list).
            eval_info_of_subtrees.append(get_eval_info(expr[begin: end], dict_operators, dict_data_eval_info))
            begin = end
            if end == len(expr): # reached the end of the list
                break
        f = dict_operators[expr[0].name][0]
        return f(*eval_info_of_subtrees)
    else: # terminal, could be data or ephemeral
        if expr[0].ret == Ephemeral:
            return expr[0].value # an Ephemeral returns its raw value; operators such as SIGNED_POWER need it to compute units
        else: # data
            return dict_data_eval_info[expr[0].value] # .value returns e.g. 'OPEN', while .name returns 'ARG0'
def check_syntax(expr, dict_operators, dict_data_eval_info):
    """Return True when the expression list `expr` is syntactically legal.

    Legality is decided by attempting to compute the root node's EvalInfo; an
    IllegalSyntex raised anywhere during that walk means the expression is
    invalid. (Fix: the computed EvalInfo was previously bound to an unused
    local variable; only reachability matters here.)
    """
    try:
        get_eval_info(expr, dict_operators, dict_data_eval_info)
        return True
    except IllegalSyntex:
        return False
def compare_subtree(expr1, expr2, dict_operators, dict_data_eval_info):
    """
    Check whether two subtrees agree on return type, shape and units --
    a precondition for performing crossover or mutation between them.
    """
    # Cheap check first: the root return types must match.
    if expr1[0].ret != expr2[0].ret:
        return False
    info1 = get_eval_info(expr1, dict_operators, dict_data_eval_info)
    info2 = get_eval_info(expr2, dict_operators, dict_data_eval_info)
    # Shape and unit comparison; Ephemeral results lack these attributes.
    try:
        if info1.shape != info2.shape or check_same_unit(info1.unit, info2.unit) == False:
            return False
        return True
    except AttributeError:
        return False
def find_primitive(pset, return_type, name):
    """Find a primitive with the given name in `pset`; print a message and return None if absent."""
    candidates = (op for op in pset.primitives[return_type] if op.name == name)
    found = next(candidates, None)
    if found is None:
        print("Primitive not found!")
    return found
def find_terminal(pset, return_type, value=None):
    """Find a terminal in `pset` by value; print a message and return None if absent.

    For the Ephemeral return type any entry matches: Ephemeral terminals have
    no `value` attribute, hence the short-circuit before comparing `op.value`.
    Ephemeral entries are classes and must be instantiated; other terminals
    are already usable objects.
    """
    for op in pset.terminals[return_type]:
        if return_type == Ephemeral or op.value == value:
            return op() if inspect.isclass(op) else op
    print("Terminal not found!")
    return None
def cal_pool_corr(pnl, pnl_pool):
    """
    Maximum Pearson correlation between one alpha's pnl and a pool of pnls.
    To stay consistent with asim, n_days must be identical on both sides
    (500 days) and the timestamps must be aligned.
    :param pnl: ndarray with shape (n_days,)
    :param pnl_pool: ndarray with shape (n_days, n_alphas)
    """
    # np.broadcast_to is slightly faster than repeat here.
    maxcorr = np.max(cal_pearson_r(np.broadcast_to(pnl[:, np.newaxis], pnl_pool.shape), pnl_pool, axis=0))
    return maxcorr
def maxcorr_with_pool(pnl, pnl_pool):
    """Column-by-column variant of cal_pool_corr: correlate `pnl` with each
    column of `pnl_pool` (both reshaped to column vectors) and return the
    maximum; with a 1-D pool, return the single correlation."""
    res = []
    x = pnl.reshape(len(pnl), 1)
    if len(pnl_pool.shape) > 1:
        for i in range(pnl_pool.shape[1]):
            y = pnl_pool[:, i].reshape(len(pnl), 1)
            res.append(cal_pearson_r(x, y))
        return max(res)
    else:
        return cal_pearson_r(x, pnl_pool.reshape(len(pnl), 1))
def expr_to_str(expr):
    """Render a DEAP expression list as its string formula."""
    tree = gp.PrimitiveTree(expr)
    return str(tree)
def cal_ic_series(alpha, future_returns, ranked=True):
    """
    calculate time series of information coefficient
    :param alpha: 2-D array (days x instruments)
    :param future_returns: 2-D array aligned with `alpha`
    :param ranked: when True, compute rank IC (both inputs are ranked
        cross-sectionally first)
    NOTE(review): relies on cal_pearson_r handling 2-D inputs with axis=1;
    the current cal_pearson_r implementation looks column-vector oriented --
    verify compatibility.
    """
    if ranked: # calculate rank IC
        alpha = rank(alpha, axis=1)
        future_returns = rank(future_returns, axis=1)
    ic_series = cal_pearson_r(alpha, future_returns, axis=1)
    return ic_series
def cal_pnl_series(alpha, today_returns):
    """
    Compute the pnl series -- really just a daily return series, since it
    differs from pnl only by the (constant) capital.
    :param alpha: 2-D ndarray whose values can be interpreted as weights,
        e.g. the positive and the negative side each sum to 1 per day.
    :param today_returns: 2-D array; the row aligned with `alpha` holds that
        day's returns (alpha itself is computed from delayed data).
    :return: the strategy's daily return series
    """
    return np.nansum(ts_delay(alpha, 1) * today_returns, 1) / 2 # long and short, so divide by 2; note all-NaN days become 0.
# basic functions used in mutation and crossover
def exception_printer(k, name):
    """Hook for reporting caught exceptions; currently a no-op (printing was too noisy during testing)."""
    # Previously: if k == 0: print(f"=====Catch exception in function {name}=====")
    pass
def compare(ind1, ind2, cxpoint1, cxpoint2, dict_operators, dict_data_eval_info):
    """Check whether the subtrees rooted at cxpoint1/cxpoint2 can be exchanged.

    Returns a tuple ``(ok, len_subtree1, len_subtree2)``; the lengths are 0
    when the points are not exchangeable.
    """
    tree1, tree2 = gp.PrimitiveTree(ind1), gp.PrimitiveTree(ind2)
    root1, root2 = ind1[cxpoint1], ind2[cxpoint2]
    # Eliminate the situation where a leaf (terminal) is selected as "subtree".
    # BUG FIX: this previously compared ``type(root) == 'deap.gp.Terminal'``,
    # i.e. a type object against a string, which is always False and silently
    # disabled the guard. Use isinstance against the actual class instead.
    if isinstance(root1, gp.Terminal) and isinstance(root2, gp.Terminal):
        return (False, 0, 0)
    slice1, slice2 = tree1.searchSubtree(cxpoint1), tree2.searchSubtree(cxpoint2)
    sublst1, sublst2 = ind1[slice1], ind2[slice2]
    # Only consider crossover when both subtrees have height >= 1 and agree
    # on return type, shape and units.
    if compare_subtree(sublst1, sublst2, dict_operators, dict_data_eval_info) and \
       gp.PrimitiveTree(sublst1).height >= 1 and gp.PrimitiveTree(sublst2).height >= 1:
        return (True, len(sublst1), len(sublst2))
    else:
        return (False, 0, 0)
def pw(text, log):
    """Print `text` and append it to `log`.

    write() adds no trailing newline, so print() is told not to either --
    console output and the log stay byte-identical.
    """
    log.write(text)
    print(text, end='')
def get_population_statistics(population):
    """Summarise the fitness distribution of a population as printable text.

    Every individual must already have a `fitness`; inf/NaN fitnesses are
    treated as invalid and excluded from the statistics.
    """
    finite = [ind.fitness for ind in population
              if not (np.isinf(ind.fitness) or np.isnan(ind.fitness))]
    fitness_list = np.sort(np.array(finite))
    n_valid = len(fitness_list)
    text = f'population size: {len(population)}, valid individual size: {n_valid}\n'
    if n_valid == 0:
        return text
    # Empirical quartiles by index into the sorted fitness values.
    q25, q50, q75 = (fitness_list[int(n_valid * q)] for q in (0.25, 0.5, 0.75))
    statistics = [
        np.mean(fitness_list),
        np.std(fitness_list),
        stats.skew(fitness_list),
        stats.kurtosis(fitness_list),
        fitness_list[0],
        q25,
        q50,
        q75,
        fitness_list[-1],
    ]
    text += 'MEAN:{:4.2f} STD :{:4.2f} SKEW:{:4.2f} KURT:{:4.2f}\n' \
            'MIN :{:4.2f} QT25:{:4.2f} QT50:{:4.2f} QT75:{:4.2f} MAX :{:4.2f}\n'.format(*statistics)
    return text
def table_population_statistics(population, title):
    """Build a PrettyTable summarising fitness (Value1) and |fitness_raw|
    (Value2) over a population. Individuals must already have a `fitness`;
    inf/NaN fitnesses are excluded."""
    fitness_list = [(x.fitness, abs(x.fitness_raw)) for x in population if not (np.isinf(x.fitness) or np.isnan(x.fitness))]
    fitness_list.sort(key=lambda x:x[0])
    fitness_list = np.array(fitness_list)
    # ttc = total count, vac = valid (finite-fitness) count.
    ttc, vac = len(population), len(fitness_list)
    if vac > 0:
        q25, q50, q75 = fitness_list[int(vac*0.25), :], fitness_list[int(vac*0.5), :], fitness_list[int(vac*0.75), :]
        mm, ss, mi, ma = np.mean(fitness_list, axis=0), np.std(fitness_list, axis=0), fitness_list[0, :], fitness_list[-1, :]
    else:
        # With no valid individuals every statistic falls back to a NaN pair.
        q25, q50, q75 = ([np.nan]*2, ) * 3
        mm, ss, ma, mi= ([np.nan]*2, ) * 4
    table = PrettyTable()
    table.title = title
    table.field_names = ['No.', 'Stats', 'Value1', 'Value2']
    table.add_row(['0', 'mean', f'{mm[0]:.2f}', f'{mm[1]:.2f}'])
    table.add_row(['1', 'std', f'{ss[0]:.2f}', f'{ss[1]:.2f}'])
    table.add_row(['2', 'max', f'{ma[0]:.2f}', f'{ma[1]:.2f}'])
    table.add_row(['3', 'Q75', f'{q75[0]:.2f}', f'{q75[1]:.2f}'])
    table.add_row(['4', 'Q50', f'{q50[0]:.2f}', f'{q50[1]:.2f}'])
    table.add_row(['5', 'Q25', f'{q25[0]:.2f}', f'{q25[1]:.2f}'])
    table.add_row(['6', 'min', f'{mi[0]:.2f}', f'{mi[1]:.2f}'])
    table.add_row(['7', 'ttCount', f'{ttc:.0f}', f'{ttc:.0f}'])
    table.add_row(['8', 'vaCount', f'{vac:.0f}', f'{vac:.0f}'])
    return table
def cal_frequent_subtrees(population, hof_num=10):
    '''
    Calculate frequently appeared subtrees (with certain operations and data).
    The output will be used in subtreeMT function. Computationally inexpensive.

    NOTE(review): `pd.value_counts` is deprecated in recent pandas, and the
    items being counted are list slices of expressions -- verify that they are
    hashable/countable with the installed pandas version.
    '''
    all_count = []
    for individual in population:
        ind1 = individual.expr
        tree1 = gp.PrimitiveTree(ind1)
        size = len(ind1)
        for cxpoint1 in range(size):
            # Extract the subtree rooted at every possible crossover point.
            t1 = ind1[tree1.searchSubtree(cxpoint1)]
            subtree1 = gp.PrimitiveTree(t1)
            if subtree1.height > 1:
                all_count.append(t1)
    result = pd.value_counts(all_count)
    return result.index[0:hof_num]
def cal_frequent_structure(population, hof_num=10):
    '''
    Calculate frequently appeared structures (with consecutive certain primitives, but the auxiliary data could be arbitrary.)

    NOTE(review): a run of primitives that extends to the very end of an
    individual is never flushed into `all_count` (the append only happens when
    a terminal is reached) -- confirm whether that is intended. Also see the
    `pd.value_counts` caveat in cal_frequent_subtrees.
    '''
    all_count = []
    for individual in population:
        this_count = []
        ind1 = individual.expr
        size = len(ind1)
        for cxpoint1 in range(size):
            if isinstance(ind1[cxpoint1], deap.gp.Primitive):
                this_count.append(ind1[cxpoint1])
            else:
                # Terminal ends the current primitive run; keep runs longer than 2.
                if (len(this_count) > 2):
                    all_count.append(this_count)
                this_count = []
    result = pd.value_counts(all_count)
    return result.index[0:hof_num]
def f_load_data_io(field, data_folder, start_date, end_date):
    """Load a field with the io module's cache readers and clip it to
    [start_date, end_date]. Only used as the `f_load_data` argument of
    load_data_2d; a different f_load_data based on self.get_data is defined
    only at submission time."""
    # Try reading as 2-D first; fall back to 1-D data if that yields nothing.
    data_field = io.read2d_from_asimcache(os.path.join(data_folder, field))[1].to_dataframe()
    if data_field is None:
        data_field = io.read1d_from_asimcache(os.path.join(data_folder, field))[1].to_dataframe()
        if data_field is None:
            raise AttributeError(f'{field} not found')
    data_field = data_field.loc[start_date: end_date]
    return data_field
def safe_regression(X: np.ndarray, Y: np.ndarray):
    """NaN-safe least-squares regression of Y on X (both treated as column vectors).

    Rows where either input is NaN are dropped, and all-zero regressor columns
    are removed before fitting.

    :return: a tuple ``(status, residuals, coef)``: status -1 with
        ``(None, None)`` when no usable samples/columns remain; otherwise
        status 0, residuals aligned to the original Y (NaN where a row was
        dropped) and the lstsq coefficient matrix.
    """
    y = Y.reshape(len(Y), 1)
    x = X.reshape(len(X), 1)
    yx = np.hstack((y, x))
    nan_rows = np.isnan(yx).any(axis=1)
    _y, _x = y[~nan_rows, :], x[~nan_rows, :]
    if _y.shape[0] == 0:
        return (-1, None, None)
    # Drop regressor columns that are identically zero.
    allzero = (_x == 0).all(axis=0)
    _x = _x[:, ~allzero]
    if _x.shape[1] == 0:
        return (-1, None, None)
    _y = _y.reshape(len(_y), 1)
    coef = np.linalg.lstsq(_x, _y, rcond=None)[0]
    eps = np.full(fill_value=np.nan, shape=(Y.shape[0],), dtype=float)
    # BUG FIX: the residual matrix is (k, 1) while the masked target slot is
    # (k,); a direct assignment cannot broadcast, so flatten it first.
    eps[~nan_rows] = (_y - np.dot(_x, coef)).ravel()
    return (0, eps, coef)
"numpy.hstack",
"pandas.value_counts",
"numpy.array",
"numpy.nanmean",
"numpy.mean",
"numpy.full_like",
"inspect.isclass",
"scipy.stats.kurtosis",
"numpy.ix_",
"numpy.stack",
"numpy.dot",
"numpy.linalg.lstsq",
"numpy.isinf",
"deap.gp.PrimitiveTree",
"prettytable.PrettyTable",
"numpy.ab... | [((2907, 2932), 'numpy.full_like', 'np.full_like', (['arr', 'np.nan'], {}), '(arr, np.nan)\n', (2919, 2932), True, 'import numpy as np\n'), ((4449, 4473), 'numpy.stack', 'np.stack', (['ret'], {'axis': 'axis'}), '(ret, axis=axis)\n', (4457, 4473), True, 'import numpy as np\n'), ((4984, 5008), 'numpy.stack', 'np.stack', (['ret'], {'axis': 'axis'}), '(ret, axis=axis)\n', (4992, 5008), True, 'import numpy as np\n'), ((6346, 6363), 'numpy.hstack', 'np.hstack', (['(x, y)'], {}), '((x, y))\n', (6355, 6363), True, 'import numpy as np\n'), ((16355, 16377), 'numpy.array', 'np.array', (['fitness_list'], {}), '(fitness_list)\n', (16363, 16377), True, 'import numpy as np\n'), ((16804, 16817), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (16815, 16817), False, 'from prettytable import PrettyTable\n'), ((18129, 18155), 'pandas.value_counts', 'pd.value_counts', (['all_count'], {}), '(all_count)\n', (18144, 18155), True, 'import pandas as pd\n'), ((18850, 18876), 'pandas.value_counts', 'pd.value_counts', (['all_count'], {}), '(all_count)\n', (18865, 18876), True, 'import pandas as pd\n'), ((19607, 19624), 'numpy.hstack', 'np.hstack', (['(y, x)'], {}), '((y, x))\n', (19616, 19624), True, 'import numpy as np\n'), ((19959, 20019), 'numpy.full', 'np.full', ([], {'fill_value': 'np.nan', 'shape': '(Y.shape[0],)', 'dtype': 'float'}), '(fill_value=np.nan, shape=(Y.shape[0],), dtype=float)\n', (19966, 20019), True, 'import numpy as np\n'), ((2699, 2712), 'numpy.isinf', 'np.isinf', (['ret'], {}), '(ret)\n', (2707, 2712), True, 'import numpy as np\n'), ((2816, 2829), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (2824, 2829), True, 'import numpy as np\n'), ((5234, 5256), 'numpy.isnan', 'np.isnan', (['arr[-1, ...]'], {}), '(arr[-1, ...])\n', (5242, 5256), True, 'import numpy as np\n'), ((5586, 5599), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (5594, 5599), True, 'import numpy as np\n'), ((6414, 6428), 'numpy.ix_', 'np.ix_', (['(~isnan)'], {}), 
'(~isnan)\n', (6420, 6428), True, 'import numpy as np\n'), ((6442, 6456), 'numpy.ix_', 'np.ix_', (['(~isnan)'], {}), '(~isnan)\n', (6448, 6456), True, 'import numpy as np\n'), ((7025, 7036), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (7033, 7036), True, 'import numpy as np\n'), ((7039, 7050), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (7047, 7050), True, 'import numpy as np\n'), ((7156, 7195), 'numpy.nanmean', 'np.nanmean', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (7166, 7195), True, 'import numpy as np\n'), ((7209, 7248), 'numpy.nanmean', 'np.nanmean', (['y'], {'axis': 'axis', 'keepdims': '(True)'}), '(y, axis=axis, keepdims=True)\n', (7219, 7248), True, 'import numpy as np\n'), ((7263, 7285), 'numpy.nansum', 'np.nansum', (['(x * y)', 'axis'], {}), '(x * y, axis)\n', (7272, 7285), True, 'import numpy as np\n'), ((7975, 8015), 'numpy.nanmean', 'np.nanmean', (['alpha'], {'axis': '(1)', 'keepdims': '(True)'}), '(alpha, axis=1, keepdims=True)\n', (7985, 8015), True, 'import numpy as np\n'), ((8177, 8215), 'numpy.nansum', 'np.nansum', (['alpha_pos', '(1)'], {'keepdims': '(True)'}), '(alpha_pos, 1, keepdims=True)\n', (8186, 8215), True, 'import numpy as np\n'), ((8322, 8360), 'numpy.nansum', 'np.nansum', (['alpha_neg', '(1)'], {'keepdims': '(True)'}), '(alpha_neg, 1, keepdims=True)\n', (8331, 8360), True, 'import numpy as np\n'), ((13089, 13111), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['expr'], {}), '(expr)\n', (13105, 13111), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((14133, 14155), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['ind1'], {}), '(ind1)\n', (14149, 14155), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((14157, 14179), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['ind2'], {}), '(ind2)\n', (14173, 14179), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((15511, 15532), 'numpy.mean', 'np.mean', (['fitness_list'], {}), 
'(fitness_list)\n', (15518, 15532), True, 'import numpy as np\n'), ((15543, 15563), 'numpy.std', 'np.std', (['fitness_list'], {}), '(fitness_list)\n', (15549, 15563), True, 'import numpy as np\n'), ((15574, 15598), 'scipy.stats.skew', 'stats.skew', (['fitness_list'], {}), '(fitness_list)\n', (15584, 15598), False, 'from scipy import stats\n'), ((15609, 15637), 'scipy.stats.kurtosis', 'stats.kurtosis', (['fitness_list'], {}), '(fitness_list)\n', (15623, 15637), False, 'from scipy import stats\n'), ((17854, 17876), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['ind1'], {}), '(ind1)\n', (17870, 17876), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((19909, 19944), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['_x', '_y'], {'rcond': 'None'}), '(_x, _y, rcond=None)\n', (19924, 19944), True, 'import numpy as np\n'), ((20047, 20063), 'numpy.dot', 'np.dot', (['_x', 'coef'], {}), '(_x, coef)\n', (20053, 20063), True, 'import numpy as np\n'), ((2550, 2573), 'numpy.abs', 'np.abs', (['(unit_1 - unit_2)'], {}), '(unit_1 - unit_2)\n', (2556, 2573), True, 'import numpy as np\n'), ((5482, 5527), 'scipy.stats.rankdata', 'stats.rankdata', (['arr'], {'method': 'method', 'axis': 'axis'}), '(arr, method=method, axis=axis)\n', (5496, 5527), False, 'from scipy import stats\n'), ((6377, 6389), 'numpy.isnan', 'np.isnan', (['xy'], {}), '(xy)\n', (6385, 6389), True, 'import numpy as np\n'), ((6536, 6573), 'numpy.mean', 'np.mean', (['_x'], {'axis': 'axis', 'keepdims': '(True)'}), '(_x, axis=axis, keepdims=True)\n', (6543, 6573), True, 'import numpy as np\n'), ((6593, 6630), 'numpy.mean', 'np.mean', (['_y'], {'axis': 'axis', 'keepdims': '(True)'}), '(_y, axis=axis, keepdims=True)\n', (6600, 6630), True, 'import numpy as np\n'), ((6643, 6661), 'numpy.allclose', 'np.allclose', (['_x', '(0)'], {}), '(_x, 0)\n', (6654, 6661), True, 'import numpy as np\n'), ((6665, 6683), 'numpy.allclose', 'np.allclose', (['_y', '(0)'], {}), '(_y, 0)\n', (6676, 6683), True, 'import numpy as 
np\n'), ((12079, 12098), 'inspect.isclass', 'inspect.isclass', (['op'], {}), '(op)\n', (12094, 12098), False, 'import inspect\n'), ((12589, 12640), 'numpy.broadcast_to', 'np.broadcast_to', (['pnl[:, np.newaxis]', 'pnl_pool.shape'], {}), '(pnl[:, np.newaxis], pnl_pool.shape)\n', (12604, 12640), True, 'import numpy as np\n'), ((16591, 16620), 'numpy.mean', 'np.mean', (['fitness_list'], {'axis': '(0)'}), '(fitness_list, axis=0)\n', (16598, 16620), True, 'import numpy as np\n'), ((16622, 16650), 'numpy.std', 'np.std', (['fitness_list'], {'axis': '(0)'}), '(fitness_list, axis=0)\n', (16628, 16650), True, 'import numpy as np\n'), ((18019, 18039), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['t1'], {}), '(t1)\n', (18035, 18039), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((19639, 19651), 'numpy.isnan', 'np.isnan', (['yx'], {}), '(yx)\n', (19647, 19651), True, 'import numpy as np\n'), ((14730, 14755), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['sublst1'], {}), '(sublst1)\n', (14746, 14755), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((14772, 14797), 'deap.gp.PrimitiveTree', 'gp.PrimitiveTree', (['sublst2'], {}), '(sublst2)\n', (14788, 14797), False, 'from deap import algorithms, base, creator, gp, tools\n'), ((6743, 6758), 'numpy.sum', 'np.sum', (['(_x * _y)'], {}), '(_x * _y)\n', (6749, 6758), True, 'import numpy as np\n'), ((6798, 6819), 'numpy.sum', 'np.sum', (['(_y ** 2)', 'axis'], {}), '(_y ** 2, axis)\n', (6804, 6819), True, 'import numpy as np\n'), ((16248, 16267), 'numpy.isinf', 'np.isinf', (['x.fitness'], {}), '(x.fitness)\n', (16256, 16267), True, 'import numpy as np\n'), ((16271, 16290), 'numpy.isnan', 'np.isnan', (['x.fitness'], {}), '(x.fitness)\n', (16279, 16290), True, 'import numpy as np\n'), ((19151, 19183), 'os.path.join', 'os.path.join', (['data_folder', 'field'], {}), '(data_folder, field)\n', (19163, 19183), False, 'import os\n'), ((6767, 6788), 'numpy.sum', 'np.sum', (['(_x ** 2)', 'axis'], {}), 
'(_x ** 2, axis)\n', (6773, 6788), True, 'import numpy as np\n'), ((19278, 19310), 'os.path.join', 'os.path.join', (['data_folder', 'field'], {}), '(data_folder, field)\n', (19290, 19310), False, 'import os\n'), ((15279, 15307), 'numpy.isinf', 'np.isinf', (['individual.fitness'], {}), '(individual.fitness)\n', (15287, 15307), True, 'import numpy as np\n'), ((15311, 15339), 'numpy.isnan', 'np.isnan', (['individual.fitness'], {}), '(individual.fitness)\n', (15319, 15339), True, 'import numpy as np\n')] |
from typing import Optional, Union
import numpy as np
from scipy.spatial import cKDTree
import bbknn
from scipy.sparse import csr_matrix
import scanpy as sc
from numpy.testing import assert_array_equal, assert_array_compare
import operator
import numpy as np
from anndata import AnnData
from sklearn.utils import check_random_state, check_array
from scanpy.tools._utils import get_init_pos_from_paga#, _choose_representation
from scanpy import logging as logg
from scanpy._settings import settings
from scanpy._compat import Literal
from scanpy._utils import AnyRandom, NeighborsView
# Lots of this was stolen from https://github.com/theislab/scanpy/blob/master/scanpy/tools/_umap.py
_InitPos = Literal['paga', 'spectral', 'random']
def make_graph_from_batch_corrected_distances(distances, batch_list, neighbors_within_batch, approx, metric, use_faiss, n_trees):
    '''
    Identify the KNN structure to be used in graph construction. All input as in ``bbknn.bbknn()``
    and ``bbknn.bbknn_pca_matrix()``. Returns a tuple of distances and indices of neighbours for
    each cell.
    '''
    import sys  # local import: `sys` is not imported at the top of this file
    #get a list of all our batches
    batches = np.unique(batch_list)
    #in case we're gonna be faissing, turn the data to float32
    if metric=='euclidean' and not approx and 'faiss' in sys.modules and use_faiss:
        # BUG FIX: this previously read ``pca = pca.astype('float32')`` -- no
        # ``pca`` variable exists in this function (the data matrix here is
        # ``distances``), which raised NameError whenever the branch fired.
        distances = distances.astype('float32')
    #create the output matrices, with the indices as integers and distances as floats
    knn_distances = np.zeros((distances.shape[0],neighbors_within_batch*len(batches)))
    knn_indices = np.copy(knn_distances).astype(int)
    #find the knns using faiss/cKDTree/KDTree/annoy
    #need to compare each batch against each batch (including itself)
    for to_ind in range(len(batches)):
        #this is the batch that will be used as the neighbour pool
        #create a boolean mask identifying the cells within this batch
        #and then get the corresponding row numbers for later use
        batch_to = batches[to_ind]
        mask_to = batch_list == batch_to
        ind_to = np.arange(len(batch_list))[mask_to]
        #create the faiss/cKDTree/KDTree/annoy, depending on approx/metric
        ckd = bbknn.create_tree(data=distances[mask_to,:],approx=approx,metric=metric,
                                use_faiss=use_faiss,n_trees=n_trees)
        for from_ind in range(len(batches)):
            #this is the batch that will have its neighbours identified
            #repeat the mask/row number getting
            batch_from = batches[from_ind]
            mask_from = batch_list == batch_from
            ind_from = np.arange(len(batch_list))[mask_from]
            #fish the neighbours out, getting a (distances, indices) tuple back
            ckdout = bbknn.query_tree(data=distances[mask_from,:],ckd=ckd,
                                      neighbors_within_batch=neighbors_within_batch,
                                      approx=approx,metric=metric,use_faiss=use_faiss)
            #the identified indices are relative to the subsetted matrix
            #so we need to convert them back to the original row numbers
            for i in range(ckdout[1].shape[0]):
                for j in range(ckdout[1].shape[1]):
                    ckdout[1][i,j] = ind_to[ckdout[1][i,j]]
            #save the results within the appropriate rows and columns of the structures
            col_range = np.arange(to_ind*neighbors_within_batch, (to_ind+1)*neighbors_within_batch)
            knn_indices[ind_from[:,None],col_range[None,:]] = ckdout[1]
            knn_distances[ind_from[:,None],col_range[None,:]] = ckdout[0]
    return knn_distances, knn_indices
def bbknn_distance_matrix(distances, batch_list, neighbors_within_batch=3, trim=None,
                          approx=True, n_trees=10, use_faiss=True, metric='angular',
                          set_op_mix_ratio=1, local_connectivity=1):
    '''
    Scanpy-independent BBKNN variant that runs on a PCA matrix and list of per-cell batch assignments instead of
    an AnnData object. Non-data-entry arguments behave the same way as ``bbknn.bbknn()``.
    Returns a ``(distances, connectivities)`` tuple, like what would have been stored in the AnnData object.
    The connectivities are the actual neighbourhood graph.

    NOTE(review): the signature takes ``distances``, but the Input section
    below still documents the ``pca`` argument of the original bbknn helper.

    Input
    -----
    pca : ``numpy.array``
        PCA (or other dimensionality reduction) coordinates for each cell, with cells as rows.
    batch_list : ``numpy.array`` or ``list``
        A list of batch assignments for each cell.
    '''
    #more basic sanity checks/processing
    #do we have the same number of cells in pca and batch_list?
    if distances.shape[0] != len(batch_list):
        raise ValueError("Different cell counts indicated by `distances.shape[0]` and `len(batch_list)`.")
    #convert batch_list to np.array of strings for ease of mask making later
    batch_list = np.asarray([str(i) for i in batch_list])
    #metric sanity checks (duplicating the ones in bbknn(), but without scanpy logging)
    if approx and metric not in ['angular', 'euclidean', 'manhattan', 'hamming']:
        print('unrecognised metric for type of neighbor calculation, switching to angular')
        metric = 'angular'
    # NOTE(review): ``DistanceMetric`` and ``KDTree`` are not imported anywhere
    # in this file, so this branch (approx=False with a non-euclidean metric)
    # would raise NameError -- presumably ``from sklearn.neighbors import
    # KDTree, DistanceMetric`` is missing. Confirm before using approx=False.
    elif not approx and not (metric=='euclidean' or isinstance(metric,DistanceMetric) or metric in KDTree.valid_metrics):
        print('unrecognised metric for type of neighbor calculation, switching to euclidean')
        metric = 'euclidean'
    #obtain the batch balanced KNN graph
    knn_distances, knn_indices = make_graph_from_batch_corrected_distances(
        distances,
        batch_list=batch_list,
        n_trees=n_trees,
        approx=approx,
        metric=metric,
        use_faiss=use_faiss,
        neighbors_within_batch=neighbors_within_batch)
    #sort the neighbours so that they're actually in order from closest to furthest
    newidx = np.argsort(knn_distances,axis=1)
    knn_indices = knn_indices[np.arange(np.shape(knn_indices)[0])[:,np.newaxis],newidx]
    knn_distances = knn_distances[np.arange(np.shape(knn_distances)[0])[:,np.newaxis],newidx]
    #this part of the processing is akin to scanpy.api.neighbors()
    dist, cnts = bbknn.compute_connectivities_umap(knn_indices, knn_distances, knn_indices.shape[0],
                                                   knn_indices.shape[1], set_op_mix_ratio=set_op_mix_ratio,
                                                   local_connectivity=local_connectivity)
    #trimming. compute default range if absent
    if trim is None:
        trim = 10 * knn_distances.shape[1]
    #skip trimming if set to 0, otherwise trim
    if trim > 0:
        cnts = bbknn.trimming(cnts=cnts,trim=trim)
    return (dist, cnts)
def assign_neighbors(ad, neighbors_key, knn_distances, knn_indices, set_use_rep=True):
    """Attach a bbknn-corrected neighbour graph to ``ad`` under ``neighbors_key``.

    Stores the distance matrix and (CSR) connectivities in ``ad.obsp`` under
    ``{neighbors_key}__distances`` / ``{neighbors_key}__connectivities`` and
    records the keys plus a params dict in ``ad.uns[neighbors_key]``.

    Bug fix: the original built a params dict (with ``n_neighbors``) and then
    clobbered it with ``{}`` a few lines later, losing ``n_neighbors``. The
    params are now assembled once so all keys survive.

    Parameters
    ----------
    ad : AnnData-like
        Object exposing dict-like ``uns`` and ``obsp`` attributes.
    neighbors_key : str
        Key under which the graph and its metadata are stored.
    knn_distances : array
        Per-cell neighbour distances (row 0's length defines n_neighbors - 1).
    knn_indices : array
        Per-cell neighbour connectivity values, stored as a sparse matrix.
    set_use_rep : bool
        If True, point ``params['use_rep']`` at ``neighbors_key``; otherwise
        keep ``'X'`` (forces UMAP to use the raw data as the representation).

    Returns
    -------
    The same ``ad`` object, mutated in place.
    """
    ad.uns[neighbors_key] = {}
    params = {
        # +1: zero distance to the cell of origin plus the computed neighbours
        'n_neighbors': len(knn_distances[0, :].data) + 1,
        'method': 'bbknn',
        'metric': 'precomputed',
        # Need this to force UMAP to use the raw data as the representation
        'use_rep': "X",
    }
    if set_use_rep:
        params['use_rep'] = neighbors_key
    ad.uns[neighbors_key]['params'] = params
    distances_key = f'{neighbors_key}__distances'
    connectivities_key = f'{neighbors_key}__connectivities'
    ad.obsp[connectivities_key] = csr_matrix(knn_indices)
    ad.obsp[distances_key] = knn_distances
    ad.uns[neighbors_key]['distances_key'] = distances_key
    ad.uns[neighbors_key]['connectivities_key'] = connectivities_key
    return ad
def bbknn_similarity_matrix_and_assign_adata(
        adata,
        obsp_key,
        color=('narrow_group', 'species', 'PTPRC', 'SFTPC', 'n_counts', 'n_genes'),
        COUNTS_BASED_UMAP_COORDS=None,
        neighbors_within_batch=15,
        set_use_rep=True,
        batch_key='species',
        **kwargs,
):
    """Run BBKNN on a precomputed pairwise matrix in ``adata.obsp``, attach the
    batch-balanced neighbour graph to ``adata``, and draw a UMAP.

    Bug fix: the original was missing the comma after ``batch_key='species'``,
    making the signature a syntax error. The mutable list default for ``color``
    was also replaced with a tuple.

    Parameters
    ----------
    adata : AnnData
        Annotated data object; ``adata.obsp[obsp_key]`` holds the precomputed
        pairwise matrix.
    obsp_key : str
        Key into ``adata.obsp``; also reused as the neighbors key for UMAP.
    color : sequence of str
        Annotations/genes to colour the UMAP panels by.
    COUNTS_BASED_UMAP_COORDS : array, optional
        Used for a sanity check to assert that the new UMAP doesn't match the
        original one.
    neighbors_within_batch : int
        Neighbours to pick per batch, forwarded to ``bbknn_distance_matrix``.
    set_use_rep : bool
        Forwarded to ``assign_neighbors``.
    batch_key : str
        ``adata.obs`` column holding the per-cell batch assignment.
    **kwargs
        Forwarded to ``sc.tl.umap``.
    """
    print(adata)
    batch_list = adata.obs[batch_key].tolist()
    print(f"len(batch_list): {len(batch_list)}")
    # Get similarity; stored at pairwise location
    data = adata.obsp[obsp_key]
    # NOTE(review): bbknn_distance_matrix returns (distances, connectivities);
    # they are bound to knn_distances/knn_indices here — verify the naming is intentional.
    knn_distances, knn_indices = bbknn_distance_matrix(
        distances=data, batch_list=batch_list, neighbors_within_batch=neighbors_within_batch)
    adata = assign_neighbors(adata, obsp_key, knn_distances, knn_indices, set_use_rep=set_use_rep)
    sc.tl.umap(adata, neighbors_key=obsp_key, **kwargs)
    if COUNTS_BASED_UMAP_COORDS is not None:
        # every coordinate must differ from the counts-based embedding
        assert_array_compare(operator.__ne__, COUNTS_BASED_UMAP_COORDS, adata.obsm['X_umap'])
    sc.pl.umap(adata, neighbors_key=obsp_key, color=color, ncols=2)
# def _choose_representation(adata, use_rep=None, n_pcs=None, silent=False):
# verbosity = settings.verbosity
# if silent and settings.verbosity > 1:
# settings.verbosity = 1
# if use_rep is None and n_pcs == 0: # backwards compat for specifying `.X`
# logg.warning('use_rep=None and n_pcs=0')
# use_rep = 'X'
# if use_rep is None:
# logg.warning('use_rep=None')
# if adata.n_vars > settings.N_PCS:
# logg.warning('adata.n_vars > settings.N_PCS')
# if 'X_pca' in adata.obsm.keys():
# if n_pcs is not None and n_pcs > adata.obsm['X_pca'].shape[1]:
# raise ValueError(
# '`X_pca` does not have enough PCs. Rerun `sc.pp.pca` with adjusted `n_comps`.')
# X = adata.obsm['X_pca'][:, :n_pcs]
# logg.info(f' using \'X_pca\' with n_pcs = {X.shape[1]}')
# else:
# logg.warning(
# f'You’re trying to run this on {adata.n_vars} dimensions of `.X`, '
# 'if you really want this, set `use_rep=\'X\'`.\n '
# 'Falling back to preprocessing with `sc.pp.pca` and default params.'
# )
# X = pca(adata.X)
# adata.obsm['X_pca'] = X[:, :n_pcs]
# else:
# logg.info(' using data matrix X directly')
# X = adata.X
# else:
# if use_rep in adata.obsm.keys():
# X = adata.obsm[use_rep]
# if use_rep == 'X_pca' and n_pcs is not None:
# X = adata.obsm[use_rep][:, :n_pcs]
# elif use_rep == 'X':
# X = adata.X
# else:
# raise ValueError(
# 'Did not find {} in `.obsm.keys()`. '
# 'You need to compute it first.'.format(use_rep))
# settings.verbosity = verbosity # resetting verbosity
# return X
# def umap_precomputed(
# adata: AnnData,
# min_dist: float = 0.5,
# spread: float = 1.0,
# n_components: int = 2,
# maxiter: Optional[int] = None,
# alpha: float = 1.0,
# gamma: float = 1.0,
# negative_sample_rate: int = 5,
# init_pos: Union[_InitPos, np.ndarray, None] = 'spectral',
# random_state: AnyRandom = 0,
# a: Optional[float] = None,
# b: Optional[float] = None,
# copy: bool = False,
# method: Literal['umap', 'rapids'] = 'umap',
# neighbors_key: Optional[str] = None,
# COUNTS_BASED_UMAP_COORDS=None,
# ):
# adata = adata.copy() if copy else adata
# if neighbors_key is None:
# neighbors_key = 'neighbors'
# if neighbors_key not in adata.uns:
# raise ValueError(
# f'Did not find .uns["{neighbors_key}"]. Run `sc.pp.neighbors` first.')
# start = logg.info('computing UMAP')
# neighbors = NeighborsView(adata, neighbors_key)
# if ('params' not in neighbors
# or neighbors['params']['method'] != 'umap'):
# logg.warning(f'.obsp["{neighbors["connectivities_key"]}"] have not been computed using umap')
# from umap.umap_ import find_ab_params, simplicial_set_embedding
# if a is None or b is None:
# a, b = find_ab_params(spread, min_dist)
# else:
# a = a
# b = b
# adata.uns['umap'] = {'params':{'a': a, 'b': b}}
# if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
# init_coords = adata.obsm[init_pos]
# elif isinstance(init_pos, str) and init_pos == 'paga':
# init_coords = get_init_pos_from_paga(adata, random_state=random_state, neighbors_key=neighbors_key)
# else:
# init_coords = init_pos # Let umap handle it
# if hasattr(init_coords, "dtype"):
# init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
# if random_state != 0:
# adata.uns['umap']['params']['random_state'] = random_state
# random_state = check_random_state(random_state)
# neigh_params = neighbors['params']
# X = _choose_representation(
# adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)
# # ---- debugger ---- #
# # import pdb; pdb.set_trace()
# # ---- debugger ---- #
# if method == 'umap':
# # the data matrix X is really only used for determining the number of connected components
# # for the init condition in the UMAP embedding
# n_epochs = 0 if maxiter is None else maxiter
# X_umap = simplicial_set_embedding(
# X,
# neighbors['connectivities'].tocoo(),
# n_components,
# alpha,
# a,
# b,
# gamma,
# negative_sample_rate,
# n_epochs,
# init_coords,
# random_state,
# neigh_params.get('metric', 'euclidean'),
# neigh_params.get('metric_kwds', {}),
# verbose=settings.verbosity > 3,
# )
# elif method == 'rapids':
# metric = neigh_params.get('metric', 'euclidean')
# if metric != 'euclidean':
# raise ValueError(
# f'`sc.pp.neighbors` was called with `metric` {metric!r}, '
# "but umap `method` 'rapids' only supports the 'euclidean' metric."
# )
# from cuml import UMAP
# n_neighbors = neighbors['params']['n_neighbors']
# n_epochs = 500 if maxiter is None else maxiter # 0 is not a valid value for rapids, unlike original umap
# X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
# umap = UMAP(
# n_neighbors=n_neighbors,
# n_components=n_components,
# n_epochs=n_epochs,
# learning_rate=alpha,
# init=init_pos,
# min_dist=min_dist,
# spread=spread,
# negative_sample_rate=negative_sample_rate,
# a=a,
# b=b,
# verbose=settings.verbosity > 3,
# )
# X_umap = umap.fit_transform(X_contiguous)
# adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates
# logg.info(
# ' finished',
# time=start,
# deep=(
# 'added\n'
# " 'X_umap', UMAP coordinates (adata.obsm)"
# ),
# )
# return adata if copy else None
| [
"bbknn.trimming",
"numpy.copy",
"bbknn.query_tree",
"numpy.shape",
"numpy.unique",
"bbknn.create_tree",
"numpy.argsort",
"numpy.testing.assert_array_compare",
"scanpy.tl.umap",
"scanpy.pl.umap",
"scipy.sparse.csr_matrix",
"numpy.arange",
"bbknn.compute_connectivities_umap"
] | [((1148, 1169), 'numpy.unique', 'np.unique', (['batch_list'], {}), '(batch_list)\n', (1157, 1169), True, 'import numpy as np\n'), ((5800, 5833), 'numpy.argsort', 'np.argsort', (['knn_distances'], {'axis': '(1)'}), '(knn_distances, axis=1)\n', (5810, 5833), True, 'import numpy as np\n'), ((6100, 6288), 'bbknn.compute_connectivities_umap', 'bbknn.compute_connectivities_umap', (['knn_indices', 'knn_distances', 'knn_indices.shape[0]', 'knn_indices.shape[1]'], {'set_op_mix_ratio': 'set_op_mix_ratio', 'local_connectivity': 'local_connectivity'}), '(knn_indices, knn_distances, knn_indices.\n shape[0], knn_indices.shape[1], set_op_mix_ratio=set_op_mix_ratio,\n local_connectivity=local_connectivity)\n', (6133, 6288), False, 'import bbknn\n'), ((7280, 7303), 'scipy.sparse.csr_matrix', 'csr_matrix', (['knn_indices'], {}), '(knn_indices)\n', (7290, 7303), False, 'from scipy.sparse import csr_matrix\n'), ((8814, 8865), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {'neighbors_key': 'obsp_key'}), '(adata, neighbors_key=obsp_key, **kwargs)\n', (8824, 8865), True, 'import scanpy as sc\n'), ((9085, 9148), 'scanpy.pl.umap', 'sc.pl.umap', (['adata'], {'neighbors_key': 'obsp_key', 'color': 'color', 'ncols': '(2)'}), '(adata, neighbors_key=obsp_key, color=color, ncols=2)\n', (9095, 9148), True, 'import scanpy as sc\n'), ((2162, 2279), 'bbknn.create_tree', 'bbknn.create_tree', ([], {'data': 'distances[mask_to, :]', 'approx': 'approx', 'metric': 'metric', 'use_faiss': 'use_faiss', 'n_trees': 'n_trees'}), '(data=distances[mask_to, :], approx=approx, metric=metric,\n use_faiss=use_faiss, n_trees=n_trees)\n', (2179, 2279), False, 'import bbknn\n'), ((6561, 6597), 'bbknn.trimming', 'bbknn.trimming', ([], {'cnts': 'cnts', 'trim': 'trim'}), '(cnts=cnts, trim=trim)\n', (6575, 6597), False, 'import bbknn\n'), ((8993, 9083), 'numpy.testing.assert_array_compare', 'assert_array_compare', (['operator.__ne__', 'COUNTS_BASED_UMAP_COORDS', "adata.obsm['X_umap']"], {}), "(operator.__ne__, 
COUNTS_BASED_UMAP_COORDS, adata.obsm[\n 'X_umap'])\n", (9013, 9083), False, 'from numpy.testing import assert_array_equal, assert_array_compare\n'), ((1544, 1566), 'numpy.copy', 'np.copy', (['knn_distances'], {}), '(knn_distances)\n', (1551, 1566), True, 'import numpy as np\n'), ((2717, 2879), 'bbknn.query_tree', 'bbknn.query_tree', ([], {'data': 'distances[mask_from, :]', 'ckd': 'ckd', 'neighbors_within_batch': 'neighbors_within_batch', 'approx': 'approx', 'metric': 'metric', 'use_faiss': 'use_faiss'}), '(data=distances[mask_from, :], ckd=ckd,\n neighbors_within_batch=neighbors_within_batch, approx=approx, metric=\n metric, use_faiss=use_faiss)\n', (2733, 2879), False, 'import bbknn\n'), ((3351, 3436), 'numpy.arange', 'np.arange', (['(to_ind * neighbors_within_batch)', '((to_ind + 1) * neighbors_within_batch)'], {}), '(to_ind * neighbors_within_batch, (to_ind + 1) *\n neighbors_within_batch)\n', (3360, 3436), True, 'import numpy as np\n'), ((5873, 5894), 'numpy.shape', 'np.shape', (['knn_indices'], {}), '(knn_indices)\n', (5881, 5894), True, 'import numpy as np\n'), ((5965, 5988), 'numpy.shape', 'np.shape', (['knn_distances'], {}), '(knn_distances)\n', (5973, 5988), True, 'import numpy as np\n')] |
import scann
from argparse import ArgumentParser
from pl_bolts.models.self_supervised import SimCLR
from pl_bolts.models.self_supervised.resnets import resnet18
from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform
from pathlib import Path
import torch
import os
import time
import random
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sn
import h5py
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import f1_score, accuracy_score
#imports from internal
from CustomDataset import FolderDataset
from SSLTrainer import Projection
def eval_embeddings(model, dataset, save_path, rank_to, filter_hur):
    """Embed every image in ``dataset`` with ``model``, run a ScaNN
    nearest-neighbour search over the embeddings, and save rank-accuracy
    plots plus a rank-1 confusion heatmap under ``save_path``.

    Bug fix: the temporary ``data.h5`` HDF5 handle is now closed before the
    file is deleted (the original removed the file while it was still open);
    a dead loop computing an unused ``acc_row`` was also removed.

    Parameters
    ----------
    model : torch.nn.Module
        SSL-trained encoder producing 512-d features; run on CUDA in eval mode.
    dataset : FolderDataset
        Yields ``((image, augmented, _), label)``; must expose ``labels``,
        ``dirs`` and ``mydict`` attributes.
    save_path : str
        Directory where the output figures are written.
    rank_to : int
        Number of neighbours to retrieve per query image.
    filter_hur : bool
        If True, over-fetch 4x neighbours and drop those belonging to the
        same hurricane as the query image.
    """
    if filter_hur:
        # over-fetch so enough cross-hurricane neighbours survive filtering
        rank_to = rank_to * 4
    model.eval()
    embeddings_matrix = torch.empty((0, 512)).cuda()
    for batch in tqdm(dataset):
        # this mirrors shared_step; only the un-augmented image is embedded
        (image, im1, _), y = batch
        with torch.no_grad():
            image = torch.unsqueeze(image, 0)  # add batch dimension
            image = image.cuda()
            h1 = model(image)
            embedding = h1
            embeddings_matrix = torch.cat((embeddings_matrix, embedding))
    embeddings_test = embeddings_matrix.cpu().numpy()
    # stage embeddings in an HDF5 file for ScaNN; clear any stale copy first
    if os.path.exists('data.h5'):
        os.remove('data.h5')
    f = h5py.File('data.h5', 'w')
    f.create_dataset("embeddings", data=embeddings_test)
    dataset_scann = f['embeddings']
    # cosine similarity via dot product on L2-normalised vectors
    normalized_dataset = dataset_scann / np.linalg.norm(dataset_scann, axis=1)[:, np.newaxis]
    searcher = scann.scann_ops_pybind.builder(normalized_dataset, rank_to, "dot_product").tree(num_leaves = int(np.sqrt(len(dataset_scann))), num_leaves_to_search = 10).score_brute_force().build()
    neighbors, distances = searcher.search_batched(normalized_dataset)
    # gets label for each image by index
    def labelLookup(index):
        return dataset.labels[index]
    lookup = np.vectorize(labelLookup)
    result_array = lookup(neighbors)
    def same_hurricane(reference_idx, neighbor_idx):
        # hurricane id is the first '_'-separated token of the file name
        hur = dataset.dirs[reference_idx].split('/')[-1].split('_')[0]
        hur2 = dataset.dirs[neighbor_idx].split('/')[-1].split('_')[0]
        return hur == hur2
    if filter_hur:
        mask = np.empty((0, neighbors.shape[1]))
        same_hur = np.vectorize(same_hurricane)
        for row in neighbors:
            mask = np.vstack((mask, same_hur(row, row[0])))
        mask = mask.astype(bool)
        temp_res = np.empty((0, int(rank_to / 4)))
        # per query: keep only neighbours from other hurricanes, query first
        for i in range(result_array.shape[0]):
            row = result_array[i]
            msk = mask[i]
            row_slice = row[~msk]
            row_slice = np.insert(row_slice, 0, row[0])
            if len(row_slice) < rank_to / 4:
                # pad with -1 when too few cross-hurricane neighbours remain
                row_slice = np.append(row_slice, np.full((int(rank_to / 4) - len(row_slice)), -1))
            else:
                row_slice = row_slice[:int(rank_to / 4)]
            temp_res = np.vstack((temp_res, row_slice))
        result_array = temp_res
    neighbor_rank = 1
    array = confusion_matrix(result_array[:, 0], result_array[:, neighbor_rank], normalize='true')
    v = len(array)  # original looped here computing an unused per-row accuracy
    df_cm = pd.DataFrame(array, range(v), range(v))
    plt.figure(figsize=(10, 7))
    plt.title('Rank 1 on embeddings using validation transform')
    sn.set(font_scale=0.5)  # for label size
    res = sn.heatmap(df_cm, annot=True, annot_kws={"size": 6})  # font size
    figure = res.get_figure()
    figure.savefig(f'{save_path}/rank1_NN_heatmap.png', dpi=400)
    plt.clf()
    plt.cla()
    # accuracy (fraction of neighbours in the query's class) at each rank
    reference_image_classes = result_array[:, 0]
    accs_by_rank = []
    ncols = result_array.shape[1]
    nrows = result_array.shape[0]
    for i in range(1, ncols):
        accs_by_rank.append(np.sum(reference_image_classes == result_array[:, i]) / nrows)
    plt.rc('ytick', labelsize=10)
    plt.plot(range(1, ncols), accs_by_rank)
    plt.xlabel('Nearest Neighbor Rank')
    plt.ylabel('Percent in Reference Image Class')
    plt.title('SimCLR (All Data) Similarity Searching')
    plt.savefig(f'{save_path}/NN_acc_by_rank.png', dpi=400)
    plt.clf()
    plt.cla()
    def accs_list(g):
        # per-class accuracy at each neighbour rank
        f1s = []
        for col in g.columns[1:]:
            f1s.append(accuracy_score(g['neighbor_0'], g[col]))
        return f1s
    labels_df = pd.DataFrame(result_array, columns=['neighbor_' + str(x) for x in range(ncols)])
    gp = labels_df.groupby('neighbor_0', group_keys=True)
    k = list(gp.groups.keys())
    inv_map = {v: k for k, v in dataset.mydict.items()}
    for i, arr in enumerate(gp.apply(accs_list)):
        plt.plot(range(1, ncols), arr, label=inv_map[k[i] + 1])
    plt.legend()
    plt.xlabel('Nearest Neighbor Rank')
    plt.ylabel('Percent in Reference Image Class')
    plt.savefig(f'{save_path}/NN_acc_by_class_and_rank.png', dpi=400)
    # close the HDF5 handle before deleting its backing file
    f.close()
    if os.path.exists('data.h5'):
        os.remove('data.h5')
def cli_main():
parser = ArgumentParser()
parser.add_argument("--MODEL_PATH", type=str, help="path to .pt file containing SSL-trained SimCLR Resnet18 Model")
parser.add_argument("--DATA_PATH", type = str, help = "path to data. If folder already contains validation data only, set val_split to 0")
parser.add_argument("--val_split", default = 0.2, type = float, help = "amount of data to use for validation as a decimal")
parser.add_argument("--image_type", default="tif", type=str, help="extension of image for PIL to open and parse - i.e. jpeg, gif, tif, etc. Only put the extension name, not the dot (.)")
parser.add_argument("--image_embedding_size", default=128, type=int, help="size of image representation of SIMCLR")
parser.add_argument("--image_size", default = 128, type=int, help="height of square image to pass through model")
parser.add_argument("--gpus", default=1, type=int, help="number of gpus to use for training")
parser.add_argument("--rank", default=50, type=int, help="number of neighbors to search for")
parser.add_argument("--filter_same_group", default= False, type=bool, help="custom arg for hurricane data to filter same hurricanes out")
args = parser.parse_args()
MODEL_PATH = args.MODEL_PATH
DATA_PATH = args.DATA_PATH
image_size = args.image_size
image_type = args.image_type
embedding_size = args.image_embedding_size
val_split = args.val_split
gpus = args.gpus
rank_to = args.rank
filter_hur = args.filter_same_group
#testing
# MODEL_PATH = '/content/models/SSL/SIMCLR_SSL_0.pt'
# DATA_PATH = '/content/UCMerced_LandUse/Images'
# image_size = 128
# image_type = 'tif'
# embedding_size = 128
# val_split = 0.2
# gpus = 1
# #gets dataset. We can't combine since validation data has different transform needed
train_dataset = FolderDataset(DATA_PATH, validation = False,
val_split = val_split,
transform = SimCLRTrainDataTransform(image_size),
image_type = image_type
)
print('Training Data Loaded...')
val_dataset = FolderDataset(DATA_PATH, validation = True,
val_split = val_split,
transform = SimCLREvalDataTransform(image_size),
image_type = image_type
)
print('Validation Data Loaded...')
#load model
num_samples = len(train_dataset)
#init model with batch size, num_samples (len of data), epochs to train, and autofinds learning rate
model = SimCLR(arch = 'resnet18', batch_size = 1, num_samples = num_samples, gpus = gpus, dataset = 'None') #
model.encoder = resnet18(pretrained=False, first_conv=model.first_conv, maxpool1=model.maxpool1, return_all_feature_maps=False)
model.projection = Projection(input_dim = 512, hidden_dim = 256, output_dim = embedding_size) #overrides
model.load_state_dict(torch.load(MODEL_PATH))
model.cuda()
print('Successfully loaded your model for evaluation.')
#running eval on validation data
save_path = f"{MODEL_PATH[:-3]}/Evaluation/validationMetrics"
Path(save_path).mkdir(parents=True, exist_ok=True)
eval_embeddings(model, val_dataset, save_path, rank_to, filter_hur)
print('Validation Data Evaluation Complete.')
#running eval on training data
save_path = f"{MODEL_PATH[:-3]}/Evaluation/trainingMetrics"
Path(save_path).mkdir(parents=True, exist_ok=True)
eval_embeddings(model, train_dataset, save_path, rank_to, filter_hur)
print('Training Data Evaluation Complete.')
print(f'Please check {MODEL_PATH[:-3]}/Evaluation/ for your results')
# Script entry point: run the embedding evaluation CLI when invoked directly.
if __name__ == '__main__':
    cli_main()
| [
"matplotlib.pyplot.ylabel",
"pl_bolts.models.self_supervised.SimCLR",
"pl_bolts.models.self_supervised.simclr.transforms.SimCLREvalDataTransform",
"numpy.linalg.norm",
"os.remove",
"os.path.exists",
"seaborn.set",
"argparse.ArgumentParser",
"pathlib.Path",
"torch.unsqueeze",
"matplotlib.pyplot.x... | [((868, 881), 'tqdm.tqdm', 'tqdm', (['dataset'], {}), '(dataset)\n', (872, 881), False, 'from tqdm import tqdm\n'), ((1226, 1251), 'os.path.exists', 'os.path.exists', (['"""data.h5"""'], {}), "('data.h5')\n", (1240, 1251), False, 'import os\n'), ((1285, 1310), 'h5py.File', 'h5py.File', (['"""data.h5"""', '"""w"""'], {}), "('data.h5', 'w')\n", (1294, 1310), False, 'import h5py\n'), ((1867, 1892), 'numpy.vectorize', 'np.vectorize', (['labelLookup'], {}), '(labelLookup)\n', (1879, 1892), True, 'import numpy as np\n'), ((2904, 2994), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['result_array[:, 0]', 'result_array[:, neighbor_rank]'], {'normalize': '"""true"""'}), "(result_array[:, 0], result_array[:, neighbor_rank],\n normalize='true')\n", (2920, 2994), False, 'from sklearn.metrics import confusion_matrix\n'), ((3118, 3145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (3128, 3145), True, 'import matplotlib.pyplot as plt\n'), ((3147, 3207), 'matplotlib.pyplot.title', 'plt.title', (['"""Rank 1 on embeddings using validation transform"""'], {}), "('Rank 1 on embeddings using validation transform')\n", (3156, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3232), 'seaborn.set', 'sn.set', ([], {'font_scale': '(0.5)'}), '(font_scale=0.5)\n', (3216, 3232), True, 'import seaborn as sn\n'), ((3258, 3310), 'seaborn.heatmap', 'sn.heatmap', (['df_cm'], {'annot': '(True)', 'annot_kws': "{'size': 6}"}), "(df_cm, annot=True, annot_kws={'size': 6})\n", (3268, 3310), True, 'import seaborn as sn\n'), ((3420, 3429), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3427, 3429), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3441), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3439, 3441), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3720), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(10)'}), "('ytick', labelsize=10)\n", (3697, 3720), True, 
'import matplotlib.pyplot as plt\n'), ((3765, 3800), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nearest Neighbor Rank"""'], {}), "('Nearest Neighbor Rank')\n", (3775, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent in Reference Image Class"""'], {}), "('Percent in Reference Image Class')\n", (3813, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3852, 3903), 'matplotlib.pyplot.title', 'plt.title', (['"""SimCLR (All Data) Similarity Searching"""'], {}), "('SimCLR (All Data) Similarity Searching')\n", (3861, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3961), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save_path}/NN_acc_by_rank.png"""'], {'dpi': '(400)'}), "(f'{save_path}/NN_acc_by_rank.png', dpi=400)\n", (3917, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3967, 3976), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3974, 3976), True, 'import matplotlib.pyplot as plt\n'), ((3979, 3988), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3986, 3988), True, 'import matplotlib.pyplot as plt\n'), ((4494, 4506), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4504, 4506), True, 'import matplotlib.pyplot as plt\n'), ((4509, 4544), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Nearest Neighbor Rank"""'], {}), "('Nearest Neighbor Rank')\n", (4519, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4547, 4593), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent in Reference Image Class"""'], {}), "('Percent in Reference Image Class')\n", (4557, 4593), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4661), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save_path}/NN_acc_by_class_and_rank.png"""'], {'dpi': '(400)'}), "(f'{save_path}/NN_acc_by_class_and_rank.png', dpi=400)\n", (4607, 4661), True, 'import matplotlib.pyplot as plt\n'), ((4669, 4694), 'os.path.exists', 'os.path.exists', (['"""data.h5"""'], {}), "('data.h5')\n", 
(4683, 4694), False, 'import os\n'), ((4758, 4774), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4772, 4774), False, 'from argparse import ArgumentParser\n'), ((7457, 7550), 'pl_bolts.models.self_supervised.SimCLR', 'SimCLR', ([], {'arch': '"""resnet18"""', 'batch_size': '(1)', 'num_samples': 'num_samples', 'gpus': 'gpus', 'dataset': '"""None"""'}), "(arch='resnet18', batch_size=1, num_samples=num_samples, gpus=gpus,\n dataset='None')\n", (7463, 7550), False, 'from pl_bolts.models.self_supervised import SimCLR\n'), ((7584, 7700), 'pl_bolts.models.self_supervised.resnets.resnet18', 'resnet18', ([], {'pretrained': '(False)', 'first_conv': 'model.first_conv', 'maxpool1': 'model.maxpool1', 'return_all_feature_maps': '(False)'}), '(pretrained=False, first_conv=model.first_conv, maxpool1=model.\n maxpool1, return_all_feature_maps=False)\n', (7592, 7700), False, 'from pl_bolts.models.self_supervised.resnets import resnet18\n'), ((7719, 7787), 'SSLTrainer.Projection', 'Projection', ([], {'input_dim': '(512)', 'hidden_dim': '(256)', 'output_dim': 'embedding_size'}), '(input_dim=512, hidden_dim=256, output_dim=embedding_size)\n', (7729, 7787), False, 'from SSLTrainer import Projection\n'), ((1257, 1277), 'os.remove', 'os.remove', (['"""data.h5"""'], {}), "('data.h5')\n", (1266, 1277), False, 'import os\n'), ((2175, 2208), 'numpy.empty', 'np.empty', (['(0, neighbors.shape[1])'], {}), '((0, neighbors.shape[1]))\n', (2183, 2208), True, 'import numpy as np\n'), ((2224, 2252), 'numpy.vectorize', 'np.vectorize', (['same_hurricane'], {}), '(same_hurricane)\n', (2236, 2252), True, 'import numpy as np\n'), ((4702, 4722), 'os.remove', 'os.remove', (['"""data.h5"""'], {}), "('data.h5')\n", (4711, 4722), False, 'import os\n'), ((7832, 7854), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (7842, 7854), False, 'import torch\n'), ((824, 845), 'torch.empty', 'torch.empty', (['(0, 512)'], {}), '((0, 512))\n', (835, 845), False, 'import torch\n'), 
((959, 974), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (972, 974), False, 'import torch\n'), ((992, 1017), 'torch.unsqueeze', 'torch.unsqueeze', (['image', '(0)'], {}), '(image, 0)\n', (1007, 1017), False, 'import torch\n'), ((1125, 1166), 'torch.cat', 'torch.cat', (['(embeddings_matrix, embedding)'], {}), '((embeddings_matrix, embedding))\n', (1134, 1166), False, 'import torch\n'), ((1439, 1476), 'numpy.linalg.norm', 'np.linalg.norm', (['dataset_scann'], {'axis': '(1)'}), '(dataset_scann, axis=1)\n', (1453, 1476), True, 'import numpy as np\n'), ((2573, 2604), 'numpy.insert', 'np.insert', (['row_slice', '(0)', 'row[0]'], {}), '(row_slice, 0, row[0])\n', (2582, 2604), True, 'import numpy as np\n'), ((2807, 2839), 'numpy.vstack', 'np.vstack', (['(temp_res, row_slice)'], {}), '((temp_res, row_slice))\n', (2816, 2839), True, 'import numpy as np\n'), ((6774, 6810), 'pl_bolts.models.self_supervised.simclr.transforms.SimCLRTrainDataTransform', 'SimCLRTrainDataTransform', (['image_size'], {}), '(image_size)\n', (6798, 6810), False, 'from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform\n'), ((7113, 7148), 'pl_bolts.models.self_supervised.simclr.transforms.SimCLREvalDataTransform', 'SimCLREvalDataTransform', (['image_size'], {}), '(image_size)\n', (7136, 7148), False, 'from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform\n'), ((8051, 8066), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (8055, 8066), False, 'from pathlib import Path\n'), ((8332, 8347), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (8336, 8347), False, 'from pathlib import Path\n'), ((3627, 3680), 'numpy.sum', 'np.sum', (['(reference_image_classes == result_array[:, i])'], {}), '(reference_image_classes == result_array[:, i])\n', (3633, 3680), True, 'import numpy as np\n'), ((4072, 4111), 'sklearn.metrics.accuracy_score', 'accuracy_score', 
(["g['neighbor_0']", 'g[col]'], {}), "(g['neighbor_0'], g[col])\n", (4086, 4111), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1505, 1579), 'scann.scann_ops_pybind.builder', 'scann.scann_ops_pybind.builder', (['normalized_dataset', 'rank_to', '"""dot_product"""'], {}), "(normalized_dataset, rank_to, 'dot_product')\n", (1535, 1579), False, 'import scann\n')] |
import argparse
import numpy as np
import models.ensemble as e
import utils.load as l
import utils.metrics as m
import utils.wrapper as w
def get_arguments():
    """Build the command-line parser and return the parsed arguments.

    Returns:
        The ``argparse.Namespace`` holding the user-supplied options.
    """
    parser = argparse.ArgumentParser(
        usage='Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm.')
    # Required positional identifiers, each restricted to a fixed set of choices.
    parser.add_argument('dataset', help='Dataset identifier',
                        choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])
    parser.add_argument('descriptor', help='Descriptor identifier',
                        choices=['global', 'cnn', 'all'])
    parser.add_argument('fold', help='Fold identifier', type=int, choices=range(1, 6))
    # Optional meta-heuristic knobs, both defaulting to 10.
    for flag, description in (('-n_agents', 'Number of meta-heuristic agents'),
                              ('-n_iter', 'Number of meta-heuristic iterations')):
        parser.add_argument(flag, help=description, type=int, default=10)
    return parser.parse_args()
# Script entry point: optimize a boolean ensemble over the validation fold
# using UMDA and persist the optimization history.
if __name__ == '__main__':
    # Gathers the input arguments
    args = get_arguments()
    # Gathering variables from arguments
    dataset = args.dataset
    descriptor = args.descriptor
    step = 'val'
    fold = args.fold
    # Random seed for experimental consistency (one seed per fold)
    np.random.seed(fold-1)
    # Loads the candidate predictions and ground-truth labels for this fold
    preds, y = l.load_candidates(dataset, step, fold)
    # If descriptor is global-based: keep only the first 35 predictor columns
    if descriptor == 'global':
        # Gets the global predictors
        preds = preds[:, :35]
    # If descriptor is cnn-based: keep the remaining (CNN) predictor columns
    elif descriptor == 'cnn':
        # Gets the CNN predictors
        preds = preds[:, 35:]
    # Defining the fitness function to be optimized (boolean ensemble accuracy)
    opt_fn = e.boolean_classifiers(preds, y)
    # Defining number of agents, number of variables and number of iterations
    # (one decision variable per candidate classifier)
    n_agents = args.n_agents
    n_variables = preds.shape[1]
    n_iterations = args.n_iter
    # Defining meta-heuristic hyperparameters for UMDA
    hyperparams = dict(p_selection=0.75, lower_bound=0.05, upper_bound=0.95)
    # Running the optimization task
    history = w.optimize_umda(opt_fn, n_agents, n_variables, n_iterations, hyperparams)
    # Saves the history object to an output file
    history.save(f'output/umda_boolean_{dataset}_{step}_{fold}.pkl')
| [
"argparse.ArgumentParser",
"utils.wrapper.optimize_umda",
"numpy.random.seed",
"models.ensemble.boolean_classifiers",
"utils.load.load_candidates"
] | [((321, 448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm."""'}), "(usage=\n 'Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm.'\n )\n", (344, 448), False, 'import argparse\n'), ((1585, 1609), 'numpy.random.seed', 'np.random.seed', (['(fold - 1)'], {}), '(fold - 1)\n', (1599, 1609), True, 'import numpy as np\n'), ((1663, 1701), 'utils.load.load_candidates', 'l.load_candidates', (['dataset', 'step', 'fold'], {}), '(dataset, step, fold)\n', (1680, 1701), True, 'import utils.load as l\n'), ((2019, 2050), 'models.ensemble.boolean_classifiers', 'e.boolean_classifiers', (['preds', 'y'], {}), '(preds, y)\n', (2040, 2050), True, 'import models.ensemble as e\n'), ((2398, 2471), 'utils.wrapper.optimize_umda', 'w.optimize_umda', (['opt_fn', 'n_agents', 'n_variables', 'n_iterations', 'hyperparams'], {}), '(opt_fn, n_agents, n_variables, n_iterations, hyperparams)\n', (2413, 2471), True, 'import utils.wrapper as w\n')] |
import os
import cv2
import numpy as np
from math import *
from scipy.stats import mode
import time
# Image deskew class
class ImgCorrect:
    """
    Detect line segments with the Hough transform, compute the tilt angle of
    each segment, and use the mean angle of the most frequent angle group as
    the best rotation angle for deskewing the image.
    """
    def __init__(self, img):
        # img: BGR image (as loaded by cv2.imread) — TODO confirm with callers
        self.img = img
        """
        # 图像归一化处理 会造成图像清晰度变低
        self.h, self.w, self.channel = self.img.shape
        if self.w <= self.h:
            self.scale = 700 / self.w
            self.w_scale = 700
            self.h_scale = self.h * self.scale
            self.img = cv2.resize(self.img, (0, 0), fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST)
        else:
            self.scale = 700 / self.h
            self.h_scale = 700
            self.w_scale = self.w * self.scale
            self.img = cv2.resize(self.img, (0, 0), fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST)
        """
        self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        # Run the Hough transform only on the central region to reduce computation
        h, w = self.gray.shape
        self.gray = self.gray[int(0+h/10):int(h-h/10),int(0+w/10):int(w-w/10)]  # crop coordinates are [y0:y1, x0:x1]

    def img_lines(self):
        """Binarize the gray crop (Otsu), dilate, edge-detect with Canny, and
        run probabilistic Hough; stores the segments in ``self.lines``.
        Returns None when no segment is found (``self.lines`` is then None)."""
        ret, binary = cv2.threshold(self.gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        # cv2.imshow("bin",binary)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # rectangular structuring element
        binary = cv2.dilate(binary, kernel)  # dilation
        edges = cv2.Canny(binary, 50, 200)
        # cv2.imshow("edges", edges)
        self.lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=100, maxLineGap=20)
        # print(self.lines)
        if self.lines is None:
            # print("Line segment not found")
            return None
        # lines1 = self.lines[:, 0, :]  # reduce to 2-D
        # print(lines1)
        # imglines = self.img.copy()
        # for x1, y1, x2, y2 in lines1[:]:
        #     cv2.line(imglines, (x1, y1), (x2, y2), (0, 255, 0), 3)
        # return imglines

    def search_lines(self):
        """Bucket the detected segments by slope angle and return the mean
        angle (degrees) of the most numerous bucket.

        Buckets: vertical (undefined slope), (0, 45), [45, 90) [disabled],
        (-45, 0), (-90, -45] [disabled], and horizontal (zero slope).
        """
        lines = self.lines[:, 0, :]  # reduce to 2-D: one (x1, y1, x2, y2) per row
        # k = [(y2 - y1) / (x2 - x1) for x1, y1, x2, y2 in lines]
        # sorted_k = sorted(lines, key=lambda x:(x[3] - x[1]) / (x[2] - x[0]))
        number_inexistence_k = 0   # vertical segments (x2 == x1)
        sum_positive_k45 = 0
        number_positive_k45 = 0    # angles in (0, 45)
        sum_positive_k90 = 0
        number_positive_k90 = 0    # angles in [45, 90) — accumulation disabled below
        sum_negative_k45 = 0
        number_negative_k45 = 0    # angles in (-45, 0)
        sum_negative_k90 = 0
        number_negative_k90 = 0    # angles in (-90, -45] — accumulation disabled below
        sum_zero_k = 0
        number_zero_k = 0          # horizontal segments (y2 == y1)
        for x in lines:
            if x[2] == x[0]:
                number_inexistence_k += 1
                continue
            if 0 < degrees(atan((x[3] - x[1]) / (x[2] - x[0]))) < 45:
                number_positive_k45 += 1
                sum_positive_k45 += degrees(atan((x[3] - x[1]) / (x[2] - x[0])))
            # if 45 <= degrees(atan((x[3] - x[1]) / (x[2] - x[0]))) < 90:
            #     number_positive_k90 += 1
            #     sum_positive_k90 += degrees(atan((x[3] - x[1]) / (x[2] - x[0])))
            if -45 < degrees(atan((x[3] - x[1]) / (x[2] - x[0]))) < 0:
                number_negative_k45 += 1
                sum_negative_k45 += degrees(atan((x[3] - x[1]) / (x[2] - x[0])))
            # if -90 < degrees(atan((x[3] - x[1]) / (x[2] - x[0]))) <= -45:
            #     number_negative_k90 += 1
            #     sum_negative_k90 += degrees(atan((x[3] - x[1]) / (x[2] - x[0])))
            if x[3] == x[1]:
                number_zero_k += 1
        max_number = max(number_inexistence_k, number_positive_k45, number_positive_k90, number_negative_k45,
                         number_negative_k90, number_zero_k)
        # print(number_inexistence_k,number_positive_k45, number_positive_k90, number_negative_k45, number_negative_k90,number_zero_k)
        if max_number == number_inexistence_k:
            return 90
        if max_number == number_positive_k45:
            return sum_positive_k45 / number_positive_k45
        if max_number == number_positive_k90:
            return sum_positive_k90 / number_positive_k90
        if max_number == number_negative_k45:
            return sum_negative_k45 / number_negative_k45
        if max_number == number_negative_k90:
            return sum_negative_k90 / number_negative_k90
        if max_number == number_zero_k:
            return 0

    def rotate_image(self, degree):
        """
        Positive angle rotates counter-clockwise.
        :param degree: tilt angle in degrees, as returned by search_lines
        :return: the rotated image, with exposed corners filled with the
            dominant corner color
        """
        # print("degree:", degree)
        # Normalize the angle so the applied rotation never exceeds +/-45 degrees.
        if -45 <= degree <= 0:
            degree = degree  # negative angle: clockwise
        if -90 <= degree < -45:
            degree = 90 + degree  # positive angle: counter-clockwise
        if 0 < degree <= 45:
            degree = degree  # positive angle: counter-clockwise
        if 45 < degree < 90:
            degree = degree - 90  # negative angle: clockwise
        # print("rotate degree:", degree)
        # degree = -45
        # # Pick the fill color for the 4 corners exposed by the rotation
        filled_color = -1
        if filled_color == -1:
            # Most common color among the four corner pixels.
            filled_color = mode([self.img[0, 0], self.img[0, -1],
                                 self.img[-1, 0], self.img[-1, -1]]).mode[0]
        if np.array(filled_color).shape[0] == 2:
            if isinstance(filled_color, int):
                filled_color = (filled_color, filled_color, filled_color)
            else:
                filled_color = tuple([int(i) for i in filled_color])
        # degree = degree - 90
        height, width = self.img.shape[:2]
        # Bounding-box size of the rotated image (standard rotated-rect formula).
        heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
        widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
        matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)  # counter-clockwise rotation by `degree`
        # Shift so the rotated content is centered in the new, larger canvas
        # (the rotation matrix is expressed in the original image's frame).
        matRotation[0, 2] += (widthNew - width) / 2
        matRotation[1, 2] += (heightNew - height) / 2
        imgRotation = cv2.warpAffine(self.img, matRotation, (widthNew, heightNew), borderValue=filled_color)
        return imgRotation
# Image deskew driver
def imgTransform(image_file, img):
    """Try to deskew ``img``; on any failure return the original image.

    Args:
        image_file: path of the image, used only in the failure message.
        img: BGR image array to correct.

    Returns:
        The deskewed image, or ``img`` unchanged when correction fails.
    """
    try:
        img_correct = ImgCorrect(img)
        img_correct.img_lines()
        degree = img_correct.search_lines()
        correct_image = img_correct.rotate_image(degree)  # deskew
    # A bare `except:` also swallowed SystemExit/KeyboardInterrupt; catch
    # only real errors from the correction pipeline.
    except Exception:
        print("矫正失败:"+image_file)
        return img
    else:
        return correct_image
# Image renaming
def add_prefix_subfolders(input_path, out_path):
    """Move every ``.jpg`` under ``input_path`` into ``out_path``,
    renaming them sequentially as ``a0.jpg``, ``a1.jpg``, ..."""
    abs_in = os.path.abspath(input_path)
    abs_out = os.path.abspath(out_path)
    counter = 0
    for name in os.listdir(input_path):
        if not name.endswith('.jpg'):
            continue
        src = os.path.join(abs_in, name)                        # old location
        dst = os.path.join(abs_out, 'a' + str(counter) + '.jpg')  # new location/name
        os.rename(src, dst)
        print('converting %s to %s ...' % (src, dst))
        counter += 1
# Deskew every image under input_path and save the renamed results to out_path
if __name__ == "__main__":
    # The original bound the timestamp string to the name `time`, shadowing
    # the `time` module; use a distinct name instead.
    timestamp = time.strftime("%Y%m%d", time.localtime())
    input_path = r'I:/Images_OCR/after_image/med2_4000+'
    out_path = r'I:/Images_OCR/after_image/Voc_med/images'
    # exist_ok avoids the exists-then-create race of the original check.
    os.makedirs(out_path, exist_ok=True)
    imgs_lists = [os.path.join(input_path, single_file)
                  for single_file in os.listdir(input_path)]
    for i, image_file in enumerate(imgs_lists):
        img = cv2.imread(image_file)
        correct_image = imgTransform(image_file, img)  # deskew
        newPath = os.path.join(os.path.abspath(out_path), timestamp + '_' + str(i) + '.jpg')
        cv2.imwrite(newPath, correct_image)
        print(image_file + "====>" + newPath)
i += 1 | [
"numpy.array",
"os.path.exists",
"os.listdir",
"cv2.threshold",
"time.localtime",
"cv2.warpAffine",
"os.rename",
"cv2.cvtColor",
"cv2.getRotationMatrix2D",
"cv2.Canny",
"cv2.imread",
"cv2.imwrite",
"cv2.HoughLinesP",
"os.makedirs",
"scipy.stats.mode",
"os.path.join",
"os.path.abspath... | [((6384, 6406), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (6394, 6406), False, 'import os\n'), ((7140, 7162), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (7150, 7162), False, 'import os\n'), ((882, 924), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img, cv2.COLOR_BGR2GRAY)\n', (894, 924), False, 'import cv2\n'), ((1136, 1209), 'cv2.threshold', 'cv2.threshold', (['self.gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(self.gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (1149, 1209), False, 'import cv2\n'), ((1262, 1311), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (1287, 1311), False, 'import cv2\n'), ((1337, 1363), 'cv2.dilate', 'cv2.dilate', (['binary', 'kernel'], {}), '(binary, kernel)\n', (1347, 1363), False, 'import cv2\n'), ((1386, 1412), 'cv2.Canny', 'cv2.Canny', (['binary', '(50)', '(200)'], {}), '(binary, 50, 200)\n', (1395, 1412), False, 'import cv2\n'), ((1471, 1548), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['edges', '(1)', '(np.pi / 180)', '(200)'], {'minLineLength': '(100)', 'maxLineGap': '(20)'}), '(edges, 1, np.pi / 180, 200, minLineLength=100, maxLineGap=20)\n', (1486, 1548), False, 'import cv2\n'), ((5596, 5655), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(width / 2, height / 2)', 'degree', '(1)'], {}), '((width / 2, height / 2), degree, 1)\n', (5619, 5655), False, 'import cv2\n'), ((5838, 5929), 'cv2.warpAffine', 'cv2.warpAffine', (['self.img', 'matRotation', '(widthNew, heightNew)'], {'borderValue': 'filled_color'}), '(self.img, matRotation, (widthNew, heightNew), borderValue=\n filled_color)\n', (5852, 5929), False, 'import cv2\n'), ((6896, 6912), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6910, 6912), False, 'import time\n'), ((7041, 7065), 'os.path.exists', 'os.path.exists', (['out_path'], {}), 
'(out_path)\n', (7055, 7065), False, 'import os\n'), ((7075, 7096), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (7086, 7096), False, 'import os\n'), ((7184, 7221), 'os.path.join', 'os.path.join', (['input_path', 'single_file'], {}), '(input_path, single_file)\n', (7196, 7221), False, 'import os\n'), ((7317, 7339), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (7327, 7339), False, 'import cv2\n'), ((7491, 7526), 'cv2.imwrite', 'cv2.imwrite', (['newPath', 'correct_image'], {}), '(newPath, correct_image)\n', (7502, 7526), False, 'import cv2\n'), ((6725, 6744), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (6734, 6744), False, 'import os\n'), ((7430, 7455), 'os.path.abspath', 'os.path.abspath', (['out_path'], {}), '(out_path)\n', (7445, 7455), False, 'import os\n'), ((6531, 6558), 'os.path.abspath', 'os.path.abspath', (['input_path'], {}), '(input_path)\n', (6546, 6558), False, 'import os\n'), ((6606, 6631), 'os.path.abspath', 'os.path.abspath', (['out_path'], {}), '(out_path)\n', (6621, 6631), False, 'import os\n'), ((4925, 4999), 'scipy.stats.mode', 'mode', (['[self.img[0, 0], self.img[0, -1], self.img[-1, 0], self.img[-1, -1]]'], {}), '([self.img[0, 0], self.img[0, -1], self.img[-1, 0], self.img[-1, -1]])\n', (4929, 4999), False, 'from scipy.stats import mode\n'), ((5052, 5074), 'numpy.array', 'np.array', (['filled_color'], {}), '(filled_color)\n', (5060, 5074), True, 'import numpy as np\n')] |
from __future__ import print_function, division, absolute_import
import argparse
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import torch
from hubconf import SRResNet
# Command-line options for the demo script.
parser = argparse.ArgumentParser(description="PyTorch SRResNet Demo")
parser.add_argument("--device", default="cuda", help="device to use, e.g. 'cpu', 'cuda' (default) or 'cuda:0'")
parser.add_argument("--model", type=str, help="local model path (optional)")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name")
parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--scale", default=4, type=int, help="scale factor, default: 4")
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio (dB) between two images, assuming a
    255 peak value; ``shave_border`` pixels are trimmed from every edge
    before comparison. Identical images return 100."""
    h, w = pred.shape[:2]
    window = (slice(shave_border, h - shave_border),
              slice(shave_border, w - shave_border))
    diff = pred[window] - gt[window]
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        # Identical inputs: avoid log10 of infinity.
        return 100
    return 20 * math.log10(255.0 / rmse)
opt = parser.parse_args()
device = torch.device(opt.device)
torch.set_grad_enabled(False)  # inference only
# Load either a local checkpoint or the pretrained hub model.
if opt.model:
    model = torch.load(opt.model, map_location=device)["model"]
else:
    model = SRResNet(pretrained=True, map_location=device)
model.to(device)
# Read the .mat file once instead of parsing the same file three times.
mat = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")
im_gt = mat['im_gt'].astype(float).astype(np.uint8)   # ground truth
im_b = mat['im_b'].astype(float).astype(np.uint8)     # bicubic upscale
im_l = mat['im_l'].astype(float).astype(np.uint8)     # low-res input
# HWC uint8 -> NCHW float in [0, 1]
im_input = torch.from_numpy(im_l).permute(2, 0, 1).to(device)
im_input = im_input.float().div(255).unsqueeze(0)
start_time = time.time()
out = model(im_input)
elapsed_time = time.time() - start_time
im_h = out.cpu().squeeze().numpy()
im_h = np.clip(im_h, 0, 1) * 255
im_h = im_h.transpose(1, 2, 0)  # CHW -> HWC for display
print("Dataset=", opt.dataset)
print("Scale=", opt.scale)
print("It takes {:.3f} ms for processing".format(elapsed_time * 1e3))
fig = plt.figure()
# Integer subplot specs: the string form ("131") is deprecated in Matplotlib.
ax = plt.subplot(131)
ax.imshow(im_gt)
ax.set_title("GT")
ax = plt.subplot(132)
ax.imshow(im_b)
ax.set_title("Input(Bicubic)")
ax = plt.subplot(133)
ax.imshow(im_h.astype(np.uint8))
ax.set_title("Output(SRResNet)")
plt.show()
| [
"numpy.clip",
"numpy.mean",
"hubconf.SRResNet",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"torch.load",
"scipy.io.loadmat",
"torch.from_numpy",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"torch.set_grad_enabled",
"math.log10",
"time.time",
"torch.device"
] | [((234, 294), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch SRResNet Demo"""'}), "(description='PyTorch SRResNet Demo')\n", (257, 294), False, 'import argparse\n'), ((1153, 1177), 'torch.device', 'torch.device', (['opt.device'], {}), '(opt.device)\n', (1165, 1177), False, 'import torch\n'), ((1179, 1208), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (1201, 1208), False, 'import torch\n'), ((1875, 1886), 'time.time', 'time.time', ([], {}), '()\n', (1884, 1886), False, 'import time\n'), ((2185, 2197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2195, 2197), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2221), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""131"""'], {}), "('131')\n", (2214, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2282), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""132"""'], {}), "('132')\n", (2275, 2282), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2354), 'matplotlib.pyplot.subplot', 'plt.subplot', (['"""133"""'], {}), "('133')\n", (2347, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2429, 2431), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1352), 'hubconf.SRResNet', 'SRResNet', ([], {'pretrained': '(True)', 'map_location': 'device'}), '(pretrained=True, map_location=device)\n', (1314, 1352), False, 'from hubconf import SRResNet\n'), ((1379, 1444), 'scipy.io.loadmat', 'sio.loadmat', (["('testsets/' + opt.dataset + '/' + opt.image + '.mat')"], {}), "('testsets/' + opt.dataset + '/' + opt.image + '.mat')\n", (1390, 1444), True, 'import scipy.io as sio\n'), ((1461, 1526), 'scipy.io.loadmat', 'sio.loadmat', (["('testsets/' + opt.dataset + '/' + opt.image + '.mat')"], {}), "('testsets/' + opt.dataset + '/' + opt.image + '.mat')\n", (1472, 1526), True, 'import scipy.io as sio\n'), ((1542, 1607), 'scipy.io.loadmat', 'sio.loadmat', 
(["('testsets/' + opt.dataset + '/' + opt.image + '.mat')"], {}), "('testsets/' + opt.dataset + '/' + opt.image + '.mat')\n", (1553, 1607), True, 'import scipy.io as sio\n'), ((1924, 1935), 'time.time', 'time.time', ([], {}), '()\n', (1933, 1935), False, 'import time\n'), ((1992, 2011), 'numpy.clip', 'np.clip', (['im_h', '(0)', '(1)'], {}), '(im_h, 0, 1)\n', (1999, 2011), True, 'import numpy as np\n'), ((1019, 1038), 'numpy.mean', 'np.mean', (['(imdff ** 2)'], {}), '(imdff ** 2)\n', (1026, 1038), True, 'import numpy as np\n'), ((1091, 1115), 'math.log10', 'math.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (1101, 1115), False, 'import math\n'), ((1236, 1278), 'torch.load', 'torch.load', (['opt.model'], {'map_location': 'device'}), '(opt.model, map_location=device)\n', (1246, 1278), False, 'import torch\n'), ((1760, 1782), 'torch.from_numpy', 'torch.from_numpy', (['im_l'], {}), '(im_l)\n', (1776, 1782), False, 'import torch\n')] |
from py_dp.dispersion.binning import make_input_for_binning_with_freq, make_1d_abs_vel_bins, class_index_abs_log
from py_dp.dispersion.convert_to_time_process_with_freq import remove_duplicate
import numpy as np
from copy import copy
from py_dp.dispersion.mapping import mapping_v_sgn_repeat
import os
def test_mapping_both_ways():
    """Build velocity bins and frequency sub-classes from particle-tracking
    data, then check that mapping_v_sgn_repeat draws velocities inside the
    right log-bin and assigns contiguous 3-D class numbers per 2-D class.
    """
    main_folder = os.path.dirname(os.path.dirname(__file__))
    input_folder = os.path.join(main_folder, 'test_related_files', 'particle_tracking_results')
    dt = 50.0
    n_realz = 1
    # Load velocities and their repeat frequencies from the tracking results.
    big_v_array, big_freq_array, pointer_list, initial_v0, initial_v1 = make_input_for_binning_with_freq(input_folder,
                                                                                                      n_realz, dt)
    new_v, new_f = remove_duplicate(big_v_array, big_freq_array)
    n_abs_log_class = 8
    abs_log_v_edges = make_1d_abs_vel_bins(new_v, n_abs_log_class, n_slow_classes=1)
    v_class_number = class_index_abs_log(new_v, abs_log_v_edges)
    sub_classes_nrepeat = []
    n_subclass = []
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the drop-in dtype.
    place_holder = np.array([1], dtype=int)
    for i in range(2*n_abs_log_class):
        possible_f_vals = np.unique(new_f[v_class_number == i])
        if not len(possible_f_vals):
            # Empty velocity class: give it a single dummy frequency of 1.
            possible_f_vals = copy(place_holder)
        sub_classes_nrepeat.append(sorted(possible_f_vals))
        n_subclass.append(len(possible_f_vals))
    modified_n_sub_class = np.array(n_subclass)
    cumsum_n_subclass = np.hstack((0, np.cumsum(modified_n_sub_class)))
    mapping = mapping_v_sgn_repeat(abs_log_v_edges, cumsum_n_subclass, sub_classes_nrepeat)
    # Test draw_from_class_velocity: the drawn velocity must fall strictly
    # inside the log-bin of the class it was drawn from.
    v_log_edges = mapping.v_log_edges
    # for i in range(len(v_log_edges)-1):
    for i in range(mapping.n_abs_v_classes):
        class_2d = i
        v1 = mapping.draw_from_class_velocity(class_2d)
        v_log_edges = mapping.v_log_edges
        assert(np.log(v1) > v_log_edges[class_2d])
        assert(np.log(v1) < v_log_edges[class_2d+1])
    # Test find_3d_class_number: within one 2-D class the 3-D class numbers
    # are contiguous starting at cumsum_n_subclass[class_2d].
    for i in range(len(v_log_edges)-1):
        class_2d = i
        cumsum_n_subclass = mapping.cumsum_n_subclass
        freq_array = mapping.sub_classes_nrepeat[class_2d]
        index_2d = class_2d*np.ones(len(freq_array), dtype=int)
        class_3d = mapping.find_3d_class_number(index_2d, freq_array)
        assert(np.all(class_3d == (cumsum_n_subclass[index_2d] + range(len(freq_array)))))
    # #test find_3d_class for freq values not available in the binning data
    # index_2d_test = [0, 0, 0]
    # freq_test = [15.0, 90.0, 125]
    # test_output = mapping.find_3d_class_number(index_2d_test, freq_test)
    # assert(np.all(test_output == [13, 24, 24]))
    # #test for last class
    # index_2d_test = (mapping.n_2d_classes - 1)*np.ones(3, dtype=int)
    # freq_test = [15.0, 90.0, 125]
    # test_output = mapping.find_3d_class_number(index_2d_test, freq_test)
    # expected_output = (mapping.n_3d_classes-1)*np.ones(3, dtype=int)
    # assert(np.all(test_output == expected_output))
    #
    # #test inverse mapping
    # class_3d_array = range(mapping.n_3d_classes)
    # for class_3d_test in class_3d_array:
    #     abs_v, sgn_v, freq = mapping.find_v_sgn_freq(class_3d_test)
| [
"py_dp.dispersion.binning.make_input_for_binning_with_freq",
"py_dp.dispersion.binning.class_index_abs_log",
"numpy.unique",
"py_dp.dispersion.binning.make_1d_abs_vel_bins",
"numpy.log",
"os.path.join",
"numpy.array",
"os.path.dirname",
"py_dp.dispersion.convert_to_time_process_with_freq.remove_dupl... | [((413, 489), 'os.path.join', 'os.path.join', (['main_folder', '"""test_related_files"""', '"""particle_tracking_results"""'], {}), "(main_folder, 'test_related_files', 'particle_tracking_results')\n", (425, 489), False, 'import os\n'), ((592, 651), 'py_dp.dispersion.binning.make_input_for_binning_with_freq', 'make_input_for_binning_with_freq', (['input_folder', 'n_realz', 'dt'], {}), '(input_folder, n_realz, dt)\n', (624, 651), False, 'from py_dp.dispersion.binning import make_input_for_binning_with_freq, make_1d_abs_vel_bins, class_index_abs_log\n'), ((776, 821), 'py_dp.dispersion.convert_to_time_process_with_freq.remove_duplicate', 'remove_duplicate', (['big_v_array', 'big_freq_array'], {}), '(big_v_array, big_freq_array)\n', (792, 821), False, 'from py_dp.dispersion.convert_to_time_process_with_freq import remove_duplicate\n'), ((868, 930), 'py_dp.dispersion.binning.make_1d_abs_vel_bins', 'make_1d_abs_vel_bins', (['new_v', 'n_abs_log_class'], {'n_slow_classes': '(1)'}), '(new_v, n_abs_log_class, n_slow_classes=1)\n', (888, 930), False, 'from py_dp.dispersion.binning import make_input_for_binning_with_freq, make_1d_abs_vel_bins, class_index_abs_log\n'), ((954, 997), 'py_dp.dispersion.binning.class_index_abs_log', 'class_index_abs_log', (['new_v', 'abs_log_v_edges'], {}), '(new_v, abs_log_v_edges)\n', (973, 997), False, 'from py_dp.dispersion.binning import make_input_for_binning_with_freq, make_1d_abs_vel_bins, class_index_abs_log\n'), ((1067, 1094), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int'}), '([1], dtype=np.int)\n', (1075, 1094), True, 'import numpy as np\n'), ((1419, 1439), 'numpy.array', 'np.array', (['n_subclass'], {}), '(n_subclass)\n', (1427, 1439), True, 'import numpy as np\n'), ((1526, 1603), 'py_dp.dispersion.mapping.mapping_v_sgn_repeat', 'mapping_v_sgn_repeat', (['abs_log_v_edges', 'cumsum_n_subclass', 'sub_classes_nrepeat'], {}), '(abs_log_v_edges, cumsum_n_subclass, 
sub_classes_nrepeat)\n', (1546, 1603), False, 'from py_dp.dispersion.mapping import mapping_v_sgn_repeat\n'), ((367, 392), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'import os\n'), ((1160, 1197), 'numpy.unique', 'np.unique', (['new_f[v_class_number == i]'], {}), '(new_f[v_class_number == i])\n', (1169, 1197), True, 'import numpy as np\n'), ((1265, 1283), 'copy.copy', 'copy', (['place_holder'], {}), '(place_holder)\n', (1269, 1283), False, 'from copy import copy\n'), ((1477, 1508), 'numpy.cumsum', 'np.cumsum', (['modified_n_sub_class'], {}), '(modified_n_sub_class)\n', (1486, 1508), True, 'import numpy as np\n'), ((1888, 1898), 'numpy.log', 'np.log', (['v1'], {}), '(v1)\n', (1894, 1898), True, 'import numpy as np\n'), ((1937, 1947), 'numpy.log', 'np.log', (['v1'], {}), '(v1)\n', (1943, 1947), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
OUTPUT_PATH = "../events/"
def save():
    """Build a small conv graph, checkpoint it to ../ckpt/model.ckpt, and
    return one slice of the first conv layer's kernel for later comparison."""
    input_node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
    # Three stacked conv layers; only the graph structure and variables matter here.
    net = tf.layers.conv2d(input_node, 32, (3, 3), strides=(2, 2), padding='same', name='conv_1')
    net = tf.layers.conv2d(net, 32, (3, 3), strides=(1, 1), padding='same', name='conv_2')
    net = tf.layers.conv2d(net, 64, (3, 3), strides=(2, 2), padding='same', name='conv_3')
    # Write the graph definition for TensorBoard inspection.
    tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
    # The Saver must be created after all variables exist in the graph.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Snapshot the kernel value before saving so it can be compared
        # against the value restored by load().
        result = get_first_filter_value(sess)
        saver.save(sess, "../ckpt/model.ckpt")
    return result
def get_first_filter_value(sess):
    """Evaluate the conv_1 kernel in ``sess`` and return its [1, :, :, 1] slice."""
    kernel_tensor = tf.get_default_graph().get_tensor_by_name("conv_1/kernel/read:0")
    kernel_value = sess.run(kernel_tensor)
    return kernel_value[1, :, :, 1]
def load():
    """Restore the checkpoint written by save() and return the same
    conv_1 kernel slice for comparison."""
    tf.reset_default_graph()
    with tf.Session() as session:
        # Rebuild the graph from the saved meta file, then restore weights.
        meta_saver = tf.train.import_meta_graph('../ckpt/model.ckpt.meta')
        session.run(tf.global_variables_initializer())
        session.run(tf.local_variables_initializer())
        meta_saver.restore(session, '../ckpt/model.ckpt')
        return get_first_filter_value(session)
# Save a checkpoint, reload it, and verify the kernel round-trips exactly.
if __name__ == '__main__':
    save_value = save()
    load_value = load()
    print(save_value)
    print(load_value)
    # np.alltrue was deprecated and removed in NumPy 2.0; np.all is the replacement.
    assert np.all(save_value == load_value)
| [
"tensorflow.local_variables_initializer",
"numpy.alltrue",
"tensorflow.reset_default_graph",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.layers.conv2d",
"tensorflow.train.import_meta_graph",
"tensorflow.get_defaul... | [((102, 161), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 100, 100, 3]', 'dtype': 'tf.float32'}), '(shape=[None, 100, 100, 3], dtype=tf.float32)\n', (116, 161), True, 'import tensorflow as tf\n'), ((172, 263), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input_node', '(32)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""conv_1"""'}), "(input_node, 32, (3, 3), strides=(2, 2), padding='same',\n name='conv_1')\n", (188, 263), True, 'import tensorflow as tf\n'), ((270, 355), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['net', '(32)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'name': '"""conv_2"""'}), "(net, 32, (3, 3), strides=(1, 1), padding='same', name='conv_2'\n )\n", (286, 355), True, 'import tensorflow as tf\n'), ((361, 446), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['net', '(64)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""conv_3"""'}), "(net, 64, (3, 3), strides=(2, 2), padding='same', name='conv_3'\n )\n", (377, 446), True, 'import tensorflow as tf\n'), ((525, 541), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (539, 541), True, 'import tensorflow as tf\n'), ((964, 988), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (986, 988), True, 'import tensorflow as tf\n'), ((1445, 1481), 'numpy.alltrue', 'np.alltrue', (['(save_value == load_value)'], {}), '(save_value == load_value)\n', (1455, 1481), True, 'import numpy as np\n'), ((552, 564), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (562, 564), True, 'import tensorflow as tf\n'), ((998, 1010), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1008, 1010), True, 'import tensorflow as tf\n'), ((1036, 1089), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""../ckpt/model.ckpt.meta"""'], {}), "('../ckpt/model.ckpt.meta')\n", (1062, 1089), True, 'import tensorflow as tf\n'), ((488, 510), 
'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (508, 510), True, 'import tensorflow as tf\n'), ((591, 624), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (622, 624), True, 'import tensorflow as tf\n'), ((643, 675), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (673, 675), True, 'import tensorflow as tf\n'), ((840, 862), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (860, 862), True, 'import tensorflow as tf\n'), ((1108, 1141), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1139, 1141), True, 'import tensorflow as tf\n'), ((1160, 1192), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (1190, 1192), True, 'import tensorflow as tf\n')] |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for testing numerical accuracy."""
import functools
# Dependency imports
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.math import gradient
# Public API of this module (names exported by `from ... import *`).
__all__ = [
    'floating_tensor_to_f32',
    'floating_tensor_to_f64',
    'relerr',
    'relative_error_at',
    'wrong_bits',
    'rounding_error',
    'condition_number_one_input',
    'error_due_to_ill_conditioning',
    'excess_wrong_bits',
]
def floating_tensor_to_f32(x):
  """Cast x to float32 if a floating-point Tensor, else return it.

  Intended for use with `tf.nest.map_structure`, or other situations
  where it may not be obvious whether an object is a Tensor, or has a
  floating dtype.

  Args:
    x: The object to be cast or left be.

  Returns:
    x: x, either cast or left be.
  """
  should_cast = tf.is_tensor(x) and dtype_util.is_floating(x.dtype)
  return tf.cast(x, dtype=tf.float32) if should_cast else x
def floating_tensor_to_f64(x):
  """Cast x to float64 if a floating-point Tensor, else return it.

  Intended for use with `tf.nest.map_structure`, or other situations
  where it may not be obvious whether an object is a Tensor, or has a
  floating dtype.

  Args:
    x: The object to be cast or left be.

  Returns:
    x: x, either cast or left be.
  """
  should_cast = tf.is_tensor(x) and dtype_util.is_floating(x.dtype)
  return tf.cast(x, dtype=tf.float64) if should_cast else x
def relerr(result, truth):
  """Returns the relative error of `result` relative `truth`.

  The relative error is |result - truth| / |truth|, computed in 64-bit
  precision regardless of the input dtypes.

  Args:
    result: Tensor of values whose deviation to assess.
    truth: Tensor of values presumed correct. Must broadcast with
      `result`.

  Returns:
    err: Float64 Tensor of elementwise relative error values.
  """
  result64 = tf.cast(result, dtype=tf.float64)
  truth64 = tf.cast(truth, dtype=tf.float64)
  raw_err = tf.math.abs((result64 - truth64) / truth64)
  zero = tf.constant(0., dtype=tf.float64)
  # When truth is exactly 0 (in 64 bits), the ratio above yields nan for a
  # zero result even though the error is 0; exact matches have zero error
  # by definition, so select 0 wherever result == truth.
  return tf.where(result64 == truth64, zero, raw_err)
def relative_error_at(f, *args):
  """Returns the relative error of `f` on `args` in float32.

  This function assumes that numerical error when computing `f` in float64 is
  negligible. For this to work correctly, `f` needs to be _dtype-polymorphic_:
  the dtype in which computations internal to `f` are performed should match the
  dtype of the arguments of `f`.

  Note that we are looking for errors due to the implementation of
  `f`, not due to rounding of its inputs or outputs. Therefore the
  arguments and the result are canonicalized to being representable
  exactly in 32-bit arithmetic.

  Args:
    f: Function whose accuracy to evaluate. Must be dtype-polymorphic.
    *args: Arguments at which to test the accuracy of `f`.

  Returns:
    relerr: The relative error when computing `f(*args)` in float32.

  Raises:
    ValueError: If `f` is found not to be dtype-polymorphic.
  """
  # Canonicalize the arguments to exactly float32-representable values so the
  # error measured below comes from `f` itself, not from input rounding.
  args_32 = tf.nest.map_structure(floating_tensor_to_f32, args)
  logging.vlog(1, '32-bit arguments: %s', args_32)
  args_64 = tf.nest.map_structure(floating_tensor_to_f64, args_32)
  # The float64 evaluation serves as ground truth.
  truth = f(*args_64)
  logging.vlog(1, 'Correct answer: %s', truth)
  if truth.dtype != tf.float64:
    # A non-float64 result on float64 inputs means `f` is not dtype-polymorphic.
    raise ValueError('Evaluating on {} produced non-64-bit result {}'.format(
        args_64, truth))
  truth_32 = floating_tensor_to_f32(truth)
  logging.vlog(1, 'Correct answer representable in 32 bits: %s', truth_32)
  ans_32 = f(*args_32)
  logging.vlog(1, 'Answer computed in 32 bits: %s', ans_32)
  if ans_32.dtype != tf.float32:
    raise ValueError('Evaluating on {} produced non-32-bit result {}'.format(
        args_32, ans_32))
  # Compare the 32-bit answer against the 32-bit-representable truth.
  return relerr(ans_32, truth_32)
def wrong_bits(rel_err):
  """Returns how many low-order bits `rel_err` corresponds to, in float32.

  In other words, if you see relative error `rel_err` on a
  (non-denormal) float-32 quantity, `wrong_bits(rel_err)` is the number
  of low-order bits that are wrong.

  Args:
    rel_err: Floating-point Tensor of relative error values.

  Returns:
    wrong: Tensor of elementwise corresponding wrong bits values, of the
      same dtype as `rel_err`.
  """
  two = tf.constant(2.0, dtype=rel_err.dtype)
  log2_rel_err = tf.math.log(tf.math.abs(rel_err)) / tf.math.log(two)
  # Offset by the 24 mantissa bits of float32; clamp at zero because a
  # negative wrong-bit count can only be an accident.
  return tf.maximum(log2_rel_err + 24, 0)
def rounding_error(x, denormal_correction=True):
  """Compute the maximum absolute 32-bit rounding error possible in `x`.

  That is, we compute

    max_y |y - x| among y where x = round_in_32_bits(y)

  All internal computations are carried out in float64 so this
  function is itself accurate.

  TensorFlow flushes denormals to zero, so there's a rounding error
  cliff at the smallest normal positive float: anything that rounded
  to 0 could have been as large as `tiny`; but anything that rounded
  to anything positive must have been normal. The
  `denormal_correction` flag controls whether this is taken into
  account.

  Args:
    x: Tensor of `x` values to compute 32-bit rounding error for. Is
      internally cast to float64 regardless of input dtype.
    denormal_correction: Python bool. Denormal flushing is accounted
      for if `True`; otherwise rounding is assumed to affect only the
      bits that fall off the mantissa.

  Returns:
    err: float64 Tensor of elementwise maximum rounding errors in `x`.
  """
  # 2**-24: one unit in the last place of a float32 mantissa, relatively.
  resolution = tf.math.pow(tf.constant(2.0, dtype=tf.float64), -24)
  # Smallest positive *normal* float32, held in float64.
  tiny32 = tf.constant(np.finfo(np.float32).tiny, dtype=tf.float64)
  # minute32 approximates (in 64 bits) the smallest positive 32-bit
  # float including denormals. There is no way for 32-bit rounding
  # error to be less than this.
  minute32 = tiny32 * resolution
  x = tf.math.abs(tf.cast(x, dtype=tf.float64))
  if denormal_correction:
    # Below `tiny32` the value would have been flushed to 0, so the
    # rounding error can be as large as `tiny32` itself.
    return tf.where(x >= tiny32, x * resolution, tiny32)
  else:
    return tf.maximum(x * resolution, minute32)
def condition_number_one_input(result, argument, derivative):
  """Returns the condition number at one scalar argument.

  Namely, the error in the output induced by rounding the input to a
  32-bit float, divided by the error in the output induced by rounding
  the output itself to a 32-bit float.  Over most of the floating-point
  range this is just

    |x||f'(x)|
    ------------
      |f(x)|

  but some care is needed when x or f(x) may round to 0.  Internal
  computations are done in float64 to assure their own accuracy.

  Caveat: If `f` uses `stop_gradient` or similar internally, condition
  numbers estimated by this function may be incorrect.

  Args:
    result: A Tensor of `f(x)` values, to be analyzed as though it had
      been subject to float32 round-off.
    argument: A Tensor of `x` values broadcast-compatible with `result`,
      to be analyzed as though it had been subject to float32 round-off.
    derivative: A Tensor of `f'(x)` values broadcast-compatible with
      `result`.  If `None`, assume `f(x)` does not depend on `x` (which
      corresponds to a condition number of 0).

  Returns:
    condition_number: A float64 Tensor of condition numbers,
      corresponding elementwise to the input Tensors.
  """
  if derivative is None:
    # No dependence on this input (up to stop_gradient tricks), so it
    # forces no wrong bits.  This is also correct for integer inputs,
    # which are notionally exact.
    return 0.0
  deriv64 = tf.cast(derivative, dtype=tf.float64)
  input_induced = rounding_error(argument) * tf.math.abs(deriv64)
  # Deliberately do not correct for increased rounding error near zero
  # outputs: that would amount to demanding extra precision there, which
  # does not seem an appropriate demand.
  output_induced = rounding_error(result, denormal_correction=False)
  return input_induced / output_induced
def _full_flatten(xs):
  """Flattens a (possibly nested) structure of Tensors into one 1-D Tensor."""
  flat_leaves = [tf.reshape(leaf, shape=[-1])
                 for leaf in tf.nest.flatten(xs)]
  return tf.concat(flat_leaves, axis=-1)
def inputwise_condition_numbers(f, *args):
  """Computes the condition numbers of `f(*args)` at each arg independently.
  The function `f(*args)` must produce a scalar result; computing
  batches of condition numbers or computing condition numbers of
  vector-valued functions is not yet supported.
  This function assumes that numerical error when computing `f` in
  float64 is negligible.  For this to work correctly, `f` needs to be
  _dtype-polymorphic_: the dtype in which computations internal to `f`
  are performed should match the dtype of the arguments of `f`.
  Args:
    f: Function whose accuracy to evaluate.  Must be differentiable
      and dtype-polymorphic.
    *args: Arguments at which to test the accuracy of `f`.
  Returns:
    condition_numbers: The condition number of `f` with respect to each input.
      The returned structure is parallel to `*args`.
  Raises:
    ValueError: If `f` is found not to be dtype-polymorphic.
  """
  # TODO(b/181967692): Compute multivariate condition numbers.
  # TODO(b/181967437): To support batch condition numbers, need batch gradients.
  # Then can infer the "event shape" of the arguments by subtracting
  # off the number of dimensions in f(*args).
  # To also support vector outputs, need to know the "event_ndims" in
  # the output f(*args), and need full Jacobians of f underneath.
  # Round the arguments down to float32 and back up to float64, so the
  # (presumed-accurate) 64-bit evaluation happens at points that are
  # exactly representable in 32 bits.
  args_32 = tf.nest.map_structure(floating_tensor_to_f32, args)
  logging.vlog(1, '32-bit arguments: %s', args_32)
  args_64 = tf.nest.map_structure(floating_tensor_to_f64, args_32)
  # Value gives the "ground truth" output; gradients feed the per-input
  # condition-number formula below.
  truth, derivatives = gradient.value_and_gradient(f, args_64)
  logging.vlog(1, 'Correct answer: %s', truth)
  logging.vlog(1, 'Argument gradient: %s', derivatives)
  def check_numerics(x):
    # `None` gradients (inputs the output does not depend on) are passed
    # through; condition_number_one_input treats them as 0.
    if x is None:
      return None
    msg = 'Cannot check accuracy if ground truth or derivatives are not finite'
    return tf.debugging.check_numerics(x, message=msg)
  truth = check_numerics(truth)
  derivatives = tf.nest.map_structure(check_numerics, derivatives)
  # A non-float64 result means `f` did not respect the argument dtype,
  # i.e. it is not dtype-polymorphic.
  if truth.dtype != tf.float64:
    raise ValueError('Evaluating on {} produced non-64-bit result {}'.format(
        args_64, truth))
  return tf.nest.map_structure(
      functools.partial(condition_number_one_input, truth),
      # For some reason, value_and_gradient casts the outer structure to list in
      # jax. Is that an oversight?
      tuple(args_64), tuple(derivatives))
def error_due_to_ill_conditioning(f, *args):
  """Returns relative error to expect in `f(*args)` due to conditioning.

  This function assumes that `f` is differentiable, and that numerical
  error when computing `f` or its derivatives in float64 is negligible.
  One necessary condition for this to work correctly is that `f` be
  _dtype-polymorphic_: the dtype in which computations internal to `f`
  (and its derivatives) are performed should match the dtype of the
  arguments of `f`.

  The current implementation evaluates ill conditioning of `f`
  independently for each argument.  It would perhaps be more faithful
  to accepted practice to compute the multivariate condition number
  instead, which accounts for errors caused by coordinated rounding
  among the inputs.

  The function `f` must return a single scalar output; batching and
  vector outputs from `f` are not currently supported.

  Args:
    f: Function whose accuracy to evaluate.  Must be differentiable
      and dtype-polymorphic.
    *args: Arguments at which to assess the accuracy of `f`.

  Returns:
    relerr: A scalar float64 Tensor.  The relative error when computing
      `f(*args)` in float32 that should be expected due to ill
      conditioning of `f`.

  Raises:
    ValueError: If `f` is found not to be dtype-polymorphic.
  """
  kappas = inputwise_condition_numbers(f, *args)
  logging.vlog(1, 'Inputwise condition numbers: %s', kappas)
  def _one_arg_error(arg, kappa):
    # Input rounding error amplified by that input's condition number.
    return rounding_error(arg) * kappa
  per_arg_errors = tf.nest.map_structure(_one_arg_error, args, kappas)
  logging.vlog(1, 'Relative error due to rounding each argument: %s',
               per_arg_errors)
  # The worst single input dominates the expected error.
  return tf.reduce_max(_full_flatten(per_arg_errors), axis=-1)
def excess_wrong_bits(f, *args):
  """Returns excess inaccuracy of 32-bit `f(*args)`, relative to conditioning.

  A positive result suggests the implementation of `f` introduces
  unnecessary numerical error at the given arguments.

  This function assumes that `f` is differentiable, and that numerical
  error when computing `f` or its derivatives in float64 is negligible.
  One necessary condition for this to work correctly is that `f` be
  _dtype-polymorphic_: the dtype in which computations internal to `f`
  (and its derivatives) are performed should match the dtype of the
  arguments of `f`.

  Args:
    f: Function whose accuracy to evaluate.  Must be differentiable
      and dtype-polymorphic.
    *args: Arguments at which to test the accuracy of `f`.

  Returns:
    wrong: The wrong bits when computing `f(*args)` in float32, in
      excess of what would be expected from `f` being ill-conditioned.
  """
  measured_relerr = relative_error_at(f, *args)
  logging.vlog(1, 'Relative error: %s', measured_relerr)
  baseline_relerr = error_due_to_ill_conditioning(f, *args)
  logging.vlog(1, 'Relative error due to input rounding: %s', baseline_relerr)
  measured_wrong = wrong_bits(measured_relerr)
  baseline_wrong = wrong_bits(baseline_relerr)
  logging.vlog(1, 'Wrong bits: %s', measured_wrong)
  logging.vlog(1, 'Wrong bits due to input rounding: %s', baseline_wrong)
  return measured_wrong - baseline_wrong
| [
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.is_tensor",
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.cast",
"tensorflow_probability.python.math.gradient.value_and_gradient",
"tensorflow.compat.v2.reshape",
... | [((2719, 2752), 'tensorflow.compat.v2.cast', 'tf.cast', (['result'], {'dtype': 'tf.float64'}), '(result, dtype=tf.float64)\n', (2726, 2752), True, 'import tensorflow.compat.v2 as tf\n'), ((2763, 2795), 'tensorflow.compat.v2.cast', 'tf.cast', (['truth'], {'dtype': 'tf.float64'}), '(truth, dtype=tf.float64)\n', (2770, 2795), True, 'import tensorflow.compat.v2 as tf\n'), ((2804, 2841), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['((result - truth) / truth)'], {}), '((result - truth) / truth)\n', (2815, 2841), True, 'import tensorflow.compat.v2 as tf\n'), ((4061, 4112), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['floating_tensor_to_f32', 'args'], {}), '(floating_tensor_to_f32, args)\n', (4082, 4112), True, 'import tensorflow.compat.v2 as tf\n'), ((4115, 4163), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""32-bit arguments: %s"""', 'args_32'], {}), "(1, '32-bit arguments: %s', args_32)\n", (4127, 4163), False, 'from absl import logging\n'), ((4176, 4230), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['floating_tensor_to_f64', 'args_32'], {}), '(floating_tensor_to_f64, args_32)\n', (4197, 4230), True, 'import tensorflow.compat.v2 as tf\n'), ((4255, 4299), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Correct answer: %s"""', 'truth'], {}), "(1, 'Correct answer: %s', truth)\n", (4267, 4299), False, 'from absl import logging\n'), ((4480, 4552), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Correct answer representable in 32 bits: %s"""', 'truth_32'], {}), "(1, 'Correct answer representable in 32 bits: %s', truth_32)\n", (4492, 4552), False, 'from absl import logging\n'), ((4578, 4635), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Answer computed in 32 bits: %s"""', 'ans_32'], {}), "(1, 'Answer computed in 32 bits: %s', ans_32)\n", (4590, 4635), False, 'from absl import logging\n'), ((8808, 8845), 'tensorflow.compat.v2.cast', 'tf.cast', (['derivative'], {'dtype': 'tf.float64'}), '(derivative, 
dtype=tf.float64)\n', (8815, 8845), True, 'import tensorflow.compat.v2 as tf\n'), ((10501, 10552), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['floating_tensor_to_f32', 'args'], {}), '(floating_tensor_to_f32, args)\n', (10522, 10552), True, 'import tensorflow.compat.v2 as tf\n'), ((10555, 10603), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""32-bit arguments: %s"""', 'args_32'], {}), "(1, '32-bit arguments: %s', args_32)\n", (10567, 10603), False, 'from absl import logging\n'), ((10616, 10670), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['floating_tensor_to_f64', 'args_32'], {}), '(floating_tensor_to_f64, args_32)\n', (10637, 10670), True, 'import tensorflow.compat.v2 as tf\n'), ((10694, 10733), 'tensorflow_probability.python.math.gradient.value_and_gradient', 'gradient.value_and_gradient', (['f', 'args_64'], {}), '(f, args_64)\n', (10721, 10733), False, 'from tensorflow_probability.python.math import gradient\n'), ((10736, 10780), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Correct answer: %s"""', 'truth'], {}), "(1, 'Correct answer: %s', truth)\n", (10748, 10780), False, 'from absl import logging\n'), ((10783, 10836), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Argument gradient: %s"""', 'derivatives'], {}), "(1, 'Argument gradient: %s', derivatives)\n", (10795, 10836), False, 'from absl import logging\n'), ((11081, 11131), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['check_numerics', 'derivatives'], {}), '(check_numerics, derivatives)\n', (11102, 11131), True, 'import tensorflow.compat.v2 as tf\n'), ((12927, 12996), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Inputwise condition numbers: %s"""', 'condition_numbers'], {}), "(1, 'Inputwise condition numbers: %s', condition_numbers)\n", (12939, 12996), False, 'from absl import logging\n'), ((13109, 13197), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Relative error due to rounding each argument: %s"""', 
'rounding_errors'], {}), "(1, 'Relative error due to rounding each argument: %s',\n rounding_errors)\n", (13121, 13197), False, 'from absl import logging\n'), ((14251, 14293), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Relative error: %s"""', 'err'], {}), "(1, 'Relative error: %s', err)\n", (14263, 14293), False, 'from absl import logging\n'), ((14357, 14434), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Relative error due to input rounding: %s"""', 'conditioning_err'], {}), "(1, 'Relative error due to input rounding: %s', conditioning_err)\n", (14369, 14434), False, 'from absl import logging\n'), ((14509, 14549), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Wrong bits: %s"""', 'wrong'], {}), "(1, 'Wrong bits: %s', wrong)\n", (14521, 14549), False, 'from absl import logging\n'), ((14552, 14621), 'absl.logging.vlog', 'logging.vlog', (['(1)', '"""Wrong bits due to input rounding: %s"""', 'conditioning'], {}), "(1, 'Wrong bits due to input rounding: %s', conditioning)\n", (14564, 14621), False, 'from absl import logging\n'), ((1599, 1614), 'tensorflow.compat.v2.is_tensor', 'tf.is_tensor', (['x'], {}), '(x)\n', (1611, 1614), True, 'import tensorflow.compat.v2 as tf\n'), ((1619, 1650), 'tensorflow_probability.python.internal.dtype_util.is_floating', 'dtype_util.is_floating', (['x.dtype'], {}), '(x.dtype)\n', (1641, 1650), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((1663, 1691), 'tensorflow.compat.v2.cast', 'tf.cast', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (1670, 1691), True, 'import tensorflow.compat.v2 as tf\n'), ((2099, 2114), 'tensorflow.compat.v2.is_tensor', 'tf.is_tensor', (['x'], {}), '(x)\n', (2111, 2114), True, 'import tensorflow.compat.v2 as tf\n'), ((2119, 2150), 'tensorflow_probability.python.internal.dtype_util.is_floating', 'dtype_util.is_floating', (['x.dtype'], {}), '(x.dtype)\n', (2141, 2150), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((2163, 2191), 
'tensorflow.compat.v2.cast', 'tf.cast', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (2170, 2191), True, 'import tensorflow.compat.v2 as tf\n'), ((3104, 3138), 'tensorflow.compat.v2.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float64'}), '(0.0, dtype=tf.float64)\n', (3115, 3138), True, 'import tensorflow.compat.v2 as tf\n'), ((5289, 5326), 'tensorflow.compat.v2.constant', 'tf.constant', (['(2.0)'], {'dtype': 'rel_err.dtype'}), '(2.0, dtype=rel_err.dtype)\n', (5300, 5326), True, 'import tensorflow.compat.v2 as tf\n'), ((6513, 6547), 'tensorflow.compat.v2.constant', 'tf.constant', (['(2.0)'], {'dtype': 'tf.float64'}), '(2.0, dtype=tf.float64)\n', (6524, 6547), True, 'import tensorflow.compat.v2 as tf\n'), ((6841, 6869), 'tensorflow.compat.v2.cast', 'tf.cast', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (6848, 6869), True, 'import tensorflow.compat.v2 as tf\n'), ((6908, 6953), 'tensorflow.compat.v2.where', 'tf.where', (['(x >= tiny32)', '(x * resolution)', 'tiny32'], {}), '(x >= tiny32, x * resolution, tiny32)\n', (6916, 6953), True, 'import tensorflow.compat.v2 as tf\n'), ((6973, 7009), 'tensorflow.compat.v2.maximum', 'tf.maximum', (['(x * resolution)', 'minute32'], {}), '(x * resolution, minute32)\n', (6983, 7009), True, 'import tensorflow.compat.v2 as tf\n'), ((9024, 9049), 'tensorflow.compat.v2.reshape', 'tf.reshape', (['x'], {'shape': '[-1]'}), '(x, shape=[-1])\n', (9034, 9049), True, 'import tensorflow.compat.v2 as tf\n'), ((10989, 11032), 'tensorflow.compat.v2.debugging.check_numerics', 'tf.debugging.check_numerics', (['x'], {'message': 'msg'}), '(x, message=msg)\n', (11016, 11032), True, 'import tensorflow.compat.v2 as tf\n'), ((11305, 11357), 'functools.partial', 'functools.partial', (['condition_number_one_input', 'truth'], {}), '(condition_number_one_input, truth)\n', (11322, 11357), False, 'import functools\n'), ((6577, 6597), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (6585, 6597), True, 
'import numpy as np\n'), ((8883, 8906), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['derivative'], {}), '(derivative)\n', (8894, 8906), True, 'import tensorflow.compat.v2 as tf\n'), ((9085, 9119), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['flatten', 'xs'], {}), '(flatten, xs)\n', (9106, 9119), True, 'import tensorflow.compat.v2 as tf\n'), ((5408, 5428), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['rel_err'], {}), '(rel_err)\n', (5419, 5428), True, 'import tensorflow.compat.v2 as tf\n')] |
import copy
import numpy as np
import pandas as pd
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import cross_val_score, GridSearchCV
import warnings
# warnings.simplefilter('ignore')
def main():
    """Train a RidgeCV model on selected house-price features.

    Reads the training set from 'data/input/train.feather', engineers the
    HasGarage/HasBsmt indicator features, drops rows with missing values
    and two known outliers (Ids 1299 and 524, identified during EDA),
    log-transforms the skewed columns for normality, grid-searches RidgeCV
    and prints the best cross-validated R^2 score.
    """
    features = [
        'OverallQual',
        'GrLivArea',
        'GarageArea',
        'TotalBsmtSF',
        # Added for getting normality
        'HasGarage',
        'HasBsmt',
    ]
    col_id_name = 'Id'
    col_target_name = 'SalePrice'
    df_train = pd.read_feather('data/input/train.feather')
    # Indicator features: 1 when the house has a garage / basement.
    # (The original built a throwaway pd.Series and immediately overwrote
    # it with 0 -- dead code; a direct boolean cast is equivalent.)
    df_train['HasGarage'] = (df_train['GarageArea'] > 0).astype(int)
    df_train['HasBsmt'] = (df_train['TotalBsmtSF'] > 0).astype(int)
    df_train = df_train[features + [col_id_name, col_target_name]]
    # Dealing with missing data (drop rows with any NaN)
    df_train = df_train.dropna(axis='index')
    # Dealing with outliers: refer to EDA
    df_train = df_train[~df_train[col_id_name].isin([1299, 524])]
    # Transform for getting normality.  GarageArea/TotalBsmtSF contain
    # zeros, so take the log only on the masked (positive) rows to avoid
    # computing log(0) = -inf (the original evaluated np.log on the full
    # column, emitting RuntimeWarnings, then discarded the -inf values
    # through index alignment).
    df_train['SalePrice'] = np.log(df_train['SalePrice'])
    df_train['GrLivArea'] = np.log(df_train['GrLivArea'])
    garage_mask = df_train['HasGarage'] == 1
    df_train.loc[garage_mask, 'GarageArea'] = np.log(
        df_train.loc[garage_mask, 'GarageArea'])
    bsmt_mask = df_train['HasBsmt'] == 1
    df_train.loc[bsmt_mask, 'TotalBsmtSF'] = np.log(
        df_train.loc[bsmt_mask, 'TotalBsmtSF'])
    X_train = df_train[features]
    y_train = df_train[col_target_name]
    param_grid = [
        {
            'cv': [5],
            'scoring': ['r2'],
            'gcv_mode': ['auto'],
        },
    ]
    gs = GridSearchCV(
        estimator=RidgeCV(alphas=[i / 10 for i in range(1, 10)]),
        param_grid=param_grid,
        scoring='r2',
        cv=2,
    )
    gs.fit(X_train, y_train)
    print(gs.best_score_)  # score: 0.8254907131913196
# Script entry point: run the training pipeline only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
| [
"pandas.read_feather",
"numpy.log",
"copy.deepcopy"
] | [((487, 530), 'pandas.read_feather', 'pd.read_feather', (['"""data/input/train.feather"""'], {}), "('data/input/train.feather')\n", (502, 530), True, 'import pandas as pd\n'), ((955, 978), 'copy.deepcopy', 'copy.deepcopy', (['features'], {}), '(features)\n', (968, 978), False, 'import copy\n'), ((1406, 1435), 'numpy.log', 'np.log', (["df_train['SalePrice']"], {}), "(df_train['SalePrice'])\n", (1412, 1435), True, 'import numpy as np\n'), ((1464, 1493), 'numpy.log', 'np.log', (["df_train['GrLivArea']"], {}), "(df_train['GrLivArea'])\n", (1470, 1493), True, 'import numpy as np\n'), ((1565, 1595), 'numpy.log', 'np.log', (["df_train['GarageArea']"], {}), "(df_train['GarageArea'])\n", (1571, 1595), True, 'import numpy as np\n'), ((1666, 1697), 'numpy.log', 'np.log', (["df_train['TotalBsmtSF']"], {}), "(df_train['TotalBsmtSF'])\n", (1672, 1697), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
from numpy import pi, sqrt, exp, power, log, log10
import os
import constants as ct
import particle as pt
import tools as tl
##############################
# Preparing SKA configurations
##############################
def initialize():
    """Load and cache the SKA1 array configuration into a dict.

    This routine is supposed to be run only once, \
    i.e. when the module is loaded, therefore\
    the I/O is not optimized for speed concerns.

    Returns ``SKA_conf``, a dict holding, for both SKA1-low and SKA1-mid:
    station/dish coordinates loaded from the bundled CSV files, the
    baseline-length histogram with its cumulative sum, the tabulated
    Aeff/Tsys data, and the derived efficiency curves ('eta low',
    'eta mid', 'eta').  Depends on the module-level ``local_path`` and on
    ``T_sys_low``/``T_sys_mid`` defined elsewhere in this module.
    """
    SKA_conf = {}
    # # --------------
    for exper in ['low', 'mid']:
        # if exper == "low":
        #     path = local_path + "/data/SKA1-low_accumu.csv"
        # elif exper == "mid":
        #     path = local_path + "/data/SKA1-mid_accumu.csv"
        # data_raw = np.loadtxt(path, delimiter=',')
        # radius = data_raw[:, 0]
        # fraction = data_raw[:, 1]
        # bins_radius = np.logspace(1, 5, 20)  # bin it
        # hist_radius = np.interp(np.log10(bins_radius), np.log10(
        #     radius), fraction, left=0)  # sample at the bin edges
        # if exper == "low":
        #     # compute the x-y coordinates of all units
        #     x_arr, y_arr = get_telescope_coordinate(
        #         fraction*ct._SKALow_number_of_stations_, radius, SKA=exper)
        #     # save it
        #     SKA_conf['low radius'] = (data_raw, x_arr, y_arr, bins_radius,
        #                               hist_radius)
        # elif exper == "mid":
        #     x_arr, y_arr = get_telescope_coordinate(
        #         fraction*ct._SKA1Mid_number_of_dishes_, radius, SKA=exper)
        #     SKA_conf['mid radius'] = (data_raw, x_arr, y_arr, bins_radius,
        #                               hist_radius)
        # get coordinates
        if exper == "low":
            SKA_conf['low0'] = np.loadtxt(
                local_path + "/data/SKA1_config_low0.csv", delimiter=',')
            SKA_conf['low1'] = np.loadtxt(
                local_path + "/data/SKA1_config_low1.csv", delimiter=',')
            SKA_conf['low2'] = np.loadtxt(
                local_path + "/data/SKA1_config_low2_6clusters.csv", delimiter=',')
            # update clusters, it's 6 stations per cluster
            # (each CSV entry is a cluster center; expand it into a 2x3
            # grid of stations offset by 50 m)
            new_arr = []
            for xy in (SKA_conf['low2']):
                for j in range(2):
                    for k in range(3):
                        x = xy[0] + j*50
                        y = xy[1] + (k-1)*50
                        new_arr.append([x, y])
            new_arr = np.array(new_arr)
            SKA_conf['low2'] = new_arr
            # combine them
            SKA_conf['low_coord'] = np.concatenate(
                (SKA_conf['low0'], SKA_conf['low1'], SKA_conf['low2']))
            x_arr = SKA_conf['low_coord'][:, 0]
            y_arr = SKA_conf['low_coord'][:, 1]
        elif exper == "mid":
            SKA_conf['mid0_MeerKAT'] = np.loadtxt(
                local_path + "/data/SKA1_config_mid0_MK.csv", delimiter=',')
            SKA_conf['mid0_SKA'] = np.loadtxt(
                local_path + "/data/SKA1_config_mid0_SKA.csv", delimiter=',')
            SKA_conf['mid1_MeerKAT'] = np.loadtxt(
                local_path + "/data/SKA1_config_mid1_MK.csv", delimiter=',')
            SKA_conf['mid1_SKA'] = np.loadtxt(
                local_path + "/data/SKA1_config_mid1_SKA.csv", delimiter=',')
            SKA_conf['mid2_SKA'] = np.loadtxt(
                local_path + "/data/SKA1_config_mid2_SKA.csv", delimiter=',')
            # combine them
            SKA_conf['mid_coord'] = np.concatenate(
                (SKA_conf['mid0_MeerKAT'],
                 SKA_conf['mid0_SKA'],
                 SKA_conf['mid1_MeerKAT'],
                 SKA_conf['mid1_SKA'],
                 SKA_conf['mid2_SKA']))
            # convert km to m
            SKA_conf['mid_coord'][:, 0] = SKA_conf['mid_coord'][:, 0]*1.e3
            SKA_conf['mid_coord'][:, 1] = SKA_conf['mid_coord'][:, 1]*1.e3
            x_arr = SKA_conf['mid_coord'][:, 0]
            y_arr = SKA_conf['mid_coord'][:, 1]
        # get baseline distribution
        baseline_arr = get_baseline(x_arr, y_arr)
        hist_baseline, bins_baseline = np.histogram(
            baseline_arr, bins=np.logspace(1, 5, 20000))
        # correcting the over-counting of baseline pair
        # (get_baseline returns each unordered pair twice)
        hist_baseline = hist_baseline/2.
        hist_baseline_cumsum = np.cumsum(hist_baseline)
        # save it
        if exper == "low":
            SKA_conf['low baseline'] = (
                baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
        elif exper == "mid":
            SKA_conf['mid baseline'] = (
                baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum)
        # about effective area
        if exper == "low":
            path = local_path + "/data/SKA1-low_Aeff_over_Tsys.txt"
            data_raw = np.loadtxt(path)
            # low is given in MHz, convert to GHz
            data_raw[:, 0] = data_raw[:, 0] * 1.e-3
            SKA_conf['low A/T'] = data_raw
        elif exper == "mid":
            path = local_path + "/data/SKA1-mid_Aeff_over_Tsys.txt"
            data_raw = np.loadtxt(path)
            SKA_conf['mid A/T'] = data_raw
    SKA_conf['A/T'] = np.concatenate((SKA_conf['low A/T'],
                                     SKA_conf['mid A/T']))
    # computing efficiency
    # make a nu grid; [1:] drops the lower band edge from each sub-grid
    Nsteps = 2001
    nulow = np.logspace(log10(ct._nu_min_ska_low_), log10(
        ct._nu_max_ska_low_), Nsteps//2)[1:]
    # ... and SKA mid...
    numid = np.logspace(log10(ct._nu_min_ska_mid_), log10(
        ct._nu_max_ska_mid_), Nsteps - Nsteps//2)[1:]
    Aeff_over_Tsys = SKA_conf['A/T']
    # Mid: efficiency eta = (Aeff/Tsys) * Tsys / (geometric area)
    nu_arr = numid
    Aeff_over_Tsys_arr = np.interp(
        nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])
    Tsys_arr = T_sys_mid(nu_arr)
    eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_mid_
    SKA_conf['eta mid'] = (nu_arr, eta_arr)
    # Low
    nu_arr = nulow
    Aeff_over_Tsys_arr = np.interp(
        nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])
    Tsys_arr = T_sys_low(nu_arr)
    eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_low_
    SKA_conf['eta low'] = (nu_arr, eta_arr)
    # combined storage
    nu_arr = np.concatenate((SKA_conf['eta low'][0], SKA_conf['eta mid'][0]))
    eta_arr = np.concatenate((SKA_conf['eta low'][1], SKA_conf['eta mid'][1]))
    SKA_conf['eta'] = (nu_arr, eta_arr)
    return SKA_conf
################
# SKA properties
################
def SKA_get_active_baseline(length, exper_mode):
    """Get the active number of baselines in the interferometry mode

    :param length: critical baseline below which the signal can be resolved
    :param exper_mode: "SKA low" or "SKA mid"
    :returns: number of baselines that sees the signal
    """
    lengths, was_scalar = tl.treat_as_arr(length)
    # Pick the precomputed cumulative baseline histogram for this array,
    # plus the single-unit diameter below which no baseline can exist.
    if exper_mode == "SKA low":
        _, _, bins_baseline, cumulative_counts = SKA_conf['low baseline']
        min_baseline = ct._SKALow_station_diameter_
    if exper_mode == "SKA mid":
        _, _, bins_baseline, cumulative_counts = SKA_conf['mid baseline']
        min_baseline = ct._SKA1Mid_dish_diameter_
    # Interpolate the cumulative count in log(length) space.
    active = np.interp(np.log(lengths), np.log(bins_baseline[:-1]),
                       cumulative_counts, left=ct._zero_)
    # Baselines shorter than one dish/station diameter are unphysical.
    active[lengths < min_baseline] = ct._zero_
    if was_scalar:
        active = np.squeeze(active)
    return active
def SKA_exper_nu(nu):
    """
    Returns the SKA experiment mode (low/mid) sensitive to the given frequency nu [GHz].

    Parameters
    ----------
    nu : frequency [GHz]
    """
    # Guard-clause form: frequencies inside a band map to that band;
    # anything out of range maps to None (a placeholder the callers
    # window out, so it won't matter).
    if ct._nu_min_ska_low_ <= nu <= ct._nu_max_ska_low_:
        return 'SKA low'
    if ct._nu_max_ska_low_ < nu <= ct._nu_max_ska_mid_:
        return 'SKA mid'
    return None
def SKA_specs(nu, exper_mode, correlation_mode=None, theta_sig=None):
    """
    Returns the SKA specifications for the given experiment mode and frequency [GHz]:
        area [m^2],
        window,
        receiver noise brightness temperature [K],
        efficiency,
        solid angle resolution [sr],
        number_of_dishes, and
        number_of_measurements.

    Parameters
    ----------
    nu : frequency [GHz]
    exper_mode : mode in which the experiment is working
    correlation_mode: whether to run in interferometry mode or single dish mode. Default None is meant to raise error if not assigned explicitly.
    theta_sig: the signal size we want to observe [radian]

    Notes
    -----
    `number_of_measurements` is floored at 1e-100 (never exactly 0) so
    downstream divisions do not blow up.  `Trec_low`, `Trec_mid` and
    `eta_nu` are defined elsewhere in this module.
    """
    if exper_mode == None:
        area, window, Tr, eta, Omega_res, number_of_dishes, number_of_measurements = 0., 0., 0., 0., 1.e-100, 0., 0.  # set to zero so it will raise error if not treated
    elif exper_mode == 'SKA low' and correlation_mode == "single dish":
        area = ct._area_ska_low_
        window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \
            np.heaviside(ct._nu_max_ska_low_ - nu, 1.)
        # Tr = ct._Tr_ska_low_ # DEPRECATED
        Tr = Trec_low(nu)
        eta = eta_nu(nu)
        # finding resolution:
        wavelength = pt.lambda_from_nu(nu)/100.  # wavelength [m]
        # angular size of pixel resolution [rad]
        # assuming this is the aperture angle and not the radial angle
        theta_res = (1.22*wavelength) / \
            ct._SKALow_station_diameter_  # /sqrt(eta)
        Omega_res = ct.angle_to_solid_angle(
            theta_res)  # solid angle of resolution [sr]
        number_of_dishes = ct._SKALow_number_of_stations_
        number_of_measurements = number_of_dishes
        # Omega_max = np.inf # being sloppy here but we never reach FOV
    elif exper_mode == 'SKA low' and correlation_mode == "interferometry":
        window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \
            np.heaviside(ct._nu_max_ska_low_ - nu, 1.)
        # Tr = ct._Tr_ska_low_ # DEPRECATED
        Tr = Trec_low(nu)
        eta = eta_nu(nu)
        # get the required baseline length for nu
        wavelength = pt.lambda_from_nu(nu) / 100.  # wavelength [m]
        critical_baseline_length = (
            1.22*wavelength) / (theta_sig)\
            * ct._SKA_factor_lose_signal_  # fudge factor for when invisible
        # get the active number of baselines
        active_number_of_baselines = SKA_get_active_baseline(
            critical_baseline_length, exper_mode='SKA low')
        # taking the resolution to be exactly the signal size
        # penalty is taken care of through active_number_of_baselines
        theta_res = theta_sig
        Omega_res = ct.angle_to_solid_angle(
            theta_res)  # solid angle of resolution [sr]
        # for interferometry mode noise has 1/sqrt(number of active baselines)
        number_of_measurements = active_number_of_baselines
        # NOTE: N.B.: this reception area is the total area, and is correct only assuming all dishes/stations contribute
        # which is NOT true for large signal angular size. The code needs to be updated to include the fact that
        # only active dishes/stations/telescopes are contributing. Thus, for large signal angular sizes,
        # the individual values of the S and N CANNOT BE TRUSTED.
        # However, since S and N scale the same with reception area, S/N cancels out
        # in the end only the number of measurements (baselines) matter.
        # Therefore, our S/N CAN INDEED be trusted.
        area = ct._area_ska_low_
        number_of_dishes = ct._SKALow_number_of_stations_
    elif exper_mode == 'SKA mid' and correlation_mode == "single dish":
        area = ct._area_ska_mid_
        # NOTE(review): the mid-band window excludes the lower band edge
        # (heaviside second arg 0.) while the low-band window includes its
        # edges (second arg 1.) -- presumably to avoid double-counting a
        # frequency exactly at the low/mid boundary; confirm.
        window = np.heaviside(nu - ct._nu_min_ska_mid_, 0.) * \
            np.heaviside(ct._nu_max_ska_mid_ - nu, 1.)
        # Tr = ct._Tr_ska_mid_ # DEPRECATED, AND INCONSISTENT
        Tr = Trec_mid(nu)
        eta = eta_nu(nu)
        # finding resolution:
        wavelength = pt.lambda_from_nu(nu)/100.  # wavelength [m]
        # angular size of pixel resolution [rad]
        # assuming this is the aperture angle and not the radial angle
        # theta_res = (1.22*wavelength)/sqrt(eta*4.*area/pi)
        theta_res = (1.22*wavelength)/ct._SKA1Mid_dish_diameter_  # /sqrt(eta)
        Omega_res = ct.angle_to_solid_angle(
            theta_res)  # solid angle of resolution [sr]
        number_of_dishes = ct._SKA1Mid_number_of_dishes_
        number_of_measurements = number_of_dishes
        # Omega_max = np.inf # being sloppy here but we never reach FOV
    elif exper_mode == 'SKA mid' and correlation_mode == "interferometry":
        area = ct._area_ska_mid_
        window = np.heaviside(nu - ct._nu_min_ska_mid_, 0.) * \
            np.heaviside(ct._nu_max_ska_mid_ - nu, 1.)
        # Tr = ct._Tr_ska_mid_ # DEPRECATED, AND INCONSISTENT
        Tr = Trec_mid(nu)
        eta = eta_nu(nu)
        # get the required baseline length for nu
        wavelength = pt.lambda_from_nu(nu) / 100.  # wavelength [m]
        critical_baseline_length = (
            1.22*wavelength) / (theta_sig)\
            * ct._SKA_factor_lose_signal_  # fudge factor
        # get the active number of baselines
        active_number_of_baselines = SKA_get_active_baseline(
            critical_baseline_length, exper_mode='SKA mid')
        # taking the resolution to be exactly the signal size
        # penalty is taken care of through active_num_of_baselines
        theta_res = theta_sig
        Omega_res = ct.angle_to_solid_angle(
            theta_res)  # solid angle of resolution [sr]
        # for interferometry mode noise has 1/sqrt(number of active baselines)
        number_of_measurements = active_number_of_baselines
        # NOTE: N.B.: this reception area is the total area, and is correct only assuming all dishes/stations contribute
        # which is NOT true for large signal angular size. The code needs to be updated to include the fact that
        # only active dishes/stations/telescopes are contributing. Thus, for large signal angular sizes,
        # the individual values of the S and N CANNOT BE TRUSTED.
        # However, since S and N scale the same with reception area, S/N cancels out
        # in the end only the number of measurements (baselines) matter.
        # Therefore, our S/N CAN INDEED be trusted.
        area = ct._area_ska_mid_
        number_of_dishes = ct._SKA1Mid_number_of_dishes_
    # in case the number of baselines is zero
    if number_of_measurements == 0:
        number_of_measurements = 1e-100
    return area, window, Tr, eta, Omega_res, number_of_dishes, number_of_measurements
# def get_telescope_coordinate(tel_arr, r_arr, SKA):
# """Generate an array with coordinate of each telescope computed
# :param tele_arr: the array of telescope index from 1 to (number of telescope)
# :param radius_arr: the radius of each telescope
# :param SKA: "low" or "mid"
# """
# if SKA == "low":
# tel_fine_arr = np.arange(ct._SKALow_number_of_stations_)
# r_core = ct._SKALow_r_core_
# elif SKA == "mid":
# tel_fine_arr = np.arange(ct._SKA1Mid_number_of_dishes_)
# r_core = ct._SKA1Mid_r_core_
# r_fine_arr = np.interp(tel_fine_arr, tel_arr, r_arr)
# # fix seed as we don't really need the randomness
# np.random.seed(123)
# theta_arr = np.random.random(size=len(r_fine_arr)) * np.pi * 2.
# # over write the arm part
# # mask = np.where(r_fine_arr > r_core, True, False)
# for i in tel_fine_arr:
# if r_fine_arr[int(i)] > r_core:
# theta_arr[int(i)] = int(i) % 3 * 2. * np.pi / 3.
# x_arr = r_fine_arr * np.cos(theta_arr)
# y_arr = r_fine_arr * np.sin(theta_arr)
# return x_arr, y_arr
def get_baseline(x_arr, y_arr):
"""Given array coordinates x, y, compute lengths of each pair. Returns the array of pair lengths.
:param x_arr: x coordinate of all units
:param y_arr: y coordinates of all units
"""
n_unit = len(x_arr)
# n_baseline = int(n_unit * (n_unit - 1) / 2.)
baseline_arr = np.zeros((n_unit, n_unit))
for i in range(n_unit):
for j in range(n_unit):
# print("x[i]=%s, y[j]=%s" % (x_arr[i], y_arr[j]))
dist = np.sqrt((x_arr[i] - x_arr[j])**2 + (y_arr[i] - y_arr[j])**2)
baseline_arr[i, j] = dist
# baseline_arr[j, i] = dist
baseline_arr = baseline_arr.reshape(-1)
baseline_arr = baseline_arr[baseline_arr > 0]
return baseline_arr
def Trec_mid_MeerKAT(nu):
"""Receiver noise temperature [K] of a MeerKAT dish (13.5m-diameter type)
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
res = []
for nui in nu:
if 0.58 < nui < 1.02:
res.append(11 - 4.5*(nui-0.58))
elif 1.02 < nui < 1.67:
res.append(7.5 + 6.8 * np.abs(nui - 1.65)**1.5)
elif 1.65 < nui < 3.05:
res.append(7.5)
else:
res.append(np.inf)
if is_scalar:
res = np.squeeze(res)
return np.array(res)
def Trec_mid_SKA(nu):
"""Receiver noise temperature [K] of a SKA dish (15m-diameter type)
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
res = []
for nui in nu:
if 0.35 < nui < 0.95:
res.append(15 + 30*(nui-0.75)**2)
elif 0.95 < nui < 4.6:
res.append(7.5)
elif 4.6 < nui < 50:
res.append(4.4+0.69 * nui)
else:
res.append(np.inf)
if is_scalar:
res = np.squeeze(res)
return np.array(res)
def Trec_mid(nu):
"""Receiver noise temperature [K] of a typical SKA1-mid dish. Combines MeerKAT with SKA dishes. If there's only SKA dish, use that one; if there are both, use a weighted mean.
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
Trec_arr = []
for nui in nu:
val1 = Trec_mid_MeerKAT(nui)
val2 = Trec_mid_SKA(nui)
if np.isinf(val1):
val1 = val2
# val = np.sqrt(val1*val2) # NOTE: geometric mean puts them on equal footing, even if there was but a single MeerKAT telescope!!!
val = (val1*64. + val2*133.)/(133.+64.) # weighted mean: seems fairer
Trec_arr.append(val)
Trec_arr = np.array(Trec_arr)
if is_scalar:
Trec_arr = np.squeeze(Trec_arr)
return Trec_arr
def Trec_low(nu):
"""Receiver noise temperature [K] of a typical SKA1-low dish.
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
Trec_arr = np.ones_like(nu) * ct._Tr_ska_low_
if is_scalar:
Trec_arr = np.squeeze(Trec_arr)
return Trec_arr
def Trec(nu):
"""The receiver noise [K] for both SKA1-Mid and SKA1-Low
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
res = np.zeros_like(nu)
low_idx = np.where(nu <= ct._nu_max_ska_low_)
mid_idx = np.where(nu > ct._nu_max_ska_low_)
res[low_idx] = Trec_low(nu[low_idx])
res[mid_idx] = Trec_mid(nu[mid_idx])
if is_scalar:
res = np.squeeze(res)
return res
def T_sys_mid(nu):
"""System noise temperature [K] of a single dish for SKA-Mid. It's defined by Treceiver + Tspillover + Tsky, where Tsky = Tcmb + Tgal + Tatm. Note that this function is only used to compute eta from the table of Aeff/Tsys in Braun et al. It is not used in the noise computation.
:param nu: frequency [GHz]
"""
nu, is_scalar = tl.treat_as_arr(nu)
Tsky_arr = np.interp(nu, Tsky_mid[:, 0], Tsky_mid[:, 1])
Trec_arr = Trec_mid(nu)
res = ct._T_spill_mid_ + Tsky_arr + Trec_arr
if is_scalar:
res = np.squeeze(res)
return res
def T_sys_low(nu):
"""System noise temperature [K] of a single station SKA-Low. It's defined by Treceiver + Tspillover + Tsky, where Tsky = Tcmb + Tgal + Tatm. Note that this function is only used to compute eta from the table of Aeff/Tsys in Braun et al. It is not used in the real noise computation.
"""
nu, is_scalar = tl.treat_as_arr(nu)
Tsky_arr = np.interp(nu, Tsky_low[:, 0], Tsky_low[:, 1])
Trec_arr = Trec_low(nu)
res = ct._T_spill_low_ + Tsky_arr + Trec_arr
if is_scalar:
res = np.squeeze(res)
return res
# #
# # efficiency related global vairiables
# # --------------------------------------
# # (nu, eta) arrays for SKA1 low and mid
# nu_eta_low = np.loadtxt(local_path+"/data/eta_low.csv", delimiter=",")
# nu_eta_mid = np.loadtxt(local_path+"/data/eta_mid.csv", delimiter=",")
# # interpolating the data:
# eta_low_fn = tl.interp_fn(nu_eta_low)
# eta_mid_fn = tl.interp_fn(nu_eta_mid)
# # --------------------------------
# # defining the general efficiency
# def eta_nu(nu, exper_mode):
# """Returns the efficiency eta.
# nu : frequency [GHz]
# exper_mode : mode in which the experiment is working
# """
# if exper_mode == None:
# eta = 0.
# elif exper_mode == 'SKA low':
# eta = eta_low_fn(nu)
# elif exper_mode == 'SKA mid':
# eta = eta_mid_fn(nu)
# return eta
def eta_nu(nu):
"""Returns the efficiency eta.
nu : frequency [GHz]
"""
nu_arr, eta_arr = SKA_conf['eta']
nu, is_scalar = tl.treat_as_arr(nu)
res = np.interp(nu, nu_arr, eta_arr)
if is_scalar:
res = np.squeeze(res)
return res
##################
# global variables
##################
# Defining local path
# --------------------
local_path = os.path.dirname(os.path.abspath(__file__))
# load the sky temperature (Tsky=Tcmb + Tgal + Tatm)
# from Braun et al. 2017
# and SKA-TEL-SKO-0000308_SKA1_System_Baseline_v2_DescriptionRev01-part-1-signed.
# Note that this Tsky is used for two things
# 1. for computing eta
# 2. for extracting Tatm
# -------------------------
Tsky_mid = np.loadtxt(local_path+"/data/Tsky_mid.csv", delimiter=',')
Tsky_low = np.loadtxt(local_path+"/data/Tsky_low.csv", delimiter=',')
# The global variable of SKA
# configuration saved into SKA_conf
# ---------------------------------
SKA_conf = initialize()
| [
"numpy.log10",
"numpy.sqrt",
"numpy.log",
"numpy.array",
"constants.angle_to_solid_angle",
"numpy.where",
"numpy.heaviside",
"numpy.concatenate",
"numpy.logspace",
"numpy.isinf",
"numpy.abs",
"numpy.squeeze",
"particle.lambda_from_nu",
"numpy.interp",
"numpy.ones_like",
"tools.treat_as... | [((22044, 22104), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/Tsky_mid.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/Tsky_mid.csv', delimiter=',')\n", (22054, 22104), True, 'import numpy as np\n'), ((22114, 22174), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/Tsky_low.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/Tsky_low.csv', delimiter=',')\n", (22124, 22174), True, 'import numpy as np\n'), ((5174, 5232), 'numpy.concatenate', 'np.concatenate', (["(SKA_conf['low A/T'], SKA_conf['mid A/T'])"], {}), "((SKA_conf['low A/T'], SKA_conf['mid A/T']))\n", (5188, 5232), True, 'import numpy as np\n'), ((5673, 5734), 'numpy.interp', 'np.interp', (['nu_arr', 'Aeff_over_Tsys[:, 0]', 'Aeff_over_Tsys[:, 2]'], {}), '(nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])\n', (5682, 5734), True, 'import numpy as np\n'), ((5940, 6001), 'numpy.interp', 'np.interp', (['nu_arr', 'Aeff_over_Tsys[:, 0]', 'Aeff_over_Tsys[:, 2]'], {}), '(nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2])\n', (5949, 6001), True, 'import numpy as np\n'), ((6189, 6253), 'numpy.concatenate', 'np.concatenate', (["(SKA_conf['eta low'][0], SKA_conf['eta mid'][0])"], {}), "((SKA_conf['eta low'][0], SKA_conf['eta mid'][0]))\n", (6203, 6253), True, 'import numpy as np\n'), ((6268, 6332), 'numpy.concatenate', 'np.concatenate', (["(SKA_conf['eta low'][1], SKA_conf['eta mid'][1])"], {}), "((SKA_conf['eta low'][1], SKA_conf['eta mid'][1]))\n", (6282, 6332), True, 'import numpy as np\n'), ((6781, 6804), 'tools.treat_as_arr', 'tl.treat_as_arr', (['length'], {}), '(length)\n', (6796, 6804), True, 'import tools as tl\n'), ((16296, 16322), 'numpy.zeros', 'np.zeros', (['(n_unit, n_unit)'], {}), '((n_unit, n_unit))\n', (16304, 16322), True, 'import numpy as np\n'), ((16891, 16910), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (16906, 16910), True, 'import tools as tl\n'), ((17273, 17286), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', 
(17281, 17286), True, 'import numpy as np\n'), ((17444, 17463), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (17459, 17463), True, 'import tools as tl\n'), ((17803, 17816), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (17811, 17816), True, 'import numpy as np\n'), ((18079, 18098), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (18094, 18098), True, 'import tools as tl\n'), ((18519, 18537), 'numpy.array', 'np.array', (['Trec_arr'], {}), '(Trec_arr)\n', (18527, 18537), True, 'import numpy as np\n'), ((18766, 18785), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (18781, 18785), True, 'import tools as tl\n'), ((19055, 19074), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (19070, 19074), True, 'import tools as tl\n'), ((19085, 19102), 'numpy.zeros_like', 'np.zeros_like', (['nu'], {}), '(nu)\n', (19098, 19102), True, 'import numpy as np\n'), ((19117, 19152), 'numpy.where', 'np.where', (['(nu <= ct._nu_max_ska_low_)'], {}), '(nu <= ct._nu_max_ska_low_)\n', (19125, 19152), True, 'import numpy as np\n'), ((19167, 19201), 'numpy.where', 'np.where', (['(nu > ct._nu_max_ska_low_)'], {}), '(nu > ct._nu_max_ska_low_)\n', (19175, 19201), True, 'import numpy as np\n'), ((19712, 19731), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (19727, 19731), True, 'import tools as tl\n'), ((19748, 19793), 'numpy.interp', 'np.interp', (['nu', 'Tsky_mid[:, 0]', 'Tsky_mid[:, 1]'], {}), '(nu, Tsky_mid[:, 0], Tsky_mid[:, 1])\n', (19757, 19793), True, 'import numpy as np\n'), ((20270, 20289), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (20285, 20289), True, 'import tools as tl\n'), ((20306, 20351), 'numpy.interp', 'np.interp', (['nu', 'Tsky_low[:, 0]', 'Tsky_low[:, 1]'], {}), '(nu, Tsky_low[:, 0], Tsky_low[:, 1])\n', (20315, 20351), True, 'import numpy as np\n'), ((21465, 21484), 'tools.treat_as_arr', 'tl.treat_as_arr', (['nu'], {}), '(nu)\n', (21480, 21484), True, 'import 
tools as tl\n'), ((21495, 21525), 'numpy.interp', 'np.interp', (['nu', 'nu_arr', 'eta_arr'], {}), '(nu, nu_arr, eta_arr)\n', (21504, 21525), True, 'import numpy as np\n'), ((21723, 21748), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (21738, 21748), False, 'import os\n'), ((4313, 4337), 'numpy.cumsum', 'np.cumsum', (['hist_baseline'], {}), '(hist_baseline)\n', (4322, 4337), True, 'import numpy as np\n'), ((7113, 7131), 'numpy.log', 'np.log', (['length_arr'], {}), '(length_arr)\n', (7119, 7131), True, 'import numpy as np\n'), ((7133, 7159), 'numpy.log', 'np.log', (['bins_baseline[:-1]'], {}), '(bins_baseline[:-1])\n', (7139, 7159), True, 'import numpy as np\n'), ((7449, 7464), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (7459, 7464), True, 'import numpy as np\n'), ((17246, 17261), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (17256, 17261), True, 'import numpy as np\n'), ((17776, 17791), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (17786, 17791), True, 'import numpy as np\n'), ((18218, 18232), 'numpy.isinf', 'np.isinf', (['val1'], {}), '(val1)\n', (18226, 18232), True, 'import numpy as np\n'), ((18576, 18596), 'numpy.squeeze', 'np.squeeze', (['Trec_arr'], {}), '(Trec_arr)\n', (18586, 18596), True, 'import numpy as np\n'), ((18801, 18817), 'numpy.ones_like', 'np.ones_like', (['nu'], {}), '(nu)\n', (18813, 18817), True, 'import numpy as np\n'), ((18874, 18894), 'numpy.squeeze', 'np.squeeze', (['Trec_arr'], {}), '(Trec_arr)\n', (18884, 18894), True, 'import numpy as np\n'), ((19316, 19331), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (19326, 19331), True, 'import numpy as np\n'), ((19905, 19920), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (19915, 19920), True, 'import numpy as np\n'), ((20463, 20478), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', (20473, 20478), True, 'import numpy as np\n'), ((21558, 21573), 'numpy.squeeze', 'np.squeeze', (['res'], {}), '(res)\n', 
(21568, 21573), True, 'import numpy as np\n'), ((1774, 1842), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_low0.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_low0.csv', delimiter=',')\n", (1784, 1842), True, 'import numpy as np\n'), ((1891, 1959), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_low1.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_low1.csv', delimiter=',')\n", (1901, 1959), True, 'import numpy as np\n'), ((2008, 2086), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_low2_6clusters.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_low2_6clusters.csv', delimiter=',')\n", (2018, 2086), True, 'import numpy as np\n'), ((2460, 2477), 'numpy.array', 'np.array', (['new_arr'], {}), '(new_arr)\n', (2468, 2477), True, 'import numpy as np\n'), ((2581, 2651), 'numpy.concatenate', 'np.concatenate', (["(SKA_conf['low0'], SKA_conf['low1'], SKA_conf['low2'])"], {}), "((SKA_conf['low0'], SKA_conf['low1'], SKA_conf['low2']))\n", (2595, 2651), True, 'import numpy as np\n'), ((4809, 4825), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (4819, 4825), True, 'import numpy as np\n'), ((5362, 5388), 'numpy.log10', 'log10', (['ct._nu_min_ska_low_'], {}), '(ct._nu_min_ska_low_)\n', (5367, 5388), False, 'from numpy import pi, sqrt, exp, power, log, log10\n'), ((5390, 5416), 'numpy.log10', 'log10', (['ct._nu_max_ska_low_'], {}), '(ct._nu_max_ska_low_)\n', (5395, 5416), False, 'from numpy import pi, sqrt, exp, power, log, log10\n'), ((5491, 5517), 'numpy.log10', 'log10', (['ct._nu_min_ska_mid_'], {}), '(ct._nu_min_ska_mid_)\n', (5496, 5517), False, 'from numpy import pi, sqrt, exp, power, log, log10\n'), ((5519, 5545), 'numpy.log10', 'log10', (['ct._nu_max_ska_mid_'], {}), '(ct._nu_max_ska_mid_)\n', (5524, 5545), False, 'from numpy import pi, sqrt, exp, power, log, log10\n'), ((9673, 9707), 'constants.angle_to_solid_angle', 
'ct.angle_to_solid_angle', (['theta_res'], {}), '(theta_res)\n', (9696, 9707), True, 'import constants as ct\n'), ((16466, 16530), 'numpy.sqrt', 'np.sqrt', (['((x_arr[i] - x_arr[j]) ** 2 + (y_arr[i] - y_arr[j]) ** 2)'], {}), '((x_arr[i] - x_arr[j]) ** 2 + (y_arr[i] - y_arr[j]) ** 2)\n', (16473, 16530), True, 'import numpy as np\n'), ((2835, 2906), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_mid0_MK.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_mid0_MK.csv', delimiter=',')\n", (2845, 2906), True, 'import numpy as np\n'), ((2959, 3031), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_mid0_SKA.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_mid0_SKA.csv', delimiter=',')\n", (2969, 3031), True, 'import numpy as np\n'), ((3088, 3159), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_mid1_MK.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_mid1_MK.csv', delimiter=',')\n", (3098, 3159), True, 'import numpy as np\n'), ((3212, 3284), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_mid1_SKA.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_mid1_SKA.csv', delimiter=',')\n", (3222, 3284), True, 'import numpy as np\n'), ((3337, 3409), 'numpy.loadtxt', 'np.loadtxt', (["(local_path + '/data/SKA1_config_mid2_SKA.csv')"], {'delimiter': '""","""'}), "(local_path + '/data/SKA1_config_mid2_SKA.csv', delimiter=',')\n", (3347, 3409), True, 'import numpy as np\n'), ((3491, 3630), 'numpy.concatenate', 'np.concatenate', (["(SKA_conf['mid0_MeerKAT'], SKA_conf['mid0_SKA'], SKA_conf['mid1_MeerKAT'],\n SKA_conf['mid1_SKA'], SKA_conf['mid2_SKA'])"], {}), "((SKA_conf['mid0_MeerKAT'], SKA_conf['mid0_SKA'], SKA_conf[\n 'mid1_MeerKAT'], SKA_conf['mid1_SKA'], SKA_conf['mid2_SKA']))\n", (3505, 3630), True, 'import numpy as np\n'), ((4159, 4183), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(20000)'], {}), '(1, 5, 20000)\n', (4170, 
4183), True, 'import numpy as np\n'), ((5092, 5108), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (5102, 5108), True, 'import numpy as np\n'), ((9142, 9185), 'numpy.heaviside', 'np.heaviside', (['(nu - ct._nu_min_ska_low_)', '(1.0)'], {}), '(nu - ct._nu_min_ska_low_, 1.0)\n', (9154, 9185), True, 'import numpy as np\n'), ((9201, 9244), 'numpy.heaviside', 'np.heaviside', (['(ct._nu_max_ska_low_ - nu)', '(1.0)'], {}), '(ct._nu_max_ska_low_ - nu, 1.0)\n', (9213, 9244), True, 'import numpy as np\n'), ((9391, 9412), 'particle.lambda_from_nu', 'pt.lambda_from_nu', (['nu'], {}), '(nu)\n', (9408, 9412), True, 'import particle as pt\n'), ((10853, 10887), 'constants.angle_to_solid_angle', 'ct.angle_to_solid_angle', (['theta_res'], {}), '(theta_res)\n', (10876, 10887), True, 'import constants as ct\n'), ((10029, 10072), 'numpy.heaviside', 'np.heaviside', (['(nu - ct._nu_min_ska_low_)', '(1.0)'], {}), '(nu - ct._nu_min_ska_low_, 1.0)\n', (10041, 10072), True, 'import numpy as np\n'), ((10088, 10131), 'numpy.heaviside', 'np.heaviside', (['(ct._nu_max_ska_low_ - nu)', '(1.0)'], {}), '(ct._nu_max_ska_low_ - nu, 1.0)\n', (10100, 10131), True, 'import numpy as np\n'), ((10298, 10319), 'particle.lambda_from_nu', 'pt.lambda_from_nu', (['nu'], {}), '(nu)\n', (10315, 10319), True, 'import particle as pt\n'), ((12498, 12532), 'constants.angle_to_solid_angle', 'ct.angle_to_solid_angle', (['theta_res'], {}), '(theta_res)\n', (12521, 12532), True, 'import constants as ct\n'), ((11905, 11948), 'numpy.heaviside', 'np.heaviside', (['(nu - ct._nu_min_ska_mid_)', '(0.0)'], {}), '(nu - ct._nu_min_ska_mid_, 0.0)\n', (11917, 11948), True, 'import numpy as np\n'), ((11964, 12007), 'numpy.heaviside', 'np.heaviside', (['(ct._nu_max_ska_mid_ - nu)', '(1.0)'], {}), '(ct._nu_max_ska_mid_ - nu, 1.0)\n', (11976, 12007), True, 'import numpy as np\n'), ((12172, 12193), 'particle.lambda_from_nu', 'pt.lambda_from_nu', (['nu'], {}), '(nu)\n', (12189, 12193), True, 'import particle as pt\n'), 
((13708, 13742), 'constants.angle_to_solid_angle', 'ct.angle_to_solid_angle', (['theta_res'], {}), '(theta_res)\n', (13731, 13742), True, 'import constants as ct\n'), ((12888, 12931), 'numpy.heaviside', 'np.heaviside', (['(nu - ct._nu_min_ska_mid_)', '(0.0)'], {}), '(nu - ct._nu_min_ska_mid_, 0.0)\n', (12900, 12931), True, 'import numpy as np\n'), ((12947, 12990), 'numpy.heaviside', 'np.heaviside', (['(ct._nu_max_ska_mid_ - nu)', '(1.0)'], {}), '(ct._nu_max_ska_mid_ - nu, 1.0)\n', (12959, 12990), True, 'import numpy as np\n'), ((13175, 13196), 'particle.lambda_from_nu', 'pt.lambda_from_nu', (['nu'], {}), '(nu)\n', (13192, 13196), True, 'import particle as pt\n'), ((17084, 17102), 'numpy.abs', 'np.abs', (['(nui - 1.65)'], {}), '(nui - 1.65)\n', (17090, 17102), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from src.data_management.New_DataSplitter_leave_k_out import New_DataSplitter_leave_k_out
from src.data_management.RecSys2019Reader import RecSys2019Reader
from src.data_management.data_reader import get_ICM_train, get_UCM_train, get_ignore_users_age
from src.feature.demographics_content import get_user_demographic
from src.utils.general_utility_functions import get_split_seed
# SETTINGS
AGE = 4
KEEP_OUT = 1
SAVE_ON_FILE = True
N_MOST_LIKED_ITEMS_TO_SHOW = 10
def write_file_and_print(s: str, file):
print(s)
if SAVE_ON_FILE:
file.write(s)
file.write("\n")
file.flush()
def show_fig(name):
fig = plt.gcf()
fig.show()
if SAVE_ON_FILE:
new_file = output_folder_path + name + ".png"
fig.savefig(new_file)
if __name__ == '__main__':
# Path creation
if SAVE_ON_FILE:
version_path = "../../report/graphics/age_exploration/{}/".format(AGE)
now = datetime.now().strftime('%b%d_%H-%M-%S')
output_folder_path = version_path + now + "/"
output_file_name = output_folder_path + "results.txt"
try:
if not os.path.exists(output_folder_path):
os.mkdir(output_folder_path)
except FileNotFoundError as e:
os.makedirs(output_folder_path)
f = open(output_file_name, "w")
else:
f = None
# Data loading
root_data_path = "../../data/"
data_reader = RecSys2019Reader(root_data_path)
data_reader = New_DataSplitter_leave_k_out(data_reader, k_out_value=1, use_validation_set=False,
force_new_split=True, seed=get_split_seed())
data_reader.load_data()
URM_train, URM_test = data_reader.get_holdout_split()
ICM_all = get_ICM_train(data_reader)
UCM_all = get_UCM_train(data_reader)
# Finding users with this age
UCM_age = data_reader.get_UCM_from_name("UCM_age")
age_feature_to_id_mapper = data_reader.dataReader_object.get_UCM_feature_to_index_mapper_from_name("UCM_age")
age_demographic = get_user_demographic(UCM_age, age_feature_to_id_mapper, binned=True)
users_oth_age = np.unique(get_ignore_users_age(age_demographic, [AGE]))
total_users = np.arange(URM_train.shape[0])
users_age = np.in1d(total_users, users_oth_age, invert=True)
users_age = total_users[users_age]
write_file_and_print("There are {} users of age {}".format(users_age.size, AGE), f)
URM_train_age = URM_train[users_age].copy()
# What are the number of interactions that we have for these users? What is their distribution?
n_tot_interactions = URM_train_age.sum()
n_avg_interactions = URM_train_age.sum(axis=1).mean()
n_std_interactions = URM_train_age.sum(axis=1).std()
write_file_and_print("There are {} total interactions for users of this age.\n"
"Avg number of interactions = {} \n"
"Std number of interactions = {} \n\n".format(n_tot_interactions, n_avg_interactions,
n_std_interactions), f)
interactions_per_user = np.squeeze(np.asarray(URM_train_age.sum(axis=1)))
interactions_per_user = np.sort(interactions_per_user)
plt.title("Number of interactions of users of age {}".format(AGE))
plt.xlabel('User index')
plt.ylabel('Number of interactions')
plt.plot(interactions_per_user)
show_fig("activity")
# What is the item popularity among them? Do they like popular items?
items_liked_age = np.squeeze(np.asarray(URM_train_age.sum(axis=0)))
items_liked_age = np.sort(items_liked_age)
items_liked_all = np.sort(np.squeeze(np.asarray(URM_train.sum(axis=0))))
plt.title("Item popularity")
plt.xlabel("Item index")
plt.ylabel("Number of interactions")
plt.plot(items_liked_age, label="Age {}".format(AGE))
plt.plot(items_liked_all, label="All")
plt.legend()
show_fig("item_popularity")
# What is the most liked item? How many interactions for it?
item_indices = items_liked_age.argsort()[-N_MOST_LIKED_ITEMS_TO_SHOW:][::-1]
for i in range(0, N_MOST_LIKED_ITEMS_TO_SHOW):
n_most_liked = items_liked_age[item_indices[i]]
write_file_and_print("The {}-th most liked item is {} with {} interactions. \n"
"Thus, it it is liked by {}% users. \n".format(i + 1, item_indices[i], n_most_liked,
(n_most_liked / users_age.size) * 100), f)
write_file_and_print("\n", f)
if SAVE_ON_FILE:
f.close()
| [
"matplotlib.pyplot.ylabel",
"src.utils.general_utility_functions.get_split_seed",
"numpy.arange",
"os.path.exists",
"src.data_management.data_reader.get_ICM_train",
"numpy.sort",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.mkdir",
"src.feature.demographics_content.get_user_demographi... | [((733, 742), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (740, 742), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1552), 'src.data_management.RecSys2019Reader.RecSys2019Reader', 'RecSys2019Reader', (['root_data_path'], {}), '(root_data_path)\n', (1536, 1552), False, 'from src.data_management.RecSys2019Reader import RecSys2019Reader\n'), ((1846, 1872), 'src.data_management.data_reader.get_ICM_train', 'get_ICM_train', (['data_reader'], {}), '(data_reader)\n', (1859, 1872), False, 'from src.data_management.data_reader import get_ICM_train, get_UCM_train, get_ignore_users_age\n'), ((1887, 1913), 'src.data_management.data_reader.get_UCM_train', 'get_UCM_train', (['data_reader'], {}), '(data_reader)\n', (1900, 1913), False, 'from src.data_management.data_reader import get_ICM_train, get_UCM_train, get_ignore_users_age\n'), ((2140, 2208), 'src.feature.demographics_content.get_user_demographic', 'get_user_demographic', (['UCM_age', 'age_feature_to_id_mapper'], {'binned': '(True)'}), '(UCM_age, age_feature_to_id_mapper, binned=True)\n', (2160, 2208), False, 'from src.feature.demographics_content import get_user_demographic\n'), ((2303, 2332), 'numpy.arange', 'np.arange', (['URM_train.shape[0]'], {}), '(URM_train.shape[0])\n', (2312, 2332), True, 'import numpy as np\n'), ((2349, 2397), 'numpy.in1d', 'np.in1d', (['total_users', 'users_oth_age'], {'invert': '(True)'}), '(total_users, users_oth_age, invert=True)\n', (2356, 2397), True, 'import numpy as np\n'), ((3295, 3325), 'numpy.sort', 'np.sort', (['interactions_per_user'], {}), '(interactions_per_user)\n', (3302, 3325), True, 'import numpy as np\n'), ((3402, 3426), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""User index"""'], {}), "('User index')\n", (3412, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3467), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of interactions"""'], {}), "('Number of interactions')\n", (3441, 
3467), True, 'import matplotlib.pyplot as plt\n'), ((3472, 3503), 'matplotlib.pyplot.plot', 'plt.plot', (['interactions_per_user'], {}), '(interactions_per_user)\n', (3480, 3503), True, 'import matplotlib.pyplot as plt\n'), ((3698, 3722), 'numpy.sort', 'np.sort', (['items_liked_age'], {}), '(items_liked_age)\n', (3705, 3722), True, 'import numpy as np\n'), ((3804, 3832), 'matplotlib.pyplot.title', 'plt.title', (['"""Item popularity"""'], {}), "('Item popularity')\n", (3813, 3832), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Item index"""'], {}), "('Item index')\n", (3847, 3861), True, 'import matplotlib.pyplot as plt\n'), ((3866, 3902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of interactions"""'], {}), "('Number of interactions')\n", (3876, 3902), True, 'import matplotlib.pyplot as plt\n'), ((3965, 4003), 'matplotlib.pyplot.plot', 'plt.plot', (['items_liked_all'], {'label': '"""All"""'}), "(items_liked_all, label='All')\n", (3973, 4003), True, 'import matplotlib.pyplot as plt\n'), ((4008, 4020), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4018, 4020), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2283), 'src.data_management.data_reader.get_ignore_users_age', 'get_ignore_users_age', (['age_demographic', '[AGE]'], {}), '(age_demographic, [AGE])\n', (2259, 2283), False, 'from src.data_management.data_reader import get_ICM_train, get_UCM_train, get_ignore_users_age\n'), ((1728, 1744), 'src.utils.general_utility_functions.get_split_seed', 'get_split_seed', ([], {}), '()\n', (1742, 1744), False, 'from src.utils.general_utility_functions import get_split_seed\n'), ((1026, 1040), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1038, 1040), False, 'from datetime import datetime\n'), ((1215, 1249), 'os.path.exists', 'os.path.exists', (['output_folder_path'], {}), '(output_folder_path)\n', (1229, 1249), False, 'import os\n'), ((1267, 1295), 'os.mkdir', 'os.mkdir', 
(['output_folder_path'], {}), '(output_folder_path)\n', (1275, 1295), False, 'import os\n'), ((1347, 1378), 'os.makedirs', 'os.makedirs', (['output_folder_path'], {}), '(output_folder_path)\n', (1358, 1378), False, 'import os\n')] |
import os
from pathlib import Path
from time import time
from tqdm import tqdm
from argparse import ArgumentParser
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from datasets import GloveDataset
from glove import GloveModel, weight_func, wmse_loss
def run_train(ds_path: str, outdir:str='output', logdir: str='logs', epochs: int=100, embed_dim: int=300, n_words: int=-1, batch_size: int=2048):
N_WORDS = n_words
EMBED_DIM = embed_dim
N_EPOCHS = epochs
BATCH_SIZE = batch_size
X_MAX = 100
ALPHA = 0.75
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open(ds_path) as f:
dataset = GloveDataset(f.read(), N_WORDS)
glove = GloveModel(dataset._vocab_len, EMBED_DIM)
glove = glove.train().to(device)
optimizer = optim.Adagrad(glove.parameters(), lr=0.05)
st = time()
n_batches = int(len(dataset._xij) / BATCH_SIZE)
loss_values = []
os.makedirs(str(Path(outdir)), exist_ok=True)
os.makedirs(str(Path(logdir)), exist_ok=True)
history = SummaryWriter(log_dir=logdir)
with tqdm(total=N_EPOCHS) as epoch_iter:
for epoch in range(0, N_EPOCHS):
batch_i = 0
with tqdm(total=n_batches, desc='Training GloVe', leave=None) as batch_iter:
for x_ij, i_idx, j_idx in dataset.get_batches(BATCH_SIZE):
x_ij, i_idx, j_idx = x_ij.to(device), i_idx.to(device), j_idx.to(device)
batch_i += 1
optimizer.zero_grad()
outputs = glove(i_idx, j_idx)
weights_x = weight_func(x_ij, X_MAX, ALPHA, device)
loss = wmse_loss(weights_x, outputs, torch.log(x_ij), device)
loss.backward()
optimizer.step()
loss_values.append(loss.item())
batch_log = 'Epoch: {}/{} Batch: {}/{} Loss: {:.3f} eTime: {:.1f}m'.format(
epoch + 1, N_EPOCHS, batch_i, n_batches, np.mean(loss_values[-20:]), (time() - st) / 60.0)
batch_iter.update(1)
batch_iter.set_description(batch_log)
epoch_log = 'Epoch: {} Loss: {:.3f} eTime: {:.1f}m'.format(epoch + 1, np.mean(loss_values[-20:]), (time() - st) / 60.0)
epoch_iter.update(1)
epoch_iter.set_description(epoch_log)
history.add_scalar('train-loss', np.mean(loss.item()), epoch + 1)
torch.save(glove.state_dict(), str(Path(outdir) / 'glove.pth'))
torch.save({
'epoch': epoch,
'model_state_dict': glove.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': np.mean(loss.item())
}, str(Path(outdir) / 'checkpoint.pth'))
def build_parser():
parser = ArgumentParser()
parser.add_argument('--ds-path', type=str, help='dataset path.')
parser.add_argument('--outdir', type=str, default='outputs', help='output directory.')
parser.add_argument('--logdir', type=str, default='logs', help='log directory.')
parser.add_argument('--epochs', type=int, default=100, help='epochs. default is 100.')
parser.add_argument('--batch-size', type=int, default=2048, help='batch size. default is 2048.')
parser.add_argument('--embed-dim', type=int, default=300, help='embedding dim. default is 300.')
parser.add_argument('--n-words', type=int, default=-1, help='numbers of words to use for training. default=-1.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = build_parser()
run_train(args.ds_path, args.outdir, args.logdir, args.epochs, args.embed_dim, args.n_words, args.batch_size)
| [
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"torch.log",
"argparse.ArgumentParser",
"pathlib.Path",
"tqdm.tqdm",
"glove.weight_func",
"torch.cuda.is_available",
"glove.GloveModel",
"time.time"
] | [((809, 850), 'glove.GloveModel', 'GloveModel', (['dataset._vocab_len', 'EMBED_DIM'], {}), '(dataset._vocab_len, EMBED_DIM)\n', (819, 850), False, 'from glove import GloveModel, weight_func, wmse_loss\n'), ((961, 967), 'time.time', 'time', ([], {}), '()\n', (965, 967), False, 'from time import time\n'), ((1165, 1194), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (1178, 1194), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2998, 3014), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3012, 3014), False, 'from argparse import ArgumentParser\n'), ((1209, 1229), 'tqdm.tqdm', 'tqdm', ([], {'total': 'N_EPOCHS'}), '(total=N_EPOCHS)\n', (1213, 1229), False, 'from tqdm import tqdm\n'), ((666, 691), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (689, 691), False, 'import torch\n'), ((1066, 1078), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (1070, 1078), False, 'from pathlib import Path\n'), ((1116, 1128), 'pathlib.Path', 'Path', (['logdir'], {}), '(logdir)\n', (1120, 1128), False, 'from pathlib import Path\n'), ((1328, 1384), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_batches', 'desc': '"""Training GloVe"""', 'leave': 'None'}), "(total=n_batches, desc='Training GloVe', leave=None)\n", (1332, 1384), False, 'from tqdm import tqdm\n'), ((2379, 2405), 'numpy.mean', 'np.mean', (['loss_values[-20:]'], {}), '(loss_values[-20:])\n', (2386, 2405), True, 'import numpy as np\n'), ((1727, 1766), 'glove.weight_func', 'weight_func', (['x_ij', 'X_MAX', 'ALPHA', 'device'], {}), '(x_ij, X_MAX, ALPHA, device)\n', (1738, 1766), False, 'from glove import GloveModel, weight_func, wmse_loss\n'), ((1824, 1839), 'torch.log', 'torch.log', (['x_ij'], {}), '(x_ij)\n', (1833, 1839), False, 'import torch\n'), ((2145, 2171), 'numpy.mean', 'np.mean', (['loss_values[-20:]'], {}), '(loss_values[-20:])\n', (2152, 2171), True, 'import numpy as np\n'), ((2408, 2414), 
'time.time', 'time', ([], {}), '()\n', (2412, 2414), False, 'from time import time\n'), ((2639, 2651), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (2643, 2651), False, 'from pathlib import Path\n'), ((2909, 2921), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (2913, 2921), False, 'from pathlib import Path\n'), ((2174, 2180), 'time.time', 'time', ([], {}), '()\n', (2178, 2180), False, 'from time import time\n')] |
# 实现PCA分析和法向量计算,并加载数据集中的文件进行验证
import os
import time
import numpy as np
from pyntcloud import PyntCloud
import open3d as o3d
def PCA(data, correlation: bool = False, sort: bool = True):
    """Run principal-component analysis on a point cloud.

    Parameters
    ----------
    data : array-like, shape (N, 3)
        Point cloud, one point per row.
    correlation : bool, optional
        If True, standardize each column (divide by its standard deviation)
        so the decomposition is based on the correlation structure rather
        than the covariance/scatter matrix.  NOTE: this flag used to be
        accepted but silently ignored; the default (False) behaves exactly
        as before.
    sort : bool, optional
        If True (default), return eigenvalues and eigenvectors sorted in
        descending order of eigenvalue.

    Returns
    -------
    eigenvalues : np.ndarray, shape (3,)
        Eigenvalues of the scatter (or standardized scatter) matrix.
    eigenvectors : np.ndarray, shape (3, 3)
        Matching eigenvectors stored in the columns.
    """
    # Center the data around the column means.
    X_ = data - np.mean(data, axis=0)
    if correlation:
        # Unit-variance scaling turns the scatter matrix into a scaled
        # correlation matrix (same eigenvectors as np.corrcoef).
        X_ = X_ / np.std(data, axis=0)
    # Scatter matrix (proportional to the covariance matrix).
    H = np.dot(X_.T, X_)
    # H is symmetric positive semi-definite, so its SVD coincides with its
    # eigendecomposition: columns of U are eigenvectors and the singular
    # values are the eigenvalues.
    eigenvectors, eigenvalues, _ = np.linalg.svd(H)
    if sort:
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]
    return eigenvalues, eigenvectors
def main():
    """Load one ModelNet40 sample per category, run PCA on it, and compare
    our PCA-based normal estimation against Open3D's, printing timings and
    visualising the results.
    """
    # Path to the point-cloud dataset.
    path = '../../../modelnet40_normal_resampled/'
    # NOTE(review): when `path` does not exist, None is passed to np.loadtxt
    # and this raises -- presumably intended as a hard failure; confirm.
    shape_name_list = np.loadtxt(os.path.join(path, 'modelnet40_shape_names.txt') if os.path.isdir(path) else None,dtype=str)
    for item in shape_name_list:
        # Import model: first sample of each category; columns 0..2 are XYZ
        # (remaining columns of the file are dropped).
        filename = os.path.join(path, item, item+'_0001.txt')
        pointcloud = np.loadtxt(filename, delimiter=',')[:, 0:3]
        print('total points number is:', pointcloud.shape[0])
        # Convert to PyntCloud and Open3D formats
        point_cloud_o3d = o3d.geometry.PointCloud()
        point_cloud_o3d.points = o3d.utility.Vector3dVector(pointcloud)
        # point_cloud_pynt = PyntCloud.from_instance("open3d", point_cloud_o3d)
        # points = point_cloud_pynt.points
        # Analyse the principal directions of the cloud with PCA (timed).
        N = pointcloud.shape[0]
        t0 = time.time()
        w, v = PCA(pointcloud)
        t1 = time.time()
        print('###### PCA time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        point_cloud_vector = v[:, 2]  # NOTE(review): with descending sort this is the LEAST-variance eigenvector; the "main orientation" label below looks wrong (v[:, 0] is the principal axis) -- confirm intent.
        print('the main orientation of this pointcloud is: ', point_cloud_vector)
        principle_axis = np.concatenate((np.array([[0.,0.,0.]]), v.T))
        print('Principal Axis: ', principle_axis)
        # Visualise the PCA Axis
        axis = o3d.geometry.TriangleMesh.create_coordinate_frame().rotate(v, center=(0,0,0))
        # Visualise the PCA Projection: project points onto the plane
        # orthogonal to v[:, 2], then offset the plane by v[:, 2] for display.
        pr_data = pointcloud - np.dot(pointcloud, v[:,2][:,np.newaxis])*v[:, 2]
        pr_data = 1*v[:, 2] + pr_data
        pc_view = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(pointcloud))
        pc_view.colors = o3d.utility.Vector3dVector([[0,0,0]])
        pr_view = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(pr_data))
        # o3d.visualization.draw_geometries([pc_view, axis, pr_view])
        # Assignment 2: estimate a normal for every point.
        # Nearest-neighbour search is chapter 2 material, so calling Open3D's
        # KD-tree directly is allowed here.
        # Feed the pointcloud into a kdtree structure
        t0 = time.time()
        pcd_tree = o3d.geometry.KDTreeFlann(point_cloud_o3d)
        t1 = time.time()
        print('###### KDTreeFlann time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        normals = []
        t0 = time.time()
        for index in range(N):
            # For each point, search for its nearest k neighbors
            [_, idx, _] = pcd_tree.search_knn_vector_3d(pc_view.points[index], 21)
            neighbor_pc = np.asarray(pc_view.points)[idx]
            # Compute the eigenvectors for its neighbors; the least-variance
            # eigenvector of the neighbourhood is taken as the normal.
            _, v = PCA(neighbor_pc)
            normals.append(v[:, 2])
        t1 = time.time()
        print('###### My Normal Estimation time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        t0 = time.time()
        point_cloud_o3d.estimate_normals()
        t1 = time.time()
        print('###### Open3D Normal Estimation time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        # The estimated normal vectors are stored in `normals`.
        o3d_normals = np.asarray(point_cloud_o3d.normals, dtype=np.float64)
        normals = np.array(normals, dtype=np.float64)
        point_cloud_o3d.normals = o3d.utility.Vector3dVector(normals)
        # build pca line set: short segments from each point along its normal
        points = np.vstack((pointcloud, pointcloud + 0.03*normals))
        lines = [[i, i+N] for i in range(N)]
        colors = np.zeros((N, 3)).tolist()
        surface_normals_my = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(points),
            lines=o3d.utility.Vector2iVector(lines),
        )
        surface_normals_my.colors = o3d.utility.Vector3dVector(colors)
        # Same line set for Open3D's estimated normals (grey), for comparison.
        points = np.vstack((pointcloud, pointcloud + 0.03*o3d_normals))
        lines = [[i, i+N] for i in range(N)]
        colors = np.full((N, 3), 0.5).tolist()
        surface_normals_o3d = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(points),
            lines=o3d.utility.Vector2iVector(lines),
        )
        surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)
        o3d.visualization.draw_geometries([pc_view, axis, pr_view, surface_normals_my, surface_normals_o3d]) # point_cloud_o3d,
# Entry point: run the PCA / normal-estimation demo when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.mean",
"numpy.full",
"open3d.geometry.KDTreeFlann",
"open3d.utility.Vector2iVector",
"os.path.join",
"numpy.asarray",
"numpy.array",
"numpy.dot",
"os.path.isdir",
"open3d.visualization.draw_geometries",
"numpy.vstack",
"open3d.geometry.PointCloud",
"open3d.geometry.TriangleMesh.create... | [((671, 687), 'numpy.dot', 'np.dot', (['X_.T', 'X_'], {}), '(X_.T, X_)\n', (677, 687), True, 'import numpy as np\n'), ((991, 1007), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (1004, 1007), True, 'import numpy as np\n'), ((612, 633), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (619, 633), True, 'import numpy as np\n'), ((1479, 1523), 'os.path.join', 'os.path.join', (['path', 'item', "(item + '_0001.txt')"], {}), "(path, item, item + '_0001.txt')\n", (1491, 1523), False, 'import os\n'), ((1734, 1759), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1757, 1759), True, 'import open3d as o3d\n'), ((1793, 1831), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pointcloud'], {}), '(pointcloud)\n', (1819, 1831), True, 'import open3d as o3d\n'), ((2024, 2035), 'time.time', 'time.time', ([], {}), '()\n', (2033, 2035), False, 'import time\n'), ((2080, 2091), 'time.time', 'time.time', ([], {}), '()\n', (2089, 2091), False, 'import time\n'), ((2844, 2883), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2870, 2883), True, 'import open3d as o3d\n'), ((3211, 3222), 'time.time', 'time.time', ([], {}), '()\n', (3220, 3222), False, 'import time\n'), ((3242, 3283), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['point_cloud_o3d'], {}), '(point_cloud_o3d)\n', (3266, 3283), True, 'import open3d as o3d\n'), ((3297, 3308), 'time.time', 'time.time', ([], {}), '()\n', (3306, 3308), False, 'import time\n'), ((3445, 3456), 'time.time', 'time.time', ([], {}), '()\n', (3454, 3456), False, 'import time\n'), ((3837, 3848), 'time.time', 'time.time', ([], {}), '()\n', (3846, 3848), False, 'import time\n'), ((3965, 3976), 'time.time', 'time.time', ([], {}), '()\n', (3974, 3976), False, 'import time\n'), ((4033, 4044), 'time.time', 'time.time', ([], {}), '()\n', (4042, 4044), False, 'import 
time\n'), ((4219, 4272), 'numpy.asarray', 'np.asarray', (['point_cloud_o3d.normals'], {'dtype': 'np.float64'}), '(point_cloud_o3d.normals, dtype=np.float64)\n', (4229, 4272), True, 'import numpy as np\n'), ((4291, 4326), 'numpy.array', 'np.array', (['normals'], {'dtype': 'np.float64'}), '(normals, dtype=np.float64)\n', (4299, 4326), True, 'import numpy as np\n'), ((4361, 4396), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['normals'], {}), '(normals)\n', (4387, 4396), True, 'import open3d as o3d\n'), ((4445, 4497), 'numpy.vstack', 'np.vstack', (['(pointcloud, pointcloud + 0.03 * normals)'], {}), '((pointcloud, pointcloud + 0.03 * normals))\n', (4454, 4497), True, 'import numpy as np\n'), ((4789, 4823), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (4815, 4823), True, 'import open3d as o3d\n'), ((4842, 4898), 'numpy.vstack', 'np.vstack', (['(pointcloud, pointcloud + 0.03 * o3d_normals)'], {}), '((pointcloud, pointcloud + 0.03 * o3d_normals))\n', (4851, 4898), True, 'import numpy as np\n'), ((5196, 5230), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (5222, 5230), True, 'import open3d as o3d\n'), ((5240, 5344), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pc_view, axis, pr_view, surface_normals_my, surface_normals_o3d]'], {}), '([pc_view, axis, pr_view,\n surface_normals_my, surface_normals_o3d])\n', (5273, 5344), True, 'import open3d as o3d\n'), ((1362, 1381), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1375, 1381), False, 'import os\n'), ((1310, 1358), 'os.path.join', 'os.path.join', (['path', '"""modelnet40_shape_names.txt"""'], {}), "(path, 'modelnet40_shape_names.txt')\n", (1322, 1358), False, 'import os\n'), ((1543, 1578), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (1553, 1578), True, 'import numpy as np\n'), ((2349, 2376), 
'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (2357, 2376), True, 'import numpy as np\n'), ((2486, 2537), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {}), '()\n', (2535, 2537), True, 'import open3d as o3d\n'), ((2634, 2676), 'numpy.dot', 'np.dot', (['pointcloud', 'v[:, 2][:, np.newaxis]'], {}), '(pointcloud, v[:, 2][:, np.newaxis])\n', (2640, 2676), True, 'import numpy as np\n'), ((2779, 2817), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pointcloud'], {}), '(pointcloud)\n', (2805, 2817), True, 'import open3d as o3d\n'), ((2931, 2966), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pr_data'], {}), '(pr_data)\n', (2957, 2966), True, 'import open3d as o3d\n'), ((3662, 3688), 'numpy.asarray', 'np.asarray', (['pc_view.points'], {}), '(pc_view.points)\n', (3672, 3688), True, 'import numpy as np\n'), ((4558, 4574), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (4566, 4574), True, 'import numpy as np\n'), ((4654, 4688), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (4680, 4688), True, 'import open3d as o3d\n'), ((4708, 4741), 'open3d.utility.Vector2iVector', 'o3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (4734, 4741), True, 'import open3d as o3d\n'), ((4959, 4979), 'numpy.full', 'np.full', (['(N, 3)', '(0.5)'], {}), '((N, 3), 0.5)\n', (4966, 4979), True, 'import numpy as np\n'), ((5060, 5094), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (5086, 5094), True, 'import open3d as o3d\n'), ((5114, 5147), 'open3d.utility.Vector2iVector', 'o3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (5140, 5147), True, 'import open3d as o3d\n')] |
from __future__ import absolute_import
from datashader.utils import ngjit
from numba import vectorize, int64
import numpy as np
import os
"""
Initially based on https://github.com/galtay/hilbert_curve, but specialized
for 2 dimensions with numba acceleration
"""
NUMBA_DISABLE_JIT = os.environ.get('NUMBA_DISABLE_JIT', 0)
@ngjit
def _int_2_binary(n, width):
    """Big-endian binary representation of `n`, zero padded to `width`
    bits, returned as a uint8 array."""
    bits = np.zeros(width, dtype=np.uint8)
    value = n
    for pos in range(width):
        # Fill from the least-significant (rightmost) slot upward.
        bits[width - pos - 1] = value % 2
        value = value >> 1
    return bits
@ngjit
def _binary_2_int(bin_vec):
    """Interpret a big-endian binary byte array as an unsigned integer."""
    total = 0
    place = 1
    width = len(bin_vec)
    # Walk from the least-significant (last) element upward, adding the
    # place value wherever the bit is set.
    for offset in range(width):
        total += place * bin_vec[width - offset - 1]
        place <<= 1
    return total
@ngjit
def _hilbert_integer_to_transpose(p, h):
    """Split a Hilbert integer `h` into its 2-D transpose form.

    Args:
        p (int): iterations to use in the hilbert curve
        h (int): integer distance along hilbert curve
    Returns:
        x (list): transpose of h -- 2 components, each de-interleaved from
            the even/odd bits of `h`, with values between 0 and 2**p-1
    """
    n = 2
    h_bits = _int_2_binary(h, p * n)
    # De-interleave: axis 0 takes even bit positions, axis 1 the odd ones.
    x = []
    for axis in range(n):
        x.append(_binary_2_int(h_bits[axis::n]))
    return x
@ngjit
def _transpose_to_hilbert_integer(p, x, y):
    """Recombine transpose coordinates into a Hilbert integer.

    Args:
        p (int): iterations to use in the hilbert curve
        x (int): first transpose component, between 0 and 2**p-1
        y (int): second transpose component, between 0 and 2**p-1
    Returns:
        int: integer distance along hilbert curve
    """
    x_bits = _int_2_binary(x, p)
    y_bits = _int_2_binary(y, p)
    # Interleave: even slots take bits of x, odd slots take bits of y.
    interleaved = np.zeros(2 * p, dtype=np.uint8)
    for k in range(p):
        interleaved[2 * k] = x_bits[k]
        interleaved[2 * k + 1] = y_bits[k]
    return _binary_2_int(interleaved)
@ngjit
def coordinates_from_distance(p, h):
    """Return the coordinates for a given hilbert distance.

    Appears to follow the transpose-based Hilbert decode (Gray decode then
    per-bit invert/exchange), specialised to 2 dimensions.

    Args:
        p (int): iterations to use in the hilbert curve
        h (int): integer distance along hilbert curve
    Returns:
        x (list): transpose of h
            (n components with values between 0 and 2**p-1)
    """
    n = 2
    # Split h into its two de-interleaved coordinate bit-streams.
    x = _hilbert_integer_to_transpose(p, h)
    Z = 2 << (p-1)  # one past the highest bit that needs processing
    # Gray decode by H ^ (H/2)
    t = x[n-1] >> 1
    for i in range(n-1, 0, -1):
        x[i] ^= x[i-1]
    x[0] ^= t
    # Undo excess work: walk bit positions upward, inverting or exchanging
    # the low bits depending on the current coordinate bit.
    Q = 2
    while Q != Z:
        P = Q - 1
        for i in range(n-1, -1, -1):
            if x[i] & Q:
                # invert
                x[0] ^= P
            else:
                # exchange
                t = (x[0] ^ x[i]) & P
                x[0] ^= t
                x[i] ^= t
        Q <<= 1
    # done
    return x
# Choose the vectorization backend: plain numpy.vectorize when numba's JIT
# is disabled via the environment, otherwise a compiled numba ufunc with an
# (int64, int64, int64) -> int64 signature.
if NUMBA_DISABLE_JIT:
    vect = np.vectorize
else:
    vect = vectorize([int64(int64, int64, int64)], nopython=True)
@vect
def distance_from_coordinates(p, x, y):
    """Return the hilbert distance for a given set of coordinates.

    Args:
        p (int): iterations to use in the hilbert curve
        x (int): first coordinate, between 0 and 2**p-1
        y (int): second coordinate, between 0 and 2**p-1
    Returns:
        h (int): integer distance along hilbert curve
    """
    n = 2
    x = np.array([x, y], dtype=np.int64)
    M = 1 << (p - 1)
    # Inverse undo excess work: walk bit positions downward, exchanging or
    # inverting low bits (mirror of the decode loop).
    Q = M
    while Q > 1:
        P = Q - 1
        for i in range(n):
            if x[i] & Q:
                x[0] ^= P
            else:
                t = (x[0] ^ x[i]) & P
                x[0] ^= t
                x[i] ^= t
        Q >>= 1
    # Gray encode
    for i in range(1, n):
        x[i] ^= x[i-1]
    t = 0
    Q = M
    while Q > 1:
        if x[n-1] & Q:
            t ^= Q - 1
        Q >>= 1
    for i in range(n):
        x[i] ^= t
    # Interleave the transformed coordinates back into a single integer.
    h = _transpose_to_hilbert_integer(p, x[0], x[1])
    return h
| [
"numpy.array",
"numpy.zeros",
"numba.int64",
"os.environ.get"
] | [((285, 323), 'os.environ.get', 'os.environ.get', (['"""NUMBA_DISABLE_JIT"""', '(0)'], {}), "('NUMBA_DISABLE_JIT', 0)\n", (299, 323), False, 'import os\n'), ((464, 495), 'numpy.zeros', 'np.zeros', (['width'], {'dtype': 'np.uint8'}), '(width, dtype=np.uint8)\n', (472, 495), True, 'import numpy as np\n'), ((1761, 1792), 'numpy.zeros', 'np.zeros', (['(2 * p)'], {'dtype': 'np.uint8'}), '(2 * p, dtype=np.uint8)\n', (1769, 1792), True, 'import numpy as np\n'), ((3323, 3355), 'numpy.array', 'np.array', (['[x, y]'], {'dtype': 'np.int64'}), '([x, y], dtype=np.int64)\n', (3331, 3355), True, 'import numpy as np\n'), ((2897, 2923), 'numba.int64', 'int64', (['int64', 'int64', 'int64'], {}), '(int64, int64, int64)\n', (2902, 2923), False, 'from numba import vectorize, int64\n')] |
# =============================================================================
# HEPHAESTUS VALIDATION 8 - BEAM DISPLACEMENTS AND ROTATIONS SIMPLE AL BOX BEAM
# =============================================================================
# IMPORTS:
import sys
import os
sys.path.append(os.path.abspath('..\..'))
from AeroComBAT.Structures import MaterialLib
from AeroComBAT.AircraftParts import Wing
import numpy as np
from AeroComBAT.FEM import Model
# Define the width of the cross-section
x1 = -0.8990566037735849
x2 = 0.8990566037735849
# Chord lengths: root and tip identical -> untapered box beam.
c = 1.
ctip = c
croot = c
# Root and tip points: a straight 20 m beam along the z-axis.
p1 = np.array([0.,0.,0.])
p2 = np.array([0.,0.,20.])
# Normalized rib stations along the span (just the two ends here).
Y_rib = np.linspace(0.,1.,2)
b_s = np.linalg.norm((Y_rib[0],Y_rib[-1]))
# Material library: 1 = aluminium (E=71.7e9, nu=0.33, rho=2810),
# 2 = an artificially weak isotropic material; both with 0.005 thickness.
matLib = MaterialLib()
matLib.addMat(1,'AL','iso',[71.7e9,.33,2810],.005)
matLib.addMat(2,'Weak_mat','iso',[100,.33,10],.005)
# One ply of material 1 at each of the 16 laminate positions.
n_ply = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
m_i = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# Beam element density along the span.
noe_dens = 2
wing1 = Wing(1,p1,p2,croot,ctip,x1,x2,Y_rib,n_ply,m_i,matLib,name='box',\
            noe=noe_dens,meshSize=3,lam_sym=True)
# Grab the single superbeam (and its cross-section) for post-processing.
sbeam1 = wing1.wingSects[0].SuperBeams[0]
xsect = sbeam1.xsect
model = Model()
model.addAircraftParts([wing1])
model.plotRigidModel(numXSects=10)
# Apply the constraint for the model
model.applyConstraints(0,'fix')
# CASE 1:
# Apply the case load
# Concentrated tip load at node 40 -- presumably the six generalized
# components [Fx, Fy, Fz, Mx, My, Mz]; confirm against AeroComBAT's
# applyLoads convention.
tipLoad = np.array([-10000.,100000.,-300000.,35000.,60000.,10000.])
F = {40:tipLoad}
model.applyLoads(1,F=F)
# Run the analysis
model.staticAnalysis(1)
model.plotDeformedModel(figName='V8 Case 1',numXSects=10,contLim=[0,293000],\
                        warpScale=10,displScale=2,contour='sig_33')
# Write the beam displacements and rotations to a file
sbeam1.writeDisplacements(fileName = 'V8_Case_1.csv')
# CASE 2:
# Apply the case load
def f(x):
    """Distributed load as a function of position; only x[2] (spanwise z)
    enters the quadratic shape.  Returns six components scaled by 1e-4 --
    presumably [vx, vy, pz, mx, my, tz]; confirm against AeroComBAT docs.
    """
    vx = -0.1*(-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
    vy = (-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
    pz = 0
    tz = .2*c*(-1.0e3*x[2]**2+6e7*x[2]+1.0e6)
    return np.array([vx,vy,pz,0,0,tz])/1.0e4
# Clear the case-1 tip load before applying the distributed load.
model.resetPointLoads()
model.applyLoads(1,f=f,allElems=True)
# Run the analysis
model.staticAnalysis(1)
model.plotDeformedModel(figName='V8 Case 2',numXSects=10,contLim=[0.,5.0e8],\
                        warpScale=100,displScale=10,contour='MaxPrin')
# Write the beam displacements and rotations to a file
sbeam1.writeDisplacements(fileName = 'V8_Case_2.csv')
# CASE 3:
# Apply the case load
def f(x):
    """Uniform distributed load, constant along the span (x is unused)."""
    vx = 1e3
    vy = 1e3
    pz = -1e3
    tz = 1e3
    return np.array([vx,vy,pz,0,0,tz])
model.applyLoads(2,f=f,allElems=True)
# Run the analysis
model.staticAnalysis(2)
model.plotDeformedModel(figName='V8 Case 3',numXSects=10,contLim=[0.,5.0e8],\
                        warpScale=100,displScale=10,contour='MaxPrin')
# Write the beam displacements and rotations to a file
sbeam1.writeDisplacements(fileName = 'V8_Case_3.csv')
# Read back the just-written AeroComBAT results alongside the NASTRAN
# reference solution for the error comparison below.
AeroComBAT_SOL = np.genfromtxt('V8_Case_3.csv', delimiter=',')
NASTRAN_SOL = np.genfromtxt('V8_Case_3_NASTRAN_SOL.csv', delimiter=',')
def u_analytical(z):
    """Closed-form T1 (x-translation) solution along the span coordinate z."""
    # Accumulate the quartic term by term, left to right, so the floating-
    # point evaluation order matches the single-expression form exactly.
    u = 5.74889e-6*z
    u = u + 0.0000144388*z**2
    u = u - 4.86084e-7*z**3
    u = u + 6.07605e-9*z**4
    return u
def v_analytical(z):
    """Closed-form T2 (y-translation) solution along the span coordinate z."""
    # Term-by-term accumulation, preserving left-to-right evaluation order.
    v = 0.0000136309*z
    v = v + 0.0000360234*z**2
    v = v - 1.21214e-6*z**3
    v = v + 1.51518e-8*z**4
    return v
def w_analytical(z):
    """Closed-form T3 (axial translation) solution along the span coordinate z."""
    parabola = 40*z - z**2
    return -3.20696e-8*parabola
def psi_analytical(z):
    """Closed-form R1 rotation solution along the span coordinate z."""
    # Term-by-term accumulation, preserving left-to-right evaluation order.
    r = -0.0000727284*z
    r = r + 3.63642e-6*z**2
    r = r - 6.0607e-8*z**3
    return r
def gamma_analytical(z):
    """Closed-form R2 rotation solution along the span coordinate z."""
    # Term-by-term accumulation, preserving left-to-right evaluation order.
    r = 0.000029165*z
    r = r - 1.45825e-6*z**2
    r = r + 2.43042e-8*z**3
    return r
def phi_analytical(z):
    """Closed-form R3 (twist) solution along the span coordinate z."""
    parabola = 40*z - z**2
    return 2.18083e-7*parabola
# Spanwise stations: 41 points over the 0..20 m beam.
z = np.linspace(0,20,41)
# AeroComBAT CSV: columns 4..9 are the translations T1,T2,T3 and rotations
# R1,R2,R3 (pairing taken from the plot labels below).
AeroComBAT_u = AeroComBAT_SOL[:,4]
AeroComBAT_v = AeroComBAT_SOL[:,5]
AeroComBAT_w = AeroComBAT_SOL[:,6]
AeroComBAT_psi = AeroComBAT_SOL[:,7]
AeroComBAT_gamma = AeroComBAT_SOL[:,8]
AeroComBAT_phi = AeroComBAT_SOL[:,9]
# NASTRAN CSV layout is shifted one column (5..10).
NASTRAN_u = NASTRAN_SOL[:,5]
NASTRAN_v = NASTRAN_SOL[:,6]
NASTRAN_w = NASTRAN_SOL[:,7]
NASTRAN_psi = NASTRAN_SOL[:,8]
NASTRAN_gamma = NASTRAN_SOL[:,9]
NASTRAN_phi = NASTRAN_SOL[:,10]
# Evaluate the analytical reference solution at the same stations.
analytical_u = u_analytical(z)
analytical_v = v_analytical(z)
analytical_w = w_analytical(z)
analytical_psi = psi_analytical(z)
analytical_gamma = gamma_analytical(z)
analytical_phi = phi_analytical(z)
import matplotlib.pyplot as plt
# NOTE: matplotlib removed pyplot.hold() in version 3.0 (axes hold by
# default since 2.0), so the old plt.hold(True)/plt.hold(False) calls were
# dropped -- repeated plt.plot() calls already draw onto the same axes.
# Figure 1: translation percent error vs. the analytical solution.
plt.figure(1)
plt.plot(z,(analytical_u-AeroComBAT_u)/analytical_u*100,'b-',label='T1 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_v-AeroComBAT_v)/analytical_v*100,'g-',label='T2 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_w-AeroComBAT_w)/analytical_w*100,'r-',label='T3 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_u-NASTRAN_u)/analytical_u*100,'c--',label='T1 NASTRAN Error',linewidth=3)
plt.plot(z,(analytical_v-NASTRAN_v)/analytical_v*100,'m--',label='T2 NASTRAN Error',linewidth=3)
plt.plot(z,(analytical_w-NASTRAN_w)/analytical_w*100,'k--',label='T3 NASTRAN Error',linewidth=3)
plt.legend()
plt.grid(True)
plt.title('Displacement Percent Error')
plt.xlabel('Position along the beam, m')
plt.ylabel('Percent error')
# Figure 2: rotation percent error vs. the analytical solution.
plt.figure(2)
plt.plot(z,(analytical_psi-AeroComBAT_psi)/analytical_psi*100,'b-',label='R1 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_gamma-AeroComBAT_gamma)/analytical_gamma*100,'g-',label='R2 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_phi-AeroComBAT_phi)/analytical_phi*100,'r-',label='R3 AeroComBAT Error',linewidth=3)
plt.plot(z,(analytical_psi-NASTRAN_psi)/analytical_psi*100,'c--',label='R1 NASTRAN Error',linewidth=3)
plt.plot(z,(analytical_gamma-NASTRAN_gamma)/analytical_gamma*100,'m--',label='R2 NASTRAN Error',linewidth=3)
plt.plot(z,(analytical_phi-NASTRAN_phi)/analytical_phi*100,'k--',label='R3 NASTRAN Error',linewidth=3)
plt.legend()
plt.grid(True)
plt.title('Rotation Percent Error')
plt.xlabel('Position along the beam, m')
plt.ylabel('Percent error')
axes = plt.gca()
axes.set_ylim([-.05,.15])
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"AeroComBAT.AircraftParts.Wing",
"numpy.array",
"numpy.linspace",
"AeroComBAT.FEM.Model",
"matplotlib.pyplot.figure",
"numpy.linalg.nor... | [((579, 604), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (587, 604), True, 'import numpy as np\n'), ((605, 631), 'numpy.array', 'np.array', (['[0.0, 0.0, 20.0]'], {}), '([0.0, 0.0, 20.0])\n', (613, 631), True, 'import numpy as np\n'), ((635, 659), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(2)'], {}), '(0.0, 1.0, 2)\n', (646, 659), True, 'import numpy as np\n'), ((662, 699), 'numpy.linalg.norm', 'np.linalg.norm', (['(Y_rib[0], Y_rib[-1])'], {}), '((Y_rib[0], Y_rib[-1]))\n', (676, 699), True, 'import numpy as np\n'), ((709, 722), 'AeroComBAT.Structures.MaterialLib', 'MaterialLib', ([], {}), '()\n', (720, 722), False, 'from AeroComBAT.Structures import MaterialLib\n'), ((931, 1050), 'AeroComBAT.AircraftParts.Wing', 'Wing', (['(1)', 'p1', 'p2', 'croot', 'ctip', 'x1', 'x2', 'Y_rib', 'n_ply', 'm_i', 'matLib'], {'name': '"""box"""', 'noe': 'noe_dens', 'meshSize': '(3)', 'lam_sym': '(True)'}), "(1, p1, p2, croot, ctip, x1, x2, Y_rib, n_ply, m_i, matLib, name='box',\n noe=noe_dens, meshSize=3, lam_sym=True)\n", (935, 1050), False, 'from AeroComBAT.AircraftParts import Wing\n'), ((1112, 1119), 'AeroComBAT.FEM.Model', 'Model', ([], {}), '()\n', (1117, 1119), False, 'from AeroComBAT.FEM import Model\n'), ((1302, 1370), 'numpy.array', 'np.array', (['[-10000.0, 100000.0, -300000.0, 35000.0, 60000.0, 10000.0]'], {}), '([-10000.0, 100000.0, -300000.0, 35000.0, 60000.0, 10000.0])\n', (1310, 1370), True, 'import numpy as np\n'), ((2726, 2771), 'numpy.genfromtxt', 'np.genfromtxt', (['"""V8_Case_3.csv"""'], {'delimiter': '""","""'}), "('V8_Case_3.csv', delimiter=',')\n", (2739, 2771), True, 'import numpy as np\n'), ((2786, 2843), 'numpy.genfromtxt', 'np.genfromtxt', (['"""V8_Case_3_NASTRAN_SOL.csv"""'], {'delimiter': '""","""'}), "('V8_Case_3_NASTRAN_SOL.csv', delimiter=',')\n", (2799, 2843), True, 'import numpy as np\n'), ((3588, 3610), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(41)'], {}), '(0, 20, 41)\n', 
(3599, 3610), True, 'import numpy as np\n'), ((4249, 4262), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4259, 4262), True, 'import matplotlib.pyplot as plt\n'), ((4263, 4277), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (4271, 4277), True, 'import matplotlib.pyplot as plt\n'), ((4278, 4394), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_u - AeroComBAT_u) / analytical_u * 100)', '"""b-"""'], {'label': '"""T1 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_u - AeroComBAT_u) / analytical_u * 100, 'b-', label\n ='T1 AeroComBAT Error', linewidth=3)\n", (4286, 4394), True, 'import matplotlib.pyplot as plt\n'), ((4380, 4496), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_v - AeroComBAT_v) / analytical_v * 100)', '"""g-"""'], {'label': '"""T2 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_v - AeroComBAT_v) / analytical_v * 100, 'g-', label\n ='T2 AeroComBAT Error', linewidth=3)\n", (4388, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4598), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_w - AeroComBAT_w) / analytical_w * 100)', '"""r-"""'], {'label': '"""T3 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_w - AeroComBAT_w) / analytical_w * 100, 'r-', label\n ='T3 AeroComBAT Error', linewidth=3)\n", (4490, 4598), True, 'import matplotlib.pyplot as plt\n'), ((4584, 4695), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_u - NASTRAN_u) / analytical_u * 100)', '"""c--"""'], {'label': '"""T1 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_u - NASTRAN_u) / analytical_u * 100, 'c--', label=\n 'T1 NASTRAN Error', linewidth=3)\n", (4592, 4695), True, 'import matplotlib.pyplot as plt\n'), ((4681, 4792), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_v - NASTRAN_v) / analytical_v * 100)', '"""m--"""'], {'label': '"""T2 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_v - NASTRAN_v) / analytical_v * 100, 
'm--', label=\n 'T2 NASTRAN Error', linewidth=3)\n", (4689, 4792), True, 'import matplotlib.pyplot as plt\n'), ((4778, 4889), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_w - NASTRAN_w) / analytical_w * 100)', '"""k--"""'], {'label': '"""T3 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_w - NASTRAN_w) / analytical_w * 100, 'k--', label=\n 'T3 NASTRAN Error', linewidth=3)\n", (4786, 4889), True, 'import matplotlib.pyplot as plt\n'), ((4875, 4887), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4885, 4887), True, 'import matplotlib.pyplot as plt\n'), ((4888, 4902), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4896, 4902), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4942), 'matplotlib.pyplot.title', 'plt.title', (['"""Displacement Percent Error"""'], {}), "('Displacement Percent Error')\n", (4912, 4942), True, 'import matplotlib.pyplot as plt\n'), ((4943, 4983), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position along the beam, m"""'], {}), "('Position along the beam, m')\n", (4953, 4983), True, 'import matplotlib.pyplot as plt\n'), ((4984, 5011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent error"""'], {}), "('Percent error')\n", (4994, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5012, 5027), 'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (5020, 5027), True, 'import matplotlib.pyplot as plt\n'), ((5029, 5042), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5039, 5042), True, 'import matplotlib.pyplot as plt\n'), ((5043, 5057), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (5051, 5057), True, 'import matplotlib.pyplot as plt\n'), ((5058, 5179), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_psi - AeroComBAT_psi) / analytical_psi * 100)', '"""b-"""'], {'label': '"""R1 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_psi - AeroComBAT_psi) / analytical_psi * 100, 'b-',\n label='R1 
AeroComBAT Error', linewidth=3)\n", (5066, 5179), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5293), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_gamma - AeroComBAT_gamma) / analytical_gamma * 100)', '"""g-"""'], {'label': '"""R2 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_gamma - AeroComBAT_gamma) / analytical_gamma * 100,\n 'g-', label='R2 AeroComBAT Error', linewidth=3)\n", (5174, 5293), True, 'import matplotlib.pyplot as plt\n'), ((5280, 5401), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_phi - AeroComBAT_phi) / analytical_phi * 100)', '"""r-"""'], {'label': '"""R3 AeroComBAT Error"""', 'linewidth': '(3)'}), "(z, (analytical_phi - AeroComBAT_phi) / analytical_phi * 100, 'r-',\n label='R3 AeroComBAT Error', linewidth=3)\n", (5288, 5401), True, 'import matplotlib.pyplot as plt\n'), ((5388, 5504), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_psi - NASTRAN_psi) / analytical_psi * 100)', '"""c--"""'], {'label': '"""R1 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_psi - NASTRAN_psi) / analytical_psi * 100, 'c--',\n label='R1 NASTRAN Error', linewidth=3)\n", (5396, 5504), True, 'import matplotlib.pyplot as plt\n'), ((5491, 5613), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_gamma - NASTRAN_gamma) / analytical_gamma * 100)', '"""m--"""'], {'label': '"""R2 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_gamma - NASTRAN_gamma) / analytical_gamma * 100,\n 'm--', label='R2 NASTRAN Error', linewidth=3)\n", (5499, 5613), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5716), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '((analytical_phi - NASTRAN_phi) / analytical_phi * 100)', '"""k--"""'], {'label': '"""R3 NASTRAN Error"""', 'linewidth': '(3)'}), "(z, (analytical_phi - NASTRAN_phi) / analytical_phi * 100, 'k--',\n label='R3 NASTRAN Error', linewidth=3)\n", (5608, 5716), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5715), 'matplotlib.pyplot.legend', 'plt.legend', 
([], {}), '()\n', (5713, 5715), True, 'import matplotlib.pyplot as plt\n'), ((5716, 5730), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5724, 5730), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5766), 'matplotlib.pyplot.title', 'plt.title', (['"""Rotation Percent Error"""'], {}), "('Rotation Percent Error')\n", (5740, 5766), True, 'import matplotlib.pyplot as plt\n'), ((5767, 5807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position along the beam, m"""'], {}), "('Position along the beam, m')\n", (5777, 5807), True, 'import matplotlib.pyplot as plt\n'), ((5808, 5835), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent error"""'], {}), "('Percent error')\n", (5818, 5835), True, 'import matplotlib.pyplot as plt\n'), ((5843, 5852), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5850, 5852), True, 'import matplotlib.pyplot as plt\n'), ((5879, 5894), 'matplotlib.pyplot.hold', 'plt.hold', (['(False)'], {}), '(False)\n', (5887, 5894), True, 'import matplotlib.pyplot as plt\n'), ((291, 316), 'os.path.abspath', 'os.path.abspath', (['"""..\\\\.."""'], {}), "('..\\\\..')\n", (306, 316), False, 'import os\n'), ((2361, 2393), 'numpy.array', 'np.array', (['[vx, vy, pz, 0, 0, tz]'], {}), '([vx, vy, pz, 0, 0, tz])\n', (2369, 2393), True, 'import numpy as np\n'), ((1877, 1909), 'numpy.array', 'np.array', (['[vx, vy, pz, 0, 0, tz]'], {}), '([vx, vy, pz, 0, 0, tz])\n', (1885, 1909), True, 'import numpy as np\n')] |
import pkg_resources
import re
import requests
import numpy as np
import scipy as sp
import scipy.sparse
import scipy.sparse.linalg
from . import index
__all__ = ['PowerNetwork', 'load_case']
class PowerNetwork:
def __init__(self, basemva, bus=None, gen=None, gencost=None, branch=None, perunit=True):
if type(basemva) is dict:
data = basemva
self.baseMVA = data['baseMVA']
self.branch = dict()
for i, col in enumerate(index.branch):
self.branch[col] = data['branch'][:, i]
self.bus = dict()
for i, col in enumerate(index.bus):
self.bus[col] = data['bus'][:, i]
self.gen = dict()
for i, col in enumerate(index.gen):
self.gen[col] = data['gen'][:, i]
self.gencost = dict()
for i, col in enumerate(index.cost):
if col == 'COST':
self.gencost[col] = data['gencost'][:, i:]
break
self.gencost[col] = data['gencost'][:, i]
elif self.bus is not None and self.gen is not None and self.gencost is not None and self.branch is not None:
self.baseMVA = basemva
self.bus = dict()
for i, col in enumerate(index.bus):
self.bus[col] = bus[:, i]
self.gen = dict()
for i, col in enumerate(index.gen):
self.gen[col] = gen[:, i]
self.gencost = dict()
for i, col in enumerate(index.cost):
if col == 'COST':
self.gencost[col] = gencost[:, i:]
break
self.gencost[col] = gencost[:, i]
self.branch = dict()
for i, col in enumerate(index.branch):
self.branch[col] = branch[:, i]
else:
raise TypeError('Invalid input to power network. Must be either dict or all arguments must be filled.')
self.n_l = int(np.sum(self.branch['BR_STATUS']))
self.n_b = len(self.bus['BUS_I'])
self.n_g = len(self.gen['GEN_BUS'])
# Per-unit transformations
if perunit:
self.bus['PD'] /= self.baseMVA
self.bus['QD'] /= self.baseMVA
self.gen['PG'] /= self.baseMVA
self.gen['QG'] /= self.baseMVA
self.gen['QMAX'] /= self.baseMVA
self.gen['QMIN'] /= self.baseMVA
self.gen['VG'] /= self.baseMVA
self.gen['PMAX'] /= self.baseMVA
self.gen['PMIN'] /= self.baseMVA
self.gen['PC1'] /= self.baseMVA
self.gen['PC2'] /= self.baseMVA
self.gen['QC1MIN'] /= self.baseMVA
self.gen['QC1MAX'] /= self.baseMVA
self.gen['QC2MIN'] /= self.baseMVA
self.gen['QC2MAX'] /= self.baseMVA
self.gen['RAMP_AGC'] /= self.baseMVA
self.gen['RAMP_10'] /= self.baseMVA
self.gen['RAMP_30'] /= self.baseMVA
self.gen['RAMP_Q'] /= self.baseMVA
self.gencost['COST'] /= self.baseMVA
self.branch['RATE_A'] /= self.baseMVA
self.branch['RATE_B'] /= self.baseMVA
self.branch['RATE_C'] /= self.baseMVA
# Add nodal value of lost load
self.voll = 70 * np.max(self.gencost['COST'][:, -2]) * np.ones((self.n_b,))
self.Bbus = None
self.Bf = None
self.Pbusinj = None
self.Pfinj = None
self.ISF = None
self.lineOutages = None
def setContingencyLimits(self, force=False):
# Artificially enforce DA and SE limits if unenforced
if force or np.all(self.branch['RATE_A'] >= self.branch['RATE_B']):
self.branch['RATE_B'] *= 1.1
if force or np.all(self.branch['RATE_A'] >= self.branch['RATE_C']):
self.branch['RATE_C'] *= 1.7
# Artificially set ramp capacity if unset
if np.all(self.gen['RAMP_AGC'] == 0):
self.gen['RAMP_AGC'] = np.ones((self.n_g,)) * 20. / self.baseMVA
    def makeDC(self, setglobal=True):
        '''Construct the parameters for solution of DC optimal power flow problems.
        Emulates MATPOWER's makeBdc function.

        setglobal -- when True, also cache the results on the instance
                     (self.Bbus, self.Bf, self.Pfinj, self.Pbusinj, self.ISF).

        Returns (Bbus, Bf, Pbusinj, Pfinj, ISF) where Bbus/Bf are sparse
        susceptance matrices, Pfinj/Pbusinj are phase-shift injections, and
        ISF is the injection shift factor matrix (dense here; stored sparse).

        Raises ValueError when bus numbering is not 1..n_b consecutive.
        '''
        if not np.array_equal(self.bus['BUS_I'], np.arange(self.n_b) + 1):
            raise ValueError('Buses must be ordered consecutively.')
        # Determine online branches in order to ignore offline branches
        online = np.where(self.branch['BR_STATUS'] != 0)[0]
        n_online = len(online)
        # for each branch, compute the elements of the branch B matrix and the phase shift "quiescent" injections, where
        #
        #      | Pf |   | Bff  Bft |   | Vaf |   | Pfinj |
        #      |    | = |          | * |     | + |       |
        #      | Pt |   | Btf  Btt |   | Vat |   | Ptinj |
        #
        b = np.divide(np.ones(n_online), self.branch['BR_X'][online])  # Series susceptance = 1 / reactance
        tap = self.branch['TAP'][online]  # Transformer tap ratio per online branch
        tap[tap == 0] = 1.  # A zero tap in the case file means "no transformer": treat as 1
        b = np.divide(b, tap)
        # build connection matrix Cft = Cf - Ct for line and from - to buses
        rows = np.concatenate((np.arange(n_online), np.arange(n_online)))  # Each branch appears twice: once per endpoint
        cols = np.concatenate(
            (self.branch['F_BUS'][online] - 1, self.branch['T_BUS'][online] - 1))  # 0-based 'from' and 'to' bus indices
        vals = np.concatenate((np.ones((n_online,)), -np.ones((n_online,))))  # +1 at 'from', -1 at 'to'
        Cft = sp.sparse.csc_matrix((vals, (rows, cols)), (n_online, self.n_b))  # Connection (incidence) matrix
        # build Bf such that Bf * Va is the vector of real branch powers injected at each branch's "from" bus
        vals = np.concatenate((b, -b))
        Bf = sp.sparse.csc_matrix((vals, (rows, cols)), (n_online, self.n_b))
        # build Bbus: nodal susceptance matrix, Bbus = Cft' * Bf
        Bbus = sp.sparse.csr_matrix(Cft.transpose().dot(Bf))
        # build phase shift injection vectors (SHIFT is in degrees, hence pi/180)
        Pfinj = np.multiply(b, (-self.branch['SHIFT'][online] * np.pi / 180))  # Injected at the from bus
        Pbusinj = Cft.transpose().dot(Pfinj)  # Pbusinj = Cf * Pfinj + Ct * Ptinj;
        # ISF Formulation - assumes Pbusinj is 0; solved in least-squares form
        # over the non-slack (BUS_TYPE != 3) columns.
        nonslack_buses = np.where(self.bus['BUS_TYPE'] != 3)[0]
        Bf_ref = Bf[:, nonslack_buses]
        Bbus_ref = Bbus[:, nonslack_buses]
        ISF = Bf_ref.dot(sp.sparse.linalg.spsolve(Bbus_ref.transpose().dot(Bbus_ref), Bbus_ref.transpose()))
        if setglobal:
            # Y-Theta Formulation
            self.Bbus = Bbus
            self.Bf = Bf
            self.Pfinj = Pfinj
            self.Pbusinj = Pbusinj
            # ISF Formulation
            self.ISF = sp.sparse.csc_matrix(ISF)
        return Bbus, Bf, Pbusinj, Pfinj, ISF
def makeDCLineOutages(self):
self.lineOutages = []
for line in range(self.n_l):
if not self.branch['BR_STATUS'][line]:
continue
self.branch['BR_STATUS'][line] = 0
try:
Bbus, Bf, _, _, ISF = self.makeDC(False)
self.lineOutages.append({
'prob': 0.001,
'Bbus': Bbus,
'Bf': Bf,
'ISF': ISF,
'branch': {
'RATE_A': self.branch['RATE_A'][np.arange(self.n_l) != line],
'RATE_B': self.branch['RATE_B'][np.arange(self.n_l) != line],
'RATE_C': self.branch['RATE_C'][np.arange(self.n_l) != line]
}
})
except RuntimeError:
# Ensure that removal of a line does not disconnect graph
print('Failed to create contingency for line', line)
self.branch['BR_STATUS'][line] = 1
def load_case(mfile, verbose=0):
"""
Imports data from Matpower case file (MATLAB m-file).
"""
# Read m-file and strip MATLAB comments
if mfile.startswith('http'):
if verbose: print("Downloading case file: %s." % (mfile))
response = requests.get(mfile)
lines = response.text.split('\n')
elif pkg_resources.resource_exists('phasorpy', 'data/' + mfile):
if verbose: print("Loading case file: %s." % (mfile))
with pkg_resources.resource_stream('phasorpy', 'data/' + mfile) as f:
lines = [str(l, 'utf-8') for l in f.readlines()]
else:
if verbose: print("Reading case file: %s." % (mfile))
with open(mfile, "r") as f:
lines = f.readlines()
for k in range(len(lines)): lines[k] = lines[k].split('%')[0]
case_as_str = "\n".join(lines)
def str_to_array(s):
return np.array([[float(v) for v in r.strip().split()] for r in s.strip(';\n\t ').split(';')])
try:
baseMVA = re.search("mpc.baseMVA = (\d+)", case_as_str).group(1)
version = re.search("mpc.version = '(\d+)'", case_as_str).group(1)
bus_str = re.search("mpc.bus = \[([-\s0-9e.;]+)\]", case_as_str).group(1)
gen_str = re.search("mpc.gen = \[([-\s0-9e.;Iinf]+)\]", case_as_str).group(1)
branch_str = re.search("mpc.branch = \[([-\s0-9e.;]+)\]", case_as_str).group(1)
gencost_str = re.search("mpc.gencost = \[([-\s0-9e.;]+)\]", case_as_str).group(1)
except:
raise TypeError("Failed to parse case file.")
else:
if re.search('mpc.branch\(', case_as_str) or re.search('mpc.bus\(', case_as_str) or re.search('mpc.gen\(',
case_as_str):
raise TypeError("Case file not supported.")
if version != 2 and version != '2':
raise TypeError('Invalid case file version. (Requires 2: Provided"', version, '")')
return PowerNetwork({'baseMVA': float(baseMVA),
'bus': str_to_array(bus_str),
'gen': str_to_array(gen_str),
'gencost': str_to_array(gencost_str),
'branch': str_to_array(branch_str)})
def available_cases():
return pkg_resources.resource_listdir('phasorpy', 'data') | [
"scipy.sparse.csc_matrix",
"numpy.multiply",
"pkg_resources.resource_exists",
"numpy.ones",
"numpy.arange",
"numpy.where",
"requests.get",
"numpy.max",
"numpy.sum",
"numpy.concatenate",
"pkg_resources.resource_stream",
"numpy.all",
"pkg_resources.resource_listdir",
"numpy.divide",
"re.se... | [((10122, 10172), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['"""phasorpy"""', '"""data"""'], {}), "('phasorpy', 'data')\n", (10152, 10172), False, 'import pkg_resources\n'), ((3941, 3974), 'numpy.all', 'np.all', (["(self.gen['RAMP_AGC'] == 0)"], {}), "(self.gen['RAMP_AGC'] == 0)\n", (3947, 3974), True, 'import numpy as np\n'), ((5077, 5094), 'numpy.divide', 'np.divide', (['b', 'tap'], {}), '(b, tap)\n', (5086, 5094), True, 'import numpy as np\n'), ((5284, 5373), 'numpy.concatenate', 'np.concatenate', (["(self.branch['F_BUS'][online] - 1, self.branch['T_BUS'][online] - 1)"], {}), "((self.branch['F_BUS'][online] - 1, self.branch['T_BUS'][\n online] - 1))\n", (5298, 5373), True, 'import numpy as np\n'), ((5526, 5590), 'scipy.sparse.csc_matrix', 'sp.sparse.csc_matrix', (['(vals, (rows, cols))', '(n_online, self.n_b)'], {}), '((vals, (rows, cols)), (n_online, self.n_b))\n', (5546, 5590), True, 'import scipy as sp\n'), ((5738, 5761), 'numpy.concatenate', 'np.concatenate', (['(b, -b)'], {}), '((b, -b))\n', (5752, 5761), True, 'import numpy as np\n'), ((5775, 5839), 'scipy.sparse.csc_matrix', 'sp.sparse.csc_matrix', (['(vals, (rows, cols))', '(n_online, self.n_b)'], {}), '((vals, (rows, cols)), (n_online, self.n_b))\n', (5795, 5839), True, 'import scipy as sp\n'), ((5986, 6045), 'numpy.multiply', 'np.multiply', (['b', "(-self.branch['SHIFT'][online] * np.pi / 180)"], {}), "(b, -self.branch['SHIFT'][online] * np.pi / 180)\n", (5997, 6045), True, 'import numpy as np\n'), ((8066, 8085), 'requests.get', 'requests.get', (['mfile'], {}), '(mfile)\n', (8078, 8085), False, 'import requests\n'), ((8137, 8195), 'pkg_resources.resource_exists', 'pkg_resources.resource_exists', (['"""phasorpy"""', "('data/' + mfile)"], {}), "('phasorpy', 'data/' + mfile)\n", (8166, 8195), False, 'import pkg_resources\n'), ((2012, 2044), 'numpy.sum', 'np.sum', (["self.branch['BR_STATUS']"], {}), "(self.branch['BR_STATUS'])\n", (2018, 2044), True, 'import numpy as 
np\n'), ((3353, 3373), 'numpy.ones', 'np.ones', (['(self.n_b,)'], {}), '((self.n_b,))\n', (3360, 3373), True, 'import numpy as np\n'), ((3665, 3719), 'numpy.all', 'np.all', (["(self.branch['RATE_A'] >= self.branch['RATE_B'])"], {}), "(self.branch['RATE_A'] >= self.branch['RATE_B'])\n", (3671, 3719), True, 'import numpy as np\n'), ((3782, 3836), 'numpy.all', 'np.all', (["(self.branch['RATE_A'] >= self.branch['RATE_C'])"], {}), "(self.branch['RATE_A'] >= self.branch['RATE_C'])\n", (3788, 3836), True, 'import numpy as np\n'), ((4479, 4518), 'numpy.where', 'np.where', (["(self.branch['BR_STATUS'] != 0)"], {}), "(self.branch['BR_STATUS'] != 0)\n", (4487, 4518), True, 'import numpy as np\n'), ((4882, 4899), 'numpy.ones', 'np.ones', (['n_online'], {}), '(n_online)\n', (4889, 4899), True, 'import numpy as np\n'), ((6234, 6269), 'numpy.where', 'np.where', (["(self.bus['BUS_TYPE'] != 3)"], {}), "(self.bus['BUS_TYPE'] != 3)\n", (6242, 6269), True, 'import numpy as np\n'), ((6695, 6720), 'scipy.sparse.csc_matrix', 'sp.sparse.csc_matrix', (['ISF'], {}), '(ISF)\n', (6715, 6720), True, 'import scipy as sp\n'), ((9362, 9401), 're.search', 're.search', (['"""mpc.branch\\\\("""', 'case_as_str'], {}), "('mpc.branch\\\\(', case_as_str)\n", (9371, 9401), False, 'import re\n'), ((9404, 9440), 're.search', 're.search', (['"""mpc.bus\\\\("""', 'case_as_str'], {}), "('mpc.bus\\\\(', case_as_str)\n", (9413, 9440), False, 'import re\n'), ((9443, 9479), 're.search', 're.search', (['"""mpc.gen\\\\("""', 'case_as_str'], {}), "('mpc.gen\\\\(', case_as_str)\n", (9452, 9479), False, 'import re\n'), ((3315, 3350), 'numpy.max', 'np.max', (["self.gencost['COST'][:, -2]"], {}), "(self.gencost['COST'][:, -2])\n", (3321, 3350), True, 'import numpy as np\n'), ((5204, 5223), 'numpy.arange', 'np.arange', (['n_online'], {}), '(n_online)\n', (5213, 5223), True, 'import numpy as np\n'), ((5225, 5244), 'numpy.arange', 'np.arange', (['n_online'], {}), '(n_online)\n', (5234, 5244), True, 'import numpy as np\n'), 
((5446, 5466), 'numpy.ones', 'np.ones', (['(n_online,)'], {}), '((n_online,))\n', (5453, 5466), True, 'import numpy as np\n'), ((8272, 8330), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['"""phasorpy"""', "('data/' + mfile)"], {}), "('phasorpy', 'data/' + mfile)\n", (8301, 8330), False, 'import pkg_resources\n'), ((8799, 8845), 're.search', 're.search', (['"""mpc.baseMVA = (\\\\d+)"""', 'case_as_str'], {}), "('mpc.baseMVA = (\\\\d+)', case_as_str)\n", (8808, 8845), False, 'import re\n'), ((8872, 8920), 're.search', 're.search', (['"""mpc.version = \'(\\\\d+)\'"""', 'case_as_str'], {}), '("mpc.version = \'(\\\\d+)\'", case_as_str)\n', (8881, 8920), False, 'import re\n'), ((8947, 9004), 're.search', 're.search', (['"""mpc.bus = \\\\[([-\\\\s0-9e.;]+)\\\\]"""', 'case_as_str'], {}), "('mpc.bus = \\\\[([-\\\\s0-9e.;]+)\\\\]', case_as_str)\n", (8956, 9004), False, 'import re\n'), ((9029, 9090), 're.search', 're.search', (['"""mpc.gen = \\\\[([-\\\\s0-9e.;Iinf]+)\\\\]"""', 'case_as_str'], {}), "('mpc.gen = \\\\[([-\\\\s0-9e.;Iinf]+)\\\\]', case_as_str)\n", (9038, 9090), False, 'import re\n'), ((9118, 9178), 're.search', 're.search', (['"""mpc.branch = \\\\[([-\\\\s0-9e.;]+)\\\\]"""', 'case_as_str'], {}), "('mpc.branch = \\\\[([-\\\\s0-9e.;]+)\\\\]', case_as_str)\n", (9127, 9178), False, 'import re\n'), ((9207, 9268), 're.search', 're.search', (['"""mpc.gencost = \\\\[([-\\\\s0-9e.;]+)\\\\]"""', 'case_as_str'], {}), "('mpc.gencost = \\\\[([-\\\\s0-9e.;]+)\\\\]', case_as_str)\n", (9216, 9268), False, 'import re\n'), ((4011, 4031), 'numpy.ones', 'np.ones', (['(self.n_g,)'], {}), '((self.n_g,))\n', (4018, 4031), True, 'import numpy as np\n'), ((4294, 4313), 'numpy.arange', 'np.arange', (['self.n_b'], {}), '(self.n_b)\n', (4303, 4313), True, 'import numpy as np\n'), ((5469, 5489), 'numpy.ones', 'np.ones', (['(n_online,)'], {}), '((n_online,))\n', (5476, 5489), True, 'import numpy as np\n'), ((7329, 7348), 'numpy.arange', 'np.arange', (['self.n_l'], {}), 
'(self.n_l)\n', (7338, 7348), True, 'import numpy as np\n'), ((7415, 7434), 'numpy.arange', 'np.arange', (['self.n_l'], {}), '(self.n_l)\n', (7424, 7434), True, 'import numpy as np\n'), ((7501, 7520), 'numpy.arange', 'np.arange', (['self.n_l'], {}), '(self.n_l)\n', (7510, 7520), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
class Identity(torch.nn.Module):
    """No-op module: forward returns its input unchanged.

    Useful as a placeholder where an activation module is expected.
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into a single one."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class ColorFullnesCriterion(torch.nn.Module):
    """L1 distance between the Hasler-Susstrunk colorfulness scores of two
    batches of RGB images (shape (N, 3, H, W))."""

    def __init__(self):
        super(ColorFullnesCriterion, self).__init__()

    def colorfulness(self, x):
        """Return one colorfulness score per image in the batch."""
        assert(x.size(1) == 3)
        scores = torch.zeros(x.size(0)).to(x.device)
        for idx in range(x.size(0)):
            red, green, blue = x[idx][0], x[idx][1], x[idx][2]
            rg = torch.abs(red - green)                  # red-green opponent
            yb = torch.abs(0.5 * (red + green) - blue)   # yellow-blue opponent
            rg_std, rg_mean = torch.std(rg), torch.mean(rg)
            yb_std, yb_mean = torch.std(yb), torch.mean(yb)
            std_root = torch.sqrt(rg_std ** 2 + yb_std ** 2)
            mean_root = torch.sqrt(rg_mean ** 2 + yb_mean ** 2)
            scores[idx] = std_root + 0.3 * mean_root
        return scores

    def forward(self, x, y):
        return nn.functional.l1_loss(self.colorfulness(x), self.colorfulness(y))
class ColorHistCriterion(torch.nn.Module):
    """L1 distance between per-channel 255-bin color histograms of two image
    batches of shape (N, 3, H, W) with pixel values assumed in [0, 1]."""

    def __init__(self):
        super(ColorHistCriterion, self).__init__()

    def histogram(self, x):
        """Return an (N, 3, 255) tensor of per-channel pixel histograms.

        Histograms are computed on CPU via numpy and moved back to x's
        device. Note the channels are stored in (G, B, R) order -- harmless
        for a symmetric distance as long as both inputs go through this same
        mapping.
        """
        assert(x.size(1) == 3)
        result = torch.zeros(x.size(0), x.size(1), 255).to(x.device)
        for i in range(x.size(0)):
            # Scale [0, 1] pixels to [0, 255] integer levels before binning.
            R, G, B = torch.round(x[i][0]*255.0), torch.round(x[i][1]*255.0), torch.round(x[i][2]*255.0)
            R, G, B = R.data.cpu().numpy(), G.data.cpu().numpy(), B.data.cpu().numpy()
            rh = np.histogram(R, bins=255)[0]
            gh = np.histogram(G, bins=255)[0]
            bh = np.histogram(B, bins=255)[0]
            result[i][0] = torch.from_numpy(gh).float().view(-1).to(x.device)
            result[i][1] = torch.from_numpy(bh).float().view(-1).to(x.device)
            result[i][2] = torch.from_numpy(rh).float().view(-1).to(x.device)
        return result

    def forward(self, x, y):
        # BUG FIX: the original compared histogram(x) with histogram(x),
        # making the loss identically zero; compare against y instead.
        return nn.functional.l1_loss(self.histogram(x), self.histogram(y))
class HueSaturationValueCriterion(torch.nn.Module):
    """L1 loss computed in HSV space.

    Inputs are RGB batches of shape (N, 3, H, W); ``im * 0.5 + 0.5`` maps
    them to [0, 1] first, so callers are expected to pass [-1, 1]-scaled
    images -- confirm against the training pipeline.
    """
    def __init__(self):
        super(HueSaturationValueCriterion, self).__init__()
        # NOTE(review): self.criterion is never used; forward calls
        # nn.functional.l1_loss directly.
        self.criterion = nn.L1Loss()
        self.eps= 1e-6  # guards divisions when max == min (achromatic pixels)
    def hsv(self, im):
        """Convert an RGB batch to concatenated hue/saturation/value planes.

        NOTE(review): hue, saturation, and value each come out as (N, H, W)
        and are concatenated along dim=1, yielding (N, 3*H, W) rather than
        (N, 3, H, W). That is fine for an elementwise L1 loss, but verify
        if the result is ever interpreted as an image.
        """
        assert (im.size(1) == 3)
        img = im * 0.5 + 0.5  # map [-1, 1] -> [0, 1]
        hue = torch.Tensor(im.shape[0], im.shape[2], im.shape[3]).to(im.device)
        # Piecewise hue definition depending on which channel is the maximum.
        # Later assignments win where several channels tie for the maximum.
        hue[ img[:,2]==img.max(1)[0] ] = 4.0 + ( (img[:,0]-img[:,1]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,2]==img.max(1)[0] ]
        hue[ img[:,1]==img.max(1)[0] ] = 2.0 + ( (img[:,2]-img[:,0]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,1]==img.max(1)[0] ]
        hue[ img[:,0]==img.max(1)[0] ] = (0.0 + ( (img[:,1]-img[:,2]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,0]==img.max(1)[0] ]) % 6
        hue[img.min(1)[0]==img.max(1)[0]] = 0.0  # hue undefined for grey pixels: use 0
        hue = hue/6.0  # normalize hue from [0, 6) to [0, 1)
        saturation = ( img.max(1)[0] - img.min(1)[0] ) / ( img.max(1)[0] + self.eps )
        saturation[ img.max(1)[0]==0 ] = 0.0  # black pixels have zero saturation
        value = img.max(1)[0]
        return torch.cat((hue, saturation, value), dim=1)
    def forward(self, x, y):
        x_hsv = self.hsv(x)
        y_hsv = self.hsv(y)
        return nn.functional.l1_loss(x_hsv, y_hsv)
class SILU(torch.nn.Module):
    """SiLU ("swish") activation: f(x) = x * sigmoid(x)."""

    def __init__(self):
        super(SILU, self).__init__()

    def forward(self, x):
        return x * torch.sigmoid(x)
class Perceptron(torch.nn.Module):
    """A single fully-connected layer applied to flattened inputs."""

    def __init__(self, in_features, out_features):
        super(Perceptron, self).__init__()
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x):
        flat = x.view(x.size(0), -1)  # (N, *) -> (N, in_features)
        return self.fc(flat)
class Padding(torch.nn.Module):
    """Reflection-pad a 4-D input on all four spatial sides."""

    def __init__(self, padding_size = 1):
        super(Padding, self).__init__()
        self.pad = torch.nn.ReflectionPad2d(padding_size)

    def forward(self, x):
        padded = self.pad(x)
        return padded
class ConvLayer(torch.nn.Conv2d):
    """Conv2d preceded by reflection padding of kernel_size // 2, so the
    spatial size is preserved at stride 1 without zero-padding artifacts."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias = False):
        super(ConvLayer, self).__init__(in_channels, out_channels, kernel_size, stride=stride, bias = bias)
        self.pad = Padding(kernel_size // 2)

    def forward(self, x):
        padded = self.pad(x)
        return super(ConvLayer, self).forward(padded)
class AttentionBlock(nn.Module):
    """Additive attention gate: combines a gating signal ``g`` with features
    ``x`` and returns ``x`` scaled by a learned (0, 1) mask."""

    def __init__(self, channels, gate_dimension):
        super(AttentionBlock, self).__init__()
        # Project both inputs into the gate dimension.
        self.tract = nn.Sequential(
            ConvLayer(channels, gate_dimension, kernel_size=1, stride=1, bias=True),
            nn.BatchNorm2d(gate_dimension)
        )
        self.skip = nn.Sequential(
            ConvLayer(channels, gate_dimension, kernel_size=1, stride=1, bias=True),
            nn.BatchNorm2d(gate_dimension)
        )
        # Map back to the feature channels and squash to (0, 1).
        self.gate = nn.Sequential(
            ConvLayer(gate_dimension, channels, kernel_size=1, stride=1, bias=True),
            nn.BatchNorm2d(channels),
            nn.Sigmoid()
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        combined = self.relu(self.tract(g) + self.skip(x))
        psi = self.gate(combined)
        return torch.mul(x, psi)
class BaseBlock(torch.nn.Module):
    """Conv -> BatchNorm -> Dropout -> activation building block.

    Note: the ``activation = Identity()`` default is a single module instance
    shared by every call that relies on the default; Identity is stateless,
    so this is safe.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, activation = Identity(), bias = False, drop_out : float = 0.5):
        super(BaseBlock, self).__init__()
        stages = [
            ConvLayer(in_channels, out_channels, kernel_size, stride, bias),
            nn.BatchNorm2d(out_channels, affine=True),
            nn.Dropout(drop_out),
            activation,
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
class UpsampleDeConv(torch.nn.Module):
    """2x nearest-neighbour upsampling followed by a 3x3 convolution."""

    def __init__(self, in_channels, out_channels,):
        super(UpsampleDeConv, self).__init__()
        self.conv2d = ConvLayer(in_channels, out_channels, 3, 1, bias=False)

    def forward(self, x):
        upsampled = torch.nn.functional.interpolate(x, mode='nearest', scale_factor=2)
        return self.conv2d(upsampled)
class TransposedDeConv(torch.nn.Module):
    """2x upsampling via a stride-2 transposed convolution (4x4 kernel)."""

    def __init__(self, in_channels, out_channels):
        super(TransposedDeConv, self).__init__()
        self.conv2d = torch.nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1, bias=False)

    def forward(self, x):
        out = self.conv2d(x)
        return out
class PixelDeConv(torch.nn.Module):
    """2x upsampling via pixel shuffle: conv to 4x channels, then rearrange
    channel blocks into a 2x2 spatial grid."""

    def __init__(self, in_channels, out_channels):
        super(PixelDeConv, self).__init__()
        self.conv2d = ConvLayer(in_channels, out_channels * 4, 3, 1)
        self.upsample = nn.PixelShuffle(2)

    def forward(self, x):
        features = self.conv2d(x)
        return self.upsample(features)
class ResidualBlock(torch.nn.Module):
    """Two conv blocks plus a 1x1-projected skip connection."""

    def __init__(self, in_channels, out_channels, stride = 1, activation = nn.LeakyReLU(0.2), drop_out : float = 0.5):
        super(ResidualBlock, self).__init__()
        self.conv1 = BaseBlock(in_channels, out_channels, kernel_size=3, stride=stride, activation = activation)
        self.conv2 = BaseBlock(out_channels, out_channels, kernel_size=3, stride=1)
        # 1x1 projection so the residual matches the main path's shape.
        self.skip = BaseBlock(in_channels, out_channels, kernel_size=1, stride=stride, bias= False)

    def forward(self, x):
        shortcut = self.skip(x)
        out = self.conv2(self.conv1(x))
        return out + shortcut
class SimpleEncoder(nn.Module):
    """Stride-2 downsampling block: conv, optional batch norm, dropout,
    then the activation."""

    def __init__(self, in_size, out_size, activation=nn.LeakyReLU(0.2), bn = True, drop_out : float = 0.5):
        super(SimpleEncoder, self).__init__()
        stages = [ConvLayer(in_size, out_size, 3, 2)]
        if bn:
            stages.append(nn.BatchNorm2d(out_size))
        stages.extend([nn.Dropout(drop_out), activation])
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
class SimpleDecoder(nn.Module):
    """Upsampling block: deconv (pluggable), batch norm, dropout."""

    def __init__(self, in_size, out_size, deconv= UpsampleDeConv, drop_out : float = 0.5):
        super(SimpleDecoder, self).__init__()
        self.model = nn.Sequential(
            deconv(in_size, out_size),
            nn.BatchNorm2d(out_size),
            nn.Dropout(drop_out),
        )

    def forward(self, x):
        return self.model(x)
class TotalVariation(nn.Module):
    """Anisotropic total-variation loss: mean squared difference between
    neighbouring pixels (vertical + horizontal), averaged over the batch."""

    def __init__(self):
        super(TotalVariation, self).__init__()

    def forward(self, x):
        batch = x.size(0)
        height, width = x.size(2), x.size(3)
        # Squared vertical / horizontal neighbour differences.
        diff_h = x[:, :, 1:, :] - x[:, :, :height - 1, :]
        diff_w = x[:, :, :, 1:] - x[:, :, :, :width - 1]
        norm_h = self._tensor_size(x[:, :, 1:, :])
        norm_w = self._tensor_size(x[:, :, :, 1:])
        h_term = diff_h.pow(2).sum() / norm_h
        w_term = diff_w.pow(2).sum() / norm_w
        return 2 * (h_term + w_term) / batch

    def _tensor_size(self, t):
        # Number of elements per batch item: C * H * W.
        return t.size()[1] * t.size()[2] * t.size()[3]
def l2normalize(v, eps=1e-12):
    """Return ``v`` scaled to unit L2 norm; ``eps`` avoids division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    """Wrap ``module`` so that its ``name`` parameter is divided by its
    largest singular value (estimated by power iteration) on every forward.

    The original parameter is re-registered as ``<name>_bar`` and the
    left/right singular-vector estimates as ``<name>_u`` / ``<name>_v``.
    """
    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Only rewrite the module's parameters once (idempotent wrapping).
        if not self._made_params():
            self._make_params()
    def _update_u_v(self):
        """Run power iteration and set ``module.<name>`` to w / sigma."""
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        # Power iteration on the (height, -1)-flattened weight matrix:
        # alternately refine the right (v) and left (u) singular vectors.
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        # Estimated top singular value sigma = u^T W v.
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))
    def _made_params(self):
        """Return True when the _u/_v/_bar parameters already exist."""
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False
    def _make_params(self):
        """Replace ``module.<name>`` with _bar plus random-normal u, v."""
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        # u, v are fixed-size buffers updated in-place (requires_grad=False).
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        # Remove the original parameter; forward re-creates it as a plain
        # attribute each call via _update_u_v.
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)
    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)
| [
"torch.mul",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.ReflectionPad2d",
"torch.nn.L1Loss",
"torch.sqrt",
"torch.pow",
"torch.from_numpy",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"numpy.histogram",
"torch.mean",
"torch.nn... | [((2338, 2349), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2347, 2349), True, 'import torch.nn as nn\n'), ((3231, 3273), 'torch.cat', 'torch.cat', (['(hue, saturation, value)'], {'dim': '(1)'}), '((hue, saturation, value), dim=1)\n', (3240, 3273), False, 'import torch\n'), ((3375, 3410), 'torch.nn.functional.l1_loss', 'nn.functional.l1_loss', (['x_hsv', 'y_hsv'], {}), '(x_hsv, y_hsv)\n', (3396, 3410), True, 'import torch.nn as nn\n'), ((3743, 3779), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (3752, 3779), True, 'import torch.nn as nn\n'), ((4016, 4054), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['padding_size'], {}), '(padding_size)\n', (4040, 4054), False, 'import torch\n'), ((5235, 5256), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5242, 5256), True, 'import torch.nn as nn\n'), ((5422, 5439), 'torch.mul', 'torch.mul', (['x', 'psi'], {}), '(x, psi)\n', (5431, 5439), False, 'import torch\n'), ((6201, 6267), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['x'], {'mode': '"""nearest"""', 'scale_factor': '(2)'}), "(x, mode='nearest', scale_factor=2)\n", (6232, 6267), False, 'import torch\n'), ((6478, 6550), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['in_channels', 'out_channels', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(in_channels, out_channels, 4, 2, 1, bias=False)\n', (6502, 6550), False, 'import torch\n'), ((6834, 6852), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(2)'], {}), '(2)\n', (6849, 6852), True, 'import torch.nn as nn\n'), ((7040, 7057), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (7052, 7057), True, 'import torch.nn as nn\n'), ((7556, 7578), 'torch.add', 'torch.add', (['x', 'residual'], {}), '(x, residual)\n', (7565, 7578), False, 'import torch\n'), ((7666, 7683), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (7678, 7683), True, 'import 
torch.nn as nn\n'), ((7960, 7982), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (7973, 7982), True, 'import torch.nn as nn\n'), ((8375, 8397), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (8388, 8397), True, 'import torch.nn as nn\n'), ((10734, 10751), 'torch.nn.Parameter', 'Parameter', (['w.data'], {}), '(w.data)\n', (10743, 10751), False, 'from torch.nn import Parameter\n'), ((708, 724), 'torch.abs', 'torch.abs', (['(R - G)'], {}), '(R - G)\n', (717, 724), False, 'import torch\n'), ((742, 770), 'torch.abs', 'torch.abs', (['(0.5 * (R + G) - B)'], {}), '(0.5 * (R + G) - B)\n', (751, 770), False, 'import torch\n'), ((908, 943), 'torch.sqrt', 'torch.sqrt', (['(rgStd ** 2 + ybStd ** 2)'], {}), '(rgStd ** 2 + ybStd ** 2)\n', (918, 943), False, 'import torch\n'), ((971, 1008), 'torch.sqrt', 'torch.sqrt', (['(rgMean ** 2 + ybMean ** 2)'], {}), '(rgMean ** 2 + ybMean ** 2)\n', (981, 1008), False, 'import torch\n'), ((3557, 3573), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (3570, 3573), False, 'import torch\n'), ((4803, 4833), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['gate_dimension'], {}), '(gate_dimension)\n', (4817, 4833), True, 'import torch.nn as nn\n'), ((4978, 5008), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['gate_dimension'], {}), '(gate_dimension)\n', (4992, 5008), True, 'import torch.nn as nn\n'), ((5153, 5177), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (5167, 5177), True, 'import torch.nn as nn\n'), ((5191, 5203), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5201, 5203), True, 'import torch.nn as nn\n'), ((5362, 5377), 'torch.add', 'torch.add', (['t', 's'], {}), '(t, s)\n', (5371, 5377), False, 'import torch\n'), ((5778, 5819), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'affine': '(True)'}), '(out_channels, affine=True)\n', (5792, 5819), True, 'import torch.nn as nn\n'), ((5833, 5853), 'torch.nn.Dropout', 'nn.Dropout', 
(['drop_out'], {}), '(drop_out)\n', (5843, 5853), True, 'import torch.nn as nn\n'), ((7904, 7924), 'torch.nn.Dropout', 'nn.Dropout', (['drop_out'], {}), '(drop_out)\n', (7914, 7924), True, 'import torch.nn as nn\n'), ((8287, 8311), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_size'], {}), '(out_size)\n', (8301, 8311), True, 'import torch.nn as nn\n'), ((8331, 8351), 'torch.nn.Dropout', 'nn.Dropout', (['drop_out'], {}), '(drop_out)\n', (8341, 8351), True, 'import torch.nn as nn\n'), ((798, 812), 'torch.mean', 'torch.mean', (['rg'], {}), '(rg)\n', (808, 812), False, 'import torch\n'), ((814, 827), 'torch.std', 'torch.std', (['rg'], {}), '(rg)\n', (823, 827), False, 'import torch\n'), ((856, 870), 'torch.mean', 'torch.mean', (['yb'], {}), '(yb)\n', (866, 870), False, 'import torch\n'), ((872, 885), 'torch.std', 'torch.std', (['yb'], {}), '(yb)\n', (881, 885), False, 'import torch\n'), ((1505, 1533), 'torch.round', 'torch.round', (['(x[i][0] * 255.0)'], {}), '(x[i][0] * 255.0)\n', (1516, 1533), False, 'import torch\n'), ((1533, 1561), 'torch.round', 'torch.round', (['(x[i][1] * 255.0)'], {}), '(x[i][1] * 255.0)\n', (1544, 1561), False, 'import torch\n'), ((1561, 1589), 'torch.round', 'torch.round', (['(x[i][2] * 255.0)'], {}), '(x[i][2] * 255.0)\n', (1572, 1589), False, 'import torch\n'), ((1692, 1717), 'numpy.histogram', 'np.histogram', (['R'], {'bins': '(255)'}), '(R, bins=255)\n', (1704, 1717), True, 'import numpy as np\n'), ((1738, 1763), 'numpy.histogram', 'np.histogram', (['G'], {'bins': '(255)'}), '(G, bins=255)\n', (1750, 1763), True, 'import numpy as np\n'), ((1784, 1809), 'numpy.histogram', 'np.histogram', (['B'], {'bins': '(255)'}), '(B, bins=255)\n', (1796, 1809), True, 'import numpy as np\n'), ((2473, 2524), 'torch.Tensor', 'torch.Tensor', (['im.shape[0]', 'im.shape[2]', 'im.shape[3]'], {}), '(im.shape[0], im.shape[2], im.shape[3])\n', (2485, 2524), False, 'import torch\n'), ((7859, 7883), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_size'], {}), 
'(out_size)\n', (7873, 7883), True, 'import torch.nn as nn\n'), ((8798, 8849), 'torch.pow', 'torch.pow', (['(x[:, :, 1:, :] - x[:, :, :h_x - 1, :])', '(2)'], {}), '(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2)\n', (8807, 8849), False, 'import torch\n'), ((8862, 8913), 'torch.pow', 'torch.pow', (['(x[:, :, :, 1:] - x[:, :, :, :w_x - 1])', '(2)'], {}), '(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2)\n', (8871, 8913), False, 'import torch\n'), ((1840, 1860), 'torch.from_numpy', 'torch.from_numpy', (['gh'], {}), '(gh)\n', (1856, 1860), False, 'import torch\n'), ((1918, 1938), 'torch.from_numpy', 'torch.from_numpy', (['bh'], {}), '(bh)\n', (1934, 1938), False, 'import torch\n'), ((1996, 2016), 'torch.from_numpy', 'torch.from_numpy', (['rh'], {}), '(rh)\n', (2012, 2016), False, 'import torch\n')] |
import unittest
import numpy as np
from hypothesis import given
import hypothesis.strategies as some
import hypothesis.extra.numpy as some_np
from extractor.gps import gps_to_ltp, gps_from_ltp, \
interpolate_gps
class TestGps(unittest.TestCase):
    """Unit tests for the GPS <-> local-tangent-plane helpers in extractor.gps."""
    @given(
        some_np.arrays(
            # NOTE(review): np.float is removed in numpy >= 1.24; should be
            # float or np.float64 when this file is next touched.
            dtype=np.float,
            shape=some.tuples(
                some.integers(min_value=1, max_value=5),  # 1..5 rows
                some.integers(min_value=3, max_value=3)   # exactly 3 columns
            ),
            elements=some.floats(-90, 90)
        )
    )
    def test_gps_to_ltp_consistency(self, gps):
        """Round-trip property: gps_from_ltp(gps_to_ltp(x)) recovers x and
        the reported origin matches the first sample."""
        gps_ltp, origin = gps_to_ltp(gps)
        gps_recovered = gps_from_ltp(gps_ltp, origin)
        self.assertTrue(
            np.allclose(
                # NOTE(review): origin is compared in (col1, col0, col2)
                # order, i.e. lat/lon swapped relative to the rows --
                # presumably matching gps_to_ltp's (lon, lat, alt) origin
                # convention; confirm against that function's contract.
                (gps[0, 1], gps[0, 0], gps[0, 2]),
                origin
            )
        )
        self.assertTrue(
            np.allclose(
                gps,
                gps_recovered
            )
        )
    def test_gps_interpolation(self):
        """Samples repeated 4x per fix should be linearly interpolated
        between consecutive fixes; the trailing block stays constant."""
        gps = np.array([
            [10., 10., 10.],
            [10., 10., 10.],
            [10., 10., 10.],
            [10., 10., 10.],
            [12., 15., 8.],
            [12., 15., 8.],
            [12., 15., 8.],
            [12., 15., 8.],
            [15., 20., 5.],
            [15., 20., 5.],
            [15., 20., 5.],
            [15., 20., 5.],
            [20., 25., 10.],
            [20., 25., 10.],
            [20., 25., 10.],
            [20., 25., 10.]
        ])
        gps_interpolated_gt = np.array([
            [10.  , 10.  , 10.  ],
            [10.5 , 11.25,  9.5 ],
            [11.  , 12.5 ,  9.  ],
            [11.5 , 13.75,  8.5 ],
            [12.  , 15.  ,  8.  ],
            [12.75, 16.25,  7.25],
            [13.5 , 17.5 ,  6.5 ],
            [14.25, 18.75,  5.75],
            [15.  , 20.  ,  5.  ],
            [16.25, 21.25,  6.25],
            [17.5 , 22.5 ,  7.5 ],
            [18.75, 23.75,  8.75],
            [20.  , 25.  , 10.  ],
            [20.  , 25.  , 10.  ],
            [20.  , 25.  , 10.  ],
            [20.  , 25.  , 10.  ]
        ])
        gps_interpolated = interpolate_gps(gps)
        self.assertTrue(
            np.allclose(
                gps_interpolated_gt,
                gps_interpolated
            )
        )
) | [
"extractor.gps.gps_to_ltp",
"numpy.allclose",
"hypothesis.strategies.integers",
"extractor.gps.interpolate_gps",
"extractor.gps.gps_from_ltp",
"hypothesis.strategies.floats",
"numpy.array"
] | [((648, 663), 'extractor.gps.gps_to_ltp', 'gps_to_ltp', (['gps'], {}), '(gps)\n', (658, 663), False, 'from extractor.gps import gps_to_ltp, gps_from_ltp, interpolate_gps\n'), ((688, 717), 'extractor.gps.gps_from_ltp', 'gps_from_ltp', (['gps_ltp', 'origin'], {}), '(gps_ltp, origin)\n', (700, 717), False, 'from extractor.gps import gps_to_ltp, gps_from_ltp, interpolate_gps\n'), ((1048, 1388), 'numpy.array', 'np.array', (['[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0], [10.0, 10.0, 10.0], [10.0, 10.0, \n 10.0], [12.0, 15.0, 8.0], [12.0, 15.0, 8.0], [12.0, 15.0, 8.0], [12.0, \n 15.0, 8.0], [15.0, 20.0, 5.0], [15.0, 20.0, 5.0], [15.0, 20.0, 5.0], [\n 15.0, 20.0, 5.0], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0], [20.0, 25.0, \n 10.0], [20.0, 25.0, 10.0]]'], {}), '([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0], [10.0, 10.0, 10.0], [10.0,\n 10.0, 10.0], [12.0, 15.0, 8.0], [12.0, 15.0, 8.0], [12.0, 15.0, 8.0], [\n 12.0, 15.0, 8.0], [15.0, 20.0, 5.0], [15.0, 20.0, 5.0], [15.0, 20.0, \n 5.0], [15.0, 20.0, 5.0], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0], [20.0,\n 25.0, 10.0], [20.0, 25.0, 10.0]])\n', (1056, 1388), True, 'import numpy as np\n'), ((1566, 1917), 'numpy.array', 'np.array', (['[[10.0, 10.0, 10.0], [10.5, 11.25, 9.5], [11.0, 12.5, 9.0], [11.5, 13.75, \n 8.5], [12.0, 15.0, 8.0], [12.75, 16.25, 7.25], [13.5, 17.5, 6.5], [\n 14.25, 18.75, 5.75], [15.0, 20.0, 5.0], [16.25, 21.25, 6.25], [17.5, \n 22.5, 7.5], [18.75, 23.75, 8.75], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0\n ], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0]]'], {}), '([[10.0, 10.0, 10.0], [10.5, 11.25, 9.5], [11.0, 12.5, 9.0], [11.5,\n 13.75, 8.5], [12.0, 15.0, 8.0], [12.75, 16.25, 7.25], [13.5, 17.5, 6.5],\n [14.25, 18.75, 5.75], [15.0, 20.0, 5.0], [16.25, 21.25, 6.25], [17.5, \n 22.5, 7.5], [18.75, 23.75, 8.75], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0\n ], [20.0, 25.0, 10.0], [20.0, 25.0, 10.0]])\n', (1574, 1917), True, 'import numpy as np\n'), ((2183, 2203), 'extractor.gps.interpolate_gps', 'interpolate_gps', (['gps'], {}), 
'(gps)\n', (2198, 2203), False, 'from extractor.gps import gps_to_ltp, gps_from_ltp, interpolate_gps\n'), ((756, 810), 'numpy.allclose', 'np.allclose', (['(gps[0, 1], gps[0, 0], gps[0, 2])', 'origin'], {}), '((gps[0, 1], gps[0, 0], gps[0, 2]), origin)\n', (767, 810), True, 'import numpy as np\n'), ((905, 936), 'numpy.allclose', 'np.allclose', (['gps', 'gps_recovered'], {}), '(gps, gps_recovered)\n', (916, 936), True, 'import numpy as np\n'), ((2249, 2299), 'numpy.allclose', 'np.allclose', (['gps_interpolated_gt', 'gps_interpolated'], {}), '(gps_interpolated_gt, gps_interpolated)\n', (2260, 2299), True, 'import numpy as np\n'), ((529, 549), 'hypothesis.strategies.floats', 'some.floats', (['(-90)', '(90)'], {}), '(-90, 90)\n', (540, 549), True, 'import hypothesis.strategies as some\n'), ((370, 409), 'hypothesis.strategies.integers', 'some.integers', ([], {'min_value': '(1)', 'max_value': '(5)'}), '(min_value=1, max_value=5)\n', (383, 409), True, 'import hypothesis.strategies as some\n'), ((439, 478), 'hypothesis.strategies.integers', 'some.integers', ([], {'min_value': '(3)', 'max_value': '(3)'}), '(min_value=3, max_value=3)\n', (452, 478), True, 'import hypothesis.strategies as some\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision
from torchvision import transforms
import torch.nn as nn
import os
import glob
import numpy as np
import time
import cv2
from einops import rearrange, reduce, repeat
from PIL import Image
#from utils.augmentations import SSDAugmentation, BaseTransform, NlosTransform
MEANS = (103.94, 116.78, 123.68)
STD = (57.38, 57.12, 58.40)
class DetrDataset(Dataset):
def __init__(self, args, is_correlation=False, mode='train', byol=False):
'''
dataset 처리
rf와 이미지의 경우에는 init 할 때부터 읽어와서 메모리에 올리지만 gt는 데이터를 활용할 때마다 load함.
mode - train : 학습을 위함. rf, gt, img 다 있는 경우
test : test를 위함. rf, gt, img 다 있는 경우
valid: valid를 위함(demo). rf, img만 있는 경우
'''
self.is_correlation = is_correlation
self.load_img = False #args.vis
self.mode = mode
#self.is_gaussian = args.gaussian
self.std = 0.1
self.mean = 0
self.is_normalize = False
self.cutoff = args.cutoff
self.augmentation = None
self.augmentation_prob = 1
#self.intensity = Intensity(scale=0.05)
self.print_once = True
self.flatten = True #args.flatten
self.byol = byol
#self.transform = NlosTransform(MEANS)
#print("for byol ? = ", self.byol)
data_path = '/home/tako/save_data_ver2'
#data_path_list = os.listdir(data_path)
data_path_list = glob.glob(data_path + '/*')
#print("data list", data_path_list)
data_path_list = sorted(data_path_list)
#print(data_path_list)
rf_data = [] # rf data list
target_list = [] # ground truth target
mask_list = [] # ground truth mask
img_list = []
human_index = []
print("start - data read")
#test_dir = [8, 9] # past version - 1
test_dir = [2, 5, 10, 14, 16, 19] #, 19] # cur version - 2
#test_dir = [2, 5, 10] # los
#test_dir = [14, 16, 19] #nlos
#test_dir = [10, 19] # demo - with mask , los , nlos
#test_dir = [2]
#test_dir = [14] # 흰 옷
remove_dir = [3, 4]
#valid_dir = [25, 26, 27]
#valid_dir = [19]
# valid_dir = [28, 29] # nlos wall
valid_dir = [x for x in range(21, 40)]
#valid_dir = [x for x in range(15, 40)]
#valid_dir += [x for x in range(1, 13)]
valid_dir = [x for x in range(1, 40)] # Model test
dir_count = 0
rf_index = 0
if mode == 'train':
outlier_list = range(49500, 50000)
else:
outlier_list = range(18000, 19000)
rf_index = -1
target_index = -1
mask_index = -1
img_index = -1
for file in data_path_list:
if dir_count in remove_dir:
dir_count += 1
continue
if mode == 'train' and (dir_count in test_dir or dir_count in valid_dir):
dir_count += 1
continue
elif mode == 'test' and dir_count not in test_dir:
dir_count += 1
continue
elif mode == 'valid' and dir_count not in valid_dir:
dir_count += 1
continue
if os.path.isdir(file) is True:
# 각 폴더 안의 npy 데이터
rf_file_list = glob.glob(file + '/raw/*.npy')
rf_file_list = sorted(rf_file_list)
print('dir_count:', dir_count,'dir(raw):', file, '\t# of data :', len(rf_file_list))
#print(rf_file_list)
for rf in rf_file_list:
rf_index += 1
if rf_index in outlier_list:
continue
temp_raw_rf = np.load(rf)[:, :, self.cutoff:]
#print("raw shape", temp_raw_rf.shape)
#----- normalization ------
if self.is_normalize is True:
for i in range(temp_raw_rf.shape[0]):
for j in range(temp_raw_rf.shape[1]):
stdev = np.std(temp_raw_rf[i, j])
temp_raw_rf[i, j] = temp_raw_rf[i, j]/stdev
#print("now shape",temp_raw_rf.shape)
#temp_raw_rf = torch.tensor(temp_raw_rf).float()
#print("now shape",temp_raw_rf.shape)
#m = torch.nn.Upsample(scale_factor=3, mode='bilinear')
#---------- 2차원으로 만들기 -----------
if self.flatten:
#print("now shape",temp_raw_rf.shape)
#temp_raw_rf = rearrange(temp_raw_rf, 'tx rx len -> (tx rx) len')
temp_raw_rf = rearrange(temp_raw_rf, 'tx rx len -> tx (rx len)')
#temp_raw_rf = rearrange(temp_raw_rf, 'x (len1 len2) -> x len1 len2', len1=int(math.sqrt(temp_raw_rf.shape[1])))
#print("now shape",temp_raw_rf.shape)
temp_raw_rf = rearrange(temp_raw_rf, 'tx (len1 len2) -> tx len1 len2', len1=72)
#temp_raw_rf = repeat(temp_raw_rf, 'c h w -> c (h rep_1) (w rep_2)', rep_1=3, rep_2=3)
#temp_raw_rf = m(temp_raw_rf)
#temp_raw_rf = temp_raw_rf.unsqueeze(0)
#print("now shape",temp_raw_rf.shape)
rf_data.append(temp_raw_rf)
#print("rf shape", temp_raw_rf.shape)
if self.print_once:
#print(temp_raw_rf[0][0])
#print(re_temp_raw_rf[0][0])
print("rf shape", temp_raw_rf.shape)
self.print_once = False
#break
'''
ground truth data 읽어오기.
target : [num_obj * 5] ( box 좌표, class)
mask : [num_obj, h, w] 1 = mask, 0 = else
'''
target_file_list = glob.glob(file + '/target/*')
target_file_list = sorted(target_file_list)
print('dir(target):', file, '\t# of data :', len(target_file_list))
'''
mask_file_list = glob.glob(file + '/mask/*')
mask_file_list = sorted(mask_file_list)
print('dir(mask):', file, '\t# of data :', len(mask_file_list))
'''
img_file_list = glob.glob(file + '/img/*')
img_file_list = sorted(img_file_list)
print('dir(img):', file, '\t# of data :', len(img_file_list))
#----- gt 파일 이름명만 리스트에 넣어놓기 -----
for target in target_file_list:
if target_index == 0:
print("target_shape ", np.load(target).shape, np.load(target))
target_index += 1
if target_index in outlier_list:
continue
target_list.append(target)
'''
for mask in mask_file_list:
mask_index += 1
if mask_index in outlier_list:
continue
if mask_index == 0:
print("mask_shape ", np.load(mask).shape)
mask_list.append(mask)
'''
if self.load_img is True or self.mode=='test':
for img in img_file_list:
img_index += 1
if img_index in outlier_list:
continue
temp_img = cv2.imread(img)
img_list.append(temp_img)
dir_count += 1
self.rf_data = rf_data
self.target_list = target_list
#self.mask_list = mask_list
self.img_list = img_list
#self.human_index = human_index
print(len(target_list), len(img_list)) #, len(human_index))
#if self.mode == 'valid' and len(self.gt_list) == 0:
# for i in range(len(self.rf_data)):
# self.gt_list.append(np.zeros((13, 120, 120)))
print("end - data read")
print("size of dataset", len(self.rf_data))
def __len__(self):
return len(self.rf_data)
def __getitem__(self, idx):
#if True:
rf = self.rf_data[idx]
target = np.load(self.target_list[idx])
'''
if self.mode == 'train':
target = np.load(self.target_list[idx])
else:
target = np.zeros((1, 5))
'''
if self.load_img is False and self.mode=='train':
#gaussian noise
#if self.mode == 'train' and self.is_gaussian is True:
# gt = gt + torch.randn(gt.size()) * self.std + self.mean
#print(rf.shape, target.shape, target, mask.shape)
#print(gt.shape)
return rf, target, idx
# return self.rf_data[idx], self.gt_list[idx]
else:
return rf, target, idx, self.img_list[idx]
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and (lists of annotations, masks)
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list<tensor>, list<tensor>, list<int>) annotations for a given image are stacked
on 0 dim. The output gt is a tuple of annotations and masks.
"""
rfs = []
targets = []
ids = []
#masks = []
#imgs = []
for sample in batch:
rf = torch.FloatTensor(sample[0]).clone()
#img = sample[0].clone()
target = torch.FloatTensor(sample[1]).clone()
idx = torch.tensor([sample[2]])
#mask = torch.FloatTensor(sample[1][1]).clone()
#img = torch.FloatTensor(sample[13).clone()
rfs.append(rf)
targets.append(target)
ids.append(idx)
#masks.append(mask)
#imgs.append(img)
rfs = torch.stack(rfs)
return rfs, targets, ids
def detection_collate_var(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and (lists of annotations, masks)
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list<tensor>, list<tensor>, list<int>) annotations for a given image are stacked
on 0 dim. The output gt is a tuple of annotations and masks.
"""
rfs = []
targets = []
ids = []
imgs = []
for sample in batch:
rf = torch.FloatTensor(sample[0]).clone()
img = sample[3]
target = torch.FloatTensor(sample[1]).clone()
idx = torch.tensor([sample[2]])
#mask = torch.FloatTensor(sample[1][1]).clone()
#img = torch.FloatTensor(sample[1][2]).clone()
rfs.append(rf)
targets.append(target)
ids.append(idx)
#masks.append(mask)
imgs.append(img)
rfs = torch.stack(rfs)
return rfs, targets, ids, imgs | [
"torch.stack",
"einops.rearrange",
"torch.tensor",
"os.path.isdir",
"numpy.std",
"numpy.load",
"cv2.imread",
"torch.FloatTensor",
"glob.glob"
] | [((10433, 10449), 'torch.stack', 'torch.stack', (['rfs'], {}), '(rfs)\n', (10444, 10449), False, 'import torch\n'), ((11547, 11563), 'torch.stack', 'torch.stack', (['rfs'], {}), '(rfs)\n', (11558, 11563), False, 'import torch\n'), ((1526, 1553), 'glob.glob', 'glob.glob', (["(data_path + '/*')"], {}), "(data_path + '/*')\n", (1535, 1553), False, 'import glob\n'), ((8648, 8678), 'numpy.load', 'np.load', (['self.target_list[idx]'], {}), '(self.target_list[idx])\n', (8655, 8678), True, 'import numpy as np\n'), ((10156, 10181), 'torch.tensor', 'torch.tensor', (['[sample[2]]'], {}), '([sample[2]])\n', (10168, 10181), False, 'import torch\n'), ((11268, 11293), 'torch.tensor', 'torch.tensor', (['[sample[2]]'], {}), '([sample[2]])\n', (11280, 11293), False, 'import torch\n'), ((3356, 3375), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (3369, 3375), False, 'import os\n'), ((3450, 3480), 'glob.glob', 'glob.glob', (["(file + '/raw/*.npy')"], {}), "(file + '/raw/*.npy')\n", (3459, 3480), False, 'import glob\n'), ((6207, 6236), 'glob.glob', 'glob.glob', (["(file + '/target/*')"], {}), "(file + '/target/*')\n", (6216, 6236), False, 'import glob\n'), ((6651, 6677), 'glob.glob', 'glob.glob', (["(file + '/img/*')"], {}), "(file + '/img/*')\n", (6660, 6677), False, 'import glob\n'), ((10018, 10046), 'torch.FloatTensor', 'torch.FloatTensor', (['sample[0]'], {}), '(sample[0])\n', (10035, 10046), False, 'import torch\n'), ((10105, 10133), 'torch.FloatTensor', 'torch.FloatTensor', (['sample[1]'], {}), '(sample[1])\n', (10122, 10133), False, 'import torch\n'), ((11139, 11167), 'torch.FloatTensor', 'torch.FloatTensor', (['sample[0]'], {}), '(sample[0])\n', (11156, 11167), False, 'import torch\n'), ((11217, 11245), 'torch.FloatTensor', 'torch.FloatTensor', (['sample[1]'], {}), '(sample[1])\n', (11234, 11245), False, 'import torch\n'), ((3861, 3872), 'numpy.load', 'np.load', (['rf'], {}), '(rf)\n', (3868, 3872), True, 'import numpy as np\n'), ((4910, 4960), 
'einops.rearrange', 'rearrange', (['temp_raw_rf', '"""tx rx len -> tx (rx len)"""'], {}), "(temp_raw_rf, 'tx rx len -> tx (rx len)')\n", (4919, 4960), False, 'from einops import rearrange, reduce, repeat\n'), ((5198, 5263), 'einops.rearrange', 'rearrange', (['temp_raw_rf', '"""tx (len1 len2) -> tx len1 len2"""'], {'len1': '(72)'}), "(temp_raw_rf, 'tx (len1 len2) -> tx len1 len2', len1=72)\n", (5207, 5263), False, 'from einops import rearrange, reduce, repeat\n'), ((7872, 7887), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (7882, 7887), False, 'import cv2\n'), ((7056, 7071), 'numpy.load', 'np.load', (['target'], {}), '(target)\n', (7063, 7071), True, 'import numpy as np\n'), ((4239, 4264), 'numpy.std', 'np.std', (['temp_raw_rf[i, j]'], {}), '(temp_raw_rf[i, j])\n', (4245, 4264), True, 'import numpy as np\n'), ((7033, 7048), 'numpy.load', 'np.load', (['target'], {}), '(target)\n', (7040, 7048), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from quartical.config.external import Gain
from quartical.config.internal import yield_from
from loguru import logger # noqa
import numpy as np
import dask.array as da
from pathlib import Path
import shutil
from daskms.experimental.zarr import xds_to_zarr
from quartical.gains import TERM_TYPES
from quartical.utils.dask import blockwise_unique
from quartical.utils.maths import mean_for_index
from quartical.gains.general.generics import combine_gains, combine_flags
def make_gain_xds_lod(data_xds_list,
tipc_list,
fipc_list,
coords_per_xds,
chain_opts):
"""Returns a list of dicts of xarray.Dataset objects describing the gains.
For a given input xds containing data, creates an xarray.Dataset object
per term which describes the term's dimensions.
Args:
data_xds_list: A list of xarray.Dataset objects containing MS data.
tipc_list: List of numpy.ndarray objects containing number of time
intervals in a chunk.
fipc_list: List of numpy.ndarray objects containing number of freq
intervals in a chunk.
coords_per_xds: A List of Dicts containing coordinates.
chain_opts: A Chain config object.
Returns:
gain_xds_lod: A List of Dicts of xarray.Dataset objects describing the
gain terms assosciated with each data xarray.Dataset.
"""
gain_xds_lod = []
for xds_ind, data_xds in enumerate(data_xds_list):
term_xds_dict = {}
term_coords = coords_per_xds[xds_ind]
for loop_vars in enumerate(yield_from(chain_opts, "type")):
term_ind, (term_name, term_type) = loop_vars
term_t_chunks = tipc_list[xds_ind][:, :, term_ind]
term_f_chunks = fipc_list[xds_ind][:, :, term_ind]
term_opts = getattr(chain_opts, term_name)
term_obj = TERM_TYPES[term_type](term_name,
term_opts,
data_xds,
term_coords,
term_t_chunks,
term_f_chunks)
term_xds_dict[term_name] = term_obj.make_xds()
gain_xds_lod.append(term_xds_dict)
return gain_xds_lod
def compute_interval_chunking(data_xds_list, t_map_list, f_map_list):
'''Compute the per-term chunking of the gains.
Given a list of data xarray.Datasets as well as information about the
time and frequency mappings, computes the chunk sizes of the gain terms.
Args:
data_xds_list: A list of data-containing xarray.Dataset objects.
t_map_list: A list of arrays describing how times map to solint.
f_map_list: A list of arrays describing how freqs map to solint.
Returns:
A tuple of lists containing arrays which descibe the chunking.
'''
tipc_list = []
fipc_list = []
for xds_ind, _ in enumerate(data_xds_list):
t_map_arr = t_map_list[xds_ind]
f_map_arr = f_map_list[xds_ind]
tipc_per_term = da.map_blocks(lambda arr: arr[:, -1:, :] + 1,
t_map_arr,
chunks=((2,),
(1,)*t_map_arr.numblocks[1],
t_map_arr.chunks[2]))
fipc_per_term = da.map_blocks(lambda arr: arr[:, -1:, :] + 1,
f_map_arr,
chunks=((2,),
(1,)*f_map_arr.numblocks[1],
f_map_arr.chunks[2]))
tipc_list.append(tipc_per_term)
fipc_list.append(fipc_per_term)
# This is an early compute which is necessary to figure out the gain dims.
return da.compute(tipc_list, fipc_list)
def compute_dataset_coords(data_xds_list,
t_bin_list,
f_map_list,
tipc_list,
fipc_list,
terms):
'''Compute the cooridnates for the gain datasets.
Given a list of data xarray.Datasets as well as information about the
binning along the time and frequency axes, computes the true coordinate
values for the gain xarray.Datasets.
Args:
data_xds_list: A list of data-containing xarray.Dataset objects.
t_bin_list: A list of arrays describing how times map to solint.
f_map_list: A list of arrays describing how freqs map to solint.
tipc_list: A list of arrays contatining the number of time intervals
per chunk.
fipc_list: A list of arrays contatining the number of freq intervals
per chunk.
Returns:
A list of dictionaries containing the computed coordinate values.
'''
coords_per_xds = []
for xds_ind, data_xds in enumerate(data_xds_list):
utime_chunks = list(map(int, data_xds.UTIME_CHUNKS))
unique_times = blockwise_unique(data_xds.TIME.data,
chunks=(utime_chunks,))
unique_freqs = data_xds.CHAN_FREQ.data
coord_dict = {"time": unique_times, # Doesn't vary with term.
"freq": unique_freqs} # Doesn't vary with term.
for term_ind, term_name in enumerate(terms):
# This indexing corresponds to grabbing the info per xds, per term.
tipc = tipc_list[xds_ind][:, :, term_ind]
fipc = fipc_list[xds_ind][:, :, term_ind]
term_t_bins = t_bin_list[xds_ind][:, :, term_ind]
term_f_map = f_map_list[xds_ind][:, :, term_ind]
mean_gtimes = da.map_blocks(mean_for_index,
unique_times,
term_t_bins[0],
dtype=unique_times.dtype,
chunks=(tuple(map(int, tipc[0])),))
mean_ptimes = da.map_blocks(mean_for_index,
unique_times,
term_t_bins[1],
dtype=unique_times.dtype,
chunks=(tuple(map(int, tipc[1])),))
mean_gfreqs = da.map_blocks(mean_for_index,
unique_freqs,
term_f_map[0],
dtype=unique_freqs.dtype,
chunks=(tuple(map(int, fipc[0])),))
mean_pfreqs = da.map_blocks(mean_for_index,
unique_freqs,
term_f_map[1],
dtype=unique_freqs.dtype,
chunks=(tuple(map(int, fipc[1])),))
coord_dict[f"{term_name}_mean_gtime"] = mean_gtimes
coord_dict[f"{term_name}_mean_ptime"] = mean_ptimes
coord_dict[f"{term_name}_mean_gfreq"] = mean_gfreqs
coord_dict[f"{term_name}_mean_pfreq"] = mean_pfreqs
coords_per_xds.append(coord_dict)
# We take the hit on a second early compute in order to make loading and
# interpolating gains a less complicated operation.
return da.compute(coords_per_xds)[0]
def make_net_xds_list(data_xds_list, coords_per_xds):
"""Construct a list of dicts of xarray.Datasets to house the net gains.
Args:
data_xds_list: A List of xarray.Dataset objects containing MS data.
coords_per_xds: A List of Dicts containing dataset coords.
Returns:
net_gain_xds_list: A List of xarray.Dataset objects to house
the net gains.
"""
net_gain_xds_list = []
for data_xds, xds_coords in zip(data_xds_list, coords_per_xds):
net_t_chunks = np.tile(data_xds.UTIME_CHUNKS, 2).reshape(2, -1)
net_f_chunks = np.tile(data_xds.chunks["chan"], 2).reshape(2, -1)
# Create a default config object, consistent with the net gain.
# NOTE: If we have a direction-dependent model, assume the net gain
# is also direction dependent.
config = Gain(direction_dependent=bool(data_xds.dims["dir"]))
net_obj = TERM_TYPES["complex"]("NET",
config,
data_xds,
xds_coords,
net_t_chunks,
net_f_chunks)
net_gain_xds_list.append(net_obj.make_xds())
return net_gain_xds_list
def combine_gains_wrapper(t_bin_arr, f_map_arr, d_map_arr, net_shape,
corr_mode, *gains):
"""Wrapper to stop dask from getting confused. See issue #99."""
return combine_gains(t_bin_arr, f_map_arr, d_map_arr, net_shape,
corr_mode, *gains)
def combine_flags_wrapper(t_bin_arr, f_map_arr, d_map_arr, net_shape,
corr_mode, *flags):
"""Wrapper to stop dask from getting confused. See issue #99."""
return combine_flags(t_bin_arr, f_map_arr, d_map_arr, net_shape,
corr_mode, *flags)
def populate_net_xds_list(net_gain_xds_list,
solved_gain_xds_lod,
t_bin_list,
f_map_list,
d_map_list):
"""Poplulate the list net gain datasets with net gain values.
Args:
net_gain_xds_list: A List of xarray.Dataset objects to house the
net gains.
solved_gain_xds_lol: A List of Lists of xarray.Dataset objects housing
the solved gain terms.
t_bin_list: A List of dask.Arrays containing mappings from unique
time to solution interval.
f_map_list: A List of dask.Arrays containing mappings from channel
to solution interval.
d_map_list: A List of numpy.ndarrays containing mappings between
direction dependent terms and direction independent terms.
Returns:
net_gain_xds_list: A List of xarray.Dataset objects to house the
net gains.
"""
populated_net_gain_xds_list = []
for ind, (terms, net_xds) in enumerate(zip(solved_gain_xds_lod,
net_gain_xds_list)):
net_shape = tuple(net_xds.dims[d]
for d in ["gain_t", "gain_f", "ant", "dir", "corr"])
gain_schema = ("time", "chan", "ant", "dir", "corr")
gains = [x for xds in terms.values()
for x in (xds.gains.data, gain_schema)]
corr_mode = net_shape[-1]
dtype = np.find_common_type(
[xds.gains.dtype for xds in terms.values()], []
)
identity_elements = {1: np.ones(1, dtype=dtype),
2: np.ones(2, dtype=dtype),
4: np.array((1, 0, 0, 1), dtype=dtype)}
net_gain = da.blockwise(
combine_gains_wrapper, ("time", "chan", "ant", "dir", "corr"),
t_bin_list[ind], ("param", "time", "term"),
f_map_list[ind], ("param", "chan", "term"),
d_map_list[ind], None,
net_shape, None,
corr_mode, None,
*gains,
dtype=dtype,
align_arrays=False,
concatenate=True,
adjust_chunks={"time": net_xds.GAIN_SPEC.tchunk,
"chan": net_xds.GAIN_SPEC.fchunk,
"dir": net_xds.GAIN_SPEC.dchunk}
)
flag_schema = ("time", "chan", "ant", "dir")
flags = [x for xds in terms.values()
for x in (xds.gain_flags.data, flag_schema)]
dtype = np.find_common_type(
[xds.gain_flags.dtype for xds in terms.values()], []
)
net_flags = da.blockwise(
combine_flags_wrapper, ("time", "chan", "ant", "dir"),
t_bin_list[ind], ("param", "time", "term"),
f_map_list[ind], ("param", "chan", "term"),
d_map_list[ind], None,
net_shape[:-1], None,
*flags,
dtype=dtype,
align_arrays=False,
concatenate=True,
adjust_chunks={"time": net_xds.GAIN_SPEC.tchunk,
"chan": net_xds.GAIN_SPEC.fchunk,
"dir": net_xds.GAIN_SPEC.dchunk}
)
net_gain = da.where(net_flags[..., None],
identity_elements[corr_mode],
net_gain)
net_xds = net_xds.assign(
{
"gains": (net_xds.GAIN_AXES, net_gain),
"gain_flags": (net_xds.GAIN_AXES[:-1], net_flags)
}
)
populated_net_gain_xds_list.append(net_xds)
return populated_net_gain_xds_list
def write_gain_datasets(gain_xds_lod, net_xds_list, output_opts):
"""Write the contents of gain_xds_lol to zarr in accordance with opts."""
root_path = Path(output_opts.directory).absolute()
gain_path = root_path / Path("gains.qc")
term_names = [xds.NAME for xds in gain_xds_lod[0].values()]
writable_xds_dol = {tn: [d[tn] for d in gain_xds_lod] for tn in term_names}
# If we are writing out the net/effective gains.
if output_opts.net_gain:
net_name = net_xds_list[0].NAME
term_names.append(net_name)
writable_xds_dol[net_name] = net_xds_list
# If the directory in which we intend to store a gain already exists, we
# remove it to make sure that we don't end up with a mix of old and new.
for term_name in term_names:
term_path = gain_path.joinpath(term_name)
if term_path.is_dir():
logger.info(f"Removing preexisting gain folder {term_path}.")
try:
shutil.rmtree(term_path)
except Exception as e:
logger.warning(f"Failed to delete {term_path}. Reason: {e}.")
gain_writes_lol = []
for term_name, term_xds_list in writable_xds_dol.items():
term_write_xds_list = []
# The following rechunks to some sensible chunk size. This ensures
# that the chunks are regular and <2GB, which is necessary for zarr.
for xds in term_xds_list:
target_chunks = {}
if hasattr(xds, "PARAM_AXES"):
rechunked_params = \
xds.params.chunk({ax: "auto" for ax in xds.PARAM_AXES[:2]})
target_chunks.update(rechunked_params.chunksizes)
rechunked_gains = \
xds.gains.chunk({ax: "auto" for ax in xds.GAIN_AXES[:2]})
target_chunks.update(rechunked_gains.chunksizes)
rechunked_xds = xds.chunk(target_chunks)
term_write_xds_list.append(rechunked_xds)
output_path = f"{gain_path}{'::' + term_name}"
gain_writes_lol.append(xds_to_zarr(term_write_xds_list, output_path))
# This converts the interpolated list of lists into a list of dicts.
write_xds_lod = [{tn: term for tn, term in zip(term_names, terms)}
for terms in zip(*gain_writes_lol)]
return write_xds_lod
| [
"dask.array.compute",
"dask.array.blockwise",
"numpy.tile",
"numpy.ones",
"loguru.logger.info",
"pathlib.Path",
"quartical.config.internal.yield_from",
"quartical.gains.general.generics.combine_gains",
"dask.array.map_blocks",
"loguru.logger.warning",
"quartical.utils.dask.blockwise_unique",
"... | [((3964, 3996), 'dask.array.compute', 'da.compute', (['tipc_list', 'fipc_list'], {}), '(tipc_list, fipc_list)\n', (3974, 3996), True, 'import dask.array as da\n'), ((9030, 9106), 'quartical.gains.general.generics.combine_gains', 'combine_gains', (['t_bin_arr', 'f_map_arr', 'd_map_arr', 'net_shape', 'corr_mode', '*gains'], {}), '(t_bin_arr, f_map_arr, d_map_arr, net_shape, corr_mode, *gains)\n', (9043, 9106), False, 'from quartical.gains.general.generics import combine_gains, combine_flags\n'), ((9331, 9407), 'quartical.gains.general.generics.combine_flags', 'combine_flags', (['t_bin_arr', 'f_map_arr', 'd_map_arr', 'net_shape', 'corr_mode', '*flags'], {}), '(t_bin_arr, f_map_arr, d_map_arr, net_shape, corr_mode, *flags)\n', (9344, 9407), False, 'from quartical.gains.general.generics import combine_gains, combine_flags\n'), ((3187, 3314), 'dask.array.map_blocks', 'da.map_blocks', (['(lambda arr: arr[:, -1:, :] + 1)', 't_map_arr'], {'chunks': '((2,), (1,) * t_map_arr.numblocks[1], t_map_arr.chunks[2])'}), '(lambda arr: arr[:, -1:, :] + 1, t_map_arr, chunks=((2,), (1,) *\n t_map_arr.numblocks[1], t_map_arr.chunks[2]))\n', (3200, 3314), True, 'import dask.array as da\n'), ((3502, 3629), 'dask.array.map_blocks', 'da.map_blocks', (['(lambda arr: arr[:, -1:, :] + 1)', 'f_map_arr'], {'chunks': '((2,), (1,) * f_map_arr.numblocks[1], f_map_arr.chunks[2])'}), '(lambda arr: arr[:, -1:, :] + 1, f_map_arr, chunks=((2,), (1,) *\n f_map_arr.numblocks[1], f_map_arr.chunks[2]))\n', (3515, 3629), True, 'import dask.array as da\n'), ((5169, 5229), 'quartical.utils.dask.blockwise_unique', 'blockwise_unique', (['data_xds.TIME.data'], {'chunks': '(utime_chunks,)'}), '(data_xds.TIME.data, chunks=(utime_chunks,))\n', (5185, 5229), False, 'from quartical.utils.dask import blockwise_unique\n'), ((7505, 7531), 'dask.array.compute', 'da.compute', (['coords_per_xds'], {}), '(coords_per_xds)\n', (7515, 7531), True, 'import dask.array as da\n'), ((11220, 11637), 'dask.array.blockwise', 
'da.blockwise', (['combine_gains_wrapper', "('time', 'chan', 'ant', 'dir', 'corr')", 't_bin_list[ind]', "('param', 'time', 'term')", 'f_map_list[ind]', "('param', 'chan', 'term')", 'd_map_list[ind]', 'None', 'net_shape', 'None', 'corr_mode', 'None', '*gains'], {'dtype': 'dtype', 'align_arrays': '(False)', 'concatenate': '(True)', 'adjust_chunks': "{'time': net_xds.GAIN_SPEC.tchunk, 'chan': net_xds.GAIN_SPEC.fchunk, 'dir':\n net_xds.GAIN_SPEC.dchunk}"}), "(combine_gains_wrapper, ('time', 'chan', 'ant', 'dir', 'corr'),\n t_bin_list[ind], ('param', 'time', 'term'), f_map_list[ind], ('param',\n 'chan', 'term'), d_map_list[ind], None, net_shape, None, corr_mode,\n None, *gains, dtype=dtype, align_arrays=False, concatenate=True,\n adjust_chunks={'time': net_xds.GAIN_SPEC.tchunk, 'chan': net_xds.\n GAIN_SPEC.fchunk, 'dir': net_xds.GAIN_SPEC.dchunk})\n", (11232, 11637), True, 'import dask.array as da\n'), ((12108, 12505), 'dask.array.blockwise', 'da.blockwise', (['combine_flags_wrapper', "('time', 'chan', 'ant', 'dir')", 't_bin_list[ind]', "('param', 'time', 'term')", 'f_map_list[ind]', "('param', 'chan', 'term')", 'd_map_list[ind]', 'None', 'net_shape[:-1]', 'None', '*flags'], {'dtype': 'dtype', 'align_arrays': '(False)', 'concatenate': '(True)', 'adjust_chunks': "{'time': net_xds.GAIN_SPEC.tchunk, 'chan': net_xds.GAIN_SPEC.fchunk, 'dir':\n net_xds.GAIN_SPEC.dchunk}"}), "(combine_flags_wrapper, ('time', 'chan', 'ant', 'dir'),\n t_bin_list[ind], ('param', 'time', 'term'), f_map_list[ind], ('param',\n 'chan', 'term'), d_map_list[ind], None, net_shape[:-1], None, *flags,\n dtype=dtype, align_arrays=False, concatenate=True, adjust_chunks={\n 'time': net_xds.GAIN_SPEC.tchunk, 'chan': net_xds.GAIN_SPEC.fchunk,\n 'dir': net_xds.GAIN_SPEC.dchunk})\n", (12120, 12505), True, 'import dask.array as da\n'), ((12689, 12759), 'dask.array.where', 'da.where', (['net_flags[..., None]', 'identity_elements[corr_mode]', 'net_gain'], {}), '(net_flags[..., None], identity_elements[corr_mode], 
net_gain)\n', (12697, 12759), True, 'import dask.array as da\n'), ((13334, 13350), 'pathlib.Path', 'Path', (['"""gains.qc"""'], {}), "('gains.qc')\n", (13338, 13350), False, 'from pathlib import Path\n'), ((1648, 1678), 'quartical.config.internal.yield_from', 'yield_from', (['chain_opts', '"""type"""'], {}), "(chain_opts, 'type')\n", (1658, 1678), False, 'from quartical.config.internal import yield_from\n'), ((11049, 11072), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (11056, 11072), True, 'import numpy as np\n'), ((11106, 11129), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'dtype'}), '(2, dtype=dtype)\n', (11113, 11129), True, 'import numpy as np\n'), ((11163, 11198), 'numpy.array', 'np.array', (['(1, 0, 0, 1)'], {'dtype': 'dtype'}), '((1, 0, 0, 1), dtype=dtype)\n', (11171, 11198), True, 'import numpy as np\n'), ((13267, 13294), 'pathlib.Path', 'Path', (['output_opts.directory'], {}), '(output_opts.directory)\n', (13271, 13294), False, 'from pathlib import Path\n'), ((13987, 14048), 'loguru.logger.info', 'logger.info', (['f"""Removing preexisting gain folder {term_path}."""'], {}), "(f'Removing preexisting gain folder {term_path}.')\n", (13998, 14048), False, 'from loguru import logger\n'), ((15155, 15200), 'daskms.experimental.zarr.xds_to_zarr', 'xds_to_zarr', (['term_write_xds_list', 'output_path'], {}), '(term_write_xds_list, output_path)\n', (15166, 15200), False, 'from daskms.experimental.zarr import xds_to_zarr\n'), ((8060, 8093), 'numpy.tile', 'np.tile', (['data_xds.UTIME_CHUNKS', '(2)'], {}), '(data_xds.UTIME_CHUNKS, 2)\n', (8067, 8093), True, 'import numpy as np\n'), ((8132, 8167), 'numpy.tile', 'np.tile', (["data_xds.chunks['chan']", '(2)'], {}), "(data_xds.chunks['chan'], 2)\n", (8139, 8167), True, 'import numpy as np\n'), ((14082, 14106), 'shutil.rmtree', 'shutil.rmtree', (['term_path'], {}), '(term_path)\n', (14095, 14106), False, 'import shutil\n'), ((14158, 14219), 'loguru.logger.warning', 'logger.warning', 
(['f"""Failed to delete {term_path}. Reason: {e}."""'], {}), "(f'Failed to delete {term_path}. Reason: {e}.')\n", (14172, 14219), False, 'from loguru import logger\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 01:31:38 2019.
@author: mtageld
"""
import unittest
import girder_client
import numpy as np
from skimage.transform import resize
# from matplotlib import pylab as plt
# from matplotlib.colors import ListedColormap
from histomicstk.saliency.tissue_detection import (
get_slide_thumbnail, get_tissue_mask)
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
get_image_from_htk_response)
from histomicstk.preprocessing.color_normalization.\
deconvolution_based_normalization import deconvolution_based_normalization
# %%===========================================================================
# Constants & prep work
APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/'
SAMPLE_SLIDE_ID = "5d817f5abd4404c6b1f744bb"
gc = girder_client.GirderClient(apiUrl=APIURL)
# gc.authenticate(interactive=True)
gc.authenticate(apiKey='<KEY>')
MAG = 1.0
# TCGA-A2-A3XS-DX1_xmin21421_ymin37486_.png, Amgad et al, 2019)
# for macenco (obtained using rgb_separate_stains_macenko_pca()
# and using reordered such that columns are the order:
# Hamtoxylin, Eosin, Null
W_target = np.array([
[0.5807549, 0.08314027, 0.08213795],
[0.71681094, 0.90081588, 0.41999816],
[0.38588316, 0.42616716, -0.90380025]
])
# %%===========================================================================
print("Getting images to be normalized ...")
# get RGB image at a small magnification
slide_info = gc.get('item/%s/tiles' % SAMPLE_SLIDE_ID)
getStr = "/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d" % (
SAMPLE_SLIDE_ID, 0, slide_info['sizeX'], 0, slide_info['sizeY']
) + "&magnification=%.2f" % MAG
tissue_rgb = get_image_from_htk_response(
gc.get(getStr, jsonResp=False))
# get mask of things to ignore
thumbnail_rgb = get_slide_thumbnail(gc, SAMPLE_SLIDE_ID)
mask_out, _ = get_tissue_mask(
thumbnail_rgb, deconvolve_first=True,
n_thresholding_steps=1, sigma=1.5, min_size=30)
mask_out = resize(
mask_out == 0, output_shape=tissue_rgb.shape[:2],
order=0, preserve_range=True) == 1
# since this is a unit test, just work on a small image
tissue_rgb = tissue_rgb[1000:1500, 2500:3000, :]
mask_out = mask_out[1000:1500, 2500:3000]
# %%===========================================================================
class DeconvolutionBasedNormalizationTest(unittest.TestCase):
"""Test deconvolution normalization."""
def test_macenko_normalization(self):
"""Test macenko_pca normalization."""
stain_unmixing_routine_params = {
'stains': ['hematoxylin', 'eosin'],
'stain_unmixing_method': 'macenko_pca',
}
print("Macenko - Unmasked, using default, 'idealized' W_target")
tissue_rgb_normalized = deconvolution_based_normalization(
tissue_rgb,
stain_unmixing_routine_params=stain_unmixing_routine_params)
self.assertTupleEqual(tuple(
[int(tissue_rgb_normalized[..., i].mean()) for i in range(3)]),
(192, 161, 222)
)
print("Macenko - Unmasked, using W_target from good image")
tissue_rgb_normalized = deconvolution_based_normalization(
tissue_rgb, W_target=W_target,
stain_unmixing_routine_params=stain_unmixing_routine_params)
self.assertTupleEqual(tuple(
[int(tissue_rgb_normalized[..., i].mean()) for i in range(3)]),
(198, 163, 197)
)
print("Macenko - Masked, using W_target from good image")
tissue_rgb_normalized = deconvolution_based_normalization(
tissue_rgb, W_target=W_target, mask_out=mask_out,
stain_unmixing_routine_params=stain_unmixing_routine_params)
self.assertTupleEqual(tuple(
[int(tissue_rgb_normalized[..., i].mean()) for i in range(3)]),
(194, 172, 201)
)
# def test_xu_normalization(self):
# """Test xu_snmf normalization. (VERY SLOW!!)"""
# stain_unmixing_routine_params = {
# 'stains': ['hematoxylin', 'eosin'],
# 'stain_unmixing_method': 'xu_snmf',
# }
# # Unmasked using W_target from good image
# tissue_rgb_normalized = deconvolution_based_normalization(
# tissue_rgb,
# stain_unmixing_routine_params=stain_unmixing_routine_params)
# %%===========================================================================
if __name__ == '__main__':
unittest.main()
| [
"histomicstk.saliency.tissue_detection.get_tissue_mask",
"girder_client.GirderClient",
"histomicstk.preprocessing.color_normalization.deconvolution_based_normalization.deconvolution_based_normalization",
"numpy.array",
"unittest.main",
"skimage.transform.resize",
"histomicstk.saliency.tissue_detection.g... | [((839, 880), 'girder_client.GirderClient', 'girder_client.GirderClient', ([], {'apiUrl': 'APIURL'}), '(apiUrl=APIURL)\n', (865, 880), False, 'import girder_client\n'), ((1181, 1310), 'numpy.array', 'np.array', (['[[0.5807549, 0.08314027, 0.08213795], [0.71681094, 0.90081588, 0.41999816],\n [0.38588316, 0.42616716, -0.90380025]]'], {}), '([[0.5807549, 0.08314027, 0.08213795], [0.71681094, 0.90081588, \n 0.41999816], [0.38588316, 0.42616716, -0.90380025]])\n', (1189, 1310), True, 'import numpy as np\n'), ((1850, 1890), 'histomicstk.saliency.tissue_detection.get_slide_thumbnail', 'get_slide_thumbnail', (['gc', 'SAMPLE_SLIDE_ID'], {}), '(gc, SAMPLE_SLIDE_ID)\n', (1869, 1890), False, 'from histomicstk.saliency.tissue_detection import get_slide_thumbnail, get_tissue_mask\n'), ((1905, 2011), 'histomicstk.saliency.tissue_detection.get_tissue_mask', 'get_tissue_mask', (['thumbnail_rgb'], {'deconvolve_first': '(True)', 'n_thresholding_steps': '(1)', 'sigma': '(1.5)', 'min_size': '(30)'}), '(thumbnail_rgb, deconvolve_first=True, n_thresholding_steps=\n 1, sigma=1.5, min_size=30)\n', (1920, 2011), False, 'from histomicstk.saliency.tissue_detection import get_slide_thumbnail, get_tissue_mask\n'), ((2027, 2117), 'skimage.transform.resize', 'resize', (['(mask_out == 0)'], {'output_shape': 'tissue_rgb.shape[:2]', 'order': '(0)', 'preserve_range': '(True)'}), '(mask_out == 0, output_shape=tissue_rgb.shape[:2], order=0,\n preserve_range=True)\n', (2033, 2117), False, 'from skimage.transform import resize\n'), ((4507, 4522), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4520, 4522), False, 'import unittest\n'), ((2812, 2923), 'histomicstk.preprocessing.color_normalization.deconvolution_based_normalization.deconvolution_based_normalization', 'deconvolution_based_normalization', (['tissue_rgb'], {'stain_unmixing_routine_params': 'stain_unmixing_routine_params'}), '(tissue_rgb, stain_unmixing_routine_params\n 
=stain_unmixing_routine_params)\n', (2845, 2923), False, 'from histomicstk.preprocessing.color_normalization.deconvolution_based_normalization import deconvolution_based_normalization\n'), ((3196, 3325), 'histomicstk.preprocessing.color_normalization.deconvolution_based_normalization.deconvolution_based_normalization', 'deconvolution_based_normalization', (['tissue_rgb'], {'W_target': 'W_target', 'stain_unmixing_routine_params': 'stain_unmixing_routine_params'}), '(tissue_rgb, W_target=W_target,\n stain_unmixing_routine_params=stain_unmixing_routine_params)\n', (3229, 3325), False, 'from histomicstk.preprocessing.color_normalization.deconvolution_based_normalization import deconvolution_based_normalization\n'), ((3597, 3746), 'histomicstk.preprocessing.color_normalization.deconvolution_based_normalization.deconvolution_based_normalization', 'deconvolution_based_normalization', (['tissue_rgb'], {'W_target': 'W_target', 'mask_out': 'mask_out', 'stain_unmixing_routine_params': 'stain_unmixing_routine_params'}), '(tissue_rgb, W_target=W_target, mask_out=\n mask_out, stain_unmixing_routine_params=stain_unmixing_routine_params)\n', (3630, 3746), False, 'from histomicstk.preprocessing.color_normalization.deconvolution_based_normalization import deconvolution_based_normalization\n')] |
# mathematical imports -
import numpy as np
# pytorch imports -
import torch
import torch.utils.data as data
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def createDiff(data):
dataOut = np.zeros(shape=(data.shape[0],data.shape[1]-1))
for i in range(data.shape[1]-1):
dataOut[:, i] = data[:, i+1] - data[:, i]
return dataOut
class DataSetUber:
def __init__(self, dataIn, lenSeqIn, lenSeqOut):
self.data = dataIn
self.lenSeqIn = lenSeqIn
self.lenSeqOut = lenSeqOut
def __getitem__(self, item):
temp = np.zeros(shape=(self.data.shape[0], self.lenSeqIn))
if(item-self.lenSeqIn > 0):
temp = self.data[:, item-self.lenSeqIn:item]
else:
temp[:, self.lenSeqIn-item:] = self.data[:, 0:item]
tempOut = np.zeros(shape=(self.data.shape[0], self.lenSeqOut))
try:
if item + 1 + self.lenSeqOut < self.data.shape[1]:
tempOut = self.data[:, item+1:item+1+self.lenSeqOut].reshape(self.data.shape[0], self.lenSeqOut)
else:
numFuture = self.data.shape[1] - (item+1)
tempOut[:, 0:numFuture] = self.data[:, item+1:] # taking the last part of the sequence
except:
print('couldnt find correct output sequence!!!')
return torch.Tensor(temp, device=device), torch.Tensor(tempOut, device=device)
def __len__(self):
return self.data.shape[1]
class DataSetLstm:
def __init__(self, dataIn, lenSeqIn):
self.data = dataIn
self.lenSeqIn = lenSeqIn
def __getitem__(self, item):
temp = np.zeros(shape=(self.data.shape[0], self.lenSeqIn))
if(item-self.lenSeqIn > 0):
temp = self.data[:, item-self.lenSeqIn:item]
else:
temp[:, self.lenSeqIn-item:] = self.data[:, 0:item]
tempOut = np.zeros(shape=(self.data.shape[0], self.lenSeqIn))
try:
if (item + 1 <= self.data.shape[1]) and (item + 1 - self.lenSeqIn > 0):
tempOut = self.data[:, item + 1 - self.lenSeqIn: item + 1].reshape(self.data.shape[0], self.lenSeqIn)
elif (item + 1 <= self.data.shape[1]) and (item + 1 - self.lenSeqIn < 0):
tempOut[:, self.lenSeqIn - item - 1:] = self.data[:, 0:item + 1]
elif (item + 1 > self.data.shape[1]) and (item + 1 - self.lenSeqIn > 0):
tempOut[:, 0:self.lenSeqIn-1] = self.data[:, item + 1 - self.lenSeqIn: item] # taking the last part of the sequence
except:
print('couldnt find correct output sequence!!!')
return torch.Tensor(temp, device=device), torch.Tensor(tempOut, device=device)
def __len__(self):
return self.data.shape[1]
class DataSetCnn:
def __init__(self, dataIn, lenSeqIn):
self.lengthX = dataIn.shape[0]
self.lengthY = dataIn.shape[1]
self.lengthT = dataIn.shape[2]
self.data = dataIn.reshape(self.lengthT, self.lengthX, self.lengthY)
self.lenSeqIn = lenSeqIn
def __getitem__(self, item):
temp = np.zeros(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))
if (item - self.lenSeqIn > 0):
temp = self.data[item - self.lenSeqIn:item, :, :]
else:
temp[self.lenSeqIn - item:, :, :] = self.data[0:item, :, :]
xArr = temp
tempOut = np.zeros(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))
try:
if (item + 1 <= self.data.shape[0]) and (item + 1 - self.lenSeqIn > 0):
tempOut = self.data[item + 1 - self.lenSeqIn: item + 1, :, :].reshape(self.lenSeqIn, self.data.shape[1], self.data.shape[2])
elif (item + 1 <= self.data.shape[0]) and (item + 1 - self.lenSeqIn <= 0):
tempOut[self.lenSeqIn - item - 1:, :, :] = self.data[0:item + 1, :, :]
elif (item + 1 > self.data.shape[0]) and (item + 1 - self.lenSeqIn > 0):
tempOut[0:self.lenSeqIn - 1, :, :] = self.data[item + 1 - self.lenSeqIn: item, :, :] # taking the last part of the sequence
except:
print('couldnt find correct output sequence!!!')
try:
yArr = tempOut[-1, :, :]
except:
print("couldnt take last value of time sequence for output!!!")
return torch.Tensor(xArr, device=device), torch.Tensor(yArr, device=device).type(torch.long)
def __len__(self):
return self.data.shape[0]
class DataSetCnn_LSTM:
def __init__(self, dataIn, lenSeqIn, sizeCnn):
self.lengthX = dataIn.shape[0]
self.lengthY = dataIn.shape[1]
self.lengthT = dataIn.shape[2]
self.sizeCnn = sizeCnn
self.data = dataIn.reshape(self.lengthT, self.lengthX, self.lengthY)
self.lenSeqIn = lenSeqIn
def __getitem__(self, item):
temp = np.zeros(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))
if (item - self.lenSeqIn > 0):
temp = self.data[item - self.lenSeqIn:item, :, :]
else:
temp[self.lenSeqIn - item:, :, :] = self.data[0:item, :, :]
temp2 = np.zeros(shape=(self.lenSeqIn, self.sizeCnn , self.sizeCnn, self.lengthX*self.lengthY))
tempPadded = np.zeros(shape=(temp.shape[0], temp.shape[1]+self.sizeCnn, temp.shape[2]+self.sizeCnn))
tempPadded[:, self.sizeCnn: self.sizeCnn + temp.shape[1], self.sizeCnn : self.sizeCnn + temp.shape[2]] = temp
k = 0
for i in range(self.lengthX):
for j in range(self.lengthY):
try:
temp2[:, :, :, k] = tempPadded[:, i:i + self.sizeCnn, j : j+self.sizeCnn]
except:
print("couldnt create input for cnn ")
k += 1
xArr = temp2
tempOut = np.zeros(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))
try:
if (item + 1 <= self.data.shape[0]) and (item + 1 - self.lenSeqIn > 0):
tempOut = self.data[item + 1 - self.lenSeqIn: item + 1, :, :].reshape(self.lenSeqIn, self.data.shape[1], self.data.shape[2])
elif (item + 1 <= self.data.shape[0]) and (item + 1 - self.lenSeqIn <= 0):
tempOut[self.lenSeqIn - item - 1:, :, :] = self.data[0:item + 1, :, :]
elif (item + 1 > self.data.shape[0]) and (item + 1 - self.lenSeqIn > 0):
tempOut[0:self.lenSeqIn - 1, :, :] = self.data[item + 1 - self.lenSeqIn: item, :, :] # taking the last part of the sequence
except:
print('couldnt find correct output sequence!!!')
try:
yArr = tempOut[-1, :, :]
except:
print("couldnt take last value of time sequence for output!!!")
return torch.Tensor(xArr, device=device), torch.Tensor(yArr, device=device).type(torch.long)
def __len__(self):
return self.data.shape[0]
def main():
path = '/Users/chanaross/dev/Thesis/UberData/'
fileName = '3D_UpdatedGrid_5min_250Grid_LimitedEventsMat_allData.p'
dataInput = np.load(path + fileName)
xmin = 0
xmax = 20
ymin = 0
ymax = 20
dataInput = dataInput[xmin:xmax, ymin:ymax, :] # shrink matrix size for fast training in order to test model
# define important sizes for network -
x_size = dataInput.shape[0]
y_size = dataInput.shape[1]
dataSize = dataInput.shape[2]
num_train = int((1 - 0.2) * dataSize)
data_train = dataInput[:, :, 0:num_train]
dataset_uber = DataSetCnn_LSTM(data_train, 5, 7)
dataloader_uber = data.DataLoader(dataset=dataset_uber, batch_size=300, shuffle=True)
# a = list(iter(dataset_uber))
for i_batch, sample_batched in enumerate(dataloader_uber):
print(i_batch, sample_batched[0].size(),
sample_batched[1].size())
return
if __name__ == '__main__':
main()
print('Done.')
| [
"torch.Tensor",
"numpy.zeros",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"numpy.load"
] | [((225, 275), 'numpy.zeros', 'np.zeros', ([], {'shape': '(data.shape[0], data.shape[1] - 1)'}), '(shape=(data.shape[0], data.shape[1] - 1))\n', (233, 275), True, 'import numpy as np\n'), ((7075, 7099), 'numpy.load', 'np.load', (['(path + fileName)'], {}), '(path + fileName)\n', (7082, 7099), True, 'import numpy as np\n'), ((7573, 7640), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'dataset_uber', 'batch_size': '(300)', 'shuffle': '(True)'}), '(dataset=dataset_uber, batch_size=300, shuffle=True)\n', (7588, 7640), True, 'import torch.utils.data as data\n'), ((149, 174), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (172, 174), False, 'import torch\n'), ((598, 649), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data.shape[0], self.lenSeqIn)'}), '(shape=(self.data.shape[0], self.lenSeqIn))\n', (606, 649), True, 'import numpy as np\n'), ((840, 892), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data.shape[0], self.lenSeqOut)'}), '(shape=(self.data.shape[0], self.lenSeqOut))\n', (848, 892), True, 'import numpy as np\n'), ((1658, 1709), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data.shape[0], self.lenSeqIn)'}), '(shape=(self.data.shape[0], self.lenSeqIn))\n', (1666, 1709), True, 'import numpy as np\n'), ((1900, 1951), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data.shape[0], self.lenSeqIn)'}), '(shape=(self.data.shape[0], self.lenSeqIn))\n', (1908, 1951), True, 'import numpy as np\n'), ((3117, 3188), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.lenSeqIn, self.data.shape[1], self.data.shape[2])'}), '(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))\n', (3125, 3188), True, 'import numpy as np\n'), ((3414, 3485), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.lenSeqIn, self.data.shape[1], self.data.shape[2])'}), '(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))\n', (3422, 3485), True, 'import numpy as np\n'), ((4885, 4956), 'numpy.zeros', 'np.zeros', ([], {'shape': 
'(self.lenSeqIn, self.data.shape[1], self.data.shape[2])'}), '(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))\n', (4893, 4956), True, 'import numpy as np\n'), ((5160, 5252), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.lenSeqIn, self.sizeCnn, self.sizeCnn, self.lengthX * self.lengthY)'}), '(shape=(self.lenSeqIn, self.sizeCnn, self.sizeCnn, self.lengthX *\n self.lengthY))\n', (5168, 5252), True, 'import numpy as np\n'), ((5269, 5364), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp.shape[0], temp.shape[1] + self.sizeCnn, temp.shape[2] + self.sizeCnn)'}), '(shape=(temp.shape[0], temp.shape[1] + self.sizeCnn, temp.shape[2] +\n self.sizeCnn))\n', (5277, 5364), True, 'import numpy as np\n'), ((5830, 5901), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.lenSeqIn, self.data.shape[1], self.data.shape[2])'}), '(shape=(self.lenSeqIn, self.data.shape[1], self.data.shape[2]))\n', (5838, 5901), True, 'import numpy as np\n'), ((1354, 1387), 'torch.Tensor', 'torch.Tensor', (['temp'], {'device': 'device'}), '(temp, device=device)\n', (1366, 1387), False, 'import torch\n'), ((1389, 1425), 'torch.Tensor', 'torch.Tensor', (['tempOut'], {'device': 'device'}), '(tempOut, device=device)\n', (1401, 1425), False, 'import torch\n'), ((2647, 2680), 'torch.Tensor', 'torch.Tensor', (['temp'], {'device': 'device'}), '(temp, device=device)\n', (2659, 2680), False, 'import torch\n'), ((2682, 2718), 'torch.Tensor', 'torch.Tensor', (['tempOut'], {'device': 'device'}), '(tempOut, device=device)\n', (2694, 2718), False, 'import torch\n'), ((4359, 4392), 'torch.Tensor', 'torch.Tensor', (['xArr'], {'device': 'device'}), '(xArr, device=device)\n', (4371, 4392), False, 'import torch\n'), ((6777, 6810), 'torch.Tensor', 'torch.Tensor', (['xArr'], {'device': 'device'}), '(xArr, device=device)\n', (6789, 6810), False, 'import torch\n'), ((4394, 4427), 'torch.Tensor', 'torch.Tensor', (['yArr'], {'device': 'device'}), '(yArr, device=device)\n', (4406, 4427), False, 'import torch\n'), 
((6812, 6845), 'torch.Tensor', 'torch.Tensor', (['yArr'], {'device': 'device'}), '(yArr, device=device)\n', (6824, 6845), False, 'import torch\n')] |
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot
import cv2
class DataAugmentation:
shift = 0.15;
datagen = None;
# constructor
def __init__(self):
# define data preparation
self.datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=self.shift,
height_shift_range=self.shift,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
dim_ordering="tf");
def runDataAugmentation(self, batch):
frames = [];
labels = [];
#self.datagen.fit(batch[0]);
for f, l in self.datagen.flow(batch[0], batch[1], batch_size=64):
frames.append(f);
labels.append(l);
break;
frames = np.array(frames);
labels = np.array(labels);
frames = np.squeeze(frames);
labels = np.squeeze(labels);
#print(frames.shape);
#print(labels.shape);
return frames, labels;
def runOverSampling(self, sample):
s = [];
# crops
s1 = sample;
s2 = sample[0:56, 0:56];
s3 = sample[(64-56):64, 0:56];
s4 = sample[(64-56):64, (64-56):64];
s5 = sample[0:56, (64-56):64];
# mirrors
s6 = cv2.flip(sample, 0);
s7 = cv2.flip(sample, 1);
s8 = cv2.flip(s6, 1);
s9 = cv2.flip(s7, 1);
# center
s10 = sample[4:60, 4:60];
s.append(s1);
s.append(s2);
s.append(s3);
s.append(s4);
s.append(s5);
s.append(s6);
s.append(s7);
s.append(s8);
s.append(s9);
s.append(s10);
s_resized = [];
for i in range(0, len(s)):
s_resized.append(cv2.resize(s[i], (64, 64)));
tmp = np.array(s_resized);
return tmp;
def showImages(self, batch):
#for i in range(0, 64):
# pyplot.subplot(8, 8, i+1)
# pyplot.imshow(batch[0][i])
# show the plot
#pyplot.show()
#self.datagen.fit(batch[0]);
for X_batch, y_batch in self.datagen.flow(batch[0], batch[1], batch_size=6):
#print(X_batch.shape);
#print(y_batch.shape);
# create a grid of 3x3 images
for i in range(0, 6):
pyplot.subplot(2, 3, (i+1))
pyplot.imshow(X_batch[i])
# show the plot
pyplot.show()
break
| [
"matplotlib.pyplot.imshow",
"cv2.flip",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.squeeze",
"numpy.array",
"cv2.resize",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((256, 686), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'samplewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'samplewise_std_normalization': '(False)', 'zca_whitening': '(False)', 'rotation_range': '(0.0)', 'width_shift_range': 'self.shift', 'height_shift_range': 'self.shift', 'shear_range': '(0.0)', 'zoom_range': '(0.0)', 'channel_shift_range': '(0.0)', 'fill_mode': '"""nearest"""', 'cval': '(0.0)', 'horizontal_flip': '(True)', 'vertical_flip': '(False)', 'rescale': 'None', 'dim_ordering': '"""tf"""'}), "(featurewise_center=False, samplewise_center=False,\n featurewise_std_normalization=False, samplewise_std_normalization=False,\n zca_whitening=False, rotation_range=0.0, width_shift_range=self.shift,\n height_shift_range=self.shift, shear_range=0.0, zoom_range=0.0,\n channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip\n =True, vertical_flip=False, rescale=None, dim_ordering='tf')\n", (274, 686), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1055, 1071), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (1063, 1071), True, 'import numpy as np\n'), ((1084, 1100), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1092, 1100), True, 'import numpy as np\n'), ((1113, 1131), 'numpy.squeeze', 'np.squeeze', (['frames'], {}), '(frames)\n', (1123, 1131), True, 'import numpy as np\n'), ((1146, 1164), 'numpy.squeeze', 'np.squeeze', (['labels'], {}), '(labels)\n', (1156, 1164), True, 'import numpy as np\n'), ((1466, 1485), 'cv2.flip', 'cv2.flip', (['sample', '(0)'], {}), '(sample, 0)\n', (1474, 1485), False, 'import cv2\n'), ((1494, 1513), 'cv2.flip', 'cv2.flip', (['sample', '(1)'], {}), '(sample, 1)\n', (1502, 1513), False, 'import cv2\n'), ((1522, 1537), 'cv2.flip', 'cv2.flip', (['s6', '(1)'], {}), '(s6, 1)\n', (1530, 1537), False, 'import cv2\n'), ((1546, 1561), 'cv2.flip', 'cv2.flip', (['s7', '(1)'], {}), '(s7, 1)\n', 
(1554, 1561), False, 'import cv2\n'), ((1871, 1890), 'numpy.array', 'np.array', (['s_resized'], {}), '(s_resized)\n', (1879, 1890), True, 'import numpy as np\n'), ((2363, 2376), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2374, 2376), False, 'from matplotlib import pyplot\n'), ((1833, 1859), 'cv2.resize', 'cv2.resize', (['s[i]', '(64, 64)'], {}), '(s[i], (64, 64))\n', (1843, 1859), False, 'import cv2\n'), ((2283, 2310), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(2)', '(3)', '(i + 1)'], {}), '(2, 3, i + 1)\n', (2297, 2310), False, 'from matplotlib import pyplot\n'), ((2315, 2340), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['X_batch[i]'], {}), '(X_batch[i])\n', (2328, 2340), False, 'from matplotlib import pyplot\n')] |
import glob, os, shutil, sys, json
from pathlib import Path
import pylab as plt
import trimesh
import open3d
from easydict import EasyDict
import numpy as np
from tqdm import tqdm
import utils
from features import MeshFPFH
FIX_BAD_ANNOTATION_HUMAN_15 = 0
# Labels for all datasets
# -----------------------
sigg17_part_labels = ['---', 'head', 'hand', 'lower-arm', 'upper-arm', 'body', 'upper-lag', 'lower-leg', 'foot']
sigg17_shape2label = {v: k for k, v in enumerate(sigg17_part_labels)}
model_net_labels = [
'bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor', 'night_stand', 'sofa', 'table', 'toilet',
'wardrobe', 'bookshelf', 'laptop', 'door', 'lamp', 'person', 'curtain', 'piano', 'airplane', 'cup',
'cone', 'tent', 'radio', 'stool', 'range_hood', 'car', 'sink', 'guitar', 'tv_stand', 'stairs',
'mantel', 'bench', 'plant', 'bottle', 'bowl', 'flower_pot', 'keyboard', 'vase', 'xbox', 'glass_box'
]
model_net_shape2label = {v: k for k, v in enumerate(model_net_labels)}
model_net_weights = [265, 1186, 2300, 407, 404, 956, 381, 1645, 755, 919, 145, 1002, 260, 204, 303, 248, 330, 617, 1874,
159, 213, 295, 267, 189, 303, 587, 332, 447, 483, 275, 817, 354, 623, 868, 119, 385, 412, 1216,
278, 183]
future3d_labels = ['Children Cabinet', 'Nightstand', 'Bookcase / jewelry Armoire','Wardrobe', 'Coffee Table', 'Corner/Side Table',
'Sideboard / Side Cabinet / Console Table','Wine Cabinet', 'TV Stand', 'Drawer Chest / Corner cabinet',
'Shelf', 'Round End Table', 'King-size Bed', 'Bunk Bed', 'Bed Frame', 'Single bed', 'Kids Bed', 'Dining Chair',
'Lounge Chair / Cafe Chair / Office Chair', 'Dressing Chair', 'Classic Chinese Chair','Barstool',
'Dressing Table', 'Dining Table', 'Desk', 'Three-Seat / Multi-seat Sofa', 'armchair', 'Loveseat Sofa',
'L-shaped Sofa', 'Lazy Sofa', 'Chaise Longue Sofa', 'Footstool / Sofastool / Bed End Stool / Stool',
'Pendant Lamp', 'Ceiling Lamp']
future3d_excluded_labels = ['Dressing Chair', 'Chaise Longue Sofa']
future3d_labels = [x.lower() for x in future3d_labels]
future3d_super_labels = [x.lower() for x in ['Cabinet/Shelf/Desk', 'Bed', 'Chair', 'Table', 'Sofa', 'Pier/Stool', 'Lighting']]
future_3d_labels_to_super = [12, 5, 5, 3, 6, 1, 2]
future3d_shape2label = {v: k for k, v in enumerate(future3d_labels)}
future3d_weights = [259, 1045, 262, 724, 1644, 1171, 1046, 169, 581, 643, 187, 168, 1260, 140, 395, 482, 139, 1139,
1411, 24, 32, 365, 291, 736, 198, 2479, 1741, 1169, 385, 204, 4, 885, 1915, 611]
cubes_labels = [
'apple', 'bat', 'bell', 'brick', 'camel',
'car', 'carriage', 'chopper', 'elephant', 'fork',
'guitar', 'hammer', 'heart', 'horseshoe', 'key',
'lmfish', 'octopus', 'shoe', 'spoon', 'tree',
'turtle', 'watch'
]
cubes_shape2label = {v: k for k, v in enumerate(cubes_labels)}
shrec11_labels = [
'armadillo', 'man', 'centaur', 'dinosaur', 'dog2',
'ants', 'rabbit', 'dog1', 'snake', 'bird2',
'shark', 'dino_ske', 'laptop', 'santa', 'flamingo',
'horse', 'hand', 'lamp', 'two_balls', 'gorilla',
'alien', 'octopus', 'cat', 'woman', 'spiders',
'camel', 'pliers', 'myScissor', 'glasses', 'bird1'
]
shrec11_shape2label = {v: k for k, v in enumerate(shrec11_labels)}
coseg_labels = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c',
]
coseg_shape2label = {v: k for k, v in enumerate(coseg_labels)}
# ShapenetCore-55
shapenet_synsetIds = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684',
'02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340',
'02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776',
'03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806',
'03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244',
'03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520',
'04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
shapenet_synset_to_label = {'02691156': 'airplane,aeroplane,plane',
'02747177': 'ashcan,trash can,garbage can,wastebin,ash bin,ash-bin,ashbin,dustbin,trash barrel,trash bin',
'02773838': 'bag,traveling bag,travelling bag,grip,suitcase',
'02801938': 'basket,handbasket',
'02808440': 'bathtub,bathing tub,bath,tub',
'02818832': 'bed',
'02828884': 'bench',
'02843684': 'birdhouse',
'02871439': 'bookshelf',
'02876657': 'bottle',
'02880940': 'bowl',
'02924116': 'bus,autobus,coach,charabanc,double-decker,jitney,motorbus,motorcoach,omnibus,passenger vehi',
'02933112': 'cabinet',
'02942699': 'camera,photographic camera',
'02946921': 'can,tin,tin can',
'02954340': 'cap',
'02958343': 'car,auto,automobile,machine,motorcar',
'03001627': 'chair',
'03046257': 'clock',
'03085013': 'computer keyboard,keypad',
'03207941': 'dishwasher,dish washer,dishwashing machine',
'03211117': 'display,video display',
'03261776': 'earphone,earpiece,headphone,phone',
'03325088': 'faucet,spigot',
'03337140': 'file,file cabinet,filing cabinet',
'03467517': 'guitar',
'03513137': 'helmet',
'03593526': 'jar',
'03624134': 'knife',
'03636649': 'lamp',
'03642806': 'laptop,laptop computer',
'03691459': 'loudspeaker,speaker,speaker unit,loudspeaker system,speaker system',
'03710193': 'mailbox,letter box',
'03759954': 'microphone,mike',
'03761084': 'microwave,microwave oven',
'03790512': 'motorcycle,bike',
'03797390': 'mug',
'03928116': 'piano,pianoforte,forte-piano',
'03938244': 'pillow',
'03948459': 'pistol,handgun,side arm,shooting iron',
'03991062': 'pot,flowerpot',
'04004475': 'printer,printing machine',
'04074963': 'remote control,remote',
'04090263': 'rifle',
'04099429': 'rocket,projectile',
'04225987': 'skateboard',
'04256520': 'sofa,couch,lounge',
'04330267': 'stove',
'04379243': 'table',
'04401088': 'telephone,phone,telephone set',
'02992529': 'cellular telephone,cellular phone,cellphone,cell,mobile phone',
'04460130': 'tower',
'04468005': 'train,railroad train',
'04530566': 'vessel,watercraft',
'04554684': 'washer,automatic washer,washing machine'}
shapenet_labels = [shapenet_synset_to_label[x] for x in shapenet_synsetIds]
shapenet_shapeid2label = {v: k for k, v in enumerate(shapenet_synsetIds)}
def rotate_vertices(vertices, angle):
y = angle * np.pi / 180
R = np.array(((np.cos(y),-np.sin(y), 0),
(np.sin(y), np.cos(y), 0),
(0 , 0, 1)),
dtype=vertices.dtype)
np.dot(vertices, R, out=vertices)
def calc_mesh_area(mesh):
t_mesh = trimesh.Trimesh(vertices=mesh['vertices'], faces=mesh['faces'], process=False)
mesh['area_faces'] = t_mesh.area_faces
mesh['area_vertices'] = np.zeros((mesh['vertices'].shape[0]))
for f_index, f in enumerate(mesh['faces']):
for v in f:
mesh['area_vertices'][v] += mesh['area_faces'][f_index] / f.size
def calc_vertex_labels_from_face_labels(mesh, face_labels):
vertices = mesh['vertices']
faces = mesh['faces']
all_vetrex_labels = [[] for _ in range(vertices.shape[0])]
vertex_labels = -np.ones((vertices.shape[0],), dtype=np.int)
n_classes = int(np.max(face_labels))
assert np.min(face_labels) == 1 # min label is 1, for compatibility to human_seg labels representation
v_labels_fuzzy = -np.ones((vertices.shape[0], n_classes))
for i in range(faces.shape[0]):
label = face_labels[i]
for f in faces[i]:
all_vetrex_labels[f].append(label)
for i in range(vertices.shape[0]):
counts = np.bincount(all_vetrex_labels[i])
vertex_labels[i] = np.argmax(counts)
v_labels_fuzzy[i] = np.zeros((1, n_classes))
for j in all_vetrex_labels[i]:
v_labels_fuzzy[i, int(j) - 1] += 1 / len(all_vetrex_labels[i])
return vertex_labels, v_labels_fuzzy
def prepare_edges_and_kdtree(mesh):
vertices = mesh['vertices']
faces = mesh['faces']
mesh['edges'] = [set() for _ in range(vertices.shape[0])]
for i in range(faces.shape[0]):
for v in faces[i]:
mesh['edges'][v] |= set(faces[i])
for i in range(vertices.shape[0]):
if i in mesh['edges'][i]:
mesh['edges'][i].remove(i)
mesh['edges'][i] = list(mesh['edges'][i])
max_vertex_degree = np.max([len(e) for e in mesh['edges']])
for i in range(vertices.shape[0]):
if len(mesh['edges'][i]) < max_vertex_degree:
mesh['edges'][i] += [-1] * (max_vertex_degree - len(mesh['edges'][i]))
mesh['edges'] = np.array(mesh['edges'], dtype=np.int32)
mesh['kdtree_query'] = []
t_mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
n_nbrs = min(10, vertices.shape[0] - 2)
for n in range(vertices.shape[0]):
d, i_nbrs = t_mesh.kdtree.query(vertices[n], n_nbrs)
i_nbrs_cleared = [inbr for inbr in i_nbrs if inbr != n and inbr < vertices.shape[0]]
if len(i_nbrs_cleared) > n_nbrs - 1:
i_nbrs_cleared = i_nbrs_cleared[:n_nbrs - 1]
mesh['kdtree_query'].append(np.array(i_nbrs_cleared, dtype=np.int32))
mesh['kdtree_query'] = np.array(mesh['kdtree_query'])
assert mesh['kdtree_query'].shape[1] == (n_nbrs - 1), 'Number of kdtree_query is wrong: ' + str(mesh['kdtree_query'].shape[1])
def prepare_face_edges(mesh):
tmesh = trimesh.Trimesh(mesh['vertices'], mesh['faces'])
mesh['faces_edges'] = tmesh.face_adjacency
mesh['faces_edges_angles'] = tmesh.face_adjacency_angles
def add_fields_and_dump_model(mesh_data, fileds_needed, out_fn, dataset_name, dump_model=True):
m = {}
for k, v in mesh_data.items():
if k in fileds_needed:
m[k] = v
for field in fileds_needed:
if field not in m.keys():
if field == 'labels':
m[field] = np.zeros((0,))
if field == 'dataset_name':
m[field] = dataset_name
if field == 'walk_cache':
m[field] = np.zeros((0,))
if field == 'kdtree_query' or field == 'edges':
prepare_edges_and_kdtree(m)
if field == 'tri_centers':
t_mesh = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
m[field] = t_mesh.triangles_center
if field == 'tri_edges':
prepare_face_edges(m)
if field == 'vertex_normals':
t_mesh = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
m[field] = t_mesh.vertex_normals
if field == 'mfpfh':
fph = MeshFPFH(EasyDict(m), 2)
m[field] = fph.calc_fpfh()
if dump_model:
np.savez(out_fn, **m)
return m
def get_sig17_seg_bm_labels(mesh, file, seg_path):
# Finding the best match file name .. :
in_to_check = file.replace('obj', 'txt')
in_to_check = in_to_check.replace('off', 'txt')
in_to_check = in_to_check.replace('_fix_orientation', '')
if in_to_check.find('MIT_animation') != -1 and in_to_check.split('/')[-1].startswith('mesh_'):
in_to_check = '/'.join(in_to_check.split('/')[:-2])
in_to_check = in_to_check.replace('MIT_animation/meshes_', 'mit/mit_')
in_to_check += '.txt'
elif in_to_check.find('/scape/') != -1:
in_to_check = '/'.join(in_to_check.split('/')[:-1])
in_to_check += '/scape.txt'
elif in_to_check.find('/faust/') != -1:
in_to_check = '/'.join(in_to_check.split('/')[:-1])
in_to_check += '/faust.txt'
seg_full_fn = []
for fn in Path(seg_path).rglob('*.txt'):
tmp = str(fn)
tmp = tmp.replace('/segs/', '/meshes/')
tmp = tmp.replace('_full', '')
tmp = tmp.replace('shrec_', '')
tmp = tmp.replace('_corrected', '')
if tmp == in_to_check:
seg_full_fn.append(str(fn))
if len(seg_full_fn) == 1:
seg_full_fn = seg_full_fn[0]
else:
print('\nin_to_check', in_to_check)
print('tmp', tmp)
raise Exception('!!')
face_labels = np.loadtxt(seg_full_fn)
if FIX_BAD_ANNOTATION_HUMAN_15 and file.endswith('test/shrec/15.off'):
face_center = []
for f in mesh.faces:
face_center.append(np.mean(mesh.vertices[f, :], axis=0))
face_center = np.array(face_center)
idxs = (face_labels == 6) * (face_center[:, 0] < 0) * (face_center[:, 1] < -0.4)
face_labels[idxs] = 7
np.savetxt(seg_full_fn + '.fixed.txt', face_labels.astype(np.int))
return face_labels
def get_labels(dataset_name, mesh, file, fn2labels_map=None):
  """Return (model_label, vertex_labels, v_labels_fuzzy) for one mesh.

  Three label sources are supported:
  - 'faust': per-face labels from a fixed .npy file, converted to vertex labels.
  - 'coseg*' / 'human_seg_from_meshcnn': MeshCNN-style .eseg / .seseg edge
    label files located relative to `file`; edge labels are accumulated onto
    the edge endpoints and each vertex takes its majority label.
  - otherwise: a classification label derived from the file name through the
    dataset's name->label map; vertex labels are empty.
  """
  v_labels_fuzzy = np.zeros((0,))
  if dataset_name == 'faust':
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    face_labels = np.load('faust_labels/faust_part_segmentation.npy').astype(int)
    vertex_labels, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh, face_labels)
    model_label = np.zeros((0,))
    return model_label, vertex_labels, v_labels_fuzzy
  elif dataset_name.startswith('coseg') or dataset_name == 'human_seg_from_meshcnn':
    labels_fn = '/'.join(file.split('/')[:-2]) + '/seg/' + file.split('/')[-1].split('.')[-2] + '.eseg'
    e_labels = np.loadtxt(labels_fn)
    v_labels = [[] for _ in range(mesh['vertices'].shape[0])]
    faces = mesh['faces']

    fuzzy_labels_fn = '/'.join(file.split('/')[:-2]) + '/sseg/' + file.split('/')[-1].split('.')[-2] + '.seseg'
    seseg_labels = np.loadtxt(fuzzy_labels_fn)
    v_labels_fuzzy = np.zeros((mesh['vertices'].shape[0], seseg_labels.shape[1]))

    edge2key = dict()
    edges = []
    edges_count = 0
    for face_id, face in enumerate(faces):
      faces_edges = []
      for i in range(3):
        cur_edge = (face[i], face[(i + 1) % 3])
        faces_edges.append(cur_edge)
      for idx, edge in enumerate(faces_edges):
        edge = tuple(sorted(list(edge)))
        faces_edges[idx] = edge
        if edge not in edge2key:
          # First occurrence of this edge: its label index is edges_count, in
          # the order edges are discovered (matches the .eseg / .seseg rows).
          v_labels_fuzzy[edge[0]] += seseg_labels[edges_count]
          v_labels_fuzzy[edge[1]] += seseg_labels[edges_count]
          edge2key[edge] = edges_count
          edges.append(list(edge))
          v_labels[edge[0]].append(e_labels[edges_count])
          v_labels[edge[1]].append(e_labels[edges_count])
          edges_count += 1

    assert np.max(np.sum(v_labels_fuzzy != 0, axis=1)) <= 3, 'Number of non-zero labels must not acceeds 3!'

    # Majority vote over the edge labels incident to each vertex.
    vertex_labels = []
    for l in v_labels:
      l2add = np.argmax(np.bincount(l))
      vertex_labels.append(l2add)
    vertex_labels = np.array(vertex_labels)
    model_label = np.zeros((0,))
    return model_label, vertex_labels, v_labels_fuzzy
  else:
    tmp = file.split('/')[-1]
    model_name = '_'.join(tmp.split('_')[:-1])
    if dataset_name.lower().startswith('modelnet40_preprocessed'):
      model_label = model_net_shape2label['_'.join(model_name.split('_')[:-1])]
    elif dataset_name.lower().startswith('modelnet'):
      model_label = model_net_shape2label[model_name]
    elif dataset_name.lower().startswith('cubes'):
      model_label = cubes_shape2label[model_name]
    elif dataset_name.lower().startswith('shrec11'):
      model_name = file.split('/')[-3]
      if fn2labels_map is None:
        model_label = shrec11_shape2label[model_name]
      else:
        # Map by the numeric model id embedded in the file name ('T<n>.<ext>').
        file_index = int(file.split('.')[-2].split('T')[-1])
        model_label = fn2labels_map[file_index]
    else:
      raise Exception('Cannot find labels for the dataset')
    vertex_labels = np.zeros((0,))
    return model_label, vertex_labels, v_labels_fuzzy
def fix_labels_by_dist(vertices, orig_vertices, labels_orig):
  """Give every vertex the label of its nearest neighbor in orig_vertices."""
  nearest = [np.argmin(np.linalg.norm(v - orig_vertices, axis=1)) for v in vertices]
  return np.array([labels_orig[j] for j in nearest], dtype=float)
def get_faces_belong_to_vertices(vertices, faces):
  """Return (as np.ndarray) the faces that touch at least one of the given vertices."""
  kept = [face for face in faces if any(v in vertices for v in face)]
  return np.array(kept)
def remesh(mesh_orig, target_n_faces, add_labels=False, labels_orig=None):
  """Simplify mesh_orig to ~target_n_faces via quadric decimation.

  Returns (mesh, labels, str_to_add) where str_to_add encodes whether the mesh
  was simplified or left unchanged; labels are re-assigned to the simplified
  vertices by nearest-neighbor when add_labels and labels_orig are given.
  """
  labels = labels_orig
  n_faces_orig = np.asarray(mesh_orig.triangles).shape[0]
  if target_n_faces >= n_faces_orig:
    # Already at or below the target resolution: keep the original mesh.
    return mesh_orig, labels, '_not_changed_' + str(n_faces_orig)
  simplified = mesh_orig.simplify_quadric_decimation(target_n_faces)
  simplified = simplified.remove_unreferenced_vertices()
  if add_labels and labels_orig.size:
    labels = fix_labels_by_dist(np.asarray(simplified.vertices),
                                np.asarray(mesh_orig.vertices), labels_orig)
  return simplified, labels, '_simplified_to_' + str(target_n_faces)
def load_meshes(model_fns):
  """Load every mesh matching the glob pattern and merge them into one open3d mesh."""
  all_vertices = []
  all_faces = []
  for path in glob.glob(model_fns):
    loaded = trimesh.load_mesh(path)
    # Offset face indices by the vertices already accumulated.
    offset = len(all_vertices)
    all_vertices += loaded.vertices.tolist()
    all_faces += (loaded.faces + offset).tolist()
  merged = open3d.geometry.TriangleMesh()
  merged.vertices = open3d.utility.Vector3dVector(all_vertices)
  merged.triangles = open3d.utility.Vector3iVector(all_faces)
  return merged
def _trimesh_to_open3d(mesh_):
  """Convert a trimesh mesh to an open3d TriangleMesh (vertices + triangles only)."""
  mesh = open3d.geometry.TriangleMesh()
  mesh.vertices = open3d.utility.Vector3dVector(mesh_.vertices)
  mesh.triangles = open3d.utility.Vector3iVector(mesh_.faces)
  return mesh

def load_mesh(model_fn, classification=True):
  """Load a mesh file into an open3d TriangleMesh with dataset-specific cleanup.

  Args:
    model_fn: path to the mesh file.
    classification: when True (non-3D-FUTURE files), load with trimesh
      processing enabled ("remove vertices that share position") and drop
      duplicate faces.

  Returns:
    open3d.geometry.TriangleMesh with duplicated/unreferenced/degenerate
    elements removed (except for the early-return Scene case, kept as before).
  """
  if 'FUTURE' not in model_fn:  # To load and clean up mesh - "remove vertices that share position"
    if classification:
      mesh_ = trimesh.load_mesh(model_fn, process=True)
      if type(mesh_) is trimesh.Scene:
        # Multi-geometry scene: let open3d read the file directly, no cleanup.
        mesh = open3d.io.read_triangle_mesh(model_fn)
        return mesh
      else:
        mesh_.remove_duplicate_faces()
    else:
      mesh_ = trimesh.load_mesh(model_fn, process=False)
    mesh = _trimesh_to_open3d(mesh_)
  else:
    if not '26b439df-fba5-3b38-b6c5-bc6a4c1fb0a0' in model_fn:  # list of mixed-faces (four sided faces + three sided in same .obj)
      mesh = open3d.io.read_triangle_mesh(model_fn)
    else:
      mesh_ = trimesh.load_mesh(model_fn, process=False)
      mesh = _trimesh_to_open3d(mesh_)
  mesh.remove_duplicated_vertices()
  mesh.remove_unreferenced_vertices()
  mesh.remove_duplicated_triangles()
  mesh.remove_degenerate_triangles()
  return mesh
def create_tmp_dataset(model_fn, p_out, n_target_faces):
  """Build a single temporary npz dataset entry ('tmp') from one mesh file."""
  needed_fields = ['vertices', 'faces', 'edge_features', 'edges_map', 'edges', 'kdtree_query',
                   'label', 'labels', 'dataset_name']
  if not os.path.isdir(p_out):
    os.makedirs(p_out)
  simplified, _, _ = remesh(load_mesh(model_fn), n_target_faces)
  # Dummy all-zero vertex labels for the temporary model.
  labels = np.zeros((np.asarray(simplified.vertices).shape[0],), dtype=np.int16)
  mesh_data = EasyDict({'vertices': np.asarray(simplified.vertices),
                        'faces': np.asarray(simplified.triangles),
                        'label': 0, 'labels': labels})
  add_fields_and_dump_model(mesh_data, needed_fields, p_out + '/tmp', 'tmp')
def prepare_single_mesh(params):
  """Process one mesh file into npz dataset entries (one per target face count).

  Args:
    params: sequence of
      (file, classification, p_out, fn_prefix, n_target_faces, add_labels,
       label, dataset_name).
  """
  file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name = params
  fileds_needed = ['vertices', 'faces', 'edges', 'kdtree_query',
                   'label', 'labels', 'dataset_name', 'vertex_normals', 'tri_centers', 'tri_edges']
  out_fn = p_out + '/' + fn_prefix + os.path.split(file)[1].split('.')[0]
  try:
    mesh = load_mesh(file, classification=classification)
  except Exception:
    # BUG FIX: report and skip this model. Previously execution fell through
    # and crashed on the undefined `mesh` below with a NameError.
    print('failed loading mesh {}'.format(file))
    return
  mesh_orig = mesh
  mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
  # BUG FIX: default to no per-vertex labels; previously labels_orig was left
  # undefined when label is None and add_labels is falsy (NameError below).
  labels_orig = None
  if label is None:
    if add_labels:
      # A list means an explicit model-index -> label map; otherwise labels
      # are derived from the dataset name / file path.
      fn2labels_map = add_labels if type(add_labels) is list else None
      label, labels_orig, v_labels_fuzzy = get_labels(dataset_name, mesh_data, file, fn2labels_map=fn2labels_map)
    else:
      label = np.zeros((0,))
  for this_target_n_faces in n_target_faces:
    mesh, labels, str_to_add = remesh(mesh_orig, this_target_n_faces, add_labels=add_labels, labels_orig=labels_orig)
    if 'modelnet40_retrieval' in p_out:
      # Retrieval augmentation: dump 12 rotated copies (every 30 degrees).
      for ang in np.linspace(-180, 180, int(360 / 30), endpoint=False):
        verts = np.asarray(mesh.vertices)
        rotate_vertices(verts, ang)
        mesh_data = EasyDict(
            {'vertices': verts, 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
        out_fc_full = out_fn + '_{:03d}'.format(int(ang) + 180) + str_to_add
        if os.path.exists(out_fc_full):
          continue
        try:
          m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
        except Exception:
          print('debug {}'.format(out_fc_full))
    else:
      verts = np.asarray(mesh.vertices)
      mesh_data = EasyDict(
          {'vertices': verts, 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
      out_fc_full = out_fn + str_to_add
      if os.path.exists(out_fc_full):
        continue
      try:
        m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
      except Exception:
        print('debug {}'.format(out_fc_full))
def prepare_directory_from_scratch(dataset_name, pathname_expansion=None, p_out=None, n_target_faces=None, add_labels=True,
                                   size_limit=np.inf, fn_prefix='', verbose=True, classification=True, label=None, filenames=None):
  """Prepare every mesh matched by pathname_expansion (or the explicit filenames list) into p_out."""
  if not os.path.isdir(p_out):
    os.makedirs(p_out)
  if filenames is None:
    filenames = sorted(glob.glob(pathname_expansion))
  if not filenames:
    print('debug')
  if len(filenames) > size_limit:
    filenames = filenames[:size_limit]
  for file in filenames:
    prepare_single_mesh([file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name])
# from multiprocessing import Pool
# pool = Pool(8)
#
# inputs = [[file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name] for file in filenames]
# pool.map(prepare_single_mesh, inputs)
# pool.close()
# pool.join()
# for file in tqdm(filenames, disable=1 - verbose):
# out_fn = p_out + '/' + fn_prefix + os.path.split(file)[1].split('.')[0]
# mesh = load_mesh(file, classification=classification)
# mesh_orig = mesh
# mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
# if label is None:
# if add_labels:
# if type(add_labels) is list:
# fn2labels_map = add_labels
# else:
# fn2labels_map = None
# label, labels_orig, v_labels_fuzzy = get_labels(dataset_name, mesh_data, file, fn2labels_map=fn2labels_map)
# else:
# label = np.zeros((0, ))
# else:
# labels_orig = None
# for this_target_n_faces in n_target_faces:
# mesh, labels, str_to_add = remesh(mesh_orig, this_target_n_faces, add_labels=add_labels, labels_orig=labels_orig)
# # str_to_add = '_simplified_{}'.format(this_target_n_faces)
# mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
# # mesh_data['labels_fuzzy'] = v_labels_fuzzy
# out_fc_full = out_fn + str_to_add
# if os.path.exists(out_fc_full):
# continue
# try:
# m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
# except:
# print('debug')
# ------------------------------------------------------- #
def prepare_modelnet40():
  """Prepare the pre-decimated ModelNet40 (1k/2k/4k faces) into npz dataset files."""
  target_faces = [1000, 2000, 4000]
  home = os.path.expanduser('~')
  out_dir = home + '/mesh_walker/datasets/modelnet40_1k2k4k/'
  for _, class_name in tqdm(enumerate(model_net_labels)):
    for part in ['test', 'train']:
      src = home + '/Databases/ModelNet40_1k2k4k/' + class_name + '/' + part + '/'
      prepare_directory_from_scratch('modelnet40_preprocessed', pathname_expansion=src + '*.off',
                                     p_out=out_dir, add_labels='modelnet', n_target_faces=target_faces,
                                     fn_prefix=part + '_', verbose=False)
def prepare_modelnet40_retrieval():
  """Prepare two retrieval splits of ModelNet40: 80 train / 20 test models per class.

  Split 0 takes the files in sorted order; split 1 takes a random permutation.
  """
  target_faces = [1000, 2000, 4000]
  home = os.path.expanduser('~')
  for _, class_name in tqdm(enumerate(model_net_labels)):
    # split train files into 80 for train and 20 for test
    train_dir = home + '/Databases/ModelNet40/' + class_name + '/train/'
    test_dir = home + '/Databases/ModelNet40/' + class_name + '/test/'
    train_files = [os.path.join(train_dir, f) for f in os.listdir(train_dir) if f.endswith('.off')]
    test_files = [os.path.join(test_dir, f) for f in os.listdir(test_dir) if f.endswith('.off')]
    for split_idx in range(2):
      if split_idx == 0:
        train_files.sort()
        test_files.sort()
      else:
        train_files = np.random.permutation(train_files)
        test_files = np.random.permutation(test_files)
      chosen = {'train': train_files[:80],
                'test': test_files[:20]}
      for part in ['test', 'train']:
        src = home + '/Databases/ModelNet40/' + class_name + '/' + part + '/'
        out_dir = home + '/mesh_walker/datasets/modelnet40_retrieval_split_{}'.format(split_idx)
        prepare_directory_from_scratch('modelnet',
                                       pathname_expansion=src + '*.off',
                                       p_out=out_dir, add_labels='modelnet', n_target_faces=target_faces,
                                       fn_prefix=part + '_', verbose=False, filenames=chosen[part])
def prepare_modelnet40_80_20():
  """Prepare a deterministic 80-train / 20-test per-class split of ModelNet40."""
  target_faces = [1000, 2000, 4000]
  home = os.path.expanduser('~')
  for _, class_name in tqdm(enumerate(model_net_labels)):
    # split train files into 80 for train and 20 for test
    train_dir = home + '/Databases/ModelNet40/' + class_name + '/train/'
    test_dir = home + '/Databases/ModelNet40/' + class_name + '/test/'
    train_files = sorted(os.path.join(train_dir, f) for f in os.listdir(train_dir) if f.endswith('.off'))
    test_files = sorted(os.path.join(test_dir, f) for f in os.listdir(test_dir) if f.endswith('.off'))
    chosen = {'train': train_files[:80],
              'test': test_files[:20]}
    for part in ['test', 'train']:
      src = home + '/Databases/ModelNet40/' + class_name + '/' + part + '/'
      out_dir = home + '/mesh_walker/datasets/modelnet40_80_20'
      prepare_directory_from_scratch('modelnet',
                                     pathname_expansion=src + '*.off',
                                     p_out=out_dir, add_labels='modelnet', n_target_faces=target_faces,
                                     fn_prefix=part + '_', verbose=False, filenames=chosen[part])
def prepare_3dfuture():
  """Prepare the 3D-FUTURE dataset: one output folder per model, with up to
  four npz files (simplified to 1k/2k/4k faces plus the unchanged mesh).
  Already-complete output folders are skipped so the job can be resumed."""
  n_target_faces = [1000, 2000, 4000, np.inf]
  labels2use = future3d_labels
  splits_path = os.path.expanduser('~') + '/Databases/3D-FUTURE/GT/model_infos.json'
  with open(splits_path, 'r') as f:
    json_files = json.load(f)
  for i, file in tqdm(enumerate(json_files)):
    # if i < 7183:
    #   continue
    split = 'train' if file['is_train'] else 'test'
    pin = os.path.expanduser('~') + '/Databases/3D-FUTURE/3D-FUTURE-model/' + file['model_id'] + '/raw_model.obj'
    p_out = os.path.expanduser('~') + '/mesh_walker/datasets/3dFUTURE_raw/' + split + '_' + file['model_id']
    if os.path.exists(p_out):
      # Resume support: decide from the existing files whether this model
      # already produced every npz it can.
      models = os.listdir(p_out)
      n_models = len(models)
      if n_models == 4:
        continue  # all four target resolutions already dumped
      else:
        # '_not_changed_<N>' encodes the original face count N; a mesh with
        # N faces only yields simplified versions for targets below N, so
        # fewer than 4 outputs can still mean "complete".
        n_max = [x.split('_')[-1].split('.')[0] for x in models if 'not_changed' in x]
        if len(n_max):
          n_max = int(n_max[0])
          if n_max > 2000 and n_models == 3:
            continue
          elif n_max > 1000 and n_models == 2:
            continue
          elif n_max < 1000 and n_models == 1:
            continue
          # elif n_models == 3 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 4000:
          #   continue
          # elif n_models == 2 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 2000:
          #   continue
          # elif n_models == 1 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 1000:
          #   continue
    prepare_directory_from_scratch('3dFUTURE', pathname_expansion=pin,
                                   p_out=p_out, add_labels='3dfuture', n_target_faces=n_target_faces,
                                   fn_prefix='', label=future3d_shape2label[file['category'].lower()], verbose=False)
def prepare_shapenetcore55():
  """Prepare ShapeNetCore.v2 models listed in all.csv into npz dataset files."""
  import csv
  target_faces = [1000, 2000, 4000]
  base_path = '/media/ran/a6f25521-bcdb-4606-a6a0-5d8b26d7f1d8/home/ran/ShapeNetCore.v2/'
  with open(base_path + 'all.csv') as f:
    filelist = [dict(row) for row in csv.DictReader(f, skipinitialspace=True)]
  for _, entry in tqdm(enumerate(filelist)):
    split = entry['split']
    src = os.path.join(base_path, entry['synsetId'], entry['modelId'], 'models', 'model_normalized.obj')
    out_dir = os.path.expanduser('~') + '/mesh_walker/datasets/shapenetcore55/' + '_'.join([split, entry['synsetId'], entry['modelId']])
    prepare_directory_from_scratch('shapenetcore', pathname_expansion=src,
                                   p_out=out_dir, add_labels='shapenetcore', n_target_faces=target_faces,
                                   fn_prefix='', label=shapenet_shapeid2label[entry['synsetId']], verbose=False)
def prepare_cubes(labels2use=cubes_labels,
                  path_in=os.path.expanduser('~') + '/datasets/cubes/',
                  p_out=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/cubes_tmp'):
  """Prepare the engraved-cubes dataset (segmentation, no simplification) into p_out."""
  dataset_name = 'cubes'
  if not os.path.isdir(p_out):
    os.makedirs(p_out)
  for _, class_name in enumerate(labels2use):
    print('-->>>', class_name)
    for part in ['test', 'train']:
      src = path_in + class_name + '/' + part + '/'
      prepare_directory_from_scratch(dataset_name, pathname_expansion=src + '*.obj',
                                     p_out=p_out, add_labels=dataset_name, fn_prefix=part + '_',
                                     n_target_faces=[np.inf], classification=False)
def prepare_shrec11_from_raw():
  """Prepare SHREC'11: read labels from the evaluation .cla file, dump npz files
  at ~1.5k faces, then create a 16-train / 4-test split."""
  # Prepare labels per model name
  current_label = None
  model_number2label = [-1] * 600
  cla_fn = os.path.expanduser('~') + '/datasets/shrec11/evaluation/test.cla'
  for line in open(cla_fn):
    fields = line.split(' ')
    if len(fields) == 3:
      # Class header line: "<class>_test <parent> <count>".
      class_name = fields[0].replace('_test', '')
      if class_name not in shrec11_labels:
        raise Exception('?')
      current_label = class_name
    if len(fields) == 1 and fields[0] != '\n':
      # Model-number line: assign it the current class label.
      model_number2label[int(fields[0])] = shrec11_shape2label[current_label]

  # Prepare npz files
  p_in = os.path.expanduser('~') + '/datasets/shrec11/raw/'
  p_out = os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/shrec11_raw_1.5k/'
  prepare_directory_from_scratch('shrec11', pathname_expansion=p_in + '*.off',
                                 p_out=p_out, add_labels=model_number2label, n_target_faces=[1500])

  # Prepare split train / test
  change_train_test_split(p_out, 16, 4, '16-04_C')
def calc_face_labels_after_remesh(mesh_orig, mesh, face_labels):
  """Transfer per-face labels from mesh_orig onto the remeshed mesh.

  Each new face takes the label of the original face closest to its centroid.
  """
  orig_tm = trimesh.Trimesh(vertices=np.array(mesh_orig.vertices),
                            faces=np.array(mesh_orig.triangles), process=False)
  new_labels = []
  for tri in mesh.triangles:
    centroid = np.mean(np.array(mesh.vertices)[tri], axis=0)
    _, _, nearest_face = trimesh.proximity.closest_point(orig_tm, [centroid])
    new_labels.append(face_labels[nearest_face[0]])
  return new_labels
def prepare_human_body_segmentation():
  """Prepare the SIG17 human-body segmentation benchmark (no simplification).

  For every mesh under <root>/meshes/{test,train}: load it, locate the matching
  per-face labels under <root>/segs, derive (fuzzy) per-vertex labels and dump
  one npz per model. The `if 0:` blocks are debug visualizations, kept disabled.
  """
  dataset_name = 'sig17_seg_benchmark'
  labels_fuzzy = True
  human_seg_path = os.path.expanduser('~') + '/mesh_walker/datasets/sig17_seg_benchmark/'
  p_out = os.path.expanduser('~') + '/mesh_walker/datasets/sig17_seg_benchmark-no_simplification/'
  fileds_needed = ['vertices', 'faces', 'edge_features', 'edges_map', 'edges', 'kdtree_query',
                   'label', 'labels', 'dataset_name', 'face_labels']
  if labels_fuzzy:
    fileds_needed += ['labels_fuzzy']
  # np.inf target => remesh() leaves every mesh unchanged.
  n_target_faces = [np.inf]
  if not os.path.isdir(p_out):
    os.makedirs(p_out)
  for part in ['test', 'train']:
    print('part: ', part)
    path_meshes = human_seg_path + '/meshes/' + part
    seg_path = human_seg_path + '/segs/' + part
    all_fns = []
    for fn in Path(path_meshes).rglob('*.*'):
      all_fns.append(fn)
    for fn in tqdm(all_fns):
      model_name = str(fn)
      if model_name.endswith('.obj') or model_name.endswith('.off') or model_name.endswith('.ply'):
        # Output name: path relative to the part folder, '/' -> '_', extension dropped.
        new_fn = model_name[model_name.find(part) + len(part) + 1:]
        new_fn = new_fn.replace('/', '_')
        new_fn = new_fn.split('.')[-2]
        out_fn = p_out + '/' + part + '__' + new_fn
        mesh = mesh_orig = load_mesh(model_name, classification=False)
        mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
        face_labels = get_sig17_seg_bm_labels(mesh_data, model_name, seg_path)
        labels_orig, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh_data, face_labels)
        if 0: # Show segment borders
          b_vertices = np.where(np.sum(v_labels_fuzzy != 0, axis=1) > 1)[0]
          vertex_colors = np.zeros((mesh_data['vertices'].shape[0],), dtype=np.int)
          vertex_colors[b_vertices] = 1
          utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], vertex_colors_idx=vertex_colors, point_size=2)
        if 0: # Show face labels
          utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], face_colors=face_labels, show_vertices=False, show_edges=False)
        if 0:
          print(model_name)
          print('min: ', np.min(mesh_data['vertices'], axis=0))
          print('max: ', np.max(mesh_data['vertices'], axis=0))
          cpos = [(-3.5, -0.12, 6.0), (0., 0., 0.1), (0., 1., 0.)]
          utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], vertex_colors_idx=labels_orig, cpos=cpos)
        add_labels = 1
        label = -1
        for this_target_n_faces in n_target_faces:
          mesh, labels, str_to_add = remesh(mesh_orig, this_target_n_faces, add_labels=add_labels, labels_orig=labels_orig)
          if mesh == mesh_orig:
            # Mesh unchanged: the original face labels still apply directly.
            remeshed_face_labels = face_labels
          else:
            remeshed_face_labels = calc_face_labels_after_remesh(mesh_orig, mesh, face_labels)
          mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices),
                                'faces': np.asarray(mesh.triangles),
                                'label': label, 'labels': labels,
                                'face_labels': remeshed_face_labels})
          if 1:
            # Recompute vertex labels from the (possibly transferred) face labels.
            v_labels, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh_data, remeshed_face_labels)
            mesh_data['labels'] = v_labels
            mesh_data['labels_fuzzy'] = v_labels_fuzzy
          if 0: # Show segment borders
            b_vertices = np.where(np.sum(v_labels_fuzzy != 0, axis=1) > 1)[0]
            vertex_colors = np.zeros((mesh_data['vertices'].shape[0],), dtype=np.int)
            vertex_colors[b_vertices] = 1
            utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], vertex_colors_idx=vertex_colors, point_size=10)
          if 0: # Show face labels
            utils.visualize_model(np.array(mesh.vertices), np.array(mesh.triangles), face_colors=remeshed_face_labels, show_vertices=False, show_edges=False)
          out_fc_full = out_fn + str_to_add
          if os.path.isfile(out_fc_full + '.npz'):
            continue
          add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
  if 0:
    utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], vertex_colors_idx=mesh_data['labels'].astype(np.int),
                          cpos=[(-2., -0.2, 3.3), (0., -0.3, 0.1), (0., 1., 0.)])
def prepare_seg_from_meshcnn(dataset, subfolder=None):
  """Prepare a MeshCNN-style segmentation dataset.

  Args:
    dataset: 'human_body' or 'coseg'.
    subfolder: required for 'coseg' (e.g. 'coseg_aliens').

  Raises:
    ValueError: for an unknown dataset name.
  """
  if dataset == 'human_body':
    dataset_name = 'human_seg_from_meshcnn'
    p_in2add = 'human_seg'
    p_out_sub = p_in2add
    p_ext = ''
  elif dataset == 'coseg':
    p_out_sub = dataset_name = 'coseg'
    p_in2add = dataset_name + '/' + subfolder
    p_ext = subfolder
  else:
    # BUG FIX: an unknown dataset previously fell through and crashed with a
    # NameError on p_in2add; fail with a clear message instead.
    raise ValueError('Unknown dataset: {}'.format(dataset))
  path_in = os.path.expanduser('~') + '/mesh_walker/datasets_raw/from_meshcnn/' + p_in2add + '/'
  p_out = os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/' + p_out_sub + '_from_meshcnn/' + p_ext

  for part in ['test', 'train']:
    pin = path_in + '/' + part + '/'
    prepare_directory_from_scratch(dataset_name, pathname_expansion=pin + '*.obj',
                                   p_out=p_out, add_labels=dataset_name, fn_prefix=part + '_', n_target_faces=[np.inf],
                                   classification=False)
def prepare_coseg(dataset_name='coseg',
                  path_in=os.path.expanduser('~') + '/datasets/coseg/',
                  p_out_root=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/coseg_tmp2'):
  """Prepare every COSEG sub-dataset (e.g. aliens/chairs/vases) found under path_in."""
  for sub in os.listdir(path_in):
    out_dir = p_out_root + '/' + sub
    if not os.path.isdir(out_dir):
      # NOTE(review): this creates out_dir/<sub> (nested) rather than out_dir
      # itself — kept exactly as the original behaved.
      os.makedirs(out_dir + '/' + sub)
    for part in ['test', 'train']:
      src = path_in + '/' + sub + '/' + part + '/'
      prepare_directory_from_scratch(sub, pathname_expansion=src + '*.obj',
                                     p_out=out_dir, add_labels=dataset_name, fn_prefix=part + '_',
                                     n_target_faces=[np.inf])
# ------------------------------------------------------- #
def map_fns_to_label(path=None, filenames=None):
  """Map integer label -> list of model file names.

  Exactly one of `path` / `filenames` should be given. With `path`, '*.npz'
  files under it are scanned and base names are stored; with `filenames`, the
  given paths are stored as-is.

  Raises:
    ValueError: if neither argument is provided (previously a NameError).
  """
  if path is not None:
    iterate = glob.glob(path + '/*.npz')
  elif filenames is not None:
    iterate = filenames
  else:
    raise ValueError('Either path or filenames must be provided')
  lmap = {}
  for fn in iterate:
    mesh_data = np.load(fn, encoding='latin1', allow_pickle=True)
    label = int(mesh_data['label'])
    # Keep full paths for explicit file lists, base names when scanning a dir.
    entry = fn if path is None else fn.split('/')[-1]
    lmap.setdefault(label, []).append(entry)
  return lmap
def change_train_test_split(path, n_train_examples, n_test_examples, split_name):
  """Re-split the models in `path` into new train/test folders under split_name.

  For every label, its files are shuffled; the first n_train_examples go to
  <path>/<split_name>/train, the rest to .../test ('train_'/'test_' prefixes
  are stripped from the copied names).
  """
  np.random.seed()
  label_to_fns = map_fns_to_label(path)
  for label, fns_ in label_to_fns.items():
    shuffled = np.random.permutation(fns_)
    assert len(shuffled) == n_train_examples + n_test_examples
    train_path = path + '/' + split_name + '/train'
    test_path = path + '/' + split_name + '/test'
    for folder in (train_path, test_path):
      if not os.path.isdir(folder):
        os.makedirs(folder)
    for idx, fn in enumerate(shuffled):
      out_fn = fn.replace('train_', '').replace('test_', '')
      dst = train_path if idx < n_train_examples else test_path
      shutil.copy(path + '/' + fn, dst + '/' + out_fn)
# ------------------------------------------------------- #
def prepare_one_dataset(dataset_name, mode=''):
  """Dispatch dataset preparation by name.

  Args:
    dataset_name: dataset identifier (matched case-insensitively).
    mode: preparation variant, e.g. 'from_meshcnn' for human_seg. Defaults to
      '' so callers that only pass a name (the 'all' loop in __main__ did,
      which raised a TypeError) keep working.
  """
  dataset_name = dataset_name.lower()
  if dataset_name == 'modelnet40' or dataset_name == 'modelnet':
    prepare_modelnet40()

  if dataset_name == 'modelnet40_retrieval' or dataset_name == 'modelnet_retrieval':
    prepare_modelnet40_retrieval()

  if dataset_name == 'modelnet40_80_20':
    prepare_modelnet40_80_20()

  if dataset_name == '3dfuture':
    prepare_3dfuture()

  if dataset_name == 'shapenetcore':
    prepare_shapenetcore55()

  if dataset_name == 'shrec11':
    pass  # run prepare_shrec11_from_raw() manually

  if dataset_name == 'cubes':
    pass  # run prepare_cubes() manually

  # Semantic Segmentations
  if dataset_name == 'human_seg':
    if mode == 'from_meshcnn':
      prepare_seg_from_meshcnn('human_body')
    else:
      prepare_human_body_segmentation()

  if dataset_name == 'coseg':
    prepare_seg_from_meshcnn('coseg', 'coseg_aliens')
    prepare_seg_from_meshcnn('coseg', 'coseg_chairs')
    prepare_seg_from_meshcnn('coseg', 'coseg_vases')
def vertex_pertubation(faces, vertices):
  """Randomly perturb ~30% of the vertices in place.

  Each step picks a random face and moves one random vertex of that face to
  the face's centroid. Returns the (mutated) vertices array.
  """
  n_to_change = int(vertices.shape[0] * 0.3)
  for _ in range(n_to_change):
    picked_face = faces[np.random.randint(faces.shape[0])]
    centroid = np.mean(vertices[picked_face, :], axis=0)
    target = np.random.choice(picked_face)
    vertices[target] = centroid
  return vertices
def visualize_dataset(pathname_expansion):
  """Endlessly visualize random models matching the glob pattern (blocks forever)."""
  cpos = None
  filenames = glob.glob(pathname_expansion)
  while 1:
    fn = np.random.choice(filenames)
    mesh_data = np.load(fn, encoding='latin1', allow_pickle=True)
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int behaves the same.
    vertex_colors_idx = mesh_data['labels'].astype(int) if mesh_data['labels'].size else None
    vertices = mesh_data['vertices']
    #vertices = vertex_pertubation(mesh_data['faces'], vertices)
    utils.visualize_model(vertices, mesh_data['faces'], vertex_colors_idx=vertex_colors_idx, cpos=cpos, point_size=5)
if __name__ == '__main__':
  TEST_FAST = 0
  utils.config_gpu(False)
  np.random.seed(1)

  #visualize_dataset('/home/alonlahav/mesh_walker/datasets_processed-tmp/sig17_seg_benchmark-no_simplification/*.npz')

  # Optional CLI: <dataset_name> [<mode>]; mode is e.g. from_meshcnn / from_raw
  dataset_name = 'modelnet40_80_20'
  mode = ''  # from_meshcnn / from_raw
  if len(sys.argv) > 1:
    dataset_name = sys.argv[1]
  if len(sys.argv) > 2:
    mode = sys.argv[2]
  if dataset_name == 'all':
    for dataset_name_ in ['modelnet40', 'shrec11', 'cubes', 'human_seg', 'coseg']:
      # BUG FIX: mode was not passed, which raised a TypeError on every call.
      prepare_one_dataset(dataset_name_, mode)
  else:
    prepare_one_dataset(dataset_name, mode)

  # Disabled one-off preparation helpers, kept for reference.
  if 0:
    prepare_shrec11_from_raw()
  elif 0:
    prepare_cubes()
  elif 0:
    prepare_cubes(dataset_name='shrec11', path_in=os.path.expanduser('~') + '/datasets/shrec_16/',
                  p_out=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/shrec11_tmp',
                  labels2use=shrec11_labels)
  elif 0:
    prepare_coseg()
  elif 0:
    change_train_test_split(path=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/shrec11/',
                            n_train_examples=16, n_test_examples=4, split_name='16-04_C')
  elif 0:
    collect_n_models_per_class(in_path=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/coseg/coseg_vases/',
                               n_models4train=[1, 2, 4, 8, 16, 32])
| [
"csv.DictReader",
"numpy.array",
"open3d.io.read_triangle_mesh",
"numpy.linalg.norm",
"trimesh.proximity.closest_point",
"numpy.sin",
"os.path.exists",
"numpy.savez",
"os.listdir",
"numpy.mean",
"pathlib.Path",
"numpy.asarray",
"numpy.max",
"os.path.split",
"easydict.EasyDict",
"numpy.... | [((6944, 6977), 'numpy.dot', 'np.dot', (['vertices', 'R'], {'out': 'vertices'}), '(vertices, R, out=vertices)\n', (6950, 6977), True, 'import numpy as np\n'), ((7017, 7095), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': "mesh['vertices']", 'faces': "mesh['faces']", 'process': '(False)'}), "(vertices=mesh['vertices'], faces=mesh['faces'], process=False)\n", (7032, 7095), False, 'import trimesh\n'), ((7163, 7198), 'numpy.zeros', 'np.zeros', (["mesh['vertices'].shape[0]"], {}), "(mesh['vertices'].shape[0])\n", (7171, 7198), True, 'import numpy as np\n'), ((8859, 8898), 'numpy.array', 'np.array', (["mesh['edges']"], {'dtype': 'np.int32'}), "(mesh['edges'], dtype=np.int32)\n", (8867, 8898), True, 'import numpy as np\n'), ((8939, 9001), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'vertices', 'faces': 'faces', 'process': '(False)'}), '(vertices=vertices, faces=faces, process=False)\n', (8954, 9001), False, 'import trimesh\n'), ((9418, 9448), 'numpy.array', 'np.array', (["mesh['kdtree_query']"], {}), "(mesh['kdtree_query'])\n", (9426, 9448), True, 'import numpy as np\n'), ((9620, 9668), 'trimesh.Trimesh', 'trimesh.Trimesh', (["mesh['vertices']", "mesh['faces']"], {}), "(mesh['vertices'], mesh['faces'])\n", (9635, 9668), False, 'import trimesh\n'), ((12103, 12126), 'numpy.loadtxt', 'np.loadtxt', (['seg_full_fn'], {}), '(seg_full_fn)\n', (12113, 12126), True, 'import numpy as np\n'), ((12637, 12651), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (12645, 12651), True, 'import numpy as np\n'), ((15975, 15997), 'numpy.array', 'np.array', (['faces_belong'], {}), '(faces_belong)\n', (15983, 15997), True, 'import numpy as np\n'), ((16664, 16684), 'glob.glob', 'glob.glob', (['model_fns'], {}), '(model_fns)\n', (16673, 16684), False, 'import glob, os, shutil, sys, json\n'), ((16976, 17006), 'open3d.geometry.TriangleMesh', 'open3d.geometry.TriangleMesh', ([], {}), '()\n', (17004, 17006), False, 'import open3d\n'), ((17025, 17075), 
'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['joint_mesh_vertices'], {}), '(joint_mesh_vertices)\n', (17054, 17075), False, 'import open3d\n'), ((17095, 17142), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['joint_mesh_faces'], {}), '(joint_mesh_faces)\n', (17124, 17142), False, 'import open3d\n'), ((37567, 37586), 'os.listdir', 'os.listdir', (['path_in'], {}), '(path_in)\n', (37577, 37586), False, 'import glob, os, shutil, sys, json\n'), ((38631, 38647), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (38645, 38647), True, 'import numpy as np\n'), ((40736, 40765), 'glob.glob', 'glob.glob', (['pathname_expansion'], {}), '(pathname_expansion)\n', (40745, 40765), False, 'import glob, os, shutil, sys, json\n'), ((41244, 41267), 'utils.config_gpu', 'utils.config_gpu', (['(False)'], {}), '(False)\n', (41260, 41267), False, 'import utils\n'), ((41270, 41287), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (41284, 41287), True, 'import numpy as np\n'), ((7530, 7573), 'numpy.ones', 'np.ones', (['(vertices.shape[0],)'], {'dtype': 'np.int'}), '((vertices.shape[0],), dtype=np.int)\n', (7537, 7573), True, 'import numpy as np\n'), ((7592, 7611), 'numpy.max', 'np.max', (['face_labels'], {}), '(face_labels)\n', (7598, 7611), True, 'import numpy as np\n'), ((7622, 7641), 'numpy.min', 'np.min', (['face_labels'], {}), '(face_labels)\n', (7628, 7641), True, 'import numpy as np\n'), ((7738, 7777), 'numpy.ones', 'np.ones', (['(vertices.shape[0], n_classes)'], {}), '((vertices.shape[0], n_classes))\n', (7745, 7777), True, 'import numpy as np\n'), ((7953, 7986), 'numpy.bincount', 'np.bincount', (['all_vetrex_labels[i]'], {}), '(all_vetrex_labels[i])\n', (7964, 7986), True, 'import numpy as np\n'), ((8010, 8027), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (8019, 8027), True, 'import numpy as np\n'), ((8052, 8076), 'numpy.zeros', 'np.zeros', (['(1, n_classes)'], {}), '((1, n_classes))\n', (8060, 8076), 
True, 'import numpy as np\n'), ((10837, 10858), 'numpy.savez', 'np.savez', (['out_fn'], {}), '(out_fn, **m)\n', (10845, 10858), True, 'import numpy as np\n'), ((12328, 12349), 'numpy.array', 'np.array', (['face_center'], {}), '(face_center)\n', (12336, 12349), True, 'import numpy as np\n'), ((12876, 12890), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (12884, 12890), True, 'import numpy as np\n'), ((15569, 15598), 'numpy.ones', 'np.ones', (['(vertices.shape[0],)'], {}), '((vertices.shape[0],))\n', (15576, 15598), True, 'import numpy as np\n'), ((15649, 15695), 'numpy.linalg.norm', 'np.linalg.norm', (['(vertex - orig_vertices)'], {'axis': '(1)'}), '(vertex - orig_vertices, axis=1)\n', (15663, 15695), True, 'import numpy as np\n'), ((15711, 15723), 'numpy.argmin', 'np.argmin', (['d'], {}), '(d)\n', (15720, 15723), True, 'import numpy as np\n'), ((15883, 15922), 'numpy.any', 'np.any', (['[(v in vertices) for v in face]'], {}), '([(v in vertices) for v in face])\n', (15889, 15922), True, 'import numpy as np\n'), ((16769, 16790), 'trimesh.load_mesh', 'trimesh.load_mesh', (['fn'], {}), '(fn)\n', (16786, 16790), False, 'import trimesh\n'), ((17627, 17657), 'open3d.geometry.TriangleMesh', 'open3d.geometry.TriangleMesh', ([], {}), '()\n', (17655, 17657), False, 'import open3d\n'), ((17678, 17723), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['mesh_.vertices'], {}), '(mesh_.vertices)\n', (17707, 17723), False, 'import open3d\n'), ((17745, 17787), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mesh_.faces'], {}), '(mesh_.faces)\n', (17774, 17787), False, 'import open3d\n'), ((18608, 18628), 'os.path.isdir', 'os.path.isdir', (['p_out'], {}), '(p_out)\n', (18621, 18628), False, 'import glob, os, shutil, sys, json\n'), ((18634, 18652), 'os.makedirs', 'os.makedirs', (['p_out'], {}), '(p_out)\n', (18645, 18652), False, 'import glob, os, shutil, sys, json\n'), ((21661, 21681), 'os.path.isdir', 'os.path.isdir', (['p_out'], {}), 
'(p_out)\n', (21674, 21681), False, 'import glob, os, shutil, sys, json\n'), ((21687, 21705), 'os.makedirs', 'os.makedirs', (['p_out'], {}), '(p_out)\n', (21698, 21705), False, 'import glob, os, shutil, sys, json\n'), ((21746, 21775), 'glob.glob', 'glob.glob', (['pathname_expansion'], {}), '(pathname_expansion)\n', (21755, 21775), False, 'import glob, os, shutil, sys, json\n'), ((27321, 27344), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (27339, 27344), False, 'import glob, os, shutil, sys, json\n'), ((27443, 27455), 'json.load', 'json.load', (['f'], {}), '(f)\n', (27452, 27455), False, 'import glob, os, shutil, sys, json\n'), ((27820, 27841), 'os.path.exists', 'os.path.exists', (['p_out'], {}), '(p_out)\n', (27834, 27841), False, 'import glob, os, shutil, sys, json\n'), ((29380, 29476), 'os.path.join', 'os.path.join', (['base_path', "file['synsetId']", "file['modelId']", '"""models"""', '"""model_normalized.obj"""'], {}), "(base_path, file['synsetId'], file['modelId'], 'models',\n 'model_normalized.obj')\n", (29392, 29476), False, 'import glob, os, shutil, sys, json\n'), ((29970, 29993), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (29988, 29993), False, 'import glob, os, shutil, sys, json\n'), ((30040, 30063), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (30058, 30063), False, 'import glob, os, shutil, sys, json\n'), ((30150, 30170), 'os.path.isdir', 'os.path.isdir', (['p_out'], {}), '(p_out)\n', (30163, 30170), False, 'import glob, os, shutil, sys, json\n'), ((30176, 30194), 'os.makedirs', 'os.makedirs', (['p_out'], {}), '(p_out)\n', (30187, 30194), False, 'import glob, os, shutil, sys, json\n'), ((31199, 31222), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (31217, 31222), False, 'import glob, os, shutil, sys, json\n'), ((31260, 31283), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (31278, 31283), False, 'import glob, 
os, shutil, sys, json\n'), ((31906, 31931), 'numpy.mean', 'np.mean', (['vertices'], {'axis': '(0)'}), '(vertices, axis=0)\n', (31913, 31931), True, 'import numpy as np\n'), ((31957, 32006), 'trimesh.proximity.closest_point', 'trimesh.proximity.closest_point', (['t_mesh', '[center]'], {}), '(t_mesh, [center])\n', (31988, 32006), False, 'import trimesh\n'), ((32220, 32243), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (32238, 32243), False, 'import glob, os, shutil, sys, json\n'), ((32301, 32324), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (32319, 32324), False, 'import glob, os, shutil, sys, json\n'), ((32650, 32670), 'os.path.isdir', 'os.path.isdir', (['p_out'], {}), '(p_out)\n', (32663, 32670), False, 'import glob, os, shutil, sys, json\n'), ((32676, 32694), 'os.makedirs', 'os.makedirs', (['p_out'], {}), '(p_out)\n', (32687, 32694), False, 'import glob, os, shutil, sys, json\n'), ((32957, 32970), 'tqdm.tqdm', 'tqdm', (['all_fns'], {}), '(all_fns)\n', (32961, 32970), False, 'from tqdm import tqdm\n'), ((37395, 37418), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (37413, 37418), False, 'import glob, os, shutil, sys, json\n'), ((37470, 37493), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (37488, 37493), False, 'import glob, os, shutil, sys, json\n'), ((38166, 38192), 'glob.glob', 'glob.glob', (["(path + '/*.npz')"], {}), "(path + '/*.npz')\n", (38175, 38192), False, 'import glob, os, shutil, sys, json\n'), ((38285, 38334), 'numpy.load', 'np.load', (['fn'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(fn, encoding='latin1', allow_pickle=True)\n", (38292, 38334), True, 'import numpy as np\n'), ((38741, 38768), 'numpy.random.permutation', 'np.random.permutation', (['fns_'], {}), '(fns_)\n', (38762, 38768), True, 'import numpy as np\n'), ((40547, 40581), 'numpy.mean', 'np.mean', (['vertices[face, :]'], {'axis': '(0)'}), '(vertices[face, :], 
axis=0)\n', (40554, 40581), True, 'import numpy as np\n'), ((40590, 40612), 'numpy.random.choice', 'np.random.choice', (['face'], {}), '(face)\n', (40606, 40612), True, 'import numpy as np\n'), ((40786, 40813), 'numpy.random.choice', 'np.random.choice', (['filenames'], {}), '(filenames)\n', (40802, 40813), True, 'import numpy as np\n'), ((40830, 40879), 'numpy.load', 'np.load', (['fn'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(fn, encoding='latin1', allow_pickle=True)\n", (40837, 40879), True, 'import numpy as np\n'), ((41083, 41201), 'utils.visualize_model', 'utils.visualize_model', (['vertices', "mesh_data['faces']"], {'vertex_colors_idx': 'vertex_colors_idx', 'cpos': 'cpos', 'point_size': '(5)'}), "(vertices, mesh_data['faces'], vertex_colors_idx=\n vertex_colors_idx, cpos=cpos, point_size=5)\n", (41104, 41201), False, 'import utils\n'), ((9351, 9391), 'numpy.array', 'np.array', (['i_nbrs_cleared'], {'dtype': 'np.int32'}), '(i_nbrs_cleared, dtype=np.int32)\n', (9359, 9391), True, 'import numpy as np\n'), ((11665, 11679), 'pathlib.Path', 'Path', (['seg_path'], {}), '(seg_path)\n', (11669, 11679), False, 'from pathlib import Path\n'), ((13149, 13170), 'numpy.loadtxt', 'np.loadtxt', (['labels_fn'], {}), '(labels_fn)\n', (13159, 13170), True, 'import numpy as np\n'), ((13391, 13418), 'numpy.loadtxt', 'np.loadtxt', (['fuzzy_labels_fn'], {}), '(fuzzy_labels_fn)\n', (13401, 13418), True, 'import numpy as np\n'), ((13440, 13500), 'numpy.zeros', 'np.zeros', (["(mesh['vertices'].shape[0], seseg_labels.shape[1])"], {}), "((mesh['vertices'].shape[0], seseg_labels.shape[1]))\n", (13448, 13500), True, 'import numpy as np\n'), ((14483, 14506), 'numpy.array', 'np.array', (['vertex_labels'], {}), '(vertex_labels)\n', (14491, 14506), True, 'import numpy as np\n'), ((14525, 14539), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (14533, 14539), True, 'import numpy as np\n'), ((15425, 15439), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (15433, 
15439), True, 'import numpy as np\n'), ((17343, 17384), 'trimesh.load_mesh', 'trimesh.load_mesh', (['model_fn'], {'process': '(True)'}), '(model_fn, process=True)\n', (17360, 17384), False, 'import trimesh\n'), ((17573, 17615), 'trimesh.load_mesh', 'trimesh.load_mesh', (['model_fn'], {'process': '(False)'}), '(model_fn, process=False)\n', (17590, 17615), False, 'import trimesh\n'), ((17942, 17980), 'open3d.io.read_triangle_mesh', 'open3d.io.read_triangle_mesh', (['model_fn'], {}), '(model_fn)\n', (17970, 17980), False, 'import open3d\n'), ((18005, 18047), 'trimesh.load_mesh', 'trimesh.load_mesh', (['model_fn'], {'process': '(False)'}), '(model_fn, process=False)\n', (18022, 18047), False, 'import trimesh\n'), ((18061, 18091), 'open3d.geometry.TriangleMesh', 'open3d.geometry.TriangleMesh', ([], {}), '()\n', (18089, 18091), False, 'import open3d\n'), ((18114, 18159), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['mesh_.vertices'], {}), '(mesh_.vertices)\n', (18143, 18159), False, 'import open3d\n'), ((18183, 18225), 'open3d.utility.Vector3iVector', 'open3d.utility.Vector3iVector', (['mesh_.faces'], {}), '(mesh_.faces)\n', (18212, 18225), False, 'import open3d\n'), ((18861, 18886), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (18871, 18886), True, 'import numpy as np\n'), ((18897, 18923), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (18907, 18923), True, 'import numpy as np\n'), ((19603, 19628), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (19613, 19628), True, 'import numpy as np\n'), ((19639, 19665), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (19649, 19665), True, 'import numpy as np\n'), ((19956, 19970), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (19964, 19970), True, 'import numpy as np\n'), ((20931, 20956), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (20941, 20956), 
True, 'import numpy as np\n'), ((21185, 21212), 'os.path.exists', 'os.path.exists', (['out_fc_full'], {}), '(out_fc_full)\n', (21199, 21212), False, 'import glob, os, shutil, sys, json\n'), ((24842, 24863), 'os.path.join', 'os.path.join', (['fold', 'x'], {}), '(fold, x)\n', (24854, 24863), False, 'import glob, os, shutil, sys, json\n'), ((24935, 24960), 'os.path.join', 'os.path.join', (['tst_fold', 'x'], {}), '(tst_fold, x)\n', (24947, 24960), False, 'import glob, os, shutil, sys, json\n'), ((26340, 26361), 'os.path.join', 'os.path.join', (['fold', 'x'], {}), '(fold, x)\n', (26352, 26361), False, 'import glob, os, shutil, sys, json\n'), ((26460, 26485), 'os.path.join', 'os.path.join', (['tst_fold', 'x'], {}), '(tst_fold, x)\n', (26472, 26485), False, 'import glob, os, shutil, sys, json\n'), ((27858, 27875), 'os.listdir', 'os.listdir', (['p_out'], {}), '(p_out)\n', (27868, 27875), False, 'import glob, os, shutil, sys, json\n'), ((30766, 30789), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (30784, 30789), False, 'import glob, os, shutil, sys, json\n'), ((31707, 31735), 'numpy.array', 'np.array', (['mesh_orig.vertices'], {}), '(mesh_orig.vertices)\n', (31715, 31735), True, 'import numpy as np\n'), ((31743, 31772), 'numpy.array', 'np.array', (['mesh_orig.triangles'], {}), '(mesh_orig.triangles)\n', (31751, 31772), True, 'import numpy as np\n'), ((31863, 31886), 'numpy.array', 'np.array', (['mesh.vertices'], {}), '(mesh.vertices)\n', (31871, 31886), True, 'import numpy as np\n'), ((37641, 37661), 'os.path.isdir', 'os.path.isdir', (['p_out'], {}), '(p_out)\n', (37654, 37661), False, 'import glob, os, shutil, sys, json\n'), ((37669, 37706), 'os.makedirs', 'os.makedirs', (["(p_out + '/' + sub_folder)"], {}), "(p_out + '/' + sub_folder)\n", (37680, 37706), False, 'import glob, os, shutil, sys, json\n'), ((38890, 38915), 'os.path.isdir', 'os.path.isdir', (['train_path'], {}), '(train_path)\n', (38903, 38915), False, 'import glob, os, shutil, sys, 
json\n'), ((38923, 38946), 'os.makedirs', 'os.makedirs', (['train_path'], {}), '(train_path)\n', (38934, 38946), False, 'import glob, os, shutil, sys, json\n'), ((39008, 39032), 'os.path.isdir', 'os.path.isdir', (['test_path'], {}), '(test_path)\n', (39021, 39032), False, 'import glob, os, shutil, sys, json\n'), ((39040, 39062), 'os.makedirs', 'os.makedirs', (['test_path'], {}), '(test_path)\n', (39051, 39062), False, 'import glob, os, shutil, sys, json\n'), ((40492, 40525), 'numpy.random.randint', 'np.random.randint', (['faces.shape[0]'], {}), '(faces.shape[0])\n', (40509, 40525), True, 'import numpy as np\n'), ((6784, 6793), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (6790, 6793), True, 'import numpy as np\n'), ((6829, 6838), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (6835, 6838), True, 'import numpy as np\n'), ((6840, 6849), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (6846, 6849), True, 'import numpy as np\n'), ((10063, 10077), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (10071, 10077), True, 'import numpy as np\n'), ((10195, 10209), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (10203, 10209), True, 'import numpy as np\n'), ((10350, 10437), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'mesh_data.vertices', 'faces': 'mesh_data.faces', 'process': '(False)'}), '(vertices=mesh_data.vertices, faces=mesh_data.faces, process\n =False)\n', (10365, 10437), False, 'import trimesh\n'), ((10590, 10677), 'trimesh.Trimesh', 'trimesh.Trimesh', ([], {'vertices': 'mesh_data.vertices', 'faces': 'mesh_data.faces', 'process': '(False)'}), '(vertices=mesh_data.vertices, faces=mesh_data.faces, process\n =False)\n', (10605, 10677), False, 'import trimesh\n'), ((12272, 12308), 'numpy.mean', 'np.mean', (['mesh.vertices[f, :]'], {'axis': '(0)'}), '(mesh.vertices[f, :], axis=0)\n', (12279, 12308), True, 'import numpy as np\n'), ((12700, 12751), 'numpy.load', 'np.load', (['"""faust_labels/faust_part_segmentation.npy"""'], {}), 
"('faust_labels/faust_part_segmentation.npy')\n", (12707, 12751), True, 'import numpy as np\n'), ((16120, 16151), 'numpy.asarray', 'np.asarray', (['mesh_orig.triangles'], {}), '(mesh_orig.triangles)\n', (16130, 16151), True, 'import numpy as np\n'), ((16405, 16430), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (16415, 16430), True, 'import numpy as np\n'), ((16432, 16462), 'numpy.asarray', 'np.asarray', (['mesh_orig.vertices'], {}), '(mesh_orig.vertices)\n', (16442, 16462), True, 'import numpy as np\n'), ((17439, 17477), 'open3d.io.read_triangle_mesh', 'open3d.io.read_triangle_mesh', (['model_fn'], {}), '(model_fn)\n', (17467, 17477), False, 'import open3d\n'), ((20355, 20380), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (20365, 20380), True, 'import numpy as np\n'), ((20689, 20716), 'os.path.exists', 'os.path.exists', (['out_fc_full'], {}), '(out_fc_full)\n', (20703, 20716), False, 'import glob, os, shutil, sys, json\n'), ((24097, 24120), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (24115, 24120), False, 'import glob, os, shutil, sys, json\n'), ((24873, 24889), 'os.listdir', 'os.listdir', (['fold'], {}), '(fold)\n', (24883, 24889), False, 'import glob, os, shutil, sys, json\n'), ((24970, 24990), 'os.listdir', 'os.listdir', (['tst_fold'], {}), '(tst_fold)\n', (24980, 24990), False, 'import glob, os, shutil, sys, json\n'), ((25169, 25207), 'numpy.random.permutation', 'np.random.permutation', (['all_train_files'], {}), '(all_train_files)\n', (25190, 25207), True, 'import numpy as np\n'), ((25233, 25270), 'numpy.random.permutation', 'np.random.permutation', (['all_test_files'], {}), '(all_test_files)\n', (25254, 25270), True, 'import numpy as np\n'), ((26371, 26387), 'os.listdir', 'os.listdir', (['fold'], {}), '(fold)\n', (26381, 26387), False, 'import glob, os, shutil, sys, json\n'), ((26495, 26515), 'os.listdir', 'os.listdir', (['tst_fold'], {}), '(tst_fold)\n', (26505, 
26515), False, 'import glob, os, shutil, sys, json\n'), ((26801, 26824), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26819, 26824), False, 'import glob, os, shutil, sys, json\n'), ((29258, 29298), 'csv.DictReader', 'csv.DictReader', (['f'], {'skipinitialspace': '(True)'}), '(f, skipinitialspace=True)\n', (29272, 29298), False, 'import csv\n'), ((29485, 29508), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (29503, 29508), False, 'import glob, os, shutil, sys, json\n'), ((32886, 32903), 'pathlib.Path', 'Path', (['path_meshes'], {}), '(path_meshes)\n', (32890, 32903), False, 'from pathlib import Path\n'), ((36797, 36820), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (36815, 36820), False, 'import glob, os, shutil, sys, json\n'), ((39196, 39251), 'shutil.copy', 'shutil.copy', (["(path + '/' + fn)", "(train_path + '/' + out_fn)"], {}), "(path + '/' + fn, train_path + '/' + out_fn)\n", (39207, 39251), False, 'import glob, os, shutil, sys, json\n'), ((39272, 39326), 'shutil.copy', 'shutil.copy', (["(path + '/' + fn)", "(test_path + '/' + out_fn)"], {}), "(path + '/' + fn, test_path + '/' + out_fn)\n", (39283, 39326), False, 'import glob, os, shutil, sys, json\n'), ((6795, 6804), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (6801, 6804), True, 'import numpy as np\n'), ((10764, 10775), 'easydict.EasyDict', 'EasyDict', (['m'], {}), '(m)\n', (10772, 10775), False, 'from easydict import EasyDict\n'), ((14251, 14286), 'numpy.sum', 'np.sum', (['(v_labels_fuzzy != 0)'], {'axis': '(1)'}), '(v_labels_fuzzy != 0, axis=1)\n', (14257, 14286), True, 'import numpy as np\n'), ((14413, 14427), 'numpy.bincount', 'np.bincount', (['l'], {}), '(l)\n', (14424, 14427), True, 'import numpy as np\n'), ((18771, 18796), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (18781, 18796), True, 'import numpy as np\n'), ((21022, 21048), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], 
{}), '(mesh.triangles)\n', (21032, 21048), True, 'import numpy as np\n'), ((24665, 24688), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (24683, 24688), False, 'import glob, os, shutil, sys, json\n'), ((24750, 24773), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (24768, 24773), False, 'import glob, os, shutil, sys, json\n'), ((25513, 25536), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (25531, 25536), False, 'import glob, os, shutil, sys, json\n'), ((26163, 26186), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26181, 26186), False, 'import glob, os, shutil, sys, json\n'), ((26248, 26271), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26266, 26271), False, 'import glob, os, shutil, sys, json\n'), ((27600, 27623), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (27618, 27623), False, 'import glob, os, shutil, sys, json\n'), ((33794, 33851), 'numpy.zeros', 'np.zeros', (["(mesh_data['vertices'].shape[0],)"], {'dtype': 'np.int'}), "((mesh_data['vertices'].shape[0],), dtype=np.int)\n", (33802, 33851), True, 'import numpy as np\n'), ((33902, 34017), 'utils.visualize_model', 'utils.visualize_model', (["mesh_data['vertices']", "mesh_data['faces']"], {'vertex_colors_idx': 'vertex_colors', 'point_size': '(2)'}), "(mesh_data['vertices'], mesh_data['faces'],\n vertex_colors_idx=vertex_colors, point_size=2)\n", (33923, 34017), False, 'import utils\n'), ((34057, 34189), 'utils.visualize_model', 'utils.visualize_model', (["mesh_data['vertices']", "mesh_data['faces']"], {'face_colors': 'face_labels', 'show_vertices': '(False)', 'show_edges': '(False)'}), "(mesh_data['vertices'], mesh_data['faces'],\n face_colors=face_labels, show_vertices=False, show_edges=False)\n", (34078, 34189), False, 'import utils\n'), ((34433, 34543), 'utils.visualize_model', 'utils.visualize_model', (["mesh_data['vertices']", 
"mesh_data['faces']"], {'vertex_colors_idx': 'labels_orig', 'cpos': 'cpos'}), "(mesh_data['vertices'], mesh_data['faces'],\n vertex_colors_idx=labels_orig, cpos=cpos)\n", (34454, 34543), False, 'import utils\n'), ((36067, 36103), 'os.path.isfile', 'os.path.isfile', (["(out_fc_full + '.npz')"], {}), "(out_fc_full + '.npz')\n", (36081, 36103), False, 'import glob, os, shutil, sys, json\n'), ((36892, 36915), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (36910, 36915), False, 'import glob, os, shutil, sys, json\n'), ((16545, 16576), 'numpy.asarray', 'np.asarray', (['mesh_orig.triangles'], {}), '(mesh_orig.triangles)\n', (16555, 16576), True, 'import numpy as np\n'), ((19387, 19406), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (19400, 19406), False, 'import glob, os, shutil, sys, json\n'), ((20486, 20512), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (20496, 20512), True, 'import numpy as np\n'), ((27716, 27739), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (27734, 27739), False, 'import glob, os, shutil, sys, json\n'), ((33413, 33438), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (33423, 33438), True, 'import numpy as np\n'), ((33449, 33475), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (33459, 33475), True, 'import numpy as np\n'), ((34253, 34290), 'numpy.min', 'np.min', (["mesh_data['vertices']"], {'axis': '(0)'}), "(mesh_data['vertices'], axis=0)\n", (34259, 34290), True, 'import numpy as np\n'), ((34317, 34354), 'numpy.max', 'np.max', (["mesh_data['vertices']"], {'axis': '(0)'}), "(mesh_data['vertices'], axis=0)\n", (34323, 34354), True, 'import numpy as np\n'), ((35591, 35648), 'numpy.zeros', 'np.zeros', (["(mesh_data['vertices'].shape[0],)"], {'dtype': 'np.int'}), "((mesh_data['vertices'].shape[0],), dtype=np.int)\n", (35599, 35648), True, 'import numpy as np\n'), ((35703, 35819), 
'utils.visualize_model', 'utils.visualize_model', (["mesh_data['vertices']", "mesh_data['faces']"], {'vertex_colors_idx': 'vertex_colors', 'point_size': '(10)'}), "(mesh_data['vertices'], mesh_data['faces'],\n vertex_colors_idx=vertex_colors, point_size=10)\n", (35724, 35819), False, 'import utils\n'), ((34991, 35016), 'numpy.asarray', 'np.asarray', (['mesh.vertices'], {}), '(mesh.vertices)\n', (35001, 35016), True, 'import numpy as np\n'), ((35059, 35085), 'numpy.asarray', 'np.asarray', (['mesh.triangles'], {}), '(mesh.triangles)\n', (35069, 35085), True, 'import numpy as np\n'), ((35886, 35909), 'numpy.array', 'np.array', (['mesh.vertices'], {}), '(mesh.vertices)\n', (35894, 35909), True, 'import numpy as np\n'), ((35911, 35935), 'numpy.array', 'np.array', (['mesh.triangles'], {}), '(mesh.triangles)\n', (35919, 35935), True, 'import numpy as np\n'), ((41928, 41951), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (41946, 41951), False, 'import glob, os, shutil, sys, json\n'), ((42001, 42024), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (42019, 42024), False, 'import glob, os, shutil, sys, json\n'), ((23999, 24022), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (24017, 24022), False, 'import glob, os, shutil, sys, json\n'), ((26708, 26731), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26726, 26731), False, 'import glob, os, shutil, sys, json\n'), ((33724, 33759), 'numpy.sum', 'np.sum', (['(v_labels_fuzzy != 0)'], {'axis': '(1)'}), '(v_labels_fuzzy != 0, axis=1)\n', (33730, 33759), True, 'import numpy as np\n'), ((25420, 25443), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (25438, 25443), False, 'import glob, os, shutil, sys, json\n'), ((35519, 35554), 'numpy.sum', 'np.sum', (['(v_labels_fuzzy != 0)'], {'axis': '(1)'}), '(v_labels_fuzzy != 0, axis=1)\n', (35525, 35554), True, 'import numpy as np\n'), ((42196, 42219), 
'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (42214, 42219), False, 'import glob, os, shutil, sys, json\n'), ((42409, 42432), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (42427, 42432), False, 'import glob, os, shutil, sys, json\n')] |
from ..tasks import video, task_base
import numpy as np
def get_videos(subject, session):
    """Return the video indices assigned to *subject* for *session*.

    Reads the fMRI presentation-order CSV (header skipped), keeps the rows
    whose first column equals *session*, and returns column ``subject + 1``
    of those rows (presumably one column of video indices per subject —
    confirm against the CSV layout).

    Parameters
    ----------
    subject: int
        Zero-based subject identifier; selects column ``subject + 1``.
    session: int
        Session number matched against the first CSV column.

    Returns
    -------
    np.ndarray
        The selected video indices for this subject/session.
    """
    # NOTE: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the documented drop-in replacement.
    video_idx = np.loadtxt(
        "data/liris/order_fmri_neuromod.csv", delimiter=",", skiprows=1, dtype=int
    )
    selected_idx = video_idx[video_idx[:, 0] == session, subject + 1]
    return selected_idx
def get_tasks(parsed):
    """Build the ordered task list for one subject/session.

    Creates one ``SingleVideo`` task per video index returned by
    :func:`get_videos`, then appends a single ``Pause`` task shown after
    the last video.

    Parameters
    ----------
    parsed
        Parsed command-line arguments; must expose ``subject`` and
        ``session`` attributes convertible to ``int``.

    Returns
    -------
    list
        The ``SingleVideo`` tasks followed by the final ``Pause``.
    """
    tasks = []
    video_indices = get_videos(int(parsed.subject), int(parsed.session))
    for idx in video_indices:
        # Redundant trailing `continue` removed: it was the last statement
        # of the loop body and had no effect.
        tasks.append(
            video.SingleVideo(
                f"data/liris/videos/{idx:03d}.mp4", name=f"task-liris{idx:03d}"
            )
        )
    tasks.append(
        task_base.Pause(
            """The video is finished.
The scanner might run for a few seconds to acquire more images.
Please remain still."""
        )
    )
    return tasks
| [
"numpy.loadtxt"
] | [((108, 201), 'numpy.loadtxt', 'np.loadtxt', (['"""data/liris/order_fmri_neuromod.csv"""'], {'delimiter': '""","""', 'skiprows': '(1)', 'dtype': 'np.int'}), "('data/liris/order_fmri_neuromod.csv', delimiter=',', skiprows=1,\n dtype=np.int)\n", (118, 201), True, 'import numpy as np\n')] |
"""Module providing adapter class making node-label prediction possible in sklearn models."""
from sklearn.base import ClassifierMixin
from typing import Type, List, Dict, Optional, Any
import numpy as np
import copy
from ensmallen import Graph
from embiggen.embedding_transformers import NodeLabelPredictionTransformer, NodeTransformer
from embiggen.utils.sklearn_utils import must_be_an_sklearn_classifier_model
from embiggen.node_label_prediction.node_label_prediction_model import AbstractNodeLabelPredictionModel
from embiggen.utils.abstract_models import abstract_class
@abstract_class
class SklearnNodeLabelPredictionAdapter(AbstractNodeLabelPredictionModel):
"""Class wrapping Sklearn models for running node-label predictions."""
def __init__(
self,
model_instance: Type[ClassifierMixin],
random_state: int = 42
):
"""Create the adapter for Sklearn object.
Parameters
----------------
model_instance: Type[ClassifierMixin]
The class instance to be adapted into node-label prediction.
random_state: int = 42
The random state to use to reproduce the training.
Raises
----------------
ValueError
If the provided model_instance is not a subclass of `ClassifierMixin`.
"""
super().__init__(random_state=random_state)
must_be_an_sklearn_classifier_model(model_instance)
self._model_instance = model_instance
# We want to mask the decorator class name
self.__class__.__name__ = model_instance.__class__.__name__
self.__class__.__doc__ = model_instance.__class__.__doc__
def clone(self) -> Type["SklearnNodeLabelPredictionAdapter"]:
"""Return copy of self."""
return copy.deepcopy(self)
def _trasform_graph_into_node_embedding(
self,
graph: Graph,
node_features: List[np.ndarray],
) -> np.ndarray:
"""Transforms the provided data into an Sklearn-compatible numpy array.
Parameters
------------------
graph: Graph,
The graph whose edges are to be embedded and predicted.
It can either be an Graph or a list of lists of edges.
node_features: List[np.ndarray]
The node features to be used in the training of the model.
Raises
------------------
ValueError
If the two graphs do not share the same node vocabulary.
"""
gt = NodeTransformer(aligned_node_mapping=True)
gt.fit(node_features)
return gt.transform(graph,)
def _fit(
self,
graph: Graph,
support: Optional[Graph] = None,
node_features: Optional[List[np.ndarray]] = None,
node_type_features: Optional[List[np.ndarray]] = None,
edge_features: Optional[List[np.ndarray]] = None,
):
"""Execute fitting of the model.
Parameters
------------------
graph: Graph,
The graph whose edges are to be embedded and edge types extracted.
It can either be an Graph or a list of lists of edges.
support: Optional[Graph] = None
The graph describiding the topological structure that
includes also the above graph. This parameter
is mostly useful for topological classifiers
such as Graph Convolutional Networks.
node_features: Optional[List[np.ndarray]] = None
The node features to be used in the training of the model.
node_type_features: Optional[List[np.ndarray]] = None
The node type features to be used in the training of the model.
edge_features: Optional[List[np.ndarray]] = None
Optional edge features to be used as input concatenated
to the obtained edge embedding. The shape must be equal
to the number of directed edges in the graph.
Raises
------------------
ValueError
If the two graphs do not share the same node vocabulary.
"""
nlpt = NodeLabelPredictionTransformer(
aligned_node_mapping=True
)
nlpt.fit(node_features)
self._model_instance.fit(*nlpt.transform(
graph=graph,
behaviour_for_unknown_node_labels="drop",
shuffle=True,
random_state=self._random_state
))
def _predict_proba(
self,
graph: Graph,
support: Optional[Graph] = None,
node_features: Optional[List[np.ndarray]] = None,
node_type_features: Optional[List[np.ndarray]] = None,
edge_features: Optional[List[np.ndarray]] = None,
) -> Dict[str, float]:
"""Return evaluations of the model on the edge-label prediction task on the provided data.
Parameters
------------------
graph: Graph,
The graph whose edges are to be embedded and predicted.
It can either be an Graph or a list of lists of edges.
support: Optional[Graph] = None
The graph describiding the topological structure that
includes also the above graph. This parameter
is mostly useful for topological classifiers
such as Graph Convolutional Networks.
node_features: Optional[List[np.ndarray]] = None
The node features to be used in the evaluation of the model.
node_type_features: Optional[List[np.ndarray]] = None
The node features to be used in prediction.
edge_features: Optional[List[np.ndarray]] = None
Optional edge features to be used as input concatenated
to the obtained edge embedding. The shape must be equal
to the number of directed edges in the provided graph.
Raises
------------------
ValueError
If the two graphs do not share the same node vocabulary.
"""
predictions_probabilities = self._model_instance.predict_proba(self._trasform_graph_into_node_embedding(
graph=graph,
node_features=node_features,
))
if self.is_multilabel_prediction_task():
return np.array([
class_predictions[:, 1]
for class_predictions in predictions_probabilities
]).T
return predictions_probabilities
def _predict(
self,
graph: Graph,
support: Optional[Graph] = None,
node_features: Optional[List[np.ndarray]] = None,
node_type_features: Optional[List[np.ndarray]] = None,
edge_features: Optional[List[np.ndarray]] = None,
) -> Dict[str, float]:
"""Return evaluations of the model on the edge-label prediction task on the provided data.
Parameters
------------------
graph: Graph,
The graph whose edges are to be embedded and predicted.
It can either be an Graph or a list of lists of edges.
support: Optional[Graph] = None
The graph describiding the topological structure that
includes also the above graph. This parameter
is mostly useful for topological classifiers
such as Graph Convolutional Networks.
node_features: List[np.ndarray]
The node features to be used in prediction.
node_type_features: List[np.ndarray]
The node features to be used in prediction.
edge_features: Optional[List[np.ndarray]] = None
Optional edge features to be used as input concatenated
to the obtained edge embedding. The shape must be equal
to the number of directed edges in the provided graph.
Raises
------------------
ValueError
If the two graphs do not share the same node vocabulary.
"""
return self._model_instance.predict(self._trasform_graph_into_node_embedding(
graph=graph,
node_features=node_features,
))
@staticmethod
def library_name() -> str:
"""Return name of the model."""
return "scikit-learn"
@staticmethod
def requires_edge_weights() -> bool:
return False
@staticmethod
def requires_positive_edge_weights() -> bool:
return False
@staticmethod
def requires_edge_types() -> bool:
return False
@staticmethod
def can_use_edge_weights() -> bool:
"""Returns whether the model can optionally use edge weights."""
return False
def is_using_edge_weights(self) -> bool:
"""Returns whether the model is parametrized to use edge weights."""
return False
@staticmethod
def can_use_edge_types() -> bool:
"""Returns whether the model can optionally use edge types."""
return False
def is_using_edge_types(self) -> bool:
    """Returns whether the model is parametrized to use edge types (it is not)."""
    return False
| [
"embiggen.utils.sklearn_utils.must_be_an_sklearn_classifier_model",
"embiggen.embedding_transformers.NodeLabelPredictionTransformer",
"numpy.array",
"copy.deepcopy",
"embiggen.embedding_transformers.NodeTransformer"
] | [((1386, 1437), 'embiggen.utils.sklearn_utils.must_be_an_sklearn_classifier_model', 'must_be_an_sklearn_classifier_model', (['model_instance'], {}), '(model_instance)\n', (1421, 1437), False, 'from embiggen.utils.sklearn_utils import must_be_an_sklearn_classifier_model\n'), ((1786, 1805), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1799, 1805), False, 'import copy\n'), ((2501, 2543), 'embiggen.embedding_transformers.NodeTransformer', 'NodeTransformer', ([], {'aligned_node_mapping': '(True)'}), '(aligned_node_mapping=True)\n', (2516, 2543), False, 'from embiggen.embedding_transformers import NodeLabelPredictionTransformer, NodeTransformer\n'), ((4090, 4147), 'embiggen.embedding_transformers.NodeLabelPredictionTransformer', 'NodeLabelPredictionTransformer', ([], {'aligned_node_mapping': '(True)'}), '(aligned_node_mapping=True)\n', (4120, 4147), False, 'from embiggen.embedding_transformers import NodeLabelPredictionTransformer, NodeTransformer\n'), ((6206, 6296), 'numpy.array', 'np.array', (['[class_predictions[:, 1] for class_predictions in predictions_probabilities]'], {}), '([class_predictions[:, 1] for class_predictions in\n predictions_probabilities])\n', (6214, 6296), True, 'import numpy as np\n')] |
#!/usr/bin/python
#
# Copyright 2018, <NAME>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from time import time
import os,sys,re,subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import random
import logging
import argparse
import pickle
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist,squareform
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold,GridSearchCV
from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError:
print("INFO: Please install seaborn package for plotting.")
__author__ = 'chris'
def extract_features(mol, sourcename, pos, printHeader=True, fillNa=np.nan, xyz_file=None, plot=False, useSelectionRules=True, OrderAtoms=True, bondAngles=True, skipH=False, addBonds=False, verbose=False):
    """
    Create feature matrix from RDKit mol object or xyz file.

    Each row of the returned dataframe describes one atom pair (i,j) closer
    than 3.0 A: the atomic numbers, the pair distance, the three nearest
    neighbours of each partner (optionally with angles/bonds), and the target
    bond type (0 none, 1-3 single/double/triple, 4 aromatic).

    :param mol: RDKit molecule
    :param sourcename: name of sd file
    :param pos: position in sdf
    :param xyz_file: name of xyz
    :param plot: plotting
    :param useSelectionRules: use rules to remove strange bonds
    :param OrderAtoms: larger atomic number first
    :param bondAngles: add bond angles, i.e. distance to third atom
    :param skipH: remove H
    :param addBonds: add neighbor bonds as features
    :param printHeader: prints column headers
    :param fillNa: how to fill NA values
    :param verbose: verbosity on/off
    :return: pandas dataframe with feature matrix, or None if the molecule is rejected
    """
    pt = Chem.GetPeriodicTable()
    # Distance matrix comes either from raw coordinates (xyz/pdb path, no
    # bond information, targets unavailable) or from the RDKit conformer.
    if xyz_file is not None:
        if xyz_file.lower().endswith(".xyz"):
            atomtypes, coords, q, title = read_xyz(xyz_file, skipH=skipH)
        elif xyz_file.lower().endswith(".pdb"):
            atomtypes, coords, q, title = read_pdbfile(xyz_file, skipH=skipH)
        if q!=0:
            logging.info("Found charge: %.2f"%(q))
        dm = squareform(pdist(np.asarray(coords)))
    else:
        if skipH:
            try:
                mol = Chem.RemoveHs(mol)
            except ValueError as e:
                logging.info("Skipping H deletion for molecule at pos:" + str(pos))
                return(None)
        #check if bonds are available
        try:
            if not addBonds and mol.GetNumBonds(onlyHeavy=False)==0:
                logging.info("No bonds found: skipping molecule %s " %Chem.MolToSmiles(mol))
                return (None)
        except RuntimeError as e:
            logging.info("RuntimeError: skipping molecule")
            return(None)
        dm = Chem.Get3DDistanceMatrix(mol) # both should be the same!!!
        q = Chem.GetFormalCharge(mol)
    n,m = dm.shape
    assert(n == m)
    if plot:
        plt.pcolormesh(dm)
        plt.colorbar()
        plt.xlim([0, n])
        plt.ylim([0, n])
        plt.show()
    dist_cut = 3.0 # distance cutoff
    n_cut = 3 # neighbour cutoff
    if printHeader and verbose:
        print('{:<4s}{:<4s}{:>4s}{:>3s}{:>3s}{:>8s}'.format('ID1','ID2','Q', '#1', '#2', 'DIST'),end='')
        for i in range(2*n_cut):
            if addBonds:
                print('{:>4s}{:>3s}{:>8s}{:>8s}{:>4s}'.format('POS', '#', 'DIST', 'DISTB','BNB'),end='')
            elif bondAngles:
                print('{:>4s}{:>3s}{:>8s}{:>8s}'.format('POS', '#', 'DIST','DISTB'),end='')
            else:
                print('{:4s}{:3s}{:8s}'.format('POS', '#', 'DIST'),end='')
        print("{:4s}".format('TYPE'))
    df = []
    index = []
    # Iterate over the upper triangle (i<j) of the distance matrix: one
    # feature row per atom pair within the distance cutoff.
    for i in range(0,n):
        if xyz_file is not None:
            bnd_at1 = atomtypes[i]
            bond_num1 = pt.GetAtomicNumber(bnd_at1)
        else:
            bnd_at1 = mol.GetAtomWithIdx(i)
            bond_num1 = bnd_at1.GetAtomicNum()
            bnd_at1 = bnd_at1.GetSymbol()
        for j in range(0,m):
            row = []
            if i >= j: continue
            bnd_dist = dm[i,j]
            if bnd_dist>dist_cut: continue
            bnd_type = 0
            if xyz_file is None:
                bnd_at2 = mol.GetAtomWithIdx(j)
                bond_num2 = bnd_at2.GetAtomicNum()
                bnd = mol.GetBondBetweenAtoms(i, j)
                if bnd is not None:
                    # target label: bond order as int, aromatic mapped to 4
                    bnd_type = int(bnd.GetBondTypeAsDouble())
                    if bnd.GetIsAromatic():
                        bnd_type = 4
                else:
                    bnd_type = 0
                bnd_at2=bnd_at2.GetSymbol()
            else:
                bnd_at2 = atomtypes[j]
                bond_num2 = pt.GetAtomicNumber(bnd_at2)
            #sanity checks
            if xyz_file is None:
                # we accept very short bonds but give warning
                selstr = "Skipping"
                if not useSelectionRules:
                    selstr = "Keeping"
                if bnd_dist<0.75 and bnd_type>0:
                    logging.warn("Unreasonable short X-X bond (r<0.75): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
                elif bnd_dist<1.1 and bond_num1>=6 and bond_num2>=6 and bnd_type>0:
                    logging.warn("Unreasonable short X-X bond (r<1.1): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
                # in case of problems we discard whole molecule
                elif bnd_dist < 0.75 and (bond_num1 == 1 or bond_num2 == 1) and bnd_type == 0:
                    logging.warn("%s unreasonable short X-H distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
                    bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
                    if useSelectionRules: return (None)
                elif bnd_dist < 1.5 and bond_num1==6 and bond_num2==6 and bnd_type==0:
                    logging.warn("%s unreasonable short C-C distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
                    bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
                    if useSelectionRules: return(None)
                elif bnd_dist < 1.0 and bond_num1>=6 and bond_num2>=6 and bnd_type==0:
                    logging.warn("%s unreasonable short distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d" % (selstr,
                    bnd_at1,i+1, bnd_at2,j+1, bnd_dist, bnd_type,sourcename,pos))
                    if useSelectionRules: return(None)
                # rather generous cutoff
                elif bnd_dist>1.8 and bond_num1==6 and bond_num2==6 and bnd_type>0:
                    logging.warn("%s unreasonable long C-C bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d"%(selstr,bnd_at1,i+1,bnd_at2,j+1,bnd_dist,bnd_type,sourcename,pos))
                    if useSelectionRules: return(None)
            #unique order
            if OrderAtoms and bond_num1<bond_num2:
                row.extend([j + 1, i + 1, q,bond_num2, bond_num1, bnd_dist])
                i_tmp,j_tmp = j,i
            else:
                row.extend([i + 1, j + 1, q,bond_num1, bond_num2, bnd_dist])
                i_tmp, j_tmp = i, j
            if verbose: print('{:<4d}{:<4d}{:4.1f}{:3d}{:3d}{:8.3f}'.format(i_tmp+1,j_tmp+1,q,bond_num1,bond_num2,bnd_dist),end='')
            # now iterate over neighbors of a and b and i.e. sort row a and b and concat, then skip i and j
            for a in [i_tmp,j_tmp]:
                # neighbours sorted by distance to atom a; the pair atoms
                # themselves are excluded below
                row_sorted_a = np.argsort(dm[a,:])
                count = 0
                k = 0
                if len(row_sorted_a) > 2:
                    for nextn in row_sorted_a:
                        nextn = int(nextn)
                        if nextn == j_tmp or nextn == i_tmp:
                            continue
                        if k==n_cut:break
                        dist = dm[a,nextn]
                        if xyz_file is None:
                            at = mol.GetAtomWithIdx(nextn)
                            num = at.GetAtomicNum()
                            at = at.GetSymbol()
                        else:
                            at = atomtypes[nextn]
                            num = pt.GetAtomicNumber(at)
                        if bondAngles:
                            # distance of the neighbour to the OTHER pair atom
                            # (encodes the angle information implicitly)
                            other = i_tmp if a==j_tmp else j_tmp
                            distb = dm[other,nextn]
                            if addBonds:
                                bndb = mol.GetBondBetweenAtoms(a, nextn)
                                if bndb is not None:
                                    bnd_typeb = int(bndb.GetBondTypeAsDouble())
                                    if bndb.GetIsAromatic():
                                        #bnd_type=randint(1,2)
                                        bnd_typeb = 4
                                else:
                                    bnd_typeb = 0
                                row.extend([num, dist, distb,bnd_typeb])
                                if verbose:
                                    print('{:4d}{:>3d}{:8.3f}{:8.3f}{:4d}'.format(nextn+1,num,dist,distb,bnd_typeb),end='')
                            else:
                                row.extend([num, dist,distb])
                                if verbose:
                                    print('{:4d}{:>3s}{:3d}{:8.3f}{:8.3f}'.format(nextn+1,at,num,dist,distb),end='')
                        else:
                            row.extend([num, dist])
                            if verbose:
                                print('{:4d}{:>3s}{:3d}{:8.3f}'.format(nextn+1,at,num,dist),end='')
                        k += 1
                        count += 1
                # padding
                while count<n_cut:
                    count += 1
                    if verbose:
                        print('{:>4d}{:>3s}{:3d}{:8.3f}'.format(0,"NA", 0, fillNa),end='')
                    row.extend([0, fillNa])
                    if bondAngles:
                        row.extend([fillNa])
            if verbose: print('{:4d}'.format( bnd_type),end='')
            row.append(bnd_type)
            df.append(row)
            # row label encodes source file, sdf position and atom pair
            index.append(sourcename + '_pos' + str(pos+1) + '_' + str(i_tmp + 1) + 'x' + str(j_tmp + 1))
    try:
        df = pd.DataFrame(df)
        colnames = ['id1','id2','q','ata','atb','distab','ata1','dista1','ata2','dista2','ata3','dista3','atb1','distb1','atb2','distb2','atb3','distb3','bond']
        if addBonds:
            colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1', 'dista1b','bonda1', 'ata2', 'dista2',
                        'dista2b','bonda2', 'ata3', 'dista3', 'dista3b','bonda3',
                        'atb1', 'distb1', 'distb1a','bondb1', 'atb2', 'distb2', 'distb2a','bondb2', 'atb3', 'distb3', 'distb3a','bondb3', 'bond']
        elif bondAngles:
            colnames = ['id1', 'id2', 'q', 'ata', 'atb', 'distab', 'ata1', 'dista1','dista1b', 'ata2', 'dista2','dista2b', 'ata3', 'dista3','dista3b',
                        'atb1', 'distb1','distb1a', 'atb2', 'distb2','distb2a', 'atb3', 'distb3','distb3a','bond']
        if len(colnames)!=len(df.columns):
            logging.error("Mismatch in dataframe colums for %s - SMILES: %s"%(sourcename+'_pos'+str(pos+1), Chem.MolToSmiles(mol)))
        df.columns = colnames
        df.index = index
    except ValueError:
        #i.e. for empty dataframes
        df = None
    return df
def convert_sdf2dataframe(infile, outfile="moldat.csv", fillNa=np.nan, sanitize=True, tempsave=False, useSelectionRules=True, skipH=False, addBonds=True, sample=None, debug=False, verbose=False):
    """
    Generate training dataset from an sd file: sd file -> Pandas DataFrame.

    :param infile: sd file used for training
    :param outfile: feature matrix as .csv file (written only when tempsave=True)
    :param fillNa: fill value for NA positions
    :param sanitize: switch this off for special molecules RDKit cannot digest, should be True in order to have aromatic bonds
    :param tempsave: save temporary data
    :param useSelectionRules: apply rules to filter nonsense structures
    :param skipH: remove hydrogens
    :param addBonds: inject neighbor bonds to feature matrix
    :param sample: subsample dataset fraction [0-1]
    :param debug: write molecules that fail sanitization to tmp_pos<i>.sdf
    :param verbose: verbosity on/off
    :return: feature matrix as pandas dataframe, or None if no data could be generated
    """
    logging.info("Generating feature using RDKit matrix from: %s -- with options skipH (%r) iterative(%r) filterRubbish(%r) "%(infile,skipH,addBonds,useSelectionRules))
    if sample is not None:
        logging.info("Subsampling fraction %4.2f of dataset"%(sample))
        # fixed seed so subsampling is reproducible across runs
        np.random.seed(42)
    df_new = None
    suppl = Chem.SDMolSupplier(infile, removeHs=skipH, sanitize=False)
    count = 0
    for i, mol in enumerate(suppl):
        if sanitize:
            try:
                Chem.SanitizeMol(mol)  # adding aromatic bonds...we may have a problem here
            except ValueError:
                logging.info("Skipping sanitization for molecule at pos:" + str(i+1))
                if debug:
                    w = Chem.SDWriter('tmp_pos'+str(i+1)+'.sdf')
                    w.write(mol)
                    w.close()
        if mol is None:
            # we cannot use it then...
            logging.info("SKIPPING molecule at pos:"+str(i+1))
            logging.error("SKIPPING molecule at pos:" + str(i+1))
            continue
        if sample is not None and np.random.random_sample() > sample:
            continue
        df_mol = extract_features(mol, infile, i, verbose=verbose, printHeader=True, fillNa=fillNa, useSelectionRules=useSelectionRules, skipH=skipH, addBonds=addBonds)
        if df_new is None:
            df_new = df_mol
        else:
            # pd.concat silently drops a None entry, so a rejected molecule
            # (extract_features returned None) does not break the pile-up
            df_new = pd.concat([df_new, df_mol], axis=0)
        count += 1
    logging.info("Processed total of >%d< molecules" % (count))
    if df_new is None:
        # BUG FIX: the old code only logged here and then crashed below on
        # df_new['bond'] — bail out explicitly instead.
        logging.error("There was a problem generating the data!")
        return None
    if tempsave:
        logging.info("%3d Generated temp file: %s" % (i + 1, outfile))
        df_new.to_csv(outfile, index=True)
    logging.info("Bond types: \n%r"%(df_new['bond'].value_counts()))
    logging.info("Total bonds: %r\n" % (df_new['bond'].value_counts().sum()))
    return(df_new)
def convert_sdfiles2csv(file_list=None, base_dir='', outdat='train_dat.csv', method='UFF', skipH=False, addBonds=False, sample=0.25, verbose=False):
    """
    Allows for training use a list of filenames, for internal testing.

    :param file_list: list of .sd (or .smi) files
    :param base_dir: location of those files
    :param outdat: .csv file with feature matrix and target vectors
    :param method: forcefield used to embed SMILES input in 3D
    :param skipH: remove hydrogens
    :param addBonds: inject neighbor bonds to feature matrix
    :param sample: subsample dataset fraction [0-1]
    :param verbose: verbosity on/off
    """
    # avoid the mutable-default-argument pitfall of the previous version
    if file_list is None:
        file_list = []
    finalf = outdat
    for i, fname in enumerate(file_list):
        infile = base_dir + fname
        if not os.path.isfile(infile):
            logging.critical("File not found:"+infile)
            logging.critical("CWD:"+os.getcwd())
            sys.exit(1)
        outfile = 'moldat_tmp.csv'
        if infile.endswith('.smi'):
            # SMILES input is first converted to an SDF via 3D embedding
            infile = convert_smiles2sdfile(smifile=infile, outdat=outfile, method=method, verbose=verbose)
            infile = infile.replace(".smi",".sdf")
            print(infile)
        df = convert_sdf2dataframe(infile=infile, outfile=outfile, fillNa=9999.0, skipH=skipH, addBonds=addBonds, sample=sample, verbose=verbose)
        if df is None: continue
        outstr = 'writing'
        mode = 'w'
        header = True
        if os.path.isfile(finalf):
            # append to an existing csv without repeating the header
            mode = 'a'
            header = False
            outstr = 'appending'
        # BUG FIX: use a dedicated handle name — the old code shadowed the
        # loop variable `f` with the open file handle
        with open(finalf, mode) as fh:
            df.to_csv(fh, header=header, index=True)
        print(df.head())
        logging.info("File: %3d - %s .csv file to: %s" % (i + 1, outstr, finalf))
def train_from_csv(filename, grid_search=False, useRF=False, plotClassifier=False, save_clf='clf.p',verbose=False):
    """
    Train bond data with sklearn classifier, final model gets pickled.

    :param filename: .csv file with feature matrix (index in first column, target in column 'bond')
    :param grid_search: Do a parameter search on grid
    :param useRF: use RandomForestClassifier instead of GradientBoostingClassifier
    :param plotClassifier: additionally fit a shallow decision tree and render it via graphviz
    :param save_clf: filename for the pickled classifier
    :param verbose: verbosity on/off
    :return: trained scikit-learn model
    """
    logging.info("Training data on dataset:")
    df = pd.read_csv(filename,index_col=0)
    if 'id1' in df.columns and 'id2' in df.columns:
        # atom indices are bookkeeping, not features
        df.drop(['id1', 'id2'], axis=1,inplace=True)
    logging.info("Shape : %d X %d"%(df.shape[0],df.shape[1]))
    logging.info("Features: %s" % (df.columns))
    # remove similar data
    logging.info("Droping duplicates...")
    df.drop_duplicates(inplace=True)
    logging.info("Shape : %d X %d" % (df.shape[0], df.shape[1]))
    y = df['bond']
    X = df.drop(['bond'],axis=1,inplace=False)
    if plotClassifier:
        # BUG FIX: export_graphviz is a module-level function in sklearn.tree,
        # not an estimator method, and out_file must be None so the dot source
        # is returned as a string (out_file='tree' returned None).
        from sklearn.tree import export_graphviz
        tree = DecisionTreeClassifier(max_depth=5)
        tree.fit(X, y)
        dot_data = export_graphviz(tree, out_file=None)
        import graphviz
        graph = graphviz.Source(dot_data)
        graph.render("decisiontree")
    n_jobs = 1
    n_splits = 4
    # BUG FIX: parameters was only defined in the gradient-boosting branch,
    # so useRF=True together with grid_search=True raised NameError.
    parameters = {}
    if useRF:
        model = RandomForestClassifier(n_estimators=250, max_depth=None, min_samples_leaf=5, n_jobs=n_jobs,
                                       max_features=11, oob_score=False)
    else:
        model = GradientBoostingClassifier(n_estimators=1000,learning_rate=0.1,max_depth=5,verbose=1)
    if grid_search:
        n_jobs = 4
        cv = StratifiedKFold(n_splits=n_splits)
        model = GridSearchCV(model, parameters, n_jobs=n_jobs, verbose=2, scoring='f1_micro', cv=cv,refit=True)
        model.fit(X,y)
        means = model.cv_results_['mean_test_score']
        stds = model.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, model.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
        print(model)
    else:
        logging.info("Fitting classifier: %s"%(model))
        model.fit(X, y)
    pickle.dump(model,open( save_clf, "wb" ))
    logging.info("Saving classifier as: %s"%(save_clf))
    return(model)
def train_job(filename, reset=True, eval=False, fmethod='UFF', skipH=False, iterative=False, sample=False, useRF=False,verbose=False):
    """
    Train from a new dataset or append data, starting from a .smi or .sd file.

    :param filename: name of .smi of .sd file
    :param reset: removes old training data
    :param eval: build evaluation data and run evaluation instead of fitting
    :param fmethod: forcefield used for .smi input
    :param skipH: remove hydrogens during feature generation
    :param iterative: also build/fit the second-stage (bond-aware) classifier
    :param sample: subsample fraction passed to feature generation
    :param useRF: train a random forest instead of gradient boosting
    :param verbose: verbosity on/off
    """
    train_file = 'eval_dat.csv' if eval else 'train_dat.csv'
    if eval:
        # evaluation always starts from a freshly generated csv
        reset = True
    iter_file = ""
    if iterative and not eval:
        logging.info("Iterative mode switched ON!")
        iter_file = train_file.replace("_dat","_iter")
    if useRF and not eval:
        logging.info("INFO: Using Random Forest for training!")
    if reset:
        # drop stale csv files ("" is never a file, so iter_file is safe here)
        for stale in (train_file, iter_file):
            if os.path.isfile(stale):
                os.remove(stale)
    if filename.endswith('.sdf') or filename.endswith('.sd'):
        convert_sdfiles2csv(file_list=[filename], outdat=train_file, skipH=skipH, addBonds=False, sample=sample, verbose=verbose)
        if iterative and not eval:
            convert_sdfiles2csv(file_list=[filename], outdat=iter_file, skipH=skipH, addBonds=True, sample=sample, verbose=verbose)
    elif filename.endswith('.smi'):
        logging.info("Using forcefield for optimization: %s" % (fmethod))
        convert_sdfiles2csv(file_list=[filename], outdat=train_file, method=fmethod, skipH=skipH, addBonds=False)
        if iterative and not eval:
            convert_sdfiles2csv(file_list=[filename], outdat=iter_file, method=fmethod, skipH=skipH, addBonds=True, verbose=verbose)
    if not os.path.isfile(train_file):
        sys.stderr.write("ERROR: Missing training data file: %s!\n"%(train_file))
        sys.exit(1)
    if eval:
        evaluate(train_file,iterative=iterative, verbose=verbose)
        return
    train_from_csv(train_file, useRF=useRF, verbose=verbose)
    if iterative:
        train_from_csv(iter_file,useRF=useRF, save_clf="clf_iter.p", verbose=verbose)
def eval_job(filename, skipH=False, iterative=False,verbose=False):
    """
    Per-molecule evaluation: predict bonds for every entry of an sd file and
    count how many molecules are reproduced exactly.

    :param filename: filename for evaluation
    :param skipH: omit hydrogen
    :param iterative: use 2nd classifier
    :param verbose: verbose mode
    :return: -
    """
    print("Evaluation run with option: noH(%r)" % (skipH))
    print("Loading classifier...")
    with open('clf.p', "rb") as fh:
        clf = pickle.load(fh)
    clf_iter = None
    if iterative:
        with open('clf_iter.p', "rb") as fh:
            clf_iter = pickle.load(fh)
    suppl = Chem.SDMolSupplier(filename, removeHs=skipH, sanitize=iterative)
    nok = 0
    nfalse = 0
    for pos, mol in enumerate(suppl):
        if mol is None:
            continue
        res = generate_predictions(mol, skipH=skipH, iterative=True, forceAromatics=False, maxiter=1, verbose=verbose,
                                   clf=clf, clf_iter=clf_iter, isEval=True)
        if res is None:
            continue
        if pos % 50 == 0:
            logging.info("%d %r\n" % (pos, res))
        if res:
            nok += 1
        else:
            nfalse += 1
    # NOTE: unparseable/skipped molecules still count towards nall
    nall = len(suppl)
    acc = nok / float(nall)
    logging.info("\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f" % (nall, nok, nfalse, acc))
def evaluate(filename_test,filename_train='train_dat.csv',plotting=True,iterative=False,verbose=False):
    """
    Evaluate on dataset with known bond info, molecule accuracy is computed afterwards.

    :param filename_test: name of .csv file with feature matrix and targets
    :param filename_train: training csv for distance/bond-type analysis
        (currently disabled, see note below)
    :param plotting: show misclassification stats and plots
    :param iterative: not supported in this fast evaluation mode (exits)
    :param verbose: verbosity on/off
    :return: feature matrix X augmented with predictions and probabilities
    """
    df = pd.read_csv(filename_test,index_col=0)
    # NOTE(review): this overrides the caller-supplied filename_train and
    # permanently disables the train-data analysis branch below — looks like
    # a debugging leftover; confirm before removing.
    filename_train=None
    # shown train_data
    if filename_train is not None:
        logging.info("Analyze train data...")
        df_train = pd.read_csv(filename_train,index_col=0)
        print(df_train.shape)
        df_train['bondtype']=df_train['bond'].astype('category')
        # restrict the box plot to C-C pairs
        df_train = df_train[df_train.ata==6]
        df_train = df_train[df_train.atb==6]
        if plotting:
            ax = sns.boxplot(x="bond", y="distab", data=df_train[['distab','bond']])
            ax.set(ylabel='C-C distance', xlabel='bond type')
            #ax.set(xticklabels=[])
            plt.show()
    logging.info("Evaluate data set: " + filename_test)
    logging.info("Loading classifier...")
    clf = pickle.load(open("clf.p", "rb"))
    logging.info("Loading test set with %d rows from file %s\n"%(df.shape[0],filename_test))
    y = df['bond']
    X = df.drop(['bond','id1','id2'],axis=1,inplace=False)
    yprob = clf.predict_proba(X)
    ypred = clf.predict(X)
    score = accuracy_score(y,ypred)
    score2 = f1_score(y,ypred,average='weighted')
    logging.info("ACCURACY:%0.3f - F1-score: %0.3f\n" % (score,score2))
    # per-class probabilities: columns 1..4 are single/double/triple/aromatic
    # (assumes the classifier was trained on classes 0..4 — verify)
    X['bond_pred'] = ypred
    X['p(-)'] = yprob[:, 1]
    X['p(=)'] = yprob[:, 2]
    X['p(#)'] = yprob[:, 3]
    X['p(a)'] = yprob[:, 4]
    X['bond'] = y
    if plotting:
        print("Misclassification stats:")
        idx = (ypred != y)
        df_tmp = X[idx.values]
        print(df_tmp[['ata','atb','distab','bond','bond_pred']].head(200).sort_values(['ata']))
        plot_classification_results(y,ypred)
    # group pair rows back into molecules and require all bonds to match
    mol_df_list = mol_dataframe_generator(X)
    all=0
    ok=0
    not_ok=0
    false_indices=[]
    for name, df_sub in mol_df_list:
        all += 1
        if iterative:
            print("ERROR: Iterative - does not work in fast evaluation mode..")
            sys.exit(1)
        # ok no coordinates/no dm how to get feature matrix...????
        if np.array_equal(df_sub['bond_pred'].values, df_sub['bond'].values):
            ok += 1
        else:
            # print("FALSE: %s"%(name))
            not_ok += 1
            # remember position of the first mismatching bond
            mask = df_sub['bond_pred'] != df_sub['bond']
            idx = np.argmax(mask)
            false_indices.append(idx)
    acc = ok/float(all)
    print(false_indices)
    print("\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f"%(all,ok,not_ok,acc))
    return(X)
def evaluate_OB(filename='fullerene_ml.sdf', verbose=False):
    """
    Evaluation via Open Babel: round-trip each molecule through xyz -> obabel
    -> sdf and compare the perceived bonds against the reference bonds.

    :param filename: sd file
    :param verbose: True for verbose/interactive inspection
    :return: -
    """
    logging.info("Evaluating %s via OBabel"%(filename))
    suppl = Chem.SDMolSupplier(filename, removeHs=False, sanitize=True)
    nok = 0
    nfalse = 0
    nall = len(suppl)
    # BUG FIX: raw_input only exists on Python 2; fall back to input() on 3
    try:
        _input = raw_input
    except NameError:
        _input = input
    for i, mol in enumerate(suppl):
        if mol is None: continue
        xyz_str = mol2xyz(mol)
        #remove H for comparison with OB
        mol = Chem.RemoveHs(mol)
        df_orig = extract_features(mol, "babel_orig", (i+1), skipH=True)
        if df_orig is None: continue
        bond_orig = df_orig['bond']
        #generate xyz for OB prediction without H
        # BUG FIX: the import already binds the StringIO class, so the old
        # StringIO.StringIO(...) raised AttributeError
        myfile = StringIO(xyz_str)
        cmd_call = ["obabel", "-ixyz", "-osdf"]
        # universal_newlines=True keeps the pipes in text mode on Python 3
        p = subprocess.Popen(cmd_call, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
        molblock, err = p.communicate(myfile.read())
        #always switch off H for comparison of main element bonds only
        mol_pred = Chem.MolFromMolBlock(molblock,removeHs=True,sanitize=False)
        if mol_pred is None:
            nfalse += 1
            continue
        df = extract_features(mol_pred, "obabel", 0, skipH=True)
        if df is None:
            nfalse += 1
            continue
        if len(bond_orig)!=len(df['bond'].values):
            logging.error("Original (%d) and predicted bond vector (%d) have different length!"%(len(bond_orig),len(df['bond'].values)))
            if verbose:
                mol_pred_noH = Chem.RemoveHs(mol_pred)
                Chem.Compute2DCoords(mol_pred_noH)
                Chem.Compute2DCoords(mol)
                img = Draw.MolsToGridImage([mol_pred_noH, mol], molsPerRow=2, subImgSize=(400, 400),
                                           legends=['ob' + str(i + 1), 'orig' + str(i + 1)])
                img.show()
        if np.array_equal(bond_orig.values, df['bond'].values):
            nok+=1
        else:
            if verbose:
                # interactive inspection: show both depictions, ask the user
                mol_pred_noH = Chem.RemoveHs(mol_pred)
                Chem.Compute2DCoords(mol_pred_noH)
                Chem.Compute2DCoords(mol)
                img = Draw.MolsToGridImage([mol_pred_noH, mol], molsPerRow=2, subImgSize=(400, 400),
                                           legends=['ob' + str(i + 1), 'orig' + str(i + 1)])
                img.show()
                res = _input()
                if 'n' in res.lower() or "f" in res.lower():
                    nfalse += 1
                    print("FALSE: %d/%d" % (nfalse,len(suppl)))
                    # img.save('images/' + cname + '_' + str(i) + '.png')
                else:
                    nok += 1
                    print("OK: %d/%d" % (nok,len(suppl)))
                with open('ob_failure'+str(i+1)+'.sdf', 'w') as f:
                    f.write(molblock)
                with open('ob_reference'+str(i+1)+'.sdf', 'w') as f:
                    f.write(Chem.MolToMolBlock(mol))
            else:
                #check for ambigious double bonds of C-O,N-O and P-O
                idx = np.where(bond_orig.values != df['bond'].values)
                for k in idx[0]:
                    try:
                        ata = df_orig.iloc[k][['ata']].values[0]
                        atb = df_orig.iloc[k][['atb']].values[0]
                    except IndexError:
                        print(Chem.MolToSmiles(mol))
                        continue
                    if (int(ata)==8 and int(atb)== 7) or ( int(ata)==8 and int(atb)== 6) or ( int(ata)==15 and int(atb)== 8):
                        # treat resonance-ambiguous X-O bonds as single on both sides
                        bond_orig.values[k]=1
                        df['bond'].values[k]=1
                if np.array_equal(bond_orig.values, df['bond'].values):
                    nok += 1
                    print("OK: %d/%d" % (nok, len(suppl)))
                else:
                    nfalse += 1
                    with open('ob_failure'+str(i+1)+'.sdf', 'w') as f:
                        f.write(molblock)
                    with open('ob_reference'+str(i+1)+'.sdf', 'w') as f:
                        f.write(Chem.MolToMolBlock(mol))
                    print("FALSE: %d/%d (%6.3f%%)" % (nfalse, len(suppl), 1.0 - nfalse / float(i + 1)))
    acc = nok / float(nall)
    logging.info("\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f" % (nall, nok, nfalse, acc))
def generate_multi_predictions(filename_list, skipH=False, iterative=False, forceAromatics=False, maxiter=1, isEval=False, verbose=False, **kwargs):
    """
    Generate predictions, i.e. one combined sd file, from a list of xyz files.

    :param filename_list: list of filenames
    :param skipH: omit hydrogen
    :param iterative: 2-step prediction
    :param forceAromatics: deprecated
    :param maxiter: maxiteraions
    :param isEval: evaluation run with known bonds
    :param verbose: verbose
    :param kwargs: additional args
    :return: -
    """
    # load the classifiers once and pass them through to every prediction
    with open('clf.p', "rb") as fh:
        clf = pickle.load(fh)
    clf_iter = None
    if iterative:
        with open('clf_iter.p', "rb") as fh:
            clf_iter = pickle.load(fh)
    res_filename = 'multi.sdf'
    writer = Chem.SDWriter(res_filename)
    writer.SetKekulize(False)
    for fname in filename_list:
        block = generate_predictions(fname, skipH=skipH, iterative=iterative, forceAromatics=forceAromatics, maxiter=maxiter, isEval=isEval, writeFile=False, verbose=verbose, clf=clf, clf_iter=clf_iter)
        if block is None:
            logging.info("Skipping file %s - could not generate mol block!"%(fname))
            continue
        writer.write(Chem.MolFromMolBlock(block, sanitize=False))
    logging.info("Writing multiple .xyz files to: %s"%(res_filename))
    writer.close()
def generate_predictions(filename, skipH=False, iterative=False, forceAromatics=False, maxiter=1, isEval=False, writeFile=False, verbose=True, **kwargs):
    """
    Generate predictions for single molecule.

    Accepts an .xyz/.pdb filename or (in evaluation mode) an RDKit Mol;
    predicts all bond types via the first-stage classifier and optionally
    refines them iteratively with the second-stage (bond-aware) classifier.

    :param filename: single filename or RDKit Mol (evaluation)
    :param skipH: omit hydrogen
    :param iterative: 2-step prediction
    :param forceAromatics: deprecated
    :param maxiter: maxiteraions
    :param isEval: evaluation run with known bonds
    :param writeFile: write the result to <name>_ml.sdf instead of returning it
    :param verbose: verbose
    :param kwargs: additional args (may carry pre-loaded 'clf'/'clf_iter')
    :return: sd molblock as string (or True/False match flag in eval mode)
    """
    clf = None
    clf_iter = None
    # callers may pass pre-loaded classifiers to avoid re-unpickling per file
    if kwargs is not None and 'clf' in kwargs.keys():
        #print(kwargs.keys())
        clf = kwargs['clf']
        clf_iter = kwargs['clf_iter']
    if iterative and not os.path.isfile("clf_iter.p"):
        sys.stderr.write("ERROR: Please train classifier in iterative mode (--iterative) first!\n")
        sys.exit(1)
    elif iterative:
        if clf_iter is None:
            print("Loading classifier clf_iter.p...")
            clf_iter = pickle.load(open('clf_iter.p', "rb"))
    #from evaluation
    if isinstance(filename, Chem.rdchem.Mol):
        mol = filename
        filename = 'eval'
        atomtypes, coords, q, title = read_mol(mol)
        X = predict_bonds(mol, q=q, skipH=skipH, iterative=False, forceAromatics=forceAromatics, clf=clf, eval=True, verbose=False)
        if X is None: return(None)
    elif not filename.lower().endswith('.xyz') and not filename.lower().endswith('.pdb'):
        sys.stderr.write("ERROR: Need .xyz/.pdb file for prediction!\n")
        sys.exit(1)
    elif not os.path.isfile("clf.p"):
        sys.stderr.write("ERROR: Please train classifier first!\n")
        sys.exit(1)
    elif filename.lower().endswith(".xyz"):
        atomtypes, coords, q, title = read_xyz(filename, skipH=skipH)
        X = predict_bonds(filename, q=q, skipH=skipH, iterative=False, forceAromatics=forceAromatics, clf=clf, verbose=verbose)
    elif filename.lower().endswith(".pdb"):
        atomtypes, coords, q, title = read_pdbfile(filename, skipH=skipH)
        X = predict_bonds(filename, q=q, skipH=skipH, iterative=False, forceAromatics=forceAromatics, clf=clf, verbose=verbose)
    if isEval:
        writeFile=False
    if verbose:
        print(X.head(10))
        print("Total Entropy: %6.2f" % (X['S'].sum()))
        print("Max Entropy  : %6.2f" % ( X['S'].max()))
        print("Bonds        : %6d" % ((X['bond']>0)).sum())
    ins = create_sdfile(filename, atomtypes, coords, X)
    if iterative:
        # second stage: re-predict with the bond-aware classifier and flip
        # one high-confidence disagreement per iteration
        if verbose: print("Iterative prediction using bond estimates...")
        if isEval:
            bond_orig = X['bond_orig']
            X = X.drop(['bond_orig'], axis=1)
        maxiter = 10
        poscounter =0
        while poscounter<maxiter:
            X2 = predict_bonds(ins, q=q, skipH=skipH, iterative=True, forceAromatics=forceAromatics, clf=clf_iter, verbose=False)
            if X2 is None:
                return(False)
            #cluster similar bonds and flip them together...!!!
            if verbose: print(X2.head(40))
            #create probability gradient
            #dX2['dp(X)'] = X2['p(X)'] - X['p(X)'].values
            dX2 = pd.DataFrame(index=X2.index)
            dX2['dp(-)'] = X2['p(-)'] - X['p(-)'].values
            dX2['dp(=)'] = X2['p(=)'] - X['p(=)'].values
            dX2['dp(#)'] = X2['p(#)'] - X['p(#)'].values
            dX2['dp(a)'] = X2['p(a)'] - X['p(a)'].values
            grad = dX2.values
            #dX2['rand'] = np.random.beta(5.0,1.0,dX2.shape[0])
            #dX2['rand'] = np.random.ranf(dX2.shape[0])
            # only bonds whose probability improved by >0.25 are candidates
            dX2['update'] = np.max(grad, axis=1)>0.25
            #dX2['keep'] = np.max(grad, axis=1) < 0.5
            mask = dX2['update'].values
            #what to do here???? switch only update one after another
            #charge??
            # keep all candidates except the poscounter-th one; stop once
            # fewer candidates remain than iterations done
            idx = np.where(mask==True)[0]
            if poscounter<len(idx):
                mask[idx[poscounter]] = False
            else:
                break
            if verbose:
                logging.info("Total Entropy: %6.2f" % (X2['S'].sum()))
                logging.info("Max Entropy  : %6.2f" % (X2['S'].max()))
                logging.info("Bonds        : %6d" % ((X2['bond'] > 0)).sum())
                logging.info("Max grad     : %6.2f" % (grad.max()))
            # adopt the second-stage predictions for the masked bonds
            X.loc[mask,'bond'] = X2.loc[mask,'bond'].values
            X.loc[mask, 'p(-)'] = X2.loc[mask, 'p(-)'].values
            X.loc[mask, 'p(=)'] = X2.loc[mask, 'p(=)'].values
            X.loc[mask, 'p(#)'] = X2.loc[mask, 'p(#)'].values
            X.loc[mask, 'p(a)'] = X2.loc[mask, 'p(a)'].values
            if verbose: print(X.head(40))
            dX2['bond_X'] = X['bond'].values
            dX2['bond_X2'] = X2['bond'].values
            # regenerate the sd block so the next round sees the new bonds
            ins = create_sdfile(filename, atomtypes, coords, X)
            poscounter+=1
        if isEval:
            dX2['bond_orig'] = bond_orig
            if verbose:
                print(bond_orig)
                print(X['bond'])
            if np.array_equal(bond_orig.values, X['bond'].values):
                return (True)
            else:
                return (False)
    if writeFile:
        # sdf format does not know delocalized charge...!?
        filename = os.path.basename(filename).replace('.xyz', '_ml.sdf').replace('.pdb', '_ml.sdf')
        with open(filename, 'w') as f:
            f.write(ins)
        logging.info("ML-generated SDF written to: " + filename)
    elif isEval:
        # non-iterative evaluation: compare against reference bonds directly
        if np.array_equal(X.bond_orig.values, X['bond'].values):
            return (True,ins)
        else:
            if verbose: print(X[['bond', 'bond_orig']])
            return (False,ins)
    else:
        return(ins)
def predict_bonds(filename, q=0, skipH=False, iterative=False, forceAromatics=False, clf=None, eval=False, verbose=False):
    """
    Create dataframe with bond info for 1 molecule
    Either from SDF/mol or from xyz_file

    :param filename: .xyz/.pdb path (default mode), RDKit mol (eval mode) or
                     molblock string (iterative mode)
    :param q: molecular charge; only attached as a feature in iterative mode
    :param skipH: filter hydrogen pairs from the returned frame
    :param iterative: use the second-stage classifier ("clf_iter.p") on a molblock
    :param forceAromatics: force bond=4 (aromatic) wherever p(a) > 0.4
    :param clf: pre-loaded classifier; unpickled from disk when None
    :param eval: treat `filename` as an RDKit mol and keep its original bonds
    :param verbose: extra printing
    :return: pandas dataframe with one row per atom pair (ids, features,
             predicted bond, class probabilities, entropy), or None when
             feature extraction fails
    """
    if not iterative:
        if eval:
            # in eval mode `filename` is actually an RDKit mol
            m = filename
            df = extract_features(m, "rdkitmol", 0, verbose=verbose, printHeader=verbose, fillNa=9999.0, xyz_file=None,
                                  skipH=skipH, addBonds=False, useSelectionRules=False)
            if df is None:
                return None
            # remember reference bonds so callers can compare prediction vs. truth
            bond_orig = df['bond']
        else:
            df = extract_features(None, filename, 0, verbose=False, printHeader=False, fillNa=9999.0, xyz_file=filename,
                                  skipH=skipH)
            if df is None:
                logging.info("Could not generate dataframe from %s"%(filename))
                return None
        clf_name = "clf.p"
    else:
        # iterative mode: `filename` is a molblock string from a previous pass
        # NOTE(review): when iterative=True and eval=True, `bond_orig` used near
        # the end of this function is never assigned -> NameError; confirm that
        # eval is only ever requested non-iteratively.
        m = Chem.MolFromMolBlock(filename, sanitize=False)
        df = extract_features(m, "rdkitmol", 0, verbose=verbose, printHeader=verbose, fillNa=9999.0, xyz_file=None,
                              skipH=skipH, addBonds=True, useSelectionRules=False)
        if df is None:
            return None
        df['q'] = q
        clf_name = "clf_iter.p"
    order = df[['id1', 'id2']]
    X = df.drop(['bond', 'id1', 'id2'], axis=1)
    if verbose: print("X shape n=%d m=%d "%(X.shape[0],X.shape[1]))
    if clf is None:
        print("Loading classifier %s..." % (clf_name))
        # security note: unpickling executes arbitrary code - only load trusted model files
        clf = pickle.load(open(clf_name, "rb"))
    yprob = clf.predict_proba(X)
    ypred = clf.predict(X)
    try:
        # per-class probabilities: no bond, single, double, triple, aromatic
        X['p(X)'] = yprob[:,0]
        X['p(-)'] = yprob[:,1]
        X['p(=)'] = yprob[:,2]
        X['p(#)'] = yprob[:,3]
        X['p(a)'] = yprob[:,4]
        X['S'] = 0.0
        # Shannon entropy of the class distribution = prediction uncertainty
        # (+1E-15 guards against log(0))
        for i in [0,1,2,3,4]:
            X['S'] = X['S'] - np.log(yprob[:,i]+1E-15)*yprob[:,i]
        X['bond'] = ypred
        X['id1'] = order['id1']
        X['id2'] = order['id2']
    except IndexError as e:
        # fewer than 5 probability columns -> a bond class was absent at training time
        sys.stderr.write("I/O error{0}\n".format(e))
        sys.stderr.write("Are there have enough training data for all bond types?\n")
        sys.exit(1)
    X = X[['id1','id2','q', 'ata', 'atb', 'distab', 'bond', 'p(-)', 'p(=)', 'p(#)', 'p(a)','p(X)','S']]
    # check high aromaticity case
    if forceAromatics:
        logging.info("Forcing aromaticity for bonds with p(a)>0.4!")
        potaroma_idx = X['p(a)'] > 0.4
        X.loc[potaroma_idx, 'bond'] = 4
    if eval:
        X['bond_orig'] = bond_orig
    # NOTE(review): this filter keeps rows where ata != 1 OR atb == 1; the
    # `atb == 1` half looks suspicious (perhaps `!=` was intended) - verify.
    if skipH: X = X[(X.ata != 1) | (X.atb == 1)]
    return(X)
def convert_smiles2sdfile(smifile="./smi_repository/bbp2.smi", method='UFF', outdat='train_dat.csv'):
    """
    Generate a 3D SD file from SMILES via distance geometry plus force-field
    optimisation.

    :param smifile: .smi file used as basis for training
    :param method: 'UFF', 'MMFF', 'ETKDG', or anything else for multi-conformer embedding
    :param outdat: filename of training data (not used inside this function)
    :return: path of the written .sdf file
    """
    logging.info("Selected optimization method: %s"%(method))
    supplier = Chem.SmilesMolSupplier(smifile,delimiter='\t ',titleLine=False, sanitize=True)
    molecules = [candidate for candidate in supplier if candidate is not None]
    sdfile = smifile.replace('.smi', '_' + method + '.sdf')
    writer = Chem.SDWriter(sdfile)
    for idx, mol in enumerate(molecules):
        # keep only the largest-priority fragment when several are present
        fragments = Chem.GetMolFrags(mol, asMols=True)
        if len(fragments) > 1:
            logging.info("Using only first fragment: %s"%(Chem.MolToSmiles(mol)))
            mol = fragments[0]
        # explicit hydrogens are needed in the SD file
        mol = Chem.AddHs(mol)
        status = 0
        try:
            if method == 'UFF':
                Chem.EmbedMolecule(mol)
                status = Chem.UFFOptimizeMolecule(mol, maxIters=200)
            elif method == 'MMFF':
                Chem.EmbedMolecule(mol)
                status = Chem.MMFFOptimizeMolecule(mol, maxIters=200)
            elif method == 'ETKDG':
                Chem.EmbedMolecule(mol, Chem.ETKDG())
            else:
                Chem.EmbedMultipleConfs(mol, 10, Chem.ETKDG())
            # also keep molecules whose optimisation did not converge (status 1)
            if status in (0, 1):
                # restore aromaticity flags before writing
                Chem.SanitizeMol(mol)
                mol.SetProp("SMILES", Chem.MolToSmiles(Chem.RemoveHs(mol)))
                writer.write(mol)
            if status == 1:
                logging.info("Optimization not converged for molecule: %d with SMILES: %s" % (idx, Chem.MolToSmiles(Chem.RemoveHs(mol))))
            elif status == -1:
                logging.info("Forcefield could not be setup for molecule: %d with SMILES: %s" % (idx, Chem.MolToSmiles(Chem.RemoveHs(mol))))
        except ValueError:
            logging.info("Optimization problem for molecule: %d with SMILES: %s"%(idx,Chem.MolToSmiles(Chem.RemoveHs(mol))))
    writer.close()
    logging.info("Writing to SD file:" + sdfile)
    return sdfile
def shuffle_split_sdfile(infile='test.sdf', frac=0.8, rseed=42):
    """
    Shuffle and split molecules within SDF for train / test set generation.
    Each molecule is written to the train file with probability `frac`,
    otherwise to the test file.

    :param infile: input .sdf
    :param frac: fraction of molecules assigned to the train split
    :param rseed: RNG seed, so the split is reproducible
    """
    # BUG FIX: the original did `random.seed = rseed`, which *replaces* the
    # seed function with an int instead of seeding the RNG - the split was
    # never reproducible and `random.seed` was clobbered module-wide.
    random.seed(rseed)
    trainfile = infile.replace('.sdf', '_train.sdf')
    w1 = Chem.SDWriter(trainfile)
    w1c = 0
    testfile = infile.replace('.sdf', '_test.sdf')
    w2 = Chem.SDWriter(testfile)
    w2c = 0
    suppl = Chem.SDMolSupplier(infile, removeHs=False, sanitize=False)
    for i, mol in enumerate(suppl):
        # Bernoulli draw per molecule (not an exact frac split of the count)
        rdm = random.random()
        if rdm < frac:
            w1.write(mol)
            w1c += 1
        else:
            w2.write(mol)
            w2c += 1
    w1.close()
    w2.close()
    print("Finished writing %d train moles / %d test moles"%(w1c,w2c))
def get_stats(infile,skipH=False,addBonds=False):
    """
    Plot bond-length histograms per element pair for an SD file.

    :param infile: name of .sdf
    :param skipH: skip hydrogens during feature extraction
    :param addBonds: pass-through to feature extraction (iterative mode)
    """
    frame = convert_sdf2dataframe(infile=infile, outfile=None, fillNa=9999.0, verbose=False, skipH=skipH,
                                  addBonds=addBonds)
    # only bonded pairs are of interest here
    frame = frame[frame.bond > 0]
    pair_list = [['C', 'C'], ['O', 'C'], ['N', 'C'], ['P', 'C'], ['S', 'C'], ['P', 'O'], ['S', 'O']]
    periodic = Chem.GetPeriodicTable()
    fig = plt.figure()
    plt.suptitle(infile)
    labels = ['-', '=', '#', 'a']
    for plot_no, (sym_a, sym_b) in enumerate(pair_list, start=1):
        panel = fig.add_subplot(2, 4, plot_no)
        panel.set_title(str(sym_a) + '&' + str(sym_b))
        panel.set_xlabel('dist')
        num_a = periodic.GetAtomicNumber(sym_a)
        num_b = periodic.GetAtomicNumber(sym_b)
        # match the pair in either atom order
        selector = ((frame.ata.values == num_a) & (frame.atb.values == num_b)) | \
                   ((frame.ata.values == num_b) & (frame.atb.values == num_a))
        subset = frame[selector]
        # one distance distribution per bond order: single, double, triple, aromatic
        data = [subset.distab[subset.bond == order] for order in (1, 2, 3, 4)]
        panel.hist(data, bins=20, alpha=0.5, stacked=True, histtype='bar', label=labels)
    fig.legend(labels=labels, loc='upper right', ncol=5, labelspacing=0.)
    fig.tight_layout()
    plt.show()
def clean_sdf(infile='test.sdf'):
    """
    Cleans SD file from unreasonable structures via rules from extractFeatures function
    :param infile: .sdf to clean
    """
    outfile = infile.replace('.sdf', '_clean.sdf')
    writer = Chem.SDWriter(outfile)
    supplier = Chem.SDMolSupplier(infile, removeHs=False, sanitize=False)
    rejected = 0
    for idx, molecule in enumerate(supplier):
        # extraction returns None when the selection rules reject the structure
        frame = extract_features(molecule, infile, idx, verbose=False, printHeader=True,
                                 useSelectionRules=True)
        if frame is None or molecule is None:
            rejected += 1
        else:
            writer.write(molecule)
    logging.info("Removed %d files - saving .sdf to: %s "%(rejected,outfile))
    writer.close()
def sanitize_sdf(infile='test.sdf',removeHs=False):
    """
    Sanitize SD file
    :param infile: .sdf to clean
    :param removeHs: strip hydrogens while reading
    """
    outfile = infile.replace('.sdf', '_sane.sdf')
    writer = Chem.SDWriter(outfile)
    dropped = 0
    # RDKit yields None for molecules that fail sanitization
    for molecule in Chem.SDMolSupplier(infile, removeHs=removeHs, sanitize=True):
        if molecule is None:
            dropped += 1
        else:
            writer.write(molecule)
    logging.info("Removed %d files - saving .sdf to: %s "%(dropped,outfile))
    writer.close()
def read_pdbfile(filename, skipH=False):
    """
    Read pdb data.
    :param filename: filename of .pdb file
    :param skipH: Do not read H atoms (NOTE: currently ignored by this function)
    :return: (atomtypes, coordinates, charge, title)
    """
    # delegate the actual extraction to read_mol
    return read_mol(Chem.MolFromPDBFile(filename))
def read_mol(mol):
    """
    Analyze RDKit mol.

    :param mol: RDKit mol with at least one conformer
    :return: (atomtypes, coords, q, title) - element symbols, xyz coordinates,
             formal charge, and an (always empty) title string
    """
    title = ""
    coords = []
    atomtypes = []
    conformer = mol.GetConformer()  # hoisted: identical for every atom
    for i, atom in enumerate(mol.GetAtoms()):
        an = atom.GetSymbol()
        # BUG FIX: GetSymbol() returns a string ('H'), so the old check
        # `an == 1` could never be true and the warning never fired.
        if an == 'H':
            # logging.warn is a deprecated alias of logging.warning
            logging.warning("PDB file should not contain hydrogens!")
        atomtypes.append(an)
        pos = conformer.GetAtomPosition(i)
        coords.append([pos.x, pos.y, pos.z])
    q = Chem.GetFormalCharge(mol)
    return (atomtypes, coords, q, title)
def read_xyz(filename, skipH=False):
    """
    Read xyz data.
    :param filename: filename of .xyz file
    :param skipH: Do not read H atoms
    :return: (atomtypes, coordinates, charge, title)
    https://github.com/pele-python/pele/blob/master/pele/utils/xyz.py
    """
    with open(filename, 'r') as handle:
        natoms = int(handle.readline())
        # second line is the comment/title; strip the trailing newline
        title = handle.readline()[:-1]
        charge = 0
        # an optional "CHARGE=..."/"CHG=..." token in the title carries the charge
        found = re.search("(?:CHARGE|CHG)=([-+]?\d*\.\d+|\d+|[-+]?\d)",title,re.IGNORECASE)
        if found:
            charge = float(found.group(1))
        atomtypes = []
        coords = []
        for _ in range(natoms):
            fields = handle.readline().split()
            if skipH and fields[0].lower() == 'h':
                continue
            atomtypes.append(fields[0])
            coords.append([float(fields[1]), float(fields[2]), float(fields[3])])
    return (atomtypes, coords, charge, title)
def create_sdfile(name, atomtypes, coords, df):
    """
    Creates string with SD (V2000 molblock) info.
    :param name: molecule name
    :param atomtypes: atomic types
    :param coords: coordinates
    :param df: dataframe with ids and bond types
    :return: molblock
    """
    # only pairs that actually carry a bond go into the bond block
    bonded = df[df.bond > 0]
    # header: name line, comment line, blank line, counts line
    parts = [name + "\n", "ML generated sdf\n", "\n"]
    parts.append("%3d%3d 0 0 0 0 0 0 0 0 1 V2000\n" % (len(atomtypes), (bonded.bond > 0).sum()))
    # atom block
    for symbol, xyz in zip(atomtypes, coords):
        parts.append("%10.4f%10.4f%10.4f %-2s 0 0 0 0 0\n" % (xyz[0], xyz[1], xyz[2], symbol))
    # bond block
    for _, row in bonded.iterrows():
        parts.append("%3d%3d%3d 0 0 0 0\n" % (row['id1'], row['id2'], row['bond']))
    parts.append("M END")
    return "".join(parts)
def mol_dataframe_generator(X):
    """
    Yield (molecule_id, sub-frame) pairs grouped by the molecule id embedded
    in the dataframe index.

    The index looks like "<prefix>_pos<molid>_<rest>"; the piece between
    "_pos" and the following "_" is used as the grouping key. Note: this adds
    an 'index_mod' column to X (the caller's frame is mutated).
    """
    X['index_mod'] = X.index.str.split("_pos").str[1].str.split("_").str[0]
    for mol_id, sub_frame in X.groupby('index_mod'):
        yield mol_id, sub_frame
def mol2xyz(mol):
    """
    Converts RDKit mol to xyz.
    :param mol: RDKit mol
    :return: xyz string (atom count, blank comment line, one line per atom)
    """
    atomtypes, coords, charge, title = read_mol(mol)
    lines = ["%d\n\n" % (len(atomtypes))]
    for symbol, xyz in zip(atomtypes, coords):
        lines.append("%3s%10.4f%10.4f%10.4f\n" % (symbol, xyz[0], xyz[1], xyz[2]))
    return "".join(lines)
def plot_classification_results(y, ypred):
    """
    Print a classification report and draw a confusion-matrix heatmap.
    :param y: ground truth bond classes (0..4 = X, -, =, #, a)
    :param ypred: predicted bond classes
    """
    report = classification_report(y, ypred, digits=3, target_names=['X', '-', '=', '#', 'a'])
    print(type(report))
    print(report)
    bond_symbols = list("X-=#a")
    matrix = confusion_matrix(y, ypred, labels=[0, 1, 2, 3, 4])
    cm_frame = pd.DataFrame(matrix, index=bond_symbols, columns=bond_symbols)
    plt.figure(figsize=(10, 7))
    sn.set(font_scale=1.4)  # for label size
    heat = sn.heatmap(cm_frame, annot=True, fmt='d', annot_kws={"size": 16}, cmap="magma_r")  # font size
    heat.set(xlabel='predicted bond type', ylabel='true bond type')
    plt.show()
def set_logging(loglevel = logging.INFO):
    """
    Configure root logging: timestamped records go to 'log.txt', bare
    messages are echoed to stdout.
    :param loglevel: level applied to the root logger and the console handler
    """
    logging.basicConfig(filename='log.txt', level=loglevel,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(loglevel)
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(loglevel)
    console.setFormatter(logging.Formatter('%(message)s'))
    root_logger.addHandler(console)
if __name__ == "__main__":
    # CLI entry point: parse arguments and dispatch to the matching job.
    # importing rdkit here fails fast if the dependency is missing
    import rdkit
    t0 = time()
    pd.set_option('display.max_columns', 30)
    pd.set_option('display.width', 1000)
    np.random.seed(42)
    set_logging()
    description = """
    MAMBA - MAchine-learning Meets Bond Analysis
    Creates .sdf file incl. chemical bond information out of a .xyz file
    Bond perception is learned via machine learning (scikit-learn classifier) from
    previous .sdf or .smi files.
    Internally uses RDKit, numpy, pandas and scikit-learn python packages
    (c) 2018 <NAME>
    Examples:
    1) mamba.py --train largefile.sdf
    2) mamba.py --add special_case.sdf [optional]
    3) mamba.py --predict new_molecule.xyz
    """
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
    # NOTE(review): help_str is assigned but never used
    help_str= """
    OK
    """
    # mutually exclusive operating modes
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-t','--train',action='store_true', help="train - train the parser using a .sdf or .smi file")
    group.add_argument('-a','--add',action='store_true', help='add - add data to the parser using a .sdf or .smi file')
    group.add_argument('-p','--predict',action='store_true',help='predict - create .sdf file using .xyz file as input\n')
    group.add_argument('--eval',action='store_true', help='eval - evaluate .sdf file\n')
    group.add_argument('--evalOB', action='store_true', help='eval - evaluate .sdf file via openbabel \n')
    group.add_argument('--analyze',action='store_true', help='analyze - analyze last evulated structure\n')
    group.add_argument('--stats', action='store_true', help='statistics on SD file molecules\n')
    group.add_argument('--clean', action='store_true', help='clean .sd file from unreasonable structures\n')
    group.add_argument('--sanitize', action='store_true', help='sanitize (via RDKit) .sd file from unreasonable structures\n')
    # NOTE(review): default=0.8 makes args.shufflesplit always truthy, so the
    # final elif below acts as the fallback branch - confirm this is intended.
    group.add_argument('--shufflesplit', type=float, default=0.8, metavar='f',help='Create test and train set by splitting and shuffling molecules, f is a float [0..1] \n')
    # modifier flags shared by several modes
    parser.add_argument('--noH', action='store_true', help='omit Hydrogen atom when learning\n',default=False)
    parser.add_argument('--useRF', action='store_true', help='use Random Forest instead of Gradient Boosting for training\n', default=True)
    parser.add_argument('-v','--verbose', action='store_true', help='verbose\n', default=False)
    parser.add_argument('--iterative', action='store_true', help='Iterative prediction of bonds using 2 classifiers\n', default=False)
    parser.add_argument('--sample', type=float, default=None,metavar='f',help='Subsampling of dataset during training, f is a float [0..1] \n')
    parser.add_argument('--FF', choices=("UFF", "MMFF", "ETKDG"), help='Forcefield/Method to use for 3D structure generation from SMILES', required=False,default="UFF")
    parser.add_argument("filename", nargs='+', help=".sdf or .smi for training, .xyz for prediction",default=[sys.stdin])
    fmethod = 'UFF'
    args = parser.parse_args()
    if args.FF!='UFF':
        fmethod=args.FF
    if args.verbose:
        print("Verbose ON")
    # multiple input files are only supported for prediction on .xyz/.pdb
    if len(args.filename)>1:
        if args.predict and (args.filename[0].endswith(".xyz") or args.filename[0].endswith(".pdb")):
            generate_multi_predictions(args.filename, iterative=args.iterative)
        else:
            print("Multiple files only allowed for prediction with .xyz and .sdf")
            sys.exit(1)
    else:
        # single-file path: dispatch on the selected mode
        for f in args.filename:
            if args.train:
                train_job(f, reset=True, fmethod=fmethod, skipH=args.noH, iterative=args.iterative, useRF=args.useRF, sample=args.sample)
            elif args.add:
                # same as --train but keeps previously accumulated data
                train_job(f, reset=False, fmethod=fmethod, skipH=args.noH, iterative=args.iterative, useRF=args.useRF, sample=args.sample)
            elif args.predict:
                generate_predictions(f, iterative=args.iterative,writeFile=True, verbose=args.verbose)
            elif args.eval and args.iterative:
                eval_job(f, skipH=args.noH, iterative=args.iterative,verbose=args.verbose)
            elif args.eval:
                train_job(f, eval=True, fmethod=fmethod, skipH=args.noH, iterative=args.iterative, sample=args.sample,verbose=args.verbose)
            elif args.evalOB:
                evaluate_OB(f,verbose=args.verbose)
            elif args.analyze:
                evaluate('eval_dat.csv',plotting=True)
            elif args.stats:
                get_stats(f)
            elif args.clean:
                clean_sdf(f)
            elif args.sanitize:
                sanitize_sdf(f)
            elif args.shufflesplit:
                shuffle_split_sdfile(f, frac=args.shufflesplit)
    # NOTE(review): "FINSIHED" typo in the runtime message below
    print("FINSIHED in %fs\n" % (time() - t0))
| [
"logging.getLogger",
"sklearn.model_selection.GridSearchCV",
"logging.StreamHandler",
"rdkit.Chem.AllChem.GetMolFrags",
"rdkit.Chem.AllChem.SanitizeMol",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"rdkit.Chem.AllChem.Compute2DCoords",
"numpy.log",
"rdkit.Chem.AllChem.SDMolSupplier... | [((3056, 3079), 'rdkit.Chem.AllChem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (3077, 3079), True, 'from rdkit.Chem import AllChem as Chem\n'), ((13855, 14034), 'logging.info', 'logging.info', (["('Generating feature using RDKit matrix from: %s -- with options skipH (%r) iterative(%r) filterRubbish(%r) '\n % (infile, skipH, addBonds, useSelectionRules))"], {}), "(\n 'Generating feature using RDKit matrix from: %s -- with options skipH (%r) iterative(%r) filterRubbish(%r) '\n % (infile, skipH, addBonds, useSelectionRules))\n", (13867, 14034), False, 'import logging\n'), ((14175, 14233), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['infile'], {'removeHs': 'skipH', 'sanitize': '(False)'}), '(infile, removeHs=skipH, sanitize=False)\n', (14193, 14233), True, 'from rdkit.Chem import AllChem as Chem\n'), ((15444, 15501), 'logging.info', 'logging.info', (["('Processed total of >%d< molecules' % count)"], {}), "('Processed total of >%d< molecules' % count)\n", (15456, 15501), False, 'import logging\n'), ((17733, 17774), 'logging.info', 'logging.info', (['"""Training data on dataset:"""'], {}), "('Training data on dataset:')\n", (17745, 17774), False, 'import logging\n'), ((17784, 17818), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(0)'}), '(filename, index_col=0)\n', (17795, 17818), True, 'import pandas as pd\n'), ((17927, 17989), 'logging.info', 'logging.info', (["('Shape : %d X %d' % (df.shape[0], df.shape[1]))"], {}), "('Shape : %d X %d' % (df.shape[0], df.shape[1]))\n", (17939, 17989), False, 'import logging\n'), ((17991, 18032), 'logging.info', 'logging.info', (["('Features: %s' % df.columns)"], {}), "('Features: %s' % df.columns)\n", (18003, 18032), False, 'import logging\n'), ((18065, 18102), 'logging.info', 'logging.info', (['"""Droping duplicates..."""'], {}), "('Droping duplicates...')\n", (18077, 18102), False, 'import logging\n'), ((18144, 18206), 'logging.info', 
'logging.info', (["('Shape : %d X %d' % (df.shape[0], df.shape[1]))"], {}), "('Shape : %d X %d' % (df.shape[0], df.shape[1]))\n", (18156, 18206), False, 'import logging\n'), ((19944, 19995), 'logging.info', 'logging.info', (["('Saving classifier as: %s' % save_clf)"], {}), "('Saving classifier as: %s' % save_clf)\n", (19956, 19995), False, 'import logging\n'), ((22786, 22850), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['filename'], {'removeHs': 'skipH', 'sanitize': 'iterative'}), '(filename, removeHs=skipH, sanitize=iterative)\n', (22804, 22850), True, 'from rdkit.Chem import AllChem as Chem\n'), ((23376, 23472), 'logging.info', 'logging.info', (['("""\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f""" % (nall, nok, nfalse, acc)\n )'], {}), '("""\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f""" % (nall,\n nok, nfalse, acc))\n', (23388, 23472), False, 'import logging\n'), ((23761, 23800), 'pandas.read_csv', 'pd.read_csv', (['filename_test'], {'index_col': '(0)'}), '(filename_test, index_col=0)\n', (23772, 23800), True, 'import pandas as pd\n'), ((24405, 24456), 'logging.info', 'logging.info', (["('Evaluate data set: ' + filename_test)"], {}), "('Evaluate data set: ' + filename_test)\n", (24417, 24456), False, 'import logging\n'), ((24461, 24498), 'logging.info', 'logging.info', (['"""Loading classifier..."""'], {}), "('Loading classifier...')\n", (24473, 24498), False, 'import logging\n'), ((24547, 24642), 'logging.info', 'logging.info', (["('Loading test set with %d rows from file %s\\n' % (df.shape[0], filename_test))"], {}), "('Loading test set with %d rows from file %s\\n' % (df.shape[0],\n filename_test))\n", (24559, 24642), False, 'import logging\n'), ((24789, 24813), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'ypred'], {}), '(y, ypred)\n', (24803, 24813), False, 'from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix\n'), ((24826, 24864), 'sklearn.metrics.f1_score', 'f1_score', (['y', 
'ypred'], {'average': '"""weighted"""'}), "(y, ypred, average='weighted')\n", (24834, 24864), False, 'from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix\n'), ((24868, 24936), 'logging.info', 'logging.info', (["('ACCURACY:%0.3f - F1-score: %0.3f\\n' % (score, score2))"], {}), "('ACCURACY:%0.3f - F1-score: %0.3f\\n' % (score, score2))\n", (24880, 24936), False, 'import logging\n'), ((26417, 26468), 'logging.info', 'logging.info', (["('Evaluating %s via OBabel' % filename)"], {}), "('Evaluating %s via OBabel' % filename)\n", (26429, 26468), False, 'import logging\n'), ((26618, 26677), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['filename'], {'removeHs': '(False)', 'sanitize': '(True)'}), '(filename, removeHs=False, sanitize=True)\n', (26636, 26677), True, 'from rdkit.Chem import AllChem as Chem\n'), ((30916, 31012), 'logging.info', 'logging.info', (['("""\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f""" % (nall, nok, nfalse, acc)\n )'], {}), '("""\nTOTAL: %5d OK: %5d WRONG: %5d Accuray: %6.3f""" % (nall,\n nok, nfalse, acc))\n', (30928, 31012), False, 'import logging\n'), ((31746, 31773), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['res_filename'], {}), '(res_filename)\n', (31759, 31773), True, 'from rdkit.Chem import AllChem as Chem\n'), ((32230, 32295), 'logging.info', 'logging.info', (["('Writing multiple .xyz files to: %s' % res_filename)"], {}), "('Writing multiple .xyz files to: %s' % res_filename)\n", (32242, 32295), False, 'import logging\n'), ((41049, 41106), 'logging.info', 'logging.info', (["('Selected optimization method: %s' % method)"], {}), "('Selected optimization method: %s' % method)\n", (41061, 41106), False, 'import logging\n'), ((41118, 41203), 'rdkit.Chem.AllChem.SmilesMolSupplier', 'Chem.SmilesMolSupplier', (['smifile'], {'delimiter': '"""\t """', 'titleLine': '(False)', 'sanitize': '(True)'}), "(smifile, delimiter='\\t ', titleLine=False, sanitize=True\n )\n", (41140, 41203), 
True, 'from rdkit.Chem import AllChem as Chem\n'), ((41312, 41333), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['sdfile'], {}), '(sdfile)\n', (41325, 41333), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42882, 42926), 'logging.info', 'logging.info', (["('Writing to SD file:' + sdfile)"], {}), "('Writing to SD file:' + sdfile)\n", (42894, 42926), False, 'import logging\n'), ((43220, 43244), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['trainfile'], {}), '(trainfile)\n', (43233, 43244), True, 'from rdkit.Chem import AllChem as Chem\n'), ((43315, 43338), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['testfile'], {}), '(testfile)\n', (43328, 43338), True, 'from rdkit.Chem import AllChem as Chem\n'), ((43361, 43419), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['infile'], {'removeHs': '(False)', 'sanitize': '(False)'}), '(infile, removeHs=False, sanitize=False)\n', (43379, 43419), True, 'from rdkit.Chem import AllChem as Chem\n'), ((44151, 44174), 'rdkit.Chem.AllChem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (44172, 44174), True, 'from rdkit.Chem import AllChem as Chem\n'), ((44185, 44197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (44195, 44197), True, 'import matplotlib.pyplot as plt\n'), ((44202, 44222), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['infile'], {}), '(infile)\n', (44214, 44222), True, 'import matplotlib.pyplot as plt\n'), ((45410, 45420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45418, 45420), True, 'import matplotlib.pyplot as plt\n'), ((45654, 45676), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['outfile'], {}), '(outfile)\n', (45667, 45676), True, 'from rdkit.Chem import AllChem as Chem\n'), ((45689, 45747), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['infile'], {'removeHs': '(False)', 'sanitize': '(False)'}), '(infile, removeHs=False, sanitize=False)\n', (45707, 45747), True, 'from rdkit.Chem import AllChem as Chem\n'), ((46041, 
46114), 'logging.info', 'logging.info', (["('Removed %d files - saving .sdf to: %s ' % (count, outfile))"], {}), "('Removed %d files - saving .sdf to: %s ' % (count, outfile))\n", (46053, 46114), False, 'import logging\n'), ((46310, 46332), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['outfile'], {}), '(outfile)\n', (46323, 46332), True, 'from rdkit.Chem import AllChem as Chem\n'), ((46345, 46405), 'rdkit.Chem.AllChem.SDMolSupplier', 'Chem.SDMolSupplier', (['infile'], {'removeHs': 'removeHs', 'sanitize': '(True)'}), '(infile, removeHs=removeHs, sanitize=True)\n', (46363, 46405), True, 'from rdkit.Chem import AllChem as Chem\n'), ((46547, 46620), 'logging.info', 'logging.info', (["('Removed %d files - saving .sdf to: %s ' % (count, outfile))"], {}), "('Removed %d files - saving .sdf to: %s ' % (count, outfile))\n", (46559, 46620), False, 'import logging\n'), ((46856, 46885), 'rdkit.Chem.AllChem.MolFromPDBFile', 'Chem.MolFromPDBFile', (['filename'], {}), '(filename)\n', (46875, 46885), True, 'from rdkit.Chem import AllChem as Chem\n'), ((47450, 47475), 'rdkit.Chem.AllChem.GetFormalCharge', 'Chem.GetFormalCharge', (['mol'], {}), '(mol)\n', (47470, 47475), True, 'from rdkit.Chem import AllChem as Chem\n'), ((50123, 50208), 'sklearn.metrics.classification_report', 'classification_report', (['y', 'ypred'], {'digits': '(3)', 'target_names': "['X', '-', '=', '#', 'a']"}), "(y, ypred, digits=3, target_names=['X', '-', '=', '#',\n 'a'])\n", (50144, 50208), False, 'from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix\n'), ((50257, 50307), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'ypred'], {'labels': '[0, 1, 2, 3, 4]'}), '(y, ypred, labels=[0, 1, 2, 3, 4])\n', (50273, 50307), False, 'from sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix\n'), ((50320, 50394), 'pandas.DataFrame', 'pd.DataFrame', (['cm'], {'index': "[i for i in 'X-=#a']", 'columns': "[i for i in 
'X-=#a']"}), "(cm, index=[i for i in 'X-=#a'], columns=[i for i in 'X-=#a'])\n", (50332, 50394), True, 'import pandas as pd\n'), ((50424, 50451), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (50434, 50451), True, 'import matplotlib.pyplot as plt\n'), ((50669, 50679), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50677, 50679), True, 'import matplotlib.pyplot as plt\n'), ((50784, 50896), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""log.txt"""', 'level': 'loglevel', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(filename='log.txt', level=loglevel, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (50803, 50896), False, 'import logging\n'), ((50902, 50921), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (50919, 50921), False, 'import logging\n'), ((50959, 50992), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (50980, 50992), False, 'import logging\n'), ((51035, 51067), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (51052, 51067), False, 'import logging\n'), ((51178, 51184), 'time.time', 'time', ([], {}), '()\n', (51182, 51184), False, 'from time import time\n'), ((51189, 51229), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(30)'], {}), "('display.max_columns', 30)\n", (51202, 51229), True, 'import pandas as pd\n'), ((51234, 51270), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (51247, 51270), True, 'import pandas as pd\n'), ((51276, 51294), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (51290, 51294), True, 'import numpy as np\n'), ((51875, 51975), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=description, formatter_class=argparse.\n 
RawTextHelpFormatter)\n', (51898, 51975), False, 'import argparse\n'), ((4086, 4115), 'rdkit.Chem.AllChem.Get3DDistanceMatrix', 'Chem.Get3DDistanceMatrix', (['mol'], {}), '(mol)\n', (4110, 4115), True, 'from rdkit.Chem import AllChem as Chem\n'), ((4158, 4183), 'rdkit.Chem.AllChem.GetFormalCharge', 'Chem.GetFormalCharge', (['mol'], {}), '(mol)\n', (4178, 4183), True, 'from rdkit.Chem import AllChem as Chem\n'), ((4245, 4263), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['dm'], {}), '(dm)\n', (4259, 4263), True, 'import matplotlib.pyplot as plt\n'), ((4272, 4286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4284, 4286), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4311), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, n]'], {}), '([0, n])\n', (4303, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4320, 4336), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, n]'], {}), '([0, n])\n', (4328, 4336), True, 'import matplotlib.pyplot as plt\n'), ((4345, 4355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4353, 4355), True, 'import matplotlib.pyplot as plt\n'), ((11773, 11789), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (11785, 11789), True, 'import pandas as pd\n'), ((14055, 14117), 'logging.info', 'logging.info', (["('Subsampling fraction %4.2f of dataset' % sample)"], {}), "('Subsampling fraction %4.2f of dataset' % sample)\n", (14067, 14117), False, 'import logging\n'), ((14126, 14144), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (14140, 14144), True, 'import numpy as np\n'), ((15552, 15614), 'logging.info', 'logging.info', (["('%3d Generated temp file: %s' % (i + 1, outfile))"], {}), "('%3d Generated temp file: %s' % (i + 1, outfile))\n", (15564, 15614), False, 'import logging\n'), ((15688, 15751), 'logging.info', 'logging.info', (['"""ERROR: There was a problem generating the data!"""'], {}), "('ERROR: There was a problem generating the data!')\n", (15700, 15751), False, 'import 
logging\n'), ((17073, 17095), 'os.path.isfile', 'os.path.isfile', (['finalf'], {}), '(finalf)\n', (17087, 17095), False, 'import os, sys, re, subprocess\n'), ((17303, 17376), 'logging.info', 'logging.info', (["('File: %3d - %s .csv file to: %s' % (i + 1, outstr, finalf))"], {}), "('File: %3d - %s .csv file to: %s' % (i + 1, outstr, finalf))\n", (17315, 17376), False, 'import logging\n'), ((18313, 18348), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(5)'}), '(max_depth=5)\n', (18335, 18348), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((18475, 18500), 'graphviz.Source', 'graphviz.Source', (['dot_data'], {}), '(dot_data)\n', (18490, 18500), False, 'import graphviz\n'), ((18601, 18730), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(250)', 'max_depth': 'None', 'min_samples_leaf': '(5)', 'n_jobs': 'n_jobs', 'max_features': '(11)', 'oob_score': '(False)'}), '(n_estimators=250, max_depth=None, min_samples_leaf=5,\n n_jobs=n_jobs, max_features=11, oob_score=False)\n', (18623, 18730), False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((19157, 19250), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(1000)', 'learning_rate': '(0.1)', 'max_depth': '(5)', 'verbose': '(1)'}), '(n_estimators=1000, learning_rate=0.1, max_depth=\n 5, verbose=1)\n', (19183, 19250), False, 'from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n'), ((19357, 19391), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (19372, 19391), False, 'from sklearn.model_selection import StratifiedKFold, GridSearchCV\n'), ((19408, 19509), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'parameters'], {'n_jobs': 'n_jobs', 'verbose': '(2)', 'scoring': '"""f1_micro"""', 'cv': 'cv', 'refit': '(True)'}), "(model, 
parameters, n_jobs=n_jobs, verbose=2, scoring=\n 'f1_micro', cv=cv, refit=True)\n", (19420, 19509), False, 'from sklearn.model_selection import StratifiedKFold, GridSearchCV\n'), ((19823, 19869), 'logging.info', 'logging.info', (["('Fitting classifier: %s' % model)"], {}), "('Fitting classifier: %s' % model)\n", (19835, 19869), False, 'import logging\n'), ((20514, 20557), 'logging.info', 'logging.info', (['"""Iterative mode switched ON!"""'], {}), "('Iterative mode switched ON!')\n", (20526, 20557), False, 'import logging\n'), ((20649, 20704), 'logging.info', 'logging.info', (['"""INFO: Using Random Forest for training!"""'], {}), "('INFO: Using Random Forest for training!')\n", (20661, 20704), False, 'import logging\n'), ((20731, 20757), 'os.path.isfile', 'os.path.isfile', (['train_file'], {}), '(train_file)\n', (20745, 20757), False, 'import os, sys, re, subprocess\n'), ((20804, 20829), 'os.path.isfile', 'os.path.isfile', (['iter_file'], {}), '(iter_file)\n', (20818, 20829), False, 'import os, sys, re, subprocess\n'), ((21629, 21655), 'os.path.isfile', 'os.path.isfile', (['train_file'], {}), '(train_file)\n', (21643, 21655), False, 'import os, sys, re, subprocess\n'), ((21665, 21738), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: Missing training data file: %s!\\n' % train_file)"], {}), "('ERROR: Missing training data file: %s!\\n' % train_file)\n", (21681, 21738), False, 'import os, sys, re, subprocess\n'), ((21747, 21758), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21755, 21758), False, 'import os, sys, re, subprocess\n'), ((23890, 23927), 'logging.info', 'logging.info', (['"""Analyze train data..."""'], {}), "('Analyze train data...')\n", (23902, 23927), False, 'import logging\n'), ((23947, 23987), 'pandas.read_csv', 'pd.read_csv', (['filename_train'], {'index_col': '(0)'}), '(filename_train, index_col=0)\n', (23958, 23987), True, 'import pandas as pd\n'), ((25717, 25782), 'numpy.array_equal', 'np.array_equal', (["df_sub['bond_pred'].values", 
"df_sub['bond'].values"], {}), "(df_sub['bond_pred'].values, df_sub['bond'].values)\n", (25731, 25782), True, 'import numpy as np\n'), ((26882, 26900), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (26895, 26900), True, 'from rdkit.Chem import AllChem as Chem\n'), ((27114, 27140), 'io.StringIO.StringIO', 'StringIO.StringIO', (['xyz_str'], {}), '(xyz_str)\n', (27131, 27140), False, 'from io import StringIO\n'), ((27292, 27393), 'subprocess.Popen', 'subprocess.Popen', (['cmd_call'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd_call, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (27308, 27393), False, 'import os, sys, re, subprocess\n'), ((27648, 27709), 'rdkit.Chem.AllChem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['molblock'], {'removeHs': '(True)', 'sanitize': '(False)'}), '(molblock, removeHs=True, sanitize=False)\n', (27668, 27709), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28509, 28560), 'numpy.array_equal', 'np.array_equal', (['bond_orig.values', "df['bond'].values"], {}), "(bond_orig.values, df['bond'].values)\n", (28523, 28560), True, 'import numpy as np\n'), ((32164, 32205), 'rdkit.Chem.AllChem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['ins'], {'sanitize': '(False)'}), '(ins, sanitize=False)\n', (32184, 32205), True, 'from rdkit.Chem import AllChem as Chem\n'), ((33108, 33204), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: Please train classifier in iterative mode (--iterative) first!\n"""'], {}), "(\n 'ERROR: Please train classifier in iterative mode (--iterative) first!\\n')\n", (33124, 33204), False, 'import os, sys, re, subprocess\n'), ((33208, 33219), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (33216, 33219), False, 'import os, sys, re, subprocess\n'), ((37728, 37784), 'logging.info', 'logging.info', (["('ML-generated SDF written to: ' + filename)"], {}), "('ML-generated SDF written to: ' + filename)\n", (37740, 37784), 
False, 'import logging\n'), ((39045, 39091), 'rdkit.Chem.AllChem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['filename'], {'sanitize': '(False)'}), '(filename, sanitize=False)\n', (39065, 39091), True, 'from rdkit.Chem import AllChem as Chem\n'), ((40454, 40514), 'logging.info', 'logging.info', (['"""Forcing aromaticity for bonds with p(a)>0.4!"""'], {}), "('Forcing aromaticity for bonds with p(a)>0.4!')\n", (40466, 40514), False, 'import logging\n'), ((41415, 41447), 'rdkit.Chem.AllChem.GetMolFrags', 'Chem.GetMolFrags', (['m'], {'asMols': '(True)'}), '(m, asMols=True)\n', (41431, 41447), True, 'from rdkit.Chem import AllChem as Chem\n'), ((41616, 41629), 'rdkit.Chem.AllChem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (41626, 41629), True, 'from rdkit.Chem import AllChem as Chem\n'), ((43470, 43485), 'random.random', 'random.random', ([], {}), '()\n', (43483, 43485), False, 'import random\n'), ((47931, 48018), 're.search', 're.search', (['"""(?:CHARGE|CHG)=([-+]?\\\\d*\\\\.\\\\d+|\\\\d+|[-+]?\\\\d)"""', 'title', 're.IGNORECASE'], {}), "('(?:CHARGE|CHG)=([-+]?\\\\d*\\\\.\\\\d+|\\\\d+|[-+]?\\\\d)', title, re.\n IGNORECASE)\n", (47940, 48018), False, 'import os, sys, re, subprocess\n'), ((3385, 3423), 'logging.info', 'logging.info', (["('Found charge: %.2f' % q)"], {}), "('Found charge: %.2f' % q)\n", (3397, 3423), False, 'import logging\n'), ((16411, 16433), 'os.path.isfile', 'os.path.isfile', (['infile'], {}), '(infile)\n', (16425, 16433), False, 'import os, sys, re, subprocess\n'), ((16447, 16491), 'logging.critical', 'logging.critical', (["('File not found:' + infile)"], {}), "('File not found:' + infile)\n", (16463, 16491), False, 'import logging\n'), ((16551, 16562), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16559, 16562), False, 'import os, sys, re, subprocess\n'), ((20771, 20792), 'os.remove', 'os.remove', (['train_file'], {}), '(train_file)\n', (20780, 20792), False, 'import os, sys, re, subprocess\n'), ((20843, 20863), 'os.remove', 'os.remove', 
(['iter_file'], {}), '(iter_file)\n', (20852, 20863), False, 'import os, sys, re, subprocess\n'), ((21269, 21332), 'logging.info', 'logging.info', (["('Using forcefield for optimization: %s' % fmethod)"], {}), "('Using forcefield for optimization: %s' % fmethod)\n", (21281, 21332), False, 'import logging\n'), ((23211, 23245), 'logging.info', 'logging.info', (["('%d %r\\n' % (i, res))"], {}), "('%d %r\\n' % (i, res))\n", (23223, 23245), False, 'import logging\n'), ((24211, 24279), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""bond"""', 'y': '"""distab"""', 'data': "df_train[['distab', 'bond']]"}), "(x='bond', y='distab', data=df_train[['distab', 'bond']])\n", (24222, 24279), True, 'import seaborn as sns\n'), ((24389, 24399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24397, 24399), True, 'import matplotlib.pyplot as plt\n'), ((25622, 25633), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25630, 25633), False, 'import os, sys, re, subprocess\n'), ((25957, 25972), 'numpy.argmax', 'np.argmax', (['mask'], {}), '(mask)\n', (25966, 25972), True, 'import numpy as np\n'), ((32060, 32128), 'logging.info', 'logging.info', (["('Skipping file %s - could not generate mol block!' % f)"], {}), "('Skipping file %s - could not generate mol block!' 
% f)\n", (32072, 32128), False, 'import logging\n'), ((33070, 33098), 'os.path.isfile', 'os.path.isfile', (['"""clf_iter.p"""'], {}), "('clf_iter.p')\n", (33084, 33098), False, 'import os, sys, re, subprocess\n'), ((33819, 33883), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: Need .xyz/.pdb file for prediction!\n"""'], {}), "('ERROR: Need .xyz/.pdb file for prediction!\\n')\n", (33835, 33883), False, 'import os, sys, re, subprocess\n'), ((33892, 33903), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (33900, 33903), False, 'import os, sys, re, subprocess\n'), ((35520, 35548), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'X2.index'}), '(index=X2.index)\n', (35532, 35548), True, 'import pandas as pd\n'), ((37346, 37396), 'numpy.array_equal', 'np.array_equal', (['bond_orig.values', "X['bond'].values"], {}), "(bond_orig.values, X['bond'].values)\n", (37360, 37396), True, 'import numpy as np\n'), ((37823, 37875), 'numpy.array_equal', 'np.array_equal', (['X.bond_orig.values', "X['bond'].values"], {}), "(X.bond_orig.values, X['bond'].values)\n", (37837, 37875), True, 'import numpy as np\n'), ((40185, 40262), 'sys.stderr.write', 'sys.stderr.write', (['"""Are there have enough training data for all bond types?\n"""'], {}), "('Are there have enough training data for all bond types?\\n')\n", (40201, 40262), False, 'import os, sys, re, subprocess\n'), ((40271, 40282), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (40279, 40282), False, 'import os, sys, re, subprocess\n'), ((47260, 47314), 'logging.warn', 'logging.warn', (['"""PDB file should not contain hydrogens!"""'], {}), "('PDB file should not contain hydrogens!')\n", (47272, 47314), False, 'import logging\n'), ((54635, 54646), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (54643, 54646), False, 'import os, sys, re, subprocess\n'), ((3454, 3472), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (3464, 3472), True, 'import numpy as np\n'), ((3542, 3560), 'rdkit.Chem.AllChem.RemoveHs', 
'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (3555, 3560), True, 'from rdkit.Chem import AllChem as Chem\n'), ((3999, 4046), 'logging.info', 'logging.info', (['"""RuntimeError: skipping molecule"""'], {}), "('RuntimeError: skipping molecule')\n", (4011, 4046), False, 'import logging\n'), ((8995, 9015), 'numpy.argsort', 'np.argsort', (['dm[a, :]'], {}), '(dm[a, :])\n', (9005, 9015), True, 'import numpy as np\n'), ((14333, 14354), 'rdkit.Chem.AllChem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {}), '(mol)\n', (14349, 14354), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28159, 28182), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['mol_pred'], {}), '(mol_pred)\n', (28172, 28182), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28199, 28233), 'rdkit.Chem.AllChem.Compute2DCoords', 'Chem.Compute2DCoords', (['mol_pred_noH'], {}), '(mol_pred_noH)\n', (28219, 28233), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28250, 28275), 'rdkit.Chem.AllChem.Compute2DCoords', 'Chem.Compute2DCoords', (['mol'], {}), '(mol)\n', (28270, 28275), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28650, 28673), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['mol_pred'], {}), '(mol_pred)\n', (28663, 28673), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28690, 28724), 'rdkit.Chem.AllChem.Compute2DCoords', 'Chem.Compute2DCoords', (['mol_pred_noH'], {}), '(mol_pred_noH)\n', (28710, 28724), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28741, 28766), 'rdkit.Chem.AllChem.Compute2DCoords', 'Chem.Compute2DCoords', (['mol'], {}), '(mol)\n', (28761, 28766), True, 'from rdkit.Chem import AllChem as Chem\n'), ((29738, 29785), 'numpy.where', 'np.where', (["(bond_orig.values != df['bond'].values)"], {}), "(bond_orig.values != df['bond'].values)\n", (29746, 29785), True, 'import numpy as np\n'), ((30344, 30395), 'numpy.array_equal', 'np.array_equal', (['bond_orig.values', "df['bond'].values"], {}), "(bond_orig.values, df['bond'].values)\n", (30358, 30395), True, 
'import numpy as np\n'), ((33918, 33941), 'os.path.isfile', 'os.path.isfile', (['"""clf.p"""'], {}), "('clf.p')\n", (33932, 33941), False, 'import os, sys, re, subprocess\n'), ((33951, 34010), 'sys.stderr.write', 'sys.stderr.write', (['"""ERROR: Please train classifier first!\n"""'], {}), "('ERROR: Please train classifier first!\\n')\n", (33967, 34010), False, 'import os, sys, re, subprocess\n'), ((34019, 34030), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (34027, 34030), False, 'import os, sys, re, subprocess\n'), ((35955, 35975), 'numpy.max', 'np.max', (['grad'], {'axis': '(1)'}), '(grad, axis=1)\n', (35961, 35975), True, 'import numpy as np\n'), ((36186, 36208), 'numpy.where', 'np.where', (['(mask == True)'], {}), '(mask == True)\n', (36194, 36208), True, 'import numpy as np\n'), ((38904, 38967), 'logging.info', 'logging.info', (["('Could not generate dataframe from %s' % filename)"], {}), "('Could not generate dataframe from %s' % filename)\n", (38916, 38967), False, 'import logging\n'), ((41706, 41727), 'rdkit.Chem.AllChem.EmbedMolecule', 'Chem.EmbedMolecule', (['m'], {}), '(m)\n', (41724, 41727), True, 'from rdkit.Chem import AllChem as Chem\n'), ((41751, 41792), 'rdkit.Chem.AllChem.UFFOptimizeMolecule', 'Chem.UFFOptimizeMolecule', (['m'], {'maxIters': '(200)'}), '(m, maxIters=200)\n', (41775, 41792), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42241, 42260), 'rdkit.Chem.AllChem.SanitizeMol', 'Chem.SanitizeMol', (['m'], {}), '(m)\n', (42257, 42260), True, 'from rdkit.Chem import AllChem as Chem\n'), ((55953, 55959), 'time.time', 'time', ([], {}), '()\n', (55957, 55959), False, 'from time import time\n'), ((6370, 6561), 'logging.warn', 'logging.warn', (["('Unreasonable short X-X bond (r<0.75): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type, sourcename, pos))"], {}), "(\n 'Unreasonable short X-X bond (r<0.75): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (bnd_at1, i + 
1, bnd_at2, j + 1, bnd_dist, bnd_type, sourcename, pos))\n", (6382, 6561), False, 'import logging\n'), ((14793, 14818), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (14816, 14818), True, 'import numpy as np\n'), ((16526, 16537), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16535, 16537), False, 'import os, sys, re, subprocess\n'), ((39969, 39996), 'numpy.log', 'np.log', (['(yprob[:, i] + 1e-15)'], {}), '(yprob[:, i] + 1e-15)\n', (39975, 39996), True, 'import numpy as np\n'), ((41530, 41549), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (41546, 41549), True, 'from rdkit.Chem import AllChem as Chem\n'), ((41841, 41862), 'rdkit.Chem.AllChem.EmbedMolecule', 'Chem.EmbedMolecule', (['m'], {}), '(m)\n', (41859, 41862), True, 'from rdkit.Chem import AllChem as Chem\n'), ((41886, 41928), 'rdkit.Chem.AllChem.MMFFOptimizeMolecule', 'Chem.MMFFOptimizeMolecule', (['m'], {'maxIters': '(200)'}), '(m, maxIters=200)\n', (41911, 41928), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42303, 42319), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['m'], {}), '(m)\n', (42316, 42319), True, 'from rdkit.Chem import AllChem as Chem\n'), ((3900, 3921), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (3916, 3921), True, 'from rdkit.Chem import AllChem as Chem\n'), ((6643, 6833), 'logging.warn', 'logging.warn', (["('Unreasonable short X-X bond (r<1.1): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type, sourcename, pos))"], {}), "(\n 'Unreasonable short X-X bond (r<1.1): %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type, sourcename, pos))\n", (6655, 6833), False, 'import logging\n'), ((12766, 12787), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (12782, 12787), True, 'from rdkit.Chem import AllChem as Chem\n'), ((37574, 37600), 
'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (37590, 37600), False, 'import os, sys, re, subprocess\n'), ((6990, 7200), 'logging.warn', 'logging.warn', (["('%s unreasonable short X-H distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))"], {}), "(\n '%s unreasonable short X-H distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))\n", (7002, 7200), False, 'import logging\n'), ((29604, 29627), 'rdkit.Chem.AllChem.MolToMolBlock', 'Chem.MolToMolBlock', (['mol'], {}), '(mol)\n', (29622, 29627), True, 'from rdkit.Chem import AllChem as Chem\n'), ((30757, 30780), 'rdkit.Chem.AllChem.MolToMolBlock', 'Chem.MolToMolBlock', (['mol'], {}), '(mol)\n', (30775, 30780), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42000, 42012), 'rdkit.Chem.AllChem.ETKDG', 'Chem.ETKDG', ([], {}), '()\n', (42010, 42012), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42079, 42091), 'rdkit.Chem.AllChem.ETKDG', 'Chem.ETKDG', ([], {}), '()\n', (42089, 42091), True, 'from rdkit.Chem import AllChem as Chem\n'), ((7366, 7576), 'logging.warn', 'logging.warn', (["('%s unreasonable short C-C distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))"], {}), "(\n '%s unreasonable short C-C distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))\n", (7378, 7576), False, 'import logging\n'), ((30048, 30069), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (30064, 30069), True, 'from rdkit.Chem import AllChem as Chem\n'), ((42530, 42546), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['m'], {}), '(m)\n', (42543, 42546), 
True, 'from rdkit.Chem import AllChem as Chem\n'), ((42843, 42859), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['m'], {}), '(m)\n', (42856, 42859), True, 'from rdkit.Chem import AllChem as Chem\n'), ((7737, 7943), 'logging.warn', 'logging.warn', (["('%s unreasonable short distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))"], {}), "(\n '%s unreasonable short distance w/o bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))\n", (7749, 7943), False, 'import logging\n'), ((42694, 42710), 'rdkit.Chem.AllChem.RemoveHs', 'Chem.RemoveHs', (['m'], {}), '(m)\n', (42707, 42710), True, 'from rdkit.Chem import AllChem as Chem\n'), ((8142, 8338), 'logging.warn', 'logging.warn', (["('%s unreasonable long C-C bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))"], {}), "(\n '%s unreasonable long C-C bond: %r(%d) %r(%d) %4.2f type: %d from source: %s at pos: %d'\n % (selstr, bnd_at1, i + 1, bnd_at2, j + 1, bnd_dist, bnd_type,\n sourcename, pos))\n", (8154, 8338), False, 'import logging\n')] |
import collections.abc
import enum
import os
from typing import (
TYPE_CHECKING,
Annotated,
Any,
AsyncGenerator,
Optional,
Union,
)
import aiohttp
import inflection
import numpy as np
import pandas as pd
import pydantic
import structlog
import uplink
import uplink.converters
from pandera.decorators import check_io, check_output
from pandera.errors import SchemaError
from pandera.model import SchemaModel
from pandera.model_components import Field
from pandera.typing import Series, String
from wraeblast import constants, errors
from wraeblast.filtering.elements import ItemFilter
from wraeblast.filtering.parsers.extended import env
if TYPE_CHECKING:
import uplink.commands
from wraeblast.filtering.parsers.extended import config
logger = structlog.get_logger()
# Type aliases for qcut groupings: (interval, rows-in-interval) pairs,
# optionally keyed by an influence combination.
QcutItemsType = list[tuple[pd.Interval, pd.DataFrame]]
SingleInfluencedQcutItemsType = list[
    tuple[tuple[tuple[str, ...], pd.Interval], pd.DataFrame]
]
# A fetched overview: which insights type it is, plus its dataframe.
InsightsResultType = tuple[
    Union["CurrencyType", "ItemType"],
    pd.DataFrame,
]
InsightsType = Union["CurrencyType", "ItemType"]
# Quantile column name -> number of bins passed to pd.qcut.
quantiles = {
    "quartile": 4,
    "quintile": 5,
    "decile": 10,
    "percentile": 100,
}
# Shard -> parent orb; used by transform_ninja_df to synthesize prices for
# shards missing from the overview (valued at 1/20 of the parent orb).
# NOTE(review): "Alchemy Shard" maps to "Orb of Alteration" — looks like a
# copy-paste slip for "Orb of Alchemy"; confirm before relying on it.
shard_names_to_orb_names = {
    "Transmutation Shard": "Orb of Transmutation",
    "Alteration Shard": "Orb of Alteration",
    "Alchemy Shard": "Orb of Alteration",
    "Annulment Shard": "Orb of Annulment",
    "Binding Shard": "Orb of Binding",
    "Horizon Shard": "Orb of Horizons",
    "Harbinger's Shard": "Harbinger's Orb",
    "Engineer's Shard": "Engineer's Orb",
    "Ancient Shard": "Ancient Orb",
    "Chaos Shard": "Chaos Orb",
    "Mirror Shard": "Mirror of Kalandra",
    "Exalted Shard": "Exalted Orb",
    "Regal Shard": "Regal Orb",
}
# Module-level HDF5 cache of fetched overviews, opened at import time.
_cache = pd.HDFStore(os.getenv("WRAEBLAST_CACHE", "./.wbinsights.h5"))
class InflectedEnumMixin(enum.Enum):
    """Enum mixin exposing snake_case views of each member's value."""

    @property
    def underscored_value(self) -> str:
        """The member value converted to snake_case via inflection."""
        return inflection.underscore(self.value)

    @property
    def pluralized_underscored_value(self) -> str:
        """The snake_case value, pluralized via inflection."""
        snake = self.underscored_value
        return inflection.pluralize(snake)
class CurrencyType(InflectedEnumMixin, enum.Enum):
    """Overview types served by poe.ninja's CurrencyOverview endpoint."""

    CURRENCY = "Currency"
    FRAGMENT = "Fragment"
class ItemType(InflectedEnumMixin, enum.Enum):
    """Overview types served by poe.ninja's ItemOverview endpoint."""

    ARTIFACT = "Artifact"
    BASE_TYPE = "BaseType"
    BEAST = "Beast"
    BLIGHTED_MAP = "BlightedMap"
    CLUSTER_JEWEL = "ClusterJewel"
    DELIRIUM_ORB = "DeliriumOrb"
    DIVINATION_CARD = "DivinationCard"
    ESSENCE = "Essence"
    FOSSIL = "Fossil"
    HELMET_ENCHANT = "HelmetEnchant"
    INCUBATOR = "Incubator"
    INVITATION = "Invitation"
    MAP = "Map"
    OIL = "Oil"
    PROPHECY = "Prophecy"
    RESONATOR = "Resonator"
    SCARAB = "Scarab"
    SKILL_GEM = "SkillGem"
    UNIQUE_ACCESSORY = "UniqueAccessory"
    UNIQUE_ARMOUR = "UniqueArmour"
    UNIQUE_FLASK = "UniqueFlask"
    UNIQUE_JEWEL = "UniqueJewel"
    UNIQUE_MAP = "UniqueMap"
    UNIQUE_WEAPON = "UniqueWeapon"
    VIAL = "Vial"
    WATCHSTONE = "Watchstone"

    @property
    def key_name(self) -> str:
        """Pluralized snake_case name for this type.

        The original re-implemented
        ``inflection.pluralize(inflection.underscore(self.value))`` inline;
        delegate to the inherited mixin property instead so the two stay
        consistent.
        """
        return self.pluralized_underscored_value
@uplink.response_handler
def raise_for_status(response):
    """Return the response when it is a 2xx success; raise otherwise.

    Raises:
        errors.UnsuccessfulInsightsRequest: For any non-2xx status code.
    """
    status = response.status_code
    if not (200 <= status < 300):
        raise errors.UnsuccessfulInsightsRequest(
            f"error {status}: {response.url}"
        )
    return response
def get_all_insights_types() -> list[InsightsType]:
    """Return every currency and item insights type, in declaration order."""
    return list(CurrencyType) + list(ItemType)
def get_display_value(
    chaos_value: float,
    exalted_exchange_value: int,
    round_down_by: int = 1,
    precision: int = 0,
) -> str:
    """Render a chaos value as a human-readable "<n>c" or "<n>ex" string.

    Values below one exalted are rendered in chaos orbs, optionally rounded
    down to a multiple of ``round_down_by`` (skipped when 0); larger values
    are rendered in exalted orbs with ``precision`` decimal places.
    """
    if chaos_value >= exalted_exchange_value:
        exalted = chaos_value / exalted_exchange_value
        return f"{exalted:.{precision}f}ex"
    if round_down_by:
        chaos_value = env.round_down(chaos_value, round_down_by)
    return f"{chaos_value}c"
def get_insights_type_by_value(s: str) -> InsightsType:
    """Look up the insights type whose enum value equals *s*.

    Raises:
        KeyError: If no currency or item type has that value.
    """
    match = next(
        (t for t in get_all_insights_types() if t.value == s), None
    )
    if match is None:
        raise KeyError(s)
    return match
def get_quantile_tuple(q: str) -> tuple[str, int]:
    """Parse a quantile code like "D5", "P99", "QU3" or "Q2".

    Returns:
        ``(quantile_name, index)``, e.g. ``("decile", 5)``.

    Raises:
        RuntimeError: If the prefix is not one of D/P/QU/Q.
    """
    # Longer prefix "QU" must be checked before "Q".
    prefix_table = (
        ("QU", "quintile"),
        ("D", "decile"),
        ("P", "percentile"),
        ("Q", "quartile"),
    )
    for prefix, quantile_name in prefix_table:
        if q.startswith(prefix):
            return (quantile_name, int(q[len(prefix):]))
    raise RuntimeError(f"invalid quantile: {q}")
async def get_economy_overview(
    league: str,
    client: "NinjaConsumer",
    type_: InsightsType,
) -> pd.DataFrame:
    """Request an economy overview from poe.ninja.

    Dispatches to the currency or item endpoint based on *type_*.

    Raises:
        RuntimeError: If *type_* is neither a CurrencyType nor an ItemType.
    """
    if type_ in CurrencyType:
        meth = client.get_currency_overview
    elif type_ in ItemType:
        meth = client.get_item_overview
    else:
        # Fix: the original raised a bare RuntimeError() with no message,
        # making the failure undiagnosable; include the offending value.
        raise RuntimeError(f"unsupported insights type: {type_!r}")
    logger.info(
        "overview.get",
        client=".".join([client.__module__, client.__class__.__name__]),
        type=type_.value,
    )
    return await meth(league=league, type=type_.value)  # type: ignore
async def get_dataframes(
    league: str,
    types: list[InsightsType],
) -> AsyncGenerator[InsightsResultType, None]:
    """Request all economy overviews from poe.ninja.

    Yields ``(insights_type, dataframe)`` pairs, one per entry in *types*.

    Fix: the original only closed the aiohttp session after the loop
    completed, leaking the connection when a request raised or the
    generator was abandoned; the session is now closed unconditionally.
    """
    logger.info("all_insights.get", league=league)
    session = aiohttp.ClientSession()
    try:
        client = uplink.AiohttpClient(session=session)
        ninja = NinjaConsumer(
            base_url=NinjaConsumer.default_base_url,
            client=client,
        )
        for t in types:
            overview = await get_economy_overview(
                league=league,
                client=ninja,
                type_=t,
            )
            yield (t, overview)
    finally:
        await session.close()
async def initialize_insights_cache(
    league: str,
    cache: Optional[pd.HDFStore] = None,
    no_sync: bool = False,
) -> pd.HDFStore:
    """Fetch and cache economy insights as needed.

    Probes the HDF store for one ``i_<TypeValue>`` table per insights type
    and fetches only the missing overviews from poe.ninja.

    Args:
        league: League name to fetch overviews for.
        cache: HDF store to populate; defaults to the module-level store.
        no_sync: When true, never hit the network; raise instead if any
            table is missing.

    Raises:
        errors.WraeblastError: If tables are missing and *no_sync* is set.
    """
    if cache is None:
        cache = _cache
    log = logger.bind(league=league)
    log.info("cache.initialize", league=league)
    missing_dataframes = []
    for t in get_all_insights_types():
        try:
            # Probe only; the retrieved frame itself is not used here.
            df = cache.get(f"i_{t.value}")
            log.debug("cache.hit", type=t.value)
        except KeyError:
            log.debug("cache.miss", type=t.value)
            missing_dataframes.append(t)
    if missing_dataframes and no_sync:
        raise errors.WraeblastError("insights cache is incomplete")
    # Fetch and persist whatever was missing (no-op when the list is empty).
    async for t, df in get_dataframes(
        league=league,
        types=missing_dataframes,
    ):
        log.info(
            "overview.response",
            lines=df.shape[0],
            type=t.value,
        )
        cache.put(f"i_{t.value}", df, format="table")
    return cache
async def initialize_filter_context(
    initialize_cache: bool = True,
    league: Optional[str] = None,
    cache: Optional[pd.HDFStore] = None,
    no_sync: bool = False,
) -> "ItemFilterContext":
    """Create an ``ItemFilterContext`` from cached economy data.

    When *initialize_cache* is true the insights cache is synced first
    (*league* is then required).  Otherwise a caller-supplied *cache* is
    used as-is, falling back to the module-level cache when none is given.

    Raises:
        RuntimeError: If *initialize_cache* is set without a *league*.
    """
    if initialize_cache:
        if league is None:
            raise RuntimeError("league must be provided if initializing cache")
        cache = await initialize_insights_cache(
            league=league,
            cache=cache,
            no_sync=no_sync,
        )
    elif cache is None:
        cache = _cache
    # BUG FIX: the original's final else-branch raised
    # RuntimeError("cache not provided") exactly when a cache *was*
    # provided with initialize_cache=False; a supplied cache is now used
    # directly.
    economy_data = {}
    for t in get_all_insights_types():
        overview = cache.get(f"i_{t.value}")
        economy_data[t.pluralized_underscored_value] = overview
    return ItemFilterContext(data=economy_data)
def _parse_name_and_details_id(
name: str, details_id: str
) -> tuple[Optional[int], tuple[constants.Influence, ...]]:
tokens = details_id.split("-")
ilvl_and_influences = tokens[name.count(" ") + name.count("-") + 1 :]
try:
return (
int(ilvl_and_influences[0]),
tuple(
constants.Influence(i.capitalize())
for i in ilvl_and_influences[1:]
),
)
except (IndexError, ValueError):
return (None, tuple())
def get_quantile_thresholds(df: pd.DataFrame) -> list[dict[str, float]]:
    """Return the minimum chaos value per quantile-column combination.

    Groups *df* by every pre-defined quantile column and returns one record
    per group with the group's lowest ``chaos_value``.
    """
    grouped = df.groupby(list(quantiles.keys()), as_index=False)
    minima = grouped.agg({"chaos_value": "min"})
    return minima.to_dict("records")  # type: ignore
class NinjaCurrencyOverviewSchema(SchemaModel):
    """Pandera schema for rows of a poe.ninja CurrencyOverview response.

    Columns are the flattened (snake_case) form of the API's JSON; the
    nested ``pay.*`` fields keep their dotted names via aliases.
    """

    currency_type_name: Series[String]
    chaos_equivalent: Series[float]
    details_id: Series[String]
    # Nullable "pay" (buy-side listing) statistics, aliased to the dotted
    # column names produced by json_normalize.
    pay_id: Series[float] = Field(alias="pay.id", nullable=True)
    pay_league_id: Series[float] = Field(alias="pay.league_id", nullable=True)
    pay_pay_currency_id: Series[float] = Field(
        alias="pay.pay_currency_id", nullable=True
    )
    pay_get_currency_id: Series[float] = Field(
        alias="pay.get_currency_id", nullable=True
    )
    pay_sample_time_utc: Series[
        Annotated[pd.DatetimeTZDtype, "ns", "utc"]
    ] = Field(alias="pay.sample_time_utc", coerce=True, nullable=True)
    pay_count: Series[float] = Field(alias="pay.count", nullable=True)
    pay_value: Series[float] = Field(alias="pay.value", nullable=True)
    pay_data_point_count: Series[float] = Field(
        alias="pay.data_point_count", ge=0, coerce=True, nullable=True
    )
    pay_includes_secondary: Series[bool] = Field(
        alias="pay.includes_secondary", coerce=True, nullable=True
    )
    pay_listing_count: Series[float] = Field(
        alias="pay.listing_count", coerce=True, nullable=True
    )
class NinjaItemOverviewSchema(SchemaModel):
    """Pandera schema for rows of a poe.ninja ItemOverview response.

    Used by ``transform_ninja_df`` as the fallback when a frame fails
    currency-overview validation.
    """

    id: Series[int]
    name: Series[String]
    item_class: Series[int]
    flavour_text: Optional[Series[String]]
    chaos_value: Series[float]
    exalted_value: Series[float]
    count: Series[int]
    details_id: Series[String]
    # listing_count: Series[int]
    # Columns only present for some item overview types.
    icon: Optional[Series[String]] = Field(nullable=True)
    base_type: Optional[Series[String]] = Field(nullable=True)
    gem_level: Optional[Series[float]] = Field(nullable=True)
    gem_quality: Optional[Series[float]] = Field(nullable=True)
    item_level: Optional[Series[float]] = Field(nullable=True)
    level_required: Optional[Series[float]] = Field(nullable=True)
    links: Optional[Series[float]] = Field(nullable=True)
    map_tier: Optional[Series[float]] = Field(nullable=True)
    stack_size: Optional[Series[float]] = Field(nullable=True)
    variant: Optional[Series[String]] = Field(nullable=True)

    class Config:
        # Coerce values into the declared dtypes during validation.
        coerce = True
class ExtendedNinjaOverviewSchema(SchemaModel):
    """Schema of the normalized frame produced by ``transform_ninja_df``.

    Fix: the original declared ``chaos_value: Series[float]`` twice; the
    duplicate annotation (a no-op that only obscured the field list) has
    been removed.
    """

    item_name: Series[String]
    chaos_value: Series[float]
    alt_quality: Series[String]
    is_alt_quality: Series[bool]
    chaos_value_log: Series[float]
    quartile: Series[int]
    quintile: Series[int]
    decile: Series[int]
    percentile: Series[int]
    # Columns only present for some overview types.
    base_type: Optional[Series[String]] = Field(nullable=True)
    gem_level: Optional[Series[float]] = Field(nullable=True)
    gem_quality: Optional[Series[float]] = Field(nullable=True)
    influences: Optional[Series[String]] = Field(nullable=True)
    level_required: Optional[Series[float]] = Field(nullable=True)
    links: Optional[Series[float]] = Field(nullable=True)
    map_tier: Optional[Series[float]] = Field(nullable=True)
    num_influences: Optional[Series[int]] = Field(nullable=True)
    orb_name: Optional[Series[String]] = Field(nullable=True)
    stack_size: Optional[Series[float]] = Field(nullable=True)
    uber_blight: Optional[Series[bool]] = Field(nullable=True)
    variant: Optional[Series[String]] = Field(nullable=True)
class PostProcessedNinjaOverviewSchema(ExtendedNinjaOverviewSchema):
    """Extended schema plus columns added after the initial transform.

    ``display_value`` is the human-readable price string (see
    ``get_display_value``); presumably applied during context
    post-processing — confirm against ``ItemFilterContext._post_process``.
    """

    exalted_value: Series[float]
    display_value: Series[String]
@check_output(ExtendedNinjaOverviewSchema)
def transform_ninja_df(df: pd.DataFrame) -> pd.DataFrame:
    """Normalize a raw poe.ninja overview dataframe.

    Validates *df* against the currency or item schema, then derives a
    uniform frame with item names, alt-quality/influence columns, chaos
    values (plus their logs) and pre-defined quantile columns.
    """
    currency_overview_schema = NinjaCurrencyOverviewSchema.to_schema()
    item_overview_schema = NinjaItemOverviewSchema.to_schema()
    is_currency_overview = False
    df = df.fillna(0)
    # Detect the overview flavour by which schema validates successfully.
    try:
        df = currency_overview_schema.validate(df)
        is_currency_overview = True
    except SchemaError as e:
        df = item_overview_schema.validate(df)
    if is_currency_overview:
        # Synthesize rows for shards missing from the overview, pricing
        # each at 1/20 of its parent orb's chaos value.
        try:
            shards = []
            for shard_name, orb_name in shard_names_to_orb_names.items():
                if not df[df["currency_type_name"] == shard_name].empty:
                    continue
                orb_value = (
                    df[df["currency_type_name"] == orb_name][
                        "chaos_equivalent"
                    ].iloc[0]
                    if orb_name != "Chaos Orb"
                    else 1
                )
                shards.append(
                    {
                        "currency_type_name": shard_name,
                        "chaos_equivalent": orb_value / 20,
                    }
                )
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # migrate to pd.concat when upgrading pandas.
            df = df.append(
                shards,
                verify_integrity=True,
                sort=True,
                ignore_index=True,
            )
        except IndexError:
            # Parent orb absent from the overview: skip shard synthesis.
            pass
    # Drop rows whose sparkline has no data points.
    if "sparkline.data" in df.columns:
        df = df.loc[df["sparkline.data"].str.len() != 0]
    output = pd.DataFrame()
    output["item_name"] = (
        df["currency_type_name"]
        if "currency_type_name" in df.columns
        else df["name"]
    )
    if not is_currency_overview:
        output["scourged"] = output["item_name"].map(
            lambda name: name.startswith("Scourged")
        )
    for label in ("currency_type_name", "skill_gem_name"):
        if label in df.columns:
            output["item_name"] = df[label]
    if "map_tier" in df.columns:
        output["uber_blight"] = df["name"].map(
            lambda name: name.startswith("Blight-ravaged")
        )
    # Cluster jewel overviews: derive the enchantment and the passive-count
    # range from the trade metadata.
    if (
        "base_type" in df.columns
        and not df[
            df["base_type"].str.contains("Cluster Jewel", na=False)
        ].empty
    ):
        try:
            output["cluster_jewel_enchantment"] = df["name"].map(
                lambda name: constants.get_cluster_jewel_passive(name).value
            )
            output["cluster_jewel_passives_min"] = df["trade_info"].apply(
                lambda trade_info: [
                    ti
                    for ti in trade_info
                    if ti["mod"] == "enchant.stat_3086156145"
                ][0]["min"]
            )
            output["cluster_jewel_passives_max"] = df["trade_info"].apply(
                lambda trade_info: [
                    ti
                    for ti in trade_info
                    if ti["mod"] == "enchant.stat_3086156145"
                ][0]["max"]
            )
            output["cluster_jewel_passives"] = output[
                "cluster_jewel_passives_min"
            ]
        except (KeyError, IndexError) as e:
            # TODO: Find a way to filter out unique cluster jewels better
            pass
    # Alternate-quality gems carry their quality as a name prefix
    # ("Anomalous"/"Divergent"/"Phantasmal"); split it into its own column.
    output["alt_quality"] = ""
    output["is_alt_quality"] = False
    alt_filter = (
        output["item_name"].str.startswith("Anomalous")
        | output["item_name"].str.startswith("Divergent")
        | output["item_name"].str.startswith("Phantasmal")
    )
    if not output[alt_filter].empty:
        output.loc[alt_filter, "is_alt_quality"] = True
        output.loc[alt_filter, "alt_quality"] = output["item_name"].apply(
            lambda s: s[: s.find(" ")],
        )
        output.loc[alt_filter, "item_name"] = output["item_name"].apply(
            lambda s: s[s.find(" ") + 1 :],
        )
    # Influences are encoded in the details_id slug; render as "a/b/...".
    if "name" in df.columns and "details_id" in df.columns:
        output["influences"] = df.apply(
            lambda r: "/".join(
                i.value
                for i in _parse_name_and_details_id(
                    str(r["name"]),
                    str(r["details_id"]),
                )[1]
            ),
            axis=1,
        )
    output["num_influences"] = (
        df["variant"].str.count("/").fillna(0).astype(int)
        if "variant" in df.columns
        else 0
    )
    # Copy optional columns through unchanged (links defaults to 0).
    for column in (
        "base_type",
        "level_required",
        "links",
        "gem_level",
        "gem_quality",
        "map_tier",
        "stack_size",
    ):
        if column in df.columns:
            output[column] = df[column]
            if column == "links":
                output[column] = output[column].fillna(0)
    output["chaos_value"] = (
        df["chaos_equivalent"]
        if "chaos_equivalent" in df.columns
        else df["chaos_value"]
    )
    # Normalized chaos values
    # XXX: since log(0) is -inf, the min chaos value of the dataframe replaces
    # rows with a chaos value of 0
    min_chaos_value = output.loc[
        output["chaos_value"] != 0, "chaos_value"
    ].min()
    output["chaos_value"].replace(0, min_chaos_value, inplace=True)  # type: ignore
    output["chaos_value_log"] = np.log(output["chaos_value"])
    # Pre-defined quantiles (quartiles, quintiles, percentiles)
    for label, q in quantiles.items():
        labels = None
        if isinstance(q, (list, tuple)):
            # A (bin_count, labels) pair maps bins onto explicit labels.
            q, labels = q
        output[label] = pd.qcut(
            output["chaos_value"].rank(method="first", numeric_only=True),
            q=q,
            labels=False if labels is None else None,
            precision=0,
            duplicates="drop",
        )
        if labels is not None:
            output[label] = output[label].map(dict(enumerate(labels)))
    return output
def json_normalize(data: dict[Any, Any]) -> pd.DataFrame:
    """Flatten nested JSON into a DataFrame with snake_case column names."""
    frame = pd.json_normalize(data)
    frame.columns = [inflection.underscore(name) for name in frame.columns]
    return frame
@uplink.install
class NinjaDataFrameFactory(uplink.converters.Factory):
    """Uplink converter that turns poe.ninja JSON into normalized frames."""

    def create_response_body_converter(self, cls, request_definition):
        # Flatten the "lines" payload, then run the shared transform.
        def convert(response):
            flattened = json_normalize(response.json()["lines"])
            return transform_ninja_df(df=flattened)

        return convert
# Shared retry policy: retry on HTTP 503 or any raised exception, giving up
# after 5 attempts or 10 seconds, with jittered exponential backoff.
uplink_retry = uplink.retry(
    when=uplink.retry.when.status(503) | uplink.retry.when.raises(Exception),
    stop=uplink.retry.stop.after_attempt(5)
    | uplink.retry.stop.after_delay(10),
    backoff=uplink.retry.backoff.jittered(multiplier=0.5),
)
# The body is intentionally empty: uplink builds the request from the
# decorator stack.
@raise_for_status
@uplink_retry
@uplink.returns.json
@uplink.json
@uplink.get
def get_json() -> uplink.commands.RequestDefinitionBuilder:
    """Template for GET requests with JSON as both request and response."""
# The body is intentionally empty: uplink builds the request from the
# decorator stack; responses are converted by NinjaDataFrameFactory.
@raise_for_status
@uplink_retry
@uplink.get
def get_dataframe() -> uplink.commands.RequestDefinitionBuilder:
    """Template for GET requests whose responses become dataframes."""
    ...
class NinjaConsumer(uplink.Consumer):
    """Uplink consumer for the poe.ninja data API.

    Each endpoint is declared through the ``get_dataframe`` template and
    rate-limited; responses arrive as already-transformed dataframes.
    """

    default_base_url = "https://poe.ninja/api/data/"

    @uplink.ratelimit(calls=2, period=150)
    @get_dataframe("CurrencyOverview")  # type: ignore
    def get_currency_overview(
        self,
        league: uplink.Query(type=str),  # type: ignore
        type: uplink.Query(type=CurrencyType),  # type: ignore
    ) -> pd.DataFrame:
        ...

    @uplink.ratelimit(calls=2, period=150)
    @get_dataframe("CurrencyHistory")  # type: ignore
    def get_currency_history(
        self,
        league: uplink.Query(type=str),  # type: ignore
        type: uplink.Query(type=CurrencyType),  # type: ignore
        currency_id: uplink.Query("currencyId", type=int),  # type: ignore
    ) -> pd.DataFrame:
        ...

    # Item overviews are cheaper to serve, hence the higher rate limit.
    @uplink.ratelimit(calls=30, period=150)
    @get_dataframe("ItemOverview")  # type: ignore
    def get_item_overview(
        self,
        league: uplink.Query(type=str),  # type: ignore
        type: uplink.Query(type=ItemType),  # type: ignore
    ) -> pd.DataFrame:
        ...
class ItemFilterContext(pydantic.BaseModel):
    """Entrypoint for accessing economy data from poe.ninja."""

    # One DataFrame per economy-data type, keyed by the pluralized,
    # underscored type name (enforced by data_must_contain_all_types).
    data: dict[str, pd.DataFrame]
    # Chaos-equivalent exchange value of one Exalted Orb.
    # NOTE(review): the value is read from a "chaos_value" cell, which this
    # module elsewhere treats as float -- the int annotation looks too
    # narrow; confirm before tightening validation.
    _exalted_value: int = pydantic.PrivateAttr(default=0)
    # Per-type quantile-threshold rows, keyed like ``data``.
    # BUG FIX: default_factory was ``list``, contradicting the declared dict
    # type and the str-keyed lookups in get_quantiles_for_threshold.
    _quantile_thresholds: dict[
        str, list[dict[str, float]]
    ] = pydantic.PrivateAttr(default_factory=dict)

    class Config:
        # pandas.DataFrame is not a pydantic-native type.
        arbitrary_types_allowed = True

    def __init__(self, **data) -> None:
        super().__init__(**data)
        # The Exalted Orb's chaos value anchors all chaos->exalted conversions.
        self._exalted_value = self.data["currencies"][
            self.data["currencies"].item_name == "Exalted Orb"
        ].iloc[0]["chaos_value"]
        # Compute thresholds from the raw frames first, then post-process
        # every frame (adds exalted_value/display_value columns).
        self._quantile_thresholds = {
            k: get_quantile_thresholds(df) for k, df in self.data.items()
        }
        self.data = {
            k: self._post_process(k, df) for k, df in self.data.items()
        }

    def get_display_value(
        self,
        chaos_value: float,
        round_down_by: int = 1,
        precision: int = 0,
    ):
        """Formats a chaos value for display using the cached exalted rate."""
        return get_display_value(
            chaos_value=chaos_value,
            exalted_exchange_value=self._exalted_value,
            round_down_by=round_down_by,
            precision=precision,
        )

    def get_quantiles_for_threshold(
        self,
        key: str,
        min_chaos_value: float,
    ) -> Optional[dict[str, float]]:
        """Returns the first threshold row for *key* whose chaos value is at
        least *min_chaos_value*, falling back to the last row.
        """
        for threshold in self._quantile_thresholds[key]:
            if threshold["chaos_value"] >= min_chaos_value:
                return threshold
        return self._quantile_thresholds[key][-1]

    @pydantic.validator("data")
    def data_must_contain_all_types(
        cls, v: dict[str, pd.DataFrame]
    ) -> dict[str, pd.DataFrame]:
        """Validates that every currency and item type has an entry in data."""
        for type_ in [*CurrencyType, *ItemType]:
            if type_.pluralized_underscored_value not in v:
                raise ValueError(f"{type_} missing from filter context")
        return v

    @check_io(
        df=ExtendedNinjaOverviewSchema.to_schema(),
        out=PostProcessedNinjaOverviewSchema.to_schema(),
    )
    def _post_process(self, key: str, df: pd.DataFrame) -> pd.DataFrame:
        """Adds exalted-value and display-value columns derived from chaos values."""
        df["exalted_value"] = df["chaos_value"].apply(
            lambda x: x / self._exalted_value
        )
        df["display_value"] = df["chaos_value"].apply(
            lambda x: get_display_value(
                chaos_value=x,
                exalted_exchange_value=self._exalted_value,
                precision=2,
            )
        )
        return df
| [
"numpy.log",
"uplink.retry.backoff.jittered",
"uplink.retry.stop.after_delay",
"uplink.Query",
"wraeblast.constants.get_cluster_jewel_passive",
"uplink.ratelimit",
"pandera.model_components.Field",
"pandas.DataFrame",
"inflection.pluralize",
"pydantic.PrivateAttr",
"pydantic.validator",
"wraeb... | [((782, 804), 'structlog.get_logger', 'structlog.get_logger', ([], {}), '()\n', (802, 804), False, 'import structlog\n'), ((11725, 11766), 'pandera.decorators.check_output', 'check_output', (['ExtendedNinjaOverviewSchema'], {}), '(ExtendedNinjaOverviewSchema)\n', (11737, 11766), False, 'from pandera.decorators import check_io, check_output\n'), ((1772, 1820), 'os.getenv', 'os.getenv', (['"""WRAEBLAST_CACHE"""', '"""./.wbinsights.h5"""'], {}), "('WRAEBLAST_CACHE', './.wbinsights.h5')\n", (1781, 1820), False, 'import os\n'), ((3301, 3389), 'wraeblast.errors.UnsuccessfulInsightsRequest', 'errors.UnsuccessfulInsightsRequest', (['f"""error {response.status_code}: {response.url}"""'], {}), "(\n f'error {response.status_code}: {response.url}')\n", (3335, 3389), False, 'from wraeblast import constants, errors\n'), ((5263, 5286), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5284, 5286), False, 'import aiohttp\n'), ((5300, 5337), 'uplink.AiohttpClient', 'uplink.AiohttpClient', ([], {'session': 'session'}), '(session=session)\n', (5320, 5337), False, 'import uplink\n'), ((8525, 8561), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.id"""', 'nullable': '(True)'}), "(alias='pay.id', nullable=True)\n", (8530, 8561), False, 'from pandera.model_components import Field\n'), ((8597, 8640), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.league_id"""', 'nullable': '(True)'}), "(alias='pay.league_id', nullable=True)\n", (8602, 8640), False, 'from pandera.model_components import Field\n'), ((8682, 8731), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.pay_currency_id"""', 'nullable': '(True)'}), "(alias='pay.pay_currency_id', nullable=True)\n", (8687, 8731), False, 'from pandera.model_components import Field\n'), ((8787, 8836), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.get_currency_id"""', 'nullable': '(True)'}), "(alias='pay.get_currency_id', nullable=True)\n", (8792, 
8836), False, 'from pandera.model_components import Field\n'), ((8943, 9005), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.sample_time_utc"""', 'coerce': '(True)', 'nullable': '(True)'}), "(alias='pay.sample_time_utc', coerce=True, nullable=True)\n", (8948, 9005), False, 'from pandera.model_components import Field\n'), ((9037, 9076), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.count"""', 'nullable': '(True)'}), "(alias='pay.count', nullable=True)\n", (9042, 9076), False, 'from pandera.model_components import Field\n'), ((9108, 9147), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.value"""', 'nullable': '(True)'}), "(alias='pay.value', nullable=True)\n", (9113, 9147), False, 'from pandera.model_components import Field\n'), ((9190, 9259), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.data_point_count"""', 'ge': '(0)', 'coerce': '(True)', 'nullable': '(True)'}), "(alias='pay.data_point_count', ge=0, coerce=True, nullable=True)\n", (9195, 9259), False, 'from pandera.model_components import Field\n'), ((9317, 9382), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.includes_secondary"""', 'coerce': '(True)', 'nullable': '(True)'}), "(alias='pay.includes_secondary', coerce=True, nullable=True)\n", (9322, 9382), False, 'from pandera.model_components import Field\n'), ((9436, 9496), 'pandera.model_components.Field', 'Field', ([], {'alias': '"""pay.listing_count"""', 'coerce': '(True)', 'nullable': '(True)'}), "(alias='pay.listing_count', coerce=True, nullable=True)\n", (9441, 9496), False, 'from pandera.model_components import Field\n'), ((9861, 9881), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (9866, 9881), False, 'from pandera.model_components import Field\n'), ((9924, 9944), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (9929, 9944), False, 'from pandera.model_components 
import Field\n'), ((9986, 10006), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (9991, 10006), False, 'from pandera.model_components import Field\n'), ((10050, 10070), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10055, 10070), False, 'from pandera.model_components import Field\n'), ((10113, 10133), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10118, 10133), False, 'from pandera.model_components import Field\n'), ((10180, 10200), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10185, 10200), False, 'from pandera.model_components import Field\n'), ((10238, 10258), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10243, 10258), False, 'from pandera.model_components import Field\n'), ((10299, 10319), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10304, 10319), False, 'from pandera.model_components import Field\n'), ((10362, 10382), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10367, 10382), False, 'from pandera.model_components import Field\n'), ((10423, 10443), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10428, 10443), False, 'from pandera.model_components import Field\n'), ((10873, 10893), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10878, 10893), False, 'from pandera.model_components import Field\n'), ((10935, 10955), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (10940, 10955), False, 'from pandera.model_components import Field\n'), ((10999, 11019), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11004, 11019), False, 'from 
pandera.model_components import Field\n'), ((11063, 11083), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11068, 11083), False, 'from pandera.model_components import Field\n'), ((11130, 11150), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11135, 11150), False, 'from pandera.model_components import Field\n'), ((11188, 11208), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11193, 11208), False, 'from pandera.model_components import Field\n'), ((11249, 11269), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11254, 11269), False, 'from pandera.model_components import Field\n'), ((11314, 11334), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11319, 11334), False, 'from pandera.model_components import Field\n'), ((11376, 11396), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11381, 11396), False, 'from pandera.model_components import Field\n'), ((11439, 11459), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11444, 11459), False, 'from pandera.model_components import Field\n'), ((11502, 11522), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11507, 11522), False, 'from pandera.model_components import Field\n'), ((11563, 11583), 'pandera.model_components.Field', 'Field', ([], {'nullable': '(True)'}), '(nullable=True)\n', (11568, 11583), False, 'from pandera.model_components import Field\n'), ((13221, 13235), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13233, 13235), True, 'import pandas as pd\n'), ((16920, 16949), 'numpy.log', 'np.log', (["output['chaos_value']"], {}), "(output['chaos_value'])\n", (16926, 16949), True, 'import numpy as np\n'), ((17577, 17600), 'pandas.json_normalize', 
'pd.json_normalize', (['data'], {}), '(data)\n', (17594, 17600), True, 'import pandas as pd\n'), ((18632, 18669), 'uplink.ratelimit', 'uplink.ratelimit', ([], {'calls': '(2)', 'period': '(150)'}), '(calls=2, period=150)\n', (18648, 18669), False, 'import uplink\n'), ((18930, 18967), 'uplink.ratelimit', 'uplink.ratelimit', ([], {'calls': '(2)', 'period': '(150)'}), '(calls=2, period=150)\n', (18946, 18967), False, 'import uplink\n'), ((19301, 19339), 'uplink.ratelimit', 'uplink.ratelimit', ([], {'calls': '(30)', 'period': '(150)'}), '(calls=30, period=150)\n', (19317, 19339), False, 'import uplink\n'), ((19754, 19785), 'pydantic.PrivateAttr', 'pydantic.PrivateAttr', ([], {'default': '(0)'}), '(default=0)\n', (19774, 19785), False, 'import pydantic\n'), ((19862, 19904), 'pydantic.PrivateAttr', 'pydantic.PrivateAttr', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (19882, 19904), False, 'import pydantic\n'), ((21107, 21133), 'pydantic.validator', 'pydantic.validator', (['"""data"""'], {}), "('data')\n", (21125, 21133), False, 'import pydantic\n'), ((1930, 1963), 'inflection.underscore', 'inflection.underscore', (['self.value'], {}), '(self.value)\n', (1951, 1963), False, 'import inflection\n'), ((2045, 2089), 'inflection.pluralize', 'inflection.pluralize', (['self.underscored_value'], {}), '(self.underscored_value)\n', (2065, 2089), False, 'import inflection\n'), ((6381, 6434), 'wraeblast.errors.WraeblastError', 'errors.WraeblastError', (['"""insights cache is incomplete"""'], {}), "('insights cache is incomplete')\n", (6402, 6434), False, 'from wraeblast import constants, errors\n'), ((17619, 17643), 'inflection.underscore', 'inflection.underscore', (['c'], {}), '(c)\n', (17640, 17643), False, 'import inflection\n'), ((18149, 18194), 'uplink.retry.backoff.jittered', 'uplink.retry.backoff.jittered', ([], {'multiplier': '(0.5)'}), '(multiplier=0.5)\n', (18178, 18194), False, 'import uplink\n'), ((3069, 3102), 'inflection.underscore', 
'inflection.underscore', (['self.value'], {}), '(self.value)\n', (3090, 3102), False, 'import inflection\n'), ((3732, 3774), 'wraeblast.filtering.parsers.extended.env.round_down', 'env.round_down', (['chaos_value', 'round_down_by'], {}), '(chaos_value, round_down_by)\n', (3746, 3774), False, 'from wraeblast.filtering.parsers.extended import env\n'), ((17983, 18012), 'uplink.retry.when.status', 'uplink.retry.when.status', (['(503)'], {}), '(503)\n', (18007, 18012), False, 'import uplink\n'), ((18015, 18050), 'uplink.retry.when.raises', 'uplink.retry.when.raises', (['Exception'], {}), '(Exception)\n', (18039, 18050), False, 'import uplink\n'), ((18061, 18095), 'uplink.retry.stop.after_attempt', 'uplink.retry.stop.after_attempt', (['(5)'], {}), '(5)\n', (18092, 18095), False, 'import uplink\n'), ((18102, 18135), 'uplink.retry.stop.after_delay', 'uplink.retry.stop.after_delay', (['(10)'], {}), '(10)\n', (18131, 18135), False, 'import uplink\n'), ((18786, 18808), 'uplink.Query', 'uplink.Query', ([], {'type': 'str'}), '(type=str)\n', (18798, 18808), False, 'import uplink\n'), ((18840, 18871), 'uplink.Query', 'uplink.Query', ([], {'type': 'CurrencyType'}), '(type=CurrencyType)\n', (18852, 18871), False, 'import uplink\n'), ((19082, 19104), 'uplink.Query', 'uplink.Query', ([], {'type': 'str'}), '(type=str)\n', (19094, 19104), False, 'import uplink\n'), ((19136, 19167), 'uplink.Query', 'uplink.Query', ([], {'type': 'CurrencyType'}), '(type=CurrencyType)\n', (19148, 19167), False, 'import uplink\n'), ((19206, 19242), 'uplink.Query', 'uplink.Query', (['"""currencyId"""'], {'type': 'int'}), "('currencyId', type=int)\n", (19218, 19242), False, 'import uplink\n'), ((19448, 19470), 'uplink.Query', 'uplink.Query', ([], {'type': 'str'}), '(type=str)\n', (19460, 19470), False, 'import uplink\n'), ((19502, 19529), 'uplink.Query', 'uplink.Query', ([], {'type': 'ItemType'}), '(type=ItemType)\n', (19514, 19529), False, 'import uplink\n'), ((14072, 14113), 
'wraeblast.constants.get_cluster_jewel_passive', 'constants.get_cluster_jewel_passive', (['name'], {}), '(name)\n', (14107, 14113), False, 'from wraeblast import constants, errors\n')] |
from heapq import heappush ,heappop, heapify ,_heapify_max
from typing import Union
# Running scalar median
# Ack: https://medium.com/mind-boggling-algorithms/streaming-algorithms-running-median-of-an-array-using-two-heaps-cd1b61b3c034
def med(s, x: Union[float, int, None] = None) -> dict:
    """Update a running-median state with one new scalar observation.

    Classic two-heap running median: ``low_heap`` holds the lower half as a
    max-heap -- stored NEGATED so the stdlib min-heap functions apply
    directly.  (The original relied on the private ``heapq._heapify_max``
    and re-heapified the whole list on every call, i.e. O(n) per update;
    this version is O(log n).)

    :param s: state dict from a previous call (or ``{}``/``None`` to start)
    :param x: new observation; if ``None`` the state is returned unchanged
    :return: updated state dict with keys 'low_heap', 'high_heap', 'median'
    """
    if not s or s.get('low_heap') is None:
        s = {'low_heap': [], 'high_heap': [], 'median': 0}
    if x is None:
        # Nothing to ingest (previously this raised TypeError on comparison).
        return s
    # Route the observation to the half it belongs to.
    if x < s['median']:
        heappush(s['low_heap'], -x)
    else:
        heappush(s['high_heap'], x)
    # Rebalance so the two halves never differ in size by more than one.
    if len(s['low_heap']) > len(s['high_heap']) + 1:
        heappush(s['high_heap'], -heappop(s['low_heap']))
    elif len(s['high_heap']) > len(s['low_heap']) + 1:
        heappush(s['low_heap'], -heappop(s['high_heap']))
    # Median: midpoint of the two heap tops (even count) or the top of the
    # larger heap (odd count).
    if len(s['low_heap']) == len(s['high_heap']):
        s['median'] = (-s['low_heap'][0] + s['high_heap'][0]) / 2.0
    elif len(s['low_heap']) > len(s['high_heap']):
        s['median'] = float(-s['low_heap'][0])
    else:
        s['median'] = float(s['high_heap'][0])
    return s
if __name__ == '__main__':
    # Smoke test: the streaming median must agree with numpy's batch median
    # for a randomly-sized standard-normal sample.
    import random

    import numpy as np

    sample_size = random.choice([1, 5, 10, 1000])
    values = np.random.randn(sample_size)
    state = {}
    for value in values:
        state = med(s=state, x=value)
    assert state['median'] == np.median(values)
| [
"numpy.median",
"random.choice",
"heapq._heapify_max",
"heapq.heappop",
"heapq.heappush"
] | [((516, 542), 'heapq.heappush', 'heappush', (["s['low_heap']", 'x'], {}), "(s['low_heap'], x)\n", (524, 542), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((551, 578), 'heapq._heapify_max', '_heapify_max', (["s['low_heap']"], {}), "(s['low_heap'])\n", (563, 578), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((597, 624), 'heapq.heappush', 'heappush', (["s['high_heap']", 'x'], {}), "(s['high_heap'], x)\n", (605, 624), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((744, 771), 'heapq._heapify_max', '_heapify_max', (["s['low_heap']"], {}), "(s['low_heap'])\n", (756, 771), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((1279, 1310), 'random.choice', 'random.choice', (['[1, 5, 10, 1000]'], {}), '([1, 5, 10, 1000])\n', (1292, 1310), False, 'import random\n'), ((1388, 1401), 'numpy.median', 'np.median', (['xs'], {}), '(xs)\n', (1397, 1401), True, 'import numpy as np\n'), ((712, 734), 'heapq.heappop', 'heappop', (["s['low_heap']"], {}), "(s['low_heap'])\n", (719, 734), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((892, 919), 'heapq._heapify_max', '_heapify_max', (["s['low_heap']"], {}), "(s['low_heap'])\n", (904, 919), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n'), ((859, 882), 'heapq.heappop', 'heappop', (["s['high_heap']"], {}), "(s['high_heap'])\n", (866, 882), False, 'from heapq import heappush, heappop, heapify, _heapify_max\n')] |
"""Unit tests for satellite_utils.py."""
import unittest
import numpy
from ml4tc.utils import satellite_utils
# Absolute tolerance for float comparisons throughout this test module.
TOLERANCE = 1e-6
# The following constants are used to test _find_storm_center_px_space.
GRID_LATITUDES_DEG_N = numpy.array(
    [-10, -8, -6, -4, -2, 0, 3, 6, 9, 12, 20], dtype=float
)
GRID_LONGITUDES_DEG_E = numpy.array(
    [350, 355, 0, 5, 10, 15, 25, 35, 45, 55, 65, 75], dtype=float
)
# Each "storm" below pairs a lat/long position with the expected fractional
# (row, column) pixel coordinates of that position in the grid above.
FIRST_STORM_LATITUDE_DEG_N = 6.9
FIRST_STORM_LONGITUDE_DEG_E = 354.3
FIRST_STORM_ROW = 7.5
FIRST_STORM_COLUMN = 0.5
SECOND_STORM_LATITUDE_DEG_N = 16.7
SECOND_STORM_LONGITUDE_DEG_E = 26.3
SECOND_STORM_ROW = 9.5
SECOND_STORM_COLUMN = 6.5
THIRD_STORM_LATITUDE_DEG_N = 11.9
THIRD_STORM_LONGITUDE_DEG_E = 35.5
THIRD_STORM_ROW = 8.5
THIRD_STORM_COLUMN = 7.5
FOURTH_STORM_LATITUDE_DEG_N = -1.5
FOURTH_STORM_LONGITUDE_DEG_E = 51.4
FOURTH_STORM_ROW = 4.5
FOURTH_STORM_COLUMN = 8.5
FIFTH_STORM_LATITUDE_DEG_N = 15.1
FIFTH_STORM_LONGITUDE_DEG_E = 7.6
FIFTH_STORM_ROW = 9.5
FIFTH_STORM_COLUMN = 3.5
# The following constants are used to test _crop_image_around_storm_center.
UNCROPPED_DATA_MATRIX = numpy.array([
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
    [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
    [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36],
    [37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48],
    [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60],
    [61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72],
    [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84],
    [85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96],
    [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108],
    [109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120],
    [121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132]
], dtype=float)
NUM_CROPPED_ROWS = 4
NUM_CROPPED_COLUMNS = 6
# Expected crops for each storm center.  Repeated rows/columns at the edges
# (e.g. the duplicated 73s below) presumably reflect edge replication when
# the crop window extends past the grid boundary -- confirm against
# _crop_image_around_storm_center.
FIRST_CROPPED_DATA_MATRIX = numpy.array([
    [73, 73, 73, 74, 75, 76],
    [85, 85, 85, 86, 87, 88],
    [97, 97, 97, 98, 99, 100],
    [109, 109, 109, 110, 111, 112]
], dtype=float)
FIRST_CROPPED_LATITUDES_DEG_N = numpy.array([3, 6, 9, 12], dtype=float)
FIRST_CROPPED_LONGITUDES_DEG_E = numpy.array(
    [340, 345, 350, 355, 0, 5], dtype=float
)
SECOND_CROPPED_DATA_MATRIX = numpy.array([
    [101, 102, 103, 104, 105, 106],
    [113, 114, 115, 116, 117, 118],
    [125, 126, 127, 128, 129, 130],
    [125, 126, 127, 128, 129, 130]
], dtype=float)
SECOND_CROPPED_LATITUDES_DEG_N = numpy.array([9, 12, 20, 28], dtype=float)
SECOND_CROPPED_LONGITUDES_DEG_E = numpy.array(
    [10, 15, 25, 35, 45, 55], dtype=float
)
THIRD_CROPPED_DATA_MATRIX = numpy.array([
    [90, 91, 92, 93, 94, 95],
    [102, 103, 104, 105, 106, 107],
    [114, 115, 116, 117, 118, 119],
    [126, 127, 128, 129, 130, 131]
], dtype=float)
THIRD_CROPPED_LATITUDES_DEG_N = numpy.array([6, 9, 12, 20], dtype=float)
THIRD_CROPPED_LONGITUDES_DEG_E = numpy.array(
    [15, 25, 35, 45, 55, 65], dtype=float
)
FOURTH_CROPPED_DATA_MATRIX = numpy.array([
    [43, 44, 45, 46, 47, 48],
    [55, 56, 57, 58, 59, 60],
    [67, 68, 69, 70, 71, 72],
    [79, 80, 81, 82, 83, 84]
], dtype=float)
FOURTH_CROPPED_LATITUDES_DEG_N = numpy.array([-4, -2, 0, 3], dtype=float)
FOURTH_CROPPED_LONGITUDES_DEG_E = numpy.array(
    [25, 35, 45, 55, 65, 75], dtype=float
)
FIFTH_CROPPED_DATA_MATRIX = numpy.array([
    [98, 99, 100, 101, 102, 103],
    [110, 111, 112, 113, 114, 115],
    [122, 123, 124, 125, 126, 127],
    [122, 123, 124, 125, 126, 127]
], dtype=float)
FIFTH_CROPPED_LATITUDES_DEG_N = numpy.array([9, 12, 20, 28], dtype=float)
FIFTH_CROPPED_LONGITUDES_DEG_E = numpy.array(
    [355, 0, 5, 10, 15, 25], dtype=float
)
# The following constants are used to test get_cyclone_id and parse_cyclone_id.
YEAR = 1998
BASIN_ID_STRING = 'AL'
CYCLONE_NUMBER = 5
CYCLONE_ID_STRING = '1998AL05'
class SatelliteUtilsTests(unittest.TestCase):
    """Each method is a unit test for satellite_utils.py.

    The five storm-center tests and five cropping tests were copy-pasted
    variants of each other; they now delegate to two private check helpers.
    Public test-method names are unchanged, so unittest discovery and any
    test selection by name still work.
    """

    def _check_find_storm_center(
            self, storm_latitude_deg_n, storm_longitude_deg_e, expected_row,
            expected_column):
        """Runs _find_storm_center_px_space and verifies the returned coords.

        ``+ 0.`` passes fresh copies of the module-level grid arrays so the
        shared test constants cannot be mutated by the code under test.
        """
        this_row, this_column = satellite_utils._find_storm_center_px_space(
            storm_latitude_deg_n=storm_latitude_deg_n,
            storm_longitude_deg_e=storm_longitude_deg_e,
            grid_latitudes_deg_n=GRID_LATITUDES_DEG_N + 0.,
            grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.
        )
        self.assertTrue(this_row == expected_row)
        self.assertTrue(this_column == expected_column)

    def _check_crop_image(
            self, storm_row, storm_column, expected_data_matrix,
            expected_latitudes_deg_n, expected_longitudes_deg_e):
        """Runs _crop_image_around_storm_center and verifies all outputs."""
        (
            this_data_matrix, these_latitudes_deg_n, these_longitudes_deg_e
        ) = satellite_utils._crop_image_around_storm_center(
            data_matrix=UNCROPPED_DATA_MATRIX + 0.,
            grid_latitudes_deg_n=GRID_LATITUDES_DEG_N + 0.,
            grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.,
            storm_row=storm_row, storm_column=storm_column,
            num_cropped_rows=NUM_CROPPED_ROWS,
            num_cropped_columns=NUM_CROPPED_COLUMNS
        )
        self.assertTrue(numpy.allclose(
            this_data_matrix, expected_data_matrix, atol=TOLERANCE
        ))
        self.assertTrue(numpy.allclose(
            these_latitudes_deg_n, expected_latitudes_deg_n, atol=TOLERANCE
        ))
        self.assertTrue(numpy.allclose(
            these_longitudes_deg_e, expected_longitudes_deg_e,
            atol=TOLERANCE
        ))

    def test_find_storm_center_px_space_first(self):
        """Ensures correct output from _find_storm_center_px_space (storm 1)."""
        self._check_find_storm_center(
            FIRST_STORM_LATITUDE_DEG_N, FIRST_STORM_LONGITUDE_DEG_E,
            FIRST_STORM_ROW, FIRST_STORM_COLUMN
        )

    def test_find_storm_center_px_space_second(self):
        """Ensures correct output from _find_storm_center_px_space (storm 2)."""
        self._check_find_storm_center(
            SECOND_STORM_LATITUDE_DEG_N, SECOND_STORM_LONGITUDE_DEG_E,
            SECOND_STORM_ROW, SECOND_STORM_COLUMN
        )

    def test_find_storm_center_px_space_third(self):
        """Ensures correct output from _find_storm_center_px_space (storm 3)."""
        self._check_find_storm_center(
            THIRD_STORM_LATITUDE_DEG_N, THIRD_STORM_LONGITUDE_DEG_E,
            THIRD_STORM_ROW, THIRD_STORM_COLUMN
        )

    def test_find_storm_center_px_space_fourth(self):
        """Ensures correct output from _find_storm_center_px_space (storm 4)."""
        self._check_find_storm_center(
            FOURTH_STORM_LATITUDE_DEG_N, FOURTH_STORM_LONGITUDE_DEG_E,
            FOURTH_STORM_ROW, FOURTH_STORM_COLUMN
        )

    def test_find_storm_center_px_space_fifth(self):
        """Ensures correct output from _find_storm_center_px_space (storm 5)."""
        self._check_find_storm_center(
            FIFTH_STORM_LATITUDE_DEG_N, FIFTH_STORM_LONGITUDE_DEG_E,
            FIFTH_STORM_ROW, FIFTH_STORM_COLUMN
        )

    def test_crop_image_around_storm_center_first(self):
        """Ensures correct output from _crop_image_around_storm_center (storm 1)."""
        self._check_crop_image(
            FIRST_STORM_ROW, FIRST_STORM_COLUMN, FIRST_CROPPED_DATA_MATRIX,
            FIRST_CROPPED_LATITUDES_DEG_N, FIRST_CROPPED_LONGITUDES_DEG_E
        )

    def test_crop_image_around_storm_center_second(self):
        """Ensures correct output from _crop_image_around_storm_center (storm 2)."""
        self._check_crop_image(
            SECOND_STORM_ROW, SECOND_STORM_COLUMN, SECOND_CROPPED_DATA_MATRIX,
            SECOND_CROPPED_LATITUDES_DEG_N, SECOND_CROPPED_LONGITUDES_DEG_E
        )

    def test_crop_image_around_storm_center_third(self):
        """Ensures correct output from _crop_image_around_storm_center (storm 3)."""
        self._check_crop_image(
            THIRD_STORM_ROW, THIRD_STORM_COLUMN, THIRD_CROPPED_DATA_MATRIX,
            THIRD_CROPPED_LATITUDES_DEG_N, THIRD_CROPPED_LONGITUDES_DEG_E
        )

    def test_crop_image_around_storm_center_fourth(self):
        """Ensures correct output from _crop_image_around_storm_center (storm 4)."""
        self._check_crop_image(
            FOURTH_STORM_ROW, FOURTH_STORM_COLUMN, FOURTH_CROPPED_DATA_MATRIX,
            FOURTH_CROPPED_LATITUDES_DEG_N, FOURTH_CROPPED_LONGITUDES_DEG_E
        )

    def test_crop_image_around_storm_center_fifth(self):
        """Ensures correct output from _crop_image_around_storm_center (storm 5)."""
        self._check_crop_image(
            FIFTH_STORM_ROW, FIFTH_STORM_COLUMN, FIFTH_CROPPED_DATA_MATRIX,
            FIFTH_CROPPED_LATITUDES_DEG_N, FIFTH_CROPPED_LONGITUDES_DEG_E
        )

    def test_get_cyclone_id(self):
        """Ensures correct output from get_cyclone_id."""
        this_id_string = satellite_utils.get_cyclone_id(
            year=YEAR, basin_id_string=BASIN_ID_STRING,
            cyclone_number=CYCLONE_NUMBER
        )
        self.assertTrue(this_id_string == CYCLONE_ID_STRING)

    def test_parse_cyclone_id(self):
        """Ensures correct output from parse_cyclone_id."""
        this_year, this_basin_id_string, this_cyclone_number = (
            satellite_utils.parse_cyclone_id(CYCLONE_ID_STRING)
        )
        self.assertTrue(this_year == YEAR)
        self.assertTrue(this_basin_id_string == BASIN_ID_STRING)
        self.assertTrue(this_cyclone_number == CYCLONE_NUMBER)
if __name__ == '__main__':
    # Run the full test suite when this module is executed as a script.
    unittest.main()
| [
"numpy.allclose",
"ml4tc.utils.satellite_utils.get_cyclone_id",
"ml4tc.utils.satellite_utils._crop_image_around_storm_center",
"ml4tc.utils.satellite_utils._find_storm_center_px_space",
"numpy.array",
"ml4tc.utils.satellite_utils.parse_cyclone_id",
"unittest.main"
] | [((225, 292), 'numpy.array', 'numpy.array', (['[-10, -8, -6, -4, -2, 0, 3, 6, 9, 12, 20]'], {'dtype': 'float'}), '([-10, -8, -6, -4, -2, 0, 3, 6, 9, 12, 20], dtype=float)\n', (236, 292), False, 'import numpy\n'), ((323, 397), 'numpy.array', 'numpy.array', (['[350, 355, 0, 5, 10, 15, 25, 35, 45, 55, 65, 75]'], {'dtype': 'float'}), '([350, 355, 0, 5, 10, 15, 25, 35, 45, 55, 65, 75], dtype=float)\n', (334, 397), False, 'import numpy\n'), ((1097, 1732), 'numpy.array', 'numpy.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, \n 21, 22, 23, 24], [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36], [37,\n 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], [49, 50, 51, 52, 53, 54, \n 55, 56, 57, 58, 59, 60], [61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, \n 72], [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84], [85, 86, 87, 88,\n 89, 90, 91, 92, 93, 94, 95, 96], [97, 98, 99, 100, 101, 102, 103, 104, \n 105, 106, 107, 108], [109, 110, 111, 112, 113, 114, 115, 116, 117, 118,\n 119, 120], [121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132]]'], {'dtype': 'float'}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24], [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, \n 35, 36], [37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48], [49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60], [61, 62, 63, 64, 65, 66, 67, 68, \n 69, 70, 71, 72], [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84], [85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96], [97, 98, 99, 100, 101, 102,\n 103, 104, 105, 106, 107, 108], [109, 110, 111, 112, 113, 114, 115, 116,\n 117, 118, 119, 120], [121, 122, 123, 124, 125, 126, 127, 128, 129, 130,\n 131, 132]], dtype=float)\n', (1108, 1732), False, 'import numpy\n'), ((1819, 1961), 'numpy.array', 'numpy.array', (['[[73, 73, 73, 74, 75, 76], [85, 85, 85, 86, 87, 88], [97, 97, 97, 98, 99, \n 100], [109, 109, 109, 110, 111, 112]]'], {'dtype': 'float'}), '([[73, 73, 73, 74, 75, 76], [85, 85, 85, 86, 87, 
88], [97, 97, \n 97, 98, 99, 100], [109, 109, 109, 110, 111, 112]], dtype=float)\n', (1830, 1961), False, 'import numpy\n'), ((2008, 2047), 'numpy.array', 'numpy.array', (['[3, 6, 9, 12]'], {'dtype': 'float'}), '([3, 6, 9, 12], dtype=float)\n', (2019, 2047), False, 'import numpy\n'), ((2081, 2133), 'numpy.array', 'numpy.array', (['[340, 345, 350, 355, 0, 5]'], {'dtype': 'float'}), '([340, 345, 350, 355, 0, 5], dtype=float)\n', (2092, 2133), False, 'import numpy\n'), ((2170, 2333), 'numpy.array', 'numpy.array', (['[[101, 102, 103, 104, 105, 106], [113, 114, 115, 116, 117, 118], [125, 126,\n 127, 128, 129, 130], [125, 126, 127, 128, 129, 130]]'], {'dtype': 'float'}), '([[101, 102, 103, 104, 105, 106], [113, 114, 115, 116, 117, 118],\n [125, 126, 127, 128, 129, 130], [125, 126, 127, 128, 129, 130]], dtype=\n float)\n', (2181, 2333), False, 'import numpy\n'), ((2377, 2418), 'numpy.array', 'numpy.array', (['[9, 12, 20, 28]'], {'dtype': 'float'}), '([9, 12, 20, 28], dtype=float)\n', (2388, 2418), False, 'import numpy\n'), ((2453, 2503), 'numpy.array', 'numpy.array', (['[10, 15, 25, 35, 45, 55]'], {'dtype': 'float'}), '([10, 15, 25, 35, 45, 55], dtype=float)\n', (2464, 2503), False, 'import numpy\n'), ((2539, 2691), 'numpy.array', 'numpy.array', (['[[90, 91, 92, 93, 94, 95], [102, 103, 104, 105, 106, 107], [114, 115, 116, \n 117, 118, 119], [126, 127, 128, 129, 130, 131]]'], {'dtype': 'float'}), '([[90, 91, 92, 93, 94, 95], [102, 103, 104, 105, 106, 107], [114,\n 115, 116, 117, 118, 119], [126, 127, 128, 129, 130, 131]], dtype=float)\n', (2550, 2691), False, 'import numpy\n'), ((2739, 2779), 'numpy.array', 'numpy.array', (['[6, 9, 12, 20]'], {'dtype': 'float'}), '([6, 9, 12, 20], dtype=float)\n', (2750, 2779), False, 'import numpy\n'), ((2813, 2863), 'numpy.array', 'numpy.array', (['[15, 25, 35, 45, 55, 65]'], {'dtype': 'float'}), '([15, 25, 35, 45, 55, 65], dtype=float)\n', (2824, 2863), False, 'import numpy\n'), ((2900, 3035), 'numpy.array', 'numpy.array', (['[[43, 44, 
45, 46, 47, 48], [55, 56, 57, 58, 59, 60], [67, 68, 69, 70, 71, \n 72], [79, 80, 81, 82, 83, 84]]'], {'dtype': 'float'}), '([[43, 44, 45, 46, 47, 48], [55, 56, 57, 58, 59, 60], [67, 68, \n 69, 70, 71, 72], [79, 80, 81, 82, 83, 84]], dtype=float)\n', (2911, 3035), False, 'import numpy\n'), ((3083, 3123), 'numpy.array', 'numpy.array', (['[-4, -2, 0, 3]'], {'dtype': 'float'}), '([-4, -2, 0, 3], dtype=float)\n', (3094, 3123), False, 'import numpy\n'), ((3158, 3208), 'numpy.array', 'numpy.array', (['[25, 35, 45, 55, 65, 75]'], {'dtype': 'float'}), '([25, 35, 45, 55, 65, 75], dtype=float)\n', (3169, 3208), False, 'import numpy\n'), ((3244, 3405), 'numpy.array', 'numpy.array', (['[[98, 99, 100, 101, 102, 103], [110, 111, 112, 113, 114, 115], [122, 123, \n 124, 125, 126, 127], [122, 123, 124, 125, 126, 127]]'], {'dtype': 'float'}), '([[98, 99, 100, 101, 102, 103], [110, 111, 112, 113, 114, 115],\n [122, 123, 124, 125, 126, 127], [122, 123, 124, 125, 126, 127]], dtype=\n float)\n', (3255, 3405), False, 'import numpy\n'), ((3448, 3489), 'numpy.array', 'numpy.array', (['[9, 12, 20, 28]'], {'dtype': 'float'}), '([9, 12, 20, 28], dtype=float)\n', (3459, 3489), False, 'import numpy\n'), ((3523, 3572), 'numpy.array', 'numpy.array', (['[355, 0, 5, 10, 15, 25]'], {'dtype': 'float'}), '([355, 0, 5, 10, 15, 25], dtype=float)\n', (3534, 3572), False, 'import numpy\n'), ((13294, 13309), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13307, 13309), False, 'import unittest\n'), ((4070, 4327), 'ml4tc.utils.satellite_utils._find_storm_center_px_space', 'satellite_utils._find_storm_center_px_space', ([], {'storm_latitude_deg_n': 'FIRST_STORM_LATITUDE_DEG_N', 'storm_longitude_deg_e': 'FIRST_STORM_LONGITUDE_DEG_E', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)'}), '(storm_latitude_deg_n=\n FIRST_STORM_LATITUDE_DEG_N, storm_longitude_deg_e=\n FIRST_STORM_LONGITUDE_DEG_E, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, 
grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0)\n', (4113, 4327), False, 'from ml4tc.utils import satellite_utils\n'), ((4701, 4960), 'ml4tc.utils.satellite_utils._find_storm_center_px_space', 'satellite_utils._find_storm_center_px_space', ([], {'storm_latitude_deg_n': 'SECOND_STORM_LATITUDE_DEG_N', 'storm_longitude_deg_e': 'SECOND_STORM_LONGITUDE_DEG_E', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)'}), '(storm_latitude_deg_n=\n SECOND_STORM_LATITUDE_DEG_N, storm_longitude_deg_e=\n SECOND_STORM_LONGITUDE_DEG_E, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0)\n', (4744, 4960), False, 'from ml4tc.utils import satellite_utils\n'), ((5334, 5591), 'ml4tc.utils.satellite_utils._find_storm_center_px_space', 'satellite_utils._find_storm_center_px_space', ([], {'storm_latitude_deg_n': 'THIRD_STORM_LATITUDE_DEG_N', 'storm_longitude_deg_e': 'THIRD_STORM_LONGITUDE_DEG_E', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)'}), '(storm_latitude_deg_n=\n THIRD_STORM_LATITUDE_DEG_N, storm_longitude_deg_e=\n THIRD_STORM_LONGITUDE_DEG_E, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0)\n', (5377, 5591), False, 'from ml4tc.utils import satellite_utils\n'), ((5965, 6224), 'ml4tc.utils.satellite_utils._find_storm_center_px_space', 'satellite_utils._find_storm_center_px_space', ([], {'storm_latitude_deg_n': 'FOURTH_STORM_LATITUDE_DEG_N', 'storm_longitude_deg_e': 'FOURTH_STORM_LONGITUDE_DEG_E', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)'}), '(storm_latitude_deg_n=\n FOURTH_STORM_LATITUDE_DEG_N, storm_longitude_deg_e=\n FOURTH_STORM_LONGITUDE_DEG_E, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0)\n', (6008, 6224), False, 'from 
ml4tc.utils import satellite_utils\n'), ((6598, 6855), 'ml4tc.utils.satellite_utils._find_storm_center_px_space', 'satellite_utils._find_storm_center_px_space', ([], {'storm_latitude_deg_n': 'FIFTH_STORM_LATITUDE_DEG_N', 'storm_longitude_deg_e': 'FIFTH_STORM_LONGITUDE_DEG_E', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)'}), '(storm_latitude_deg_n=\n FIFTH_STORM_LATITUDE_DEG_N, storm_longitude_deg_e=\n FIFTH_STORM_LONGITUDE_DEG_E, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0)\n', (6641, 6855), False, 'from ml4tc.utils import satellite_utils\n'), ((7301, 7644), 'ml4tc.utils.satellite_utils._crop_image_around_storm_center', 'satellite_utils._crop_image_around_storm_center', ([], {'data_matrix': '(UNCROPPED_DATA_MATRIX + 0.0)', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)', 'storm_row': 'FIRST_STORM_ROW', 'storm_column': 'FIRST_STORM_COLUMN', 'num_cropped_rows': 'NUM_CROPPED_ROWS', 'num_cropped_columns': 'NUM_CROPPED_COLUMNS'}), '(data_matrix=\n UNCROPPED_DATA_MATRIX + 0.0, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0, storm_row=\n FIRST_STORM_ROW, storm_column=FIRST_STORM_COLUMN, num_cropped_rows=\n NUM_CROPPED_ROWS, num_cropped_columns=NUM_CROPPED_COLUMNS)\n', (7348, 7644), False, 'from ml4tc.utils import satellite_utils\n'), ((8399, 8744), 'ml4tc.utils.satellite_utils._crop_image_around_storm_center', 'satellite_utils._crop_image_around_storm_center', ([], {'data_matrix': '(UNCROPPED_DATA_MATRIX + 0.0)', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)', 'storm_row': 'SECOND_STORM_ROW', 'storm_column': 'SECOND_STORM_COLUMN', 'num_cropped_rows': 'NUM_CROPPED_ROWS', 'num_cropped_columns': 'NUM_CROPPED_COLUMNS'}), '(data_matrix=\n UNCROPPED_DATA_MATRIX + 0.0, 
grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0, storm_row=\n SECOND_STORM_ROW, storm_column=SECOND_STORM_COLUMN, num_cropped_rows=\n NUM_CROPPED_ROWS, num_cropped_columns=NUM_CROPPED_COLUMNS)\n', (8446, 8744), False, 'from ml4tc.utils import satellite_utils\n'), ((9512, 9855), 'ml4tc.utils.satellite_utils._crop_image_around_storm_center', 'satellite_utils._crop_image_around_storm_center', ([], {'data_matrix': '(UNCROPPED_DATA_MATRIX + 0.0)', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)', 'storm_row': 'THIRD_STORM_ROW', 'storm_column': 'THIRD_STORM_COLUMN', 'num_cropped_rows': 'NUM_CROPPED_ROWS', 'num_cropped_columns': 'NUM_CROPPED_COLUMNS'}), '(data_matrix=\n UNCROPPED_DATA_MATRIX + 0.0, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0, storm_row=\n THIRD_STORM_ROW, storm_column=THIRD_STORM_COLUMN, num_cropped_rows=\n NUM_CROPPED_ROWS, num_cropped_columns=NUM_CROPPED_COLUMNS)\n', (9559, 9855), False, 'from ml4tc.utils import satellite_utils\n'), ((10610, 10955), 'ml4tc.utils.satellite_utils._crop_image_around_storm_center', 'satellite_utils._crop_image_around_storm_center', ([], {'data_matrix': '(UNCROPPED_DATA_MATRIX + 0.0)', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)', 'storm_row': 'FOURTH_STORM_ROW', 'storm_column': 'FOURTH_STORM_COLUMN', 'num_cropped_rows': 'NUM_CROPPED_ROWS', 'num_cropped_columns': 'NUM_CROPPED_COLUMNS'}), '(data_matrix=\n UNCROPPED_DATA_MATRIX + 0.0, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0, storm_row=\n FOURTH_STORM_ROW, storm_column=FOURTH_STORM_COLUMN, num_cropped_rows=\n NUM_CROPPED_ROWS, num_cropped_columns=NUM_CROPPED_COLUMNS)\n', (10657, 10955), False, 'from ml4tc.utils import satellite_utils\n'), ((11723, 12066), 
'ml4tc.utils.satellite_utils._crop_image_around_storm_center', 'satellite_utils._crop_image_around_storm_center', ([], {'data_matrix': '(UNCROPPED_DATA_MATRIX + 0.0)', 'grid_latitudes_deg_n': '(GRID_LATITUDES_DEG_N + 0.0)', 'grid_longitudes_deg_e': '(GRID_LONGITUDES_DEG_E + 0.0)', 'storm_row': 'FIFTH_STORM_ROW', 'storm_column': 'FIFTH_STORM_COLUMN', 'num_cropped_rows': 'NUM_CROPPED_ROWS', 'num_cropped_columns': 'NUM_CROPPED_COLUMNS'}), '(data_matrix=\n UNCROPPED_DATA_MATRIX + 0.0, grid_latitudes_deg_n=GRID_LATITUDES_DEG_N +\n 0.0, grid_longitudes_deg_e=GRID_LONGITUDES_DEG_E + 0.0, storm_row=\n FIFTH_STORM_ROW, storm_column=FIFTH_STORM_COLUMN, num_cropped_rows=\n NUM_CROPPED_ROWS, num_cropped_columns=NUM_CROPPED_COLUMNS)\n', (11770, 12066), False, 'from ml4tc.utils import satellite_utils\n'), ((12649, 12758), 'ml4tc.utils.satellite_utils.get_cyclone_id', 'satellite_utils.get_cyclone_id', ([], {'year': 'YEAR', 'basin_id_string': 'BASIN_ID_STRING', 'cyclone_number': 'CYCLONE_NUMBER'}), '(year=YEAR, basin_id_string=BASIN_ID_STRING,\n cyclone_number=CYCLONE_NUMBER)\n', (12679, 12758), False, 'from ml4tc.utils import satellite_utils\n'), ((13027, 13078), 'ml4tc.utils.satellite_utils.parse_cyclone_id', 'satellite_utils.parse_cyclone_id', (['CYCLONE_ID_STRING'], {}), '(CYCLONE_ID_STRING)\n', (13059, 13078), False, 'from ml4tc.utils import satellite_utils\n'), ((7730, 7805), 'numpy.allclose', 'numpy.allclose', (['this_data_matrix', 'FIRST_CROPPED_DATA_MATRIX'], {'atol': 'TOLERANCE'}), '(this_data_matrix, FIRST_CROPPED_DATA_MATRIX, atol=TOLERANCE)\n', (7744, 7805), False, 'import numpy\n'), ((7853, 7942), 'numpy.allclose', 'numpy.allclose', (['these_latitudes_deg_n', 'FIRST_CROPPED_LATITUDES_DEG_N'], {'atol': 'TOLERANCE'}), '(these_latitudes_deg_n, FIRST_CROPPED_LATITUDES_DEG_N, atol=\n TOLERANCE)\n', (7867, 7942), False, 'import numpy\n'), ((7985, 8076), 'numpy.allclose', 'numpy.allclose', (['these_longitudes_deg_e', 'FIRST_CROPPED_LONGITUDES_DEG_E'], {'atol': 
'TOLERANCE'}), '(these_longitudes_deg_e, FIRST_CROPPED_LONGITUDES_DEG_E, atol\n =TOLERANCE)\n', (7999, 8076), False, 'import numpy\n'), ((8830, 8906), 'numpy.allclose', 'numpy.allclose', (['this_data_matrix', 'SECOND_CROPPED_DATA_MATRIX'], {'atol': 'TOLERANCE'}), '(this_data_matrix, SECOND_CROPPED_DATA_MATRIX, atol=TOLERANCE)\n', (8844, 8906), False, 'import numpy\n'), ((8954, 9044), 'numpy.allclose', 'numpy.allclose', (['these_latitudes_deg_n', 'SECOND_CROPPED_LATITUDES_DEG_N'], {'atol': 'TOLERANCE'}), '(these_latitudes_deg_n, SECOND_CROPPED_LATITUDES_DEG_N, atol=\n TOLERANCE)\n', (8968, 9044), False, 'import numpy\n'), ((9099, 9190), 'numpy.allclose', 'numpy.allclose', (['these_longitudes_deg_e', 'SECOND_CROPPED_LONGITUDES_DEG_E'], {'atol': 'TOLERANCE'}), '(these_longitudes_deg_e, SECOND_CROPPED_LONGITUDES_DEG_E,\n atol=TOLERANCE)\n', (9113, 9190), False, 'import numpy\n'), ((9941, 10016), 'numpy.allclose', 'numpy.allclose', (['this_data_matrix', 'THIRD_CROPPED_DATA_MATRIX'], {'atol': 'TOLERANCE'}), '(this_data_matrix, THIRD_CROPPED_DATA_MATRIX, atol=TOLERANCE)\n', (9955, 10016), False, 'import numpy\n'), ((10064, 10153), 'numpy.allclose', 'numpy.allclose', (['these_latitudes_deg_n', 'THIRD_CROPPED_LATITUDES_DEG_N'], {'atol': 'TOLERANCE'}), '(these_latitudes_deg_n, THIRD_CROPPED_LATITUDES_DEG_N, atol=\n TOLERANCE)\n', (10078, 10153), False, 'import numpy\n'), ((10196, 10287), 'numpy.allclose', 'numpy.allclose', (['these_longitudes_deg_e', 'THIRD_CROPPED_LONGITUDES_DEG_E'], {'atol': 'TOLERANCE'}), '(these_longitudes_deg_e, THIRD_CROPPED_LONGITUDES_DEG_E, atol\n =TOLERANCE)\n', (10210, 10287), False, 'import numpy\n'), ((11041, 11117), 'numpy.allclose', 'numpy.allclose', (['this_data_matrix', 'FOURTH_CROPPED_DATA_MATRIX'], {'atol': 'TOLERANCE'}), '(this_data_matrix, FOURTH_CROPPED_DATA_MATRIX, atol=TOLERANCE)\n', (11055, 11117), False, 'import numpy\n'), ((11165, 11255), 'numpy.allclose', 'numpy.allclose', (['these_latitudes_deg_n', 
'FOURTH_CROPPED_LATITUDES_DEG_N'], {'atol': 'TOLERANCE'}), '(these_latitudes_deg_n, FOURTH_CROPPED_LATITUDES_DEG_N, atol=\n TOLERANCE)\n', (11179, 11255), False, 'import numpy\n'), ((11310, 11401), 'numpy.allclose', 'numpy.allclose', (['these_longitudes_deg_e', 'FOURTH_CROPPED_LONGITUDES_DEG_E'], {'atol': 'TOLERANCE'}), '(these_longitudes_deg_e, FOURTH_CROPPED_LONGITUDES_DEG_E,\n atol=TOLERANCE)\n', (11324, 11401), False, 'import numpy\n'), ((12152, 12227), 'numpy.allclose', 'numpy.allclose', (['this_data_matrix', 'FIFTH_CROPPED_DATA_MATRIX'], {'atol': 'TOLERANCE'}), '(this_data_matrix, FIFTH_CROPPED_DATA_MATRIX, atol=TOLERANCE)\n', (12166, 12227), False, 'import numpy\n'), ((12275, 12364), 'numpy.allclose', 'numpy.allclose', (['these_latitudes_deg_n', 'FIFTH_CROPPED_LATITUDES_DEG_N'], {'atol': 'TOLERANCE'}), '(these_latitudes_deg_n, FIFTH_CROPPED_LATITUDES_DEG_N, atol=\n TOLERANCE)\n', (12289, 12364), False, 'import numpy\n'), ((12407, 12498), 'numpy.allclose', 'numpy.allclose', (['these_longitudes_deg_e', 'FIFTH_CROPPED_LONGITUDES_DEG_E'], {'atol': 'TOLERANCE'}), '(these_longitudes_deg_e, FIFTH_CROPPED_LONGITUDES_DEG_E, atol\n =TOLERANCE)\n', (12421, 12498), False, 'import numpy\n')] |
"""Test file for float subgraph fusing"""
import random
from inspect import signature
import numpy
import pytest
from concrete.common.data_types.integers import Integer
from concrete.common.debugging.custom_assert import assert_not_reached
from concrete.common.optimization.topological import fuse_float_operations
from concrete.common.values import EncryptedScalar, EncryptedTensor
from concrete.numpy import tracing
from concrete.numpy.tracing import trace_numpy_function
def no_fuse(x):
    """Purely integer computation: nothing for the float fusing pass to do."""
    offset = 2
    return x + offset
def no_fuse_unhandled(x, y):
    """Float subgraph fed by two variable inputs: fusing cannot handle it."""
    shifted_x = x + 0.7
    shifted_y = y + 1.3
    # Two independent encrypted inputs reach the same float subgraph here.
    combined = shifted_x + shifted_y
    return combined.astype(numpy.int32)
def fusable_with_bigger_search(x, y):
    """Two float branches off one integer node; fusable once the search widens."""
    shifted = x + 1
    # Both branches round-trip through int32 before picking up a float offset.
    left_branch = shifted.astype(numpy.int32) + 1.5
    right_branch = shifted.astype(numpy.int32) + 3.4
    merged = (left_branch + right_branch).astype(numpy.int32)
    return merged + y
def fusable_with_bigger_search_needs_second_iteration(x, y):
    """Fusable float subgraph whose shape forces a second fusing iteration."""
    # Shift into float territory and apply a univariate float op.
    cos_value = numpy.cos((x + 1) + 0.5)
    first_branch = cos_value.astype(numpy.int32) + 1.5
    # Second branch re-mixes two descendants of the same float node.
    plus_one = cos_value + 1
    plus_two = plus_one + 1
    second_branch = (plus_one + plus_two).astype(numpy.int32) + 3.4
    merged = (first_branch + second_branch).astype(numpy.int32)
    return merged + y
def no_fuse_big_constant_3_10_10(x):
    """Add a (3, 10, 10) constant to a smaller ``x`` so fusing must refuse.

    The constant broadcasts ``x`` up to its own bigger shape, which the
    fusing pass rejects.
    """
    as_float = x.astype(numpy.float64)
    broadcasted = as_float + numpy.ones((3, 10, 10))
    return broadcasted.astype(numpy.int32)
def no_fuse_dot(x):
    """Dot product collapses the shape, so the float subgraph cannot fuse."""
    weights = numpy.full((10,), 1.33, dtype=numpy.float64)
    return numpy.dot(x, weights).astype(numpy.int32)
def simple_create_fuse_opportunity(f, x):
    """Wrap ``f`` in a float64 → int32 round trip to create a fuse candidate.

    Used with functions the package explicitly marks as non-fusable.
    """
    as_float = x.astype(numpy.float64)
    return f(as_float).astype(numpy.int32)
def ravel_cases(x):
    """Feed ``numpy.ravel`` through the shared float-round-trip helper."""
    flatten = numpy.ravel
    return simple_create_fuse_opportunity(flatten, x)
def transpose_cases(x):
    """Feed ``numpy.transpose`` through the shared float-round-trip helper."""
    flip_axes = numpy.transpose
    return simple_create_fuse_opportunity(flip_axes, x)
def reshape_cases(x, newshape):
    """Feed a reshape-to-``newshape`` through the shared float-round-trip helper."""

    def reshape_to_target(arr):
        return numpy.reshape(arr, newshape)

    return simple_create_fuse_opportunity(reshape_to_target, x)
def simple_fuse_not_output(x):
    """Fusable float round trip whose result is not the function output."""
    round_tripped = x.astype(numpy.float64).astype(numpy.int32)
    return round_tripped + 2
def simple_fuse_output(x):
    """Fusable float round trip that is itself the function output."""
    as_float = x.astype(numpy.float64)
    return as_float.astype(numpy.int32)
def mix_x_and_y_intricately_and_call_f(function, x, y):
    """Entangle x and y in a way no optimizer can simplify, then call ``function``.

    Returns a 6-tuple of int32-cast results plus ``y`` passthroughs so several
    outputs hang off the same float subgraph.
    """
    # float32/int32 round trip on the mix of both inputs.
    mixed = ((x + y) + 2).astype(numpy.float32).astype(numpy.int32)
    branch_a = mixed + 1.5
    branch_b = mixed + 2.7
    fun_output = function(branch_a + branch_b)
    fun_as_int = fun_output.astype(numpy.int32)
    return (
        fun_as_int,
        branch_b.astype(numpy.int32),
        (branch_b + 3).astype(numpy.int32),
        fun_as_int + 67,
        y,
        (y + 4.7).astype(numpy.int32) + 3,
    )
def mix_x_and_y_and_call_f(function, x, y):
    """Mix x into two float branches, call ``function``, return a 6-tuple."""
    shifted_lo = x + 0.1
    shifted_hi = x + 0.2
    fun_output = function(shifted_lo + shifted_hi)
    fun_as_int = fun_output.astype(numpy.int32)
    return (
        fun_as_int,
        shifted_hi.astype(numpy.int32),
        (shifted_hi + 3).astype(numpy.int32),
        fun_as_int + 67,
        y,
        (y + 4.7).astype(numpy.int32) + 3,
    )
def mix_x_and_y_into_range_0_to_1_and_call_f(function, x, y):
    """Mix the inputs, squash the mix into [0, 1], then call ``function``.

    ``1 - |sin(.)|`` keeps the value passed to ``function`` inside its domain
    for functions such as arcsin/arccos/arctanh.
    """
    shifted_lo = x + 0.1
    shifted_hi = x + 0.2
    squashed = 1 - numpy.abs(numpy.sin(shifted_lo + shifted_hi + 0.3))
    fun_output = function(squashed)
    fun_as_int = fun_output.astype(numpy.int32)
    return (
        fun_as_int,
        shifted_hi.astype(numpy.int32),
        (shifted_hi + 3).astype(numpy.int32),
        fun_as_int + 67,
        y,
        (y + 4.7).astype(numpy.int32) + 3,
    )
def mix_x_and_y_into_integer_and_call_f(function, x, y):
    """Mix the inputs with integer offsets so ``function`` receives an integer."""
    low = x + 1
    high = x + 2
    fun_output = function(low + high)
    fun_as_int = fun_output.astype(numpy.int32)
    return (
        fun_as_int,
        high.astype(numpy.int32),
        (high + 3).astype(numpy.int32),
        fun_as_int + 67,
        y,
        (y + 4.7).astype(numpy.int32) + 3,
    )
def get_func_params_int32(func, scalar=True):
    """Build a ``{parameter_name: encrypted int32 value}`` dict for ``func``.

    Each parameter maps to an EncryptedScalar when ``scalar`` is True,
    otherwise to a shape-(1,) EncryptedTensor.
    """

    def make_value():
        if scalar:
            return EncryptedScalar(Integer(32, True))
        return EncryptedTensor(Integer(32, True), (1,))

    return {param_name: make_value() for param_name in signature(func).parameters.keys()}
# Each case supplies: the function to trace, whether fusing is expected to
# shrink the op graph, the traced function's parameters, and the exact
# non-fusable warning expected on stderr ("" / None when none is checked).
@pytest.mark.parametrize(
    "function_to_trace,fused,params,warning_message",
    [
        pytest.param(no_fuse, False, get_func_params_int32(no_fuse), "", id="no_fuse"),
        pytest.param(
            no_fuse_unhandled,
            False,
            get_func_params_int32(no_fuse_unhandled),
            """
The following subgraph is not fusable:

%0 = x                                   # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ one of 2 variable inputs (can only have 1 for fusing)
%1 = 0.7                                 # ClearScalar<float64>
%2 = y                                   # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ one of 2 variable inputs (can only have 1 for fusing)
%3 = 1.3                                 # ClearScalar<float64>
%4 = add(%0, %1)                         # EncryptedScalar<float64>
%5 = add(%2, %3)                         # EncryptedScalar<float64>
%6 = add(%4, %5)                         # EncryptedScalar<float64>
%7 = astype(%6, dtype=int32)             # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cannot fuse here as the subgraph has 2 variable inputs
return %7
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_unhandled",
        ),
        pytest.param(
            fusable_with_bigger_search,
            True,
            get_func_params_int32(fusable_with_bigger_search),
            None,
            id="fusable_with_bigger_search",
        ),
        pytest.param(
            fusable_with_bigger_search_needs_second_iteration,
            True,
            get_func_params_int32(fusable_with_bigger_search_needs_second_iteration),
            None,
            # NOTE(review): this id duplicates the previous case's id; pytest
            # disambiguates with a numeric suffix — consider renaming.
            id="fusable_with_bigger_search",
        ),
        pytest.param(
            no_fuse_dot,
            False,
            {"x": EncryptedTensor(Integer(32, True), (10,))},
            """
The following subgraph is not fusable:

%0 = x                                        # EncryptedTensor<int32, shape=(10,)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ input node with shape (10,)
%1 = [1.33 1.33 ... 1.33 1.33]                # ClearTensor<float64, shape=(10,)>
%2 = dot(%0, %1)                              # EncryptedScalar<float64>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ output shapes: #0, () are not the same as the subgraph's input: (10,)
%3 = astype(%2, dtype=int32)                  # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ output shapes: #0, () are not the same as the subgraph's input: (10,)
return %3
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_dot",
        ),
        pytest.param(
            ravel_cases,
            False,
            {"x": EncryptedTensor(Integer(32, True), (10, 20))},
            """
The following subgraph is not fusable:

%0 = x                              # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64)      # EncryptedTensor<float64, shape=(10, 20)>
%2 = ravel(%1)                      # EncryptedTensor<float64, shape=(200,)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32)        # EncryptedTensor<int32, shape=(200,)>
return %3
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_explicitely_ravel",
        ),
        pytest.param(
            transpose_cases,
            False,
            {"x": EncryptedTensor(Integer(32, True), (10, 20))},
            """
The following subgraph is not fusable:

%0 = x                              # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64)      # EncryptedTensor<float64, shape=(10, 20)>
%2 = transpose(%1)                  # EncryptedTensor<float64, shape=(20, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32)        # EncryptedTensor<int32, shape=(20, 10)>
return %3
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_explicitely_transpose",
        ),
        pytest.param(
            lambda x: reshape_cases(x, (20, 10)),
            False,
            {"x": EncryptedTensor(Integer(32, True), (10, 20))},
            """
The following subgraph is not fusable:

%0 = x                                   # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64)           # EncryptedTensor<float64, shape=(10, 20)>
%2 = reshape(%1, newshape=(20, 10))      # EncryptedTensor<float64, shape=(20, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32)             # EncryptedTensor<int32, shape=(20, 10)>
return %3
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_explicitely_reshape",
        ),
        pytest.param(
            no_fuse_big_constant_3_10_10,
            False,
            {"x": EncryptedTensor(Integer(32, True), (10, 10))},
            """
The following subgraph is not fusable:

%0 = [[[1. 1. 1 ... . 1. 1.]]]           # ClearTensor<float64, shape=(3, 10, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this constant node has a bigger shape (3, 10, 10) than the subgraph's input: (10, 10)
%1 = x                                   # EncryptedTensor<int32, shape=(10, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ input node with shape (10, 10)
%2 = astype(%1, dtype=float64)           # EncryptedTensor<float64, shape=(10, 10)>
%3 = add(%2, %0)                         # EncryptedTensor<float64, shape=(3, 10, 10)>
%4 = astype(%3, dtype=int32)             # EncryptedTensor<int32, shape=(3, 10, 10)>
return %4
            """.strip(), # noqa: E501 # pylint: disable=line-too-long
            id="no_fuse_big_constant_3_10_10",
        ),
        pytest.param(
            simple_fuse_not_output,
            True,
            get_func_params_int32(simple_fuse_not_output),
            None,
            id="simple_fuse_not_output",
        ),
        pytest.param(
            simple_fuse_output,
            True,
            get_func_params_int32(simple_fuse_output),
            None,
            id="simple_fuse_output",
        ),
        pytest.param(
            lambda x, y: mix_x_and_y_intricately_and_call_f(numpy.rint, x, y),
            True,
            get_func_params_int32(lambda x, y: None),
            None,
            id="mix_x_and_y_intricately_and_call_f_with_rint",
        ),
        pytest.param(
            lambda x, y: mix_x_and_y_and_call_f(numpy.rint, x, y),
            True,
            get_func_params_int32(lambda x, y: None),
            None,
            id="mix_x_and_y_and_call_f_with_rint",
        ),
        pytest.param(
            transpose_cases,
            True,
            get_func_params_int32(transpose_cases),
            None,
            id="transpose_cases scalar",
        ),
        pytest.param(
            transpose_cases,
            True,
            {"x": EncryptedTensor(Integer(32, True), (10,))},
            None,
            id="transpose_cases ndim == 1",
        ),
        pytest.param(
            ravel_cases,
            True,
            {"x": EncryptedTensor(Integer(32, True), (10,))},
            None,
            id="ravel_cases ndim == 1",
        ),
        pytest.param(
            lambda x: reshape_cases(x, (10, 20)),
            True,
            {"x": EncryptedTensor(Integer(32, True), (10, 20))},
            None,
            id="reshape_cases same shape",
        ),
    ],
)
def test_fuse_float_operations(
    function_to_trace,
    fused,
    params,
    warning_message,
    capfd,
    remove_color_codes,
    check_array_equality,
):
    """Test function for fuse_float_operations.

    Traces ``function_to_trace``, runs the fusing pass, and checks that the
    graph shrinks iff ``fused`` is True, that the expected warning (if any)
    was printed on stderr, and that the fused graph still evaluates to the
    same values as the plain Python function.
    """
    op_graph = trace_numpy_function(
        function_to_trace,
        params,
    )
    orig_num_nodes = len(op_graph.graph)
    fuse_float_operations(op_graph)
    fused_num_nodes = len(op_graph.graph)
    # Fusing strictly shrinks the graph when it succeeds, and leaves it
    # untouched when it cannot.
    if fused:
        assert fused_num_nodes < orig_num_nodes
    else:
        assert fused_num_nodes == orig_num_nodes
    captured = capfd.readouterr()
    # NOTE(review): for fused cases warning_message is None; `None in <str>`
    # raises TypeError — confirm remove_color_codes/params keep this consistent.
    assert warning_message in (output := remove_color_codes(captured.err)), output
    # Evaluate both the Python function and the op graph on the same inputs,
    # broadcasting scalar test values to each parameter's declared shape.
    for input_ in [0, 2, 42, 44]:
        inputs = ()
        for param_input_value in params.values():
            if param_input_value.is_scalar:
                input_ = numpy.int32(input_)
            else:
                input_ = numpy.full(param_input_value.shape, input_, dtype=numpy.int32)
            inputs += (input_,)
        check_array_equality(function_to_trace(*inputs), op_graph(*inputs))
def subtest_tensor_no_fuse(fun, tensor_shape):
    """Test case to verify float fusing is only applied on functions on scalars."""
    if tensor_shape == ():
        # Only tensor inputs are of interest here
        return
    if fun in LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES:
        # At least one source of the bivariate function has to be a float
        return

    # Float fusing currently cannot work when the constant fed to a bivariate
    # operator is bigger than the variable input, so build a broadcastable
    # constant whose leading dimension makes it strictly bigger.
    variable_shape = (1,) + tensor_shape
    constant_shape = (random.randint(2, 10),) + tensor_shape

    def graph_function(x):
        floats = x.astype(numpy.float64)
        combined = fun(floats, numpy.ones(constant_shape))
        return combined.astype(numpy.int32)

    parameter_names = signature(graph_function).parameters.keys()
    op_graph = trace_numpy_function(
        graph_function,
        {
            name: EncryptedTensor(Integer(32, True), shape=variable_shape)
            for name in parameter_names
        },
    )
    nodes_before = len(op_graph.graph)
    fuse_float_operations(op_graph)
    # Nothing must have been fused
    assert nodes_before == len(op_graph.graph)
def check_results_are_equal(function_result, op_graph_result):
    """Check the output of function execution and OPGraph evaluation are equal.

    Supports single results as well as tuples of results; numpy arrays are
    compared element-wise.
    """
    func_is_tuple = isinstance(function_result, tuple)
    graph_is_tuple = isinstance(op_graph_result, tuple)
    if func_is_tuple and graph_is_tuple:
        assert len(function_result) == len(op_graph_result)
        comparisons = [
            func_out == graph_out
            for func_out, graph_out in zip(function_result, op_graph_result)
        ]
    elif not func_is_tuple and not graph_is_tuple:
        comparisons = [function_result == op_graph_result]
    else:
        assert_not_reached(f"Incompatible outputs: {function_result}, {op_graph_result}")
    return all(
        value.all() if isinstance(value, numpy.ndarray) else value for value in comparisons
    )
def subtest_fuse_float_unary_operations_correctness(fun, tensor_shape):
    """Test a unary function with fuse_float_operations.

    Traces each mixing wrapper around ``fun``, checks fusing shrank the graph,
    then evaluates both the wrapper and the fused graph on several inputs and
    checks the results agree.
    """
    # Some manipulation to avoid issues with domain of definitions of functions
    if fun == numpy.arccosh:
        # 0 is not in the domain of definition
        input_list = [1, 2, 42, 44]
        super_fun_list = [mix_x_and_y_and_call_f]
    elif fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan]:
        # Needs values between 0 and 1 in the call function
        input_list = [0, 2, 42, 44]
        super_fun_list = [mix_x_and_y_into_range_0_to_1_and_call_f]
    elif fun in [numpy.cosh, numpy.sinh, numpy.exp, numpy.exp2, numpy.expm1]:
        # Not too large values to avoid overflows
        input_list = [1, 2, 5, 11]
        super_fun_list = [mix_x_and_y_and_call_f, mix_x_and_y_intricately_and_call_f]
    else:
        # Regular case
        input_list = [0, 2, 42, 44]
        super_fun_list = [mix_x_and_y_and_call_f, mix_x_and_y_intricately_and_call_f]

    for super_fun in super_fun_list:
        for input_ in input_list:

            def get_function_to_trace():
                return lambda x, y: super_fun(fun, x, y)

            function_to_trace = get_function_to_trace()
            params_names = signature(function_to_trace).parameters.keys()
            op_graph = trace_numpy_function(
                function_to_trace,
                {
                    param_name: EncryptedTensor(Integer(32, True), tensor_shape)
                    for param_name in params_names
                },
            )
            orig_num_nodes = len(op_graph.graph)
            fuse_float_operations(op_graph)
            fused_num_nodes = len(op_graph.graph)
            assert fused_num_nodes < orig_num_nodes

            # Check that the call to the function or to the op_graph evaluation give the same
            # result
            tensor_diversifier = (
                # The following +1 in the range is to avoid to have 0's which is not in the
                # domain definition of some of our functions.
                # numpy.product was deprecated and removed in NumPy 2.0; numpy.prod
                # is the supported equivalent.
                numpy.arange(1, numpy.prod(tensor_shape) + 1, dtype=numpy.int32).reshape(
                    tensor_shape
                )
                if tensor_shape != ()
                else 1
            )
            if fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan]:
                # Domain of definition for these functions
                tensor_diversifier = (
                    numpy.ones(tensor_shape, dtype=numpy.int32) if tensor_shape != () else 1
                )
            input_ = numpy.int32(input_ * tensor_diversifier)
            num_params = len(params_names)
            assert num_params == 2
            # Create inputs which are either of the form [x, x] or [x, y]
            for j in range(4):
                if fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan] and j > 0:
                    # Domain of definition for these functions
                    break
                input_a = input_
                input_b = input_ + j
                if tensor_shape != ():
                    # NOTE: shuffling input_a also shuffles input_ in place
                    # (same object); values are preserved, only order changes.
                    numpy.random.shuffle(input_a)
                    numpy.random.shuffle(input_b)
                inputs = (input_a, input_b) if random.randint(0, 1) == 0 else (input_b, input_a)
                function_result = function_to_trace(*inputs)
                op_graph_result = op_graph(*inputs)
                assert check_results_are_equal(function_result, op_graph_result)
# Ufuncs the subtests treat as having integer-only sources:
# subtest_tensor_no_fuse skips them (it needs a float input to the bivariate
# function), and subtest_fuse_float_binary_operations_correctness only pairs
# them with integer constants (cases 0 and 2).
LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES = {
    numpy.bitwise_and,
    numpy.bitwise_or,
    numpy.bitwise_xor,
    numpy.gcd,
    numpy.lcm,
    numpy.ldexp,
    numpy.left_shift,
    numpy.logical_and,
    numpy.logical_not,
    numpy.logical_or,
    numpy.logical_xor,
    numpy.remainder,
    numpy.right_shift,
}
def subtest_fuse_float_binary_operations_correctness(fun, tensor_shape):
    """Test a binary functions with fuse_float_operations, with a constant as a source.

    Fixes one operand of ``fun`` to an integer or float constant (in first or
    second position, four cases), traces, fuses, and checks graph shrinkage
    plus result equality between the function and the fused graph.
    """
    for i in range(4):
        # Know if the function is defined for integer inputs
        if fun in LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES:
            if i not in [0, 2]:
                continue

        # The .astype(numpy.float64) that we have in cases 0 and 2 is here to force
        # a float output even for functions which return an integer (eg, XOR), such
        # that our frontend always try to fuse them
        # The .astype(numpy.float64) that we have in cases 1 and 3 is here to force
        # a float output even for functions which return a bool (eg, EQUAL), such
        # that our frontend always try to fuse them

        # For bivariate functions: fix one of the inputs
        if i == 0:
            # With an integer in first position
            ones_0 = numpy.ones(tensor_shape, dtype=numpy.int32) if tensor_shape != () else 1

            def get_function_to_trace():
                return lambda x, y: fun(3 * ones_0, x + y).astype(numpy.float64).astype(numpy.int32)

        elif i == 1:
            # With a float in first position
            ones_1 = numpy.ones(tensor_shape, dtype=numpy.float64) if tensor_shape != () else 1

            def get_function_to_trace():
                return (
                    lambda x, y: fun(2.3 * ones_1, x + y).astype(numpy.float64).astype(numpy.int32)
                )

        elif i == 2:
            # With an integer in second position
            ones_2 = numpy.ones(tensor_shape, dtype=numpy.int32) if tensor_shape != () else 1

            def get_function_to_trace():
                return lambda x, y: fun(x + y, 4 * ones_2).astype(numpy.float64).astype(numpy.int32)

        else:
            # With a float in second position
            ones_else = numpy.ones(tensor_shape, dtype=numpy.float64) if tensor_shape != () else 1

            def get_function_to_trace():
                return (
                    lambda x, y: fun(x + y, 5.7 * ones_else)
                    .astype(numpy.float64)
                    .astype(numpy.int32)
                )

        input_list = [0, 2, 42, 44]

        # Domain of definition
        if fun in [numpy.true_divide, numpy.remainder, numpy.floor_divide, numpy.fmod]:
            input_list = [2, 42, 44]

        for input_ in input_list:
            function_to_trace = get_function_to_trace()
            params_names = signature(function_to_trace).parameters.keys()
            op_graph = trace_numpy_function(
                function_to_trace,
                {
                    param_name: EncryptedTensor(Integer(32, True), tensor_shape)
                    for param_name in params_names
                },
            )
            orig_num_nodes = len(op_graph.graph)
            fuse_float_operations(op_graph)
            fused_num_nodes = len(op_graph.graph)
            assert fused_num_nodes < orig_num_nodes

            # Check that the call to the function or to the op_graph evaluation give the same
            # result
            tensor_diversifier = (
                # The following +1 in the range is to avoid to have 0's which is not in the
                # domain definition of some of our functions.
                # numpy.product was deprecated and removed in NumPy 2.0; numpy.prod
                # is the supported equivalent.
                numpy.arange(1, numpy.prod(tensor_shape) + 1, dtype=numpy.int32).reshape(
                    tensor_shape
                )
                if tensor_shape != ()
                else numpy.int64(1)
            )

            # Make sure the tensor diversifier is a numpy variable, otherwise some cases may fail
            # as python int and float don't have the astype method
            input_ = input_ * tensor_diversifier

            num_params = len(params_names)
            assert num_params == 2

            # Create inputs which are either of the form [x, x] or [x, y]
            for j in range(4):
                inputs = (input_, input_ + j)
                function_result = function_to_trace(*inputs)
                op_graph_result = op_graph(*inputs)
                assert check_results_are_equal(function_result, op_graph_result)
def subtest_fuse_float_binary_operations_dont_support_two_variables(fun, tensor_shape):
    """Check that fuse_float_operations rejects a binary op whose two inputs
    are both non-constant (no constant operand to fuse against)."""
    function_to_trace = lambda x, y: fun(x, y).astype(numpy.int32)
    encrypted_params = {
        name: EncryptedTensor(Integer(32, True), tensor_shape)
        for name in signature(function_to_trace).parameters.keys()
    }
    with pytest.raises(
        AssertionError,
        match=r"Can only have 1 non constant predecessor in _np_operator, got 2 for operator",
    ):
        trace_numpy_function(function_to_trace, encrypted_params)
@pytest.mark.parametrize("fun", tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC)
@pytest.mark.parametrize(
    "tensor_shape", [pytest.param((), id="scalar"), pytest.param((3, 1, 2), id="tensor")]
)
def test_ufunc_operations(fun, tensor_shape):
    """Exercise every ufunc in tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC."""
    arity = fun.nin
    if arity not in (1, 2):
        raise NotImplementedError("Only unary and binary functions are tested for now")
    if arity == 1:
        subtest_fuse_float_unary_operations_correctness(fun, tensor_shape)
    else:
        subtest_fuse_float_binary_operations_correctness(fun, tensor_shape)
        subtest_fuse_float_binary_operations_dont_support_two_variables(fun, tensor_shape)
        subtest_tensor_no_fuse(fun, tensor_shape)
| [
"numpy.product",
"numpy.int64",
"numpy.reshape",
"numpy.ones",
"concrete.common.data_types.integers.Integer",
"numpy.int32",
"inspect.signature",
"concrete.common.optimization.topological.fuse_float_operations",
"pytest.param",
"pytest.mark.parametrize",
"pytest.raises",
"numpy.cos",
"concre... | [((24956, 25028), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fun"""', 'tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC'], {}), "('fun', tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC)\n", (24979, 25028), False, 'import pytest\n'), ((1158, 1170), 'numpy.cos', 'numpy.cos', (['x'], {}), '(x)\n', (1167, 1170), False, 'import numpy\n'), ((13095, 13142), 'concrete.numpy.tracing.trace_numpy_function', 'trace_numpy_function', (['function_to_trace', 'params'], {}), '(function_to_trace, params)\n', (13115, 13142), False, 'from concrete.numpy.tracing import trace_numpy_function\n'), ((13211, 13242), 'concrete.common.optimization.topological.fuse_float_operations', 'fuse_float_operations', (['op_graph'], {}), '(op_graph)\n', (13232, 13242), False, 'from concrete.common.optimization.topological import fuse_float_operations\n'), ((15175, 15206), 'concrete.common.optimization.topological.fuse_float_operations', 'fuse_float_operations', (['op_graph'], {}), '(op_graph)\n', (15196, 15206), False, 'from concrete.common.optimization.topological import fuse_float_operations\n'), ((24588, 24713), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '"""Can only have 1 non constant predecessor in _np_operator, got 2 for operator"""'}), "(AssertionError, match=\n 'Can only have 1 non constant predecessor in _np_operator, got 2 for operator'\n )\n", (24601, 24713), False, 'import pytest\n'), ((25076, 25105), 'pytest.param', 'pytest.param', (['()'], {'id': '"""scalar"""'}), "((), id='scalar')\n", (25088, 25105), False, 'import pytest\n'), ((25107, 25143), 'pytest.param', 'pytest.param', (['(3, 1, 2)'], {'id': '"""tensor"""'}), "((3, 1, 2), id='tensor')\n", (25119, 25143), False, 'import pytest\n'), ((2278, 2304), 'numpy.reshape', 'numpy.reshape', (['x', 'newshape'], {}), '(x, newshape)\n', (2291, 2304), False, 'import numpy\n'), ((3910, 3940), 'numpy.sin', 'numpy.sin', (['(x_p_1 + x_p_2 + 0.3)'], {}), '(x_p_1 + x_p_2 + 0.3)\n', (3919, 3940), False, 'import 
numpy\n'), ((14563, 14584), 'random.randint', 'random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (14577, 14584), False, 'import random\n'), ((14718, 14751), 'numpy.ones', 'numpy.ones', (['constant_bigger_shape'], {}), '(constant_bigger_shape)\n', (14728, 14751), False, 'import numpy\n'), ((15923, 16009), 'concrete.common.debugging.custom_assert.assert_not_reached', 'assert_not_reached', (['f"""Incompatible outputs: {function_result}, {op_graph_result}"""'], {}), "(\n f'Incompatible outputs: {function_result}, {op_graph_result}')\n", (15941, 16009), False, 'from concrete.common.debugging.custom_assert import assert_not_reached\n'), ((17753, 17784), 'concrete.common.optimization.topological.fuse_float_operations', 'fuse_float_operations', (['op_graph'], {}), '(op_graph)\n', (17774, 17784), False, 'from concrete.common.optimization.topological import fuse_float_operations\n'), ((18724, 18764), 'numpy.int32', 'numpy.int32', (['(input_ * tensor_diversifier)'], {}), '(input_ * tensor_diversifier)\n', (18735, 18764), False, 'import numpy\n'), ((22873, 22904), 'concrete.common.optimization.topological.fuse_float_operations', 'fuse_float_operations', (['op_graph'], {}), '(op_graph)\n', (22894, 22904), False, 'from concrete.common.optimization.topological import fuse_float_operations\n'), ((1563, 1586), 'numpy.ones', 'numpy.ones', (['(3, 10, 10)'], {}), '((3, 10, 10))\n', (1573, 1586), False, 'import numpy\n'), ((1676, 1720), 'numpy.full', 'numpy.full', (['(10,)', '(1.33)'], {'dtype': 'numpy.float64'}), '((10,), 1.33, dtype=numpy.float64)\n', (1686, 1720), False, 'import numpy\n'), ((4771, 4788), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (4778, 4788), False, 'from concrete.common.data_types.integers import Integer\n'), ((4837, 4854), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (4844, 4854), False, 'from concrete.common.data_types.integers import Integer\n'), 
((13706, 13725), 'numpy.int32', 'numpy.int32', (['input_'], {}), '(input_)\n', (13717, 13725), False, 'import numpy\n'), ((13769, 13831), 'numpy.full', 'numpy.full', (['param_input_value.shape', 'input_'], {'dtype': 'numpy.int32'}), '(param_input_value.shape, input_, dtype=numpy.int32)\n', (13779, 13831), False, 'import numpy\n'), ((14860, 14888), 'inspect.signature', 'signature', (['function_to_trace'], {}), '(function_to_trace)\n', (14869, 14888), False, 'from inspect import signature\n'), ((15022, 15039), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (15029, 15039), False, 'from concrete.common.data_types.integers import Integer\n'), ((20926, 20969), 'numpy.ones', 'numpy.ones', (['tensor_shape'], {'dtype': 'numpy.int32'}), '(tensor_shape, dtype=numpy.int32)\n', (20936, 20969), False, 'import numpy\n'), ((23515, 23529), 'numpy.int64', 'numpy.int64', (['(1)'], {}), '(1)\n', (23526, 23529), False, 'import numpy\n'), ((24531, 24559), 'inspect.signature', 'signature', (['function_to_trace'], {}), '(function_to_trace)\n', (24540, 24559), False, 'from inspect import signature\n'), ((6832, 6849), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (6839, 6849), False, 'from concrete.common.data_types.integers import Integer\n'), ((7790, 7807), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (7797, 7807), False, 'from concrete.common.data_types.integers import Integer\n'), ((8576, 8593), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (8583, 8593), False, 'from concrete.common.data_types.integers import Integer\n'), ((9393, 9410), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (9400, 9410), False, 'from concrete.common.data_types.integers import Integer\n'), ((10225, 10242), 
'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (10232, 10242), False, 'from concrete.common.data_types.integers import Integer\n'), ((12333, 12350), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (12340, 12350), False, 'from concrete.common.data_types.integers import Integer\n'), ((12533, 12550), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (12540, 12550), False, 'from concrete.common.data_types.integers import Integer\n'), ((12754, 12771), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (12761, 12771), False, 'from concrete.common.data_types.integers import Integer\n'), ((18611, 18654), 'numpy.ones', 'numpy.ones', (['tensor_shape'], {'dtype': 'numpy.int32'}), '(tensor_shape, dtype=numpy.int32)\n', (18621, 18654), False, 'import numpy\n'), ((19266, 19295), 'numpy.random.shuffle', 'numpy.random.shuffle', (['input_a'], {}), '(input_a)\n', (19286, 19295), False, 'import numpy\n'), ((19316, 19345), 'numpy.random.shuffle', 'numpy.random.shuffle', (['input_b'], {}), '(input_b)\n', (19336, 19345), False, 'import numpy\n'), ((21230, 21275), 'numpy.ones', 'numpy.ones', (['tensor_shape'], {'dtype': 'numpy.float64'}), '(tensor_shape, dtype=numpy.float64)\n', (21240, 21275), False, 'import numpy\n'), ((24848, 24865), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (24855, 24865), False, 'from concrete.common.data_types.integers import Integer\n'), ((4888, 4903), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (4897, 4903), False, 'from inspect import signature\n'), ((17381, 17409), 'inspect.signature', 'signature', (['function_to_trace'], {}), '(function_to_trace)\n', (17390, 17409), False, 'from inspect import signature\n'), ((17575, 17592), 'concrete.common.data_types.integers.Integer', 'Integer', 
(['(32)', '(True)'], {}), '(32, True)\n', (17582, 17592), False, 'from concrete.common.data_types.integers import Integer\n'), ((19394, 19414), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (19408, 19414), False, 'import random\n'), ((21582, 21625), 'numpy.ones', 'numpy.ones', (['tensor_shape'], {'dtype': 'numpy.int32'}), '(tensor_shape, dtype=numpy.int32)\n', (21592, 21625), False, 'import numpy\n'), ((21883, 21928), 'numpy.ones', 'numpy.ones', (['tensor_shape'], {'dtype': 'numpy.float64'}), '(tensor_shape, dtype=numpy.float64)\n', (21893, 21928), False, 'import numpy\n'), ((22501, 22529), 'inspect.signature', 'signature', (['function_to_trace'], {}), '(function_to_trace)\n', (22510, 22529), False, 'from inspect import signature\n'), ((22695, 22712), 'concrete.common.data_types.integers.Integer', 'Integer', (['(32)', '(True)'], {}), '(32, True)\n', (22702, 22712), False, 'from concrete.common.data_types.integers import Integer\n'), ((18224, 18251), 'numpy.product', 'numpy.product', (['tensor_shape'], {}), '(tensor_shape)\n', (18237, 18251), False, 'import numpy\n'), ((23344, 23371), 'numpy.product', 'numpy.product', (['tensor_shape'], {}), '(tensor_shape)\n', (23357, 23371), False, 'import numpy\n')] |
import torch
from torch import nn
import numpy as np
from collections import OrderedDict
from torch.utils.data import DataLoader
from torch.utils.data import Sampler
from contextlib import nullcontext
import yaml
from yaml import SafeLoader as yaml_Loader, SafeDumper as yaml_Dumper
import os,sys
from tqdm import tqdm
from lib.utils.dotdict import HDict
# Make `path` (os.path) available inside lazily evaluated HDict.L config
# expressions, e.g. HDict.L('c:path.join(c.save_path,"logs")') below.
HDict.L.update_globals({'path':os.path})
def str_presenter(dumper, data):
    """YAML representer: emit multi-line strings in literal block style ('|')."""
    str_tag = 'tag:yaml.org,2002:str'
    multiline = len(data.splitlines()) > 1
    if multiline:
        return dumper.represent_scalar(str_tag, data, style='|')
    return dumper.represent_scalar(str_tag, data)
# Register str_presenter globally so every SafeDumper dump (configs, history,
# summaries below) renders multi-line strings in readable block style.
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
def read_config_from_file(config_file):
    """Load and return the YAML document stored at *config_file*."""
    with open(config_file, 'r') as stream:
        parsed = yaml.load(stream, Loader=yaml_Loader)
    return parsed
def save_config_to_file(config, config_file):
    """Write *config* as YAML to *config_file*, preserving key order."""
    with open(config_file, 'w') as stream:
        result = yaml.dump(config, stream, sort_keys=False, Dumper=yaml_Dumper)
    return result
class StopTrainingException(Exception):
    """Raised from a callback or epoch to request a graceful end of training
    (caught by TrainingBase.train_model, which finishes the epoch bookkeeping
    and then breaks out of the loop)."""
    pass
class CollatedBatch(list):
    """Marker list type: a group of sub-batches to be processed together in one
    optimizer step (gradient accumulation). Produced by grad_accum_collator and
    special-cased in preprocess_batch/training_step."""
    pass
class DistributedTestDataSampler(Sampler):
    """Deterministic batch sampler that shards a dataset across DDP ranks.

    The full index range is split as evenly as possible across ranks; each
    rank's share is then chopped into batches of at most `batch_size`
    elements. No shuffling, no dropped samples — suitable for evaluation.
    """
    def __init__(self, data_source, batch_size, rank, world_size):
        per_rank_indices = np.array_split(
            np.arange(len(data_source), dtype=int), world_size)
        # Ceil-divide by batch_size; rank 0 holds the largest share, so every
        # rank ends up with the same number of batches.
        n_batches = -(-len(per_rank_indices[0]) // batch_size)
        self.batch_indices = [chunk.tolist()
                              for chunk in np.array_split(per_rank_indices[rank],
                                                            n_batches)]
    def __iter__(self):
        return iter(self.batch_indices)
    def __len__(self):
        return len(self.batch_indices)
def cached_property(func):
    """Decorator: compute the value once on first access, then serve it from
    the instance.

    The result is memoized in the instance ``__dict__`` under ``_<name>``, so
    it can also be preset or cleared externally.
    """
    cache_attr = f'_{func.__name__}'
    def getter(self):
        try:
            return getattr(self, cache_attr)
        except AttributeError:
            value = func(self)
            self.__dict__[cache_attr] = value
            return value
    return property(getter)
class TrainingBase:
    """Config-driven training/evaluation loop skeleton with optional DDP.

    Subclasses must provide `train_dataset`, `val_dataset`, `base_model` and
    `calculate_loss` (plus `evaluate_predictions` when
    `config.evaluation_type == 'prediction'`). Dataloaders, optimizer,
    gradient accumulation, checkpointing, history logging and the epoch loop
    are handled here. Rank 0 ("main rank") owns all printing and persistence.
    """
    def __init__(self, config=None, ddp_rank=0, ddp_world_size=1):
        # Keep the raw user config separately; it is serialized alongside the
        # merged config by save_config_file.
        self.config_input = config
        self.config = self.get_default_config()
        if config is not None:
            # Fail fast on unknown keys so config-file typos surface early.
            for k in config.keys():
                if not k in self.config:
                    raise KeyError(f'Unknown config "{k}"')
            self.config.update(config)
        self.state = self.get_default_state()
        self.ddp_rank = ddp_rank
        self.ddp_world_size = ddp_world_size
        self.is_distributed = (self.ddp_world_size > 1)
        self.is_main_rank = (self.ddp_rank == 0)
    @cached_property
    def train_dataset(self):
        # Subclass responsibility.
        raise NotImplementedError
    @cached_property
    def val_dataset(self):
        # Subclass responsibility.
        raise NotImplementedError
    @cached_property
    def collate_fn(self):
        # None -> DataLoader uses its default collation.
        return None
    @cached_property
    def train_sampler(self):
        # Only consulted by train_dataloader in the distributed branch.
        return torch.utils.data.DistributedSampler(self.train_dataset,
                                                   shuffle=True)
    @cached_property
    def train_dataloader(self):
        """Training DataLoader; DistributedSampler replaces shuffling under DDP."""
        common_kwargs = dict(
            dataset=self.train_dataset,
            batch_size=self.config.batch_size,
            collate_fn=self.collate_fn,
            pin_memory=True,
        )
        if self.config.dataloader_workers > 0:
            common_kwargs.update(
                num_workers=self.config.dataloader_workers,
                persistent_workers=True,
                multiprocessing_context=self.config.dataloader_mp_context,
            )
        if not self.is_distributed:
            dataloader = DataLoader(**common_kwargs, shuffle=True,
                                    drop_last=False)
        else:
            dataloader = DataLoader(**common_kwargs,
                                    sampler=self.train_sampler)
        return dataloader
    @cached_property
    def val_dataloader(self):
        """Validation DataLoader; batches are `prediction_bmult` times larger
        than training batches, and sharded across ranks under DDP."""
        common_kwargs = dict(
            dataset=self.val_dataset,
            collate_fn=self.collate_fn,
            pin_memory=True,
        )
        if self.config.dataloader_workers > 0:
            common_kwargs.update(
                num_workers=self.config.dataloader_workers,
                persistent_workers=True,
                multiprocessing_context=self.config.dataloader_mp_context,
            )
        prediction_batch_size = self.config.batch_size*self.config.prediction_bmult
        if not self.is_distributed:
            dataloader = DataLoader(**common_kwargs,
                                    batch_size=prediction_batch_size,
                                    shuffle=False, drop_last=False)
        else:
            sampler = DistributedTestDataSampler(data_source=self.val_dataset,
                                                  batch_size=prediction_batch_size,
                                                  rank=self.ddp_rank,
                                                  world_size=self.ddp_world_size)
            dataloader = DataLoader(**common_kwargs, batch_sampler=sampler)
        return dataloader
    @cached_property
    def base_model(self):
        # Subclass responsibility: the bare nn.Module (without DDP wrapper).
        raise NotImplementedError
    @cached_property
    def model(self):
        # DDP-wrap the base model when running distributed; otherwise identical.
        model = self.base_model
        if self.is_distributed:
            model = torch.nn.parallel.DistributedDataParallel(model,device_ids=[self.ddp_rank],
                                                       output_device=self.ddp_rank)
        return model
    @cached_property
    def optimizer(self):
        # Optimizer class is looked up by name on torch.optim (e.g. 'Adam').
        config = self.config
        optimizer_class = getattr(torch.optim, config.optimizer)
        optimizer = optimizer_class(self.model.parameters(),
                                    lr=config.max_lr,
                                    **config.optimizer_params)
        return optimizer
    def get_default_config(self):
        """Full set of recognized config keys; __init__ rejects anything else.
        HDict.L values are lazy expressions evaluated against the config
        (`c`), with `path` bound to os.path (see module top)."""
        return HDict(
            scheme = None,
            model_name = 'unnamed_model',
            distributed = False,
            random_seed = None,
            num_epochs = 100,
            save_path = HDict.L('c:path.join("models",c.model_name)'),
            checkpoint_path = HDict.L('c:path.join(c.save_path,"checkpoint")'),
            config_path = HDict.L('c:path.join(c.save_path,"config")'),
            summary_path = HDict.L('c:path.join(c.save_path,"summary")'),
            log_path = HDict.L('c:path.join(c.save_path,"logs")'),
            validation_frequency = 1,
            batch_size = HDict.L('c:128 if c.distributed else 32'),
            optimizer = 'Adam' ,
            max_lr = 5e-4 ,
            clip_grad_value = None ,
            optimizer_params = {} ,
            dataloader_workers = 0 ,
            dataloader_mp_context = 'forkserver',
            training_type = 'normal' ,
            evaluation_type = 'validation',
            predictions_path = HDict.L('c:path.join(c.save_path,"predictions")'),
            grad_accum_steps = 1 ,
            prediction_bmult = 1 ,
        )
    def get_default_state(self):
        # Mutable training progress; persisted/restored by save/load_checkpoint.
        state = HDict(
            current_epoch = 0,
            global_step = 0,
        )
        return state
    def config_summary(self):
        """Print the resolved config (main rank only)."""
        if not self.is_main_rank: return
        for k,v in self.config.get_dict().items():
            print(f'{k} : {v}', flush=True)
    def save_config_file(self):
        """Persist both the merged config and the raw user input (main rank only)."""
        if not self.is_main_rank: return
        os.makedirs(os.path.dirname(self.config.config_path), exist_ok=True)
        save_config_to_file(self.config.get_dict(), self.config.config_path+'.yaml')
        save_config_to_file(self.config_input, self.config.config_path+'_input.yaml')
    def model_summary(self):
        """Write parameter counts and the model repr to summary_path (main rank only)."""
        if not self.is_main_rank: return
        os.makedirs(os.path.dirname(self.config.summary_path), exist_ok=True)
        trainable_params = 0
        non_trainable_params = 0
        for p in self.model.parameters():
            if p.requires_grad:
                trainable_params += p.numel()
            else:
                non_trainable_params += p.numel()
        summary = dict(
            trainable_params = trainable_params,
            non_trainable_params = non_trainable_params,
            model_representation = repr(self.model),
        )
        with open(self.config.summary_path+'.txt', 'w') as fp:
            yaml.dump(summary, fp, sort_keys=False, Dumper=yaml_Dumper)
    def save_checkpoint(self):
        """Save training state, model weights and optimizer state (main rank only)."""
        if not self.is_main_rank: return
        ckpt_path = self.config.checkpoint_path
        os.makedirs(ckpt_path, exist_ok=True)
        torch.save(self.state, os.path.join(ckpt_path, 'training_state'))
        torch.save(self.base_model.state_dict(), os.path.join(ckpt_path, 'model_state'))
        torch.save(self.optimizer.state_dict(), os.path.join(ckpt_path, 'optimizer_state'))
        print(f'Checkpoint saved to: {ckpt_path}',flush=True)
    def load_checkpoint(self):
        """Restore state/model/optimizer if a checkpoint exists; silently a
        no-op when any checkpoint file is missing (fresh run)."""
        ckpt_path = self.config.checkpoint_path
        try:
            self.state.update(torch.load(os.path.join(ckpt_path, 'training_state')))
            self.base_model.load_state_dict(torch.load(os.path.join(ckpt_path, 'model_state')))
            self.optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, 'optimizer_state')))
            if self.is_main_rank:
                print(f'Checkpoint loaded from: {ckpt_path}',flush=True)
            torch.cuda.empty_cache()
        except FileNotFoundError:
            pass
    # Callbacks — no-op hooks for subclasses; `training` distinguishes the
    # training pass from the evaluation pass. Raising StopTrainingException
    # from any of them stops training gracefully.
    def on_train_begin(self):
        pass
    def on_train_end(self):
        pass
    def on_epoch_begin(self, logs, training):
        pass
    def on_epoch_end(self, logs, training):
        pass
    def on_batch_begin(self, i, logs, training):
        pass
    def on_batch_end(self, i, logs, training):
        pass
    # Logging
    def get_verbose_logs(self):
        # Maps log key -> format spec used in the progress-bar description.
        return OrderedDict(loss='0.4f')
    @cached_property
    def verbose_logs(self):
        return self.get_verbose_logs()
    def update_logs(self, logs, training, **updates):
        # Evaluation metrics get a 'val_' prefix so they coexist with
        # training metrics in the same logs dict.
        if training:
            logs.update(updates)
        else:
            logs.update(('val_'+k,v) for k,v in updates.items())
    def log_description(self, i, logs, training):
        """Format the verbose_logs entries for the tqdm description."""
        if training:
            return list(f'{k} = {logs[k]:{f}}'
                        for k,f in self.verbose_logs.items())
        else:
            return list(f'val_{k} = {logs["val_"+k]:{f}}'
                        for k,f in self.verbose_logs.items())
    # Training loop
    def preprocess_batch(self, batch):
        """Recursively move a batch (tensor, mapping, sequence, or
        CollatedBatch of those) to the GPU."""
        if isinstance(batch, CollatedBatch):
            return CollatedBatch(self.preprocess_batch(b) for b in batch)
        elif hasattr(batch, 'cuda'):
            return batch.cuda(non_blocking=True)
        elif hasattr(batch, 'items'):
            return batch.__class__((k,v.cuda(non_blocking=True)) for k,v in batch.items())
        elif hasattr(batch, '__iter__'):
            return batch.__class__(v.cuda(non_blocking=True) for v in batch)
        else:
            raise ValueError(f'Unsupported batch type: {type(batch)}')
    def calculate_loss(self, outputs, inputs):
        # Subclass responsibility: return a scalar loss tensor.
        raise NotImplementedError
    def grad_accum_gather_outputs(self, outputs):
        # Combine per-sub-batch outputs of a CollatedBatch; override if the
        # model output is not a single concatenatable tensor.
        return torch.cat(outputs, dim=0)
    def grad_accum_reduce_loss(self, loss):
        # Sub-batch losses are already divided by the number of sub-batches
        # in training_step, so a plain sum gives the mean accumulated loss.
        with torch.no_grad():
            total_loss = sum(loss)
        return total_loss
    def grad_accum_collator(self, dataloader):
        """Yield batches unchanged when grad_accum_steps == 1; otherwise group
        them into CollatedBatch chunks of grad_accum_steps.

        The yield sits in the `finally` so that a trailing partial group
        (fewer than grad_accum_steps batches) is still emitted before the
        StopIteration `break` takes effect.
        """
        dataloader_iter = iter(dataloader)
        if self.config.grad_accum_steps == 1:
            yield from dataloader_iter
        else:
            while True:
                collated_batch = CollatedBatch()
                try:
                    for _ in range(self.config.grad_accum_steps):
                        collated_batch.append(next(dataloader_iter))
                except StopIteration:
                    break
                finally:
                    if len(collated_batch) > 0: yield collated_batch
    @cached_property
    def train_steps_per_epoch(self):
        # Ceil-divide by grad_accum_steps: one "step" = one optimizer update.
        if self.config.grad_accum_steps == 1:
            return len(self.train_dataloader)
        else:
            return (len(self.train_dataloader) + self.config.grad_accum_steps - 1)\
                        // self.config.grad_accum_steps
    @cached_property
    def validation_steps_per_epoch(self):
        return len(self.val_dataloader)
    def training_step(self, batch, logs):
        """One optimizer step: forward, backward (accumulated over a
        CollatedBatch if present), optional gradient clipping, update.

        Returns (outputs, loss); for CollatedBatch these are the gathered
        outputs and reduced loss.
        """
        # Cheaper than optimizer.zero_grad(): frees grads instead of zeroing.
        for param in self.model.parameters():
            param.grad = None
        if not isinstance(batch, CollatedBatch):
            outputs = self.model(batch)
            loss = self.calculate_loss(outputs=outputs, inputs=batch)
            loss.backward()
        else:
            num_nested_batches = len(batch)
            outputs = CollatedBatch()
            loss = CollatedBatch()
            # Under DDP, skip gradient synchronization for the accumulation
            # sub-steps; sync happens in the reduction outside no_sync().
            # NOTE(review): all sub-batch backwards run inside no_sync(), so
            # the final gradient all-reduce relies on the subsequent
            # optimizer hooks — confirm gradients are synced as intended.
            sync_context = self.model.no_sync() \
                if self.is_distributed else nullcontext()
            with sync_context:
                for b in batch:
                    o = self.model(b)
                    # Divide so the accumulated gradient matches the mean loss.
                    l = self.calculate_loss(outputs=o, inputs=b) / num_nested_batches
                    l.backward()
                    outputs.append(o)
                    loss.append(l)
            outputs = self.grad_accum_gather_outputs(outputs)
            loss = self.grad_accum_reduce_loss(loss)
        if self.config.clip_grad_value is not None:
            nn.utils.clip_grad_value_(self.model.parameters(), self.config.clip_grad_value)
        self.optimizer.step()
        return outputs, loss
    def validation_step(self, batch, logs):
        """Forward pass + loss only; caller wraps this in torch.no_grad()."""
        outputs = self.model(batch)
        loss = self.calculate_loss(outputs=outputs, inputs=batch)
        return outputs, loss
    def initialize_metrics(self, logs, training):
        # Hook for subclasses that track metrics beyond the loss.
        pass
    def update_metrics(self, outputs, inputs, logs, training):
        # Hook for subclasses that track metrics beyond the loss.
        pass
    def initialize_losses(self, logs, training):
        # Running sum of per-step losses for the epoch average.
        self._total_loss = 0.
    def update_losses(self, i, loss, inputs, logs, training):
        """Accumulate the step loss and write the running mean into logs."""
        if not self.is_distributed:
            step_loss = loss.item()
        else:
            if training:
                loss = loss.detach()
                torch.distributed.all_reduce(loss)
            # NOTE(review): in the distributed *evaluation* path the loss is
            # divided by world_size without an all_reduce — looks inconsistent
            # with the training path; confirm intent.
            step_loss = loss.item()/self.ddp_world_size
        self._total_loss += step_loss
        self.update_logs(logs=logs, training=training,
                         loss=self._total_loss/(i+1))
    def train_epoch(self, epoch, logs):
        """Full training epoch with loss/metric tracking and tqdm progress."""
        self.model.train()
        self.initialize_losses(logs, True)
        self.initialize_metrics(logs, True)
        if self.is_distributed:
            # Reshuffle the shards differently each epoch.
            self.train_sampler.set_epoch(epoch)
        gen = self.grad_accum_collator(self.train_dataloader)
        if self.is_main_rank:
            gen = tqdm(gen, dynamic_ncols=True,
                      total=self.train_steps_per_epoch)
        try:
            for i, batch in enumerate(gen):
                self.on_batch_begin(i, logs, True)
                batch = self.preprocess_batch(batch)
                outputs, loss = self.training_step(batch, logs)
                self.state.global_step = self.state.global_step + 1
                logs.update(global_step=self.state.global_step)
                self.update_losses(i, loss, batch, logs, True)
                self.update_metrics(outputs, batch, logs, True)
                self.on_batch_end(i, logs, True)
                if self.is_main_rank:
                    desc = 'Training: '+'; '.join(self.log_description(i, logs, True))
                    gen.set_description(desc)
        finally:
            if self.is_main_rank: gen.close()
        # Release gradient memory between epochs.
        for param in self.model.parameters():
            param.grad = None
    def minimal_train_epoch(self, epoch, logs):
        """Leaner variant of train_epoch: no loss/metric tracking or
        per-batch description updates (config.training_type == 'minimal')."""
        self.model.train()
        if self.is_distributed:
            self.train_sampler.set_epoch(epoch)
        gen = self.grad_accum_collator(self.train_dataloader)
        if self.is_main_rank:
            gen = tqdm(gen, dynamic_ncols=True, desc='Training: ',
                      total=self.train_steps_per_epoch)
        try:
            for i, batch in enumerate(gen):
                self.on_batch_begin(i, logs, True)
                batch = self.preprocess_batch(batch)
                _ = self.training_step(batch, logs)
                self.state.global_step = self.state.global_step + 1
                logs.update(global_step=self.state.global_step)
                self.on_batch_end(i, logs, True)
        finally:
            if self.is_main_rank: gen.close()
        for param in self.model.parameters():
            param.grad = None
    def validation_epoch(self, epoch, logs):
        """Loss-based evaluation pass (config.evaluation_type == 'validation')."""
        self.model.eval()
        self.initialize_losses(logs, False)
        self.initialize_metrics(logs, False)
        gen = self.val_dataloader
        if self.is_main_rank:
            gen = tqdm(gen, dynamic_ncols=True,
                      total=self.validation_steps_per_epoch)
        try:
            with torch.no_grad():
                for i, batch in enumerate(gen):
                    self.on_batch_begin(i, logs, False)
                    batch = self.preprocess_batch(batch)
                    outputs, loss = self.validation_step(batch, logs)
                    self.update_losses(i, loss, batch, logs, False)
                    self.update_metrics(outputs, batch, logs, False)
                    self.on_batch_end(i, logs, False)
                    if self.is_main_rank:
                        desc = 'Validation: '+'; '.join(self.log_description(i, logs, False))
                        gen.set_description(desc)
        finally:
            if self.is_main_rank: gen.close()
    def load_history(self):
        """Return the saved per-epoch logs list, or [] for a fresh run."""
        history_file = os.path.join(self.config.log_path, 'history.yaml')
        try:
            with open(history_file, 'r') as fp:
                return yaml.load(fp, Loader=yaml_Loader)
        except FileNotFoundError:
            return []
    def save_history(self, history):
        """Overwrite history.yaml with the accumulated per-epoch logs."""
        os.makedirs(self.config.log_path, exist_ok=True)
        history_file = os.path.join(self.config.log_path, 'history.yaml')
        with open(history_file, 'w') as fp:
            yaml.dump(history, fp, sort_keys=False, Dumper=yaml_Dumper)
    def train_model(self):
        """Main epoch loop: train, periodically evaluate, checkpoint, and
        persist history. Resumes from state.current_epoch; a
        StopTrainingException from either pass finishes the epoch's
        bookkeeping, then breaks."""
        if self.is_main_rank:
            history = self.load_history()
        starting_epoch = self.state.current_epoch
        self.on_train_begin()
        should_stop_training = False
        try:
            for i in range(starting_epoch, self.config.num_epochs):
                self.state.current_epoch = i
                if self.is_main_rank:
                    print(f'\nEpoch {i+1}/{self.config.num_epochs}:', flush=True)
                logs = dict(epoch = self.state.current_epoch,
                            global_step = self.state.global_step)
                try:
                    self.on_epoch_begin(logs, True)
                    if self.config.training_type == 'normal':
                        self.train_epoch(i, logs)
                    elif self.config.training_type == 'minimal':
                        self.minimal_train_epoch(i, logs)
                    else:
                        raise ValueError(f'Unknown training type: {self.config.training_type}')
                    self.on_epoch_end(logs, True)
                except StopTrainingException:
                    should_stop_training = True
                try:
                    # Evaluate every validation_frequency epochs.
                    if (self.val_dataloader is not None)\
                        and (not ((i+1) % self.config.validation_frequency)):
                        self.on_epoch_begin(logs, False)
                        if self.config.evaluation_type == 'validation':
                            self.validation_epoch(i, logs)
                        elif self.config.evaluation_type == 'prediction':
                            self.prediction_epoch(i, logs)
                        else:
                            raise ValueError(f'Unknown evaluation type: {self.config.evaluation_type}')
                        self.on_epoch_end(logs, False)
                except StopTrainingException:
                    should_stop_training = True
                # Mark the epoch complete so a resume starts at the next one.
                self.state.current_epoch = i + 1
                if self.is_main_rank:
                    self.save_checkpoint()
                    history.append(logs)
                    self.save_history(history)
                if should_stop_training:
                    if self.is_main_rank:
                        print('Stopping training ...')
                    break
        finally:
            self.on_train_end()
    def distributed_barrier(self):
        # Synchronize ranks via a dummy all_reduce; no-op when not distributed.
        if self.is_distributed:
            dummy = torch.ones((),dtype=torch.int64).cuda()
            torch.distributed.all_reduce(dummy)
    # Prediction logic
    def prediction_step(self, batch):
        """Run the model on one batch and bundle inputs with predictions."""
        predictions = self.model(batch)
        if isinstance(batch, torch.Tensor):
            return dict(inputs=batch, predictions=predictions)
        elif isinstance(batch, list):
            outputs = batch.copy()
            # NOTE(review): this appends to `batch`, not to the copy
            # `outputs`, so the returned list lacks the predictions and the
            # caller's batch is mutated — looks like a bug; confirm intent.
            batch.append(predictions)
            return outputs
        elif isinstance(batch, dict):
            outputs = batch.copy()
            outputs.update(predictions=predictions)
            return outputs
        # NOTE(review): implicitly returns None for any other batch type.
    def prediction_loop(self, dataloader):
        """Collect prediction_step results for every batch, without gradients."""
        self.model.eval()
        outputs = []
        if self.is_main_rank:
            gen = tqdm(dataloader, dynamic_ncols=True)
        else:
            gen = dataloader
        try:
            with torch.no_grad():
                for batch in gen:
                    batch = self.preprocess_batch(batch)
                    outputs.append(self.prediction_step(batch))
        finally:
            if self.is_main_rank: gen.close()
        return outputs
    def preprocess_predictions(self, outputs):
        """Concatenate the per-batch outputs (tensor / dict / list of tensors)
        into one structure along the batch dimension."""
        if isinstance(outputs[0], torch.Tensor):
            return torch.cat(outputs, dim=0)
        elif isinstance(outputs[0], dict):
            return {k: torch.cat([o[k] for o in outputs], dim=0)
                    for k in outputs[0].keys()}
        elif isinstance(outputs[0], list):
            return [torch.cat([o[i] for o in outputs], dim=0)
                    for i in range(len(outputs[0]))]
        else:
            raise ValueError('Unsupported output type')
    def postprocess_predictions(self, outputs):
        """Convert the gathered tensors to CPU numpy arrays."""
        if isinstance(outputs, torch.Tensor):
            return outputs.cpu().numpy()
        elif isinstance(outputs, dict):
            return {k: v.cpu().numpy() for k, v in outputs.items()}
        elif isinstance(outputs, list):
            return [v.cpu().numpy() for v in outputs]
        else:
            raise ValueError('Unsupported output type')
    def distributed_gatther_tensor(self, tensors):
        """All-gather variable-length tensors (dim 0) from every rank into one
        concatenated tensor, using two all_reduce passes (sizes, then data).

        NOTE(review): 'gatther' is a typo kept for call-site compatibility.
        """
        # shapes[r+1] holds rank r's length; cumsum gives slice offsets.
        shapes = torch.zeros(self.ddp_world_size+1, dtype=torch.long).cuda()
        shapes[self.ddp_rank+1] = tensors.shape[0]
        torch.distributed.all_reduce(shapes)
        offsets = torch.cumsum(shapes, dim=0)
        # Each rank writes its slice; summing the zero-padded buffers merges them.
        all_tensors = torch.zeros(offsets[-1], *tensors.shape[1:], dtype=tensors.dtype).cuda()
        all_tensors[offsets[self.ddp_rank]:offsets[self.ddp_rank+1]] = tensors
        torch.distributed.all_reduce(all_tensors)
        return all_tensors
    def distributed_gather_predictions(self, predictions):
        """Gather tensor / list / dict predictions from all ranks."""
        if self.is_main_rank:
            print('Gathering predictions from all ranks...')
        if isinstance(predictions, torch.Tensor):
            all_predictions = self.distributed_gatther_tensor(predictions)
        elif isinstance(predictions, list):
            all_predictions = [self.distributed_gatther_tensor(pred) for pred in predictions]
        elif isinstance(predictions, dict):
            all_predictions = {key:self.distributed_gatther_tensor(pred)
                               for key, pred in predictions.items()}
        else:
            raise ValueError('Unsupported output type')
        if self.is_main_rank:
            print('Done.')
        return all_predictions
    def save_predictions(self, dataset_name, predictions):
        """Serialize predictions to <predictions_path>/<dataset_name>.pt."""
        os.makedirs(self.config.predictions_path, exist_ok=True)
        predictions_file = os.path.join(self.config.predictions_path, f'{dataset_name}.pt')
        torch.save(predictions, predictions_file)
        print(f'Saved predictions to {predictions_file}')
    def evaluate_predictions(self, predictions):
        # Subclass responsibility: return a dict of metric name -> value.
        raise NotImplementedError
    def prediction_epoch(self, epoch, logs):
        """Prediction-based evaluation (config.evaluation_type == 'prediction'):
        predict on the validation set, gather across ranks, evaluate, and
        merge 'val_'-prefixed metrics into logs."""
        if self.is_main_rank:
            print(f'Predicting on validation dataset...')
        dataloader = self.val_dataloader
        outputs = self.prediction_loop(dataloader)
        outputs = self.preprocess_predictions(outputs)
        if self.is_distributed:
            outputs = self.distributed_gather_predictions(outputs)
        predictions = self.postprocess_predictions(outputs)
        if self.is_main_rank:
            self.save_predictions('validation', predictions)
        # Every rank evaluates; only the main rank saved/prints.
        results = self.evaluate_predictions(predictions)
        results = {f'val_{k}': v for k, v in results.items()}
        logs.update(results)
        if self.is_main_rank:
            desc = 'Validation: '+'; '.join(f'{k}: {v:.4f}' for k, v in results.items())
            print(desc, flush=True)
    # Interface
    def prepare_for_training(self):
        """Log config, persist it, resume from checkpoint, summarize the model."""
        self.config_summary()
        self.save_config_file()
        self.load_checkpoint()
        self.model_summary()
    def execute_training(self):
        """High-level entry point: prepare, run the epoch loop, finalize."""
        self.prepare_for_training()
        self.train_model()
        self.finalize_training()
    def finalize_training(self):
        # Hook for subclasses (cleanup, final export, ...).
        pass
| [
"torch.utils.data.DistributedSampler",
"yaml.load",
"lib.utils.dotdict.HDict.L",
"numpy.array_split",
"numpy.arange",
"lib.utils.dotdict.HDict",
"torch.nn.parallel.DistributedDataParallel",
"collections.OrderedDict",
"yaml.dump",
"torch.distributed.all_reduce",
"os.path.dirname",
"yaml.represe... | [((358, 399), 'lib.utils.dotdict.HDict.L.update_globals', 'HDict.L.update_globals', (["{'path': os.path}"], {}), "({'path': os.path})\n", (380, 399), False, 'from lib.utils.dotdict import HDict\n'), ((637, 705), 'yaml.representer.SafeRepresenter.add_representer', 'yaml.representer.SafeRepresenter.add_representer', (['str', 'str_presenter'], {}), '(str, str_presenter)\n', (685, 705), False, 'import yaml\n'), ((802, 835), 'yaml.load', 'yaml.load', (['fp'], {'Loader': 'yaml_Loader'}), '(fp, Loader=yaml_Loader)\n', (811, 835), False, 'import yaml\n'), ((937, 995), 'yaml.dump', 'yaml.dump', (['config', 'fp'], {'sort_keys': '(False)', 'Dumper': 'yaml_Dumper'}), '(config, fp, sort_keys=False, Dumper=yaml_Dumper)\n', (946, 995), False, 'import yaml\n'), ((1253, 1283), 'numpy.arange', 'np.arange', (['data_len'], {'dtype': 'int'}), '(data_len, dtype=int)\n', (1262, 1283), True, 'import numpy as np\n'), ((1308, 1347), 'numpy.array_split', 'np.array_split', (['all_indices', 'world_size'], {}), '(all_indices, world_size)\n', (1322, 1347), True, 'import numpy as np\n'), ((2990, 3059), 'torch.utils.data.DistributedSampler', 'torch.utils.data.DistributedSampler', (['self.train_dataset'], {'shuffle': '(True)'}), '(self.train_dataset, shuffle=True)\n', (3025, 3059), False, 'import torch\n'), ((7377, 7414), 'lib.utils.dotdict.HDict', 'HDict', ([], {'current_epoch': '(0)', 'global_step': '(0)'}), '(current_epoch=0, global_step=0)\n', (7382, 7414), False, 'from lib.utils.dotdict import HDict\n'), ((8836, 8873), 'os.makedirs', 'os.makedirs', (['ckpt_path'], {'exist_ok': '(True)'}), '(ckpt_path, exist_ok=True)\n', (8847, 8873), False, 'import os, sys\n'), ((10186, 10210), 'collections.OrderedDict', 'OrderedDict', ([], {'loss': '"""0.4f"""'}), "(loss='0.4f')\n", (10197, 10210), False, 'from collections import OrderedDict\n'), ((11579, 11604), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (11588, 11604), False, 'import torch\n'), ((18395, 
18445), 'os.path.join', 'os.path.join', (['self.config.log_path', '"""history.yaml"""'], {}), "(self.config.log_path, 'history.yaml')\n", (18407, 18445), False, 'import os, sys\n'), ((18670, 18718), 'os.makedirs', 'os.makedirs', (['self.config.log_path'], {'exist_ok': '(True)'}), '(self.config.log_path, exist_ok=True)\n', (18681, 18718), False, 'import os, sys\n'), ((18742, 18792), 'os.path.join', 'os.path.join', (['self.config.log_path', '"""history.yaml"""'], {}), "(self.config.log_path, 'history.yaml')\n", (18754, 18792), False, 'import os, sys\n'), ((23729, 23765), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['shapes'], {}), '(shapes)\n', (23757, 23765), False, 'import torch\n'), ((23793, 23820), 'torch.cumsum', 'torch.cumsum', (['shapes'], {'dim': '(0)'}), '(shapes, dim=0)\n', (23805, 23820), False, 'import torch\n'), ((24012, 24053), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['all_tensors'], {}), '(all_tensors)\n', (24040, 24053), False, 'import torch\n'), ((24934, 24990), 'os.makedirs', 'os.makedirs', (['self.config.predictions_path'], {'exist_ok': '(True)'}), '(self.config.predictions_path, exist_ok=True)\n', (24945, 24990), False, 'import os, sys\n'), ((25018, 25082), 'os.path.join', 'os.path.join', (['self.config.predictions_path', 'f"""{dataset_name}.pt"""'], {}), "(self.config.predictions_path, f'{dataset_name}.pt')\n", (25030, 25082), False, 'import os, sys\n'), ((25091, 25132), 'torch.save', 'torch.save', (['predictions', 'predictions_file'], {}), '(predictions, predictions_file)\n', (25101, 25132), False, 'import torch\n'), ((3698, 3756), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'shuffle': '(True)', 'drop_last': '(False)'}), '(**common_kwargs, shuffle=True, drop_last=False)\n', (3708, 3756), False, 'from torch.utils.data import DataLoader\n'), ((3832, 3887), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'sampler': 'self.train_sampler'}), '(**common_kwargs, sampler=self.train_sampler)\n', 
(3842, 3887), False, 'from torch.utils.data import DataLoader\n'), ((4570, 4667), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'batch_size': 'prediction_batch_size', 'shuffle': '(False)', 'drop_last': '(False)'}), '(**common_kwargs, batch_size=prediction_batch_size, shuffle=False,\n drop_last=False)\n', (4580, 4667), False, 'from torch.utils.data import DataLoader\n'), ((5088, 5138), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'batch_sampler': 'sampler'}), '(**common_kwargs, batch_sampler=sampler)\n', (5098, 5138), False, 'from torch.utils.data import DataLoader\n'), ((5378, 5487), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[self.ddp_rank]', 'output_device': 'self.ddp_rank'}), '(model, device_ids=[self.ddp_rank],\n output_device=self.ddp_rank)\n', (5419, 5487), False, 'import torch\n'), ((7744, 7784), 'os.path.dirname', 'os.path.dirname', (['self.config.config_path'], {}), '(self.config.config_path)\n', (7759, 7784), False, 'import os, sys\n'), ((8067, 8108), 'os.path.dirname', 'os.path.dirname', (['self.config.summary_path'], {}), '(self.config.summary_path)\n', (8082, 8108), False, 'import os, sys\n'), ((8643, 8702), 'yaml.dump', 'yaml.dump', (['summary', 'fp'], {'sort_keys': '(False)', 'Dumper': 'yaml_Dumper'}), '(summary, fp, sort_keys=False, Dumper=yaml_Dumper)\n', (8652, 8702), False, 'import yaml\n'), ((8914, 8955), 'os.path.join', 'os.path.join', (['ckpt_path', '"""training_state"""'], {}), "(ckpt_path, 'training_state')\n", (8926, 8955), False, 'import os, sys\n'), ((9006, 9044), 'os.path.join', 'os.path.join', (['ckpt_path', '"""model_state"""'], {}), "(ckpt_path, 'model_state')\n", (9018, 9044), False, 'import os, sys\n'), ((9094, 9136), 'os.path.join', 'os.path.join', (['ckpt_path', '"""optimizer_state"""'], {}), "(ckpt_path, 'optimizer_state')\n", (9106, 9136), False, 'import os, sys\n'), ((9696, 9720), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), 
'()\n', (9718, 9720), False, 'import torch\n'), ((11667, 11682), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11680, 11682), False, 'import torch\n'), ((14612, 14646), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['loss'], {}), '(loss)\n', (14640, 14646), False, 'import torch\n'), ((15226, 15289), 'tqdm.tqdm', 'tqdm', (['gen'], {'dynamic_ncols': '(True)', 'total': 'self.train_steps_per_epoch'}), '(gen, dynamic_ncols=True, total=self.train_steps_per_epoch)\n', (15230, 15289), False, 'from tqdm import tqdm\n'), ((16524, 16611), 'tqdm.tqdm', 'tqdm', (['gen'], {'dynamic_ncols': '(True)', 'desc': '"""Training: """', 'total': 'self.train_steps_per_epoch'}), "(gen, dynamic_ncols=True, desc='Training: ', total=self.\n train_steps_per_epoch)\n", (16528, 16611), False, 'from tqdm import tqdm\n'), ((17466, 17534), 'tqdm.tqdm', 'tqdm', (['gen'], {'dynamic_ncols': '(True)', 'total': 'self.validation_steps_per_epoch'}), '(gen, dynamic_ncols=True, total=self.validation_steps_per_epoch)\n', (17470, 17534), False, 'from tqdm import tqdm\n'), ((18849, 18908), 'yaml.dump', 'yaml.dump', (['history', 'fp'], {'sort_keys': '(False)', 'Dumper': 'yaml_Dumper'}), '(history, fp, sort_keys=False, Dumper=yaml_Dumper)\n', (18858, 18908), False, 'import yaml\n'), ((21516, 21551), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['dummy'], {}), '(dummy)\n', (21544, 21551), False, 'import torch\n'), ((22216, 22252), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'dynamic_ncols': '(True)'}), '(dataloader, dynamic_ncols=True)\n', (22220, 22252), False, 'from tqdm import tqdm\n'), ((22713, 22738), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (22722, 22738), False, 'import torch\n'), ((1483, 1531), 'numpy.array_split', 'np.array_split', (['split_indices[rank]', 'num_batches'], {}), '(split_indices[rank], num_batches)\n', (1497, 1531), True, 'import numpy as np\n'), ((6245, 6290), 'lib.utils.dotdict.HDict.L', 'HDict.L', 
(['"""c:path.join("models",c.model_name)"""'], {}), '(\'c:path.join("models",c.model_name)\')\n', (6252, 6290), False, 'from lib.utils.dotdict import HDict\n'), ((6328, 6376), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:path.join(c.save_path,"checkpoint")"""'], {}), '(\'c:path.join(c.save_path,"checkpoint")\')\n', (6335, 6376), False, 'from lib.utils.dotdict import HDict\n'), ((6414, 6458), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:path.join(c.save_path,"config")"""'], {}), '(\'c:path.join(c.save_path,"config")\')\n', (6421, 6458), False, 'from lib.utils.dotdict import HDict\n'), ((6496, 6541), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:path.join(c.save_path,"summary")"""'], {}), '(\'c:path.join(c.save_path,"summary")\')\n', (6503, 6541), False, 'from lib.utils.dotdict import HDict\n'), ((6579, 6621), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:path.join(c.save_path,"logs")"""'], {}), '(\'c:path.join(c.save_path,"logs")\')\n', (6586, 6621), False, 'from lib.utils.dotdict import HDict\n'), ((6698, 6739), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:128 if c.distributed else 32"""'], {}), "('c:128 if c.distributed else 32')\n", (6705, 6739), False, 'from lib.utils.dotdict import HDict\n'), ((7165, 7214), 'lib.utils.dotdict.HDict.L', 'HDict.L', (['"""c:path.join(c.save_path,"predictions")"""'], {}), '(\'c:path.join(c.save_path,"predictions")\')\n', (7172, 7214), False, 'from lib.utils.dotdict import HDict\n'), ((13325, 13338), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (13336, 13338), False, 'from contextlib import nullcontext\n'), ((17588, 17603), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17601, 17603), False, 'import torch\n'), ((18530, 18563), 'yaml.load', 'yaml.load', (['fp'], {'Loader': 'yaml_Loader'}), '(fp, Loader=yaml_Loader)\n', (18539, 18563), False, 'import yaml\n'), ((22326, 22341), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22339, 22341), False, 'import torch\n'), ((23610, 23664), 'torch.zeros', 
'torch.zeros', (['(self.ddp_world_size + 1)'], {'dtype': 'torch.long'}), '(self.ddp_world_size + 1, dtype=torch.long)\n', (23621, 23664), False, 'import torch\n'), ((23843, 23908), 'torch.zeros', 'torch.zeros', (['offsets[-1]', '*tensors.shape[1:]'], {'dtype': 'tensors.dtype'}), '(offsets[-1], *tensors.shape[1:], dtype=tensors.dtype)\n', (23854, 23908), False, 'import torch\n'), ((9338, 9379), 'os.path.join', 'os.path.join', (['ckpt_path', '"""training_state"""'], {}), "(ckpt_path, 'training_state')\n", (9350, 9379), False, 'import os, sys\n'), ((9437, 9475), 'os.path.join', 'os.path.join', (['ckpt_path', '"""model_state"""'], {}), "(ckpt_path, 'model_state')\n", (9449, 9475), False, 'import os, sys\n'), ((9532, 9574), 'os.path.join', 'os.path.join', (['ckpt_path', '"""optimizer_state"""'], {}), "(ckpt_path, 'optimizer_state')\n", (9544, 9574), False, 'import os, sys\n'), ((21464, 21497), 'torch.ones', 'torch.ones', (['()'], {'dtype': 'torch.int64'}), '((), dtype=torch.int64)\n', (21474, 21497), False, 'import torch\n'), ((22805, 22846), 'torch.cat', 'torch.cat', (['[o[k] for o in outputs]'], {'dim': '(0)'}), '([o[k] for o in outputs], dim=0)\n', (22814, 22846), False, 'import torch\n'), ((22959, 23000), 'torch.cat', 'torch.cat', (['[o[i] for o in outputs]'], {'dim': '(0)'}), '([o[i] for o in outputs], dim=0)\n', (22968, 23000), False, 'import torch\n')] |
import networkx as nx
import numpy as np
def project3d(points, direction):
    """
    Project a set of 3D points onto the plane through the origin with
    normal vector ``direction``.

    The in-plane "y" axis is the projection of the z axis (or of the x axis
    when the normal itself is the z axis).

    :param points: mapping of key -> 3D coordinates
    :param direction: plane normal vector (u, v, w); plane passes through the origin
    :return: dict mapping each key to its 2D (y, x) in-plane coordinates
    """
    normal = direction / np.linalg.norm(direction)
    z_axis = np.array([0, 0, 1])
    # Seed the in-plane y axis with z, unless the normal *is* the z axis.
    seed = np.array([1, 0, 0]) if z_axis.dot(normal) == 1 else z_axis
    in_plane_y = seed - np.dot(normal, seed) * normal
    in_plane_y = in_plane_y / np.linalg.norm(in_plane_y)
    in_plane_x = np.cross(in_plane_y, normal)
    in_plane_x = in_plane_x / np.linalg.norm(in_plane_x)
    projected = {}
    for key, xyz in points.items():
        p = np.array(xyz)
        # Drop the component along the normal, then express in plane axes.
        p_in_plane = p - np.dot(normal, p) * normal
        projected[key] = (np.dot(in_plane_y, p_in_plane), np.dot(in_plane_x, p_in_plane))
    return projected
class Graph:
    """
    Wrapper around nx.Graph carrying a display name, a free-form info dict,
    and matplotlib-based 2D-projection / 3D drawing helpers.
    """
    def __init__(self, name, nx_graph=None):
        self.name = name
        self.info = {}
        self.g = nx.Graph(nx_graph)

    def __len__(self):
        return len(self.nodes())

    def __getitem__(self, node):
        return self.g[node]

    def copy(self):
        """Return a Graph with the same name and a fresh copy of the nx graph."""
        return Graph(self.name, self.g)

    def add_node(self, node, **attr):
        self.g.add_node(node, **attr)

    def add_edge(self, node1, node2, **attr):
        self.g.add_edge(node1, node2, **attr)

    def remove_node(self, node):
        self.g.remove_node(node)

    def nodes(self):
        return self.g.nodes

    def edges(self):
        return self.g.edges

    def degree(self, node=None):
        """Degree of one node, or the full degree view when node is None."""
        if node is None:
            return self.g.degree
        return self.g.degree[node]

    def subgraph(self, nodes):
        return Graph(self.name, self.g.subgraph(nodes))

    def max_subgraph(self):
        """Return the subgraph induced by the largest connected component."""
        biggest = max(nx.connected_components(self.g), key=len)
        return Graph(self.name, self.g.subgraph(biggest))

    def is_connected(self):
        return nx.is_connected(self.g)

    def get_node_attributes(self, attr):
        return nx.get_node_attributes(self.g, attr)

    def get_edge_attributes(self, attr):
        return nx.get_edge_attributes(self.g, attr)

    def draw_graph(self, axes, highlight=None, direction=(0, 0, 1), rotation=None):
        """Draw a 2D projection of the graph (plane normal = direction) with matplotlib."""
        axes.clear()
        coords = self.get_node_attributes('location')
        if rotation is not None:
            coords = {node: np.dot(xyz, rotation) for node, xyz in coords.items()}
        pos = project3d(coords, np.array(direction))
        node_labels = self.get_node_attributes('label')
        dist_labels = self.get_edge_attributes('dist')
        nx.draw_networkx(self.g, pos, alpha=0.7, with_labels=False, edge_color='.4', ax=axes)
        if highlight is not None:
            # Highlighted nodes are redrawn on top in red.
            nx.draw_networkx_nodes(self.g, pos=pos, nodelist=highlight, node_color='r', ax=axes)
        nx.draw_networkx_labels(self.g, pos, labels=node_labels, ax=axes)
        nx.draw_networkx_edge_labels(self.g, pos, edge_labels=dist_labels, ax=axes)
        axes.axis('off')

    def draw_3d_graph(self, axes, highlight=None):
        """Draw the graph in 3D with matplotlib."""
        axes.clear()
        coords = self.get_node_attributes('location')
        labels = self.get_node_attributes('label')
        marked = highlight if highlight is not None else []
        for node, (xi, yi, zi) in coords.items():
            # Highlighted atoms are red, ordinary atoms blue.
            color = 'red' if node in marked else 'blue'
            # NOTE(review): labels[node] is passed positionally, which lands on
            # Axes3D.scatter's zdir slot rather than acting as a label — confirm.
            axes.scatter(xi, yi, zi, labels[node], c=color, alpha=0.9)
        for bond in self.edges():
            # Draw each chemical bond as a segment between its two endpoints.
            a, b = coords[bond[0]], coords[bond[1]]
            xs = np.array((a[0], b[0]))
            ys = np.array((a[1], b[1]))
            zs = np.array((a[2], b[2]))
            axes.plot(xs, ys, zs, c='black', alpha=0.9)

    def number_of_edges(self, u, v):
        return self.g.number_of_edges(u, v)
| [
"numpy.cross",
"networkx.draw_networkx_edge_labels",
"networkx.is_connected",
"networkx.get_edge_attributes",
"networkx.Graph",
"networkx.connected_components",
"networkx.draw_networkx",
"networkx.draw_networkx_nodes",
"numpy.array",
"numpy.dot",
"networkx.draw_networkx_labels",
"networkx.get_... | [((446, 465), 'numpy.cross', 'np.cross', (['norm_y', 'd'], {}), '(norm_y, d)\n', (454, 465), True, 'import numpy as np\n'), ((253, 278), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (267, 278), True, 'import numpy as np\n'), ((288, 307), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (296, 307), True, 'import numpy as np\n'), ((348, 367), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (356, 367), True, 'import numpy as np\n'), ((418, 436), 'numpy.linalg.norm', 'np.linalg.norm', (['y1'], {}), '(y1)\n', (432, 436), True, 'import numpy as np\n'), ((484, 502), 'numpy.linalg.norm', 'np.linalg.norm', (['x0'], {}), '(x0)\n', (498, 502), True, 'import numpy as np\n'), ((550, 569), 'numpy.array', 'np.array', (['points[k]'], {}), '(points[k])\n', (558, 569), True, 'import numpy as np\n'), ((839, 857), 'networkx.Graph', 'nx.Graph', (['nx_graph'], {}), '(nx_graph)\n', (847, 857), True, 'import networkx as nx\n'), ((1779, 1802), 'networkx.is_connected', 'nx.is_connected', (['self.g'], {}), '(self.g)\n', (1794, 1802), True, 'import networkx as nx\n'), ((1860, 1896), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.g', 'attr'], {}), '(self.g, attr)\n', (1882, 1896), True, 'import networkx as nx\n'), ((1954, 1990), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['self.g', 'attr'], {}), '(self.g, attr)\n', (1976, 1990), True, 'import networkx as nx\n'), ((2465, 2554), 'networkx.draw_networkx', 'nx.draw_networkx', (['self.g', 'pos'], {'alpha': '(0.7)', 'with_labels': '(False)', 'edge_color': '""".4"""', 'ax': 'axes'}), "(self.g, pos, alpha=0.7, with_labels=False, edge_color='.4',\n ax=axes)\n", (2481, 2554), True, 'import networkx as nx\n'), ((2690, 2749), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['self.g', 'pos'], {'labels': 'label', 'ax': 'axes'}), '(self.g, pos, labels=label, ax=axes)\n', (2713, 2749), True, 'import networkx as nx\n'), ((2758, 
2832), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['self.g', 'pos'], {'edge_labels': 'edge_label', 'ax': 'axes'}), '(self.g, pos, edge_labels=edge_label, ax=axes)\n', (2786, 2832), True, 'import networkx as nx\n'), ((382, 395), 'numpy.dot', 'np.dot', (['d', 'y0'], {}), '(d, y0)\n', (388, 395), True, 'import numpy as np\n'), ((624, 642), 'numpy.dot', 'np.dot', (['norm_y', 'p1'], {}), '(norm_y, p1)\n', (630, 642), True, 'import numpy as np\n'), ((644, 662), 'numpy.dot', 'np.dot', (['norm_x', 'p1'], {}), '(norm_x, p1)\n', (650, 662), True, 'import numpy as np\n'), ((1640, 1671), 'networkx.connected_components', 'nx.connected_components', (['self.g'], {}), '(self.g)\n', (1663, 1671), True, 'import networkx as nx\n'), ((2332, 2351), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (2340, 2351), True, 'import numpy as np\n'), ((2597, 2685), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['self.g'], {'pos': 'pos', 'nodelist': 'highlight', 'node_color': '"""r"""', 'ax': 'axes'}), "(self.g, pos=pos, nodelist=highlight, node_color='r',\n ax=axes)\n", (2619, 2685), True, 'import networkx as nx\n'), ((3457, 3501), 'numpy.array', 'np.array', (['(points[j[0]][0], points[j[1]][0])'], {}), '((points[j[0]][0], points[j[1]][0]))\n', (3465, 3501), True, 'import numpy as np\n'), ((3518, 3562), 'numpy.array', 'np.array', (['(points[j[0]][1], points[j[1]][1])'], {}), '((points[j[0]][1], points[j[1]][1]))\n', (3526, 3562), True, 'import numpy as np\n'), ((3579, 3623), 'numpy.array', 'np.array', (['(points[j[0]][2], points[j[1]][2])'], {}), '((points[j[0]][2], points[j[1]][2]))\n', (3587, 3623), True, 'import numpy as np\n'), ((588, 601), 'numpy.dot', 'np.dot', (['d', 'p0'], {}), '(d, p0)\n', (594, 601), True, 'import numpy as np\n'), ((2272, 2299), 'numpy.dot', 'np.dot', (['points[k]', 'rotation'], {}), '(points[k], rotation)\n', (2278, 2299), True, 'import numpy as np\n'), ((311, 330), 'numpy.array', 'np.array', (['[0, 0, 1]'], 
{}), '([0, 0, 1])\n', (319, 330), True, 'import numpy as np\n')] |
import numpy as np
from scipy import ndimage
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by <NAME> & <NAME>
This code loads and sharpens UCI HAR Dataset data.
UCI HAR Dataset data can be downloaded from:
https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones
Unzipped dataset should be placed inside the '../data/UCI HAR Dataset/' folder.
'''
dir_path = '../data/UCI HAR Dataset/'
def load_x(train_or_test):
    """
    Load the feature matrix of the UCI HAR train or test split.

    :param train_or_test: either "train" or "test"
    :return: np.array of shape (samples, features), dtype float
    :raises ValueError: if train_or_test is neither "train" nor "test"
    """
    global dir_path
    # Use == (value comparison): `is` on string literals relies on CPython
    # interning and emits a SyntaxWarning on modern Python.
    if train_or_test == "train":
        x_path = dir_path + 'train/X_train.txt'
    elif train_or_test == "test":
        x_path = dir_path + 'test/X_test.txt'
    else:
        raise ValueError("train_or_test must be 'train' or 'test', got %r" % (train_or_test,))
    result = []
    # `with` guarantees the file is closed even on error.
    with open(x_path) as f:
        for line in f:
            # split() collapses any run of whitespace, so the file's
            # inconsistent multiple blanks need no special handling (the old
            # single-pass replace('  ', ' ') missed runs of 3+ spaces).
            # A concrete list is required: on Python 3, appending the lazy
            # map object produced an object array instead of a float matrix.
            result.append([float(tok) for tok in line.split()])
    return np.array(result)
def load_y(train_or_test):
    """
    Load the label vector of the UCI HAR train or test split.

    :param train_or_test: either "train" or "test"
    :return: 1D np.array of int labels
    :raises ValueError: if train_or_test is neither "train" nor "test"
    """
    global dir_path
    # Use == (value comparison): `is` on string literals relies on CPython
    # interning and emits a SyntaxWarning on modern Python.
    if train_or_test == "train":
        y_path = dir_path + 'train/y_train.txt'
    elif train_or_test == "test":
        y_path = dir_path + 'test/y_test.txt'
    else:
        raise ValueError("train_or_test must be 'train' or 'test', got %r" % (train_or_test,))
    result = []
    # `with` guarantees the file is closed even on error.
    with open(y_path) as f:
        for line in f:
            result.append(int(line.strip()))
    return np.array(result)
def sharpen(x_test, sigma, alpha):
    """
    Unsharp-mask each row of x_test: row + alpha * (row - gaussian_blur(row)).

    :param x_test: 2D feature matrix, one sample per row
    :param sigma: Gaussian kernel width passed to scipy's gaussian_filter
    :param alpha: sharpening strength; 0 leaves the data unchanged
    :return: new np.array of the same shape holding the sharpened rows
    """
    output = np.empty(x_test.shape)
    for idx, row in enumerate(x_test):
        sample = np.array([row])
        smoothed = ndimage.gaussian_filter(sample, sigma)
        output[idx] = sample + alpha * (sample - smoothed)
    return output
| [
"numpy.array",
"numpy.empty",
"scipy.ndimage.gaussian_filter"
] | [((1051, 1067), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1059, 1067), True, 'import numpy as np\n'), ((1463, 1479), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1471, 1479), True, 'import numpy as np\n'), ((1581, 1597), 'numpy.empty', 'np.empty', (['(r, c)'], {}), '((r, c))\n', (1589, 1597), True, 'import numpy as np\n'), ((1647, 1662), 'numpy.array', 'np.array', (['[row]'], {}), '([row])\n', (1655, 1662), True, 'import numpy as np\n'), ((1681, 1717), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['test', 'sigma'], {}), '(test, sigma)\n', (1704, 1717), False, 'from scipy import ndimage\n')] |
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rng
import scipy.special as scp
import os
import writer
import main
#Same as above, but random
def random_airfoil_eval(n, param, boundary, control, forces_control):
    """
    Sample a random CST airfoil, simulate it, and write the resulting
    drag/lift coefficients to eval/feval.out.

    :param n: number of contour points for the CST parametrization
    :param param, boundary, control, forces_control: solver settings
        forwarded to main.airfoil_data
    """
    # Random parametrization. NOTE(review): deg/N1/N2 are drawn but
    # immediately overridden below; the draws are kept so the rng sequence
    # (and hence Au/Al) matches the original behavior.
    par = {'n': n, 'deg': int(rng.rand(1) * 10), 'N1': rng.rand(1), 'N2': rng.rand(1), 'trail_gap': 0.0}
    # Fix some variables
    par['N1'] = 0.5
    par['N2'] = 1.0
    par['deg'] = 2
    # Random CST coefficients; lower surface scaled down to keep the foil thin.
    scale = rng.rand(1)
    Au = np.multiply(rng.rand(par['deg'] + 1), scale)
    Al = np.multiply(rng.rand(par['deg'] + 1), scale * 0.2)
    # Record the sampled airfoil (optional, just to keep track of it).
    input_string = "{:.8f} {:.8f}".format(par['N1'], par['N2'])
    for val in Au:
        input_string += " {:.8f}".format(val)
    for val in Al:
        input_string += " {:.8f}".format(val)
    # `with` guarantees the file is flushed and closed before the solver runs
    # (the original leaked the handle, leaving the write buffered).
    with open('eval/feval.in', 'w') as fin:
        fin.write(input_string)
    # Simulate airfoil
    D = main.airfoil_data(Au, Al, par, param, boundary, control, forces_control)
    # Write aerodynamical coefficients for the last iteration.
    with open('eval/feval.out', 'w') as fout:
        fout.write("{:.8f} {:.8f}".format(D['Cd'], D['Cl']))
#Same as airfoil_data + feval, but for a circle
def circle_airfoil_eval(n, param, boundary, control, forces_control):
    """
    Mesh and simulate a circular contour of diameter 0.5 with OpenFOAM, then
    write its drag/lift coefficients to eval/feval.out.

    Same pipeline as main.airfoil_data + feval, but for a circle.

    :param n: number of contour points per half-circle
    :param param, boundary, control, forces_control: solver settings
        forwarded to the writer helpers
    """
    # Circle contour: upper arc (flipped) followed by the lower arc,
    # sharing the leading-edge point.
    x_aux = np.linspace(0.0, 1.0, n)
    y_aux = np.sqrt(0.25 - (x_aux - 0.5) ** 2)
    x = np.concatenate((np.flip(x_aux), x_aux[1:n]))
    y = np.concatenate((np.flip(y_aux), -y_aux[1:n]))
    writer.write_blockMeshDict(x, y, param)
    # OpenFOAM expects a "0" directory holding the initial/boundary fields;
    # makedirs(exist_ok=True) replaces the racy exists-then-mkdir check.
    os.makedirs("0", exist_ok=True)
    writer.write_boundaryCond(boundary)
    writer.write_controlDict(control)
    writer.write_forceCoeffs(forces_control)
    os.system("blockMesh > out_blockMesh.txt")
    os.system("checkMesh > out_checkMesh.txt")
    os.system("simpleFoam > out_simpleFoam.txt")
    # List completed iteration directories (numeric names), ascending.
    dirlist = [entry.path[2:] for entry in os.scandir() if entry.is_dir()]
    it = sorted(int(name) for name in dirlist if name.isnumeric())
    # Purge intermediate iteration dirs, keeping the first and the last.
    for i in range(control['purge']):
        os.system("rm -r {:d}".format(it[i + 1]))
    # Read data from the output of simpleFoam
    data = np.genfromtxt("postProcessing/forceCoeffs/0/coefficient_0.dat", delimiter='\t')
    labels = ['Iteration', 'Cd', 'Cs', 'Cl', 'CmRoll', 'CmPitch', 'CmYaw',
              'Cd(f)', 'Cd(r)', 'Cs(f)', 'Cs(r)', 'Cl(f)', 'Cl(r)']
    D = dict(zip(labels, data[-1]))
    # Write aerodynamical coefficients for the last iteration; `with`
    # closes the handle the original version leaked.
    with open('eval/feval.out', 'w') as fout:
        fout.write("{:.8f} {:.8f}".format(D['Cd'], D['Cl']))
#A test function
def test_dir(d,scale,n,param,boundary,control,forces_control,fvsolution):
    """
    Sweep the airfoil geometry along a search direction ``d`` and plot how
    Cd, Cl and Cd/Cl evolve, to inspect a gradient-descent direction.

    The starting contour (N1, N2, Au, Al) is read from eval/feval.in.

    :param d: search direction over [N1, N2, Au..., Al...]; the first two
              components are ignored (N1/N2 are held fixed)
    :param scale: scaling applied to the direction before stepping
    :param n: number of contour points for the CST parametrization
    :param param, boundary, control, forces_control, fvsolution:
        solver settings forwarded to main.airfoil_data
    """
    A=np.genfromtxt("eval/feval.in", delimiter=' ')
    # Number of CST coefficients per surface; the first 2 entries are N1, N2.
    n_cont=int((len(A)-2)/2)
    #Ignore first 2 components
    dA=d[2:]
    #Airfoil contour
    Au=A[2:(n_cont+2)]; Al=A[(n_cont+2):]
    #Parameters for CST airfoil parametrization
    par={'n':n, 'deg':(n_cont-1), 'N1':A[0], 'N2':A[1], 'trail_gap':0.0}
    Cd=[]; Cl=[]; CdCl=[]; delta=[]
    dA=np.multiply(dA,scale)
    # Imax steps of width h along the scaled direction.
    Imax=30
    h=1.0/Imax
    #step=-0.5
    step=0.0
    gr, ind = plt.subplots(4,figsize=(6,8))
    gr.suptitle('Variation of Cd, Cl, and Cd/Cl along gradient desc. dir.')
    for i in range(Imax):
        # The contour is advanced cumulatively by step*dA each iteration.
        Au = Au + np.multiply(dA[0:n_cont], step)
        Al = Al + np.multiply(dA[n_cont:], step)
        D = main.airfoil_data(Au,Al,par,param,boundary,control,forces_control,fvsolution)
        Cd.append(D['Cd'])
        Cl.append(D['Cl'])
        CdCl.append(D['Cd']/D['Cl'])
        [x,y]=main.cst(Au,Al,par)
        # Gray level "0.<Imax^2 - i^2>": later iterations plot darker.
        ind[0].plot(x,y,"0.{:d}".format(Imax**2-i**2))
        delta.append(step*scale)
        print("Step = {:d} --- Delta = {:.8f} --- Cd = {:.8f} --- Cl = {:.8f}".format(i, step*scale, D['Cd'], D['Cl']))
        step=step+h
    ind[1].plot(delta, Cd, color="blue")
    ind[2].plot(delta, Cl, color="orange")
    ind[3].plot(delta, CdCl, color="green")
    plt.show()
#Gradient of f, central differences, variables: Au, Al
def grad_feval(n, param, boundary, control, forces_control, fvsolution, eps):
    """
    Central-difference gradient of Cd, Cl, and f = Cd/Cl with respect to the
    CST coefficients (Au, Al) read from eval/feval.in.

    The raw gradient of f is written to eval/gradfeval.out (N1/N2 slots are
    zero since they are held fixed).

    :param n: number of contour points for the CST parametrization
    :param eps: finite-difference width (each side is perturbed by eps/2)
    :param param, boundary, control, forces_control, fvsolution:
        solver settings forwarded to main.airfoil_data
    :return: [gradCd, gradCl, gradf]; gradCd and gradCl are normalized
    """
    A = np.genfromtxt("eval/feval.in", delimiter=' ')
    n_cont = int((len(A) - 2) / 2)
    # Airfoil contour for fixed N1 and N2
    x = A[2:]
    # Parameters for CST airfoil parametrization
    par = {'n': n, 'deg': (n_cont - 1), 'N1': A[0], 'N2': A[1], 'trail_gap': 0.0}
    # Gradient of Cd and Cl in Au, Al
    gradCd = np.zeros(2 * n_cont)
    gradCl = np.zeros(2 * n_cont)
    gradf = np.zeros(2 * n_cont)
    step = eps / 2.0
    d = np.zeros(2 * n_cont)
    for i in range(2 * n_cont):
        d[i] = 1.0  # unit vector along the i-th coefficient
        x_plus = x + np.multiply(d, step)
        D = main.airfoil_data(x_plus[0:n_cont], x_plus[n_cont:], par, param, boundary, control, forces_control, fvsolution)
        Cd_plus = D['Cd']; Cl_plus = D['Cl']
        x_minus = x + np.multiply(d, -step)
        D = main.airfoil_data(x_minus[0:n_cont], x_minus[n_cont:], par, param, boundary, control, forces_control, fvsolution)
        Cd_minus = D['Cd']; Cl_minus = D['Cl']
        gradCd[i] = (Cd_plus - Cd_minus) / eps
        gradCl[i] = (Cl_plus - Cl_minus) / eps
        gradf[i] = (Cd_plus / Cl_plus - Cd_minus / Cl_minus) / eps
        d[i] = 0.0
        print("{:d}-th partial derivative computed".format(i + 1))
    gradCd = np.multiply(gradCd, 1.0 / np.linalg.norm(gradCd))
    gradCl = np.multiply(gradCl, 1.0 / np.linalg.norm(gradCl))
    # Write gradf to file; the leading "0.0 0.0" stands in for fixed N1/N2.
    input_string = "0.0 0.0"
    for val in gradf:
        input_string += " {:.8f}".format(val)
    # `with` closes (and flushes) the handle the original version leaked.
    with open('eval/gradfeval.out', 'w') as fout:
        fout.write(input_string)
    return [gradCd, gradCl, gradf]
def plot_airfoil(n, d, Au, Al, colscale, N1=0.5, N2=1.0):
    """
    Plot the CST airfoil defined by (Au, Al) as a gray curve.

    The original implementation referenced undefined names (A, cst, i);
    this version builds the CST parameter dict from the arguments instead
    and uses colscale as the gray level.

    :param n: number of contour points per surface
    :param d: polynomial degree of the CST parametrization
    :param Au: upper-surface CST coefficients
    :param Al: lower-surface CST coefficients
    :param colscale: integer gray level, used as matplotlib color "0.<colscale>"
    :param N1: CST class-function exponent (default 0.5, as used elsewhere in this file)
    :param N2: CST class-function exponent (default 1.0, as used elsewhere in this file)
    """
    par = {'n': n, 'deg': d, 'N1': N1, 'N2': N2, 'trail_gap': 0.0}
    [x, y] = main.cst(Au, Al, par)
    plt.plot(x, y, "0.{:d}".format(colscale))
    plt.show()
| [
"writer.write_blockMeshDict",
"numpy.flip",
"numpy.multiply",
"numpy.sqrt",
"numpy.random.rand",
"main.airfoil_data",
"writer.write_controlDict",
"writer.write_forceCoeffs",
"os.scandir",
"writer.write_boundaryCond",
"numpy.linspace",
"numpy.zeros",
"numpy.linalg.norm",
"os.system",
"mai... | [((432, 443), 'numpy.random.rand', 'rng.rand', (['(1)'], {}), '(1)\n', (440, 443), True, 'import numpy.random as rng\n'), ((872, 944), 'main.airfoil_data', 'main.airfoil_data', (['Au', 'Al', 'par', 'param', 'boundary', 'control', 'forces_control'], {}), '(Au, Al, par, param, boundary, control, forces_control)\n', (889, 944), False, 'import main\n'), ((1205, 1229), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'n'], {}), '(0.0, 1.0, n)\n', (1216, 1229), True, 'import numpy as np\n'), ((1235, 1269), 'numpy.sqrt', 'np.sqrt', (['(0.25 - (x_aux - 0.5) ** 2)'], {}), '(0.25 - (x_aux - 0.5) ** 2)\n', (1242, 1269), True, 'import numpy as np\n'), ((1365, 1404), 'writer.write_blockMeshDict', 'writer.write_blockMeshDict', (['x', 'y', 'param'], {}), '(x, y, param)\n', (1391, 1404), False, 'import writer\n'), ((1511, 1546), 'writer.write_boundaryCond', 'writer.write_boundaryCond', (['boundary'], {}), '(boundary)\n', (1536, 1546), False, 'import writer\n'), ((1548, 1581), 'writer.write_controlDict', 'writer.write_controlDict', (['control'], {}), '(control)\n', (1572, 1581), False, 'import writer\n'), ((1583, 1623), 'writer.write_forceCoeffs', 'writer.write_forceCoeffs', (['forces_control'], {}), '(forces_control)\n', (1607, 1623), False, 'import writer\n'), ((1625, 1667), 'os.system', 'os.system', (['"""blockMesh > out_blockMesh.txt"""'], {}), "('blockMesh > out_blockMesh.txt')\n", (1634, 1667), False, 'import os\n'), ((1669, 1711), 'os.system', 'os.system', (['"""checkMesh > out_checkMesh.txt"""'], {}), "('checkMesh > out_checkMesh.txt')\n", (1678, 1711), False, 'import os\n'), ((1713, 1757), 'os.system', 'os.system', (['"""simpleFoam > out_simpleFoam.txt"""'], {}), "('simpleFoam > out_simpleFoam.txt')\n", (1722, 1757), False, 'import os\n'), ((2303, 2382), 'numpy.genfromtxt', 'np.genfromtxt', (['"""postProcessing/forceCoeffs/0/coefficient_0.dat"""'], {'delimiter': '"""\t"""'}), "('postProcessing/forceCoeffs/0/coefficient_0.dat', delimiter='\\t')\n", (2316, 
2382), True, 'import numpy as np\n'), ((2780, 2825), 'numpy.genfromtxt', 'np.genfromtxt', (['"""eval/feval.in"""'], {'delimiter': '""" """'}), "('eval/feval.in', delimiter=' ')\n", (2793, 2825), True, 'import numpy as np\n'), ((3106, 3128), 'numpy.multiply', 'np.multiply', (['dA', 'scale'], {}), '(dA, scale)\n', (3117, 3128), True, 'import numpy as np\n'), ((3186, 3217), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)'], {'figsize': '(6, 8)'}), '(4, figsize=(6, 8))\n', (3198, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3919, 3929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3927, 3929), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4107), 'numpy.genfromtxt', 'np.genfromtxt', (['"""eval/feval.in"""'], {'delimiter': '""" """'}), "('eval/feval.in', delimiter=' ')\n", (4075, 4107), True, 'import numpy as np\n'), ((4343, 4363), 'numpy.zeros', 'np.zeros', (['(2 * n_cont)'], {}), '(2 * n_cont)\n', (4351, 4363), True, 'import numpy as np\n'), ((4370, 4390), 'numpy.zeros', 'np.zeros', (['(2 * n_cont)'], {}), '(2 * n_cont)\n', (4378, 4390), True, 'import numpy as np\n'), ((4396, 4416), 'numpy.zeros', 'np.zeros', (['(2 * n_cont)'], {}), '(2 * n_cont)\n', (4404, 4416), True, 'import numpy as np\n'), ((4434, 4454), 'numpy.zeros', 'np.zeros', (['(2 * n_cont)'], {}), '(2 * n_cont)\n', (4442, 4454), True, 'import numpy as np\n'), ((5589, 5599), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5597, 5599), True, 'import matplotlib.pyplot as plt\n'), ((297, 308), 'numpy.random.rand', 'rng.rand', (['(1)'], {}), '(1)\n', (305, 308), True, 'import numpy.random as rng\n'), ((315, 326), 'numpy.random.rand', 'rng.rand', (['(1)'], {}), '(1)\n', (323, 326), True, 'import numpy.random as rng\n'), ((460, 484), 'numpy.random.rand', 'rng.rand', (["(par['deg'] + 1)"], {}), "(par['deg'] + 1)\n", (468, 484), True, 'import numpy.random as rng\n'), ((507, 531), 'numpy.random.rand', 'rng.rand', (["(par['deg'] + 1)"], {}), "(par['deg'] + 1)\n", (515, 531), 
True, 'import numpy.random as rng\n'), ((1487, 1507), 'os.system', 'os.system', (['"""mkdir 0"""'], {}), "('mkdir 0')\n", (1496, 1507), False, 'import os\n'), ((3407, 3495), 'main.airfoil_data', 'main.airfoil_data', (['Au', 'Al', 'par', 'param', 'boundary', 'control', 'forces_control', 'fvsolution'], {}), '(Au, Al, par, param, boundary, control, forces_control,\n fvsolution)\n', (3424, 3495), False, 'import main\n'), ((3569, 3590), 'main.cst', 'main.cst', (['Au', 'Al', 'par'], {}), '(Au, Al, par)\n', (3577, 3590), False, 'import main\n'), ((4536, 4651), 'main.airfoil_data', 'main.airfoil_data', (['x_plus[0:n_cont]', 'x_plus[n_cont:]', 'par', 'param', 'boundary', 'control', 'forces_control', 'fvsolution'], {}), '(x_plus[0:n_cont], x_plus[n_cont:], par, param, boundary,\n control, forces_control, fvsolution)\n', (4553, 4651), False, 'import main\n'), ((4720, 4837), 'main.airfoil_data', 'main.airfoil_data', (['x_minus[0:n_cont]', 'x_minus[n_cont:]', 'par', 'param', 'boundary', 'control', 'forces_control', 'fvsolution'], {}), '(x_minus[0:n_cont], x_minus[n_cont:], par, param, boundary,\n control, forces_control, fvsolution)\n', (4737, 4837), False, 'import main\n'), ((1285, 1299), 'numpy.flip', 'np.flip', (['x_aux'], {}), '(x_aux)\n', (1292, 1299), True, 'import numpy as np\n'), ((1332, 1346), 'numpy.flip', 'np.flip', (['y_aux'], {}), '(y_aux)\n', (1339, 1346), True, 'import numpy as np\n'), ((1435, 1447), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1445, 1447), False, 'import os\n'), ((1813, 1825), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1823, 1825), False, 'import os\n'), ((3326, 3357), 'numpy.multiply', 'np.multiply', (['dA[0:n_cont]', 'step'], {}), '(dA[0:n_cont], step)\n', (3337, 3357), True, 'import numpy as np\n'), ((3370, 3400), 'numpy.multiply', 'np.multiply', (['dA[n_cont:]', 'step'], {}), '(dA[n_cont:], step)\n', (3381, 3400), True, 'import numpy as np\n'), ((4509, 4529), 'numpy.multiply', 'np.multiply', (['d', 'step'], {}), '(d, step)\n', (4520, 
4529), True, 'import numpy as np\n'), ((4692, 4713), 'numpy.multiply', 'np.multiply', (['d', '(-step)'], {}), '(d, -step)\n', (4703, 4713), True, 'import numpy as np\n'), ((5090, 5112), 'numpy.linalg.norm', 'np.linalg.norm', (['gradCd'], {}), '(gradCd)\n', (5104, 5112), True, 'import numpy as np\n'), ((5145, 5167), 'numpy.linalg.norm', 'np.linalg.norm', (['gradCl'], {}), '(gradCl)\n', (5159, 5167), True, 'import numpy as np\n'), ((275, 286), 'numpy.random.rand', 'rng.rand', (['(1)'], {}), '(1)\n', (283, 286), True, 'import numpy.random as rng\n')] |
"""
Created on August 06 15:20, 2020
@author: fassial
"""
import os
import timeit
import pyflann
import numpy as np
# local dep
import utils
# file location params
PREFIX = ".."  # repo-relative root under which the data directories live
# dataset & testdataset directories
DATASET = os.path.join(PREFIX, "dataset")
PREDATASET = os.path.join(PREFIX, "predataset")
# eval dir: where per-sample match scores are written
EVAL_DIR = os.path.join(".", "eval")
SCORE_FILE = os.path.join(EVAL_DIR, "scores.csv")
# remap params: features are rescaled to the W-bit unsigned integer range
W = 8
# flann params: number of nearest neighbours retrieved per query
K = 10
"""
ptopN:
calculate accuracy of dist-classifier based on RENE-encode
@params:
x_train(np.array) : feature of trainset
y_train(np.array) : label of trainset
x_test(np.array) : feature of testset
y_test(np.array) : label of testset
k(int) : number of check
@rets:
P(float) : accuracy of classifier
"""
def ptopK(x_train, y_train, x_test, y_test, k = K):
    """Compute the mean top-k label-match rate of an approximate-NN classifier.

    Builds an LSH index over x_train, retrieves the k nearest neighbours of
    every test sample, counts how many of their labels match the true label,
    and persists the per-query match counts to SCORE_FILE.
    """
    matcher = pyflann.FLANN()
    # Build the approximate nearest-neighbour index over the training set.
    print("start build_index...")
    build_params = matcher.build_index(
        x_train,
        algorithm = "lsh",
        target_precision = 0.9,
        log_level = "info"
    )
    print(build_params)
    print("complete build_index")
    # Retrieve the k nearest training samples for every test sample.
    print("start nn_index...")
    neighbours, distances = matcher.nn_index(
        x_test,
        num_neighbors = k,
        checks = build_params["checks"]
    )
    print(neighbours, distances)
    print("complete nn_index")
    accuracy = 0
    n_match = np.zeros((y_test.shape[0],))
    print("start calculate p...")
    for row in range(neighbours.shape[0]):
        neighbour_labels = y_train[neighbours[row]]
        # Fraction of the k neighbours whose label matches the true one.
        n_match[row] = np.sum(neighbour_labels == y_test[row])
        accuracy += n_match[row] / k
    print("complete calculate p")
    print(n_match)
    print("start save n_match...")
    if not os.path.exists(EVAL_DIR):
        os.mkdir(EVAL_DIR)
    if os.path.exists(SCORE_FILE):
        os.remove(SCORE_FILE)
    utils.store_data(
        filename = SCORE_FILE,
        src = n_match
    )
    print("complete save n_match")
    return accuracy / neighbours.shape[0]
"""
main:
main func
"""
def main():
    """Entry point: load the dataset, remap features to W-bit integers and
    report the top-K accuracy together with the total running time."""
    start_time = timeit.default_timer()
    # Load the train/test split (switch to PREDATASET for the preprocessed data).
    x_train, y_train, x_test, y_test = utils.load_dataset(dirpath = DATASET)
    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    # Remap both splits with the *global* maximum so that the train and test
    # features share one [0, 2**W - 1] scale.
    x_max = max(np.max(x_train), np.max(x_test))
    x_train_remap = utils.remap(x_train, (0, 2**W-1), x_max).astype(np.uint8)
    print(x_train_remap.shape, x_train_remap.dtype)
    x_test_remap = utils.remap(x_test, (0, 2**W-1), x_max).astype(np.uint8)
    print(x_test_remap.shape, x_test_remap.dtype)
    accuracy = ptopK(
        x_train = x_train_remap,
        y_train = y_train,
        x_test = x_test_remap,
        y_test = y_test,
        k = K
    )
    print("ptopK: %.2f%%" % (accuracy*100))
    end_time = timeit.default_timer()
    print("main runs for %.1fs" % (end_time-start_time))
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"timeit.default_timer",
"utils.store_data",
"os.path.join",
"numpy.max",
"pyflann.FLANN",
"numpy.zeros",
"numpy.sum",
"os.mkdir",
"utils.remap",
"utils.load_dataset",
"os.remove"
] | [((209, 240), 'os.path.join', 'os.path.join', (['PREFIX', '"""dataset"""'], {}), "(PREFIX, 'dataset')\n", (221, 240), False, 'import os\n'), ((254, 288), 'os.path.join', 'os.path.join', (['PREFIX', '"""predataset"""'], {}), "(PREFIX, 'predataset')\n", (266, 288), False, 'import os\n'), ((311, 336), 'os.path.join', 'os.path.join', (['"""."""', '"""eval"""'], {}), "('.', 'eval')\n", (323, 336), False, 'import os\n'), ((350, 386), 'os.path.join', 'os.path.join', (['EVAL_DIR', '"""scores.csv"""'], {}), "(EVAL_DIR, 'scores.csv')\n", (362, 386), False, 'import os\n'), ((920, 935), 'pyflann.FLANN', 'pyflann.FLANN', ([], {}), '()\n', (933, 935), False, 'import pyflann\n'), ((1492, 1520), 'numpy.zeros', 'np.zeros', (['(y_test.shape[0],)'], {}), '((y_test.shape[0],))\n', (1500, 1520), True, 'import numpy as np\n'), ((1895, 1921), 'os.path.exists', 'os.path.exists', (['SCORE_FILE'], {}), '(SCORE_FILE)\n', (1909, 1921), False, 'import os\n'), ((1949, 1999), 'utils.store_data', 'utils.store_data', ([], {'filename': 'SCORE_FILE', 'src': 'n_match'}), '(filename=SCORE_FILE, src=n_match)\n', (1965, 1999), False, 'import utils\n'), ((2177, 2199), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2197, 2199), False, 'import timeit\n'), ((2350, 2385), 'utils.load_dataset', 'utils.load_dataset', ([], {'dirpath': 'DATASET'}), '(dirpath=DATASET)\n', (2368, 2385), False, 'import utils\n'), ((3072, 3094), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3092, 3094), False, 'import timeit\n'), ((1646, 1672), 'numpy.sum', 'np.sum', (['(label == y_test[i])'], {}), '(label == y_test[i])\n', (1652, 1672), True, 'import numpy as np\n'), ((1843, 1867), 'os.path.exists', 'os.path.exists', (['EVAL_DIR'], {}), '(EVAL_DIR)\n', (1857, 1867), False, 'import os\n'), ((1869, 1887), 'os.mkdir', 'os.mkdir', (['EVAL_DIR'], {}), '(EVAL_DIR)\n', (1877, 1887), False, 'import os\n'), ((1923, 1944), 'os.remove', 'os.remove', (['SCORE_FILE'], {}), '(SCORE_FILE)\n', (1932, 
1944), False, 'import os\n'), ((2511, 2526), 'numpy.max', 'np.max', (['x_train'], {}), '(x_train)\n', (2517, 2526), True, 'import numpy as np\n'), ((2528, 2542), 'numpy.max', 'np.max', (['x_test'], {}), '(x_test)\n', (2534, 2542), True, 'import numpy as np\n'), ((2604, 2648), 'utils.remap', 'utils.remap', (['x_train', '(0, 2 ** W - 1)', 'x_max'], {}), '(x_train, (0, 2 ** W - 1), x_max)\n', (2615, 2648), False, 'import utils\n'), ((2730, 2773), 'utils.remap', 'utils.remap', (['x_test', '(0, 2 ** W - 1)', 'x_max'], {}), '(x_test, (0, 2 ** W - 1), x_max)\n', (2741, 2773), False, 'import utils\n')] |
import os
import glob
import json
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# CLI: which experiment runs to compare and which CSV columns to plot.
parser = argparse.ArgumentParser()
# Gym environment name; also used to locate the output directories.
parser.add_argument('--env', type=str, required=True)
# Hashes identifying the BC and DAgger runs to compare.
parser.add_argument('--hash-bc', type=str, required=True)
parser.add_argument('--hash-dagger', type=str, required=True)
# Log columns to plot; -x defaults to the first column of the parsed CSV.
parser.add_argument('-x', type=str)
parser.add_argument('-y', type=str, required=True)
# Optional figure labels; derived from -x/-y/--env when omitted.
parser.add_argument('--xlabel', type=str)
parser.add_argument('--ylabel', type=str)
parser.add_argument('--title', type=str)
ARGS = parser.parse_args()
# parse_csv() lower-cases header names, so normalise the requested column.
ARGS.y = ARGS.y.lower()
def parse_csv(file):
    """Parse a comma-separated log file into columns.

    The first line is treated as the header; names are stripped and
    lower-cased. Every data row must have exactly as many cells as the
    header. Returns (columns, names) where columns maps each name to the
    list of its (stripped, string-valued) cells, and names preserves the
    header order.
    """
    with open(file, mode='r') as handle:
        rows = [line.strip() for line in handle.readlines()]
    names = [name.strip().lower() for name in rows[0].split(',')]
    columns = {name: [] for name in names}
    for row in rows[1:]:
        cells = [cell.strip() for cell in row.split(',')]
        assert len(cells) == len(names)
        for name, cell in zip(names, cells):
            columns[name].append(cell)
    return columns, names
# Collect the y-column of every DAgger seed's log.
DATA_PATH_DAGGER = './output_dagger/%s/%s' % (ARGS.hash_dagger, ARGS.env)
y_dagger = []
paths = glob.glob(os.path.join(DATA_PATH_DAGGER, 'seed_*'))
for path in paths:
    file = os.path.join(path, 'log.csv')
    d, keys = parse_csv(file)
    y_dagger.append([float(x) for x in d[ARGS.y]])
# Same for the BC run.
DATA_PATH_BC = './output_bc/%s/%s' % (ARGS.hash_bc, ARGS.env)
y_bc = []
paths = glob.glob(os.path.join(DATA_PATH_BC, 'seed_*'))
for path in paths:
    file = os.path.join(path, 'log.csv')
    d, keys = parse_csv(file)
    y_bc.append([float(x) for x in d[ARGS.y]])
# NOTE(review): `keys` and `d` here leak out of the *last* loop iteration —
# the x-axis is read from a single seed's log; assumes all seeds share the
# same x column. TODO confirm.
ARGS.x = ARGS.x if ARGS.x else keys[0]
x = np.array([int(x) for x in d[ARGS.x]])
# Stack the per-seed curves; assumes every seed logged the same number of
# rows (otherwise these become ragged arrays) — TODO confirm.
y_bc = np.array(y_bc)
y_dagger = np.array(y_dagger)
# NOTE(review): `path` also leaks from the last loop — num_demos is read from
# one arbitrary seed's args.json.
with open(os.path.join(path, 'args.json'), mode='r') as f:
    num_demos = json.load(f)['num_demos']
# The expert score file stores "mean ± std"; keep only the mean.
with open('./expert_data_%d/%s-score.txt' % (num_demos, ARGS.env), mode='r') as f:
    expert_score = float(f.readlines()[0].split('±')[0].strip())
with plt.style.context('seaborn'):
    plt.rcParams.update({'font.size': 22})
    fig = plt.figure(figsize=(4, 4))
    # Mean curves per method, plus the expert score as a horizontal line.
    plt.plot(x, y_bc.mean(axis=0), label='BC')
    plt.plot(x, y_dagger.mean(axis=0), label='DAgger')
    plt.plot([x[0], x[-1]], [expert_score, expert_score], label='Expert')
    # Freeze the axes so the std bands below don't change the view.
    plt.autoscale(False)
    # +/- one standard deviation across seeds.
    plt.fill_between(x, y_bc.mean(axis=0) - y_bc.std(axis=0), y_bc.mean(axis=0) + y_bc.std(axis=0), alpha=0.25)
    plt.fill_between(x, y_dagger.mean(axis=0) - y_dagger.std(axis=0), y_dagger.mean(axis=0) + y_dagger.std(axis=0), alpha=0.25)
    plt.title(ARGS.title if ARGS.title else ARGS.env.split('-')[0])
    plt.xlabel(ARGS.xlabel if ARGS.xlabel else ARGS.x.title())
    plt.ylabel(ARGS.ylabel if ARGS.ylabel else ARGS.y.title())
    plt.legend(loc='lower right')
    plt.tight_layout()
    # Save the figure in all three formats next to both runs' outputs.
    fig.savefig(os.path.join(DATA_PATH_BC, 'plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y)), bbox_inches='tight')
    fig.savefig(os.path.join(DATA_PATH_BC, 'plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y)), bbox_inches='tight', dpi=600)
    fig.savefig(os.path.join(DATA_PATH_BC, 'plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y)), bbox_inches='tight')
    fig.savefig(os.path.join(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y)), bbox_inches='tight')
    fig.savefig(os.path.join(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y)), bbox_inches='tight', dpi=600)
    fig.savefig(os.path.join(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y)), bbox_inches='tight')
    # plt.show()
    plt.close()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.style.context",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.tight_layout",
"json.load",
"matp... | [((133, 158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (156, 158), False, 'import argparse\n'), ((1714, 1728), 'numpy.array', 'np.array', (['y_bc'], {}), '(y_bc)\n', (1722, 1728), True, 'import numpy as np\n'), ((1740, 1758), 'numpy.array', 'np.array', (['y_dagger'], {}), '(y_dagger)\n', (1748, 1758), True, 'import numpy as np\n'), ((1176, 1216), 'os.path.join', 'os.path.join', (['DATA_PATH_DAGGER', '"""seed_*"""'], {}), "(DATA_PATH_DAGGER, 'seed_*')\n", (1188, 1216), False, 'import os\n'), ((1248, 1277), 'os.path.join', 'os.path.join', (['path', '"""log.csv"""'], {}), "(path, 'log.csv')\n", (1260, 1277), False, 'import os\n'), ((1450, 1486), 'os.path.join', 'os.path.join', (['DATA_PATH_BC', '"""seed_*"""'], {}), "(DATA_PATH_BC, 'seed_*')\n", (1462, 1486), False, 'import os\n'), ((1518, 1547), 'os.path.join', 'os.path.join', (['path', '"""log.csv"""'], {}), "(path, 'log.csv')\n", (1530, 1547), False, 'import os\n'), ((2016, 2044), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""seaborn"""'], {}), "('seaborn')\n", (2033, 2044), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2088), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (2069, 2088), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2125), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (2109, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2301), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[0], x[-1]]', '[expert_score, expert_score]'], {'label': '"""Expert"""'}), "([x[0], x[-1]], [expert_score, expert_score], label='Expert')\n", (2240, 2301), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2326), 'matplotlib.pyplot.autoscale', 'plt.autoscale', (['(False)'], {}), '(False)\n', (2319, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2765, 2794), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), 
"(loc='lower right')\n", (2775, 2794), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2817), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2815, 2817), True, 'import matplotlib.pyplot as plt\n'), ((3505, 3516), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((1770, 1801), 'os.path.join', 'os.path.join', (['path', '"""args.json"""'], {}), "(path, 'args.json')\n", (1782, 1801), False, 'import os\n'), ((1835, 1847), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1844, 1847), False, 'import json\n'), ((2834, 2901), 'os.path.join', 'os.path.join', (['DATA_PATH_BC', "('plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_BC, 'plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y))\n", (2846, 2901), False, 'import os\n'), ((2940, 3007), 'os.path.join', 'os.path.join', (['DATA_PATH_BC', "('plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_BC, 'plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y))\n", (2952, 3007), False, 'import os\n'), ((3055, 3122), 'os.path.join', 'os.path.join', (['DATA_PATH_BC', "('plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_BC, 'plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y))\n", (3067, 3122), False, 'import os\n'), ((3161, 3232), 'os.path.join', 'os.path.join', (['DATA_PATH_DAGGER', "('plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.pdf' % (ARGS.x, ARGS.y))\n", (3173, 3232), False, 'import os\n'), ((3271, 3342), 'os.path.join', 'os.path.join', (['DATA_PATH_DAGGER', "('plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.png' % (ARGS.x, ARGS.y))\n", (3283, 3342), False, 'import os\n'), ((3390, 3461), 'os.path.join', 'os.path.join', (['DATA_PATH_DAGGER', "('plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y))"], {}), "(DATA_PATH_DAGGER, 'plot_x=%s_y=%s.svg' % (ARGS.x, ARGS.y))\n", (3402, 3461), False, 'import os\n')] |
import tensorflow as tf
from keras.models import Sequential,load_model,model_from_json
from keras.layers import Dense, Dropout,Activation,MaxPooling2D,Conv2D,Flatten
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing.image import load_img
from keras.preprocessing import image
import numpy as np
import h5py
import os
import sys
import json
from sklearn.preprocessing import StandardScaler
from predictor import sc
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
# Define a flask app
app = Flask(__name__)
# Rebuild the Keras model architecture from its saved JSON description.
with open('customer_churn_prediction_model.json','r') as f:
    model = model_from_json(f.read())
# Load your trained model
# (weights are stored separately from the architecture in the .h5 file)
model.load_weights('customer_churn_prediction_model.h5')
print('Model loaded. Check http://127.0.0.1:5000/')
@app.route('/', methods=['GET'])
def index():
    """Serve the prediction form page (templates/prediction.html)."""
    # Main page
    return render_template('prediction.html')
@app.route('/', methods=['GET', 'POST'])
def upload():
    """Handle the prediction form.

    Reads the customer features from the POST form, one-hot encodes the
    country, scales the feature vector with the fitted StandardScaler and
    returns the churn prediction as a plain string. Returns None for
    non-POST requests (unchanged from the original behaviour).
    """
    if request.method == 'POST':
        # Form values arrive as strings; convert the numeric fields
        # explicitly instead of relying on sklearn to coerce a string array.
        credit_score = float(request.form['cr_score'])
        age = float(request.form['age'])
        tenure = float(request.form['tenure'])
        # NOTE(review): these two use .get() and would be None if the field
        # is missing, unlike the bracket lookups above — confirm intent.
        balance = float(request.form.get('balance'))
        number_of_products = float(request.form.get('no_of_products'))
        estimated_salary = float(request.form['salary'])
        country = request.form['country']
        # assumes the form sends numeric codes (e.g. 0/1) for these three
        # fields, matching the model's training encoding — TODO confirm
        gender = float(request.form['gender'])
        has_credit_card = float(request.form['cr_card'])
        is_active_member = float(request.form['active_member'])
        print([credit_score, age, tenure, balance, number_of_products,
               estimated_salary, country, gender, has_credit_card,
               is_active_member])
        # One-hot encode the country with France as the dropped baseline:
        # [Germany, Spain].
        if country == "France":
            countries = [0, 0]
        elif country == "Germany":
            countries = [1, 0]
        else:
            countries = [0, 1]
        # Feature order must match the order used when fitting `sc`/`model`.
        features = np.array([[
            countries[0], countries[1], credit_score, gender, age, tenure,
            balance, number_of_products, has_credit_card, is_active_member,
            estimated_salary]])
        prediction = model.predict(sc.transform(features))
        # The model outputs a churn probability; threshold at 0.5.
        if prediction > 0.5:
            result = "The customer will leave the bank"
        else:
            result = "The customer won't leave the bank"
        return result
    return None
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/auto-reload;
    # do not run with this enabled in production.
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
| [
"flask.render_template",
"numpy.array",
"flask.request.form.get",
"flask.Flask"
] | [((628, 643), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (633, 643), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((958, 992), 'flask.render_template', 'render_template', (['"""prediction.html"""'], {}), "('prediction.html')\n", (973, 992), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1263, 1290), 'flask.request.form.get', 'request.form.get', (['"""balance"""'], {}), "('balance')\n", (1279, 1290), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1320, 1354), 'flask.request.form.get', 'request.form.get', (['"""no_of_products"""'], {}), "('no_of_products')\n", (1336, 1354), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1992, 2157), 'numpy.array', 'np.array', (['[[countries[0], countries[1], credit_score, gender, age, tenure, balance,\n number_of_products, has_credit_card, is_active_member, estimated_salary]]'], {}), '([[countries[0], countries[1], credit_score, gender, age, tenure,\n balance, number_of_products, has_credit_card, is_active_member,\n estimated_salary]])\n', (2000, 2157), True, 'import numpy as np\n')] |
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def knee_pt(y, x=None):
x_was_none = False
use_absolute_dev_p = True
res_x = np.nan
idx_of_result = np.nan
if type(y) is not np.ndarray:
print('knee_pt: y must be a numpy 1D vector')
return res_x, idx_of_result
else:
if y.ndim >= 2:
print('knee_pt: y must be 1 dimensional')
return res_x, idx_of_result
if np.size(y) == 0:
print('knee_pt: y can not be an empty vector')
return res_x, idx_of_result
else:
if x is None:
x_was_none = True
x = np.arange(1, np.amax(y.shape) + 1, dtype=np.int)
if x.shape != y.shape:
print('knee_pt: y and x must have the same dimensions')
return res_x, idx_of_result
if y.size < 3:
res_x, idx_of_result = np.min(y), np.argmin(y)
return res_x, idx_of_result
if np.all(np.diff(x) >= 0) and (not x_was_none):
idx = np.argsort(x)
y = np.sort(y)
x = np.sort(x)
else:
idx = np.arange(0, np.amax(x.shape))
sigma_xy = np.cumsum(np.multiply(x, y), axis=0)
sigma_x = np.cumsum(x, axis=0)
sigma_y = np.cumsum(y, axis=0)
sigma_xx = np.cumsum(np.multiply(x, x), axis=0)
n = np.arange(1, np.amax(y.shape) + 1).conj().T
det = np.multiply(n, sigma_xx) - np.multiply(sigma_x, sigma_x)
mfwd = (np.multiply(n, sigma_xy) -
np.multiply(sigma_x, sigma_y)) / det
bfwd = -1 * ((np.multiply(sigma_x, sigma_xy) -
np.multiply(sigma_xx, sigma_y)) / det)
sigma_xy = np.cumsum(np.multiply(x[::-1], y[::-1]), axis=0)
sigma_x = np.cumsum(x[::-1], axis=0)
sigma_y = np.cumsum(y[::-1], axis=0)
sigma_xx = np.cumsum(np.multiply(x[::-1], x[::-1]), axis=0)
n = np.arange(1, np.amax(y.shape) + 1).conj().T
det = np.multiply(n, sigma_xx) - np.multiply(sigma_x, sigma_x)
mbck = ((np.multiply(n, sigma_xy) -
np.multiply(sigma_x, sigma_y)) / det)[::-1]
bbck = (-1 *
((np.multiply(sigma_x, sigma_xy) -
np.multiply(sigma_xx, sigma_y)) / det))[::-1]
error_curve = np.full(y.shape, np.nan)
for breakpt in range(1, np.amax((y - 1).shape)):
delsfwd = (np.multiply(mfwd[breakpt], x[0:breakpt + 1]) +
bfwd[breakpt]) - y[0:breakpt + 1]
delsbck = (np.multiply(mbck[breakpt], x[breakpt:]) +
bbck[breakpt]) - y[breakpt:]
if use_absolute_dev_p:
error_curve[breakpt] = \
np.sum(np.abs(delsfwd)) + np.sum(np.abs(delsbck))
else:
error_curve[breakpt] = \
np.sqrt(np.sum(np.multiply(delsfwd, delsfwd))) + \
np.sqrt(np.sum(np.multiply(delsbck, delsbck)))
try:
loc = np.nanargmin(error_curve)
except ValueError as e:
loc = 0
res_x = x[loc]
idx_of_result = idx[loc]
return res_x, idx_of_result
| [
"numpy.abs",
"numpy.multiply",
"numpy.nanargmin",
"numpy.full",
"numpy.size",
"numpy.sort",
"numpy.diff",
"numpy.argsort",
"numpy.min",
"numpy.argmin",
"numpy.cumsum",
"numpy.amax",
"warnings.filterwarnings"
] | [((36, 69), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (59, 69), False, 'import warnings\n'), ((458, 468), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (465, 468), True, 'import numpy as np\n'), ((1314, 1334), 'numpy.cumsum', 'np.cumsum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1323, 1334), True, 'import numpy as np\n'), ((1357, 1377), 'numpy.cumsum', 'np.cumsum', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (1366, 1377), True, 'import numpy as np\n'), ((1896, 1922), 'numpy.cumsum', 'np.cumsum', (['x[::-1]'], {'axis': '(0)'}), '(x[::-1], axis=0)\n', (1905, 1922), True, 'import numpy as np\n'), ((1945, 1971), 'numpy.cumsum', 'np.cumsum', (['y[::-1]'], {'axis': '(0)'}), '(y[::-1], axis=0)\n', (1954, 1971), True, 'import numpy as np\n'), ((2467, 2491), 'numpy.full', 'np.full', (['y.shape', 'np.nan'], {}), '(y.shape, np.nan)\n', (2474, 2491), True, 'import numpy as np\n'), ((1085, 1098), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (1095, 1098), True, 'import numpy as np\n'), ((1119, 1129), 'numpy.sort', 'np.sort', (['y'], {}), '(y)\n', (1126, 1129), True, 'import numpy as np\n'), ((1150, 1160), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1157, 1160), True, 'import numpy as np\n'), ((1265, 1282), 'numpy.multiply', 'np.multiply', (['x', 'y'], {}), '(x, y)\n', (1276, 1282), True, 'import numpy as np\n'), ((1411, 1428), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (1422, 1428), True, 'import numpy as np\n'), ((1516, 1540), 'numpy.multiply', 'np.multiply', (['n', 'sigma_xx'], {}), '(n, sigma_xx)\n', (1527, 1540), True, 'import numpy as np\n'), ((1543, 1572), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_x'], {}), '(sigma_x, sigma_x)\n', (1554, 1572), True, 'import numpy as np\n'), ((1835, 1864), 'numpy.multiply', 'np.multiply', (['x[::-1]', 'y[::-1]'], {}), '(x[::-1], y[::-1])\n', (1846, 1864), True, 'import numpy as np\n'), ((2005, 2034), 'numpy.multiply', 'np.multiply', 
(['x[::-1]', 'x[::-1]'], {}), '(x[::-1], x[::-1])\n', (2016, 2034), True, 'import numpy as np\n'), ((2122, 2146), 'numpy.multiply', 'np.multiply', (['n', 'sigma_xx'], {}), '(n, sigma_xx)\n', (2133, 2146), True, 'import numpy as np\n'), ((2149, 2178), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_x'], {}), '(sigma_x, sigma_x)\n', (2160, 2178), True, 'import numpy as np\n'), ((2528, 2550), 'numpy.amax', 'np.amax', (['(y - 1).shape'], {}), '((y - 1).shape)\n', (2535, 2550), True, 'import numpy as np\n'), ((3223, 3248), 'numpy.nanargmin', 'np.nanargmin', (['error_curve'], {}), '(error_curve)\n', (3235, 3248), True, 'import numpy as np\n'), ((934, 943), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (940, 943), True, 'import numpy as np\n'), ((945, 957), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (954, 957), True, 'import numpy as np\n'), ((1214, 1230), 'numpy.amax', 'np.amax', (['x.shape'], {}), '(x.shape)\n', (1221, 1230), True, 'import numpy as np\n'), ((1593, 1617), 'numpy.multiply', 'np.multiply', (['n', 'sigma_xy'], {}), '(n, sigma_xy)\n', (1604, 1617), True, 'import numpy as np\n'), ((1640, 1669), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_y'], {}), '(sigma_x, sigma_y)\n', (1651, 1669), True, 'import numpy as np\n'), ((681, 697), 'numpy.amax', 'np.amax', (['y.shape'], {}), '(y.shape)\n', (688, 697), True, 'import numpy as np\n'), ((1024, 1034), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (1031, 1034), True, 'import numpy as np\n'), ((1703, 1733), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_xy'], {}), '(sigma_x, sigma_xy)\n', (1714, 1733), True, 'import numpy as np\n'), ((1762, 1792), 'numpy.multiply', 'np.multiply', (['sigma_xx', 'sigma_y'], {}), '(sigma_xx, sigma_y)\n', (1773, 1792), True, 'import numpy as np\n'), ((2200, 2224), 'numpy.multiply', 'np.multiply', (['n', 'sigma_xy'], {}), '(n, sigma_xy)\n', (2211, 2224), True, 'import numpy as np\n'), ((2248, 2277), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_y'], {}), 
'(sigma_x, sigma_y)\n', (2259, 2277), True, 'import numpy as np\n'), ((2580, 2624), 'numpy.multiply', 'np.multiply', (['mfwd[breakpt]', 'x[0:breakpt + 1]'], {}), '(mfwd[breakpt], x[0:breakpt + 1])\n', (2591, 2624), True, 'import numpy as np\n'), ((2715, 2754), 'numpy.multiply', 'np.multiply', (['mbck[breakpt]', 'x[breakpt:]'], {}), '(mbck[breakpt], x[breakpt:])\n', (2726, 2754), True, 'import numpy as np\n'), ((2339, 2369), 'numpy.multiply', 'np.multiply', (['sigma_x', 'sigma_xy'], {}), '(sigma_x, sigma_xy)\n', (2350, 2369), True, 'import numpy as np\n'), ((2394, 2424), 'numpy.multiply', 'np.multiply', (['sigma_xx', 'sigma_y'], {}), '(sigma_xx, sigma_y)\n', (2405, 2424), True, 'import numpy as np\n'), ((2928, 2943), 'numpy.abs', 'np.abs', (['delsfwd'], {}), '(delsfwd)\n', (2934, 2943), True, 'import numpy as np\n'), ((2954, 2969), 'numpy.abs', 'np.abs', (['delsbck'], {}), '(delsbck)\n', (2960, 2969), True, 'import numpy as np\n'), ((1467, 1483), 'numpy.amax', 'np.amax', (['y.shape'], {}), '(y.shape)\n', (1474, 1483), True, 'import numpy as np\n'), ((2073, 2089), 'numpy.amax', 'np.amax', (['y.shape'], {}), '(y.shape)\n', (2080, 2089), True, 'import numpy as np\n'), ((3077, 3106), 'numpy.multiply', 'np.multiply', (['delsfwd', 'delsfwd'], {}), '(delsfwd, delsfwd)\n', (3088, 3106), True, 'import numpy as np\n'), ((3152, 3181), 'numpy.multiply', 'np.multiply', (['delsbck', 'delsbck'], {}), '(delsbck, delsbck)\n', (3163, 3181), True, 'import numpy as np\n')] |
#########################################################################
# Dicomifier - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#########################################################################
import json
import itertools
import pickle
import re
import numpy
import odil
from .. import logger
from . import siemens
def get_stacks(data_sets, extra_splitters=None):
    """ Return the stacks contained in the data sets. The result is a dictionary
        in which the values are pairs of (data_set, frame_index) (in the case
        of single-frame data sets, frame_index is None), and in which the keys
        are tuples of selectors. In this context, a selector is defined as
        a pair of (group sequence, group, tag) (group sequence and group being
        None for single-frame data sets), and a value.

        :param data_sets: list of dicom data sets
        :param extra_splitters: additional splitters to be used when building
            stacks
    """
    splitters = _get_splitters(data_sets)
    if extra_splitters:
        splitters.extend(extra_splitters)
    stacks = {}
    # Build one selector ((group_sequence, group, tag), value) for a frame,
    # or None when the splitter's getter yields no value for this frame.
    def build_selector(
            data_set, getter, group_sequence, group, tag, in_stack_position):
        selector = None
        if getter is get_dimension_index:
            # get_dimension_index additionally needs the position of the
            # In-Stack Position entry so it can be excluded from the value.
            original_getter = getter
            getter = lambda d,t: original_getter(d, t, in_stack_position)
        if group is not None and group in data_set:
            value = getter(data_set[group][0], tag)
        else:
            value = getter(data_set, tag)
        if value is not None:
            selector = ((group_sequence, group, tag), tuple(value))
        return selector
    for data_set in data_sets:
        # frames: list of [frame_infos, frame_index, group_sequences] where
        # frame_infos are the data sets the splitters are evaluated on.
        frames = []
        in_stack_position = None
        if odil.registry.SharedFunctionalGroupsSequence not in data_set:
            # Single-frame data set: evaluate the splitters on the top level.
            frames.append([(data_set,), None, [None]])
        else:
            # Multi-frame data set: one entry per frame, each evaluated on
            # both the shared and the per-frame functional groups.
            in_stack_position = get_in_stack_position_index(data_set)
            shared_groups = data_set[odil.registry.SharedFunctionalGroupsSequence][0]
            frames_groups = data_set[odil.registry.PerFrameFunctionalGroupsSequence]
            group_sequences = [
                odil.registry.SharedFunctionalGroupsSequence,
                odil.registry.PerFrameFunctionalGroupsSequence
            ]
            frames.extend([
                [(shared_groups, frame_groups), i, group_sequences]
                for i, frame_groups in enumerate(frames_groups)])
        for frame_infos, frame_index, group_sequences in frames:
            key = []
            for (group, tag), getter in splitters:
                if frame_index is None and group is not None:
                    # Use top-level tags only for single-frame data sets
                    continue
                elif frame_index is not None and group is None:
                    # Use frame group tags only for multi-frame data sets
                    continue
                for frame_info, group_sequence in zip(frame_infos, group_sequences):
                    selector = build_selector(
                        frame_info, getter, group_sequence, group, tag,
                        in_stack_position)
                    if selector is not None:
                        key.append(selector)
            # Frames sharing the same selector tuple belong to the same stack.
            stacks.setdefault(tuple(key), []).append((data_set, frame_index))
    # Normalize the keys so that all stacks have the same key fields
    # (fields missing from a stack's key are padded with a None value).
    key_items = set()
    for key in stacks.keys():
        key_items.update(x[0] for x in key)
    normalized_keys = {}
    for key in stacks.keys():
        normalized_keys[key] = list(key)
        for key_item in key_items:
            if key_item not in [x[0] for x in key]:
                normalized_keys[key].append((key_item, None))
    for key, normalized_key in normalized_keys.items():
        normalized_keys[key] = tuple(normalized_key)
    stacks = { normalized_keys[key]: value for key, value in stacks.items() }
    # Simplify keys: remove those that have the same value for all stacks
    keys = numpy.asarray(list(stacks.keys()), dtype=object)
    to_keep = []
    for index in range(keys.shape[1]):
        # keys[:,index,:] holds (selector, value) of key field `index` for
        # every stack; column 1 is the value.
        unique_values = set(keys[:,index,:][:,1])
        # We need to keep these keys as they will be used in the sort() function
        is_sorting_key = keys[:,index,:][0][0][2] in [
            odil.registry.ImageOrientationPatient,
            odil.registry.DimensionIndexValues
        ]
        if len(unique_values) > 1 or is_sorting_key:
            to_keep.append(index)
    stacks = {
        tuple(v for (i, v) in enumerate(stack_key) if i in to_keep): stack_value
        for stack_key, stack_value in stacks.items()
    }
    return stacks
def sort(key, frames):
    """ Sort the frames of a stack in-place according to the items present in
        the stack key: by In-Stack Position when the key contains Dimension
        Index Values, otherwise by the position of each frame along the slice
        normal when the key contains Image Orientation (Patient).
    """
    if len(frames) <= 1:
        # Nothing to order.
        return
    ordering = None
    for (_, _, tag), value in key:
        if tag == odil.registry.DimensionIndexValues:
            # sort by In-Stack Position: gather, for each frame, the value of
            # the In-Stack Position entry of its Dimension Index Values.
            position = []
            for data_set, frame_index in frames:
                position_index = get_in_stack_position_index(data_set)
                frame = data_set[
                    odil.registry.PerFrameFunctionalGroupsSequence][frame_index]
                frame_content = frame[odil.registry.FrameContentSequence][0]
                position.append(
                    frame_content[odil.registry.DimensionIndexValues][position_index])
            # BUG FIX: the previous code used numpy.argsort(position) as the
            # per-frame sort key. argsort yields the *permutation* that sorts
            # the array, not the rank of each element, so frames ended up
            # ordered by the inverse permutation (wrong whenever that
            # permutation is not an involution). Sorting directly by each
            # frame's position value is both simpler and correct.
            keydict = dict(zip((id(x) for x in frames), position))
            ordering = lambda x: keydict[id(x)]
            break
        if tag == odil.registry.ImageOrientationPatient:
            data_set, frame_idx = frames[0]
            if get_frame_position(data_set, frame_idx) is not None:
                # Order frames by their projection on the slice normal.
                normal = numpy.cross(value[:3], value[3:])
                ordering = lambda x: numpy.dot(get_frame_position(*x), normal)
                break
            else:
                logger.warning(
                    "Orientation found but no position available to sort frames")
    if ordering is not None:
        frames.sort(key=ordering)
    else:
        logger.warning(
            "Cannot sort frames for the moment, available tags : {}".format(
                [x[0][2].get_name() for x in key]))
def get_frame_position(data_set, frame_index):
    """ Return the Image Position (Patient) of the specified frame, looking
        in the per-frame Plane Position sequence for multi-frame data sets
        and at the top level otherwise. Return None when unavailable.
    """
    container = data_set
    if odil.registry.PerFrameFunctionalGroupsSequence in data_set:
        frame = data_set[odil.registry.PerFrameFunctionalGroupsSequence][frame_index]
        if odil.registry.PlanePositionSequence not in frame:
            return None
        container = frame[odil.registry.PlanePositionSequence][0]
    if odil.registry.ImagePositionPatient in container:
        return container[odil.registry.ImagePositionPatient]
    return None
def get_in_stack_position_index(data_set):
    """ Return the position of the In-Stack Position entry within the
        Dimension Index, or None when it is absent or ambiguous.
    """
    if (
            odil.registry.DimensionIndexSequence not in data_set
            or data_set.empty(odil.registry.DimensionIndexSequence)):
        return None
    matches = set()
    sequence = data_set[odil.registry.DimensionIndexSequence]
    for index, item in enumerate(sequence):
        if odil.registry.DimensionIndexPointer not in item:
            continue
        pointer = item[odil.registry.DimensionIndexPointer][0]
        if odil.Tag(pointer) == odil.registry.InStackPositionNumber:
            matches.add(index)
    # Exactly one In-Stack Position entry is expected; anything else is
    # treated as "not found".
    return matches.pop() if len(matches) == 1 else None
class OrientationGetter(object):
    """ Map the orientation of a data set to a canonical representative,
        so that small variations of the actual orientation all yield the
        same value.
    """
    def __init__(self):
        # Maps tuple(plane normal) -> first orientation seen with that normal.
        self._orientations = {}
    def __call__(self, data_set, tag):
        value = data_set.get(tag)
        if value is None:
            return None
        # WARNING: a rotating plane will yield the same normal
        normal = numpy.cross(value[:3], value[3:])
        for known_normal, known_value in self._orientations.items():
            if OrientationGetter._comparator(normal, known_normal):
                # Close to an already-seen plane: reuse its orientation.
                return tuple(known_value)
        self._orientations[tuple(normal)] = value
        return tuple(value)
    @property
    def orientations(self):
        return self._orientations
    @staticmethod
    def _comparator(o1, o2, epsilon=0.05):
        # Two empty orientations compare equal; an empty one never matches
        # a non-empty one; otherwise compare with an infinity-norm tolerance.
        o1_empty = numpy.shape(o1) == (0,)
        o2_empty = numpy.shape(o2) == (0,)
        if o1_empty and o2_empty:
            return True
        if o1_empty or o2_empty:
            return False
        return numpy.linalg.norm(numpy.subtract(o1, o2), numpy.inf) <= epsilon
def get_dimension_index(data_set, tag, in_stack_position_index):
    """ Return the Dimension Index Values with the In-Stack Position entry
        removed, so that frames of different volumes can be told apart.

        :param in_stack_position_index: index of the In Stack Position element
            within the Dimension Index tuple
    """
    index_values = data_set.get(tag)
    if index_values is None:
        return None
    if in_stack_position_index is None:
        raise Exception(
            "Dimension Index Values found but InStackPosition is missing")
    remaining = list(index_values)
    del remaining[in_stack_position_index]
    return tuple(remaining)
def get_diffusion(data_set, tag):
    """Return the diffusion information (b-value, sensitization) from the
    data set, or None when diffusion is absent, isotropic or unset.
    """
    value = data_set.get(tag)
    if value is None:
        return None
    item = value[0]
    b_value = item[odil.registry.DiffusionBValue][0]
    directionality = item[odil.registry.DiffusionDirectionality][0]
    if directionality == b"DIRECTIONAL":
        gradient = item[odil.registry.DiffusionGradientDirectionSequence][0]
        sensitization = tuple(
            gradient[odil.registry.DiffusionGradientOrientation])
    elif directionality == b"BMATRIX":
        matrix = item[odil.registry.DiffusionBMatrixSequence][0]
        sensitization = tuple(
            matrix[getattr(odil.registry, "DiffusionBValue{}".format(x))][0]
            for x in ["XX", "XY", "XZ", "YY", "YZ", "ZZ"])
    elif directionality in (b"ISOTROPIC", b"NONE"):
        return None
    else:
        raise Exception(
            "Unknown directionality: {}".format(directionality))
    return (b_value, sensitization)
def frame_group_index_getter(data_set, tag):
    """ Return bruker_to_dicom-specific frame group information.
    """
    value = data_set.get(tag)
    if value is None:
        return None
    # Look for the contributing-equipment item written by Dicomifier.
    entries = [
        item for item in value
        if item[odil.registry.Manufacturer][0] == b"Dicomifier"
        and item[odil.registry.ManufacturerModelName][0]
            == b"Bruker Frame Group index"]
    if not entries:
        return None
    if len(entries) > 1:
        raise Exception("Multiple Frame Group index entries found")
    description = json.loads(
        entries[0][odil.registry.ContributionDescription][0].decode())
    return tuple(tuple(x) for x in description)
def ge_diffusion_getter(data_set, tag):
    """ Return GE-specific diffusion data (gradient direction and b-value),
        or None when it cannot be determined.
    """
    if data_set[odil.registry.Manufacturer][0] != b"GE MEDICAL SYSTEMS":
        return None
    # GEMS_ACQU_01 contains directions, GEMS_PARM_01 contains b-value
    gems_acq = _find_private_creator(data_set, b"GEMS_ACQU_01", 0x0019)
    gems_parm = _find_private_creator(data_set, b"GEMS_PARM_01", 0x0043)
    direction = None
    if gems_acq is not None:
        direction = tuple(
            data_set.get(odil.Tag(gems_acq+x), [None])[0]
            for x in [0xbb, 0xbc, 0xbd])
    if not direction or not isinstance(direction[0], (int, float)):
        # BUG FIX: a missing direction previously fell through to
        # numpy.linalg.norm(None) below and raised instead of returning None.
        return None
    # WARNING: this is the *maximal* b-value. The real b-value is determined
    # by the square of the norm of the gradient direction (at on RX29.0).
    # This is still not enough for multiple b=0 in the same series
    maximal_b_value = None
    if gems_parm is not None:
        # BUG FIX: odil.Tag(gems_parm+0x39) used to be evaluated even when
        # the private creator was absent (gems_parm is None), raising
        # TypeError instead of falling back to the standard element.
        maximal_b_value = data_set.get(odil.Tag(gems_parm+0x39), [None])[0]
    if maximal_b_value is None:
        maximal_b_value = data_set.get(odil.registry.DiffusionBValue, [None])[0]
    if maximal_b_value is None or not isinstance(maximal_b_value, (int, float)):
        return None
    # b-value, rounded to nearest multiple of 5
    b_value = maximal_b_value * numpy.linalg.norm(direction)**2
    b_value = 5 * round(b_value/5)
    return direction, b_value
def ge_complex_image_component_getter(data_set, tag):
    """ Return GE-specific Complex Image Component data.
    """
    manufacturer = data_set[odil.registry.Manufacturer][0]
    if manufacturer != b"GE MEDICAL SYSTEMS":
        return None
    gems_parm = _find_private_creator(data_set, b"GEMS_PARM_01", 0x0043)
    if gems_parm is None:
        return None
    element = data_set.get(odil.Tag(gems_parm+0x2f), [None])
    return (element[0], )
def siemens_coil_getter(data_set, tag):
    """ Return Siemens-specific coil identifier.
    """
    if data_set[odil.registry.Manufacturer][0] != b"SIEMENS":
        return None
    if data_set[odil.registry.Modality][0] != b"MR":
        return None
    csa_header = _find_private_creator(
        data_set, b"SIEMENS CSA HEADER", 0x0029)
    if csa_header is None:
        return None
    item = data_set.get(odil.Tag(csa_header + 0x10), [None])[0]
    if item is None:
        return None
    # Parse the CSA blob and pull the coil name, dropping NUL padding.
    csa = siemens.parse_csa(item.get_memory_view().tobytes())
    return csa.get("ImaCoilString", [b""])[0].strip(b"\x00")
def canon_getter(data_set, tag):
    """ Return Canon-specific diffusion information (the raw ImageComments
        matching "b=<value>(<x>,<y>,<z>)"), or None when absent/inconsistent.
    """
    if data_set[odil.registry.Manufacturer][0] != b"CANON_MEC":
        return None
    if data_set[odil.registry.SOPClassUID][0] != odil.registry.MRImageStorage:
        # NOTE Enhanced MR Image Storage use the standard fields
        return None
    if odil.registry.DiffusionBValue not in data_set:
        return None
    b_value_element = data_set[odil.registry.DiffusionBValue][0]
    if odil.registry.ImageComments not in data_set:
        if b_value_element != 0:
            logger.debug("No ImageComments, b-value={}".format(b_value_element))
            return None
        else:
            # Missing comments with b=0 means no diffusion weighting.
            image_comments = b"b=0(0,0,0)"
    else:
        image_comments = data_set[odil.registry.ImageComments][0]
    match = re.match(
        br"^b=([\d.]+)\("
        br"(-?[\d.]+),(-?[\d.]+),(-?[\d.]+)"
        br"\)$",
        image_comments)
    if not match:
        logger.debug("ImageComments not matched: '{}'".format(image_comments))
        return None
    try:
        b_value_comment, x, y, z = [float(v) for v in match.groups()]
    except ValueError:
        # BUG FIX: the previous handler logged "b-value discrepancy" using
        # b_value_comment, which is unbound when float() raises, so the parse
        # failure surfaced as a NameError instead of returning None.
        logger.debug(
            "Cannot parse b-value from ImageComments: '{}'".format(
                image_comments))
        return None
    if not numpy.isclose(b_value_element, b_value_comment):
        # Element and comment disagree: log it instead of failing silently.
        logger.debug(
            "b-value discrepancy: {} != {}".format(
                b_value_element, b_value_comment))
        return None
    return image_comments
def _get_splitters(data_sets):
    """ Return a list of splitters (tag and getter) depending on the SOPClassUID
        of each dataset

        :param data_sets: Data sets of the current stack
    """
    def default_getter(data_set, tag):
        # Return the raw element; binary payloads are pickled so they can be
        # used as hashable, comparable splitter keys.
        element = data_set.get(tag)
        if element is not None and element.is_binary():
            # WARNING: random error either with odil wrappers or with
            # numpy.array when simplifying keys. Fix by pickling the binary
            # DICOM elements.
            element = pickle.dumps(element)
        return element
    # Each entry is ((containing sequence or None, tag), getter); frames that
    # yield different getter values are split into different stacks.
    splitters = {
        "ALL": [
            # Single Frame generic tags
            ((None, odil.registry.SeriesInstanceUID), default_getter),
            ((None, odil.registry.ImageType), default_getter),
            (
                (None, odil.registry.ImageOrientationPatient),
                OrientationGetter()),
            ((None, odil.registry.SpacingBetweenSlices), default_getter),
            ((None, odil.registry.Rows), default_getter),
            ((None, odil.registry.Columns), default_getter),
            # FIXME: PixelSpacing; both X and Y must be close
            ((None, odil.registry.PhotometricInterpretation), default_getter),
            # Multiframe generic tags
            (
                (
                    odil.registry.FrameContentSequence,
                    odil.registry.DimensionIndexValues),
                get_dimension_index),
            (
                (
                    odil.registry.PlaneOrientationSequence,
                    odil.registry.ImageOrientationPatient),
                OrientationGetter()),
            (
                (
                    odil.registry.PixelMeasuresSequence,
                    odil.registry.SpacingBetweenSlices),
                default_getter),
            (
                (
                    odil.registry.FrameContentSequence,
                    odil.registry.FrameAcquisitionNumber),
                default_getter),
            (
                (odil.registry.FrameContentSequence, odil.registry.FrameLabel),
                default_getter)
        ],
        odil.registry.MRImageStorage: [
            ((None, odil.registry.AcquisitionNumber), default_getter),
            ((None, odil.registry.RepetitionTime), default_getter),
            ((None, odil.registry.EchoTime), default_getter),
            ((None, odil.registry.InversionTime), default_getter),
            ((None, odil.registry.EchoNumbers), default_getter),
            ((None, odil.registry.MRDiffusionSequence), get_diffusion),
            # Philips Ingenia stores these fields at top-level
            ((None, odil.registry.DiffusionGradientOrientation), default_getter),
            ((None, odil.registry.DiffusionBValue), default_getter),
            ((None, odil.registry.TriggerTime), default_getter),
            (
                (None, odil.registry.ContributingEquipmentSequence),
                frame_group_index_getter)
        ],
        odil.registry.EnhancedMRImageStorage: [
            (
                (
                    odil.registry.MRTimingAndRelatedParametersSequence,
                    odil.registry.RepetitionTime),
                default_getter),
            (
                (odil.registry.MREchoSequence, odil.registry.EffectiveEchoTime),
                default_getter),
            (
                (odil.registry.MRModifierSequence, odil.registry.InversionTimes),
                default_getter),
            (
                (odil.registry.MRImageFrameTypeSequence, odil.registry.FrameType),
                default_getter),
            (
                (
                    odil.registry.MRMetaboliteMapSequence,
                    odil.registry.MetaboliteMapDescription),
                default_getter),
            ((None, odil.registry.MRDiffusionSequence), get_diffusion),
        ],
        odil.registry.EnhancedPETImageStorage: [
            (
                (odil.registry.PETFrameTypeSequence, odil.registry.FrameType),
                default_getter)
        ],
        odil.registry.EnhancedCTImageStorage: [
            (
                (odil.registry.CTImageFrameTypeSequence, odil.registry.FrameType),
                default_getter)
        ]
    }
    sop_classes = set(x[odil.registry.SOPClassUID][0] for x in data_sets)
    # Generic splitters plus the SOP-class-specific ones for every SOP class
    # present in the stack.
    splitters = list(itertools.chain(
        splitters["ALL"],
        *[splitters.get(x, []) for x in sop_classes]
    ))
    # Vendor-specific splitters, added only when the manufacturer matches.
    if any(d.get(odil.registry.Manufacturer, [None])[0] == b"GE MEDICAL SYSTEMS" for d in data_sets):
        splitters.append(((None, None), ge_diffusion_getter))
        splitters.append(((None, None), ge_complex_image_component_getter))
    if any(d.get(odil.registry.Manufacturer, [None])[0] == b"SIEMENS" for d in data_sets):
        splitters.append(((None, None), siemens_coil_getter))
    if any(d.get(odil.registry.Manufacturer, [None])[0] == b"CANON_MEC" for d in data_sets):
        splitters.append(((None, None), canon_getter))
    return splitters
def _find_private_creator(data_set, private_creator, group):
    """ Return the private group (as an integer) corresponding to the given
        private creator and root group, or None.
    """
    tag = odil.Tag(group, 0x0000)
    for element in range(0, 256):
        tag.element = element
        # The private creator may be stored as a binary item (e.g. when
        # VR=UN): normalize it to a byte string before comparing.
        content = data_set.get(tag, [None])[0]
        if isinstance(content, odil.Value.BinaryItem):
            content = content.get_memory_view().tobytes()
        if content == private_creator:
            return (group << 16) + (element << 8)
    return None
| [
"numpy.isclose",
"numpy.cross",
"odil.Tag",
"pickle.dumps",
"re.match",
"numpy.subtract",
"numpy.argsort",
"numpy.linalg.norm",
"numpy.shape"
] | [((21251, 21269), 'odil.Tag', 'odil.Tag', (['group', '(0)'], {}), '(group, 0)\n', (21259, 21269), False, 'import odil\n'), ((8609, 8634), 'numpy.cross', 'numpy.cross', (['*orientation'], {}), '(*orientation)\n', (8620, 8634), False, 'import numpy\n'), ((15293, 15380), 're.match', 're.match', (["b'^b=([\\\\d.]+)\\\\((-?[\\\\d.]+),(-?[\\\\d.]+),(-?[\\\\d.]+)\\\\)$'", 'image_comments'], {}), "(b'^b=([\\\\d.]+)\\\\((-?[\\\\d.]+),(-?[\\\\d.]+),(-?[\\\\d.]+)\\\\)$',\n image_comments)\n", (15301, 15380), False, 'import re\n'), ((12957, 12981), 'odil.Tag', 'odil.Tag', (['(gems_parm + 57)'], {}), '(gems_parm + 57)\n', (12965, 12981), False, 'import odil\n'), ((13287, 13315), 'numpy.linalg.norm', 'numpy.linalg.norm', (['direction'], {}), '(direction)\n', (13304, 13315), False, 'import numpy\n'), ((14220, 14245), 'odil.Tag', 'odil.Tag', (['(csa_header + 16)'], {}), '(csa_header + 16)\n', (14228, 14245), False, 'import odil\n'), ((15872, 15919), 'numpy.isclose', 'numpy.isclose', (['b_value_element', 'b_value_comment'], {}), '(b_value_element, b_value_comment)\n', (15885, 15919), False, 'import numpy\n'), ((16515, 16536), 'pickle.dumps', 'pickle.dumps', (['element'], {}), '(element)\n', (16527, 16536), False, 'import pickle\n'), ((6159, 6192), 'numpy.cross', 'numpy.cross', (['value[:3]', 'value[3:]'], {}), '(value[:3], value[3:])\n', (6170, 6192), False, 'import numpy\n'), ((9149, 9164), 'numpy.shape', 'numpy.shape', (['o1'], {}), '(o1)\n', (9160, 9164), False, 'import numpy\n'), ((9177, 9192), 'numpy.shape', 'numpy.shape', (['o2'], {}), '(o2)\n', (9188, 9192), False, 'import numpy\n'), ((13756, 13780), 'odil.Tag', 'odil.Tag', (['(gems_parm + 47)'], {}), '(gems_parm + 47)\n', (13764, 13780), False, 'import odil\n'), ((5873, 5896), 'numpy.argsort', 'numpy.argsort', (['position'], {}), '(position)\n', (5886, 5896), False, 'import numpy\n'), ((7916, 7929), 'odil.Tag', 'odil.Tag', (['idx'], {}), '(idx)\n', (7924, 7929), False, 'import odil\n'), ((9243, 9257), 'numpy.shape', 
'numpy.shape', (['x'], {}), '(x)\n', (9254, 9257), False, 'import numpy\n'), ((9380, 9402), 'numpy.subtract', 'numpy.subtract', (['o1', 'o2'], {}), '(o1, o2)\n', (9394, 9402), False, 'import numpy\n'), ((12539, 12561), 'odil.Tag', 'odil.Tag', (['(gems_acq + x)'], {}), '(gems_acq + x)\n', (12547, 12561), False, 'import odil\n')] |
import numpy as np
from math import factorial
def main():
    """Demonstrate numpy convolution modes and the Savitzky-Golay filter."""
    signal = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    kernel = np.array([1, 2, 3, 4])
    print(np.convolve(signal, kernel, mode="valid"))
    print(np.convolve(signal, kernel, mode="full"))
    quadratic = np.arange(0, 20, 1) ** 2
    print(savitzky_golay(quadratic, 4, 3, 0, 1))
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) *y* with a Savitzky-Golay filter.

    A polynomial of degree *order* is least-squares fitted over a moving
    window of *window_size* samples; *deriv* selects which derivative of the
    fit to return (0 = plain smoothing), scaled by rate**deriv.

    :param y: 1-D signal (numpy array of floats)
    :param window_size: length of the moving window (should be odd so the
        window is centred on each sample)
    :param order: degree of the fitted polynomial (must be < window_size)
    :param deriv: order of the derivative to compute (default 0)
    :param rate: sample rate used to scale derivatives (default 1)
    :return: filtered signal, same length as *y*
    """
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Precompute the filter coefficients from the Vandermonde matrix of the
    # window offsets.  np.array/ndarray replaces the deprecated np.mat/.A
    # pair; np.linalg.pinv of an ndarray yields the identical row.
    vandermonde = np.array(
        [[k**i for i in order_range]
         for k in range(-half_window, half_window + 1)]
    )
    m = np.linalg.pinv(vandermonde)[deriv] * rate**deriv * factorial(deriv)
    # Pad the signal at the extremes with values mirrored from the signal
    # itself, so the "valid" convolution still covers the original range.
    firstvals = y[0] - np.abs(y[1 : half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1 : -1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode="valid")
if __name__ == "__main__":
main()
| [
"numpy.abs",
"numpy.convolve",
"numpy.linalg.pinv",
"math.factorial",
"numpy.array",
"numpy.concatenate",
"numpy.arange"
] | [((68, 109), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (76, 109), True, 'import numpy as np\n'), ((118, 140), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (126, 140), True, 'import numpy as np\n'), ((150, 181), 'numpy.convolve', 'np.convolve', (['x', 'y'], {'mode': '"""valid"""'}), "(x, y, mode='valid')\n", (161, 181), True, 'import numpy as np\n'), ((204, 234), 'numpy.convolve', 'np.convolve', (['x', 'y'], {'mode': '"""full"""'}), "(x, y, mode='full')\n", (215, 234), True, 'import numpy as np\n'), ((961, 1001), 'numpy.concatenate', 'np.concatenate', (['(firstvals, y, lastvals)'], {}), '((firstvals, y, lastvals))\n', (975, 1001), True, 'import numpy as np\n'), ((1014, 1051), 'numpy.convolve', 'np.convolve', (['m[::-1]', 'y'], {'mode': '"""valid"""'}), "(m[::-1], y, mode='valid')\n", (1025, 1051), True, 'import numpy as np\n'), ((257, 276), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (266, 276), True, 'import numpy as np\n'), ((714, 730), 'math.factorial', 'factorial', (['deriv'], {}), '(deriv)\n', (723, 730), False, 'from math import factorial\n'), ((838, 879), 'numpy.abs', 'np.abs', (['(y[1:half_window + 1][::-1] - y[0])'], {}), '(y[1:half_window + 1][::-1] - y[0])\n', (844, 879), True, 'import numpy as np\n'), ((905, 949), 'numpy.abs', 'np.abs', (['(y[-half_window - 1:-1][::-1] - y[-1])'], {}), '(y[-half_window - 1:-1][::-1] - y[-1])\n', (911, 949), True, 'import numpy as np\n'), ((671, 688), 'numpy.linalg.pinv', 'np.linalg.pinv', (['b'], {}), '(b)\n', (685, 688), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 11:15:49 2018
# Congo basin tree fraction using 2018 dataset with gain
@author: earjba
"""
import numpy as np
import importlib
import iris
import iris.quickplot as qplt
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from netCDF4 import Dataset, num2date
from mpl_toolkits import basemap
from jpros import readfiles
from jpros import harmonised
importlib.reload(readfiles)
importlib.reload(harmonised)
from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import unify_time_units
import numpy as np
import pandas as pd
import tifffile as tiff
import h5py
import glob
import math
import gdal
import iris
from netCDF4 import Dataset as NetCDFFile
from PIL import Image
from pyhdf.SD import SD, SDC
from mpl_toolkits import basemap
def get_coords(gt, width, height):
    """Build latitude/longitude axes from a GDAL geotransform.

    :param gt: 6-element GDAL geotransform
        (x origin, x step, x rotation, y origin, y rotation, y step)
    :param width: raster width in pixels
    :param height: raster height in pixels
    :return: (lat, lon) 1-D arrays
    """
    x_origin, x_step, x_rot, y_origin, y_rot, y_step = gt
    max_x = x_origin + width * x_step + height * x_rot
    min_y = y_origin + width * y_rot + height * y_step
    lon = np.arange(x_origin, max_x, x_step)
    # y_step is negative for north-up rasters, so -y_step walks upwards
    # from the southern edge towards the origin.
    lat = np.arange(min_y, y_origin, -y_step)
    return(lat, lon)
def regrid_data(var, lat, lon, target_res=2):
    """Bilinearly regrid a (year, month, lat, lon) array onto a coarser
    regular grid of *target_res* degrees.

    :param var: 4-D array ordered (year, month, lat, lon)
    :param lat: 1-D latitude axis (ascending or descending)
    :param lon: 1-D longitude axis
    :param target_res: output resolution in degrees (default 2)
    :return: (regridded variable, new latitude axis, new longitude axis)
    """
    if lat[0] > lat[-1]:
        #flip lat and associated index so basemap.interp receives an
        #ascending latitude axis
        lat = lat[::-1]
        var = var[:, :, ::-1, :]
    # NOTE(review): new_lon uses the *latitude* spacing for its step — this
    # is only correct for square-celled grids; confirm it is intentional.
    new_lat = np.arange(lat[0], lat[-1]+(abs(lat[1]-lat[0])), target_res)
    new_lon = np.arange(lon[0], lon[-1]+(abs(lat[1]-lat[0])), target_res)
    lon_sub, lat_sub = np.meshgrid(new_lon, new_lat)
    var_rescale = np.empty((var.shape[0], var.shape[1],
                            len(new_lat), len(new_lon)))
    for yr in range(var.shape[0]):
        for mn in range(12):
            # order=1 requests bilinear interpolation
            var_rescale[yr, mn, :, :] = basemap.interp(var[yr, mn, :, :],
                                                       lon, lat,
                                                       lon_sub, lat_sub,
                                                       order=1)
    return(var_rescale, new_lat, new_lon)
def get_lat_bounds(array1d):
    """Return an (n, 2) float array of latitude cell bounds for the 1-D
    axis *array1d*, extending it by one grid step at one end.
    """
    step = abs(array1d[1] - array1d[0])
    if array1d[0] < 0:
        # First value below zero: extend at the front.
        padded = np.concatenate(([array1d[0] - step], array1d))
    else:
        # Otherwise extend one step past the last value.
        padded = np.concatenate((array1d, [array1d[-1] - step]))
    pairs = np.hstack((padded[:-1, np.newaxis], padded[1:, np.newaxis]))
    return pairs.astype('float')
def get_lon_bounds(array1d):
    """Return an (n, 2) float array of longitude cell bounds for the 1-D
    axis *array1d*, appending one extra grid step past the end.
    """
    step = abs(array1d[1] - array1d[0])
    padded = np.concatenate((array1d, [array1d[-1] + step]))
    pairs = np.hstack((padded[:-1, np.newaxis], padded[1:, np.newaxis]))
    return pairs.astype('float')
def minus180_to_plus180(var, lon):
    """Shift a 4-D field from a [0, 360) to a [-180, 180) longitude grid.

    :param var: 4-D array whose last axis is longitude
    :param lon: original longitude axis
    :return: (rolled variable, new longitude axis)
    :raises TypeError: if *var* has fewer than 4 dimensions
    """
    if len(var.shape) < 4:
        raise TypeError('Variable not in correct format')
    half = int(var.shape[-1] / 2)
    first_half = var[:, :, :, 0:half]
    second_half = var[:, :, :, half:]
    new_var = np.concatenate((second_half, first_half), axis=3)
    new_lon = np.arange(-180, 180, abs(lon[1] - lon[0]))
    return(new_var, new_lon)
# Paths to the harmonised LAPSE datasets used as regridding targets below.
path = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/mon_1.0deg/')
# read in surface air temperature 2003-2018 (AIRS)
one_deg_cube = path+'tas_airs_mon_1.0deg_2003_2018.nc'
path = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/mon_0.25deg/')
# read in surface albedo (CLARA)
pt25_cube = path+'sal_clara_mon_0.25deg_1982_2015_direct_from_netcdf.nc'
# read in Global Precipitation Climatology Centre monthly total of Precipitation
pt5_cube = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/Final/'
            '0.5deg/pr_gpcc_mon_0.5deg_1983_2018.nc')
# Default regrid target; overridden below depending on the chosen resolution.
regrid_cube = one_deg_cube
#%%
def get_forest_cover_2018(res=0.05):
    """Build a 2018 tree-cover-fraction grid over 0-40E / 20N-20S from the
    Hansen GFC-2018 v1.6 Landsat tiles.

    The 2018 cover is derived by zeroing, in the year-2000 cover, every
    source pixel flagged as lost during 2001-2018 in the loss-year layer,
    then block-averaging each tile down to *res* degrees.

    :param res: output resolution in degrees
    :return: (cover array, latitude axis, longitude axis)
    """
    # read canopy cover data
    forest_2000_path = '/nfs/a68/gyjcab/datasets/GFC_Hansen/v1.6/treecover2000/'
    # read forest loss year data
    year_loss_path = '/nfs/a68/gyjcab/datasets/GFC_Hansen/v1.6/lossYear/'
    # read each tile and down-scale data over Central Africa
    # 0.00025 deg is the native Hansen pixel size, so `scale` is the number
    # of source pixels per output cell side.
    scale = int(res/abs(0.00025))
    ydim = (int(40/res))
    xdim1 = (int(10/res))
    xdim2 = (int(40/res))
    vdat = np.empty((ydim, xdim1))
    hdat = np.empty((ydim, xdim2))
    j = 0
    # 0-40E, 20-20S: iterate over the 10x10 degree tiles.
    for nx in np.arange(0,40,10):
        i = 0
        for ny in np.arange(20,-20,-10):
            xstr = str(nx).zfill(3)
            ystr = str(ny).zfill(2)
            # First get tree cover 2000
            fname = 'Hansen_GFC-2018-v1.6_treecover2000_' +\
                    ystr + 'N_' + xstr + 'E.tif'
            if ny < 0:
                # Southern-hemisphere tiles are named "<lat>S", not "<lat>N".
                ystr = str(abs(ny)).zfill(2)
                fname = 'Hansen_GFC-2018-v1.6_treecover2000_' +\
                        ystr + 'S_' + xstr + 'E.tif'
            print(fname)
            # open tiff
            ds = gdal.Open(forest_2000_path+fname)
            band = ds.GetRasterBand(1)
            treeFrac_2000 = band.ReadAsArray()
            width = ds.RasterXSize
            height = ds.RasterYSize
            gt = ds.GetGeoTransform()
            lat, lon = get_coords(gt, width, height)
            # Then get loss data
            fname = 'Hansen_GFC-2018-v1.6_lossyear_' +\
                    ystr + 'N_' + xstr + 'E.tif'
            if ny < 0:
                ystr = str(abs(ny)).zfill(2)
                fname = 'Hansen_GFC-2018-v1.6_lossyear_' +\
                        ystr + 'S_' + xstr + 'E.tif'
            print(fname)
            # open tiff
            ds = gdal.Open(year_loss_path+fname)
            band = ds.GetRasterBand(1)
            loss_data = band.ReadAsArray()
            # Get mask of forest lost by 2018
            # where loss_data has values greater than 19, set to 0
            loss_data[loss_data>19] = 0
            # for all years, set value to 1. loss is now just binary
            loss_data[(loss_data>=1)&(loss_data<=19)] = 1
            treeFrac_2018 = treeFrac_2000.copy()
            treeFrac_2018[loss_data==1] = 0
            # Find mean forest cover per output pixel (block average)
            xx, yy = lon[::scale],lat[::scale]
            fc16_data = np.zeros((xdim1, xdim1))
            for ix in range(len(xx)):
                temp_lon_ix = np.where((lon==xx[ix]))[0]
                for iy in range(len(yy)):
                    temp_lat_iy = np.where((lat==yy[iy]))[0]
                    # Mean tree cover in 2000
                    # temp = (treeFrac_2000[temp_lat_iy[0]:temp_lat_iy[0]+scale, :]
                    #        [:, temp_lon_ix[0]:temp_lon_ix[0]+scale])
                    # print(np.nanmean(temp))
                    # Mean tree cover in 2018
                    data_trim = (treeFrac_2018[temp_lat_iy[0]:temp_lat_iy[0]+scale, :]
                                 [:, temp_lon_ix[0]:temp_lon_ix[0]+scale])
                    mean_tree_cover = np.nanmean(data_trim)
                    # print(mean_tree_cover)
                    fc16_data[iy, ix] = mean_tree_cover
            # assert False
            # Stack tiles: vdat collects one 10-degree column, hdat the rows.
            vdat[i*xdim1:(i+1)*xdim1,:] = fc16_data
            i += 1
        hdat[:,j*xdim1:(j+1)*xdim1] = vdat
        j += 1
    lat = np.arange(20, -20, -res)
    lon = np.arange(0, 40, res)
    return(hdat, lat, lon)
# LANDSAT forest loss: compute the 2018 tree-cover grid and write it out.
res = 0.05
hdat, lat, lon = get_forest_cover_2018(res=res)
plt.imshow(hdat)
# Pick the regrid target cube matching the requested resolution.
if res == 0.25:
    regrid_cube = pt25_cube
elif res == 0.05:
    regrid_cube = ('/nfs/see-fs-02_users/earjba/python_scripts/'
                   'deforestation_analysis/temp_cube_5km.nc')
args = [hdat, lat, lon, 2018, 0, '1']
kwargs = {'standard_name': 'area_fraction',
          'long_name': 'Tree Cover Fraction (year 2018)',
          'short_name': 'treeFrac2018_' + str(res),
          'product': 'hansen-landsat',
          'regrid': True,
          'latlon_bounds': True,
          'time_bounds': False,
          'regrid_cube': regrid_cube}
harmonised.write_netcdf(*args, **kwargs)
### fix artefacts: blank out everything outside the processed window and
### re-save the cube in place.
path = '/nfs/a68/ee13c2s/python/treefrac_africa/2018/feb21/'
if res == 0.05:
    fname = 'treeFrac2018_0.05_hansen-landsat_mon_0.05deg.nc'
if res == 0.25:
    fname = 'treeFrac2018_0.25_hansen-landsat_mon_0.25deg.nc'
cube = iris.load_cube(path + fname)
temp_lat = cube.coord('latitude').points
temp_lon = cube.coord('longitude').points
# Indices of grid points outside the 0-40E / 20S-20N processing window.
i, = np.where((temp_lat>lat.max())|(temp_lat<lat.min()))
j, = np.where((temp_lon>lon.max())|(temp_lon<lon.min()))
cube.data[i, :] = np.nan
cube.data[:, j] = np.nan
plt.imshow(cube.data, origin='lower')
plt.colorbar()
iris.save(cube, path+fname)
| [
"matplotlib.pyplot.imshow",
"gdal.Open",
"numpy.hstack",
"mpl_toolkits.basemap.interp",
"iris.save",
"numpy.where",
"matplotlib.pyplot.colorbar",
"numpy.nanmean",
"numpy.zeros",
"jpros.harmonised.write_netcdf",
"numpy.empty",
"iris.load_cube",
"importlib.reload",
"numpy.concatenate",
"nu... | [((445, 472), 'importlib.reload', 'importlib.reload', (['readfiles'], {}), '(readfiles)\n', (461, 472), False, 'import importlib\n'), ((473, 501), 'importlib.reload', 'importlib.reload', (['harmonised'], {}), '(harmonised)\n', (489, 501), False, 'import importlib\n'), ((7309, 7325), 'matplotlib.pyplot.imshow', 'plt.imshow', (['hdat'], {}), '(hdat)\n', (7319, 7325), True, 'import matplotlib.pyplot as plt\n'), ((7877, 7917), 'jpros.harmonised.write_netcdf', 'harmonised.write_netcdf', (['*args'], {}), '(*args, **kwargs)\n', (7900, 7917), False, 'from jpros import harmonised\n'), ((8162, 8190), 'iris.load_cube', 'iris.load_cube', (['(path + fname)'], {}), '(path + fname)\n', (8176, 8190), False, 'import iris\n'), ((8438, 8475), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cube.data'], {'origin': '"""lower"""'}), "(cube.data, origin='lower')\n", (8448, 8475), True, 'import matplotlib.pyplot as plt\n'), ((8476, 8490), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8488, 8490), True, 'import matplotlib.pyplot as plt\n'), ((8491, 8520), 'iris.save', 'iris.save', (['cube', '(path + fname)'], {}), '(cube, path + fname)\n', (8500, 8520), False, 'import iris\n'), ((1063, 1090), 'numpy.arange', 'np.arange', (['minx', 'maxx', 'resx'], {}), '(minx, maxx, resx)\n', (1072, 1090), True, 'import numpy as np\n'), ((1101, 1129), 'numpy.arange', 'np.arange', (['miny', 'maxy', '(-resy)'], {}), '(miny, maxy, -resy)\n', (1110, 1129), True, 'import numpy as np\n'), ((1491, 1520), 'numpy.meshgrid', 'np.meshgrid', (['new_lon', 'new_lat'], {}), '(new_lon, new_lat)\n', (1502, 1520), True, 'import numpy as np\n'), ((2325, 2389), 'numpy.hstack', 'np.hstack', (['(bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis])'], {}), '((bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis]))\n', (2334, 2389), True, 'import numpy as np\n'), ((2570, 2608), 'numpy.concatenate', 'np.concatenate', (['(array1d, [extra_val])'], {}), '((array1d, [extra_val]))\n', (2584, 2608), True, 'import 
numpy as np\n'), ((2624, 2688), 'numpy.hstack', 'np.hstack', (['(bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis])'], {}), '((bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis]))\n', (2633, 2688), True, 'import numpy as np\n'), ((4170, 4193), 'numpy.empty', 'np.empty', (['(ydim, xdim1)'], {}), '((ydim, xdim1))\n', (4178, 4193), True, 'import numpy as np\n'), ((4205, 4228), 'numpy.empty', 'np.empty', (['(ydim, xdim2)'], {}), '((ydim, xdim2))\n', (4213, 4228), True, 'import numpy as np\n'), ((4273, 4293), 'numpy.arange', 'np.arange', (['(0)', '(40)', '(10)'], {}), '(0, 40, 10)\n', (4282, 4293), True, 'import numpy as np\n'), ((7143, 7167), 'numpy.arange', 'np.arange', (['(20)', '(-20)', '(-res)'], {}), '(20, -20, -res)\n', (7152, 7167), True, 'import numpy as np\n'), ((7178, 7199), 'numpy.arange', 'np.arange', (['(0)', '(40)', 'res'], {}), '(0, 40, res)\n', (7187, 7199), True, 'import numpy as np\n'), ((2165, 2203), 'numpy.concatenate', 'np.concatenate', (['([extra_val], array1d)'], {}), '(([extra_val], array1d))\n', (2179, 2203), True, 'import numpy as np\n'), ((2271, 2309), 'numpy.concatenate', 'np.concatenate', (['(array1d, [extra_val])'], {}), '((array1d, [extra_val]))\n', (2285, 2309), True, 'import numpy as np\n'), ((3000, 3038), 'numpy.concatenate', 'np.concatenate', (['(temp2, temp1)'], {'axis': '(3)'}), '((temp2, temp1), axis=3)\n', (3014, 3038), True, 'import numpy as np\n'), ((4325, 4348), 'numpy.arange', 'np.arange', (['(20)', '(-20)', '(-10)'], {}), '(20, -20, -10)\n', (4334, 4348), True, 'import numpy as np\n'), ((1738, 1808), 'mpl_toolkits.basemap.interp', 'basemap.interp', (['var[yr, mn, :, :]', 'lon', 'lat', 'lon_sub', 'lat_sub'], {'order': '(1)'}), '(var[yr, mn, :, :], lon, lat, lon_sub, lat_sub, order=1)\n', (1752, 1808), False, 'from mpl_toolkits import basemap\n'), ((4821, 4856), 'gdal.Open', 'gdal.Open', (['(forest_2000_path + fname)'], {}), '(forest_2000_path + fname)\n', (4830, 4856), False, 'import gdal\n'), ((5487, 5520), 'gdal.Open', 
'gdal.Open', (['(year_loss_path + fname)'], {}), '(year_loss_path + fname)\n', (5496, 5520), False, 'import gdal\n'), ((6109, 6133), 'numpy.zeros', 'np.zeros', (['(xdim1, xdim1)'], {}), '((xdim1, xdim1))\n', (6117, 6133), True, 'import numpy as np\n'), ((6202, 6225), 'numpy.where', 'np.where', (['(lon == xx[ix])'], {}), '(lon == xx[ix])\n', (6210, 6225), True, 'import numpy as np\n'), ((6852, 6873), 'numpy.nanmean', 'np.nanmean', (['data_trim'], {}), '(data_trim)\n', (6862, 6873), True, 'import numpy as np\n'), ((6305, 6328), 'numpy.where', 'np.where', (['(lat == yy[iy])'], {}), '(lat == yy[iy])\n', (6313, 6328), True, 'import numpy as np\n')] |
#<NAME>
#30/11/21
#Some basic college coding - NDVI, Advanced list manipulations & plotting
########################
#Imports & Inits
########################
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import seaborn as sns
from sklearn.linear_model import LinearRegression


def _flatten(frame):
    """Flatten a DataFrame into a flat (row-major) python list."""
    return [item for row in frame.values.tolist() for item in row]


########################
# GET EXCEL FILE
########################
location = "C:\\Data\\Remote_Sensing\\CourseData\\Remotesensing(1)\\Achterhoek_FieldSpec_2008.xlsx"
matrix = pd.read_excel(location)
# Reuse the same path instead of repeating the literal.
first = pd.ExcelFile(location)
second = pd.read_excel(first, 'Field_sampling')
########################
# CREATE DATA FRAMES
########################
# Field measurements from the 'Field_sampling' sheet.
fresh_weight = pd.DataFrame(data=second.iloc[0, 1:])
N_concentration = pd.DataFrame(data=second.iloc[2, 1:])
N_content = pd.DataFrame(data=second.iloc[3, 1:])
# NDVI from spectrometer rows 431 and 321 (presumably NIR and red bands --
# verify against the wavelength column).
ndvi = pd.DataFrame(data=(matrix.iloc[431, :] - matrix.iloc[321, :])
             / (matrix.iloc[321, :] + matrix.iloc[431, :]))
# Red edge position (REP) index from rows 321/351/391/431.
rep = pd.DataFrame(data=700 + 40 * ((matrix.iloc[321, :] + matrix.iloc[431, :])
      / 2 - matrix.iloc[351, :]) / (matrix.iloc[391, :] - matrix.iloc[351, :]))
wavelengths = pd.DataFrame(data=matrix.iloc[:, 0][1:])
plot = pd.DataFrame(data=matrix.iloc[:, 1][1:])
plot2 = pd.DataFrame(data=matrix.iloc[:, 2][1:])
########################
# CREATE ARRAYS
########################
# One helper replaces the eight copy-pasted tolist/flatten loops; the [1:]
# slice drops the leading header entry of the spectral-index columns.
y = np.array(_flatten(ndvi)[1:])       # NDVI
y2 = np.array(_flatten(rep)[1:])       # REP
x = np.array(_flatten(fresh_weight))
x2 = np.array(_flatten(N_concentration))
x3 = np.array(_flatten(N_content))
x4 = np.array(_flatten(wavelengths))
y4 = np.array(_flatten(plot))
y5 = np.array(_flatten(plot2))
########################
# DESIGN PLOTS
########################
plt.plot(x, y, 'o')
plt.xlabel('Fresh weight (ton/ha)')
plt.ylabel('NDVI')
plt.title('Regression of NDVI by fresh weight of vegetation')
plt.grid()
# Alternative regressions (NDVI/REP against N concentration or N content,
# spectral reflectance per plot, fitted regression lines) can be produced
# by swapping the x/y arrays and axis labels above.
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print(r_value)
plt.show()
| [
"scipy.stats.linregress",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"pandas.ExcelFile",
"pandas.read_excel",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((524, 547), 'pandas.read_excel', 'pd.read_excel', (['location'], {}), '(location)\n', (537, 547), True, 'import pandas as pd\n'), ((557, 669), 'pandas.ExcelFile', 'pd.ExcelFile', (['"""C:\\\\Data\\\\Remote_Sensing\\\\CourseData\\\\Remotesensing(1)\\\\Achterhoek_FieldSpec_2008.xlsx"""'], {}), "(\n 'C:\\\\Data\\\\Remote_Sensing\\\\CourseData\\\\Remotesensing(1)\\\\Achterhoek_FieldSpec_2008.xlsx'\n )\n", (569, 669), True, 'import pandas as pd\n'), ((670, 708), 'pandas.read_excel', 'pd.read_excel', (['first', '"""Field_sampling"""'], {}), "(first, 'Field_sampling')\n", (683, 708), True, 'import pandas as pd\n'), ((1003, 1040), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'second.iloc[0, 1:]'}), '(data=second.iloc[0, 1:])\n', (1015, 1040), True, 'import pandas as pd\n'), ((1060, 1097), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'second.iloc[2, 1:]'}), '(data=second.iloc[2, 1:])\n', (1072, 1097), True, 'import pandas as pd\n'), ((1111, 1148), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'second.iloc[3, 1:]'}), '(data=second.iloc[3, 1:])\n', (1123, 1148), True, 'import pandas as pd\n'), ((1157, 1270), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '((matrix.iloc[431, :] - matrix.iloc[321, :]) / (matrix.iloc[321, :] +\n matrix.iloc[431, :]))'}), '(data=(matrix.iloc[431, :] - matrix.iloc[321, :]) / (matrix.\n iloc[321, :] + matrix.iloc[431, :]))\n', (1169, 1270), True, 'import pandas as pd\n'), ((1294, 1445), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '(700 + 40 * ((matrix.iloc[321, :] + matrix.iloc[431, :]) / 2 - matrix.iloc[\n 351, :]) / (matrix.iloc[391, :] - matrix.iloc[351, :]))'}), '(data=700 + 40 * ((matrix.iloc[321, :] + matrix.iloc[431, :]) /\n 2 - matrix.iloc[351, :]) / (matrix.iloc[391, :] - matrix.iloc[351, :]))\n', (1306, 1445), True, 'import pandas as pd\n'), ((1478, 1518), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'matrix.iloc[:, 0][1:]'}), '(data=matrix.iloc[:, 0][1:])\n', (1490, 1518), True, 'import pandas as pd\n'), 
((1527, 1567), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'matrix.iloc[:, 1][1:]'}), '(data=matrix.iloc[:, 1][1:])\n', (1539, 1567), True, 'import pandas as pd\n'), ((1577, 1617), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'matrix.iloc[:, 2][1:]'}), '(data=matrix.iloc[:, 2][1:])\n', (1589, 1617), True, 'import pandas as pd\n'), ((1768, 1792), 'numpy.array', 'np.array', (['flat_list1[1:]'], {}), '(flat_list1[1:])\n', (1776, 1792), True, 'import numpy as np\n'), ((1874, 1898), 'numpy.array', 'np.array', (['flat_list5[1:]'], {}), '(flat_list5[1:])\n', (1882, 1898), True, 'import numpy as np\n'), ((1988, 2008), 'numpy.array', 'np.array', (['flat_list2'], {}), '(flat_list2)\n', (1996, 2008), True, 'import numpy as np\n'), ((2102, 2122), 'numpy.array', 'np.array', (['flat_list3'], {}), '(flat_list3)\n', (2110, 2122), True, 'import numpy as np\n'), ((2210, 2230), 'numpy.array', 'np.array', (['flat_list4'], {}), '(flat_list4)\n', (2218, 2230), True, 'import numpy as np\n'), ((2320, 2340), 'numpy.array', 'np.array', (['flat_list6'], {}), '(flat_list6)\n', (2328, 2340), True, 'import numpy as np\n'), ((2423, 2443), 'numpy.array', 'np.array', (['flat_list7'], {}), '(flat_list7)\n', (2431, 2443), True, 'import numpy as np\n'), ((2527, 2547), 'numpy.array', 'np.array', (['flat_list8'], {}), '(flat_list8)\n', (2535, 2547), True, 'import numpy as np\n'), ((2619, 2638), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (2627, 2638), True, 'import matplotlib.pyplot as plt\n'), ((2640, 2675), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fresh weight (ton/ha)"""'], {}), "('Fresh weight (ton/ha)')\n", (2650, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2695), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NDVI"""'], {}), "('NDVI')\n", (2687, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2758), 'matplotlib.pyplot.title', 'plt.title', (['"""Regression of NDVI by fresh weight of vegetation"""'], {}), 
"('Regression of NDVI by fresh weight of vegetation')\n", (2706, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2858), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2856, 2858), True, 'import matplotlib.pyplot as plt\n'), ((4382, 4404), 'scipy.stats.linregress', 'stats.linregress', (['x', 'y'], {}), '(x, y)\n', (4398, 4404), False, 'from scipy import stats\n'), ((4534, 4544), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4542, 4544), True, 'import matplotlib.pyplot as plt\n')] |
import os, pprint
import numpy as np
import joblib
from utils.BaseModel import BaseModel
from utils.AwesomeTimeIt import timeit
from utils.RegressionReport import evaluate_regression
from utils.FeatureImportanceReport import report_feature_importance
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
class Boosting_XGB(BaseModel):
    """XGBoost regressor wrapper on top of BaseModel.

    Pulls train/test splits from the supplied data loader, fits an
    ``xgboost.XGBRegressor``, evaluates it on both splits, and offers
    hyper-parameter search (random or exhaustive).
    """

    def __init__(self, name, dl):
        super().__init__(name, 'XGB', dl)
        self.n_top_features = dl.n_top_features
        self.k = dl.k  # number of cross-validation folds
        # splitting data into X and Y
        self.X_train, self.X_test, self.Y_train, self.Y_test, \
            self.dates_train, self.dates_test = dl.load_with_test()
        self.X, self.Y, _ = dl.load_all()

    def setParams(self, n_estimators = 2000, learning_rate = 0.05, max_depth = 5,
                    max_features = 2, min_samples_leaf=4, min_samples_split = 0.6,
                    reg_alpha=0.0004, should_cross_val=True, n_jobs = 1,
                    verbose = 0):
        """Store the hyper-parameters used by xgb_run and log them."""
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.max_depth = max_depth
        self.max_features = max_features
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.should_cross_val = should_cross_val
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.reg_alpha = reg_alpha
        self.log.info(pprint.pformat({
            "n_estimators" : n_estimators,
            "learning_rate" : learning_rate,
            "max_depth" : max_depth,
            "max_features" : max_features,
            "min_samples_leaf" : min_samples_leaf,
            "min_samples_split" : min_samples_split,
            "should_cross_val" : should_cross_val,
            "n_jobs" : n_jobs,
            "verbose" : verbose,
            "reg_alpha" : reg_alpha,
            'random_state': self.dl.random_state
        }))

    @timeit
    def xgb_run(self):
        """Fit the regressor, optionally cross-validate, then save and evaluate it."""
        # (removed an unused xgb.DMatrix construction — the sklearn-style
        # XGBRegressor below consumes the DataFrames directly)
        model = xgb.XGBRegressor(max_depth=self.max_depth,
                                 learning_rate=self.learning_rate,
                                 n_estimators=self.n_estimators,
                                 verbosity=self.verbose,
                                 reg_alpha=self.reg_alpha,
                                 booster='gbtree',
                                 n_jobs=self.n_jobs)
        eval_set = [(self.X_train, self.Y_train), (self.X_test, self.Y_test)]
        model.fit(self.X_train, self.Y_train, eval_set=eval_set, verbose=True)
        if self.should_cross_val:
            # BUGFIX: cross_val_score was used without ever being imported
            # in this module (NameError at runtime when should_cross_val=True).
            from sklearn.model_selection import cross_val_score
            scores = cross_val_score(model, self.X, self.Y, cv=self.k, verbose=0)
            self.log.info(f"---- Cross validation with {self.k} groups----\n\nThe results on each split" +\
                            str(scores)+"\n")
            self.log.info(f"The average of the cross validation is {scores.mean()}\n")
            print (f"Cross validation is done for {self.name}")
        joblib.dump(model, self.directory + f'/{self.name}.pkl', compress=3)
        evaluate_regression(self.directory, self.X_train,
                            self.Y_train, model.predict(self.X_train),
                            self.dates_train, 'XGB-OnTrain',
                            self.log, slicer = 1,
                            should_log_inverse = self.data_loader.should_log_inverse)
        evaluate_regression(self.directory, self.X_test,
                            self.Y_test, model.predict(self.X_test),
                            self.dates_test, 'XGB-OnTest',
                            self.log, slicer = 1,
                            should_log_inverse = self.data_loader.should_log_inverse)

    def tune(self, grid = None, should_random_search = False, n_iter = 1):
        """Hyper-parameter search over `grid`.

        Args:
            grid: search space dict; None uses the default space below
                (built lazily to avoid a mutable default argument).
            should_random_search: True -> RandomizedSearchCV with n_iter
                samples; False -> exhaustive GridSearchCV.
            n_iter: number of sampled candidates for the random search.
        """
        if grid is None:
            grid = {'n_estimators': [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)],
                    'learning_rate': np.linspace(0.01, 0.2, 10),
                    'max_features': ['auto', 'sqrt'],
                    'max_depth': [int(x) for x in np.linspace(5, 20, num = 15)] + [None],
                    'min_samples_split': [2, 5, 10],
                    'min_samples_leaf': [1, 2, 4],
                    'reg_alpha': np.linspace(0.0001, 0.02, 200)}
        self.log.info(f'------ {self.name} is going to be Tunned with \n -----')
        model = xgb.XGBRegressor()
        if should_random_search:
            search_models = RandomizedSearchCV(estimator = model,
                                                param_distributions = grid,
                                                n_iter = n_iter,
                                                cv = self.k,
                                                verbose=2,
                                                n_jobs = -1)
        else:
            # BUGFIX: GridSearchCV takes `param_grid`, not `param_distributions`
            # (the original raised TypeError on this branch).
            search_models = GridSearchCV(estimator = model,
                                        param_grid = grid,
                                        cv = self.k,
                                        verbose=2,
                                        n_jobs = -1)
        search_models.fit(self.X, self.Y)
        self.log.info(f"\n\nBest params:\n{pprint.pformat(search_models.best_params_)}\n")
        self.log.info(f"\n\nBest score: {search_models.best_score_:0.4f}\n\n")
        print (search_models.best_score_)

    def load_xgb(self):
        """Load a pickled model and print its predictions on the full X."""
        # NOTE(review): xgb_run saves to f'/{self.name}.pkl' while this loads
        # f'/XGBOOST-{self.name}.pkl' — confirm which naming scheme is intended.
        model = joblib.load(self.directory + f'/XGBOOST-{self.name}.pkl')
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0.
        input_params = self.X.to_numpy()
        pre = model.predict(input_params)
        print (pre)
@timeit
def run():
    """Ad-hoc driver: build the model for one dataset and run xgb_run."""
    file = 'HACKA1'
    # NOTE(review): `Boosting` is not defined in this module (only
    # Boosting_XGB is), and Boosting_XGB.__init__ takes (name, dl) rather
    # than these keyword arguments — this driver looks stale; confirm the
    # intended class/constructor before running.
    bst = Boosting(file, name = file, split_size=0.2, should_shuffle=True, k=5, num_top_features = 10)
    bst.setParams(n_estimators = 100,
                    learning_rate = 0.02,
                    max_depth = 5,
                    max_features = 'auto',
                    min_samples_leaf= 1,
                    min_samples_split = 2,
                    reg_alpha=0.005,
                    should_cross_val=False, n_jobs = -1, verbose = 1)
    # bst.tune( grid = {'n_estimators': [int(x) for x in np.linspace(start = 10, stop = 2000, num = 20)],
    #             'learning_rate': np.linspace(0.01, 0.2, 10),
    #             'max_features': ['auto', 'sqrt'],
    #             'max_depth': [int(x) for x in np.linspace(5, 100, num = 15)],
    #             'min_samples_split': np.arange(2,20,4),
    #             'min_samples_leaf': np.arange(2,20,4),
    #             'reg_alpha': np.linspace(0.0001, 0.02, 200)},
    #             should_random_search = True,
    #             n_iter = 200)
    bst.xgb_run()
    # bst.load_xgb()
if __name__ == "__main__":
    run()
| [
"sklearn.model_selection.GridSearchCV",
"pprint.pformat",
"xgboost.XGBRegressor",
"numpy.linspace",
"joblib.load",
"xgboost.DMatrix",
"joblib.dump",
"sklearn.model_selection.RandomizedSearchCV"
] | [((2116, 2166), 'xgboost.DMatrix', 'xgb.DMatrix', ([], {'data': 'self.X_train', 'label': 'self.Y_train'}), '(data=self.X_train, label=self.Y_train)\n', (2127, 2166), True, 'import xgboost as xgb\n'), ((2192, 2397), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {'max_depth': 'self.max_depth', 'learning_rate': 'self.learning_rate', 'n_estimators': 'self.n_estimators', 'verbosity': 'self.verbose', 'reg_alpha': 'self.reg_alpha', 'booster': '"""gbtree"""', 'n_jobs': 'self.n_jobs'}), "(max_depth=self.max_depth, learning_rate=self.learning_rate,\n n_estimators=self.n_estimators, verbosity=self.verbose, reg_alpha=self.\n reg_alpha, booster='gbtree', n_jobs=self.n_jobs)\n", (2208, 2397), True, 'import xgboost as xgb\n'), ((3194, 3262), 'joblib.dump', 'joblib.dump', (['model', "(self.directory + f'/{self.name}.pkl')"], {'compress': '(3)'}), "(model, self.directory + f'/{self.name}.pkl', compress=3)\n", (3205, 3262), False, 'import joblib\n'), ((4707, 4725), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (4723, 4725), True, 'import xgboost as xgb\n'), ((5762, 5819), 'joblib.load', 'joblib.load', (["(self.directory + f'/XGBOOST-{self.name}.pkl')"], {}), "(self.directory + f'/XGBOOST-{self.name}.pkl')\n", (5773, 5819), False, 'import joblib\n'), ((1539, 1907), 'pprint.pformat', 'pprint.pformat', (["{'n_estimators': n_estimators, 'learning_rate': learning_rate, 'max_depth':\n max_depth, 'max_features': max_features, 'min_samples_leaf':\n min_samples_leaf, 'min_samples_split': min_samples_split,\n 'should_cross_val': should_cross_val, 'n_jobs': n_jobs, 'verbose':\n verbose, 'reg_alpha': reg_alpha, 'random_state': self.dl.random_state}"], {}), "({'n_estimators': n_estimators, 'learning_rate':\n learning_rate, 'max_depth': max_depth, 'max_features': max_features,\n 'min_samples_leaf': min_samples_leaf, 'min_samples_split':\n min_samples_split, 'should_cross_val': should_cross_val, 'n_jobs':\n n_jobs, 'verbose': verbose, 'reg_alpha': reg_alpha, 
'random_state':\n self.dl.random_state})\n", (1553, 1907), False, 'import os, pprint\n'), ((4083, 4109), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.2)', '(10)'], {}), '(0.01, 0.2, 10)\n', (4094, 4109), True, 'import numpy as np\n'), ((4452, 4482), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.02)', '(200)'], {}), '(0.0001, 0.02, 200)\n', (4463, 4482), True, 'import numpy as np\n'), ((4787, 4900), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'model', 'param_distributions': 'grid', 'n_iter': 'n_iter', 'cv': 'self.k', 'verbose': '(2)', 'n_jobs': '(-1)'}), '(estimator=model, param_distributions=grid, n_iter=n_iter,\n cv=self.k, verbose=2, n_jobs=-1)\n', (4805, 4900), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((5189, 5282), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model', 'param_distributions': 'grid', 'cv': 'self.k', 'verbose': '(2)', 'n_jobs': '(-1)'}), '(estimator=model, param_distributions=grid, cv=self.k, verbose=\n 2, n_jobs=-1)\n', (5201, 5282), False, 'from sklearn.model_selection import GridSearchCV\n'), ((3984, 4025), 'numpy.linspace', 'np.linspace', ([], {'start': '(200)', 'stop': '(2000)', 'num': '(10)'}), '(start=200, stop=2000, num=10)\n', (3995, 4025), True, 'import numpy as np\n'), ((5548, 5590), 'pprint.pformat', 'pprint.pformat', (['search_models.best_params_'], {}), '(search_models.best_params_)\n', (5562, 5590), False, 'import os, pprint\n'), ((4239, 4265), 'numpy.linspace', 'np.linspace', (['(5)', '(20)'], {'num': '(15)'}), '(5, 20, num=15)\n', (4250, 4265), True, 'import numpy as np\n')] |
import os
import json
import numpy as np
from SoccerNet.Downloader import getListGames
from config.classes import EVENT_DICTIONARY_V2, INVERSE_EVENT_DICTIONARY_V2
def predictions2json(predictions_half_1, output_path, framerate=2):
    """Dump the first half's (frame, class) scores as a SoccerNet-v2 JSON file.

    Args:
        predictions_half_1: 2-D array of per-frame, per-class scores.
        output_path: directory to write Predictions-v2.json into (created
            if missing).
        framerate: frames per second used to convert indices to game time.
    """
    os.makedirs(output_path, exist_ok=True)
    out_file = output_path + "/Predictions-v2.json"
    # the >= 0 threshold keeps every non-negative score cell of the matrix
    frame_ids, class_ids = np.where(predictions_half_1 >= 0)
    predictions = []
    for f_idx, c_idx in zip(frame_ids, class_ids):
        score = predictions_half_1[f_idx, c_idx]
        total_seconds = f_idx // framerate
        minutes = int(total_seconds // 60)
        seconds = int(total_seconds % 60)
        predictions.append({
            "gameTime": str(1) + " - " + str(minutes) + ":" + str(seconds),
            "label": INVERSE_EVENT_DICTIONARY_V2[c_idx],
            "position": str(int((f_idx / framerate) * 1000)),  # ms offset
            "half": str(1),
            "confidence": str(score),
        })
    json_data = {"predictions": predictions}
    with open(out_file, 'w') as output_file:
        json.dump(json_data, output_file, indent=4)
| [
"numpy.where",
"json.dump",
"os.makedirs"
] | [((237, 276), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (248, 276), False, 'import os\n'), ((372, 405), 'numpy.where', 'np.where', (['(predictions_half_1 >= 0)'], {}), '(predictions_half_1 >= 0)\n', (380, 405), True, 'import numpy as np\n'), ((1203, 1246), 'json.dump', 'json.dump', (['json_data', 'output_file'], {'indent': '(4)'}), '(json_data, output_file, indent=4)\n', (1212, 1246), False, 'import json\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import io
import warnings
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from xgboost import XGBClassifier
from matplotlib.lines import Line2D
# helper functions
def data_preprocess(df, y_name, verbose, max_lev, transform_date, transform_time, impute):
    """Turn a raw DataFrame into a model-ready (X, y) pair.

    Steps: drop the target column, keep non-object columns, optionally expand
    datetime columns into numeric parts, drop useless columns, impute or drop
    nulls, binarize a two-level target, and one-hot encode what is left.

    Args:
        df: input DataFrame containing the target column.
        y_name: name of the target column.
        verbose: 0 silent, 1 shape/dtype summaries, 2 also column lists/plots.
        max_lev: max number of levels allowed for a categorical column.
        transform_date / transform_time: expand datetime columns into
            year/month/week/dayofweek and hour/minute/second parts.
        impute: True -> fill feature nulls (median / mode); False -> dropna rows.

    Returns:
        (X, y, before_dum_vars) where before_dum_vars is the column list
        captured before dummy expansion (used later to aggregate importances).
    """
    def print_dtypes_summary(df):
        # grab the dtype-count line from DataFrame.info() output
        buffer = io.StringIO()
        df.info(verbose=False, buf=buffer)
        s = buffer.getvalue()
        print('  ', s.split('\n')[-3])
    if verbose > 1:
        print('y:')
        fig = plt.figure(figsize=(3, 1))
        ax = sns.countplot(df[y_name], palette=sns.color_palette("Set1"))
        totals = [i.get_height() for i in ax.patches]
        for i in ax.patches:
            # annotate each bar with its share of the total
            ax.text(i.get_x(), i.get_height(), str(round((i.get_height()/sum(totals))*100))+'%')
        plt.show()
    # original X information
    X = df.drop(columns=y_name)
    if verbose > 0:
        print('original X:')
        print('  shape:', X.shape)
        print_dtypes_summary(X)
        if verbose > 1:
            print('  ', X.columns.tolist())
    # clean X by dtypes: object columns are dropped entirely
    X = X.select_dtypes(exclude=[object])
    before_dum_vars = X.columns.tolist() # this is for wanting to aggregate back all coefs after the model is trained
    to_exclude = []
    for c in X.columns:
        if X[c].dtype.name == 'category':
            if X[c].nunique() > max_lev:
                to_exclude.append(c)
        if 'datetime' in X[c].dtype.name: # notice it will get imputed as numeric next if theres NaT
            to_exclude.append(c)
            if transform_date:
                # NOTE(review): Series.dt.week is deprecated in newer pandas
                # (use .dt.isocalendar().week) — confirm the pinned version.
                X[f'{c}_year'] = X[c].dt.year
                X[f'{c}_month'] = X[c].dt.month
                X[f'{c}_week'] = X[c].dt.week
                X[f'{c}_dayofweek'] = X[c].dt.dayofweek
            if transform_time:
                X[f'{c}_hour'] = X[c].dt.hour
                X[f'{c}_minute'] = X[c].dt.minute
                X[f'{c}_second'] = X[c].dt.second
        # exclude all null or only one lev
        if X[c].notnull().sum() == 0 or X[c].nunique() == 1:
            to_exclude.append(c)
    X = X.drop(columns=to_exclude)
    if verbose > 0:
        print('processed X:')
        print('  shape:', X.shape)
        print_dtypes_summary(X)
        if verbose > 1:
            print('  ', X.columns.tolist())
    # deal with nulls
    if impute:
        # drop rows with a null y no matter what, then impute the features
        X[y_name] = df[y_name]
        X = X[X[y_name].notnull()].reset_index(drop=True)
        y = X.reset_index(drop=True)[y_name]
        X = X.drop(columns=y_name).reset_index(drop=True)
        for c in X.columns:
            if X[c].isnull().any():
                if 'float' in X[c].dtype.name or 'int' in X[c].dtype.name:
                    X[c] = X[c].fillna(X[c].median())
                else:
                    X[c] = X[c].fillna(X[c].mode().iloc[0])
    else:
        X[y_name] = df[y_name] # put y back to dropna together
        X = X.dropna()
        y = X.reset_index(drop=True)[y_name]
        X = X.drop(columns=y_name).reset_index(drop=True)
    if verbose and not impute:
        print('dropna X:')
        print('  shape:', X.shape)
        print_dtypes_summary(X)
    if verbose > 0:
        print('y:')
        fig = plt.figure(figsize=(3, 1))
        ax = sns.countplot(y, palette=sns.color_palette("Set1"))
        totals = [i.get_height() for i in ax.patches]
        for i in ax.patches:
            ax.text(i.get_x(), i.get_height(), str(round((i.get_height()/sum(totals))*100))+'%')
        plt.show()
    # not sure, convert y labels to 0, 1
    if y.nunique() == 2 and y.dtype != 'bool':
        y = y == y.iloc[0]
    # dummy
    X = pd.get_dummies(X)
    if verbose > 0:
        print('dummy X:')
        print('  shape:', X.shape)
        print_dtypes_summary(X)
        if verbose > 1:
            print('  ', X.columns.tolist())
    return X, y, before_dum_vars
def model_selection(X, y, verbose, models, CV, use_metric, random_state, binary):
    """Cross-validate each candidate model and return the best one.

    The winner is the model with the highest mean `use_metric` across the CV
    folds; accuracy is always tracked alongside it for reporting.
    """
    cv_df = pd.DataFrame(index=range(CV * len(models)))
    entries = []
    best_metric, best_model = float("-inf"), None
    for model in models:
        model_name = model.__class__.__name__
        if verbose > 1:
            print('start training', model)
        cv_result = cross_validate(model, X, y, scoring=['accuracy', use_metric], cv=CV, return_train_score=False,
                                   verbose=2 if verbose==2 else 0)
        accuracies = cv_result['test_accuracy']
        f1s = cv_result[f'test_{use_metric}']
        for i in range(len(accuracies)):
            entries.append((model_name, i, accuracies[i], f1s[i]))
        temp_mean_metric = np.mean(cv_result[f'test_{use_metric}'])
        if temp_mean_metric > best_metric:
            best_metric = temp_mean_metric
            best_model = model
    # best_model = models[0] # manually select the final model for debugging, logistic, a lot coefs for multiclass
    # best_model = models[1] # manually select the final model for debugging,
    cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy', use_metric])
    if verbose > 1:
        print(cv_df)
    if verbose > 0:
        # one box/strip facet per metric, models on the x axis
        cv_df_melted = cv_df.melt(id_vars=['model_name', 'fold_idx'], var_name='metric', value_name='score')
        # NOTE(review): FacetGrid's `size` argument was renamed `height` in
        # seaborn 0.9 — confirm the pinned seaborn version supports it.
        g = sns.FacetGrid(cv_df_melted, col='metric', size=6, sharey=False)
        g = g.map(sns.boxplot, 'model_name', 'score', data=cv_df_melted, palette=sns.color_palette("Set2"))
        g = g.map(sns.stripplot, 'model_name', 'score', data=cv_df_melted, palette=sns.color_palette("Set2"),
                  size=8, jitter=True, edgecolor="gray", linewidth=2)
        for ax in g.axes.flat:
            for label in ax.get_xticklabels():
                label.set_rotation(15)
        metric_mean = cv_df.groupby('model_name').agg('mean')
        # `ax` below is the last axis of the loop above (leaked loop variable)
        for i, l in zip(range(metric_mean.shape[0]), ax.get_xticklabels()):
            g.axes.flat[0].text(i, metric_mean.loc[l.get_text(), 'accuracy'], f'{metric_mean.loc[l.get_text(), "accuracy"]*100:.2f}%',
                            horizontalalignment='center', weight='bold', color='black')
            g.axes.flat[1].text(i, metric_mean.loc[l.get_text(), use_metric], f'{metric_mean.loc[l.get_text(), use_metric]*100:.2f}%',
                            horizontalalignment='center', weight='bold', color='black')
        plt.show()
    # best_model_name = metric_mean.sort_values(use_metric, ascending=False).index[0]
    # best_model_name = 'MultinomialNB'
    # return best_model_name
    return best_model
def train_test_CV(X, y, verbose, best_model, CV, random_state, binary):
    """Run a CV-fold fit/predict loop with the chosen model.

    Returns:
        confusion_matrix: mean confusion matrix over the folds.
        y_pred_all / y_test_all: pooled out-of-fold predictions and targets.
        df_coef: per-fold coefficients (or feature importances) indexed by
            feature; multiclass linear models get one column per class/fold.
    """
    if verbose > 1:
        print('random_state:', random_state)
        print('fitting best model:', best_model)
    i = 0
    y_pred_all, y_test_all = [], []
    df_coef = pd.DataFrame(index=X.columns.tolist())
    kf = KFold(n_splits=CV, shuffle=True, random_state=random_state)
    for train_index, test_index in kf.split(X):
        X_train, X_test, y_train, y_test = X.loc[train_index], X.loc[test_index], y[train_index], y[test_index]
        best_model.fit(X_train, y_train)
        y_pred = best_model.predict(X_test)
        y_pred_all += list(y_pred)
        y_test_all += list(y_test)
        if i == 0:
            confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
        else:
            confusion_matrix += pd.crosstab(y_test, y_pred)
        i += 1
        if not binary and 'feature_importances_' not in dir(best_model):
            # one coefficient vector per class for multiclass linear models
            for c in range(y.nunique()):
                df_coef[f'class{c}_cv{i}'] = best_model.coef_[c]
        else:
            df_coef[i] = best_model.feature_importances_ if 'feature_importances_' in dir(best_model) else best_model.coef_[0] # feature_importances_ for xgb and rf, else use coef_
    # BUGFIX: average over the actual number of folds, not a hard-coded 5
    confusion_matrix /= CV
    return confusion_matrix, y_pred_all, y_test_all, df_coef
def classification_result(verbose, confusion_matrix, y_pred_all, y_test_all, CV, best_model_name, binary):
    """Print/plot evaluation artefacts for the pooled CV predictions.

    NOTE: the `confusion_matrix` parameter shadows the sklearn import of the
    same name at module level; kept for interface compatibility.
    """
    if verbose > 0:
        ax = sns.heatmap(confusion_matrix, annot=True, fmt='.1f', cmap='PuBu') # color credit: <NAME>
        ax.set(title=f'mean confusion matrix of {CV}-fold {best_model_name}')
        plt.show()
    if verbose > 0:
        print('classification result:')
        # BUGFIX: sklearn metrics expect (y_true, y_pred); the original passed
        # the predictions first, which swapped precision and recall.
        print(f'  accuracy : {accuracy_score(y_test_all, y_pred_all)*100:.2f}')
        if binary:
            print(f'  false positive: {np.mean(np.logical_and([x==1 for x in y_pred_all], [x!=1 for x in y_test_all]))*100:>5.2f}')
            print(f'  false negative: {np.mean(np.logical_and([x!=1 for x in y_pred_all], [x==1 for x in y_test_all]))*100:>5.2f}')
        if verbose > 1:
            print(f'  precision : {precision_score(y_test_all, y_pred_all)*100:.2f}')
            print(f'  recall : {recall_score(y_test_all, y_pred_all)*100:.2f}')
            print(f'  f1 : {f1_score(y_test_all, y_pred_all)*100:.2f}')
    if verbose > 1:
        print(classification_report(y_test_all, y_pred_all))
def coef_to_feat_imp(df_coef, c, use_coef, binary):
    """Collapse per-fold coefficients/importances into one value per feature.

    Args:
        df_coef: DataFrame of per-fold values indexed by feature; for a
            multiclass linear model the columns look like 'class{k}_cv{fold}'.
        c: sequence of class labels (only its length is used here).
        use_coef: True when the values are linear-model coefficients.
        binary: True for a two-class problem.

    Returns:
        Series of importances indexed by feature.
    """
    if not use_coef:
        # tree-style importances: just average across folds
        return df_coef.apply('mean', axis=1)
    if binary:
        # single coefficient vector per fold: keep the largest magnitude
        return df_coef.abs().max(axis=1)
    # one mean-coefficient column per class, then take the largest magnitude —
    # sign is not comparable across classes, so only |coef| is kept
    per_class_mean = pd.DataFrame()
    for class_idx in range(len(c)):
        fold_cols = [name for name in df_coef.columns if f'class{class_idx}' in name]
        per_class_mean[class_idx] = df_coef[fold_cols].apply('mean', axis=1)
    return per_class_mean.abs().max(axis=1)
def plot_detailed_feature_importance(best_model, df_coef, binary, CV):
    '''if the model use coef, plot for every class (target)
    if the model use feature importance, plot only 1 plot'''
    use_coef = 'feature_importances_' not in dir(best_model)
    for c in range(len(best_model.classes_)):
        if not binary and use_coef:
            df_coef_c = df_coef[[cn for cn in df_coef.columns if f'class{c}' in cn]].copy()
        else:
            # BUGFIX: copy so the summary columns added below do not leak into
            # the caller's df_coef (the original aliased and mutated it).
            df_coef_c = df_coef.copy()
        df_coef_c['mean feature importance'] = df_coef_c.apply('mean', axis=1)
        df_coef_c['abs feature importance'] = np.abs(df_coef_c['mean feature importance'])
        fig = plt.figure(figsize=(10, 5))
        # NOTE(review): the bar colours are taken in the original (unsorted)
        # row order while the bars themselves are sorted — confirm whether the
        # positive/negative colouring is meant to follow the sorted bars.
        ax = df_coef_c.sort_values('abs feature importance', ascending=False).head(20)['abs feature importance'].plot.bar(
            color=['darkblue' if x > 0 else 'crimson' for x in df_coef_c['mean feature importance']])
        ax.legend([Line2D([0], [0], color='darkblue', lw=8), Line2D([0], [0], color='crimson', lw=8)],
                  ['positive', 'negative'])
        ax.set(title=f'mean feature importance for {best_model.classes_[c]} of {CV}-fold {best_model.__class__.__name__}', ylabel='abs feature importance')
        for label in ax.get_xticklabels():
            label.set_rotation(45)
            label.set_ha('right')
        if binary:
            break
        if not use_coef:
            ax.set(title=f'mean feature importance of {CV}-fold {best_model.__class__.__name__}')
            break
def plot_summary_feature_importance(feat_imp, best_model, CV, use_coef):
    """Bar-plot the 20 largest aggregated feature importances in one figure.

    Multiclass importances have already been collapsed into a single series,
    so a single plot covers all classes.
    """
    model_name = best_model.__class__.__name__
    if use_coef:
        title = f'mean feature importance from abs max coef on all classes of {CV}-fold {model_name}'
    else:
        title = f'mean feature importance of {CV}-fold {model_name}'
    plt.figure(figsize=(10, 5))
    top_features = feat_imp.sort_values(ascending=False).head(20)
    axes = top_features.plot.bar(color='grey')
    axes.set(title=title, ylabel='abs feature importance')
    for tick in axes.get_xticklabels():
        tick.set_rotation(45)
        tick.set_ha('right')
    plt.show()
def agg_feat_imp(feat_imp, before_dum_vars):
    """Aggregate dummy-level importances back onto the original columns.

    For each original column, takes the max importance over the column itself
    and its one-hot dummies (named '{col}_{level}' by pd.get_dummies).

    BUGFIX: the original used plain substring matching (`v in i`), so a
    column named 'age' wrongly claimed the dummies of 'age_man' (a problem
    the author's own comment pointed out). Here each dummy is claimed by the
    LONGEST matching original name exactly once.
    """
    agged_fi = pd.Series(index=before_dum_vars, dtype=float)
    claimed = set()
    # visit longer names first so 'age_man' claims its dummies before 'age'
    for v in sorted(before_dum_vars, key=len, reverse=True):
        matches = [i for i in feat_imp.index
                   if i not in claimed and (i == v or i.startswith(v + '_'))]
        claimed.update(matches)
        agged_fi[v] = feat_imp[matches].max() if matches else np.nan
    return agged_fi
# main singal task functions
def fit_classification(df, y_name, verbose, max_lev, transform_date, transform_time, impute,
                       CV, class_weight, use_metric,
                       return_agg_feat_imp):
    """End-to-end classification pipeline.

    Preprocesses the data, picks the best of four candidate models via
    cross-validation, re-fits the winner in a CV loop, reports metrics, and
    plots feature importances.

    Returns:
        Aggregated feature importances (Series) when return_agg_feat_imp is
        True, otherwise None.
    """
    if verbose < 2:
        warnings.simplefilter('ignore', UserWarning)
    else:
        warnings.simplefilter('always', UserWarning)
    # data preprocessing
    X, y, before_dum_vars = data_preprocess(df, y_name, verbose, max_lev, transform_date, transform_time, impute)
    # model selection
    random_state = np.random.randint(1e8)
    binary = True if y.nunique() == 2 else False
    # use_metric = 'accuracy' if not binary else use_metric
    models = [
        LogisticRegression(random_state=random_state, class_weight=class_weight, solver='lbfgs', multi_class='auto'),
        RandomForestClassifier(random_state=random_state, class_weight=class_weight, n_estimators=100),
        LinearSVC(random_state=random_state, class_weight=class_weight),
        # MultinomialNB(), # not sure if this is weighted or not
        XGBClassifier(random_state=random_state,
                      scale_pos_weight=y.value_counts(normalize=True).iloc[0] if class_weight=='balanced' and binary else 1)
    ]
    # best_model_name = model_selection(X, y, verbose, models, CV, use_metric, random_state, binary)
    best_model = model_selection(X, y, verbose, models, CV, use_metric, random_state, binary)
    # final train on best model (CV) with a fresh random state for the folds
    # best_model = [model for model in models if model.__class__.__name__ == best_model_name][0]
    random_state = np.random.randint(1e8)
    confusion_matrix, y_pred_all, y_test_all, df_coef = train_test_CV(X, y, verbose, best_model, CV, random_state, binary)
    # confusion matrix and metrics
    classification_result(verbose, confusion_matrix, y_pred_all, y_test_all, CV, best_model.__class__.__name__ , binary)
    # plot feature importance
    # use_coef = 'feature_importances_' not in dir(best_model)
    # plot_feature_importance(df_coef, y, binary, CV, use_coef, best_model_name)
    # plot_feature_importance(df_coef, y, binary, CV, use_coef, best_model.__class__.__name__)
    use_coef = 'feature_importances_' not in dir(best_model)
    feat_imp = coef_to_feat_imp(df_coef, best_model.classes_, use_coef, binary)
    if verbose > 0:
        plot_summary_feature_importance(feat_imp, best_model, CV, use_coef)
    if verbose > 1:
        plot_detailed_feature_importance(best_model, df_coef, binary, CV)
    if return_agg_feat_imp:
        fi = agg_feat_imp(feat_imp, before_dum_vars)
        return fi
    return
def fit(df, y_name, verbose=1, max_lev=10, transform_date=True, transform_time=False, impute=True,
        CV=5, class_weight='balanced', use_metric='f1_weighted', return_agg_feat_imp=False):
    """Dispatch on the target dtype: low-cardinality targets go to the
    classification pipeline; other target kinds are not handled yet."""
    target = df[y_name]
    if target.nunique() <= max_lev:  # bool or category -> classification
        return fit_classification(df=df, y_name=y_name, verbose=verbose, max_lev=max_lev,
                                  transform_date=transform_date, transform_time=transform_time,
                                  impute=impute, CV=CV, class_weight=class_weight,
                                  use_metric=use_metric,
                                  return_agg_feat_imp=return_agg_feat_imp)
    dtype_name = target.dtype.name
    if 'float' in dtype_name or 'int' in dtype_name:
        print("didnt handle numeric yet")
    else:
        print('didnt handle this kind of y now')
# def get_fitted_feat_imp(df, y_name, **kwargs):
# print(**kwargs)
# fit(df, y_name, kwargs) | [
"sklearn.metrics.classification_report",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.model_selection.KFold",
"matplotlib.lines.Line2D",
"numpy.mean",
"seaborn.color_palette",
"pandas.DataFrame",
"warnings.simplefilter",
"io.StringIO",
"numpy.abs",
"sklearn.model... | [((4194, 4211), 'pandas.get_dummies', 'pd.get_dummies', (['X'], {}), '(X)\n', (4208, 4211), True, 'import pandas as pd\n'), ((5547, 5632), 'pandas.DataFrame', 'pd.DataFrame', (['entries'], {'columns': "['model_name', 'fold_idx', 'accuracy', use_metric]"}), "(entries, columns=['model_name', 'fold_idx', 'accuracy',\n use_metric])\n", (5559, 5632), True, 'import pandas as pd\n'), ((7329, 7388), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'CV', 'shuffle': '(True)', 'random_state': 'random_state'}), '(n_splits=CV, shuffle=True, random_state=random_state)\n', (7334, 7388), False, 'from sklearn.model_selection import KFold\n'), ((11879, 11906), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (11889, 11906), True, 'import matplotlib.pyplot as plt\n'), ((12383, 12393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12391, 12393), True, 'import matplotlib.pyplot as plt\n'), ((12455, 12487), 'pandas.Series', 'pd.Series', ([], {'index': 'before_dum_vars'}), '(index=before_dum_vars)\n', (12464, 12487), True, 'import pandas as pd\n'), ((13344, 13374), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (13361, 13374), True, 'import numpy as np\n'), ((14371, 14401), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (14388, 14401), True, 'import numpy as np\n'), ((865, 878), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (876, 878), False, 'import io\n'), ((1047, 1073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 1)'}), '(figsize=(3, 1))\n', (1057, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1344, 1346), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 1)'}), '(figsize=(3, 1))\n', (3776, 3792), True, 'import matplotlib.pyplot as plt\n'), 
((4046, 4056), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4054, 4056), True, 'import matplotlib.pyplot as plt\n'), ((4794, 4926), 'sklearn.model_selection.cross_validate', 'cross_validate', (['model', 'X', 'y'], {'scoring': "['accuracy', use_metric]", 'cv': 'CV', 'return_train_score': '(False)', 'verbose': '(2 if verbose == 2 else 0)'}), "(model, X, y, scoring=['accuracy', use_metric], cv=CV,\n return_train_score=False, verbose=2 if verbose == 2 else 0)\n", (4808, 4926), False, 'from sklearn.model_selection import cross_validate\n'), ((5167, 5207), 'numpy.mean', 'np.mean', (["cv_result[f'test_{use_metric}']"], {}), "(cv_result[f'test_{use_metric}'])\n", (5174, 5207), True, 'import numpy as np\n'), ((5812, 5875), 'seaborn.FacetGrid', 'sns.FacetGrid', (['cv_df_melted'], {'col': '"""metric"""', 'size': '(6)', 'sharey': '(False)'}), "(cv_df_melted, col='metric', size=6, sharey=False)\n", (5825, 5875), True, 'import seaborn as sns\n'), ((6845, 6855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6853, 6855), True, 'import matplotlib.pyplot as plt\n'), ((8535, 8600), 'seaborn.heatmap', 'sns.heatmap', (['confusion_matrix'], {'annot': '(True)', 'fmt': '""".1f"""', 'cmap': '"""PuBu"""'}), "(confusion_matrix, annot=True, fmt='.1f', cmap='PuBu')\n", (8546, 8600), True, 'import seaborn as sns\n'), ((8710, 8720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8718, 8720), True, 'import matplotlib.pyplot as plt\n'), ((10813, 10857), 'numpy.abs', 'np.abs', (["df_coef_c['mean feature importance']"], {}), "(df_coef_c['mean feature importance'])\n", (10819, 10857), True, 'import numpy as np\n'), ((10872, 10899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (10882, 10899), True, 'import matplotlib.pyplot as plt\n'), ((13054, 13098), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (13075, 13098), False, 'import warnings\n'), 
((13117, 13161), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'UserWarning'], {}), "('always', UserWarning)\n", (13138, 13161), False, 'import warnings\n'), ((13499, 13611), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': 'random_state', 'class_weight': 'class_weight', 'solver': '"""lbfgs"""', 'multi_class': '"""auto"""'}), "(random_state=random_state, class_weight=class_weight,\n solver='lbfgs', multi_class='auto')\n", (13517, 13611), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13617, 13715), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'random_state', 'class_weight': 'class_weight', 'n_estimators': '(100)'}), '(random_state=random_state, class_weight=class_weight,\n n_estimators=100)\n', (13639, 13715), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13721, 13784), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': 'random_state', 'class_weight': 'class_weight'}), '(random_state=random_state, class_weight=class_weight)\n', (13730, 13784), False, 'from sklearn.svm import LinearSVC\n'), ((7757, 7829), 'pandas.crosstab', 'pd.crosstab', (['y_test', 'y_pred'], {'rownames': "['Actual']", 'colnames': "['Predicted']"}), "(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])\n", (7768, 7829), True, 'import pandas as pd\n'), ((7876, 7903), 'pandas.crosstab', 'pd.crosstab', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7887, 7903), True, 'import pandas as pd\n'), ((9492, 9537), 'sklearn.metrics.classification_report', 'classification_report', (['y_test_all', 'y_pred_all'], {}), '(y_test_all, y_pred_all)\n', (9513, 9537), False, 'from sklearn.metrics import classification_report\n'), ((9659, 9673), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9671, 9673), True, 'import pandas as pd\n'), ((1121, 1146), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {}), "('Set1')\n", (1138, 1146), 
True, 'import seaborn as sns\n'), ((3831, 3856), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {}), "('Set1')\n", (3848, 3856), True, 'import seaborn as sns\n'), ((5957, 5982), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (5974, 5982), True, 'import seaborn as sns\n'), ((6067, 6092), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (6084, 6092), True, 'import seaborn as sns\n'), ((11144, 11184), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""darkblue"""', 'lw': '(8)'}), "([0], [0], color='darkblue', lw=8)\n", (11150, 11184), False, 'from matplotlib.lines import Line2D\n'), ((11186, 11225), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""crimson"""', 'lw': '(8)'}), "([0], [0], color='crimson', lw=8)\n", (11192, 11225), False, 'from matplotlib.lines import Line2D\n'), ((8819, 8857), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_pred_all', 'y_test_all'], {}), '(y_pred_all, y_test_all)\n', (8833, 8857), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((9934, 9943), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (9940, 9943), True, 'import numpy as np\n'), ((10110, 10119), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (10116, 10119), True, 'import numpy as np\n'), ((9414, 9446), 'sklearn.metrics.f1_score', 'f1_score', (['y_pred_all', 'y_test_all'], {}), '(y_pred_all, y_test_all)\n', (9422, 9446), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((8937, 9015), 'numpy.logical_and', 'np.logical_and', (['[(x == 1) for x in y_pred_all]', '[(x != 1) for x in y_test_all]'], {}), '([(x == 1) for x in y_pred_all], [(x != 1) for x in y_test_all])\n', (8951, 9015), True, 'import numpy as np\n'), ((9071, 9149), 'numpy.logical_and', 'np.logical_and', (['[(x != 1) for x in y_pred_all]', '[(x == 1) for x in y_test_all]'], {}), '([(x != 1) for x in 
y_pred_all], [(x == 1) for x in y_test_all])\n', (9085, 9149), True, 'import numpy as np\n'), ((9229, 9268), 'sklearn.metrics.precision_score', 'precision_score', (['y_pred_all', 'y_test_all'], {}), '(y_pred_all, y_test_all)\n', (9244, 9268), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((9325, 9361), 'sklearn.metrics.recall_score', 'recall_score', (['y_pred_all', 'y_test_all'], {}), '(y_pred_all, y_test_all)\n', (9337, 9361), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n')] |
#!/usr/bin/env python
# encoding: utf-8
'''
@project : MSRGCN
@file : draw_pictures.py
@author : Droliven
@contact : <EMAIL>
@ide : PyCharm
@time : 2021-07-27 21:22
'''
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
def draw_pic_single(mydata, I, J, LR, full_path):
    """Plot one 3D pose (joints + bones) and save the figure.

    mydata : (num_joints, 3) joint coordinates.
    I, J   : bone endpoint indices; bone i joins mydata[I[i]] and mydata[J[i]].
    LR     : per-bone left/right flag (currently has no visual effect here,
             both branches of the original colour choice were identical).
    full_path : output image file path.
    """
    # Swap the last two coordinate axes so the vertical axis lands on
    # matplotlib's z axis.
    pose = mydata[:, [0, 2, 1]]

    plt.figure()
    ax = plt.subplot(111, projection='3d')
    ax.grid(False)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d([-1000, 1000])
    ax.set_ylim3d([-1000, 1000])
    ax.set_zlim3d([-1000, 1000])

    # Joints as blue dots.
    ax.scatter(pose[:, 0], pose[:, 1], pose[:, 2], c='b')

    # Bones as grey segments.  NOTE(review): the original conditional colour
    # ('#B4B4B4' if LR[i] else '#B4B4B4') had identical branches, so every
    # bone is drawn in the same grey regardless of LR.
    for a, b in zip(I, J):
        seg = np.stack([pose[a], pose[b]])
        ax.plot(seg[:, 0], seg[:, 1], seg[:, 2], lw=2, c='#B4B4B4')

    plt.savefig(full_path)
    plt.close()
def draw_pic_single_2d(mydata, I, J, LR, full_path):
    """Plot one 2D pose projection (joints + bones) and save the figure.

    mydata : (num_joints, >=2) coordinates; only columns 0 and 1 are used.
    I, J   : bone endpoint indices.
    LR     : per-bone flag; flagged bones are drawn green, others blue.
    full_path : output image file path.
    """
    plt.figure(figsize=(6, 6))

    # Joints as red dots.
    plt.scatter(mydata[:, 0], mydata[:, 1], c='r')

    # Bones, coloured by the left/right flag.
    for idx, (a, b) in enumerate(zip(I, J)):
        xs = np.array([mydata[a, 0], mydata[b, 0]])
        ys = np.array([mydata[a, 1], mydata[b, 1]])
        plt.plot(xs, ys, lw=2, color='g' if LR[idx] else 'b')

    plt.xlim((-800, 800))
    plt.ylim((-1500, 800))
    # Axis labels.
    plt.xlabel('x')
    plt.ylabel('y')

    # Tick marks every 200 units on both axes.
    ticks = np.arange(-1000, 1000, 200)
    plt.xticks(ticks)
    plt.yticks(ticks)
    plt.grid(False)

    plt.savefig(full_path)
    plt.close(1)
def draw_pic_gt_pred(gt, pred, I, J, LR, full_path):
    """Overlay a ground-truth and a predicted 3D pose and save the figure.

    gt, pred : (num_joints, 3) joint coordinates.
    I, J     : bone endpoint indices.
    LR       : per-bone flag selecting the darker vs lighter colour variant.
    full_path: output image file path.
    """
    # Swap the last two coordinate axes so the vertical axis lands on
    # matplotlib's z axis.
    gt = gt[:, [0, 2, 1]]
    pred = pred[:, [0, 2, 1]]

    plt.figure()
    ax = plt.subplot(111, projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d([-1000, 1000])
    ax.set_ylim3d([-1000, 1000])
    ax.set_zlim3d([-1000, 1000])

    # Joints: ground truth in black, prediction in red.
    ax.scatter(gt[:, 0], gt[:, 1], gt[:, 2], c='k', linewidths=1)
    ax.scatter(pred[:, 0], pred[:, 1], pred[:, 2], c='r', linewidths=1)

    def _draw_bones(points, width, dark, light):
        # One segment per (I[i], J[i]) pair, coloured by LR[i].
        for idx, (a, b) in enumerate(zip(I, J)):
            seg = np.stack([points[a], points[b]])
            ax.plot(seg[:, 0], seg[:, 1], seg[:, 2], lw=width,
                    color=dark if LR[idx] else light)

    _draw_bones(gt, 1, '#0B0B0B', '#B4B4B4')    # ground truth: black / grey
    _draw_bones(pred, 2, '#FA2828', '#F57D7D')  # prediction: red / pink

    plt.savefig(full_path)
    plt.close()
def draw_pic_gt_pred_2d(gt, pred, I, J, LR, full_path):
    """Overlay a ground-truth and a predicted 2D pose and save the figure.

    gt, pred : (num_joints, >=2) coordinates; only columns 0 and 1 are used.
    I, J     : bone endpoint indices.
    LR       : per-bone flag selecting the darker vs lighter colour variant.
    full_path: output image file path.
    """
    plt.figure(figsize=(6, 6))

    # Joints: ground truth in black, prediction in red.
    plt.scatter(gt[:, 0], gt[:, 1], c='k', linewidths=1)
    plt.scatter(pred[:, 0], pred[:, 1], c='r', linewidths=1)

    def _draw_bones(points, width, dark, light):
        # One segment per (I[i], J[i]) pair, coloured by LR[i].
        for idx, (a, b) in enumerate(zip(I, J)):
            xs = np.array([points[a, 0], points[b, 0]])
            ys = np.array([points[a, 1], points[b, 1]])
            plt.plot(xs, ys, lw=width, color=dark if LR[idx] else light)

    _draw_bones(gt, 1, '#0B0B0B', '#B4B4B4')    # ground truth: black / grey
    _draw_bones(pred, 2, '#FA2828', '#F57D7D')  # prediction: red / pink

    plt.xlim((-800, 800))
    plt.ylim((-1500, 800))
    # Axis labels.
    plt.xlabel('x')
    plt.ylabel('y')

    # Tick marks every 200 units on both axes.
    ticks = np.arange(-1000, 1000, 200)
    plt.xticks(ticks)
    plt.yticks(ticks)
    plt.grid(False)

    plt.savefig(full_path)
    plt.close(1)
if __name__ == "__main__":
    # NOTE(review): this re-import is redundant (numpy is imported at module
    # top) and `data` is never used — looks like a leftover manual smoke test.
    import numpy as np
    data = np.random.randn(220, 220)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.randn",
"matplotlib.pyplot.... | [((217, 238), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (231, 238), False, 'import matplotlib\n'), ((670, 682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (680, 682), True, 'import matplotlib.pyplot as plt\n'), ((692, 725), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': '"""3d"""'}), "(111, projection='3d')\n", (703, 725), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1311), 'matplotlib.pyplot.savefig', 'plt.savefig', (['full_path'], {}), '(full_path)\n', (1300, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1327), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1325, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1429, 1455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1439, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1485), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""r"""'}), "(x, y, c='r')\n", (1472, 1485), True, 'import matplotlib.pyplot as plt\n'), ((1965, 1986), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-800, 800)'], {}), '((-800, 800))\n', (1973, 1986), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2013), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1500, 800)'], {}), '((-1500, 800))\n', (1999, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2042, 2047), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2067), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2062, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2126), 'numpy.arange', 'np.arange', (['(-1000)', '(1000)', '(200)'], {}), '(-1000, 1000, 200)\n', (2108, 2126), True, 'import numpy as np\n'), ((2144, 2171), 'numpy.arange', 'np.arange', (['(-1000)', '(1000)', '(200)'], {}), '(-1000, 1000, 200)\n', (2153, 2171), True, 'import numpy as np\n'), ((2176, 2198), 
'matplotlib.pyplot.xticks', 'plt.xticks', (['my_x_ticks'], {}), '(my_x_ticks)\n', (2186, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2225), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (2213, 2225), True, 'import matplotlib.pyplot as plt\n'), ((2230, 2245), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2238, 2245), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['full_path'], {}), '(full_path)\n', (2262, 2273), True, 'import matplotlib.pyplot as plt\n'), ((2278, 2290), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (2287, 2290), True, 'import matplotlib.pyplot as plt\n'), ((2551, 2563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2561, 2563), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2606), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'projection': '"""3d"""'}), "(111, projection='3d')\n", (2584, 2606), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['full_path'], {}), '(full_path)\n', (3448, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3464, 3475), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3473, 3475), True, 'import matplotlib.pyplot as plt\n'), ((3538, 3564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3548, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3622), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gt[:, 0]', 'gt[:, 1]'], {'c': '"""k"""', 'linewidths': '(1)'}), "(gt[:, 0], gt[:, 1], c='k', linewidths=1)\n", (3581, 3622), True, 'import matplotlib.pyplot as plt\n'), ((3627, 3683), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pred[:, 0]', 'pred[:, 1]'], {'c': '"""r"""', 'linewidths': '(1)'}), "(pred[:, 0], pred[:, 1], c='r', linewidths=1)\n", (3638, 3683), True, 'import matplotlib.pyplot as plt\n'), ((4197, 4218), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(-800, 800)'], {}), '((-800, 800))\n', (4205, 4218), True, 'import matplotlib.pyplot as plt\n'), ((4223, 4245), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1500, 800)'], {}), '((-1500, 800))\n', (4231, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4274, 4279), True, 'import matplotlib.pyplot as plt\n'), ((4284, 4299), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4294, 4299), True, 'import matplotlib.pyplot as plt\n'), ((4331, 4358), 'numpy.arange', 'np.arange', (['(-1000)', '(1000)', '(200)'], {}), '(-1000, 1000, 200)\n', (4340, 4358), True, 'import numpy as np\n'), ((4376, 4403), 'numpy.arange', 'np.arange', (['(-1000)', '(1000)', '(200)'], {}), '(-1000, 1000, 200)\n', (4385, 4403), True, 'import numpy as np\n'), ((4408, 4430), 'matplotlib.pyplot.xticks', 'plt.xticks', (['my_x_ticks'], {}), '(my_x_ticks)\n', (4418, 4430), True, 'import matplotlib.pyplot as plt\n'), ((4435, 4457), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (4445, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4462, 4477), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4470, 4477), True, 'import matplotlib.pyplot as plt\n'), ((4483, 4505), 'matplotlib.pyplot.savefig', 'plt.savefig', (['full_path'], {}), '(full_path)\n', (4494, 4505), True, 'import matplotlib.pyplot as plt\n'), ((4510, 4522), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (4519, 4522), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4612), 'numpy.random.randn', 'np.random.randn', (['(220)', '(220)'], {}), '(220, 220)\n', (4602, 4612), True, 'import numpy as np\n'), ((1910, 1959), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'lw': '(2)', 'color': "('g' if LR[i] else 'b')"}), "(x, y, lw=2, color='g' if LR[i] else 'b')\n", (1918, 1959), True, 'import matplotlib.pyplot as plt\n'), 
((3952, 4013), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'lw': '(1)', 'color': "('#0B0B0B' if LR[i] else '#B4B4B4')"}), "(x, y, lw=1, color='#0B0B0B' if LR[i] else '#B4B4B4')\n", (3960, 4013), True, 'import matplotlib.pyplot as plt\n'), ((4130, 4191), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'lw': '(2)', 'color': "('#FA2828' if LR[i] else '#F57D7D')"}), "(x, y, lw=2, color='#FA2828' if LR[i] else '#F57D7D')\n", (4138, 4191), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1196), 'numpy.array', 'np.array', (['[mydata[I[i], j], mydata[J[i], j]]'], {}), '([mydata[I[i], j], mydata[J[i], j]])\n', (1160, 1196), True, 'import numpy as np\n'), ((1690, 1734), 'numpy.array', 'np.array', (['[mydata[I[i], j], mydata[J[i], j]]'], {}), '([mydata[I[i], j], mydata[J[i], j]])\n', (1698, 1734), True, 'import numpy as np\n'), ((3121, 3157), 'numpy.array', 'np.array', (['[gt[I[i], j], gt[J[i], j]]'], {}), '([gt[I[i], j], gt[J[i], j]])\n', (3129, 3157), True, 'import numpy as np\n'), ((3300, 3340), 'numpy.array', 'np.array', (['[pred[I[i], j], pred[J[i], j]]'], {}), '([pred[I[i], j], pred[J[i], j]])\n', (3308, 3340), True, 'import numpy as np\n'), ((3888, 3924), 'numpy.array', 'np.array', (['[gt[I[i], j], gt[J[i], j]]'], {}), '([gt[I[i], j], gt[J[i], j]])\n', (3896, 3924), True, 'import numpy as np\n'), ((4062, 4102), 'numpy.array', 'np.array', (['[pred[I[i], j], pred[J[i], j]]'], {}), '([pred[I[i], j], pred[J[i], j]])\n', (4070, 4102), True, 'import numpy as np\n')] |
import math
import numpy as np
import torch
from torch import nn
import copy
import random
import concurrent.futures
## Distributions
def generate_gaussian_parity(n, cov_scale=1, angle_params=None, k=1, acorn=None):
    """Generate Gaussian XOR: a mixture of four Gaussians belonging to two classes.

    The blobs are centred at (-1,-1), (1,1), (1,-1), (-1,1); the first two
    (the main diagonal) receive label 1 and the other two label 0.  The whole
    cloud is then rotated by ``angle_params`` degrees.

    Args:
        n: total number of samples, split multinomially across the 4 blobs.
        cov_scale: isotropic covariance scale of each blob.
        angle_params: rotation angle in degrees (must be provided; there is
            no default rotation).
        k: unused; kept for interface compatibility.
        acorn: optional RNG seed for reproducibility.

    Returns:
        X: (n, 2) float array of rotated samples.
        Y: (n,) int array of class labels in {0, 1}.
    """
    if acorn is not None:
        # Bug fix: ``acorn`` used to be accepted but silently ignored, so
        # passing a seed did not make runs reproducible.
        np.random.seed(acorn)

    blob_num = 4
    # Number of samples in each blob, drawn with equal probability.
    samples_per_blob = np.random.multinomial(
        n, 1 / blob_num * np.ones(blob_num)
    )
    means = [[-1, -1], [1, 1], [1, -1], [-1, 1]]
    blob = np.concatenate(
        [
            np.random.multivariate_normal(
                mean, cov_scale * np.eye(len(mean)), size=samples_per_blob[i]
            )
            for i, mean in enumerate(means)
        ]
    )
    X = np.zeros_like(blob)
    # Blobs 0 and 1 (means (-1,-1) and (1,1)) get label 1, blobs 2 and 3 label 0.
    Y = np.concatenate([np.ones(samples_per_blob[i]) * int(i < 2) for i in range(len(means))])
    # 2D rotation by angle_params degrees.
    X[:, 0] = blob[:, 0] * np.cos(angle_params * np.pi / 180) + blob[:, 1] * np.sin(
        angle_params * np.pi / 180
    )
    X[:, 1] = -blob[:, 0] * np.sin(angle_params * np.pi / 180) + blob[:, 1] * np.cos(
        angle_params * np.pi / 180
    )
    return X, Y.astype(int)
## Network functions
# Polytope functions
def get_polytopes(model, train_x, penultimate=False):
    """
    Return the polytope membership of each sample.

    Each sample is encoded by the binary on/off pattern its activations
    induce at every Linear layer; samples with identical patterns belong
    to the same polytope.

    Args:
        model: network whose ``nn.Linear`` layers are scanned via ``model.modules()``.
        train_x: input batch (torch tensor; moved to CPU / NumPy here).
        penultimate: if True, return the activations captured at the final
            layer iteration instead of the running activations.

    Returns:
        (polytope_memberships, activations): a one-element list holding one
        integer polytope code per sample, plus the activation matrix.
    """
    polytope_memberships = []
    last_activations = train_x.cpu().numpy()
    penultimate_act = None
    # NOTE(review): `type(...) ==` collects only exact nn.Linear instances,
    # excluding subclasses; isinstance() would be broader.
    layers = [module for module in model.modules() if type(module) == torch.nn.Linear]
    for layer_id, layer in enumerate(layers):
        weights, bias = layer.weight.data.detach().cpu().numpy(), layer.bias.data.detach().cpu().numpy()
        # Affine map of the layer computed in NumPy.
        preactivation = np.matmul(last_activations, weights.T) + bias
        if layer_id == len(layers) - 1:
            # NOTE(review): the last layer thresholds at 0.5 rather than 0 —
            # presumably treating the output as a decision score; confirm.
            binary_preactivation = (preactivation > 0.5).astype('int')
        else:
            binary_preactivation = (preactivation > 0).astype('int')
        polytope_memberships.append(binary_preactivation)
        # Masking by the binary pattern reproduces the ReLU activation.
        last_activations = preactivation * binary_preactivation
        if penultimate and layer_id == len(layers) - 1:
            # NOTE(review): despite the name, this stores the masked output of
            # the *last* Linear layer, not the layer before it.
            penultimate_act = last_activations
    # Pack the concatenated bit pattern into one integer per sample via
    # powers of two (dot product with 2**k weights).
    polytope_memberships = [np.tensordot(np.concatenate(polytope_memberships, axis = 1), 2 ** np.arange(0, np.shape(np.concatenate(polytope_memberships, axis = 1))[1]), axes = 1)]
    if penultimate:
        return polytope_memberships, penultimate_act
    return polytope_memberships, last_activations
# Model
class Net(nn.Module):
    """ DeepNet class
    A deep net architecture with `n_hidden` layers,
    each having `hidden_size` nodes.

    Layer layout: Linear(in_dim, hidden) followed by n_hidden blocks of
    activation [+ BatchNorm1d] + Linear, an optional penultimate 2-unit
    bottleneck, then activation [+ BatchNorm1d] + Linear(out_dim).
    `self.layer` counts only the Linear/activation steps appended with an
    explicit `self.layer += 1`; `freeze_net` relies on this counter.
    """
    def __init__(self, in_dim, out_dim, hidden_size=10, n_hidden=2,
                activation=torch.nn.ReLU(), bias=False, penultimate=False, bn=False):
        # NOTE(review): the default activation is a single module instance
        # created at class-definition time and re-appended many times; this is
        # harmless for a stateless ReLU but would be shared state otherwise.
        super(Net, self).__init__()
        module = nn.ModuleList()
        module.append(nn.Linear(in_dim, hidden_size, bias=bias))
        self.layer = 1
        self.bias = bias
        for ll in range(n_hidden):
            module.append( activation )
            self.layer += 1
            if bn:
                # NOTE(review): BatchNorm layers are appended without bumping
                # self.layer, which shifts the even/odd indexing that
                # freeze_net assumes — confirm bn=True is compatible.
                module.append( nn.BatchNorm1d( hidden_size ) )
            module.append( nn.Linear(hidden_size, hidden_size, bias=bias) )
            self.layer += 1
        if penultimate:
            # Optional 2-unit bottleneck before the output layer.
            module.append( activation )
            self.layer += 1
            if bn:
                module.append( nn.BatchNorm1d( hidden_size ) )
            module.append( nn.Linear(hidden_size, 2, bias=bias) )
            self.layer += 1
            hidden_size = 2
        module.append( activation )
        self.layer += 1
        if bn:
            module.append( nn.BatchNorm1d( hidden_size ) )
        module.append( nn.Linear(hidden_size, out_dim, bias=bias) )
        self.layer += 1
        self.sequential = nn.Sequential(*module)
    def freeze_net(self):
        # Freeze every Linear layer except the last one; assumes Linear
        # layers sit at the even indices of self.sequential.
        for layer in range(0,self.layer-1,2):
            self.sequential[layer].weight.requires_grad = False
            if self.bias:
                self.sequential[layer].bias.requires_grad = False
    def forward(self, x):
        # Straight pass through the stacked layers.
        return self.sequential(x)
def train_model(model, train_x, train_y, lr=0.01, iteration=100, multi_label=False, verbose=False):
    """Fit ``model`` on (train_x, train_y) with Adam and BCE-with-logits loss.

    Args:
        model: torch module producing logits for train_x.
        train_x, train_y: training tensors (full-batch updates).
        lr: Adam learning rate.
        iteration: number of full-batch gradient steps.
        multi_label: cast targets to the logits' dtype before the loss.
        verbose: print the loss every 500 steps.

    Returns:
        List of per-step training losses (floats, length ``iteration``).
    """
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.BCEWithLogitsLoss()

    history = []
    for step in range(iteration):
        opt.zero_grad()
        logits = model(train_x)
        if multi_label:
            # BCEWithLogitsLoss needs targets with the logits' dtype.
            train_y = train_y.type_as(logits)
        loss = criterion(logits, train_y)

        current = loss.detach().item()
        if verbose and step % 500 == 0:
            print("train loss = ", current)
        history.append(current)

        loss.backward()
        opt.step()
    return history
def get_model(hidden_size=20, n_hidden=5, in_dim=2, out_dim=1, penultimate=False, use_cuda=False, bn=False):
    """Build a ``Net`` with the given width/depth and optionally move it to the GPU.

    Args:
        hidden_size: nodes per hidden layer.
        n_hidden: number of hidden layers.
        in_dim, out_dim: input / output dimensionality.
        penultimate: add the 2-unit bottleneck layer before the output.
        use_cuda: move the model to the GPU when True.
        bn: insert BatchNorm1d layers.

    Returns:
        The constructed (and possibly CUDA-resident) ``Net``.
    """
    net = Net(in_dim, out_dim, n_hidden=n_hidden, hidden_size=hidden_size,
              activation=torch.nn.ReLU(), bias=True, penultimate=penultimate, bn=bn)
    return net.cuda() if use_cuda else net
def get_dataset(N=1000, angle_param=0, one_hot=False, cov_scale=1, include_hybrid=False):
    """
    Generate the Gaussian XOR dataset and move to gpu

    Draws N training points (and 1000 test points) from the rotated Gaussian
    XOR distribution, shuffles them, and converts them to float tensors.
    When ``include_hybrid`` is True, also builds N//10 "hybrid" training sets,
    each identical to the training set except for one point swapped with a
    held-out "ghost" sample at the same index.

    Returns (train_x, train_y, test_x, test_y) plus, when include_hybrid,
    the list of (hybrid_x, hybrid_y) pairs.
    """
    # Commented-out CUDA setup, kept for reference.
    '''use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(0)
        torch.set_default_tensor_type(torch.cuda.FloatTensor)'''
    if include_hybrid:
        # Draw 2N points: the first N become the training set, the second N
        # the "ghost" pool used to build the hybrid variants.
        D_x, D_y = generate_gaussian_parity(cov_scale=cov_scale, n=2*N, angle_params=angle_param)
        D_perm = np.random.permutation(2*N)
        D_x, D_y = D_x[D_perm,:], D_y[D_perm]
        train_x, train_y = D_x[:N], D_y[:N]
        ghost_x, ghost_y = D_x[N:], D_y[N:]
        hybrid_sets = []
        # NOTE(review): range(0, N-1) omits index N-1 from the candidate
        # swaps — possibly an off-by-one; confirm whether range(N) was meant.
        rand_idx = random.sample(range(0,N-1), N//10)
        for rand_i in rand_idx:
            # Copy the training set and swap in a single ghost point.
            hybrid_x, hybrid_y = np.copy(train_x), np.copy(train_y)
            hybrid_x[rand_i], hybrid_y[rand_i] = ghost_x[rand_i], ghost_y[rand_i]
            hybrid_x = torch.FloatTensor(hybrid_x)
            hybrid_y = (torch.FloatTensor(hybrid_y).unsqueeze(-1))
            #hybrid_x, hybrid_y = hybrid_x.cuda(), hybrid_y.cuda()
            hybrid_sets.append((hybrid_x, hybrid_y))
    else:
        train_x, train_y = generate_gaussian_parity(cov_scale=cov_scale, n=N, angle_params=angle_param)
        train_perm = np.random.permutation(N)
        train_x, train_y = train_x[train_perm,:], train_y[train_perm]
    # Test set is always 1000 points from the same distribution.
    test_x, test_y = generate_gaussian_parity(cov_scale=cov_scale, n=1000, angle_params=angle_param)
    test_perm = np.random.permutation(1000)
    test_x, test_y = test_x[test_perm,:], test_y[test_perm]
    train_x = torch.FloatTensor(train_x)
    test_x = torch.FloatTensor(test_x)
    # Labels get a trailing singleton dim to match (N, 1) model outputs.
    train_y = (torch.FloatTensor(train_y).unsqueeze(-1))#[:,0]
    test_y = (torch.FloatTensor(test_y).unsqueeze(-1))#[:,0]
    if one_hot:
        train_y = torch.nn.functional.one_hot(train_y[:,0].to(torch.long))
        test_y = torch.nn.functional.one_hot(test_y[:,0].to(torch.long))
    # move to gpu
    '''if use_cuda:
        train_x, train_y = train_x.cuda(), train_y.cuda()
        test_x, test_y = test_x.cuda(), test_y.cuda()'''
    if include_hybrid:
        return train_x, train_y, test_x, test_y, hybrid_sets
    return train_x, train_y, test_x, test_y
| [
"numpy.copy",
"torch.nn.ReLU",
"numpy.ones",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"numpy.sin",
"torch.nn.BatchNorm1d",
"numpy.matmul",
"numpy.cos",
"numpy.concatenate",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"numpy.zeros_like",
"torch.FloatTensor",
"numpy.random.permut... | [((1050, 1069), 'numpy.zeros_like', 'np.zeros_like', (['blob'], {}), '(blob)\n', (1063, 1069), True, 'import numpy as np\n'), ((4746, 4774), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (4772, 4774), False, 'import torch\n'), ((7182, 7209), 'numpy.random.permutation', 'np.random.permutation', (['(1000)'], {}), '(1000)\n', (7203, 7209), True, 'import numpy as np\n'), ((7290, 7316), 'torch.FloatTensor', 'torch.FloatTensor', (['train_x'], {}), '(train_x)\n', (7307, 7316), False, 'import torch\n'), ((7330, 7355), 'torch.FloatTensor', 'torch.FloatTensor', (['test_x'], {}), '(test_x)\n', (7347, 7355), False, 'import torch\n'), ((3083, 3098), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (3096, 3098), False, 'import torch\n'), ((3196, 3211), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3209, 3211), False, 'from torch import nn\n'), ((4192, 4214), 'torch.nn.Sequential', 'nn.Sequential', (['*module'], {}), '(*module)\n', (4205, 4214), False, 'from torch import nn\n'), ((6168, 6196), 'numpy.random.permutation', 'np.random.permutation', (['(2 * N)'], {}), '(2 * N)\n', (6189, 6196), True, 'import numpy as np\n'), ((6964, 6988), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (6985, 6988), True, 'import numpy as np\n'), ((732, 749), 'numpy.ones', 'np.ones', (['blob_num'], {}), '(blob_num)\n', (739, 749), True, 'import numpy as np\n'), ((1194, 1228), 'numpy.cos', 'np.cos', (['(angle_params * np.pi / 180)'], {}), '(angle_params * np.pi / 180)\n', (1200, 1228), True, 'import numpy as np\n'), ((1244, 1278), 'numpy.sin', 'np.sin', (['(angle_params * np.pi / 180)'], {}), '(angle_params * np.pi / 180)\n', (1250, 1278), True, 'import numpy as np\n'), ((1321, 1355), 'numpy.sin', 'np.sin', (['(angle_params * np.pi / 180)'], {}), '(angle_params * np.pi / 180)\n', (1327, 1355), True, 'import numpy as np\n'), ((1371, 1405), 'numpy.cos', 'np.cos', (['(angle_params * np.pi / 180)'], {}), 
'(angle_params * np.pi / 180)\n', (1377, 1405), True, 'import numpy as np\n'), ((2062, 2100), 'numpy.matmul', 'np.matmul', (['last_activations', 'weights.T'], {}), '(last_activations, weights.T)\n', (2071, 2100), True, 'import numpy as np\n'), ((2569, 2613), 'numpy.concatenate', 'np.concatenate', (['polytope_memberships'], {'axis': '(1)'}), '(polytope_memberships, axis=1)\n', (2583, 2613), True, 'import numpy as np\n'), ((3234, 3275), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'hidden_size'], {'bias': 'bias'}), '(in_dim, hidden_size, bias=bias)\n', (3243, 3275), False, 'from torch import nn\n'), ((4096, 4138), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'out_dim'], {'bias': 'bias'}), '(hidden_size, out_dim, bias=bias)\n', (4105, 4138), False, 'from torch import nn\n'), ((5565, 5580), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (5578, 5580), False, 'import torch\n'), ((6614, 6641), 'torch.FloatTensor', 'torch.FloatTensor', (['hybrid_x'], {}), '(hybrid_x)\n', (6631, 6641), False, 'import torch\n'), ((7372, 7398), 'torch.FloatTensor', 'torch.FloatTensor', (['train_y'], {}), '(train_y)\n', (7389, 7398), False, 'import torch\n'), ((7434, 7459), 'torch.FloatTensor', 'torch.FloatTensor', (['test_y'], {}), '(test_y)\n', (7451, 7459), False, 'import torch\n'), ((1094, 1122), 'numpy.ones', 'np.ones', (['samples_per_blob[i]'], {}), '(samples_per_blob[i])\n', (1101, 1122), True, 'import numpy as np\n'), ((3538, 3584), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': 'bias'}), '(hidden_size, hidden_size, bias=bias)\n', (3547, 3584), False, 'from torch import nn\n'), ((3831, 3867), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(2)'], {'bias': 'bias'}), '(hidden_size, 2, bias=bias)\n', (3840, 3867), False, 'from torch import nn\n'), ((4041, 4068), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (4055, 4068), False, 'from torch import nn\n'), ((6474, 6490), 'numpy.copy', 'np.copy', (['train_x'], {}), 
'(train_x)\n', (6481, 6490), True, 'import numpy as np\n'), ((6492, 6508), 'numpy.copy', 'np.copy', (['train_y'], {}), '(train_y)\n', (6499, 6508), True, 'import numpy as np\n'), ((3479, 3506), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (3493, 3506), False, 'from torch import nn\n'), ((3772, 3799), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (3786, 3799), False, 'from torch import nn\n'), ((6666, 6693), 'torch.FloatTensor', 'torch.FloatTensor', (['hybrid_y'], {}), '(hybrid_y)\n', (6683, 6693), False, 'import torch\n'), ((2644, 2688), 'numpy.concatenate', 'np.concatenate', (['polytope_memberships'], {'axis': '(1)'}), '(polytope_memberships, axis=1)\n', (2658, 2688), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import sys
import csv
def check_overlap(interval, array):
    """Vectorised strict-overlap test between one interval and many.

    interval : (lo, hi) pair.
    array    : (N, 2) array of (lo, hi) rows.
    Returns a boolean array, True where ``interval`` strictly overlaps the
    row (intervals that merely touch at an endpoint do not overlap).
    """
    starts = array[:, 0]
    ends = array[:, 1]
    # Whichever interval starts first must extend strictly past the other's start.
    interval_starts_first = interval[0] < starts
    return np.where(interval_starts_first, interval[1] > starts, ends > interval[0])
# CLI: argv[1] = input TSV, argv[2] = threshold t, argv[3] = output TSV.
# Flags rows whose interval (column 3) does NOT overlap [-log(t), log(t)].
data = pd.read_table(sys.argv[1], header=None)
# Column 3 holds comma-separated floats (an interval's endpoints) per row.
post = np.array([[float(k) for k in j.split(',')] for i,j in data[3].items()])
# True where the row's interval is disjoint from the symmetric log-threshold interval.
filt = check_overlap(np.array([-np.log(float(sys.argv[2])),np.log(float(sys.argv[2]))]),post) == False
data[4] = filt
data.to_csv(sys.argv[3],sep='\t', index=False, header=False)
| [
"numpy.tile",
"pandas.read_table",
"numpy.arange"
] | [((367, 406), 'pandas.read_table', 'pd.read_table', (['sys.argv[1]'], {'header': 'None'}), '(sys.argv[1], header=None)\n', (380, 406), True, 'import pandas as pd\n'), ((154, 184), 'numpy.tile', 'np.tile', (['interval', '(height, 1)'], {}), '(interval, (height, 1))\n', (161, 184), True, 'import numpy as np\n'), ((296, 313), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (305, 313), True, 'import numpy as np\n'), ((338, 355), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (347, 355), True, 'import numpy as np\n')] |
from graph import get_goodreads_graph, get_sc_graph
import json
import numpy as np
import math
import os
# don't let matplotlib use xwindows
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.pylab import savefig
import seaborn as sns
sns.set_style("ticks")
import pandas as pd
# All figures are written under this directory; create it on first run.
output_directory_path = './figures'
if not os.path.exists(output_directory_path):
    os.makedirs(output_directory_path)
def plot_relative_popularity_by_year():
    """Scatter-plot log(SC/GR) relative popularity against publication year.

    For every book matched between the Shakespeare and Company (SC) borrow
    records and Goodreads (GR), computes each book's share of total SC
    borrows and total GR reviews, plots log(SC share / GR share) by
    publication year (highlighting the 30 most extreme books on each side),
    saves the figure, and prints the 20 most extreme titles per side.
    """
    # get the shakespeare and company graph
    sc_books_in_vertex_order, sc_book_to_vertex_index, sc_edge_to_weight, sc_vertex_to_neighbors, sc_n, sc_book_uri_to_num_events, sc_book_uri_to_text, sc_book_uri_to_year, sc_book_uri_to_title, sc_book_uri_to_author = get_sc_graph()
    # and now get the goodreads graph
    gr_books_in_vertex_order, gr_book_to_vertex_index, gr_edge_to_weight, gr_vertex_to_neighbors, gr_n, gr_book_id_to_num_ratings, gr_book_id_to_text = get_goodreads_graph()
    # Mapping from Goodreads book id to the matched SC book URI.
    with open('data/goodreads-book-id-to-sc-uri_full-matching.json', 'r') as f:
        goodreads_book_id_to_sc_uri = json.load(f)
    # load newly scraped data
    df = pd.read_json('data/matched-goodreads-metadata.json')
    # Index scraped Goodreads metadata by stringified book id.
    gr_book_id_to_scraped_num_reviews = {str(gr_id): num_reviews for gr_id, num_reviews in zip(df['bookID'], df['numReviews'])}
    gr_book_id_to_scraped_year = {str(gr_id): year for gr_id, year in zip(df['bookID'], df['yearFirstPublished'])}
    gr_book_id_to_scraped_title = {str(gr_id): title for gr_id, title in zip(df['bookID'], df['title'])}
    gr_book_id_to_scraped_author = {str(gr_id): author for gr_id, author in zip(df['bookID'], df['author'])}
    years = []
    titles = []
    authors = []
    gr_popularity_ratios = []
    sc_popularity_ratios = []
    # Denominators for each book's popularity share in its own corpus.
    gr_total_reviews = sum(gr_book_id_to_scraped_num_reviews.values())
    sc_total_borrows = sum(sc_book_uri_to_num_events.values())
    sc_texts = []
    for gr_book_id, sc_uri in goodreads_book_id_to_sc_uri.items():
        # some matched books don't have years in the dataset!!
        if gr_book_id not in gr_book_id_to_scraped_year:
            continue
        if math.isnan(gr_book_id_to_scraped_year[gr_book_id]):
            continue
        year = int(gr_book_id_to_scraped_year[gr_book_id])
        title = sc_book_uri_to_title[sc_uri]
        author = sc_book_uri_to_author[sc_uri]
        # skip super old books
        if year < 1800 or year > 1940:
            continue
        # sometimes popularity is zero--skip!!!
        # (zero counts would make the log ratio below undefined)
        if gr_book_id_to_scraped_num_reviews[gr_book_id] == 0:
            continue
        if sc_book_uri_to_num_events[sc_uri] == 0:
            continue
        #gr_text = gr_book_id_to_text[gr_book_id]
        #sc_text = sc_book_uri_to_text[sc_uri]
        sc_text = '{}\t{}'.format(gr_book_id_to_scraped_title[gr_book_id], gr_book_id_to_scraped_author[gr_book_id])
        # get relative popularity ratios
        gr_popularity_ratios.append(gr_book_id_to_scraped_num_reviews[gr_book_id] / gr_total_reviews)
        sc_popularity_ratios.append(sc_book_uri_to_num_events[sc_uri] / sc_total_borrows)
        years.append(year)
        titles.append(title)
        authors.append(author)
        sc_texts.append(sc_text)
    # now plot!
    # log(SC share / GR share): positive = relatively more popular in SC.
    log_ratios = [np.log(s / g) for s, g in zip(sc_popularity_ratios, gr_popularity_ratios)]
    point_types = []
    # 30 most GR-leaning (smallest ratios) and 30 most SC-leaning (largest).
    most_gr_examples = sorted(zip(log_ratios, years, sc_texts, [i for i in range(len(years))]), reverse=False)[:30]
    most_gr_idxs = [i for _, _, _, i in most_gr_examples]
    most_sc_examples = sorted(zip(log_ratios, years, sc_texts, [i for i in range(len(years))]), reverse=True)[:30]
    most_sc_idxs = [i for _, _, _, i in most_sc_examples]
    for i in range(len(log_ratios)):
        if i in most_gr_idxs:
            point_types.append('gr')
        elif i in most_sc_idxs:
            point_types.append('sc')
        else:
            point_types.append('normal')
    # '#86ceeb'
    color_dict = {'normal': '#b3cde3', 'sc': '#fc6b32', 'gr': '#13c28d'}
    marker_dict = {'normal': 'o', 'sc': 's', 'gr': 'D'}
    results = pd.DataFrame({'Year': years,
                            'log(SC/GR)': log_ratios,
                            'point_types': point_types
                            })
    plt.figure(figsize=(12.8, 4.8))
    ax = sns.scatterplot(data=results, x='Year', y='log(SC/GR)',
                         hue='point_types', palette=color_dict,
                         style='point_types', markers=marker_dict, legend=False)
    sns.despine()
    ax.set_xlim([1800,1941])
    ax.set_xlabel('Publication Year', fontsize=14)
    ax.set_ylabel('log(SC/GR)', fontsize=14)
    # Custom two-entry legend (the seaborn legend is suppressed above).
    gr_legend = matplotlib.lines.Line2D([], [], color=color_dict['gr'], marker=marker_dict['gr'], linestyle='None',
                              markersize=10, label='Much more popular in Goodreads')
    sc_legend = matplotlib.lines.Line2D([], [], color=color_dict['sc'], marker=marker_dict['sc'], linestyle='None',
                              markersize=10, label='Much more popular in Shakespeare and Company')
    plt.legend(handles=[sc_legend, gr_legend])
    savefig('{}/relative-popularity-by-year.png'.format(output_directory_path), bbox_inches='tight', dpi=300)
    plt.close()
    # print extreme books
    print('Most relatively popular in Goodreads:')
    for i, (ratio, year, title, author, text) in enumerate(sorted(zip(log_ratios, years, titles, authors, sc_texts), reverse=False)[:20]):
        print('\t{}\t{}\t{}\t{}'.format(i+1, year, title, author))
    print('Most relatively popular in Shakespeare and Company:')
    for i, (ratio, year, title, author, text) in enumerate(sorted(zip(log_ratios, years, titles, authors, sc_texts), reverse=True)[:20]):
        print('\t{}\t{}\t{}\t{}'.format(i+1, year, title, author))
if __name__ == '__main__':
    # Entry point: regenerate the scatter plot and print the extreme titles.
    plot_relative_popularity_by_year()
| [
"os.path.exists",
"os.makedirs",
"seaborn.despine",
"matplotlib.use",
"matplotlib.pyplot.legend",
"graph.get_goodreads_graph",
"numpy.log",
"seaborn.set_style",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"json.load",
"seaborn.scatterplot",
"pandas.DataFrame",
"matplotlib.lines.... | [((160, 181), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (174, 181), False, 'import matplotlib\n'), ((273, 295), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (286, 295), True, 'import seaborn as sns\n'), ((360, 397), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (374, 397), False, 'import os\n'), ((403, 437), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (414, 437), False, 'import os\n'), ((743, 757), 'graph.get_sc_graph', 'get_sc_graph', ([], {}), '()\n', (755, 757), False, 'from graph import get_goodreads_graph, get_sc_graph\n'), ((949, 970), 'graph.get_goodreads_graph', 'get_goodreads_graph', ([], {}), '()\n', (968, 970), False, 'from graph import get_goodreads_graph, get_sc_graph\n'), ((1143, 1195), 'pandas.read_json', 'pd.read_json', (['"""data/matched-goodreads-metadata.json"""'], {}), "('data/matched-goodreads-metadata.json')\n", (1155, 1195), True, 'import pandas as pd\n'), ((4100, 4187), 'pandas.DataFrame', 'pd.DataFrame', (["{'Year': years, 'log(SC/GR)': log_ratios, 'point_types': point_types}"], {}), "({'Year': years, 'log(SC/GR)': log_ratios, 'point_types':\n point_types})\n", (4112, 4187), True, 'import pandas as pd\n'), ((4260, 4291), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.8, 4.8)'}), '(figsize=(12.8, 4.8))\n', (4270, 4291), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4455), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'results', 'x': '"""Year"""', 'y': '"""log(SC/GR)"""', 'hue': '"""point_types"""', 'palette': 'color_dict', 'style': '"""point_types"""', 'markers': 'marker_dict', 'legend': '(False)'}), "(data=results, x='Year', y='log(SC/GR)', hue='point_types',\n palette=color_dict, style='point_types', markers=marker_dict, legend=False)\n", (4316, 4455), True, 'import seaborn as sns\n'), ((4506, 4519), 'seaborn.despine', 
'sns.despine', ([], {}), '()\n', (4517, 4519), True, 'import seaborn as sns\n'), ((4666, 4830), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': "color_dict['gr']", 'marker': "marker_dict['gr']", 'linestyle': '"""None"""', 'markersize': '(10)', 'label': '"""Much more popular in Goodreads"""'}), "([], [], color=color_dict['gr'], marker=marker_dict[\n 'gr'], linestyle='None', markersize=10, label=\n 'Much more popular in Goodreads')\n", (4689, 4830), False, 'import matplotlib\n'), ((4863, 5041), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': "color_dict['sc']", 'marker': "marker_dict['sc']", 'linestyle': '"""None"""', 'markersize': '(10)', 'label': '"""Much more popular in Shakespeare and Company"""'}), "([], [], color=color_dict['sc'], marker=marker_dict[\n 'sc'], linestyle='None', markersize=10, label=\n 'Much more popular in Shakespeare and Company')\n", (4886, 5041), False, 'import matplotlib\n'), ((5066, 5108), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[sc_legend, gr_legend]'}), '(handles=[sc_legend, gr_legend])\n', (5076, 5108), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5235), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5233, 5235), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1102), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1099, 1102), False, 'import json\n'), ((2138, 2188), 'math.isnan', 'math.isnan', (['gr_book_id_to_scraped_year[gr_book_id]'], {}), '(gr_book_id_to_scraped_year[gr_book_id])\n', (2148, 2188), False, 'import math\n'), ((3267, 3280), 'numpy.log', 'np.log', (['(s / g)'], {}), '(s / g)\n', (3273, 3280), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This module contains a method for flagging consecutive data values where the
recorded value repeats multiple times.
================================================================================
@Author:
| <NAME>, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Tue Aug 17 15:24:31 2021
Last Updated:
Tue Aug 17 15:24:31 2021
"""
import pandas as pd
import numpy as np
def persistent_values(df, param, tolerance=3, freq='H', invalidate=False):
    """Flag data points where consecutive timestamp parameter values repeat.

    A timestamp is flagged (value ``'persist'`` written to the
    ``<param>_QAQC_Code`` column) when it ends a run of ``tolerance`` or more
    consecutive identical recorded values, i.e. when the preceding
    ``tolerance - 1`` first-differences are all zero.
    If invalidate is true, flagged values will be set null (np.nan).

    Args:
        df (pandas DataFrame):
            Dataset containing parameter data to check for repeating values.
        param (str):
            The name of the parameter to check for repeating values.
        tolerance (int, optional):
            The number of consecutive entries for repeated/persistent values
            required to flag a data point. Must be >= 2. Defaults to 3.
        freq (str, optional):
            The sampling frequency or averaging interval of the passed
            dataset, expressed as a pandas offset alias. Defaults to 'H' for
            1-hour averaged datasets. NOTE(review): currently unused by this
            routine; retained for interface compatibility.
        invalidate (bool, optional):
            If True, repeated entries will be set null (np.nan). Defaults to
            False.

    Raises:
        ValueError: If ``tolerance`` is less than 2.

    Returns:
        df (pandas DataFrame):
            Modified dataset with flagged entries for repeated data entries.
    """
    if tolerance < 2:
        raise ValueError('tolerance must be an integer >= 2')
    if param + '_QAQC_Code' not in df:
        df.loc[:, param + '_QAQC_Code'] = np.nan
    # A zero first-difference marks a value identical to its predecessor.
    repeats = df[param + '_Value'].diff() == 0
    if not repeats.any():
        print('..no persistant values found for ' + param)
        return df
    print('..flagging persistant values for ' + param)
    # Count, for each timestamp, how many of the last (tolerance - 1)
    # first-differences were zero. A full window of zeros means the value
    # has persisted for at least `tolerance` consecutive timestamps.
    # (Bug fix: the previous implementation filled the comparison window with
    # the same unshifted diff column `tolerance` times, so the tolerance
    # setting had no effect and any single repeated value was flagged.)
    zero_runs = repeats.astype(int).rolling(tolerance - 1).sum()
    flag_idx = df.index[zero_runs == tolerance - 1]
    df.loc[flag_idx, param + '_QAQC_Code'] = 'persist'  # temporary flag
    if invalidate is True:
        df.loc[flag_idx, param + '_Value'] = np.nan
    return df
| [
"pandas.DataFrame",
"numpy.arange"
] | [((2216, 2230), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2228, 2230), True, 'import pandas as pd\n'), ((2244, 2285), 'numpy.arange', 'np.arange', (['(1)', '(tolerance + 1)', '(1)'], {'dtype': 'int'}), '(1, tolerance + 1, 1, dtype=int)\n', (2253, 2285), True, 'import numpy as np\n')] |
import math
from os import path
import logging
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.functional import F
from transformers import BertTokenizer
from h02_bert_embeddings.bert import BertProcessor
from utils import constants
from utils import utils
class BertEmbeddingsGetter(object):
    """Stream sentences from a corpus file through BERT and dump the
    resulting embeddings to disk in fixed-size pickle chunks.

    Sentences with more than 100 or at most 2 tokens are skipped. Chunks
    already present in the target path are skipped on re-run (resume).
    """
    def __init__(self, bert_option, batch_size, dump_size, tgt_words):
        """
        :param bert_option: identifier of the pre-trained BERT model to load
        :param batch_size: number of sentences per BERT batch
        :param dump_size: number of sentences accumulated before each dump
        :param tgt_words: target words passed through to the BERT processor
        """
        self.bert_option = bert_option
        self.batch_size = batch_size
        self.dump_size = dump_size
        self.tgt_words = tgt_words
        # Wrapped BERT model used to process sentence batches.
        self.bert = self.load_bert(bert_option, tgt_words)
        # Running count of skipped (too long / too short) sentences.
        self.n_skipped = 0
        # Source filename placeholder (not set in the code visible here).
        self.src_fname = None
    @classmethod
    def load_bert(cls, bert_option, tgt_words):
        """Load and return the pre-trained BERT processor."""
        logging.info("Loading pre-trained BERT network")
        bert = BertProcessor(bert_option, tgt_words=tgt_words)
        return bert
    def get_embeddings(self, src_file, tgt_path):
        """Process src_file and write embedding chunks under tgt_path
        (gradient tracking disabled for the whole run)."""
        with torch.no_grad():
            self.process_file(src_file, tgt_path)
    def process_file(self, src_file, tgt_path):
        """Run the extraction over src_file with a progress bar."""
        n_lines = utils.get_n_lines(src_file)
        self.n_skipped = 0
        with tqdm(total=n_lines, desc='Processing sentences. 0 skipped',
                  mininterval=.2) as pbar:
            self.run(src_file, tgt_path, pbar)
    def run(self, src_file, tgt_path, pbar):
        """Iterate batches, accumulate embeddings, and dump every
        `dump_size` processed sentences.

        Chunks whose result files already exist in tgt_path (counted via
        n_skip) are fast-forwarded without calling BERT, so interrupted
        runs resume where they left off.
        """
        tqdm.write('\tRunning on %s' % constants.device)
        # dump_id indexes the output chunk files; initialized per run.
        self.dump_id = 0
        processed_size = 0
        embeddings = {}
        # Number of chunks already written by a previous (partial) run.
        n_skip = len(utils.get_filenames(tgt_path))
        for batch in self.iterate_wiki(src_file, pbar):
            processed_size += len(batch)
            if self.dump_id < n_skip:
                # Still inside the already-dumped prefix: skip BERT work,
                # only advance the chunk counter.
                if processed_size >= self.dump_size:
                    self.dump_id += 1
                    processed_size = 0
                continue
            self.bert.process_batch(batch, embeddings)
            if processed_size >= self.dump_size:
                self.write_results(embeddings, tgt_path)
                embeddings = {}
                processed_size = 0
        # Flush whatever remains after the last full chunk.
        if processed_size > 0:
            self.write_results(embeddings, tgt_path)
    def iterate_wiki(self, src_file, pbar):
        """Yield lists of sentences of length batch_size (last may be shorter)."""
        batch = []
        for sentence in self.get_next_sentence(src_file, pbar):
            batch += [sentence]
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if batch:
            yield batch
    def get_next_sentence(self, src_file, pbar):
        """Yield tokenized sentences from src_file, one per line.

        Lines with more than 100 or at most 2 space-separated tokens are
        skipped and counted in n_skipped.
        """
        with open(src_file, 'r') as f:
            for line in f:
                tokens = line.strip().split(' ')
                pbar.update(1)
                if len(tokens) > 100 or len(tokens) <= 2:
                    self.n_skipped += 1
                    pbar.set_description('Processing sentences. %d skipped' % self.n_skipped)
                    continue
                yield tokens
    def write_results(self, results, tgt_path):
        """Convert accumulated value lists to matrices and dump them."""
        results = {key: np.matrix(values) for key, values in results.items()}
        self._write_results(results, tgt_path)
    def _write_results(self, results, tgt_path):
        """Pickle one chunk of results to tgt_path and advance dump_id."""
        fname = 'results--%05d.pickle' % \
            (self.dump_id)
        tqdm.write("\tSaving partial results: %s" % fname)
        filename = path.join(tgt_path, fname)
        utils.write_pickle(filename, results)
        self.dump_id += 1
| [
"tqdm.tqdm.write",
"tqdm.tqdm",
"os.path.join",
"h02_bert_embeddings.bert.BertProcessor",
"utils.utils.get_n_lines",
"utils.utils.write_pickle",
"torch.no_grad",
"numpy.matrix",
"logging.info",
"utils.utils.get_filenames"
] | [((743, 791), 'logging.info', 'logging.info', (['"""Loading pre-trained BERT network"""'], {}), "('Loading pre-trained BERT network')\n", (755, 791), False, 'import logging\n'), ((807, 854), 'h02_bert_embeddings.bert.BertProcessor', 'BertProcessor', (['bert_option'], {'tgt_words': 'tgt_words'}), '(bert_option, tgt_words=tgt_words)\n', (820, 854), False, 'from h02_bert_embeddings.bert import BertProcessor\n'), ((1073, 1100), 'utils.utils.get_n_lines', 'utils.get_n_lines', (['src_file'], {}), '(src_file)\n', (1090, 1100), False, 'from utils import utils\n'), ((1346, 1394), 'tqdm.tqdm.write', 'tqdm.write', (["('\\tRunning on %s' % constants.device)"], {}), "('\\tRunning on %s' % constants.device)\n", (1356, 1394), False, 'from tqdm import tqdm\n'), ((3183, 3233), 'tqdm.tqdm.write', 'tqdm.write', (["('\\tSaving partial results: %s' % fname)"], {}), "('\\tSaving partial results: %s' % fname)\n", (3193, 3233), False, 'from tqdm import tqdm\n'), ((3253, 3279), 'os.path.join', 'path.join', (['tgt_path', 'fname'], {}), '(tgt_path, fname)\n', (3262, 3279), False, 'from os import path\n'), ((3288, 3325), 'utils.utils.write_pickle', 'utils.write_pickle', (['filename', 'results'], {}), '(filename, results)\n', (3306, 3325), False, 'from utils import utils\n'), ((939, 954), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (952, 954), False, 'import torch\n'), ((1142, 1218), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_lines', 'desc': '"""Processing sentences. 0 skipped"""', 'mininterval': '(0.2)'}), "(total=n_lines, desc='Processing sentences. 0 skipped', mininterval=0.2)\n", (1146, 1218), False, 'from tqdm import tqdm\n'), ((1493, 1522), 'utils.utils.get_filenames', 'utils.get_filenames', (['tgt_path'], {}), '(tgt_path)\n', (1512, 1522), False, 'from utils import utils\n'), ((2954, 2971), 'numpy.matrix', 'np.matrix', (['values'], {}), '(values)\n', (2963, 2971), True, 'import numpy as np\n')] |
# 1. Only add your code inside the function (including newly imported packages).
# You can design a new function and call the new function in the given functions.
# 2. For bonus: Give your own pictures. If you have N pictures, name your pictures such as ["t3_1.png", "t3_2.png", ..., "t3_N.png"], and put them inside the folder "images".
# 3. Not following the project guidelines will result in a 10% reduction in grades
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Use the keypoints to stitch the images
def get_stitched_image(img1, img2, M):
# Get width and height of input images
w1, h1 = img1.shape[:2]
w2, h2 = img2.shape[:2]
# Get the canvas dimensions
img1_dims = np.float32([[0, 0], [0, w1], [h1, w1], [h1, 0]]).reshape(-1, 1, 2)
img2_dims_temp = np.float32([[0, 0], [0, w2], [h2, w2], [h2, 0]]).reshape(-1, 1, 2)
# Get relative perspective of second image
img2_dims = cv2.perspectiveTransform(img2_dims_temp, M)
# Resulting dimensions
result_dims = np.concatenate((img1_dims, img2_dims), axis=0)
# Getting images together
# Calculate dimensions of match points
[x_min, y_min] = np.int32(result_dims.min(axis=0).ravel() - 0.5)
[x_max, y_max] = np.int32(result_dims.max(axis=0).ravel() + 0.5)
# Create output array after affine transformation
transform_dist = [-x_min, -y_min]
transform_array = np.array([[1, 0, transform_dist[0]],
[0, 1, transform_dist[1]],
[0, 0, 1]])
# Warp images to get the resulting image
result_img = cv2.warpPerspective(img2, transform_array.dot(M),
(x_max - x_min, y_max - y_min))
result_img[transform_dist[1]:w1 + transform_dist[1],
transform_dist[0]:h1 + transform_dist[0]] = img1
# Return the result
return result_img
def matches_d(img1, img2):
    """Find putative SIFT matches between img1 and img2.

    Descriptors are compared by Euclidean distance; Lowe's ratio test keeps
    a match only when the best neighbor is clearly closer than the second
    best. Returns a list of [query_idx, train_idx, distance] triples.
    """
    # Initialize SIFT
    sift = cv2.SIFT_create(1000)
    # Extract keypoints and descriptors
    k1, d1 = sift.detectAndCompute(img1, None)
    k2, d2 = sift.detectAndCompute(img2, None)
    # Pairwise squared distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    sq_dists = np.sum(d1 ** 2, axis=1, keepdims=True) + np.sum(d2 ** 2, axis=1) - 2 * d1.dot(d2.T)
    # Bug fix: floating-point cancellation can leave tiny negative entries,
    # which would produce NaN under sqrt; clamp them to zero first.
    distances = np.sqrt(np.maximum(sq_dists, 0))
    # For each img1 descriptor, img2 descriptors sorted by distance
    min_indices = np.argsort(distances, axis=1)
    matches = []
    # Collect the two nearest neighbors per descriptor
    for i in range(min_indices.shape[0]):
        neighbors = min_indices[i][:2]
        curr_matches = []
        for j in range(len(neighbors)):
            match = []
            match.append(i)
            match.append(neighbors[j])
            match.append(distances[i][neighbors[j]] * 1.)
            curr_matches.append(match)
        matches.append(curr_matches)
    # Lowe's ratio test: make sure that the matches are good
    verify_ratio = 0.8  # Source: stackoverflow
    verified_matches = []
    for m1, m2 in matches:
        # Add to array only if it's a good match
        # (use the named constant instead of a duplicated literal 0.8)
        if m1[2] < verify_ratio * m2[2]:
            verified_matches.append(m1)
    print("Verified Number of Matches:",len(verified_matches))
    return verified_matches
# Find SIFT and return Homography Matrix
def get_sift_homography(img1, img2):
    """Estimate the homography mapping img1 onto img2 from SIFT matches.

    Exits the program when there are too few verified matches.
    """
    # Verified matches between the two images
    verified_matches = matches_d(img1, img2)
    # Recompute SIFT keypoints (needed to look up matched point coordinates)
    sift = cv2.SIFT_create(1000)
    k1, d1 = sift.detectAndCompute(img1, None)
    k2, d2 = sift.detectAndCompute(img2, None)
    # Require strictly more than this many matches
    min_matches = 8
    if len(verified_matches) <= min_matches:
        print('Error: Not enough matches')
        exit()
    # Matched point coordinates in each image
    pts1 = np.float32([k1[m[0]].pt for m in verified_matches]).reshape(-1, 1, 2)
    pts2 = np.float32([k2[m[1]].pt for m in verified_matches]).reshape(-1, 1, 2)
    # Robust homography estimation
    M, mask = cv2.findHomography(pts1, pts2, cv2.RANSAC, 5.0)
    return M
# Equalize Histogram of Color Images
def equalize_histogram_color(img):
    """Equalize the luminance (Y) channel of a BGR image via YUV space."""
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
def stitch_2_images(img1, img2):
    """Stitch img1 onto img2 after histogram equalization."""
    eq1 = equalize_histogram_color(img1)
    eq2 = equalize_histogram_color(img2)
    # Homography from SIFT keypoint matches
    homography = get_sift_homography(eq1, eq2)
    # Combine the two images on one canvas
    return get_stitched_image(eq2, eq1, homography)
def spatial_overlaps_matrix(imgs):
    """Return an NxN 0/1 matrix marking image pairs with > 20 SIFT matches."""
    n = len(imgs)
    overlap_arr = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            verified_matches = matches_d(imgs[i], imgs[j])
            matches_length = len(verified_matches)
            print('matches_length', matches_length)
            overlap_arr[i][j] = 1 if matches_length > 20 else 0
    return overlap_arr
def stitch(imgmark, N,
           savepath=''):  # For bonus: change your input(N=*) here as default if the number of your input pictures is not 4.
    "The output image should be saved in the savepath."
    "The intermediate overlap relation should be returned as NxN a one-hot(only contains 0 or 1) array."
    "Do NOT modify the code provided."
    imgpath = [f'./images/{imgmark}_{n}.png' for n in range(1, N + 1)]
    imgs = []
    for ipath in imgpath:
        img = cv2.imread(ipath)
        imgs.append(img)
    "Start you code here"
    # Fold the images together pairwise, left to right.
    result_image = stitch_2_images(imgs[0], imgs[1])
    if len(imgs) > 2:
        for k in range(2, len(imgs)):
            result_image = stitch_2_images(result_image, imgs[k])
    # Bug fix: the result was always written to f'{imgmark}_result.png' and
    # the documented `savepath` argument was ignored. Honor savepath when
    # given; keep the old name as a backward-compatible fallback.
    out_path = savepath if savepath else f'{imgmark}_result.png'
    cv2.imwrite(out_path, result_image)
    cv2.imshow('Result', result_image)
    cv2.waitKey(0)
    plt.show()
    # Pairwise overlap relation between the input images
    overlap_arr = spatial_overlaps_matrix(imgs)
    print('overlap_arr', overlap_arr)
    return overlap_arr
if __name__ == "__main__":
    # task2: stitch the four t2 images and persist the overlap matrix as JSON
    overlap_arr = stitch('t2', N=4, savepath='task2.png')
    with open('t2_overlap.txt', 'w') as outfile:
        json.dump(overlap_arr.tolist(), outfile)
    # bonus: same pipeline on the user-provided t3 images
    overlap_arr2 = stitch('t3', N=4, savepath='task3.png')
    with open('t3_overlap.txt', 'w') as outfile:
        json.dump(overlap_arr2.tolist(), outfile)
| [
"cv2.imwrite",
"cv2.imread",
"numpy.sqrt",
"cv2.findHomography",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"cv2.SIFT_create",
"cv2.equalizeHist",
"numpy.sum",
"numpy.concatenate",
"cv2.cvtColor",
"cv2.perspectiveTransform",
"cv2.waitKey",
"numpy.float32",
"matplotlib.pyplot.show"
] | [((947, 990), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['img2_dims_temp', 'M'], {}), '(img2_dims_temp, M)\n', (971, 990), False, 'import cv2\n'), ((1037, 1083), 'numpy.concatenate', 'np.concatenate', (['(img1_dims, img2_dims)'], {'axis': '(0)'}), '((img1_dims, img2_dims), axis=0)\n', (1051, 1083), True, 'import numpy as np\n'), ((1411, 1486), 'numpy.array', 'np.array', (['[[1, 0, transform_dist[0]], [0, 1, transform_dist[1]], [0, 0, 1]]'], {}), '([[1, 0, transform_dist[0]], [0, 1, transform_dist[1]], [0, 0, 1]])\n', (1419, 1486), True, 'import numpy as np\n'), ((1952, 1973), 'cv2.SIFT_create', 'cv2.SIFT_create', (['(1000)'], {}), '(1000)\n', (1967, 1973), False, 'import cv2\n'), ((2226, 2244), 'numpy.sqrt', 'np.sqrt', (['distances'], {}), '(distances)\n', (2233, 2244), True, 'import numpy as np\n'), ((2314, 2343), 'numpy.argsort', 'np.argsort', (['distances'], {'axis': '(1)'}), '(distances, axis=1)\n', (2324, 2343), True, 'import numpy as np\n'), ((3390, 3411), 'cv2.SIFT_create', 'cv2.SIFT_create', (['(1000)'], {}), '(1000)\n', (3405, 3411), False, 'import cv2\n'), ((4299, 4335), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2YUV'], {}), '(img, cv2.COLOR_BGR2YUV)\n', (4311, 4335), False, 'import cv2\n'), ((4359, 4393), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img_yuv[:, :, 0]'], {}), '(img_yuv[:, :, 0])\n', (4375, 4393), False, 'import cv2\n'), ((4404, 4444), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(img_yuv, cv2.COLOR_YUV2BGR)\n', (4416, 4444), False, 'import cv2\n'), ((6046, 6096), 'cv2.imwrite', 'cv2.imwrite', (['f"""{imgmark}_result.png"""', 'result_image'], {}), "(f'{imgmark}_result.png', result_image)\n", (6057, 6096), False, 'import cv2\n'), ((6101, 6135), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'result_image'], {}), "('Result', result_image)\n", (6111, 6135), False, 'import cv2\n'), ((6140, 6154), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6151, 6154), False, 'import cv2\n'), 
((6159, 6169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6167, 6169), True, 'import matplotlib.pyplot as plt\n'), ((4070, 4125), 'cv2.findHomography', 'cv2.findHomography', (['img1_pts', 'img2_pts', 'cv2.RANSAC', '(5.0)'], {}), '(img1_pts, img2_pts, cv2.RANSAC, 5.0)\n', (4088, 4125), False, 'import cv2\n'), ((5794, 5811), 'cv2.imread', 'cv2.imread', (['ipath'], {}), '(ipath)\n', (5804, 5811), False, 'import cv2\n'), ((728, 776), 'numpy.float32', 'np.float32', (['[[0, 0], [0, w1], [h1, w1], [h1, 0]]'], {}), '([[0, 0], [0, w1], [h1, w1], [h1, 0]])\n', (738, 776), True, 'import numpy as np\n'), ((816, 864), 'numpy.float32', 'np.float32', (['[[0, 0], [0, w2], [h2, w2], [h2, 0]]'], {}), '([[0, 0], [0, w2], [h2, w2], [h2, 0]])\n', (826, 864), True, 'import numpy as np\n'), ((2126, 2164), 'numpy.sum', 'np.sum', (['(d1 ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(d1 ** 2, axis=1, keepdims=True)\n', (2132, 2164), True, 'import numpy as np\n'), ((2167, 2190), 'numpy.sum', 'np.sum', (['(d2 ** 2)'], {'axis': '(1)'}), '(d2 ** 2, axis=1)\n', (2173, 2190), True, 'import numpy as np\n'), ((3918, 3938), 'numpy.float32', 'np.float32', (['img1_pts'], {}), '(img1_pts)\n', (3928, 3938), True, 'import numpy as np\n'), ((3976, 3996), 'numpy.float32', 'np.float32', (['img2_pts'], {}), '(img2_pts)\n', (3986, 3996), True, 'import numpy as np\n')] |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms
from nntools.maybe_cuda import mbcuda
from .utils import get_config
from .trainer import Trainer
import argparse
# Command-line options: model config, checkpoint, class (style) image folder,
# content image, and output path.
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='configs/funit_animals.yaml')
parser.add_argument('--ckpt',
                    type=str,
                    default='pretrained/animal119_gen_00200000.pt')
parser.add_argument('--class_image_folder',
                    type=str,
                    default='images/n02138411')
parser.add_argument('--input',
                    type=str,
                    default='images/input_content.jpg')
parser.add_argument('--output',
                    type=str,
                    default='images/output.jpg')
opts = parser.parse_args()
# Enable cuDNN autotuning for fixed-size inputs.
cudnn.benchmark = True
opts.vis = True
# Load the trainer with batch size 1 / single GPU, move it to the available
# device (mbcuda), restore the checkpoint, and switch to eval mode.
config = get_config(opts.config)
config['batch_size'] = 1
config['gpus'] = 1
trainer = Trainer(config)
mbcuda(trainer)
trainer.load_ckpt(opts.ckpt)
trainer.eval()
# Preprocessing: resize to 128x128, convert to tensor, normalize to [-1, 1].
transform_list = [transforms.ToTensor(),
                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform_list = [transforms.Resize((128, 128))] + transform_list
transform = transforms.Compose(transform_list)
print('Compute average class codes for images in %s' % opts.class_image_folder)
# Average the style (class) code over every image in the class folder.
images = os.listdir(opts.class_image_folder)
for i, f in enumerate(images):
    fn = os.path.join(opts.class_image_folder, f)
    img = Image.open(fn).convert('RGB')
    img_tensor = mbcuda(transform(img).unsqueeze(0))
    with torch.no_grad():
        class_code = trainer.model.compute_k_style(img_tensor, 1)
        if i == 0:
            new_class_code = class_code
        else:
            new_class_code += class_code
final_class_code = new_class_code / len(images)
# Load and preprocess the content image.
image = Image.open(opts.input)
image = image.convert('RGB')
content_img = transform(image).unsqueeze(0)
print('Compute translation for %s' % opts.input)
# Translate the content image toward the averaged class code.
with torch.no_grad():
    output_image = trainer.model.translate_simple(content_img, final_class_code)
# Denormalize from [-1, 1] back to [0, 255] and save as JPEG.
image = output_image.detach().cpu().squeeze().numpy()
image = np.transpose(image, (1, 2, 0))
image = ((image + 1) * 0.5 * 255.0)
output_img = Image.fromarray(np.uint8(image))
output_img.save(opts.output, 'JPEG', quality=99)
print('Save output to %s' % opts.output)
| [
"numpy.uint8",
"os.listdir",
"nntools.maybe_cuda.mbcuda",
"PIL.Image.open",
"argparse.ArgumentParser",
"os.path.join",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"numpy.transpose",
"torchvision.transforms.Compose"
] | [((460, 485), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (483, 485), False, 'import argparse\n'), ((1278, 1293), 'nntools.maybe_cuda.mbcuda', 'mbcuda', (['trainer'], {}), '(trainer)\n', (1284, 1293), False, 'from nntools.maybe_cuda import mbcuda\n'), ((1539, 1573), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (1557, 1573), False, 'from torchvision import transforms\n'), ((1667, 1702), 'os.listdir', 'os.listdir', (['opts.class_image_folder'], {}), '(opts.class_image_folder)\n', (1677, 1702), False, 'import os\n'), ((2151, 2173), 'PIL.Image.open', 'Image.open', (['opts.input'], {}), '(opts.input)\n', (2161, 2173), False, 'from PIL import Image\n'), ((1361, 1382), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1380, 1382), False, 'from torchvision import transforms\n'), ((1403, 1457), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1423, 1457), False, 'from torchvision import transforms\n'), ((1745, 1785), 'os.path.join', 'os.path.join', (['opts.class_image_folder', 'f'], {}), '(opts.class_image_folder, f)\n', (1757, 1785), False, 'import os\n'), ((2307, 2322), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2320, 2322), False, 'import torch\n'), ((2478, 2508), 'numpy.transpose', 'np.transpose', (['image', '(1, 2, 0)'], {}), '(image, (1, 2, 0))\n', (2490, 2508), True, 'import numpy as np\n'), ((1478, 1507), 'torchvision.transforms.Resize', 'transforms.Resize', (['(128, 128)'], {}), '((128, 128))\n', (1495, 1507), False, 'from torchvision import transforms\n'), ((1891, 1906), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1904, 1906), False, 'import torch\n'), ((2584, 2599), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (2592, 2599), True, 'import numpy as np\n'), ((1797, 1811), 'PIL.Image.open', 'Image.open', (['fn'], {}), 
'(fn)\n', (1807, 1811), False, 'from PIL import Image\n')] |
import random
import numpy
import simpy
from file_manager import SharedFile
def new_inter_session_time():
    """
    Draw an "inter-session time" value (time between two login sessions).
    """
    return numpy.random.lognormal(7.971, 1.308)
def new_session_duration():
    """
    Draw a "session time" value (duration of one login session).
    """
    return numpy.random.lognormal(8.492, 1.545)
def new_inter_upload_time():
    """
    Draw an "inter-upload time" value (time between two uploads).
    """
    return numpy.random.lognormal(3.748, 2.286)
def new_download_time(s, download_rate):
    """
    Return a "download time" value for a file of size "s" at the given rate.

    A zero-size file downloads instantly.
    """
    return 0 if s == 0 else s / download_rate
def new_download_rate(f):
    """
    Draw a "download rate" value for file "f": the file's nominal throughput
    perturbed by a random factor in [-0.5, 1.5).
    """
    base = f.get_throughput()
    return base + (random.random() - 0.25) * 2 * base
def new_upload_time(file_size, upload_rate):
    """
    Return an "upload time" value for a file of size "file_size".

    Same model as the download time: zero-size files transfer instantly,
    otherwise duration = size / rate.
    """
    if file_size == 0:
        return 0
    return file_size / upload_rate
def new_upload_rate(f):
    """
    Draw an "upload rate" value for file "f" (same distribution as the
    download rate: nominal throughput perturbed by a factor in [-0.5, 1.5)).
    """
    nominal = f.get_throughput()
    return nominal + (random.random() - 0.25) * 2 * nominal
class Device(object):
    # constructor
    def __init__(self, device_id, env, fm, cs, cenv):
        """
        :param device_id: device identifier
        :param env: simpy simulation environment
        :param fm: file manager
        :param cs: cloud statistics collector
        :param cenv: cloud environment
        """
        # Device ID
        self.id = device_id
        # Shared folders this device participates in
        self.my_shared_folders = []
        # Simulation environment
        self.env = env
        # Cloud environment
        self.cloud_env = cenv
        # File manager
        self.fm = fm
        # Statistics manager
        self.stats = cs
        # Shared folder the device works on during the current session
        self.current_sf = None
        # Timestamp at which the current session ends
        self.end_session = -1
        # Login flag
        self.logged_in = False
        # Obsolete/missing files still to be downloaded
        self.missing_files = set([])
        # Files whose upload did not complete
        self.missed_uploads = set([])
        # Files to download on the fly
        self.triggered_list = set([])
        # Shared resource for triggered downloads: used to download files on the fly
        self.trigger_lock = simpy.Container(self.env, init=0)
        # Flag: when True, the device is notified in real time about new files uploaded to the cloud
        self.triggerable = False
        # Contribution to P2P file transfers
        self.p2p_contribution = 0.0
        # Register this device's simulation processes
        self.prepare()
# fancy printing as string
def __str__(self):
sf_str = ", ".join([str(i) for i in self.my_shared_folders])
return "Device: " + str(self.id) + ", Shared Folders [" + sf_str + "]"
# add a shared folder to this device
def add_shared_folder(self, sf):
self.my_shared_folders.append(sf)
def is_working_in_sf(self, sf):
"""
Verifica che il dispositivo stia lavorando nella cartella condivisa specificata
"""
return self.current_sf == sf
    def is_on(self):
        """
        Check whether the device is currently logged in
        """
        return self.logged_in
def has_file(self, f):
"""
Verifica che il dispositivo abbia gia' scaricato il file "f"
"""
return f not in self.missing_files
def has_shared_folder(self, sf):
"""
Verifica che il dispositivo abbia i privilegi per lavorare in una certa cartella condivisa
"""
return sf in self.my_shared_folders
    def get_id(self):
        """Return the device identifier."""
        return self.id
    def random_sf(self):
        """
        Return one of the device's shared folders, chosen at random
        """
        return random.choice(self.my_shared_folders)
def residual_session_duration(self):
"""
Ritorna il numero di secondi rimanenti prima del logout
"""
return self.end_session - int(self.env.now)
    def prepare(self):
        """
        Register the device's main simulation process with the environment
        """
        self.env.process(self.run())
    def run(self):
        """
        Main simpy process: alternates the device's online/offline states.

        Each iteration waits an inter-session time, picks a shared folder,
        logs in for a sampled session duration, downloads missing files
        (from peers when P2P is available, otherwise from the central
        server), spends any remaining time uploading, and finally logs out.
        """
        while True:
            # Waiting time before coming online
            inter_session_time = new_inter_session_time()
            yield self.env.timeout(inter_session_time)
            # Choose the shared folder to work on during this session
            self.current_sf = self.random_sf()
            # The device logs in
            session_duration = new_session_duration()
            # The session has a maximum duration for all operations: when
            # session_duration goes negative, the current upload/download
            # operation is truncated
            self.session_start(session_duration)
            # DOWNLOADS
            residual_time = session_duration
            # List of files that should be downloaded
            if len(self.downloads()) == 0:
                self.fm.log('Device %d has no file to download from the server' % self.id)
            else:
                while len(self.downloads()) > 0:
                    # File to download
                    f = self.downloads().pop()
                    file_size_to_download = f.get_size()
                    server_download = True
                    # Check for P2P download
                    if not self.cloud_env.server:
                        server_download = False
                        # Logged-in devices that already hold the file
                        peers = self.cloud_env.look_for_peers(f)
                        if len(peers) > 0:
                            # Useful residual session durations of the peers
                            # NOTE(review): map() results are indexed below
                            # (residual_times[i]); this requires Python 2 —
                            # under Python 3 wrap both in list(...) — confirm
                            # the target interpreter.
                            residual_times = map(lambda p: min(p.residual_session_duration(), residual_time), peers)
                            # Throughput values for transfers from each peer
                            rates = map(lambda p: new_download_rate(f), peers)
                            # Local helper: is there still time left to
                            # download from some peer?
                            def p2p_check():
                                for t in residual_times:
                                    if t > 0:
                                        return True
                                return False
                            # Decide how long to stay connected to each peer
                            # to download the file
                            durations = [0] * len(peers)
                            downloaded_data = 0.0
                            file_size = f.get_size()
                            downloaded = False
                            while (not downloaded) and p2p_check():
                                for i in range(len(peers)):
                                    if residual_times[i] > 0:
                                        # Download one second of data from
                                        # the i-th peer
                                        residual_times[i] -= 1
                                        durations[i] += 1
                                        downloaded_data += rates[i]
                                        # If the file is fully covered,
                                        # stop the loops
                                        if downloaded_data >= file_size:
                                            downloaded = True
                                            break
                            # Run the downloads from the peers in parallel
                            events = []
                            for i in range(len(peers)):
                                if durations[i] > 0:
                                    events.append(self.env.process(peers[i].p2p_upload(f, durations[i], rates[i])))
                                    self.env.process(self.p2p_download(f, durations[i], rates[i], peers[i].get_id()))
                            if len(events) > 0:
                                yield simpy.events.AllOf(self.env, events)
                            # After the peer downloads, if the file is too
                            # large the remaining part is fetched from the
                            # central server
                            if not downloaded:
                                file_size_to_download -= downloaded_data
                                server_download = True
                            else:
                                self.missing_files.remove(f)
                        else:
                            # No peer holds the file being looked for
                            server_download = True
                    if server_download:
                        # The file must be fetched from the server
                        # Time required for the download
                        server_download_rate = new_download_rate(f)
                        server_download_time = new_download_time(file_size_to_download, server_download_rate)
                        residual_time -= server_download_time
                        # Check there is enough time to complete the download
                        if residual_time >= 0:
                            # The file can be downloaded in full
                            yield self.env.process(self.download(f, server_download_time, server_download_rate, True))
                        else:
                            # The download operation is truncated early
                            self.missing_files.add(f)
                            self.stats.download_start(self, f)
                            yield self.env.timeout(residual_time + server_download_time)
                            self.stats.download_end(self, f, server_download_rate)
                            self.fm.log('Device %s fails to download on fly file "%d" at %d' % (self.id, f.get_id(),
                                                                                                int(self.env.now)))
                            return
                self.fm.log('Device %s finishes its downloads at %d' % (self.id, int(self.env.now)))
            # In the remaining part of the session the device uploads files
            # and fetches fresh changes
            if residual_time > 0:
                # TRIGGERED DOWNLOADS
                # While uploading, the device also listens for files uploaded
                # by others to the current shared folder
                self.triggerable = True
                # UPLOADS
                # If the download phase completed, upload as many files as
                # possible in the remaining time
                yield self.env.process(self.uploads(residual_time))
                '''tdw_proc = self.env.process(self.triggered_downloads(residual_time))
                yield up_proc or tdw_proc'''
                self.triggerable = False
                # up_proc.interrupt('')
                # tdw_proc.interrupt('')
            self.session_end()
            self.fm.log('Device %d logs out at %d: session lasts for %d' % (self.id, int(self.env.now),
                                                                            int(session_duration)))
def downloads(self):
"""
La funzione restituisce l'elenco di file da scaricare (perche' nuovi o modificati) da una specifica cartella
condivisa
"""
return filter(lambda x: x.get_shared_folder() == self.current_sf, self.missing_files)
    def download(self, f, download_time, download_rate, on_fly=False):
        """
        Simulate the download of file "f" from the server.

        :param f: file being downloaded
        :param download_time: transfer duration (s)
        :param download_rate: transfer speed (bits/s)
        :param on_fly: whether this is an on-the-fly (triggered) download
        """
        # Run the download (server or P2P side) and record the statistics
        self.stats.download_start(self, f)
        yield self.env.timeout(download_time)
        self.stats.download_end(self, f, download_rate)
        self.stats.download_successful(self, f, download_time)
        # The file has been downloaded: mark it as up to date
        self.missing_files.remove(f)
        self.fm.log(
            'Device %d downloads %sfile "%d" from the server at %d: download lasts for %.2f' %
            (self.id, 'on fly ' if on_fly else '', f.get_id(), int(self.env.now), download_time)
        )
    def p2p_download(self, f, download_time, download_rate, peer_id):
        """
        Simulate the download of a portion of a file from a peer.

        :param f: file being downloaded
        :param download_time: time spent downloading the file portion (s)
        :param download_rate: download speed (bits/s)
        :param peer_id: id of the peer uploading the data
        """
        # Amount of data transferred from this peer
        size = download_time * download_rate
        self.stats.p2p_download_start()
        yield self.env.timeout(download_time)
        self.stats.p2p_download_end(size)
        if f.get_size() == size:
            # The whole file comes from a single peer
            tmp = 'the entire'
        else:
            tmp = 'a portion of'
        self.fm.log('Device %d downloads %s file "%d" (size: %.2f bits) from the peer "%d" at %d: download '
                    'lasts for %.2f' % (self.id, tmp, f.get_id(), size, peer_id, int(self.env.now), download_time))
    def uploads(self, residual_time):
        """
        Upload as many files as possible to the shared folder in the time
        left.

        :param residual_time: remaining session time (s)
        """
        try:
            while residual_time > 0:
                # Wait the inter-upload gap before attempting an upload
                inter_upload_time = new_inter_upload_time()
                self.fm.log('Device %s starts waiting an inter-upload time of %d at %s' % (self.id, inter_upload_time,
                                                                                           int(self.env.now)))
                # File to be uploaded
                f = self.to_upload()
                if inter_upload_time >= residual_time:
                    # No time left for further uploads: remember the file
                    self.missed_uploads.add(f)
                    yield self.env.timeout(residual_time)
                    self.fm.log('Device %s has no time to upload file (inter-upload time) at %s' % (self.id,
                                                                                                    int(self.env.now)))
                    residual_time = 0
                else:
                    # A new upload can be attempted
                    yield self.env.timeout(inter_upload_time)
                    residual_time -= inter_upload_time
                    # UPLOAD
                    upload_rate = new_upload_rate(f)
                    upload_time = new_upload_time(f.get_size(), upload_rate)
                    if residual_time >= upload_time:
                        # The upload can complete within the session
                        yield self.env.process(self.upload(f, upload_time, upload_rate))
                        residual_time -= upload_time
                    else:
                        # The upload is truncated early by the logout
                        self.stats.upload_start(self, f)
                        yield self.env.timeout(residual_time)
                        self.stats.upload_end(self, f, upload_rate)
                        self.fm.log(
                            'Device %s fails to upload file "%s" at %s' % (self.id, f.get_id(), int(self.env.now)))
                        residual_time = 0
        except simpy.Interrupt:
            pass
def to_upload(self):
"""
La funzione restituisce il prossimo file da caricare in upload: se l'operazione non viene troncata da un logout
prematuro, allora gli altri device che condividono la cartella riceveranno questo file
"""
# Per prima cosa, guardo i file che non sono riuscito a caricare in precedenza
for x in self.missed_uploads:
# Mi soffermo sulla cartella condivisa su cui il device opera in questa sua sessione
if x.get_shared_folder() == self.current_sf:
if self.current_sf.has_file(x):
if x.get_last_modified() > self.current_sf.get_last_modified(x):
# File da aggiornare
self.missed_uploads.remove(x)
return x
else:
# File obsoleto
self.missed_uploads.remove(x)
else:
# File non presente su cloud
self.missed_uploads.remove(x)
return x
# Se non ho file in arretrato, carico qualcosa di nuovo
fc = self.fm.new_upload()
t = int(self.env.now)
return SharedFile.from_cloud(fc, self.current_sf, t, self.id)
def upload(self, f, upload_time, upload_rate):
    """
    Simulate the upload of file ``f`` to the server.

    :param f: file being uploaded
    :param upload_time: duration of the data transfer
    :param upload_rate: data transfer rate
    """
    # Perform the upload itself
    self.stats.upload_start(self, f)
    yield self.env.timeout(upload_time)
    # Refresh the shared-folder references and notify the other devices
    shared = f.get_shared_folder()
    shared.upload_file(f, int(self.env.now))
    self.stats.upload_end(self, f, upload_rate)
    self.stats.upload_successful(self, f, upload_time)
    self.fm.log(
        'Device %d uploads file "%d" in shared folder "%d", at %d: upload lasts for %.2f' %
        (self.id, f.get_id(), shared.get_id(), int(self.env.now), int(upload_time))
    )
def p2p_upload(self, f, upload_time, upload_rate):
    """
    Simulate the peer-to-peer upload of file ``f``.

    :param f: file being uploaded
    :param upload_time: duration of the data transfer
    :param upload_rate: data transfer rate
    """
    self.fm.log(
        'Device %d starts uploading file "%d" to a peer, at %d: upload will lasts for %.2f' %
        (self.id, f.get_id(), int(self.env.now), int(upload_time))
    )
    # Advance one time unit at a time so that the contribution counter is
    # truthful even if the simulation stops mid-transfer.
    for _ in range(upload_time):
        yield self.env.timeout(1)
        self.p2p_contribution += upload_rate
'''
def triggered_downloads(self, residual_time):
"""
La funzione permette di scaricare in real-time i file caricati/modificati da altri dispositivi sulla cartella
condivisa
"""
while self.env.now < self.end_session and self.triggerable:
# Attendo notifica da parte del server
yield self.trigger_lock.get(1)
if self.env.now < self.end_session and self.triggerable:
# Scarico il nuovo file, in parallelo agli upload
f = self.triggered_list.pop()
dr = new_download_rate(f)
dt = new_download_time(f.get_size(), dr)
if self.env.now + dt <= self.end_session:
yield self.env.process(self.download(f, dt, dr))
else:
self.stats.download_start(self, f)
yield self.env.timeout(int(self.end_session - self.env.now))
self.stats.download_end(self, f, dr)
self.fm.log('Device %s fails to download file "%d" on the fly at %d' % (self.id, f.get_id(),
int(self.env.now)))
'''
def trigger_download(self, f):
    """
    Download file "f" on the fly, right after another device uploaded it
    to the Cloud.
    """
    # A single device can download only one file at a time from the
    # server -> shared resource (simpy Container used as a lock)
    yield self.trigger_lock.get(1)
    # Try to download the file from the server.
    # Note: at this point we may actually be outside the session, or inside
    # a new one. This must be taken into account by checking that the
    # device is still "triggerable" and that the file has not already been
    # downloaded (it is still listed among the missing files).
    if self.triggerable and (f in self.missing_files):
        dr = new_download_rate(f)
        dt = new_download_time(f.get_size(), dr)
        if self.env.now + dt <= self.end_session:
            # Enough time left to complete the download
            yield self.env.process(self.download(f, dt, dr))
        else:
            # The file cannot be downloaded entirely before logout
            self.stats.download_start(self, f)
            yield self.env.timeout(int(self.end_session - self.env.now))
            self.stats.download_end(self, f, dr)
            self.fm.log('Device %s fails to download file "%d" on the fly at %d' % (self.id, f.get_id(),
                                                                                    int(self.env.now)))
    # Release the resource
    yield self.trigger_lock.put(1)
def new_file_to_download(self, f):
    """
    Record a file the device still has to download, emulating the file-list
    notification sent by the Cloud Server.
    """
    self.missing_files.add(f)
def session_start(self, session_duration):
    """
    Login routine: log the event, record the session end time and update
    the statistics.
    """
    now = int(self.env.now)
    self.fm.log('Device %s logged in at %s, on shared folder "%d"' % (self.id, now,
                                                                      self.current_sf.get_id()))
    self.end_session = now + session_duration
    self.logged_in = True
    self.stats.login(self)
def session_end(self):
    """
    Logout routine: abort pending triggered downloads, drain the trigger
    lock and update the statistics.
    """
    # Triggered downloads never performed are dropped
    self.triggerable = False
    self.triggered_list.clear()
    level = self.trigger_lock.level
    if level > 0:
        self.trigger_lock.get(level)
    self.logged_in = False
    self.stats.logout(self)
| [
"random.choice",
"simpy.events.AllOf",
"file_manager.SharedFile.from_cloud",
"random.random",
"simpy.Container",
"numpy.random.lognormal"
] | [((195, 242), 'numpy.random.lognormal', 'numpy.random.lognormal', ([], {'mean': '(7.971)', 'sigma': '(1.308)'}), '(mean=7.971, sigma=1.308)\n', (217, 242), False, 'import numpy\n'), ((354, 401), 'numpy.random.lognormal', 'numpy.random.lognormal', ([], {'mean': '(8.492)', 'sigma': '(1.545)'}), '(mean=8.492, sigma=1.545)\n', (376, 401), False, 'import numpy\n'), ((519, 566), 'numpy.random.lognormal', 'numpy.random.lognormal', ([], {'mean': '(3.748)', 'sigma': '(2.286)'}), '(mean=3.748, sigma=2.286)\n', (541, 566), False, 'import numpy\n'), ((2618, 2651), 'simpy.Container', 'simpy.Container', (['self.env'], {'init': '(0)'}), '(self.env, init=0)\n', (2633, 2651), False, 'import simpy\n'), ((4107, 4144), 'random.choice', 'random.choice', (['self.my_shared_folders'], {}), '(self.my_shared_folders)\n', (4120, 4144), False, 'import random\n'), ((17421, 17475), 'file_manager.SharedFile.from_cloud', 'SharedFile.from_cloud', (['fc', 'self.current_sf', 't', 'self.id'], {}), '(fc, self.current_sf, t, self.id)\n', (17442, 17475), False, 'from file_manager import SharedFile\n'), ((955, 970), 'random.random', 'random.random', ([], {}), '()\n', (968, 970), False, 'import random\n'), ((8524, 8560), 'simpy.events.AllOf', 'simpy.events.AllOf', (['self.env', 'events'], {}), '(self.env, events)\n', (8542, 8560), False, 'import simpy\n')] |
#!/usr/bin/env python #
# #
# Author: <NAME>, GSFC/CRESST/UMBC . #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
#------------------------------------------------------------------------------#
import os
import re
import numpy as np
import healpy as hp
from numba import jit
import astropy.io.fits as pf
from itertools import product
from scipy.special import factorial
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from Xgam import X_OUT
from Xgam.utils.logging_ import logger, startmsg
from Xgam.utils.spline_ import xInterpolatedUnivariateSplineLinear
# Matches the energy tag embedded in foreground file names, e.g. "_1000."
# in "fore_model_1000.fits". Written as a raw string: '\_' is not a valid
# Python string escape and triggers a DeprecationWarning / SyntaxWarning on
# modern interpreters (the compiled pattern is unchanged).
FORE_EN = re.compile(r'\_\d+\.')
def get_fore_integral_flux_map(fore_files_list, e_min, e_max):
    """
    A powerlaw is assumed for the foreground energy spectrum, hence
    the interpolation between 2 given maps at given energies (given
    by the model) is done in logarithmic scales.

    Parameters
    ----------
    fore_files_list: list of str
        Ordered list of the foreground files (one for each energy)
    e_min: float
        the min of the energy bin
    e_max: float
        the max of the energy bin

    Returns
    -------
    array
        foreground map integrated between e_min and e_max
    """
    # Parse the energy tag out of each file name (e.g. "..._1000.fits" -> 1000)
    fore_en = []
    for ff in fore_files_list:
        m = re.search(FORE_EN, ff)
        en = int(m.group(0).replace('_', '').replace('.', ''))
        fore_en.append(en)
    fore_en = np.array(fore_en)
    # Output file name encodes the integration bin
    out_name = fore_files_list[0].replace('_%i.fits'%fore_en[0],
                                          '_%d-%d.fits'%(e_min, e_max))
    if os.path.exists(out_name):
        # Cached result from a previous run: read it back instead of recomputing
        logger.info('ATT: file %s already exists and returned...'%out_name)
        fore_map = hp.read_map(out_name)
        return fore_map
    else:
        logger.info('Computing the integral flux of the foreground model...')
        logger.info('...between %.2f - %.2f'%(e_min, e_max))
        # Model energies bracketing each edge of the bin
        fore_emin_sx, fore_emin_dx = find_outer_energies(e_min, fore_en)
        fore_emax_sx, fore_emax_dx = find_outer_energies(e_max, fore_en)
        fore_emin_sx_ind = np.where(fore_en == fore_emin_sx)[0][0]
        fore_emin_dx_ind = np.where(fore_en == fore_emin_dx)[0][0]
        fore_emax_sx_ind = np.where(fore_en == fore_emax_sx)[0][0]
        fore_emax_dx_ind = np.where(fore_en == fore_emax_dx)[0][0]
        fore_fmin_sx = hp.read_map(fore_files_list[fore_emin_sx_ind])
        fore_fmin_dx = hp.read_map(fore_files_list[fore_emin_dx_ind])
        fore_fmax_sx = hp.read_map(fore_files_list[fore_emax_sx_ind])
        fore_fmax_dx = hp.read_map(fore_files_list[fore_emax_dx_ind])
        # Pixel-wise log-log linear (i.e. power-law) interpolation of the
        # model maps at e_min and e_max: m1/m2 are the log-log slopes
        m1 = (np.log10(fore_fmin_sx)-np.log10(fore_fmin_dx))/ \
            (np.log10(fore_emin_sx)-np.log10(fore_emin_dx))
        m2 = (np.log10(fore_fmax_sx)-np.log10(fore_fmax_dx))/ \
            (np.log10(fore_emax_sx)-np.log10(fore_emax_dx))
        logfore1 = m1*(np.log10(e_min)-np.log10(fore_emin_sx))+ \
            np.log10(fore_fmin_sx)
        logfore2 = m2*(np.log10(e_max)-np.log10(fore_emax_sx))+ \
            np.log10(fore_fmax_sx)
        fore1 = 10**(logfore1)
        fore2 = 10**(logfore2)
        # Bin integral: geometric mean of the edge fluxes times the bin width
        fore_integ_map = np.sqrt(fore1*fore2)*(e_max - e_min)
        hp.write_map(out_name, fore_integ_map)
        logger.info('Created file %s'%out_name)
        return fore_integ_map
def find_outer_energies(en_val, en_arr):
    """
    Return the two model energies bracketing ``en_val``: the closest value
    below it and the closest value above it in the ordered array ``en_arr``.
    These values are used to integrate the foreground model in the
    considered energy bin. If ``en_val`` falls outside the array, the two
    nearest edge values are returned instead (and a message is logged).

    Parameters
    ----------
    en_val : float
        mean energy
    en_arr : float
        array of the energies at which the foreground model is given.

    Returns
    -------
    float, float
        first element on the left and the first on the right
    """
    below = en_arr[en_arr < en_val]
    above = en_arr[en_arr > en_val]
    if below.size == 0:
        # en_val sits below the model grid: fall back on the two lowest energies
        en_sx, en_dx = above[0], above[1]
        logger.info('Considering model in the interval %.1f-%.1f MeV'
                    %(en_sx, en_dx))
    elif above.size == 0:
        # en_val sits above the model grid: fall back on the two highest energies
        en_sx, en_dx = below[-2], below[-1]
        logger.info('Considering model in the interval %.1f-%.1f MeV'
                    %(en_sx, en_dx))
    else:
        en_sx, en_dx = below[-1], above[0]
    return en_sx, en_dx
@jit
def myfactorial(_x):
    """
    Compute the factorial of every element of ``_x`` and return them as a
    numpy array. Kept as explicit Python loops (rather than
    scipy.special.factorial) so that numba's @jit can compile it.
    """
    results = []
    for value in _x:
        acc = 1
        for k in range(2, value + 1):
            acc *= k
        results.append(acc)
    return np.array(results)
@jit
def poisson_likelihood(norm_guess, const_guess, fore_map, data_map, exp=None, sr=None):
    """
    Compute the log-likelihood as decribed here:
    http://iopscience.iop.org/article/10.1088/0004-637X/750/1/3/pdf
    where the model to fit to data is given by norm*fore_map+const.

    Parameters
    ----------
    norm_guess : float
        initial guess for normalization parameter
    const_guess : float
        initial guess for constant parameter
    fore_map : numpy array
        healpix map of foreground model
    data_map : numpy array
        healpix map of data. It could be either a count map or a flux map.
        If a counts map is given, an exposure map should be given too. See
        next parameter.
    exp : numpy array or None
        healpix map of the exposure. Should be given if the data map is in
        counts (because foreground map is in flux units by default and it
        needs to be turned to counts to be fitted). While, If data map is
        in flux units, do not declare this parameter, which is None by
        default.
    sr : float or None
        pixel area -> 4*pi/Npix

    Returns
    -------
    float
        likelihood value.
    """
    a = norm_guess
    b = const_guess
    # data_map! appears in every pixel's Poisson term: compute it once
    factorial_data = factorial(data_map)
    lh = 0
    if exp is not None:
        # Data in counts: model flux converted via exposure * pixel area
        for i, f in enumerate(fore_map):
            lh += (a*f+b)*exp[i]*sr+np.log(factorial_data[i])-data_map[i]*np.log((a*f+b)*exp[i]*sr)
    else:
        # Data already in flux units
        for i, f in enumerate(fore_map):
            lh += np.sum(((a*f+b)+np.log(factorial_data[i])-data_map[i]*np.log((a*f+b))))
    return lh
def get_2params_profile_likelihood(lh_matrix, param1_list, param2_list):
    """
    Returns splines with profile likelihood for the two parameters of the fit.
    NOTE: param1 is supposed to be the normalization, param2 the constant.
    """
    # Profile over the other parameter by minimizing along its axis
    profile_norm = np.amin(lh_matrix, axis=1)
    profile_const = np.amin(lh_matrix, axis=0)
    norm_fmt = dict(xname='N', xunits='', yname='Likelihood', yunits='')
    const_fmt = dict(xname='C', xunits='', yname='Likelihood', yunits='')
    spline_norm = xInterpolatedUnivariateSplineLinear(param1_list, profile_norm,
                                                     **norm_fmt)
    spline_const = xInterpolatedUnivariateSplineLinear(param2_list, profile_const,
                                                      **const_fmt)
    return spline_norm, spline_const
def get_param_error(profilelh_spline, param_array, lh_delta=2.3):
    """
    Return the parameter values where the profile likelihood crosses its
    minimum plus ``lh_delta`` (2.3 by default), i.e. the left and right
    error bounds. If the minimum lies at either edge of the scanned range,
    the full range is returned (upper/lower limit).
    """
    lh_values = profilelh_spline(param_array)
    idx_min = np.argmin(lh_values)
    threshold = lh_values[idx_min] + lh_delta
    if idx_min == 0:
        logger.info('ATT: UPPER limit!')
        return param_array[0], param_array[-1]
    if idx_min == len(lh_values) - 1:
        logger.info('ATT: LOWER limit!')
        return param_array[0], param_array[-1]
    # Closest grid points to the threshold crossing, on each side of the minimum
    left = np.abs(lh_values[:idx_min] - threshold).argmin()
    right = idx_min + np.abs(lh_values[idx_min:] - threshold).argmin()
    return param_array[left], param_array[right]
def fit_foreground_poisson(fore_map, data_map, mask_map=None, n_guess=1.,
                           c_guess=0.1,exp=None, smooth=False, show=False):
    """
    Performs the poissonian fit, recursively computing the log likelihood
    (using poisson_likelihood) for a grid of values of fit parameters around
    the guess. Returns the values of parameters which minimize the log
    likelihood, together to the 1-sigma error

    Parameters
    ----------
    n_guess : float
        initial guess for normalization parameter
    c_guess : float
        initial guess for constant parameter
    fore_map : numpy array
        healpix map of foreground model
    data_map : numpy array
        healpix map of data. It could be either a count map or a flux map.
        If a counts map is given, an exposure map should be given too. See
        next parameter.
    mask_map : numpy array or None
        healpix mask map; if None a default mask (30 deg galactic plane,
        2 deg around sources) is read from X_OUT.
    exp : numpy array or None
        healpix map of the exposure. Should be given if the data map is in
        counts (because foreground map is in flux units by default and it
        needs to be turned to counts to be fitted). While, If data map is
        in flux units, do not declare this parameter, which is None by
        default.
    smooth : bool
        not implemented yet...
    show : bool
        if true it shows some useful plot to check if the fit is functioning

    Returns
    -------
    float, float, float, float, float, float
        In order: best fit N, best fit C, N's right error, N's left error,
        C's right error, C's left error
    """
    #show=True
    #mask_map=None
    logger.info('Performing poissonian fit...')
    norm_guess = n_guess
    igrb_guess = c_guess
    # The fit is run on maps down-graded to nside=64 to keep it tractable
    nside_out = 64
    mask = 0.
    logger.info('N guess = %.2f - C guess = %.1e'%(norm_guess, igrb_guess))
    if mask_map is None:
        logger.info('fit outside default mask: 30deg gp, 2 deg srcs.')
        mask_f = os.path.join(X_OUT, 'fits/Mask_hp64_src2_gp30.fits')
        mask = hp.read_map(mask_f)
    else:
        logger.info('fit outside mask given in config file.')
        mask = mask_map
    mask = np.array(hp.ud_grade(mask, nside_out=nside_out,
                                power=-2))
    # Re-binarize the mask after the ud_grade smearing
    mask[np.where(mask!=np.amax(mask))[0]] = 0
    mask[np.where(mask==np.amax(mask))[0]] = 1
    logger.info('down grade...')
    fore_repix = np.array(hp.ud_grade(fore_map, nside_out=nside_out))
    data_repix = np.array(hp.ud_grade(data_map, nside_out=nside_out, power=-2))
    # Indices of the unmasked pixels actually entering the fit
    _unmask = np.where(mask > 1e-30)[0]
    # Parameter grids: N scanned linearly, C scanned logarithmically
    norm_list = np.linspace(norm_guess-0.8, norm_guess+0.8, 200)
    igrb_list = np.logspace(np.log10(igrb_guess*0.01), np.log10(igrb_guess*10), 200)
    logger.info('-------------------------------')
    logger.info('Minimization likelihood run1...')
    lh_list = []
    combinations = list(product(norm_list, igrb_list))
    if exp is not None:
        # Data in counts: convert the model to counts with exposure * pixel area
        exposure = exp
        exposure = np.array(hp.ud_grade(exposure, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i,j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask],
                                    exp=exposure[_unmask],
                                    sr=areapix)
            lh_list.append(lh)
    else:
        for i,j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask], data_repix[_unmask])
            lh_list.append(lh)
    lh_list = np.array(lh_list)
    # -logL on the (N, C) grid, then profiled along each parameter
    lh_matrix = lh_list.reshape(len(norm_list), len(igrb_list))
    prof_lh_norm, prof_lh_igrb = get_2params_profile_likelihood(lh_matrix, norm_list, igrb_list)
    # Fine grids for locating the minimum and the delta-logL crossings
    nn = np.linspace(np.amin(norm_list), np.amax(norm_list), 1000)
    cc = np.linspace(np.amin(igrb_list), np.amax(igrb_list), 1000)
    lh_min = np.amin(prof_lh_norm(nn))
    logger.info('Minimum -LogL = %s'%lh_min)
    norm_min = nn[np.argmin(prof_lh_norm(nn))]
    igrb_min = cc[np.argmin(prof_lh_igrb(cc))]
    logger.info('Run1 results: n=%.3f c=%e'%(norm_min, igrb_min))
    # 1-sigma bounds: delta(-logL) = 2.3 for two parameters of interest
    norm_sxerr, norm_dxerr = get_param_error(prof_lh_norm, nn, lh_delta=2.3)
    logger.info('Norm err: %.4f - %.4f'%(norm_sxerr, norm_dxerr))
    igrb_sxerr, igrb_dxerr = get_param_error(prof_lh_igrb, cc, lh_delta=2.3)
    logger.info('Igrb err: %.2e - %.2e'%(igrb_sxerr, igrb_dxerr))
    """
    logger.info('-------------------------------')
    logger.info('Minimization likelihood run2...')
    norm_list = np.linspace(norm_min-0.3, norm_min+0.3, 100)
    igrb_list = np.linspace(igrb_min*0.1, igrb_min*10, 200)
    lh_list = []
    combinations = np.array(list(product(norm_list, igrb_list)))
    if exp is not None:
        exposure = exp
        exposure = np.array(hp.ud_grade(exposure, nside_out=nside_out))
        areapix = 4*np.pi/(len(data_repix))
        for i,j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask],
                                    exp=exposure[_unmask],
                                    sr=areapix)
            lh_list.append(lh)
    else:
        for i,j in product(norm_list, igrb_list):
            lh = poisson_likelihood(i, j, fore_repix[_unmask],
                                    data_repix[_unmask])
            lh_list.append(lh)
    lh_list = np.array(lh_list)
    lh_matrix = lh_list.reshape(len(norm_list), len(igrb_list))
    prof_lh_norm, prof_lh_igrb = get_2params_profile_likelihood(lh_matrix, norm_list, igrb_list)
    nn = np.linspace(np.amin(norm_list), np.amax(norm_list), 500)
    cc = np.linspace(np.amin(igrb_list), np.amax(igrb_list), 1000)
    lh_min = np.amin(prof_lh_norm(nn))
    lh_delta = lh_min+2.3
    logger.info('Minimum -LogL = %s'%lh_min)
    norm_min = nn[np.argmin(prof_lh_norm(nn))]
    igrb_min = cc[np.argmin(prof_lh_igrb(cc))]
    logger.info('Run2 results: n=%.3f c=%e'%(norm_min, igrb_min))
    norm_sxerr, norm_dxerr = get_param_error(prof_lh_norm, nn, lh_delta)
    logger.info('Norm err: %.4f - %.4f'%(norm_sxerr, norm_dxerr))
    igrb_sxerr, igrb_dxerr = get_param_error(prof_lh_igrb, cc, lh_delta)
    logger.info('Norm err: %.4f - %.4f'%(igrb_sxerr, igrb_dxerr))
    """
    if show == True:
        # Diagnostic plots of the two profile likelihoods
        plt.figure(facecolor='white')
        plt.plot(nn, prof_lh_norm(nn), '-', color='black')
        plt.plot([norm_min, norm_min], [lh_min-10, lh_min+40], color='red')
        plt.plot([norm_sxerr, norm_sxerr], [lh_min-2, lh_min+40], 'r--', alpha=0.7)
        plt.plot([norm_dxerr, norm_dxerr], [lh_min-2, lh_min+40], 'r--', alpha=0.7)
        plt.xlabel('Normalization')
        plt.ylabel('-Log(Likelihood)')
        plt.ylim(lh_min-5, lh_min+30)
        plt.xlim(norm_min-0.2, norm_min+0.2)
        plt.figure(facecolor='white')
        plt.plot(cc, prof_lh_igrb(cc), '-', color='black')
        plt.plot([igrb_min, igrb_min], [lh_min-10, lh_min+40], color='red')
        plt.plot([igrb_sxerr, igrb_sxerr], [lh_min-2, lh_min+40], 'r--', alpha=0.7)
        plt.plot([igrb_dxerr, igrb_dxerr], [lh_min-2, lh_min+40], 'r--', alpha=0.7)
        plt.xlabel('Constant')
        plt.ylabel('-Log(Likelihood)')
        plt.ylim(lh_min-5, lh_min+30)
        plt.xlim(igrb_min*0.9, igrb_min*1.1)
        plt.xscale('log')
        """
        fig = plt.figure(facecolor='white')
        ax = fig.add_subplot(111)
        x, y = np.mgrid(norm_list, igrb_list)
        X, Y = np.mgrid(nn, cc)
        print('---------------', lh_matrix.shape, X.shape, Y.shape)
        print('---------------', lh_matrix.shape, x.shape, y.shape)
        Z = griddata((x, y), lh_matrix, (X, Y), method='linear')
        contours = plt.contour(X, Y, Z, 20, colors='0.4')
        cax = ax.matshow(Z, origin='lower', cmap='RdGy',
                  extent=[np.amin(norm_list), np.amax(norm_list),
                          np.amin(igrb_list), np.amax(igrb_list)],
                  aspect='auto', alpha=0.5)
        plt.clabel(contours, inline=True, fontsize=8)
        plt.ylabel('C [cm$^{-2}$s$^{-1}$sr$^{-1}$]')
        plt.xlabel('N')
        ax.xaxis.set_ticks_position('bottom')
        plt.grid('off')
        cb = plt.colorbar(cax, format='$%.1e$')
        cb.set_label('-Log(Likelihood)', rotation=90)
        """
        plt.show()
    return norm_min, igrb_min, norm_sxerr, norm_dxerr, igrb_sxerr, igrb_dxerr
def main():
    """Test module entry point (no tests available yet)."""
    logger.info('No test module is available at the moment... bye bye!')
    return 0
if __name__ == '__main__':
main()
| [
"numpy.log10",
"numpy.sqrt",
"re.compile",
"matplotlib.pyplot.ylabel",
"scipy.special.factorial",
"numpy.log",
"numpy.array",
"re.search",
"os.path.exists",
"numpy.where",
"matplotlib.pyplot.xlabel",
"itertools.product",
"Xgam.utils.spline_.xInterpolatedUnivariateSplineLinear",
"matplotlib... | [((1202, 1226), 're.compile', 're.compile', (['"""\\\\_\\\\d+\\\\."""'], {}), "('\\\\_\\\\d+\\\\.')\n", (1212, 1226), False, 'import re\n'), ((2036, 2053), 'numpy.array', 'np.array', (['fore_en'], {}), '(fore_en)\n', (2044, 2053), True, 'import numpy as np\n'), ((2198, 2222), 'os.path.exists', 'os.path.exists', (['out_name'], {}), '(out_name)\n', (2212, 2222), False, 'import os\n'), ((5208, 5223), 'numpy.array', 'np.array', (['_facx'], {}), '(_facx)\n', (5216, 5223), True, 'import numpy as np\n'), ((6526, 6545), 'scipy.special.factorial', 'factorial', (['data_map'], {}), '(data_map)\n', (6535, 6545), False, 'from scipy.special import factorial\n'), ((7138, 7164), 'numpy.amin', 'np.amin', (['lh_matrix'], {'axis': '(1)'}), '(lh_matrix, axis=1)\n', (7145, 7164), True, 'import numpy as np\n'), ((7176, 7202), 'numpy.amin', 'np.amin', (['lh_matrix'], {'axis': '(0)'}), '(lh_matrix, axis=0)\n', (7183, 7202), True, 'import numpy as np\n'), ((7295, 7357), 'Xgam.utils.spline_.xInterpolatedUnivariateSplineLinear', 'xInterpolatedUnivariateSplineLinear', (['param1_list', 'n_lh'], {}), '(param1_list, n_lh, **fmt1)\n', (7330, 7357), False, 'from Xgam.utils.spline_ import xInterpolatedUnivariateSplineLinear\n'), ((7441, 7503), 'Xgam.utils.spline_.xInterpolatedUnivariateSplineLinear', 'xInterpolatedUnivariateSplineLinear', (['param2_list', 'c_lh'], {}), '(param2_list, c_lh, **fmt2)\n', (7476, 7503), False, 'from Xgam.utils.spline_ import xInterpolatedUnivariateSplineLinear\n'), ((7687, 7706), 'numpy.argmin', 'np.argmin', (['lh_array'], {}), '(lh_array)\n', (7696, 7706), True, 'import numpy as np\n'), ((9850, 9893), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""Performing poissonian fit..."""'], {}), "('Performing poissonian fit...')\n", (9861, 9893), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((9981, 10054), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('N guess = %.2f - C guess = %.1e' % (norm_guess, igrb_guess))"], {}), "('N 
guess = %.2f - C guess = %.1e' % (norm_guess, igrb_guess))\n", (9992, 10054), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10568, 10596), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""down grade..."""'], {}), "('down grade...')\n", (10579, 10596), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10804, 10856), 'numpy.linspace', 'np.linspace', (['(norm_guess - 0.8)', '(norm_guess + 0.8)', '(200)'], {}), '(norm_guess - 0.8, norm_guess + 0.8, 200)\n', (10815, 10856), True, 'import numpy as np\n'), ((10947, 10993), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""-------------------------------"""'], {}), "('-------------------------------')\n", (10958, 10993), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10998, 11044), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""Minimization likelihood run1..."""'], {}), "('Minimization likelihood run1...')\n", (11009, 11044), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((11782, 11799), 'numpy.array', 'np.array', (['lh_list'], {}), '(lh_list)\n', (11790, 11799), True, 'import numpy as np\n'), ((12148, 12190), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Minimum -LogL = %s' % lh_min)"], {}), "('Minimum -LogL = %s' % lh_min)\n", (12159, 12190), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((12292, 12355), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Run1 results: n=%.3f c=%e' % (norm_min, igrb_min))"], {}), "('Run1 results: n=%.3f c=%e' % (norm_min, igrb_min))\n", (12303, 12355), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((12440, 12503), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Norm err: %.4f - %.4f' % (norm_sxerr, norm_dxerr))"], {}), "('Norm err: %.4f - %.4f' % (norm_sxerr, norm_dxerr))\n", (12451, 12503), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((12583, 12646), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Igrb err: %.2e - %.2e' % 
(igrb_sxerr, igrb_dxerr))"], {}), "('Igrb err: %.2e - %.2e' % (igrb_sxerr, igrb_dxerr))\n", (12594, 12646), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((16795, 16863), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""No test module is available at the moment... bye bye!"""'], {}), "('No test module is available at the moment... bye bye!')\n", (16806, 16863), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((1909, 1931), 're.search', 're.search', (['FORE_EN', 'ff'], {}), '(FORE_EN, ff)\n', (1918, 1931), False, 'import re\n'), ((2232, 2301), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('ATT: file %s already exists and returned...' % out_name)"], {}), "('ATT: file %s already exists and returned...' % out_name)\n", (2243, 2301), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((2319, 2340), 'healpy.read_map', 'hp.read_map', (['out_name'], {}), '(out_name)\n', (2330, 2340), True, 'import healpy as hp\n'), ((2383, 2452), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""Computing the integral flux of the foreground model..."""'], {}), "('Computing the integral flux of the foreground model...')\n", (2394, 2452), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((2461, 2515), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('...between %.2f - %.2f' % (e_min, e_max))"], {}), "('...between %.2f - %.2f' % (e_min, e_max))\n", (2472, 2515), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((2951, 2997), 'healpy.read_map', 'hp.read_map', (['fore_files_list[fore_emin_sx_ind]'], {}), '(fore_files_list[fore_emin_sx_ind])\n', (2962, 2997), True, 'import healpy as hp\n'), ((3021, 3067), 'healpy.read_map', 'hp.read_map', (['fore_files_list[fore_emin_dx_ind]'], {}), '(fore_files_list[fore_emin_dx_ind])\n', (3032, 3067), True, 'import healpy as hp\n'), ((3091, 3137), 'healpy.read_map', 'hp.read_map', (['fore_files_list[fore_emax_sx_ind]'], {}), '(fore_files_list[fore_emax_sx_ind])\n', 
(3102, 3137), True, 'import healpy as hp\n'), ((3161, 3207), 'healpy.read_map', 'hp.read_map', (['fore_files_list[fore_emax_dx_ind]'], {}), '(fore_files_list[fore_emax_dx_ind])\n', (3172, 3207), True, 'import healpy as hp\n'), ((3790, 3828), 'healpy.write_map', 'hp.write_map', (['out_name', 'fore_integ_map'], {}), '(out_name, fore_integ_map)\n', (3802, 3828), True, 'import healpy as hp\n'), ((3837, 3878), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Created file %s' % out_name)"], {}), "('Created file %s' % out_name)\n", (3848, 3878), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((4660, 4739), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Considering model in the interval %.1f-%.1f MeV' % (en_sx, en_dx))"], {}), "('Considering model in the interval %.1f-%.1f MeV' % (en_sx, en_dx))\n", (4671, 4739), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((7782, 7814), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""ATT: UPPER limit!"""'], {}), "('ATT: UPPER limit!')\n", (7793, 7814), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10086, 10148), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""fit outside default mask: 30deg gp, 2 deg srcs."""'], {}), "('fit outside default mask: 30deg gp, 2 deg srcs.')\n", (10097, 10148), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10166, 10218), 'os.path.join', 'os.path.join', (['X_OUT', '"""fits/Mask_hp64_src2_gp30.fits"""'], {}), "(X_OUT, 'fits/Mask_hp64_src2_gp30.fits')\n", (10178, 10218), False, 'import os\n'), ((10234, 10253), 'healpy.read_map', 'hp.read_map', (['mask_f'], {}), '(mask_f)\n', (10245, 10253), True, 'import healpy as hp\n'), ((10272, 10325), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""fit outside mask given in config file."""'], {}), "('fit outside mask given in config file.')\n", (10283, 10325), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10623, 10665), 'healpy.ud_grade', 'hp.ud_grade', 
(['fore_map'], {'nside_out': 'nside_out'}), '(fore_map, nside_out=nside_out)\n', (10634, 10665), True, 'import healpy as hp\n'), ((10693, 10745), 'healpy.ud_grade', 'hp.ud_grade', (['data_map'], {'nside_out': 'nside_out', 'power': '(-2)'}), '(data_map, nside_out=nside_out, power=-2)\n', (10704, 10745), True, 'import healpy as hp\n'), ((10761, 10783), 'numpy.where', 'np.where', (['(mask > 1e-30)'], {}), '(mask > 1e-30)\n', (10769, 10783), True, 'import numpy as np\n'), ((10881, 10908), 'numpy.log10', 'np.log10', (['(igrb_guess * 0.01)'], {}), '(igrb_guess * 0.01)\n', (10889, 10908), True, 'import numpy as np\n'), ((10908, 10933), 'numpy.log10', 'np.log10', (['(igrb_guess * 10)'], {}), '(igrb_guess * 10)\n', (10916, 10933), True, 'import numpy as np\n'), ((11086, 11115), 'itertools.product', 'product', (['norm_list', 'igrb_list'], {}), '(norm_list, igrb_list)\n', (11093, 11115), False, 'from itertools import product\n'), ((11299, 11328), 'itertools.product', 'product', (['norm_list', 'igrb_list'], {}), '(norm_list, igrb_list)\n', (11306, 11328), False, 'from itertools import product\n'), ((11617, 11646), 'itertools.product', 'product', (['norm_list', 'igrb_list'], {}), '(norm_list, igrb_list)\n', (11624, 11646), False, 'from itertools import product\n'), ((11987, 12005), 'numpy.amin', 'np.amin', (['norm_list'], {}), '(norm_list)\n', (11994, 12005), True, 'import numpy as np\n'), ((12007, 12025), 'numpy.amax', 'np.amax', (['norm_list'], {}), '(norm_list)\n', (12014, 12025), True, 'import numpy as np\n'), ((12054, 12072), 'numpy.amin', 'np.amin', (['igrb_list'], {}), '(igrb_list)\n', (12061, 12072), True, 'import numpy as np\n'), ((12074, 12092), 'numpy.amax', 'np.amax', (['igrb_list'], {}), '(igrb_list)\n', (12081, 12092), True, 'import numpy as np\n'), ((14599, 14628), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (14609, 14628), True, 'import matplotlib.pyplot as plt\n'), ((14696, 14767), 
'matplotlib.pyplot.plot', 'plt.plot', (['[norm_min, norm_min]', '[lh_min - 10, lh_min + 40]'], {'color': '"""red"""'}), "([norm_min, norm_min], [lh_min - 10, lh_min + 40], color='red')\n", (14704, 14767), True, 'import matplotlib.pyplot as plt\n'), ((14772, 14851), 'matplotlib.pyplot.plot', 'plt.plot', (['[norm_sxerr, norm_sxerr]', '[lh_min - 2, lh_min + 40]', '"""r--"""'], {'alpha': '(0.7)'}), "([norm_sxerr, norm_sxerr], [lh_min - 2, lh_min + 40], 'r--', alpha=0.7)\n", (14780, 14851), True, 'import matplotlib.pyplot as plt\n'), ((14856, 14935), 'matplotlib.pyplot.plot', 'plt.plot', (['[norm_dxerr, norm_dxerr]', '[lh_min - 2, lh_min + 40]', '"""r--"""'], {'alpha': '(0.7)'}), "([norm_dxerr, norm_dxerr], [lh_min - 2, lh_min + 40], 'r--', alpha=0.7)\n", (14864, 14935), True, 'import matplotlib.pyplot as plt\n'), ((14940, 14967), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalization"""'], {}), "('Normalization')\n", (14950, 14967), True, 'import matplotlib.pyplot as plt\n'), ((14976, 15006), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-Log(Likelihood)"""'], {}), "('-Log(Likelihood)')\n", (14986, 15006), True, 'import matplotlib.pyplot as plt\n'), ((15015, 15048), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(lh_min - 5)', '(lh_min + 30)'], {}), '(lh_min - 5, lh_min + 30)\n', (15023, 15048), True, 'import matplotlib.pyplot as plt\n'), ((15053, 15093), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(norm_min - 0.2)', '(norm_min + 0.2)'], {}), '(norm_min - 0.2, norm_min + 0.2)\n', (15061, 15093), True, 'import matplotlib.pyplot as plt\n'), ((15107, 15136), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (15117, 15136), True, 'import matplotlib.pyplot as plt\n'), ((15204, 15275), 'matplotlib.pyplot.plot', 'plt.plot', (['[igrb_min, igrb_min]', '[lh_min - 10, lh_min + 40]'], {'color': '"""red"""'}), "([igrb_min, igrb_min], [lh_min - 10, lh_min + 40], color='red')\n", (15212, 15275), True, 'import matplotlib.pyplot as 
plt\n'), ((15280, 15359), 'matplotlib.pyplot.plot', 'plt.plot', (['[igrb_sxerr, igrb_sxerr]', '[lh_min - 2, lh_min + 40]', '"""r--"""'], {'alpha': '(0.7)'}), "([igrb_sxerr, igrb_sxerr], [lh_min - 2, lh_min + 40], 'r--', alpha=0.7)\n", (15288, 15359), True, 'import matplotlib.pyplot as plt\n'), ((15364, 15443), 'matplotlib.pyplot.plot', 'plt.plot', (['[igrb_dxerr, igrb_dxerr]', '[lh_min - 2, lh_min + 40]', '"""r--"""'], {'alpha': '(0.7)'}), "([igrb_dxerr, igrb_dxerr], [lh_min - 2, lh_min + 40], 'r--', alpha=0.7)\n", (15372, 15443), True, 'import matplotlib.pyplot as plt\n'), ((15448, 15470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Constant"""'], {}), "('Constant')\n", (15458, 15470), True, 'import matplotlib.pyplot as plt\n'), ((15479, 15509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-Log(Likelihood)"""'], {}), "('-Log(Likelihood)')\n", (15489, 15509), True, 'import matplotlib.pyplot as plt\n'), ((15518, 15551), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(lh_min - 5)', '(lh_min + 30)'], {}), '(lh_min - 5, lh_min + 30)\n', (15526, 15551), True, 'import matplotlib.pyplot as plt\n'), ((15556, 15596), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(igrb_min * 0.9)', '(igrb_min * 1.1)'], {}), '(igrb_min * 0.9, igrb_min * 1.1)\n', (15564, 15596), True, 'import matplotlib.pyplot as plt\n'), ((15601, 15618), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (15611, 15618), True, 'import matplotlib.pyplot as plt\n'), ((16645, 16655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16653, 16655), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3556), 'numpy.log10', 'np.log10', (['fore_fmin_sx'], {}), '(fore_fmin_sx)\n', (3542, 3556), True, 'import numpy as np\n'), ((3635, 3657), 'numpy.log10', 'np.log10', (['fore_fmax_sx'], {}), '(fore_fmax_sx)\n', (3643, 3657), True, 'import numpy as np\n'), ((3745, 3767), 'numpy.sqrt', 'np.sqrt', (['(fore1 * fore2)'], {}), '(fore1 * fore2)\n', (3752, 3767), True, 'import numpy as np\n'), ((4857, 
4936), 'Xgam.utils.logging_.logger.info', 'logger.info', (["('Considering model in the interval %.1f-%.1f MeV' % (en_sx, en_dx))"], {}), "('Considering model in the interval %.1f-%.1f MeV' % (en_sx, en_dx))\n", (4868, 4936), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((7928, 7960), 'Xgam.utils.logging_.logger.info', 'logger.info', (['"""ATT: LOWER limit!"""'], {}), "('ATT: LOWER limit!')\n", (7939, 7960), False, 'from Xgam.utils.logging_ import logger, startmsg\n'), ((10374, 10422), 'healpy.ud_grade', 'hp.ud_grade', (['mask'], {'nside_out': 'nside_out', 'power': '(-2)'}), '(mask, nside_out=nside_out, power=-2)\n', (10385, 10422), True, 'import healpy as hp\n'), ((11192, 11234), 'healpy.ud_grade', 'hp.ud_grade', (['exposure'], {'nside_out': 'nside_out'}), '(exposure, nside_out=nside_out)\n', (11203, 11234), True, 'import healpy as hp\n'), ((2687, 2720), 'numpy.where', 'np.where', (['(fore_en == fore_emin_sx)'], {}), '(fore_en == fore_emin_sx)\n', (2695, 2720), True, 'import numpy as np\n'), ((2754, 2787), 'numpy.where', 'np.where', (['(fore_en == fore_emin_dx)'], {}), '(fore_en == fore_emin_dx)\n', (2762, 2787), True, 'import numpy as np\n'), ((2821, 2854), 'numpy.where', 'np.where', (['(fore_en == fore_emax_sx)'], {}), '(fore_en == fore_emax_sx)\n', (2829, 2854), True, 'import numpy as np\n'), ((2888, 2921), 'numpy.where', 'np.where', (['(fore_en == fore_emax_dx)'], {}), '(fore_en == fore_emax_dx)\n', (2896, 2921), True, 'import numpy as np\n'), ((3222, 3244), 'numpy.log10', 'np.log10', (['fore_fmin_sx'], {}), '(fore_fmin_sx)\n', (3230, 3244), True, 'import numpy as np\n'), ((3245, 3267), 'numpy.log10', 'np.log10', (['fore_fmin_dx'], {}), '(fore_fmin_dx)\n', (3253, 3267), True, 'import numpy as np\n'), ((3285, 3307), 'numpy.log10', 'np.log10', (['fore_emin_sx'], {}), '(fore_emin_sx)\n', (3293, 3307), True, 'import numpy as np\n'), ((3308, 3330), 'numpy.log10', 'np.log10', (['fore_emin_dx'], {}), '(fore_emin_dx)\n', (3316, 3330), True, 'import 
numpy as np\n'), ((3346, 3368), 'numpy.log10', 'np.log10', (['fore_fmax_sx'], {}), '(fore_fmax_sx)\n', (3354, 3368), True, 'import numpy as np\n'), ((3369, 3391), 'numpy.log10', 'np.log10', (['fore_fmax_dx'], {}), '(fore_fmax_dx)\n', (3377, 3391), True, 'import numpy as np\n'), ((3409, 3431), 'numpy.log10', 'np.log10', (['fore_emax_sx'], {}), '(fore_emax_sx)\n', (3417, 3431), True, 'import numpy as np\n'), ((3432, 3454), 'numpy.log10', 'np.log10', (['fore_emax_dx'], {}), '(fore_emax_dx)\n', (3440, 3454), True, 'import numpy as np\n'), ((3479, 3494), 'numpy.log10', 'np.log10', (['e_min'], {}), '(e_min)\n', (3487, 3494), True, 'import numpy as np\n'), ((3495, 3517), 'numpy.log10', 'np.log10', (['fore_emin_sx'], {}), '(fore_emin_sx)\n', (3503, 3517), True, 'import numpy as np\n'), ((3580, 3595), 'numpy.log10', 'np.log10', (['e_max'], {}), '(e_max)\n', (3588, 3595), True, 'import numpy as np\n'), ((3596, 3618), 'numpy.log10', 'np.log10', (['fore_emax_sx'], {}), '(fore_emax_sx)\n', (3604, 3618), True, 'import numpy as np\n'), ((6658, 6683), 'numpy.log', 'np.log', (['factorial_data[i]'], {}), '(factorial_data[i])\n', (6664, 6683), True, 'import numpy as np\n'), ((6696, 6729), 'numpy.log', 'np.log', (['((a * f + b) * exp[i] * sr)'], {}), '((a * f + b) * exp[i] * sr)\n', (6702, 6729), True, 'import numpy as np\n'), ((6807, 6832), 'numpy.log', 'np.log', (['factorial_data[i]'], {}), '(factorial_data[i])\n', (6813, 6832), True, 'import numpy as np\n'), ((6845, 6862), 'numpy.log', 'np.log', (['(a * f + b)'], {}), '(a * f + b)\n', (6851, 6862), True, 'import numpy as np\n'), ((8065, 8102), 'numpy.abs', 'np.abs', (['(lh_array[:lh_min_idx] - lherr)'], {}), '(lh_array[:lh_min_idx] - lherr)\n', (8071, 8102), True, 'import numpy as np\n'), ((10490, 10503), 'numpy.amax', 'np.amax', (['mask'], {}), '(mask)\n', (10497, 10503), True, 'import numpy as np\n'), ((10541, 10554), 'numpy.amax', 'np.amax', (['mask'], {}), '(mask)\n', (10548, 10554), True, 'import numpy as np\n'), ((8153, 
8190), 'numpy.abs', 'np.abs', (['(lh_array[lh_min_idx:] - lherr)'], {}), '(lh_array[lh_min_idx:] - lherr)\n', (8159, 8190), True, 'import numpy as np\n')] |
import numpy as np
def minmax(it):
    """Return ``(smallest, largest)`` element of *it* in a single pass.

    Works on any iterable, including one-shot generators, because both
    extremes are tracked in the same traversal.

    :param it: iterable of mutually comparable values.
    :return: ``(min, max)`` tuple, or ``(None, None)`` if *it* is empty.
    """
    # Renamed locals: the original shadowed the builtins ``min`` and ``max``.
    lo = hi = None
    for val in it:
        if lo is None or val < lo:
            lo = val
        if hi is None or val > hi:
            hi = val
    return lo, hi
def NGaussFunc(x, *params):
    """Evaluate a sum of Gaussian peaks plus a constant baseline.

    *params* is a flat sequence of ``(center, amplitude, width)`` triples
    (``x0 pk width``) followed by one trailing constant offset.
    """
    peaks = params[:-1]
    total = np.zeros_like(x)
    # Walk the (center, amplitude, width) triples via strided slices.
    for ctr, amp, wid in zip(peaks[0::3], peaks[1::3], peaks[2::3]):
        total = total + amp * np.exp(-((x - ctr) / wid) ** 2)
    return total + params[-1]
| [
"numpy.exp",
"numpy.zeros_like"
] | [((268, 284), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (281, 284), True, 'import numpy as np\n'), ((430, 461), 'numpy.exp', 'np.exp', (['(-((x - ctr) / wid) ** 2)'], {}), '(-((x - ctr) / wid) ** 2)\n', (436, 461), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : MLStudio #
# File : \test_optimizers copy.py #
# Python : 3.8.3 #
# --------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/nov8ai/MLStudio #
# --------------------------------------------------------------------------- #
# Created : Thursday, July 9th 2020, 8:23:30 am #
# Last Modified : Thursday, July 9th 2020, 8:23:30 am #
# Modified By : <NAME> (<EMAIL>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 nov8.ai #
# =========================================================================== #
#%%
import math
import os
from pathlib import Path
import sys
import glob
import numpy as np
import pandas as pd
import pytest
from pytest import mark
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_regression, make_classification
from sklearn.datasets import make_multilabel_classification
# Put the repo root and the shared test-data directory on sys.path so the
# mlstudio package and its test fixtures import without installation.
homedir = str(Path(__file__).parents[3])
# NOTE(review): Windows-style separator in "tests\\test_data" — verify on POSIX.
datadir = os.path.join(homedir, "tests\\test_data")
sys.path.append(homedir)
sys.path.append(datadir)
from mlstudio.supervised.algorithms.optimization.services.optimizers import GradientDescentOptimizer
from mlstudio.supervised.algorithms.optimization.services.optimizers import Momentum, Nesterov
from mlstudio.supervised.algorithms.optimization.services.optimizers import Adagrad, Adadelta
from mlstudio.supervised.algorithms.optimization.services.optimizers import RMSprop, Adam, AdaMax
from mlstudio.supervised.algorithms.optimization.services.optimizers import Nadam, AMSGrad, AdamW
from mlstudio.supervised.algorithms.optimization.services.optimizers import AggMo, QuasiHyperbolicMomentum
# -------------------------------------------------------------------------- #
# Mock gradient function
def gradient(theta):
    """Mock gradient used by the optimizer tests: shrink theta by 5% per call."""
    return theta * 0.95
@mark.optimizers
@mark.momentum
def test_optimizer_momentum(get_optimization_momentum_test_package):
    """Momentum must reproduce the precomputed 10-step parameter trajectory."""
    pkg = get_optimization_momentum_test_package
    theta = pkg['theta_init']
    lr = pkg['alpha']
    opt = Momentum()
    for step in range(10):
        expected = pkg['theta'][step]
        assert np.allclose(theta, expected), \
            "Momentum not working, Iteration {i} expected {e}, actual {a}".format(
                i=str(step), e=str(expected), a=str(theta))
        theta, _ = opt(gradient, lr, theta)
@mark.optimizers
@mark.nesterov
def test_optimizer_nesterov(get_optimization_nesterov_test_package):
    """Nesterov must reproduce the precomputed 10-step parameter trajectory."""
    pkg = get_optimization_nesterov_test_package
    theta = pkg['theta_init']
    lr = pkg['alpha']
    opt = Nesterov()
    for step in range(10):
        expected = pkg['theta'][step]
        assert np.allclose(theta, expected), \
            "Nesterov not working, Iteration {i} expected {e}, actual {a}".format(
                i=str(step), e=str(expected), a=str(theta))
        theta, _ = opt(gradient, lr, theta)
@mark.optimizers
@mark.adagrad
def test_optimizer_adagrad(get_optimization_adagrad_test_package):
    """Adagrad must reproduce the precomputed 4-step parameter trajectory."""
    pkg = get_optimization_adagrad_test_package
    theta = pkg['theta_init']
    lr = pkg['alpha']
    opt = Adagrad()
    for step in range(4):
        expected = pkg['theta'][step]
        assert np.allclose(theta, expected), \
            "Adagrad not working, Iteration {i} expected {e}, actual {a}".format(
                i=str(step), e=str(expected), a=str(theta))
        theta, _ = opt(gradient, lr, theta)
"numpy.allclose",
"pathlib.Path",
"mlstudio.supervised.algorithms.optimization.services.optimizers.Nesterov",
"os.path.join",
"mlstudio.supervised.algorithms.optimization.services.optimizers.Adagrad",
"mlstudio.supervised.algorithms.optimization.services.optimizers.Momentum",
"sys.path.append"
] | [((1738, 1779), 'os.path.join', 'os.path.join', (['homedir', '"""tests\\\\test_data"""'], {}), "(homedir, 'tests\\\\test_data')\n", (1750, 1779), False, 'import os\n'), ((1780, 1804), 'sys.path.append', 'sys.path.append', (['homedir'], {}), '(homedir)\n', (1795, 1804), False, 'import sys\n'), ((1805, 1829), 'sys.path.append', 'sys.path.append', (['datadir'], {}), '(datadir)\n', (1820, 1829), False, 'import sys\n'), ((2813, 2823), 'mlstudio.supervised.algorithms.optimization.services.optimizers.Momentum', 'Momentum', ([], {}), '()\n', (2821, 2823), False, 'from mlstudio.supervised.algorithms.optimization.services.optimizers import Momentum, Nesterov\n'), ((3352, 3362), 'mlstudio.supervised.algorithms.optimization.services.optimizers.Nesterov', 'Nesterov', ([], {}), '()\n', (3360, 3362), False, 'from mlstudio.supervised.algorithms.optimization.services.optimizers import Momentum, Nesterov\n'), ((3882, 3891), 'mlstudio.supervised.algorithms.optimization.services.optimizers.Adagrad', 'Adagrad', ([], {}), '()\n', (3889, 3891), False, 'from mlstudio.supervised.algorithms.optimization.services.optimizers import Adagrad, Adadelta\n'), ((2863, 2896), 'numpy.allclose', 'np.allclose', (['theta', "p['theta'][i]"], {}), "(theta, p['theta'][i])\n", (2874, 2896), True, 'import numpy as np\n'), ((3402, 3435), 'numpy.allclose', 'np.allclose', (['theta', "p['theta'][i]"], {}), "(theta, p['theta'][i])\n", (3413, 3435), True, 'import numpy as np\n'), ((3930, 3963), 'numpy.allclose', 'np.allclose', (['theta', "p['theta'][i]"], {}), "(theta, p['theta'][i])\n", (3941, 3963), True, 'import numpy as np\n'), ((1701, 1715), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1705, 1715), False, 'from pathlib import Path\n')] |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import os
import sys
import csv
import ast
import logging
import pickle
import numpy as np
import pandas as pd
from ts_datasets.anomaly.base import TSADBaseDataset
from ts_datasets.anomaly.smd import download, combine_train_test_datasets
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
_handler = logging.StreamHandler(sys.stdout)
_handler.setLevel(logging.DEBUG)
_logger.addHandler(_handler)
class SMAP(TSADBaseDataset):
    """
    Soil Moisture Active Passive (SMAP) satellite and Mars Science Laboratory (MSL) rover Datasets.
    SMAP and MSL are two realworld public datasets, which are two real-world datasets expert-labeled by NASA.
    - source: https://github.com/khundman/telemanom
    """

    url = "https://www.dropbox.com/s/uv9ojw353qwzqht/SMAP.tar.gz?dl=1"

    def __init__(self, subset=None, rootdir=None):
        super().__init__()
        if rootdir is None:
            # Default to <merlion_root>/data/smap, resolved relative to this file.
            package_dir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(package_dir, "..", "..", ".."))
            rootdir = os.path.join(merlion_root, "data", "smap")

        # Fetch/unpack the archive if needed, then build the cached pickles.
        download(_logger, rootdir, SMAP.url, "SMAP")
        data_dir = os.path.join(rootdir, "SMAP")
        preprocess(_logger, data_dir, dataset="SMAP")

        # Combine the train and test splits into one labeled time series.
        df, metadata = combine_train_test_datasets(*load_data(data_dir, "SMAP"))
        self.time_series.append(df)
        self.metadata.append(metadata)
def preprocess(logger, data_folder, dataset):
    """
    Convert the raw telemanom files into cached train/test pickles.

    Builds a boolean anomaly-label vector for the test split from
    ``labeled_anomalies.csv``, concatenates the per-channel ``.npy`` arrays
    for each split, and writes ``{dataset}_train.pkl``, ``{dataset}_test.pkl``
    and ``{dataset}_test_label.pkl`` into ``data_folder``.  A no-op when all
    three pickles already exist.

    :param logger: logger used for progress messages.
    :param data_folder: directory holding the raw files; also receives the pickles.
    :param dataset: dataset name, e.g. ``"SMAP"`` or ``"MSL"``.
    """
    # Skip the (expensive) preprocessing if the cached pickles are all present.
    if (
        os.path.exists(os.path.join(data_folder, f"{dataset}_test_label.pkl"))
        and os.path.exists(os.path.join(data_folder, f"{dataset}_train.pkl"))
        and os.path.exists(os.path.join(data_folder, f"{dataset}_test.pkl"))
    ):
        return
    logger.info(f"Preprocessing {dataset}")
    with open(os.path.join(data_folder, "labeled_anomalies.csv"), "r") as f:
        csv_reader = csv.reader(f, delimiter=",")
        res = [row for row in csv_reader][1:]  # drop the CSV header row
    res = sorted(res, key=lambda k: k[0])  # sort by channel id for stable ordering
    labels = []
    # Keep only this dataset's channels; channel "P-2" is excluded
    # (presumably a known-bad channel — TODO confirm against telemanom docs).
    data_info = [row for row in res if row[1] == dataset and row[0] != "P-2"]
    for row in data_info:
        # row[2] holds a string like "[[start, end], ...]" of anomaly spans.
        anomalies = ast.literal_eval(row[2])
        length = int(row[-1])  # number of test timestamps for this channel
        label = np.zeros([length], dtype=bool)
        for anomaly in anomalies:
            label[anomaly[0] : anomaly[1] + 1] = True  # spans are inclusive
        labels.extend(label)
    labels = np.asarray(labels)
    with open(os.path.join(data_folder, f"{dataset}_test_label.pkl"), "wb") as f:
        pickle.dump(labels, f)
    # Concatenate the per-channel arrays for each split and cache them.
    for category in ["train", "test"]:
        data = []
        for row in data_info:
            data.extend(np.load(os.path.join(data_folder, category, row[0] + ".npy")))
        data = np.asarray(data)
        with open(os.path.join(data_folder, f"{dataset}_{category}.pkl"), "wb") as f:
            pickle.dump(data, f)
def load_data(directory, dataset):
    """Load the cached pickles for *dataset* from *directory*.

    Returns ``(train_df, test_df, test_labels)`` where the data are pandas
    DataFrames and the labels are cast to an integer array.
    """
    def _unpickle(name):
        # Small helper so each pickle is opened/closed the same way.
        with open(os.path.join(directory, name), "rb") as f:
            return pickle.load(f)

    test_data = _unpickle(f"{dataset}_test.pkl")
    test_labels = _unpickle(f"{dataset}_test_label.pkl")
    train_data = _unpickle(f"{dataset}_train.pkl")
    return pd.DataFrame(train_data), pd.DataFrame(test_data), test_labels.astype(int)
| [
"logging.getLogger",
"logging.StreamHandler",
"pickle.dump",
"numpy.asarray",
"ts_datasets.anomaly.smd.download",
"pickle.load",
"os.path.join",
"ast.literal_eval",
"os.path.abspath",
"numpy.zeros",
"pandas.DataFrame",
"csv.reader"
] | [((469, 496), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (486, 496), False, 'import logging\n'), ((540, 573), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (561, 573), False, 'import logging\n'), ((2680, 2698), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (2690, 2698), True, 'import numpy as np\n'), ((1399, 1443), 'ts_datasets.anomaly.smd.download', 'download', (['_logger', 'rootdir', 'SMAP.url', '"""SMAP"""'], {}), "(_logger, rootdir, SMAP.url, 'SMAP')\n", (1407, 1443), False, 'from ts_datasets.anomaly.smd import download, combine_train_test_datasets\n'), ((2190, 2218), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2200, 2218), False, 'import csv\n'), ((2448, 2472), 'ast.literal_eval', 'ast.literal_eval', (['row[2]'], {}), '(row[2])\n', (2464, 2472), False, 'import ast\n'), ((2519, 2549), 'numpy.zeros', 'np.zeros', (['[length]'], {'dtype': 'bool'}), '([length], dtype=bool)\n', (2527, 2549), True, 'import numpy as np\n'), ((2789, 2811), 'pickle.dump', 'pickle.dump', (['labels', 'f'], {}), '(labels, f)\n', (2800, 2811), False, 'import pickle\n'), ((3002, 3018), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3012, 3018), True, 'import numpy as np\n'), ((3269, 3283), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3280, 3283), False, 'import pickle\n'), ((3386, 3400), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3397, 3400), False, 'import pickle\n'), ((3497, 3511), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3508, 3511), False, 'import pickle\n'), ((3536, 3560), 'pandas.DataFrame', 'pd.DataFrame', (['train_data'], {}), '(train_data)\n', (3548, 3560), True, 'import pandas as pd\n'), ((3562, 3585), 'pandas.DataFrame', 'pd.DataFrame', (['test_data'], {}), '(test_data)\n', (3574, 3585), True, 'import pandas as pd\n'), ((1291, 1333), 'os.path.join', 'os.path.join', (['merlion_root', '"""data"""', 
'"""smap"""'], {}), "(merlion_root, 'data', 'smap')\n", (1303, 1333), False, 'import os\n'), ((1472, 1501), 'os.path.join', 'os.path.join', (['rootdir', '"""SMAP"""'], {}), "(rootdir, 'SMAP')\n", (1484, 1501), False, 'import os\n'), ((1814, 1868), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{dataset}_test_label.pkl"""'], {}), "(data_folder, f'{dataset}_test_label.pkl')\n", (1826, 1868), False, 'import os\n'), ((1897, 1946), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{dataset}_train.pkl"""'], {}), "(data_folder, f'{dataset}_train.pkl')\n", (1909, 1946), False, 'import os\n'), ((1975, 2023), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{dataset}_test.pkl"""'], {}), "(data_folder, f'{dataset}_test.pkl')\n", (1987, 2023), False, 'import os\n'), ((2106, 2156), 'os.path.join', 'os.path.join', (['data_folder', '"""labeled_anomalies.csv"""'], {}), "(data_folder, 'labeled_anomalies.csv')\n", (2118, 2156), False, 'import os\n'), ((2713, 2767), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{dataset}_test_label.pkl"""'], {}), "(data_folder, f'{dataset}_test_label.pkl')\n", (2725, 2767), False, 'import os\n'), ((3117, 3137), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (3128, 3137), False, 'import pickle\n'), ((3189, 3235), 'os.path.join', 'os.path.join', (['directory', 'f"""{dataset}_test.pkl"""'], {}), "(directory, f'{dataset}_test.pkl')\n", (3201, 3235), False, 'import os\n'), ((3298, 3350), 'os.path.join', 'os.path.join', (['directory', 'f"""{dataset}_test_label.pkl"""'], {}), "(directory, f'{dataset}_test_label.pkl')\n", (3310, 3350), False, 'import os\n'), ((3415, 3462), 'os.path.join', 'os.path.join', (['directory', 'f"""{dataset}_train.pkl"""'], {}), "(directory, f'{dataset}_train.pkl')\n", (3427, 3462), False, 'import os\n'), ((1161, 1186), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1176, 1186), False, 'import os\n'), ((1231, 1267), 'os.path.join', 'os.path.join', (['fdir', 
'""".."""', '""".."""', '""".."""'], {}), "(fdir, '..', '..', '..')\n", (1243, 1267), False, 'import os\n'), ((3037, 3091), 'os.path.join', 'os.path.join', (['data_folder', 'f"""{dataset}_{category}.pkl"""'], {}), "(data_folder, f'{dataset}_{category}.pkl')\n", (3049, 3091), False, 'import os\n'), ((1619, 1648), 'os.path.join', 'os.path.join', (['rootdir', '"""SMAP"""'], {}), "(rootdir, 'SMAP')\n", (1631, 1648), False, 'import os\n'), ((2932, 2984), 'os.path.join', 'os.path.join', (['data_folder', 'category', "(row[0] + '.npy')"], {}), "(data_folder, category, row[0] + '.npy')\n", (2944, 2984), False, 'import os\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import glob
import matplotlib.pyplot as plt
from PIL import Image
class customDataset(Dataset):
    """Wrap parallel image/label sequences as a torch ``Dataset``.

    Labels are converted to a float tensor up front; images are returned
    as-is unless a *transform* is supplied, in which case each image is
    converted to a PIL image first.
    """

    def __init__(self, data, targets, transform=None):
        self.data = data
        self.targets = torch.Tensor(targets)
        self.transform = transform

    def __getitem__(self, index):
        sample = self.data[index]
        label = self.targets[index]
        if self.transform:
            # PIL expects HWC uint8, so move channels last before converting.
            pil_img = Image.fromarray(self.data[index].astype(np.uint8).transpose(1, 2, 0))
            sample = self.transform(pil_img)
        return sample, label

    def __len__(self):
        return len(self.data)
def getFileData(path):
    """Load one corrupt-MNIST ``.npz`` archive.

    :param path: file path (or seekable file object) of an archive
        containing ``"images"`` and ``"labels"`` arrays.
    :return: ``(images, labels)`` as numpy arrays.
    """
    # Use a context manager so the NpzFile's underlying zip handle is
    # closed; the previous version left it open (file-descriptor leak).
    with np.load(path) as archive:
        imgs = np.array(archive["images"])
        labels = np.array(archive["labels"])
    return imgs, labels
def mnist(data_dir="../../../data/corruptmnist"):
    """Load the corrupt-MNIST data and wrap it in DataLoaders.

    :param data_dir: directory containing the ``train*.npz`` shards and
        ``test.npz``.  The default preserves the previously hard-coded
        location, so existing callers are unaffected.
    :return: ``(train_dataloader, test_dataloader)`` — a shuffled training
        loader with batch size 4 and a default-batched test loader.
    """

    def _load_split(paths):
        # Stack the npz shards at *paths* into (N, 1, 28, 28) image tensors
        # and a flat label vector (shared by the train and test splits).
        all_imgs = []
        all_labels = []
        for path in paths:
            imgs, labels = getFileData(path)
            all_imgs.append(imgs)
            all_labels.append(labels)
        X = np.array(all_imgs).reshape(-1, 1, 28, 28)
        y = np.array(all_labels).flatten()
        return torch.Tensor(X), torch.Tensor(y)

    X_train, y_train = _load_split(glob.glob(data_dir + "/train*.npz"))
    X_test, y_test = _load_split([data_dir + "/test.npz"])

    trainset = customDataset(X_train, y_train, transform=None)
    train_dataloader = DataLoader(trainset, batch_size=4, shuffle=True)
    testset = customDataset(X_test, y_test, transform=None)
    test_dataloader = DataLoader(testset)
    print("Data obtained")
    return train_dataloader, test_dataloader
| [
"torch.Tensor",
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.load",
"glob.glob"
] | [((749, 773), 'numpy.array', 'np.array', (["data['images']"], {}), "(data['images'])\n", (757, 773), True, 'import numpy as np\n'), ((787, 811), 'numpy.array', 'np.array', (["data['labels']"], {}), "(data['labels'])\n", (795, 811), True, 'import numpy as np\n'), ((873, 923), 'glob.glob', 'glob.glob', (['"""../../../data/corruptmnist/train*.npz"""'], {}), "('../../../data/corruptmnist/train*.npz')\n", (882, 923), False, 'import glob\n'), ((1820, 1841), 'torch.Tensor', 'torch.Tensor', (['X_train'], {}), '(X_train)\n', (1832, 1841), False, 'import torch\n'), ((1863, 1884), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (1875, 1884), False, 'import torch\n'), ((1906, 1926), 'torch.Tensor', 'torch.Tensor', (['X_test'], {}), '(X_test)\n', (1918, 1926), False, 'import torch\n'), ((1947, 1967), 'torch.Tensor', 'torch.Tensor', (['y_test'], {}), '(y_test)\n', (1959, 1967), False, 'import torch\n'), ((2071, 2119), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': '(4)', 'shuffle': '(True)'}), '(trainset, batch_size=4, shuffle=True)\n', (2081, 2119), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2219, 2238), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {}), '(testset)\n', (2229, 2238), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((281, 302), 'torch.Tensor', 'torch.Tensor', (['targets'], {}), '(targets)\n', (293, 302), False, 'import torch\n'), ((723, 736), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (730, 736), True, 'import numpy as np\n'), ((1202, 1219), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1210, 1219), True, 'import numpy as np\n'), ((1257, 1274), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1265, 1274), True, 'import numpy as np\n'), ((1461, 1477), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (1469, 1477), True, 'import numpy as np\n'), ((1514, 1530), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', 
(1522, 1530), True, 'import numpy as np\n')] |
import unittest
import parameterized
import numpy as np
from rlutil.envs.tabular_cy import q_iteration, tabular_env
from rlutil.envs.tabular import q_iteration as q_iteration_py
class QIterationTest(unittest.TestCase):
    """Cross-check the Cython q-iteration routines against the Python reference."""

    def setUp(self):
        self.env = tabular_env.CliffwalkEnv(num_states=3, transition_noise=0.01)

    def test_qiteration(self):
        """Cython soft Q-iteration must match the pure-Python implementation."""
        kwargs = dict(num_itrs=50, ent_wt=1.0, discount=0.99)
        reference = q_iteration_py.softq_iteration(self.env, **kwargs)
        fast = q_iteration.softq_iteration(self.env, **kwargs)
        self.assertTrue(np.allclose(fast, reference))

    def test_qevaluation_noent(self):
        """Policy evaluation without entropy regularization."""
        env = tabular_env.CliffwalkEnv(num_states=2, transition_noise=0.00)
        kwargs = dict(num_itrs=100, ent_wt=0.0, discount=0.5)
        # A huge Q-value on action 1 makes the policy always pick it.
        q_values = np.zeros((env.num_states, env.num_actions))
        q_values[:, 1] = 1e10
        returns, _ = q_iteration.softq_evaluation(env, q_values, **kwargs)
        self.assertAlmostEqual(returns, 0.66666666)

    def test_qevaluation_ent(self):
        """Policy evaluation with a small entropy bonus yields the same returns."""
        env = tabular_env.CliffwalkEnv(num_states=2, transition_noise=0.00)
        kwargs = dict(num_itrs=100, ent_wt=0.001, discount=0.5)
        q_values = np.zeros((env.num_states, env.num_actions))
        q_values[:, 1] = 1e10
        returns, _ = q_iteration.softq_evaluation(env, q_values, **kwargs)
        self.assertAlmostEqual(returns, 0.66666666)

    def test_visitations(self):
        """State visitation frequencies for several time limits."""
        env = tabular_env.CliffwalkEnv(num_states=3, transition_noise=0.00)
        kwargs = dict(num_itrs=50, ent_wt=0.0, discount=0.99)
        qvals = q_iteration_py.softq_iteration(env, **kwargs)
        cases = [
            (1, np.array([1, 0, 0])),
            (3, np.array([1, 1, 1]) / 3.0),
            (5, np.array([2, 2, 1]) / 5.0),
        ]
        for horizon, expected in cases:
            visitations = q_iteration_py.compute_visitation(
                env, qvals, ent_wt=0.0, env_time_limit=horizon)
            state_visits = np.sum(visitations, axis=1)
            self.assertTrue(np.allclose(expected, state_visits))
# Allow running this test module directly (``python <file>``) via unittest.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.allclose",
"rlutil.envs.tabular_cy.q_iteration.softq_iteration",
"rlutil.envs.tabular_cy.tabular_env.CliffwalkEnv",
"rlutil.envs.tabular_cy.q_iteration.softq_evaluation",
"numpy.sum",
"numpy.zeros",
"rlutil.envs.tabular.q_iteration.softq_iteration",
"rlutil.envs.tabular.q_iteration.compute_visi... | [((2468, 2483), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2481, 2483), False, 'import unittest\n'), ((256, 317), 'rlutil.envs.tabular_cy.tabular_env.CliffwalkEnv', 'tabular_env.CliffwalkEnv', ([], {'num_states': '(3)', 'transition_noise': '(0.01)'}), '(num_states=3, transition_noise=0.01)\n', (280, 317), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((457, 507), 'rlutil.envs.tabular.q_iteration.softq_iteration', 'q_iteration_py.softq_iteration', (['self.env'], {}), '(self.env, **params)\n', (487, 507), True, 'from rlutil.envs.tabular import q_iteration as q_iteration_py\n'), ((523, 570), 'rlutil.envs.tabular_cy.q_iteration.softq_iteration', 'q_iteration.softq_iteration', (['self.env'], {}), '(self.env, **params)\n', (550, 570), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((671, 731), 'rlutil.envs.tabular_cy.tabular_env.CliffwalkEnv', 'tabular_env.CliffwalkEnv', ([], {'num_states': '(2)', 'transition_noise': '(0.0)'}), '(num_states=2, transition_noise=0.0)\n', (695, 731), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((842, 885), 'numpy.zeros', 'np.zeros', (['(env.num_states, env.num_actions)'], {}), '((env.num_states, env.num_actions))\n', (850, 885), True, 'import numpy as np\n'), ((929, 982), 'rlutil.envs.tabular_cy.q_iteration.softq_evaluation', 'q_iteration.softq_evaluation', (['env', 'q_values'], {}), '(env, q_values, **params)\n', (957, 982), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((1076, 1136), 'rlutil.envs.tabular_cy.tabular_env.CliffwalkEnv', 'tabular_env.CliffwalkEnv', ([], {'num_states': '(2)', 'transition_noise': '(0.0)'}), '(num_states=2, transition_noise=0.0)\n', (1100, 1136), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((1249, 1292), 'numpy.zeros', 'np.zeros', (['(env.num_states, env.num_actions)'], {}), '((env.num_states, env.num_actions))\n', 
(1257, 1292), True, 'import numpy as np\n'), ((1336, 1389), 'rlutil.envs.tabular_cy.q_iteration.softq_evaluation', 'q_iteration.softq_evaluation', (['env', 'q_values'], {}), '(env, q_values, **params)\n', (1364, 1389), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((1479, 1539), 'rlutil.envs.tabular_cy.tabular_env.CliffwalkEnv', 'tabular_env.CliffwalkEnv', ([], {'num_states': '(3)', 'transition_noise': '(0.0)'}), '(num_states=3, transition_noise=0.0)\n', (1503, 1539), False, 'from rlutil.envs.tabular_cy import q_iteration, tabular_env\n'), ((1650, 1695), 'rlutil.envs.tabular.q_iteration.softq_iteration', 'q_iteration_py.softq_iteration', (['env'], {}), '(env, **params)\n', (1680, 1695), True, 'from rlutil.envs.tabular import q_iteration as q_iteration_py\n'), ((1715, 1793), 'rlutil.envs.tabular.q_iteration.compute_visitation', 'q_iteration_py.compute_visitation', (['env', 'qvals_py'], {'ent_wt': '(0.0)', 'env_time_limit': '(1)'}), '(env, qvals_py, ent_wt=0.0, env_time_limit=1)\n', (1748, 1793), True, 'from rlutil.envs.tabular import q_iteration as q_iteration_py\n'), ((1814, 1841), 'numpy.sum', 'np.sum', (['visitations'], {'axis': '(1)'}), '(visitations, axis=1)\n', (1820, 1841), True, 'import numpy as np\n'), ((1859, 1878), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1867, 1878), True, 'import numpy as np\n'), ((1959, 2037), 'rlutil.envs.tabular.q_iteration.compute_visitation', 'q_iteration_py.compute_visitation', (['env', 'qvals_py'], {'ent_wt': '(0.0)', 'env_time_limit': '(3)'}), '(env, qvals_py, ent_wt=0.0, env_time_limit=3)\n', (1992, 2037), True, 'from rlutil.envs.tabular import q_iteration as q_iteration_py\n'), ((2058, 2085), 'numpy.sum', 'np.sum', (['visitations'], {'axis': '(1)'}), '(visitations, axis=1)\n', (2064, 2085), True, 'import numpy as np\n'), ((2208, 2286), 'rlutil.envs.tabular.q_iteration.compute_visitation', 'q_iteration_py.compute_visitation', (['env', 'qvals_py'], {'ent_wt': '(0.0)', 
'env_time_limit': '(5)'}), '(env, qvals_py, ent_wt=0.0, env_time_limit=5)\n', (2241, 2286), True, 'from rlutil.envs.tabular import q_iteration as q_iteration_py\n'), ((2307, 2334), 'numpy.sum', 'np.sum', (['visitations'], {'axis': '(1)'}), '(visitations, axis=1)\n', (2313, 2334), True, 'import numpy as np\n'), ((591, 622), 'numpy.allclose', 'np.allclose', (['qvals_cy', 'qvals_py'], {}), '(qvals_cy, qvals_py)\n', (602, 622), True, 'import numpy as np\n'), ((1900, 1938), 'numpy.allclose', 'np.allclose', (['tru_visits', 's_visitations'], {}), '(tru_visits, s_visitations)\n', (1911, 1938), True, 'import numpy as np\n'), ((2103, 2122), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (2111, 2122), True, 'import numpy as np\n'), ((2149, 2187), 'numpy.allclose', 'np.allclose', (['tru_visits', 's_visitations'], {}), '(tru_visits, s_visitations)\n', (2160, 2187), True, 'import numpy as np\n'), ((2352, 2371), 'numpy.array', 'np.array', (['[2, 2, 1]'], {}), '([2, 2, 1])\n', (2360, 2371), True, 'import numpy as np\n'), ((2398, 2436), 'numpy.allclose', 'np.allclose', (['tru_visits', 's_visitations'], {}), '(tru_visits, s_visitations)\n', (2409, 2436), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import itertools
import csv
import gensim
import re
import nltk.data
import tensorflow
from nltk.tokenize import WordPunctTokenizer
from collections import Counter
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import LSTM, Merge
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from IPython.display import SVG, display
from keras.utils.visualize_util import plot, to_graph
# from keras import backend as K
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Applies an ordered list of regex substitutions (order matters: the
    character filter runs first, whitespace collapsing runs last), then
    strips and lower-cases the result.
    """
    # (pattern, replacement) pairs, applied in sequence.
    rules = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),   # drop everything but kept punctuation
        (r"\'s", " \'s"),                   # split clitics off their host word
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),                      # isolate punctuation as tokens
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),                   # collapse runs of whitespace
    )
    for pattern, replacement in rules:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def message_to_wordlist(message, lemmas_bool, remove_stopwords=False):
    """Convert a raw message to a list of tokens.

    Parameters
    ----------
    message : str
        Raw message text.
    lemmas_bool : str
        'l' -> lemmatize tokens, 's' -> stem tokens, anything else ->
        return the raw word tokens.
    remove_stopwords : bool
        Optionally drop English stopwords (False by default).

    Returns
    -------
    list of str tokens.
    """
    # 1. Remove quote markers such as ">>1234" (forum reply references).
    message_text = re.sub(">>\d+", "", message)
    message_text = message_text.lower()
    # 2. Normalize Cyrillic 'ё' to 'е'-like 'e'.
    # BUGFIX: the 4th positional argument of re.sub is `count`, not `flags`;
    # the original passed re.UNICODE (== 32) there, silently replacing at
    # most 32 occurrences. Pass it as a keyword flag instead.
    message_text = re.sub(u"ё", 'e', message_text, flags=re.UNICODE)
    message_text = clean_str(message_text)
    tokenizer = WordPunctTokenizer()
    # 3. Split into word/punctuation tokens.
    words = tokenizer.tokenize(message_text)
    lemmas = []
    # 4. Optionally remove stop words (false by default).
    if remove_stopwords:
        # NOTE(review): `stopwords` is not imported in this file; this
        # branch raises NameError when remove_stopwords=True — confirm
        # the intended import (nltk.corpus.stopwords?).
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    if lemmas_bool == 'l':
        # NOTE(review): `morph` (morphological analyzer) is not defined in
        # this file — presumably a pymorphy2 analyzer; verify at call site.
        for word in words:
            word_parsed = morph.parse(word)
            if len(word_parsed) > 0:
                lemmas.append(word_parsed[0].normal_form)
    elif lemmas_bool == 's':
        # NOTE(review): `stemmer` is likewise not defined in this file.
        for word in words:
            word = stemmer.stem(word)
            if len(word) > 0:
                lemmas.append(word)
    else:
        lemmas = words
    # 5. Return a list of tokens.
    return(lemmas)
#return(words)
# Define a function to split a message into parsed sentences
def message_to_sentences( message, tokenizer, lemmas_bool, remove_stopwords=False):
    """Split a message into sentences with the given NLTK tokenizer and
    return the concatenated token list of all non-empty sentences.

    `lemmas_bool` and `remove_stopwords` are forwarded to
    message_to_wordlist.
    """
    # NOTE: str.decode only exists on Python 2 byte strings; this file
    # targets the legacy Python 2 / Keras 0.x stack.
    if type(message) == str:
        message = message.decode('utf-8')
    tokens = []
    # Sentence-split first, then tokenize each sentence and flatten.
    for raw_sentence in tokenizer.tokenize(message.strip()):
        if len(raw_sentence) > 0:
            tokens += message_to_wordlist(raw_sentence, lemmas_bool, remove_stopwords)
    return tokens
def pad_sentences(sentences, padding_word="<PAD/>"):
    """
    Pads all sentences to the same length. The length is defined by the longest sentence.
    Returns padded sentences (a new list; inputs are not mutated).

    Robustness fix: an empty `sentences` list previously raised
    ValueError from max() on an empty sequence; it now returns [].
    """
    if not sentences:
        return []
    sequence_length = max(len(sentence) for sentence in sentences)
    # Right-pad each sentence with `padding_word` up to the common length.
    return [sentence + [padding_word] * (sequence_length - len(sentence))
            for sentence in sentences]
def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Returns [vocabulary, vocabulary_inv]: a word->index dict and the
    index->word list, ordered by descending word frequency.
    """
    # Count token frequencies across all sentences.
    word_counts = Counter()
    for sentence in sentences:
        word_counts.update(sentence)
    # Index -> word, most frequent first.
    vocabulary_inv = [word for word, _count in word_counts.most_common()]
    # Word -> index.
    vocabulary = dict((word, index) for index, word in enumerate(vocabulary_inv))
    return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
    """
    Maps sentences and labels to vectors based on a vocabulary.

    Each sentence becomes a row of word indices; each label becomes a
    two-way one-hot vector ([1, 0] for label == 1, [0, 1] otherwise).
    Returns [x, y] as numpy arrays.
    """
    x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
    one_hot = [([1, 0] if label == 1 else [0, 1]) for label in labels]
    y = np.array(one_hot)
    return [x, y]
def load_data():
    """Load and preprocess the aggression dataset.

    Reads 'aggression.csv' (tab-separated, columns 'Text' and
    'Aggression'), tokenizes each message, POS-tags it, pads both the
    word and tag sequences to a common length, and indexes them against
    frequency-ordered vocabularies.

    Returns [x, y, vocabulary, vocabulary_inv, vocabulary_pos,
    vocabulary_inv_pos, x_pos].
    """
    frame = pd.read_csv( 'aggression.csv', header=0,
                    delimiter="\t", quoting = csv.QUOTE_MINIMAL )
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    labels = frame[:]['Aggression']
    texts = frame[:]['Text']
    # Tokenize each message into a flat word list.
    tokenized = [message_to_sentences(text, tokenizer, '') for text in texts]
    # POS-tag each tokenized message and keep only the tag of each (word, tag) pair.
    pos_data = [nltk.pos_tag(sent) for sent in tokenized]
    tags = [[tagged[1] for tagged in sent] for sent in pos_data]
    # Pad words and tags to a uniform length before indexing.
    tokenized = pad_sentences(tokenized)
    tags = pad_sentences(tags)
    vocabulary, vocabulary_inv = build_vocab(tokenized)
    vocabulary_pos, vocabulary_inv_pos = build_vocab(tags)
    x_pos = np.array([[vocabulary_pos[tag] for tag in sentence] for sentence in tags])
    x, y = build_input_data(tokenized, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv, vocabulary_pos, vocabulary_inv_pos, x_pos]
# ---------------------------------------------------------------------
# Script entry point: configuration, data loading, model construction,
# and training of a two-branch (words + POS tags) CNN classifier using
# the legacy Keras 0.x Graph / Merge API.
# ---------------------------------------------------------------------
# Fix the NumPy RNG so shuffling and random embedding init are reproducible.
np.random.seed(2)
# Embedding strategy: CNN-rand (random, trainable), CNN-non-static
# (word2vec init, trainable), CNN-static (frozen word2vec lookup).
model_variation = 'CNN-non-static' #  CNN-rand | CNN-non-static | CNN-static
print('Model variation is %s' % model_variation)
# Model Hyperparameters
sequence_length = 287      # padded token count per message (must match data)
embedding_dim = 600        # word2vec vector size (must match the saved model)
filter_sizes = (3, 4)      # one parallel conv branch per filter width
num_filters = 150
dropout_prob = (0.25, 0.5) # (after embedding, after hidden dense)
hidden_dims = 150
# Training parameters
batch_size = 32
num_epochs = 100
val_split = 0.1            # fraction of data held out for validation
# Word2Vec parameters, see train_word2vec
min_word_count = 4  # Minimum word count
context = 10        # Context window size
print("Loading data...")
x, y, vocabulary, vocabulary_inv, voc_pos, voc_inv_pos, x_pos = load_data()
if model_variation == 'CNN-non-static' or model_variation == 'CNN-static':
    # Load pretrained word2vec vectors from disk; vocabulary words missing
    # from the word2vec model get uniform random vectors in [-0.25, 0.25].
    # NOTE(review): the literal 600 must stay in sync with embedding_dim.
    embedding_model = gensim.models.Word2Vec.load('model')
    model_words = embedding_model.index2word
    embedding_weights = [np.array([embedding_model[w] if w in vocabulary and w in model_words\
                          else np.random.uniform(-0.25,0.25,600)\
                          for w in vocabulary_inv])]
    if model_variation=='CNN-static':
        # Static variant: replace token indices by their frozen vectors up front.
        x = embedding_weights[0][x]
elif model_variation=='CNN-rand':
    embedding_weights = None
else:
    raise ValueError('Unknown model variation')
# Shuffle words, POS tags, and labels with the same permutation;
# labels are collapsed from one-hot back to class indices via argmax.
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices].argmax(axis=1)
x_pos = x_pos[shuffle_indices]
print("Vocabulary Size: {:d}".format(len(vocabulary)))
# Building model
# ==================================================
#
# graph subnet with one input and one output,
# convolutional layers concateneted in parallel:
# for each filter size, conv -> maxpool -> flatten, then concat.
graph = Graph()
graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
for fsz in filter_sizes:
    conv = Convolution1D(nb_filter=num_filters,
                         filter_length=fsz,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1)
    pool = MaxPooling1D(pool_length=2)
    graph.add_node(conv, name='conv-%s' % fsz, input='input')
    graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
    graph.add_node(Flatten(), name='flatten-%s' % fsz, input='maxpool-%s' % fsz)
if len(filter_sizes)>1:
    # Multiple branches: concatenate their flattened outputs.
    graph.add_output(name='output',
                     inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                     merge_mode='concat')
else:
    graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])
# main sequential model: (embedding) -> dropout -> conv graph -> dense head
model = Sequential()
if not model_variation=='CNN-static':
    model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,
                        weights=embedding_weights))
# model.add(Embedding(len(vocabulary), 1, input_length=sequence_length)
model.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
model.add(graph)
model.add(Dense(hidden_dims))
model.add(Dropout(dropout_prob[1]))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')
# Second branch: a small MLP over the POS-tag index sequence.
model_b = Sequential()
model_b.add(Dense(287, init='uniform', input_shape=(sequence_length,)))
model_b.add(Dense(32, init='uniform'))
model_b.add(Activation('relu'))
model_b.add(Dense(2, init='uniform'))
model_b.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')
# Decoder: concatenate the CNN branch and the POS branch, then softmax
# over the two classes.
decoder = Sequential()
decoder.add(Merge([model, model_b], mode='concat'))
decoder.add(Dense(2, activation='softmax'))
decoder.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')
# Training model
# ==================================================
print ("Drawing graph")
graph = to_graph(decoder, show_shape=True)
graph.write_png("model.png")
print ("Training model")
# Fit on both inputs (word indices, POS indices) with integer labels.
decoder.fit([x_shuffled, x_pos], y_shuffled, batch_size=batch_size,
            nb_epoch=num_epochs, show_accuracy=True,
            validation_split=val_split, verbose=2)
| [
"itertools.chain",
"keras.layers.core.Flatten",
"keras.layers.Merge",
"keras.layers.core.Activation",
"pandas.read_csv",
"keras.models.Graph",
"nltk.tokenize.WordPunctTokenizer",
"gensim.models.Word2Vec.load",
"keras.models.Sequential",
"keras.utils.visualize_util.to_graph",
"numpy.array",
"ke... | [((6080, 6097), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (6094, 6097), True, 'import numpy as np\n'), ((7765, 7772), 'keras.models.Graph', 'Graph', ([], {}), '()\n', (7770, 7772), False, 'from keras.models import Sequential, Graph\n'), ((8664, 8676), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8674, 8676), False, 'from keras.models import Sequential, Graph\n'), ((9275, 9287), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9285, 9287), False, 'from keras.models import Sequential, Graph\n'), ((9567, 9579), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9577, 9579), False, 'from keras.models import Sequential, Graph\n'), ((9867, 9901), 'keras.utils.visualize_util.to_graph', 'to_graph', (['decoder'], {'show_shape': '(True)'}), '(decoder, show_shape=True)\n', (9875, 9901), False, 'from keras.utils.visualize_util import plot, to_graph\n'), ((827, 873), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', string)\n', (833, 873), False, 'import re\n'), ((886, 915), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (892, 915), False, 'import re\n'), ((930, 961), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (936, 961), False, 'import re\n'), ((976, 1007), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (982, 1007), False, 'import re\n'), ((1022, 1053), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (1028, 1053), False, 'import re\n'), ((1068, 1097), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (1074, 1097), False, 'import re\n'), ((1112, 1143), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', 
(1118, 1143), False, 'import re\n'), ((1158, 1184), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (1164, 1184), False, 'import re\n'), ((1199, 1225), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! ', string)\n", (1205, 1225), False, 'import re\n'), ((1240, 1270), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (1246, 1270), False, 'import re\n'), ((1283, 1313), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (1289, 1313), False, 'import re\n'), ((1326, 1356), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? ', string)\n", (1332, 1356), False, 'import re\n'), ((1369, 1399), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (1375, 1399), False, 'import re\n'), ((1768, 1797), 're.sub', 're.sub', (['""">>\\\\d+"""', '""""""', 'message'], {}), "('>>\\\\d+', '', message)\n", (1774, 1797), False, 'import re\n'), ((1855, 1898), 're.sub', 're.sub', (['u"""ё"""', '"""e"""', 'message_text', 're.UNICODE'], {}), "(u'ё', 'e', message_text, re.UNICODE)\n", (1861, 1898), False, 'import re\n'), ((1958, 1978), 'nltk.tokenize.WordPunctTokenizer', 'WordPunctTokenizer', ([], {}), '()\n', (1976, 1978), False, 'from nltk.tokenize import WordPunctTokenizer\n'), ((4762, 4839), 'numpy.array', 'np.array', (['[[vocabulary[word] for word in sentence] for sentence in sentences]'], {}), '([[vocabulary[word] for word in sentence] for sentence in sentences])\n', (4770, 4839), True, 'import numpy as np\n'), ((5028, 5044), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5036, 5044), True, 'import numpy as np\n'), ((5096, 5183), 'pandas.read_csv', 'pd.read_csv', (['"""aggression.csv"""'], {'header': '(0)', 'delimiter': '"""\t"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "('aggression.csv', header=0, delimiter='\\t', 
quoting=csv.\n QUOTE_MINIMAL)\n", (5107, 5183), True, 'import pandas as pd\n'), ((5855, 5931), 'numpy.array', 'np.array', (['[[vocabulary_pos[word] for word in sentence] for sentence in tags]'], {}), '([[vocabulary_pos[word] for word in sentence] for sentence in tags])\n', (5863, 5931), True, 'import numpy as np\n'), ((6814, 6850), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['"""model"""'], {}), "('model')\n", (6841, 6850), False, 'import gensim\n'), ((7886, 8005), 'keras.layers.convolutional.Convolution1D', 'Convolution1D', ([], {'nb_filter': 'num_filters', 'filter_length': 'fsz', 'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample_length': '(1)'}), "(nb_filter=num_filters, filter_length=fsz, border_mode='valid',\n activation='relu', subsample_length=1)\n", (7899, 8005), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((8113, 8140), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_length': '(2)'}), '(pool_length=2)\n', (8125, 8140), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((8939, 9009), 'keras.layers.core.Dropout', 'Dropout', (['dropout_prob[0]'], {'input_shape': '(sequence_length, embedding_dim)'}), '(dropout_prob[0], input_shape=(sequence_length, embedding_dim))\n', (8946, 9009), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9039, 9057), 'keras.layers.core.Dense', 'Dense', (['hidden_dims'], {}), '(hidden_dims)\n', (9044, 9057), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9069, 9093), 'keras.layers.core.Dropout', 'Dropout', (['dropout_prob[1]'], {}), '(dropout_prob[1])\n', (9076, 9093), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9105, 9123), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9115, 9123), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9135, 
9143), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (9140, 9143), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9155, 9176), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9165, 9176), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9300, 9358), 'keras.layers.core.Dense', 'Dense', (['(287)'], {'init': '"""uniform"""', 'input_shape': '(sequence_length,)'}), "(287, init='uniform', input_shape=(sequence_length,))\n", (9305, 9358), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9372, 9397), 'keras.layers.core.Dense', 'Dense', (['(32)'], {'init': '"""uniform"""'}), "(32, init='uniform')\n", (9377, 9397), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9411, 9429), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9421, 9429), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9443, 9467), 'keras.layers.core.Dense', 'Dense', (['(2)'], {'init': '"""uniform"""'}), "(2, init='uniform')\n", (9448, 9467), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((9592, 9630), 'keras.layers.Merge', 'Merge', (['[model, model_b]'], {'mode': '"""concat"""'}), "([model, model_b], mode='concat')\n", (9597, 9630), False, 'from keras.layers import LSTM, Merge\n'), ((9644, 9674), 'keras.layers.core.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (9649, 9674), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((4361, 4388), 'itertools.chain', 'itertools.chain', (['*sentences'], {}), '(*sentences)\n', (4376, 4388), False, 'import itertools\n'), ((8295, 8304), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (8302, 8304), False, 'from keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((7052, 7087), 
'numpy.random.uniform', 'np.random.uniform', (['(-0.25)', '(0.25)', '(600)'], {}), '(-0.25, 0.25, 600)\n', (7069, 7087), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
from mpi_utils.mpi_utils import sync_networks
from rl_modules.buffer import ReplayBuffer
from networks import LanguageCritic, LanguageActor
from mpi_utils.normalizer import Normalizer
from her_modules.her import HerSampler
from updates import update_language
from utils import hard_update, soft_update, available_device
class LangRLAgent:
    """Language-conditioned soft actor-critic style RL agent.

    Owns the actor/critic networks (plus a Polyak-averaged target
    critic), their Adam optimizers, a running observation normalizer,
    and a HER-style replay buffer whose sampler can relabel transitions
    with hindsight language instructions.
    """
    def __init__(self, cfg, env_params, compute_rew, hipss_module):
        # cfg: hyperparameter namespace; env_params: dict describing the env
        # (keys used here: 'obs', 'action', 'image_observation').
        # compute_rew: reward function handed to the HER sampler.
        # hipss_module: hindsight-instruction module used by the buffer.
        self.cfg = cfg
        self.alpha = cfg.alpha  # entropy temperature (fixed unless auto-tuned below)
        self.env_params = env_params
        self.hipss_module = hipss_module
        self.total_iter = 0  # number of train() calls; drives target-network updates
        self.freq_target_update = cfg.freq_target_update
        # Actor/critic; parameters are synchronized across MPI workers.
        self.actor_network = LanguageActor(cfg, env_params)
        self.critic_network = LanguageCritic(cfg, env_params)
        sync_networks(self.actor_network)
        sync_networks(self.critic_network)
        # Target critic starts as an exact copy of the online critic.
        self.critic_target_network = LanguageCritic(cfg, env_params)
        hard_update(self.critic_target_network, self.critic_network)
        sync_networks(self.critic_target_network)
        self.policy_optim = torch.optim.Adam(self.actor_network.parameters(), lr=self.cfg.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_network.parameters(), lr=self.cfg.lr_critic)
        # Running mean/std normalizer for (non-image) observations.
        self.o_norm = Normalizer(size=self.env_params['obs'], default_clip_range=self.cfg.clip_range)
        if self.cfg.cuda:
            self.actor_network.cuda()
            self.critic_network.cuda()
            self.critic_target_network.cuda()
        # Entropy auto-tuning state (left as None when disabled).
        self.log_alpha = None
        self.target_entropy = None
        self.alpha_optim = None
        if self.cfg.automatic_entropy_tuning:
            # Standard SAC heuristic: target entropy = -dim(action space).
            self.target_entropy = -torch.prod(torch.Tensor(self.env_params['action'])).item()
            self.log_alpha = torch.zeros(1, requires_grad=True)
            self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=self.cfg.lr_entropy)
            if self.cfg.cuda:
                # NOTE(review): .cuda() returns a new tensor, so alpha_optim
                # (created above) still points at the CPU tensor — confirm
                # the intended device/optimizer ordering here.
                self.log_alpha = self.log_alpha.cuda()
        self.her_module = HerSampler(self.cfg, compute_rew)
        self.buffer = ReplayBuffer(env_params=self.env_params,
                                   buffer_size=self.cfg.buffer_size,
                                   sample_func=self.her_module.sample_her_lang_transitions,
                                   hipss_module=self.hipss_module,
                                   lang_mode=True)
    @torch.no_grad()
    def act(self, obs, instruction, with_noise):
        """Return an action (numpy copy) for one obs/instruction pair.

        with_noise=True samples from the policy distribution; False uses
        the deterministic (mean) action.
        """
        input_tensor, instr_tensor = self._preproc_inputs(obs, instruction)
        action = self._select_actions(input_tensor, instr_tensor, with_noise)
        return action.copy()
    def store(self, episodes):
        """Add a batch of rollout episodes to the replay buffer."""
        self.buffer.store_episode(episode_batch=episodes)
    def _preproc_inputs(self, obs, instruction):
        """Normalize obs (unless image-based), tensorize obs + instruction,
        add a batch dimension, and move to GPU when configured."""
        if self.env_params['image_observation']:
            # Images are fed as-is; the normalizer only covers state vectors.
            obs_norm = obs
        else:
            obs_norm = self.o_norm.normalize(obs)
        inputs = torch.tensor(obs_norm, dtype=torch.float32).unsqueeze(0)
        instr_tensor = torch.tensor(instruction, dtype=torch.long).unsqueeze(0)
        if self.cfg.cuda:
            inputs = inputs.cuda()
            instr_tensor = instr_tensor.cuda()
        return inputs, instr_tensor
    def train(self):
        """Run one network update; every `freq_target_update` calls, Polyak-
        average the online critic into the target critic. Returns metrics."""
        self.total_iter += 1
        metric_dict = self._update_network()
        if self.total_iter % self.freq_target_update == 0:
            soft_update(self.critic_target_network, self.critic_network, self.cfg.polyak)
        return metric_dict
    def _select_actions(self, state, instruction, with_noise):
        """Query the actor; return the action as a numpy array (batch dim
        stripped). sample() is assumed to return (sampled, log_prob, mean)."""
        if with_noise:
            action, _, _ = self.actor_network.sample(state, instruction)
        else:
            _, _, action = self.actor_network.sample(state, instruction)
        return action.detach().cpu().numpy()[0]
    def _update_normalizer(self, episode):
        """Update the observation normalizer's running statistics using the
        (HER-relabeled) transitions of a single episode."""
        mb_obs = episode['obs']
        mb_actions = episode['action']
        mb_instructions = episode['instruction']
        mb_rewards = episode['reward']
        mb_hindsight_instructions = episode['hindsight_instruction']
        # obs has one more entry than actions; next-obs is obs shifted by one.
        mb_obs_next = mb_obs[1:, :]
        num_transitions = mb_actions.shape[0]
        # Wrap the episode in a batch of size 1 for the HER sampler.
        buffer_temp = {
            'obs': np.expand_dims(mb_obs, 0),
            'action': np.expand_dims(mb_actions, 0),
            'obs_next': np.expand_dims(mb_obs_next, 0),
            'instruction': np.expand_dims(mb_instructions, 0),
            'reward': np.expand_dims(mb_rewards, 0),
            'hindsight_instruction': np.expand_dims(mb_hindsight_instructions, 0)
        }
        transitions = self.her_module.sample_her_lang_transitions(buffer_temp, num_transitions, None)
        obs = transitions['obs']
        transitions['obs'] = self._preproc_o(obs)
        self.o_norm.update(transitions['obs'])
        self.o_norm.recompute_stats()
    def _preproc_o(self, o):
        """Clip state observations to [-clip_obs, clip_obs]; images are only
        cast to float32."""
        if self.env_params['image_observation']:
            return np.asarray(o, dtype=np.float32)
        else:
            return np.clip(o, -self.cfg.clip_obs, self.cfg.clip_obs)
    def _update_network(self):
        """Sample a batch from the replay buffer, normalize observations, and
        run one actor/critic (and optional alpha) update. Returns metrics,
        including any reward metrics attached by the sampler."""
        transitions = self.buffer.sample(self.cfg.batch_size)
        o, o_next, instruction, actions, rewards = transitions['obs'], transitions['obs_next'], transitions['instruction'], \
                                                   transitions['action'], transitions['reward']
        transitions['obs'] = self._preproc_o(o)
        transitions['obs_next'] = self._preproc_o(o_next)
        if self.env_params['image_observation']:
            obs_norm = transitions['obs']
            obs_next_norm = transitions['obs_next']
        else:
            obs_norm = self.o_norm.normalize(transitions['obs'])
            obs_next_norm = self.o_norm.normalize(transitions['obs_next'])
        metric_dict = update_language(self.actor_network, self.critic_network, self.critic_target_network,
                                       self.policy_optim, self.critic_optim, self.alpha, self.log_alpha,
                                       self.target_entropy, self.alpha_optim, obs_norm, instruction, obs_next_norm,
                                       actions, rewards, self.cfg)
        if 'reward_metrics' in transitions:
            metric_dict.update(transitions['reward_metrics'])
        return metric_dict
    def save(self, model_path, epoch='latest'):
        """Save normalizer stats and actor/critic weights to
        `<model_path>/model_<epoch>.pt`."""
        model_dict = {'actor': self.actor_network.state_dict(), 'critic': self.critic_network.state_dict()}
        torch.save([self.o_norm.mean, self.o_norm.std, model_dict], os.path.join(model_path, f'model_{epoch}.pt'))
    def load(self, model_path):
        """Load a checkpoint saved by save(); restores the actor (eval mode)
        and normalizer stats. Symlinked paths are resolved first; the critic
        weights in the checkpoint are ignored here."""
        if os.path.islink(model_path):
            model_path = os.readlink(model_path)
        o_mean, o_std, model_dict = torch.load(model_path, map_location=available_device())
        self.actor_network.load_state_dict(model_dict['actor'])
        self.actor_network.eval()
        self.o_norm.mean = o_mean
        self.o_norm.std = o_std
| [
"numpy.clip",
"rl_modules.buffer.ReplayBuffer",
"utils.available_device",
"os.path.islink",
"utils.soft_update",
"os.readlink",
"mpi_utils.mpi_utils.sync_networks",
"numpy.asarray",
"networks.LanguageCritic",
"utils.hard_update",
"networks.LanguageActor",
"torch.Tensor",
"her_modules.her.Her... | [((2412, 2427), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2425, 2427), False, 'import torch\n'), ((702, 732), 'networks.LanguageActor', 'LanguageActor', (['cfg', 'env_params'], {}), '(cfg, env_params)\n', (715, 732), False, 'from networks import LanguageCritic, LanguageActor\n'), ((763, 794), 'networks.LanguageCritic', 'LanguageCritic', (['cfg', 'env_params'], {}), '(cfg, env_params)\n', (777, 794), False, 'from networks import LanguageCritic, LanguageActor\n'), ((803, 836), 'mpi_utils.mpi_utils.sync_networks', 'sync_networks', (['self.actor_network'], {}), '(self.actor_network)\n', (816, 836), False, 'from mpi_utils.mpi_utils import sync_networks\n'), ((845, 879), 'mpi_utils.mpi_utils.sync_networks', 'sync_networks', (['self.critic_network'], {}), '(self.critic_network)\n', (858, 879), False, 'from mpi_utils.mpi_utils import sync_networks\n'), ((918, 949), 'networks.LanguageCritic', 'LanguageCritic', (['cfg', 'env_params'], {}), '(cfg, env_params)\n', (932, 949), False, 'from networks import LanguageCritic, LanguageActor\n'), ((958, 1018), 'utils.hard_update', 'hard_update', (['self.critic_target_network', 'self.critic_network'], {}), '(self.critic_target_network, self.critic_network)\n', (969, 1018), False, 'from utils import hard_update, soft_update, available_device\n'), ((1027, 1068), 'mpi_utils.mpi_utils.sync_networks', 'sync_networks', (['self.critic_target_network'], {}), '(self.critic_target_network)\n', (1040, 1068), False, 'from mpi_utils.mpi_utils import sync_networks\n'), ((1295, 1374), 'mpi_utils.normalizer.Normalizer', 'Normalizer', ([], {'size': "self.env_params['obs']", 'default_clip_range': 'self.cfg.clip_range'}), "(size=self.env_params['obs'], default_clip_range=self.cfg.clip_range)\n", (1305, 1374), False, 'from mpi_utils.normalizer import Normalizer\n'), ((2029, 2062), 'her_modules.her.HerSampler', 'HerSampler', (['self.cfg', 'compute_rew'], {}), '(self.cfg, compute_rew)\n', (2039, 2062), False, 'from 
her_modules.her import HerSampler\n'), ((2086, 2274), 'rl_modules.buffer.ReplayBuffer', 'ReplayBuffer', ([], {'env_params': 'self.env_params', 'buffer_size': 'self.cfg.buffer_size', 'sample_func': 'self.her_module.sample_her_lang_transitions', 'hipss_module': 'self.hipss_module', 'lang_mode': '(True)'}), '(env_params=self.env_params, buffer_size=self.cfg.buffer_size,\n sample_func=self.her_module.sample_her_lang_transitions, hipss_module=\n self.hipss_module, lang_mode=True)\n', (2098, 2274), False, 'from rl_modules.buffer import ReplayBuffer\n'), ((5736, 6004), 'updates.update_language', 'update_language', (['self.actor_network', 'self.critic_network', 'self.critic_target_network', 'self.policy_optim', 'self.critic_optim', 'self.alpha', 'self.log_alpha', 'self.target_entropy', 'self.alpha_optim', 'obs_norm', 'instruction', 'obs_next_norm', 'actions', 'rewards', 'self.cfg'], {}), '(self.actor_network, self.critic_network, self.\n critic_target_network, self.policy_optim, self.critic_optim, self.alpha,\n self.log_alpha, self.target_entropy, self.alpha_optim, obs_norm,\n instruction, obs_next_norm, actions, rewards, self.cfg)\n', (5751, 6004), False, 'from updates import update_language\n'), ((6555, 6581), 'os.path.islink', 'os.path.islink', (['model_path'], {}), '(model_path)\n', (6569, 6581), False, 'import os\n'), ((1792, 1826), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (1803, 1826), False, 'import torch\n'), ((1858, 1916), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.log_alpha]'], {'lr': 'self.cfg.lr_entropy'}), '([self.log_alpha], lr=self.cfg.lr_entropy)\n', (1874, 1916), False, 'import torch\n'), ((3406, 3483), 'utils.soft_update', 'soft_update', (['self.critic_target_network', 'self.critic_network', 'self.cfg.polyak'], {}), '(self.critic_target_network, self.critic_network, self.cfg.polyak)\n', (3417, 3483), False, 'from utils import hard_update, soft_update, available_device\n'), ((4203, 4228), 
'numpy.expand_dims', 'np.expand_dims', (['mb_obs', '(0)'], {}), '(mb_obs, 0)\n', (4217, 4228), True, 'import numpy as np\n'), ((4252, 4281), 'numpy.expand_dims', 'np.expand_dims', (['mb_actions', '(0)'], {}), '(mb_actions, 0)\n', (4266, 4281), True, 'import numpy as np\n'), ((4307, 4337), 'numpy.expand_dims', 'np.expand_dims', (['mb_obs_next', '(0)'], {}), '(mb_obs_next, 0)\n', (4321, 4337), True, 'import numpy as np\n'), ((4366, 4400), 'numpy.expand_dims', 'np.expand_dims', (['mb_instructions', '(0)'], {}), '(mb_instructions, 0)\n', (4380, 4400), True, 'import numpy as np\n'), ((4424, 4453), 'numpy.expand_dims', 'np.expand_dims', (['mb_rewards', '(0)'], {}), '(mb_rewards, 0)\n', (4438, 4453), True, 'import numpy as np\n'), ((4492, 4536), 'numpy.expand_dims', 'np.expand_dims', (['mb_hindsight_instructions', '(0)'], {}), '(mb_hindsight_instructions, 0)\n', (4506, 4536), True, 'import numpy as np\n'), ((4915, 4946), 'numpy.asarray', 'np.asarray', (['o'], {'dtype': 'np.float32'}), '(o, dtype=np.float32)\n', (4925, 4946), True, 'import numpy as np\n'), ((4980, 5029), 'numpy.clip', 'np.clip', (['o', '(-self.cfg.clip_obs)', 'self.cfg.clip_obs'], {}), '(o, -self.cfg.clip_obs, self.cfg.clip_obs)\n', (4987, 5029), True, 'import numpy as np\n'), ((6464, 6509), 'os.path.join', 'os.path.join', (['model_path', 'f"""model_{epoch}.pt"""'], {}), "(model_path, f'model_{epoch}.pt')\n", (6476, 6509), False, 'import os\n'), ((6608, 6631), 'os.readlink', 'os.readlink', (['model_path'], {}), '(model_path)\n', (6619, 6631), False, 'import os\n'), ((2957, 3000), 'torch.tensor', 'torch.tensor', (['obs_norm'], {'dtype': 'torch.float32'}), '(obs_norm, dtype=torch.float32)\n', (2969, 3000), False, 'import torch\n'), ((3037, 3080), 'torch.tensor', 'torch.tensor', (['instruction'], {'dtype': 'torch.long'}), '(instruction, dtype=torch.long)\n', (3049, 3080), False, 'import torch\n'), ((6704, 6722), 'utils.available_device', 'available_device', ([], {}), '()\n', (6720, 6722), False, 'from utils 
import hard_update, soft_update, available_device\n'), ((1715, 1754), 'torch.Tensor', 'torch.Tensor', (["self.env_params['action']"], {}), "(self.env_params['action'])\n", (1727, 1754), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.