code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import csv # to process CSV file
import cv2 # to read images and flip them
import numpy as np # to generate numpy arrays
import random # to generate random numbers in order to filter out some training data
def readCSV(filename):
    """Parse the driving log produced by the simulator.

    filename - path to the CSV file which stores the recorded training data

    Returns every row of the file except the first one, which holds the
    column headers.
    """
    with open(filename) as csvfile:
        # Materialize all rows at once; the file is closed by the context manager.
        rows = list(csv.reader(csvfile))
    # Skip the header row.
    return rows[1:]
def loadImages(lines, folder, which=0):
    """ Loads images and measurements
    lines - Lines of the CSV file
    folder - Folder of the image files
    which - If it is 0, images captured by the center camera will be loaded in. If it is +1, images captured by the left camera will be loaded in. It it is -1, images captured by the right camera will be loaded in.
    Returns the list of images and belonging steering measurements.
    """
    images = []
    measurements = []
    for line in lines:
        # filenames are stored in the first three columns of CSV lines. The path is irrelavant at this point.
        # (-1 % 3 == 2 in Python, so which=-1 selects the third/right-camera column.)
        filename = line[which % 3].split('/')[-1]
        path = folder + filename
        # Reads the image (cv2.imread returns None when the file is missing; not checked here)
        image = cv2.imread(path)
        # Image is appended to the list of images
        images.append(image)
        # Measurement is read from the CSV record and it is modified if not the center camera is used.
        # NOTE(review): line[3 + (which % 3)] reads column 4/5 for the side cameras,
        # but in the simulator log the steering angle is column 3 for every camera
        # (columns 4/5 are throttle/brake) — confirm this index is intended.
        measurements.append(float(line[3 + (which % 3)]) + which * 0.2)
    return images, measurements
def flipImages(images, measurements):
    """Augment the data set with horizontally mirrored copies.

    images - images which will be mirrored
    measurements - steering measurement belonging to each image

    Returns lists containing, for every input sample, the original image
    followed immediately by its mirrored copy; the steering measurement of
    a mirrored copy is negated to match the mirrored scene.
    """
    augmented_images = []
    augmented_measurements = []
    for image, measurement in zip(images, measurements):
        # Keep the original sample.
        augmented_images.append(image)
        augmented_measurements.append(measurement)
        # Mirror around the vertical axis; flipping the scene flips the sign
        # of the correct steering angle as well.
        augmented_images.append(cv2.flip(image, 1))
        augmented_measurements.append(-measurement)
    return augmented_images, augmented_measurements
def cutHistogram(images, measurements, value):
    """Randomly discard most samples whose steering measurement equals `value`.

    Used to flatten the distribution of steering measurements, which is
    heavily dominated by one value (0 for the center camera, +/-0.2 for the
    side cameras).

    images - list of images in the data set
    measurements - list of steering measurements in the data set
    value - the over-represented measurement to thin out

    Returns the filtered lists of images and measurements.
    """
    # Fraction of the matching samples that survives the filtering.
    keeping_ratio = 0.05
    kept_images = []
    kept_measurements = []
    for image, measurement in zip(images, measurements):
        # Samples at the over-represented value are dropped with high
        # probability; everything else is always kept.
        if measurement == value and random.random() > keeping_ratio:
            continue
        kept_images.append(image)
        kept_measurements.append(measurement)
    return kept_images, kept_measurements
# Load the recorded driving log and the centre-camera images it references.
lines = readCSV('./training_data/driving_log.csv')
center_images, center_measurements = loadImages(lines, './training_data/IMG/')
print('Number of images: ', len(center_images))
hist_measurements = np.histogram(center_measurements, bins=21)
print('Histogram of steering measurements: ', hist_measurements[0]) # only the frequency is relevant, the bin borders are not displayed
# Thin out the hugely over-represented zero-steering samples so the
# distribution of measurements becomes more uniform.
center_images, center_measurements = cutHistogram(center_images, center_measurements, 0)
hist_measurements = np.histogram(center_measurements, bins=21)
print('Histogram of steering measurements after filtering out a lot of zeros: ', hist_measurements[0])
### It was a try to use images from left and right camera as well. It did not give acceptable results so I rejected this approach.
# left = +1
# left_images, left_measurements = loadImages(lines, './training_data/IMG/', left)
# left_images, left_measurements = cutHistogram(left_images, left_measurements, 0.2)
# right = -1
# right_images, right_measurements = loadImages(lines, './training_data/IMG/', right)
# right_images, right_measurements = cutHistogram(right_images, right_measurements, -0.2)
# images = center_images + left_images + right_images
# measurements = center_measurements + left_measurements + right_measurements
images = center_images
measurements = center_measurements
# Double the data set with horizontally flipped copies (and negated steering).
images, measurements = flipImages(images, measurements)
# Convert to numpy arrays as expected by Keras.
X_train = np.array(images)
y_train = np.array(measurements)
from keras.models import Sequential
from keras.layers import Cropping2D
from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
### It was the first try to use LeNet architecture, but it did not provide an acceptable result so I rejected it.
## LeNet
# model = Sequential()
# model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
# model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# model.add(Conv2D(filters=32,kernel_size=(5,5),padding='valid'))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Conv2D(filters=16,kernel_size=(5,5),padding='valid'))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Flatten())
# model.add(Dropout(0.5))
# model.add(Dense(120))
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
# model.add(Dense(40))
# model.add(Activation('relu'))
# model.add(Dense(1))
## NVIDIA architecture
model = Sequential()
# The top 60 and bottom 20 rows of the images will be cropped out
# (sky/background at the top, car hood at the bottom).
model.add(Cropping2D(cropping=((60,20), (0,0)), input_shape=(160,320,3)))
# The images are normalized and shifted to the -0.5 and +0.5 range.
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# First convolutional layer with max pooling
model.add(Conv2D(filters=24,kernel_size=(5,5),padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Second convolutional layer with max pooling
model.add(Conv2D(filters=36,kernel_size=(5,5),padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Third convolutional layer with max pooling
model.add(Conv2D(filters=48,kernel_size=(5,5),padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
# Fourth convolutional layer without max pooling
model.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid', activation='relu'))
# Fifth convolutional layer without max pooling
model.add(Conv2D(filters=64,kernel_size=(3,3),padding='valid', activation='relu'))
# Make a vector from matrices
model.add(Flatten())
# First fully connected layer with dropout
model.add(Dropout(0.25))
model.add(Dense(1164, activation='relu'))
# Second fully connected layer with dropout
model.add(Dropout(0.25))
model.add(Dense(100, activation='relu'))
# Third fully connected layer with dropout
model.add(Dropout(0.25))
model.add(Dense(50, activation='relu'))
# Fourth fully connected layer with dropout
model.add(Dropout(0.25))
model.add(Dense(10, activation='relu'))
# Fifth fully connected layer: single output, the predicted steering angle
model.add(Dense(1))
# Creates a summary of the whole model
model.summary()
# Compiles the model using Mean square error as loss and the optimizer is Adam
model.compile(loss='mse', optimizer='adam')
# Trains the model. 20% of the data set is used for validation. The elements of data set are selected randomly. The number of epochs is 10.
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=10)
model.save('model.h5') | [
"numpy.histogram",
"keras.layers.core.Flatten",
"cv2.imread",
"cv2.flip",
"keras.layers.pooling.MaxPooling2D",
"keras.layers.core.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.convolutional.Conv2D",
"csv.reader",
"random.random",
"keras.layers.core.Dropout",
"keras.layers.... | [((4296, 4338), 'numpy.histogram', 'np.histogram', (['center_measurements'], {'bins': '(21)'}), '(center_measurements, bins=21)\n', (4308, 4338), True, 'import numpy as np\n'), ((4584, 4626), 'numpy.histogram', 'np.histogram', (['center_measurements'], {'bins': '(21)'}), '(center_measurements, bins=21)\n', (4596, 4626), True, 'import numpy as np\n'), ((5489, 5505), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (5497, 5505), True, 'import numpy as np\n'), ((5516, 5538), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (5524, 5538), True, 'import numpy as np\n'), ((6572, 6584), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6582, 6584), False, 'from keras.models import Sequential\n'), ((6661, 6727), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((60, 20), (0, 0))', 'input_shape': '(160, 320, 3)'}), '(cropping=((60, 20), (0, 0)), input_shape=(160, 320, 3))\n', (6671, 6727), False, 'from keras.layers import Cropping2D\n'), ((6803, 6836), 'keras.layers.core.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {}), '(lambda x: x / 255.0 - 0.5)\n', (6809, 6836), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((6895, 6969), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(24)', 'kernel_size': '(5, 5)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=24, kernel_size=(5, 5), padding='valid', activation='relu')\n", (6901, 6969), False, 'from keras.layers.convolutional import Conv2D\n'), ((6978, 7008), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6990, 7008), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((7065, 7139), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(36)', 'kernel_size': '(5, 5)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=36, kernel_size=(5, 5), padding='valid', activation='relu')\n", 
(7071, 7139), False, 'from keras.layers.convolutional import Conv2D\n'), ((7148, 7178), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7160, 7178), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((7234, 7308), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(48)', 'kernel_size': '(5, 5)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=48, kernel_size=(5, 5), padding='valid', activation='relu')\n", (7240, 7308), False, 'from keras.layers.convolutional import Conv2D\n'), ((7317, 7347), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7329, 7347), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((7407, 7481), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='valid', activation='relu')\n", (7413, 7481), False, 'from keras.layers.convolutional import Conv2D\n'), ((7538, 7612), 'keras.layers.convolutional.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='valid', activation='relu')\n", (7544, 7612), False, 'from keras.layers.convolutional import Conv2D\n'), ((7651, 7660), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (7658, 7660), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((7715, 7728), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7722, 7728), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((7740, 7770), 'keras.layers.core.Dense', 'Dense', (['(1164)'], {'activation': '"""relu"""'}), "(1164, activation='relu')\n", (7745, 7770), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), 
((7826, 7839), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7833, 7839), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((7851, 7880), 'keras.layers.core.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (7856, 7880), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((7934, 7947), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7941, 7947), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((7959, 7987), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (7964, 7987), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((8043, 8056), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (8050, 8056), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((8068, 8096), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (8073, 8096), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((8138, 8146), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (8143, 8146), False, 'from keras.layers.core import Flatten, Dense, Lambda, Activation, Dropout\n'), ((546, 565), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (556, 565), False, 'import csv\n'), ((1578, 1594), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1588, 1594), False, 'import cv2\n'), ((2485, 2507), 'cv2.flip', 'cv2.flip', (['images[i]', '(1)'], {}), '(images[i], 1)\n', (2493, 2507), False, 'import cv2\n'), ((3720, 3735), 'random.random', 'random.random', ([], {}), '()\n', (3733, 3735), False, 'import random\n')] |
import numpy as np
import cv2
import pdb
import math
from geometry import *
class MapPoint():
    """A single landmark in the map: a homogeneous 3D position plus the
    feature descriptor it was triangulated from."""

    def __init__(self, pos, descriptor):
        # Store the position as a column vector; a plain 3D point is promoted
        # to homogeneous coordinates so self.pos is always a 4x1 vector.
        column = np.array(pos).reshape(-1, 1)
        if len(column) == 3:
            column = homogenize_vectors(column)
        self.pos = column  # homogeneous 4x1 column vector
        self.descriptor = descriptor

    def __repr__(self):
        # Show only the x/y/z part of the homogeneous position.
        flat = self.pos.flatten()
        return "<MapPoint pos:%s descriptor:%s>" % (flat[:-1], self.descriptor)
class Frame():
    """One camera frame: the image, its keypoints/descriptors, and the
    camera intrinsics, plus optional timestamp and frame index."""

    def __init__(self, img, keypoints, descriptors, intrinsic_mat, t=None, index=None, use_opencv_keypoints=True):
        self.img = img
        self.keypoints = keypoints
        # Descriptors are in 1-1 correspondence with the keypoints.
        self.descriptors = descriptors
        self.intrinsic_mat = intrinsic_mat
        self.t = t
        self.index = index
        # OpenCV KeyPoint objects carry their coordinates in .pt and must be
        # converted; otherwise the keypoints are assumed to already be a
        # coordinate array.
        if use_opencv_keypoints:
            self.get_coords_from_keypoints()
        else:
            self.keypoint_coords = self.keypoints

    def get_coords_from_keypoints(self):
        # Builds a 3xn array of homogeneous 2D image-plane vectors.
        # By convention, the center of the top-left corner pixel is (0,0).
        count = len(self.keypoints)
        self.keypoint_coords = np.ones((3, count))
        for column, keypoint in enumerate(self.keypoints):
            self.keypoint_coords[0:2, column] = keypoint.pt
class Map():
    """Container for the frames, camera poses, and 3D map points of a SLAM run.

    The number of stored map points can be capped with max_map_points; when
    the cap is exceeded the oldest points are discarded.
    """

    def __init__(self, max_map_points=np.inf):
        self.frames = []
        self.camera_poses = []
        self.map_points = []
        self.max_map_points = max_map_points
        self.last_keyframe_idx = None
        # Frame index at which each map point was last matched; kept in 1-1
        # correspondence with self.map_points.
        self.map_point_last_checked = []
        # Need some sort of data structure holding point correspondences

    def add_map_points(self, map_points, frame_idx):
        """Append new map points, recording frame_idx as their last-seen frame.

        If the cap is exceeded, the oldest entries are dropped from the front.
        """
        self.map_points = self.map_points + map_points
        self.map_point_last_checked = self.map_point_last_checked + [frame_idx for _ in range(len(map_points))]
        over_count = len(self.map_points) - self.max_map_points
        if over_count > 0:
            # BUGFIX: the original trimmed only map_points, leaving
            # map_point_last_checked unbounded and misaligned with it.
            # Drop the oldest entries from BOTH lists so they stay in 1-1
            # correspondence.
            over_count = int(over_count)
            self.map_points = self.map_points[over_count:]
            self.map_point_last_checked = self.map_point_last_checked[over_count:]

    def add_frame(self, frame, pose, keyframe=False):
        """Append a frame and its camera pose; optionally mark it as the last keyframe."""
        self.frames.append(frame)
        self.camera_poses.append(pose)
        if keyframe:
            self.last_keyframe_idx = len(self.frames) - 1

    def update_keypoint_last_checked(self, points, frame_num):
        """Record that the map points at the given indices were matched in frame_num."""
        for idx in points:
            self.map_point_last_checked[idx] = frame_num
class SLAM():
    """Monocular SLAM pipeline: two-view initialization followed by
    frame-by-frame pose tracking against a bounded local map and an
    unbounded global map."""

    def __init__(self, match_descriptors_func, n_local_map_points):
        # The local map is capped at n_local_map_points; the global map keeps everything.
        self.local_map = Map(n_local_map_points)
        self.global_map = Map()
        self.has_finished_initialization = False
        self.match_descriptors = match_descriptors_func # This function should take in two lists of descriptors,
        # and return a nx2 numpy array of pairs of indices of matches.

    def start_initialization(self, frame, ground_truth_pose):
        # The ground truth camera pose is only used in the first frame, so that we can work in the same
        # coordinate system as the ground truth outputs
        self.init_frame = frame
        self.init_pose = ground_truth_pose
    def try_finish_initialization(self, frame, scale):
        """Attempt two-view initialization against the stored init frame.

        frame - the current Frame
        scale - scale factor handed to triangulate() to fix the monocular scale

        On success, seeds both maps with the triangulated points and the two
        keyframes and sets has_finished_initialization; otherwise returns
        without changing state so a later frame can be tried.
        """
        # Get possible matches between the init frame and the current frame.
        pairs = self.match_descriptors(self.init_frame.descriptors, frame.descriptors)
        start_points, next_points = self.init_frame.keypoint_coords[:,pairs[:,0]], frame.keypoint_coords[:,pairs[:,1]]
        # Drop the homogeneous coordinate row, keeping plain 2D image points.
        start_points, next_points = start_points[:-1,:], next_points[:-1,:]
        descriptors = self.init_frame.descriptors[pairs[:,0]]
        # Do the triangulation; mask selects the matches that triangulated successfully.
        point_4d, R, t, mask = triangulate(start_points, next_points, frame.intrinsic_mat, scale)
        descriptors = descriptors[mask]
        # Compute the camera and point positions in the global frame
        mat = np.matmul(make_translation_matrix(t), homogenize_matrix(R)) # Maps from the new camera frame to the old camera frame
        old_mat = np.matmul(make_translation_matrix(self.init_pose.pos), homogenize_matrix(quat_to_mat(self.init_pose.quat)))
        total_mat = np.matmul(old_mat, mat)
        new_pos = total_mat[:,3]
        new_quat = mat_to_quat(unhomogenize_matrix(total_mat))
        new_pose = Pose(new_pos, new_quat, t=frame.t)
        points_global = local_xyz_to_global_xyz(new_pose, point_4d)
        map_points = [MapPoint(points_global[:,i], descriptors[i]) for i in range(len(descriptors))]
        # Compute the angle between the two frames for each point: unit rays
        # from each camera center to the point, then their dot product gives
        # the cosine of the parallax angle.
        start_vecs = (points_global - self.init_pose.pos)[0:3]
        next_vecs = (points_global - new_pose.pos)[0:3]
        start_vecs = start_vecs / np.linalg.norm(start_vecs, axis=0)
        next_vecs = next_vecs / np.linalg.norm(next_vecs, axis=0)
        dprods = np.sum(np.multiply(start_vecs, next_vecs), axis=0)
        # NOTE(review): only used by the commented-out debug plots below.
        import matplotlib.pyplot as plt
        # plt.scatter(next_points[0], next_points[1], color="blue", s=20**2)
        # plt.scatter(local_xyz_to_uv(frame.intrinsic_mat, point_4d)[0], local_xyz_to_uv(frame.intrinsic_mat, point_4d)[1], color="red", s=8**2)
        # plt.scatter(global_xyz_to_uv(new_pose, frame.intrinsic_mat, points_global)[0], global_xyz_to_uv(new_pose, frame.intrinsic_mat, points_global)[1], color="green", s=2**2)
        # plt.show()
        # plt.scatter(start_points[0], start_points[1], color="blue", s=20**2)
        # plt.scatter(global_xyz_to_uv(self.init_pose, self.init_frame.intrinsic_mat, points_global)[0], global_xyz_to_uv(self.init_pose, self.init_frame.intrinsic_mat, points_global)[1], color="green", s=2**2)
        # plt.show()
        # Remove points with insufficient parallax. Larger cosine = smaller angle.
        cos1 = math.cos(3.0 * (math.pi / 180.0))  # per-point threshold: parallax must exceed 3 degrees
        cos2 = math.cos(5.0 * (math.pi / 180.0))  # average threshold: mean parallax must exceed 5 degrees
        mask = dprods < cos1
        if np.sum(mask) < 40: # Check that we have enough points
            print("Not enough parallax points! Only found %d." % np.sum(mask))
            return
        elif np.mean(dprods[mask]) > cos2: # Check that the points we have are good overall
            print("Average parallax insufficient! Needed %f, got %f." % (math.acos(cos2) * 180 / math.pi, math.acos(np.mean(dprods[mask])) * 180 / math.pi))
            return
        else:
            print("Found sufficient parallax! Finishing initialization.")
            self.has_finished_initialization = True
            for map_obj in [self.local_map, self.global_map]:
                map_obj.add_map_points(map_points, frame.index)
                map_obj.add_frame(self.init_frame, self.init_pose, keyframe=True)
                map_obj.add_frame(frame, new_pose, keyframe=True)
            return
    def track_next_frame(self, frame):
        """Track the camera pose for one new frame against the local map.

        frame - the current Frame

        Estimates the pose with PnP+RANSAC against visible map points,
        appends the frame to both maps, triangulates new map points when the
        frame qualifies as a keyframe, and returns False when the estimated
        pose jumps implausibly far from the previous one.
        """
        # Find map points visible in the current frame, by looking at the previous frame's pose.
        map_point_coords = np.array([point.pos.flatten() for point in self.local_map.map_points]).T
        local_coords = global_xyz_to_local_xyz(self.local_map.camera_poses[-1], map_point_coords)
        uv_points, idx = local_only_good_image_idx(frame.intrinsic_mat, frame.img.shape, local_coords)
        descriptors = np.array([point.descriptor for point in self.local_map.map_points])[idx]
        # Match frame keypoints with map points
        pairs = self.match_descriptors(frame.descriptors, descriptors)
        print("Num matches: %d" % len(pairs))
        frame_points, map_points = frame.keypoint_coords[:,pairs[:,0]], (map_point_coords[:,idx])[:,pairs[:,1]]
        # Drop the homogeneous coordinate rows.
        frame_points, map_points = frame_points[:-1,:], map_points[:-1,:]
        # NOTE(review): pairs[:,1] indexes the *filtered* descriptor subset, not
        # self.local_map.map_points directly — confirm the index space is intended.
        self.local_map.update_keypoint_last_checked(pairs[:,1], frame.index)
        # Reshape according to this: https://stackoverflow.com/questions/33696082/error-using-solvepnpransac-function
        frame_points = frame_points.T.reshape(-1,1,2)
        map_points = map_points.T.reshape(-1,1,3)
        camera_mat_3x3 = frame.intrinsic_mat[0:3,0:3]
        # Estimate the last camera pose, used as the initial guess for PnP.
        pos = self.local_map.camera_poses[-1].pos
        quat = self.local_map.camera_poses[-1].quat
        input_Rvec = mat_to_rot_vec(quat_to_mat(quat).T)
        input_tvec = -1 * unhomogenize_vectors(pos).flatten()
        # Actually find the camera position
        suc, R_vec, t_vec, mask = cv2.solvePnPRansac(map_points, frame_points, camera_mat_3x3, distCoeffs=None,
                                                     rvec=input_Rvec, tvec=input_tvec, useExtrinsicGuess=True,
                                                     iterationsCount=10000, reprojectionError=2.0, confidence=0.999,
                                                     flags=cv2.SOLVEPNP_ITERATIVE)
        print("solvePnPRansac success? %s" % suc)
        # R, t are the rotation and translation from the world frame to the camera frame
        # So in our pose scheme, we have to use their inverses
        pose_mat = np.matmul(homogenize_matrix(rot_vec_to_mat(R_vec)).T, make_translation_matrix(-1 * t_vec))
        pos = pose_mat[:,3]
        quat = mat_to_quat(unhomogenize_matrix(pose_mat))
        camera_pose = Pose(pos, quat, t=frame.t)
        # Local map update step
        # Check if change in pose between this frame and the last keyframe is large enough
        # If it is, triangulate, and add any new points to the local map
        # Reject implausible jumps from the previous frame's pose (tracking failure).
        if homogeneous_norm(camera_pose.pos - self.local_map.camera_poses[-1].pos) > 0.25:
            return False
        dist = homogeneous_norm(camera_pose.pos - self.local_map.camera_poses[self.local_map.last_keyframe_idx].pos)
        print("dist, %f" % dist)
        # A frame becomes a keyframe when it has moved or rotated enough since the last keyframe.
        this_frame_keyframe = dist > 0.1 or quat_error(camera_pose.quat, self.local_map.camera_poses[self.local_map.last_keyframe_idx].quat) > 0.1
        if this_frame_keyframe:
            last_keyframe = self.local_map.frames[self.local_map.last_keyframe_idx]
            last_keyframe_pos = self.local_map.camera_poses[self.local_map.last_keyframe_idx]
            # For now, just triangulate all points, and don't worry about duplicates
            pairs = self.match_descriptors(last_keyframe.descriptors, frame.descriptors)
            start_points, next_points = last_keyframe.keypoint_coords[:,pairs[:,0]], frame.keypoint_coords[:,pairs[:,1]]
            start_points, next_points = start_points[:-1,:], next_points[:-1,:]
            descriptors = last_keyframe.descriptors[pairs[:,0]]
            point_4d, R, t, mask = triangulate(start_points, next_points, frame.intrinsic_mat, dist)
            descriptors = descriptors[mask]
            mat = np.matmul(make_translation_matrix(t), homogenize_matrix(R)) # Maps from the new camera frame to the old camera frame
            old_mat = np.matmul(make_translation_matrix(last_keyframe_pos.pos), homogenize_matrix(quat_to_mat(last_keyframe_pos.quat)))
            total_mat = np.matmul(old_mat, mat)
            new_pos = total_mat[:,3]
            new_quat = mat_to_quat(unhomogenize_matrix(total_mat))
            # camera_pose = Pose(new_pos, new_quat, t=frame.t)
            points_global = local_xyz_to_global_xyz(camera_pose, point_4d)
            map_points = [MapPoint(points_global[:,i], descriptors[i]) for i in range(len(descriptors))]
            # Discard freshly triangulated points whose descriptor already matches an existing map point.
            all_map_descriptors = [point.descriptor for point in self.local_map.map_points]
            pairs = self.match_descriptors(descriptors, all_map_descriptors)
            if len(pairs) == 0:
                no_duplicate_map_points = map_points
            else:
                duplicates = pairs[:,0]
                no_duplicate_map_points = [map_points[i] for i in range(len(map_points)) if i not in duplicates]
                print("%d duplicates" % len(duplicates))
        # Record the frame (and any new keyframe map points) in both maps.
        for map_obj in [self.local_map, self.global_map]:
            map_obj.add_frame(frame, camera_pose, keyframe=this_frame_keyframe)
            if this_frame_keyframe:
                print("Adding %d map points" % len(no_duplicate_map_points))
                map_obj.add_map_points(no_duplicate_map_points, frame.index)
return True | [
"numpy.mean",
"numpy.multiply",
"math.acos",
"cv2.solvePnPRansac",
"math.cos",
"numpy.sum",
"numpy.array",
"numpy.matmul",
"numpy.linalg.norm"
] | [((4326, 4349), 'numpy.matmul', 'np.matmul', (['old_mat', 'mat'], {}), '(old_mat, mat)\n', (4335, 4349), True, 'import numpy as np\n'), ((5773, 5806), 'math.cos', 'math.cos', (['(3.0 * (math.pi / 180.0))'], {}), '(3.0 * (math.pi / 180.0))\n', (5781, 5806), False, 'import math\n'), ((5816, 5849), 'math.cos', 'math.cos', (['(5.0 * (math.pi / 180.0))'], {}), '(5.0 * (math.pi / 180.0))\n', (5824, 5849), False, 'import math\n'), ((8057, 8300), 'cv2.solvePnPRansac', 'cv2.solvePnPRansac', (['map_points', 'frame_points', 'camera_mat_3x3'], {'distCoeffs': 'None', 'rvec': 'input_Rvec', 'tvec': 'input_tvec', 'useExtrinsicGuess': '(True)', 'iterationsCount': '(10000)', 'reprojectionError': '(2.0)', 'confidence': '(0.999)', 'flags': 'cv2.SOLVEPNP_ITERATIVE'}), '(map_points, frame_points, camera_mat_3x3, distCoeffs=\n None, rvec=input_Rvec, tvec=input_tvec, useExtrinsicGuess=True,\n iterationsCount=10000, reprojectionError=2.0, confidence=0.999, flags=\n cv2.SOLVEPNP_ITERATIVE)\n', (8075, 8300), False, 'import cv2\n'), ((4835, 4869), 'numpy.linalg.norm', 'np.linalg.norm', (['start_vecs'], {'axis': '(0)'}), '(start_vecs, axis=0)\n', (4849, 4869), True, 'import numpy as np\n'), ((4896, 4929), 'numpy.linalg.norm', 'np.linalg.norm', (['next_vecs'], {'axis': '(0)'}), '(next_vecs, axis=0)\n', (4910, 4929), True, 'import numpy as np\n'), ((4948, 4982), 'numpy.multiply', 'np.multiply', (['start_vecs', 'next_vecs'], {}), '(start_vecs, next_vecs)\n', (4959, 4982), True, 'import numpy as np\n'), ((5879, 5891), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (5885, 5891), True, 'import numpy as np\n'), ((7039, 7106), 'numpy.array', 'np.array', (['[point.descriptor for point in self.local_map.map_points]'], {}), '([point.descriptor for point in self.local_map.map_points])\n', (7047, 7106), True, 'import numpy as np\n'), ((10368, 10391), 'numpy.matmul', 'np.matmul', (['old_mat', 'mat'], {}), '(old_mat, mat)\n', (10377, 10391), True, 'import numpy as np\n'), ((142, 155), 'numpy.array', 
'np.array', (['pos'], {}), '(pos)\n', (150, 155), True, 'import numpy as np\n'), ((6020, 6041), 'numpy.mean', 'np.mean', (['dprods[mask]'], {}), '(dprods[mask])\n', (6027, 6041), True, 'import numpy as np\n'), ((5989, 6001), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (5995, 6001), True, 'import numpy as np\n'), ((6163, 6178), 'math.acos', 'math.acos', (['cos2'], {}), '(cos2)\n', (6172, 6178), False, 'import math\n'), ((6206, 6227), 'numpy.mean', 'np.mean', (['dprods[mask]'], {}), '(dprods[mask])\n', (6213, 6227), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
~~~~~~~~~~~~~~~~~~
Camera calibration
~~~~~~~~~~~~~~~~~~
Usage:
python calib.py \
-i /dev/video0 \
-grid 9x6 \
-out fisheye.yaml \
-framestep 20 \
-resolution 640x480
--fisheye
"""
import argparse
import os
import numpy as np
import yaml
import cv2
def main():
    """Interactively calibrate a (possibly fisheye) camera from a chessboard video.

    Grabs every `framestep`-th frame from the video/device, detects chessboard
    corners, and once the user presses 'c' runs the calibration and writes the
    intrinsics (image size, camera matrix K, distortion coefficients D) to a
    yaml file in the output directory. Press 'q' to quit without calibrating.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", default=0, help="input video file or camera device")
    parser.add_argument("-grid", "--grid", default="20x20", help="size of the grid (rows x cols)")
    parser.add_argument("-framestep", type=int, default=20, help="use every nth frame in the video")
    parser.add_argument("-o", "--output", default="./yaml",
                        help="path to output yaml file")
    parser.add_argument("-resolution", "--resolution", default="640x480",
                        help="resolution of the camera")
    parser.add_argument("-fisheye", "--fisheye", action="store_true",
                        help="set true if this is a fisheye camera")
    args = parser.parse_args()

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    # A purely numeric input is a camera device index; anything else is treated
    # as a video file path. (BUGFIX: the original used a bare `except:`, which
    # would also swallow KeyboardInterrupt/SystemExit.)
    try:
        source = cv2.VideoCapture(int(args.input))
    except (TypeError, ValueError):
        source = cv2.VideoCapture(args.input)

    W, H = [int(x) for x in args.resolution.split("x")]
    source.set(3, W)  # CAP_PROP_FRAME_WIDTH
    source.set(4, H)  # CAP_PROP_FRAME_HEIGHT

    grid_size = tuple(int(x) for x in args.grid.split("x"))
    # Object points: the chessboard corners on the z=0 plane, in grid units.
    grid_points = np.zeros((np.prod(grid_size), 3), np.float32)
    grid_points[:, :2] = np.indices(grid_size).T.reshape(-1, 2)

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    should_quit = False  # renamed from `quit` to avoid shadowing the builtin
    do_calib = False
    i = -1
    while True:
        i += 1
        retcode, img = source.read()
        if not retcode:
            raise ValueError("cannot read frame from video")
        # Only every framestep-th frame is considered.
        if i % args.framestep != 0:
            continue

        print("searching for chessboard corners in frame " + str(i) + "...")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(
            gray,
            grid_size,
            cv2.CALIB_CB_ADAPTIVE_THRESH +
            cv2.CALIB_CB_NORMALIZE_IMAGE +
            cv2.CALIB_CB_FILTER_QUADS
        )
        if found:
            # Refine corner locations to sub-pixel accuracy before storing them.
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
            print("OK")
            imgpoints.append(corners.reshape(1, -1, 2))
            objpoints.append(grid_points.reshape(1, -1, 3))
            cv2.drawChessboardCorners(img, grid_size, corners, found)

        text1 = "press c to calibrate"
        text2 = "press q to quit"
        text3 = "device: {}".format(args.input)
        fontscale = 0.6
        cv2.putText(img, text1, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
        cv2.putText(img, text2, (20, 110), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
        cv2.putText(img, text3, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
        cv2.imshow("corners", img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("c"):
            do_calib = True
            break
        elif key == ord("q"):
            should_quit = True
            break

    if should_quit:
        source.release()
        cv2.destroyAllWindows()

    if do_calib:
        print("\nPerforming calibration...\n")
        N_OK = len(objpoints)
        if N_OK < 12:
            print("Less than 12 corners detected, calibration failed")
            return
        K = np.zeros((3, 3))
        D = np.zeros((4, 1))
        rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
        tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
        calibration_flags = (cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC +
                             cv2.fisheye.CALIB_CHECK_COND +
                             cv2.fisheye.CALIB_FIX_SKEW)
        # Estimate the intrinsic matrix and the distortion coefficients.
        if args.fisheye:
            ret, mtx, dist, rvecs, tvecs = cv2.fisheye.calibrate(
                objpoints,
                imgpoints,
                (W, H),
                K,
                D,
                rvecs,
                tvecs,
                calibration_flags,
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-06)
            )
        else:
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                objpoints,
                imgpoints,
                (W, H),
                None,
                None)
        if ret:
            print(ret)
            # BUGFIX: save the returned mtx/dist. The original saved K/D, which
            # cv2.calibrateCamera leaves as all zeros (only cv2.fisheye.calibrate
            # fills the passed-in K and D arrays in place).
            data = {"dim": np.array([W, H]).tolist(),
                    "K": np.asarray(mtx).tolist(),
                    "D": np.asarray(dist).tolist()}
            fname = os.path.join(args.output, "camera" + str(args.input) + ".yaml")
            print(fname)
            with open(fname, "w") as f:
                yaml.safe_dump(data, f)
            print("successfully saved camera data")
            cv2.putText(img, "Success!", (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
        else:
            cv2.putText(img, "Failed!", (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
        cv2.imshow("corners", img)
        cv2.waitKey(0)
# Run the calibration tool only when executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.prod",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.cornerSubPix",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"cv2.waitKey",
"yaml.safe_dump",
"numpy.indices",
"cv2.putText",
"cv2.fisheye.calibrate",
... | [((352, 377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (375, 377), False, 'import argparse\n'), ((1103, 1130), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (1117, 1130), False, 'import os\n'), ((1140, 1161), 'os.mkdir', 'os.mkdir', (['args.output'], {}), '(args.output)\n', (1148, 1161), False, 'import os\n'), ((2021, 2058), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2033, 2058), False, 'import cv2\n'), ((2084, 2219), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', 'grid_size', '(cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.\n CALIB_CB_FILTER_QUADS)'], {}), '(gray, grid_size, cv2.CALIB_CB_ADAPTIVE_THRESH +\n cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FILTER_QUADS)\n', (2109, 2219), False, 'import cv2\n'), ((2814, 2906), 'cv2.putText', 'cv2.putText', (['img', 'text1', '(20, 70)', 'cv2.FONT_HERSHEY_SIMPLEX', 'fontscale', '(255, 200, 0)', '(2)'], {}), '(img, text1, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255,\n 200, 0), 2)\n', (2825, 2906), False, 'import cv2\n'), ((2911, 3005), 'cv2.putText', 'cv2.putText', (['img', 'text2', '(20, 110)', 'cv2.FONT_HERSHEY_SIMPLEX', 'fontscale', '(255, 200, 0)', '(2)'], {}), '(img, text2, (20, 110), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (\n 255, 200, 0), 2)\n', (2922, 3005), False, 'import cv2\n'), ((3009, 3101), 'cv2.putText', 'cv2.putText', (['img', 'text3', '(20, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', 'fontscale', '(255, 200, 0)', '(2)'], {}), '(img, text3, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255,\n 200, 0), 2)\n', (3020, 3101), False, 'import cv2\n'), ((3106, 3132), 'cv2.imshow', 'cv2.imshow', (['"""corners"""', 'img'], {}), "('corners', img)\n", (3116, 3132), False, 'import cv2\n'), ((3363, 3386), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3384, 3386), False, 'import cv2\n'), ((3607, 3623), 'numpy.zeros', 'np.zeros', (['(3, 
3)'], {}), '((3, 3))\n', (3615, 3623), True, 'import numpy as np\n'), ((3636, 3652), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (3644, 3652), True, 'import numpy as np\n'), ((5184, 5210), 'cv2.imshow', 'cv2.imshow', (['"""corners"""', 'img'], {}), "('corners', img)\n", (5194, 5210), False, 'import cv2\n'), ((5219, 5233), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5230, 5233), False, 'import cv2\n'), ((1252, 1280), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.input'], {}), '(args.input)\n', (1268, 1280), False, 'import cv2\n'), ((1469, 1487), 'numpy.prod', 'np.prod', (['grid_size'], {}), '(grid_size)\n', (1476, 1487), True, 'import numpy as np\n'), ((2395, 2450), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(5, 5)', '(-1, -1)', 'term'], {}), '(gray, corners, (5, 5), (-1, -1), term)\n', (2411, 2450), False, 'import cv2\n'), ((2603, 2660), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', 'grid_size', 'corners', 'found'], {}), '(img, grid_size, corners, found)\n', (2628, 2660), False, 'import cv2\n'), ((3147, 3161), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3158, 3161), False, 'import cv2\n'), ((3670, 3707), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.float64'}), '((1, 1, 3), dtype=np.float64)\n', (3678, 3707), True, 'import numpy as np\n'), ((3747, 3784), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.float64'}), '((1, 1, 3), dtype=np.float64)\n', (3755, 3784), True, 'import numpy as np\n'), ((4084, 4247), 'cv2.fisheye.calibrate', 'cv2.fisheye.calibrate', (['objpoints', 'imgpoints', '(W, H)', 'K', 'D', 'rvecs', 'tvecs', 'calibration_flags', '(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-06)'], {}), '(objpoints, imgpoints, (W, H), K, D, rvecs, tvecs,\n calibration_flags, (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,\n 30, 1e-06))\n', (4105, 4247), False, 'import cv2\n'), ((4454, 4515), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 
'imgpoints', '(W, H)', 'None', 'None'], {}), '(objpoints, imgpoints, (W, H), None, None)\n', (4473, 4515), False, 'import cv2\n'), ((4977, 5066), 'cv2.putText', 'cv2.putText', (['img', '"""Success!"""', '(220, 240)', 'cv2.FONT_HERSHEY_COMPLEX', '(2)', '(0, 0, 255)', '(2)'], {}), "(img, 'Success!', (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0,\n 255), 2)\n", (4988, 5066), False, 'import cv2\n'), ((5090, 5178), 'cv2.putText', 'cv2.putText', (['img', '"""Failed!"""', '(220, 240)', 'cv2.FONT_HERSHEY_COMPLEX', '(2)', '(0, 0, 255)', '(2)'], {}), "(img, 'Failed!', (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0,\n 255), 2)\n", (5101, 5178), False, 'import cv2\n'), ((1530, 1551), 'numpy.indices', 'np.indices', (['grid_size'], {}), '(grid_size)\n', (1540, 1551), True, 'import numpy as np\n'), ((4889, 4912), 'yaml.safe_dump', 'yaml.safe_dump', (['data', 'f'], {}), '(data, f)\n', (4903, 4912), False, 'import yaml\n'), ((4663, 4679), 'numpy.array', 'np.array', (['[W, H]'], {}), '([W, H])\n', (4671, 4679), True, 'import numpy as np\n')] |
# @Author : <NAME>
# @Email : <EMAIL>
import os
from scipy import stats, linalg
#from SALib.sample import latin
import numpy as np
from ReadResults import Utilities
import pickle
import CoreFiles.GeneralFunctions as GrlFct
import openturns as ot
ot.ResourceMap.SetAsBool("ComposedDistribution-UseGenericCovarianceAlgorithm", True)
def getYearlyError(Res, NewMeas):
    """Compute the yearly relative error between simulated and measured heat demand.

    Res : dict of simulation results; 'HeatedArea'/'Other' hold hourly-resolution
        heating and DHW power series, 'EP_Heat' fixes the number of simulations.
    NewMeas : measurement dict; sum of 'EnergySurfRatio' is the yearly reference.

    Returns a tuple (error, EPHeat):
      error  - signed relative error in % of each simulation vs. the measurement
      EPHeat - yearly simulated heat demand per simulation (space heating plus
               DHW when the DHW series is available), divided by 1000
    """
    EPHeat = []
    for idx in range(len(Res['EP_Heat'])):
        Heat2treat = Res['HeatedArea'][idx]['Data_Zone Ideal Loads Supply Air Total Heating Rate']
        # Resample the series to hourly values (8760 hours per year).
        HeatPower = Utilities.Average(Heat2treat, int(len(Heat2treat) / 8760))
        try:
            if 'Data_Total DHW Heating Power' in Res['Other'][idx]:
                Data2treat = Res['Other'][idx]['Data_Total DHW Heating Power']
            else:
                Data2treat = Res['Other'][idx]['Data_Water Use Equipment Heating Rate']
            DHWPower = Utilities.Average(Data2treat, int(len(Data2treat) / 8760))
            EPHeat.append(sum(val + DHWPower[i] for i, val in enumerate(HeatPower)) / 1000)
        except Exception:
            # No usable DHW series for this simulation: count space heating only.
            EPHeat.append(sum(HeatPower) / 1000)
    # Yearly measured reference value.
    Meas = sum(NewMeas['EnergySurfRatio'])
    error = [(val - Meas) / Meas * 100 for val in EPHeat]
    return error, EPHeat
def getPeriodError(Res, NewMeas, idx, NbSample):
    """Compute the CV(RMSE) between simulated and measured heat demand over NbSample periods.

    Res      : dict of simulation results (hourly-resolution power series)
    NewMeas  : measurement dict; 'EnergySurfRatio' holds hourly measured values
    idx      : index of the simulation to evaluate
    NbSample : number of periods the year is split into (12 = monthly,
               52 = weekly, 365 = daily)

    Returns (SampleError, error):
      SampleError - absolute relative error in % for each period
      error       - coefficient of variation of the RMSE over the periods, in %
    """
    Heat2treat = Res['HeatedArea'][idx]['Data_Zone Ideal Loads Supply Air Total Heating Rate']
    # Resample the series to hourly values (8760 hours per year).
    HeatPower = Utilities.Average(Heat2treat, int(len(Heat2treat) / 8760))
    try:
        if 'Data_Total DHW Heating Power' in Res['Other'][idx]:
            Data2treat = Res['Other'][idx]['Data_Total DHW Heating Power']
        else:
            Data2treat = Res['Other'][idx]['Data_Water Use Equipment Heating Rate']
        DHWPower = Utilities.Average(Data2treat, int(len(Data2treat) / 8760))
        SimPower = [val + DHWPower[i] for i, val in enumerate(HeatPower)]
    except Exception:
        # No usable DHW series for this simulation: space heating only.
        SimPower = list(HeatPower)
    MeasPower = [val * 1000 for val in NewMeas['EnergySurfRatio']]
    # Drop the first sample and the last 23 — presumably aligns the measured
    # series with the simulated one (TODO confirm against the measurement format).
    MeasPower = MeasPower[1:-23]
    nbHrperSample = int(8760 / NbSample)
    SampleEnergySim = []
    SampleEnergyMeas = []
    SampleError = []
    for i in range(NbSample):
        SampleEnergySim.append(sum(SimPower[i * nbHrperSample:nbHrperSample + i * nbHrperSample]))
        SampleEnergyMeas.append(sum(MeasPower[i * nbHrperSample:nbHrperSample + i * nbHrperSample]))
        SampleError.append(abs(SampleEnergySim[-1] - SampleEnergyMeas[-1]) / SampleEnergyMeas[-1] * 100)
    # CV(RMSE): sample standard deviation of the residuals over the mean measurement.
    error = (sum((SampleEnergyMeas[i] - SampleEnergySim[i]) ** 2 / (NbSample - 1)
                 for i in range(NbSample)) ** 0.5 / np.mean(SampleEnergyMeas)) * 100
    return SampleError, error
# if error<Relerror:
# return Res['SimNum'][idx]
def getErrorMatches(Res, Meas, CalibrationBasis):
    """Dispatch the error computation according to the calibration basis.

    A yearly basis uses the relative yearly error; the other bases use the
    CV(RMSE) over 12 (monthly), 52 (weekly) or 365 (daily) periods.
    Returns a list with one error value per simulation.
    """
    if 'YearlyBasis' in CalibrationBasis:
        YearError, _ = getYearlyError(Res, Meas)
        return YearError
    # Number of periods per year for each period-based basis.
    PeriodsPerBasis = {'MonthlyBasis': 12, 'WeeklyBasis': 52, 'DailyBasis': 365}
    Error = []
    for Basis, NbSample in PeriodsPerBasis.items():
        if Basis in CalibrationBasis:
            for SimIdx in range(len(Res['SimNum'])):
                _, CVRMSEro = getPeriodError(Res, Meas, SimIdx, NbSample)
                Error.append(CVRMSEro)
            break
    return Error
def getGoodParamList(Error, CalibBasis, VarName2Change, ParamSample, REMax=5, CVRMSMax=15):
    """Select, per parameter, the sampled values whose simulation error is below threshold.

    The threshold is REMax (relative error, %) for a yearly calibration basis and
    CVRMSMax (CV(RMSE), %) for any other basis. Returns a dict mapping each
    parameter name to the list of matching values taken from the corresponding
    column of ParamSample.
    """
    Criteria = REMax if 'YearlyBasis' in CalibBasis else CVRMSMax
    GoodRuns = [run for run, err in enumerate(Error) if abs(err) < Criteria]
    return {key: [ParamSample[run, col] for run in GoodRuns]
            for col, key in enumerate(VarName2Change)}
def getOpenTurnsCorrelated(Data, VarName2Change, nbruns, BoundLim):
    """Draw a correlated LHS sample reproducing the covariance of previous matches.

    Data          : dict mapping parameter names to arrays of previously matched values
    VarName2Change: parameter names (order fixes the sample columns)
    nbruns        : number of new samples to draw
    BoundLim      : absolute [min, max] limits per parameter

    Returns an (nbruns x nbparams) numpy array of new parameter sets.
    """
    ##################NO MORE USED##########################
    # this is taken form https://se.mathworks.com/matlabcentral/fileexchange/56384-lhsgeneral-pd-correlation-n
    # and implemented in python by a proposeal in https://openturns.discourse.group/t/generate-multivariate-joint-distribution/182/3
    ParamSample = []
    pd = []
    for idx,key in enumerate(VarName2Change):
        ParamSample.append(Data[key])
        # Uniform marginal over the observed range widened by 10%, clipped to the hard limits.
        full_range = BoundLim[idx][1] - BoundLim[idx][0]
        pd.append(ot.Uniform(max(BoundLim[idx][0],Data[key].min() - 0.1*full_range),
                              min(BoundLim[idx][1],Data[key].max() + 0.1*full_range)))
    ParamSample = np.array(ParamSample)
    #pd = [ot.Normal(0.0, 20.0), ot.Triangular(0.0, 100.0, 150.0)]
    covMat = np.cov(ParamSample.transpose(), rowvar=False)
    # NOTE(review): `idx` here is the last loop index, i.e. len(VarName2Change)-1;
    # the covariance matrix is flattened row-wise into the CorrelationMatrix.
    correlation = ot.CorrelationMatrix(idx+1,[float(val) for val in list(np.reshape(covMat, ((idx+1)**2, 1)))] )
    n = nbruns
    return np.array(lhsgeneral(pd, correlation, n))
def getOpenTurnsCorrelatedFromSample(Data, VarName2Change, nbruns, BoundLim):
    """Draw new correlated samples from the empirical distribution of previous matches.

    Fits a normal copula plus histogram marginals to the matched parameter values
    in Data and samples nbruns new parameter sets from the joint distribution.

    Data          : dict mapping parameter names to arrays of matched values
    VarName2Change: parameter names (order fixes the sample columns)
    nbruns        : number of new samples to draw
    BoundLim      : NOTE(review) accepted for interface symmetry but not used here

    Returns an (nbruns x nbparams) numpy array.
    """
    ParamSample = []
    for idx, key in enumerate(VarName2Change):
        ParamSample.append(Data[key])
    ParamSample = np.array(ParamSample)
    data = ot.Sample(ParamSample.transpose())
    # Identify the associated normal copula
    copula = ot.NormalCopulaFactory().build(data)
    # Identify the marginal distributions
    pd = [ot.HistogramFactory().build(data.getMarginal(i)) for i in range(data.getDimension())]
    # Build the joint distribution
    dist = ot.ComposedDistribution(pd, copula)
    # Generate a new sample
    correlatedSamples = dist.getSample(nbruns)
    #R = correlatedSamples.computeLinearCorrelation()
    return np.array(correlatedSamples)
def lhsgeneral(pd, correlation, n):
    """Generate n Latin-Hypercube samples with given marginals and correlation.

    pd          : list of openturns marginal distributions, one per dimension
    correlation : target ot.CorrelationMatrix
    n           : number of samples

    Returns an openturns sample in the marginals' original scale.
    """
    ##################NO MORE USED##########################
    dim = len(pd)
    RStar = correlation
    unifND = [ot.Uniform(0.0, 1.0)]*dim
    normND = [ot.Normal(0.0, 1.0)]*dim
    # Independent LHS design in the unit hypercube, mapped to standard normals.
    lhsDOE = ot.LHSExperiment(ot.ComposedDistribution(unifND), n)
    x = lhsDOE.generate()
    independent_sample = ot.MarginalTransformationEvaluation(unifND, normND)(x)
    R = independent_sample.computeLinearCorrelation()
    # Impose the target correlation via Cholesky factors: M = P * Q^-1.
    P = RStar.computeCholesky()
    Q = R.computeCholesky()
    M = P * Q.solveLinearSystem(ot.IdentityMatrix(dim))
    lin = ot.LinearEvaluation([0.0]*dim, [0.0]*dim, M.transpose())
    dependent_sample = lin(independent_sample)
    # Map the correlated normals back to the requested marginals.
    transformed_sample = ot.MarginalTransformationEvaluation(normND, pd)(dependent_sample)
    return transformed_sample
def getCovarCalibratedParam(Data, VarName2Change, nbruns, BoundLim):
    """Draw correlated samples via a Cholesky transform of an LHS design.

    NOTE(review): this function calls `latin.sample`, but the import
    `from SALib.sample import latin` is commented out at the top of the file,
    so calling this function as-is raises NameError. It is kept for reference
    ("NO MORE USED").

    Data          : dict mapping parameter names to arrays of matched values
    VarName2Change: parameter names (order fixes the sample columns)
    nbruns        : number of new samples to draw
    BoundLim      : absolute [min, max] limits per parameter

    Returns an (nbruns x nbparams) numpy array.
    """
    ##################NO MORE USED##########################
    # the method below follows the one describe in :
    # https://scipy-cookbook.readthedocs.io/items/CorrelatedRandomSamples.html
    # with the exception that as we on't have the former distribution type, the new sample are kept uniform
    # this should be enhanced further
    ParamSample = []
    for key in VarName2Change:
        ParamSample.append(Data[key])
    ParamSample = np.array(ParamSample)
    RStar = np.cov(ParamSample.transpose(), rowvar=False)
    problemnew = {
        'num_vars': len(VarName2Change),
        'names': VarName2Change,
        'bounds': [[0, 1]] * len(VarName2Change)
    }
    xx = latin.sample(problemnew, nbruns)
    z = []
    for i in range(xx.shape[1]):
        # Map uniform LHS columns to standard normal space.
        tmp = stats.norm.ppf(xx[:, i], 0, 1)
        z.append(tmp)
    xx = np.array(z) # this is used to change dimension array from n,m to m,n
    # Impose the target covariance through its Cholesky factor.
    P = linalg.cholesky(RStar, lower=True)
    x_xcorrelated = np.dot(P,xx)
    y = stats.norm.cdf(x_xcorrelated)
    if y.shape[0] != len(VarName2Change):
        y = y.transpose()
    # now we have the samples based on correlated data with provided but we need to transform them to
    # their real ranges example: temperature samples from -4 to 4 -> 19 to 22.
    y_transformed = []
    for i in range(len(y[:, 0])):
        #full_range = ParamSample[i, :].max() - ParamSample[i, :].min()
        full_range = BoundLim[i][1]-BoundLim[i][0]
        y_transformed.append(np.interp(y[i], (y[i].min(), y[i].max()), (
            max(BoundLim[i][0], ParamSample[i, :].min() - 0.1 * full_range),
            min(BoundLim[i][1], ParamSample[i, :].max() + 0.1 * full_range))))
    Param2keep = np.array(y_transformed)
    return Param2keep.transpose()
def getBootStrapedParam(Data, VarName2Change, nbruns, BoundLim):
    """Draw nbruns new parameter sets by bootstrapping the normalized matches.

    Data          : dict mapping parameter names to arrays of matched values
    VarName2Change: parameter names (order fixes the sample columns)
    nbruns        : number of new samples to draw
    BoundLim      : absolute [min, max] limits per parameter

    Returns an (nbruns x nbparams) numpy array rescaled to the observed range
    widened by 10% and clipped to BoundLim.
    """
    ##################NO MORE USED##########################
    import openturns as ot
    ParamSample = []
    NormalizedParam = []
    for key in VarName2Change:
        ParamSample.append(Data[key])
        # Min-max normalization of each parameter's matched values.
        NormalizedParam.append((Data[key]-Data[key].min())/(Data[key].max()-Data[key].min()))
    ParamSample = np.array(ParamSample).transpose()
    NormalizedParam = np.array(NormalizedParam).transpose()
    BottObject = ot.BootstrapExperiment(NormalizedParam)
    NewSampleAsArray = []
    finished = False
    while not finished:
        NewSample = BottObject.generate()
        # First pass: `not []` is True, so the array is initialized in the try.
        # Later passes: truth-testing a multi-element ndarray raises, so the
        # except branch appends instead. Deliberate (if fragile) control flow.
        try:
            if not NewSampleAsArray:
                NewSampleAsArray = np.array(list(NewSample))
        except:
            NewSampleAsArray = np.append(NewSampleAsArray,np.array(list(NewSample)),axis = 0)
        if NewSampleAsArray.shape[0]>nbruns:
            finished = True
    # Keep exactly nbruns rows, picked at random (with replacement).
    y = np.array([NewSampleAsArray[i,:] for i in np.random.randint(0, NewSampleAsArray.shape[0], nbruns)])
    y_transformed = []
    for i in range(y.shape[1]):
        #full_range = ParamSample[i, :].max() - ParamSample[i, :].min()
        full_range = BoundLim[i][1]-BoundLim[i][0]
        y_transformed.append(np.interp(y[:,i], (y[:,i].min(), y[:,i].max()), (
            max(BoundLim[i][0], ParamSample[:, i].min() - 0.1 * full_range),
            min(BoundLim[i][1], ParamSample[:, i].max() + 0.1 * full_range))))
    Param2keep = np.array(y_transformed)
    return Param2keep.transpose()
def getNewBounds(Bounds, BoundLim):
    """Widen each [low, high] bound by 10% of its range, clipped to the hard limits.

    Bounds   - current [low, high] pair per parameter
    BoundLim - absolute [min, max] limits the widened bounds may not exceed
    Returns the list of widened, clipped [low, high] pairs.
    """
    widened = []
    for i, bd in enumerate(Bounds):
        margin = 0.1 * (bd[1] - bd[0])
        widened.append([max(bd[0] - margin, BoundLim[i][0]),
                        min(BoundLim[i][1], bd[1] + margin)])
    return widened
def getTheWinners(VarName2Change, Matches20, Matches10, Matches5):
    """Pick the tightest match set that still holds more than 20 winners.

    Preference order is the 5% matches, then the 10% matches; the 20% matches
    are the fallback. Returns the selected dict and its number of winners
    (length of the first parameter's match list).
    """
    FirstParam = VarName2Change[0]
    for Candidate in (Matches5, Matches10):
        if len(Candidate[FirstParam]) > 20:
            return Candidate, len(Candidate[FirstParam])
    return Matches20, len(Matches20[FirstParam])
def getTheWeightedWinners(VarName2Change,Matches20, Matches10, Matches5):
    """Build a weighted pool of winning parameter values from the three match sets.

    Tighter matches (5%, then 10%) are repeated in the pool so they weigh more
    in the subsequent resampling. Returns (Matches, nbwinners) where Matches
    maps each parameter name to a numpy array of (possibly repeated) values and
    nbwinners sizes the next skewed sample.
    """
    Matches = {}
    #the number of winners taken for defning the sample size is kept as for the non weighted function
    if len(Matches5[VarName2Change[0]]) > 20:
        # Enough 5% matches: use them alone, unweighted.
        nbwinners = len(Matches5[VarName2Change[0]])
        for key in VarName2Change:
            Matches[key] = np.array(Matches5[key])
    elif len(Matches10[VarName2Change[0]]) > 20:
        for key in VarName2Change:
            Matches[key] = np.array(Matches10[key])
            # Append the 5% matches twice so they carry extra weight in the pool.
            for weigth in range(2):
                Matches[key] = np.append(Matches[key], Matches5[key])
        nbwinners = max(10,len(Matches10[VarName2Change[0]])/2)
    else:
        nbwinners = 10 #len(Matches20[CalibBasis][VarName2Change[0]]) this way, there will be half of the next sample
        for key in VarName2Change:
            Matches[key] = np.array(Matches20[key])
            # a weight of 3 is applied to 10% matches (the good ones are also in 20% so there is the need to add only 2)
            for weigth in range(2):
                Matches[key] = np.append(Matches[key], Matches10[key])
            # a weight of 5 is applied to 5% matches (the good ones are also in 20% and 10% so there is the need to add only 3)
            for weigth in range(3):
                Matches[key] = np.append(Matches[key], Matches5[key])
    return Matches, int(nbwinners)
def CompareSample(Finished,idx_offset, SimDir,CurrentPath,nbBuild,VarName2Change,CalibBasis,MeasPath,ParamSample,
                  Bounds,BoundLim,ParamMethods,NbRun):
    """Compare finished simulations with measurements and prepare the next sample.

    Loads simulation results from SimDir and the building's measurement pickle
    from MeasPath, computes errors on the chosen calibration basis, extracts the
    20/10/5% match sets, and either declares calibration finished or appends a
    new (partly correlation-skewed) batch of parameter sets to ParamSample,
    persisting it to SimDir/ParamSample.pickle.

    Returns (Finished, idx_offset, ParamSample) where idx_offset is the index
    of the first newly appended parameter set.
    """
    # once every run has been computed, lets get the matche and compute the covariance depending on the number of matches
    extraVar = ['nbAppartments', 'ATempOr', 'SharedBld', 'height', 'StoreyHeigth', 'nbfloor','BlocHeight','BlocFootprintArea','BlocNbFloor',
                'HeatedArea', 'AreaBasedFlowRate','NonHeatedArea', 'Other']
    Res = Utilities.GetData(os.path.join(SimDir, 'Sim_Results'), extraVar)
    os.chdir(CurrentPath)
    ComputfFilePath = os.path.normcase(MeasPath)
    #'C:\\Users\\xav77\\Documents\\FAURE\\prgm_python\\UrbanT\\Eplus4Mubes\\MUBES_SimResults\\ComputedElem4Calibration')
    with open(os.path.join(ComputfFilePath, 'Building_' + str(nbBuild) + '_Meas.pickle'),
              'rb') as handle:
        Meas = pickle.load(handle)
    Error = getErrorMatches(Res, Meas, CalibBasis)
    # Match sets at three tolerance levels (relative error / CV(RMSE) thresholds).
    Matches20 = getGoodParamList(Error,CalibBasis, VarName2Change, ParamSample, REMax=20, CVRMSMax = 30)
    Matches10 = getGoodParamList(Error,CalibBasis, VarName2Change, ParamSample, REMax=10, CVRMSMax = 20)
    Matches5 = getGoodParamList(Error, CalibBasis, VarName2Change, ParamSample, REMax=5, CVRMSMax=15)
    print('Nb of matches at 20% is : ' + str(len(Matches20[VarName2Change[0]])))
    print('Nb of matches at 10% is : ' + str(len(Matches10[VarName2Change[0]])))
    print('Nb of matches at 5% is : ' + str(len(Matches5[VarName2Change[0]])))
    #Matches, NbWinners = getTheWinners(VarName2Change,Matches20, Matches10, Matches5)
    Matches, NbWinners = getTheWeightedWinners(VarName2Change, Matches20, Matches10, Matches5)
    try:
        # Stop criteria: sample budget exhausted or enough tight matches found.
        if len(ParamSample[:, 0]) >= 2000 or len(Matches5[VarName2Change[0]]) > 100:
            Finished = True
        elif len(ParamSample[:, 0]) >= 1000 and len(Matches5[VarName2Change[0]]) < 5:
            Finished = True
        else:
            print('New runs loop')
            if len(Matches[VarName2Change[0]]) > 10:
                try:
                    # Part of the new batch is skewed toward the winners' joint distribution.
                    NBskewedRuns = min(NbRun,NbWinners+90)
                    print('Nd of skewed runs : '+str(NBskewedRuns))
                    NbNewRuns = NbRun-NBskewedRuns
                    #NewSample1 = getBootStrapedParam(Matches, VarName2Change, NBskewedRuns, BoundLim)
                    #NewSample1 = getOpenTurnsCorrelated(Matches, VarName2Change, NBskewedRuns, BoundLim)
                    NewSample1 = getOpenTurnsCorrelatedFromSample(Matches, VarName2Change, NBskewedRuns, BoundLim)
                    #NewSample1 = getCovarCalibratedParam(Matches, VarName2Change, NBskewedRuns, BoundLim)
                    if NbNewRuns > 0:
                        #lets make new bounds for non correlated sample, being in the same range as for correlated ones
                        openRange = 0
                        if len(Matches5[VarName2Change[0]]) < 10:
                            openRange = 1
                        ModifiedBounds = []
                        for i in range(len(NewSample1[0,:])):
                            fullRange = (BoundLim[i][1]-BoundLim[i][0])*openRange
                            ModifiedBounds.append([max(BoundLim[i][0], NewSample1[:, i].min() - 0.1*fullRange),
                                                   min(BoundLim[i][1], NewSample1[:, i].max() + 0.1*fullRange)])
                        NewSample2 = GrlFct.getParamSample(VarName2Change, ModifiedBounds, NbNewRuns,ParamMethods)
                        NewSample = np.append(NewSample1, NewSample2, axis=0)
                    else:
                        NewSample = NewSample1
                    print('Covariance worked !')
                except:
                    # Fall back to a plain (uncorrelated) sample over widened bounds.
                    print('Covariance did not work...')
                    Bounds = getNewBounds(Bounds, BoundLim)
                    NewSample = GrlFct.getParamSample(VarName2Change, Bounds, NbRun,ParamMethods)
            else:
                Bounds = getNewBounds(Bounds, BoundLim)
                NewSample = GrlFct.getParamSample(VarName2Change, Bounds, NbRun,ParamMethods)
            idx_offset = len(ParamSample[:, 0])
            ParamSample = np.concatenate((ParamSample, NewSample))
            Paramfile = os.path.join(SimDir, 'ParamSample.pickle')
            with open(Paramfile, 'wb') as handle:
                pickle.dump(ParamSample, handle, protocol=pickle.HIGHEST_PROTOCOL)
    except:
        # No match set could be built at all: widen bounds and resample.
        print('No matches at all from now...')
        if len(ParamSample[:, 0]) >= 2000:
            Finished = True
        else:
            Bounds = getNewBounds(Bounds, BoundLim)
            NewSample = GrlFct.getParamSample(VarName2Change, Bounds, NbRun,ParamMethods)
            idx_offset = len(ParamSample[:, 0])
            ParamSample = np.concatenate((ParamSample, NewSample))
            Paramfile = os.path.join(SimDir, 'ParamSample.pickle')
            with open(Paramfile, 'wb') as handle:
                pickle.dump(ParamSample, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return Finished,idx_offset,ParamSample
if __name__ == '__main__' :
print('CalibUtilities.py') | [
"CoreFiles.GeneralFunctions.getParamSample",
"openturns.IdentityMatrix",
"openturns.MarginalTransformationEvaluation",
"numpy.array",
"scipy.linalg.cholesky",
"openturns.ComposedDistribution",
"openturns.Normal",
"scipy.stats.norm.cdf",
"openturns.ResourceMap.SetAsBool",
"numpy.mean",
"openturns... | [((250, 338), 'openturns.ResourceMap.SetAsBool', 'ot.ResourceMap.SetAsBool', (['"""ComposedDistribution-UseGenericCovarianceAlgorithm"""', '(True)'], {}), "('ComposedDistribution-UseGenericCovarianceAlgorithm',\n True)\n", (274, 338), True, 'import openturns as ot\n'), ((5542, 5563), 'numpy.array', 'np.array', (['ParamSample'], {}), '(ParamSample)\n', (5550, 5563), True, 'import numpy as np\n'), ((6073, 6094), 'numpy.array', 'np.array', (['ParamSample'], {}), '(ParamSample)\n', (6081, 6094), True, 'import numpy as np\n'), ((6419, 6454), 'openturns.ComposedDistribution', 'ot.ComposedDistribution', (['pd', 'copula'], {}), '(pd, copula)\n', (6442, 6454), True, 'import openturns as ot\n'), ((6595, 6622), 'numpy.array', 'np.array', (['correlatedSamples'], {}), '(correlatedSamples)\n', (6603, 6622), True, 'import numpy as np\n'), ((7936, 7957), 'numpy.array', 'np.array', (['ParamSample'], {}), '(ParamSample)\n', (7944, 7957), True, 'import numpy as np\n'), ((8326, 8337), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (8334, 8337), True, 'import numpy as np\n'), ((8404, 8438), 'scipy.linalg.cholesky', 'linalg.cholesky', (['RStar'], {'lower': '(True)'}), '(RStar, lower=True)\n', (8419, 8438), False, 'from scipy import stats, linalg\n'), ((8459, 8472), 'numpy.dot', 'np.dot', (['P', 'xx'], {}), '(P, xx)\n', (8465, 8472), True, 'import numpy as np\n'), ((8480, 8509), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['x_xcorrelated'], {}), '(x_xcorrelated)\n', (8494, 8509), False, 'from scipy import stats, linalg\n'), ((9181, 9204), 'numpy.array', 'np.array', (['y_transformed'], {}), '(y_transformed)\n', (9189, 9204), True, 'import numpy as np\n'), ((9731, 9770), 'openturns.BootstrapExperiment', 'ot.BootstrapExperiment', (['NormalizedParam'], {}), '(NormalizedParam)\n', (9753, 9770), True, 'import openturns as ot\n'), ((10709, 10732), 'numpy.array', 'np.array', (['y_transformed'], {}), '(y_transformed)\n', (10717, 10732), True, 'import numpy as np\n'), ((13326, 
13347), 'os.chdir', 'os.chdir', (['CurrentPath'], {}), '(CurrentPath)\n', (13334, 13347), False, 'import os\n'), ((13370, 13396), 'os.path.normcase', 'os.path.normcase', (['MeasPath'], {}), '(MeasPath)\n', (13386, 13396), False, 'import os\n'), ((6872, 6903), 'openturns.ComposedDistribution', 'ot.ComposedDistribution', (['unifND'], {}), '(unifND)\n', (6895, 6903), True, 'import openturns as ot\n'), ((6959, 7010), 'openturns.MarginalTransformationEvaluation', 'ot.MarginalTransformationEvaluation', (['unifND', 'normND'], {}), '(unifND, normND)\n', (6994, 7010), True, 'import openturns as ot\n'), ((7323, 7370), 'openturns.MarginalTransformationEvaluation', 'ot.MarginalTransformationEvaluation', (['normND', 'pd'], {}), '(normND, pd)\n', (7358, 7370), True, 'import openturns as ot\n'), ((8264, 8294), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['xx[:, i]', '(0)', '(1)'], {}), '(xx[:, i], 0, 1)\n', (8278, 8294), False, 'from scipy import stats, linalg\n'), ((13275, 13310), 'os.path.join', 'os.path.join', (['SimDir', '"""Sim_Results"""'], {}), "(SimDir, 'Sim_Results')\n", (13287, 13310), False, 'import os\n'), ((13654, 13673), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (13665, 13673), False, 'import pickle\n'), ((3567, 3592), 'numpy.mean', 'np.mean', (['SampleEnergyMeas'], {}), '(SampleEnergyMeas)\n', (3574, 3592), True, 'import numpy as np\n'), ((6198, 6222), 'openturns.NormalCopulaFactory', 'ot.NormalCopulaFactory', ([], {}), '()\n', (6220, 6222), True, 'import openturns as ot\n'), ((6777, 6797), 'openturns.Uniform', 'ot.Uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6787, 6797), True, 'import openturns as ot\n'), ((6817, 6836), 'openturns.Normal', 'ot.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6826, 6836), True, 'import openturns as ot\n'), ((7160, 7182), 'openturns.IdentityMatrix', 'ot.IdentityMatrix', (['dim'], {}), '(dim)\n', (7177, 7182), True, 'import openturns as ot\n'), ((9620, 9641), 'numpy.array', 'np.array', (['ParamSample'], 
{}), '(ParamSample)\n', (9628, 9641), True, 'import numpy as np\n'), ((9676, 9701), 'numpy.array', 'np.array', (['NormalizedParam'], {}), '(NormalizedParam)\n', (9684, 9701), True, 'import numpy as np\n'), ((11696, 11719), 'numpy.array', 'np.array', (['Matches5[key]'], {}), '(Matches5[key])\n', (11704, 11719), True, 'import numpy as np\n'), ((6287, 6308), 'openturns.HistogramFactory', 'ot.HistogramFactory', ([], {}), '()\n', (6306, 6308), True, 'import openturns as ot\n'), ((10227, 10282), 'numpy.random.randint', 'np.random.randint', (['(0)', 'NewSampleAsArray.shape[0]', 'nbruns'], {}), '(0, NewSampleAsArray.shape[0], nbruns)\n', (10244, 10282), True, 'import numpy as np\n'), ((11831, 11855), 'numpy.array', 'np.array', (['Matches10[key]'], {}), '(Matches10[key])\n', (11839, 11855), True, 'import numpy as np\n'), ((12216, 12240), 'numpy.array', 'np.array', (['Matches20[key]'], {}), '(Matches20[key])\n', (12224, 12240), True, 'import numpy as np\n'), ((16943, 16983), 'numpy.concatenate', 'np.concatenate', (['(ParamSample, NewSample)'], {}), '((ParamSample, NewSample))\n', (16957, 16983), True, 'import numpy as np\n'), ((17008, 17050), 'os.path.join', 'os.path.join', (['SimDir', '"""ParamSample.pickle"""'], {}), "(SimDir, 'ParamSample.pickle')\n", (17020, 17050), False, 'import os\n'), ((17404, 17470), 'CoreFiles.GeneralFunctions.getParamSample', 'GrlFct.getParamSample', (['VarName2Change', 'Bounds', 'NbRun', 'ParamMethods'], {}), '(VarName2Change, Bounds, NbRun, ParamMethods)\n', (17425, 17470), True, 'import CoreFiles.GeneralFunctions as GrlFct\n'), ((17544, 17584), 'numpy.concatenate', 'np.concatenate', (['(ParamSample, NewSample)'], {}), '((ParamSample, NewSample))\n', (17558, 17584), True, 'import numpy as np\n'), ((17609, 17651), 'os.path.join', 'os.path.join', (['SimDir', '"""ParamSample.pickle"""'], {}), "(SimDir, 'ParamSample.pickle')\n", (17621, 17651), False, 'import os\n'), ((5763, 5802), 'numpy.reshape', 'np.reshape', (['covMat', '((idx + 1) ** 2, 1)'], 
{}), '(covMat, ((idx + 1) ** 2, 1))\n', (5773, 5802), True, 'import numpy as np\n'), ((11923, 11961), 'numpy.append', 'np.append', (['Matches[key]', 'Matches5[key]'], {}), '(Matches[key], Matches5[key])\n', (11932, 11961), True, 'import numpy as np\n'), ((12429, 12468), 'numpy.append', 'np.append', (['Matches[key]', 'Matches10[key]'], {}), '(Matches[key], Matches10[key])\n', (12438, 12468), True, 'import numpy as np\n'), ((12664, 12702), 'numpy.append', 'np.append', (['Matches[key]', 'Matches5[key]'], {}), '(Matches[key], Matches5[key])\n', (12673, 12702), True, 'import numpy as np\n'), ((16803, 16869), 'CoreFiles.GeneralFunctions.getParamSample', 'GrlFct.getParamSample', (['VarName2Change', 'Bounds', 'NbRun', 'ParamMethods'], {}), '(VarName2Change, Bounds, NbRun, ParamMethods)\n', (16824, 16869), True, 'import CoreFiles.GeneralFunctions as GrlFct\n'), ((17117, 17183), 'pickle.dump', 'pickle.dump', (['ParamSample', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(ParamSample, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (17128, 17183), False, 'import pickle\n'), ((17718, 17784), 'pickle.dump', 'pickle.dump', (['ParamSample', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(ParamSample, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (17729, 17784), False, 'import pickle\n'), ((16185, 16263), 'CoreFiles.GeneralFunctions.getParamSample', 'GrlFct.getParamSample', (['VarName2Change', 'ModifiedBounds', 'NbNewRuns', 'ParamMethods'], {}), '(VarName2Change, ModifiedBounds, NbNewRuns, ParamMethods)\n', (16206, 16263), True, 'import CoreFiles.GeneralFunctions as GrlFct\n'), ((16299, 16340), 'numpy.append', 'np.append', (['NewSample1', 'NewSample2'], {'axis': '(0)'}), '(NewSample1, NewSample2, axis=0)\n', (16308, 16340), True, 'import numpy as np\n'), ((16635, 16701), 'CoreFiles.GeneralFunctions.getParamSample', 'GrlFct.getParamSample', (['VarName2Change', 'Bounds', 'NbRun', 'ParamMethods'], {}), '(VarName2Change, Bounds, NbRun, ParamMethods)\n', (16656, 16701), 
True, 'import CoreFiles.GeneralFunctions as GrlFct\n')] |
#!/usr/bin/env python3
import argparse
import os
import sys
import json
import numpy as np
PROG = os.path.basename(sys.argv[0])
def main():
    """Repair raman.json files whose tensors were written without the prefactor.

    For each input file, compare the recorded per-mode averages against the
    averages recomputed from the stored tensors; if they disagree, rescale the
    tensors by the square root of the missing prefactor and write a
    '<path>.corrected' copy.
    """
    parser = argparse.ArgumentParser(
        description='Fix a raman.json file created before 99e7a42a5 (June 14)',
    )
    parser.add_argument('INPUT', nargs='*')
    parser.add_argument(
        '--temperature', type=float,
        help="specify temperature, for double checking that the missing prefactor matches expectation.",
    )
    parser.add_argument(
        '--mistake-no', type=parse_mistake, default=1,
        help="which mistake defines our expectation?"
             " 1: first mistake (completely missing),"
             " 2: second mistake (missing sqrt),"
             " *: any of the above mistakes",
    )
    args = parser.parse_args()
    for path in args.INPUT:
        corrected_loc = path + '.corrected'
        with open(path) as f:
            d = json.load(f)
        frequencies = np.array(d['frequency'])
        correct_averages = np.array(d['average-3d'])
        recorded_tensors = np.array(d['raman-tensor'])
        # Average intensity implied by the stored tensors (mean of squared entries).
        actual_averages = np.sum(recorded_tensors**2, axis=(1,2)) / 9
        if np.allclose(correct_averages, actual_averages, rtol=1e-10, atol=0):
            continue
        missing_prefactors = correct_averages / actual_averages
        # Acoustic / imaginary modes carry no prefactor.
        missing_prefactors[frequencies <= 0] = 0
        if args.temperature is not None:
            if check_expected_prefactors(frequencies, temperature=args.temperature, mistake=args.mistake_no, missing_prefactors=missing_prefactors):
                warn(f"{path} has missing prefactors that match expectation")
            else:
                warn(f"{path} has missing prefactors that DO NOT match expectation!!")
                # print(np.hstack([correct_averages[:, None], actual_averages[:, None]]), file=sys.stderr)
                # print(np.hstack([expected_prefactors[:, None], missing_prefactors[:, None]]), file=sys.stderr)
        else:
            warn(f"{path} has missing prefactors")
        warn(f"Writing {corrected_loc}")
        # Tensors scale with the square root since intensities are quadratic in them.
        correct_tensors = recorded_tensors * np.sqrt(missing_prefactors[:, None, None])
        d['raman-tensor'] = correct_tensors.tolist()
        with open(corrected_loc, 'w') as f:
            json.dump(d, f)
            print(file=f)
# Sentinel meaning "accept a match against any known mistake" (--mistake-no '*').
ANY_MISTAKE = object()
def parse_mistake(s):
    """Parse the --mistake-no argument.

    Accepts '1' or '2' (returned as int) or '*' (returned as the ANY_MISTAKE
    sentinel). Raises ValueError with a descriptive message for anything else.
    """
    if s == '*':
        return ANY_MISTAKE
    # Membership must be tested against a tuple, not the string '12':
    # `'' in '12'` is True and `'12' in '12'` is True, so the original check
    # accepted the empty string (then crashed inside int()) and '12'.
    elif s in ('1', '2'):
        return int(s)
    else:
        raise ValueError(f'invalid mistake: {repr(s)}')
def check_expected_prefactors(frequencies, temperature, missing_prefactors, mistake):
    """Check whether the observed missing prefactors match a known mistake.

    frequencies        : per-mode frequencies (array)
    temperature        : temperature used for the Bose occupation (0 -> occupation 1)
    missing_prefactors : ratios correct_average / actual_average per mode
    mistake            : 1 (prefactor completely missing), 2 (missing sqrt,
                         i.e. the inverse prefactor), or ANY_MISTAKE to try both

    Returns True if the missing prefactors equal the expectation exactly
    (np.allclose with atol=0).
    """
    if mistake is ANY_MISTAKE:
        return any(
            check_expected_prefactors(
                frequencies=frequencies,
                temperature=temperature,
                missing_prefactors=missing_prefactors,
                mistake=m,
            ) for m in [1, 2]
        )
    # Physical constant combining h, c and k_B for the units of `frequencies`
    # used by this project — presumably cm^-1-like; TODO confirm units.
    hk = 0.22898852319
    if temperature == 0:
        bose_occupation = 1
    else:
        expm1 = np.expm1(hk * frequencies / temperature)
        bose_occupation = (1.0 + 1.0 / expm1)
    prefactors = bose_occupation / frequencies
    if mistake == 1:
        expected_prefactors = np.where(frequencies <= 0.0, 0.0, prefactors)
    elif mistake == 2:
        expected_prefactors = np.where(frequencies <= 0.0, 0.0, prefactors ** -1)
    else:
        raise ValueError('mistake')
    return np.allclose(expected_prefactors, missing_prefactors, atol=0)
# ------------------------------------------------------
def warn(*args, **kw):
    """Print a diagnostic message to stderr, prefixed with the program name."""
    print('{}:'.format(PROG), *args, file=sys.stderr, **kw)
def die(*args, code=1):
    """Print a fatal error message to stderr and terminate with exit *code*."""
    warn('Fatal:', *args)
    sys.exit(code)
# ------------------------------------------------------
if __name__ == '__main__':
    # Script entry point: repair the raman.json files named on the command line.
    main()
| [
"numpy.allclose",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.where",
"numpy.expm1",
"numpy.array",
"numpy.sum",
"os.path.basename",
"sys.exit",
"json.load",
"json.dump"
] | [((100, 129), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (116, 129), False, 'import os\n'), ((156, 256), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fix a raman.json file created before 99e7a42a5 (June 14)"""'}), "(description=\n 'Fix a raman.json file created before 99e7a42a5 (June 14)')\n", (179, 256), False, 'import argparse\n'), ((3400, 3460), 'numpy.allclose', 'np.allclose', (['expected_prefactors', 'missing_prefactors'], {'atol': '(0)'}), '(expected_prefactors, missing_prefactors, atol=0)\n', (3411, 3460), True, 'import numpy as np\n'), ((3650, 3664), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (3658, 3664), False, 'import sys\n'), ((962, 986), 'numpy.array', 'np.array', (["d['frequency']"], {}), "(d['frequency'])\n", (970, 986), True, 'import numpy as np\n'), ((1014, 1039), 'numpy.array', 'np.array', (["d['average-3d']"], {}), "(d['average-3d'])\n", (1022, 1039), True, 'import numpy as np\n'), ((1067, 1094), 'numpy.array', 'np.array', (["d['raman-tensor']"], {}), "(d['raman-tensor'])\n", (1075, 1094), True, 'import numpy as np\n'), ((1177, 1243), 'numpy.allclose', 'np.allclose', (['correct_averages', 'actual_averages'], {'rtol': '(1e-10)', 'atol': '(0)'}), '(correct_averages, actual_averages, rtol=1e-10, atol=0)\n', (1188, 1243), True, 'import numpy as np\n'), ((3005, 3045), 'numpy.expm1', 'np.expm1', (['(hk * frequencies / temperature)'], {}), '(hk * frequencies / temperature)\n', (3013, 3045), True, 'import numpy as np\n'), ((3191, 3236), 'numpy.where', 'np.where', (['(frequencies <= 0.0)', '(0.0)', 'prefactors'], {}), '(frequencies <= 0.0, 0.0, prefactors)\n', (3199, 3236), True, 'import numpy as np\n'), ((926, 938), 'json.load', 'json.load', (['f'], {}), '(f)\n', (935, 938), False, 'import json\n'), ((1121, 1163), 'numpy.sum', 'np.sum', (['(recorded_tensors ** 2)'], {'axis': '(1, 2)'}), '(recorded_tensors ** 2, axis=(1, 2))\n', (1127, 1163), True, 'import numpy as 
np\n'), ((2128, 2170), 'numpy.sqrt', 'np.sqrt', (['missing_prefactors[:, None, None]'], {}), '(missing_prefactors[:, None, None])\n', (2135, 2170), True, 'import numpy as np\n'), ((2280, 2295), 'json.dump', 'json.dump', (['d', 'f'], {}), '(d, f)\n', (2289, 2295), False, 'import json\n'), ((3290, 3341), 'numpy.where', 'np.where', (['(frequencies <= 0.0)', '(0.0)', '(prefactors ** -1)'], {}), '(frequencies <= 0.0, 0.0, prefactors ** -1)\n', (3298, 3341), True, 'import numpy as np\n')] |
import numpy as np
from .. import base
from river.utils.skmultiflow_utils import check_random_state
class LED(base.SyntheticDataset):
    """ LED stream generator.

    Emits samples that emulate a seven-segment LED display showing the
    digits 0-9. Each of the seven relevant binary attributes has a chance
    (controlled by `noise_percentage`) of being inverted, which caps the
    optimal Bayes classification rate at 74%. The data source originates
    from the CART book [^1]; a C implementation was donated to the UCI [^2]
    machine learning repository by <NAME>. Optionally, 17 irrelevant binary
    attributes are appended, for 24 attributes in total.
    Parameters
    ----------
    seed
        If int, `seed` is used to seed the random number generator;
        If RandomState instance, `seed` is the random number generator;
        If None, the random number generator is the `RandomState` instance used
        by `np.random`.
    noise_percentage
        The probability that noise will happen in the generation. At each
        new sample generated, a random number is generated, and if it is equal
        or less than the noise_percentage, the led value will be switched
    irrelevant_features
        Adds 17 non-relevant attributes to the stream.
    Examples
    --------
    >>> from river import synth
    >>> dataset = synth.LED(seed = 112, noise_percentage = 0.28, irrelevant_features= False)
    >>> for x, y in dataset.take(5):
    ...     print(x, y)
    {0: 0, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 0} 4
    {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0, 6: 0} 4
    {0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1} 3
    {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 1, 6: 1} 0
    {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 1, 6: 0} 4
    Notes
    -----
    An instance is generated based on the parameters passed. If `has_noise`
    is set then the total number of attributes will be 24, otherwise there will
    be 7 attributes.
    References
    ----------
    [^1]: <NAME>, <NAME>, <NAME>, and <NAME>.
          Classification and Regression Trees. Wadsworth and Brooks,
          Monterey, CA,1984.
    [^2]: <NAME> and <NAME>. UCI Machine Learning Repository
          [http://www.ics.uci.edu/~mlearn/mlrepository.html].
          University of California, Irvine, School of Information and
          Computer Sciences,2007.
    """
    _N_RELEVANT_FEATURES = 7
    _N_FEATURES_INCLUDING_NOISE = 24
    # Segment patterns for the digits 0-9 on a seven-segment display.
    _ORIGINAL_INSTANCES = np.array(
        [[1, 1, 1, 0, 1, 1, 1],
         [0, 0, 1, 0, 0, 1, 0],
         [1, 0, 1, 1, 1, 0, 1],
         [1, 0, 1, 1, 0, 1, 1],
         [0, 1, 1, 1, 0, 1, 0],
         [1, 1, 0, 1, 0, 1, 1],
         [1, 1, 0, 1, 1, 1, 1],
         [1, 0, 1, 0, 0, 1, 0],
         [1, 1, 1, 1, 1, 1, 1],
         [1, 1, 1, 1, 0, 1, 1]], dtype=int)
    def __init__(self, seed: int or np.random.RandomState = None,
                 noise_percentage: float = 0.0, irrelevant_features: bool = False):
        n_feats = (self._N_FEATURES_INCLUDING_NOISE if irrelevant_features
                   else self._N_RELEVANT_FEATURES)
        super().__init__(n_features=n_feats, n_classes=10, n_outputs=1,
                         task=base.MULTI_CLF)
        self.seed = seed
        # Actual RandomState object; created lazily when iteration starts.
        self._rng = None
        if not 0.0 <= noise_percentage <= 1.0:
            raise ValueError(f"Invalid noise_percentage ({noise_percentage}). "
                             "Valid range is [0.0, 1.0]")
        self.noise_percentage = noise_percentage
        self.irrelevant_features = irrelevant_features
        self.n_cat_features = self.n_features
        self.target_values = list(range(self.n_classes))
    def __iter__(self):
        self._rng = check_random_state(self.seed)
        while True:
            sample = dict()
            target = self._rng.randint(self.n_classes)
            # Relevant segments: copy the digit's pattern, flipping each bit
            # when the noise draw falls below the configured threshold.
            for idx in range(self._N_RELEVANT_FEATURES):
                segment = self._ORIGINAL_INSTANCES[target, idx]
                noisy = (0.01 + self._rng.rand()) <= self.noise_percentage
                sample[idx] = int(segment == 0) if noisy else segment
            # Irrelevant attributes are independent coin flips.
            if self.irrelevant_features:
                for idx in range(self._N_RELEVANT_FEATURES, self._N_FEATURES_INCLUDING_NOISE):
                    sample[idx] = self._rng.randint(2)
            yield sample, target
class LEDDrift(LED):
    """ LED stream generator with concept drift.
    This class is an extension of the `LED` generator whose purpose is to add
    concept drift to the stream.
    Parameters
    ----------
    seed
        If int, `seed` is used to seed the random number generator;
        If RandomState instance, `seed` is the random number generator;
        If None, the random number generator is the `RandomState` instance used
        by `np.random`.
    noise_percentage
        The probability that noise will happen in the generation. At each
        new sample generated, a random number is generated, and if it is equal
        or less than the noise_percentage, the led value will be switched
    irrelevant_features
        Adds 17 non-relevant attributes to the stream.
    n_drift_features
        The number of attributes that have drift.
    Examples
    --------
    >>> from river import synth
    >>> dataset = synth.LEDDrift(seed = 112, noise_percentage = 0.28,
    ...                          irrelevant_features= True, n_drift_features=4)
    >>> for x, y in dataset.take(5):
    ...     print(list(x.values()), y)
    [1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1] 8
    [0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1] 5
    [1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1] 8
    [0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0] 3
    [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0] 5
    Notes
    -----
    An instance is generated based on the parameters passed. If `has_noise`
    is set then the total number of attributes will be 24, otherwise there will
    be 7 attributes.
    """
    # Number of extra, non-informative binary attributes (24 total - 7 relevant).
    _N_IRRELEVANT_ATTRIBUTES = 17
    def __init__(self, seed: int or np.random.RandomState = None,
                 noise_percentage: float = 0.0, irrelevant_features: bool = False,
                 n_drift_features: int = 0):
        super().__init__(seed=seed, noise_percentage=noise_percentage,
                         irrelevant_features=irrelevant_features)
        self.n_drift_features = n_drift_features
    def __iter__(self):
        self._rng = check_random_state(self.seed)
        # Identity mapping from logical attribute position to output position;
        # drift is realised by swapping entries in this index array.
        self._attr_idx = np.arange(self._N_FEATURES_INCLUDING_NOISE)
        # Change attributes
        if self.irrelevant_features and self.n_drift_features > 0:
            # Pick random starting points among the 7 relevant and the 17
            # irrelevant attributes, then swap `n_drift_features` pairs so
            # relevant signals end up at irrelevant positions and vice versa.
            random_int = self._rng.randint(7)
            offset = self._rng.randint(self._N_IRRELEVANT_ATTRIBUTES)
            for i in range(self.n_drift_features):
                value_1 = (i + random_int) % 7
                value_2 = 7 + (i + offset) % self._N_IRRELEVANT_ATTRIBUTES
                self._attr_idx[value_1] = value_2
                self._attr_idx[value_2] = value_1
        while True:
            x = {i: -1 for i in range(self.n_features)}  # Initialize to keep order in dictionary
            y = self._rng.randint(self.n_classes)
            # Relevant segments, written through the (possibly swapped) index map;
            # each segment is inverted when the noise draw is below the threshold.
            for i in range(self._N_RELEVANT_FEATURES):
                if (0.01 + self._rng.rand()) <= self.noise_percentage:
                    x[self._attr_idx[i]] = int(self._ORIGINAL_INSTANCES[y, i] == 0)
                else:
                    x[self._attr_idx[i]] = self._ORIGINAL_INSTANCES[y, i]
            # Irrelevant attributes are independent coin flips.
            if self.irrelevant_features:
                for i in range(self._N_RELEVANT_FEATURES, self._N_FEATURES_INCLUDING_NOISE):
                    x[self._attr_idx[i]] = self._rng.randint(2)
            yield x, y
| [
"numpy.array",
"river.utils.skmultiflow_utils.check_random_state",
"numpy.arange"
] | [((2430, 2694), 'numpy.array', 'np.array', (['[[1, 1, 1, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1, 0], [1, 0, 1, 1, 1, 0, 1], [1, 0,\n 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 1, 0], [1, 1, 0, 1, 0, 1, 1], [1, 1, 0,\n 1, 1, 1, 1], [1, 0, 1, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1,\n 0, 1, 1]]'], {'dtype': 'int'}), '([[1, 1, 1, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1, 0], [1, 0, 1, 1, 1, 0, \n 1], [1, 0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 1, 0], [1, 1, 0, 1, 0, 1, 1],\n [1, 1, 0, 1, 1, 1, 1], [1, 0, 1, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1], [1,\n 1, 1, 1, 0, 1, 1]], dtype=int)\n', (2438, 2694), True, 'import numpy as np\n'), ((3933, 3962), 'river.utils.skmultiflow_utils.check_random_state', 'check_random_state', (['self.seed'], {}), '(self.seed)\n', (3951, 3962), False, 'from river.utils.skmultiflow_utils import check_random_state\n'), ((6756, 6785), 'river.utils.skmultiflow_utils.check_random_state', 'check_random_state', (['self.seed'], {}), '(self.seed)\n', (6774, 6785), False, 'from river.utils.skmultiflow_utils import check_random_state\n'), ((6811, 6854), 'numpy.arange', 'np.arange', (['self._N_FEATURES_INCLUDING_NOISE'], {}), '(self._N_FEATURES_INCLUDING_NOISE)\n', (6820, 6854), True, 'import numpy as np\n')] |
#!/usr/local/bin/python3
# Linear regression of P100 GPU memory usage against Sedov problem size.
# Based on: https://data36.com/linear-regression-in-python-numpy-polyfit/
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from sklearn.metrics import r2_score
# {{{
# Measured data: n_sedov is the grid size per side, MiB_gpu the observed
# GPU memory footprint (MiB).
n_sedov = [
    278,
    288,
    297,
    306,
    314,
    318,
    322,
    330,
    337,
    344,
    350,
    363,
    369,
    375,
    380,
    386,
    391,
    396,
    401,
    406,
    411,
    416,
    420,
]
MiB_gpu = [
    4759,
    5291,
    5731,
    6271,
    6759,
    6979,
    7247,
    7785,
    8275,
    8767,
    9247,
    10277,
    10767,
    11305,
    11741,
    12285,
    12769,
    13257,
    13747,
    14239,
    14777,
    15317,
    15751,
]
# }}}
p100 = {'n_sedov': n_sedov, 'MiB_gpu': MiB_gpu}
mydata = pd.DataFrame(data=p100)
x = mydata.n_sedov
# Memory should scale with the number of cells, i.e. n^3, so fit against n^3.
xx = [i**3 for i in x]
y = mydata.MiB_gpu
# Fit MiB = model[0] * n^3 + model[1].
model = np.polyfit(xx, y, 1)
print("fit coefficients (slope, intercept):", model)
predict = np.poly1d(model)
# Report goodness of fit (previously computed but silently discarded).
print("R^2:", r2_score(y, predict(xx)))
# NOTE(review): the scatter uses n while the fitted line spans a fixed n^3
# range (1.8e6-6.2e6); on shared axes these do not overlap — confirm the
# intended x scale before relying on the plot.
x_lin_reg = range(1800000, 6200000)
y_lin_reg = predict(x_lin_reg)
plt.scatter(x, y)
plt.plot(x_lin_reg, y_lin_reg, c='r')
# Add plt.show() or plt.savefig(...) when running non-interactively.
| [
"numpy.polyfit",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.poly1d"
] | [((547, 570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'p100'}), '(data=p100)\n', (559, 570), True, 'import pandas as pd\n'), ((659, 679), 'numpy.polyfit', 'np.polyfit', (['xx', 'y', '(1)'], {}), '(xx, y, 1)\n', (669, 679), True, 'import numpy as np\n'), ((769, 785), 'numpy.poly1d', 'np.poly1d', (['model'], {}), '(model)\n', (778, 785), True, 'import numpy as np\n'), ((984, 1001), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (995, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1039), 'matplotlib.pyplot.plot', 'plt.plot', (['x_lin_reg', 'y_lin_reg'], {'c': '"""r"""'}), "(x_lin_reg, y_lin_reg, c='r')\n", (1010, 1039), True, 'import matplotlib.pyplot as plt\n')] |
###############################################################################
# "Sentiment-driven statistical causality in multimodal systems"
#
# <NAME>, <NAME>, <NAME> and <NAME>
#
# <NAME>, <EMAIL>
# April 2021
###############################################################################
import time
import datetime
from python.utils import get_timeseries_summary_per_day, get_timestamp_gaps_dates
import pickle
import ipdb
import os
from scipy.io import savemat
import numpy as np
import pandas as pd
def convolve(x, y):
    """Sum of x weighted by y reversed, i.e. sum_i x[i] * y[len(y)-1-i].

    Equivalent to one point of a discrete convolution; used below to apply
    an exponential-decay kernel where the newest observation gets the
    largest weight.
    """
    return np.sum(np.asarray(x) * np.asarray(y)[::-1])
if __name__ == "__main__":
    # Pipeline: (1) load per-site, per-coin sentiment time-series pickles,
    # (2) collapse them into one summary value per day, (3) combine the five
    # coins with per-coin exponential-decay weighting, (4) forward-fill gaps
    # in the date index, and (5) export everything for the MATLAB analysis.
    DIR = "./data/sentiment"
    DIR_out = "./data/output_nlp2"
    if not os.path.exists(DIR_out):
        os.makedirs(DIR_out)
    kwargs_list = ["BTC", "ETH", "LTC", "XRP", "TRX"]
    sites = ["cryptoslate", "cryptodaily"]
    # One pickle per sentiment-filtered time-series variant.
    ts = ["ts_recursive_cumulative_freq_neg.pickle", "ts_recursive_cumulative_freq.pickle",
          "ts_recursive_cumulative_freq_pos.pickle", "ts_recursive_cumulative_freq_neutral.pickle",
          "ts_token_entropy.pickle", "ts_token_entropy_neg.pickle", "ts_token_entropy_pos.pickle",
          "ts_token_entropy_neutral.pickle"]
    matlab_output = dict()
    matlab_output_fwd = dict()
    # decay rates (per-coin exponential decay constants used in the weighting)
    btc_rate = 0.015
    eth_rate = 0.025
    ltc_rate = 0.035
    xrp_rate = 0.045
    trx_rate = 0.065
    t0 = time.time()
    for t in ts:
        coins_meta = []
        # unique date keys
        idxs = []
        coins_series = []
        for top in range(len(kwargs_list)):
            # Gather this coin's raw series across all sites.
            series = []
            extra = []
            elem = []
            for site in sites:
                coin = kwargs_list[top]
                ts_name = "{}/{}_{}/".format(DIR, site, coin)
                try:
                    with open(ts_name + "/timeseries_elements.pickle", "rb") as f:
                        timeseries_elements = pickle.load(f)
                        metadata = pickle.load(f)
                except FileNotFoundError:
                    # NOTE(review): if this file is missing on the very first
                    # site, `timeseries_elements`/`metadata` stay unbound and
                    # the code below would raise NameError — confirm intent.
                    print("{} was not found - check ts construction script".format(ts_name
                                                                                   + "/timeseries_elements.pickle"))
                meta = []
                ts_elements = []
                try:
                    with open(ts_name+t, "rb") as f:
                        ts_data = pickle.load(f)
                    # Sentiment-specific variants ship their own metadata file,
                    # which overrides the generic elements/metadata when present.
                    if "neg" in ts_name+t or "pos" in ts_name+t or "neutral" in ts_name+t:
                        if "neg" in ts_name+t:
                            senti = "neg"
                        elif "pos" in ts_name+t:
                            senti = "pos"
                        elif "neutral" in ts_name+t:
                            senti = "neutral"
                        with open(ts_name+"ts_recursive_cumulative_freq_" + senti + "_metadata.pickle", "rb") as g:
                            ts_elements = pickle.load(g)
                            meta = pickle.load(g)
                    if len(ts_elements) > 0:
                        telements = ts_elements
                        metadata = meta
                    else:
                        telements = timeseries_elements
                except FileNotFoundError:
                    # Skip this site entirely when the variant pickle is missing.
                    print("{} was not found.".format(ts_name+t))
                    continue
                series.extend(ts_data)
                extra.extend(metadata)
                elem.extend(telements)
            # Collapse all observations of this coin into one value per day
            # (IQR summary). `time_batches` is currently unused.
            summary_ts, summary_meta, time_batches = get_timeseries_summary_per_day(series, elem, extra, summary="IQR")
            idxs.extend([sk for sk in summary_ts.keys() if sk not in idxs])
            coins_series.append(summary_ts)
            coins_meta.append(summary_meta)
        # Persist the per-coin daily summaries before joining them.
        with open(DIR_out + "/" + t.replace(".pickle", "_pre_join_data.pickle"), "wb") as f:
            pickle.dump(coins_series, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(coins_meta, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(idxs, f, pickle.HIGHEST_PROTOCOL)
        # sorting and weighting
        total_summaries = []
        total_meta = []
        # Per-coin histories (in date order) and their decay-weighted sums.
        btc_past = []
        eth_past = []
        ltc_past = []
        xrp_past = []
        trx_past = []
        btc_weights = []
        eth_weights = []
        ltc_weights = []
        xrp_weights = []
        trx_weights = []
        # Exponential decay kernels, one weight per day in the joined index.
        btc_decay = np.exp(-btc_rate*np.arange(0, len(idxs), 1))
        eth_decay = np.exp(-eth_rate * np.arange(0, len(idxs), 1))
        ltc_decay = np.exp(-ltc_rate * np.arange(0, len(idxs), 1))
        xrp_decay = np.exp(-xrp_rate * np.arange(0, len(idxs), 1))
        trx_decay = np.exp(-trx_rate * np.arange(0, len(idxs), 1))
        idxs = sorted(idxs)
        for j in range(len(idxs)):
            i = idxs[j]
            total_sum = 0  # NOTE(review): assigned but never used
            # For each coin: value and metadata for day `i`, defaulting to
            # zero/empty when the coin has no data that day. The *_num
            # variables are currently unused. Bare `except` also swallows
            # non-KeyError failures — confirm that is intended.
            try:
                # BTC val and ngram number
                btc = coins_series[0][i]
                btc_num = coins_meta[0][i]["number of ngrams"]
                btc_sum = coins_meta[0][i]
            except:
                btc = 0
                btc_num = 0
                btc_sum = []
            btc_past.append(btc)
            try:
                # ETH
                eth = coins_series[1][i]
                eth_num = coins_meta[1][i]["number of ngrams"]
                eth_sum = coins_meta[1][i]
            except:
                eth = 0
                eth_num = 0
                eth_sum = []
            eth_past.append(eth)
            try:
                # LTC
                ltc = coins_series[2][i]
                ltc_num = coins_meta[2][i]["number of ngrams"]
                ltc_sum = coins_meta[2][i]
            except:
                ltc = 0
                ltc_num = 0
                ltc_sum = {}
            ltc_past.append(ltc)
            try:
                # XRP
                xrp = coins_series[3][i]
                xrp_num = coins_meta[3][i]["number of ngrams"]
                xrp_sum = coins_meta[3][i]
            except:
                xrp = 0
                xrp_num = 0
                xrp_sum = []
            xrp_past.append(xrp)
            try:
                # TRX
                trx = coins_series[4][i]
                trx_num = coins_meta[4][i]["number of ngrams"]
                trx_sum = coins_meta[4][i]
            except:
                trx = 0
                trx_num = 0
                trx_sum = []
            trx_past.append(trx)
            # Decay-weighted sum of each coin's history up to day j (newest
            # observation gets the largest weight, per `convolve`).
            b = convolve(btc_past, btc_decay[:j+1])
            btc_weights.append(b)
            e = convolve(eth_past, eth_decay[:j+1])
            eth_weights.append(e)
            lt = convolve(ltc_past, ltc_decay[:j+1])
            ltc_weights.append(lt)
            tr = convolve(trx_past, trx_decay[:j+1])
            trx_weights.append(tr)
            xr = convolve(xrp_past, xrp_decay[:j+1])
            xrp_weights.append(xr)
            total_summaries.append(b + e + lt + tr + xr)
            total_meta.append([btc_sum, eth_sum, ltc_sum, xrp_sum, trx_sum])
        with open(DIR_out + "/" + t.replace(".pickle", "_total.pickle"), "wb") as f:
            pickle.dump(total_summaries, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(total_meta, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(idxs, f, pickle.HIGHEST_PROTOCOL)
        matlab_output[t.replace(".pickle", "_total")] = total_summaries
        matlab_output[t.replace(".pickle", "_total_metadata")] = total_meta
        matlab_output[t.replace(".pickle", "_time")] = [str(i) for i in idxs]
        # Forward-fill over gaps in the date index so the series is
        # contiguous; each gap_idx entry is used as (position, gap length).
        gap_idx = get_timestamp_gaps_dates(idxs, DIR_out + "/" + t.replace(".pickle", "_gaps.html"))
        gap_indices = [i[0] for i in gap_idx]
        total_summaries_fwd = []
        total_meta_fwd = []
        idxss = []
        btc_weights_extend = []
        eth_weights_extend = []
        ltc_weights_extend = []
        xrp_weights_extend = []
        trx_weights_extend = []
        ftxt = open(DIR_out + "/gap_dates.txt", "wt")
        for i in range(len(idxs)):
            if i in gap_indices:
                # Repeat the last observed summary/metadata across the gap.
                total_summaries_fwd.extend(gap_idx[gap_indices.index(i)][1]*[total_summaries[i]])
                total_meta_fwd.extend(gap_idx[gap_indices.index(i)][1] * [total_meta[i]])
                # for gap days, append zeros when there's a gap
                btc_weights_extend.extend(gap_idx[gap_indices.index(i)][1]*[0])
                eth_weights_extend.extend(gap_idx[gap_indices.index(i)][1]*[0])
                ltc_weights_extend.extend(gap_idx[gap_indices.index(i)][1]*[0])
                xrp_weights_extend.extend(gap_idx[gap_indices.index(i)][1]*[0])
                trx_weights_extend.extend(gap_idx[gap_indices.index(i)][1]*[0])
                # from 0 as we also need idxs[i]
                for j in range(0, gap_idx[gap_indices.index(i)][1], 1):
                    idxss.append(idxs[i] + np.timedelta64(j, 'D'))
                    # for j = 0 we have the last day with non zero counts before the gap
                    if j > 0:
                        ftxt.write(str(idxs[i] + np.timedelta64(j, 'D')) + "\n")
            else:
                # No gap: carry over the day as-is.
                btc_weights_extend.append(btc_weights[i])
                eth_weights_extend.append(eth_weights[i])
                ltc_weights_extend.append(ltc_weights[i])
                xrp_weights_extend.append(xrp_weights[i])
                trx_weights_extend.append(trx_weights[i])
                total_summaries_fwd.append(total_summaries[i])
                total_meta_fwd.append(total_meta[i])
                idxss.append(idxs[i])
        ftxt.close()
        # Per-coin decayed weights are written only once (first variant wins).
        if not os.path.isfile(DIR_out + "/project_decays.pickle"):
            with open(DIR_out + "/project_decays.pickle", "wb") as f:
                pickle.dump(btc_weights_extend, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(eth_weights_extend, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(ltc_weights_extend, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(xrp_weights_extend, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(trx_weights_extend, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(idxss, f, pickle.HIGHEST_PROTOCOL)
        with open(DIR_out + "/" + t.replace(".pickle", "_total_fwd.pickle"), "wb") as f:
            pickle.dump(total_summaries_fwd, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(total_meta_fwd, f, pickle.HIGHEST_PROTOCOL)
            pickle.dump(idxss, f, pickle.HIGHEST_PROTOCOL)
        matlab_output_fwd[t.replace(".pickle", "_total")] = total_summaries_fwd
        matlab_output_fwd[t.replace(".pickle", "_total_metadata")] = total_meta_fwd
        matlab_output_fwd[t.replace(".pickle", "_time")] = [str(i) for i in idxss]
        matlab_output_fwd[t.replace(".pickle", "_gaps")] = gap_idx
        # Sanity report: remaining gaps after forward-filling (result unused).
        gap_idx = get_timestamp_gaps_dates(idxss, DIR_out + "/" + t.replace(".pickle", "_gaps_after.html"))
    # Export everything for the MATLAB causality analysis.
    savemat(DIR_out + "/summaries_matlab.mat", matlab_output)
    savemat(DIR_out + "/summaries_matlab_carry_fwd.mat", matlab_output_fwd)
    t1 = time.time()
    print("Time to completion: "+str(datetime.timedelta(seconds=t1-t0)))
| [
"os.path.exists",
"numpy.flip",
"scipy.io.savemat",
"pickle.dump",
"os.makedirs",
"pickle.load",
"os.path.isfile",
"python.utils.get_timeseries_summary_per_day",
"numpy.timedelta64",
"datetime.timedelta",
"time.time"
] | [((1373, 1384), 'time.time', 'time.time', ([], {}), '()\n', (1382, 1384), False, 'import time\n'), ((10905, 10962), 'scipy.io.savemat', 'savemat', (["(DIR_out + '/summaries_matlab.mat')", 'matlab_output'], {}), "(DIR_out + '/summaries_matlab.mat', matlab_output)\n", (10912, 10962), False, 'from scipy.io import savemat\n'), ((10967, 11038), 'scipy.io.savemat', 'savemat', (["(DIR_out + '/summaries_matlab_carry_fwd.mat')", 'matlab_output_fwd'], {}), "(DIR_out + '/summaries_matlab_carry_fwd.mat', matlab_output_fwd)\n", (10974, 11038), False, 'from scipy.io import savemat\n'), ((11049, 11060), 'time.time', 'time.time', ([], {}), '()\n', (11058, 11060), False, 'import time\n'), ((689, 712), 'os.path.exists', 'os.path.exists', (['DIR_out'], {}), '(DIR_out)\n', (703, 712), False, 'import os\n'), ((722, 742), 'os.makedirs', 'os.makedirs', (['DIR_out'], {}), '(DIR_out)\n', (733, 742), False, 'import os\n'), ((570, 580), 'numpy.flip', 'np.flip', (['y'], {}), '(y)\n', (577, 580), True, 'import numpy as np\n'), ((3572, 3638), 'python.utils.get_timeseries_summary_per_day', 'get_timeseries_summary_per_day', (['series', 'elem', 'extra'], {'summary': '"""IQR"""'}), "(series, elem, extra, summary='IQR')\n", (3602, 3638), False, 'from python.utils import get_timeseries_summary_per_day, get_timestamp_gaps_dates\n'), ((3909, 3962), 'pickle.dump', 'pickle.dump', (['coins_series', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(coins_series, f, pickle.HIGHEST_PROTOCOL)\n', (3920, 3962), False, 'import pickle\n'), ((3975, 4026), 'pickle.dump', 'pickle.dump', (['coins_meta', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(coins_meta, f, pickle.HIGHEST_PROTOCOL)\n', (3986, 4026), False, 'import pickle\n'), ((4039, 4084), 'pickle.dump', 'pickle.dump', (['idxs', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(idxs, f, pickle.HIGHEST_PROTOCOL)\n', (4050, 4084), False, 'import pickle\n'), ((7147, 7203), 'pickle.dump', 'pickle.dump', (['total_summaries', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(total_summaries, f, 
pickle.HIGHEST_PROTOCOL)\n', (7158, 7203), False, 'import pickle\n'), ((7220, 7271), 'pickle.dump', 'pickle.dump', (['total_meta', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(total_meta, f, pickle.HIGHEST_PROTOCOL)\n', (7231, 7271), False, 'import pickle\n'), ((7288, 7333), 'pickle.dump', 'pickle.dump', (['idxs', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(idxs, f, pickle.HIGHEST_PROTOCOL)\n', (7299, 7333), False, 'import pickle\n'), ((9608, 9658), 'os.path.isfile', 'os.path.isfile', (["(DIR_out + '/project_decays.pickle')"], {}), "(DIR_out + '/project_decays.pickle')\n", (9622, 9658), False, 'import os\n'), ((10279, 10339), 'pickle.dump', 'pickle.dump', (['total_summaries_fwd', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(total_summaries_fwd, f, pickle.HIGHEST_PROTOCOL)\n', (10290, 10339), False, 'import pickle\n'), ((10356, 10411), 'pickle.dump', 'pickle.dump', (['total_meta_fwd', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(total_meta_fwd, f, pickle.HIGHEST_PROTOCOL)\n', (10367, 10411), False, 'import pickle\n'), ((10428, 10474), 'pickle.dump', 'pickle.dump', (['idxss', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(idxss, f, pickle.HIGHEST_PROTOCOL)\n', (10439, 10474), False, 'import pickle\n'), ((9746, 9805), 'pickle.dump', 'pickle.dump', (['btc_weights_extend', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(btc_weights_extend, f, pickle.HIGHEST_PROTOCOL)\n', (9757, 9805), False, 'import pickle\n'), ((9822, 9881), 'pickle.dump', 'pickle.dump', (['eth_weights_extend', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(eth_weights_extend, f, pickle.HIGHEST_PROTOCOL)\n', (9833, 9881), False, 'import pickle\n'), ((9898, 9957), 'pickle.dump', 'pickle.dump', (['ltc_weights_extend', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(ltc_weights_extend, f, pickle.HIGHEST_PROTOCOL)\n', (9909, 9957), False, 'import pickle\n'), ((9974, 10033), 'pickle.dump', 'pickle.dump', (['xrp_weights_extend', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(xrp_weights_extend, f, pickle.HIGHEST_PROTOCOL)\n', (9985, 10033), False, 'import 
pickle\n'), ((10050, 10109), 'pickle.dump', 'pickle.dump', (['trx_weights_extend', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(trx_weights_extend, f, pickle.HIGHEST_PROTOCOL)\n', (10061, 10109), False, 'import pickle\n'), ((10126, 10172), 'pickle.dump', 'pickle.dump', (['idxss', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(idxss, f, pickle.HIGHEST_PROTOCOL)\n', (10137, 10172), False, 'import pickle\n'), ((11098, 11133), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(t1 - t0)'}), '(seconds=t1 - t0)\n', (11116, 11133), False, 'import datetime\n'), ((1893, 1907), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1904, 1907), False, 'import pickle\n'), ((1943, 1957), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1954, 1957), False, 'import pickle\n'), ((2382, 2396), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2393, 2396), False, 'import pickle\n'), ((8887, 8909), 'numpy.timedelta64', 'np.timedelta64', (['j', '"""D"""'], {}), "(j, 'D')\n", (8901, 8909), True, 'import numpy as np\n'), ((2961, 2975), 'pickle.load', 'pickle.load', (['g'], {}), '(g)\n', (2972, 2975), False, 'import pickle\n'), ((3015, 3029), 'pickle.load', 'pickle.load', (['g'], {}), '(g)\n', (3026, 3029), False, 'import pickle\n'), ((9078, 9100), 'numpy.timedelta64', 'np.timedelta64', (['j', '"""D"""'], {}), "(j, 'D')\n", (9092, 9100), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from math import log, exp
from random import uniform
class UtilityModel:
    def __init__(self, name, churn_rate, behavior_model):
        '''
        Economic utility model of churn: how likely a customer is to churn
        depends on how much utility they derive, which in turn is computed
        from their event counts under the given behavior model.

        Parameters are loaded from `../conf/<name>_utility.csv`, which has
        the same form as the behavior model: a vector and a matrix, indexed
        by behavior name (the counts must match the behavior model). The
        'util' vector drives the multiplicative utility in
        `utility_function`; the matrix is reserved for future interaction
        terms and is currently unused.

        The churn probability is a sigmoidal function of utility (see
        `simulate_churn`). Calibration sets the sigmoid slope `kappa` so
        that a customer exhibiting the behavior model's mean behavior has
        the target churn rate. This does not guarantee that a whole
        simulation averages to that churn rate, but it works well enough.

        :param name: model name, used to locate the utility CSV
        :param churn_rate: target churn rate for calibration
        :param behavior_model: the behavior model this utility function works with
        '''
        self.name = name
        self.churn_rate = churn_rate
        params = pd.read_csv('../conf/'+name+'_utility.csv')
        params.set_index(['behavior'], inplace=True)
        self.linear_utility = params['util']
        self.behave_names = params.index.values
        # The utility file must describe exactly the behaviors the behavior
        # model generates, in the same order.
        assert len(self.behave_names) == len(behavior_model.behave_names)
        assert all(self.behave_names == behavior_model.behave_names)
        self.utility_interactions = params[self.behave_names]
        # Calibrate: choose kappa so the mean behavior churns at the target rate.
        expected_utility = self.utility_function(behavior_model.behave_means)
        retention = 1.0 - self.churn_rate
        self.kappa = -log(1.0/retention - 1.0) / expected_utility
    def utility_function(self, behavior):
        '''
        Compute the customer's utility for a vector of behavior counts.
        Currently a plain dot product with the per-behavior utility weights;
        the interaction matrix is not used yet.
        :param behavior: vector of event counts, one per behavior
        :return: scalar utility
        '''
        # ToDo: add interaction term
        return np.dot(behavior, self.linear_utility)
    def simulate_churn(self, event_counts):
        '''
        Simulate a single churn outcome for one customer. Retention
        probability is a sigmoid in utility; churn probability is its
        complement. Returns True (churn) when a uniform random draw on
        [0, 1] falls below the churn probability.
        :param event_counts: vector of event counts, one per behavior
        :return: bool, True if the customer churns
        '''
        utility = self.utility_function(event_counts)
        retention_prob = 1.0/(1.0+exp(-self.kappa*utility))
        return uniform(0, 1) < 1.0 - retention_prob
| [
"random.uniform",
"pandas.read_csv",
"math.log",
"numpy.dot",
"math.exp"
] | [((2072, 2119), 'pandas.read_csv', 'pd.read_csv', (["('../conf/' + name + '_utility.csv')"], {}), "('../conf/' + name + '_utility.csv')\n", (2083, 2119), True, 'import pandas as pd\n'), ((3068, 3105), 'numpy.dot', 'np.dot', (['behavior', 'self.linear_utility'], {}), '(behavior, self.linear_utility)\n', (3074, 3105), True, 'import numpy as np\n'), ((3689, 3702), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3696, 3702), False, 'from random import uniform\n'), ((2652, 2670), 'math.log', 'log', (['(1.0 / r - 1.0)'], {}), '(1.0 / r - 1.0)\n', (2655, 2670), False, 'from math import log, exp\n'), ((3654, 3674), 'math.exp', 'exp', (['(-self.kappa * u)'], {}), '(-self.kappa * u)\n', (3657, 3674), False, 'from math import log, exp\n')] |
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms, models
from collections import OrderedDict
from torch import nn
from torch import optim
import torch.nn.functional as F
import time
from workspace_utils import active_session
import numpy as np
from PIL import Image
from torch.autograd import Variable
import argparse
def load_checkpoint(filepath):
    """Restore a trained model and its training objects from a checkpoint.

    The checkpoint dict is expected to hold 'model', 'classifier',
    'state_dict', 'criterion', 'optimizer' and 'class_to_idx' entries.
    Returns (model, optimizer, criterion, class_to_idx, device), with the
    model moved to CUDA when available, otherwise CPU.
    """
    ckpt = torch.load(filepath)
    net = ckpt['model']
    # Reattach the custom classifier head before loading the weights.
    net.classifier = ckpt['classifier']
    net.load_state_dict(ckpt['state_dict'])
    target_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(target_device)
    return net, ckpt['optimizer'], ckpt['criterion'], ckpt['class_to_idx'], target_device
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    img = Image.open(image)
    img.load()
    # Resize so the shorter side becomes 256 px, preserving aspect ratio.
    if img.size[0] > img.size[1]:
        img.thumbnail((100000, 256))
    else:
        img.thumbnail((256, 100000))
    # Center-crop a 224x224 region.
    width, height = img.size
    crop_box = ((width - 224) / 2,
                (height - 224) / 2,
                (width + 224) / 2,
                (height + 224) / 2)
    region = img.crop(crop_box)
    # Scale to [0, 1], normalize with ImageNet statistics, and reorder
    # from HWC to CHW as PyTorch expects.
    pixels = np.array(region) / 255
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    normalized = (pixels - channel_mean) / channel_std
    return normalized.transpose((2, 0, 1))
def imshow(image, ax=None, title=None):
    """Display a normalized CHW image array on a matplotlib axis.

    Undoes the ImageNet normalization applied by `process_image` before
    rendering. Creates a new axis when none is supplied; returns the axis.
    """
    if ax is None:
        _, ax = plt.subplots()
    if title:
        plt.title(title)
    # PyTorch tensors put the color channel first, matplotlib wants it last.
    hwc = image.transpose((1, 2, 0))
    # Undo preprocessing and clip to the displayable [0, 1] range.
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    restored = np.clip(channel_std * hwc + channel_mean, 0, 1)
    ax.imshow(restored)
    return ax
def predict(image_path, model, device, topk, cat_to_name):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    :param image_path: path to the image file
    :param model: trained network carrying a ``class_to_idx`` mapping attribute
    :param device: torch device to run inference on
    :param topk: number of top classes to return
    :param cat_to_name: mapping from class label to human-readable name
    :return: (top-k probabilities, top-k class labels, top-k flower names)
    '''
    model.to(device)
    model.eval()
    with torch.no_grad():
        image = torch.from_numpy(process_image(image_path))
        # Add a batch dimension and match the model's expected dtype/device.
        image = image.unsqueeze(0).type(torch.FloatTensor).to(device)
        output = model.forward(image)
        # The network outputs log-probabilities; exp() recovers probabilities.
        ps = torch.exp(output)
        top_p, top_c = torch.topk(ps, topk)
    tp = [float(p) for p in top_p[0]]
    tc = [float(c) for c in top_c[0]]
    # Invert class_to_idx once (O(n)) instead of rebuilding the key/value
    # lists and scanning them for every prediction (was O(topk * n)).
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    ind = [idx_to_class[c] for c in tc]
    flower_names = [cat_to_name[i] for i in ind]
    return tp, ind, flower_names
"numpy.clip",
"PIL.Image.open",
"torch.load",
"torch.topk",
"torch.from_numpy",
"torch.exp",
"numpy.array",
"torch.no_grad",
"torch.cuda.is_available",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots"
] | [((408, 428), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (418, 428), False, 'import torch\n'), ((1067, 1084), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1077, 1084), False, 'from PIL import Image\n'), ((1489, 1520), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1497, 1520), True, 'import numpy as np\n'), ((1531, 1562), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1539, 1562), True, 'import numpy as np\n'), ((2000, 2031), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2008, 2031), True, 'import numpy as np\n'), ((2042, 2073), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2050, 2073), True, 'import numpy as np\n'), ((2204, 2224), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (2211, 2224), True, 'import numpy as np\n'), ((1450, 1471), 'numpy.array', 'np.array', (['cropped_img'], {}), '(cropped_img)\n', (1458, 1471), True, 'import numpy as np\n'), ((1748, 1762), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1760, 1762), True, 'import matplotlib.pyplot as plt\n'), ((1785, 1801), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1794, 1801), True, 'import matplotlib.pyplot as plt\n'), ((2539, 2554), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2552, 2554), False, 'import torch\n'), ((2614, 2637), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2630, 2637), False, 'import torch\n'), ((2804, 2821), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2813, 2821), False, 'import torch\n'), ((2845, 2865), 'torch.topk', 'torch.topk', (['ps', 'topk'], {}), '(ps, topk)\n', (2855, 2865), False, 'import torch\n'), ((743, 768), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (766, 768), False, 'import torch\n')] |
####################################################### README #########################################################
# This file consists of function that convolves an image with a receptive field so that input to the network is
# close to the form perceived by our eyes.
########################################################################################################################
import tensorflow as tf
import numpy as np
def rf_tf(inp):
    """Convolve a batch of images with a 5x5 receptive-field kernel (TensorFlow version).

    inp - 4-D input tensor; tf.nn.conv2d expects NHWC layout with a single
          channel, since the kernel is reshaped to [5, 5, 1, 1] -- TODO confirm callers.
    Returns the 'SAME'-padded convolution result ("membrane potential" map).
    """
    sca1 = 0.625
    sca2 = 0.125
    sca3 = -0.125
    sca4 = -0.5
    # Receptive field kernel: excitatory center, inhibitory surround.
    w = [[ sca4, sca3, sca2, sca3, sca4],
         [ sca3, sca2, sca1, sca2, sca3],
         [ sca2, sca1, 1, sca1, sca2],
         [ sca3, sca2, sca1, sca2, sca3],
         [ sca4, sca3, sca2, sca3, sca4]]
    # Renamed from `filter` to avoid shadowing the Python builtin.
    # Shape the 5x5 weights into [height, width, in_channels, out_channels].
    kernel = tf.convert_to_tensor(w, dtype=tf.float32)
    kernel = tf.expand_dims(kernel, -1)
    kernel = tf.expand_dims(kernel, -1)
    pot = tf.nn.conv2d(inp, kernel, strides=[1, 1, 1, 1], padding='SAME')
    return pot
def rf_np(inp):
    """Convolve a 2-D image with a 5x5 receptive-field kernel (NumPy version).

    inp - 2-D array (height x width) of pixel intensities, assumed in [0, 255]
          since every contribution is divided by 255 -- TODO confirm range.
    Returns a float array of the same shape holding the kernel-weighted sums
    ("membrane potentials"); out-of-image neighbours contribute zero.
    """
    sca1 = 0.625
    sca2 = 0.125
    sca3 = -0.125
    sca4 = -.5
    # Receptive field kernel: excitatory center, inhibitory surround
    # (same weights as the TensorFlow variant above).
    w = [[ sca4, sca3, sca2, sca3, sca4],
         [ sca3, sca2, sca1, sca2, sca3],
         [ sca2, sca1, 1, sca1, sca2],
         [ sca3, sca2, sca1, sca2, sca3],
         [ sca4, sca3, sca2, sca3, sca4]]
    pot = np.zeros([inp.shape[0], inp.shape[1]])
    ran = [-2, -1, 0, 1, 2]
    # Offsets of the kernel center.
    ox = 2
    oy = 2
    # Direct convolution, zero-padded at the borders.
    for i in range(inp.shape[0]):
        for j in range(inp.shape[1]):
            summ = 0
            for m in ran:
                for n in ran:
                    # Bug fix: the column bound must be inp.shape[1] - 1 (it was
                    # inp.shape[0] - 1), which gave wrong results or IndexErrors
                    # for non-square images. Square inputs are unaffected.
                    if 0 <= i + m <= inp.shape[0] - 1 and 0 <= j + n <= inp.shape[1] - 1:
                        summ = summ + w[ox + m][oy + n] * inp[i + m][j + n] / 255
            pot[i][j] = summ
    return pot
| [
"tensorflow.convert_to_tensor",
"tensorflow.expand_dims",
"numpy.zeros",
"tensorflow.nn.conv2d"
] | [((778, 819), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['w'], {'dtype': 'tf.float32'}), '(w, dtype=tf.float32)\n', (798, 819), True, 'import tensorflow as tf\n'), ((833, 859), 'tensorflow.expand_dims', 'tf.expand_dims', (['filter', '(-1)'], {}), '(filter, -1)\n', (847, 859), True, 'import tensorflow as tf\n'), ((873, 899), 'tensorflow.expand_dims', 'tf.expand_dims', (['filter', '(-1)'], {}), '(filter, -1)\n', (887, 899), True, 'import tensorflow as tf\n'), ((911, 974), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inp', 'filter'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(inp, filter, strides=[1, 1, 1, 1], padding='SAME')\n", (923, 974), True, 'import tensorflow as tf\n'), ((1328, 1366), 'numpy.zeros', 'np.zeros', (['[inp.shape[0], inp.shape[1]]'], {}), '([inp.shape[0], inp.shape[1]])\n', (1336, 1366), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Union
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
from copy import deepcopy
def suppress_axes_lines(ax):
    """Hide every tick mark and spine of the given axes.

    :param ax: pyplot axes object
    """
    ax.axes.get_xaxis().set_ticks([])
    ax.axes.get_yaxis().set_ticks([])
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    return
def plot_batch_prediction(batch: dict, results_dict: dict, cf, outfile: Union[str, None]=None,
                          suptitle: Union[str, None]=None):
    """
    plot the input images, ground truth annotations, and output predictions of a batch. If 3D batch, plots a 2D projection
    of one randomly sampled element (patient) in the batch. Since plotting all slices of patient volume blows up costs of
    time and space, only a section containing a randomly sampled ground truth annotation is plotted.
    :param batch: dict with keys: 'data' (input image), 'seg' (pixelwise annotations), 'pid'
    :param results_dict: list over batch element. Each element is a list of boxes (prediction and ground truth),
        where every box is a dictionary containing box_coords, box_score and box_type.
    :param cf: config object; this function reads cf.plot_dir, cf.fold, cf.dim,
        cf.num_seg_classes and cf.box_color_palette.
    :param outfile: output path of the figure; defaults to cf.plot_dir/pred_example_<fold>.png.
    :param suptitle: optional figure-level title.
    """
    if outfile is None:
        outfile = os.path.join(cf.plot_dir, 'pred_example_{}.png'.format(cf.fold))
    data = batch['data']
    segs = batch['seg']
    pids = batch['pid']
    # for 3D, repeat pid over batch elements.
    if len(set(pids)) == 1:
        pids = [pids] * data.shape[0]
    seg_preds = results_dict['seg_preds']
    # deepcopy because the box dicts are mutated below ('box_coords' is rewritten per slice).
    roi_results = deepcopy(results_dict['boxes'])
    # Randomly sampled one patient of batch and project data into 2D slices for plotting.
    if cf.dim == 3:
        patient_ix = np.random.choice(data.shape[0])
        # move the last axis to the front so the volume can be iterated as 2D slices
        # (assumes z is the last axis of the per-patient array -- TODO confirm layout).
        data = np.transpose(data[patient_ix], axes=(3, 0, 1, 2))
        # select interesting foreground section to plot.
        gt_boxes = [box['box_coords'] for box in roi_results[patient_ix] if box['box_type'] == 'gt']
        if len(gt_boxes) > 0:
            # window of +/-5 slices around the first GT box (z-extent in coords 4:6), clipped to the volume.
            z_cuts = [np.max((int(gt_boxes[0][4]) - 5, 0)), np.min((int(gt_boxes[0][5]) + 5, data.shape[0]))]
        else:
            # no ground truth available: show a window around the middle of the volume.
            z_cuts = [data.shape[0]//2 - 5, int(data.shape[0]//2 + np.min([10, data.shape[0]//2]))]
        p_roi_results = roi_results[patient_ix]
        roi_results = [[] for _ in range(data.shape[0])]
        # iterate over cubes and spread across slices.
        for box in p_roi_results:
            b = box['box_coords']
            # dismiss negative anchor slices.
            slices = np.round(np.unique(np.clip(np.arange(b[4], b[5] + 1), 0, data.shape[0]-1)))
            for s in slices:
                roi_results[int(s)].append(box)
                # keep only the in-plane coordinates for 2D drawing.
                roi_results[int(s)][-1]['box_coords'] = b[:4]
        roi_results = roi_results[z_cuts[0]: z_cuts[1]]
        data = data[z_cuts[0]: z_cuts[1]]
        segs = np.transpose(segs[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]
        seg_preds = np.transpose(seg_preds[patient_ix], axes=(3, 0, 1, 2))[z_cuts[0]: z_cuts[1]]
        pids = [pids[patient_ix]] * data.shape[0]
    try:
        # all dimensions except for the 'channel-dimension' are required to match
        for i in [0, 2, 3]:
            assert data.shape[i] == segs.shape[i] == seg_preds.shape[i]
    except:
        # NOTE(review): raising a Warning instance is unusual (warnings.warn would be
        # conventional); kept as-is to preserve behavior.
        raise Warning('Shapes of arrays to plot not in agreement!'
                      'Shapes {} vs. {} vs {}'.format(data.shape, segs.shape, seg_preds.shape))
    # rows to display per element: input channels, GT seg, predicted seg, plus one
    # extra copy of input channel 0 for the box-overlay row.
    show_arrays = np.concatenate([data, segs, seg_preds, data[:, 0][:, None]], axis=1).astype(float)
    approx_figshape = (4 * show_arrays.shape[0], 4 * show_arrays.shape[1])
    fig = plt.figure(figsize=approx_figshape)
    gs = gridspec.GridSpec(show_arrays.shape[1] + 1, show_arrays.shape[0])
    gs.update(wspace=0.1, hspace=0.1)
    # grid layout: one column per batch element (b), one row per array channel (m).
    for b in range(show_arrays.shape[0]):
        for m in range(show_arrays.shape[1]):
            ax = plt.subplot(gs[m, b])
            suppress_axes_lines(ax)
            if m < show_arrays.shape[1]:
                arr = show_arrays[b, m]
                if m < data.shape[1] or m == show_arrays.shape[1] - 1:
                    # input rows (and the final overlay row) are rendered in grayscale.
                    if b == 0:
                        ax.set_ylabel("Input" + (" + GT & Pred Box" if m == show_arrays.shape[1] - 1 else ""))
                    cmap = 'gray'
                    vmin = None
                    vmax = None
                else:
                    # segmentation rows use the default colormap scaled to the class range.
                    cmap = None
                    vmin = 0
                    vmax = cf.num_seg_classes - 1
                if m == 0:
                    plt.title('{}'.format(pids[b][:10]), fontsize=20)
                plt.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax)
            if m >= (data.shape[1]):
                if b == 0:
                    if m == data.shape[1]:
                        ax.set_ylabel("GT Box & Seg")
                    if m == data.shape[1]+1:
                        ax.set_ylabel("GT Box + Pred Seg & Box")
                for box in roi_results[b]:
                    if box['box_type'] != 'patient_tn_box': # don't plot true negative dummy boxes.
                        coords = box['box_coords']
                        if box['box_type'] == 'det':
                            # don't plot background preds or low confidence boxes.
                            if box['box_pred_class_id'] > 0 and box['box_score'] > 0.1:
                                plot_text = True
                                score = np.max(box['box_score'])
                                score_text = '{}|{:.0f}'.format(box['box_pred_class_id'], score*100)
                                # if prob detection: plot only boxes from correct sampling instance.
                                if 'sample_id' in box.keys() and int(box['sample_id']) != m - data.shape[1] - 2:
                                    continue
                                # if prob detection: plot reconstructed boxes only in corresponding line.
                                if not 'sample_id' in box.keys() and (m != data.shape[1] + 1) and (m != show_arrays.shape[1] - 1):
                                    continue
                                score_font_size = 7
                                text_color = 'w'
                                text_x = coords[1] + 5*(box['box_pred_class_id'] -1) #avoid overlap of scores in plot.
                                text_y = coords[2] + 5
                            else:
                                continue
                        elif box['box_type'] == 'gt':
                            plot_text = True
                            score_text = int(box['box_label'])
                            score_font_size = 7
                            text_color = 'r'
                            text_x = coords[1]
                            text_y = coords[0] - 1
                        else:
                            plot_text = False
                        color_var = 'extra_usage' if 'extra_usage' in list(box.keys()) else 'box_type'
                        color = cf.box_color_palette[box[color_var]]
                        # draw the four edges of the box.
                        plt.plot([coords[1], coords[3]], [coords[0], coords[0]], color=color, linewidth=1, alpha=1) # up
                        plt.plot([coords[1], coords[3]], [coords[2], coords[2]], color=color, linewidth=1, alpha=1) # down
                        plt.plot([coords[1], coords[1]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # left
                        plt.plot([coords[3], coords[3]], [coords[0], coords[2]], color=color, linewidth=1, alpha=1) # right
                        if plot_text:
                            plt.text(text_x, text_y, score_text, fontsize=score_font_size, color=text_color)
    if suptitle is not None:
        plt.suptitle(suptitle, fontsize=22)
    try:
        plt.savefig(outfile)
    except:
        raise Warning('failed to save plot.')
    plt.close(fig)
class TrainingPlot_2Panel():
    """Writes per-figure monitoring plots of train/val metrics to disk.

    todo remove since replaced by tensorboard?
    """
    def __init__(self, cf):
        self.file_name = cf.plot_dir + '/monitor_{}'.format(cf.fold)
        self.exp_name = cf.fold_dir
        self.do_validation = cf.do_validation
        self.separate_values_dict = cf.assign_values_to_extra_figure
        self.figure_list = []
        # one pre-configured figure per monitoring panel
        for _ in range(cf.n_monitoring_figures):
            monitor_fig = plt.figure(figsize=(10, 6))
            monitor_fig.ax1 = plt.subplot(111)
            monitor_fig.ax1.set_xlabel('epochs')
            monitor_fig.ax1.set_ylabel('loss / metrics')
            monitor_fig.ax1.set_xlim(0, cf.num_epochs)
            monitor_fig.ax1.grid()
            self.figure_list.append(monitor_fig)
        # the main (first) figure gets a fixed y-range
        self.figure_list[0].ax1.set_ylim(0, 1.5)
        self.color_palette = ['b', 'c', 'r', 'purple', 'm', 'y', 'k', 'tab:gray']
    def update_and_save(self, metrics, epoch):
        """Redraw every monitoring figure with the latest metrics and write it to disk."""
        for figure_ix, fig in enumerate(self.figure_list):
            detection_monitoring_plot(fig.ax1, metrics, self.exp_name, self.color_palette, epoch, figure_ix,
                                      self.separate_values_dict,
                                      self.do_validation)
            fig.savefig(self.file_name + '_{}'.format(figure_ix))
def detection_monitoring_plot(ax1, metrics, exp_name, color_palette, epoch, figure_ix, separate_values_dict, do_validation):
    """Draw train (and optionally val) curves on ax1 for the metrics assigned to figure `figure_ix`.

    :param ax1: target pyplot axes.
    :param metrics: dict with 'train'/'val' sub-dicts holding per-epoch metric lists plus a
        'monitor_values' entry indexed by epoch (1-based, judging by the arange below -- TODO confirm).
    :param separate_values_dict: maps a figure index to the metric names shown on that extra figure.
    :param figure_ix: 0 selects the main figure (everything unassigned), otherwise an extra figure.
    """
    # todo remove since replaced by tensorboard?
    monitor_values_keys = metrics['train']['monitor_values'][1][0].keys()
    separate_values = [v for fig_ix in separate_values_dict.values() for v in fig_ix]
    if figure_ix == 0:
        # main figure: all monitor values not assigned to an extra figure, plus epoch-level metrics.
        plot_keys = [ii for ii in monitor_values_keys if ii not in separate_values]
        plot_keys += [k for k in metrics['train'].keys() if k != 'monitor_values']
    else:
        plot_keys = separate_values_dict[figure_ix]
    # x-axis: epochs, 1-based.
    x = np.arange(1, epoch + 1)
    for kix, pk in enumerate(plot_keys):
        if pk in metrics['train'].keys():
            # epoch-level metric list; index 0 is skipped (presumably a placeholder entry -- TODO confirm).
            y_train = metrics['train'][pk][1:]
            if do_validation:
                y_val = metrics['val'][pk][1:]
        else:
            # batch-wise monitor values: average all recorded entries of each epoch.
            y_train = [np.mean([er[pk] for er in metrics['train']['monitor_values'][e]]) for e in x]
            if do_validation:
                y_val = [np.mean([er[pk] for er in metrics['val']['monitor_values'][e]]) for e in x]
        ax1.plot(x, y_train, label='train_{}'.format(pk), linestyle='--', color=color_palette[kix])
        if do_validation:
            ax1.plot(x, y_val, label='val_{}'.format(pk), linestyle='-', color=color_palette[kix])
    if epoch == 1:
        # one-time setup: shrink the axes to make room for a legend on the right.
        box = ax1.get_position()
        ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax1.set_title(exp_name)
def plot_prediction_hist(label_list: list, pred_list: list, type_list: list, outfile: str):
    """Save a log-scaled histogram of prediction scores, split by match label.

    :param label_list: per-prediction 1 (true positive; missed ground-truth objects are
        artificially added as score-0 entries with label 1) or 0 (false positive).
    :param pred_list: per-prediction confidence scores.
    :param type_list: optional prediction types ('det_tp'/'det_fp'/'det_fn') used for
        count statistics in the title; may be None.
    :param outfile: path the figure is written to.
    """
    scores = np.array(pred_list)
    hits = np.array(label_list)
    title = outfile.split('/')[-1] + ' count:{}'.format(len(label_list))
    plt.figure()
    plt.yscale('log')
    if 0 in hits:
        plt.hist(scores[hits == 0], alpha=0.3, color='g', range=(0, 1), bins=50, label='false pos.')
    if 1 in hits:
        plt.hist(scores[hits == 1], alpha=0.3, color='b', range=(0, 1), bins=50, label='true pos. (false neg. @ score=0)')
    if type_list is not None:
        # append detection-type counts to the title
        tp_count = type_list.count('det_tp')
        fp_count = type_list.count('det_fp')
        fn_count = type_list.count('det_fn')
        title += ' tp:{} fp:{} fn:{} pos:{}'.format(tp_count, fp_count, fn_count, fn_count + tp_count)
    plt.legend()
    plt.title(title)
    plt.xlabel('confidence score')
    plt.ylabel('log n')
    plt.savefig(outfile)
    plt.close()
def plot_stat_curves(stats: list, outfile: str):
    """Save a ROC and a PRC figure, each overlaying the curves of every entry in `stats`."""
    for curve_name in ('roc', 'prc'):
        plt.figure()
        for entry in stats:
            curve = entry[curve_name]
            # a lone NaN marks a curve that could not be computed; skip it.
            if isinstance(curve, float) and np.isnan(curve):
                continue
            plt.plot(curve[0], curve[1], label=entry['name'] + '_' + curve_name)
        plt.title(outfile.split('/')[-1] + '_' + curve_name)
        plt.legend(loc=3 if curve_name == 'prc' else 4)
        plt.xlabel('precision' if curve_name == 'prc' else '1-spec.')
        plt.ylabel('recall')
        plt.savefig(outfile + '_' + curve_name)
        plt.close()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.array",
"copy.deepcopy",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"numpy.concatenate",
... | [((785, 806), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (799, 806), False, 'import matplotlib\n'), ((2429, 2460), 'copy.deepcopy', 'deepcopy', (["results_dict['boxes']"], {}), "(results_dict['boxes'])\n", (2437, 2460), False, 'from copy import deepcopy\n'), ((4502, 4537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'approx_figshape'}), '(figsize=approx_figshape)\n', (4512, 4537), True, 'import matplotlib.pyplot as plt\n'), ((4547, 4612), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(show_arrays.shape[1] + 1)', 'show_arrays.shape[0]'], {}), '(show_arrays.shape[1] + 1, show_arrays.shape[0])\n', (4564, 4612), True, 'import matplotlib.gridspec as gridspec\n'), ((8668, 8682), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8677, 8682), True, 'import matplotlib.pyplot as plt\n'), ((10624, 10647), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 1)'], {}), '(1, epoch + 1)\n', (10633, 10647), True, 'import numpy as np\n'), ((12085, 12104), 'numpy.array', 'np.array', (['pred_list'], {}), '(pred_list)\n', (12093, 12104), True, 'import numpy as np\n'), ((12118, 12138), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (12126, 12138), True, 'import numpy as np\n'), ((12216, 12228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12226, 12228), True, 'import matplotlib.pyplot as plt\n'), ((12233, 12250), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (12243, 12250), True, 'import matplotlib.pyplot as plt\n'), ((12822, 12834), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12832, 12834), True, 'import matplotlib.pyplot as plt\n'), ((12839, 12855), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (12848, 12855), True, 'import matplotlib.pyplot as plt\n'), ((12860, 12890), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""confidence score"""'], {}), "('confidence score')\n", (12870, 12890), True, 'import 
matplotlib.pyplot as plt\n'), ((12895, 12914), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log n"""'], {}), "('log n')\n", (12905, 12914), True, 'import matplotlib.pyplot as plt\n'), ((12919, 12939), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (12930, 12939), True, 'import matplotlib.pyplot as plt\n'), ((12944, 12955), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12953, 12955), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2624), 'numpy.random.choice', 'np.random.choice', (['data.shape[0]'], {}), '(data.shape[0])\n', (2609, 2624), True, 'import numpy as np\n'), ((2640, 2689), 'numpy.transpose', 'np.transpose', (['data[patient_ix]'], {'axes': '(3, 0, 1, 2)'}), '(data[patient_ix], axes=(3, 0, 1, 2))\n', (2652, 2689), True, 'import numpy as np\n'), ((8531, 8566), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {'fontsize': '(22)'}), '(suptitle, fontsize=22)\n', (8543, 8566), True, 'import matplotlib.pyplot as plt\n'), ((8585, 8605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (8596, 8605), True, 'import matplotlib.pyplot as plt\n'), ((12279, 12376), 'matplotlib.pyplot.hist', 'plt.hist', (['preds[labels == 0]'], {'alpha': '(0.3)', 'color': '"""g"""', 'range': '(0, 1)', 'bins': '(50)', 'label': '"""false pos."""'}), "(preds[labels == 0], alpha=0.3, color='g', range=(0, 1), bins=50,\n label='false pos.')\n", (12287, 12376), True, 'import matplotlib.pyplot as plt\n'), ((12401, 12520), 'matplotlib.pyplot.hist', 'plt.hist', (['preds[labels == 1]'], {'alpha': '(0.3)', 'color': '"""b"""', 'range': '(0, 1)', 'bins': '(50)', 'label': '"""true pos. (false neg. @ score=0)"""'}), "(preds[labels == 1], alpha=0.3, color='b', range=(0, 1), bins=50,\n label='true pos. (false neg. 
@ score=0)')\n", (12409, 12520), True, 'import matplotlib.pyplot as plt\n'), ((13045, 13057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13055, 13057), True, 'import matplotlib.pyplot as plt\n'), ((13277, 13315), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': "(3 if c == 'prc' else 4)"}), "(loc=3 if c == 'prc' else 4)\n", (13287, 13315), True, 'import matplotlib.pyplot as plt\n'), ((13324, 13376), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('precision' if c == 'prc' else '1-spec.')"], {}), "('precision' if c == 'prc' else '1-spec.')\n", (13334, 13376), True, 'import matplotlib.pyplot as plt\n'), ((13385, 13405), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (13395, 13405), True, 'import matplotlib.pyplot as plt\n'), ((13414, 13444), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outfile + '_' + c)"], {}), "(outfile + '_' + c)\n", (13425, 13444), True, 'import matplotlib.pyplot as plt\n'), ((13453, 13464), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13462, 13464), True, 'import matplotlib.pyplot as plt\n'), ((3728, 3777), 'numpy.transpose', 'np.transpose', (['segs[patient_ix]'], {'axes': '(3, 0, 1, 2)'}), '(segs[patient_ix], axes=(3, 0, 1, 2))\n', (3740, 3777), True, 'import numpy as np\n'), ((3820, 3874), 'numpy.transpose', 'np.transpose', (['seg_preds[patient_ix]'], {'axes': '(3, 0, 1, 2)'}), '(seg_preds[patient_ix], axes=(3, 0, 1, 2))\n', (3832, 3874), True, 'import numpy as np\n'), ((4334, 4402), 'numpy.concatenate', 'np.concatenate', (['[data, segs, seg_preds, data[:, 0][:, None]]'], {'axis': '(1)'}), '([data, segs, seg_preds, data[:, 0][:, None]], axis=1)\n', (4348, 4402), True, 'import numpy as np\n'), ((4757, 4778), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[m, b]'], {}), '(gs[m, b])\n', (4768, 4778), True, 'import matplotlib.pyplot as plt\n'), ((5404, 5452), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arr'], {'cmap': 'cmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(arr, 
cmap=cmap, vmin=vmin, vmax=vmax)\n', (5414, 5452), True, 'import matplotlib.pyplot as plt\n'), ((9197, 9213), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (9208, 9213), True, 'import matplotlib.pyplot as plt\n'), ((9129, 9156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9139, 9156), True, 'import matplotlib.pyplot as plt\n'), ((10893, 10958), 'numpy.mean', 'np.mean', (["[er[pk] for er in metrics['train']['monitor_values'][e]]"], {}), "([er[pk] for er in metrics['train']['monitor_values'][e]])\n", (10900, 10958), True, 'import numpy as np\n'), ((13163, 13216), 'matplotlib.pyplot.plot', 'plt.plot', (['s[c][0]', 's[c][1]'], {'label': "(s['name'] + '_' + c)"}), "(s[c][0], s[c][1], label=s['name'] + '_' + c)\n", (13171, 13216), True, 'import matplotlib.pyplot as plt\n'), ((11026, 11089), 'numpy.mean', 'np.mean', (["[er[pk] for er in metrics['val']['monitor_values'][e]]"], {}), "([er[pk] for er in metrics['val']['monitor_values'][e]])\n", (11033, 11089), True, 'import numpy as np\n'), ((13130, 13144), 'numpy.isnan', 'np.isnan', (['s[c]'], {}), '(s[c])\n', (13138, 13144), True, 'import numpy as np\n'), ((3070, 3102), 'numpy.min', 'np.min', (['[10, data.shape[0] // 2]'], {}), '([10, data.shape[0] // 2])\n', (3076, 3102), True, 'import numpy as np\n'), ((3426, 3451), 'numpy.arange', 'np.arange', (['b[4]', '(b[5] + 1)'], {}), '(b[4], b[5] + 1)\n', (3435, 3451), True, 'import numpy as np\n'), ((7879, 7974), 'matplotlib.pyplot.plot', 'plt.plot', (['[coords[1], coords[3]]', '[coords[0], coords[0]]'], {'color': 'color', 'linewidth': '(1)', 'alpha': '(1)'}), '([coords[1], coords[3]], [coords[0], coords[0]], color=color,\n linewidth=1, alpha=1)\n', (7887, 7974), True, 'import matplotlib.pyplot as plt\n'), ((8000, 8095), 'matplotlib.pyplot.plot', 'plt.plot', (['[coords[1], coords[3]]', '[coords[2], coords[2]]'], {'color': 'color', 'linewidth': '(1)', 'alpha': '(1)'}), '([coords[1], coords[3]], 
[coords[2], coords[2]], color=color,\n linewidth=1, alpha=1)\n', (8008, 8095), True, 'import matplotlib.pyplot as plt\n'), ((8123, 8218), 'matplotlib.pyplot.plot', 'plt.plot', (['[coords[1], coords[1]]', '[coords[0], coords[2]]'], {'color': 'color', 'linewidth': '(1)', 'alpha': '(1)'}), '([coords[1], coords[1]], [coords[0], coords[2]], color=color,\n linewidth=1, alpha=1)\n', (8131, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8246, 8341), 'matplotlib.pyplot.plot', 'plt.plot', (['[coords[3], coords[3]]', '[coords[0], coords[2]]'], {'color': 'color', 'linewidth': '(1)', 'alpha': '(1)'}), '([coords[3], coords[3]], [coords[0], coords[2]], color=color,\n linewidth=1, alpha=1)\n', (8254, 8341), True, 'import matplotlib.pyplot as plt\n'), ((8412, 8497), 'matplotlib.pyplot.text', 'plt.text', (['text_x', 'text_y', 'score_text'], {'fontsize': 'score_font_size', 'color': 'text_color'}), '(text_x, text_y, score_text, fontsize=score_font_size, color=text_color\n )\n', (8420, 8497), True, 'import matplotlib.pyplot as plt\n'), ((6230, 6254), 'numpy.max', 'np.max', (["box['box_score']"], {}), "(box['box_score'])\n", (6236, 6254), True, 'import numpy as np\n')] |
""" module to test interpolate_wrapper.py
"""
from __future__ import division, print_function, absolute_import
# Unit Test
import unittest
import time
from numpy import arange, allclose, ones, NaN, isnan
import numpy as np
# functionality to be tested
from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, \
linear, logarithmic, block_average_above, block, nearest
class Test(unittest.TestCase):
def assertAllclose(self, x, y, rtol=1.0e-5):
for i, xi in enumerate(x):
self.assertTrue(allclose(xi, y[i], rtol) or (isnan(xi) and isnan(y[i])))
def test_nearest(self):
N = 5
x = arange(N)
y = arange(N)
self.assertAllclose(y, nearest(x, y, x+.1))
self.assertAllclose(y, nearest(x, y, x-.1))
def test_linear(self):
N = 3000.
x = arange(N)
y = arange(N)
new_x = arange(N)+0.5
t1 = time.clock()
new_y = linear(x, y, new_x)
t2 = time.clock()
#print "time for linear interpolation with N = %i:" % N, t2 - t1
self.assertAllclose(new_y[:5], [0.5, 1.5, 2.5, 3.5, 4.5])
def test_block_average_above(self):
N = 3000.
x = arange(N)
y = arange(N)
new_x = arange(N/2)*2
t1 = time.clock()
new_y = block_average_above(x, y, new_x)
t2 = time.clock()
#print "time for block_avg_above interpolation with N = %i:" % N, t2 - t1
self.assertAllclose(new_y[:5], [0.0, 0.5, 2.5, 4.5, 6.5])
def test_linear2(self):
N = 3000.
x = arange(N)
y = ones((100,N)) * arange(N)
new_x = arange(N)+0.5
t1 = time.clock()
new_y = linear(x, y, new_x)
t2 = time.clock()
#print "time for 2D linear interpolation with N = %i:" % N, t2 - t1
self.assertAllclose(new_y[:5,:5],
[[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5]])
def test_logarithmic(self):
N = 4000.
x = arange(N)
y = arange(N)
new_x = arange(N)+0.5
t1 = time.clock()
new_y = logarithmic(x, y, new_x)
t2 = time.clock()
#print "time for logarithmic interpolation with N = %i:" % N, t2 - t1
correct_y = [np.NaN, 1.41421356, 2.44948974, 3.46410162, 4.47213595]
self.assertAllclose(new_y[:5], correct_y)
def runTest(self):
test_list = [name for name in dir(self) if name.find('test_') == 0]
for test_name in test_list:
exec("self.%s()" % test_name)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"scipy.interpolate.interpolate_wrapper.linear",
"numpy.allclose",
"scipy.interpolate.interpolate_wrapper.block_average_above",
"numpy.ones",
"time.clock",
"scipy.interpolate.interpolate_wrapper.nearest",
"scipy.interpolate.interpolate_wrapper.logarithmic",
"numpy.isnan",
"unittest.main",
"numpy.ar... | [((2780, 2795), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2793, 2795), False, 'import unittest\n'), ((656, 665), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (662, 665), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((678, 687), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (684, 687), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((850, 859), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (856, 859), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((872, 881), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (878, 881), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((925, 937), 'time.clock', 'time.clock', ([], {}), '()\n', (935, 937), False, 'import time\n'), ((954, 973), 'scipy.interpolate.interpolate_wrapper.linear', 'linear', (['x', 'y', 'new_x'], {}), '(x, y, new_x)\n', (960, 973), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((987, 999), 'time.clock', 'time.clock', ([], {}), '()\n', (997, 999), False, 'import time\n'), ((1211, 1220), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (1217, 1220), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1233, 1242), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (1239, 1242), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1287, 1299), 'time.clock', 'time.clock', ([], {}), '()\n', (1297, 1299), False, 'import time\n'), ((1316, 1348), 'scipy.interpolate.interpolate_wrapper.block_average_above', 'block_average_above', (['x', 'y', 'new_x'], {}), '(x, y, new_x)\n', (1335, 1348), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((1362, 1374), 'time.clock', 'time.clock', ([], {}), '()\n', (1372, 1374), False, 'import time\n'), ((1582, 1591), 'numpy.arange', 'arange', (['N'], {}), 
'(N)\n', (1588, 1591), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1673, 1685), 'time.clock', 'time.clock', ([], {}), '()\n', (1683, 1685), False, 'import time\n'), ((1702, 1721), 'scipy.interpolate.interpolate_wrapper.linear', 'linear', (['x', 'y', 'new_x'], {}), '(x, y, new_x)\n', (1708, 1721), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((1735, 1747), 'time.clock', 'time.clock', ([], {}), '()\n', (1745, 1747), False, 'import time\n'), ((2210, 2219), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (2216, 2219), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((2232, 2241), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (2238, 2241), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((2285, 2297), 'time.clock', 'time.clock', ([], {}), '()\n', (2295, 2297), False, 'import time\n'), ((2314, 2338), 'scipy.interpolate.interpolate_wrapper.logarithmic', 'logarithmic', (['x', 'y', 'new_x'], {}), '(x, y, new_x)\n', (2325, 2338), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((2352, 2364), 'time.clock', 'time.clock', ([], {}), '()\n', (2362, 2364), False, 'import time\n'), ((719, 741), 'scipy.interpolate.interpolate_wrapper.nearest', 'nearest', (['x', 'y', '(x + 0.1)'], {}), '(x, y, x + 0.1)\n', (726, 741), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((771, 793), 'scipy.interpolate.interpolate_wrapper.nearest', 'nearest', (['x', 'y', '(x - 0.1)'], {}), '(x, y, x - 0.1)\n', (778, 793), False, 'from scipy.interpolate.interpolate_wrapper import atleast_1d_and_contiguous, linear, logarithmic, block_average_above, block, nearest\n'), ((898, 907), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (904, 907), False, 'from 
numpy import arange, allclose, ones, NaN, isnan\n'), ((1260, 1273), 'numpy.arange', 'arange', (['(N / 2)'], {}), '(N / 2)\n', (1266, 1273), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1604, 1618), 'numpy.ones', 'ones', (['(100, N)'], {}), '((100, N))\n', (1608, 1618), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1620, 1629), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (1626, 1629), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((1646, 1655), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (1652, 1655), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((2258, 2267), 'numpy.arange', 'arange', (['N'], {}), '(N)\n', (2264, 2267), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((544, 568), 'numpy.allclose', 'allclose', (['xi', 'y[i]', 'rtol'], {}), '(xi, y[i], rtol)\n', (552, 568), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((573, 582), 'numpy.isnan', 'isnan', (['xi'], {}), '(xi)\n', (578, 582), False, 'from numpy import arange, allclose, ones, NaN, isnan\n'), ((587, 598), 'numpy.isnan', 'isnan', (['y[i]'], {}), '(y[i])\n', (592, 598), False, 'from numpy import arange, allclose, ones, NaN, isnan\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convert_to_constants.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
class VariablesToConstantsTest(test.TestCase):
  """Tests for convert_variables_to_constants_v2.

  Each test builds a model (tf.function, SavedModel, Keras, control flow,
  RNN), converts its variables to constants, and checks that the frozen
  graph has no ReadVariableOp / StatefulPartitionedCall nodes and still
  produces the same outputs as the original function.
  """
  def _hasStatefulPartitionedCallOp(self, graph_def):
    """Determines if a StatefulPartitionedCall op exists in the graph."""
    for node in graph_def.node:
      if node.op == "StatefulPartitionedCall":
        return True
    return False
  def _getNumVariables(self, graph_def):
    """Returns the number of ReadVariableOp in the graph."""
    return sum(node.op == "ReadVariableOp" for node in graph_def.node)
  def _testConvertedFunction(self, obj, func, converted_concrete_func,
                             input_data):
    """Asserts the converted function matches `func` and round-trips via save/load.

    `obj` is unused in the body; it is kept for signature parity across
    call sites.
    """
    # Check that the converted ConcreteFunction produces the same result as the
    # original Function.
    expected_value = nest.flatten(func(**input_data))
    actual_value = nest.flatten(converted_concrete_func(**input_data))
    for expected, actual in zip(expected_value, actual_value):
      np.testing.assert_almost_equal(expected.numpy(), actual.numpy())
    # Ensure the shape is retained.
    for tensor in converted_concrete_func.inputs:
      actual_shape = input_data[tensor.name.split(":")[0]].shape
      self.assertEqual(tensor.shape, actual_shape)
    # Save the converted ConcreteFunction as a signature.
    save_dir = os.path.join(self.get_temp_dir(), "frozen_saved_model")
    root = tracking.AutoTrackable()
    root.f = converted_concrete_func
    save(root, save_dir, {"mykey": converted_concrete_func})
    # Load it back and make sure it works.
    loaded_obj = load(save_dir)
    actual_value = nest.flatten(loaded_obj.signatures["mykey"](**input_data))
    for expected, actual in zip(expected_value, actual_value):
      np.testing.assert_almost_equal(expected.numpy(), actual.numpy())
  @test_util.run_v2_only
  def testConstSavedModel(self):
    """Test a basic model with functions to make sure functions are inlined."""
    input_data = {"x": constant_op.constant(1., shape=[1])}
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: 2. * x)
    to_save = root.f.get_concrete_function(input_data["x"])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save(root, save_dir, to_save)
    saved_model = load(save_dir)
    input_func = saved_model.signatures["serving_default"]
    variable_graph_def = input_func.graph.as_graph_def()
    # No variables before conversion, but the function library is populated.
    self.assertEqual(0, self._getNumVariables(variable_graph_def))
    self.assertTrue(variable_graph_def.library.function)
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    # Conversion inlines functions: the library must now be empty.
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(constant_graph_def.library.function)
    self._testConvertedFunction(root, root.f, output_func, input_data)
  @test_util.run_v2_only
  def testVariableModel(self):
    """Test a basic model with Variables."""
    input_data = {"x": constant_op.constant(1., shape=[1])}
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    input_func = root.f.get_concrete_function(input_data["x"])
    variable_graph_def = input_func.graph.as_graph_def()
    # Two variables -> two ReadVariableOp nodes before conversion.
    self.assertEqual(2, self._getNumVariables(variable_graph_def))
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.f, output_func, input_data)
  @test_util.run_v2_only
  def testScalarModel(self):
    """Test a basic model with Variables."""
    # Same as testVariableModel but with a scalar (shape=[]) input.
    input_data = {"x": constant_op.constant(1., shape=[])}
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    input_func = root.f.get_concrete_function(input_data["x"])
    variable_graph_def = input_func.graph.as_graph_def()
    self.assertEqual(2, self._getNumVariables(variable_graph_def))
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.f, output_func, input_data)
  @test_util.run_v2_only
  def testVariableSavedModel(self):
    """Test a basic model with Variables with saving/loading the SavedModel."""
    input_data = {"x": constant_op.constant(1., shape=[1])}
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    to_save = root.f.get_concrete_function(input_data["x"])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save(root, save_dir, to_save)
    saved_model = load(save_dir)
    input_func = saved_model.signatures["serving_default"]
    variable_graph_def = input_func.graph.as_graph_def()
    # Loaded SavedModel signatures wrap the body in StatefulPartitionedCall.
    self.assertTrue(self._hasStatefulPartitionedCallOp(variable_graph_def))
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.f, output_func, input_data)
  @test_util.run_v2_only
  def testMultiFunctionModel(self):
    """Test a basic model with Variables."""
    # Model with two tf.functions; only `add` (one variable) is converted.
    class BasicModel(tracking.AutoTrackable):
      def __init__(self):
        self.y = None
        self.z = None
      @def_function.function
      def add(self, x):
        if self.y is None:
          self.y = variables.Variable(2.)
        return x + self.y
      @def_function.function
      def sub(self, x):
        if self.z is None:
          self.z = variables.Variable(3.)
        return x - self.z
    input_data = {"x": constant_op.constant(1., shape=[1])}
    root = BasicModel()
    input_func = root.add.get_concrete_function(input_data["x"])
    variable_graph_def = input_func.graph.as_graph_def()
    self.assertEqual(1, self._getNumVariables(variable_graph_def))
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.add, output_func, input_data)
  @test_util.run_v2_only
  def testKerasModel(self):
    """Tests conversion of a concrete function wrapping a trained Keras model."""
    input_data = constant_op.constant(1., shape=[1, 1])
    # Create a simple Keras model.
    x = [-1, 0, 1, 2, 3, 4]
    y = [-3, -1, 1, 3, 5, 7]
    model = keras.models.Sequential(
        [keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer="sgd", loss="mean_squared_error")
    model.fit(x, y, epochs=1)
    # Get the concrete function from the Keras model.
    @def_function.function
    def to_save(x):
      return model(x)
    input_func = to_save.get_concrete_function(input_data)
    variable_graph_def = input_func.graph.as_graph_def()
    # Dense layer kernel + bias -> two variables.
    self.assertEqual(2, self._getNumVariables(variable_graph_def))
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    # Check value.
    expected_value = to_save(input_data)
    actual_value = nest.flatten(output_func(input_data))
    self.assertEqual(expected_value.numpy(), actual_value)
  def _singleMetaGraphSavedModel(self):
    """Builds and saves a TF1-style (RefVariable) SavedModel; returns its path."""
    export_graph = ops.Graph()
    with export_graph.as_default():
      start = array_ops.placeholder(
          shape=[1, 1], dtype=dtypes.float32, name="start")
      # `distractor` is saved but not part of the signature output.
      distractor = variables.RefVariable(-1., name="distractor")
      v = variables.RefVariable(3., name="v")
      local_variable = variables.VariableV1(
          1.,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          trainable=False,
          use_resource=True)
      output = array_ops.identity(start * v * local_variable, name="output")
      with session_lib.Session() as session:
        session.run([v.initializer, distractor.initializer,
                     local_variable.initializer])
        path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
        simple_save.simple_save(
            session,
            path,
            inputs={"start": start},
            outputs={"output": output},
            legacy_init_op=local_variable.initializer)
    return path
  @test_util.run_v2_only
  def testRefVariableImport(self):
    """Tests conversion of a TF1 SavedModel imported into TF2."""
    saved = self._singleMetaGraphSavedModel()
    imported = load(saved)
    fn = imported.signatures["serving_default"]
    output_func = convert_to_constants.convert_variables_to_constants_v2(fn)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    input_data = {"start": constant_op.constant(1., shape=[1, 1])}
    root = tracking.AutoTrackable()
    self._testConvertedFunction(root, fn, output_func, input_data)
  @test_util.run_v2_only
  def testControlFlow(self):
    """Tests conversion of a model containing a tf.cond, without lowering it."""
    input_data = {
        "x": constant_op.constant([1., 2.], shape=[1, 2]),
        "b": constant_op.constant(True)
    }
    weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
    def true_fn(x):
      return math_ops.matmul(x, weights)
    def false_fn(x):
      return math_ops.add(x, weights)
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),
        tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
    ])
    def model(x, b):
      return control_flow_ops.cond(
          b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
    root = tracking.AutoTrackable()
    root.f = model
    input_func = root.f.get_concrete_function()
    input_func(**input_data)
    # lower_control_flow=False keeps the If/While ops instead of lowering
    # them to Switch/Merge before conversion.
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func, lower_control_flow=False)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.f, output_func, input_data)
  @test_util.run_v2_only
  def testStaticRnn(self):
    """Tests conversion of a static RNN (LSTM cell), without lowering control flow."""
    input_data = {
        "x":
            constant_op.constant(
                np.array(np.random.random_sample((3, 10)), dtype=np.float32))
    }
    cell = rnn_cell_impl.LSTMCell(10)
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32)
    ])
    def model(x):
      seq = array_ops.split(x, 3, 0)
      return rnn.static_rnn(
          cell, seq, dtype=dtypes.float32, sequence_length=[1])
    root = tracking.AutoTrackable()
    root.f = model
    input_func = root.f.get_concrete_function()
    output_func = convert_to_constants.convert_variables_to_constants_v2(
        input_func, lower_control_flow=False)
    constant_graph_def = output_func.graph.as_graph_def()
    self.assertEqual(0, self._getNumVariables(constant_graph_def))
    self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
    self._testConvertedFunction(root, root.f, output_func, input_data)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| [
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.saved_model.simple_save.simple_save",
"tensorflow.python.ops.rnn.static_rnn",
"tensorflow.python.ops.array_o... | [((13839, 13850), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (13848, 13850), False, 'from tensorflow.python.platform import test\n'), ((3180, 3204), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (3202, 3204), False, 'from tensorflow.python.training.tracking import tracking\n'), ((3246, 3302), 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', "{'mykey': converted_concrete_func}"], {}), "(root, save_dir, {'mykey': converted_concrete_func})\n", (3250, 3302), False, 'from tensorflow.python.saved_model.save import save\n'), ((3364, 3378), 'tensorflow.python.saved_model.load.load', 'load', (['save_dir'], {}), '(save_dir)\n', (3368, 3378), False, 'from tensorflow.python.saved_model.load import load\n'), ((3801, 3825), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (3823, 3825), False, 'from tensorflow.python.training.tracking import tracking\n'), ((3839, 3879), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: 2.0 * x)'], {}), '(lambda x: 2.0 * x)\n', (3860, 3879), False, 'from tensorflow.python.eager import def_function\n'), ((4008, 4037), 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), '(root, save_dir, to_save)\n', (4012, 4037), False, 'from tensorflow.python.saved_model.save import save\n'), ((4056, 4070), 'tensorflow.python.saved_model.load.load', 'load', (['save_dir'], {}), '(save_dir)\n', (4060, 4070), False, 'from tensorflow.python.saved_model.load import load\n'), ((4331, 4397), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (4385, 4397), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((4835, 4859), 
'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (4857, 4859), False, 'from tensorflow.python.training.tracking import tracking\n'), ((4874, 4897), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (4892, 4897), False, 'from tensorflow.python.ops import variables\n'), ((4911, 4934), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), '(2.0)\n', (4929, 4934), False, 'from tensorflow.python.ops import variables\n'), ((4947, 5001), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), '(lambda x: root.v1 * root.v2 * x)\n', (4968, 5001), False, 'from tensorflow.python.eager import def_function\n'), ((5209, 5275), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (5263, 5275), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((5729, 5753), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (5751, 5753), False, 'from tensorflow.python.training.tracking import tracking\n'), ((5768, 5791), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (5786, 5791), False, 'from tensorflow.python.ops import variables\n'), ((5805, 5828), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), '(2.0)\n', (5823, 5828), False, 'from tensorflow.python.ops import variables\n'), ((5841, 5895), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), '(lambda x: root.v1 * root.v2 * x)\n', (5862, 5895), False, 'from tensorflow.python.eager import def_function\n'), ((6103, 6169), 
'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (6157, 6169), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((6666, 6690), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (6688, 6690), False, 'from tensorflow.python.training.tracking import tracking\n'), ((6705, 6728), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (6723, 6728), False, 'from tensorflow.python.ops import variables\n'), ((6742, 6765), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), '(2.0)\n', (6760, 6765), False, 'from tensorflow.python.ops import variables\n'), ((6778, 6832), 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), '(lambda x: root.v1 * root.v2 * x)\n', (6799, 6832), False, 'from tensorflow.python.eager import def_function\n'), ((6962, 6991), 'tensorflow.python.saved_model.save.save', 'save', (['root', 'save_dir', 'to_save'], {}), '(root, save_dir, to_save)\n', (6966, 6991), False, 'from tensorflow.python.saved_model.save import save\n'), ((7010, 7024), 'tensorflow.python.saved_model.load.load', 'load', (['save_dir'], {}), '(save_dir)\n', (7014, 7024), False, 'from tensorflow.python.saved_model.load import load\n'), ((7237, 7303), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (7291, 7303), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((8404, 8470), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (8458, 8470), False, 'from 
tensorflow.python.framework import convert_to_constants\n'), ((8827, 8866), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1, 1]'}), '(1.0, shape=[1, 1])\n', (8847, 8866), False, 'from tensorflow.python.framework import constant_op\n'), ((9473, 9539), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {}), '(input_func)\n', (9527, 9539), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((9988, 9999), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9997, 9999), False, 'from tensorflow.python.framework import ops\n'), ((11068, 11079), 'tensorflow.python.saved_model.load.load', 'load', (['saved'], {}), '(saved)\n', (11072, 11079), False, 'from tensorflow.python.saved_model.load import load\n'), ((11146, 11204), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['fn'], {}), '(fn)\n', (11200, 11204), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((11486, 11510), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (11508, 11510), False, 'from tensorflow.python.training.tracking import tracking\n'), ((11772, 11838), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['[[0.1, 0.2], [0.3, 0.4]]'], {'dtype': 'dtypes.float32'}), '([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)\n', (11790, 11838), False, 'from tensorflow.python.ops import variables\n'), ((12282, 12306), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (12304, 12306), False, 'from tensorflow.python.training.tracking import tracking\n'), ((12422, 12518), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 
'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {'lower_control_flow': '(False)'}), '(input_func,\n lower_control_flow=False)\n', (12476, 12518), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((13013, 13039), 'tensorflow.python.ops.rnn_cell_impl.LSTMCell', 'rnn_cell_impl.LSTMCell', (['(10)'], {}), '(10)\n', (13035, 13039), False, 'from tensorflow.python.ops import rnn_cell_impl\n'), ((13321, 13345), 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), '()\n', (13343, 13345), False, 'from tensorflow.python.training.tracking import tracking\n'), ((13432, 13528), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_to_constants.convert_variables_to_constants_v2', (['input_func'], {'lower_control_flow': '(False)'}), '(input_func,\n lower_control_flow=False)\n', (13486, 13528), False, 'from tensorflow.python.framework import convert_to_constants\n'), ((3753, 3789), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), '(1.0, shape=[1])\n', (3773, 3789), False, 'from tensorflow.python.framework import constant_op\n'), ((4787, 4823), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), '(1.0, shape=[1])\n', (4807, 4823), False, 'from tensorflow.python.framework import constant_op\n'), ((5682, 5717), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[]'}), '(1.0, shape=[])\n', (5702, 5717), False, 'from tensorflow.python.framework import constant_op\n'), ((6618, 6654), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1]'}), '(1.0, shape=[1])\n', (6638, 6654), False, 'from tensorflow.python.framework import constant_op\n'), ((8134, 8170), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], 
{'shape': '[1]'}), '(1.0, shape=[1])\n', (8154, 8170), False, 'from tensorflow.python.framework import constant_op\n'), ((10050, 10121), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'shape': '[1, 1]', 'dtype': 'dtypes.float32', 'name': '"""start"""'}), "(shape=[1, 1], dtype=dtypes.float32, name='start')\n", (10071, 10121), False, 'from tensorflow.python.ops import array_ops\n'), ((10152, 10198), 'tensorflow.python.ops.variables.RefVariable', 'variables.RefVariable', (['(-1.0)'], {'name': '"""distractor"""'}), "(-1.0, name='distractor')\n", (10173, 10198), False, 'from tensorflow.python.ops import variables\n'), ((10208, 10244), 'tensorflow.python.ops.variables.RefVariable', 'variables.RefVariable', (['(3.0)'], {'name': '"""v"""'}), "(3.0, name='v')\n", (10229, 10244), False, 'from tensorflow.python.ops import variables\n'), ((10267, 10377), 'tensorflow.python.ops.variables.VariableV1', 'variables.VariableV1', (['(1.0)'], {'collections': '[ops.GraphKeys.LOCAL_VARIABLES]', 'trainable': '(False)', 'use_resource': '(True)'}), '(1.0, collections=[ops.GraphKeys.LOCAL_VARIABLES],\n trainable=False, use_resource=True)\n', (10287, 10377), False, 'from tensorflow.python.ops import variables\n'), ((10429, 10490), 'tensorflow.python.ops.array_ops.identity', 'array_ops.identity', (['(start * v * local_variable)'], {'name': '"""output"""'}), "(start * v * local_variable, name='output')\n", (10447, 10490), False, 'from tensorflow.python.ops import array_ops\n'), ((11435, 11474), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'shape': '[1, 1]'}), '(1.0, shape=[1, 1])\n', (11455, 11474), False, 'from tensorflow.python.framework import constant_op\n'), ((11665, 11711), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0, 2.0]'], {'shape': '[1, 2]'}), '([1.0, 2.0], shape=[1, 2])\n', (11685, 11711), False, 'from tensorflow.python.framework import constant_op\n'), ((11724, 11750), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(True)'], {}), '(True)\n', (11744, 11750), False, 'from tensorflow.python.framework import constant_op\n'), ((11873, 11900), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['x', 'weights'], {}), '(x, weights)\n', (11888, 11900), False, 'from tensorflow.python.ops import math_ops\n'), ((11936, 11960), 'tensorflow.python.ops.math_ops.add', 'math_ops.add', (['x', 'weights'], {}), '(x, weights)\n', (11948, 11960), False, 'from tensorflow.python.ops import math_ops\n'), ((13191, 13215), 'tensorflow.python.ops.array_ops.split', 'array_ops.split', (['x', '(3)', '(0)'], {}), '(x, 3, 0)\n', (13206, 13215), False, 'from tensorflow.python.ops import array_ops\n'), ((13229, 13297), 'tensorflow.python.ops.rnn.static_rnn', 'rnn.static_rnn', (['cell', 'seq'], {'dtype': 'dtypes.float32', 'sequence_length': '[1]'}), '(cell, seq, dtype=dtypes.float32, sequence_length=[1])\n', (13243, 13297), False, 'from tensorflow.python.ops import rnn\n'), ((9006, 9050), 'tensorflow.python.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), '(units=1, input_shape=[1])\n', (9024, 9050), False, 'from tensorflow.python import keras\n'), ((10502, 10523), 'tensorflow.python.client.session.Session', 'session_lib.Session', ([], {}), '()\n', (10521, 10523), True, 'from tensorflow.python.client import session as session_lib\n'), ((10734, 10873), 'tensorflow.python.saved_model.simple_save.simple_save', 'simple_save.simple_save', (['session', 'path'], {'inputs': "{'start': start}", 'outputs': "{'output': output}", 'legacy_init_op': 'local_variable.initializer'}), "(session, path, inputs={'start': start}, outputs={\n 'output': output}, legacy_init_op=local_variable.initializer)\n", (10757, 10873), False, 'from tensorflow.python.saved_model import simple_save\n'), ((7912, 7935), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), '(2.0)\n', (7930, 
7935), False, 'from tensorflow.python.ops import variables\n'), ((8061, 8084), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (8079, 8084), False, 'from tensorflow.python.ops import variables\n'), ((12015, 12073), 'tensorflow.python.framework.tensor_spec.TensorSpec', 'tensor_spec.TensorSpec', ([], {'shape': '[1, 2]', 'dtype': 'dtypes.float32'}), '(shape=[1, 2], dtype=dtypes.float32)\n', (12037, 12073), False, 'from tensorflow.python.framework import tensor_spec\n'), ((12083, 12134), 'tensorflow.python.framework.tensor_spec.TensorSpec', 'tensor_spec.TensorSpec', ([], {'shape': '()', 'dtype': 'dtypes.bool'}), '(shape=(), dtype=dtypes.bool)\n', (12105, 12134), False, 'from tensorflow.python.framework import tensor_spec\n'), ((12942, 12974), 'numpy.random.random_sample', 'np.random.random_sample', (['(3, 10)'], {}), '((3, 10))\n', (12965, 12974), True, 'import numpy as np\n'), ((13094, 13153), 'tensorflow.python.framework.tensor_spec.TensorSpec', 'tensor_spec.TensorSpec', ([], {'shape': '[3, 10]', 'dtype': 'dtypes.float32'}), '(shape=[3, 10], dtype=dtypes.float32)\n', (13116, 13153), False, 'from tensorflow.python.framework import tensor_spec\n'), ((10714, 10723), 'tensorflow.python.framework.ops.uid', 'ops.uid', ([], {}), '()\n', (10721, 10723), False, 'from tensorflow.python.framework import ops\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 15:30:26 2019
@author: nipun
"""
"""
PROBLEM 4, PART B

Eigenface reconstruction: project the LFW face images onto their first
150 principal components, reconstruct them, and plot reconstructions
next to the originals.
"""
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import PCA as RandomizedPCA
import matplotlib.pyplot as plt
import numpy as np
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)  # (n_samples, height, width) of the fetched images
n_samples, h, w = faces.images.shape
print(n_samples)
n_components = 150
# Randomized SVD keeps fitting the first 150 components fast.
pca = RandomizedPCA(n_components=n_components, svd_solver='randomized')
x_proj = pca.fit_transform(faces.data)
# Reconstruction: map the 150-D projections back to pixel space.
x_inv_proj = pca.inverse_transform(x_proj)
# Use the dataset's own dimensions instead of the hard-coded (1348, 62, 47),
# so the script keeps working if the fetched dataset size changes.
x_proj_img = np.reshape(x_inv_proj, (n_samples, h, w))
# The first 24 reconstructed images
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
    ax.imshow(x_proj_img[i], cmap='bone')
plt.show()
# Original pictures (the first 24)
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
    ax.imshow(faces.images[i], cmap='bone')
plt.show()
| [
"sklearn.datasets.fetch_lfw_people",
"numpy.reshape",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.show"
] | [((297, 338), 'sklearn.datasets.fetch_lfw_people', 'fetch_lfw_people', ([], {'min_faces_per_person': '(60)'}), '(min_faces_per_person=60)\n', (313, 338), False, 'from sklearn.datasets import fetch_lfw_people\n'), ((517, 582), 'sklearn.decomposition.PCA', 'RandomizedPCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""'}), "(n_components=n_components, svd_solver='randomized')\n", (530, 582), True, 'from sklearn.decomposition import PCA as RandomizedPCA\n'), ((754, 792), 'numpy.reshape', 'np.reshape', (['x_inv_proj', '(1348, 62, 47)'], {}), '(x_inv_proj, (1348, 62, 47))\n', (764, 792), True, 'import numpy as np\n'), ((1093, 1103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1101, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1418, 1420), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import os
import csv
import numpy as np
from torch.nn.utils.rnn import pad_sequence
# Number of leading time steps (rows) kept from each CSV time series.
end_time = 360
def csv_loader(path, end_time=end_time):
    """Load a CSV time series as a float32 array.

    path - CSV file with a header row; the first column of every row is
           dropped (it holds the row index/timestamp, not a feature)
    end_time - number of leading rows to keep (defaults to the module-level
               constant, 360, preserving the original behaviour)

    Returns an ndarray of shape (min(rows, end_time), n_features), dtype float32.
    """
    with open(path, 'r') as f:
        reader = csv.reader(f)
        vec = [row[1:] for row in reader]
        # Discard the header row.
        vec.pop(0)
        vec = np.array(vec).astype(np.float32)
    return vec[:end_time]
def csv_loader_criteria_list(path):
    """Return the criteria names from a CSV file's header row.

    Drops the first column of every row (mirroring csv_loader's column
    layout) and returns the trimmed header row as a numpy string array.
    """
    with open(path, 'r') as csvfile:
        trimmed_rows = []
        for row in csv.reader(csvfile):
            trimmed_rows.append(row[1:])
    return np.array(trimmed_rows[0])
def label_loader(file_list):
    """Map file paths to class labels.

    file_list - iterable of relative paths whose first directory component
                encodes the class: 'born' -> 0, 'abort' -> 1

    Returns the list of integer labels, in the same order as file_list.
    Exits the process for any path whose class directory is unknown
    (preserving the original fail-fast behaviour).
    """
    label = []
    for fl in file_list:
        # Class name is everything before the first '/'.
        prefix = fl[:fl.find('/')]
        if prefix == 'born':
            label.append(0)
        elif prefix == 'abort':
            label.append(1)
        else:
            sys.exit()
    # Bug fix: the original built this list but never returned it,
    # so callers always received None.
    return label
def pad_collate(batch):
    """Collate variable-length (x, y) tensor pairs into zero-padded batches.

    batch - iterable of (x, y) tuples of torch tensors whose first
            dimension (time) may differ between samples

    Returns (xx_pad, yy_pad): batch-first tensors padded with zeros up to
    the longest sequence in the batch.
    """
    xx, yy = zip(*batch)
    # Note: the original also computed per-sample lengths (x_lens/y_lens)
    # but never used or returned them; removed as dead code.
    xx_pad = pad_sequence(xx, batch_first=True, padding_value=0)
    yy_pad = pad_sequence(yy, batch_first=True, padding_value=0)
    return xx_pad, yy_pad
| [
"numpy.array",
"csv.reader",
"torch.nn.utils.rnn.pad_sequence",
"sys.exit"
] | [((483, 499), 'numpy.array', 'np.array', (['vec[0]'], {}), '(vec[0])\n', (491, 499), True, 'import numpy as np\n'), ((879, 930), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['xx'], {'batch_first': '(True)', 'padding_value': '(0)'}), '(xx, batch_first=True, padding_value=0)\n', (891, 930), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((944, 995), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['yy'], {'batch_first': '(True)', 'padding_value': '(0)'}), '(yy, batch_first=True, padding_value=0)\n', (956, 995), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((183, 196), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (193, 196), False, 'import csv\n'), ((416, 429), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (426, 429), False, 'import csv\n'), ((272, 285), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (280, 285), True, 'import numpy as np\n'), ((735, 745), 'sys.exit', 'sys.exit', ([], {}), '()\n', (743, 745), False, 'import sys\n')] |
"""
Helper functions
"""
from typing import Tuple
import numpy as np
import pybullet as p
import cv2
from scipy import interpolate
# https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
# Visually distinct RGB triples (0-255 per channel) used to colour-code objects.
RGB_COLOR_255 = [(230, 25, 75),  # red
                 (60, 180, 75),  # green
                 (255, 225, 25),  # yellow
                 (0, 130, 200),  # blue
                 (245, 130, 48),  # orange
                 (145, 30, 180),  # purple
                 (70, 240, 240),  # cyan
                 (240, 50, 230),  # magenta
                 (210, 245, 60),  # lime
                 (250, 190, 190),  # pink
                 (0, 128, 128),  # teal
                 (230, 190, 255),  # lavender
                 (170, 110, 40),  # brown
                 (255, 250, 200),  # beige
                 (128, 0, 0),  # maroon
                 (170, 255, 195),  # mint (was mislabeled "lavender")
                 (128, 128, 0),  # olive
                 (255, 215, 180),  # apricot
                 (0, 0, 128),  # navy
                 (128, 128, 128),  # grey
                 (0, 0, 0),  # black (was mislabeled "white")
                 (255, 255, 255)]  # white (was mislabeled "black")
class Boundary(object):
    """Axis-aligned cuboid region of the workspace.

    Keeps track of the objects placed inside it and can sample new,
    non-overlapping poses for them via pybullet.
    """

    def __init__(self, boundary: [list, tuple, np.ndarray]):
        self._boundary, self._area = None, 0
        self.set_boundary(boundary)
        self._contained_objects = []
        self._contained_object_positions = []

    def _get_position_within_boundary(self) -> tuple:
        # One uniform draw per axis, in x, y, z order (the RNG call order
        # is preserved so seeded runs stay reproducible).
        coords = []
        for axis in range(3):
            low, high = self._boundary[axis][0], self._boundary[axis][1]
            coords.append(np.random.uniform(low, high))
        return tuple(coords)

    def set_boundary(self, boundary: [list, tuple, np.ndarray]):
        """Store the (3, 2) min/max boundary and cache its volume."""
        assert len(boundary) == 3  # assume the boundary is a cube
        self._boundary = (boundary if isinstance(boundary, np.ndarray)
                          else np.array(boundary))
        extents = self._boundary[:, 1] - self._boundary[:, 0]
        self._area = float(np.prod(extents))

    def get_area(self) -> float:
        """Return the cached volume of the boundary cuboid."""
        return self._area

    def add(self, obj_id: int,
            sample: bool = True,
            min_rotation: tuple = (0.0, 0.0, -3.14),
            max_rotation: tuple = (0.0, 0.0, 3.14),
            min_distance: float = 0.01) -> bool:
        """Register obj_id inside the boundary, optionally sampling a pose.

        When sample is False the object keeps its current pose. Otherwise a
        random Euler rotation within [min_rotation, max_rotation] is drawn
        and up to 100 candidate positions are tried until one stays at
        least min_distance away from every object registered so far.
        Returns True when the object was successfully placed/registered.
        """
        if not sample:
            # Keep the object's existing pose; registration always succeeds.
            success = True
            pos, rotation = p.getBasePositionAndOrientation(obj_id)
            new_pos = np.array(pos)
        else:
            rotation = p.getQuaternionFromEuler(
                np.random.uniform(list(min_rotation), list(max_rotation)))
            success, attempt_num = False, 0
            new_pos = None
            while not success and attempt_num < 100:
                new_pos = np.array(self._get_position_within_boundary())
                # Accept the candidate only if it keeps its distance from
                # every previously registered object.
                success = all(
                    np.linalg.norm(new_pos - placed) >= min_distance
                    for placed in self._contained_object_positions)
                attempt_num += 1
        if success:
            p.resetBasePositionAndOrientation(obj_id, new_pos, rotation)
            self._contained_objects.append(obj_id)
            self._contained_object_positions.append(new_pos)
        return success

    def clear(self) -> None:
        """Forget the registered object ids.

        NOTE(review): _contained_object_positions is not reset here (as in
        the original); later min-distance checks still see old positions —
        confirm whether that is intentional.
        """
        self._contained_objects = []
class Trajectory(object):
""" Generate a 2-D (x, y) trajectory using the heuristics """
def __init__(self, workspace_limits: np.ndarray, num_points=2000, seed=1024):
self.workspace_limits = workspace_limits.copy()
self._seed = seed
self.xi, self.yi = None, None
self.pts = None
self._step = 0
self.generate_trajectory(num_points)
def generate_trajectory(self, num_points):
""" Generate a trajectory using sampled waypoints in four blocks and B-spline interpolation. """
st0 = np.random.get_state()
if self.xi is None and self.yi is None:
# using the seed to generate a specific trajectory
np.random.seed(self._seed)
# Divide the work space into four blocks
boundary_x = np.random.uniform(self.workspace_limits[0, :].mean() - 0.1 * self.workspace_limits[0, :].ptp(),
self.workspace_limits[0, :].mean() + 0.1 * self.workspace_limits[0, :].ptp())
boundary_y = np.random.uniform(self.workspace_limits[1, :].mean() - 0.1 * self.workspace_limits[1, :].ptp(),
self.workspace_limits[1, :].mean() + 0.1 * self.workspace_limits[1, :].ptp())
# upper left
pts_ul = np.random.uniform(low=(self.workspace_limits[0, 0], boundary_y),
high=(boundary_x, self.workspace_limits[1, 1]),
size=(3, 2))
pts_ul = pts_ul[pts_ul[:, 1].argsort()[::-1]] # by y, descend
# bottom left
pts_bl = np.random.uniform(low=(self.workspace_limits[0, 0], self.workspace_limits[1, 0]),
high=(boundary_x, boundary_y),
size=(3, 2))
pts_bl = pts_bl[pts_bl[:, 1].argsort()[::-1]] # by y, descend
# bottom right
pts_br = np.random.uniform(low=(boundary_x, self.workspace_limits[1, 0]),
high=(self.workspace_limits[0, 1], boundary_y),
size=(3, 2))
pts_br = pts_br[pts_br[:, 0].argsort()] # by x, ascend
# upper left
pts_ur = np.random.uniform(low=(boundary_x, boundary_y),
high=(self.workspace_limits[0, 1], self.workspace_limits[1, 1]),
size=(3, 2))
pts_ur = pts_ur[pts_ur[:, 0].argsort()[::-1]] # by x, ascend
pts = np.concatenate([pts_ul, pts_bl, pts_br, pts_ur, pts_ul[0].reshape((1, 2))], axis=0)
self.pts = pts
# interpolate the smooth curve using B-spline
tck, u = interpolate.splprep(x=[pts[:, 0], pts[:, 1]], s=0, per=True)
# evaluate the spline fits for 1000 evenly spaced distance values
xi, yi = interpolate.splev(np.linspace(0, 1, num_points), tck)
if self.xi is None and self.yi is None:
# restore the numpy state
np.random.set_state(st0)
self.xi, self.yi = xi, yi
self._step = np.random.randint(len(self.xi))
def step(self) -> list:
""" Return the next (x, y) position. """
self._step = (self._step + 1) % len(self.xi)
return [self.xi[self._step], self.yi[self._step]]
def get_step(self) -> int:
return self._step
def set_step(self, step: int):
self._step = step
def reset(self):
""" Reset the _step to be the origin. """
self._step = 0
def seed(self, seed: int):
self._seed = seed
def get_centroid(mask: np.ndarray, target: int) -> Tuple[bool, np.ndarray]:
"""
Get the centroid of the target
:return: True with normalized centroids [-1, 1] if target in the mask picture else False
"""
target_mask = (mask == target).astype(np.float)
img_shape = mask.shape[:2]
if np.sum(target_mask > 0):
M = cv2.moments(target_mask)
cX = M["m10"] / M["m00"]
cY = M["m01"] / M["m00"]
normalized_x = (cX - img_shape[1] / 2) / (img_shape[1] - img_shape[1] / 2)
normalized_y = (cY - img_shape[0] / 2) / (img_shape[0] - img_shape[0] / 2)
return True, np.array([normalized_x, normalized_y])
else:
return False, np.array([np.NaN, np.NaN])
| [
"numpy.prod",
"numpy.random.get_state",
"numpy.random.set_state",
"scipy.interpolate.splprep",
"pybullet.getBasePositionAndOrientation",
"numpy.linalg.norm",
"numpy.sum",
"numpy.array",
"pybullet.getQuaternionFromEuler",
"numpy.linspace",
"numpy.random.seed",
"cv2.moments",
"numpy.random.uni... | [((7731, 7754), 'numpy.sum', 'np.sum', (['(target_mask > 0)'], {}), '(target_mask > 0)\n', (7737, 7754), True, 'import numpy as np\n'), ((1502, 1563), 'numpy.random.uniform', 'np.random.uniform', (['self._boundary[0][0]', 'self._boundary[0][1]'], {}), '(self._boundary[0][0], self._boundary[0][1])\n', (1519, 1563), True, 'import numpy as np\n'), ((1589, 1650), 'numpy.random.uniform', 'np.random.uniform', (['self._boundary[1][0]', 'self._boundary[1][1]'], {}), '(self._boundary[1][0], self._boundary[1][1])\n', (1606, 1650), True, 'import numpy as np\n'), ((1676, 1737), 'numpy.random.uniform', 'np.random.uniform', (['self._boundary[2][0]', 'self._boundary[2][1]'], {}), '(self._boundary[2][0], self._boundary[2][1])\n', (1693, 1737), True, 'import numpy as np\n'), ((4442, 4463), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (4461, 4463), True, 'import numpy as np\n'), ((5170, 5300), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(self.workspace_limits[0, 0], boundary_y)', 'high': '(boundary_x, self.workspace_limits[1, 1])', 'size': '(3, 2)'}), '(low=(self.workspace_limits[0, 0], boundary_y), high=(\n boundary_x, self.workspace_limits[1, 1]), size=(3, 2))\n', (5187, 5300), True, 'import numpy as np\n'), ((5476, 5605), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(self.workspace_limits[0, 0], self.workspace_limits[1, 0])', 'high': '(boundary_x, boundary_y)', 'size': '(3, 2)'}), '(low=(self.workspace_limits[0, 0], self.workspace_limits[1,\n 0]), high=(boundary_x, boundary_y), size=(3, 2))\n', (5493, 5605), True, 'import numpy as np\n'), ((5783, 5913), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(boundary_x, self.workspace_limits[1, 0])', 'high': '(self.workspace_limits[0, 1], boundary_y)', 'size': '(3, 2)'}), '(low=(boundary_x, self.workspace_limits[1, 0]), high=(self\n .workspace_limits[0, 1], boundary_y), size=(3, 2))\n', (5800, 5913), True, 'import numpy as np\n'), ((6081, 6211), 
'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(boundary_x, boundary_y)', 'high': '(self.workspace_limits[0, 1], self.workspace_limits[1, 1])', 'size': '(3, 2)'}), '(low=(boundary_x, boundary_y), high=(self.workspace_limits\n [0, 1], self.workspace_limits[1, 1]), size=(3, 2))\n', (6098, 6211), True, 'import numpy as np\n'), ((6540, 6600), 'scipy.interpolate.splprep', 'interpolate.splprep', ([], {'x': '[pts[:, 0], pts[:, 1]]', 's': '(0)', 'per': '(True)'}), '(x=[pts[:, 0], pts[:, 1]], s=0, per=True)\n', (6559, 6600), False, 'from scipy import interpolate\n'), ((7768, 7792), 'cv2.moments', 'cv2.moments', (['target_mask'], {}), '(target_mask)\n', (7779, 7792), False, 'import cv2\n'), ((1985, 2003), 'numpy.array', 'np.array', (['boundary'], {}), '(boundary)\n', (1993, 2003), True, 'import numpy as np\n'), ((2083, 2135), 'numpy.prod', 'np.prod', (['(self._boundary[:, 1] - self._boundary[:, 0])'], {}), '(self._boundary[:, 1] - self._boundary[:, 0])\n', (2090, 2135), True, 'import numpy as np\n'), ((2786, 2825), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['obj_id'], {}), '(obj_id)\n', (2817, 2825), True, 'import pybullet as p\n'), ((2848, 2861), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (2856, 2861), True, 'import numpy as np\n'), ((3094, 3128), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['rotation'], {}), '(rotation)\n', (3118, 3128), True, 'import pybullet as p\n'), ((3623, 3683), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['obj_id', 'new_pos', 'rotation'], {}), '(obj_id, new_pos, rotation)\n', (3656, 3683), True, 'import pybullet as p\n'), ((4587, 4613), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (4601, 4613), True, 'import numpy as np\n'), ((6710, 6739), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_points'], {}), '(0, 1, num_points)\n', (6721, 6739), True, 'import numpy as np\n'), ((6845, 6869), 
'numpy.random.set_state', 'np.random.set_state', (['st0'], {}), '(st0)\n', (6864, 6869), True, 'import numpy as np\n'), ((8046, 8084), 'numpy.array', 'np.array', (['[normalized_x, normalized_y]'], {}), '([normalized_x, normalized_y])\n', (8054, 8084), True, 'import numpy as np\n'), ((8117, 8143), 'numpy.array', 'np.array', (['[np.NaN, np.NaN]'], {}), '([np.NaN, np.NaN])\n', (8125, 8143), True, 'import numpy as np\n'), ((3442, 3471), 'numpy.linalg.norm', 'np.linalg.norm', (['(new_pos - obj)'], {}), '(new_pos - obj)\n', (3456, 3471), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
n=800
niter=150
pow=2
x0=-2
x1=1
y0=-1
y1=1
x,y=np.meshgrid(np.linspace(x0,x1,n),np.linspace(y0,y1,n))
c=x + 1j*y
z=np.zeros((n,n))
k=np.zeros((n,n))
for i in range(1,niter):
z=z**pow+c
k[np.logical_and(abs(z)>2,k==0)]=niter-i
plt.imshow(k,cmap='YlGn')
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((188, 204), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (196, 204), True, 'import numpy as np\n'), ((207, 223), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (215, 223), True, 'import numpy as np\n'), ((322, 348), 'matplotlib.pyplot.imshow', 'plt.imshow', (['k'], {'cmap': '"""YlGn"""'}), "(k, cmap='YlGn')\n", (332, 348), True, 'import matplotlib.pyplot as plt\n'), ((349, 359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (357, 359), True, 'import matplotlib.pyplot as plt\n'), ((128, 150), 'numpy.linspace', 'np.linspace', (['x0', 'x1', 'n'], {}), '(x0, x1, n)\n', (139, 150), True, 'import numpy as np\n'), ((149, 171), 'numpy.linspace', 'np.linspace', (['y0', 'y1', 'n'], {}), '(y0, y1, n)\n', (160, 171), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, ***,
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import argparse
import random
import numpy as np
import joblib
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.metrics import geometric_mean_score
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import confusion_matrix
__author__ = "<NAME>"
def load_data(data, resample, seed):
''' Load and process the crash data. '''
# Divide the dataset into three parts: training, validation, and testing.
shuffle = np.random.permutation(len(data))
data = data[shuffle]
data_train = data[:int(len(data) * 0.7)]
data_test = data[int(len(data) * 0.7):int(len(data) * 0.85)]
data_val = data[int(len(data) * 0.85):]
# Data re-sampling to reduce imbalance problems.
if resample == 'US':
enn = EditedNearestNeighbours(sampling_strategy=[0], n_neighbors=5, kind_sel="all")
X_enn, y_enn = enn.fit_resample(data_train[:, :-1], data_train[:, -1])
data_train = np.zeros((len(X_enn), 10))
data_train[:, :-1], data_train[:, -1] = X_enn, y_enn
enn = EditedNearestNeighbours(sampling_strategy=[1], n_neighbors=3, kind_sel="all")
X_enn, y_enn = enn.fit_resample(data_train[:, :-1], data_train[:, -1])
data_train = np.zeros((len(X_enn), 10))
data_train[:, :-1], data_train[:, -1] = X_enn, y_enn
elif resample == 'OS':
smo = SMOTE(random_state=seed, sampling_strategy={1: 1900, 2: 1400, 3: 1000})
X_smo, y_smo = smo.fit_resample(data_train[:, :-1], data_train[:, -1])
data_train = np.zeros((len(X_smo), 10))
data_train[:, :-1], data_train[:, -1] = X_smo, y_smo
elif resample == 'CS':
smo = SMOTE(random_state=seed, sampling_strategy={1: 2000, 2: 1200, 3: 800})
enn = EditedNearestNeighbours(sampling_strategy=[0, 1, 2, 3], n_neighbors=3)
smo_enn = SMOTEENN(random_state=seed, smote=smo, enn=enn)
X_enn, y_enn = smo_enn.fit_resample(data_train[:, :-1], data_train[:, -1])
data_train = np.zeros((len(X_enn), 10))
data_train[:, :-1], data_train[:, -1] = X_enn, y_enn
else:
print('Wrong re-sampling method!')
return
return data_train, data_val, data_test
def evaluate_model(true, pred, pri):
''' Evaluate the model. '''
accu = 100. * (1 - np.count_nonzero(true - pred) / float(len(true)))
conf_mat = confusion_matrix(true, pred)
G_mean = geometric_mean_score(true, pred)
report = classification_report_imbalanced(true, pred, digits=3)
if pri:
print('Test | Accuracy: ' + str(np.around(accu, 1)) + '%')
print('Test | G-mean: ' + str(np.around(G_mean, 3)))
print(conf_mat)
print(report)
def train_SVM(data_train, data_val, data_test, opt):
''' Train and test the SVM-based occupant injury prediction model. '''
# Define the parameter matrix for grid search.
C_list = [1, 10, 100]
kernel_list = ['rbf', 'sigmoid'] * 3
Gamma_list = [0.1, 0.01, 0.001, 'auto'] * 6
best_G_mean, best_i = 0, 0
# Start the grid search for the optimal parameter combination.
for i in range(24):
# Obtain parameters.
C = C_list[i // 8]
kernel = kernel_list[i // 4]
Gamma = Gamma_list[i]
# Load the SVM-based model.
SVM = SVC(C=C, kernel=kernel, gamma=Gamma)
# Train the model.
SVM.fit(data_train[:, :-1], data_train[:, -1])
# Calculate the prediction accuracy.
pred = SVM.predict(data_val[:, :-1])
true = data_val[:, -1]
G_mean = geometric_mean_score(true, pred)
# Save the model with the highest accuracy.
if G_mean > best_G_mean:
best_G_mean = G_mean
best_i = i
# Load the model with the highest accuracy.
C = C_list[best_i // 8]
kernel = kernel_list[best_i // 4]
Gamma = Gamma_list[best_i]
SVM = SVC(C=C, kernel=kernel, gamma=Gamma)
SVM.fit(data_train[:, :-1], data_train[:, -1])
# Save the optimal model parameters.
if opt.save_para:
joblib.dump(SVM, "Saved_Model_params\model_SVM_%s.m" % opt.re_samp)
# Obtain the prediction performance.
evaluate_model(data_test[:, -1], SVM.predict(data_test[:, :-1]), opt.print_inf)
return SVM
def train_DT(data_train, data_val, data_test, opt):
''' Train and test the DT-based occupant injury prediction model. '''
# Define the parameter matrix for grid search.
criterion_list = ['entropy', 'gini']
splitter_list = ['best', 'random'] * 2
maxdepth_list = [None, 10, 20, 50] * 4
best_G_mean, best_i = 0, 0
# Start the grid search for the optimal parameter combination.
for i in range(16):
# Obtain parameters.
criterion = criterion_list[i // 8]
splitter = splitter_list[i // 4]
maxdepth = maxdepth_list[i]
# Load the SVM-based model.
DT = DecisionTreeClassifier(criterion=criterion, splitter=splitter, max_depth=maxdepth)
# Train the model.
DT.fit(data_train[:, :-1], data_train[:, -1])
# Calculate the prediction accuracy.
pred = DT.predict(data_val[:, :-1])
true = data_val[:, -1]
G_mean = geometric_mean_score(true, pred)
# Save the model with the highest accuracy.
if G_mean > best_G_mean:
best_G_mean = G_mean
best_i = i
# Load the model with the highest accuracy.
criterion = criterion_list[best_i // 8]
splitter = splitter_list[best_i // 4]
maxdepth = maxdepth_list[best_i]
DT = DecisionTreeClassifier(criterion=criterion, splitter=splitter, max_depth=maxdepth)
DT.fit(data_train[:, :-1], data_train[:, -1])
# Save the optimal model parameters.
if opt.save_para:
joblib.dump(DT, "Saved_Model_params\model_DT_%s.m" % opt.re_samp)
# Obtain the prediction performance.
evaluate_model(data_test[:, -1], DT.predict(data_test[:, :-1]), opt.print_inf)
return DT
def train_KNN(data_train, data_val, data_test, opt):
''' Train and test the KNN-based occupant injury prediction model. '''
# Define the parameter matrix for grid search.
n_neighbors_list = [3, 5, 10]
algorithm_list = ['ball_tree', 'kd_tree', 'brute'] * 3
p_list = [1, 2, 3] * 9
best_G_mean, best_i = 0, 0
# Start the grid search for the optimal parameter combination.
for i in range(27):
# Obtain parameters.
n_neighbors = n_neighbors_list[i // 9]
algorithm = algorithm_list[i // 3]
p = p_list[i]
# Load the KNN-based model.
KNN = KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=algorithm, p=p)
# Train the model.
KNN.fit(data_train[:, :-1], data_train[:, -1])
# Calculate the prediction accuracy.
pred = KNN.predict(data_val[:, :-1])
true = data_val[:, -1]
G_mean = geometric_mean_score(true, pred)
# Save the model with the highest accuracy.
if G_mean > best_G_mean:
best_G_mean = G_mean
best_i = i
# Load the model with the highest accuracy.
n_neighbors = n_neighbors_list[best_i // 9]
algorithm = algorithm_list[best_i // 3]
p = p_list[best_i]
KNN = KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=algorithm, p=p)
KNN.fit(data_train[:, :-1], data_train[:, -1])
# Save the optimal model parameters.
if opt.save_para:
joblib.dump(KNN, "Saved_Model_params\model_KNN_%s.m" % opt.re_samp)
# Obtain the prediction performance.
evaluate_model(data_test[:, -1], KNN.predict(data_test[:, :-1]), opt.print_inf)
return KNN
def train_NB(data_train, data_val, data_test, opt):
''' Train and test the NB-based occupant injury prediction model. '''
# Load the model with the highest accuracy.
NB = GaussianNB()
NB.fit(data_train[:, :-1], data_train[:, -1])
# Save the optimal model parameters.
if opt.save_para:
joblib.dump(NB, "Saved_Model_params\model_NB_%s.m" % opt.re_samp)
# Obtain the prediction performance.
evaluate_model(data_test[:, -1], NB.predict(data_test[:, :-1]), opt.print_inf)
return NB
def train_AB(data_train, data_val, data_test, opt, base_estimator_list, seed):
''' Train and test the AB-based occupant injury prediction model. '''
# Define the parameter matrix for grid search.
base_estimator_list = base_estimator_list
n_estimators_list = [3, 10, 30] * 4
learning_rate_list = [0.1, 0.01] * 12
best_G_mean, best_i = 0, 0
# Start the grid search for the optimal parameter combination.
for i in range(18):
# Obtain parameters.
base_estimator = base_estimator_list[i // 6]
n_estimators = n_estimators_list[i // 2]
learning_rate = learning_rate_list[i]
# Load the AB-based model.
AB = AdaBoostClassifier(base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate,
algorithm='SAMME', random_state=seed)
# Train the model.
AB.fit(data_train[:, :-1], data_train[:, -1])
# Calculate the prediction accuracy.
pred = AB.predict(data_val[:, :-1])
true = data_val[:, -1]
G_mean = geometric_mean_score(true, pred)
# Save the model with the highest accuracy.
if G_mean > best_G_mean:
best_G_mean = G_mean
best_i = i
# Load the model with the highest accuracy.
base_estimator = base_estimator_list[best_i // 6]
n_estimators = n_estimators_list[best_i // 2]
learning_rate = learning_rate_list[best_i]
AB = AdaBoostClassifier(base_estimator=base_estimator, n_estimators=n_estimators, learning_rate=learning_rate,
algorithm='SAMME', random_state=seed)
AB.fit(data_train[:, :-1], data_train[:, -1])
# Save the optimal model parameters.
if opt.save_para:
joblib.dump(AB, "Saved_Model_params\model_AB_%s.m" % opt.re_samp)
# Obtain the prediction performance.
evaluate_model(data_test[:, -1], AB.predict(data_test[:, :-1]), opt.print_inf)
return AB
def main():
''' Train and test the machine-learning occupant injury prediction models. '''
parser = argparse.ArgumentParser()
parser.add_argument('--rand_seed', type=int, default=123, help='Random seed')
parser.add_argument('--re_samp', type=str, default='OS', help='Re-sampling methods: US, OS, CS')
parser.add_argument('--print_inf', action='store_false', help='print the information of the training process')
parser.add_argument('--save_para', action='store_false', help='save the model parameters')
opt = parser.parse_args()
# Define the random seed.
seed = opt.rand_seed
np.random.seed(seed)
random.seed(seed)
# Load the real-world crash data.
data = np.load('dataset/data_pro.npy')
data_train, data_val, data_test = load_data(data, opt.re_samp, seed)
# Train the five machine-learning models.
SVM = train_SVM(data_train, data_val, data_test, opt)
DT = train_DT(data_train, data_val, data_test, opt)
KNN = train_KNN(data_train, data_val, data_test, opt)
NB = train_NB(data_train, data_val, data_test, opt)
AB = train_AB(data_train, data_val, data_test, opt, [SVM, DT, NB], seed)
if __name__ == "__main__":
main()
| [
"sklearn.metrics.confusion_matrix",
"argparse.ArgumentParser",
"imblearn.under_sampling.EditedNearestNeighbours",
"imblearn.over_sampling.SMOTE",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"random.seed",
"imblearn.combine.S... | [((3050, 3078), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true', 'pred'], {}), '(true, pred)\n', (3066, 3078), False, 'from sklearn.metrics import confusion_matrix\n'), ((3092, 3124), 'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['true', 'pred'], {}), '(true, pred)\n', (3112, 3124), False, 'from imblearn.metrics import geometric_mean_score\n'), ((3138, 3192), 'imblearn.metrics.classification_report_imbalanced', 'classification_report_imbalanced', (['true', 'pred'], {'digits': '(3)'}), '(true, pred, digits=3)\n', (3170, 3192), False, 'from imblearn.metrics import classification_report_imbalanced\n'), ((4563, 4599), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'gamma': 'Gamma'}), '(C=C, kernel=kernel, gamma=Gamma)\n', (4566, 4599), False, 'from sklearn.svm import SVC\n'), ((6221, 6308), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': 'criterion', 'splitter': 'splitter', 'max_depth': 'maxdepth'}), '(criterion=criterion, splitter=splitter, max_depth=\n maxdepth)\n', (6243, 6308), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((7892, 7963), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'n_neighbors', 'algorithm': 'algorithm', 'p': 'p'}), '(n_neighbors=n_neighbors, algorithm=algorithm, p=p)\n', (7912, 7963), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((8483, 8495), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8493, 8495), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10288, 10435), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'base_estimator': 'base_estimator', 'n_estimators': 'n_estimators', 'learning_rate': 'learning_rate', 'algorithm': '"""SAMME"""', 'random_state': 'seed'}), "(base_estimator=base_estimator, n_estimators=n_estimators,\n learning_rate=learning_rate, algorithm='SAMME', random_state=seed)\n", (10306, 10435), 
False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((10899, 10924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10922, 10924), False, 'import argparse\n'), ((11408, 11428), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11422, 11428), True, 'import numpy as np\n'), ((11433, 11450), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11444, 11450), False, 'import random\n'), ((11501, 11532), 'numpy.load', 'np.load', (['"""dataset/data_pro.npy"""'], {}), "('dataset/data_pro.npy')\n", (11508, 11532), True, 'import numpy as np\n'), ((1474, 1551), 'imblearn.under_sampling.EditedNearestNeighbours', 'EditedNearestNeighbours', ([], {'sampling_strategy': '[0]', 'n_neighbors': '(5)', 'kind_sel': '"""all"""'}), "(sampling_strategy=[0], n_neighbors=5, kind_sel='all')\n", (1497, 1551), False, 'from imblearn.under_sampling import EditedNearestNeighbours\n'), ((1754, 1831), 'imblearn.under_sampling.EditedNearestNeighbours', 'EditedNearestNeighbours', ([], {'sampling_strategy': '[1]', 'n_neighbors': '(3)', 'kind_sel': '"""all"""'}), "(sampling_strategy=[1], n_neighbors=3, kind_sel='all')\n", (1777, 1831), False, 'from imblearn.under_sampling import EditedNearestNeighbours\n'), ((3973, 4009), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'gamma': 'Gamma'}), '(C=C, kernel=kernel, gamma=Gamma)\n', (3976, 4009), False, 'from sklearn.svm import SVC\n'), ((4232, 4264), 'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['true', 'pred'], {}), '(true, pred)\n', (4252, 4264), False, 'from imblearn.metrics import geometric_mean_score\n'), ((4723, 4791), 'joblib.dump', 'joblib.dump', (['SVM', "('Saved_Model_params\\\\model_SVM_%s.m' % opt.re_samp)"], {}), "(SVM, 'Saved_Model_params\\\\model_SVM_%s.m' % opt.re_samp)\n", (4734, 4791), False, 'import joblib\n'), ((5562, 5649), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': 'criterion', 'splitter': 'splitter', 
'max_depth': 'maxdepth'}), '(criterion=criterion, splitter=splitter, max_depth=\n maxdepth)\n', (5584, 5649), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5865, 5897), 'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['true', 'pred'], {}), '(true, pred)\n', (5885, 5897), False, 'from imblearn.metrics import geometric_mean_score\n'), ((6426, 6492), 'joblib.dump', 'joblib.dump', (['DT', "('Saved_Model_params\\\\model_DT_%s.m' % opt.re_samp)"], {}), "(DT, 'Saved_Model_params\\\\model_DT_%s.m' % opt.re_samp)\n", (6437, 6492), False, 'import joblib\n'), ((7249, 7320), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'n_neighbors', 'algorithm': 'algorithm', 'p': 'p'}), '(n_neighbors=n_neighbors, algorithm=algorithm, p=p)\n', (7269, 7320), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7543, 7575), 'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['true', 'pred'], {}), '(true, pred)\n', (7563, 7575), False, 'from imblearn.metrics import geometric_mean_score\n'), ((8087, 8155), 'joblib.dump', 'joblib.dump', (['KNN', "('Saved_Model_params\\\\model_KNN_%s.m' % opt.re_samp)"], {}), "(KNN, 'Saved_Model_params\\\\model_KNN_%s.m' % opt.re_samp)\n", (8098, 8155), False, 'import joblib\n'), ((8618, 8684), 'joblib.dump', 'joblib.dump', (['NB', "('Saved_Model_params\\\\model_NB_%s.m' % opt.re_samp)"], {}), "(NB, 'Saved_Model_params\\\\model_NB_%s.m' % opt.re_samp)\n", (8629, 8684), False, 'import joblib\n'), ((9508, 9655), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'base_estimator': 'base_estimator', 'n_estimators': 'n_estimators', 'learning_rate': 'learning_rate', 'algorithm': '"""SAMME"""', 'random_state': 'seed'}), "(base_estimator=base_estimator, n_estimators=n_estimators,\n learning_rate=learning_rate, algorithm='SAMME', random_state=seed)\n", (9526, 9655), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((9904, 9936), 
'imblearn.metrics.geometric_mean_score', 'geometric_mean_score', (['true', 'pred'], {}), '(true, pred)\n', (9924, 9936), False, 'from imblearn.metrics import geometric_mean_score\n'), ((10582, 10648), 'joblib.dump', 'joblib.dump', (['AB', "('Saved_Model_params\\\\model_AB_%s.m' % opt.re_samp)"], {}), "(AB, 'Saved_Model_params\\\\model_AB_%s.m' % opt.re_samp)\n", (10593, 10648), False, 'import joblib\n'), ((2062, 2139), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': 'seed', 'sampling_strategy': '{(1): 1900, (2): 1400, (3): 1000}'}), '(random_state=seed, sampling_strategy={(1): 1900, (2): 1400, (3): 1000})\n', (2067, 2139), False, 'from imblearn.over_sampling import SMOTE\n'), ((2364, 2440), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': 'seed', 'sampling_strategy': '{(1): 2000, (2): 1200, (3): 800}'}), '(random_state=seed, sampling_strategy={(1): 2000, (2): 1200, (3): 800})\n', (2369, 2440), False, 'from imblearn.over_sampling import SMOTE\n'), ((2449, 2519), 'imblearn.under_sampling.EditedNearestNeighbours', 'EditedNearestNeighbours', ([], {'sampling_strategy': '[0, 1, 2, 3]', 'n_neighbors': '(3)'}), '(sampling_strategy=[0, 1, 2, 3], n_neighbors=3)\n', (2472, 2519), False, 'from imblearn.under_sampling import EditedNearestNeighbours\n'), ((2538, 2585), 'imblearn.combine.SMOTEENN', 'SMOTEENN', ([], {'random_state': 'seed', 'smote': 'smo', 'enn': 'enn'}), '(random_state=seed, smote=smo, enn=enn)\n', (2546, 2585), False, 'from imblearn.combine import SMOTEENN\n'), ((2985, 3014), 'numpy.count_nonzero', 'np.count_nonzero', (['(true - pred)'], {}), '(true - pred)\n', (3001, 3014), True, 'import numpy as np\n'), ((3310, 3330), 'numpy.around', 'np.around', (['G_mean', '(3)'], {}), '(G_mean, 3)\n', (3319, 3330), True, 'import numpy as np\n'), ((3245, 3263), 'numpy.around', 'np.around', (['accu', '(1)'], {}), '(accu, 1)\n', (3254, 3263), True, 'import numpy as np\n')] |
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
# Pulled from Scikit-Learn's official Github Repo (18 Sep 2020) to speed up 'caer' package import speeds (since this was the only method referenced from sklearn)
from itertools import chain, compress
from math import ceil, floor
import numpy as np
from ._sklearn_utils import _num_samples, issparse
try:
from pkg_resources import parse_version # type: ignore
except ImportError:
# setuptools not installed
from distutils.version import LooseVersion
parse_version = LooseVersion # type: ignore
np_version = parse_version(np.__version__)
def train_test_split(*arrays,
test_size=None,
train_size=None
):
"""Split arrays or matrices into random train and test subsets
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 1.6.6
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError('At least one array required as input')
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(n_samples, test_size, train_size,
default_test_size=0.25)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
return list(chain.from_iterable((_safe_indexing(a, train),
_safe_indexing(a, test)) for a in arrays))
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, Tensor, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError("Found input variables with inconsistent numbers of"
" samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-interable objects to arrays.
Parameters
----------
*iterables : {lists, dataframes, Tensors, sparse matrices}
List of objects to ensure sliceability.
"""
result = [_make_indexable(X) for X in iterables]
check_consistent_length(*result)
return result
def _determine_key_type(key, accept_slice=True):
"""Determine the data type of key.
Parameters
----------
key : scalar, slice or array-like
The key from which we want to infer the data type.
accept_slice : bool, default=True
Whether or not to raise an error if the key is a slice.
Returns
-------
dtype : {'int', 'str', 'bool', None}
Returns the data type of key.
"""
err_msg = ("No valid specification of the columns. Only a scalar, list or "
"slice of all integers or all strings, or boolean mask is "
"allowed")
dtype_to_str = {int: 'int', str: 'str', bool: 'bool', np.bool_: 'bool'}
array_dtype_to_str = {'i': 'int', 'u': 'int', 'b': 'bool', 'O': 'str',
'U': 'str', 'S': 'str'}
if key is None:
return None
if isinstance(key, tuple(dtype_to_str.keys())):
try:
return dtype_to_str[type(key)]
except KeyError:
raise ValueError(err_msg)
if isinstance(key, slice):
if not accept_slice:
raise TypeError(
'Only array-like or scalar are supported. '
'A Python slice was given.'
)
if key.start is None and key.stop is None:
return None
key_start_type = _determine_key_type(key.start)
key_stop_type = _determine_key_type(key.stop)
if key_start_type is not None and key_stop_type is not None:
if key_start_type != key_stop_type:
raise ValueError(err_msg)
if key_start_type is not None:
return key_start_type
return key_stop_type
if isinstance(key, (list, tuple)):
unique_key = set(key)
key_type = {_determine_key_type(elt) for elt in unique_key}
if not key_type:
return None
if len(key_type) != 1:
raise ValueError(err_msg)
return key_type.pop()
if hasattr(key, 'dtype'):
try:
return array_dtype_to_str[key.dtype.kind]
except KeyError:
raise ValueError(err_msg)
raise ValueError(err_msg)
def _array_indexing(array, key, key_dtype, axis):
"""Index an array or scipy.sparse consistently across NumPy version."""
if np_version < parse_version('1.12') or issparse(array):
# Remove the check for NumPy when using >= 1.12
# check if we have an boolean array-likes to make the proper indexing
if key_dtype == 'bool':
key = np.asarray(key)
if isinstance(key, tuple):
key = list(key)
return array[key] if axis == 0 else array[:, key]
def _pandas_indexing(X, key, key_dtype, axis):
"""Index a pandas dataframe or a series."""
if hasattr(key, 'shape'):
# Work-around for indexing with read-only key in pandas
# solved in pandas 0.25
key = np.asarray(key)
key = key if key.flags.writeable else key.copy()
elif isinstance(key, tuple):
key = list(key)
# check whether we should index with loc or iloc
indexer = X.iloc if key_dtype == 'int' else X.loc
return indexer[:, key] if axis else indexer[key]
def _list_indexing(X, key, key_dtype):
"""Index a Python list."""
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return X[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(compress(X, key))
# key is a integer array-like of key
return [X[idx] for idx in key]
def _safe_indexing(X, indices, *, axis=0):
"""Return rows, items or columns of X using indices.
.. warning::
This utility is documented, but **private**. This means that
backward compatibility might be broken without any deprecation
cycle.
Parameters
----------
X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
Data from which to sample rows, items or columns. `list` are only
supported when `axis=0`.
indices : bool, int, str, slice, array-like
- If `axis=0`, boolean and integer array-like, integer slice,
and scalar integer are supported.
- If `axis=1`:
- to select a single column, `indices` can be of `int` type for
all `X` types and `str` only for dataframe. The selected subset
will be 1D, unless `X` is a sparse matrix in which case it will
be 2D.
- to select multiples columns, `indices` can be one of the
following: `list`, `array`, `slice`. The type used in
these containers can be one of the following: `int`, 'bool' and
`str`. However, `str` is only supported when `X` is a dataframe.
The selected subset will be 2D.
axis : int, default=0
The axis along which `X` will be subsampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset
Subset of X on axis 0 or 1.
Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.
"""
if indices is None:
return X
if axis not in (0, 1):
raise ValueError(
"'axis' should be either 0 (to index rows) or 1 (to index "
f" column). Got {axis} instead."
)
indices_dtype = _determine_key_type(indices)
if axis == 0 and indices_dtype == 'str':
raise ValueError(
"String indexing is not supported with 'axis=0'"
)
if axis == 1 and X.ndim != 2:
raise ValueError(
"'X' should be a 2D NumPy array, 2D sparse matrix or pandas "
"dataframe when indexing the columns (i.e. 'axis=1'). "
f"Got {type(X)} instead with {X.ndim} dimension(s)."
)
if axis == 1 and indices_dtype == 'str' and not hasattr(X, 'loc'):
raise ValueError(
"Specifying the columns using strings is only supported for "
"pandas DataFrames"
)
if hasattr(X, "iloc"):
return _pandas_indexing(X, indices, indices_dtype, axis=axis)
elif hasattr(X, "shape"):
return _array_indexing(X, indices, indices_dtype, axis=axis)
else:
return _list_indexing(X, indices, indices_dtype)
def _validate_shuffle_split(n_samples, test_size, train_size,
default_test_size=None):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (test_size_type == 'i' and (test_size >= n_samples or test_size <= 0)
or test_size_type == 'f' and (test_size <= 0 or test_size >= 1)):
raise ValueError(f'test_size={test_size} should be either positive and smaller'
f' than the number of samples {n_samples} or a float in the '
'(0, 1) range')
if (train_size_type == 'i' and (train_size >= n_samples or train_size <= 0)
or train_size_type == 'f' and (train_size <= 0 or train_size >= 1)):
raise ValueError(f'train_size={train_size} should be either positive and smaller'
f' than the number of samples {n_samples} or a float in the '
'(0, 1) range')
if train_size is not None and train_size_type not in ('i', 'f'):
raise ValueError(f"Invalid value for train_size: {train_size}")
if test_size is not None and test_size_type not in ('i', 'f'):
raise ValueError(f"Invalid value for test_size: {test_size}")
if (train_size_type == 'f' and test_size_type == 'f' and
train_size + test_size > 1):
raise ValueError(
f'The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)'
' range. Reduce test_size and/or train_size.'
)
if test_size_type == 'f':
n_test = ceil(test_size * n_samples)
elif test_size_type == 'i':
n_test = float(test_size)
if train_size_type == 'f':
n_train = floor(train_size * n_samples)
elif train_size_type == 'i':
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
f'With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the '
'resulting train set will be empty. Adjust any of the '
'aforementioned parameters.'
)
return n_train, n_test | [
"math.ceil",
"numpy.unique",
"numpy.isscalar",
"math.floor",
"numpy.asarray",
"numpy.array",
"pkg_resources.parse_version",
"itertools.compress",
"numpy.arange"
] | [((1011, 1040), 'pkg_resources.parse_version', 'parse_version', (['np.__version__'], {}), '(np.__version__)\n', (1024, 1040), False, 'from pkg_resources import parse_version\n'), ((2645, 2663), 'numpy.arange', 'np.arange', (['n_train'], {}), '(n_train)\n', (2654, 2663), True, 'import numpy as np\n'), ((2675, 2711), 'numpy.arange', 'np.arange', (['n_train', '(n_train + n_test)'], {}), '(n_train, n_train + n_test)\n', (2684, 2711), True, 'import numpy as np\n'), ((3499, 3517), 'numpy.array', 'np.array', (['iterable'], {}), '(iterable)\n', (3507, 3517), True, 'import numpy as np\n'), ((3916, 3934), 'numpy.unique', 'np.unique', (['lengths'], {}), '(lengths)\n', (3925, 3934), True, 'import numpy as np\n'), ((7517, 7532), 'numpy.asarray', 'np.asarray', (['key'], {}), '(key)\n', (7527, 7532), True, 'import numpy as np\n'), ((7886, 7902), 'numpy.isscalar', 'np.isscalar', (['key'], {}), '(key)\n', (7897, 7902), True, 'import numpy as np\n'), ((12764, 12791), 'math.ceil', 'ceil', (['(test_size * n_samples)'], {}), '(test_size * n_samples)\n', (12768, 12791), False, 'from math import ceil, floor\n'), ((12908, 12937), 'math.floor', 'floor', (['(train_size * n_samples)'], {}), '(train_size * n_samples)\n', (12913, 12937), False, 'from math import ceil, floor\n'), ((6929, 6950), 'pkg_resources.parse_version', 'parse_version', (['"""1.12"""'], {}), "('1.12')\n", (6942, 6950), False, 'from pkg_resources import parse_version\n'), ((7155, 7170), 'numpy.asarray', 'np.asarray', (['key'], {}), '(key)\n', (7165, 7170), True, 'import numpy as np\n'), ((8075, 8091), 'itertools.compress', 'compress', (['X', 'key'], {}), '(X, key)\n', (8083, 8091), False, 'from itertools import chain, compress\n'), ((11302, 11323), 'numpy.asarray', 'np.asarray', (['test_size'], {}), '(test_size)\n', (11312, 11323), True, 'import numpy as np\n'), ((11357, 11379), 'numpy.asarray', 'np.asarray', (['train_size'], {}), '(train_size)\n', (11367, 11379), True, 'import numpy as np\n')] |
"""
Construct a dataset with (multiple) source and target domains, adapted from
https://github.com/criteo-research/pytorch-ada/blob/master/adalib/ada/datasets/multisource.py
"""
import logging
import os
from enum import Enum
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
import numpy as np
import torch.utils.data
from sklearn.utils import check_random_state
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader, has_file_allowed_extension, IMG_EXTENSIONS
from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios
from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig
class WeightingType(Enum):
    """Class-weighting schemes used by ``MultiDomainDatasets`` when sampling."""

    NATURAL = "natural"  # default sampling config (no class re-weighting)
    BALANCED = "balanced"  # class-balanced sampling (SamplingConfig(balance=True))
    PRESET0 = "preset0"  # fixed preset class weights for source/target
class DatasetSizeType(Enum):
    """How the reference dataset size (epoch length) is chosen."""

    Max = "max"  # size of the biggest dataset
    Source = "source"  # size of the source dataset

    @staticmethod
    def get_size(size_type, source_dataset, *other_datasets):
        """Return the reference number of samples for the given size type.

        Args:
            size_type (DatasetSizeType): Max (largest of all datasets) or Source.
            source_dataset: the source-domain dataset (anything with ``len``).
            *other_datasets: any further datasets considered for the Max rule.

        Raises:
            ValueError: if ``size_type`` is neither Max nor Source.
        """
        if size_type is DatasetSizeType.Source:
            return len(source_dataset)
        if size_type is DatasetSizeType.Max:
            sizes = [len(source_dataset)]
            sizes.extend(len(dataset) for dataset in other_datasets)
            return max(sizes)
        raise ValueError(f"Size type size must be 'max' or 'source', had '{size_type}'")
class DomainsDatasetBase:
    """Interface for datasets that combine samples from several domains."""

    def prepare_data_loaders(self):
        """Perform the train/validation/test split so that each of the three
        resulting datasets contains data from all domains.

        Subclasses must override this method.
        """
        raise NotImplementedError()

    def get_domain_loaders(self, split="train", batch_size=32):
        """Build a loader that samples a dataset containing multiple domains.

        Args:
            split (string, optional): ["train"|"valid"|"test"]. Which dataset to iterate on. Defaults to "train".
            batch_size (int, optional): Defaults to 32.

        Returns:
            MultiDataLoader: A dataloader with API similar to the torch.dataloader, but returning
            batches from several domains at each iteration.
        """
        raise NotImplementedError()
class MultiDomainDatasets(DomainsDatasetBase):
    # Source/target (two-domain) dataset wrapper for domain adaptation:
    # builds per-split subsets for both domains and pairs their loaders.
    def __init__(
        self,
        source_access: DatasetAccess,
        target_access: DatasetAccess,
        config_weight_type="natural",
        config_size_type=DatasetSizeType.Max,
        val_split_ratio=0.1,
        source_sampling_config=None,
        target_sampling_config=None,
        n_fewshot=None,
        random_state=None,
        class_ids=None,
    ):
        """The class controlling how the source and target domains are
        iterated over.
        Args:
            source_access (DatasetAccess): accessor for the source dataset
            target_access (DatasetAccess): accessor for the target dataset
            config_weight_type (WeightingType, optional): The weight type for sampling. Defaults to 'natural'.
            config_size_type (DatasetSizeType, optional): Which dataset size to use to define the number of epochs vs
                batch_size. Defaults to DatasetSizeType.Max.
            val_split_ratio (float, optional): ratio for the validation part of the train dataset. Defaults to 0.1.
            source_sampling_config (SamplingConfig, optional): How to sample from the source. Defaults to None
                (=> RandomSampler).
            target_sampling_config (SamplingConfig, optional): How to sample from the target. Defaults to None
                (=> RandomSampler).
            n_fewshot (int, optional): Number of target samples for which the label may be used,
                to define the few-shot, semi-supervised setting. Defaults to None.
            random_state ([int|np.random.RandomState], optional): Used for deterministic sampling/few-shot label
                selection. Defaults to None.
            class_ids (list, optional): List of chosen subset of class ids. Defaults to None (=> All Classes).
        Examples::
            >>> dataset = MultiDomainDatasets(source_access, target_access)
        """
        # Both enum conversions raise ValueError for unknown values, so the
        # weighting/size types are validated here.
        weight_type = WeightingType(config_weight_type)
        size_type = DatasetSizeType(config_size_type)
        if weight_type is WeightingType.PRESET0:
            # Preset weighting: decreasing integer weights over source classes,
            # random integer weights in [1, 3] over target classes.
            self._source_sampling_config = SamplingConfig(class_weights=np.arange(source_access.n_classes(), 0, -1))
            self._target_sampling_config = SamplingConfig(
                class_weights=np.random.randint(1, 4, size=target_access.n_classes())
            )
        elif weight_type is WeightingType.BALANCED:
            self._source_sampling_config = SamplingConfig(balance=True)
            self._target_sampling_config = SamplingConfig(balance=True)
        elif weight_type not in WeightingType:
            # NOTE(review): effectively unreachable -- WeightingType(...) above
            # already raises for unknown values.
            raise ValueError(f"Unknown weighting method {weight_type}.")
        else:
            # WeightingType.NATURAL: default (unweighted) sampling.
            self._source_sampling_config = SamplingConfig()
            self._target_sampling_config = SamplingConfig()
        self._source_access = source_access
        self._target_access = target_access
        self._val_split_ratio = val_split_ratio
        # NOTE(review): the source/target_sampling_config arguments are currently
        # ignored (see the commented-out code below); the sampling configs are
        # derived from config_weight_type instead.
        # self._source_sampling_config = (
        #     source_sampling_config
        #     if source_sampling_config is not None
        #     else SamplingConfig()
        # )
        # self._target_sampling_config = (
        #     target_sampling_config
        #     if target_sampling_config is not None
        #     else SamplingConfig()
        # )
        self._size_type = size_type
        self._n_fewshot = n_fewshot
        self._random_state = check_random_state(random_state)
        # Per-domain subsets keyed by split name ("train"/"valid"/"test"),
        # populated by prepare_data_loaders().
        self._source_by_split: Dict[str, torch.utils.data.Subset] = {}
        self._labeled_target_by_split = None
        self._target_by_split: Dict[str, torch.utils.data.Subset] = {}
        self.class_ids = class_ids
    def is_semi_supervised(self):
        # Semi-supervised when a positive few-shot budget of labeled target
        # samples was requested.
        return self._n_fewshot is not None and self._n_fewshot > 0
    def prepare_data_loaders(self):
        logging.debug("Load source")
        (self._source_by_split["train"], self._source_by_split["valid"],) = self._source_access.get_train_val(
            self._val_split_ratio
        )
        # Optionally restrict every split to the requested subset of classes.
        if self.class_ids is not None:
            self._source_by_split["train"] = get_class_subset(self._source_by_split["train"], self.class_ids)
            self._source_by_split["valid"] = get_class_subset(self._source_by_split["valid"], self.class_ids)
        logging.debug("Load target")
        (self._target_by_split["train"], self._target_by_split["valid"],) = self._target_access.get_train_val(
            self._val_split_ratio
        )
        if self.class_ids is not None:
            self._target_by_split["train"] = get_class_subset(self._target_by_split["train"], self.class_ids)
            self._target_by_split["valid"] = get_class_subset(self._target_by_split["valid"], self.class_ids)
        logging.debug("Load source Test")
        self._source_by_split["test"] = self._source_access.get_test()
        if self.class_ids is not None:
            self._source_by_split["test"] = get_class_subset(self._source_by_split["test"], self.class_ids)
        logging.debug("Load target Test")
        self._target_by_split["test"] = self._target_access.get_test()
        if self.class_ids is not None:
            self._target_by_split["test"] = get_class_subset(self._target_by_split["test"], self.class_ids)
        if self._n_fewshot is not None and self._n_fewshot > 0:
            # semi-supervised target domain: carve a small labeled subset out
            # of each target split, leaving the rest unlabeled.
            # NOTE(review): self._random_state is not forwarded here, so the
            # few-shot selection uses its own default seeding -- confirm intent.
            self._labeled_target_by_split = {}
            for part in ["train", "valid", "test"]:
                (self._labeled_target_by_split[part], self._target_by_split[part],) = _split_dataset_few_shot(
                    self._target_by_split[part], self._n_fewshot
                )
    def get_domain_loaders(self, split="train", batch_size=32):
        # Returns a MultiDataLoader pairing one loader per domain for the
        # requested split; n_batches is derived from the configured size type.
        source_ds = self._source_by_split[split]
        source_loader = self._source_sampling_config.create_loader(source_ds, batch_size)
        target_ds = self._target_by_split[split]
        if self._labeled_target_by_split is None:
            # unsupervised target domain
            target_loader = self._target_sampling_config.create_loader(target_ds, batch_size)
            n_dataset = DatasetSizeType.get_size(self._size_type, source_ds, target_ds)
            return MultiDataLoader(
                dataloaders=[source_loader, target_loader], n_batches=max(n_dataset // batch_size, 1),
            )
        else:
            # semi-supervised target domain
            target_labeled_ds = self._labeled_target_by_split[split]
            target_unlabeled_ds = target_ds
            # label domain: always balanced
            target_labeled_loader = SamplingConfig(balance=True, class_weights=None).create_loader(
                target_labeled_ds, batch_size=min(len(target_labeled_ds), batch_size)
            )
            target_unlabeled_loader = self._target_sampling_config.create_loader(target_unlabeled_ds, batch_size)
            n_dataset = DatasetSizeType.get_size(self._size_type, source_ds, target_labeled_ds, target_unlabeled_ds)
            return MultiDataLoader(
                dataloaders=[source_loader, target_labeled_loader, target_unlabeled_loader],
                n_batches=max(n_dataset // batch_size, 1),
            )
    def __len__(self):
        # Length follows the configured size type over the train split(s).
        source_ds = self._source_by_split["train"]
        target_ds = self._target_by_split["train"]
        if self._labeled_target_by_split is None:
            return DatasetSizeType.get_size(self._size_type, source_ds, target_ds)
        else:
            labeled_target_ds = self._labeled_target_by_split["train"]
            return DatasetSizeType.get_size(self._size_type, source_ds, labeled_target_ds, target_ds)
def _split_dataset_few_shot(dataset, n_fewshot, random_state=None):
    """Split ``dataset`` into a small labeled few-shot subset and the remainder.

    Args:
        dataset: dataset whose labels are readable via ``get_labels``.
        n_fewshot: number of samples to keep per class; a value in (0, 1) is
            interpreted as a fraction of the per-class budget.
        random_state ([int|np.random.RandomState], optional): seed for the
            per-class shuffling. Defaults to None.

    Returns:
        tuple: (labeled_dataset, unlabeled_dataset) as torch Subsets.
    """
    if n_fewshot <= 0:
        raise ValueError(f"n_fewshot should be > 0, not '{n_fewshot}'")
    labels = get_labels(dataset)
    classes = sorted(set(labels))
    # A fractional budget is converted into an absolute per-class count.
    if n_fewshot < 1:
        per_class_max = len(dataset) // len(classes)
        n_fewshot = round(per_class_max * n_fewshot)
    n_fewshot = int(round(n_fewshot))
    rng = check_random_state(random_state)
    # Draw n_fewshot random items per class; the rest stays unlabeled.
    few_shot_parts = []
    remainder_parts = []
    for cls in classes:
        cls_indices = np.where(labels == cls)[0]
        rng.shuffle(cls_indices)
        head, tail = np.split(cls_indices, [n_fewshot])
        assert len(head) == n_fewshot
        few_shot_parts.append(head)
        remainder_parts.append(tail)
    few_shot_idx = np.concatenate(few_shot_parts)
    remainder_idx = np.concatenate(remainder_parts)
    assert len(few_shot_idx) == len(classes) * n_fewshot
    labeled_dataset = torch.utils.data.Subset(dataset, few_shot_idx)
    unlabeled_dataset = torch.utils.data.Subset(dataset, remainder_idx)
    return labeled_dataset, unlabeled_dataset
def _domain_stratified_split(domain_labels, n_partitions, split_ratios):
    """Get domain stratified indices of random split. Samples with the same domain label will be split based on the
    given ratios. Then the indices of different domains within the same split will be concatenated.

    Args:
        domain_labels (array-like): Labels to indicate which domains the samples are from.
        n_partitions (int): Number of partitions to split, 2 <= n_partitions <= len(split_ratios) + 1.
        split_ratios (list): Ratios of splits to be produced, where 0 < sum(split_ratios) <= 1.

    Returns:
        [list]: Indices for different splits.
    """
    unique_domains = np.unique(domain_labels)
    # One list of per-domain index arrays for each partition.
    partition_parts = [[] for _ in range(n_partitions)]
    for domain_value in unique_domains:
        idx_in_domain = np.where(domain_labels == domain_value)[0]
        pieces = split_by_ratios(torch.from_numpy(idx_in_domain), split_ratios)
        for part in range(n_partitions):
            partition_parts[part].append(idx_in_domain[pieces[part].indices])
    # Merge the per-domain pieces of each partition into one index array.
    return [np.concatenate(parts) for parts in partition_parts]
class MultiDomainImageFolder(VisionDataset):
    """A generic data loader where the samples are arranged in this way: ::
        root/domain_a/class_1/xxx.ext
        root/domain_a/class_1/xxy.ext
        root/domain_a/class_2/xxz.ext
        root/domain_b/class_1/efg.ext
        root/domain_b/class_2/pqr.ext
        root/domain_b/class_2/lmn.ext
        root/domain_k/class_2/123.ext
        root/domain_k/class_1/abc3.ext
        root/domain_k/class_1/asd932_.ext
    Args:
        root (string): Root directory path.
        loader (callable): A function to load a sample given its path.
        extensions (tuple[string]): A list of allowed extensions. Either extensions or is_valid_file should be
            passed.
        transform (callable, optional): A function/transform that takes in a sample and returns a transformed
            version. E.g, ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        sub_domain_set (list): A list of domain names, which should be a subset of domains (folders) under the root
            directory. If None, all available domains will be used. Defaults to None.
        sub_class_set (list): A list of class names, which should be a subset of classes (folders) under each
            domain's directory. If None, all available classes will be used. Defaults to None.
        is_valid_file (callable, optional): A function that takes path of a file and check if the file is a valid
            file (to check corrupt files). Either extensions or is_valid_file should be passed.
    Attributes:
        classes (list): List of the class names sorted alphabetically.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
        domains (list): List of the domain names sorted alphabetically.
        domain_to_idx (dict): Dict with items (domain_name, domain_index).
        domain_labels (list): The domain_index value for each image in the dataset
    """
    def __init__(
        self,
        root: str,
        loader: Callable[[str], Any] = default_loader,
        extensions: Optional[Tuple[str, ...]] = IMG_EXTENSIONS,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        sub_domain_set=None,
        sub_class_set=None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
        return_domain_label: Optional[bool] = False,
        split_train_test: Optional[bool] = False,
        split_ratio: Optional[float] = 0.8,
    ) -> None:
        super(MultiDomainImageFolder, self).__init__(root, transform=transform, target_transform=target_transform)
        # Domains are the first-level sub-directories of root.
        domains, domain_to_idx = self._find_classes(self.root)
        if type(sub_domain_set) == list:
            # Restrict to the requested domains; indices follow sub_domain_set order.
            for domain_name in sub_domain_set:
                if domain_name not in domains:
                    raise ValueError("Domain %s not in the image directory" % domain_name)
            domains = sub_domain_set
            domain_to_idx = {domain_name: i for i, domain_name in enumerate(sub_domain_set)}
        # Class names are read from the first domain's sub-directories only;
        # domains missing a class folder are simply skipped during scanning.
        classes, class_to_idx = self._find_classes(os.path.join(self.root, domains[0]))
        if type(sub_class_set) == list:
            # Restrict to the requested classes; indices follow sub_class_set order.
            for class_name in sub_class_set:
                if class_name not in classes:
                    raise ValueError("Class %s not in the image directory" % class_name)
            classes = sub_class_set
            class_to_idx = {class_name: i for i, class_name in enumerate(sub_class_set)}
        samples = make_multi_domain_set(self.root, class_to_idx, domain_to_idx, extensions, is_valid_file)
        if len(samples) == 0:
            msg = "Found 0 files in sub-folders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        # Each sample is a (path, class_index, domain_index) tuple.
        self.samples = samples
        self.targets = [s[1] for s in samples]
        self.domains = domains
        self.domain_to_idx = domain_to_idx
        self.domain_labels = [s[2] for s in samples]
        self.return_domain_label = return_domain_label
        self.split_train_test = split_train_test
        self.split_ratio = split_ratio
        if split_train_test:
            # Pre-compute a domain-stratified train/test split of sample indices.
            self.train_idx, self.test_idx = _domain_stratified_split(self.domain_labels, 2, [split_ratio])
        else:
            self.train_idx = None
            self.test_idx = None
    @staticmethod
    def _find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]:
        """
        Finds the class folders in a dataset.
        Args:
            directory (string): Directory path.
        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
        Ensures:
            No class is a subdirectory of another.
        """
        classes = [d.name for d in os.scandir(directory) if d.is_dir()]
        classes.sort()
        class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
        return classes, class_to_idx
    def __getitem__(self, index: int) -> Tuple:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target, domain) where target is class_index of the target class.
        """
        path, target, domain = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # Domain label is only exposed when requested at construction time.
        if self.return_domain_label:
            return sample, target, domain
        else:
            return sample, target
    def __len__(self) -> int:
        # Total number of samples across all domains and classes.
        return len(self.samples)
    def get_train(self):
        # Train subset of the pre-computed stratified split; None when
        # split_train_test was not requested.
        if self.split_train_test:
            return torch.utils.data.Subset(self, self.train_idx)
        else:
            return None
    def get_test(self):
        # Test subset of the pre-computed stratified split; None when
        # split_train_test was not requested.
        if self.split_train_test:
            return torch.utils.data.Subset(self, self.test_idx)
        else:
            return None
def make_multi_domain_set(
    directory: str,
    class_to_idx: Dict[str, int],
    domain_to_idx: Dict[str, int],
    extensions: Optional[Tuple[str, ...]] = None,
    is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int, int]]:
    """Generates a list of samples of a form (path_to_sample, class, domain).

    Args:
        directory (str): root dataset directory
        class_to_idx (Dict[str, int]): dictionary mapping class name to class index
        domain_to_idx (Dict[str, int]): dictionary mapping d name to class index
        extensions (optional): A list of allowed extensions. Either extensions or is_valid_file should be passed.
            Defaults to None.
        is_valid_file (optional): A function that takes path of a file and checks if the file is a valid file
            (to check corrupt files) both extensions and is_valid_file should not be passed. Defaults to None.

    Raises:
        ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.

    Returns:
        List[Tuple[str, int, int]]: samples of a form (path_to_sample, class, domain)
    """
    directory = os.path.expanduser(directory)
    # Exactly one of extensions / is_valid_file must be provided.
    if (extensions is None) == (is_valid_file is None):
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)
    samples: List[Tuple[str, int, int]] = []
    # Deterministic scan order: sorted domains, then sorted classes, then
    # sorted directory walk, then sorted file names.
    for domain_name in sorted(domain_to_idx):
        domain_index = domain_to_idx[domain_name]
        domain_path = os.path.join(directory, domain_name)
        for class_name in sorted(class_to_idx):
            class_index = class_to_idx[class_name]
            class_path = os.path.join(domain_path, class_name)
            if not os.path.isdir(class_path):
                # Domains missing this class folder are skipped silently.
                continue
            for walk_root, _, filenames in sorted(os.walk(class_path, followlinks=True)):
                for filename in sorted(filenames):
                    file_path = os.path.join(walk_root, filename)
                    if is_valid_file(file_path):
                        samples.append((file_path, class_index, domain_index))
    return samples
class ConcatMultiDomainAccess(torch.utils.data.Dataset):
    """Concatenate multiple datasets as a single dataset with domain labels

    Args:
        data_access (dict): Dictionary of domain datasets, e.g. {"Domain1_name": domain1_set,
            "Domain2_name": domain2_set}
        domain_to_idx (dict): Dictionary of domain name to domain labels, e.g. {"Domain1_name": 0, "Domain2_name": 1}
        return_domain_label (Optional[bool], optional): Whether return domain labels in each batch. Defaults to False.
    """

    def __init__(
        self, data_access: dict, domain_to_idx: dict, return_domain_label: Optional[bool] = False,
    ):
        self.domain_to_idx = domain_to_idx
        samples, targets, domain_targets = [], [], []
        # Flatten every domain dataset into one sample/label/domain sequence,
        # visiting domains in domain_to_idx order.
        for domain_name in domain_to_idx:
            domain_set = data_access[domain_name]
            domain_index = domain_to_idx[domain_name]
            for sample_idx in range(domain_set.data.shape[0]):
                x, y = domain_set[sample_idx]
                samples.append(x)
                targets.append(y)
                domain_targets.append(domain_index)
        self.data = torch.stack(samples)
        self.labels = torch.tensor(targets)
        self.domain_labels = torch.tensor(domain_targets)
        self.return_domain_label = return_domain_label

    def __getitem__(self, index: int) -> Tuple:
        sample = self.data[index]
        label = self.labels[index]
        if self.return_domain_label:
            return sample, label, self.domain_labels[index]
        return sample, label

    def __len__(self):
        return len(self.labels)
class MultiDomainAccess(DatasetAccess):
    """Convert multiple digits-like data accesses to a single data access.

    Args:
        data_access (dict): Dictionary of data accesses, e.g. {"Domain1_name": domain1_access,
            "Domain2_name": domain2_access}
        n_classes (int): number of classes.
        return_domain_label (Optional[bool], optional): Whether return domain labels in each batch.
            Defaults to False.
    """

    def __init__(self, data_access: dict, n_classes: int, return_domain_label: Optional[bool] = False):
        super().__init__(n_classes)
        self.data_access = data_access
        # Enumerate domains in dict order so each gets a stable integer index.
        self.domain_to_idx = {name: index for index, name in enumerate(data_access)}
        self.return_domain_label = return_domain_label

    def get_train(self):
        per_domain_train = {name: self.data_access[name].get_train() for name in self.domain_to_idx}
        return ConcatMultiDomainAccess(per_domain_train, self.domain_to_idx, self.return_domain_label)

    def get_test(self):
        per_domain_test = {name: self.data_access[name].get_test() for name in self.domain_to_idx}
        return ConcatMultiDomainAccess(per_domain_test, self.domain_to_idx, self.return_domain_label)

    def __len__(self):
        return len(self.get_train()) + len(self.get_test())
class MultiDomainAdapDataset(DomainsDatasetBase):
    """The class controlling how the multiple domains are iterated over.

    Args:
        data_access (MultiDomainImageFolder, or MultiDomainAccess): Multi-domain data access.
        val_split_ratio (float, optional): Split ratio for validation set. Defaults to 0.1.
        test_split_ratio (float, optional): Split ratio for test set. Defaults to 0.2.
        random_state (int, optional): Random state for generator. Defaults to 1.
    """

    def __init__(
        self, data_access, val_split_ratio=0.1, test_split_ratio=0.2, random_state: int = 1,
    ):
        self.domain_to_idx = data_access.domain_to_idx
        self.n_domains = len(data_access.domain_to_idx)
        self.data_access = data_access
        self._val_split_ratio = val_split_ratio
        self._test_split_ratio = test_split_ratio
        # Subsets keyed by split name ("train"/"valid"/"test"), filled in by
        # prepare_data_loaders().
        self._sample_by_split: Dict[str, torch.utils.data.Subset] = {}
        self._sampling_config = FixedSeedSamplingConfig(seed=random_state, balance_domain=True)
        self._loader = MultiDataLoader
        self._random_state = random_state

    def prepare_data_loaders(self):
        """Populate the "test", "valid" and "train" subsets of the data access."""
        splits = ["test", "valid", "train"]
        self._sample_by_split["test"] = self.data_access.get_test()
        if self._sample_by_split["test"] is None:
            # No predefined split in the data access: create a domain-stratified
            # test/valid/train split (the remainder after the test and valid
            # ratios becomes the train subset, matching the order of `splits`).
            subset_idx = _domain_stratified_split(
                self.data_access.domain_labels, 3, [self._test_split_ratio, self._val_split_ratio]
            )
            for i in range(len(splits)):
                self._sample_by_split[splits[i]] = torch.utils.data.Subset(self.data_access, subset_idx[i])
        else:
            # Use the original data split if get_test() is not None.
            # Bug fix: get_train_val() returns (train, valid) -- the order used
            # everywhere else in this module -- so assign in that order instead
            # of the previously swapped (valid, train) assignment, which made
            # the model validate on the training subset and train on the
            # validation subset.
            self._sample_by_split["train"], self._sample_by_split["valid"] = self.data_access.get_train_val(
                self._val_split_ratio
            )

    def get_domain_loaders(self, split="train", batch_size=32):
        """Create a domain-balanced loader over the requested split."""
        return self._sampling_config.create_loader(self._sample_by_split[split], batch_size)

    def __len__(self):
        return len(self.data_access)
| [
"sklearn.utils.check_random_state",
"kale.loaddata.dataset_access.get_class_subset",
"numpy.unique",
"logging.debug",
"numpy.where",
"kale.loaddata.sampler.SamplingConfig",
"os.scandir",
"os.path.join",
"os.walk",
"kale.loaddata.sampler.FixedSeedSamplingConfig",
"numpy.split",
"os.path.isdir",... | [((9958, 9977), 'kale.loaddata.sampler.get_labels', 'get_labels', (['dataset'], {}), '(dataset)\n', (9968, 9977), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((10186, 10218), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (10204, 10218), False, 'from sklearn.utils import check_random_state\n'), ((10590, 10614), 'numpy.concatenate', 'np.concatenate', (['tindices'], {}), '(tindices)\n', (10604, 10614), True, 'import numpy as np\n'), ((10630, 10654), 'numpy.concatenate', 'np.concatenate', (['uindices'], {}), '(uindices)\n', (10644, 10654), True, 'import numpy as np\n'), ((11564, 11588), 'numpy.unique', 'np.unique', (['domain_labels'], {}), '(domain_labels)\n', (11573, 11588), True, 'import numpy as np\n'), ((19834, 19863), 'os.path.expanduser', 'os.path.expanduser', (['directory'], {}), '(directory)\n', (19852, 19863), False, 'import os\n'), ((20321, 20363), 'typing.cast', 'cast', (['Callable[[str], bool]', 'is_valid_file'], {}), '(Callable[[str], bool], is_valid_file)\n', (20325, 20363), False, 'from typing import Any, Callable, cast, Dict, List, Optional, Tuple\n'), ((5561, 5593), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (5579, 5593), False, 'from sklearn.utils import check_random_state\n'), ((5963, 5991), 'logging.debug', 'logging.debug', (['"""Load source"""'], {}), "('Load source')\n", (5976, 5991), False, 'import logging\n'), ((6415, 6443), 'logging.debug', 'logging.debug', (['"""Load target"""'], {}), "('Load target')\n", (6428, 6443), False, 'import logging\n'), ((6867, 6900), 'logging.debug', 'logging.debug', (['"""Load source Test"""'], {}), "('Load source Test')\n", (6880, 6900), False, 'import logging\n'), ((7127, 7160), 'logging.debug', 'logging.debug', (['"""Load target Test"""'], {}), "('Load target Test')\n", (7140, 7160), False, 'import 
logging\n'), ((10446, 10476), 'numpy.split', 'np.split', (['indices', '[n_fewshot]'], {}), '(indices, [n_fewshot])\n', (10454, 10476), True, 'import numpy as np\n'), ((20492, 20530), 'os.path.join', 'os.path.join', (['directory', 'target_domain'], {}), '(directory, target_domain)\n', (20504, 20530), False, 'import os\n'), ((25057, 25120), 'kale.loaddata.sampler.FixedSeedSamplingConfig', 'FixedSeedSamplingConfig', ([], {'seed': 'random_state', 'balance_domain': '(True)'}), '(seed=random_state, balance_domain=True)\n', (25080, 25120), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((6231, 6295), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._source_by_split['train']", 'self.class_ids'], {}), "(self._source_by_split['train'], self.class_ids)\n", (6247, 6295), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((6341, 6405), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._source_by_split['valid']", 'self.class_ids'], {}), "(self._source_by_split['valid'], self.class_ids)\n", (6357, 6405), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((6683, 6747), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['train']", 'self.class_ids'], {}), "(self._target_by_split['train'], self.class_ids)\n", (6699, 6747), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((6793, 6857), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['valid']", 'self.class_ids'], {}), "(self._target_by_split['valid'], self.class_ids)\n", (6809, 6857), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((7055, 7118), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', 
(["self._source_by_split['test']", 'self.class_ids'], {}), "(self._source_by_split['test'], self.class_ids)\n", (7071, 7118), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((7315, 7378), 'kale.loaddata.dataset_access.get_class_subset', 'get_class_subset', (["self._target_by_split['test']", 'self.class_ids'], {}), "(self._target_by_split['test'], self.class_ids)\n", (7331, 7378), False, 'from kale.loaddata.dataset_access import DatasetAccess, get_class_subset, split_by_ratios\n'), ((10357, 10383), 'numpy.where', 'np.where', (['(labels == class_)'], {}), '(labels == class_)\n', (10365, 10383), True, 'import numpy as np\n'), ((11695, 11735), 'numpy.where', 'np.where', (['(domain_labels == domain_label_)'], {}), '(domain_labels == domain_label_)\n', (11703, 11735), True, 'import numpy as np\n'), ((12009, 12038), 'numpy.concatenate', 'np.concatenate', (['subset_idx[i]'], {}), '(subset_idx[i])\n', (12023, 12038), True, 'import numpy as np\n'), ((15507, 15542), 'os.path.join', 'os.path.join', (['self.root', 'domains[0]'], {}), '(self.root, domains[0])\n', (15519, 15542), False, 'import os\n'), ((20666, 20704), 'os.path.join', 'os.path.join', (['domain_dir', 'target_class'], {}), '(domain_dir, target_class)\n', (20678, 20704), False, 'import os\n'), ((4608, 4636), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)'}), '(balance=True)\n', (4622, 4636), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((4680, 4708), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)'}), '(balance=True)\n', (4694, 4708), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((17457, 17478), 'os.scandir', 'os.scandir', (['directory'], {}), '(directory)\n', (17467, 17478), False, 'import os\n'), ((20265, 20298), 'typing.cast', 'cast', (['Tuple[str, 
...]', 'extensions'], {}), '(Tuple[str, ...], extensions)\n', (20269, 20298), False, 'from typing import Any, Callable, cast, Dict, List, Optional, Tuple\n'), ((20724, 20749), 'os.path.isdir', 'os.path.isdir', (['target_dir'], {}), '(target_dir)\n', (20737, 20749), False, 'import os\n'), ((20818, 20855), 'os.walk', 'os.walk', (['target_dir'], {'followlinks': '(True)'}), '(target_dir, followlinks=True)\n', (20825, 20855), False, 'import os\n'), ((4886, 4902), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {}), '()\n', (4900, 4902), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((4946, 4962), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {}), '()\n', (4960, 4962), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((8712, 8760), 'kale.loaddata.sampler.SamplingConfig', 'SamplingConfig', ([], {'balance': '(True)', 'class_weights': 'None'}), '(balance=True, class_weights=None)\n', (8726, 8760), False, 'from kale.loaddata.sampler import FixedSeedSamplingConfig, get_labels, MultiDataLoader, SamplingConfig\n'), ((20930, 20955), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (20942, 20955), False, 'import os\n')] |
import matplotlib
matplotlib.use('MacOSX')
import matplotlib.pyplot as plt
import healpy as hp
#import astropy.io.fits as pyfits
import fitsio
import numpy as np
import plot_lib as plib
print('Healpy version', hp.__version__)
GeV = 1.60218e-10
def get_header_info(fits_map_filename):
    """Print basic HEALPix metadata from a FITS map header.

    Reads HDU 1 of *fits_map_filename*, reports resolution, pixel count,
    process and units, and returns the map's NSIDE.
    """
    header = fitsio.read_header(fits_map_filename, ext=1)
    nside = header["NSIDE"]
    resolution_deg = hp.nside2resol(nside, arcmin=True) / 60
    print("NSIDE: %i" % nside)
    print("approx. resolution: %3.1f deg" % resolution_deg)
    print("number of pixels: %i" % hp.nside2npix(nside))
    print("Process: %s" % header["PROCESS"])
    print("Units: %s" % header["TUNIT1"])
    return nside
def get_map(fits_map_filename):
    """Read a HEALPix map from a FITS file into a 1-D numpy array.

    Args:
        fits_map_filename: path to the FITS file; flux is read from column 0
            of HDU 1, one row per pixel.

    Returns:
        numpy.ndarray: one flux value per HEALPix pixel.
    """
    h = fitsio.read_header(fits_map_filename, ext=1)
    n_entries = h["NAXIS2"]
    NSIDE = h["NSIDE"]
    # Sanity check: the row count must match the pixel count for this NSIDE.
    assert n_entries == hp.nside2npix(NSIDE)
    fits = fitsio.FITS(fits_map_filename, iter_row_buffer=10000)
    # Build the pixel list in one pass instead of a manual append loop.
    hmap = [fits[1][i][0] for i in range(n_entries)]
    print("Read map from %3.1e to %3.1e with %i pixels" % (min(hmap), max(hmap), n_entries))
    print("Mean flux: ", np.mean(hmap))
    print("Total flux: ", sum(hmap))
    return np.array(hmap)
def compute_map_slope(map_nu1, map_nu2, nu1, nu2, b, l):
    """Compute the spectral slope between two frequency maps on a lat/lon grid.

    For each (b, l) direction the slope is
    (ln f(nu1) - ln f(nu2)) / (ln nu1 - ln nu2).

    Args:
        map_nu1, map_nu2: HEALPix flux arrays at frequencies nu1 and nu2.
        nu1, nu2: the two frequencies.
        b, l: 1-D arrays of galactic latitudes and longitudes in degrees.

    Returns:
        numpy.ndarray of shape (len(b), len(l)) with the slope values.
    """
    b_size, l_size = len(b), len(l)
    slopes = np.zeros((b_size, l_size))
    # The denominator does not depend on the pixel: hoist it out of the loop.
    log_nu_ratio = np.log(nu1) - np.log(nu2)
    for i in range(b_size):
        for j in range(l_size):
            # NOTE(review): relies on the module-level `nside` global — consider
            # passing it as an explicit parameter.
            ipix = hp.ang2pix(nside, l[j], b[i], lonlat=True)
            slopes[i][j] = (np.log(map_nu1[ipix]) - np.log(map_nu2[ipix])) / log_nu_ratio
    return slopes
def make_cbar(image, units):
    """Attach a horizontal colorbar labelled with *units* to the module-level figure.

    NOTE(review): uses the module-level `fig` global created in the main script.
    """
    cbar = fig.colorbar(image, orientation='horizontal', pad=0.15)
    bar_axis = cbar.ax
    bar_axis.set_xlabel(units, fontsize=10)
    bar_axis.labelpad = 2
    bar_axis.tick_params('both', length=0, width=1., which='major', pad=4,
                         bottom=True, top=True, left=True, right=True)
    cbar.outline.set_linewidth(0.8)
# MAIN: compute and plot the synchrotron spectral-slope map between 408 and 412 MHz.
output_filename = 'SynchrotronSlope-408MHz-cartesian-128'
title = r'Synchrotron Slope'
units = r'$\beta$'
# Color scale limits for the slope map.
min_map, max_map = -3.1, -2.9
fig, ax = plib.set_plot_style((4.5, 4))
# NSIDE is read from the reference map header and used (as a global) by compute_map_slope.
nside = get_header_info('fits/map-Synchro-408MHz-128.fits.gz')
# Two maps at nearby frequencies; their log-ratio gives the spectral slope.
map_nu1 = get_map('fits/map-Synchro-noturb-408MHz-128.fits.gz')
map_nu2 = get_map('fits/map-Synchro-noturb-412MHz-128.fits.gz')
nu1, nu2 = 408., 412.
# Grid of galactic latitudes/longitudes (degrees) at which to sample the maps.
b = np.linspace(-80., +80., 160 * 2)
l = np.linspace(-150., +150., 300 * 2)
map_2d = compute_map_slope(map_nu1, map_nu2, nu1, nu2, b, l)
image = ax.pcolor(l, b, map_2d,
cmap='jet',
vmin=min_map,
vmax=max_map,
shading='auto',
edgecolors='face')
make_cbar(image, units)
# Galactic longitude conventionally increases to the left.
ax.invert_xaxis()
ax.set_title(title, pad=5, fontsize=11)
#ax.grid(True)
ax.set_xlabel(r'l [deg]')
ax.set_ylabel(r'b [deg]')
plib.savefig(plt, output_filename)
| [
"numpy.mean",
"matplotlib.use",
"plot_lib.savefig",
"fitsio.FITS",
"numpy.log",
"fitsio.read_header",
"numpy.array",
"plot_lib.set_plot_style",
"numpy.linspace",
"numpy.zeros",
"healpy.ang2pix",
"healpy.nside2resol",
"healpy.nside2npix"
] | [((18, 42), 'matplotlib.use', 'matplotlib.use', (['"""MacOSX"""'], {}), "('MacOSX')\n", (32, 42), False, 'import matplotlib\n'), ((2038, 2067), 'plot_lib.set_plot_style', 'plib.set_plot_style', (['(4.5, 4)'], {}), '((4.5, 4))\n', (2057, 2067), True, 'import plot_lib as plib\n'), ((2288, 2322), 'numpy.linspace', 'np.linspace', (['(-80.0)', '(+80.0)', '(160 * 2)'], {}), '(-80.0, +80.0, 160 * 2)\n', (2299, 2322), True, 'import numpy as np\n'), ((2325, 2361), 'numpy.linspace', 'np.linspace', (['(-150.0)', '(+150.0)', '(300 * 2)'], {}), '(-150.0, +150.0, 300 * 2)\n', (2336, 2361), True, 'import numpy as np\n'), ((2773, 2807), 'plot_lib.savefig', 'plib.savefig', (['plt', 'output_filename'], {}), '(plt, output_filename)\n', (2785, 2807), True, 'import plot_lib as plib\n'), ((295, 339), 'fitsio.read_header', 'fitsio.read_header', (['fits_map_filename'], {'ext': '(1)'}), '(fits_map_filename, ext=1)\n', (313, 339), False, 'import fitsio\n'), ((678, 722), 'fitsio.read_header', 'fitsio.read_header', (['fits_map_filename'], {'ext': '(1)'}), '(fits_map_filename, ext=1)\n', (696, 722), False, 'import fitsio\n'), ((831, 884), 'fitsio.FITS', 'fitsio.FITS', (['fits_map_filename'], {'iter_row_buffer': '(10000)'}), '(fits_map_filename, iter_row_buffer=10000)\n', (842, 884), False, 'import fitsio\n'), ((1169, 1183), 'numpy.array', 'np.array', (['hmap'], {}), '(hmap)\n', (1177, 1183), True, 'import numpy as np\n'), ((1291, 1317), 'numpy.zeros', 'np.zeros', (['(b_size, l_size)'], {}), '((b_size, l_size))\n', (1299, 1317), True, 'import numpy as np\n'), ((798, 818), 'healpy.nside2npix', 'hp.nside2npix', (['NSIDE'], {}), '(NSIDE)\n', (811, 818), True, 'import healpy as hp\n'), ((1105, 1118), 'numpy.mean', 'np.mean', (['hmap'], {}), '(hmap)\n', (1112, 1118), True, 'import numpy as np\n'), ((519, 539), 'healpy.nside2npix', 'hp.nside2npix', (['NSIDE'], {}), '(NSIDE)\n', (532, 539), True, 'import healpy as hp\n'), ((1397, 1439), 'healpy.ang2pix', 'hp.ang2pix', (['nside', 'l[j]', 'b[i]'], 
{'lonlat': '(True)'}), '(nside, l[j], b[i], lonlat=True)\n', (1407, 1439), True, 'import healpy as hp\n'), ((441, 475), 'healpy.nside2resol', 'hp.nside2resol', (['NSIDE'], {'arcmin': '(True)'}), '(NSIDE, arcmin=True)\n', (455, 475), True, 'import healpy as hp\n'), ((1468, 1489), 'numpy.log', 'np.log', (['map_nu1[ipix]'], {}), '(map_nu1[ipix])\n', (1474, 1489), True, 'import numpy as np\n'), ((1492, 1513), 'numpy.log', 'np.log', (['map_nu2[ipix]'], {}), '(map_nu2[ipix])\n', (1498, 1513), True, 'import numpy as np\n'), ((1518, 1529), 'numpy.log', 'np.log', (['nu1'], {}), '(nu1)\n', (1524, 1529), True, 'import numpy as np\n'), ((1532, 1543), 'numpy.log', 'np.log', (['nu2'], {}), '(nu2)\n', (1538, 1543), True, 'import numpy as np\n')] |
"""
TO-DO:
add probability map layer for T-bar or cleft detection and other semantic prediction
"""
import neuroglancer as ng
import numpy as np
from chunkflow.chunk import Chunk
from .base import OperatorBase
class NeuroglancerOperator(OperatorBase):
    """Serve chunks through a local neuroglancer viewer.

    Each chunk is appended as an image, segmentation, probability-map or
    synapse-annotation layer depending on its type.
    """
    def __init__(self,
                 name: str = 'neuroglancer',
                 port: int = None,
                 voxel_size: tuple = None):
        """
        Args:
            name (str): operator name.
            port (int): TCP port to bind the neuroglancer server to; None lets
                neuroglancer choose.
            voxel_size (tuple): physical voxel size in nm; overrides the chunk's
                own metadata when given.
        """
        super().__init__(name=name)
        self.port = port
        self.voxel_size = voxel_size

    def _get_voxel_size(self, chunk):
        # Preference order: explicit operator setting, then the chunk's own
        # metadata, then isotropic unit voxels.
        if self.voxel_size:
            voxel_size = self.voxel_size
        elif chunk.voxel_size:
            voxel_size = chunk.voxel_size
        else:
            voxel_size = (1, 1, 1)
        return voxel_size

    def _append_synapse_annotation_layer(self, viewer_state: ng.viewer_state.ViewerState, name: str, data: dict):
        """Add a local annotation layer with presynaptic points and pre->post lines.

        `data` carries 'presynapses' ({id: coordinate}), optionally
        'postsynapses' ({id: [coordinates]}), plus 'order' and 'resolution'
        describing the coordinate space.
        """
        annotations = []
        presynapses = data['presynapses']
        for sid, pre_coordinate in presynapses.items():
            if 'postsynapses' in data:
                postsynapses = data['postsynapses']
                if sid in postsynapses:
                    coordinates = postsynapses[sid]
                    for idx, post_coordinate in enumerate(coordinates):
                        post_annotation = ng.LineAnnotation(
                            id=str(sid) + str(idx) + '_post',
                            # note that the synapse coordinate is already in xyz order
                            # so we do not need to reverse it!
                            pointA=pre_coordinate,
                            pointB=post_coordinate,
                            props=['#0ff', 5]
                        )
                        annotations.append(post_annotation)
            # we would like to show line first and then the presynapse point
            # so, we have distinct color to show T-bar
            pre_annotation = ng.PointAnnotation(
                id=str(sid) + '_pre',
                point=pre_coordinate,
                props=['#ff0', 8]
            )
            annotations.append(pre_annotation)

        viewer_state.layers.append(
            name=name,
            layer=ng.LocalAnnotationLayer(
                dimensions=ng.CoordinateSpace(names=data['order'], units="nm", scales=data['resolution']),
                annotation_properties=[
                    ng.AnnotationPropertySpec(
                        id='color',
                        type='rgb',
                        default='red',
                    ),
                    ng.AnnotationPropertySpec(
                        id='size',
                        type='float32',
                        default=5
                    )
                ],
                annotations=annotations,
                shader='''
void main() {
    setColor(prop_color());
    setPointMarkerSize(prop_size());
}
''',
            ),
        )

    def _append_image_layer(self, viewer_state: ng.viewer_state.ViewerState, chunk_name: str, chunk: Chunk):
        """Add a grayscale/RGB image layer with brightness/contrast controls."""
        voxel_size = self._get_voxel_size(chunk)
        dimensions = ng.CoordinateSpace(
            scales=voxel_size,
            units=['nm', 'nm', 'nm'],
            names=['z', 'y', 'x']
        )
        shader="""#uicontrol int channel slider(min=0, max=4)
#uicontrol vec3 color color(default="white")
#uicontrol float brightness slider(min=-1, max=1)
#uicontrol float contrast slider(min=-3, max=3, step=0.01)
void main() {
emitRGB(color *
(toNormalized(getDataValue(channel)) + brightness) *
exp(contrast));
}"""
        viewer_state.layers.append(
            name=chunk_name,
            layer=ng.LocalVolume(
                data=chunk,
                dimensions=dimensions,
                # offset is in nm, not voxels
                voxel_offset=chunk.voxel_offset,
            ),
            shader=shader
        )

    def _append_segmentation_layer(self, viewer_state: ng.viewer_state.ViewerState, chunk_name: str, chunk: Chunk):
        """Add a segmentation layer; neuroglancer needs unsigned label dtypes."""
        if np.issubdtype(chunk.dtype, np.int64):
            assert chunk.min() >= 0
            chunk = chunk.astype(np.uint64)
        voxel_size = self._get_voxel_size(chunk)
        dimensions = ng.CoordinateSpace(
            scales=voxel_size,
            units=['nm', 'nm', 'nm'],
            names=['z', 'y', 'x']
        )

        viewer_state.layers.append(
            name=chunk_name,
            layer=ng.LocalVolume(
                data=chunk,
                dimensions=dimensions,
                # offset is in nm, not voxels
                # chunkflow use C order with zyx,
                # while neuroglancer use F order with xyz
                voxel_offset=chunk.voxel_offset,
            )
        )

    def _append_probability_map_layer(self, viewer_state: ng.viewer_state.ViewerState, chunk_name: str, chunk: Chunk):
        """Add a multi-channel probability map layer (1-3 channels mapped to gray/RG/RGB)."""
        if chunk.dtype == np.dtype('<f4') or chunk.dtype == np.dtype('float16'):
            chunk = chunk.astype(np.float32)
        voxel_size = self._get_voxel_size(chunk)
        # Pick a shader matching the channel count.
        if chunk.shape[0] == 1:
            shader = """void main() {
    emitGrayscale(toNormalized(getDataValue(0)));
}
"""
        elif chunk.shape[0] == 2:
            shader = """void main() {
    emitRGB(vec3(toNormalized(getDataValue(0)),
                 toNormalized(getDataValue(1)),
                 0.));
}
"""
        else:
            shader = """void main() {
    emitRGB(vec3(toNormalized(getDataValue(0)),
                 toNormalized(getDataValue(1)),
                 toNormalized(getDataValue(2))));
}
"""
        dimensions = ng.CoordinateSpace(
            scales=(1, ) + voxel_size,
            units=['', 'nm', 'nm', 'nm'],
            names=['c^', 'z', 'y', 'x']
        )
        viewer_state.layers.append(
            name=chunk_name,
            layer=ng.LocalVolume(
                data=chunk.array,
                dimensions=dimensions,
                # offset is in nm, not voxels
                voxel_offset=(0, ) + chunk.voxel_offset,
            ),
            shader=shader
        )

    def __call__(self, chunks: dict, selected: str=None):
        """Launch a neuroglancer server showing the selected chunks.

        Parameters:
            chunks: mapping of chunk name to chunk (or synapse dict).
            selected: comma-separated chunk names to show; all chunks if None.

        Blocks until the user types 'q' at the prompt.
        """
        if selected is None:
            selected = chunks.keys()
        elif isinstance(selected, str):
            selected = selected.split(',')

        ng.set_server_bind_address(bind_address='0.0.0.0', bind_port=self.port)
        viewer = ng.Viewer()
        with viewer.txn() as viewer_state:
            for chunk_name in selected:
                chunk = chunks[chunk_name]
                print(f'visualizing chunk {chunk_name} with voxel offset: {chunk.voxel_offset}, voxel size: {chunk.voxel_size}')
                if isinstance(chunk, dict):
                    # this could be synapses
                    self._append_synapse_annotation_layer(viewer_state, chunk_name, chunk)
                elif chunk.is_image or (chunk.ndim==3 and np.issubdtype(chunk.dtype, np.floating)):
                    self._append_image_layer(viewer_state, chunk_name, chunk)
                elif chunk.is_segmentation:
                    self._append_segmentation_layer(viewer_state, chunk_name, chunk)
                elif chunk.is_probability_map:
                    self._append_probability_map_layer(viewer_state, chunk_name, chunk)
                else:
                    # removed a leftover breakpoint() debug call here
                    raise ValueError(f'do not support this type: {type(chunk)} with datatype {chunk.dtype}')

        print('Open this url in browser: ')
        viewer_url = viewer.get_viewer_url()
        print(viewer_url)

        key = None
        while key!='q':
            key = input('Press q and enter/return to quit neuroglancer.')
| [
"neuroglancer.AnnotationPropertySpec",
"neuroglancer.Viewer",
"neuroglancer.set_server_bind_address",
"numpy.issubdtype",
"neuroglancer.CoordinateSpace",
"numpy.dtype",
"neuroglancer.LocalVolume"
] | [((3108, 3198), 'neuroglancer.CoordinateSpace', 'ng.CoordinateSpace', ([], {'scales': 'voxel_size', 'units': "['nm', 'nm', 'nm']", 'names': "['z', 'y', 'x']"}), "(scales=voxel_size, units=['nm', 'nm', 'nm'], names=['z',\n 'y', 'x'])\n", (3126, 3198), True, 'import neuroglancer as ng\n'), ((4023, 4059), 'numpy.issubdtype', 'np.issubdtype', (['chunk.dtype', 'np.int64'], {}), '(chunk.dtype, np.int64)\n', (4036, 4059), True, 'import numpy as np\n'), ((4211, 4301), 'neuroglancer.CoordinateSpace', 'ng.CoordinateSpace', ([], {'scales': 'voxel_size', 'units': "['nm', 'nm', 'nm']", 'names': "['z', 'y', 'x']"}), "(scales=voxel_size, units=['nm', 'nm', 'nm'], names=['z',\n 'y', 'x'])\n", (4229, 4301), True, 'import neuroglancer as ng\n'), ((5605, 5712), 'neuroglancer.CoordinateSpace', 'ng.CoordinateSpace', ([], {'scales': '((1,) + voxel_size)', 'units': "['', 'nm', 'nm', 'nm']", 'names': "['c^', 'z', 'y', 'x']"}), "(scales=(1,) + voxel_size, units=['', 'nm', 'nm', 'nm'],\n names=['c^', 'z', 'y', 'x'])\n", (5623, 5712), True, 'import neuroglancer as ng\n'), ((6481, 6552), 'neuroglancer.set_server_bind_address', 'ng.set_server_bind_address', ([], {'bind_address': '"""0.0.0.0"""', 'bind_port': 'self.port'}), "(bind_address='0.0.0.0', bind_port=self.port)\n", (6507, 6552), True, 'import neuroglancer as ng\n'), ((6570, 6581), 'neuroglancer.Viewer', 'ng.Viewer', ([], {}), '()\n', (6579, 6581), True, 'import neuroglancer as ng\n'), ((3666, 3753), 'neuroglancer.LocalVolume', 'ng.LocalVolume', ([], {'data': 'chunk', 'dimensions': 'dimensions', 'voxel_offset': 'chunk.voxel_offset'}), '(data=chunk, dimensions=dimensions, voxel_offset=chunk.\n voxel_offset)\n', (3680, 3753), True, 'import neuroglancer as ng\n'), ((4434, 4521), 'neuroglancer.LocalVolume', 'ng.LocalVolume', ([], {'data': 'chunk', 'dimensions': 'dimensions', 'voxel_offset': 'chunk.voxel_offset'}), '(data=chunk, dimensions=dimensions, voxel_offset=chunk.\n voxel_offset)\n', (4448, 4521), True, 'import neuroglancer as 
ng\n'), ((4892, 4907), 'numpy.dtype', 'np.dtype', (['"""<f4"""'], {}), "('<f4')\n", (4900, 4907), True, 'import numpy as np\n'), ((4926, 4945), 'numpy.dtype', 'np.dtype', (['"""float16"""'], {}), "('float16')\n", (4934, 4945), True, 'import numpy as np\n'), ((5839, 5938), 'neuroglancer.LocalVolume', 'ng.LocalVolume', ([], {'data': 'chunk.array', 'dimensions': 'dimensions', 'voxel_offset': '((0,) + chunk.voxel_offset)'}), '(data=chunk.array, dimensions=dimensions, voxel_offset=(0,) +\n chunk.voxel_offset)\n', (5853, 5938), True, 'import neuroglancer as ng\n'), ((2254, 2332), 'neuroglancer.CoordinateSpace', 'ng.CoordinateSpace', ([], {'names': "data['order']", 'units': '"""nm"""', 'scales': "data['resolution']"}), "(names=data['order'], units='nm', scales=data['resolution'])\n", (2272, 2332), True, 'import neuroglancer as ng\n'), ((2394, 2458), 'neuroglancer.AnnotationPropertySpec', 'ng.AnnotationPropertySpec', ([], {'id': '"""color"""', 'type': '"""rgb"""', 'default': '"""red"""'}), "(id='color', type='rgb', default='red')\n", (2419, 2458), True, 'import neuroglancer as ng\n'), ((2575, 2638), 'neuroglancer.AnnotationPropertySpec', 'ng.AnnotationPropertySpec', ([], {'id': '"""size"""', 'type': '"""float32"""', 'default': '(5)'}), "(id='size', type='float32', default=5)\n", (2600, 2638), True, 'import neuroglancer as ng\n'), ((7075, 7114), 'numpy.issubdtype', 'np.issubdtype', (['chunk.dtype', 'np.floating'], {}), '(chunk.dtype, np.floating)\n', (7088, 7114), True, 'import numpy as np\n')] |
'''
Module for loading data from the Pangeo CMIP6 intake catalogue (using the intake-esm
loader) created by NCAR.
'''
import functools
try:
import iris
except ModuleNotFoundError:
iris = None
# ReadTheDocs can't import iris
import numpy
try:
import intake
except ModuleNotFoundError:
intake = None
import forest.map_view
from forest import geo, util
from forest.drivers import gridded_forecast
# Location of the Pangeo-CMIP6 intake catalogue file.
URL = 'https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json'
class Dataset:
    """Driver dataset for the Pangeo CMIP6 intake catalogue."""

    def __init__(self, pattern=None, **kwargs):
        # The pattern encodes the CMIP6 facets consumed by the loader/navigator.
        self.pattern = pattern

    def navigator(self):
        """Return a Navigator over the intake catalogue."""
        return Navigator()

    def map_view(self, color_mapper):
        """Build an image view backed by an IntakeLoader with CMIP6 hover info."""
        image_loader = IntakeLoader(self.pattern)
        image_view = forest.map_view.ImageView(image_loader, color_mapper)
        image_view.set_hover_properties(INTAKE_TOOLTIPS, INTAKE_FORMATTERS)
        return image_view
@functools.lru_cache(maxsize=64)
def _get_intake_vars(
        experiment_id,
        table_id,
        grid_label,
        institution_id,
        member_id):
    """List the variables available for one combination of CMIP6 facets.

    Cached to avoid repeating the remote catalogue query for the same
    combination of parameters.
    """
    catalogue = intake.open_esm_datastore(URL)
    subset = catalogue.search(
        experiment_id=experiment_id,
        table_id=table_id,
        grid_label=grid_label,
        institution_id=institution_id,
        member_id=member_id,
    )
    return subset.unique('variable_id')['variable_id']['values']
@functools.lru_cache(maxsize=16)
def _load_from_intake(
        experiment_id,
        table_id,
        grid_label,
        variable_id,
        institution_id,
        activity_id,
        member_id):
    """Load one CMIP6 cube from the pangeo intake catalogue.

    The arguments are CMIP6 facets of a dataset; the authoritative reference
    is the ESGF index at https://esgf-index1.ceda.ac.uk/search/cmip6-ceda/.
    Cached to avoid repeating the remote download.
    """
    catalogue = intake.open_esm_datastore(URL)
    subset = catalogue.search(
        experiment_id=experiment_id,
        table_id=table_id,
        grid_label=grid_label,
        institution_id=institution_id,
        member_id=member_id,
        variable_id=variable_id)
    dataset_by_label = subset.to_dataset_dict(
        zarr_kwargs={'consolidated': True, 'decode_times': False},
        cdf_kwargs={'chunks': {}, 'decode_times': False})
    # The search should have matched exactly one dataset: take it and
    # convert the requested variable to an iris cube.
    _, xarray_dataset = dataset_by_label.popitem()
    cube = xarray_dataset[variable_id].to_iris()
    if 'air_pressure' in [coord.name() for coord in cube.coords()]:
        cube.coord('air_pressure').convert_units('hPa')
    return iris.util.squeeze(cube)  # drop member dimension
# Hover tooltip rows shown for intake layers: (label, bokeh column reference).
INTAKE_TOOLTIPS = [
("Name", "@name"),
("Value", "@image @units"),
('Valid', '@valid{%F %H:%M}'),
("Level", "@level"),
("Experiment", "@experiment"),
("Institution", "@institution"),
("Member", "@memberid"),
('Variable', "@variableid"), ]
# Bokeh hover formatters: render the 'valid' column as a datetime.
INTAKE_FORMATTERS = {
'@valid': 'datetime',
}
@functools.lru_cache(maxsize=16)
def _get_bokeh_image(cube,
                     experiment_id,
                     variable_id,
                     institution_id,
                     initial_time,
                     member_id,
                     selected_time,
                     pressure,
                     ):
    """Build the image dict required by bokeh for the current view.

    Realising the data involves downloading it, so the function is cached to
    avoid repeating the work for the same arguments.
    """
    def _close_in_time(select_time, time_cell):
        # Accept time cells within two days of the requested time.
        return abs((select_time - util.to_datetime(time_cell.point)).days) < 2

    def _away_from_poles(lat):
        """
        Due to the way the current projection of gridded data works, the poles are
        not well handled, resulting in NaNs if we use the full range of latitudes.
        The current hack is to chop off latitude greater than 85 degrees north and
        south. Given the importance of data at the poles in climate change research,
        we will need to fix this in future.
        """
        return -85.0 < lat < 85.0

    def _close_in_pressure(select_pressure, data_pressure):
        return abs(select_pressure - data_pressure.point) < 1.0

    if cube is None or initial_time is None:
        return gridded_forecast.empty_image()

    constraints = {
        'time': functools.partial(_close_in_time, selected_time),
        'latitude': _away_from_poles,
    }
    if 'air_pressure' in [coord.name() for coord in cube.coords()]:
        constraints['air_pressure'] = functools.partial(_close_in_pressure, pressure)
    cropped = cube.extract(iris.Constraint(**constraints))

    lats = cropped.coord('latitude').points
    lons = cropped.coord('longitude').points - 180.0
    values = cropped.data
    # Rotate the grid by half its width so longitudes run from -180 to 180.
    half_width = int(values.shape[1] / 2)
    values = numpy.concatenate(
        [values[:, half_width:], values[:, :half_width]], axis=1)
    data = geo.stretch_image(lons, lats, values)
    # Mask NaNs so they render as transparent rather than as artefacts.
    data['image'] = [numpy.ma.masked_array(data['image'][0],
                                           mask=numpy.isnan(data['image'][0]))]
    return data
class IntakeLoader:
    """Loader for the CMIP6 intake dataset.

    Holds the CMIP6 facets parsed from the driver pattern and lazily
    downloads the matching cube the first time data is needed.
    """

    def __init__(self, pattern):
        (self.institution_id, self.experiment_id, self.member_id,
         self.grid_label, self.table_id, self.activity_id) = pattern.split('_')
        self.variable_id = ''
        self._label = f'{self.experiment_id}_{self.institution_id}_{self.member_id}'
        self._cube = None

    @property
    def cube(self):
        """Lazily loaded cube.

        Loading is delayed until all relevant parameters are defined and the
        data and metadata can be downloaded.
        """
        if not self._cube:
            self._load_cube()
        return self._cube

    def _load_cube(self):
        self._cube = _load_from_intake(
            experiment_id=self.experiment_id,
            table_id=self.table_id,
            grid_label=self.grid_label,
            variable_id=self.variable_id,
            institution_id=self.institution_id,
            activity_id=self.activity_id,
            member_id=self.member_id)

    def image(self, state):
        """Realise the data for *state* and return a bokeh image dict."""
        if self.variable_id != state.variable:
            # Variable changed: invalidate the cached cube so it is reloaded.
            self.variable_id = state.variable
            self._cube = None
        pressure = state.pressure
        selected_time = util.to_datetime(state.valid_time)
        cube = self.cube
        has_pressure_coord = 'air_pressure' in [coord.name() for coord in cube.coords()]
        if has_pressure_coord and pressure is None:
            return gridded_forecast.empty_image()
        # The heavy calculation/download is cached, so repeated calls with the
        # same view parameters are cheap.
        data = _get_bokeh_image(cube, self.experiment_id,
                               self.variable_id,
                               self.institution_id, state.initial_time,
                               self.member_id, selected_time, pressure)
        data.update(gridded_forecast.coordinates(str(selected_time),
                                                 state.initial_time,
                                                 state.pressures,
                                                 pressure))
        data.update({
            'name': [self._label],
            'units': [str(cube.units)],
            'experiment': [self.experiment_id],
            'institution': [self.institution_id],
            'memberid': [self.member_id],
            'variableid': [self.variable_id]
        })
        return data
class Navigator:
    """Navigator for the CMIP6 intake dataset."""

    def __init__(self):
        self.experiment_id = ''
        self.table_id = ''
        self.grid_label = ''
        self.variable_id = ''
        self.institution_id = ''
        self.activity_id = ''
        self.parent_source_id = ''
        self.member_id = ''
        self._cube = None

    def _parse_pattern(self, pattern):
        """Unpack the CMIP6 facets encoded in an underscore-separated pattern.

        These parameters select the correct data and metadata through the
        intake catalogue.
        """
        (self.institution_id, self.experiment_id, self.member_id,
         self.grid_label, self.table_id, self.activity_id) = pattern.split('_')
        self._label = f'{self.experiment_id}_{self.institution_id}_{self.member_id}'

    @property
    def cube(self):
        """Lazily loaded cube.

        Loading is delayed until all relevant parameters are defined and the
        data and metadata can be downloaded.
        """
        if not self._cube:
            self._load_cube()
        return self._cube

    def _load_cube(self):
        if not self.variable_id:
            # No variable selected yet: default to the first available one.
            self.variable_id = self._get_vars()[0]
        self._cube = _load_from_intake(
            experiment_id=self.experiment_id,
            table_id=self.table_id,
            grid_label=self.grid_label,
            variable_id=self.variable_id,
            institution_id=self.institution_id,
            activity_id=self.activity_id,
            member_id=self.member_id)

    def variables(self, pattern):
        self._parse_pattern(pattern)
        return self._get_vars()

    def _get_vars(self):
        available = _get_intake_vars(
            experiment_id=self.experiment_id,
            table_id=self.table_id,
            grid_label=self.grid_label,
            institution_id=self.institution_id,
            member_id=self.member_id)
        # Put surface air temperature first so it shows as the default.
        if 'tas' in available:
            available = ['tas'] + [v1 for v1 in available if v1 != 'tas']
        return available

    def initial_times(self, pattern, variable=None):
        self._parse_pattern(pattern)
        # The first time cell is treated as the (single) initial time.
        for cell in self.cube.coord('time').cells():
            return [util.to_datetime(cell.point)]

    def valid_times(self, pattern, variable, initial_time):
        if self.variable_id != variable:
            # Variable changed: invalidate the cached cube.
            self.variable_id = variable
            self._cube = None
        self._parse_pattern(pattern)
        return [util.to_datetime(cell.point)
                for cell in self.cube.coord('time').cells()]

    def pressures(self, pattern, variable, initial_time):
        print(f'retrieving pressures for variable {variable}')
        if self.variable_id != variable:
            self.variable_id = variable
            self._cube = None
        self._parse_pattern(pattern)
        cube = self.cube
        print(pattern)
        print(variable)
        try:
            # Sort largest first so levels closer to the surface appear
            # higher up the list.
            return sorted((cell.point for cell in
                           cube.coord('air_pressure').cells()), reverse=True)
        except iris.exceptions.CoordinateNotFoundError:
            return []
| [
"iris.util.squeeze",
"intake.open_esm_datastore",
"forest.geo.stretch_image",
"functools.partial",
"forest.util.to_datetime",
"numpy.concatenate",
"numpy.isnan",
"functools.lru_cache",
"iris.Constraint",
"forest.drivers.gridded_forecast.empty_image"
] | [((964, 995), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(64)'}), '(maxsize=64)\n', (983, 995), False, 'import functools\n'), ((1617, 1648), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (1636, 1648), False, 'import functools\n'), ((3287, 3318), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (3306, 3318), False, 'import functools\n'), ((1299, 1329), 'intake.open_esm_datastore', 'intake.open_esm_datastore', (['URL'], {}), '(URL)\n', (1324, 1329), False, 'import intake\n'), ((2143, 2173), 'intake.open_esm_datastore', 'intake.open_esm_datastore', (['URL'], {}), '(URL)\n', (2168, 2173), False, 'import intake\n'), ((2913, 2936), 'iris.util.squeeze', 'iris.util.squeeze', (['cube'], {}), '(cube)\n', (2930, 2936), False, 'import iris\n'), ((3907, 3940), 'forest.util.to_datetime', 'util.to_datetime', (['time_cell.point'], {}), '(time_cell.point)\n', (3923, 3940), False, 'from forest import geo, util\n'), ((4683, 4713), 'forest.drivers.gridded_forecast.empty_image', 'gridded_forecast.empty_image', ([], {}), '()\n', (4711, 4713), False, 'from forest.drivers import gridded_forecast\n'), ((5500, 5601), 'numpy.concatenate', 'numpy.concatenate', (['[cube_data_cropped[:, cube_width:], cube_data_cropped[:, :cube_width]]'], {'axis': '(1)'}), '([cube_data_cropped[:, cube_width:], cube_data_cropped[:,\n :cube_width]], axis=1)\n', (5517, 5601), False, 'import numpy\n'), ((5640, 5695), 'forest.geo.stretch_image', 'geo.stretch_image', (['long_pts', 'lat_pts', 'cube_data_cropped'], {}), '(long_pts, lat_pts, cube_data_cropped)\n', (5657, 5695), False, 'from forest import geo, util\n'), ((7697, 7725), 'forest.util.to_datetime', 'util.to_datetime', (['valid_time'], {}), '(valid_time)\n', (7713, 7725), False, 'from forest import geo, util\n'), ((4759, 4802), 'functools.partial', 'functools.partial', (['time_comp', 'selected_time'], {}), '(time_comp, selected_time)\n', (4776, 
4802), False, 'import functools\n'), ((5083, 5127), 'functools.partial', 'functools.partial', (['pressure_select', 'pressure'], {}), '(pressure_select, pressure)\n', (5100, 5127), False, 'import functools\n'), ((5211, 5245), 'iris.Constraint', 'iris.Constraint', ([], {}), '(**constraint_dict)\n', (5226, 5245), False, 'import iris\n'), ((8109, 8139), 'forest.drivers.gridded_forecast.empty_image', 'gridded_forecast.empty_image', ([], {}), '()\n', (8137, 8139), False, 'from forest.drivers import gridded_forecast\n'), ((11757, 11785), 'forest.util.to_datetime', 'util.to_datetime', (['cell.point'], {}), '(cell.point)\n', (11773, 11785), False, 'from forest import geo, util\n'), ((12074, 12102), 'forest.util.to_datetime', 'util.to_datetime', (['cell.point'], {}), '(cell.point)\n', (12090, 12102), False, 'from forest import geo, util\n'), ((5813, 5842), 'numpy.isnan', 'numpy.isnan', (["data['image'][0]"], {}), "(data['image'][0])\n", (5824, 5842), False, 'import numpy\n')] |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
# MNIST dataset parameters.
n_classes = 10  # digits 0-9
n_features = 784  # 28x28 pixel images, flattened
# Training parameters.
l_rate = 0.001
epoches = 3000  # number of training steps (batches), see the training loop
batch_size = 256
display_steps = 100  # log metrics every N steps
# Network parameters: sizes of the two hidden layers.
n_hidden_1 = 128
n_hidden_2 = 256
# Load MNIST: images with integer class labels.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Convert to float32 for TensorFlow math.
X_train, X_test = np.array(X_train, np.float32), np.array(X_test, np.float32)
# Flatten 28x28 images to 1-D arrays of length n_features.
X_train, X_test = X_train.reshape([-1, n_features]), X_test.reshape([-1, n_features])
# Normalize pixel values from [0, 255] to [0, 1].
X_train, X_test = X_train / 255., X_test / 255.
# Build an infinitely repeating, shuffled, batched input pipeline.
train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# Random value generator used to initialize the weights.
random_normal = tf.initializers.RandomNormal()
# Weight matrices: two hidden layers plus the output layer.
weights = {
    'h1': tf.Variable(random_normal([n_features, n_hidden_1])),
    'h2': tf.Variable(random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(random_normal([n_hidden_2, n_classes]))
}
# Bias vectors, initialized to zero.
biases = {
    'b1': tf.Variable(tf.zeros([n_hidden_1])),
    'b2': tf.Variable(tf.zeros([n_hidden_2])),
    'out': tf.Variable(tf.zeros([n_classes]))
}
# Model definition
def neural_net(x):
    """Forward pass of the two-hidden-layer perceptron.

    Uses the module-level `weights` and `biases` variables and returns
    class probabilities (softmax over the output logits).
    """
    # First fully connected layer (128 neurons), sigmoid activation.
    hidden_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    # Second fully connected layer (256 neurons), sigmoid activation.
    hidden_2 = tf.nn.sigmoid(tf.add(tf.matmul(hidden_1, weights['h2']), biases['b2']))
    # Output layer: one logit per class.
    logits = tf.matmul(hidden_2, weights['out']) + biases['out']
    # Softmax normalizes the logits to a probability distribution.
    return tf.nn.softmax(logits)
# Cross-entropy loss
def cross_entry(y_pred, y_true):
    """Cross-entropy between predicted probabilities and integer labels."""
    # Encode the integer labels as one-hot vectors.
    one_hot = tf.one_hot(y_true, depth=n_classes)
    # Clip prediction values away from 0 to avoid log(0) errors.
    clipped = tf.clip_by_value(y_pred, 1e-9, 1.)
    # NOTE(review): reduce_sum has no axis, so it sums over the whole
    # batch and reduce_mean acts on a scalar — confirm this batch-sum
    # behaviour (rather than a per-example mean) is intended.
    return tf.reduce_mean(-tf.reduce_sum(one_hot * tf.math.log(clipped)))
def accuracy(y_pred, y_true):
    """Fraction of predictions whose argmax matches the integer label."""
    hits = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
    return tf.reduce_mean(tf.cast(hits, tf.float32), axis=-1)
# Stochastic gradient descent optimizer shared by all training steps
optimizer = tf.optimizers.SGD(l_rate)
# One optimization step
def run_optimizer(x, y):
    """Perform a single SGD update on the batch (x, y).

    Records the forward pass under a gradient tape, differentiates the
    cross-entropy loss w.r.t. all weights and biases, and applies the
    gradients through the shared optimizer.
    """
    with tf.GradientTape() as tape:
        loss = cross_entry(neural_net(x), y)
    # Every trainable parameter of the network.
    params = list(weights.values()) + list(biases.values())
    # Gradients of the loss w.r.t. each parameter.
    grads = tape.gradient(loss, params)
    optimizer.apply_gradients(zip(grads, params))
# Training loop: one optimizer step per batch, for `epoches` batches.
for step, (batch_x, batch_y) in enumerate(train_data.take(epoches), 1):
    run_optimizer(batch_x, batch_y)
    if step % display_steps == 0:
        # Recompute metrics on the current batch for logging only.
        pred = neural_net(batch_x)
        loss = cross_entry(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# Evaluate the trained model on the held-out test set.
pred = neural_net(X_test)
print("Test accuracy: %f" % accuracy(pred, y_test))
# Visual sanity check: show the first few test images with their
# predicted digit.
n_images = 5
test_images = X_test[:n_images]
predictions = neural_net(test_images)
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
| [
"tensorflow.one_hot",
"numpy.reshape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.math.log",
"tensorflow.optimizers.SGD",
"tensorflow.GradientTape",
"numpy.array",
"tensorflow.initializers.RandomNormal",
"tensorflow.nn.sigmoid",
"tensor... | [((352, 369), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (367, 369), False, 'from tensorflow.keras.datasets import mnist\n'), ((739, 793), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_train, y_train)'], {}), '((X_train, y_train))\n', (773, 793), True, 'import tensorflow as tf\n'), ((935, 965), 'tensorflow.initializers.RandomNormal', 'tf.initializers.RandomNormal', ([], {}), '()\n', (963, 965), True, 'import tensorflow as tf\n'), ((2633, 2658), 'tensorflow.optimizers.SGD', 'tf.optimizers.SGD', (['l_rate'], {}), '(l_rate)\n', (2650, 2658), True, 'import tensorflow as tf\n'), ((410, 439), 'numpy.array', 'np.array', (['X_train', 'np.float32'], {}), '(X_train, np.float32)\n', (418, 439), True, 'import numpy as np\n'), ((441, 469), 'numpy.array', 'np.array', (['X_test', 'np.float32'], {}), '(X_test, np.float32)\n', (449, 469), True, 'import numpy as np\n'), ((1560, 1582), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer_1'], {}), '(layer_1)\n', (1573, 1582), True, 'import tensorflow as tf\n'), ((1777, 1799), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer_2'], {}), '(layer_2)\n', (1790, 1799), True, 'import tensorflow as tf\n'), ((2020, 2047), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output_layer'], {}), '(output_layer)\n', (2033, 2047), True, 'import tensorflow as tf\n'), ((2150, 2185), 'tensorflow.one_hot', 'tf.one_hot', (['y_true'], {'depth': 'n_classes'}), '(y_true, depth=n_classes)\n', (2160, 2185), True, 'import tensorflow as tf\n'), ((2252, 2288), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_pred', '(1e-09)', '(1.0)'], {}), '(y_pred, 1e-09, 1.0)\n', (2268, 2288), True, 'import tensorflow as tf\n'), ((3732, 3742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3740, 3742), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1238), 'tensorflow.zeros', 'tf.zeros', (['[n_hidden_1]'], {}), '([n_hidden_1])\n', (1224, 1238), True, 'import tensorflow 
as tf\n'), ((1263, 1285), 'tensorflow.zeros', 'tf.zeros', (['[n_hidden_2]'], {}), '([n_hidden_2])\n', (1271, 1285), True, 'import tensorflow as tf\n'), ((1311, 1332), 'tensorflow.zeros', 'tf.zeros', (['[n_classes]'], {}), '([n_classes])\n', (1319, 1332), True, 'import tensorflow as tf\n'), ((1446, 1473), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['h1']"], {}), "(x, weights['h1'])\n", (1455, 1473), True, 'import tensorflow as tf\n'), ((1657, 1690), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (1666, 1690), True, 'import tensorflow as tf\n'), ((1885, 1919), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out']"], {}), "(layer_2, weights['out'])\n", (1894, 1919), True, 'import tensorflow as tf\n'), ((2454, 2474), 'tensorflow.argmax', 'tf.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (2463, 2474), True, 'import tensorflow as tf\n'), ((2476, 2501), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.int64'], {}), '(y_true, tf.int64)\n', (2483, 2501), True, 'import tensorflow as tf\n'), ((2529, 2568), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2536, 2568), True, 'import tensorflow as tf\n'), ((2715, 2732), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2730, 2732), True, 'import tensorflow as tf\n'), ((3677, 3713), 'numpy.reshape', 'np.reshape', (['test_images[i]', '[28, 28]'], {}), '(test_images[i], [28, 28])\n', (3687, 3713), True, 'import numpy as np\n'), ((2366, 2385), 'tensorflow.math.log', 'tf.math.log', (['y_pred'], {}), '(y_pred)\n', (2377, 2385), True, 'import tensorflow as tf\n')] |
"""
This script prepares data in the format for the testing
algorithms to run
The script is expanded to the
"""
from __future__ import division

import copy
import csv
import math
import re
import time
from datetime import datetime
from queue import PriorityQueue

import numpy as np

import shared_variables
from shared_variables import get_unicode_from_int
def prepare_testing_data(eventlog):
    """Read the event-log CSV for `eventlog` and build the testing structures.

    The CSV (located under shared_variables.data_folder) must have a
    header row and columns: case id, activity code, timestamp in
    '%Y-%m-%d %H:%M:%S' format, and group/resource code.

    The log is read in two passes: the first collects inter-event time
    differences so event times can be discretized into r buckets; the
    second builds one encoded character string per trace for activities,
    groups and discretized times, plus four parallel numeric time
    sequences per trace. Vocabularies, maxlen and normalization divisors
    are derived from the first two thirds of the traces (the training
    folds); the last third is returned for testing.

    Returns a 25-element tuple: the test-fold traces (activities, case
    ids, groups, discretized times), the four time sequences, maxlen,
    the three vocabularies, the three char->index maps, the three
    divisors, the prediction size, and the target index<->char maps for
    all three alphabets.
    """
    csvfile = open(shared_variables.data_folder + '%s.csv' % eventlog, 'r')
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    next(spamreader, None)  # skip the headers
    lastcase = ''
    line = ''
    line_group = ''
    line_time = ''
    first_line = True
    lines_id = []
    lines = []
    lines_group = []
    lines_time = []
    timeseqs = []  # relative time since previous event
    timeseqs2 = []  # relative time since case start
    timeseqs3 = []  # absolute time of previous event
    timeseqs4 = []  # absolute time of event as a string
    times = []
    times2 = []
    times3 = []
    times4 = []
    difflist = []
    numlines = 0
    casestarttime = None
    lasteventtime = None
    # Number of buckets used to discretize inter-event times.
    r = 3
    # First pass: time difference between consecutive events; 0 for rows
    # whose activity code is '0' (treated as a case-start marker here).
    for row in spamreader:
        t1 = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
        if row[0] != lastcase:
            lastevent = t1
            lastcase = row[0]
        if row[1] != '0':
            t2 = datetime.fromtimestamp(time.mktime(t1)) - datetime.fromtimestamp(time.mktime(lastevent))
            tdiff = 86400 * t2.days + t2.seconds
        else:
            tdiff = 0
        difflist.append(tdiff)
        lastevent = t1
    difflist = [int(i) for i in difflist]
    maxdiff = max(difflist)
    # Bucket width: largest observed difference split into r buckets.
    diff = maxdiff / r
    # mediandiff = np.percentile(difflist, 50)
    # diff = mediandiff / r
    # Rewind the file for the second pass.
    csvfile.seek(0)
    next(spamreader, None)  # skip the headers
    row_index = 0
    # Second pass: accumulate one encoded string per trace plus the four
    # time sequences; a trace is flushed when its case id changes.
    for row in spamreader:
        t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
        if row[0] != lastcase:
            casestarttime = t
            lasteventtime = t
            lastcase = row[0]
            if not first_line:
                lines.append(line)
                lines_group.append(line_group)
                lines_time.append(line_time)
                timeseqs.append(times)
                timeseqs2.append(times2)
                timeseqs3.append(times3)
                timeseqs4.append(times4)
                # NOTE(review): lastcase was just set to the NEW case id,
                # so the previous trace is paired with the next case's id,
                # and the final trace (flushed after the loop) never gets
                # an id appended — verify this alignment is intended.
                lines_id.append(lastcase)
            line = ''
            line_group = ''
            line_time = ''
            times = []
            times2 = []
            times3 = []
            times4 = []
            numlines += 1
        line += get_unicode_from_int(row[1])
        line_group += get_unicode_from_int(row[3])
        # Discretized inter-event time bucket for this event.
        line_time += get_unicode_from_int(int(difflist[row_index] / diff))
        if hasattr(time, 'tzset'):
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            time.tzset()
        timesincelastevent = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(lasteventtime))
        timesincecasestart = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(casestarttime))
        timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
        timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
        times.append(timediff)
        times2.append(timediff2)
        times3.append(datetime.fromtimestamp(time.mktime(t)))
        times4.append(row[2])
        lasteventtime = t
        first_line = False
        row_index += 1
    # add last case
    lines.append(line)
    lines_group.append(line_group)
    lines_time.append(line_time)
    timeseqs.append(times)
    timeseqs2.append(times2)
    timeseqs3.append(times3)
    timeseqs4.append(times4)
    numlines += 1
    # Normalization divisors: mean time since previous event, mean time
    # since case start, and mean remaining cycle time.
    divisor = np.mean([item for sublist in timeseqs for item in sublist])
    divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
    #divisor3 = np.mean(map(lambda x: np.mean(map(lambda y: x[len(x) - 1] - y, x)), timeseqs2))
    divisor3 = np.mean([np.mean([x[len(x) - 1] - y for y in x]) for x in timeseqs2])
    elems_per_fold = int(round(numlines / 3))
    # Activity vocabulary is built from the first two folds (training
    # data); '!' marks end-of-trace and appears only in target_chars.
    fold1and2lines = lines[:2 * elems_per_fold]
    #fold1and2lines = map(lambda x: x + '!', fold1and2lines)
    #maxlen = max(map(lambda x: len(x), fold1and2lines))
    fold1and2lines = [x + '!' for x in fold1and2lines]
    maxlen = max([len(x) for x in fold1and2lines])
    chars = list(map(lambda x: set(x), fold1and2lines))
    chars = list(set().union(*chars))
    chars.sort()
    target_chars = copy.copy(chars)
    if '!' in chars:
        chars.remove('!')
    char_indices = dict((c, i) for i, c in enumerate(chars))
    target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
    target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
    # Group/resource vocabulary, also from the first two folds.
    fold1and2lines_group = lines_group[:2 * elems_per_fold]
    # fold1and2lines_group = map(lambda x: x + '!', fold1and2lines_group)
    chars_group = list(map(lambda x: set(x), fold1and2lines_group))
    chars_group = list(set().union(*chars_group))
    chars_group.sort()
    target_chars_group = copy.copy(chars_group)
    # chars_group.remove('!')
    char_indices_group = dict((c, i) for i, c in enumerate(chars_group))
    target_char_indices_group = dict((c, i) for i, c in enumerate(target_chars_group))
    target_indices_char_group = dict((i, c) for i, c in enumerate(target_chars_group))
    # Discretized-time vocabulary, also from the first two folds.
    fold1and2lines_time = lines_time[:2 * elems_per_fold]
    # fold1and2lines_time = map(lambda x: x + '!', fold1and2lines_time)
    chars_time = list(map(lambda x: set(x), fold1and2lines_time))
    chars_time = list(set().union(*chars_time))
    chars_time.sort()
    target_chars_time = copy.copy(chars_time)
    # chars_time.remove('!')
    char_indices_time = dict((c, i) for i, c in enumerate(chars_time))
    target_char_indices_time = dict((c, i) for i, c in enumerate(target_chars_time))
    target_indices_char_time = dict((i, c) for i, c in enumerate(target_chars_time))
    # we only need the third fold, because first two were used for training
    fold3 = lines[2 * elems_per_fold:]
    fold3_id = lines_id[2 * elems_per_fold:]
    fold3_group = lines_group[2 * elems_per_fold:]
    fold3_time = lines_time[2 * elems_per_fold:]
    fold3_t = timeseqs[2 * elems_per_fold:]
    fold3_t2 = timeseqs2[2 * elems_per_fold:]
    fold3_t3 = timeseqs3[2 * elems_per_fold:]
    fold3_t4 = timeseqs4[2 * elems_per_fold:]
    lines = fold3
    lines_id = fold3_id
    lines_group = fold3_group
    lines_time = fold3_time
    lines_t = fold3_t
    lines_t2 = fold3_t2
    lines_t3 = fold3_t3
    lines_t4 = fold3_t4
    # Prediction horizon equals the maximum (training) trace length.
    predict_size = maxlen
    return lines, \
           lines_id, \
           lines_group, \
           lines_time, \
           lines_t, \
           lines_t2, \
           lines_t3, \
           lines_t4, \
           maxlen, \
           chars, \
           chars_group, \
           chars_time, \
           char_indices, \
           char_indices_group, \
           char_indices_time, \
           divisor, \
           divisor2, \
           divisor3, \
           predict_size, \
           target_indices_char, \
           target_indices_char_group, \
           target_indices_char_time, \
           target_char_indices, \
           target_char_indices_group, \
           target_char_indices_time
# selects traces verified by a declare model
def select_declare_verified_traces(server_replayer, path_to_declare_model_file, lines, lines_id, lines_group, lines_time, lines_t, lines_t2,
                                   lines_t3, lines_t4, prefix=0):
    """Keep only the traces the replayer verifies against a Declare model.

    The eight parallel sequences are filtered together: the returned
    tuple contains eight lists, in the same order as the inputs,
    restricted to the verified traces.
    """
    kept = ([], [], [], [], [], [], [], [])
    records = zip(lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3, lines_t4)
    for record in records:
        trace, trace_id, group, trace_time, t1, t2, t3, t4 = record
        if server_replayer.verify_with_elapsed_time(path_to_declare_model_file, trace_id, trace, group, trace_time, t4, prefix):
            # Verified: append every field to its output list.
            for bucket, value in zip(kept, record):
                bucket.append(value)
    return kept
# selects traces verified by LTL formula
def select_formula_verified_traces(server_replayer, lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3,
                                   lines_t4, formula, prefix=0):
    """Keep only the traces whose activity sequence satisfies the formula.

    Only the activity string is checked against the LTL formula; all
    eight parallel sequences are filtered together and returned as a
    tuple of eight lists in the original order.
    """
    kept = ([], [], [], [], [], [], [], [])
    records = zip(lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3, lines_t4)
    for record in records:
        if server_replayer.verify_formula_as_compliant(record[0], formula, prefix):
            for bucket, value in zip(kept, record):
                bucket.append(value)
    return kept
# define helper functions
# one-hot encodes the current (possibly partial) trace
def encode(sentence, sentence_group, sentence_time, times, times3, maxlen, chars, chars_group, chars_time,
           char_indices, char_indices_group, char_indices_time, divisor, divisor2):
    """One-hot encode a trace into a (1, maxlen, num_features) tensor.

    Per position the features are: one-hot activity, one-hot group,
    one-hot discretized time, then five numeric features (1-based
    position, time since previous event / divisor, time since case
    start / divisor2, fraction of day since midnight, weekday / 7).
    Traces shorter than maxlen are left-padded with zeros.
    """
    num_features = len(chars) + len(chars_group) + len(chars_time) + 5
    x = np.zeros((1, maxlen, num_features), dtype=np.float32)
    leftpad = maxlen - len(sentence)
    # Cumulative times give the elapsed time since case start.
    times2 = np.cumsum(times)
    # Offset of the five numeric features, after the three one-hot groups.
    base = num_features - 5
    for t, char in enumerate(sentence):
        pos = t + leftpad
        midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
        timesincemidnight = times3[t] - midnight
        if char in chars:
            x[0, pos, char_indices[char]] = 1
        group = sentence_group[t]
        if group in chars_group:
            x[0, pos, len(char_indices) + char_indices_group[group]] = 1
        time_symbol = sentence_time[t]
        if time_symbol in chars_time:
            x[0, pos, len(char_indices) + len(char_indices_group) + char_indices_time[time_symbol]] = 1
        x[0, pos, base] = t + 1
        x[0, pos, base + 1] = times[t] / divisor
        x[0, pos, base + 2] = times2[t] / divisor2
        x[0, pos, base + 3] = timesincemidnight.seconds / 86400
        x[0, pos, base + 4] = times3[t].weekday() / 7
    return x
# Pick the ith-best predicted symbol, optionally damping the probability
# of the symbol that starts the current cycle (replaces the old getSymbol
# helper, which had no damping).
def get_symbol_ampl(predictions, target_indices_char, target_char_indices, start_of_the_cycle_symbol,
                    stop_symbol_probability_amplifier_current, ith_best=0):
    """Return the ith-best symbol after damping the cycle-start score."""
    scores = list(predictions)
    if start_of_the_cycle_symbol in target_char_indices:
        damp_index = target_char_indices[start_of_the_cycle_symbol]
        # Divide rather than zero out, so a dominant symbol can still win.
        scores[damp_index] = scores[damp_index] / stop_symbol_probability_amplifier_current
    best = np.argsort(scores)[len(scores) - ith_best - 1]
    return target_indices_char[best]
# Damp the cycle-start symbol's probability without choosing a symbol.
def adjust_probabilities(predictions, target_char_indices, start_of_the_cycle_symbol,
                         stop_symbol_probability_amplifier_current):
    """Return a copy of predictions with the cycle-start symbol's score
    divided by the current amplifier; unknown symbols leave it unchanged."""
    adjusted = list(predictions)
    if start_of_the_cycle_symbol in target_char_indices:
        damp_index = target_char_indices[start_of_the_cycle_symbol]
        adjusted[damp_index] = adjusted[damp_index] / stop_symbol_probability_amplifier_current
    return adjusted
# find repetitions
def repetitions(s):
    """Yield (unit, count) for every repeated substring found in s.

    A repetition is a substring immediately followed by one or more
    copies of itself, e.g. 'abab' yields ('ab', 2.0). The count is the
    length of the whole match divided by the unit length (a float).
    """
    r = re.compile(r"(.+?)\1+")
    for match in r.finditer(s):
        yield (match.group(1), len(match.group(0)) / len(match.group(1)))


def amplify(s):
    """Return (amplifier, first char of the last repeating unit) for s.

    If s ends with its last detected repetition, the amplifier is
    exp(count) of that repetition; otherwise 1. Returns (1, " ") when s
    contains no repetition at all.
    """
    list_of_rep = list(repetitions(s))
    if list_of_rep:
        str_rep = list_of_rep[-1][0]
        if s.endswith(str_rep):
            # math.exp instead of np.math.exp: the np.math alias (which
            # was just the stdlib math module) is deprecated since NumPy
            # 1.25 and removed in NumPy 2.0.
            return math.exp(list_of_rep[-1][-1]), list_of_rep[-1][0][0]
        else:
            return 1, list_of_rep[-1][0][0]
    return 1, " "
def create_queue(activities, resources, times):
    """Build a priority queue over all (activity, resource, time) triples.

    Each queue item is (priority, [activity_index, resource_index,
    time_index]) where the priority is the negated sum of the log scores,
    so the highest-probability combination is retrieved first.
    """
    queue = PriorityQueue()
    for activity_index, activity in enumerate(activities):
        for resource_index, resource in enumerate(resources):
            for time_index, time_value in enumerate(times):
                score = np.log(activity) + np.log(resource) + np.log(time_value)
                queue.put((-score, [activity_index, resource_index, time_index]))
    return queue
def standardize_list(list1, list2):
    """Scale every element of list2 by the ratio len(list2) / len(list1)."""
    weight = float(len(list2)) / float(len(list1))
    return [weight * x for x in list2]
| [
"numpy.mean",
"time.strptime",
"re.compile",
"time.tzset",
"shared_variables.get_unicode_from_int",
"time.mktime",
"numpy.log",
"numpy.math.exp",
"numpy.argsort",
"numpy.zeros",
"queue.PriorityQueue",
"numpy.cumsum",
"copy.copy",
"csv.reader"
] | [((479, 528), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (489, 528), False, 'import csv\n'), ((3850, 3909), 'numpy.mean', 'np.mean', (['[item for sublist in timeseqs for item in sublist]'], {}), '([item for sublist in timeseqs for item in sublist])\n', (3857, 3909), True, 'import numpy as np\n'), ((3925, 3985), 'numpy.mean', 'np.mean', (['[item for sublist in timeseqs2 for item in sublist]'], {}), '([item for sublist in timeseqs2 for item in sublist])\n', (3932, 3985), True, 'import numpy as np\n'), ((4617, 4633), 'copy.copy', 'copy.copy', (['chars'], {}), '(chars)\n', (4626, 4633), False, 'import copy\n'), ((5193, 5215), 'copy.copy', 'copy.copy', (['chars_group'], {}), '(chars_group)\n', (5202, 5215), False, 'import copy\n'), ((5784, 5805), 'copy.copy', 'copy.copy', (['chars_time'], {}), '(chars_time)\n', (5793, 5805), False, 'import copy\n'), ((11044, 11097), 'numpy.zeros', 'np.zeros', (['(1, maxlen, num_features)'], {'dtype': 'np.float32'}), '((1, maxlen, num_features), dtype=np.float32)\n', (11052, 11097), True, 'import numpy as np\n'), ((11148, 11164), 'numpy.cumsum', 'np.cumsum', (['times'], {}), '(times)\n', (11157, 11164), True, 'import numpy as np\n'), ((13671, 13694), 're.compile', 're.compile', (['"""(.+?)\\\\1+"""'], {}), "('(.+?)\\\\1+')\n", (13681, 13694), False, 'import re\n'), ((14160, 14175), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (14173, 14175), False, 'from queue import PriorityQueue\n'), ((1163, 1205), 'time.strptime', 'time.strptime', (['row[2]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row[2], '%Y-%m-%d %H:%M:%S')\n", (1176, 1205), False, 'import time\n'), ((1861, 1903), 'time.strptime', 'time.strptime', (['row[2]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row[2], '%Y-%m-%d %H:%M:%S')\n", (1874, 1903), False, 'import time\n'), ((2597, 2625), 'shared_variables.get_unicode_from_int', 'get_unicode_from_int', (['row[1]'], {}), '(row[1])\n', 
(2617, 2625), False, 'from shared_variables import get_unicode_from_int\n'), ((2648, 2676), 'shared_variables.get_unicode_from_int', 'get_unicode_from_int', (['row[3]'], {}), '(row[3])\n', (2668, 2676), False, 'from shared_variables import get_unicode_from_int\n'), ((13026, 13044), 'numpy.argsort', 'np.argsort', (['a_pred'], {}), '(a_pred)\n', (13036, 13044), True, 'import numpy as np\n'), ((2944, 2956), 'time.tzset', 'time.tzset', ([], {}), '()\n', (2954, 2956), False, 'import time\n'), ((3009, 3023), 'time.mktime', 'time.mktime', (['t'], {}), '(t)\n', (3020, 3023), False, 'import time\n'), ((3050, 3076), 'time.mktime', 'time.mktime', (['lasteventtime'], {}), '(lasteventtime)\n', (3061, 3076), False, 'import time\n'), ((3130, 3144), 'time.mktime', 'time.mktime', (['t'], {}), '(t)\n', (3141, 3144), False, 'import time\n'), ((3171, 3197), 'time.mktime', 'time.mktime', (['casestarttime'], {}), '(casestarttime)\n', (3182, 3197), False, 'import time\n'), ((3469, 3483), 'time.mktime', 'time.mktime', (['t'], {}), '(t)\n', (3480, 3483), False, 'import time\n'), ((13966, 13998), 'numpy.math.exp', 'np.math.exp', (['list_of_rep[-1][-1]'], {}), '(list_of_rep[-1][-1])\n', (13977, 13998), True, 'import numpy as np\n'), ((1360, 1375), 'time.mktime', 'time.mktime', (['t1'], {}), '(t1)\n', (1371, 1375), False, 'import time\n'), ((1402, 1424), 'time.mktime', 'time.mktime', (['lastevent'], {}), '(lastevent)\n', (1413, 1424), False, 'import time\n'), ((14497, 14522), 'numpy.log', 'np.log', (['times[time_index]'], {}), '(times[time_index])\n', (14503, 14522), True, 'import numpy as np\n'), ((14428, 14462), 'numpy.log', 'np.log', (['activities[activity_index]'], {}), '(activities[activity_index])\n', (14434, 14462), True, 'import numpy as np\n'), ((14463, 14496), 'numpy.log', 'np.log', (['resources[resource_index]'], {}), '(resources[resource_index])\n', (14469, 14496), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 17:31:23 2019
@author: esteban
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import solver as sol
import numpy as np
# Global matplotlib styling: shared tick/font size and a larger Agg path
# chunk size so very long trajectories can be rendered.
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['font.size'] = label_size
mpl.rcParams['agg.path.chunksize'] = 10000
def system(t, x):
    """Closed-loop dynamics: double integrator under the polynomial
    sliding-mode controller, with a sinusoidal disturbance on the input.

    t -- current time
    x -- state vector [x1, x2]
    Returns the state derivative [x2, u + Delta] as a numpy array.
    """
    # Controller parameters
    a1, a2, b1, b2 = 128, 64, 128, 64
    k = 1.1
    # Disturbance: 5 Hz unit-amplitude sinusoid.
    disturbance = np.sin(2 * np.pi * 5 * t)
    # State variables
    x1, x2 = x[0], x[1]
    # Sliding variable
    sigma = x2 + sol.odd_pow(sol.odd_pow(x2, 2) + a1 * x1 + b1 * sol.odd_pow(x1, 3), 0.5)
    # Control law: switching term plus polynomial reaching term.
    gain = (a1 + 3 * b1 * x1 ** 2 + 2 * k) / 2
    control = -gain * np.sign(sigma) - sol.odd_pow(a2 * sigma + b2 * sol.odd_pow(sigma, 3), 0.5)
    return np.array([x2, control + disturbance])
# Simulation parameters: start/end time, fixed step size, unused index.
t0, tf, h, i = 0, 1.2, 1e-5, 0
# Figure with two side-by-side axes: states (left), control (right).
fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2)
fig.subplots_adjust(wspace=0.31)
# Integrate the closed-loop system from x(0) = [1000, 0] using the
# solver module's fixed-step integrator.
t, x = sol.ode1(system, np.array([1000, 0]), t0, tf, h)
# State trajectories
x1, x2 = x
# Plot of the state trajectories
ax1.plot(t, x1, color=0*np.ones(3), lw=2, label='$x_1(t)$')
ax1.plot(t, x2, '--', color=0.5*np.ones(3), lw=2, label='$x_2(t)$')
ax1.set_xlim(0, 1.2)
ax1.set_ylim(-5, 5)
ax1.set_xlabel('$t$', fontsize = 14)
ax1.axvline(x = 1, ymin = -1, ymax = 2, linestyle='dashed', color = 0.6*np.ones(3))
ax1.legend(loc='best')
ax1.grid()
# Controller parameters (must match those inside system()).
a1, a2, b1, b2 = 128, 64, 128, 64
k = 1.1
# Recompute the sliding variable along the trajectory for plotting.
s = x2 + sol.odd_pow(sol.odd_pow(x2,2) + a1*x1 + b1 * sol.odd_pow(x1, 3), 0.5)
# Recompute the control signal along the trajectory for plotting.
u = -(a1 + 3 * b1 * x1**2 + 2 * k) / 2 * np.sign(s) - sol.odd_pow(a2*s + b2 * sol.odd_pow(s, 3), 0.5)
# Plot of the control signal
ax2.plot(t, u, color=0*np.ones(3), lw=2)
ax2.set_xlim(0, 1.2)
ax2.set_ylim(-100, 100)
ax2.set_xlabel('$t$', fontsize = 14)
ax2.grid()
# Export the figure as EPS.
fig.savefig('figures/poly_controller.eps', bbox_inches='tight', format='eps', dpi=1500)
"solver.odd_pow",
"numpy.ones",
"numpy.array",
"numpy.sign",
"numpy.sin",
"matplotlib.pyplot.subplots"
] | [((905, 935), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (917, 935), True, 'import matplotlib.pyplot as plt\n'), ((478, 503), 'numpy.sin', 'np.sin', (['(2 * np.pi * 5 * t)'], {}), '(2 * np.pi * 5 * t)\n', (484, 503), True, 'import numpy as np\n'), ((790, 815), 'numpy.array', 'np.array', (['[x2, u + Delta]'], {}), '([x2, u + Delta])\n', (798, 815), True, 'import numpy as np\n'), ((1008, 1027), 'numpy.array', 'np.array', (['[1000, 0]'], {}), '([1000, 0])\n', (1016, 1027), True, 'import numpy as np\n'), ((1632, 1642), 'numpy.sign', 'np.sign', (['s'], {}), '(s)\n', (1639, 1642), True, 'import numpy as np\n'), ((718, 728), 'numpy.sign', 'np.sign', (['s'], {}), '(s)\n', (725, 728), True, 'import numpy as np\n'), ((1111, 1121), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1118, 1121), True, 'import numpy as np\n'), ((1179, 1189), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1186, 1189), True, 'import numpy as np\n'), ((1367, 1377), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1374, 1377), True, 'import numpy as np\n'), ((1738, 1748), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1745, 1748), True, 'import numpy as np\n'), ((1520, 1538), 'solver.odd_pow', 'sol.odd_pow', (['x2', '(2)'], {}), '(x2, 2)\n', (1531, 1538), True, 'import solver as sol\n'), ((1553, 1571), 'solver.odd_pow', 'sol.odd_pow', (['x1', '(3)'], {}), '(x1, 3)\n', (1564, 1571), True, 'import solver as sol\n'), ((1669, 1686), 'solver.odd_pow', 'sol.odd_pow', (['s', '(3)'], {}), '(s, 3)\n', (1680, 1686), True, 'import solver as sol\n'), ((598, 616), 'solver.odd_pow', 'sol.odd_pow', (['x2', '(2)'], {}), '(x2, 2)\n', (609, 616), True, 'import solver as sol\n'), ((631, 649), 'solver.odd_pow', 'sol.odd_pow', (['x1', '(3)'], {}), '(x1, 3)\n', (642, 649), True, 'import solver as sol\n'), ((755, 772), 'solver.odd_pow', 'sol.odd_pow', (['s', '(3)'], {}), '(s, 3)\n', (766, 772), True, 'import solver as sol\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for generating the feature_statistics proto from generic data.
The proto is used as input for the Overview visualization.
"""
import numpy as np
import pandas as pd
class BaseGenericFeatureStatisticsGenerator(object):
"""Base class for generator of stats proto from generic data."""
def __init__(self, fs_proto, datasets_proto, histogram_proto):
self.fs_proto = fs_proto
self.datasets_proto = datasets_proto
self.histogram_proto = histogram_proto
def ProtoFromDataFrames(self, dataframes):
"""Creates a feature statistics proto from a set of pandas dataframes.
Args:
dataframes: A list of dicts describing tables for each dataset for the
proto. Each entry contains a 'table' field of the dataframe of the
data
and a 'name' field to identify the dataset in the proto.
Returns:
The feature statistics proto for the provided tables.
"""
datasets = []
for dataframe in dataframes:
table = dataframe['table']
table_entries = {}
for col in table:
table_entries[col] = self.NdarrayToEntry(table[col])
datasets.append({
'entries': table_entries,
'size': len(table),
'name': dataframe['name']
})
return self.GetDatasetsProto(datasets)
def DtypeToType(self, dtype):
"""Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""
if dtype.char in np.typecodes['AllFloat']:
return self.fs_proto.FLOAT
elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool or
np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64)):
return self.fs_proto.INT
else:
return self.fs_proto.STRING
def DtypeToNumberConverter(self, dtype):
"""Converts a Numpy dtype to a converter method if applicable.
The converter method takes in a numpy array of objects of the provided
dtype
and returns a numpy array of the numbers backing that object for
statistical
analysis. Returns None if no converter is necessary.
Args:
dtype: The numpy dtype to make a converter for.
Returns:
The converter method or None.
"""
if np.issubdtype(dtype, np.datetime64):
def DatetimesToNumbers(dt_list):
return np.array([pd.Timestamp(dt).value for dt in dt_list])
return DatetimesToNumbers
elif np.issubdtype(dtype, np.timedelta64):
def TimedetlasToNumbers(td_list):
return np.array([pd.Timedelta(td).value for td in td_list])
return TimedetlasToNumbers
else:
return None
def NdarrayToEntry(self, x):
"""Converts an ndarray to the Entry format."""
row_counts = []
for row in x:
try:
rc = np.count_nonzero(~np.isnan(row))
if rc != 0:
row_counts.append(rc)
except TypeError:
try:
row_counts.append(row.size)
except AttributeError:
row_counts.append(1)
data_type = self.DtypeToType(x.dtype)
converter = self.DtypeToNumberConverter(x.dtype)
flattened = x.ravel()
orig_size = len(flattened)
# Remove all None and nan values and count how many were removed.
flattened = flattened[flattened != np.array(None)]
if converter:
flattened = converter(flattened)
flattened = ([x for x in flattened if str(x) != 'nan']
if data_type == self.fs_proto.STRING else
flattened[~np.isnan(flattened)].tolist())
missing = orig_size - len(flattened)
return {
'vals': flattened,
'counts': row_counts,
'missing': missing,
'type': data_type
}
def GetDatasetsProto(self, datasets, features=None):
"""Generates the feature stats proto from dictionaries of feature values.
Args:
datasets: An array of dictionaries, one per dataset, each one containing:
- 'entries': The dictionary of features in the dataset from the parsed
examples.
- 'size': The number of examples parsed for the dataset.
- 'name': The name of the dataset.
features: A list of strings that is a whitelist of feature names to create
feature statistics for. If set to None then all features in the
dataset
are analyzed. Defaults to None.
Returns:
The feature statistics proto for the provided datasets.
"""
features_seen = set()
whitelist_features = set(features) if features else None
all_datasets = self.datasets_proto()
# TODO(jwexler): Add ability to generate weighted feature stats
# if there is a specified weight feature in the dataset.
# Initialize each dataset
for dataset in datasets:
all_datasets.datasets.add(
name=dataset['name'], num_examples=dataset['size'])
# This outer loop ensures that for each feature seen in any of the provided
# datasets, we check the feature once against all datasets.
for outer_dataset in datasets:
for key, value in outer_dataset['entries'].items():
# If we have a feature whitelist and this feature is not in the
# whitelist then do not process it.
# If we have processed this feature already, no need to do it again.
if ((whitelist_features and key not in whitelist_features) or
key in features_seen):
continue
features_seen.add(key)
# Default to type int if no type is found, so that the fact that all
# values are missing from this feature can be displayed.
feature_type = value['type'] if 'type' in value else self.fs_proto.INT
# Process the found feature for each dataset.
for j, dataset in enumerate(datasets):
feat = all_datasets.datasets[j].features.add(
type=feature_type, name=key)
value = dataset['entries'].get(key)
has_data = value is not None and (value['vals'].size != 0
if isinstance(
value['vals'], np.ndarray) else
value['vals'])
commonstats = None
# For numeric features, calculate numeric statistics.
if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT):
featstats = feat.num_stats
commonstats = featstats.common_stats
if has_data:
nums = value['vals']
featstats.std_dev = np.std(nums)
featstats.mean = np.mean(nums)
featstats.min = np.min(nums)
featstats.max = np.max(nums)
featstats.median = np.median(nums)
featstats.num_zeros = len(nums) - np.count_nonzero(nums)
nums = np.array(nums)
num_nan = len(nums[np.isnan(nums)])
num_posinf = len(nums[np.isposinf(nums)])
num_neginf = len(nums[np.isneginf(nums)])
# Remove all non-finite (including NaN) values from the numeric
# values in order to calculate histogram buckets/counts. The
# inf values will be added back to the first and last buckets.
nums = nums[np.isfinite(nums)]
counts, buckets = np.histogram(nums)
hist = featstats.histograms.add()
hist.type = self.histogram_proto.STANDARD
hist.num_nan = num_nan
for bucket_count in range(len(counts)):
bucket = hist.buckets.add(
low_value=buckets[bucket_count],
high_value=buckets[bucket_count + 1],
sample_count=counts[bucket_count])
# Add any negative or positive infinities to the first and last
# buckets in the histogram.
if bucket_count == 0 and num_neginf > 0:
bucket.low_value = float('-inf')
bucket.sample_count += num_neginf
elif bucket_count == len(counts) - 1 and num_posinf > 0:
bucket.high_value = float('inf')
bucket.sample_count += num_posinf
if not hist.buckets:
if num_neginf:
hist.buckets.add(
low_value=float('-inf'),
high_value=float('-inf'),
sample_count=num_neginf)
if num_posinf:
hist.buckets.add(
low_value=float('inf'),
high_value=float('inf'),
sample_count=num_posinf)
self._PopulateQuantilesHistogram(featstats.histograms.add(),
nums.tolist())
elif feat.type == self.fs_proto.STRING:
featstats = feat.string_stats
commonstats = featstats.common_stats
if has_data:
strs = value['vals']
featstats.avg_length = np.mean(np.vectorize(len)(strs))
vals, counts = np.unique(strs, return_counts=True)
featstats.unique = len(vals)
sorted_vals = sorted(zip(counts, vals), reverse=True)
for val_index, val in enumerate(sorted_vals):
if val[1].dtype.type is np.str_:
printable_val = val[1]
else:
try:
printable_val = val[1].decode('UTF-8', 'strict')
except UnicodeDecodeError:
printable_val = '__BYTES_VALUE__'
bucket = featstats.rank_histogram.buckets.add(
low_rank=val_index,
high_rank=val_index,
sample_count=val[0],
label=printable_val)
if val_index < 2:
featstats.top_values.add(
value=bucket.label, frequency=bucket.sample_count)
# Add the common stats regardless of the feature type.
if has_data:
commonstats.num_missing = value['missing']
commonstats.num_non_missing = (all_datasets.datasets[j].num_examples
- featstats.common_stats.num_missing)
commonstats.min_num_values = np.min(value['counts']).astype(int)
commonstats.max_num_values = np.max(value['counts']).astype(int)
commonstats.avg_num_values = np.mean(value['counts'])
if 'feat_lens' in value and value['feat_lens']:
self._PopulateQuantilesHistogram(
commonstats.feature_list_length_histogram, value['feat_lens'])
self._PopulateQuantilesHistogram(commonstats.num_values_histogram,
value['counts'])
else:
commonstats.num_non_missing = 0
commonstats.num_missing = all_datasets.datasets[j].num_examples
return all_datasets
def _PopulateQuantilesHistogram(self, hist, nums):
"""Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
"""
if not nums:
return
num_quantile_buckets = 10
quantiles_to_get = [
x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
]
quantiles = np.percentile(nums, quantiles_to_get)
hist.type = self.histogram_proto.QUANTILES
quantiles_sample_count = float(len(nums)) / num_quantile_buckets
for low, high in zip(quantiles, quantiles[1:]):
hist.buckets.add(
low_value=low, high_value=high, sample_count=quantiles_sample_count)
| [
"numpy.mean",
"numpy.histogram",
"numpy.median",
"numpy.unique",
"pandas.Timedelta",
"numpy.min",
"numpy.max",
"numpy.count_nonzero",
"numpy.issubdtype",
"numpy.array",
"numpy.isneginf",
"numpy.isnan",
"numpy.isfinite",
"numpy.std",
"numpy.percentile",
"pandas.Timestamp",
"numpy.ispo... | [((2901, 2936), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.datetime64'], {}), '(dtype, np.datetime64)\n', (2914, 2936), True, 'import numpy as np\n'), ((12068, 12105), 'numpy.percentile', 'np.percentile', (['nums', 'quantiles_to_get'], {}), '(nums, quantiles_to_get)\n', (12081, 12105), True, 'import numpy as np\n'), ((3088, 3124), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.timedelta64'], {}), '(dtype, np.timedelta64)\n', (3101, 3124), True, 'import numpy as np\n'), ((2259, 2294), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.datetime64'], {}), '(dtype, np.datetime64)\n', (2272, 2294), True, 'import numpy as np\n'), ((2308, 2344), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.timedelta64'], {}), '(dtype, np.timedelta64)\n', (2321, 2344), True, 'import numpy as np\n'), ((3927, 3941), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (3935, 3941), True, 'import numpy as np\n'), ((3460, 3473), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (3468, 3473), True, 'import numpy as np\n'), ((11093, 11117), 'numpy.mean', 'np.mean', (["value['counts']"], {}), "(value['counts'])\n", (11100, 11117), True, 'import numpy as np\n'), ((3003, 3019), 'pandas.Timestamp', 'pd.Timestamp', (['dt'], {}), '(dt)\n', (3015, 3019), True, 'import pandas as pd\n'), ((4146, 4165), 'numpy.isnan', 'np.isnan', (['flattened'], {}), '(flattened)\n', (4154, 4165), True, 'import numpy as np\n'), ((7163, 7175), 'numpy.std', 'np.std', (['nums'], {}), '(nums)\n', (7169, 7175), True, 'import numpy as np\n'), ((7207, 7220), 'numpy.mean', 'np.mean', (['nums'], {}), '(nums)\n', (7214, 7220), True, 'import numpy as np\n'), ((7251, 7263), 'numpy.min', 'np.min', (['nums'], {}), '(nums)\n', (7257, 7263), True, 'import numpy as np\n'), ((7294, 7306), 'numpy.max', 'np.max', (['nums'], {}), '(nums)\n', (7300, 7306), True, 'import numpy as np\n'), ((7340, 7355), 'numpy.median', 'np.median', (['nums'], {}), '(nums)\n', (7349, 7355), True, 'import numpy as 
np\n'), ((7449, 7463), 'numpy.array', 'np.array', (['nums'], {}), '(nums)\n', (7457, 7463), True, 'import numpy as np\n'), ((7934, 7952), 'numpy.histogram', 'np.histogram', (['nums'], {}), '(nums)\n', (7946, 7952), True, 'import numpy as np\n'), ((3192, 3208), 'pandas.Timedelta', 'pd.Timedelta', (['td'], {}), '(td)\n', (3204, 3208), True, 'import pandas as pd\n'), ((7404, 7426), 'numpy.count_nonzero', 'np.count_nonzero', (['nums'], {}), '(nums)\n', (7420, 7426), True, 'import numpy as np\n'), ((7883, 7900), 'numpy.isfinite', 'np.isfinite', (['nums'], {}), '(nums)\n', (7894, 7900), True, 'import numpy as np\n'), ((9706, 9741), 'numpy.unique', 'np.unique', (['strs'], {'return_counts': '(True)'}), '(strs, return_counts=True)\n', (9715, 9741), True, 'import numpy as np\n'), ((10939, 10962), 'numpy.min', 'np.min', (["value['counts']"], {}), "(value['counts'])\n", (10945, 10962), True, 'import numpy as np\n'), ((11016, 11039), 'numpy.max', 'np.max', (["value['counts']"], {}), "(value['counts'])\n", (11022, 11039), True, 'import numpy as np\n'), ((7497, 7511), 'numpy.isnan', 'np.isnan', (['nums'], {}), '(nums)\n', (7505, 7511), True, 'import numpy as np\n'), ((7550, 7567), 'numpy.isposinf', 'np.isposinf', (['nums'], {}), '(nums)\n', (7561, 7567), True, 'import numpy as np\n'), ((7606, 7623), 'numpy.isneginf', 'np.isneginf', (['nums'], {}), '(nums)\n', (7617, 7623), True, 'import numpy as np\n'), ((9652, 9669), 'numpy.vectorize', 'np.vectorize', (['len'], {}), '(len)\n', (9664, 9669), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# Last Change: Wed Sep 24 05:00 PM 2008 J
import numpy as np
import scipy as sp
import scipy.signal as sig
def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k order linear filter:
xp[n] = -c[1] * x[n-2] - ... - c[k-1] * x[n-k-1]
Such as the sum of the squared-error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow"""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, signal.dtype)
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size-1:signal.size+order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype = signal.dtype)
def levinson_1d(r, order):
"""Levinson-Durbin recursion, to efficiently solve symmetric linear systems
with toeplitz structure.
Parameters
---------
r : array-like
input array to invert (since the matrix is symmetric Toeplitz, the
corresponding pxp matrix is defined by p items only). Generally the
autocorrelation of the signal for linear prediction coefficients
estimation. The first item must be a non zero real.
Notes
----
This implementation is in python, hence unsuitable for any serious
computation. Use it as educational and reference purpose only.
Levinson is a well-known algorithm to solve the Hermitian toeplitz
equation:
_ _
-R[1] = R[0] R[1] ... R[p-1] a[1]
: : : : * :
: : : _ * :
-R[p] = R[p-1] R[p-2] ... R[0] a[p]
_
with respect to a ( is the complex conjugate). Using the special symmetry
in the matrix, the inversion can be done in O(p^2) instead of O(p^3).
"""
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if n < 1:
raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
raise ValueError("Order should be <= size-1")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1/r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order+1, r.dtype)
# temporary array
t = np.empty(order+1, r.dtype)
# Reflection coefficients
k = np.empty(order, r.dtype)
a[0] = 1.
e = r[0]
for i in range(1, order+1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i-j]
k[i-1] = -acc / e
a[i] = k[i-1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i-1] * np.conj(t[i-j])
e *= 1 - k[i-1] * np.conj(k[i-1])
return a, e, k | [
"numpy.ones",
"numpy.conj",
"numpy.zeros",
"numpy.correlate",
"numpy.empty",
"numpy.isreal",
"numpy.concatenate",
"numpy.min",
"numpy.isfinite",
"scipy.linalg.toeplitz",
"numpy.atleast_1d"
] | [((2572, 2588), 'numpy.atleast_1d', 'np.atleast_1d', (['r'], {}), '(r)\n', (2585, 2588), True, 'import numpy as np\n'), ((3056, 3084), 'numpy.empty', 'np.empty', (['(order + 1)', 'r.dtype'], {}), '(order + 1, r.dtype)\n', (3064, 3084), True, 'import numpy as np\n'), ((3113, 3141), 'numpy.empty', 'np.empty', (['(order + 1)', 'r.dtype'], {}), '(order + 1, r.dtype)\n', (3121, 3141), True, 'import numpy as np\n'), ((3178, 3202), 'numpy.empty', 'np.empty', (['order', 'r.dtype'], {}), '(order, r.dtype)\n', (3186, 3202), True, 'import numpy as np\n'), ((1005, 1030), 'numpy.zeros', 'np.zeros', (['p', 'signal.dtype'], {}), '(p, signal.dtype)\n', (1013, 1030), True, 'import numpy as np\n'), ((1142, 1166), 'numpy.min', 'np.min', (['[p, signal.size]'], {}), '([p, signal.size])\n', (1148, 1166), True, 'import numpy as np\n'), ((1179, 1215), 'numpy.correlate', 'np.correlate', (['signal', 'signal', '"""full"""'], {}), "(signal, signal, 'full')\n", (1191, 1215), True, 'import numpy as np\n'), ((1355, 1383), 'numpy.concatenate', 'np.concatenate', (['([1.0], phi)'], {}), '(([1.0], phi))\n', (1369, 1383), True, 'import numpy as np\n'), ((1408, 1438), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'signal.dtype'}), '(1, dtype=signal.dtype)\n', (1415, 1438), True, 'import numpy as np\n'), ((2851, 2866), 'numpy.isreal', 'np.isreal', (['r[0]'], {}), '(r[0])\n', (2860, 2866), True, 'import numpy as np\n'), ((2943, 2964), 'numpy.isfinite', 'np.isfinite', (['(1 / r[0])'], {}), '(1 / r[0])\n', (2954, 2964), True, 'import numpy as np\n'), ((1303, 1329), 'scipy.linalg.toeplitz', 'sp.linalg.toeplitz', (['r[:-1]'], {}), '(r[:-1])\n', (1321, 1329), True, 'import scipy as sp\n'), ((3510, 3527), 'numpy.conj', 'np.conj', (['t[i - j]'], {}), '(t[i - j])\n', (3517, 3527), True, 'import numpy as np\n'), ((3553, 3570), 'numpy.conj', 'np.conj', (['k[i - 1]'], {}), '(k[i - 1])\n', (3560, 3570), True, 'import numpy as np\n')] |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import defaultdict
import numpy as np
from fastestimator.architecture.retinanet import _get_fpn_anchor_box
from fastestimator.trace import Trace
from pycocotools import mask as maskUtils
class MeanAvgPrecision(Trace):
"""Calculates mean avg precision for various ios. Based out of cocoapi
"""
def __init__(self, num_classes, input_shape, pred_key, gt_key, mode="eval", output_name=("mAP", "AP50", "AP75")):
super().__init__(outputs=output_name, mode=mode)
self.pred_key = pred_key
self.gt_key = gt_key
self.output_name = output_name
assert len(self.output_name) == 3, 'MeanAvgPrecision trace adds 3 fields mAP AP50 AP75 to state '
self.iou_thres = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)
self.rec_thres = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)
self.categories = [n + 1 for n in range(num_classes)] # MSCOCO style class label starts from 1
self.maxdets = 100
self.image_ids = []
self.anch_box = _get_fpn_anchor_box(input_shape=input_shape)[0]
def get_ids_in_epoch(self, idx_in_batch):
unique_cnt_pr = len(np.unique(self.ids_unique))
self.ids_unique.append(idx_in_batch)
unique_cnt_ltr = len(np.unique(self.ids_unique))
if unique_cnt_ltr > unique_cnt_pr:
self.ids_in_epoch += 1
self.ids_batch_to_epoch[idx_in_batch] = self.ids_in_epoch
return self.ids_in_epoch
def on_epoch_begin(self, state):
self.image_ids = [] # append all the image ids coming from each iteration
self.evalimgs = {}
self.eval = {}
self.ids_in_epoch = -1
def on_batch_begin(self, state):
self.gt = defaultdict(list) # gt for evaluation
self.dt = defaultdict(list) # dt for evaluation
self.batch_image_ids = [] # img_ids per batch
self.ious = defaultdict(list)
self.ids_unique = []
self.ids_batch_to_epoch = {}
def on_batch_end(self, state):
pred = state["batch"][self.pred_key]
pred = pred.numpy()
gt = state["batch"][self.gt_key]
gt = gt.numpy()
ground_truth_bb = []
for gt_item in gt:
idx_in_batch, x1, y1, w, h, cls = gt_item
idx_in_batch, cls = int(idx_in_batch), int(cls)
id_epoch = self.get_ids_in_epoch(idx_in_batch)
self.batch_image_ids.append(id_epoch)
self.image_ids.append(id_epoch)
tmp_dict = {'idx': id_epoch, 'x1': x1, 'y1': y1, 'w': w, 'h': h, 'cls': cls}
ground_truth_bb.append(tmp_dict)
predicted_bb = []
for pred_item in pred:
idx_in_batch, x1, y1, w, h, cls, score = pred_item
idx_in_batch, cls = int(idx_in_batch), int(cls)
id_epoch = self.ids_batch_to_epoch[idx_in_batch]
self.image_ids.append(id_epoch)
tmp_dict = {'idx': id_epoch, 'x1': x1, 'y1': y1, 'w': w, 'h': h, 'cls': cls, 'score': score}
predicted_bb.append(tmp_dict)
for dict_elem in ground_truth_bb:
self.gt[dict_elem['idx'], dict_elem['cls']].append(dict_elem)
for dict_elem in predicted_bb:
self.dt[dict_elem['idx'], dict_elem['cls']].append(dict_elem)
self.ious = {(img_id, cat_id): self.compute_iou(self.dt[img_id, cat_id], self.gt[img_id, cat_id])
for img_id in self.batch_image_ids for cat_id in self.categories}
for cat_id in self.categories:
for img_id in self.batch_image_ids:
self.evalimgs[(cat_id, img_id)] = self.evaluate_img(cat_id, img_id)
def on_epoch_end(self, state):
self.accumulate()
mean_ap = self.summarize()
ap50 = self.summarize(iou=0.5)
ap75 = self.summarize(iou=0.75)
state[self.output_name[0]] = mean_ap
state[self.output_name[1]] = ap50
state[self.output_name[2]] = ap75
def accumulate(self):
key_list = self.evalimgs
key_list = sorted(key_list)
eval_list = [self.evalimgs[key] for key in key_list]
self.image_ids = np.unique(self.image_ids)
T = len(self.iou_thres)
R = len(self.rec_thres)
K = len(self.categories)
cat_list_zeroidx = [n for n, cat in enumerate(self.categories)]
I = len(self.image_ids)
maxdets = self.maxdets
precision = -np.ones((T, R, K))
recall = -np.ones((T, K))
scores = -np.ones((T, R, K))
for k in cat_list_zeroidx:
Nk = k * I
E = [eval_list[Nk + img_idx] for img_idx in range(I)]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dt_scores = np.concatenate([e['dtScores'][0:maxdets] for e in E])
inds = np.argsort(-dt_scores, kind='mergesort')
dt_scores_sorted = dt_scores[inds]
dtm = np.concatenate([e['dtMatches'][:, 0:maxdets] for e in E], axis=1)[:, inds]
npig = np.sum([e['num_gt'] for e in E])
if npig == 0:
continue
tps = dtm > 0
fps = dtm == 0
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp + tp + np.spacing(1))
q = np.zeros((R, ))
ss = np.zeros((R, ))
if nd:
recall[t, k] = rc[-1]
else:
recall[t, k] = 0
pr = pr.tolist()
q = q.tolist()
for i in range(nd - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
inds = np.searchsorted(rc, self.rec_thres, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dt_scores_sorted[pi]
except:
pass
precision[t, :, k] = np.array(q)
scores[t, :, k] = np.array(ss)
self.eval = {
'counts': [T, R, K],
'precision': precision,
'recall': recall,
'scores': scores, }
def summarize(self, iou=None):
s = self.eval['precision']
if iou is not None:
t = np.where(iou == self.iou_thres)[0]
s = s[t]
s = s[:, :, :]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def evaluate_img(self, cat_id, img_id):
dt = self.dt[img_id, cat_id]
gt = self.gt[img_id, cat_id]
num_dt = len(dt)
num_gt = len(gt)
if num_gt == 0 and num_dt == 0:
return None
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:self.maxdets]]
iou_mat = self.ious[img_id, cat_id]
T = len(self.iou_thres)
dtm = np.zeros((T, num_dt))
gtm = np.zeros((T, num_gt))
if len(iou_mat) != 0:
for thres_idx, thres_elem in enumerate(self.iou_thres):
for dt_idx, dt_elem in enumerate(dt):
m = -1
iou = min([thres_elem, 1 - 1e-10])
for gt_idx, gt_elem in enumerate(gt):
if gtm[thres_idx, gt_idx] > 0:
continue
if iou_mat[dt_idx, gt_idx] >= iou:
iou = iou_mat[dt_idx, gt_idx]
m = gt_idx
if m != -1:
dtm[thres_idx, dt_idx] = gt[m]['idx']
gtm[thres_idx, m] = 1
return {
'image_id': img_id,
'category_id': cat_id,
'gtIds': [g['idx'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'num_gt': num_gt,
}
def compute_iou(self, dt, gt):
num_dt = len(dt)
num_gt = len(gt)
if num_gt == 0 and num_dt == 0:
return []
boxes_a = np.zeros(shape=(0, 4), dtype=float)
boxes_b = np.zeros(shape=(0, 4), dtype=float)
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > self.maxdets:
dt = dt[0:self.maxdets]
boxes_a = [[dt_elem['x1'], dt_elem['y1'], dt_elem['w'], dt_elem['h']] for dt_elem in dt]
boxes_b = [[gt_elem['x1'], gt_elem['y1'], gt_elem['w'], gt_elem['h']] for gt_elem in gt]
iscrowd = [0 for o in gt] # to leverage maskUtils.iou
iou_dt_gt = maskUtils.iou(boxes_a, boxes_b, iscrowd)
return iou_dt_gt
| [
"numpy.mean",
"pycocotools.mask.iou",
"fastestimator.architecture.retinanet._get_fpn_anchor_box",
"numpy.unique",
"numpy.ones",
"numpy.searchsorted",
"numpy.where",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"collections.defaultdict",
"numpy.concatenate",
"numpy.cumsum",
... | [((2465, 2482), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2476, 2482), False, 'from collections import defaultdict\n'), ((2522, 2539), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2533, 2539), False, 'from collections import defaultdict\n'), ((2636, 2653), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2647, 2653), False, 'from collections import defaultdict\n'), ((4869, 4894), 'numpy.unique', 'np.unique', (['self.image_ids'], {}), '(self.image_ids)\n', (4878, 4894), True, 'import numpy as np\n'), ((7787, 7844), 'numpy.argsort', 'np.argsort', (["[(-d['score']) for d in dt]"], {'kind': '"""mergesort"""'}), "([(-d['score']) for d in dt], kind='mergesort')\n", (7797, 7844), True, 'import numpy as np\n'), ((7987, 8008), 'numpy.zeros', 'np.zeros', (['(T, num_dt)'], {}), '((T, num_dt))\n', (7995, 8008), True, 'import numpy as np\n'), ((8023, 8044), 'numpy.zeros', 'np.zeros', (['(T, num_gt)'], {}), '((T, num_gt))\n', (8031, 8044), True, 'import numpy as np\n'), ((9175, 9210), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0, 4)', 'dtype': 'float'}), '(shape=(0, 4), dtype=float)\n', (9183, 9210), True, 'import numpy as np\n'), ((9229, 9264), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0, 4)', 'dtype': 'float'}), '(shape=(0, 4), dtype=float)\n', (9237, 9264), True, 'import numpy as np\n'), ((9281, 9338), 'numpy.argsort', 'np.argsort', (["[(-d['score']) for d in dt]"], {'kind': '"""mergesort"""'}), "([(-d['score']) for d in dt], kind='mergesort')\n", (9291, 9338), True, 'import numpy as np\n'), ((9722, 9762), 'pycocotools.mask.iou', 'maskUtils.iou', (['boxes_a', 'boxes_b', 'iscrowd'], {}), '(boxes_a, boxes_b, iscrowd)\n', (9735, 9762), True, 'from pycocotools import mask as maskUtils\n'), ((1773, 1817), 'fastestimator.architecture.retinanet._get_fpn_anchor_box', '_get_fpn_anchor_box', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (1792, 1817), False, 'from 
fastestimator.architecture.retinanet import _get_fpn_anchor_box\n'), ((1896, 1922), 'numpy.unique', 'np.unique', (['self.ids_unique'], {}), '(self.ids_unique)\n', (1905, 1922), True, 'import numpy as np\n'), ((1998, 2024), 'numpy.unique', 'np.unique', (['self.ids_unique'], {}), '(self.ids_unique)\n', (2007, 2024), True, 'import numpy as np\n'), ((5151, 5169), 'numpy.ones', 'np.ones', (['(T, R, K)'], {}), '((T, R, K))\n', (5158, 5169), True, 'import numpy as np\n'), ((5188, 5203), 'numpy.ones', 'np.ones', (['(T, K)'], {}), '((T, K))\n', (5195, 5203), True, 'import numpy as np\n'), ((5222, 5240), 'numpy.ones', 'np.ones', (['(T, R, K)'], {}), '((T, R, K))\n', (5229, 5240), True, 'import numpy as np\n'), ((5491, 5544), 'numpy.concatenate', 'np.concatenate', (["[e['dtScores'][0:maxdets] for e in E]"], {}), "([e['dtScores'][0:maxdets] for e in E])\n", (5505, 5544), True, 'import numpy as np\n'), ((5564, 5604), 'numpy.argsort', 'np.argsort', (['(-dt_scores)'], {'kind': '"""mergesort"""'}), "(-dt_scores, kind='mergesort')\n", (5574, 5604), True, 'import numpy as np\n'), ((5765, 5797), 'numpy.sum', 'np.sum', (["[e['num_gt'] for e in E]"], {}), "([e['num_gt'] for e in E])\n", (5771, 5797), True, 'import numpy as np\n'), ((7496, 7514), 'numpy.mean', 'np.mean', (['s[s > -1]'], {}), '(s[s > -1])\n', (7503, 7514), True, 'import numpy as np\n'), ((1447, 1476), 'numpy.round', 'np.round', (['((0.95 - 0.5) / 0.05)'], {}), '((0.95 - 0.5) / 0.05)\n', (1455, 1476), True, 'import numpy as np\n'), ((1542, 1570), 'numpy.round', 'np.round', (['((1.0 - 0.0) / 0.01)'], {}), '((1.0 - 0.0) / 0.01)\n', (1550, 1570), True, 'import numpy as np\n'), ((5670, 5735), 'numpy.concatenate', 'np.concatenate', (["[e['dtMatches'][:, 0:maxdets] for e in E]"], {'axis': '(1)'}), "([e['dtMatches'][:, 0:maxdets] for e in E], axis=1)\n", (5684, 5735), True, 'import numpy as np\n'), ((6123, 6135), 'numpy.array', 'np.array', (['tp'], {}), '(tp)\n', (6131, 6135), True, 'import numpy as np\n'), ((6157, 6169), 
'numpy.array', 'np.array', (['fp'], {}), '(fp)\n', (6165, 6169), True, 'import numpy as np\n'), ((6302, 6316), 'numpy.zeros', 'np.zeros', (['(R,)'], {}), '((R,))\n', (6310, 6316), True, 'import numpy as np\n'), ((6339, 6353), 'numpy.zeros', 'np.zeros', (['(R,)'], {}), '((R,))\n', (6347, 6353), True, 'import numpy as np\n'), ((6699, 6747), 'numpy.searchsorted', 'np.searchsorted', (['rc', 'self.rec_thres'], {'side': '"""left"""'}), "(rc, self.rec_thres, side='left')\n", (6714, 6747), True, 'import numpy as np\n'), ((6999, 7010), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7007, 7010), True, 'import numpy as np\n'), ((7045, 7057), 'numpy.array', 'np.array', (['ss'], {}), '(ss)\n', (7053, 7057), True, 'import numpy as np\n'), ((7326, 7357), 'numpy.where', 'np.where', (['(iou == self.iou_thres)'], {}), '(iou == self.iou_thres)\n', (7334, 7357), True, 'import numpy as np\n'), ((5925, 5947), 'numpy.cumsum', 'np.cumsum', (['tps'], {'axis': '(1)'}), '(tps, axis=1)\n', (5934, 5947), True, 'import numpy as np\n'), ((5992, 6014), 'numpy.cumsum', 'np.cumsum', (['fps'], {'axis': '(1)'}), '(fps, axis=1)\n', (6001, 6014), True, 'import numpy as np\n'), ((6267, 6280), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (6277, 6280), True, 'import numpy as np\n')] |
"""
@author: <NAME> (andreww(at)email(dot)sc(dot)edu)
Script used to generate optimization-based whitebox attacks and test them
against
"""
import argparse
import numpy as np
import pandas as pd
import os
import time
from matplotlib import pyplot as plt
import scipy.io
from utils.model import load_pool, load_lenet
from models.athena import Ensemble, ENSEMBLE_STRATEGY
from utils.file import load_from_json
from utils.metrics import error_rate, get_corrections
from attacks.attack import generate
def generate_whitebox_ae(model, data, labels, attack_configs,
                eot=False,
                save=False, output_dir=None):
    """
    Generate whitebox adversarial examples using the optimization approach.

    :param model: The targeted model. For a whitebox attack, this should be
                the full defended model.
    :param data: array. The benign samples to generate adversarial examples for.
    :param labels: array or list. The true labels.
    :param attack_configs: dictionary. Attacks and corresponding settings.
    :param eot: boolean. Forwarded into each attack's arguments (Expectation
                over Transformation on/off).
    :param save: boolean. True, if save the adversarial examples.
    :param output_dir: str or path. Location to save the adversarial examples.
                It cannot be None when save is True.
    :return: None. Prints error rates, shows sample plots and optionally
                saves the AEs and the prediction table to disk.
    """
    img_rows, img_cols = data.shape[1], data.shape[2]
    num_attacks = attack_configs.get("num_attacks")
    # FIX: the sample count used to come from a ``num_images`` global that is
    # only defined in the __main__ block; derive it from the data instead so
    # the function also works when imported and called from elsewhere.
    num_images = data.shape[0]
    data_loader = (data, labels)

    if len(labels.shape) > 1:
        # one-hot encoded -> integer class labels
        labels = [np.argmax(p) for p in labels]

    # dataTable holds one row per image: column 0 is the true label, column
    # (id + 1) the prediction under attack ``id``.  Initialized with -1 so
    # that any cell not overwritten with a digit 0-9 is identifiable as
    # erroneous.
    dataTable = -np.ones((num_images, num_attacks + 1), dtype=int)
    dataTable[:, 0] = labels

    for id in range(num_attacks):  # outer loop steps through attacks
        key = "configs{}".format(id)
        attack_args = attack_configs.get(key)
        attack_args["eot"] = eot
        data_adv = generate(model=model,
                            data_loader=data_loader,
                            attack_args=attack_args
                            )
        # predict the adversarial examples
        predictions = model.predict(data_adv)
        predictions = [np.argmax(p) for p in predictions]
        err_rate = error_rate(np.asarray(predictions), np.asarray(labels))
        print('>>>Error Rate: ', err_rate)

        dataTable[:, id + 1] = predictions  # insert predicted values into new column

        # plotting some examples
        num_plotting = min(data.shape[0], 2)
        for i in range(num_plotting):  # inner loop steps through images to plot
            img = data_adv[i].reshape((img_rows, img_cols))
            plt.imshow(img, cmap='gray')
            title = '{}: {}->{}'.format(attack_configs.get(key).get("description"),
                                        labels[i],
                                        predictions[i])
            plt.title(title)
            plt.show()
            plt.close()

        # save the adversarial examples of this attack
        if save:
            if output_dir is None:
                raise ValueError("Cannot save images to a none path.")
            # NOTE(review): the filename hardcodes "EOToff" even when
            # eot=True — kept unchanged so existing consumers of these file
            # names keep working; confirm whether it should reflect ``eot``.
            file = os.path.join(output_dir, "ae_whitebox_{}_EOToff.npy".format(attack_configs.get(key).get("description")))
            print("Saving the adversarial examples to file [{}].".format(file))
            np.save(file, data_adv)

    if save:
        file = os.path.join(output_dir, "dataTable2.mat")
        print("Saving dataTable to " + file)
        scipy.io.savemat(file, {'dataTable2': dataTable})
def evaluate_baseline_attacks(trans_configs, model_configs,
                              data_configs, num_images, save=False, output_dir=None):
    """
    Evaluate the majority-vote (MV) ensemble of weak defenses on a list of
    pre-generated adversarial example files and record its predictions.
    :param trans_configs: dictionary. The collection of the parameterized transformations to test.
        in the form of
        { configsx: {
            param: value,
            }
        }
        The key of a configuration is 'configs'x, where 'x' is the id of corresponding weak defense.
    :param model_configs: dictionary. Defines model related information.
        Such as, location, the undefended model, the file format, etc.
    :param data_configs: dictionary. Defines data related information.
        Such as, location, the file for the true labels, the file for the benign samples,
        the files for the adversarial examples, etc.
    :param num_images: int. Number of images (from the front of each dataset)
        to evaluate on.
    :param save: boolean. Save the prediction table (.mat) or not.
    :param output_dir: path or str. The location to store the prediction table.
        It cannot be None when save is True.
    :return: None. Error rates are printed; predictions optionally saved.
    """
    '''
    # Load the baseline defense (PGD-ADT model)
    pgd_adt = load_lenet(file=model_configs.get('pgd_trained'), trans_configs=None,
                      use_logits=False, wrap=False)
    '''
    # get the undefended model (UM)
    file = os.path.join(model_configs.get('dir'), model_configs.get('um_file'))
    undefended = load_lenet(file=file,
                            trans_configs=trans_configs.get('configs0'),
                            wrap=True)
    print(">>> um:", type(undefended))
    # load weak defenses into a pool
    pool, _ = load_pool(trans_configs=trans_configs,
                        model_configs=model_configs,
                        active_list=True,
                        wrap=True)
    # create AVEP and MV ensembles from the WD pool
    wds = list(pool.values())
    print(">>> wds:", type(wds), type(wds[0]))
    #ensemble_AVEP = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)
    ensemble_MV = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.MV.value)
    # load the benign samples
    bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
    x_bs = np.load(bs_file)
    # NOTE(review): img_rows/img_cols are computed but unused below.
    img_rows, img_cols = x_bs.shape[1], x_bs.shape[2]
    # load the corresponding true labels
    label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
    labels = np.load(label_file)
    if len(labels.shape) > 1:
        labels = [np.argmax(p) for p in labels] #returns correct label
    # cut dataset to specified number of images
    x_bs = x_bs[:num_images]
    labels = labels[:num_images]
    # get indices of benign samples that are correctly classified by the targeted model
    print(">>> Evaluating UM on [{}], it may take a while...".format(bs_file))
    pred_bs = undefended.predict(x_bs)
    corrections = get_corrections(y_pred=pred_bs, y_true=labels)
    pred_bs = [np.argmax(p) for p in pred_bs]
    # Evaluate AEs.
    ae_list = data_configs.get('ae_files')
    print(">>>>>>> AE list: ", ae_list)
    # predictionData: one row per image, one column per AE file, holding the
    # MV ensemble's predicted class; -1 marks cells never written.
    predictionData = -np.ones((num_images, len(ae_list)), dtype = int)
    for ae_count in range(len(ae_list)): # step through ae's one by one
        ae_file = os.path.join(data_configs.get('dir'), ae_list[ae_count])
        x_adv = np.load(ae_file)
        # keep only the first num_images samples of the (N, H, W, C) array
        x_adv = x_adv[:num_images,:,:,:]
        '''
        # evaluate the undefended model on the AE
        print(">>> Evaluating UM on [{}], it may take a while...".format(ae_file))
        pred_adv_um = undefended.predict(x_adv) #num_images by 10 array
        pred_adv_um = [np.argmax(p) for p in pred_adv_um] # returns prediction
        err_um = error_rate(y_pred=np.asarray(pred_adv_um),
                           y_true=np.asarray(labels),
                           correct_on_bs=corrections)
        print(">>> error rate: ",err_um)
        predictionData[:,ae_count,0] = pred_adv_um
        # evaluate the ensembles on the AE
        print(">>> Evaluating AVEP on [{}], it may take a while...".format(ae_file))
        pred_adv_AVEP = ensemble_AVEP.predict(x_adv)
        pred_adv_AVEP = [np.argmax(p) for p in pred_adv_AVEP]
        err_AVEP = error_rate(y_pred=np.asarray(pred_adv_AVEP),
                           y_true=np.asarray(labels),
                           correct_on_bs=corrections)
        print(">>> error rate: ", err_AVEP)
        predictionData[:,ae_count,1] = pred_adv_AVEP
        '''
        print(">>> Evaluating MV on [{}], it may take a while...".format(ae_file))
        pred_adv_MV = ensemble_MV.predict(x_adv)
        pred_adv_MV = [np.argmax(p) for p in pred_adv_MV]
        # error rate counts mistakes only on samples the UM got right on
        # benign data (via correct_on_bs=corrections)
        err_MV = error_rate(y_pred = np.asarray(pred_adv_MV),
                          y_true = np.asarray(labels),
                          correct_on_bs=corrections)
        print(">>> error rate: ", err_MV)
        predictionData[:,ae_count] = pred_adv_MV
        '''
        # evaluate the baseline on the AE
        print(">>> Evaluating baseline model on [{}], it may take a while...".format(ae_file))
        pred_adv_pgd_adt = pgd_adt.predict(x_adv)
        pred_adv_pgd_adt = [np.argmax(p) for p in pred_adv_pgd_adt]
        err_pgd_adt = error_rate(y_pred=np.asarray(pred_adv_pgd_adt),
                           y_true=np.asarray(labels),
                           correct_on_bs=corrections)
        print(">>> error rate: ", err_pgd_adt)
        predictionData[:,ae_count,3] = pred_adv_pgd_adt
        '''
    # track the result
    #results['PGD-ADT'] = err_pgd_adt
    #print(predictionData[:15,:])
    if save:
        file = os.path.join(output_dir, "predictionData.mat")
        print("Saving predictionData and labels to "+file)
        scipy.io.savemat(file, {'predictions':predictionData,
                                'labels':labels,
                                'pred_bs_um':pred_bs}) #save to .mat file format
if __name__ == '__main__':
    # probably need to edit the parser arguments here in order to change the
    # targeted model
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-m', '--model-configs', required=False,
                        default='configs/experiment/model-mnist.json',
                        help='Folder where models are stored.')
    parser.add_argument('-t', '--trans-configs', required=False,
                        default='configs/experiment/athena-mnist.json',
                        help='Configuration file for transformations.')
    parser.add_argument('-d', '--data-configs', required=False,
                        default='configs/experiment/data-mnist.json',
                        help='Folder where test data are stored.')
    parser.add_argument('-a', '--attack-configs', required=False,
                        default='configs/experiment/attack-zk-mnist.json',
                        help='Folder where attack data are stored.')
    parser.add_argument('-o', '--output-root', required=False,
                        default='results',
                        help='Folder for outputs.')
    parser.add_argument('--debug', required=False, default=True)
    args = parser.parse_args()
    print("------AUGMENT SUMMARY-------")
    print('TRANSFORMATION CONFIGS:', args.trans_configs)
    print("MODEL CONFIGS:", args.model_configs)
    print("DATA CONFIGS:", args.data_configs)
    print("ATTACK CONFIGS:", args.attack_configs)
    print("OUTPUT ROOT:", args.output_root)
    print("DEBUGGING MODE:", args.debug)
    print('----------------------------\n')
    # parse configurations (into a dictionary) from json file
    model_configs = load_from_json(args.model_configs)
    data_configs = load_from_json(args.data_configs)
    attack_configs = load_from_json(args.attack_configs)
    trans_configs = load_from_json(args.trans_configs)
    # load the targeted model
    #model_file = os.path.join(model_configs.get("dir"), model_configs.get("um_file"))
    #target = load_lenet(file=model_file, wrap=True)
    # load weak defenses into a pool
    pool, _ = load_pool(trans_configs=trans_configs,
                        model_configs=model_configs,
                        active_list=True,
                        wrap=True)
    # create AVEP and MV ensembles from the WD pool
    wds = list(pool.values())
    print(">>> wds:", type(wds), type(wds[0]))
    #ensemble_AVEP = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)
    ensemble_MV = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.MV.value)
    # the majority-vote ensemble is the whitebox target
    target = ensemble_MV
    # load the benign samples
    data_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
    data_bs = np.load(data_file)
    # load the corresponding true labels
    label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
    labels = np.load(label_file)
    # generate adversarial examples
    num_images = 200 #set to large number (maybe 1000) for final run, <50 while developing for speed
    data_bs = data_bs[:num_images]
    labels = labels[:num_images]
    # AE generation is disabled below; only the baseline evaluation runs.
    '''
    generate_whitebox_ae(model=target,
                         data=data_bs,
                         labels=labels,
                         attack_configs=attack_configs,
                         eot = False,
                         save=False, output_dir=('C:/Users/andre/CSCE585_local/'+
                                                'project-athena/Task 2/data'))
    '''
    evaluate_baseline_attacks(trans_configs=trans_configs,
                              model_configs=model_configs,
                              data_configs=data_configs,
                              num_images = num_images,
                              save=True,
                              output_dir=('C:/Users/andre/CSCE585_local/'+
                                          'project-athena/Task 2/data'))
"utils.metrics.get_corrections",
"matplotlib.pyplot.imshow",
"utils.file.load_from_json",
"argparse.ArgumentParser",
"numpy.ones",
"models.athena.Ensemble",
"attacks.attack.generate",
"os.path.join",
"utils.model.load_pool",
"numpy.argmax",
"numpy.asarray",
"matplotlib.pyplot.close",
"matplo... | [((5594, 5694), 'utils.model.load_pool', 'load_pool', ([], {'trans_configs': 'trans_configs', 'model_configs': 'model_configs', 'active_list': '(True)', 'wrap': '(True)'}), '(trans_configs=trans_configs, model_configs=model_configs,\n active_list=True, wrap=True)\n', (5603, 5694), False, 'from utils.model import load_pool, load_lenet\n'), ((5996, 6058), 'models.athena.Ensemble', 'Ensemble', ([], {'classifiers': 'wds', 'strategy': 'ENSEMBLE_STRATEGY.MV.value'}), '(classifiers=wds, strategy=ENSEMBLE_STRATEGY.MV.value)\n', (6004, 6058), False, 'from models.athena import Ensemble, ENSEMBLE_STRATEGY\n'), ((6191, 6207), 'numpy.load', 'np.load', (['bs_file'], {}), '(bs_file)\n', (6198, 6207), True, 'import numpy as np\n'), ((6404, 6423), 'numpy.load', 'np.load', (['label_file'], {}), '(label_file)\n', (6411, 6423), True, 'import numpy as np\n'), ((6873, 6919), 'utils.metrics.get_corrections', 'get_corrections', ([], {'y_pred': 'pred_bs', 'y_true': 'labels'}), '(y_pred=pred_bs, y_true=labels)\n', (6888, 6919), False, 'from utils.metrics import error_rate, get_corrections\n'), ((10110, 10149), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (10133, 10149), False, 'import argparse\n'), ((11682, 11716), 'utils.file.load_from_json', 'load_from_json', (['args.model_configs'], {}), '(args.model_configs)\n', (11696, 11716), False, 'from utils.file import load_from_json\n'), ((11736, 11769), 'utils.file.load_from_json', 'load_from_json', (['args.data_configs'], {}), '(args.data_configs)\n', (11750, 11769), False, 'from utils.file import load_from_json\n'), ((11791, 11826), 'utils.file.load_from_json', 'load_from_json', (['args.attack_configs'], {}), '(args.attack_configs)\n', (11805, 11826), False, 'from utils.file import load_from_json\n'), ((11847, 11881), 'utils.file.load_from_json', 'load_from_json', (['args.trans_configs'], {}), '(args.trans_configs)\n', (11861, 11881), False, 'from utils.file import 
load_from_json\n'), ((12104, 12204), 'utils.model.load_pool', 'load_pool', ([], {'trans_configs': 'trans_configs', 'model_configs': 'model_configs', 'active_list': '(True)', 'wrap': '(True)'}), '(trans_configs=trans_configs, model_configs=model_configs,\n active_list=True, wrap=True)\n', (12113, 12204), False, 'from utils.model import load_pool, load_lenet\n'), ((12506, 12568), 'models.athena.Ensemble', 'Ensemble', ([], {'classifiers': 'wds', 'strategy': 'ENSEMBLE_STRATEGY.MV.value'}), '(classifiers=wds, strategy=ENSEMBLE_STRATEGY.MV.value)\n', (12514, 12568), False, 'from models.athena import Ensemble, ENSEMBLE_STRATEGY\n'), ((12731, 12749), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (12738, 12749), True, 'import numpy as np\n'), ((12891, 12910), 'numpy.load', 'np.load', (['label_file'], {}), '(label_file)\n', (12898, 12910), True, 'import numpy as np\n'), ((1954, 2003), 'numpy.ones', 'np.ones', (['(num_images, num_attacks + 1)'], {'dtype': 'int'}), '((num_images, num_attacks + 1), dtype=int)\n', (1961, 2003), True, 'import numpy as np\n'), ((2255, 2326), 'attacks.attack.generate', 'generate', ([], {'model': 'model', 'data_loader': 'data_loader', 'attack_args': 'attack_args'}), '(model=model, data_loader=data_loader, attack_args=attack_args)\n', (2263, 2326), False, 'from attacks.attack import generate\n'), ((3777, 3819), 'os.path.join', 'os.path.join', (['output_dir', '"""dataTable2.mat"""'], {}), "(output_dir, 'dataTable2.mat')\n", (3789, 3819), False, 'import os\n'), ((6935, 6947), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (6944, 6947), True, 'import numpy as np\n'), ((7313, 7329), 'numpy.load', 'np.load', (['ae_file'], {}), '(ae_file)\n', (7320, 7329), True, 'import numpy as np\n'), ((9657, 9703), 'os.path.join', 'os.path.join', (['output_dir', '"""predictionData.mat"""'], {}), "(output_dir, 'predictionData.mat')\n", (9669, 9703), False, 'import os\n'), ((1460, 1472), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (1469, 1472), 
True, 'import numpy as np\n'), ((2524, 2536), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (2533, 2536), True, 'import numpy as np\n'), ((2598, 2621), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (2608, 2621), True, 'import numpy as np\n'), ((2623, 2641), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (2633, 2641), True, 'import numpy as np\n'), ((3007, 3035), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (3017, 3035), True, 'from matplotlib import pyplot as plt\n'), ((3239, 3255), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3248, 3255), True, 'from matplotlib import pyplot as plt\n'), ((3268, 3278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3276, 3278), True, 'from matplotlib import pyplot as plt\n'), ((3291, 3302), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3300, 3302), True, 'from matplotlib import pyplot as plt\n'), ((3725, 3748), 'numpy.save', 'np.save', (['file', 'data_adv'], {}), '(file, data_adv)\n', (3732, 3748), True, 'import numpy as np\n'), ((6472, 6484), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (6481, 6484), True, 'import numpy as np\n'), ((8631, 8643), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (8640, 8643), True, 'import numpy as np\n'), ((8703, 8726), 'numpy.asarray', 'np.asarray', (['pred_adv_MV'], {}), '(pred_adv_MV)\n', (8713, 8726), True, 'import numpy as np\n'), ((8766, 8784), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (8776, 8784), True, 'import numpy as np\n')] |
import os
import pickle
import numpy as np
import pandas as pd

# Read the pickled feature table and dump it (and its shape) to stdout.
with open('../output/9atc_feature.pickle', 'rb') as feature_file:
    feature_df = pd.read_pickle(feature_file)

feature_matrix = np.array(feature_df)
print(feature_matrix)
print(feature_matrix.shape)
print(psi_array.shape)
| [
"pandas.read_pickle",
"numpy.array"
] | [((142, 169), 'pandas.read_pickle', 'pd.read_pickle', (['labels_file'], {}), '(labels_file)\n', (156, 169), True, 'import pandas as pd\n'), ((186, 202), 'numpy.array', 'np.array', (['psi_df'], {}), '(psi_df)\n', (194, 202), True, 'import numpy as np\n')] |
# Solve a 2D diffusion initial value problem with a finite-difference
# operator, then train an auto-regressive random-forest surrogate on it and
# plot both solutions for comparison.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from pararealml import *
from pararealml.core.operators.ml.auto_regression import *
from pararealml.core.operators.fdm import *
from pararealml.utils.rand import SEEDS, set_random_seed
# make the run reproducible
set_random_seed(SEEDS[0])
# 2D diffusion equation on a 10x10 domain with grid spacing 0.2
diff_eq = DiffusionEquation(2)
mesh = Mesh([(0., 10.), (0., 10.)], [.2, .2])
# boundary conditions: fixed value 1.5 on the first axis,
# zero-derivative (Neumann) on the second axis
bcs = [
    (DirichletBoundaryCondition(
        lambda x, t: np.full((len(x), 1), 1.5), is_static=True),
     DirichletBoundaryCondition(
         lambda x, t: np.full((len(x), 1), 1.5), is_static=True)),
    (NeumannBoundaryCondition(
        lambda x, t: np.zeros((len(x), 1)), is_static=True),
     NeumannBoundaryCondition(
         lambda x, t: np.zeros((len(x), 1)), is_static=True))
]
cp = ConstrainedProblem(diff_eq, mesh, bcs)
# Gaussian bump centered at (5, 5) with isotropic covariance 2.5, scaled by 100
ic = GaussianInitialCondition(
    cp,
    [(np.array([5., 5.]), np.array([[2.5, 0.], [0., 2.5]]))],
    [100.]
)
ivp = InitialValueProblem(cp, (0., 2.), ic)
# reference solver: RK4 time stepping, central differences, step 0.01
fdm_op = FDMOperator(RK4(), ThreePointCentralDifferenceMethod(), .01)
# surrogate operator with step size 0.5
ar_op = AutoRegressionOperator(.5, fdm_op.vertex_oriented)
fdm_sol = fdm_op.solve(ivp)
fdm_sol_y = fdm_sol.discrete_y(fdm_op.vertex_oriented)
# shared color scale so both plots are directly comparable
v_min = np.min(fdm_sol_y)
v_max = np.max(fdm_sol_y)
fdm_sol.plot('diffusion_fdm', n_images=10, v_min=v_min, v_max=v_max)
# train the random-forest surrogate on 10 FDM rollouts with noise that
# grows linearly in time (stddev t/3) injected into the states
ar_op.train(
    ivp,
    fdm_op,
    RandomForestRegressor(n_jobs=4, verbose=True),
    10,
    lambda t, y: y + np.random.normal(0., t / 3., size=y.shape)
)
ar_op.solve(ivp).plot('diffusion_ar', n_images=10, v_min=v_min, v_max=v_max)
| [
"numpy.random.normal",
"sklearn.ensemble.RandomForestRegressor",
"numpy.max",
"pararealml.utils.rand.set_random_seed",
"numpy.array",
"numpy.min"
] | [((257, 282), 'pararealml.utils.rand.set_random_seed', 'set_random_seed', (['SEEDS[0]'], {}), '(SEEDS[0])\n', (272, 282), False, 'from pararealml.utils.rand import SEEDS, set_random_seed\n'), ((1178, 1195), 'numpy.min', 'np.min', (['fdm_sol_y'], {}), '(fdm_sol_y)\n', (1184, 1195), True, 'import numpy as np\n'), ((1204, 1221), 'numpy.max', 'np.max', (['fdm_sol_y'], {}), '(fdm_sol_y)\n', (1210, 1221), True, 'import numpy as np\n'), ((1330, 1375), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_jobs': '(4)', 'verbose': '(True)'}), '(n_jobs=4, verbose=True)\n', (1351, 1375), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((843, 863), 'numpy.array', 'np.array', (['[5.0, 5.0]'], {}), '([5.0, 5.0])\n', (851, 863), True, 'import numpy as np\n'), ((863, 897), 'numpy.array', 'np.array', (['[[2.5, 0.0], [0.0, 2.5]]'], {}), '([[2.5, 0.0], [0.0, 2.5]])\n', (871, 897), True, 'import numpy as np\n'), ((1406, 1450), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(t / 3.0)'], {'size': 'y.shape'}), '(0.0, t / 3.0, size=y.shape)\n', (1422, 1450), True, 'import numpy as np\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import sqrt
import unittest
from numpy import array, abs, tile
from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec
from pyspark.testing.mllibutils import MLlibTestCase
class FeatureTest(MLlibTestCase):
    def test_idf_model(self):
        """Fitting IDF on four 11-dimensional term-frequency vectors must
        yield exactly one idf weight per feature."""
        term_freqs = [
            Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
            Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
            Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
            Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9]),
        ]
        idf_weights = IDF().fit(self.sc.parallelize(term_freqs, 2)).idf()
        self.assertEqual(len(idf_weights), 11)
class Word2VecTests(MLlibTestCase):
    def test_word2vec_setters(self):
        """Every fluent setter must be reflected on the builder's fields."""
        w2v = (Word2Vec()
               .setVectorSize(2)
               .setLearningRate(0.01)
               .setNumPartitions(2)
               .setNumIterations(10)
               .setSeed(1024)
               .setMinCount(3)
               .setWindowSize(6))
        self.assertEqual(w2v.vectorSize, 2)
        self.assertTrue(w2v.learningRate < 0.02)
        self.assertEqual(w2v.numPartitions, 2)
        self.assertEqual(w2v.numIterations, 10)
        self.assertEqual(w2v.seed, 1024)
        self.assertEqual(w2v.minCount, 3)
        self.assertEqual(w2v.windowSize, 6)

    def test_word2vec_get_vectors(self):
        """Only words frequent enough in the corpus receive vectors."""
        # Prefixes of "a".."g" of shrinking length: "a" occurs 7 times,
        # "b" 6, ..., "g" once; exactly three words survive the cutoff.
        sentences = [["a", "b", "c", "d", "e", "f", "g"][:length]
                     for length in range(7, 0, -1)]
        model = Word2Vec().fit(self.sc.parallelize(sentences))
        self.assertEqual(len(model.getVectors()), 3)
class StandardScalerTests(MLlibTestCase):
    def test_model_setters(self):
        """setWithMean/setWithStd return the model; transform then centers
        and scales the input."""
        rows = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]
        scaler_model = StandardScaler().fit(self.sc.parallelize(rows))
        self.assertIsNotNone(scaler_model.setWithMean(True))
        self.assertIsNotNone(scaler_model.setWithStd(True))
        self.assertEqual(scaler_model.transform([1.0, 2.0, 3.0]),
                         DenseVector([-1.0, -1.0, -1.0]))

    def test_model_transform(self):
        """With default settings the transform leaves this input unchanged."""
        rows = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]
        scaler_model = StandardScaler().fit(self.sc.parallelize(rows))
        self.assertEqual(scaler_model.transform([1.0, 2.0, 3.0]),
                         DenseVector([1.0, 2.0, 3.0]))
class ElementwiseProductTests(MLlibTestCase):
    def test_model_transform(self):
        """Element-wise (Hadamard) product works on both dense and sparse
        input vectors."""
        scaling = Vectors.dense([3, 2, 1])
        transformer = ElementwiseProduct(scaling)
        self.assertEqual(transformer.transform(Vectors.dense([4, 5, 6])),
                         DenseVector([12, 10, 6]))
        self.assertEqual(transformer.transform(Vectors.sparse(3, [0], [1])),
                         SparseVector(3, [0], [3]))
class HashingTFTest(MLlibTestCase):
    def test_binary_term_freqs(self):
        """In binary mode every term present in the document hashes to a
        frequency of exactly 1.0, regardless of how often it occurs."""
        hashing_tf = HashingTF(100).setBinary(True)
        doc = "a a b c c c".split(" ")
        n = hashing_tf.numFeatures
        output = hashing_tf.transform(doc).toArray()
        expected = Vectors.sparse(
            n, {hashing_tf.indexOf(term): 1.0 for term in "abc"}).toArray()
        for i in range(n):
            self.assertAlmostEqual(output[i], expected[i], 14, "Error at " + str(i) +
                                   ": expected " + str(expected[i]) + ", got " + str(output[i]))
class DimensionalityReductionTests(MLlibTestCase):
    # The same 4 x 3 input matrix expressed both densely and sparsely; both
    # encodings must produce identical SVD/PCA results below.
    denseData = [
        Vectors.dense([0.0, 1.0, 2.0]),
        Vectors.dense([3.0, 4.0, 5.0]),
        Vectors.dense([6.0, 7.0, 8.0]),
        Vectors.dense([9.0, 0.0, 1.0])
    ]
    sparseData = [
        Vectors.sparse(3, [(1, 1.0), (2, 2.0)]),
        Vectors.sparse(3, [(0, 3.0), (1, 4.0), (2, 5.0)]),
        Vectors.sparse(3, [(0, 6.0), (1, 7.0), (2, 8.0)]),
        Vectors.sparse(3, [(0, 9.0), (2, 1.0)])
    ]
    def assertEqualUpToSign(self, vecA, vecB):
        """Assert vecA equals either vecB or -vecB (singular/principal
        vectors are only determined up to sign)."""
        eq1 = vecA - vecB
        eq2 = vecA + vecB
        self.assertTrue(sum(abs(eq1)) < 1e-6 or sum(abs(eq2)) < 1e-6)
    def test_svd(self):
        """SVD factor dimensions must match the requested rank k for both
        dense and sparse input; U is omitted when computeU=False."""
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        m = 4
        n = 3
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                rm = mat.computeSVD(k, computeU=True)
                self.assertEqual(rm.s.size, k)
                self.assertEqual(rm.U.numRows(), m)
                self.assertEqual(rm.U.numCols(), k)
                self.assertEqual(rm.V.numRows, n)
                self.assertEqual(rm.V.numCols, k)
        # Test that U returned is None if computeU is set to False.
        self.assertEqual(mat.computeSVD(1).U, None)
        # Test that low rank matrices cannot have number of singular values
        # greater than a limit.
        rm = RowMatrix(self.sc.parallelize(tile([1, 2, 3], (3, 1))))
        self.assertEqual(rm.computeSVD(3, False, 1e-6).s.size, 1)
    def test_pca(self):
        """Principal components of the 4 x 3 test matrix must match the
        analytically known components (up to sign) for k = 1..3."""
        expected_pcs = array([
            [0.0, 1.0, 0.0],
            [sqrt(2.0) / 2.0, 0.0, sqrt(2.0) / 2.0],
            [sqrt(2.0) / 2.0, 0.0, -sqrt(2.0) / 2.0]
        ])
        n = 3
        denseMat = RowMatrix(self.sc.parallelize(self.denseData))
        sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
        for mat in [denseMat, sparseMat]:
            for k in range(1, 4):
                pcs = mat.computePrincipalComponents(k)
                self.assertEqual(pcs.numRows, n)
                self.assertEqual(pcs.numCols, k)
                # We can just test the updated principal component for equality.
                self.assertEqualUpToSign(pcs.toArray()[:, k - 1], expected_pcs[:, k - 1])
if __name__ == "__main__":
    from pyspark.mllib.tests.test_feature import *  # noqa: F401

    # Emit XML test reports when xmlrunner is available; otherwise fall back
    # to the default text runner.
    runner = None
    try:
        import xmlrunner
    except ImportError:
        pass
    else:
        runner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    unittest.main(testRunner=runner, verbosity=2)
| [
"pyspark.mllib.linalg.SparseVector",
"pyspark.mllib.feature.StandardScaler",
"numpy.tile",
"pyspark.mllib.feature.ElementwiseProduct",
"numpy.abs",
"pyspark.mllib.feature.IDF",
"pyspark.mllib.feature.Word2Vec",
"pyspark.mllib.linalg.Vectors.sparse",
"pyspark.mllib.linalg.DenseVector",
"math.sqrt",... | [((7142, 7191), 'unittest.main', 'unittest.main', ([], {'testRunner': 'testRunner', 'verbosity': '(2)'}), '(testRunner=testRunner, verbosity=2)\n', (7155, 7191), False, 'import unittest\n'), ((3529, 3553), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[3, 2, 1]'], {}), '([3, 2, 1])\n', (3542, 3553), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3574, 3598), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (3587, 3598), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3619, 3646), 'pyspark.mllib.linalg.Vectors.sparse', 'Vectors.sparse', (['(3)', '[0]', '[1]'], {}), '(3, [0], [1])\n', (3633, 3646), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3663, 3689), 'pyspark.mllib.feature.ElementwiseProduct', 'ElementwiseProduct', (['weight'], {}), '(weight)\n', (3681, 3689), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n'), ((4619, 4649), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (4632, 4649), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4659, 4689), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[3.0, 4.0, 5.0]'], {}), '([3.0, 4.0, 5.0])\n', (4672, 4689), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4699, 4729), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[6.0, 7.0, 8.0]'], {}), '([6.0, 7.0, 8.0])\n', (4712, 4729), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4739, 4769), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[9.0, 0.0, 1.0]'], {}), '([9.0, 0.0, 1.0])\n', (4752, 4769), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4803, 4842), 'pyspark.mllib.linalg.Vectors.sparse', 'Vectors.sparse', 
(['(3)', '[(1, 1.0), (2, 2.0)]'], {}), '(3, [(1, 1.0), (2, 2.0)])\n', (4817, 4842), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4852, 4901), 'pyspark.mllib.linalg.Vectors.sparse', 'Vectors.sparse', (['(3)', '[(0, 3.0), (1, 4.0), (2, 5.0)]'], {}), '(3, [(0, 3.0), (1, 4.0), (2, 5.0)])\n', (4866, 4901), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4911, 4960), 'pyspark.mllib.linalg.Vectors.sparse', 'Vectors.sparse', (['(3)', '[(0, 6.0), (1, 7.0), (2, 8.0)]'], {}), '(3, [(0, 6.0), (1, 7.0), (2, 8.0)])\n', (4925, 4960), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((4970, 5009), 'pyspark.mllib.linalg.Vectors.sparse', 'Vectors.sparse', (['(3)', '[(0, 9.0), (2, 1.0)]'], {}), '(3, [(0, 9.0), (2, 1.0)])\n', (4984, 5009), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((7021, 7087), 'xmlrunner.XMLTestRunner', 'xmlrunner.XMLTestRunner', ([], {'output': '"""target/test-reports"""', 'verbosity': '(2)'}), "(output='target/test-reports', verbosity=2)\n", (7044, 7087), False, 'import xmlrunner\n'), ((1226, 1274), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]'], {}), '([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3])\n', (1239, 1274), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((1288, 1336), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]'], {}), '([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1])\n', (1301, 1336), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((1350, 1398), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]'], {}), '([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0])\n', (1363, 1398), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((1412, 1460), 'pyspark.mllib.linalg.Vectors.dense', 'Vectors.dense', (['[2, 1, 0, 3, 0, 0, 5, 0, 2, 
3, 9]'], {}), '([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9])\n', (1425, 1460), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3092, 3123), 'pyspark.mllib.linalg.DenseVector', 'DenseVector', (['[-1.0, -1.0, -1.0]'], {}), '([-1.0, -1.0, -1.0])\n', (3103, 3123), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3398, 3426), 'pyspark.mllib.linalg.DenseVector', 'DenseVector', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (3409, 3426), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3742, 3766), 'pyspark.mllib.linalg.DenseVector', 'DenseVector', (['[12, 10, 6]'], {}), '([12, 10, 6])\n', (3753, 3766), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((3834, 3859), 'pyspark.mllib.linalg.SparseVector', 'SparseVector', (['(3)', '[0]', '[3]'], {}), '(3, [0], [3])\n', (3846, 3859), False, 'from pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\n'), ((1487, 1492), 'pyspark.mllib.feature.IDF', 'IDF', ([], {}), '()\n', (1490, 1492), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n'), ((2576, 2586), 'pyspark.mllib.feature.Word2Vec', 'Word2Vec', ([], {}), '()\n', (2584, 2586), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n'), ((2878, 2894), 'pyspark.mllib.feature.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2892, 2894), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n'), ((3291, 3307), 'pyspark.mllib.feature.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3305, 3307), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n'), ((3958, 3972), 'pyspark.mllib.feature.HashingTF', 'HashingTF', (['(100)'], {}), '(100)\n', (3967, 3972), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, 
ElementwiseProduct, Word2Vec\n'), ((6027, 6050), 'numpy.tile', 'tile', (['[1, 2, 3]', '(3, 1)'], {}), '([1, 2, 3], (3, 1))\n', (6031, 6050), False, 'from numpy import array, abs, tile\n'), ((5144, 5152), 'numpy.abs', 'abs', (['eq1'], {}), '(eq1)\n', (5147, 5152), False, 'from numpy import array, abs, tile\n'), ((5168, 5176), 'numpy.abs', 'abs', (['eq2'], {}), '(eq2)\n', (5171, 5176), False, 'from numpy import array, abs, tile\n'), ((6217, 6226), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (6221, 6226), False, 'from math import sqrt\n'), ((6239, 6248), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (6243, 6248), False, 'from math import sqrt\n'), ((6270, 6279), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (6274, 6279), False, 'from math import sqrt\n'), ((6293, 6302), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (6297, 6302), False, 'from math import sqrt\n'), ((1683, 1693), 'pyspark.mllib.feature.Word2Vec', 'Word2Vec', ([], {}), '()\n', (1691, 1693), False, 'from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\n')] |
import numpy as np
import ray
import torch
from ray.rllib.agents.ppo import ppo
from ray.rllib.models.preprocessors import get_preprocessor
from ray.tune import register_env
from games.base import MeanFieldGame
from games.mfg_wrapper import MFGGymWrapper
from simulator.mean_fields.base import MeanField
from solver.base import Solver
from solver.policy.finite_policy import FiniteFeedbackPolicy
class PPOSolver(Solver):
"""
Approximate deterministic solutions using Rllib
"""
def __init__(self, total_iterations=500, prior=None, eta=0, verbose=False, **kwargs):
super().__init__(**kwargs)
self.prior_policy = prior
self.eta = eta
self.total_iterations = total_iterations
self.verbose = verbose
ray.init(ignore_reinit_error=True)
def load_from_checkpoint(self, game: MeanFieldGame, checkpoint):
def env_creator(env_config=None):
return MFGGymWrapper(game, None, time_obs_augment=True)
register_env("MFG-v0", env_creator)
trainer = ppo.PPOTrainer(env="MFG-v0")
trainer.load_checkpoint(checkpoint)
class TrainerFeedbackPolicyPPO(FiniteFeedbackPolicy):
def __init__(self, state_space, action_space, eta, prior_policy=None):
super().__init__(state_space, action_space)
self.trainer = trainer
self.wrapper = MFGGymWrapper(game, None, time_obs_augment=True)
self.maxq = (eta == 0)
if not self.maxq:
self.tau = 1 / eta
self.prior_policy = prior_policy
obs_space = env_creator().observation_space
self.prep = get_preprocessor(obs_space)(obs_space)
def pmf(self, t, x):
obs = self.wrapper.augment_obs(t, x)
prepped_obs = self.prep.transform(obs)
likelihoods = np.exp(np.array([trainer.get_policy().compute_log_likelihoods([i], torch.tensor(np.expand_dims(prepped_obs, axis=0)))
for i in range(self.action_space.n)]))
return np.squeeze(likelihoods)
return TrainerFeedbackPolicyPPO(game.agent_observation_space, game.agent_action_space, self.eta,
prior_policy=self.prior_policy), {"rllib_saved_chkpt": checkpoint}
def solve(self, game: MeanFieldGame, mu: MeanField, **config):
def env_creator(env_config=None):
return MFGGymWrapper(game, mu, time_obs_augment=True)
register_env("MFG-v0", env_creator)
trainer = ppo.PPOTrainer(env="MFG-v0", config={
'num_workers': 6,
"gamma": 1,
"entropy_coeff": 0.01,
"clip_param": 0.2,
"kl_target": 0.006,
})
logs = []
for iteration in range(self.total_iterations):
log = trainer.train()
if self.verbose:
print("Loop {} mean {} ent {}".format(iteration, log['episode_reward_mean'],
log['info']['learner']['default_policy']['entropy']))
logs.append(log)
checkpoint = trainer.save()
trainer.load_checkpoint(checkpoint)
class TrainerFeedbackPolicyPPO(FiniteFeedbackPolicy):
def __init__(self, state_space, action_space, eta, prior_policy=None):
super().__init__(state_space, action_space)
self.trainer = trainer
self.wrapper = MFGGymWrapper(game, mu, time_obs_augment=True)
self.maxq = (eta == 0)
if not self.maxq:
self.tau = 1 / eta
self.prior_policy = prior_policy
obs_space = env_creator().observation_space
self.prep = get_preprocessor(obs_space)(obs_space)
def pmf(self, t, x):
obs = self.wrapper.augment_obs(t, x)
prepped_obs = self.prep.transform(obs)
likelihoods = np.exp(np.array([trainer.get_policy().compute_log_likelihoods([i], torch.tensor(np.expand_dims(prepped_obs, axis=0)))
for i in range(self.action_space.n)]))
return np.squeeze(likelihoods)
return TrainerFeedbackPolicyPPO(game.agent_observation_space, game.agent_action_space, self.eta,
prior_policy=self.prior_policy), {"rllib_saved_chkpt": checkpoint}
| [
"games.mfg_wrapper.MFGGymWrapper",
"ray.rllib.agents.ppo.ppo.PPOTrainer",
"ray.tune.register_env",
"numpy.squeeze",
"ray.rllib.models.preprocessors.get_preprocessor",
"numpy.expand_dims",
"ray.init"
] | [((763, 797), 'ray.init', 'ray.init', ([], {'ignore_reinit_error': '(True)'}), '(ignore_reinit_error=True)\n', (771, 797), False, 'import ray\n'), ((987, 1022), 'ray.tune.register_env', 'register_env', (['"""MFG-v0"""', 'env_creator'], {}), "('MFG-v0', env_creator)\n", (999, 1022), False, 'from ray.tune import register_env\n'), ((1041, 1069), 'ray.rllib.agents.ppo.ppo.PPOTrainer', 'ppo.PPOTrainer', ([], {'env': '"""MFG-v0"""'}), "(env='MFG-v0')\n", (1055, 1069), False, 'from ray.rllib.agents.ppo import ppo\n'), ((2542, 2577), 'ray.tune.register_env', 'register_env', (['"""MFG-v0"""', 'env_creator'], {}), "('MFG-v0', env_creator)\n", (2554, 2577), False, 'from ray.tune import register_env\n'), ((2596, 2729), 'ray.rllib.agents.ppo.ppo.PPOTrainer', 'ppo.PPOTrainer', ([], {'env': '"""MFG-v0"""', 'config': "{'num_workers': 6, 'gamma': 1, 'entropy_coeff': 0.01, 'clip_param': 0.2,\n 'kl_target': 0.006}"}), "(env='MFG-v0', config={'num_workers': 6, 'gamma': 1,\n 'entropy_coeff': 0.01, 'clip_param': 0.2, 'kl_target': 0.006})\n", (2610, 2729), False, 'from ray.rllib.agents.ppo import ppo\n'), ((929, 977), 'games.mfg_wrapper.MFGGymWrapper', 'MFGGymWrapper', (['game', 'None'], {'time_obs_augment': '(True)'}), '(game, None, time_obs_augment=True)\n', (942, 977), False, 'from games.mfg_wrapper import MFGGymWrapper\n'), ((2486, 2532), 'games.mfg_wrapper.MFGGymWrapper', 'MFGGymWrapper', (['game', 'mu'], {'time_obs_augment': '(True)'}), '(game, mu, time_obs_augment=True)\n', (2499, 2532), False, 'from games.mfg_wrapper import MFGGymWrapper\n'), ((1390, 1438), 'games.mfg_wrapper.MFGGymWrapper', 'MFGGymWrapper', (['game', 'None'], {'time_obs_augment': '(True)'}), '(game, None, time_obs_augment=True)\n', (1403, 1438), False, 'from games.mfg_wrapper import MFGGymWrapper\n'), ((2123, 2146), 'numpy.squeeze', 'np.squeeze', (['likelihoods'], {}), '(likelihoods)\n', (2133, 2146), True, 'import numpy as np\n'), ((3520, 3566), 'games.mfg_wrapper.MFGGymWrapper', 'MFGGymWrapper', (['game', 
'mu'], {'time_obs_augment': '(True)'}), '(game, mu, time_obs_augment=True)\n', (3533, 3566), False, 'from games.mfg_wrapper import MFGGymWrapper\n'), ((4251, 4274), 'numpy.squeeze', 'np.squeeze', (['likelihoods'], {}), '(likelihoods)\n', (4261, 4274), True, 'import numpy as np\n'), ((1688, 1715), 'ray.rllib.models.preprocessors.get_preprocessor', 'get_preprocessor', (['obs_space'], {}), '(obs_space)\n', (1704, 1715), False, 'from ray.rllib.models.preprocessors import get_preprocessor\n'), ((3816, 3843), 'ray.rllib.models.preprocessors.get_preprocessor', 'get_preprocessor', (['obs_space'], {}), '(obs_space)\n', (3832, 3843), False, 'from ray.rllib.models.preprocessors import get_preprocessor\n'), ((1979, 2014), 'numpy.expand_dims', 'np.expand_dims', (['prepped_obs'], {'axis': '(0)'}), '(prepped_obs, axis=0)\n', (1993, 2014), True, 'import numpy as np\n'), ((4107, 4142), 'numpy.expand_dims', 'np.expand_dims', (['prepped_obs'], {'axis': '(0)'}), '(prepped_obs, axis=0)\n', (4121, 4142), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy as np
from cycgkit.cgtypes import *
from cycgkit.boundingbox import BoundingBox
from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32
from ..commonValues import scaleNumber
class UVCalculationTypeEnum(object):
noUVs = 'noUVs'
spherical = 'spherical'
planar = 'planar'
box = 'box'
class NormalsCalculationTypeEnum(object):
smooth = 'smooth'
hard = 'hard'
class Mesh(object):
def __init__(self):
self.boneOffsets = {}
self.boneMinMax = {}
self._vertexBufferArray = None
self._indexBufferArray = None
self._declaration = {}
self._stride = 0
self._hasTexCoords = [False] * 8
self._materialIndex = -1
self._transformation = None
self._minmax = [[0, 0, 0], [0, 0, 0]]
self._VertexCount = 0
self._PrimitiveCount = 0
self._IndexCount = 0
self.ID = -1
def get_PrimitiveCount(self):
return self._PrimitiveCount
primitiveCount = property(fget=get_PrimitiveCount)
def get_VertexCount(self):
return self._VertexCount
vertexCount = property(fget=get_VertexCount)
def get_IndexCount(self):
return self._IndexCount
indexCount = property(fget=get_IndexCount)
@staticmethod
def fromAssimpMesh(mesh, transform, useChannel0AsUVChannel, lastUVs, boneDict, forceStatic):
"""
@param forceStatic:
@param mesh:
@type mesh: assimpCy.aiMesh
@param transform:
@type transform: mat4
@param useChannel0AsUVChannel:
@param lastUVs:
@rtype : Mesh
@param boneDict:
@type boneDict: dict
"""
newMesh_hasTexCoords = mesh.HasTextureCoords
if isinstance(useChannel0AsUVChannel, int) and useChannel0AsUVChannel > 0:
newMesh_hasTexCoords[useChannel0AsUVChannel] = True
vertices = mesh.mVertices
normals = mesh.mNormals
tangents = mesh.mTangents
bitangents = mesh.mBitangents
colours = mesh.mColors
if not forceStatic:
bones = mesh.mBones
else:
bones = None
texCoords = []
i = 0
while i < 8:
if newMesh_hasTexCoords[i]:
if useChannel0AsUVChannel != i:
texCoords.append(mesh.mTextureCoords[i])
else:
texCoords.append([])
i += 1
try:
if len(lastUVs) > 0:
lastind = int(lastUVs[0][2])
v = 0
while v <= mesh.mNumVertices - 1:
texCoords[useChannel0AsUVChannel].append(lastUVs[lastind])
lastind += 1
v += 1
lastUVs[0][2] += float(v)
except Exception as ex:
raise Exception(
"The imported UV's do not match the second model's vertex count." + '\nOriginal message:\n' +
ex.message)
nMesh = Mesh.fromObjectInfo(vertices, mesh.mFaces, None, texCoords, normals, tangents, bitangents, transform,
bones, colours, mesh.mMaterialIndex, boneDict)
return nMesh
@staticmethod
def fromObjectInfo(vertices, faces, minmax, UVsOrCalculationType, normals, tangents=None, bitangents=None,
transform=None, mesh_mBones=None, colors=None, materialIndex=0, boneDict=None,
forceReIndexing=False):
"""
@rtype : Mesh
"""
if not transform:
transform = mat4.identity()
baketrans = mesh_mBones is None
"""@type:mat4"""
invTranspose = transform.inversed().transposed()
newMesh = Mesh()
newMesh.ID = id(newMesh)
newMesh._materialIndex = materialIndex
hasColors = colors is not None and colors[0] is not None
hasTangents = tangents is not None
hasBones = mesh_mBones is not None
texCoords = []
reindexingRequired = forceReIndexing
if normals is None:
normals = NormalsCalculationTypeEnum.hard
if len(normals) < len(vertices) or isinstance(normals, type(NormalsCalculationTypeEnum.smooth)):
if normals == NormalsCalculationTypeEnum.hard:
normals, vertices, faces = Mesh.calculateHardNormals(vertices, faces)
reindexingRequired = True
else:
normals = Mesh.calculateSmoothNormals(vertices, faces)
hasNormals = True
uvsTypes = [list, ndarray]
hasAnyTex = False
if type(UVsOrCalculationType) in uvsTypes:
if len(UVsOrCalculationType) != 8:
raise ValueError("'UVsOrCalculationType should be of len=7. It is len={}".format(len(UVsOrCalculationType)))
for i in range(len(UVsOrCalculationType)):
if type(UVsOrCalculationType[i]) in uvsTypes and (len(UVsOrCalculationType[i]) == len(vertices)
or len(UVsOrCalculationType[i]) == len(faces)):
newMesh._hasTexCoords[i] = True
hasAnyTex = True
texCoords.append(UVsOrCalculationType[i])
if i >= 7:
break
if not hasAnyTex:
UVsOrCalculationType = UVCalculationTypeEnum.planar
vertices = [list(v) for v in vertices]
faces = [list(f) for f in faces]
if hasAnyTex:
texCoords[0] = [list(t) for t in texCoords[0]]
normals = [list(n) for n in normals] # todo: convert everything to vec3?
if UVsOrCalculationType == UVCalculationTypeEnum.spherical:
texCoords.append(Mesh.calculateSphericalUVS(vertices))
Mesh.fixSphereUVs(vertices, faces, texCoords[0], normals)
newMesh._hasTexCoords[0] = True
reindexingRequired = True
elif UVsOrCalculationType == UVCalculationTypeEnum.box:
texCoords.append(Mesh.calculateBoxUVS(vertices, faces, normals))
newMesh._hasTexCoords[0] = True
elif UVsOrCalculationType == UVCalculationTypeEnum.planar:
texCoords.append(Mesh.calculatePlanarUVS([vertices], normals))
newMesh._hasTexCoords[0] = True
if newMesh._hasTexCoords[0] and (tangents is None and bitangents is None):
tangents, bitangents = Mesh.calculateTanBitan(vertices, faces, texCoords[0], normals)
hasTangents = True
# hasBiTangents = True
tangents = [list(t) for t in tangents]
bitangents = [list(b) for b in bitangents]
if reindexingRequired:
res = Mesh.reIndexMesh(vertices, faces, normals, tangents, bitangents, texCoords[0])
vertices, faces, normals, tangents, bitangents, texCoords[0] = res
# TODO: Properly fix all present texcoord channels
_3floatStride = np.empty((1,), np.float32).strides[0] * 3
# _4intStride = np.empty((1,), np.int).strides[0] * 4
newMesh._stride = _3floatStride
newMesh._declaration = [VertexDeclaration("position", 0)]
if hasColors:
newMesh._declaration.append(VertexDeclaration("color", newMesh._stride))
newMesh._stride += _3floatStride
if hasNormals:
newMesh._declaration.append(VertexDeclaration("normal", newMesh._stride))
newMesh._stride += _3floatStride
if hasTangents:
newMesh._declaration.append(VertexDeclaration("tangent", newMesh._stride))
newMesh._stride += _3floatStride
newMesh._declaration.append(VertexDeclaration("bitangent", newMesh._stride))
newMesh._stride += _3floatStride
for i in range(8):
if newMesh._hasTexCoords[i]:
newMesh._declaration.append(VertexDeclaration("texcoord" + str(i), newMesh._stride))
newMesh._stride += (_3floatStride / 3) * 2
if hasBones:
newMesh._declaration.append(VertexDeclaration("boneweights", newMesh._stride))
newMesh._stride += (_3floatStride / 3) * 4
newMesh._declaration.append(VertexDeclaration("boneindexes", newMesh._stride))
newMesh._stride += (_3floatStride / 3) * 4
for b in mesh_mBones:
newMesh.boneOffsets[b.mName] = mat4(b.mOffsetMatrix.tolist())
# if baketrans and transform != mat4.identity():
vertices = transformVec(transform, vertices, baketrans)
normals = transformVec(invTranspose, normals, baketrans)
tangents = transformVec(invTranspose, tangents, baketrans)
bitangents = transformVec(invTranspose, bitangents, baketrans)
vertexStream = []
if minmax:
newMesh._minmax = minmax
# else:
# # vl = list(vertices[0])
# # newMesh._minmax = [list(vl), list(vl)]
# newMesh._minmax = [0, 0]
currentVertexN = 0
for v in vertices:
vertexStream.extend(v)
if hasColors:
vertexStream.extend(colors[currentVertexN])
# if hasNormals:
normal = normals[currentVertexN]
try:
normal = normal.normalized()
except Exception:
pass
vertexStream.extend(normal)
if hasTangents:
vertexStream.extend(tangents[currentVertexN])
# if hasBiTangents:
vertexStream.extend(bitangents[currentVertexN])
for uvchan in texCoords:
if not len(uvchan) == 0:
coord = [float(uvchan[currentVertexN][0]), float(uvchan[currentVertexN][1])]
coord[1] = 1 - coord[1]
vertexStream.extend(coord)
vl = vec3(v)
if baketrans:
cv = newMesh._minmax
newMesh._minmax[0][0] = min(cv[0][0], vl[0])
newMesh._minmax[0][1] = min(cv[0][1], vl[1])
newMesh._minmax[0][2] = min(cv[0][2], vl[2])
newMesh._minmax[1][0] = max(cv[1][0], vl[0])
newMesh._minmax[1][1] = max(cv[1][1], vl[1])
newMesh._minmax[1][2] = max(cv[1][2], vl[2])
if hasBones:
bb = 0
bonewl = [0, 0, 0, 0]
# boneil = [_maxBones, _maxBones, _maxBones, _maxBones]
boneil = [0, 0, 0, 0]
for b in mesh_mBones:
for w in b.mWeights:
if w.mVertexId == currentVertexN:
if bb < 4:
bName = b.mName
bonewl[bb] = float(w.mWeight)
boneil[bb] = float(boneDict[bName])
if bonewl[bb] > 0.0:
if bName in newMesh.boneMinMax.keys():
cv = list(newMesh.boneMinMax[bName])
newMesh.boneMinMax[bName][0][0] = min(cv[0][0], vl[0])
newMesh.boneMinMax[bName][0][1] = min(cv[0][1], vl[1])
newMesh.boneMinMax[bName][0][2] = min(cv[0][2], vl[2])
newMesh.boneMinMax[bName][1][0] = max(cv[1][0], vl[0])
newMesh.boneMinMax[bName][1][1] = max(cv[1][1], vl[1])
newMesh.boneMinMax[bName][1][2] = max(cv[1][2], vl[2])
else:
newMesh.boneMinMax[bName] = [vl, vl]
bb += 1
else:
self._engine.log('Vertex {} is affected by more than 4 bones.'.format(currentVertexN))
b.mWeights.remove(w)
break
vertexStream.extend(bonewl)
vertexStream.extend(boneil)
currentVertexN += 1
newMesh._vertexBufferArray = array(vertexStream, float32)
newMesh._indexBufferArray = array(faces, dtype=uint32).flatten()
newMesh._VertexCount = len(vertices)
newMesh._PrimitiveCount = len(faces)
newMesh._IndexCount = len(faces) * 3
return newMesh
@staticmethod
def calculateHardNormals(verts, faces):
newVertices = []
newFaces = []
finalNormals = []
normals = []
for i in range(len(faces)):
face = int(faces[i][0]), int(faces[i][1]), int(faces[i][2])
triang = [verts[face[0]], verts[face[1]], verts[face[2]]]
normal = Mesh.calculateSurfaceNormal(triang)
normals.append(normal)
for i in range(len(faces)):
face = int(faces[i][0]), int(faces[i][1]), int(faces[i][2])
triang = list([verts[face[0]], verts[face[1]], verts[face[2]]])
normal = list(normals[i])
newVertices.extend(triang)
finalNormals.extend([normal] * 3)
nvLen = len(newVertices)
newFaces.append([nvLen - 3, nvLen - 2, nvLen - 1])
return finalNormals, newVertices, newFaces
@staticmethod
def calculateSmoothNormalsAv(verts, faces):
normals = []
snormals = []
vertexDict = {}
for i in range(len(verts)):
vertexDict[i] = []
snormals.append(list(vec3(0)))
for i in range(len(faces)):
ind = faces[i]
vertexDict[ind[0]].append(i)
vertexDict[ind[1]].append(i)
vertexDict[ind[2]].append(i)
triang = [verts[ind[0]], verts[ind[1]], verts[ind[2]]]
normal = Mesh.calculateSurfaceNormal(triang)
normals.append(normal)
for vert in vertexDict.items():
totalN = vec3()
for face in vert[1]:
totalN += normals[face]
if len(vert[1]) > 0:
snormals[vert[0]] = list(totalN / len(vert[1]))
return snormals
@staticmethod
def calculateSmoothNormals(verts, inds):
snormals = []
for i in range(len(verts)):
snormals.append(vec3(0))
for i in range(len(inds)):
a, b, c = inds[i]
ind = int(a), int(b), int(c)
triang = [verts[ind[0]], verts[ind[1]], verts[ind[2]]]
normal = Mesh.calculateSurfaceNormal(triang)
snormals[ind[0]] += vec3(normal)
snormals[ind[1]] += vec3(normal)
snormals[ind[2]] += vec3(normal)
return snormals
@staticmethod
def calculateSurfaceNormal(Triangle):
# http://www.iquilezles.org/www/articles/normals/normals.htm
U = vec3(Triangle[0]) - vec3(Triangle[1])
V = vec3(Triangle[2]) - vec3(Triangle[1])
n = V.cross(U)
return list(n)
@staticmethod
def calculateSurfaceNormalAlt(Triangle):
# opengl wiki
U = vec3(Triangle[1]) - vec3(Triangle[0])
V = vec3(Triangle[2]) - vec3(Triangle[0])
Normal = vec3()
Normal.x = (U.y * V.z) - (U.z * V.y)
Normal.y = (U.z * V.x) - (U.x * V.z)
Normal.z = (U.x * V.y) - (U.y * V.x)
return list(Normal)
@staticmethod
def calculateSphericalUVS(vertices):
# http://sol.gfxile.net/sphere/
uvs = []
for vv in vertices:
ver = vec3(vv)
tlen = sqrt(ver.x * ver.x + ver.y * ver.y + ver.z * ver.z)
v = float(arccos(ver.y / tlen) / pi)
u = float((arctan2(ver.x, ver.z) / pi + 1.0) * 0.5)
uvs.append(list(vec3(u, 1.0 - v, 0)))
return uvs
@staticmethod
def calculateBoxUVS(vertices, faces, normals):
uvs = [None] * len(vertices)
quads = {}
for i in range(len(normals)):
isinQuads = False
norm = vec3(normals[i])
for qnorm in quads.keys():
if qnorm == norm:
isinQuads = True
break
if not isinQuads:
quads[norm] = []
for i in range(len(faces)):
verta = int(faces[i][0])
norm = vec3(normals[verta])
for qnorm in quads.keys():
if qnorm == norm:
quads[qnorm].append(faces[i])
break
quadUVS = []
for q in quads.items():
vertlist = []
for face in q[1]:
ver0 = vertices[int(face[0])]
ver1 = vertices[int(face[1])]
ver2 = vertices[int(face[2])]
vertlist.append([ver0, ver1, ver2])
res = Mesh.calculatePlanarUVS(vertlist, q[0])
quadUVS.append(res)
counter = 0
for q in quads.items():
lastface = 0
facesUvs = quadUVS[counter]
for face in q[1]:
uvs[int(face[0])] = facesUvs[lastface]
uvs[int(face[1])] = facesUvs[lastface + 1]
uvs[int(face[2])] = facesUvs[lastface + 2]
lastface += 3
counter += 1
return uvs
@staticmethod
def calculatePlanarUVS(groupedVertices, normal):
if isinstance(normal, list):
if normal.__len__() > 1:
normal = vec3(normal[0])
else:
normal = vec3(normal)
uvs = []
bbox = BoundingBox()
M = mat3.fromToRotation(normal, vec3(0, 0, 1))
newverts = []
for v in groupedVertices:
faceverts = []
for vv in v:
ver = M * vec3(vv)
faceverts.append(ver)
newverts.append(faceverts)
for v in newverts:
for vv in v:
bbox.addPoint(vv)
src1, src2 = bbox.getBounds()
srcU = [src1.x, src2.x]
srcV = [src1.y, src2.y]
for v in newverts:
for ver in v:
U = scaleNumber(ver.x, srcU, [0.0, 1.0])
V = scaleNumber(ver.y, srcV, [0.0, 1.0])
uvs.append([U, V, 1.0])
return uvs
@staticmethod
def fixPolarUVS(faces, uvs, vertices, normals, place):
assert isinstance(vertices, list)
assert isinstance(normals, list)
assert isinstance(uvs, list)
assert isinstance(faces, list)
sharesdict = {}
for i in range(len(vertices)):
sharesdict[i] = []
try:
for i in range(faces.__len__()):
face = faces[i]
for verN in range(3):
sharesdict[face[verN]].append(i)
except Exception:
raise
shareN = {}
for vert in sharesdict.items():
shareN[vert[0]] = len(vert[1])
svalues = sorted(shareN.values(), reverse=True)
polars = []
for ob in shareN.items():
if ob[1] in [svalues[0], svalues[1]]:
polars.append(ob[0])
if polars.__len__() == 2:
break
polarF0 = sharesdict[polars[0]]
polarF1 = sharesdict[polars[1]]
for f in polarF0:
face = faces[f]
face = int(face[0]), int(face[1]), int(face[2])
for v in range(3):
if face[v] == polars[0]:
newuv = list(uvs[face[v]])
uvstoav = [uvs[face[0]][place], uvs[face[1]][place], uvs[face[2]][place]]
uvstoav.pop(v)
avuv = (uvstoav[0] + uvstoav[1]) / 2.0
newvertex = list(vertices[face[v]])
vertices.append(newvertex)
if 0 in uvstoav:
for ui in range(2):
if uvstoav[ui] != 0:
if uvstoav[ui] > 0.5:
newuv[place] = 1.0 # - .063
else:
newuv[place] = avuv
break
else:
newuv[place] = avuv
uvs.append(newuv)
newnormal = vec3(normals[face[v]])
normals.append(list(newnormal))
base = len(vertices)
faces[f][v] = base - 1
break
for f in polarF1:
face = faces[f]
face = int(face[0]), int(face[1]), int(face[2])
for v in range(3):
if face[v] == polars[1]:
newuv = list(uvs[face[v]])
uvstoav = [uvs[face[0]][place], uvs[face[1]][place], uvs[face[2]][place]]
uvstoav.pop(v)
avuv = (uvstoav[0] + uvstoav[1]) / 2.0
newvertex = list(vertices[face[v]])
vertices.append(newvertex)
if 0 in uvstoav:
for ui in range(2):
if uvstoav[ui] != 0:
if uvstoav[ui] > 0.5:
newuv[place] = 1.0 # - .063
else:
newuv[place] = avuv
pass
break
else:
newuv[place] = avuv
uvs.append(newuv)
newnormal = vec3(normals[face[v]])
normals.append(list(newnormal))
base = len(vertices)
faces[f][v] = base - 1
break
@staticmethod
def fixSphereUVs(vertices, faces, uvs, normals):
"""
@param vertices:
@type vertices: list
@param faces:
@type faces: list
@param uvs:
@type uvs: list
@param normals:
@type normals: list
@return:
@rtype:
http://gamedev.stackexchange.com/a/33957
Check each triangle if it is on the seam.
1a. Get each texture coord for the triangle.
1b. If one or two have their U coord = 0, it is on the seam
1c. If the remaining texcoords have U > 0.5 (closer to 1 than 0) this triangle is also causing distortion.
If so, clone the vertices where texcoord.U = 0, and set the U value to 1.
Get the index of each cloned vertex
Alter the current triangle, to use theese indices instead.
Draw/Add the altered triangle
"""
place = 0
Mesh.fixPolarUVS(faces, uvs, vertices, normals, place)
# place = 1
for i in range(faces.__len__()):
face = faces[i]
face = int(face[0]), int(face[1]), int(face[2])
uv0 = uvs[face[0]]
uv1 = uvs[face[1]]
uv2 = uvs[face[2]]
allcuvs = [uv0, uv1, uv2]
uvlist = [uv0[place], uv1[place], uv2[place]]
testa = 0.0 in uvlist
testb = any([True if val > 0.5 else False for val in uvlist])
if testa and testb:
for uv in range(3):
if uvlist[uv] == 0.0:
newvertex = list(vertices[face[uv]])
vertices.append(newvertex)
newuv = list(allcuvs[uv])
newuv[place] = 1.0
newnormal = vec3(normals[face[uv]])
uvs.append(newuv)
normals.append(newnormal)
base = len(vertices)
faces[i][uv] = base - 1
@staticmethod
def calculateTanBitan(vertices, faces, uvs, normals):
# http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-13-normal-mapping/
tangents = [list([0.0, 0.0, 0.0])] * len(vertices)
bitangents = [list([0.0, 0.0, 0.0])] * len(vertices)
for face in faces:
face = int(face[0]), int(face[1]), int(face[2])
v0 = vertices[face[0]]
v1 = vertices[face[1]]
v2 = vertices[face[2]]
uv0 = uvs[face[0]]
uv1 = uvs[face[1]]
uv2 = uvs[face[2]]
norm0 = normals[face[0]]
norm1 = normals[face[1]]
norm2 = normals[face[2]]
deltaPos1 = vec3(v1) - vec3(v0)
deltaPos2 = vec3(v2) - vec3(v0)
deltaUV1 = vec3(uv1) - vec3(uv0)
deltaUV2 = vec3(uv2) - vec3(uv0)
# todo: replace this dirty fix for a proper fix to avoid division by 0
rdelta = (deltaUV1.x * deltaUV2.y - deltaUV1.y * deltaUV2.x) or 1.0
r = 1.0 / rdelta
tangent = (deltaPos1 * deltaUV2.y - deltaPos2 * deltaUV1.y) * r
bitangent = (deltaPos2 * deltaUV1.x - deltaPos1 * deltaUV2.x) * r
tangents[face[0]] = Mesh.fixInvertedTan(tangent, bitangent, norm0)
tangents[face[1]] = Mesh.fixInvertedTan(tangent, bitangent, norm1)
tangents[face[2]] = Mesh.fixInvertedTan(tangent, bitangent, norm2)
bitangents[face[0]] = bitangent
bitangents[face[1]] = bitangent
bitangents[face[2]] = bitangent
return tangents, bitangents
@staticmethod
def makeOrthoTan(t, n):
t = ((t - n) * (n * t))
try:
t = t.normalized() # todo: check change from .normalize to .normalized
except:
pass
return t
@staticmethod
def fixInvertedTan(t, b, n):
n = vec3(n)
# if (t.cross(n) * b) < 0.0:
if (n.cross(t) * b) < 0.0:
t = t * -1.0
return t
@staticmethod
def findSimilarVertexFromLUTable(v, u, n, uvs, norms, table, vertsCount):
key = '{}|{}|{}'.format(v[0], v[1], v[2])
if key in table.keys():
for vectorInd in table[key]:
if np.all(uvs[vectorInd] == u) and np.all(norms[vectorInd] == n):
return vectorInd, table
table[key].append(vertsCount)
return None, table
@staticmethod
def reIndexMesh(vertices, faces, normals, tangents, bitangents, uvs):
nver = []
nnorm = []
nuvs = []
nfaces = []
ntans = []
nbitans = []
table = defaultdict(list)
for face in faces:
nface = [0, 0, 0]
for i in range(3):
cFaceVert = int(face[i])
v = vertices[cFaceVert]
u = uvs[cFaceVert]
n = normals[cFaceVert]
sv, table = Mesh.findSimilarVertexFromLUTable(v, u, n, nuvs, nnorm, table, nver.__len__())
if sv is not None:
nface[i] = sv
if tangents is not None:
ntans[sv] = list(vec3(ntans[sv]) + vec3(tangents[cFaceVert]))
nbitans[sv] = list(vec3(nbitans[sv]) + vec3(bitangents[cFaceVert]))
else:
nface[i] = nver.__len__()
nver.append(v)
nuvs.append(u)
nnorm.append(n)
if tangents is not None:
t = tangents[cFaceVert]
bt = bitangents[cFaceVert]
ntans.append(t)
nbitans.append(bt)
nfaces.append(nface)
return nver, nfaces, nnorm, ntans, nbitans, nuvs
def transformVec(matrix, vectorsList, baketrans):
if 'ndarray' in str(type(vectorsList)):
if vectorsList.dtype != np.double:
newVectors = vectorsList.astype(np.double)
else:
newVectors = vectorsList
elif isinstance(vectorsList, list):
newVectors = np.array(vectorsList, np.double)
else:
raise TypeError('Wrong data type for vector')
if baketrans and matrix != mat4.identity():
return [vec3(ver) * matrix for ver in newVectors]
else:
return [vec3(ver) for ver in newVectors]
class VertexDeclaration(object):
def __init__(self, vname, offset):
"""
Defines the vertex elements contained in the renderable.
Accesible through objects's member: _declaration (List)
@type name: str
@type offset: int
@rtype : VertexDeclaration
@param self:
@param name:
@param offset:
"""
self._name = vname
self._offset = int(offset)
def __repr__(self):
return '{}, offset:{}'.format(self._name, self._offset)
| [
"numpy.sqrt",
"numpy.arccos",
"cycgkit.boundingbox.BoundingBox",
"numpy.array",
"numpy.empty",
"collections.defaultdict",
"numpy.arctan2",
"numpy.all"
] | [((12259, 12287), 'numpy.array', 'array', (['vertexStream', 'float32'], {}), '(vertexStream, float32)\n', (12264, 12287), False, 'from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32\n'), ((17643, 17656), 'cycgkit.boundingbox.BoundingBox', 'BoundingBox', ([], {}), '()\n', (17654, 17656), False, 'from cycgkit.boundingbox import BoundingBox\n'), ((26513, 26530), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (26524, 26530), False, 'from collections import defaultdict\n'), ((15656, 15707), 'numpy.sqrt', 'sqrt', (['(ver.x * ver.x + ver.y * ver.y + ver.z * ver.z)'], {}), '(ver.x * ver.x + ver.y * ver.y + ver.z * ver.z)\n', (15660, 15707), False, 'from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32\n'), ((27972, 28004), 'numpy.array', 'np.array', (['vectorsList', 'np.double'], {}), '(vectorsList, np.double)\n', (27980, 28004), True, 'import numpy as np\n'), ((12324, 12350), 'numpy.array', 'array', (['faces'], {'dtype': 'uint32'}), '(faces, dtype=uint32)\n', (12329, 12350), False, 'from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32\n'), ((7043, 7069), 'numpy.empty', 'np.empty', (['(1,)', 'np.float32'], {}), '((1,), np.float32)\n', (7051, 7069), True, 'import numpy as np\n'), ((15730, 15750), 'numpy.arccos', 'arccos', (['(ver.y / tlen)'], {}), '(ver.y / tlen)\n', (15736, 15750), False, 'from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32\n'), ((26117, 26144), 'numpy.all', 'np.all', (['(uvs[vectorInd] == u)'], {}), '(uvs[vectorInd] == u)\n', (26123, 26144), True, 'import numpy as np\n'), ((26149, 26178), 'numpy.all', 'np.all', (['(norms[vectorInd] == n)'], {}), '(norms[vectorInd] == n)\n', (26155, 26178), True, 'import numpy as np\n'), ((15780, 15801), 'numpy.arctan2', 'arctan2', (['ver.x', 'ver.z'], {}), '(ver.x, ver.z)\n', (15787, 15801), False, 'from numpy import arccos, arctan2, array, float32, ndarray, pi, sqrt, uint32\n')] |
from seetaaip.struct import *
import numpy
from seetaaip import _C
if __name__ == '__main__':
# a = Tensor(shape=[1, 2, 3], dtype=numpy.float32)
# b = Tensor(a.ref)
# print(a.type)
# print(a)
# print(b.type)
# print(b)
device = Device()
engine = Engine("../../lib/log")
package = engine.package
print(package.aip_version)
fake_image = numpy.zeros([300, 400, 3], dtype=numpy.uint8)
data = ImageData(fake_image, fmt=_C.FORMAT_U8BGR)
data2 = ImageData(data.ref)
print(data2.shape)
obj = Object(shape=Shape(_C.SHAPE_POINTS, [(1, 2), (3, 4)]), extra=Tensor("String========"))
obj2 = Object.FromRaw(obj.ref)
instance = Instance(engine, device, ["test1"], [obj2])
objects, images = instance.forward(0, [data2], [obj2])
print(objects, images)
instance.set("A", obj)
print(instance.get("A"))
print(instance.property())
instance.dispose()
engine.dispose()
p = Point(x=0, y=0)
print(p)
pass | [
"numpy.zeros"
] | [((383, 428), 'numpy.zeros', 'numpy.zeros', (['[300, 400, 3]'], {'dtype': 'numpy.uint8'}), '([300, 400, 3], dtype=numpy.uint8)\n', (394, 428), False, 'import numpy\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for the loss channel
Convention: The loss channel N(T) has the action
N(T){|n><m|} = \sum_{l=0}^{min(m,n)} ((1-T)/T) ^ l * T^((n+m)/2) / l! * \sqrt(n!m!/((n-l)!(m-l)!))n-l><m-l|
"""
import pytest
import numpy as np
from scipy.special import factorial
LOSS_TS = np.linspace(0.0, 1.0, 3, endpoint=True)
MAG_ALPHAS = np.linspace(0, 0.75, 3)
PHASE_ALPHAS = np.linspace(0, 2 * np.pi, 3, endpoint=False)
MAX_FOCK = 5
class TestRepresentationIndependent:
"""Basic implementation-independent tests."""
@pytest.mark.parametrize("T", LOSS_TS)
def test_loss_channel_on_vacuum(self, setup_backend, T, tol):
"""Tests loss channels on vacuum (result should be vacuum)."""
backend = setup_backend(1)
backend.loss(T, 0)
assert np.all(backend.is_vacuum(tol))
@pytest.mark.parametrize("mag_alpha", MAG_ALPHAS)
@pytest.mark.parametrize("phase_alpha", PHASE_ALPHAS)
def test_full_loss_channel_on_coherent_states(
self, setup_backend, mag_alpha, phase_alpha, tol
):
"""Tests the full-loss channel on various states (result should be vacuum)."""
T = 0.0
backend = setup_backend(1)
backend.displacement(mag_alpha, phase_alpha, 0)
backend.loss(T, 0)
assert np.all(backend.is_vacuum(tol))
# at the moment the Fock backends don't support
# thermal loss channels.
@pytest.mark.backends("gaussian","bosonic")
class TestThermalLossChannel:
"""Tests that make use of the Gaussian representation to test
thermal loss channels."""
@pytest.mark.parametrize("T", LOSS_TS)
def test_thermal_loss_channel_with_vacuum(self, T, setup_backend, pure, tol):
"""Tests thermal loss channel with nbar=0 (should be same as loss channel)."""
backend = setup_backend(1)
z = 0.432 * np.exp(1j * 0.534)
alpha = 0.654 + 1j * 0.239
nbar = 0.0
backend.squeeze(np.abs(z), np.angle(z), 0)
backend.displacement(np.abs(alpha), np.angle(alpha), 0)
backend.loss(T, 0)
state1 = backend.state()
backend.reset(pure=pure)
backend.squeeze(np.abs(z), np.angle(z), 0)
backend.displacement(np.abs(alpha), np.angle(alpha), 0)
backend.thermal_loss(T, nbar, 0)
state2 = backend.state()
assert np.allclose(state1.means(), state2.means(), atol=tol, rtol=0)
# Gaussian has one cov, bosonic has multiple covs
if state1._basis == "gaussian":
assert np.allclose(state1.cov(), state2.cov(), atol=tol, rtol=0)
elif state1._basis == "bosonic":
assert np.allclose(state1.covs(), state2.covs(), atol=tol, rtol=0)
@pytest.mark.parametrize("nbar", MAG_ALPHAS)
def test_full_thermal_loss_channel(self, nbar, setup_backend, pure, tol):
"""Tests thermal loss channel with T=0 (should produce a thermal state)."""
backend = setup_backend(1)
z = 0.432 * np.exp(1j * 0.534)
alpha = 0.654 + 1j * 0.239
T = 0
backend.prepare_thermal_state(nbar, 0)
state1 = backend.state()
backend.reset(pure=pure)
backend.squeeze(np.abs(z), np.angle(z), 0)
backend.displacement(np.abs(alpha), np.angle(alpha), 0)
backend.thermal_loss(T, nbar, 0)
state2 = backend.state()
assert np.allclose(state1.means(), state2.means(), atol=tol, rtol=0)
if state1._basis == "gaussian":
assert np.allclose(state1.cov(), state2.cov(), atol=tol, rtol=0)
elif state1._basis == "bosonic":
assert np.allclose(state1.covs(), state2.covs(), atol=tol, rtol=0)
@pytest.mark.parametrize("T", LOSS_TS)
@pytest.mark.parametrize("nbar", MAG_ALPHAS)
def test_thermal_loss_channel_on_squeezed_state(
self, nbar, T, setup_backend, pure, tol, hbar
):
"""Tests thermal loss channel on a squeezed state"""
backend = setup_backend(1)
r = 0.432
backend.squeeze(r, 0, 0)
backend.thermal_loss(T, nbar, 0)
state = backend.state()
if state._basis == "gaussian":
res = state.cov()
elif state._basis == "bosonic":
res = state.covs()
exp = np.diag(
[
T * np.exp(-2 * r) + (1 - T) * (2 * nbar + 1),
T * np.exp(2 * r) + (1 - T) * (2 * nbar + 1),
]
)*hbar/2
print(res, exp)
assert np.allclose(res, exp, atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf")
class TestFockRepresentation:
"""Tests that make use of the Fock basis representation."""
@pytest.mark.parametrize("T", LOSS_TS)
@pytest.mark.parametrize("mag_alpha", MAG_ALPHAS)
@pytest.mark.parametrize("phase_alpha", PHASE_ALPHAS)
def test_normalized_after_loss_channel_on_coherent_state(
self, setup_backend, T, mag_alpha, phase_alpha, tol
):
"""Tests if a range of loss states are normalized."""
backend = setup_backend(1)
backend.prepare_coherent_state(mag_alpha, phase_alpha, 0)
backend.loss(T, 0)
state = backend.state()
tr = state.trace()
assert np.allclose(tr, 1.0, atol=tol, rtol=0.0)
@pytest.mark.parametrize("T", LOSS_TS)
@pytest.mark.parametrize("n", range(MAX_FOCK))
def test_normalized_after_loss_channel_on_fock_state(
self, setup_backend, T, n, tol
):
"""Tests if a range of loss states are normalized."""
backend = setup_backend(1)
backend.prepare_fock_state(n, 0)
backend.loss(T, 0)
state = backend.state()
tr = state.trace()
assert np.allclose(tr, 1.0, atol=tol, rtol=0.0)
@pytest.mark.parametrize("n", range(MAX_FOCK))
def test_full_loss_channel_on_fock_states(self, setup_backend, n, tol):
"""Tests the full-loss channel on various states (result should be vacuum)."""
T = 0.0
backend = setup_backend(1)
backend.prepare_fock_state(n, 0)
backend.loss(T, 0)
assert np.all(backend.is_vacuum(tol))
@pytest.mark.parametrize("T", LOSS_TS)
@pytest.mark.parametrize("mag_alpha", MAG_ALPHAS)
@pytest.mark.parametrize("phase_alpha", PHASE_ALPHAS)
def test_loss_channel_on_coherent_states(
self, setup_backend, T, mag_alpha, phase_alpha, cutoff, tol
):
"""Tests various loss channels on coherent states (result should be coherent state with amplitude weighted by sqrt(T)."""
rootT_alpha = np.sqrt(T) * mag_alpha * np.exp(1j * phase_alpha)
backend = setup_backend(1)
backend.prepare_coherent_state(mag_alpha, phase_alpha, 0)
backend.loss(T, 0)
s = backend.state()
if s.is_pure:
numer_state = s.ket()
else:
numer_state = s.dm()
n = np.arange(cutoff)
ref_state = (
np.exp(-0.5 * np.abs(rootT_alpha) ** 2)
* rootT_alpha ** n
/ np.sqrt(factorial(n))
)
ref_state = np.outer(ref_state, np.conj(ref_state))
assert np.allclose(numer_state, ref_state, atol=tol, rtol=0.0)
| [
"numpy.abs",
"numpy.allclose",
"numpy.sqrt",
"numpy.conj",
"scipy.special.factorial",
"numpy.angle",
"numpy.exp",
"pytest.mark.parametrize",
"numpy.linspace",
"pytest.mark.backends",
"numpy.arange"
] | [((874, 913), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(3)'], {'endpoint': '(True)'}), '(0.0, 1.0, 3, endpoint=True)\n', (885, 913), True, 'import numpy as np\n'), ((927, 950), 'numpy.linspace', 'np.linspace', (['(0)', '(0.75)', '(3)'], {}), '(0, 0.75, 3)\n', (938, 950), True, 'import numpy as np\n'), ((966, 1010), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3)'], {'endpoint': '(False)'}), '(0, 2 * np.pi, 3, endpoint=False)\n', (977, 1010), True, 'import numpy as np\n'), ((1977, 2020), 'pytest.mark.backends', 'pytest.mark.backends', (['"""gaussian"""', '"""bosonic"""'], {}), "('gaussian', 'bosonic')\n", (1997, 2020), False, 'import pytest\n'), ((5057, 5091), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""', '"""tf"""'], {}), "('fock', 'tf')\n", (5077, 5091), False, 'import pytest\n'), ((1119, 1156), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (1142, 1156), False, 'import pytest\n'), ((1410, 1458), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mag_alpha"""', 'MAG_ALPHAS'], {}), "('mag_alpha', MAG_ALPHAS)\n", (1433, 1458), False, 'import pytest\n'), ((1464, 1516), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""phase_alpha"""', 'PHASE_ALPHAS'], {}), "('phase_alpha', PHASE_ALPHAS)\n", (1487, 1516), False, 'import pytest\n'), ((2152, 2189), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (2175, 2189), False, 'import pytest\n'), ((3265, 3308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nbar"""', 'MAG_ALPHAS'], {}), "('nbar', MAG_ALPHAS)\n", (3288, 3308), False, 'import pytest\n'), ((4219, 4256), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (4242, 4256), False, 'import pytest\n'), ((4262, 4305), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nbar"""', 'MAG_ALPHAS'], {}), "('nbar', MAG_ALPHAS)\n", 
(4285, 4305), False, 'import pytest\n'), ((5192, 5229), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (5215, 5229), False, 'import pytest\n'), ((5235, 5283), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mag_alpha"""', 'MAG_ALPHAS'], {}), "('mag_alpha', MAG_ALPHAS)\n", (5258, 5283), False, 'import pytest\n'), ((5289, 5341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""phase_alpha"""', 'PHASE_ALPHAS'], {}), "('phase_alpha', PHASE_ALPHAS)\n", (5312, 5341), False, 'import pytest\n'), ((5783, 5820), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (5806, 5820), False, 'import pytest\n'), ((6646, 6683), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""T"""', 'LOSS_TS'], {}), "('T', LOSS_TS)\n", (6669, 6683), False, 'import pytest\n'), ((6689, 6737), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mag_alpha"""', 'MAG_ALPHAS'], {}), "('mag_alpha', MAG_ALPHAS)\n", (6712, 6737), False, 'import pytest\n'), ((6743, 6795), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""phase_alpha"""', 'PHASE_ALPHAS'], {}), "('phase_alpha', PHASE_ALPHAS)\n", (6766, 6795), False, 'import pytest\n'), ((5014, 5053), 'numpy.allclose', 'np.allclose', (['res', 'exp'], {'atol': 'tol', 'rtol': '(0)'}), '(res, exp, atol=tol, rtol=0)\n', (5025, 5053), True, 'import numpy as np\n'), ((5736, 5776), 'numpy.allclose', 'np.allclose', (['tr', '(1.0)'], {'atol': 'tol', 'rtol': '(0.0)'}), '(tr, 1.0, atol=tol, rtol=0.0)\n', (5747, 5776), True, 'import numpy as np\n'), ((6217, 6257), 'numpy.allclose', 'np.allclose', (['tr', '(1.0)'], {'atol': 'tol', 'rtol': '(0.0)'}), '(tr, 1.0, atol=tol, rtol=0.0)\n', (6228, 6257), True, 'import numpy as np\n'), ((7393, 7410), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (7402, 7410), True, 'import numpy as np\n'), ((7637, 7692), 'numpy.allclose', 'np.allclose', (['numer_state', 
'ref_state'], {'atol': 'tol', 'rtol': '(0.0)'}), '(numer_state, ref_state, atol=tol, rtol=0.0)\n', (7648, 7692), True, 'import numpy as np\n'), ((2414, 2434), 'numpy.exp', 'np.exp', (['(1.0j * 0.534)'], {}), '(1.0j * 0.534)\n', (2420, 2434), True, 'import numpy as np\n'), ((2512, 2521), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (2518, 2521), True, 'import numpy as np\n'), ((2523, 2534), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (2531, 2534), True, 'import numpy as np\n'), ((2568, 2581), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2574, 2581), True, 'import numpy as np\n'), ((2583, 2598), 'numpy.angle', 'np.angle', (['alpha'], {}), '(alpha)\n', (2591, 2598), True, 'import numpy as np\n'), ((2721, 2730), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (2727, 2730), True, 'import numpy as np\n'), ((2732, 2743), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (2740, 2743), True, 'import numpy as np\n'), ((2777, 2790), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2783, 2790), True, 'import numpy as np\n'), ((2792, 2807), 'numpy.angle', 'np.angle', (['alpha'], {}), '(alpha)\n', (2800, 2807), True, 'import numpy as np\n'), ((3526, 3546), 'numpy.exp', 'np.exp', (['(1.0j * 0.534)'], {}), '(1.0j * 0.534)\n', (3532, 3546), True, 'import numpy as np\n'), ((3733, 3742), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (3739, 3742), True, 'import numpy as np\n'), ((3744, 3755), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (3752, 3755), True, 'import numpy as np\n'), ((3789, 3802), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (3795, 3802), True, 'import numpy as np\n'), ((3804, 3819), 'numpy.angle', 'np.angle', (['alpha'], {}), '(alpha)\n', (3812, 3819), True, 'import numpy as np\n'), ((7095, 7121), 'numpy.exp', 'np.exp', (['(1.0j * phase_alpha)'], {}), '(1.0j * phase_alpha)\n', (7101, 7121), True, 'import numpy as np\n'), ((7602, 7620), 'numpy.conj', 'np.conj', (['ref_state'], {}), '(ref_state)\n', (7609, 7620), True, 'import numpy as np\n'), 
((7070, 7080), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (7077, 7080), True, 'import numpy as np\n'), ((7538, 7550), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (7547, 7550), False, 'from scipy.special import factorial\n'), ((7459, 7478), 'numpy.abs', 'np.abs', (['rootT_alpha'], {}), '(rootT_alpha)\n', (7465, 7478), True, 'import numpy as np\n'), ((4837, 4851), 'numpy.exp', 'np.exp', (['(-2 * r)'], {}), '(-2 * r)\n', (4843, 4851), True, 'import numpy as np\n'), ((4900, 4913), 'numpy.exp', 'np.exp', (['(2 * r)'], {}), '(2 * r)\n', (4906, 4913), True, 'import numpy as np\n')] |
from flask import Flask, request, render_template
import numpy as np
import pandas as pd
import pickle
import joblib
from flasgger import Swagger
app = Flask(__name__)
Swagger(app)
model = open("linear_regression_model.pkl", "rb")
lr_model = joblib.load(model)
@app.route("/predict", methods=["GET", "POST"])
def predict():
"""Returns the predicted value from the ML model
---
parameters:
- name: alcohol
in: query
type: number
required: true
- name: volatile acidity
in: query
type: number
required: true
- name: sulphates
in: query
type: number
required: true
- name: total sulfur dioxide
in: query
type: number
required: true
responses:
500:
description: Prediction
"""
if request.method == "POST":
print(request.args.get("alcohol"))
print(request.args.get("volatile acidity"))
print(request.args.get("sulphates"))
print(request.args.get("total sulfur dioxide"))
try:
alc = float(request.args.get("alcohol"))
vol = float(request.args.get("volatile acidity"))
sul = float(request.args.get("sulphates"))
dio = float(request.args.get("total sulfur dioxide"))
pred_arg = [alc, vol, sul, dio]
pred_arg = np.array(pred_arg)
pred_arg = pred_arg.reshape(1, -1)
try:
model_pred = lr_model.predict(pred_arg)
model_pred = round(float(model_pred), 2)
except Exception as e:
return str(e)
except ValueError:
return "Please Enter valid values"
return str("The predicted value is: " + str(model_pred))
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| [
"flask.request.args.get",
"flask.Flask",
"flasgger.Swagger",
"numpy.array",
"joblib.load"
] | [((154, 169), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (159, 169), False, 'from flask import Flask, request, render_template\n'), ((170, 182), 'flasgger.Swagger', 'Swagger', (['app'], {}), '(app)\n', (177, 182), False, 'from flasgger import Swagger\n'), ((245, 263), 'joblib.load', 'joblib.load', (['model'], {}), '(model)\n', (256, 263), False, 'import joblib\n'), ((795, 822), 'flask.request.args.get', 'request.args.get', (['"""alcohol"""'], {}), "('alcohol')\n", (811, 822), False, 'from flask import Flask, request, render_template\n'), ((838, 874), 'flask.request.args.get', 'request.args.get', (['"""volatile acidity"""'], {}), "('volatile acidity')\n", (854, 874), False, 'from flask import Flask, request, render_template\n'), ((890, 919), 'flask.request.args.get', 'request.args.get', (['"""sulphates"""'], {}), "('sulphates')\n", (906, 919), False, 'from flask import Flask, request, render_template\n'), ((935, 975), 'flask.request.args.get', 'request.args.get', (['"""total sulfur dioxide"""'], {}), "('total sulfur dioxide')\n", (951, 975), False, 'from flask import Flask, request, render_template\n'), ((1294, 1312), 'numpy.array', 'np.array', (['pred_arg'], {}), '(pred_arg)\n', (1302, 1312), True, 'import numpy as np\n'), ((1014, 1041), 'flask.request.args.get', 'request.args.get', (['"""alcohol"""'], {}), "('alcohol')\n", (1030, 1041), False, 'from flask import Flask, request, render_template\n'), ((1067, 1103), 'flask.request.args.get', 'request.args.get', (['"""volatile acidity"""'], {}), "('volatile acidity')\n", (1083, 1103), False, 'from flask import Flask, request, render_template\n'), ((1129, 1158), 'flask.request.args.get', 'request.args.get', (['"""sulphates"""'], {}), "('sulphates')\n", (1145, 1158), False, 'from flask import Flask, request, render_template\n'), ((1184, 1224), 'flask.request.args.get', 'request.args.get', (['"""total sulfur dioxide"""'], {}), "('total sulfur dioxide')\n", (1200, 1224), False, 'from flask import 
Flask, request, render_template\n')] |
from typing import Any, Dict, Optional
import numpy as np
import pandas as pd
from streamlit_prophet.lib.utils.load import load_config
config, _, _ = load_config(
"config_streamlit.toml", "config_instructions.toml", "config_readme.toml"
)
def make_test_df(
ds: Optional[Dict[Any, Any]] = None,
cols: Optional[Dict[Any, Any]] = None,
start: str = "2010-01-01",
end: str = "2020-01-01",
freq: str = "D",
range: int = 10,
) -> pd.DataFrame:
"""Creates a sample dataframe with specifications defined by the arguments, for testing purpose.
Parameters
----------
ds : Optional[dict]
Specifications for date column.
cols : Optional[dict]
Specifications for other columns.
start : str
Start date for date column.
end : str
End date for date column.
freq : str
Frequency for date column.
range : int
Range for numerical columns.
Returns
-------
pd.DataFrame
Dataframe that will be used for unit tests.
"""
df = pd.DataFrame()
if ds is not None:
df["ds"] = pd.date_range(
start=start if "start_date" not in ds.keys() else ds["start_date"],
end=end if "end_date" not in ds.keys() else ds["end_date"],
freq=freq if "freq" not in ds.keys() else ds["freq"],
)
if "str" in ds.keys():
df["ds"] = df["ds"].map(lambda x: x.strftime(ds["str"]))
if "frac_nan" in ds.keys():
df.loc[df.sample(frac=ds["frac_nan"]).index, "ds"] = np.nan
if cols is not None:
N = len(df) if len(df) > 0 else 1000
for col in cols.keys():
if "cat" in cols[col].keys():
df[col] = np.random.choice(a=cols[col]["cat"], size=N)
else:
range = range if "range" not in cols[col].keys() else cols[col]["range"]
df[col] = np.random.randn(1, N).ravel() * range
if "abs" in cols[col].keys():
df[col] = abs(df[col])
if "frac_nan" in cols[col].keys():
df.loc[df.sample(frac=cols[col]["frac_nan"]).index, col] = np.nan
return df
# Synthetic categorical variables
int_long_target = list(range(1, config["validity"]["min_target_cardinality"] + 2))
int_short_target = list(range(1, config["validity"]["min_target_cardinality"] - 1))
int_long_cat = list(range(1, config["validity"]["max_cat_reg_cardinality"] + 2))
int_short_cat = list(range(1, config["validity"]["max_cat_reg_cardinality"] - 1))
str_long_target = [
chr(ord("@") + i) for i in range(1, config["validity"]["min_target_cardinality"] + 2)
]
str_short_target = [
chr(ord("@") + i) for i in range(1, config["validity"]["min_target_cardinality"] - 1)
]
str_long_cat = [
chr(ord("@") + i) for i in range(1, config["validity"]["max_cat_reg_cardinality"] + 2)
]
str_short_cat = [
chr(ord("@") + i) for i in range(1, config["validity"]["max_cat_reg_cardinality"] - 1)
]
# Test dataframes
df_test = dict()
df_test[0] = pd.DataFrame()
df_test[1] = make_test_df(
cols={0: {"cat": ["A", "B", "C"]}, 1: {"cat": ["A", "B"]}, 2: {"cat": ["A"]}, 3: {}}
)
df_test[2] = make_test_df(
cols={0: {"cat": ["A"], "frac_nan": 1}, 1: {"cat": ["A"], "frac_nan": 0.1}, 2: {"cat": ["A"]}}
)
df_test[3] = make_test_df(cols={"y": {"cat": int_short_target}})
df_test[4] = make_test_df(cols={"y": {"cat": int_short_target, "frac_nan": 0.1}})
df_test[5] = make_test_df(cols={"y": {"cat": int_short_target, "frac_nan": 1}})
df_test[6] = make_test_df(cols={"y": {"cat": str_long_target}})
df_test[7] = make_test_df(cols={"y": {"cat": str_long_target, "frac_nan": 0.1}})
df_test[8] = make_test_df(ds={}, cols={"y": {"cat": int_long_target}})
df_test[9] = make_test_df(
ds={"str": "%Y-%m-%d"}, cols={"y": {"cat": int_long_target, "frac_nan": 0.1}}
)
df_test[10] = make_test_df(ds={"freq": "Y"}, cols={"y": {"range": 100}})
df_test[11] = make_test_df(ds={"freq": "H"}, cols={"y": {"range": 1, "abs": True}})
df_test[12] = make_test_df(ds={"frac_nan": 0.1}, cols={"y": {"range": 1, "frac_nan": 0.1}})
df_test[13] = make_test_df(
cols={
0: {},
1: {"frac_nan": 0.1},
2: {"frac_nan": 1},
3: {"abs": True},
4: {"cat": int_short_cat},
5: {"cat": int_short_cat, "frac_nan": 0.1},
6: {"cat": str_short_cat, "frac_nan": 0.1},
}
)
df_test[14] = lambda x: make_test_df(
ds={"freq": x},
cols={
"y": {},
0: {},
1: {"frac_nan": 0.1},
2: {"frac_nan": 1},
3: {"abs": True},
4: {"cat": int_short_cat},
5: {"cat": int_short_cat, "frac_nan": 0.1},
6: {"cat": str_short_cat, "frac_nan": 0.1},
7: {"cat": str_long_cat},
8: {"cat": int_long_cat},
9: {"cat": str_long_cat, "frac_nan": 0.1},
10: {"cat": int_long_cat, "frac_nan": 0.1},
11: {"cat": ["A"]},
12: {"cat": ["A"], "frac_nan": 0.1},
},
)
df_test[15] = make_test_df(cols={"y": {"cat": [2]}})
df_test[16] = make_test_df(cols={"y": {"cat": [3]}})
df_test[17] = make_test_df(ds={}, cols={"truth": {}, "forecast": {}})
df_test[18] = make_test_df(ds={}, cols={"truth": {"frac_nan": 1}, "forecast": {"frac_nan": 1}})
df_test[19] = make_test_df(ds={"freq": "W"}, cols={"truth": {"frac_nan": 0.1}, "forecast": {}})
df_test[20] = make_test_df(ds={}, cols={"y": {}, "regressor1": {}, "regressor2": {"cat": [0, 1]}})
| [
"pandas.DataFrame",
"streamlit_prophet.lib.utils.load.load_config",
"numpy.random.randn",
"numpy.random.choice"
] | [((152, 242), 'streamlit_prophet.lib.utils.load.load_config', 'load_config', (['"""config_streamlit.toml"""', '"""config_instructions.toml"""', '"""config_readme.toml"""'], {}), "('config_streamlit.toml', 'config_instructions.toml',\n 'config_readme.toml')\n", (163, 242), False, 'from streamlit_prophet.lib.utils.load import load_config\n'), ((3035, 3049), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3047, 3049), True, 'import pandas as pd\n'), ((1048, 1062), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1060, 1062), True, 'import pandas as pd\n'), ((1726, 1770), 'numpy.random.choice', 'np.random.choice', ([], {'a': "cols[col]['cat']", 'size': 'N'}), "(a=cols[col]['cat'], size=N)\n", (1742, 1770), True, 'import numpy as np\n'), ((1904, 1925), 'numpy.random.randn', 'np.random.randn', (['(1)', 'N'], {}), '(1, N)\n', (1919, 1925), True, 'import numpy as np\n')] |
import sys
import numpy
from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy
from PyQt5.QtGui import QIntValidator, QDoubleValidator
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc
from oasys.widgets.exchange import DataExchangeObject
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
import scipy.constants as codata
class OWxpower(XoppyWidget):
name = "POWER"
id = "orange.widgets.dataxpower"
description = "Power Absorbed and Transmitted by Optical Elements"
icon = "icons/xoppy_xpower.png"
priority = 3
category = ""
keywords = ["xoppy", "power"]
inputs = [("ExchangeData", DataExchangeObject, "acceptExchangeData")]
SOURCE = Setting(2)
ENER_MIN = Setting(1000.0)
ENER_MAX = Setting(50000.0)
ENER_N = Setting(100)
SOURCE_FILE = Setting("?")
NELEMENTS = Setting(1)
EL1_FOR = Setting("Be")
EL1_FLAG = Setting(0)
EL1_THI = Setting(0.5)
EL1_ANG = Setting(3.0)
EL1_ROU = Setting(0.0)
EL1_DEN = Setting("?")
EL2_FOR = Setting("Rh")
EL2_FLAG = Setting(1)
EL2_THI = Setting(0.5)
EL2_ANG = Setting(3.0)
EL2_ROU = Setting(0.0)
EL2_DEN = Setting("?")
EL3_FOR = Setting("Al")
EL3_FLAG = Setting(0)
EL3_THI = Setting(0.5)
EL3_ANG = Setting(3.0)
EL3_ROU = Setting(0.0)
EL3_DEN = Setting("?")
EL4_FOR = Setting("B")
EL4_FLAG = Setting(0)
EL4_THI = Setting(0.5)
EL4_ANG = Setting(3.0)
EL4_ROU = Setting(0.0)
EL4_DEN = Setting("?")
EL5_FOR = Setting("Pt")
EL5_FLAG = Setting(1)
EL5_THI = Setting(0.5)
EL5_ANG = Setting(3.0)
EL5_ROU = Setting(0.0)
EL5_DEN = Setting("?")
PLOT_SETS = Setting(2)
FILE_DUMP = 0
def build_gui(self):
self.leftWidgetPart.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding))
self.leftWidgetPart.setMaximumWidth(self.CONTROL_AREA_WIDTH + 20)
self.leftWidgetPart.updateGeometry()
box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-10)
idx = -1
#widget index 2
idx += 1
box1 = gui.widgetBox(box)
self.box_source = gui.comboBox(box1, self, "SOURCE",
label=self.unitLabels()[idx], addSpace=False,
items=['From Oasys wire', 'Normalized to 1', 'From external file. '],
valueType=int, orientation="horizontal", labelWidth=150)
self.show_at(self.unitFlags()[idx], box1)
#widget index 6
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ENER_MIN",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 7
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ENER_MAX",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 8
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "ENER_N",
label=self.unitLabels()[idx], addSpace=False,
valueType=int, validator=QIntValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 9
idx += 1
box1 = gui.widgetBox(box)
file_box = oasysgui.widgetBox(box1, "", addSpace=False, orientation="horizontal", height=25)
self.le_file = oasysgui.lineEdit(file_box, self, "SOURCE_FILE",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
#widget index 10
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "NELEMENTS",
label=self.unitLabels()[idx], addSpace=False,
items=['1', '2', '3', '4', '5'],
valueType=int, orientation="horizontal", callback=self.set_NELEMENTS, labelWidth=330)
self.show_at(self.unitFlags()[idx], box1)
#widget index 11
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL1_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 12
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL1_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 13
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 14
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 15
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 16
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 17
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL2_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 18
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL2_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 19
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 20
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 21
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 22
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 23
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL3_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 24
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL3_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 25
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 26
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 27
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 28
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 29
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL4_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 30
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL4_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 31
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 32
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 33
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 34
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 35
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL5_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 36
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL5_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 37
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 38
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 39
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 40
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 41
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "PLOT_SETS",
label=self.unitLabels()[idx], addSpace=False,
items=['Local properties', 'Cumulated intensities', 'All'],
valueType=int, orientation="horizontal", labelWidth=250, callback=self.set_NELEMENTS)
self.show_at(self.unitFlags()[idx], box1)
#widget index 42
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "FILE_DUMP",
label=self.unitLabels()[idx], addSpace=False,
items=['No', 'Yes (power.spec)'],
valueType=int, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
self.input_spectrum = None
def set_NELEMENTS(self):
self.initializeTabs()
def set_EL_FLAG(self):
self.initializeTabs()
def unitLabels(self):
return ['Input beam:',
'From energy [eV]: ',
'To energy [eV]:',
'Energy points: ',
'File with input beam spectral power:',
'Number of elements:',
'1st oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'2nd oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'3rd oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'4th oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'5th oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
"Plot","Dump file"]
def unitFlags(self):
return ['True',
'self.SOURCE == 1',
'self.SOURCE == 1',
'self.SOURCE == 1',
'self.SOURCE == 2',
'True',
'self.NELEMENTS >= 0',' self.NELEMENTS >= 0','self.EL1_FLAG == 0 and self.NELEMENTS >= 0','self.EL1_FLAG != 0 and self.NELEMENTS >= 0','self.EL1_FLAG != 0 and self.NELEMENTS >= 0',' self.NELEMENTS >= 0',
'self.NELEMENTS >= 1',' self.NELEMENTS >= 1','self.EL2_FLAG == 0 and self.NELEMENTS >= 1','self.EL2_FLAG != 0 and self.NELEMENTS >= 1','self.EL2_FLAG != 0 and self.NELEMENTS >= 1',' self.NELEMENTS >= 1',
'self.NELEMENTS >= 2',' self.NELEMENTS >= 2','self.EL3_FLAG == 0 and self.NELEMENTS >= 2','self.EL3_FLAG != 0 and self.NELEMENTS >= 2','self.EL3_FLAG != 0 and self.NELEMENTS >= 2',' self.NELEMENTS >= 2',
'self.NELEMENTS >= 3',' self.NELEMENTS >= 3','self.EL4_FLAG == 0 and self.NELEMENTS >= 3','self.EL4_FLAG != 0 and self.NELEMENTS >= 3','self.EL4_FLAG != 0 and self.NELEMENTS >= 3',' self.NELEMENTS >= 3',
'self.NELEMENTS >= 4',' self.NELEMENTS >= 4','self.EL5_FLAG == 0 and self.NELEMENTS >= 4','self.EL5_FLAG != 0 and self.NELEMENTS >= 4','self.EL5_FLAG != 0 and self.NELEMENTS >= 4',' self.NELEMENTS >= 4',
'True','True']
def get_help_name(self):
return 'power'
def selectFile(self):
self.le_source_file.setText(oasysgui.selectFileFromDialog(self, self.SOURCE_FILE, "Open Source File", file_extension_filter="*.*"))
def acceptExchangeData(self, exchangeData):
self.input_spectrum = None
self.SOURCE = 0
# self.box_source.setCurrentIndex(self.SOURCE)
try:
if not exchangeData is None:
if exchangeData.get_program_name() == "XOPPY":
no_bandwidth = False
if exchangeData.get_widget_name() =="UNDULATOR_FLUX" :
# self.SOURCE_FILE = "xoppy_undulator_flux"
no_bandwidth = True
index_flux = 2
elif exchangeData.get_widget_name() == "BM" :
if exchangeData.get_content("is_log_plot") == 1:
raise Exception("Logaritmic X scale of Xoppy Energy distribution not supported")
if exchangeData.get_content("calculation_type") == 0 and exchangeData.get_content("psi") == 0:
# self.SOURCE_FILE = "xoppy_bm_flux"
no_bandwidth = True
index_flux = 6
else:
raise Exception("Xoppy result is not an Flux vs Energy distribution integrated in Psi")
elif exchangeData.get_widget_name() =="XWIGGLER" :
# self.SOURCE_FILE = "xoppy_xwiggler_flux"
no_bandwidth = True
index_flux = 2
elif exchangeData.get_widget_name() =="WS" :
# self.SOURCE_FILE = "xoppy_xwiggler_flux"
no_bandwidth = True
index_flux = 2
elif exchangeData.get_widget_name() =="XTUBES" :
# self.SOURCE_FILE = "xoppy_xtubes_flux"
index_flux = 1
no_bandwidth = True
elif exchangeData.get_widget_name() =="XTUBE_W" :
# self.SOURCE_FILE = "xoppy_xtube_w_flux"
index_flux = 1
no_bandwidth = True
elif exchangeData.get_widget_name() =="BLACK_BODY" :
# self.SOURCE_FILE = "xoppy_black_body_flux"
no_bandwidth = True
index_flux = 2
elif exchangeData.get_widget_name() =="UNDULATOR_RADIATION" :
# self.SOURCE_FILE = "xoppy_undulator_radiation"
no_bandwidth = True
index_flux = 1
elif exchangeData.get_widget_name() =="POWER" :
# self.SOURCE_FILE = "xoppy_undulator_power"
no_bandwidth = True
index_flux = -1
elif exchangeData.get_widget_name() =="POWER3D" :
# self.SOURCE_FILE = "xoppy_power3d"
no_bandwidth = True
index_flux = 1
else:
raise Exception("Xoppy Source not recognized")
# self.SOURCE_FILE += "_" + str(id(self)) + ".dat"
spectrum = exchangeData.get_content("xoppy_data")
if exchangeData.get_widget_name() =="UNDULATOR_RADIATION" or \
exchangeData.get_widget_name() =="POWER3D":
[p, e, h, v ] = spectrum
tmp = p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0])*codata.e*1e3
spectrum = numpy.vstack((e,p.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0])*
codata.e*1e3))
self.input_spectrum = spectrum
else:
if not no_bandwidth:
spectrum[:,index_flux] /= 0.001*spectrum[:,0]
self.input_spectrum = numpy.vstack((spectrum[:,0],spectrum[:,index_flux]))
self.process_showers()
self.compute()
except Exception as exception:
QMessageBox.critical(self, "Error",
str(exception),
QMessageBox.Ok)
#raise exception
def check_fields(self):
if self.SOURCE == 1:
self.ENER_MIN = congruence.checkPositiveNumber(self.ENER_MIN, "Energy from")
self.ENER_MAX = congruence.checkStrictlyPositiveNumber(self.ENER_MAX, "Energy to")
congruence.checkLessThan(self.ENER_MIN, self.ENER_MAX, "Energy from", "Energy to")
self.NPOINTS = congruence.checkStrictlyPositiveNumber(self.ENER_N, "Energy Points")
elif self.SOURCE == 2:
congruence.checkFile(self.SOURCE_FILE)
if self.NELEMENTS >= 1:
self.EL1_FOR = congruence.checkEmptyString(self.EL1_FOR, "1st oe formula")
if self.EL1_FLAG == 0: # filter
self.EL1_THI = congruence.checkStrictlyPositiveNumber(self.EL1_THI, "1st oe filter thickness")
elif self.EL1_FLAG == 1: # mirror
self.EL1_ANG = congruence.checkStrictlyPositiveNumber(self.EL1_ANG, "1st oe mirror angle")
self.EL1_ROU = congruence.checkPositiveNumber(self.EL1_ROU, "1st oe mirror roughness")
if not self.EL1_DEN.strip() == "?":
self.EL1_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL1_DEN, "1st oe density")), "1st oe density"))
if self.NELEMENTS >= 2:
self.EL2_FOR = congruence.checkEmptyString(self.EL2_FOR, "2nd oe formula")
if self.EL2_FLAG == 0: # filter
self.EL2_THI = congruence.checkStrictlyPositiveNumber(self.EL2_THI, "2nd oe filter thickness")
elif self.EL2_FLAG == 1: # mirror
self.EL2_ANG = congruence.checkStrictlyPositiveNumber(self.EL2_ANG, "2nd oe mirror angle")
self.EL2_ROU = congruence.checkPositiveNumber(self.EL2_ROU, "2nd oe mirror roughness")
if not self.EL2_DEN.strip() == "?":
self.EL2_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL2_DEN, "2nd oe density")), "2nd oe density"))
if self.NELEMENTS >= 3:
self.EL3_FOR = congruence.checkEmptyString(self.EL3_FOR, "3rd oe formula")
if self.EL3_FLAG == 0: # filter
self.EL3_THI = congruence.checkStrictlyPositiveNumber(self.EL3_THI, "3rd oe filter thickness")
elif self.EL3_FLAG == 1: # mirror
self.EL3_ANG = congruence.checkStrictlyPositiveNumber(self.EL3_ANG, "3rd oe mirror angle")
self.EL3_ROU = congruence.checkPositiveNumber(self.EL3_ROU, "3rd oe mirror roughness")
if not self.EL3_DEN.strip() == "?":
self.EL3_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL3_DEN, "3rd oe density")), "3rd oe density"))
if self.NELEMENTS >= 4:
self.EL4_FOR = congruence.checkEmptyString(self.EL4_FOR, "4th oe formula")
if self.EL4_FLAG == 0: # filter
self.EL4_THI = congruence.checkStrictlyPositiveNumber(self.EL4_THI, "4th oe filter thickness")
elif self.EL4_FLAG == 1: # mirror
self.EL4_ANG = congruence.checkStrictlyPositiveNumber(self.EL4_ANG, "4th oe mirror angle")
self.EL4_ROU = congruence.checkPositiveNumber(self.EL4_ROU, "4th oe mirror roughness")
if not self.EL4_DEN.strip() == "?":
self.EL4_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL4_DEN, "4th oe density")), "4th oe density"))
if self.NELEMENTS >= 5:
self.EL5_FOR = congruence.checkEmptyString(self.EL5_FOR, "5th oe formula")
if self.EL5_FLAG == 0: # filter
self.EL5_THI = congruence.checkStrictlyPositiveNumber(self.EL5_THI, "5th oe filter thickness")
elif self.EL5_FLAG == 1: # mirror
self.EL5_ANG = congruence.checkStrictlyPositiveNumber(self.EL5_ANG, "5th oe mirror angle")
self.EL5_ROU = congruence.checkPositiveNumber(self.EL5_ROU, "5th oe mirror roughness")
if not self.EL5_DEN.strip() == "?":
self.EL5_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL5_DEN, "5th oe density")), "5th oe density"))
def do_xoppy_calculation(self):
return self.xoppy_calc_xpower()
def extract_data_from_xoppy_output(self, calculation_output):
return calculation_output
def get_data_exchange_widget_name(self):
return "POWER"
def getKind(self, oe_n):
if oe_n == 1:
return self.EL1_FLAG
elif oe_n == 2:
return self.EL2_FLAG
elif oe_n == 3:
return self.EL3_FLAG
elif oe_n == 4:
return self.EL4_FLAG
elif oe_n == 5:
return self.EL5_FLAG
def do_plot_local(self):
out = False
if self.PLOT_SETS == 0: out = True
if self.PLOT_SETS == 2: out = True
return out
def do_plot_intensity(self):
out = False
if self.PLOT_SETS == 1: out = True
if self.PLOT_SETS == 2: out = True
return out
def getTitles(self):
titles = []
if self.do_plot_intensity(): titles.append("Input beam")
for oe_n in range(1, self.NELEMENTS+2):
kind = self.getKind(oe_n)
if kind == 0: # FILTER
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Total CS")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Mu")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Transmitivity")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Absorption")
if self.do_plot_intensity(): titles.append("Intensity after oe " + str(oe_n))
else: # MIRROR
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] 1-Re[n]=delta")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Im[n]=beta")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] delta/beta")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Reflectivity-s")
if self.do_plot_local(): titles.append("[oe " + str(oe_n) + "] Transmitivity")
if self.do_plot_intensity(): titles.append("Intensity after oe " + str(oe_n))
return titles
def getXTitles(self):
xtitles = []
if self.do_plot_intensity(): xtitles.append("Photon Energy [eV]")
for oe_n in range(1, self.NELEMENTS+2):
kind = self.getKind(oe_n)
if kind == 0: # FILTER
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_intensity(): xtitles.append("Photon Energy [eV]")
else: # MIRROR
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_local(): xtitles.append("Photon Energy [eV]")
if self.do_plot_intensity(): xtitles.append("Photon Energy [eV]")
return xtitles
def getYTitles(self):
ytitles = []
if self.do_plot_intensity(): ytitles.append("Source")
for oe_n in range(1, self.NELEMENTS+2):
kind = self.getKind(oe_n)
if kind == 0: # FILTER
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Total CS cm2/g")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Mu cm^-1")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Transmitivity")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Absorption")
if self.do_plot_intensity(): ytitles.append("Intensity after oe " + str(oe_n))
else: # MIRROR
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] 1-Re[n]=delta")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Im[n]=beta")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] delta/beta")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Reflectivity-s")
if self.do_plot_local(): ytitles.append("[oe " + str(oe_n) + "] Transmitivity")
if self.do_plot_intensity(): ytitles.append("Intensity after oe " + str(oe_n))
return ytitles
def getVariablesToPlot(self):
variables = []
if self.do_plot_intensity(): variables.append((0, 1)) # start plotting the source
shift = 0
for oe_n in range(1, self.NELEMENTS+2):
kind = self.getKind(oe_n)
if oe_n == 1:
shift = 0
else:
kind_previous = self.getKind(oe_n-1)
if kind_previous == 0: # FILTER
shift += 5
else:
shift += 6
if kind == 0: # FILTER
if self.do_plot_local(): variables.append((0, 2+shift))
if self.do_plot_local(): variables.append((0, 3+shift))
if self.do_plot_local(): variables.append((0, 4+shift))
if self.do_plot_local(): variables.append((0, 5+shift))
if self.do_plot_intensity(): variables.append((0, 6+shift))
else:
if self.do_plot_local(): variables.append((0, 2+shift))
if self.do_plot_local(): variables.append((0, 3+shift))
if self.do_plot_local(): variables.append((0, 4+shift))
if self.do_plot_local(): variables.append((0, 5+shift))
if self.do_plot_local(): variables.append((0, 6+shift))
if self.do_plot_intensity(): variables.append((0, 7+shift))
return variables
def getLogPlot(self):
logplot = []
if self.do_plot_intensity(): logplot.append((False,False))
for oe_n in range(1, self.NELEMENTS+2):
kind = self.getKind(oe_n)
if kind == 0: # FILTER
if self.do_plot_local(): logplot.append((False, True))
if self.do_plot_local(): logplot.append((False, True))
if self.do_plot_local(): logplot.append((False, False))
if self.do_plot_local(): logplot.append((False, False))
if self.do_plot_intensity(): logplot.append((False, False))
else: # MIRROR
if self.do_plot_local(): logplot.append((False, True))
if self.do_plot_local(): logplot.append((False, True))
if self.do_plot_local(): logplot.append((False, False))
if self.do_plot_local(): logplot.append((False, False))
if self.do_plot_local(): logplot.append((False, False))
if self.do_plot_intensity(): logplot.append((False, False))
return logplot
def xoppy_calc_xpower(self):
#
# prepare input for xpower_calc
# Note that the input for xpower_calc accepts any number of elements.
#
substance = [self.EL1_FOR,self.EL2_FOR,self.EL3_FOR,self.EL4_FOR,self.EL5_FOR]
thick = numpy.array( (self.EL1_THI,self.EL2_THI,self.EL3_THI,self.EL4_THI,self.EL5_THI))
angle = numpy.array( (self.EL1_ANG,self.EL2_ANG,self.EL3_ANG,self.EL4_ANG,self.EL5_ANG))
dens = [self.EL1_DEN,self.EL2_DEN,self.EL3_DEN,self.EL4_DEN,self.EL5_DEN]
roughness = numpy.array( (self.EL1_ROU,self.EL2_ROU,self.EL3_ROU,self.EL4_ROU,self.EL5_ROU))
flags = numpy.array( (self.EL1_FLAG,self.EL2_FLAG,self.EL3_FLAG,self.EL4_FLAG,self.EL5_FLAG))
substance = substance[0:self.NELEMENTS+1]
thick = thick[0:self.NELEMENTS+1]
angle = angle[0:self.NELEMENTS+1]
dens = dens[0:self.NELEMENTS+1]
roughness = roughness[0:self.NELEMENTS+1]
flags = flags[0:self.NELEMENTS+1]
if self.SOURCE == 0:
# energies = numpy.arange(0,500)
# elefactor = numpy.log10(10000.0 / 30.0) / 300.0
# energies = 10.0 * 10**(energies * elefactor)
# source = numpy.ones(energies.size)
# tmp = numpy.vstack( (energies,source))
if self.input_spectrum is None:
raise Exception("No input beam")
else:
energies = self.input_spectrum[0,:].copy()
source = self.input_spectrum[1,:].copy()
elif self.SOURCE == 1:
energies = numpy.linspace(self.ENER_MIN,self.ENER_MAX,self.ENER_N)
source = numpy.ones(energies.size)
tmp = numpy.vstack( (energies,source))
self.input_spectrum = source
elif self.SOURCE == 2:
if self.SOURCE == 2: source_file = self.SOURCE_FILE
# if self.SOURCE == 3: source_file = "SRCOMPE"
# if self.SOURCE == 4: source_file = "SRCOMPF"
try:
tmp = numpy.loadtxt(source_file)
energies = tmp[:,0]
source = tmp[:,1]
self.input_spectrum = source
except:
print("Error loading file %s "%(source_file))
raise
if self.FILE_DUMP == 0:
output_file = None
else:
output_file = "power.spec"
out_dictionary = xpower_calc(energies=energies,source=source,substance=substance,
flags=flags,dens=dens,thick=thick,angle=angle,roughness=roughness,output_file=output_file)
try:
print(out_dictionary["info"])
except:
pass
#send exchange
calculated_data = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())
try:
calculated_data.add_content("xoppy_data", out_dictionary["data"].T)
calculated_data.add_content("plot_x_col",0)
calculated_data.add_content("plot_y_col",-1)
except:
pass
try:
# print(out_dictionary["labels"])
calculated_data.add_content("labels", out_dictionary["labels"])
except:
pass
try:
calculated_data.add_content("info",out_dictionary["info"])
except:
pass
return calculated_data
if __name__ == "__main__":
    # Manual test driver: feed the widget fake exchange data and show it.
    from oasys.widgets.exchange import DataExchangeObject
    input_data_type = "POWER3D"
    if input_data_type == "POWER":
        # Create fake UNDULATOR_FLUX-style xoppy exchange data.
        e = numpy.linspace(1000.0, 10000.0, 100)
        source = e / 10
        received_data = DataExchangeObject("XOPPY", "POWER")
        received_data.add_content("xoppy_data", numpy.vstack((e, e, source)).T)
        received_data.add_content("xoppy_code", "US")
    elif input_data_type == "POWER3D":
        # Create undulator-radiation xoppy exchange data.
        from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation
        e, h, v, p, code = xoppy_calc_undulator_radiation(
            ELECTRONENERGY=6.04, ELECTRONENERGYSPREAD=0.001, ELECTRONCURRENT=0.2,
            ELECTRONBEAMSIZEH=0.000395, ELECTRONBEAMSIZEV=9.9e-06,
            ELECTRONBEAMDIVERGENCEH=1.05e-05, ELECTRONBEAMDIVERGENCEV=3.9e-06,
            PERIODID=0.018, NPERIODS=222, KV=1.68, DISTANCE=30.0,
            SETRESONANCE=0, HARMONICNUMBER=1,
            GAPH=0.001, GAPV=0.001,
            HSLITPOINTS=41, VSLITPOINTS=41, METHOD=0,
            PHOTONENERGYMIN=7000, PHOTONENERGYMAX=8100, PHOTONENERGYPOINTS=20,
            USEEMITTANCES=1)
        # Bug fix: a DataExchangeObject("XOPPY", "POWER3D") was created here
        # and immediately overwritten; only the "UNDULATOR_RADIATION" one
        # was ever used, so the dead assignment has been removed.
        received_data = DataExchangeObject("XOPPY", "UNDULATOR_RADIATION")
        received_data.add_content("xoppy_data", [p, e, h, v])
        received_data.add_content("xoppy_code", code)
    app = QApplication(sys.argv)
    w = OWxpower()
    w.acceptExchangeData(received_data)
    w.show()
    app.exec()
    w.saveSettings()
| [
"oasys.widgets.gui.widgetBox",
"oasys.widgets.congruence.checkNumber",
"oasys.widgets.congruence.checkEmptyString",
"numpy.array",
"oasys.widgets.congruence.checkStrictlyPositiveNumber",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QSizePolicy",
"orangewidget.settings.Setting",
"oasys.widgets.co... | [((908, 918), 'orangewidget.settings.Setting', 'Setting', (['(2)'], {}), '(2)\n', (915, 918), False, 'from orangewidget.settings import Setting\n'), ((934, 949), 'orangewidget.settings.Setting', 'Setting', (['(1000.0)'], {}), '(1000.0)\n', (941, 949), False, 'from orangewidget.settings import Setting\n'), ((965, 981), 'orangewidget.settings.Setting', 'Setting', (['(50000.0)'], {}), '(50000.0)\n', (972, 981), False, 'from orangewidget.settings import Setting\n'), ((995, 1007), 'orangewidget.settings.Setting', 'Setting', (['(100)'], {}), '(100)\n', (1002, 1007), False, 'from orangewidget.settings import Setting\n'), ((1026, 1038), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1033, 1038), False, 'from orangewidget.settings import Setting\n'), ((1055, 1065), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1062, 1065), False, 'from orangewidget.settings import Setting\n'), ((1080, 1093), 'orangewidget.settings.Setting', 'Setting', (['"""Be"""'], {}), "('Be')\n", (1087, 1093), False, 'from orangewidget.settings import Setting\n'), ((1109, 1119), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1116, 1119), False, 'from orangewidget.settings import Setting\n'), ((1134, 1146), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1141, 1146), False, 'from orangewidget.settings import Setting\n'), ((1161, 1173), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1168, 1173), False, 'from orangewidget.settings import Setting\n'), ((1188, 1200), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1195, 1200), False, 'from orangewidget.settings import Setting\n'), ((1215, 1227), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1222, 1227), False, 'from orangewidget.settings import Setting\n'), ((1242, 1255), 'orangewidget.settings.Setting', 'Setting', (['"""Rh"""'], {}), "('Rh')\n", (1249, 1255), 
False, 'from orangewidget.settings import Setting\n'), ((1271, 1281), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1278, 1281), False, 'from orangewidget.settings import Setting\n'), ((1296, 1308), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1303, 1308), False, 'from orangewidget.settings import Setting\n'), ((1323, 1335), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1330, 1335), False, 'from orangewidget.settings import Setting\n'), ((1350, 1362), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1357, 1362), False, 'from orangewidget.settings import Setting\n'), ((1377, 1389), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1384, 1389), False, 'from orangewidget.settings import Setting\n'), ((1404, 1417), 'orangewidget.settings.Setting', 'Setting', (['"""Al"""'], {}), "('Al')\n", (1411, 1417), False, 'from orangewidget.settings import Setting\n'), ((1433, 1443), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1440, 1443), False, 'from orangewidget.settings import Setting\n'), ((1458, 1470), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1465, 1470), False, 'from orangewidget.settings import Setting\n'), ((1485, 1497), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1492, 1497), False, 'from orangewidget.settings import Setting\n'), ((1512, 1524), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1519, 1524), False, 'from orangewidget.settings import Setting\n'), ((1539, 1551), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1546, 1551), False, 'from orangewidget.settings import Setting\n'), ((1566, 1578), 'orangewidget.settings.Setting', 'Setting', (['"""B"""'], {}), "('B')\n", (1573, 1578), False, 'from orangewidget.settings import Setting\n'), ((1594, 1604), 'orangewidget.settings.Setting', 'Setting', (['(0)'], 
{}), '(0)\n', (1601, 1604), False, 'from orangewidget.settings import Setting\n'), ((1619, 1631), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1626, 1631), False, 'from orangewidget.settings import Setting\n'), ((1646, 1658), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1653, 1658), False, 'from orangewidget.settings import Setting\n'), ((1673, 1685), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1680, 1685), False, 'from orangewidget.settings import Setting\n'), ((1700, 1712), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1707, 1712), False, 'from orangewidget.settings import Setting\n'), ((1727, 1740), 'orangewidget.settings.Setting', 'Setting', (['"""Pt"""'], {}), "('Pt')\n", (1734, 1740), False, 'from orangewidget.settings import Setting\n'), ((1756, 1766), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1763, 1766), False, 'from orangewidget.settings import Setting\n'), ((1781, 1793), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1788, 1793), False, 'from orangewidget.settings import Setting\n'), ((1808, 1820), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1815, 1820), False, 'from orangewidget.settings import Setting\n'), ((1835, 1847), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1842, 1847), False, 'from orangewidget.settings import Setting\n'), ((1862, 1874), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1869, 1874), False, 'from orangewidget.settings import Setting\n'), ((1891, 1901), 'orangewidget.settings.Setting', 'Setting', (['(2)'], {}), '(2)\n', (1898, 1901), False, 'from orangewidget.settings import Setting\n'), ((39714, 39736), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (39726, 39736), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), 
((2196, 2329), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', "(self.name + ' Input Parameters')"], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 10)'}), "(self.controlArea, self.name + ' Input Parameters',\n orientation='vertical', width=self.CONTROL_AREA_WIDTH - 10)\n", (2214, 2329), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((2402, 2420), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2415, 2420), False, 'from orangewidget import gui\n'), ((2849, 2867), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2862, 2867), False, 'from orangewidget import gui\n'), ((3213, 3231), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3226, 3231), False, 'from orangewidget import gui\n'), ((3577, 3595), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3590, 3595), False, 'from orangewidget import gui\n'), ((3934, 3952), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3947, 3952), False, 'from orangewidget import gui\n'), ((3974, 4059), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['box1', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""', 'height': '(25)'}), "(box1, '', addSpace=False, orientation='horizontal',\n height=25)\n", (3992, 4059), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((4331, 4349), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4344, 4349), False, 'from orangewidget import gui\n'), ((4732, 4750), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4745, 4750), False, 'from orangewidget import gui\n'), ((4760, 4789), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (4773, 4789), False, 'from orangewidget import gui\n'), ((5068, 5086), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5081, 
5086), False, 'from orangewidget import gui\n'), ((5471, 5489), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5484, 5489), False, 'from orangewidget import gui\n'), ((5835, 5853), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5848, 5853), False, 'from orangewidget import gui\n'), ((6199, 6217), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6212, 6217), False, 'from orangewidget import gui\n'), ((6563, 6581), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6576, 6581), False, 'from orangewidget import gui\n'), ((6850, 6868), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6863, 6868), False, 'from orangewidget import gui\n'), ((6878, 6907), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (6891, 6907), False, 'from orangewidget import gui\n'), ((7186, 7204), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7199, 7204), False, 'from orangewidget import gui\n'), ((7589, 7607), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7602, 7607), False, 'from orangewidget import gui\n'), ((7953, 7971), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7966, 7971), False, 'from orangewidget import gui\n'), ((8317, 8335), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (8330, 8335), False, 'from orangewidget import gui\n'), ((8681, 8699), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (8694, 8699), False, 'from orangewidget import gui\n'), ((8978, 8996), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (8991, 8996), False, 'from orangewidget import gui\n'), ((9006, 9035), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (9019, 9035), False, 'from orangewidget import gui\n'), ((9314, 9332), 
'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (9327, 9332), False, 'from orangewidget import gui\n'), ((9717, 9735), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (9730, 9735), False, 'from orangewidget import gui\n'), ((10081, 10099), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10094, 10099), False, 'from orangewidget import gui\n'), ((10445, 10463), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10458, 10463), False, 'from orangewidget import gui\n'), ((10809, 10827), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10822, 10827), False, 'from orangewidget import gui\n'), ((11106, 11124), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11119, 11124), False, 'from orangewidget import gui\n'), ((11134, 11163), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (11147, 11163), False, 'from orangewidget import gui\n'), ((11442, 11460), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11455, 11460), False, 'from orangewidget import gui\n'), ((11845, 11863), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11858, 11863), False, 'from orangewidget import gui\n'), ((12209, 12227), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (12222, 12227), False, 'from orangewidget import gui\n'), ((12573, 12591), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (12586, 12591), False, 'from orangewidget import gui\n'), ((12937, 12955), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (12950, 12955), False, 'from orangewidget import gui\n'), ((13234, 13252), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (13247, 13252), False, 'from orangewidget import gui\n'), ((13261, 13290), 'orangewidget.gui.separator', 'gui.separator', 
(['box1'], {'height': '(7)'}), '(box1, height=7)\n', (13274, 13290), False, 'from orangewidget import gui\n'), ((13569, 13587), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (13582, 13587), False, 'from orangewidget import gui\n'), ((13972, 13990), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (13985, 13990), False, 'from orangewidget import gui\n'), ((14336, 14354), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (14349, 14354), False, 'from orangewidget import gui\n'), ((14700, 14718), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (14713, 14718), False, 'from orangewidget import gui\n'), ((15064, 15082), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (15077, 15082), False, 'from orangewidget import gui\n'), ((15350, 15368), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (15363, 15368), False, 'from orangewidget import gui\n'), ((15377, 15406), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (15390, 15406), False, 'from orangewidget import gui\n'), ((15815, 15833), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (15828, 15833), False, 'from orangewidget import gui\n'), ((15842, 15871), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (15855, 15871), False, 'from orangewidget import gui\n'), ((34839, 34927), 'numpy.array', 'numpy.array', (['(self.EL1_THI, self.EL2_THI, self.EL3_THI, self.EL4_THI, self.EL5_THI)'], {}), '((self.EL1_THI, self.EL2_THI, self.EL3_THI, self.EL4_THI, self.\n EL5_THI))\n', (34850, 34927), False, 'import numpy\n'), ((34940, 35028), 'numpy.array', 'numpy.array', (['(self.EL1_ANG, self.EL2_ANG, self.EL3_ANG, self.EL4_ANG, self.EL5_ANG)'], {}), '((self.EL1_ANG, self.EL2_ANG, self.EL3_ANG, self.EL4_ANG, self.\n EL5_ANG))\n', (34951, 35028), False, 'import 
numpy\n'), ((35128, 35216), 'numpy.array', 'numpy.array', (['(self.EL1_ROU, self.EL2_ROU, self.EL3_ROU, self.EL4_ROU, self.EL5_ROU)'], {}), '((self.EL1_ROU, self.EL2_ROU, self.EL3_ROU, self.EL4_ROU, self.\n EL5_ROU))\n', (35139, 35216), False, 'import numpy\n'), ((35229, 35321), 'numpy.array', 'numpy.array', (['(self.EL1_FLAG, self.EL2_FLAG, self.EL3_FLAG, self.EL4_FLAG, self.EL5_FLAG)'], {}), '((self.EL1_FLAG, self.EL2_FLAG, self.EL3_FLAG, self.EL4_FLAG,\n self.EL5_FLAG))\n', (35240, 35321), False, 'import numpy\n'), ((36997, 37168), 'orangecontrib.xoppy.util.xoppy_xraylib_util.xpower_calc', 'xpower_calc', ([], {'energies': 'energies', 'source': 'source', 'substance': 'substance', 'flags': 'flags', 'dens': 'dens', 'thick': 'thick', 'angle': 'angle', 'roughness': 'roughness', 'output_file': 'output_file'}), '(energies=energies, source=source, substance=substance, flags=\n flags, dens=dens, thick=thick, angle=angle, roughness=roughness,\n output_file=output_file)\n', (37008, 37168), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc\n'), ((38181, 38217), 'numpy.linspace', 'numpy.linspace', (['(1000.0)', '(10000.0)', '(100)'], {}), '(1000.0, 10000.0, 100)\n', (38195, 38217), False, 'import numpy\n'), ((38264, 38300), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""POWER"""'], {}), "('XOPPY', 'POWER')\n", (38282, 38300), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((1989, 2060), 'PyQt5.QtWidgets.QSizePolicy', 'QSizePolicy', (['QSizePolicy.MinimumExpanding', 'QSizePolicy.MinimumExpanding'], {}), '(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n', (2000, 2060), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), ((18823, 18929), 'oasys.widgets.gui.selectFileFromDialog', 'oasysgui.selectFileFromDialog', (['self', 'self.SOURCE_FILE', '"""Open Source File"""'], {'file_extension_filter': '"""*.*"""'}), "(self, self.SOURCE_FILE, 'Open Source 
File',\n file_extension_filter='*.*')\n", (18852, 18929), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23293, 23353), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.ENER_MIN', '"""Energy from"""'], {}), "(self.ENER_MIN, 'Energy from')\n", (23323, 23353), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23382, 23448), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.ENER_MAX', '"""Energy to"""'], {}), "(self.ENER_MAX, 'Energy to')\n", (23420, 23448), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23461, 23547), 'oasys.widgets.congruence.checkLessThan', 'congruence.checkLessThan', (['self.ENER_MIN', 'self.ENER_MAX', '"""Energy from"""', '"""Energy to"""'], {}), "(self.ENER_MIN, self.ENER_MAX, 'Energy from',\n 'Energy to')\n", (23485, 23547), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23571, 23639), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.ENER_N', '"""Energy Points"""'], {}), "(self.ENER_N, 'Energy Points')\n", (23609, 23639), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23782, 23841), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL1_FOR', '"""1st oe formula"""'], {}), "(self.EL1_FOR, '1st oe formula')\n", (23809, 23841), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((24519, 24578), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL2_FOR', '"""2nd oe formula"""'], {}), "(self.EL2_FOR, '2nd oe formula')\n", (24546, 24578), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25256, 25315), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL3_FOR', '"""3rd oe formula"""'], {}), "(self.EL3_FOR, '3rd oe formula')\n", (25283, 25315), False, 'from 
oasys.widgets import gui as oasysgui, congruence\n'), ((25993, 26052), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL4_FOR', '"""4th oe formula"""'], {}), "(self.EL4_FOR, '4th oe formula')\n", (26020, 26052), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26731, 26790), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL5_FOR', '"""5th oe formula"""'], {}), "(self.EL5_FOR, '5th oe formula')\n", (26758, 26790), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((38650, 39135), 'orangecontrib.xoppy.util.xoppy_undulators.xoppy_calc_undulator_radiation', 'xoppy_calc_undulator_radiation', ([], {'ELECTRONENERGY': '(6.04)', 'ELECTRONENERGYSPREAD': '(0.001)', 'ELECTRONCURRENT': '(0.2)', 'ELECTRONBEAMSIZEH': '(0.000395)', 'ELECTRONBEAMSIZEV': '(9.9e-06)', 'ELECTRONBEAMDIVERGENCEH': '(1.05e-05)', 'ELECTRONBEAMDIVERGENCEV': '(3.9e-06)', 'PERIODID': '(0.018)', 'NPERIODS': '(222)', 'KV': '(1.68)', 'DISTANCE': '(30.0)', 'SETRESONANCE': '(0)', 'HARMONICNUMBER': '(1)', 'GAPH': '(0.001)', 'GAPV': '(0.001)', 'HSLITPOINTS': '(41)', 'VSLITPOINTS': '(41)', 'METHOD': '(0)', 'PHOTONENERGYMIN': '(7000)', 'PHOTONENERGYMAX': '(8100)', 'PHOTONENERGYPOINTS': '(20)', 'USEEMITTANCES': '(1)'}), '(ELECTRONENERGY=6.04, ELECTRONENERGYSPREAD=\n 0.001, ELECTRONCURRENT=0.2, ELECTRONBEAMSIZEH=0.000395,\n ELECTRONBEAMSIZEV=9.9e-06, ELECTRONBEAMDIVERGENCEH=1.05e-05,\n ELECTRONBEAMDIVERGENCEV=3.9e-06, PERIODID=0.018, NPERIODS=222, KV=1.68,\n DISTANCE=30.0, SETRESONANCE=0, HARMONICNUMBER=1, GAPH=0.001, GAPV=0.001,\n HSLITPOINTS=41, VSLITPOINTS=41, METHOD=0, PHOTONENERGYMIN=7000,\n PHOTONENERGYMAX=8100, PHOTONENERGYPOINTS=20, USEEMITTANCES=1)\n', (38680, 39135), False, 'from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation\n'), ((39470, 39508), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""POWER3D"""'], {}), "('XOPPY', 
'POWER3D')\n", (39488, 39508), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((39533, 39583), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""UNDULATOR_RADIATION"""'], {}), "('XOPPY', 'UNDULATOR_RADIATION')\n", (39551, 39583), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((3033, 3051), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (3049, 3051), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((3397, 3415), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (3413, 3415), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((3757, 3772), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', ([], {}), '()\n', (3770, 3772), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((5654, 5672), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (5670, 5672), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((6018, 6036), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (6034, 6036), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((6382, 6400), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (6398, 6400), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((7772, 7790), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (7788, 7790), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((8136, 8154), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (8152, 8154), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((8500, 8518), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (8516, 8518), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((9900, 9918), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (9916, 9918), False, 'from PyQt5.QtGui import 
QIntValidator, QDoubleValidator\n'), ((10264, 10282), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (10280, 10282), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((10628, 10646), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (10644, 10646), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((12028, 12046), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (12044, 12046), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((12392, 12410), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (12408, 12410), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((12756, 12774), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (12772, 12774), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((14155, 14173), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (14171, 14173), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((14519, 14537), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (14535, 14537), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((14883, 14901), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (14899, 14901), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((23683, 23721), 'oasys.widgets.congruence.checkFile', 'congruence.checkFile', (['self.SOURCE_FILE'], {}), '(self.SOURCE_FILE)\n', (23703, 23721), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23918, 23997), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL1_THI', '"""1st oe filter thickness"""'], {}), "(self.EL1_THI, '1st oe filter thickness')\n", (23956, 23997), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((24655, 24734), 
'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL2_THI', '"""2nd oe filter thickness"""'], {}), "(self.EL2_THI, '2nd oe filter thickness')\n", (24693, 24734), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25392, 25471), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL3_THI', '"""3rd oe filter thickness"""'], {}), "(self.EL3_THI, '3rd oe filter thickness')\n", (25430, 25471), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26129, 26208), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL4_THI', '"""4th oe filter thickness"""'], {}), "(self.EL4_THI, '4th oe filter thickness')\n", (26167, 26208), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26867, 26946), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL5_THI', '"""5th oe filter thickness"""'], {}), "(self.EL5_THI, '5th oe filter thickness')\n", (26905, 26946), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((36162, 36219), 'numpy.linspace', 'numpy.linspace', (['self.ENER_MIN', 'self.ENER_MAX', 'self.ENER_N'], {}), '(self.ENER_MIN, self.ENER_MAX, self.ENER_N)\n', (36176, 36219), False, 'import numpy\n'), ((36239, 36264), 'numpy.ones', 'numpy.ones', (['energies.size'], {}), '(energies.size)\n', (36249, 36264), False, 'import numpy\n'), ((36283, 36315), 'numpy.vstack', 'numpy.vstack', (['(energies, source)'], {}), '((energies, source))\n', (36295, 36315), False, 'import numpy\n'), ((38349, 38377), 'numpy.vstack', 'numpy.vstack', (['(e, e, source)'], {}), '((e, e, source))\n', (38361, 38377), False, 'import numpy\n'), ((24075, 24150), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL1_ANG', '"""1st oe mirror angle"""'], {}), "(self.EL1_ANG, '1st 
oe mirror angle')\n", (24113, 24150), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((24182, 24253), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL1_ROU', '"""1st oe mirror roughness"""'], {}), "(self.EL1_ROU, '1st oe mirror roughness')\n", (24212, 24253), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((24812, 24887), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL2_ANG', '"""2nd oe mirror angle"""'], {}), "(self.EL2_ANG, '2nd oe mirror angle')\n", (24850, 24887), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((24919, 24990), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL2_ROU', '"""2nd oe mirror roughness"""'], {}), "(self.EL2_ROU, '2nd oe mirror roughness')\n", (24949, 24990), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25549, 25624), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL3_ANG', '"""3rd oe mirror angle"""'], {}), "(self.EL3_ANG, '3rd oe mirror angle')\n", (25587, 25624), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25656, 25727), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL3_ROU', '"""3rd oe mirror roughness"""'], {}), "(self.EL3_ROU, '3rd oe mirror roughness')\n", (25686, 25727), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26286, 26361), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL4_ANG', '"""4th oe mirror angle"""'], {}), "(self.EL4_ANG, '4th oe mirror angle')\n", (26324, 26361), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26393, 26464), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL4_ROU', '"""4th oe mirror roughness"""'], 
{}), "(self.EL4_ROU, '4th oe mirror roughness')\n", (26423, 26464), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((27024, 27099), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL5_ANG', '"""5th oe mirror angle"""'], {}), "(self.EL5_ANG, '5th oe mirror angle')\n", (27062, 27099), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((27131, 27202), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL5_ROU', '"""5th oe mirror roughness"""'], {}), "(self.EL5_ROU, '5th oe mirror roughness')\n", (27161, 27202), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((22866, 22921), 'numpy.vstack', 'numpy.vstack', (['(spectrum[:, 0], spectrum[:, index_flux])'], {}), '((spectrum[:, 0], spectrum[:, index_flux]))\n', (22878, 22921), False, 'import numpy\n'), ((36609, 36635), 'numpy.loadtxt', 'numpy.loadtxt', (['source_file'], {}), '(source_file)\n', (36622, 36635), False, 'import numpy\n'), ((24383, 24437), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL1_DEN', '"""1st oe density"""'], {}), "(self.EL1_DEN, '1st oe density')\n", (24405, 24437), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25120, 25174), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL2_DEN', '"""2nd oe density"""'], {}), "(self.EL2_DEN, '2nd oe density')\n", (25142, 25174), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((25857, 25911), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL3_DEN', '"""3rd oe density"""'], {}), "(self.EL3_DEN, '3rd oe density')\n", (25879, 25911), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((26595, 26649), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL4_DEN', '"""4th oe density"""'], {}), "(self.EL4_DEN, '4th oe density')\n", (26617, 26649), False, 'from 
oasys.widgets import gui as oasysgui, congruence\n'), ((27332, 27386), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL5_DEN', '"""5th oe density"""'], {}), "(self.EL5_DEN, '5th oe density')\n", (27354, 27386), False, 'from oasys.widgets import gui as oasysgui, congruence\n')] |
'''
Tests of parameter_plots.py module

Each test is a smoke test: it calls a plotting function from
ogusa.parameter_plots and asserts either that a figure object is
returned, or that an image file was written to disk when a save path
(the pytest tmpdir fixture) is supplied.
'''
import pytest
import os
import numpy as np
import scipy.interpolate as si
import matplotlib.image as mpimg
from ogusa import utils, parameter_plots, income
# Load baseline model parameters once; shared as a fixture by all tests
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
base_params = utils.safe_read_pickle(
    os.path.join(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl'))
def test_plot_imm_rates():
    """Smoke test: plot_imm_rates returns a figure object."""
    result = parameter_plots.plot_imm_rates(base_params, include_title=True)
    assert result
def test_plot_imm_rates_save_fig(tmpdir):
    """Smoke test: plot_imm_rates writes a readable PNG to the given path."""
    parameter_plots.plot_imm_rates(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'imm_rates_orig.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_mort_rates():
    """Smoke test: plot_mort_rates returns a figure object."""
    result = parameter_plots.plot_mort_rates(base_params, include_title=True)
    assert result
def test_plot_mort_rates_save_fig(tmpdir):
    """Smoke test: plot_mort_rates writes a readable PNG to the given path."""
    parameter_plots.plot_mort_rates(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'mortality_rates.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_pop_growth():
    """Smoke test: plot_pop_growth returns a figure object."""
    result = parameter_plots.plot_pop_growth(base_params, include_title=True)
    assert result
def test_plot_pop_growth_rates_save_fig(tmpdir):
    """Smoke test: plot_pop_growth writes a readable PNG to the given path."""
    parameter_plots.plot_pop_growth(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'pop_growth_rates.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_ability_profiles():
    """Smoke test: plot_ability_profiles returns a figure object."""
    result = parameter_plots.plot_ability_profiles(
        base_params, include_title=True)
    assert result
def test_plot_ability_profiles_save_fig(tmpdir):
    """Smoke test: plot_ability_profiles writes a readable PNG to the path."""
    parameter_plots.plot_ability_profiles(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'ability_profiles.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_elliptical_u():
    """Smoke test: plot_elliptical_u returns a figure for both the
    marginal-utility and utility variants."""
    fig_mu = parameter_plots.plot_elliptical_u(
        base_params, include_title=True)
    assert fig_mu
    fig_u = parameter_plots.plot_elliptical_u(
        base_params, plot_MU=False, include_title=True)
    assert fig_u
def test_plot_elliptical_u_save_fig(tmpdir):
    """Smoke test: plot_elliptical_u writes a readable PNG to the path."""
    parameter_plots.plot_elliptical_u(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'ellipse_v_CFE.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_chi_n():
    """Smoke test: plot_chi_n returns a figure object."""
    result = parameter_plots.plot_chi_n(base_params, include_title=True)
    assert result
def test_plot_chi_n_save_fig(tmpdir):
    """Smoke test: plot_chi_n writes a readable PNG to the given path."""
    parameter_plots.plot_chi_n(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'chi_n_values.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
@pytest.mark.parametrize(
    'years_to_plot', [['SS'], [2025], [2050, 2070]],
    ids=['SS', '2025', 'List of years'])
def test_plot_population(years_to_plot):
    """Smoke test: plot_population handles the SS label, a single year,
    and a list of years."""
    result = parameter_plots.plot_population(
        base_params, years_to_plot=years_to_plot, include_title=True)
    assert result
def test_plot_population_save_fig(tmpdir):
    """Smoke test: plot_population writes a readable PNG to the path."""
    parameter_plots.plot_population(base_params, path=tmpdir)
    saved = os.path.join(tmpdir, 'pop_distribution.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_fert_rates():
    """Smoke test: plot_fert_rates returns a figure object.

    Builds a cubic interpolant over binned fertility data (births per
    2000 women by age-bin midpoint) and plots it against random rates.
    """
    totpers = base_params.S
    min_yr, max_yr = 20, 100
    fert_data = np.array(
        [0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0,
         49.3, 10.4, 0.8, 0.0, 0.0]) / 2000
    age_midp = np.array(
        [9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56])
    fert_func = si.interp1d(age_midp, fert_data, kind='cubic')
    fert_rates = np.random.uniform(size=totpers)
    assert parameter_plots.plot_fert_rates(
        fert_func, age_midp, totpers, min_yr, max_yr, fert_data,
        fert_rates)
def test_plot_fert_rates_save_fig(tmpdir):
    """Smoke test: plot_fert_rates writes a readable PNG to the path."""
    totpers = base_params.S
    min_yr, max_yr = 20, 100
    fert_data = np.array(
        [0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0,
         49.3, 10.4, 0.8, 0.0, 0.0]) / 2000
    age_midp = np.array(
        [9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56])
    fert_func = si.interp1d(age_midp, fert_data, kind='cubic')
    fert_rates = np.random.uniform(size=totpers)
    parameter_plots.plot_fert_rates(
        fert_func, age_midp, totpers, min_yr, max_yr, fert_data,
        fert_rates, output_dir=tmpdir)
    saved = os.path.join(tmpdir, 'fert_rates.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_mort_rates_data():
    """Smoke test: plot_mort_rates_data returns a figure object.

    Uses the baseline rho vector for both the raw and the model-period
    mortality rates, with rho[0] as the infant mortality rate.
    """
    totpers = base_params.S - 1
    min_yr, max_yr = 21, 100
    age_year_all = np.arange(min_yr, max_yr)
    infmort_rate = base_params.rho[0]
    mort_rates = base_params.rho[1:]
    mort_rates_all = base_params.rho[1:]
    assert parameter_plots.plot_mort_rates_data(
        totpers, min_yr, max_yr, age_year_all, mort_rates_all,
        infmort_rate, mort_rates, output_dir=None)
def test_plot_mort_rates_data_save_fig(tmpdir):
    """Smoke test: plot_mort_rates_data writes a readable PNG to the path."""
    totpers = base_params.S - 1
    min_yr, max_yr = 21, 100
    age_year_all = np.arange(min_yr, max_yr)
    infmort_rate = base_params.rho[0]
    mort_rates = base_params.rho[1:]
    mort_rates_all = base_params.rho[1:]
    parameter_plots.plot_mort_rates_data(
        totpers, min_yr, max_yr, age_year_all, mort_rates_all,
        infmort_rate, mort_rates, output_dir=tmpdir)
    saved = os.path.join(tmpdir, 'mort_rates.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_omega_fixed():
    """Smoke test: plot_omega_fixed returns a figure object.

    The original and fixed steady-state distributions are both the
    baseline omega_SS, so the two series coincide.
    """
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    omega_SS_orig = base_params.omega_SS
    omega_SSfx = base_params.omega_SS
    assert parameter_plots.plot_omega_fixed(
        age_per_EpS, omega_SS_orig, omega_SSfx, E, S)
def test_plot_omega_fixed_save_fig(tmpdir):
    """Smoke test: plot_omega_fixed writes a readable PNG to the path."""
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    omega_SS_orig = base_params.omega_SS
    omega_SSfx = base_params.omega_SS
    parameter_plots.plot_omega_fixed(
        age_per_EpS, omega_SS_orig, omega_SSfx, E, S, output_dir=tmpdir)
    saved = os.path.join(tmpdir, 'OrigVsFixSSpop.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_imm_fixed():
    """Smoke test: plot_imm_fixed returns a figure object.

    Compares the first-period and last-period immigration rate vectors
    from the baseline parameters.
    """
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    imm_rates_orig = base_params.imm_rates[0, :]
    imm_rates_adj = base_params.imm_rates[-1, :]
    assert parameter_plots.plot_imm_fixed(
        age_per_EpS, imm_rates_orig, imm_rates_adj, E, S)
def test_plot_imm_fixed_save_fig(tmpdir):
    """Smoke test: plot_imm_fixed writes a readable PNG to the path."""
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    imm_rates_orig = base_params.imm_rates[0, :]
    imm_rates_adj = base_params.imm_rates[-1, :]
    parameter_plots.plot_imm_fixed(
        age_per_EpS, imm_rates_orig, imm_rates_adj, E, S,
        output_dir=tmpdir)
    saved = os.path.join(tmpdir, 'OrigVsAdjImm.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
def test_plot_population_path():
    """Smoke test: plot_population_path returns a figure object."""
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    pop_2013_pct = base_params.omega[0, :]
    omega_path_lev = base_params.omega.T
    omega_SSfx = base_params.omega_SS
    curr_year = base_params.start_year
    assert parameter_plots.plot_population_path(
        age_per_EpS, pop_2013_pct, omega_path_lev, omega_SSfx,
        curr_year, E, S)
def test_plot_population_path_save_fig(tmpdir):
    """Smoke test: plot_population_path writes a readable PNG to the path."""
    E, S = 0, base_params.S
    age_per_EpS = np.arange(21, S + 21)
    pop_2013_pct = base_params.omega[0, :]
    omega_path_lev = base_params.omega.T
    omega_SSfx = base_params.omega_SS
    curr_year = base_params.start_year
    parameter_plots.plot_population_path(
        age_per_EpS, pop_2013_pct, omega_path_lev, omega_SSfx,
        curr_year, E, S, output_dir=tmpdir)
    saved = os.path.join(tmpdir, 'PopDistPath.png')
    assert isinstance(mpimg.imread(saved), np.ndarray)
# TODO:
# gen_3Dscatters_hist -- requires microdata df
# txfunc_graph - require micro data df
# txfunc_sse_plot
def test_plot_income_data():
    """Smoke test: plot_income_data returns a figure object.

    Builds an 80-point age grid plus lifetime-income-group midpoints and
    population shares, generates the ability matrix, and plots it.
    """
    ages = np.linspace(20 + 0.5, 100 - 0.5, 80)
    abil_midp = np.array([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])
    abil_pcts = np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])
    # Uniform age weights summing to 1; derive the length from the age
    # grid so the two arrays cannot drift out of sync (was a hard-coded
    # 80 with a redundant `* 1`)
    age_wgts = np.ones(ages.size) / ages.size
    emat = income.get_e_orig(age_wgts, abil_pcts)
    fig = parameter_plots.plot_income_data(
        ages, abil_midp, abil_pcts, emat)
    assert fig
def test_plot_income_data_save_fig(tmpdir):
    """Smoke test: plot_income_data writes all three PNGs (3D level, 3D
    log, 2D log) to the given output directory and each is readable."""
    ages = np.linspace(20 + 0.5, 100 - 0.5, 80)
    abil_midp = np.array([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])
    abil_pcts = np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])
    # Uniform age weights summing to 1; derive the length from the age
    # grid so the two arrays cannot drift out of sync (was a hard-coded
    # 80 with a redundant `* 1`)
    age_wgts = np.ones(ages.size) / ages.size
    emat = income.get_e_orig(age_wgts, abil_pcts)
    parameter_plots.plot_income_data(
        ages, abil_midp, abil_pcts, emat, output_dir=tmpdir)
    for fname in ('ability_3D_lev.png', 'ability_3D_log.png',
                  'ability_2D_log.png'):
        img = mpimg.imread(os.path.join(tmpdir, fname))
        assert isinstance(img, np.ndarray)
| [
"ogusa.parameter_plots.plot_population",
"scipy.interpolate.interp1d",
"numpy.array",
"ogusa.parameter_plots.plot_omega_fixed",
"numpy.arange",
"numpy.linspace",
"ogusa.parameter_plots.plot_population_path",
"ogusa.parameter_plots.plot_elliptical_u",
"ogusa.parameter_plots.plot_imm_fixed",
"ogusa.... | [((2680, 2793), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""years_to_plot"""', "[['SS'], [2025], [2050, 2070]]"], {'ids': "['SS', '2025', 'List of years']"}), "('years_to_plot', [['SS'], [2025], [2050, 2070]],\n ids=['SS', '2025', 'List of years'])\n", (2703, 2793), False, 'import pytest\n'), ((267, 292), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'import os\n'), ((336, 403), 'os.path.join', 'os.path.join', (['CUR_PATH', '"""test_io_data"""', '"""model_params_baseline.pkl"""'], {}), "(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl')\n", (348, 403), False, 'import os\n'), ((444, 507), 'ogusa.parameter_plots.plot_imm_rates', 'parameter_plots.plot_imm_rates', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (474, 507), False, 'from ogusa import utils, parameter_plots, income\n'), ((580, 636), 'ogusa.parameter_plots.plot_imm_rates', 'parameter_plots.plot_imm_rates', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (610, 636), False, 'from ogusa import utils, parameter_plots, income\n'), ((797, 861), 'ogusa.parameter_plots.plot_mort_rates', 'parameter_plots.plot_mort_rates', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (828, 861), False, 'from ogusa import utils, parameter_plots, income\n'), ((935, 992), 'ogusa.parameter_plots.plot_mort_rates', 'parameter_plots.plot_mort_rates', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (966, 992), False, 'from ogusa import utils, parameter_plots, income\n'), ((1154, 1218), 'ogusa.parameter_plots.plot_pop_growth', 'parameter_plots.plot_pop_growth', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (1185, 1218), False, 'from ogusa import utils, parameter_plots, income\n'), ((1298, 1355), 'ogusa.parameter_plots.plot_pop_growth', 'parameter_plots.plot_pop_growth', (['base_params'], {'path': 
'tmpdir'}), '(base_params, path=tmpdir)\n', (1329, 1355), False, 'from ogusa import utils, parameter_plots, income\n'), ((1524, 1594), 'ogusa.parameter_plots.plot_ability_profiles', 'parameter_plots.plot_ability_profiles', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (1561, 1594), False, 'from ogusa import utils, parameter_plots, income\n'), ((1674, 1737), 'ogusa.parameter_plots.plot_ability_profiles', 'parameter_plots.plot_ability_profiles', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (1711, 1737), False, 'from ogusa import utils, parameter_plots, income\n'), ((1903, 1969), 'ogusa.parameter_plots.plot_elliptical_u', 'parameter_plots.plot_elliptical_u', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (1936, 1969), False, 'from ogusa import utils, parameter_plots, income\n'), ((1990, 2076), 'ogusa.parameter_plots.plot_elliptical_u', 'parameter_plots.plot_elliptical_u', (['base_params'], {'plot_MU': '(False)', 'include_title': '(True)'}), '(base_params, plot_MU=False, include_title\n =True)\n', (2023, 2076), False, 'from ogusa import utils, parameter_plots, income\n'), ((2164, 2223), 'ogusa.parameter_plots.plot_elliptical_u', 'parameter_plots.plot_elliptical_u', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (2197, 2223), False, 'from ogusa import utils, parameter_plots, income\n'), ((2378, 2437), 'ogusa.parameter_plots.plot_chi_n', 'parameter_plots.plot_chi_n', (['base_params'], {'include_title': '(True)'}), '(base_params, include_title=True)\n', (2404, 2437), False, 'from ogusa import utils, parameter_plots, income\n'), ((2506, 2558), 'ogusa.parameter_plots.plot_chi_n', 'parameter_plots.plot_chi_n', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (2532, 2558), False, 'from ogusa import utils, parameter_plots, income\n'), ((2850, 2947), 'ogusa.parameter_plots.plot_population', 'parameter_plots.plot_population', 
(['base_params'], {'years_to_plot': 'years_to_plot', 'include_title': '(True)'}), '(base_params, years_to_plot=years_to_plot,\n include_title=True)\n', (2881, 2947), False, 'from ogusa import utils, parameter_plots, income\n'), ((3017, 3074), 'ogusa.parameter_plots.plot_population', 'parameter_plots.plot_population', (['base_params'], {'path': 'tmpdir'}), '(base_params, path=tmpdir)\n', (3048, 3074), False, 'from ogusa import utils, parameter_plots, income\n'), ((3439, 3502), 'numpy.array', 'np.array', (['[9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56]'], {}), '([9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56])\n', (3447, 3502), True, 'import numpy as np\n'), ((3544, 3590), 'scipy.interpolate.interp1d', 'si.interp1d', (['age_midp', 'fert_data'], {'kind': '"""cubic"""'}), "(age_midp, fert_data, kind='cubic')\n", (3555, 3590), True, 'import scipy.interpolate as si\n'), ((3608, 3639), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'totpers'}), '(size=totpers)\n', (3625, 3639), True, 'import numpy as np\n'), ((3650, 3754), 'ogusa.parameter_plots.plot_fert_rates', 'parameter_plots.plot_fert_rates', (['fert_func', 'age_midp', 'totpers', 'min_yr', 'max_yr', 'fert_data', 'fert_rates'], {}), '(fert_func, age_midp, totpers, min_yr,\n max_yr, fert_data, fert_rates)\n', (3681, 3754), False, 'from ogusa import utils, parameter_plots, income\n'), ((4040, 4103), 'numpy.array', 'np.array', (['[9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56]'], {}), '([9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47, 55, 56])\n', (4048, 4103), True, 'import numpy as np\n'), ((4145, 4191), 'scipy.interpolate.interp1d', 'si.interp1d', (['age_midp', 'fert_data'], {'kind': '"""cubic"""'}), "(age_midp, fert_data, kind='cubic')\n", (4156, 4191), True, 'import scipy.interpolate as si\n'), ((4209, 4240), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'totpers'}), '(size=totpers)\n', (4226, 4240), True, 'import numpy as np\n'), ((4245, 4368), 
'ogusa.parameter_plots.plot_fert_rates', 'parameter_plots.plot_fert_rates', (['fert_func', 'age_midp', 'totpers', 'min_yr', 'max_yr', 'fert_data', 'fert_rates'], {'output_dir': 'tmpdir'}), '(fert_func, age_midp, totpers, min_yr,\n max_yr, fert_data, fert_rates, output_dir=tmpdir)\n', (4276, 4368), False, 'from ogusa import utils, parameter_plots, income\n'), ((4604, 4629), 'numpy.arange', 'np.arange', (['min_yr', 'max_yr'], {}), '(min_yr, max_yr)\n', (4613, 4629), True, 'import numpy as np\n'), ((4756, 4894), 'ogusa.parameter_plots.plot_mort_rates_data', 'parameter_plots.plot_mort_rates_data', (['totpers', 'min_yr', 'max_yr', 'age_year_all', 'mort_rates_all', 'infmort_rate', 'mort_rates'], {'output_dir': 'None'}), '(totpers, min_yr, max_yr, age_year_all,\n mort_rates_all, infmort_rate, mort_rates, output_dir=None)\n', (4792, 4894), False, 'from ogusa import utils, parameter_plots, income\n'), ((5057, 5082), 'numpy.arange', 'np.arange', (['min_yr', 'max_yr'], {}), '(min_yr, max_yr)\n', (5066, 5082), True, 'import numpy as np\n'), ((5203, 5343), 'ogusa.parameter_plots.plot_mort_rates_data', 'parameter_plots.plot_mort_rates_data', (['totpers', 'min_yr', 'max_yr', 'age_year_all', 'mort_rates_all', 'infmort_rate', 'mort_rates'], {'output_dir': 'tmpdir'}), '(totpers, min_yr, max_yr, age_year_all,\n mort_rates_all, infmort_rate, mort_rates, output_dir=tmpdir)\n', (5239, 5343), False, 'from ogusa import utils, parameter_plots, income\n'), ((5541, 5562), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (5550, 5562), True, 'import numpy as np\n'), ((5652, 5730), 'ogusa.parameter_plots.plot_omega_fixed', 'parameter_plots.plot_omega_fixed', (['age_per_EpS', 'omega_SS_orig', 'omega_SSfx', 'E', 'S'], {}), '(age_per_EpS, omega_SS_orig, omega_SSfx, E, S)\n', (5684, 5730), False, 'from ogusa import utils, parameter_plots, income\n'), ((5851, 5872), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (5860, 5872), True, 'import numpy 
as np\n'), ((5956, 6057), 'ogusa.parameter_plots.plot_omega_fixed', 'parameter_plots.plot_omega_fixed', (['age_per_EpS', 'omega_SS_orig', 'omega_SSfx', 'E', 'S'], {'output_dir': 'tmpdir'}), '(age_per_EpS, omega_SS_orig, omega_SSfx, E,\n S, output_dir=tmpdir)\n', (5988, 6057), False, 'from ogusa import utils, parameter_plots, income\n'), ((6249, 6270), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (6258, 6270), True, 'import numpy as np\n'), ((6379, 6464), 'ogusa.parameter_plots.plot_imm_fixed', 'parameter_plots.plot_imm_fixed', (['age_per_EpS', 'imm_rates_orig', 'imm_rates_adj', 'E', 'S'], {}), '(age_per_EpS, imm_rates_orig, imm_rates_adj, E, S\n )\n', (6409, 6464), False, 'from ogusa import utils, parameter_plots, income\n'), ((6578, 6599), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (6587, 6599), True, 'import numpy as np\n'), ((6702, 6805), 'ogusa.parameter_plots.plot_imm_fixed', 'parameter_plots.plot_imm_fixed', (['age_per_EpS', 'imm_rates_orig', 'imm_rates_adj', 'E', 'S'], {'output_dir': 'tmpdir'}), '(age_per_EpS, imm_rates_orig, imm_rates_adj,\n E, S, output_dir=tmpdir)\n', (6732, 6805), False, 'from ogusa import utils, parameter_plots, income\n'), ((7009, 7030), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (7018, 7030), True, 'import numpy as np\n'), ((7202, 7314), 'ogusa.parameter_plots.plot_population_path', 'parameter_plots.plot_population_path', (['age_per_EpS', 'pop_2013_pct', 'omega_path_lev', 'omega_SSfx', 'curr_year', 'E', 'S'], {}), '(age_per_EpS, pop_2013_pct,\n omega_path_lev, omega_SSfx, curr_year, E, S)\n', (7238, 7314), False, 'from ogusa import utils, parameter_plots, income\n'), ((7443, 7464), 'numpy.arange', 'np.arange', (['(21)', '(S + 21)'], {}), '(21, S + 21)\n', (7452, 7464), True, 'import numpy as np\n'), ((7630, 7761), 'ogusa.parameter_plots.plot_population_path', 'parameter_plots.plot_population_path', (['age_per_EpS', 'pop_2013_pct', 
'omega_path_lev', 'omega_SSfx', 'curr_year', 'E', 'S'], {'output_dir': 'tmpdir'}), '(age_per_EpS, pop_2013_pct,\n omega_path_lev, omega_SSfx, curr_year, E, S, output_dir=tmpdir)\n', (7666, 7761), False, 'from ogusa import utils, parameter_plots, income\n'), ((8035, 8071), 'numpy.linspace', 'np.linspace', (['(20 + 0.5)', '(100 - 0.5)', '(80)'], {}), '(20 + 0.5, 100 - 0.5, 80)\n', (8046, 8071), True, 'import numpy as np\n'), ((8088, 8143), 'numpy.array', 'np.array', (['[0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995]'], {}), '([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])\n', (8096, 8143), True, 'import numpy as np\n'), ((8160, 8209), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01]'], {}), '([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])\n', (8168, 8209), True, 'import numpy as np\n'), ((8257, 8295), 'ogusa.income.get_e_orig', 'income.get_e_orig', (['age_wgts', 'abil_pcts'], {}), '(age_wgts, abil_pcts)\n', (8274, 8295), False, 'from ogusa import utils, parameter_plots, income\n'), ((8306, 8372), 'ogusa.parameter_plots.plot_income_data', 'parameter_plots.plot_income_data', (['ages', 'abil_midp', 'abil_pcts', 'emat'], {}), '(ages, abil_midp, abil_pcts, emat)\n', (8338, 8372), False, 'from ogusa import utils, parameter_plots, income\n'), ((8455, 8491), 'numpy.linspace', 'np.linspace', (['(20 + 0.5)', '(100 - 0.5)', '(80)'], {}), '(20 + 0.5, 100 - 0.5, 80)\n', (8466, 8491), True, 'import numpy as np\n'), ((8508, 8563), 'numpy.array', 'np.array', (['[0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995]'], {}), '([0.125, 0.375, 0.6, 0.75, 0.85, 0.945, 0.995])\n', (8516, 8563), True, 'import numpy as np\n'), ((8580, 8629), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01]'], {}), '([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])\n', (8588, 8629), True, 'import numpy as np\n'), ((8677, 8715), 'ogusa.income.get_e_orig', 'income.get_e_orig', (['age_wgts', 'abil_pcts'], {}), '(age_wgts, abil_pcts)\n', (8694, 8715), False, 'from ogusa import utils, 
parameter_plots, income\n'), ((8720, 8809), 'ogusa.parameter_plots.plot_income_data', 'parameter_plots.plot_income_data', (['ages', 'abil_midp', 'abil_pcts', 'emat'], {'output_dir': 'tmpdir'}), '(ages, abil_midp, abil_pcts, emat,\n output_dir=tmpdir)\n', (8752, 8809), False, 'from ogusa import utils, parameter_plots, income\n'), ((673, 715), 'os.path.join', 'os.path.join', (['tmpdir', '"""imm_rates_orig.png"""'], {}), "(tmpdir, 'imm_rates_orig.png')\n", (685, 715), False, 'import os\n'), ((1029, 1072), 'os.path.join', 'os.path.join', (['tmpdir', '"""mortality_rates.png"""'], {}), "(tmpdir, 'mortality_rates.png')\n", (1041, 1072), False, 'import os\n'), ((1392, 1436), 'os.path.join', 'os.path.join', (['tmpdir', '"""pop_growth_rates.png"""'], {}), "(tmpdir, 'pop_growth_rates.png')\n", (1404, 1436), False, 'import os\n'), ((1774, 1818), 'os.path.join', 'os.path.join', (['tmpdir', '"""ability_profiles.png"""'], {}), "(tmpdir, 'ability_profiles.png')\n", (1786, 1818), False, 'import os\n'), ((2260, 2301), 'os.path.join', 'os.path.join', (['tmpdir', '"""ellipse_v_CFE.png"""'], {}), "(tmpdir, 'ellipse_v_CFE.png')\n", (2272, 2301), False, 'import os\n'), ((2595, 2635), 'os.path.join', 'os.path.join', (['tmpdir', '"""chi_n_values.png"""'], {}), "(tmpdir, 'chi_n_values.png')\n", (2607, 2635), False, 'import os\n'), ((3111, 3155), 'os.path.join', 'os.path.join', (['tmpdir', '"""pop_distribution.png"""'], {}), "(tmpdir, 'pop_distribution.png')\n", (3123, 3155), False, 'import os\n'), ((3305, 3393), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0, 49.3, 10.4, 0.8, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0, 49.3, 10.4, 0.8, \n 0.0, 0.0])\n', (3313, 3393), True, 'import numpy as np\n'), ((3906, 3994), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0, 49.3, 10.4, 0.8, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0, 49.3, 10.4, 0.8, \n 0.0, 0.0])\n', (3914, 3994), True, 'import numpy 
as np\n'), ((4405, 4443), 'os.path.join', 'os.path.join', (['tmpdir', '"""fert_rates.png"""'], {}), "(tmpdir, 'fert_rates.png')\n", (4417, 4443), False, 'import os\n'), ((5380, 5418), 'os.path.join', 'os.path.join', (['tmpdir', '"""mort_rates.png"""'], {}), "(tmpdir, 'mort_rates.png')\n", (5392, 5418), False, 'import os\n'), ((6086, 6128), 'os.path.join', 'os.path.join', (['tmpdir', '"""OrigVsFixSSpop.png"""'], {}), "(tmpdir, 'OrigVsFixSSpop.png')\n", (6098, 6128), False, 'import os\n'), ((6842, 6882), 'os.path.join', 'os.path.join', (['tmpdir', '"""OrigVsAdjImm.png"""'], {}), "(tmpdir, 'OrigVsAdjImm.png')\n", (6854, 6882), False, 'import os\n'), ((7798, 7837), 'os.path.join', 'os.path.join', (['tmpdir', '"""PopDistPath.png"""'], {}), "(tmpdir, 'PopDistPath.png')\n", (7810, 7837), False, 'import os\n'), ((8839, 8881), 'os.path.join', 'os.path.join', (['tmpdir', '"""ability_3D_lev.png"""'], {}), "(tmpdir, 'ability_3D_lev.png')\n", (8851, 8881), False, 'import os\n'), ((8907, 8949), 'os.path.join', 'os.path.join', (['tmpdir', '"""ability_3D_log.png"""'], {}), "(tmpdir, 'ability_3D_log.png')\n", (8919, 8949), False, 'import os\n'), ((8975, 9017), 'os.path.join', 'os.path.join', (['tmpdir', '"""ability_2D_log.png"""'], {}), "(tmpdir, 'ability_2D_log.png')\n", (8987, 9017), False, 'import os\n'), ((8225, 8236), 'numpy.ones', 'np.ones', (['(80)'], {}), '(80)\n', (8232, 8236), True, 'import numpy as np\n'), ((8645, 8656), 'numpy.ones', 'np.ones', (['(80)'], {}), '(80)\n', (8652, 8656), True, 'import numpy as np\n')] |
import numpy as np
from nilearn.image.image import check_niimg
from nilearn.image.image import _crop_img_to as crop_img_to
def crop_img(img, rtol=1e-8, copy=True, return_slices=False):
    """Crop img down to its non-zero bounding box.

    Strips as many negligible (near-zero) border planes as possible
    without touching significant entries, keeping one voxel of zero
    padding around the retained region to avoid sampling issues later.

    Parameters
    ----------
    img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        img to be cropped.
    rtol: float
        Relative tolerance (with respect to the maximal absolute value
        of the image) under which values are considered negligeable and
        thus croppable.
    copy: boolean
        Specifies whether cropped data is copied or not.
    return_slices: boolean
        If True, return the slices that define the cropped image
        instead of the cropped image itself.

    Returns
    -------
    cropped_img: image
        Cropped version of the input image
    """
    data = img
    # Anything whose magnitude is within rtol of the largest absolute
    # value is treated as background.
    cutoff = rtol * max(-data.min(), data.max())
    significant = np.abs(data) > cutoff
    if data.ndim == 4:
        # A voxel survives if it is significant in any of the volumes.
        significant = significant.any(axis=-1)
    coords = np.array(np.where(significant))
    # Bounding box of the significant voxels, widened by one voxel of
    # padding on each side and clamped to the image bounds.
    lower = np.maximum(coords.min(axis=1) - 1, 0)
    upper = np.minimum(coords.max(axis=1) + 2, data.shape[:3])
    slices = [slice(low, high) for low, high in zip(lower, upper)]
    if return_slices:
        return slices
    return crop_img_to(img, slices, copy=copy)
| [
"numpy.minimum",
"numpy.where",
"nilearn.image.image._crop_img_to",
"numpy.logical_or",
"numpy.any",
"numpy.maximum"
] | [((1178, 1250), 'numpy.logical_or', 'np.logical_or', (['(data < -rtol * infinity_norm)', '(data > rtol * infinity_norm)'], {}), '(data < -rtol * infinity_norm, data > rtol * infinity_norm)\n', (1191, 1250), True, 'import numpy as np\n'), ((1554, 1578), 'numpy.maximum', 'np.maximum', (['(start - 1)', '(0)'], {}), '(start - 1, 0)\n', (1564, 1578), True, 'import numpy as np\n'), ((1589, 1624), 'numpy.minimum', 'np.minimum', (['(end + 1)', 'data.shape[:3]'], {}), '(end + 1, data.shape[:3])\n', (1599, 1624), True, 'import numpy as np\n'), ((1738, 1773), 'nilearn.image.image._crop_img_to', 'crop_img_to', (['img', 'slices'], {'copy': 'copy'}), '(img, slices, copy=copy)\n', (1749, 1773), True, 'from nilearn.image.image import _crop_img_to as crop_img_to\n'), ((1339, 1372), 'numpy.any', 'np.any', (['passes_threshold'], {'axis': '(-1)'}), '(passes_threshold, axis=-1)\n', (1345, 1372), True, 'import numpy as np\n'), ((1395, 1421), 'numpy.where', 'np.where', (['passes_threshold'], {}), '(passes_threshold)\n', (1403, 1421), True, 'import numpy as np\n')] |
#basics
import numpy as np
from typing import Union
#pytorch
import torch.nn as nn
def init_weights(module: nn.Module, weight_init: Union[str, dict, None]):
    """
    Initialize all weight parameters of a nn.Module.

    Args:
        module: module whose named parameters will be initialized.
        weight_init: either the name of a ``torch.nn.init`` function
            (e.g. ``"xavier_uniform_"``), or a dict holding a ``"name"``
            key plus keyword arguments for that function.
            ``None`` is a no-op.

    Bias parameters are always filled with zeros.
    """
    # https://towardsdatascience.com/weight-initialization-techniques-in-neural-networks-26c649eb3b78
    if weight_init is None:
        return
    if isinstance(weight_init, dict):
        # Work on a copy so pop() does not mutate the caller's dict.
        kwargs = dict(weight_init)
        init_method = kwargs.pop("name")
    elif isinstance(weight_init, str):
        init_method = weight_init
        kwargs = {}
    else:
        raise TypeError(
            "weight_init must be None, a str or a dict, got {}".format(type(weight_init))
        )
    for name, param in module.named_parameters():
        if "weight" in name:
            # https://stackoverflow.com/questions/49433936/how-to-initialize-weights-in-pytorch
            if init_method == "normal":
                # Scale std by fan-in. For nn.Linear weights of shape
                # (out_features, in_features) the last dim is the fan-in.
                # (The original referenced an undefined name ``m`` here.)
                kwargs["std"] = 1 / np.sqrt(param.data.shape[-1])
                nn.init.normal_(param.data, **kwargs)
            else:
                getattr(nn.init, init_method)(param.data, **kwargs)
        if "bias" in name:
            param.data.fill_(0)
"numpy.sqrt"
] | [((831, 853), 'numpy.sqrt', 'np.sqrt', (['m.in_features'], {}), '(m.in_features)\n', (838, 853), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import logging
import os
import sys
from colorsys import hsv_to_rgb
import mp2
try:
import cPickle as pickle
except ImportError: # Python 3.x
import pickle
import colorlog
import numpy as np
from PIL import Image
# Root logger with colorized console output so debug traces stand out.
logger = logging.getLogger()
logger.setLevel(colorlog.colorlog.logging.DEBUG)
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter())
logger.addHandler(handler)
# Print numpy arrays in full (no truncation) and on very wide lines so
# dumped masks are readable while debugging.
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=10000)
# Paths are resolved relative to this script: histogram_data/ holds the
# pickled training histograms loaded in __main__, images/ the inputs to
# process, and results/ the generated mask images.
script_dir = os.path.dirname(__file__)
hist_output_dir = os.path.join(script_dir, "histogram_data")
images_dir = os.path.join(script_dir, "images")
results_dir = os.path.join(script_dir, "results")
def array2tuples(img_array):
    """Convert a (H, W, 3) array into a (H, W) object array of 3-tuples.

    img_array - array of shape (height, width, 3), e.g. an RGB or HSV image.

    Returns a 2-d numpy array whose cells hold the 3-tuple of channel
    values of the corresponding pixel.

    Tuples are hashable and play nicely with the histogram dictionaries
    used elsewhere in this module; the up-front cost isn't that great.
    """
    height, width = img_array.shape[0], img_array.shape[1]
    # dtype=object lets numpy store Python tuples directly; the original
    # spelled this as type((1, 2, 3)), which numpy coerces to object anyway.
    return_array = np.empty((height, width), dtype=object)
    for i in range(height):
        for j in range(width):
            return_array[i, j] = (
                img_array[i, j, 0],
                img_array[i, j, 1],
                img_array[i, j, 2],
            )
    return return_array
def array2tuples_2d(img_array, val1=0, val2=1):
    """Convert a (H, W, 3) array into a (H, W) object array of 2-tuples.

    One of the three channels is dropped; each cell stores
    (img_array[i, j, val1], img_array[i, j, val2]).

    img_array - array of shape (height, width, 3)
    val1, val2 - indices of the two channels to keep

    Returns a 2-d numpy array of 2-tuples (dtype=object).

    Tuples are hashable and play nicely with the histogram dictionaries
    used elsewhere in this module; the up-front cost isn't that great.
    """
    height, width = img_array.shape[0], img_array.shape[1]
    # dtype=object lets numpy store Python tuples directly; the original
    # spelled this as type((1, 2)), which numpy coerces to object anyway.
    return_array = np.empty((height, width), dtype=object)
    for i in range(height):
        for j in range(width):
            return_array[i, j] = (img_array[i, j, val1], img_array[i, j, val2])
    return return_array
def create_hard_mask_3d(img, hist, threshold=0):
    """Build a binary mask from a histogram keyed by 3-tuples.

    img - 2-d array whose entries are 3-tuples (see array2tuples)
    hist - dict mapping 3-tuples to histogram counts
    threshold - count that must be strictly exceeded for a pixel to be kept

    Returns an int array shaped like img: 1 where the pixel's histogram
    count exceeds the threshold, 0 otherwise. Pixel values missing from
    the dictionary default to 0.
    """
    rows, cols = img.shape[0], img.shape[1]
    mask = np.zeros((rows, cols), dtype=int)
    for r, c in np.ndindex(rows, cols):
        count = hist.get(img[r, c])
        if count is not None and count > threshold:
            mask[r, c] = 1
    return mask
def create_hard_mask_2d(img, hist, threshold=0):
    """Build a binary mask from a histogram keyed by 2-tuples.

    img - 2-d array whose entries are 2-tuples (see array2tuples_2d)
    hist - dict mapping 2-tuples to histogram counts
    threshold - count that must be strictly exceeded for a pixel to be kept

    Returns an int array shaped like img: 1 where the 2-tuple's count in
    hist exceeds the threshold, 0 otherwise. Tuples missing from the
    dictionary default to 0.
    """
    def keep(key):
        # Missing keys are treated as "off".
        return key in hist and hist[key] > threshold

    rows, cols = img.shape[0], img.shape[1]
    mask = np.zeros((rows, cols), dtype=int)
    for i in range(rows):
        for j in range(cols):
            if keep(img[i, j]):
                mask[i, j] = 1
    return mask
def apply_hard_mask(img_array, mask):
    """Zero out the pixels of img_array wherever mask is 0.

    img_array - image array; its leading dims must match mask's shape
    mask - 2-d array of 0s and 1s (see create_hard_mask_*)

    Returns a new array like img_array in which all pixels with 0s in
    the mask have been turned off; the input array is left untouched.
    """
    mask = np.asarray(mask)
    # Append singleton axes so the 2-d mask broadcasts across any
    # trailing channel dimensions (e.g. the RGB/HSV axis), then multiply
    # in one vectorized pass instead of a Python per-pixel loop. The
    # cast restores the input dtype (multiplying by an int mask would
    # otherwise promote uint8 images to int64).
    shaped = mask.reshape(mask.shape + (1,) * (img_array.ndim - mask.ndim))
    return (img_array * shaped).astype(img_array.dtype, copy=False)
def array_hsv2rgb(img_array):
    """Convert an HSV image array to RGB in place.

    img_array - array of shape (height, width, 3) holding HSV values in
    the 0-255 range (as produced by PIL's "HSV" mode).

    Returns the same array, now holding RGB values in the 0-255 range.
    """
    for i, j in np.ndindex(img_array.shape[0], img_array.shape[1]):
        # colorsys works on values in [0, 1], so scale down and back up.
        hue = img_array[i, j, 0] / 255
        sat = img_array[i, j, 1] / 255
        val = img_array[i, j, 2] / 255
        red, green, blue = hsv_to_rgb(hue, sat, val)
        img_array[i, j, 0] = red * 255
        img_array[i, j, 1] = green * 255
        img_array[i, j, 2] = blue * 255
    return img_array
if __name__ == "__main__":
    logger.info("<NAME> - EECS 334 - MP 4")
    logger.info("-" * 80)
    logger.info("This file should be run *after* `mp4_histogram_training.py`")
    logger.info("has generated histogram training data in `histogram_data/`.")
    # Load in the histogram files.
    hist_rgb = pickle.load(open(os.path.join(hist_output_dir, "hist_rgb.pickle"), "rb"))
    hist_hsv = pickle.load(open(os.path.join(hist_output_dir, "hist_hsv.pickle"), "rb"))
    hist_rg = pickle.load(open(os.path.join(hist_output_dir, "hist_rg.pickle"), "rb"))
    hist_rb = pickle.load(open(os.path.join(hist_output_dir, "hist_rb.pickle"), "rb"))
    hist_gb = pickle.load(open(os.path.join(hist_output_dir, "hist_gb.pickle"), "rb"))
    hist_hs = pickle.load(open(os.path.join(hist_output_dir, "hist_hs.pickle"), "rb"))
    hist_hv = pickle.load(open(os.path.join(hist_output_dir, "hist_hv.pickle"), "rb"))
    hist_sv = pickle.load(open(os.path.join(hist_output_dir, "hist_sv.pickle"), "rb"))
    # Keep only pixel values seen in more than 0.005% of the training
    # data; hist_rgb["size"] is presumably the total training pixel
    # count — confirm against mp4_histogram_training.py.
    global_threshold = round(hist_rgb["size"] * 0.00005)
    # Walk every image under images/ and write the masked variants
    # (RGB, HSV, HS, and morphologically cleaned HS) into results/.
    for path, subdirs, files in os.walk(images_dir):
        for name in files:
            img = os.path.join(path, name)
            logger.debug("Now operating on {}".format(img))
            image_array_rgb = np.array(Image.open(img).convert("RGB"))
            image_array_hsv = np.array(Image.open(img).convert("HSV"))
            # Tuple-ified copies are used as keys into the histograms.
            image_array_rgb_tupled = array2tuples(image_array_rgb)
            image_array_hsv_tupled = array2tuples(image_array_hsv)
            image_array_hs_tupled = array2tuples_2d(image_array_hsv)
            # 1) Mask based on the full RGB histogram.
            mask_rgb = create_hard_mask_3d(
                image_array_rgb_tupled, hist_rgb, global_threshold
            )
            image_out_rgb = apply_hard_mask(image_array_rgb, mask_rgb)
            save_loc = os.path.join(results_dir, name[:-4] + "_rgb_mask.bmp")
            logger.debug("Saving RGB mask to {}".format(save_loc))
            im = Image.fromarray(image_out_rgb)
            im.save(save_loc)
            # 2) Mask based on the full HSV histogram (converted back to
            # RGB for saving).
            mask_hsv = create_hard_mask_3d(
                image_array_hsv_tupled, hist_hsv, global_threshold
            )
            image_out_hsv = apply_hard_mask(image_array_hsv, mask_hsv)
            save_loc = os.path.join(results_dir, name[:-4] + "_hsv_mask.bmp")
            logger.debug("Saving HSV mask to {}".format(save_loc))
            im = Image.fromarray(array_hsv2rgb(image_out_hsv))
            im.save(save_loc)
            # 3) Mask based on the hue/saturation histogram only.
            mask_hs = create_hard_mask_2d(
                image_array_hs_tupled, hist_hs, global_threshold
            )
            image_out_hs = apply_hard_mask(image_array_hsv, mask_hs)
            save_loc = os.path.join(results_dir, name[:-4] + "_hs_mask.bmp")
            logger.debug("Saving HS mask to {}".format(save_loc))
            im = Image.fromarray(array_hsv2rgb(image_out_hs))
            im.save(save_loc)
            # 4) Clean up the HS mask morphologically: erode once to drop
            # speckle, then dilate three times to regrow the kept regions
            # (structuring elements come from the mp2 module).
            mask_hs = mp2.erode(mask_hs, mp2.se_cross_3)
            # mask_hs = mp2.erode(mask_hs, mp2.se_shield_5)
            mask_hs = mp2.dilate(mask_hs, mp2.se_circle_5)
            mask_hs = mp2.dilate(mask_hs, mp2.se_circle_5)
            mask_hs = mp2.dilate(mask_hs, mp2.se_circle_5)
            image_out_hs = apply_hard_mask(image_array_hsv, mask_hs)
            save_loc = os.path.join(results_dir, name[:-4] + "_hs_mask_morphed.bmp")
            logger.debug("Saving HS mask to {}".format(save_loc))
            im = Image.fromarray(array_hsv2rgb(image_out_hs))
            im.save(save_loc)
| [
"logging.getLogger",
"PIL.Image.fromarray",
"PIL.Image.open",
"colorlog.StreamHandler",
"mp2.dilate",
"os.path.join",
"colorsys.hsv_to_rgb",
"os.path.dirname",
"numpy.zeros",
"mp2.erode",
"numpy.zeros_like",
"colorlog.ColoredFormatter",
"os.walk",
"numpy.set_printoptions"
] | [((255, 274), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (272, 274), False, 'import logging\n'), ((335, 359), 'colorlog.StreamHandler', 'colorlog.StreamHandler', ([], {}), '()\n', (357, 359), False, 'import colorlog\n'), ((438, 480), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (457, 480), True, 'import numpy as np\n'), ((481, 517), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(10000)'}), '(linewidth=10000)\n', (500, 517), True, 'import numpy as np\n'), ((532, 557), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (547, 557), False, 'import os\n'), ((576, 618), 'os.path.join', 'os.path.join', (['script_dir', '"""histogram_data"""'], {}), "(script_dir, 'histogram_data')\n", (588, 618), False, 'import os\n'), ((632, 666), 'os.path.join', 'os.path.join', (['script_dir', '"""images"""'], {}), "(script_dir, 'images')\n", (644, 666), False, 'import os\n'), ((681, 716), 'os.path.join', 'os.path.join', (['script_dir', '"""results"""'], {}), "(script_dir, 'results')\n", (693, 716), False, 'import os\n'), ((381, 408), 'colorlog.ColoredFormatter', 'colorlog.ColoredFormatter', ([], {}), '()\n', (406, 408), False, 'import colorlog\n'), ((2474, 2523), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {'dtype': 'int'}), '((img.shape[0], img.shape[1]), dtype=int)\n', (2482, 2523), True, 'import numpy as np\n'), ((3119, 3168), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {'dtype': 'int'}), '((img.shape[0], img.shape[1]), dtype=int)\n', (3127, 3168), True, 'import numpy as np\n'), ((3582, 3606), 'numpy.zeros_like', 'np.zeros_like', (['img_array'], {}), '(img_array)\n', (3595, 3606), True, 'import numpy as np\n'), ((5426, 5445), 'os.walk', 'os.walk', (['images_dir'], {}), '(images_dir)\n', (5433, 5445), False, 'import os\n'), ((4031, 4123), 'colorsys.hsv_to_rgb', 'hsv_to_rgb', (['(img_array[i, j, 0] / 255)', 
'(img_array[i, j, 1] / 255)', '(img_array[i, j, 2] / 255)'], {}), '(img_array[i, j, 0] / 255, img_array[i, j, 1] / 255, img_array[i,\n j, 2] / 255)\n', (4041, 4123), False, 'from colorsys import hsv_to_rgb\n'), ((4667, 4715), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_rgb.pickle"""'], {}), "(hist_output_dir, 'hist_rgb.pickle')\n", (4679, 4715), False, 'import os\n'), ((4756, 4804), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_hsv.pickle"""'], {}), "(hist_output_dir, 'hist_hsv.pickle')\n", (4768, 4804), False, 'import os\n'), ((4844, 4891), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_rg.pickle"""'], {}), "(hist_output_dir, 'hist_rg.pickle')\n", (4856, 4891), False, 'import os\n'), ((4931, 4978), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_rb.pickle"""'], {}), "(hist_output_dir, 'hist_rb.pickle')\n", (4943, 4978), False, 'import os\n'), ((5018, 5065), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_gb.pickle"""'], {}), "(hist_output_dir, 'hist_gb.pickle')\n", (5030, 5065), False, 'import os\n'), ((5105, 5152), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_hs.pickle"""'], {}), "(hist_output_dir, 'hist_hs.pickle')\n", (5117, 5152), False, 'import os\n'), ((5192, 5239), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_hv.pickle"""'], {}), "(hist_output_dir, 'hist_hv.pickle')\n", (5204, 5239), False, 'import os\n'), ((5279, 5326), 'os.path.join', 'os.path.join', (['hist_output_dir', '"""hist_sv.pickle"""'], {}), "(hist_output_dir, 'hist_sv.pickle')\n", (5291, 5326), False, 'import os\n'), ((5492, 5516), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (5504, 5516), False, 'import os\n'), ((6144, 6198), 'os.path.join', 'os.path.join', (['results_dir', "(name[:-4] + '_rgb_mask.bmp')"], {}), "(results_dir, name[:-4] + '_rgb_mask.bmp')\n", (6156, 6198), False, 'import os\n'), ((6283, 6313), 'PIL.Image.fromarray', 'Image.fromarray', 
(['image_out_rgb'], {}), '(image_out_rgb)\n', (6298, 6313), False, 'from PIL import Image\n'), ((6565, 6619), 'os.path.join', 'os.path.join', (['results_dir', "(name[:-4] + '_hsv_mask.bmp')"], {}), "(results_dir, name[:-4] + '_hsv_mask.bmp')\n", (6577, 6619), False, 'import os\n'), ((6996, 7049), 'os.path.join', 'os.path.join', (['results_dir', "(name[:-4] + '_hs_mask.bmp')"], {}), "(results_dir, name[:-4] + '_hs_mask.bmp')\n", (7008, 7049), False, 'import os\n'), ((7231, 7265), 'mp2.erode', 'mp2.erode', (['mask_hs', 'mp2.se_cross_3'], {}), '(mask_hs, mp2.se_cross_3)\n', (7240, 7265), False, 'import mp2\n'), ((7348, 7384), 'mp2.dilate', 'mp2.dilate', (['mask_hs', 'mp2.se_circle_5'], {}), '(mask_hs, mp2.se_circle_5)\n', (7358, 7384), False, 'import mp2\n'), ((7407, 7443), 'mp2.dilate', 'mp2.dilate', (['mask_hs', 'mp2.se_circle_5'], {}), '(mask_hs, mp2.se_circle_5)\n', (7417, 7443), False, 'import mp2\n'), ((7466, 7502), 'mp2.dilate', 'mp2.dilate', (['mask_hs', 'mp2.se_circle_5'], {}), '(mask_hs, mp2.se_circle_5)\n', (7476, 7502), False, 'import mp2\n'), ((7596, 7657), 'os.path.join', 'os.path.join', (['results_dir', "(name[:-4] + '_hs_mask_morphed.bmp')"], {}), "(results_dir, name[:-4] + '_hs_mask_morphed.bmp')\n", (7608, 7657), False, 'import os\n'), ((5617, 5632), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (5627, 5632), False, 'from PIL import Image\n'), ((5688, 5703), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (5698, 5703), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from PySide2.QtWidgets import QWidget, QFileDialog
from .layout.mg_state_machine_widget_ui import Ui_MGStateMachineWidget
from tool.core.widget_manager import WidgetManager
import numpy as np
class MGStateMachineWidget(QWidget, Ui_MGStateMachineWidget):
    """Qt panel for inspecting and tuning a morphable-graph state machine.

    Mirrors the state of a scene object's "morphablegraph_state_machine"
    component into the UI widgets and writes edits back to the
    controller and its planner settings.
    """
    # Key under which this widget is registered with the WidgetManager
    # and under which the scene object stores the controller component.
    COMPONENT_NAME = "morphablegraph_state_machine"
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        Ui_MGStateMachineWidget.setupUi(self, self)
        # Set by set_object() once a scene object with the component is selected.
        self.mg_controller = None
        # NOTE(review): assigns a plain ``text`` attribute; Qt display
        # widgets normally use setText() — confirm this is intended.
        self.stateDisplay.text = ""
        # Wire UI edits/toggles to the corresponding setters below.
        self.dirXLineEdit.returnPressed.connect(self.set_direction_vector)
        self.dirZLineEdit.returnPressed.connect(self.set_direction_vector)
        self.animSpeedLineEdit.returnPressed.connect(self.set_animation_speed)
        self.blendWindowLineEdit.returnPressed.connect(self.set_blend_window)
        self.posWeightLineEdit.returnPressed.connect(self.set_position_weight)
        self.dirWeightLineEdit.returnPressed.connect(self.set_direction_weight)
        self.activateIKCheckBox.stateChanged.connect(self.set_ik)
        self.groundingCheckBox.stateChanged.connect(self.set_grounding)
        self.transitionConstraintCheckBox.stateChanged.connect(self.set_transition_constraint)
    def set_object(self, scene_object):
        """Bind the widget to scene_object's state-machine component and
        populate all UI fields from the controller's current state."""
        if scene_object is not None and scene_object.has_component("morphablegraph_state_machine"):
            self.mg_controller = scene_object._components["morphablegraph_state_machine"]
            self.stateDisplay.setText(self.mg_controller.current_node[1])
            self.dirXLineEdit.setText(str(self.mg_controller.direction_vector[0]))
            self.dirZLineEdit.setText(str(self.mg_controller.direction_vector[2]))
            self.animSpeedLineEdit.setText(str(self.mg_controller.speed))
            self.posWeightLineEdit.setText(str(self.mg_controller.planner.settings.position_constraint_weight))
            self.dirWeightLineEdit.setText(str(self.mg_controller.planner.settings.direction_constraint_weight))
            self.activateIKCheckBox.setChecked(self.mg_controller.planner.settings.activate_ik)
            self.groundingCheckBox.setChecked(self.mg_controller.activate_grounding)
            self.transitionConstraintCheckBox.setChecked(self.mg_controller.planner.settings.add_transition_constraint)
    def set_direction_vector(self):
        """Read the X/Z fields and store a normalized direction on the
        controller (the Y component is fixed to 0)."""
        x = float(self.dirXLineEdit.text())
        z = float(self.dirZLineEdit.text())
        dir_vector = np.array([x,0,z])
        # NOTE(review): a zero-length input vector divides by norm 0 here
        # (NaNs) — confirm the UI prevents that case.
        dir_vector /= np.linalg.norm(dir_vector)
        self.mg_controller.direction_vector = dir_vector
    def set_animation_speed(self):
        """Copy the animation speed field to the controller."""
        speed = float(self.animSpeedLineEdit.text())
        self.mg_controller.speed = speed
    def set_blend_window(self):
        """Copy the blend-window size (frames, presumably) to the planner settings."""
        blend_window = int(self.blendWindowLineEdit.text())
        self.mg_controller.planner.settings.blend_window = blend_window
    def set_ik(self, state):
        """Checkbox handler: toggle inverse kinematics in the planner settings."""
        self.mg_controller.planner.settings.activate_ik = bool(state)
    def set_grounding(self, state):
        """Checkbox handler: toggle foot grounding on the controller."""
        self.mg_controller.activate_grounding = bool(state)
    def set_transition_constraint(self, state):
        """Checkbox handler: toggle the planner's transition constraint."""
        self.mg_controller.planner.settings.add_transition_constraint = bool(state)
# Make the widget available to the tool under the component's name.
WidgetManager.register("morphablegraph_state_machine", MGStateMachineWidget)
| [
"numpy.array",
"tool.core.widget_manager.WidgetManager.register",
"PySide2.QtWidgets.QWidget.__init__",
"numpy.linalg.norm"
] | [((4581, 4657), 'tool.core.widget_manager.WidgetManager.register', 'WidgetManager.register', (['"""morphablegraph_state_machine"""', 'MGStateMachineWidget'], {}), "('morphablegraph_state_machine', MGStateMachineWidget)\n", (4603, 4657), False, 'from tool.core.widget_manager import WidgetManager\n'), ((1465, 1495), 'PySide2.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1481, 1495), False, 'from PySide2.QtWidgets import QWidget, QFileDialog\n'), ((3536, 3555), 'numpy.array', 'np.array', (['[x, 0, z]'], {}), '([x, 0, z])\n', (3544, 3555), True, 'import numpy as np\n'), ((3576, 3602), 'numpy.linalg.norm', 'np.linalg.norm', (['dir_vector'], {}), '(dir_vector)\n', (3590, 3602), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 11:45:14 2018
backprop for the XOR problem.
19th March 2019 JVS modified code and added graphics.
"""
import numpy as np
import matplotlib.pyplot as plt
# Fix the RNG seed so every run sees the same random weight initialization.
np.random.seed(1)
########## set input and output values ##########
# The four possible binary input pairs for XOR, one example per row.
X = np.array([[0,0], [0,1], [1,0] , [1,1]])
# Desired XOR output for each input row (column vector of targets).
targetvectors = np.array([[0],[1],[1],[0]])
# Logistic (sigmoid) activation used by every unit in the network.
def sigmoid (x):
    """Element-wise logistic function 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# Derivative of the sigmoid, expressed in terms of its output value x.
def derivatives_sigmoid(x):
    """Return s'(z) given x = s(z), i.e. x * (1 - x)."""
    complement = 1 - x
    return x * complement
########## set parameters ##########
niter = 3000 # number of training iterations
plotinterval = 100 # interval between plotting graphs
errors = np.zeros(niter) # record of errors during training
numcorrects = np.zeros(niter) # number of correct outputs during training
# decide if want to have sigmoidal or linear output units
sigmoidalOutputUnits = 0
# Sigmoidal outputs get a larger learning rate than linear ones
# (linear outputs produce larger gradients, so a smaller step is used).
if (sigmoidalOutputUnits):
    lr = 0.5 # learning rate
else:
    lr = 0.1 # learning rate
inputlayer_neurons = X.shape[1] # number of units in input layer
hiddenlayer_neurons = 2 # number of hidden layer units
output_neurons = 1 # number of units in output layer
# weight and bias initialization: all drawn uniformly from [0, 1)
# weights between input and hidden layer
wh = np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
# biases of hidden layer units
bh = np.random.uniform(size=(1,hiddenlayer_neurons))
# weights of output layer units
wout = np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
# biases of output layer units
bout = np.random.uniform(size=(1,output_neurons))
########## SET UP GRAPHS ##########
# set interactive plotting on, so graphs appear outside of console.
plt.ion()
# Left axis tracks the training error, right axis the number of
# correctly classified patterns (out of 4).
fig, axes = plt.subplots(1,2)
axerror = axes[0]
axnumcorrect = axes[1]
axerror.set_xlabel('Epoch')
axerror.set_ylabel('Error')
axnumcorrect.set_ylabel('Number correct')
axnumcorrect.set_xlabel('Epoch')
# set state of bias unit; this code works if set to -1 or +1.
biasunitstate = -1.0
########## LEARN ##########
# NOTE(review): the loop variable shadows the builtin ``iter``.
for iter in range(niter):
    # Forward propagation
    hidden_layer_input1 = np.dot(X,wh) # input from input layer
    hidden_layer_input = hidden_layer_input1 + bh*biasunitstate # add input from bias unit
    hiddenlayer_states = sigmoid(hidden_layer_input)
    output_layer_input1 = np.dot(hiddenlayer_states,wout)
    output_layer_input= output_layer_input1 + bout * biasunitstate
    # Backpropagation
    # get derivatives of errors wrt unit inputs ...
    # ... of output layer
    if (sigmoidalOutputUnits):
        output = sigmoid(output_layer_input)
        slope_output_layer = derivatives_sigmoid(output)
    else: # output units are linear
        output = output_layer_input
        slope_output_layer = output*0 + 1 # each derivative = 1
    d = targetvectors - output # delta terms = errors in output layer
    # get derivatives of errors wrt unit inputs of hidden units
    slope_hidden_layer = derivatives_sigmoid(hiddenlayer_states)
    # get delta terms of output units = d_output
    d_output = d * slope_output_layer
    # Propagate the output deltas back through the output weights.
    Error_at_hidden_layer = d_output.dot(wout.T)
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
    # update weights of output units (gradient ascent on -error)
    wout += hiddenlayer_states.T.dot(d_output) * lr
    # update biases of output units; scaled by the bias unit's state
    bout += np.sum(d_output, axis=0,keepdims=True) * biasunitstate * lr
    # update weights and biases of hidden units
    wh += X.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0,keepdims=True) * biasunitstate * lr
    # Euclidean norm of the residuals over all four patterns.
    error = np.linalg.norm(d)
    errors[iter] = error
    # count number of correct responses: a pattern is "correct" when the
    # output falls on the same side of 0.5 as its target. The builtin
    # sum over the (4,1) boolean array yields a length-1 array of the
    # column count, which numpy stores into numcorrects[iter].
    a = (output<0.5)
    b = (targetvectors<0.5)
    numcorrect = sum(a==b)
    numcorrects[iter] = numcorrect
    ########## Plot ##########
    # Refresh both plots every plotinterval epochs; the short pause lets
    # the GUI event loop redraw in interactive mode.
    if (iter % plotinterval == 0):
        axerror.plot(errors[0:niter],'k')
        plt.show()
        plt.pause(0.001)
        axnumcorrect.plot(numcorrects[0:niter],'k')
        plt.show()
        plt.pause(0.001)
########## Print results ##########
print('Target values')
print(targetvectors)
print('Output values')
print(output)
# Recompute the norm of the residuals from the final training epoch.
error=np.linalg.norm(d)
#print(i)
print('Final error:')
print(error)
########## The End ##########
| [
"numpy.linalg.norm",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.sum",
"numpy.random.seed",
"numpy.random.uniform",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((308, 325), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (322, 325), True, 'import numpy as np\n'), ((423, 465), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (431, 465), True, 'import numpy as np\n'), ((508, 538), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (516, 538), True, 'import numpy as np\n'), ((878, 893), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (886, 893), True, 'import numpy as np\n'), ((943, 958), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (951, 958), True, 'import numpy as np\n'), ((1434, 1499), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(inputlayer_neurons, hiddenlayer_neurons)'}), '(size=(inputlayer_neurons, hiddenlayer_neurons))\n', (1451, 1499), True, 'import numpy as np\n'), ((1535, 1583), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, hiddenlayer_neurons)'}), '(size=(1, hiddenlayer_neurons))\n', (1552, 1583), True, 'import numpy as np\n'), ((1623, 1684), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(hiddenlayer_neurons, output_neurons)'}), '(size=(hiddenlayer_neurons, output_neurons))\n', (1640, 1684), True, 'import numpy as np\n'), ((1722, 1765), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, output_neurons)'}), '(size=(1, output_neurons))\n', (1739, 1765), True, 'import numpy as np\n'), ((1870, 1879), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1877, 1879), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1911), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1905, 1911), True, 'import matplotlib.pyplot as plt\n'), ((4350, 4367), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (4364, 4367), True, 'import numpy as np\n'), ((2285, 2298), 'numpy.dot', 'np.dot', (['X', 'wh'], {}), '(X, wh)\n', (2291, 2298), True, 'import numpy as np\n'), ((2498, 2530), 'numpy.dot', 
'np.dot', (['hiddenlayer_states', 'wout'], {}), '(hiddenlayer_states, wout)\n', (2504, 2530), True, 'import numpy as np\n'), ((3774, 3791), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (3788, 3791), True, 'import numpy as np\n'), ((4094, 4104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4102, 4104), True, 'import matplotlib.pyplot as plt\n'), ((4113, 4129), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (4122, 4129), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4198, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4225), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (4218, 4225), True, 'import matplotlib.pyplot as plt\n'), ((627, 637), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (633, 637), True, 'import numpy as np\n'), ((3531, 3570), 'numpy.sum', 'np.sum', (['d_output'], {'axis': '(0)', 'keepdims': '(True)'}), '(d_output, axis=0, keepdims=True)\n', (3537, 3570), True, 'import numpy as np\n'), ((3696, 3740), 'numpy.sum', 'np.sum', (['d_hiddenlayer'], {'axis': '(0)', 'keepdims': '(True)'}), '(d_hiddenlayer, axis=0, keepdims=True)\n', (3702, 3740), True, 'import numpy as np\n')] |
import json
import numpy as np
from zarr.errors import MetadataError
def decode_metadata(b):
    """Parse zarr v1 array metadata from an ASCII JSON byte string.

    Raises MetadataError when the format version is not 1 or any
    required field is missing or malformed.
    """
    raw = json.loads(str(b, 'ascii'))
    fmt = raw.get('zarr_format', None)
    if fmt != 1:
        raise MetadataError('unsupported zarr format: %s' % fmt)
    try:
        decoded = {
            'zarr_format': raw['zarr_format'],
            'shape': tuple(raw['shape']),
            'chunks': tuple(raw['chunks']),
            'dtype': decode_dtype(raw['dtype']),
            'compression': raw['compression'],
            'compression_opts': raw['compression_opts'],
            'fill_value': raw['fill_value'],
            'order': raw['order'],
        }
    except Exception as e:
        raise MetadataError('error decoding metadata: %s' % e)
    return decoded
def encode_metadata(meta):
    """Serialise array metadata to pretty-printed ASCII JSON bytes."""
    doc = {
        'zarr_format': 1,
        'shape': meta['shape'],
        'chunks': meta['chunks'],
        'dtype': encode_dtype(meta['dtype']),
        'compression': meta['compression'],
        'compression_opts': meta['compression_opts'],
        'fill_value': meta['fill_value'],
        'order': meta['order'],
    }
    # sort_keys + indent gives a stable, human-readable representation
    return json.dumps(doc, indent=4, sort_keys=True, ensure_ascii=True).encode('ascii')
def encode_dtype(d):
    """Return a JSON-serialisable form of a numpy dtype.

    Simple dtypes map to their string code; structured dtypes map to
    their field descriptor list.
    """
    return d.str if d.fields is None else d.descr
def _decode_dtype_descr(d):
# need to convert list of lists to list of tuples
if isinstance(d, list):
# recurse to handle nested structures
d = [(f, _decode_dtype_descr(v)) for f, v in d]
return d
def decode_dtype(d):
    """Build a numpy dtype from its decoded JSON representation."""
    return np.dtype(_decode_dtype_descr(d))
| [
"numpy.dtype",
"json.loads",
"json.dumps",
"zarr.errors.MetadataError"
] | [((132, 145), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (142, 145), False, 'import json\n'), ((1148, 1209), 'json.dumps', 'json.dumps', (['meta'], {'indent': '(4)', 'sort_keys': '(True)', 'ensure_ascii': '(True)'}), '(meta, indent=4, sort_keys=True, ensure_ascii=True)\n', (1158, 1209), False, 'import json\n'), ((1643, 1654), 'numpy.dtype', 'np.dtype', (['d'], {}), '(d)\n', (1651, 1654), True, 'import numpy as np\n'), ((233, 291), 'zarr.errors.MetadataError', 'MetadataError', (["('unsupported zarr format: %s' % zarr_format)"], {}), "('unsupported zarr format: %s' % zarr_format)\n", (246, 291), False, 'from zarr.errors import MetadataError\n'), ((723, 771), 'zarr.errors.MetadataError', 'MetadataError', (["('error decoding metadata: %s' % e)"], {}), "('error decoding metadata: %s' % e)\n", (736, 771), False, 'from zarr.errors import MetadataError\n')] |
"""
Author: Dr. <NAME> <<EMAIL>>
Dr. <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
This package is distributed under New BSD license.
"""
from setuptools import setup, Extension
import sys
import numpy as np
from Cython.Build import cythonize
from smt import __version__
# PyPI trove classifiers; blank lines are filtered out when building the
# `classifiers` metadata entry below.
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C++
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS
"""
# Long description rendered on the PyPI project page.
LONG_DESCRIPTION = """
The surrogate modeling toolbox (SMT) is a Python package that contains
a collection of surrogate modeling methods, sampling techniques, and
benchmarking functions. This package provides a library of surrogate
models that is simple to use and facilitates the implementation of additional methods.
SMT is different from existing surrogate modeling libraries because of
its emphasis on derivatives, including training derivatives used for
gradient-enhanced modeling, prediction derivatives, and derivatives
with respect to the training data. It also includes new surrogate models
that are not available elsewhere: kriging by partial-least squares reduction
and energy-minimizing spline interpolation.
"""
# The C++ sources need C++11; the flag is only added on non-Windows
# platforms (MSVC does not accept -std=c++11).
extra_compile_args = []
if not sys.platform.startswith("win"):
    extra_compile_args.append("-std=c++11")
# Cython/C++ extension modules: the RBF, IDW and RMTS native solvers.
ext = (
    cythonize(
        Extension(
            "smt.surrogate_models.rbfclib",
            sources=["smt/src/rbf/rbf.cpp", "smt/src/rbf/rbfclib.pyx"],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
    + cythonize(
        Extension(
            "smt.surrogate_models.idwclib",
            sources=["smt/src/idw/idw.cpp", "smt/src/idw/idwclib.pyx"],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
    + cythonize(
        Extension(
            "smt.surrogate_models.rmtsclib",
            sources=[
                "smt/src/rmts/rmtsclib.pyx",
                "smt/src/rmts/utils.cpp",
                "smt/src/rmts/rmts.cpp",
                "smt/src/rmts/rmtb.cpp",
                "smt/src/rmts/rmtc.cpp",
            ],
            language="c++",
            extra_compile_args=extra_compile_args,
            include_dirs=[np.get_include()],
        )
    )
)
# Package metadata passed straight through to setuptools.setup().
metadata = dict(
    name="smt",
    version=__version__,
    description="The Surrogate Modeling Toolbox (SMT)",
    long_description=LONG_DESCRIPTION,
    author="<NAME> et al.",
    author_email="<EMAIL>",
    license="BSD-3",
    classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f],
    packages=[
        "smt",
        "smt.surrogate_models",
        "smt.problems",
        "smt.sampling_methods",
        "smt.utils",
        "smt.utils.neural_net",
        "smt.applications",
    ],
    install_requires=[
        "scikit-learn",
        "packaging",
        "pyDOE2",
        "matplotlib",
        "numpydoc",
        "scipy",
    ],
    python_requires=">=3.6",
    zip_safe=False,
    ext_modules=ext,
    url="https://github.com/SMTorg/smt", # use the URL to the github repo
    download_url="https://github.com/SMTorg/smt/releases",
)
setup(**metadata)
| [
"setuptools.setup",
"sys.platform.startswith",
"numpy.get_include"
] | [((3590, 3607), 'setuptools.setup', 'setup', ([], {}), '(**metadata)\n', (3595, 3607), False, 'from setuptools import setup, Extension\n'), ((1594, 1624), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (1617, 1624), False, 'import sys\n'), ((2694, 2710), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2708, 2710), True, 'import numpy as np\n'), ((1934, 1950), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1948, 1950), True, 'import numpy as np\n'), ((2226, 2242), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (2240, 2242), True, 'import numpy as np\n')] |
from experiment.train_single_model import Experiment
from sample.sample_model import get_model
from sample.sample_dataset import load_dataset
import torch
from algorithms.bayesian_optimization import Bayesian
from algorithms.grid_search_algorithm import GridSearch
from algorithms.evolutionary_optimization import EvolutionaryOptimization
from algorithms.reinforcement_learning_optimization import RLOptimization
import numpy as np
def calculate_reward(eps, train_loss, val_loss, alpha=0.33):
    """Blend an epoch-count term with a validation-loss term into one reward.

    Both terms decay exponentially, so fewer epochs and lower validation
    loss each push the reward towards 1.
    NOTE(review): train_loss is accepted but currently unused -- confirm
    whether it was meant to enter the reward.
    """
    epoch_term = np.exp(-(0.5 * eps))
    val_term = np.exp(-(0.5 * val_loss))
    return alpha * epoch_term + (1 - alpha) * val_term
def run_sample():
    """Train a single model once and print its rounded result metrics."""
    loss_fn = torch.nn.BCELoss()
    train_data, test_data = load_dataset()
    experiment = Experiment(get_model, loss_fn, train_data, test_data)
    results = experiment.run_experiment(0.15, 0.001)
    print()
    print("RESULTS:")
    for name, value in results.items():
        print(name + ":", round(value, 4))
def run_bayesian():
    """Search the hyper-parameter space with Bayesian optimisation."""
    loss_fn = torch.nn.BCELoss()
    train_data, test_data = load_dataset()
    experiment = Experiment(get_model, loss_fn, train_data, test_data)
    optimiser = Bayesian(experiment.run_experiment, calculate_reward, 100,
                         search_space_nm=[0.5, 2.5], search_space_lr=[0.001, 0.05])
    return optimiser.run()
def run_grid_search():
    """Search the hyper-parameter space with an exhaustive grid."""
    loss_fn = torch.nn.BCELoss()
    train_data, test_data = load_dataset()
    experiment = Experiment(get_model, loss_fn, train_data, test_data)
    optimiser = GridSearch(experiment.run_experiment, calculate_reward, 10,
                           search_space_nm=[0.5, 2.5], search_space_lr=[0.001, 0.05])
    return optimiser.run()
def run_evolutionary_optimization():
    """Search the hyper-parameter space with an evolutionary algorithm."""
    loss_fn = torch.nn.BCELoss()
    train_data, test_data = load_dataset()
    experiment = Experiment(get_model, loss_fn, train_data, test_data)
    optimiser = EvolutionaryOptimization(experiment.run_experiment, calculate_reward, 10,
                                         search_space_nm=[0.5, 2.5], search_space_lr=[0.001, 0.05])
    return optimiser.run()
def run_reinforcement_learning_optimization():
    """Search the hyper-parameter space with reinforcement learning."""
    loss_fn = torch.nn.BCELoss()
    train_data, test_data = load_dataset()
    experiment = Experiment(get_model, loss_fn, train_data, test_data)
    optimiser = RLOptimization(experiment.run_experiment, calculate_reward, 10,
                               search_space_nm=[0.5, 2.5], search_space_lr=[0.001, 0.05])
    return optimiser.run()
if __name__ == '__main__':
    # Run every optimisation strategy in sequence, each under its banner.
    stages = [
        ("----------RUN SAMPLE-----------", run_sample),
        ("\n\n----------RUN BAYESIAN---------", run_bayesian),
        ("\n\n----------GRID SEARCH----------", run_grid_search),
        ("\n\n---EVOLUTIONARY OPTIMIZATION---", run_evolutionary_optimization),
        ("\n\n-----REINFORCEMENT LEARNING----", run_reinforcement_learning_optimization),
    ]
    for banner, stage in stages:
        print(banner)
        stage()
"experiment.train_single_model.Experiment",
"algorithms.reinforcement_learning_optimization.RLOptimization",
"algorithms.grid_search_algorithm.GridSearch",
"algorithms.bayesian_optimization.Bayesian",
"numpy.exp",
"torch.nn.BCELoss",
"algorithms.evolutionary_optimization.EvolutionaryOptimization",
"sa... | [((599, 617), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (615, 617), False, 'import torch\n'), ((649, 663), 'sample.sample_dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (661, 663), False, 'from sample.sample_dataset import load_dataset\n'), ((669, 730), 'experiment.train_single_model.Experiment', 'Experiment', (['get_model', 'criterion', 'train_dataset', 'test_dataset'], {}), '(get_model, criterion, train_dataset, test_dataset)\n', (679, 730), False, 'from experiment.train_single_model import Experiment\n'), ((905, 923), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (921, 923), False, 'import torch\n'), ((955, 969), 'sample.sample_dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (967, 969), False, 'from sample.sample_dataset import load_dataset\n'), ((975, 1036), 'experiment.train_single_model.Experiment', 'Experiment', (['get_model', 'criterion', 'train_dataset', 'test_dataset'], {}), '(get_model, criterion, train_dataset, test_dataset)\n', (985, 1036), False, 'from experiment.train_single_model import Experiment\n'), ((1042, 1155), 'algorithms.bayesian_optimization.Bayesian', 'Bayesian', (['e.run_experiment', 'calculate_reward', '(100)'], {'search_space_nm': '[0.5, 2.5]', 'search_space_lr': '[0.001, 0.05]'}), '(e.run_experiment, calculate_reward, 100, search_space_nm=[0.5, 2.5\n ], search_space_lr=[0.001, 0.05])\n', (1050, 1155), False, 'from algorithms.bayesian_optimization import Bayesian\n'), ((1225, 1243), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1241, 1243), False, 'import torch\n'), ((1275, 1289), 'sample.sample_dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (1287, 1289), False, 'from sample.sample_dataset import load_dataset\n'), ((1295, 1356), 'experiment.train_single_model.Experiment', 'Experiment', (['get_model', 'criterion', 'train_dataset', 'test_dataset'], {}), '(get_model, criterion, train_dataset, test_dataset)\n', (1305, 1356), False, 'from 
experiment.train_single_model import Experiment\n'), ((1363, 1477), 'algorithms.grid_search_algorithm.GridSearch', 'GridSearch', (['e.run_experiment', 'calculate_reward', '(10)'], {'search_space_nm': '[0.5, 2.5]', 'search_space_lr': '[0.001, 0.05]'}), '(e.run_experiment, calculate_reward, 10, search_space_nm=[0.5, \n 2.5], search_space_lr=[0.001, 0.05])\n', (1373, 1477), False, 'from algorithms.grid_search_algorithm import GridSearch\n'), ((1562, 1580), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1578, 1580), False, 'import torch\n'), ((1612, 1626), 'sample.sample_dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (1624, 1626), False, 'from sample.sample_dataset import load_dataset\n'), ((1632, 1693), 'experiment.train_single_model.Experiment', 'Experiment', (['get_model', 'criterion', 'train_dataset', 'test_dataset'], {}), '(get_model, criterion, train_dataset, test_dataset)\n', (1642, 1693), False, 'from experiment.train_single_model import Experiment\n'), ((1700, 1827), 'algorithms.evolutionary_optimization.EvolutionaryOptimization', 'EvolutionaryOptimization', (['e.run_experiment', 'calculate_reward', '(10)'], {'search_space_nm': '[0.5, 2.5]', 'search_space_lr': '[0.001, 0.05]'}), '(e.run_experiment, calculate_reward, 10,\n search_space_nm=[0.5, 2.5], search_space_lr=[0.001, 0.05])\n', (1724, 1827), False, 'from algorithms.evolutionary_optimization import EvolutionaryOptimization\n'), ((1923, 1941), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (1939, 1941), False, 'import torch\n'), ((1973, 1987), 'sample.sample_dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (1985, 1987), False, 'from sample.sample_dataset import load_dataset\n'), ((1993, 2054), 'experiment.train_single_model.Experiment', 'Experiment', (['get_model', 'criterion', 'train_dataset', 'test_dataset'], {}), '(get_model, criterion, train_dataset, test_dataset)\n', (2003, 2054), False, 'from experiment.train_single_model import Experiment\n'), ((2061, 2178), 
'algorithms.reinforcement_learning_optimization.RLOptimization', 'RLOptimization', (['e.run_experiment', 'calculate_reward', '(10)'], {'search_space_nm': '[0.5, 2.5]', 'search_space_lr': '[0.001, 0.05]'}), '(e.run_experiment, calculate_reward, 10, search_space_nm=[0.5,\n 2.5], search_space_lr=[0.001, 0.05])\n', (2075, 2178), False, 'from algorithms.reinforcement_learning_optimization import RLOptimization\n'), ((508, 528), 'numpy.exp', 'np.exp', (['(-(0.5 * eps))'], {}), '(-(0.5 * eps))\n', (514, 528), True, 'import numpy as np\n'), ((541, 566), 'numpy.exp', 'np.exp', (['(-(0.5 * val_loss))'], {}), '(-(0.5 * val_loss))\n', (547, 566), True, 'import numpy as np\n')] |
from .codec import CTCCodec
from shared_code import preprocessor as prep
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time
import azure.functions as func
import grpc
import json
import logging
import numpy as np
import os
# Address of the gRPC model-serving backend, injected via environment
# variables (None if unset).
_HOST = os.environ.get("HANDWRITTEN_IPADDRESS")
_PORT = os.environ.get("HANDWRITTEN_PORT")
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
    """Recognise handwritten Japanese text in an uploaded JPEG image.

    Expects a POST request carrying a multipart file field named 'image'.
    The image is preprocessed, sent over gRPC to the model server
    (configured via HANDWRITTEN_IPADDRESS / HANDWRITTEN_PORT) and the
    decoded text is returned as JSON: {'count', 'timecost', 'text'}.
    Error responses: 400 (wrong method / file type / missing file),
    408 (gRPC deadline exceeded), 500 (other failures).
    """
    _NAME = 'image'
    event_id = context.invocation_id
    logging.info(
        f"Python handwritten function start process.\nID:{event_id}\nback server host:{_HOST}:{_PORT}")
    # Pre-bind request attributes so the broad exception handler at the
    # bottom can always log them (previously they could be unbound,
    # masking the original error with a NameError).
    method = url = params = None
    try:
        method = req.method
        url = req.url
        params = req.params
        # .get() so a request without the file field reaches the
        # 'no image files' 400 branch below instead of raising KeyError
        # (which previously surfaced as a generic 500).
        files = req.files.get(_NAME)
        if method != 'POST':
            # Fix: log the offending method (was logging files.content_type).
            logging.warning(
                f'ID:{event_id},the method was {method}.refused.')
            return func.HttpResponse(f'only accept POST method', status_code=400)
        if files:
            if files.content_type != 'image/jpeg':
                logging.warning(
                    f'ID:{event_id},the file type was {files.content_type}.refused.')
                return func.HttpResponse(f'only accept jpeg images', status_code=400)
            # Character list used by the CTC codec to decode model output.
            CHARSET_PATH = "./handwritten/kondate_nakayosi_char_list.txt"
            characters = prep.get_characters(CHARSET_PATH)
            codec = CTCCodec(characters)
            logging.info(f'Codec Success')
            # Pre-processing: load the JPEG and keep a single channel.
            img_bin = files.read()
            img = prep.to_pil_image(img_bin)
            if np.array(img).ndim == 3:
                img = np.array(img)[:, :, 0]
            else:
                img = np.array(img)
            img = img.astype(np.float32)
            # Model input geometry (NCHW): 1 x 1 x 96 x 2000.
            input_batch_size, input_channel, input_height, input_width = (
                1, 1, 96, 2000)
            input_image = prep.preprocess_input(
                img, height=input_height, width=input_width)[None, :, :, :]
            logging.info(f'input_image success')
            request = predict_pb2.PredictRequest()
            request.model_spec.name = 'handwritten-japanese-recognition'
            request.inputs["actual_input"].CopyFrom(
                make_tensor_proto(input_image, shape=input_image.shape))
            logging.info(f'Request detail success')
            # Send to the inference model over gRPC; the context manager
            # closes the channel when the call completes (was leaked).
            start = time()
            with grpc.insecure_channel("{}:{}".format(_HOST, _PORT)) as channel:
                stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
                output = stub.Predict(request, timeout=10.0)
            logging.info(f'Process success')
            result = make_ndarray(output.outputs["output"])
            timecost = time()-start
            logging.warning(f"Inference complete,Takes{timecost}")
            text = codec.decode(result)
            logging.info(f"TextLength{len(text[0])}")
            logging.info(f"TextType{type(text[0])}")
            if len(text[0]) == 0:
                return func.HttpResponse(f'AI model could not understand your handwriting', status_code=500)
            text = text[0]
            logging.info(f'Text Content{text}')
            response = {
                'count': len(text),
                'timecost': timecost,
                'text': text
            }
            return func.HttpResponse(json.dumps(response), mimetype="application/json")
        else:
            logging.warning(f'ID:{event_id},Failed to get file,down.')
            return func.HttpResponse(f'no image files', status_code=400)
    except grpc.RpcError as e:
        status_code = e.code()
        if "DEADLINE_EXCEEDED" in status_code.name:
            logging.error(e)
            return func.HttpResponse(f'the grpc request timeout', status_code=408)
        else:
            logging.error(f"grpcError:{e}")
            return func.HttpResponse(f'Failed to get grpcResponse', status_code=500)
    except Exception as e:
        logging.error(f"Error:{e}\n\
            url:{url}\n\
            method:{method}\n\
            params:{params}")
        return func.HttpResponse(f'Service Error.check the log.', status_code=500)
| [
"shared_code.preprocessor.get_characters",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
"azure.functions.HttpResponse",
"tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub",
"tensorflow.make_tensor_proto",
"json.dumps",
"logging.warning",
"os.environ.get",
"shared_cod... | [((370, 409), 'os.environ.get', 'os.environ.get', (['"""HANDWRITTEN_IPADDRESS"""'], {}), "('HANDWRITTEN_IPADDRESS')\n", (384, 409), False, 'import os\n'), ((418, 452), 'os.environ.get', 'os.environ.get', (['"""HANDWRITTEN_PORT"""'], {}), "('HANDWRITTEN_PORT')\n", (432, 452), False, 'import os\n'), ((596, 716), 'logging.info', 'logging.info', (['f"""Python handwritten function start process.\nID:{event_id}\nback server host:{_HOST}:{_PORT}"""'], {}), '(\n f"""Python handwritten function start process.\nID:{event_id}\nback server host:{_HOST}:{_PORT}"""\n )\n', (608, 716), False, 'import logging\n'), ((877, 955), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},the method was {files.content_type}.refused."""'], {}), "(f'ID:{event_id},the method was {files.content_type}.refused.')\n", (892, 955), False, 'import logging\n'), ((992, 1054), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""only accept POST method"""'], {'status_code': '(400)'}), "(f'only accept POST method', status_code=400)\n", (1009, 1054), True, 'import azure.functions as func\n'), ((1525, 1558), 'shared_code.preprocessor.get_characters', 'prep.get_characters', (['CHARSET_PATH'], {}), '(CHARSET_PATH)\n', (1544, 1558), True, 'from shared_code import preprocessor as prep\n'), ((1612, 1642), 'logging.info', 'logging.info', (['f"""Codec Success"""'], {}), "(f'Codec Success')\n", (1624, 1642), False, 'import logging\n'), ((1725, 1751), 'shared_code.preprocessor.to_pil_image', 'prep.to_pil_image', (['img_bin'], {}), '(img_bin)\n', (1742, 1751), True, 'from shared_code import preprocessor as prep\n'), ((2469, 2505), 'logging.info', 'logging.info', (['f"""input_image success"""'], {}), "(f'input_image success')\n", (2481, 2505), False, 'import logging\n'), ((2529, 2557), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (2555, 2557), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((2769, 2808), 
'logging.info', 'logging.info', (['f"""Request detail success"""'], {}), "(f'Request detail success')\n", (2781, 2808), False, 'import logging\n'), ((2873, 2879), 'time.time', 'time', ([], {}), '()\n', (2877, 2879), False, 'from time import time\n'), ((2973, 3031), 'tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub', 'prediction_service_pb2_grpc.PredictionServiceStub', (['channel'], {}), '(channel)\n', (3022, 3031), False, 'from tensorflow_serving.apis import prediction_service_pb2_grpc\n'), ((3101, 3133), 'logging.info', 'logging.info', (['f"""Process success"""'], {}), "(f'Process success')\n", (3113, 3133), False, 'import logging\n'), ((3155, 3193), 'tensorflow.make_ndarray', 'make_ndarray', (["output.outputs['output']"], {}), "(output.outputs['output'])\n", (3167, 3193), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((3243, 3297), 'logging.warning', 'logging.warning', (['f"""Inference complete,Takes{timecost}"""'], {}), "(f'Inference complete,Takes{timecost}')\n", (3258, 3297), False, 'import logging\n'), ((3992, 4027), 'logging.info', 'logging.info', (['f"""Text Content{text}"""'], {}), "(f'Text Content{text}')\n", (4004, 4027), False, 'import logging\n'), ((4287, 4345), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},Failed to get file,down."""'], {}), "(f'ID:{event_id},Failed to get file,down.')\n", (4302, 4345), False, 'import logging\n'), ((4365, 4418), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""no image files"""'], {'status_code': '(400)'}), "(f'no image files', status_code=400)\n", (4382, 4418), True, 'import azure.functions as func\n'), ((4825, 4980), 'logging.error', 'logging.error', (['f"""Error:{e}\n url:{url}\n method:{method}\n params:{params}"""'], {}), '(\n f"""Error:{e}\n url:{url}\n method:{method}\n params:{params}"""\n )\n', (4838, 4980), False, 'import logging\n'), ((4991, 5058), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""Service Error.check the 
log."""'], {'status_code': '(500)'}), "(f'Service Error.check the log.', status_code=500)\n", (5008, 5058), True, 'import azure.functions as func\n'), ((1141, 1227), 'logging.warning', 'logging.warning', (['f"""ID:{event_id},the file type was {files.content_type}.refused."""'], {}), "(\n f'ID:{event_id},the file type was {files.content_type}.refused.')\n", (1156, 1227), False, 'import logging\n'), ((1267, 1329), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""only accept jpeg images"""'], {'status_code': '(400)'}), "(f'only accept jpeg images', status_code=400)\n", (1284, 1329), True, 'import azure.functions as func\n'), ((1877, 1890), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1885, 1890), True, 'import numpy as np\n'), ((2233, 2299), 'shared_code.preprocessor.preprocess_input', 'prep.preprocess_input', (['img'], {'height': 'input_height', 'width': 'input_width'}), '(img, height=input_height, width=input_width)\n', (2254, 2299), True, 'from shared_code import preprocessor as prep\n'), ((2700, 2755), 'tensorflow.make_tensor_proto', 'make_tensor_proto', (['input_image'], {'shape': 'input_image.shape'}), '(input_image, shape=input_image.shape)\n', (2717, 2755), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((3218, 3224), 'time.time', 'time', ([], {}), '()\n', (3222, 3224), False, 'from time import time\n'), ((3713, 3802), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""AI model could not understand your handwriting"""'], {'status_code': '(500)'}), "(f'AI model could not understand your handwriting',\n status_code=500)\n", (3730, 3802), True, 'import azure.functions as func\n'), ((4209, 4229), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (4219, 4229), False, 'import json\n'), ((4546, 4562), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (4559, 4562), False, 'import logging\n'), ((4582, 4645), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""the grpc request timeout"""'], 
{'status_code': '(408)'}), "(f'the grpc request timeout', status_code=408)\n", (4599, 4645), True, 'import azure.functions as func\n'), ((4672, 4703), 'logging.error', 'logging.error', (['f"""grpcError:{e}"""'], {}), "(f'grpcError:{e}')\n", (4685, 4703), False, 'import logging\n'), ((4723, 4788), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""Failed to get grpcResponse"""'], {'status_code': '(500)'}), "(f'Failed to get grpcResponse', status_code=500)\n", (4740, 4788), True, 'import azure.functions as func\n'), ((1767, 1780), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1775, 1780), True, 'import numpy as np\n'), ((1814, 1827), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1822, 1827), True, 'import numpy as np\n')] |
from load_data import LoadData
from preprocess_data import PreprocessData
from model_data import CNNClassificationModel
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
import pandas as pd
import time
import datetime
import sys
import numpy as np
import torch
import argparse
import os
import pickle
# Fix the RNG seed so data shuffling/initialisation is reproducible.
np.random.seed(15)
# Prefer the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def str2bool(v):
    """Interpret an argparse flag value as a boolean.

    Bool values pass through unchanged; common textual spellings are
    matched case-insensitively. Anything else raises ArgumentTypeError.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def evaluate(x, y, model, criterion):
    """Return (loss, accuracy) of `model` on the given inputs.

    Switches the model to eval mode; accuracy compares the argmax over
    the last prediction axis against the labels in y.
    """
    model.eval()
    predictions = model(x)
    batch_loss = criterion(predictions, y)
    batch_accuracy = accuracy_score(y, predictions.argmax(-1))
    return batch_loss.item(), batch_accuracy
def get_batch(x, y, batch_size=50):
    """Shuffle (x, y) together and split them into batch_size slices.

    The last batch may be smaller than batch_size. The shuffle uses a
    fixed random_state, so the batching is deterministic across calls.
    """
    x, y = shuffle(x, y, random_state=13)
    batches = []
    for lo in range(0, len(x), batch_size):
        hi = min(lo + batch_size, len(x))
        batches.append((x[lo:hi], y[lo:hi]))
    return batches
def train(x_train, y_train, x_test, y_test, model, optimizer, criterion, model_name='model',
          model_store=False, batch_size=50, epochs=100, log_iter=10):
    """Train `model` and track train/test loss and accuracy per epoch.

    Parameters
    ----------
    x_train, y_train, x_test, y_test : tensors accepted by `model`/`criterion`.
    model : torch.nn.Module being optimised.
    optimizer : torch optimizer bound to `model`'s parameters.
    criterion : loss function (e.g. CrossEntropyLoss).
    model_name : basename used when checkpointing the best model.
    model_store : if True, checkpoint the model whenever test accuracy
        improves on the best seen so far (only after `log_iter` warm-up
        epochs).
    batch_size : mini-batch size.
    epochs : number of full passes over the training data.
    log_iter : print metrics every `log_iter` epochs.

    Returns
    -------
    (train_loss, train_accuracy, test_loss, test_accuracy) : per-epoch lists.
    """
    data_batches = get_batch(x_train, y_train, batch_size)
    train_loss, train_accuracy, test_loss, test_accuracy = [], [], [], []
    start_time = time.time()
    for epoch in range(epochs):
        model.train()  # back to training mode (evaluate() switches to eval)
        for batch_num, (x, y) in enumerate(data_batches):
            y_pred = model(x)
            loss = criterion(y_pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Same one-line carriage-return progress display as before.
            elapsed = datetime.timedelta(seconds=np.round(time.time() - start_time, 0))
            sys.stdout.write('\rEpoch:{} | Batch:{} | Time Running: {}'.format(
                epoch, batch_num, elapsed))
        trainloss, trainacc = evaluate(x_train, y_train, model, criterion)
        testloss, testacc = evaluate(x_test, y_test, model, criterion)
        # Checkpoint only after the warm-up period and only on improvement
        # over the best test accuracy recorded so far.
        if epoch > log_iter and testacc > np.max(test_accuracy) and model_store is True:
            torch.save(model, './models/' + model_name + '.torch')
        train_loss.append(trainloss)
        train_accuracy.append(trainacc)
        test_loss.append(testloss)
        test_accuracy.append(testacc)
        if epoch % log_iter == 0:
            print(' Train Acc {:.4f}, Train Loss {:.4f}, Test Acc {:.4f}, Test Loss {:.4f}'.format(
                trainacc, trainloss, testacc, testloss))
    print('\nBest Test Accuracy {:.4f}'.format(np.max(test_accuracy)))
    return train_loss, train_accuracy, test_loss, test_accuracy
# Command-line interface: data locations, training length, hyperparameters,
# model variants (pretrained/static/multichannel), and output options.
parser = argparse.ArgumentParser(description='CNN Based Text classifier CNN - Training')
# Data Locations
parser.add_argument('-dataset', type=str, default='MR',
                    help='Name of the Dataset to use')
parser.add_argument('-dataset_path', type=str, default=None,
                    help='Location to the Dataset')
parser.add_argument('-word2vec_path', type=str, default=None,
                    help='Location to GoogleNews-vectors-negative300.bin file')
# Model iterations and size control
parser.add_argument('-epochs', type=int, default=25,
                    help='Number of Epochs to train')
parser.add_argument('-epochs_log', type=int, default=5,
                    help='Log Accuracy after every X Epochs')
parser.add_argument('-batch_size', type=int, default=50,
                    help='Batch Size to use while training')
# Hyperparameters for Tuning
parser.add_argument('-optimizer', type=str, default='Adam',
                    help='Select optimizer from "Adam" or "Adadelta"')
parser.add_argument('-embedding_size', type=int, default=300,
                    help='Embedding size to be used in case of self trained embedding only, '
                         'redundant is using pretrained vector')
parser.add_argument('-lr', type=float, default=0.001,
                    help='Learning rate to use while training')
# Running Different variations of model
parser.add_argument('-use_pretrained_vector', type=str2bool, nargs='?',const=True, default=False,
                    help='To use Pretrained vector (word2vec) or not')
parser.add_argument('-keep_embedding_static', type=str2bool, nargs='?',const=True, default=False,
                    help='Would like to train/adjust the Embedding or not')
parser.add_argument('-use_multi_channel', type=str2bool, nargs='?', const=True, default=False,
                    help='Use multichannel or not')
# Storing Logs and Model
parser.add_argument('-model_name', type=str, default='model',
                    help='Provide a name to the model, use the names in the paper for logging')
parser.add_argument('-save_model', type=str2bool, nargs='?', const=True, default=False,
                    help='Would like to store the model or not, model is '
                         'stored by dataset name and model name arguments provided')
parser.add_argument('-log_results', type=str2bool, nargs='?', const=True, default=False,
                    help='Would like to log the final results of the model')
args = parser.parse_args()
print ('Arguments Loaded')
print (args)
print ('-' * 20)
# Load the raw dataset and fit the preprocessor (vocabulary, class mapping,
# maximum sentence length) on the union of train and test text.
data_loader = LoadData(dataset_name=args.dataset, dataset_path=args.dataset_path)
data_preprocessor = PreprocessData(w2v_path=args.word2vec_path, use_pretrained_vector=args.use_pretrained_vector)
data_preprocessor.reset()
data_preprocessor.set_dataset_name(args.dataset)
data_preprocessor.set_maximum_sentence_length(data_loader.data[0]['x_train'] + data_loader.data[0]['x_test'])
data_preprocessor.train_dictionary(data_loader.data[0]['x_train'] + data_loader.data[0]['x_test'],
                                   use_pretrained_vector=args.use_pretrained_vector)
data_preprocessor.train_classes(data_loader.data[0]['y_train'])
# Dataset summary printout.
print ('\n\nDataset Name - {}'.format(args.dataset))
print ('Number of Classes - {}'.format(data_preprocessor.classCount))
print ('Average Length of Sentences - {}'.format(np.round(data_preprocessor.get_average_sentence_length(data_loader.data[0]['x_train'] +
                                                                                                        data_loader.data[0]['x_test']), 0)))
print ('Max Length of Sentence - {}'.format(data_preprocessor.max_sen_len))
print ('Dataset Size - {}'.format(len(data_loader.data[0]['x_train'] + data_loader.data[0]['x_test'])))
print ('Number of Words - {}'.format(data_preprocessor.wordCount))
if args.use_pretrained_vector:
    print ('Number of Words in Word2Vec - {}'.format(data_preprocessor.wordCount_w2v))
# NOTE(review): more than one entry in data_loader.data appears to mean
# cross-validation folds ('CV') — confirm against LoadData.
print ('Test Data Size - {}'.format('CV' if len(data_loader.data) > 1 else len(data_loader.data[0]['x_test'])))
# Ensure output directories exist BEFORE anything writes into them.
if not os.path.isdir('models'):
    os.mkdir('models')
if not os.path.isdir('results'):
    # BUG FIX: the original created 'models' again here, so the 'results'
    # directory was never created and the later to_csv would fail.
    os.mkdir('results')
if args.save_model:
    # Persist the fitted preprocessor next to the model so inference can
    # reproduce the exact tokenisation and class mapping.
    # (Moved after directory creation: the original pickled into
    # './models/' before guaranteeing the directory existed.)
    with open('./models/' + args.model_name + '.preprocessor', 'wb') as f:
        pickle.dump(data_preprocessor, f)
# Train one model per fold in data_loader.data and collect each fold's
# best test accuracy.
accuracy = []
for d in data_loader.data:
    # Convert raw text/labels into index tensors for the model.
    x_train = data_preprocessor.sent2Index(d['x_train'])
    y_train = data_preprocessor.class2Index(d['y_train'])
    x_test = data_preprocessor.sent2Index(d['x_test'])
    y_test = data_preprocessor.class2Index(d['y_test'])
    # Fresh model per fold, placed on the selected device.
    model = CNNClassificationModel(use_pretrained_vector=args.use_pretrained_vector,
                                   pretrained_vector_weight=data_preprocessor.weights,
                                   word_count=data_preprocessor.wordCount + 1,
                                   embedding_size=args.embedding_size,
                                   number_of_classes=data_preprocessor.classCount,
                                   keep_embeddings_static=args.keep_embedding_static,
                                   use_multi_channel=args.use_multi_channel).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    if args.optimizer == 'Adadelta':
        optimizer = torch.optim.Adadelta(model.parameters(), lr=args.lr, rho=0.95, eps=1e-06, weight_decay=1e-03)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    train_loss, train_accuracy, test_loss, test_accuracy = train(x_train, y_train, x_test, y_test,
                                                                 model, optimizer, criterion,
                                                                 epochs=args.epochs,
                                                                 log_iter=args.epochs_log,
                                                                 model_name=args.dataset + '_' + args.model_name,
                                                                 model_store=args.save_model,
                                                                 batch_size=args.batch_size)
    # Record the fold's best epoch.
    accuracy.append(np.max(test_accuracy))
print('\nTest Accuracy {}'.format(np.mean(accuracy)))
# Append this run's mean test accuracy to a per-dataset CSV log,
# creating the file on first use.
if args.log_results:
    if not os.path.isfile('./results/' + args.dataset + '.csv'):
        df = pd.DataFrame({'Date': [], 'Model Type': [], 'Test Accuracy': [], 'Parameters': []})
    else:
        df = pd.read_csv('./results/' + args.dataset + '.csv')
    # NOTE(review): 'Parameters' stores the argparse Namespace, which is
    # serialised via its repr() in the CSV — confirm that is intended.
    df = pd.concat([df, pd.DataFrame({'Date': [datetime.datetime.now().strftime('%d %b %Y')],
                                    'Model Type': [args.model_name],
                                    'Test Accuracy': [np.mean(accuracy)],
                                    'Parameters': [args]})])
    df.to_csv('./results/' + args.dataset + '.csv', index=False)
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"load_data.LoadData",
"torch.cuda.is_available",
"preprocess_data.PreprocessData",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.max",
"os.path.isdir",
"numpy.random.seed",
"os.mkdir",
"pandas.DataFrame",
"argparse.ArgumentTypeError",
"os.p... | [((329, 347), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (343, 347), True, 'import numpy as np\n'), ((3846, 3925), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CNN Based Text classifier CNN - Training"""'}), "(description='CNN Based Text classifier CNN - Training')\n", (3869, 3925), False, 'import argparse\n'), ((6419, 6486), 'load_data.LoadData', 'LoadData', ([], {'dataset_name': 'args.dataset', 'dataset_path': 'args.dataset_path'}), '(dataset_name=args.dataset, dataset_path=args.dataset_path)\n', (6427, 6486), False, 'from load_data import LoadData\n'), ((6507, 6605), 'preprocess_data.PreprocessData', 'PreprocessData', ([], {'w2v_path': 'args.word2vec_path', 'use_pretrained_vector': 'args.use_pretrained_vector'}), '(w2v_path=args.word2vec_path, use_pretrained_vector=args.\n use_pretrained_vector)\n', (6521, 6605), False, 'from preprocess_data import PreprocessData\n'), ((979, 1009), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {'random_state': '(13)'}), '(x, y, random_state=13)\n', (986, 1009), False, 'from sklearn.utils import shuffle\n'), ((1662, 1673), 'time.time', 'time.time', ([], {}), '()\n', (1671, 1673), False, 'import time\n'), ((8054, 8077), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (8067, 8077), False, 'import os\n'), ((8083, 8101), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), "('models')\n", (8091, 8101), False, 'import os\n'), ((8110, 8134), 'os.path.isdir', 'os.path.isdir', (['"""results"""'], {}), "('results')\n", (8123, 8134), False, 'import os\n'), ((8140, 8158), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), "('models')\n", (8148, 8158), False, 'import os\n'), ((9025, 9052), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (9050, 9052), False, 'import torch\n'), ((382, 407), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (405, 407), False, 'import torch\n'), ((8012, 8045), 'pickle.dump', 
'pickle.dump', (['data_preprocessor', 'f'], {}), '(data_preprocessor, f)\n', (8023, 8045), False, 'import pickle\n'), ((9975, 9996), 'numpy.max', 'np.max', (['test_accuracy'], {}), '(test_accuracy)\n', (9981, 9996), True, 'import numpy as np\n'), ((10033, 10050), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (10040, 10050), True, 'import numpy as np\n'), ((10110, 10162), 'os.path.isfile', 'os.path.isfile', (["('./results/' + args.dataset + '.csv')"], {}), "('./results/' + args.dataset + '.csv')\n", (10124, 10162), False, 'import os\n'), ((10177, 10264), 'pandas.DataFrame', 'pd.DataFrame', (["{'Date': [], 'Model Type': [], 'Test Accuracy': [], 'Parameters': []}"], {}), "({'Date': [], 'Model Type': [], 'Test Accuracy': [],\n 'Parameters': []})\n", (10189, 10264), True, 'import pandas as pd\n'), ((10284, 10333), 'pandas.read_csv', 'pd.read_csv', (["('./results/' + args.dataset + '.csv')"], {}), "('./results/' + args.dataset + '.csv')\n", (10295, 10333), True, 'import pandas as pd\n'), ((653, 706), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (679, 706), False, 'import argparse\n'), ((3022, 3076), 'torch.save', 'torch.save', (['model', "('./models/' + model_name + '.torch')"], {}), "(model, './models/' + model_name + '.torch')\n", (3032, 3076), False, 'import torch\n'), ((3748, 3769), 'numpy.max', 'np.max', (['test_accuracy'], {}), '(test_accuracy)\n', (3754, 3769), True, 'import numpy as np\n'), ((8441, 8804), 'model_data.CNNClassificationModel', 'CNNClassificationModel', ([], {'use_pretrained_vector': 'args.use_pretrained_vector', 'pretrained_vector_weight': 'data_preprocessor.weights', 'word_count': '(data_preprocessor.wordCount + 1)', 'embedding_size': 'args.embedding_size', 'number_of_classes': 'data_preprocessor.classCount', 'keep_embeddings_static': 'args.keep_embedding_static', 'use_multi_channel': 'args.use_multi_channel'}), 
'(use_pretrained_vector=args.use_pretrained_vector,\n pretrained_vector_weight=data_preprocessor.weights, word_count=\n data_preprocessor.wordCount + 1, embedding_size=args.embedding_size,\n number_of_classes=data_preprocessor.classCount, keep_embeddings_static=\n args.keep_embedding_static, use_multi_channel=args.use_multi_channel)\n', (8463, 8804), False, 'from model_data import CNNClassificationModel\n'), ((2963, 2984), 'numpy.max', 'np.max', (['test_accuracy'], {}), '(test_accuracy)\n', (2969, 2984), True, 'import numpy as np\n'), ((10556, 10573), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (10563, 10573), True, 'import numpy as np\n'), ((10382, 10405), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10403, 10405), False, 'import datetime\n'), ((2741, 2752), 'time.time', 'time.time', ([], {}), '()\n', (2750, 2752), False, 'import time\n')] |
import gzip
import sys
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as linalg
import rmsd
import cheminfo
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
def center_of_mass(atoms, coordinates):
    """Return the mass-weighted mean position (3-vector).

    `atoms` holds the per-point weights (masses) and `coordinates` is an
    (N, 3) array of Cartesian positions.
    """
    weights = np.asarray(atoms)
    com = np.array([np.sum(weights * coordinates[:, axis]) for axis in range(3)])
    return com / np.sum(weights)
def get_inertia(atoms, coordinates):
    """Return the eigenvalues (principal moments) of the inertia tensor.

    Parameters
    ----------
    atoms : array-like, shape (N,)
        Per-point masses/weights.
    coordinates : ndarray, shape (N, 3)
        Cartesian positions.

    Returns
    -------
    ndarray, shape (3,)
        Eigenvalues of the symmetric inertia tensor, in eig()'s order
        (unsorted); callers such as get_ratio() sort them.

    FIX: the original shifted `coordinates` to the centre of mass
    *in place*, silently mutating the caller's array; this version works
    on a shifted copy.
    """
    # Centre of mass (mass-weighted mean position), computed locally.
    total_mass = np.sum(atoms)
    com = np.array([np.sum(atoms * coordinates[:, k]) for k in range(3)]) / total_mass
    centered = coordinates - com
    X, Y, Z = centered[:, 0], centered[:, 1], centered[:, 2]

    # Moments and products of inertia.
    Ixx = np.sum(atoms * (Y**2 + Z**2))
    Iyy = np.sum(atoms * (X**2 + Z**2))
    Izz = np.sum(atoms * (X**2 + Y**2))
    Ixy = np.sum(atoms * X * Y)
    Ixz = np.sum(atoms * X * Z)
    Iyz = np.sum(atoms * Y * Z)

    inertia = np.array([[Ixx, -Ixy, -Ixz],
                        [-Ixy, Iyy, -Iyz],
                        [-Ixz, -Iyz, Izz]])
    w, v = linalg.eig(inertia)
    return w
def get_inertia_diag(atoms, coordinates):
    """Return the diagonal elements (Ix, Iy, Iz) of the inertia tensor
    about the centre of mass.

    Note: `coordinates` is translated to the centre of mass *in place*,
    matching the original behaviour.
    """
    coordinates -= center_of_mass(atoms, coordinates)
    x, y, z = coordinates[:, 0], coordinates[:, 1], coordinates[:, 2]
    diag = np.zeros(3)
    diag[0] = np.sum(atoms * (y ** 2 + z ** 2))
    diag[1] = np.sum(atoms * (x ** 2 + z ** 2))
    diag[2] = np.sum(atoms * (x ** 2 + y ** 2))
    return diag
def get_ratio(inertia):
    """Return (I1/I3, I2/I3): the two smaller principal moments
    normalised by the largest.

    FIX: the original called inertia.sort(), reordering the caller's
    array in place; the sort now happens on a local copy.
    """
    moments = np.sort(inertia)
    ratio = np.zeros(2)
    ratio[0] = moments[0] / moments[2]
    ratio[1] = moments[1] / moments[2]
    return ratio
def generate_structure(smiles):
    """Build a 3-D structure (RDKit mol with explicit hydrogens) from a SMILES string.

    NOTE(review): MolFromSmiles returns None on a parse failure, in which
    case AddHs would raise before callers' "if molobj is None" checks can
    trigger — confirm inputs are pre-validated.
    """
    molobj = Chem.MolFromSmiles(smiles)
    molobj = Chem.AddHs(molobj)
    cheminfo.molobj_optimize(molobj)
    return molobj
def parse_molobj_conf(molobj, nconf=1000, dumpcoord=False):
    """Yield the inertia eigenvalues for up to *nconf* generated conformers.

    Parameters
    ----------
    molobj : RDKit mol object.
    nconf : int
        Number of conformers to generate.
    dumpcoord : bool
        If True, also write every conformer geometry to ``dump.xyz``
        after iteration completes.

    FIXES: removed the dead, never-used `inertias` list; the dump file is
    now written via a context manager so it is closed even on error.
    """
    atoms, _ = cheminfo.molobj_to_xyz(molobj)
    print("generating confs")
    conformers = cheminfo.molobj_conformers(molobj, nconf)
    print("generating confs, done")

    atomsstr = [str(atom) for atom in atoms]
    dumpxyz = []
    for conformer in conformers:
        coordinates = np.array(conformer.GetPositions())
        inertia = get_inertia(atoms, coordinates)
        if dumpcoord:
            dumpxyz.append(rmsd.set_coordinates(atomsstr, coordinates))
        yield inertia

    if dumpcoord:
        with open("dump.xyz", 'w') as f:
            f.write("\n".join(dumpxyz))
def clear_molobj(molobj, add_hydrogens=True, optimize=False):
    """Round-trip a mol object through SMILES to obtain a clean molecule
    with one embedded conformer.

    Returns the cleaned mol object, or None on any failure (unparsable
    SMILES, failed optimisation, or no conformer present).
    """
    smiles = cheminfo.molobj_to_smiles(molobj)
    molobj, status = cheminfo.smiles_to_molobj(smiles)
    if molobj is None:
        return None
    # NOTE(review): this call presumably embeds a conformer on molobj in
    # place; its return value is intentionally unused — confirm against
    # cheminfo.molobj_conformers.
    conformers = cheminfo.molobj_conformers(molobj, 1)
    if add_hydrogens:
        molobj = Chem.AddHs(molobj)
    if optimize:
        status = cheminfo.molobj_optimize_mmff(molobj)
        if status > 0:
            return None
    try:
        # Verify a conformer actually exists; GetConformer raises otherwise.
        molobj.GetConformer()
    except ValueError:
        return None
    return molobj
def parse_molobj(molobj):
    """Return the inertia-tensor eigenvalues of a mol object's 3-D geometry."""
    atoms, coordinates = cheminfo.molobj_to_xyz(molobj)
    inertia = get_inertia(atoms, coordinates)
    return inertia
def parse_xyz(filename):
    """Return the inertia-tensor eigenvalues for the geometry in an XYZ file."""
    atoms, coordinates = rmsd.get_coordinates_xyz(filename)
    inertia = get_inertia(atoms, coordinates)
    return inertia
def parse_sdf(filename, nconf=1):
    """Yield inertia eigenvalues for every parsable molecule in an SDF file.

    nconf=None uses the geometry stored in the file (one result per
    molecule); otherwise conformers are generated per molecule and each
    conformer yields one result.
    """
    suppl = Chem.SDMolSupplier(filename,
            removeHs=False,
            sanitize=True)
    for molobj in suppl:
        # Unparsable records come back as None; skip them.
        if molobj is None:
            continue
        if nconf is None:
            inertia = parse_molobj(molobj)
            yield inertia
        else:
            inertias = parse_molobj_conf(molobj, nconf=nconf)
            for inertia in inertias:
                yield inertia
def parse_sdfgz(filename):
    """Yield inertia eigenvalues for every parsable molecule in a gzipped SDF.

    FIX: the gzip handle is now closed deterministically when iteration
    finishes or the generator is closed (the original left it open until
    garbage collection).
    """
    with gzip.open(filename) as f:
        suppl = Chem.ForwardSDMolSupplier(f,
                removeHs=False,
                sanitize=True)
        for molobj in suppl:
            # Unparsable records come back as None; skip them.
            if molobj is None:
                continue
            yield parse_molobj(molobj)
def parse_smi(filename, sep=None, idx=0):
    """Yield inertia eigenvalues for 3-D structures generated from a SMILES file.

    Each line is split on *sep* (whitespace when None) and column *idx*
    is taken as the SMILES string.
    """
    with open(filename) as f:
        for i, line in enumerate(f):
            if sep is None:
                line = line.strip().split()
            else:
                line = line.strip().split(sep)
            smi = line[idx]
            molobj = generate_structure(smi)
            if molobj is None: continue
            inertia = parse_molobj(molobj)
            yield inertia
def parse_smigz(filename, sep=None, idx=0):
    """Yield inertia eigenvalues for structures generated from a gzipped SMILES file.

    Same column handling as parse_smi; lines are decoded from bytes first.
    """
    with gzip.open(filename) as f:
        for line in f:
            line = line.decode()
            if sep is None:
                line = line.strip().split()
            else:
                line = line.strip().split(sep)
            smi = line[idx]
            molobj = generate_structure(smi)
            if molobj is None: continue
            inertia = parse_molobj(molobj)
            yield inertia
def parse_filename(filename, nconf=None, **kwargs):
    """Dispatch to the parser matching the file extension.

    Supported extensions: .sdf, .sdf.gz, .smi, .smi.gz.  Returns a
    generator of inertia-eigenvalue arrays.  Extra keyword arguments are
    accepted for interface compatibility but currently unused.

    Raises
    ------
    ValueError
        For an unrecognised extension.  (FIX: the original printed a
        message and called quit(), which is hostile to library use.)
    """
    parts = filename.split(".")
    if parts[-1] == "gz":
        fileext = ".".join(parts[-2:])
    else:
        fileext = parts[-1]

    if fileext == "sdf.gz":
        return parse_sdfgz(filename)
    if fileext == "smi.gz":
        return parse_smigz(filename)
    if fileext == "sdf":
        return parse_sdf(filename, nconf=nconf)
    if fileext == "smi":
        return parse_smi(filename)
    raise ValueError("don't know how to parse file: {}".format(filename))
def procs_parse_sdfgz(filename, procs=1, per_procs=None):
    """Parse a gzipped SDF file across *procs* workers; return a flat result list."""
    with gzip.open(filename) as f:
        raw = f.read()
    records = raw.decode().split("$$$$\n")
    return procs_sdflist(records, procs=procs, per_procs=per_procs)
def procs_parse_sdf(filename, procs=1, per_procs=None):
    """Parse a plain SDF file across *procs* workers; return a flat result list."""
    with open(filename) as f:
        records = f.read().split("$$$$\n")
    return procs_sdflist(records, procs=procs, per_procs=per_procs)
def procs_sdflist(sdf_list, procs=1, per_procs=None):
    """Compute inertia results for a list of molblock strings in parallel.

    Parameters
    ----------
    sdf_list : list of str
        One SDF record per entry.
    procs : int
        Number of worker processes.
    per_procs : int or None
        Chunk size per job; defaults to an even split across *procs*.

    FIX: the multiprocessing.Pool is now closed and joined via a context
    manager (the original leaked the pool and its worker processes).
    """
    import multiprocessing

    total = len(sdf_list)
    if per_procs is None:
        per_procs = np.ceil(float(total) / float(procs))
    per_procs = int(per_procs)
    jobs = [sdf_list[start:start + per_procs] for start in range(0, total, per_procs)]
    with multiprocessing.Pool(processes=procs) as pool:
        chunked = pool.map(worker_sdfstr, jobs)
    # Flatten the per-job result lists.
    return [item for sublist in chunked for item in sublist]
def worker_sdfstr(lines, append_smiles=False, add_hydrogen=True, optimize=True):
    """Worker: convert molblock strings to inertia-eigenvalue rows.

    Parameters
    ----------
    lines : list of str
        Molblock records (one SDF entry each).
    append_smiles : bool
        If True, prepend the canonical SMILES to each result row.
    add_hydrogen, optimize : bool
        Kept for interface compatibility; hydrogen addition and
        optimisation are handled inside clear_molobj().

    FIX: removed the dead commented-out hydrogen/optimisation code that
    duplicated clear_molobj()'s responsibilities.
    """
    result = []
    for line in lines:
        molobj = Chem.MolFromMolBlock(line, removeHs=False)
        if molobj is None:
            continue
        molobj = clear_molobj(molobj)
        if molobj is None:
            continue
        inertia = parse_molobj(molobj)
        if append_smiles:
            inertia = [Chem.MolToSmiles(molobj)] + list(inertia)
        result.append(inertia)
    return result
def main():
    """CLI entry point: print inertia eigenvalues (or ratios) for a dataset file."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--filename', type=str, help="Calculate inertia of filename.{.sdf.gz,.smi.gz,.sdf,smi}")
    parser.add_argument('-j', '--procs', type=int, help="Use subprocess to run over more cores", default=1)
    parser.add_argument('--ratio', action="store_true", help="calculate ratio")
    parser.add_argument('--nconf', type=int, help="how many conformers per compound", default=None)
    parser.add_argument('--prepend-smiles', action="store_true", help="")
    # TODO re-generate 3D coordinates from SDF (for chembl database)
    # sdf -> molobj -> smiles -> molobj -> add h -> inertia
    args = parser.parse_args()
    # NOTE(review): if neither --procs > 1 nor --filename is given,
    # `generator` is never bound and the loop below raises NameError.
    if args.procs > 1:
        generator = procs_parse_sdf(args.filename, procs=args.procs)
    elif args.filename:
        generator = parse_filename(args.filename, nconf=args.nconf)
    for result in generator:
        if args.ratio:
            result = get_ratio(result)
            fmt = "{:5.3f}"
        else:
            fmt = "{:15.8f}"
        result = [fmt.format(x) for x in result]
        print(*result)
    return

if __name__ == "__main__":
    main()
| [
"cheminfo.molobj_conformers",
"gzip.open",
"rmsd.set_coordinates",
"numpy.array",
"rdkit.Chem.SDMolSupplier",
"cheminfo.smiles_to_molobj",
"argparse.ArgumentParser",
"rdkit.Chem.MolToSmiles",
"rdkit.Chem.ForwardSDMolSupplier",
"numpy.linalg.eig",
"rdkit.Chem.MolFromMolBlock",
"rdkit.Chem.AddHs... | [((258, 271), 'numpy.sum', 'np.sum', (['atoms'], {}), '(atoms)\n', (264, 271), True, 'import numpy as np\n'), ((357, 368), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (365, 368), True, 'import numpy as np\n'), ((381, 398), 'numpy.sum', 'np.sum', (['(atoms * X)'], {}), '(atoms * X)\n', (387, 398), True, 'import numpy as np\n'), ((408, 425), 'numpy.sum', 'np.sum', (['(atoms * Y)'], {}), '(atoms * Y)\n', (414, 425), True, 'import numpy as np\n'), ((435, 452), 'numpy.sum', 'np.sum', (['(atoms * Z)'], {}), '(atoms * Z)\n', (441, 452), True, 'import numpy as np\n'), ((870, 881), 'numpy.sum', 'np.sum', (['Ixx'], {}), '(Ixx)\n', (876, 881), True, 'import numpy as np\n'), ((892, 903), 'numpy.sum', 'np.sum', (['Iyy'], {}), '(Iyy)\n', (898, 903), True, 'import numpy as np\n'), ((914, 925), 'numpy.sum', 'np.sum', (['Izz'], {}), '(Izz)\n', (920, 925), True, 'import numpy as np\n'), ((937, 948), 'numpy.sum', 'np.sum', (['Ixy'], {}), '(Ixy)\n', (943, 948), True, 'import numpy as np\n'), ((959, 970), 'numpy.sum', 'np.sum', (['Ixz'], {}), '(Ixz)\n', (965, 970), True, 'import numpy as np\n'), ((981, 992), 'numpy.sum', 'np.sum', (['Iyz'], {}), '(Iyz)\n', (987, 992), True, 'import numpy as np\n'), ((1008, 1024), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1016, 1024), True, 'import numpy as np\n'), ((1251, 1270), 'numpy.linalg.eig', 'linalg.eig', (['inertia'], {}), '(inertia)\n', (1261, 1270), True, 'import numpy.linalg as linalg\n'), ((1610, 1620), 'numpy.sum', 'np.sum', (['Ix'], {}), '(Ix)\n', (1616, 1620), True, 'import numpy as np\n'), ((1630, 1640), 'numpy.sum', 'np.sum', (['Iy'], {}), '(Iy)\n', (1636, 1640), True, 'import numpy as np\n'), ((1650, 1660), 'numpy.sum', 'np.sum', (['Iz'], {}), '(Iz)\n', (1656, 1660), True, 'import numpy as np\n'), ((1676, 1687), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1684, 1687), True, 'import numpy as np\n'), ((1852, 1863), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1860, 1863), 
True, 'import numpy as np\n'), ((2004, 2030), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (2022, 2030), True, 'import rdkit.Chem as Chem\n'), ((2044, 2062), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['molobj'], {}), '(molobj)\n', (2054, 2062), True, 'import rdkit.Chem as Chem\n'), ((2067, 2099), 'cheminfo.molobj_optimize', 'cheminfo.molobj_optimize', (['molobj'], {}), '(molobj)\n', (2091, 2099), False, 'import cheminfo\n'), ((2207, 2237), 'cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (2229, 2237), False, 'import cheminfo\n'), ((2287, 2328), 'cheminfo.molobj_conformers', 'cheminfo.molobj_conformers', (['molobj', 'nconf'], {}), '(molobj, nconf)\n', (2313, 2328), False, 'import cheminfo\n'), ((2952, 2985), 'cheminfo.molobj_to_smiles', 'cheminfo.molobj_to_smiles', (['molobj'], {}), '(molobj)\n', (2977, 2985), False, 'import cheminfo\n'), ((3007, 3040), 'cheminfo.smiles_to_molobj', 'cheminfo.smiles_to_molobj', (['smiles'], {}), '(smiles)\n', (3032, 3040), False, 'import cheminfo\n'), ((3103, 3140), 'cheminfo.molobj_conformers', 'cheminfo.molobj_conformers', (['molobj', '(1)'], {}), '(molobj, 1)\n', (3129, 3140), False, 'import cheminfo\n'), ((3477, 3507), 'cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (3499, 3507), False, 'import cheminfo\n'), ((3628, 3662), 'rmsd.get_coordinates_xyz', 'rmsd.get_coordinates_xyz', (['filename'], {}), '(filename)\n', (3652, 3662), False, 'import rmsd\n'), ((3779, 3838), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['filename'], {'removeHs': '(False)', 'sanitize': '(True)'}), '(filename, removeHs=False, sanitize=True)\n', (3797, 3838), True, 'import rdkit.Chem as Chem\n'), ((4208, 4227), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (4217, 4227), False, 'import gzip\n'), ((4240, 4299), 'rdkit.Chem.ForwardSDMolSupplier', 'Chem.ForwardSDMolSupplier', (['f'], {'removeHs': '(False)', 'sanitize': '(True)'}), '(f, 
removeHs=False, sanitize=True)\n', (4265, 4299), True, 'import rdkit.Chem as Chem\n'), ((6808, 6845), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'procs'}), '(processes=procs)\n', (6828, 6845), False, 'import multiprocessing\n'), ((7816, 7841), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7839, 7841), False, 'import argparse\n'), ((2551, 2572), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (2559, 2572), True, 'import numpy as np\n'), ((3181, 3199), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['molobj'], {}), '(molobj)\n', (3191, 3199), True, 'import rdkit.Chem as Chem\n'), ((3235, 3272), 'cheminfo.molobj_optimize_mmff', 'cheminfo.molobj_optimize_mmff', (['molobj'], {}), '(molobj)\n', (3264, 3272), False, 'import cheminfo\n'), ((4937, 4956), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (4946, 4956), False, 'import gzip\n'), ((6015, 6034), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (6024, 6034), False, 'import gzip\n'), ((7134, 7176), 'rdkit.Chem.MolFromMolBlock', 'Chem.MolFromMolBlock', (['line'], {'removeHs': '(False)'}), '(line, removeHs=False)\n', (7154, 7176), True, 'import rdkit.Chem as Chem\n'), ((7648, 7672), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['molobj'], {}), '(molobj)\n', (7664, 7672), True, 'import rdkit.Chem as Chem\n'), ((2673, 2716), 'rmsd.set_coordinates', 'rmsd.set_coordinates', (['atomsstr', 'coordinates'], {}), '(atomsstr, coordinates)\n', (2693, 2716), False, 'import rmsd\n')] |
# ************** START OF PROGRAM ***************** #
# ************** TEST PROGRAM ***************** #
# **************** PROGRAM TO CREATE THE DATASET ***************** #
import cv2
import numpy as np
import os

# Sharpening kernel applied to every frame before saving.
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
path = "D:/Applied Hands-on/7th sem - Minor project/Data/Test_Data"
# Live capture from the default camera (DirectShow backend).
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, 1000)  # requested frame width
cap.set(4, 1000)  # requested frame height
img_counter = 1
while True:
    # Read a frame and preprocess it: grayscale -> blur -> mirror ->
    # crop -> sharpen -> resize to 224x224.
    success, img = cap.read()
    imgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgblur = cv2.GaussianBlur(imgGrey, (3, 3), sigmaX=0, sigmaY=0)
    imgOut = cv2.flip(imgblur, 1)
    imgOut = imgOut[300:850, 850:2700]
    imgOut = cv2.filter2D(imgOut, -1, kernel)
    imgOut = cv2.resize(imgOut, (224, 224))
    cv2.imshow("Data", imgOut)

    k = cv2.waitKey(1)
    if k % 256 == 27:  # ESC quits
        print("Escape hit")
        break
    # Save the current preprocessed frame on every SPACE key press.
    elif k % 256 == 32:
        filename = f"ImTest ({img_counter}).png"
        cv2.imwrite(os.path.join(path, filename), imgOut)
        # BUG FIX: the old message reported "ImThumb_Little_..." although
        # the file was actually saved as "ImTest (...).png".
        print(f"{filename} written")
        img_counter += 1

# FIX: release the camera and close the preview window (missing before).
cap.release()
cv2.destroyAllWindows()
# ************** END OF PROGRAM ***************** #
| [
"cv2.flip",
"os.path.join",
"cv2.filter2D",
"cv2.imshow",
"numpy.array",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.waitKey"
] | [((363, 414), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (371, 414), True, 'import numpy as np\n'), ((491, 525), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (507, 525), False, 'import cv2\n'), ((735, 772), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (747, 772), False, 'import cv2\n'), ((788, 841), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGrey', '(3, 3)'], {'sigmaX': '(0)', 'sigmaY': '(0)'}), '(imgGrey, (3, 3), sigmaX=0, sigmaY=0)\n', (804, 841), False, 'import cv2\n'), ((856, 876), 'cv2.flip', 'cv2.flip', (['imgblur', '(1)'], {}), '(imgblur, 1)\n', (864, 876), False, 'import cv2\n'), ((931, 963), 'cv2.filter2D', 'cv2.filter2D', (['imgOut', '(-1)', 'kernel'], {}), '(imgOut, -1, kernel)\n', (943, 963), False, 'import cv2\n'), ((978, 1008), 'cv2.resize', 'cv2.resize', (['imgOut', '(224, 224)'], {}), '(imgOut, (224, 224))\n', (988, 1008), False, 'import cv2\n'), ((1014, 1040), 'cv2.imshow', 'cv2.imshow', (['"""Data"""', 'imgOut'], {}), "('Data', imgOut)\n", (1024, 1040), False, 'import cv2\n'), ((1050, 1064), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1061, 1064), False, 'import cv2\n'), ((1270, 1319), 'os.path.join', 'os.path.join', (['path', 'f"""ImTest ({img_counter}).png"""'], {}), "(path, f'ImTest ({img_counter}).png')\n", (1282, 1319), False, 'import os\n')] |
import sdp.scripts.load_nstx_exp_ref as nstx_exp
import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp
import sdp.plasma.analysis as ana
import matplotlib.pyplot as plt
import pickle
import numpy as np
# Load reflection-point positions and the XGC density data.
# NOTE(review): text-mode 'r' works for pickle only under Python 2; under
# Python 3 this open must be 'rb' — confirm the target interpreter.
with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/ref_pos.pck','r') as f:
    ref_pos = pickle.load(f)
dne_ana = ana.XGC_Density_Loader('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/dne_file.sav.npz')
n_channel = 16
#create the distance matrix, dx[i,j] is the absolute distance between the reflection points of i-th and j-th channel
dx = np.absolute(np.zeros((n_channel,n_channel))+ref_pos[np.newaxis,:]-ref_pos[:,np.newaxis])
#calculate cross-correlation matrix from synthetic signals
cc_fwr = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_out)
cc_fwr2 = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_amp2_out)
cc_fwr01 = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref2d_amp01_out)
cc_3d = fwrpp.pp.Cross_Correlation_by_fft(fwrpp.ref3d_out)
# Self-correlation (coherence) of the same synthetic signal sets.
cs_fwr = fwrpp.pp.Self_Correlation(fwrpp.ref2d_out)
cs_fwr2 = fwrpp.pp.Self_Correlation(fwrpp.ref2d_amp2_out)
cs_fwr01 = fwrpp.pp.Self_Correlation(fwrpp.ref2d_amp01_out)
cs_3d = fwrpp.pp.Self_Correlation(fwrpp.ref3d_out)
print('FWR data loaded')
#calculate cross-correlation matrix from experimental signals, note that for our case, the simulated time slice is at t=0.632s, so we choose corresponding experimental data from 0.632-0.640, the total sample number is chosen to be 2000 because larger sample doesn't bring in any difference, since the increased samples are not statistical independent.
cc_exp = nstx_exp.analyser.Cross_Correlation_by_fft(0.632,0.640,8000)
#cc_exp_short = nstx_exp.analyser.Cross_Correlation_by_fft(0.634,0.6348,8000)
#calculate coherent signal for all channels from NSTX. The result is an 2D array containing time series of coherent signal from all the channels.
cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,2e-5,1e-4)
print('nstx data loaded')
#choose the channel ranges representing top/bottom part of pedestal, and center channels for each region.
top_center = 11
top_range = [8,12]
bottom_center = 6
bottom_range = [2,7]
#pick chosen data from whole correlation matrices
# Module-level holders filled in by pick_top() below.
fwr_top=[]
fwr2_top = []
fwr01_top=[]
fwr3d_top=[]
exp_top = []
dx_top=[]
def pick_top():
    """Slice out the top-of-pedestal rows (centre channel vs. its
    neighbours) from every correlation matrix into module-level arrays."""
    global fwr_top,fwr2_top,exp_top,dx_top,fwr01_top,fwr3d_top
    fwr_top = np.absolute(cc_fwr[top_center,top_range[0]:top_range[1]])
    fwr2_top = np.absolute(cc_fwr2[top_center,top_range[0]:top_range[1]])
    fwr01_top = np.absolute(cc_fwr01[top_center,top_range[0]:top_range[1]])
    fwr3d_top = np.absolute(cc_3d[top_center,top_range[0]:top_range[1]])
    exp_top = np.absolute(cc_exp[top_center,top_range[0]:top_range[1]])
    dx_top = dx[top_center,top_range[0]:top_range[1]]
pick_top()
# Module-level holders filled in by pick_bottom() below.
fwr_bot=[]
fwr2_bot=[]
fwr01_bot = []
fwr3d_bot = []
exp_bot=[]
dx_bot=[]
def pick_bottom():
    """Slice out the bottom-of-pedestal rows from every correlation
    matrix into module-level arrays (mirrors pick_top)."""
    global fwr_bot,fwr2_bot,fwr01_bot,exp_bot,dx_bot,fwr3d_bot
    fwr_bot = np.absolute(cc_fwr[bottom_center,bottom_range[0]:bottom_range[1]])
    fwr2_bot = np.absolute(cc_fwr2[bottom_center,bottom_range[0]:bottom_range[1]])
    fwr01_bot = np.absolute(cc_fwr01[bottom_center,bottom_range[0]:bottom_range[1]])
    fwr3d_bot = np.absolute(cc_3d[bottom_center,bottom_range[0]:bottom_range[1]])
    exp_bot = np.absolute(cc_exp[bottom_center,bottom_range[0]:bottom_range[1]])
    dx_bot = dx[bottom_center,bottom_range[0]:bottom_range[1]]
pick_bottom()
#fitting with gaussian(for bottom) and exponential(for top)
# Module-level holders for the top-region fit results; populated by fit_top().
xmax_t = 0
xfit_t = 0
fwr_fit_t = 0
fwr2_fit_t = 0
fwr01_fit_t = 0
fwr3d_fit_t = 0
exp_fit_t = 0
fwr_t_a,fwr_t_sa = 0,0
fwr2_t_a,fwr2_t_sa = 0,0
fwr01_t_a,fwr01_t_sa = 0,0
fwr3d_t_a,fwr3d_t_sa = 0,0
exp_t_a,exp_t_sa = 0,0
xgc_fit_t = 0
xgc_t_a,xgc_t_sa = 0,0
x_t,dne_c_t = 0,0
def fit_top():
global fwr_t_a,fwr_t_sa,fwr2_t_a,fwr2_t_sa,fwr01_t_a,fwr01_t_sa,fwr3d_t_a,fwr3d_t_sa,exp_t_a,expt_sa,xmax_t,xfit_t,fwr_fit_t,fwr2_fit_t,exp_fit_t,fwr01_fit_t,fwr3d_fit_t,xgc_fit_t,xgc_t_a,xgc_t_sa,x_t,dne_c_t
fwr_t_a,fwr_t_sa = fwrpp.pp.fitting_cross_correlation(fwr_top,dx_top,'exponential')
fwr2_t_a,fwr2_t_sa = fwrpp.pp.fitting_cross_correlation(fwr2_top,dx_top,'exponential')
fwr01_t_a,fwr01_t_sa = fwrpp.pp.fitting_cross_correlation(fwr01_top,dx_top,'exponential')
fwr3d_t_a,fwr3d_t_sa = fwrpp.pp.fitting_cross_correlation(fwr3d_top,dx_top,'exponential')
exp_t_a,exp_t_sa = fwrpp.pp.fitting_cross_correlation(exp_top,dx_top,'exponential')
opt_t,x_t,dne_c_t = dne_ana.density_correlation(ref_pos[top_center],width = ref_pos[top_range[0]]-ref_pos[top_center])
xgc_t_a,xgc_t_sa = opt_t
xmax_t = 2*np.max((np.abs(fwr_t_a),np.abs(fwr2_t_a),np.abs(exp_t_a)))
xfit_t = np.linspace(0,xmax_t,500)
fwr_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr_t_a)
fwr2_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr2_t_a)
fwr01_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr01_t_a)
fwr3d_fit_t = fwrpp.pp.exponential_fit(xfit_t,fwr3d_t_a)
exp_fit_t = fwrpp.pp.exponential_fit(xfit_t,exp_t_a)
xgc_fit_t = ana.gaussian_correlation_func(xfit_t,xgc_t_a)
fit_top()
# Module-level placeholders for the lower-pedestal fit results; fit_bot()
# (called right below) fills them in.
xmax_b = 0
xfit_b = 0
fwr_fit_b = 0
fwr2_fit_b = 0
fwr01_fit_b = 0
fwr3d_fit_b = 0
exp_fit_b = 0
fwr_b_a,fwr_b_sa = 0,0
fwr2_b_a,fwr2_b_sa = 0,0
fwr01_b_a,fwr01_b_sa = 0,0
fwr3d_b_a,fwr3d_b_sa = 0,0
exp_b_a,exp_b_sa = 0,0
xgc_fit_b = 0
xgc_b_a,xgc_b_sa = 0,0
x_b,dne_c_b = 0,0
def fit_bot():
    """Fit gaussian correlation functions to the lower-pedestal data and
    evaluate all fitted curves on a common abscissa.

    Results are stored in the module-level *_b variables declared above.
    """
    # BUG FIX: the original global list said 'expt_sa' (a typo for
    # 'exp_b_sa'), which made exp_b_sa a function local and left the
    # module-level exp_b_sa at 0.
    global fwr_b_a, fwr_b_sa, fwr2_b_a, fwr2_b_sa, fwr01_b_a, fwr01_b_sa, \
           fwr3d_b_a, fwr3d_b_sa, exp_b_a, exp_b_sa, xmax_b, xfit_b, \
           fwr_fit_b, fwr2_fit_b, exp_fit_b, fwr01_fit_b, fwr3d_fit_b, \
           xgc_fit_b, xgc_b_a, xgc_b_sa, x_b, dne_c_b
    # Fit every simulated/measured data set with a gaussian.
    fwr_b_a, fwr_b_sa = fwrpp.pp.fitting_cross_correlation(fwr_bot, dx_bot, 'gaussian')
    fwr2_b_a, fwr2_b_sa = fwrpp.pp.fitting_cross_correlation(fwr2_bot, dx_bot, 'gaussian')
    fwr01_b_a, fwr01_b_sa = fwrpp.pp.fitting_cross_correlation(fwr01_bot, dx_bot, 'gaussian')
    fwr3d_b_a, fwr3d_b_sa = fwrpp.pp.fitting_cross_correlation(fwr3d_bot, dx_bot, 'gaussian')
    exp_b_a, exp_b_sa = fwrpp.pp.fitting_cross_correlation(exp_bot, dx_bot, 'gaussian')
    # XGC1 density correlation over the same region.
    opt_b, x_b, dne_c_b = dne_ana.density_correlation(ref_pos[bottom_center], width=ref_pos[bottom_range[0]] - ref_pos[bottom_center])
    xgc_b_a, xgc_b_sa = opt_b
    # Gaussian fit parameter is a squared length, hence the sqrt here
    # (contrast with the exponential case in fit_top()).
    xmax_b = 2 * np.sqrt(np.max((np.abs(fwr_b_a), np.abs(fwr2_b_a), np.abs(exp_b_a))))
    xfit_b = np.linspace(0, xmax_b, 500)
    fwr_fit_b = fwrpp.pp.gaussian_fit(xfit_b, fwr_b_a)
    fwr2_fit_b = fwrpp.pp.gaussian_fit(xfit_b, fwr2_b_a)
    fwr01_fit_b = fwrpp.pp.gaussian_fit(xfit_b, fwr01_b_a)
    fwr3d_fit_b = fwrpp.pp.gaussian_fit(xfit_b, fwr3d_b_a)
    exp_fit_b = fwrpp.pp.gaussian_fit(xfit_b, exp_b_a)
    xgc_fit_b = ana.gaussian_correlation_func(xfit_b, xgc_b_a)
fit_bot()
print('fitting complete')
print('fitting curve ready. call plot() to plot. note that the default region is top, pass "bottom" as the argument to plot bottom region. ')
# Plot the data points and fitted curves.
total_plot = 0  # number of figures opened via plot(); consumed by clear_all()
# Top data
def _finish_axes(xlabel, ylabel, legend_size):
    # Shared tail of every plot() branch: axis labels, legend, tight layout.
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend(labelspacing=0.2, prop={'size': legend_size})
    plt.tight_layout()
def plot(region='top'):
    """Plot the cross-correlation data points and fitted curves.

    region -- one of 'top', 'bottom', '2d/3d_top', '2d/3d_bot', '3d_bot',
              '3d_top'; any other value plots nothing.
    """
    global total_plot
    # Figure bookkeeping is currently disabled, so clear_all() is a no-op
    # until these two lines are re-enabled.
    #plt.figure()
    #total_plot += 1
    if region == 'top':
        plt.title('Cross-Correlation at Upper Pedestal,center_channel at {0:.4}m'.format(ref_pos[top_center]))
        plt.plot(dx_top, exp_top, 'bs', label='exp data')
        plt.plot(dx_top, fwr_top, 'ro', label='FWR data amp=1')
        plt.plot(dx_top, fwr2_top, 'r^', label='FWR data amp=2')
        plt.plot(dx_top, fwr01_top, 'r+', label='FWR data amp=0.1')
        plt.plot(xfit_t, exp_fit_t, 'b-', label='exp exponential fit')
        plt.plot(xfit_t, fwr_fit_t, 'r--', label='FWR fit')
        plt.plot(xfit_t, fwr2_fit_t, 'r-.', label='FWR amp2 fit')
        plt.plot(xfit_t, fwr01_fit_t, 'r:', label='FWR amp0.1 fit')
        _finish_axes('distance from center channel reflection($m$)',
                     'cross-correlation', 12)
    elif region == 'bottom':
        plt.title('Cross-Correlation at Lower Pedestal,center_channel at {0:.4}m'.format(ref_pos[bottom_center]))
        plt.plot(dx_bot, exp_bot, 'bs', label='exp data')
        plt.plot(dx_bot, fwr_bot, 'ro', label='FWR data amp=1')
        plt.plot(dx_bot, fwr2_bot, 'r^', label='FWR data amp=2')
        plt.plot(dx_bot, fwr01_bot, 'r+', label='FWR data amp=0.1')
        plt.plot(xfit_b, exp_fit_b, 'b-', label='exp gaussian fit')
        plt.plot(xfit_b, fwr_fit_b, 'r--', label='FWR fit')
        plt.plot(xfit_b, fwr2_fit_b, 'r-.', label='FWR amp2 fit')
        plt.plot(xfit_b, fwr01_fit_b, 'r:', label='FWR amp0.1 fit')
        _finish_axes('distance from center channel reflection($m$)',
                     'cross-correlation', 12)
    elif region == '2d/3d_top':
        plt.title('Cross-Correlation at Upper Pedestal,center_channel at {0:.4}m'.format(ref_pos[top_center]))
        plt.plot(dx_top, exp_top, 'bs', label='exp data')
        plt.plot(dx_top, fwr_top, 'ro', label='FWR2D data')
        plt.plot(dx_top, fwr3d_top, 'r^', label='FWR3D data')
        plt.plot(xfit_t, exp_fit_t, 'b-', label='exp exponential fit')
        plt.plot(xfit_t, fwr_fit_t, 'r--', label='FWR2D fit')
        plt.plot(xfit_t, fwr3d_fit_t, 'r-.', label='FWR3D fit')
        _finish_axes('distance from center channel reflection($m$)',
                     'cross-correlation', 12)
    elif region == '2d/3d_bot':
        #plt.title('Cross-Correlation at Lower Pedestal,center_channel at {0:.4}m'.format(ref_pos[bottom_center]))
        plt.plot(dx_bot, exp_bot, 'bs', label='exp data')
        plt.plot(dx_bot, fwr_bot, 'go', label='FWR2D data')
        plt.plot(dx_bot, fwr3d_bot, 'r^', label='FWR3D data')
        plt.plot(xfit_b, exp_fit_b, 'b-')
        plt.plot(xfit_b, fwr_fit_b, 'g--')
        plt.plot(xfit_b, fwr3d_fit_b, 'r-.')
        _finish_axes('$distance from center channel(mm)$', '$\gamma$', 15)
    elif region == '3d_bot':
        plt.title('2D/3D Cross-Correlation and XGC1 Density Correlation, Lower')
        plt.plot(dx_bot, fwr_bot, 'ro', label='2D')
        plt.plot(dx_bot, fwr3d_bot, 'r^', label='3D')
        plt.plot(x_b, dne_c_b, 'bs', label='XGC')
        plt.plot(xfit_b, fwr_fit_b, 'r-.', label='2D fit')
        plt.plot(xfit_b, fwr3d_fit_b, 'r--', label='3D fit')
        plt.plot(xfit_b, xgc_fit_b, 'b-', label='XGC fit')
        # Label typos fixed: 'relfection' -> 'reflection',
        # 'cross-corelation' -> 'cross-correlation'.
        _finish_axes('distance from center channel reflection($m$)',
                     'cross-correlation', 12)
    elif region == '3d_top':
        plt.title('2D/3D Cross-Correlation and XGC1 Density Correlation, Upper')
        plt.plot(dx_top, fwr_top, 'ro', label='2D')
        plt.plot(dx_top, fwr3d_top, 'r^', label='3D')
        plt.plot(x_t, dne_c_t, 'bs', label='XGC')
        plt.plot(xfit_t, fwr_fit_t, 'r-.', label='2D fit')
        plt.plot(xfit_t, fwr3d_fit_t, 'r--', label='3D fit')
        plt.plot(xfit_t, xgc_fit_t, 'b-', label='XGC fit')
        _finish_axes('distance from center channel reflection($m$)',
                     'cross-correlation', 12)
def clear_all():
    """Close every figure that plot() has opened (as counted by total_plot)."""
    global total_plot
    for _ in range(total_plot):
        plt.close()
# Coherent Signal comparison
| [
"sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit",
"matplotlib.pyplot.ylabel",
"sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Self_Correlation",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.abs",
"sdp.plasma.analysis.XGC_Density_L... | [((333, 430), 'sdp.plasma.analysis.XGC_Density_Loader', 'ana.XGC_Density_Loader', (['"""/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/dne_file.sav.npz"""'], {}), "(\n '/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/dne_file.sav.npz')\n", (355, 430), True, 'import sdp.plasma.analysis as ana\n'), ((725, 775), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Cross_Correlation_by_fft', 'fwrpp.pp.Cross_Correlation_by_fft', (['fwrpp.ref2d_out'], {}), '(fwrpp.ref2d_out)\n', (758, 775), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((786, 841), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Cross_Correlation_by_fft', 'fwrpp.pp.Cross_Correlation_by_fft', (['fwrpp.ref2d_amp2_out'], {}), '(fwrpp.ref2d_amp2_out)\n', (819, 841), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((853, 909), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Cross_Correlation_by_fft', 'fwrpp.pp.Cross_Correlation_by_fft', (['fwrpp.ref2d_amp01_out'], {}), '(fwrpp.ref2d_amp01_out)\n', (886, 909), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((918, 968), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Cross_Correlation_by_fft', 'fwrpp.pp.Cross_Correlation_by_fft', (['fwrpp.ref3d_out'], {}), '(fwrpp.ref3d_out)\n', (951, 968), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((979, 1021), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Self_Correlation', 'fwrpp.pp.Self_Correlation', (['fwrpp.ref2d_out'], {}), '(fwrpp.ref2d_out)\n', (1004, 1021), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((1032, 1079), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Self_Correlation', 'fwrpp.pp.Self_Correlation', (['fwrpp.ref2d_amp2_out'], {}), '(fwrpp.ref2d_amp2_out)\n', (1057, 1079), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((1091, 1139), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Self_Correlation', 'fwrpp.pp.Self_Correlation', 
(['fwrpp.ref2d_amp01_out'], {}), '(fwrpp.ref2d_amp01_out)\n', (1116, 1139), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((1148, 1190), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.Self_Correlation', 'fwrpp.pp.Self_Correlation', (['fwrpp.ref3d_out'], {}), '(fwrpp.ref3d_out)\n', (1173, 1190), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((1582, 1643), 'sdp.scripts.load_nstx_exp_ref.analyser.Cross_Correlation_by_fft', 'nstx_exp.analyser.Cross_Correlation_by_fft', (['(0.632)', '(0.64)', '(8000)'], {}), '(0.632, 0.64, 8000)\n', (1624, 1643), True, 'import sdp.scripts.load_nstx_exp_ref as nstx_exp\n'), ((1878, 1942), 'sdp.scripts.load_nstx_exp_ref.analyser.Coherent_over_time', 'nstx_exp.analyser.Coherent_over_time', (['(0.632)', '(0.64)', '(2e-05)', '(0.0001)'], {}), '(0.632, 0.64, 2e-05, 0.0001)\n', (1914, 1942), True, 'import sdp.scripts.load_nstx_exp_ref as nstx_exp\n'), ((307, 321), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (318, 321), False, 'import pickle\n'), ((2366, 2424), 'numpy.absolute', 'np.absolute', (['cc_fwr[top_center, top_range[0]:top_range[1]]'], {}), '(cc_fwr[top_center, top_range[0]:top_range[1]])\n', (2377, 2424), True, 'import numpy as np\n'), ((2439, 2498), 'numpy.absolute', 'np.absolute', (['cc_fwr2[top_center, top_range[0]:top_range[1]]'], {}), '(cc_fwr2[top_center, top_range[0]:top_range[1]])\n', (2450, 2498), True, 'import numpy as np\n'), ((2514, 2574), 'numpy.absolute', 'np.absolute', (['cc_fwr01[top_center, top_range[0]:top_range[1]]'], {}), '(cc_fwr01[top_center, top_range[0]:top_range[1]])\n', (2525, 2574), True, 'import numpy as np\n'), ((2590, 2647), 'numpy.absolute', 'np.absolute', (['cc_3d[top_center, top_range[0]:top_range[1]]'], {}), '(cc_3d[top_center, top_range[0]:top_range[1]])\n', (2601, 2647), True, 'import numpy as np\n'), ((2661, 2719), 'numpy.absolute', 'np.absolute', (['cc_exp[top_center, top_range[0]:top_range[1]]'], {}), '(cc_exp[top_center, 
top_range[0]:top_range[1]])\n', (2672, 2719), True, 'import numpy as np\n'), ((2956, 3023), 'numpy.absolute', 'np.absolute', (['cc_fwr[bottom_center, bottom_range[0]:bottom_range[1]]'], {}), '(cc_fwr[bottom_center, bottom_range[0]:bottom_range[1]])\n', (2967, 3023), True, 'import numpy as np\n'), ((3038, 3106), 'numpy.absolute', 'np.absolute', (['cc_fwr2[bottom_center, bottom_range[0]:bottom_range[1]]'], {}), '(cc_fwr2[bottom_center, bottom_range[0]:bottom_range[1]])\n', (3049, 3106), True, 'import numpy as np\n'), ((3122, 3191), 'numpy.absolute', 'np.absolute', (['cc_fwr01[bottom_center, bottom_range[0]:bottom_range[1]]'], {}), '(cc_fwr01[bottom_center, bottom_range[0]:bottom_range[1]])\n', (3133, 3191), True, 'import numpy as np\n'), ((3207, 3273), 'numpy.absolute', 'np.absolute', (['cc_3d[bottom_center, bottom_range[0]:bottom_range[1]]'], {}), '(cc_3d[bottom_center, bottom_range[0]:bottom_range[1]])\n', (3218, 3273), True, 'import numpy as np\n'), ((3287, 3354), 'numpy.absolute', 'np.absolute', (['cc_exp[bottom_center, bottom_range[0]:bottom_range[1]]'], {}), '(cc_exp[bottom_center, bottom_range[0]:bottom_range[1]])\n', (3298, 3354), True, 'import numpy as np\n'), ((4023, 4089), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr_top', 'dx_top', '"""exponential"""'], {}), "(fwr_top, dx_top, 'exponential')\n", (4057, 4089), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4113, 4180), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr2_top', 'dx_top', '"""exponential"""'], {}), "(fwr2_top, dx_top, 'exponential')\n", (4147, 4180), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4206, 4274), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr01_top', 'dx_top', '"""exponential"""'], {}), "(fwr01_top, dx_top, 
'exponential')\n", (4240, 4274), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4300, 4368), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr3d_top', 'dx_top', '"""exponential"""'], {}), "(fwr3d_top, dx_top, 'exponential')\n", (4334, 4368), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4390, 4456), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['exp_top', 'dx_top', '"""exponential"""'], {}), "(exp_top, dx_top, 'exponential')\n", (4424, 4456), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4699, 4726), 'numpy.linspace', 'np.linspace', (['(0)', 'xmax_t', '(500)'], {}), '(0, xmax_t, 500)\n', (4710, 4726), True, 'import numpy as np\n'), ((4741, 4782), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.exponential_fit', 'fwrpp.pp.exponential_fit', (['xfit_t', 'fwr_t_a'], {}), '(xfit_t, fwr_t_a)\n', (4765, 4782), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4799, 4841), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.exponential_fit', 'fwrpp.pp.exponential_fit', (['xfit_t', 'fwr2_t_a'], {}), '(xfit_t, fwr2_t_a)\n', (4823, 4841), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4859, 4902), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.exponential_fit', 'fwrpp.pp.exponential_fit', (['xfit_t', 'fwr01_t_a'], {}), '(xfit_t, fwr01_t_a)\n', (4883, 4902), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4920, 4963), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.exponential_fit', 'fwrpp.pp.exponential_fit', (['xfit_t', 'fwr3d_t_a'], {}), '(xfit_t, fwr3d_t_a)\n', (4944, 4963), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((4979, 5020), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.exponential_fit', 'fwrpp.pp.exponential_fit', (['xfit_t', 'exp_t_a'], {}), '(xfit_t, exp_t_a)\n', 
(5003, 5020), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((5036, 5082), 'sdp.plasma.analysis.gaussian_correlation_func', 'ana.gaussian_correlation_func', (['xfit_t', 'xgc_t_a'], {}), '(xfit_t, xgc_t_a)\n', (5065, 5082), True, 'import sdp.plasma.analysis as ana\n'), ((5624, 5687), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr_bot', 'dx_bot', '"""gaussian"""'], {}), "(fwr_bot, dx_bot, 'gaussian')\n", (5658, 5687), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((5711, 5775), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr2_bot', 'dx_bot', '"""gaussian"""'], {}), "(fwr2_bot, dx_bot, 'gaussian')\n", (5745, 5775), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((5801, 5866), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr01_bot', 'dx_bot', '"""gaussian"""'], {}), "(fwr01_bot, dx_bot, 'gaussian')\n", (5835, 5866), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((5892, 5957), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['fwr3d_bot', 'dx_bot', '"""gaussian"""'], {}), "(fwr3d_bot, dx_bot, 'gaussian')\n", (5926, 5957), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((5979, 6042), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.fitting_cross_correlation', 'fwrpp.pp.fitting_cross_correlation', (['exp_bot', 'dx_bot', '"""gaussian"""'], {}), "(exp_bot, dx_bot, 'gaussian')\n", (6013, 6042), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6308, 6335), 'numpy.linspace', 'np.linspace', (['(0)', 'xmax_b', '(500)'], {}), '(0, xmax_b, 500)\n', (6319, 6335), True, 'import numpy as np\n'), ((6350, 6388), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit', 
'fwrpp.pp.gaussian_fit', (['xfit_b', 'fwr_b_a'], {}), '(xfit_b, fwr_b_a)\n', (6371, 6388), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6405, 6444), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit', 'fwrpp.pp.gaussian_fit', (['xfit_b', 'fwr2_b_a'], {}), '(xfit_b, fwr2_b_a)\n', (6426, 6444), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6462, 6502), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit', 'fwrpp.pp.gaussian_fit', (['xfit_b', 'fwr01_b_a'], {}), '(xfit_b, fwr01_b_a)\n', (6483, 6502), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6520, 6560), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit', 'fwrpp.pp.gaussian_fit', (['xfit_b', 'fwr3d_b_a'], {}), '(xfit_b, fwr3d_b_a)\n', (6541, 6560), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6576, 6614), 'sdp.scripts.FWR2D_NSTX_139047_Postprocess.pp.gaussian_fit', 'fwrpp.pp.gaussian_fit', (['xfit_b', 'exp_b_a'], {}), '(xfit_b, exp_b_a)\n', (6597, 6614), True, 'import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp\n'), ((6630, 6676), 'sdp.plasma.analysis.gaussian_correlation_func', 'ana.gaussian_correlation_func', (['xfit_b', 'xgc_b_a'], {}), '(xfit_b, xgc_b_a)\n', (6659, 6676), True, 'import sdp.plasma.analysis as ana\n'), ((7146, 7195), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'exp_top', '"""bs"""'], {'label': '"""exp data"""'}), "(dx_top, exp_top, 'bs', label='exp data')\n", (7154, 7195), True, 'import matplotlib.pyplot as plt\n'), ((7203, 7258), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr_top', '"""ro"""'], {'label': '"""FWR data amp=1"""'}), "(dx_top, fwr_top, 'ro', label='FWR data amp=1')\n", (7211, 7258), True, 'import matplotlib.pyplot as plt\n'), ((7266, 7322), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr2_top', '"""r^"""'], {'label': '"""FWR data amp=2"""'}), "(dx_top, fwr2_top, 'r^', label='FWR data amp=2')\n", (7274, 7322), True, 'import 
matplotlib.pyplot as plt\n'), ((7330, 7389), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr01_top', '"""r+"""'], {'label': '"""FWR data amp=0.1"""'}), "(dx_top, fwr01_top, 'r+', label='FWR data amp=0.1')\n", (7338, 7389), True, 'import matplotlib.pyplot as plt\n'), ((7397, 7459), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'exp_fit_t', '"""b-"""'], {'label': '"""exp exponential fit"""'}), "(xfit_t, exp_fit_t, 'b-', label='exp exponential fit')\n", (7405, 7459), True, 'import matplotlib.pyplot as plt\n'), ((7467, 7518), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr_fit_t', '"""r--"""'], {'label': '"""FWR fit"""'}), "(xfit_t, fwr_fit_t, 'r--', label='FWR fit')\n", (7475, 7518), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7583), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr2_fit_t', '"""r-."""'], {'label': '"""FWR amp2 fit"""'}), "(xfit_t, fwr2_fit_t, 'r-.', label='FWR amp2 fit')\n", (7534, 7583), True, 'import matplotlib.pyplot as plt\n'), ((7591, 7650), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr01_fit_t', '"""r:"""'], {'label': '"""FWR amp0.1 fit"""'}), "(xfit_t, fwr01_fit_t, 'r:', label='FWR amp0.1 fit')\n", (7599, 7650), True, 'import matplotlib.pyplot as plt\n'), ((7658, 7716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from center channel reflection($m$)"""'], {}), "('distance from center channel reflection($m$)')\n", (7668, 7716), True, 'import matplotlib.pyplot as plt\n'), ((7725, 7756), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cross-correlation"""'], {}), "('cross-correlation')\n", (7735, 7756), True, 'import matplotlib.pyplot as plt\n'), ((7765, 7812), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 12}"}), "(labelspacing=0.2, prop={'size': 12})\n", (7775, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7823, 7841), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7839, 7841), True, 'import matplotlib.pyplot as 
plt\n'), ((11358, 11369), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11367, 11369), True, 'import matplotlib.pyplot as plt\n'), ((579, 611), 'numpy.zeros', 'np.zeros', (['(n_channel, n_channel)'], {}), '((n_channel, n_channel))\n', (587, 611), True, 'import numpy as np\n'), ((7994, 8043), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'exp_bot', '"""bs"""'], {'label': '"""exp data"""'}), "(dx_bot, exp_bot, 'bs', label='exp data')\n", (8002, 8043), True, 'import matplotlib.pyplot as plt\n'), ((8051, 8106), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr_bot', '"""ro"""'], {'label': '"""FWR data amp=1"""'}), "(dx_bot, fwr_bot, 'ro', label='FWR data amp=1')\n", (8059, 8106), True, 'import matplotlib.pyplot as plt\n'), ((8114, 8170), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr2_bot', '"""r^"""'], {'label': '"""FWR data amp=2"""'}), "(dx_bot, fwr2_bot, 'r^', label='FWR data amp=2')\n", (8122, 8170), True, 'import matplotlib.pyplot as plt\n'), ((8178, 8237), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr01_bot', '"""r+"""'], {'label': '"""FWR data amp=0.1"""'}), "(dx_bot, fwr01_bot, 'r+', label='FWR data amp=0.1')\n", (8186, 8237), True, 'import matplotlib.pyplot as plt\n'), ((8245, 8304), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'exp_fit_b', '"""b-"""'], {'label': '"""exp gaussian fit"""'}), "(xfit_b, exp_fit_b, 'b-', label='exp gaussian fit')\n", (8253, 8304), True, 'import matplotlib.pyplot as plt\n'), ((8312, 8363), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr_fit_b', '"""r--"""'], {'label': '"""FWR fit"""'}), "(xfit_b, fwr_fit_b, 'r--', label='FWR fit')\n", (8320, 8363), True, 'import matplotlib.pyplot as plt\n'), ((8371, 8428), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr2_fit_b', '"""r-."""'], {'label': '"""FWR amp2 fit"""'}), "(xfit_b, fwr2_fit_b, 'r-.', label='FWR amp2 fit')\n", (8379, 8428), True, 'import matplotlib.pyplot as plt\n'), ((8436, 8495), 'matplotlib.pyplot.plot', 'plt.plot', 
(['xfit_b', 'fwr01_fit_b', '"""r:"""'], {'label': '"""FWR amp0.1 fit"""'}), "(xfit_b, fwr01_fit_b, 'r:', label='FWR amp0.1 fit')\n", (8444, 8495), True, 'import matplotlib.pyplot as plt\n'), ((8503, 8561), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from center channel reflection($m$)"""'], {}), "('distance from center channel reflection($m$)')\n", (8513, 8561), True, 'import matplotlib.pyplot as plt\n'), ((8570, 8601), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cross-correlation"""'], {}), "('cross-correlation')\n", (8580, 8601), True, 'import matplotlib.pyplot as plt\n'), ((8610, 8657), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 12}"}), "(labelspacing=0.2, prop={'size': 12})\n", (8620, 8657), True, 'import matplotlib.pyplot as plt\n'), ((8668, 8686), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8684, 8686), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4650), 'numpy.abs', 'np.abs', (['fwr_t_a'], {}), '(fwr_t_a)\n', (4641, 4650), True, 'import numpy as np\n'), ((4651, 4667), 'numpy.abs', 'np.abs', (['fwr2_t_a'], {}), '(fwr2_t_a)\n', (4657, 4667), True, 'import numpy as np\n'), ((4668, 4683), 'numpy.abs', 'np.abs', (['exp_t_a'], {}), '(exp_t_a)\n', (4674, 4683), True, 'import numpy as np\n'), ((8839, 8888), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'exp_top', '"""bs"""'], {'label': '"""exp data"""'}), "(dx_top, exp_top, 'bs', label='exp data')\n", (8847, 8888), True, 'import matplotlib.pyplot as plt\n'), ((8896, 8947), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr_top', '"""ro"""'], {'label': '"""FWR2D data"""'}), "(dx_top, fwr_top, 'ro', label='FWR2D data')\n", (8904, 8947), True, 'import matplotlib.pyplot as plt\n'), ((8955, 9008), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr3d_top', '"""r^"""'], {'label': '"""FWR3D data"""'}), "(dx_top, fwr3d_top, 'r^', label='FWR3D data')\n", (8963, 9008), True, 'import matplotlib.pyplot as plt\n'), 
((9016, 9078), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'exp_fit_t', '"""b-"""'], {'label': '"""exp exponential fit"""'}), "(xfit_t, exp_fit_t, 'b-', label='exp exponential fit')\n", (9024, 9078), True, 'import matplotlib.pyplot as plt\n'), ((9086, 9139), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr_fit_t', '"""r--"""'], {'label': '"""FWR2D fit"""'}), "(xfit_t, fwr_fit_t, 'r--', label='FWR2D fit')\n", (9094, 9139), True, 'import matplotlib.pyplot as plt\n'), ((9147, 9202), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr3d_fit_t', '"""r-."""'], {'label': '"""FWR3D fit"""'}), "(xfit_t, fwr3d_fit_t, 'r-.', label='FWR3D fit')\n", (9155, 9202), True, 'import matplotlib.pyplot as plt\n'), ((9210, 9268), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from center channel reflection($m$)"""'], {}), "('distance from center channel reflection($m$)')\n", (9220, 9268), True, 'import matplotlib.pyplot as plt\n'), ((9277, 9308), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cross-correlation"""'], {}), "('cross-correlation')\n", (9287, 9308), True, 'import matplotlib.pyplot as plt\n'), ((9317, 9364), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 12}"}), "(labelspacing=0.2, prop={'size': 12})\n", (9327, 9364), True, 'import matplotlib.pyplot as plt\n'), ((9375, 9393), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9391, 9393), True, 'import matplotlib.pyplot as plt\n'), ((6243, 6258), 'numpy.abs', 'np.abs', (['fwr_b_a'], {}), '(fwr_b_a)\n', (6249, 6258), True, 'import numpy as np\n'), ((6259, 6275), 'numpy.abs', 'np.abs', (['fwr2_b_a'], {}), '(fwr2_b_a)\n', (6265, 6275), True, 'import numpy as np\n'), ((6276, 6291), 'numpy.abs', 'np.abs', (['exp_b_a'], {}), '(exp_b_a)\n', (6282, 6291), True, 'import numpy as np\n'), ((9549, 9598), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'exp_bot', '"""bs"""'], {'label': '"""exp data"""'}), "(dx_bot, exp_bot, 'bs', label='exp 
data')\n", (9557, 9598), True, 'import matplotlib.pyplot as plt\n'), ((9606, 9657), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr_bot', '"""go"""'], {'label': '"""FWR2D data"""'}), "(dx_bot, fwr_bot, 'go', label='FWR2D data')\n", (9614, 9657), True, 'import matplotlib.pyplot as plt\n'), ((9665, 9718), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr3d_bot', '"""r^"""'], {'label': '"""FWR3D data"""'}), "(dx_bot, fwr3d_bot, 'r^', label='FWR3D data')\n", (9673, 9718), True, 'import matplotlib.pyplot as plt\n'), ((9726, 9759), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'exp_fit_b', '"""b-"""'], {}), "(xfit_b, exp_fit_b, 'b-')\n", (9734, 9759), True, 'import matplotlib.pyplot as plt\n'), ((9766, 9800), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr_fit_b', '"""g--"""'], {}), "(xfit_b, fwr_fit_b, 'g--')\n", (9774, 9800), True, 'import matplotlib.pyplot as plt\n'), ((9807, 9843), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr3d_fit_b', '"""r-."""'], {}), "(xfit_b, fwr3d_fit_b, 'r-.')\n", (9815, 9843), True, 'import matplotlib.pyplot as plt\n'), ((9850, 9898), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$distance from center channel(mm)$"""'], {}), "('$distance from center channel(mm)$')\n", (9860, 9898), True, 'import matplotlib.pyplot as plt\n'), ((9907, 9930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\gamma$"""'], {}), "('$\\\\gamma$')\n", (9917, 9930), True, 'import matplotlib.pyplot as plt\n'), ((9938, 9985), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 15}"}), "(labelspacing=0.2, prop={'size': 15})\n", (9948, 9985), True, 'import matplotlib.pyplot as plt\n'), ((9996, 10014), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10012, 10014), True, 'import matplotlib.pyplot as plt\n'), ((10053, 10125), 'matplotlib.pyplot.title', 'plt.title', (['"""2D/3D Cross-Correlation and XGC1 Density Correlation, Lower"""'], {}), "('2D/3D Cross-Correlation and 
XGC1 Density Correlation, Lower')\n", (10062, 10125), True, 'import matplotlib.pyplot as plt\n'), ((10134, 10177), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr_bot', '"""ro"""'], {'label': '"""2D"""'}), "(dx_bot, fwr_bot, 'ro', label='2D')\n", (10142, 10177), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10230), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_bot', 'fwr3d_bot', '"""r^"""'], {'label': '"""3D"""'}), "(dx_bot, fwr3d_bot, 'r^', label='3D')\n", (10193, 10230), True, 'import matplotlib.pyplot as plt\n'), ((10238, 10279), 'matplotlib.pyplot.plot', 'plt.plot', (['x_b', 'dne_c_b', '"""bs"""'], {'label': '"""XGC"""'}), "(x_b, dne_c_b, 'bs', label='XGC')\n", (10246, 10279), True, 'import matplotlib.pyplot as plt\n'), ((10287, 10337), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr_fit_b', '"""r-."""'], {'label': '"""2D fit"""'}), "(xfit_b, fwr_fit_b, 'r-.', label='2D fit')\n", (10295, 10337), True, 'import matplotlib.pyplot as plt\n'), ((10345, 10397), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'fwr3d_fit_b', '"""r--"""'], {'label': '"""3D fit"""'}), "(xfit_b, fwr3d_fit_b, 'r--', label='3D fit')\n", (10353, 10397), True, 'import matplotlib.pyplot as plt\n'), ((10405, 10455), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_b', 'xgc_fit_b', '"""b-"""'], {'label': '"""XGC fit"""'}), "(xfit_b, xgc_fit_b, 'b-', label='XGC fit')\n", (10413, 10455), True, 'import matplotlib.pyplot as plt\n'), ((10463, 10521), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from center channel relfection($m$)"""'], {}), "('distance from center channel relfection($m$)')\n", (10473, 10521), True, 'import matplotlib.pyplot as plt\n'), ((10530, 10560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cross-corelation"""'], {}), "('cross-corelation')\n", (10540, 10560), True, 'import matplotlib.pyplot as plt\n'), ((10569, 10616), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 12}"}), "(labelspacing=0.2, 
prop={'size': 12})\n", (10579, 10616), True, 'import matplotlib.pyplot as plt\n'), ((10627, 10645), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10643, 10645), True, 'import matplotlib.pyplot as plt\n'), ((10684, 10756), 'matplotlib.pyplot.title', 'plt.title', (['"""2D/3D Cross-Correlation and XGC1 Density Correlation, Upper"""'], {}), "('2D/3D Cross-Correlation and XGC1 Density Correlation, Upper')\n", (10693, 10756), True, 'import matplotlib.pyplot as plt\n'), ((10765, 10808), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr_top', '"""ro"""'], {'label': '"""2D"""'}), "(dx_top, fwr_top, 'ro', label='2D')\n", (10773, 10808), True, 'import matplotlib.pyplot as plt\n'), ((10816, 10861), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_top', 'fwr3d_top', '"""r^"""'], {'label': '"""3D"""'}), "(dx_top, fwr3d_top, 'r^', label='3D')\n", (10824, 10861), True, 'import matplotlib.pyplot as plt\n'), ((10869, 10910), 'matplotlib.pyplot.plot', 'plt.plot', (['x_t', 'dne_c_t', '"""bs"""'], {'label': '"""XGC"""'}), "(x_t, dne_c_t, 'bs', label='XGC')\n", (10877, 10910), True, 'import matplotlib.pyplot as plt\n'), ((10918, 10968), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr_fit_t', '"""r-."""'], {'label': '"""2D fit"""'}), "(xfit_t, fwr_fit_t, 'r-.', label='2D fit')\n", (10926, 10968), True, 'import matplotlib.pyplot as plt\n'), ((10976, 11028), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'fwr3d_fit_t', '"""r--"""'], {'label': '"""3D fit"""'}), "(xfit_t, fwr3d_fit_t, 'r--', label='3D fit')\n", (10984, 11028), True, 'import matplotlib.pyplot as plt\n'), ((11036, 11086), 'matplotlib.pyplot.plot', 'plt.plot', (['xfit_t', 'xgc_fit_t', '"""b-"""'], {'label': '"""XGC fit"""'}), "(xfit_t, xgc_fit_t, 'b-', label='XGC fit')\n", (11044, 11086), True, 'import matplotlib.pyplot as plt\n'), ((11094, 11152), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance from center channel relfection($m$)"""'], {}), "('distance from center channel 
relfection($m$)')\n", (11104, 11152), True, 'import matplotlib.pyplot as plt\n'), ((11161, 11191), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cross-corelation"""'], {}), "('cross-corelation')\n", (11171, 11191), True, 'import matplotlib.pyplot as plt\n'), ((11200, 11247), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labelspacing': '(0.2)', 'prop': "{'size': 12}"}), "(labelspacing=0.2, prop={'size': 12})\n", (11210, 11247), True, 'import matplotlib.pyplot as plt\n'), ((11258, 11276), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11274, 11276), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
import copy
import gc
import math
import os
import sys

import numpy
##### User defined data types for Grid IO #####
def dequote(string):
    """Return *string* with any leading/trailing double-quote characters removed."""
    stripped = string.strip('"')
    return stripped
def enquote(string):
    """Return *string* wrapped in double-quote characters."""
    return '"{0}"'.format(string)
# . . Implement a view on a grid
class View:
    """A selection window on a regularly sampled n-dimensional grid.

    Per dimension the view stores the axis origin (ox), sample spacing (dx)
    and sample count (nx), a start/stop/step selection, and an axis label
    and unit.  The per-dimension arrays are created by allocate().
    """
    # Defaults; real (numpy) storage of length ndim is created by allocate().
    ndim = None
    ox = None
    dx = None
    nx = None
    start = None
    stop = None
    step = None
    label = None
    unit = None
    allocated = False

    def __init__(self):
        self.reset()

    def copy(self, other):
        """Deep-copy every attribute of another View into this one."""
        self.ndim = copy.deepcopy(other.ndim)
        self.ox = copy.deepcopy(other.ox)
        self.dx = copy.deepcopy(other.dx)
        self.nx = copy.deepcopy(other.nx)
        self.start = copy.deepcopy(other.start)
        self.stop = copy.deepcopy(other.stop)
        self.step = copy.deepcopy(other.step)
        self.label = copy.deepcopy(other.label)
        self.unit = copy.deepcopy(other.unit)
        self.allocated = copy.deepcopy(other.allocated)

    def allocate(self, ndim):
        """Allocate per-dimension metadata arrays for an ndim-dimensional view."""
        self.ndim = ndim
        self.ox = numpy.zeros(ndim, dtype=float)
        self.dx = numpy.ones(ndim, dtype=float)
        self.nx = numpy.ones(ndim, dtype=int)
        self.start = numpy.zeros(ndim, dtype=int)
        self.stop = numpy.ones(ndim, dtype=int)
        self.step = numpy.ones(ndim, dtype=int)
        self.unit = numpy.ndarray(ndim, dtype='object')
        self.unit[:] = ""
        self.label = numpy.ndarray(ndim, dtype='object')
        self.label[:] = ""
        self.allocated = True

    def reset(self):
        """Return the view to its unallocated state."""
        self.ndim = None
        self.ox = None
        self.dx = None
        self.nx = None
        self.start = None
        self.stop = None
        self.step = None
        self.unit = None
        self.label = None
        self.allocated = False

    def fill(self, other, local_dim, other_dim):
        """Copy one dimension's metadata from another view into this one."""
        # NOTE(review): the bound check uses <=; local_dim == ndim would still
        # index out of range -- confirm whether < was intended.
        assert local_dim <= self.ndim
        self.ox[local_dim] = other.ox[other_dim]
        self.nx[local_dim] = other.nx[other_dim]
        self.dx[local_dim] = other.dx[other_dim]
        self.start[local_dim] = other.start[other_dim]
        self.stop[local_dim] = other.stop[other_dim]
        self.step[local_dim] = other.step[other_dim]
        self.unit[local_dim] = other.unit[other_dim]
        self.label[local_dim] = other.label[other_dim]

    def create_slices(self):
        """Return a tuple of slices (one per dimension) for numpy indexing."""
        assert (self.allocated)
        slices = []
        for n in range(0, self.ndim):
            slices.append(slice(self.start[n], self.stop[n], self.step[n]))
        return (tuple(slices))

    def create_slices_from_view(self, ext_view):
        """Intersect this view with an external one.

        Returns [slices_local, slices_ext] mapping the physical overlap into
        each view's own index space, or [None, None] when the views do not
        overlap.  Start/stop offsets within the views are not honoured and
        the two grids must share (approximately) the same sample spacing.
        """
        assert (self.ndim == ext_view.ndim)
        dx_relative_threshold = 0.01
        for i in range(0, self.ndim):
            assert (abs((self.dx[i] - ext_view.dx[i]) / self.dx[i]) <= dx_relative_threshold)
        # Physical (coordinate-space) start/stop positions of both views.
        sx_start_intersect = copy.deepcopy(self.ox)
        sx_stop_intersect = copy.deepcopy(self.ox)
        self_start_intersect = self.ox + self.start * self.dx
        self_stop_intersect = self.ox + self.stop * self.dx
        ext_start_intersect = ext_view.ox + ext_view.start * ext_view.dx
        ext_stop_intersect = ext_view.ox + ext_view.stop * ext_view.dx
        nx_intersect = copy.deepcopy(self.nx)
        slices_local = []
        slices_ext = []
        badresult = False
        for i in range(0, self.ndim):
            # The sign of dx decides which bound is the inner one on this axis.
            if self.dx[i] > 0.0:
                sx_start_intersect[i] = max(self_start_intersect[i], ext_start_intersect[i])
                sx_stop_intersect[i] = min(self_stop_intersect[i], ext_stop_intersect[i])
            else:
                sx_start_intersect[i] = min(self_start_intersect[i], ext_start_intersect[i])
                sx_stop_intersect[i] = max(self_stop_intersect[i], ext_stop_intersect[i])
            # Round to the nearest sample count; <= 0 means no overlap.
            nx_intersect[i] = math.floor((sx_stop_intersect[i] - sx_start_intersect[i]) / self.dx[i] + 0.5)
            if (nx_intersect[i] <= 0):
                badresult = True
            else:
                slices_local.append(slice(int(math.floor((sx_start_intersect[i] - self.ox[i]) / self.dx[i] + 0.5)), int(math.floor((sx_stop_intersect[i] - self.ox[i]) / self.dx[i] + 0.5)), self.step[i]))
                slices_ext.append(slice(int(math.floor((sx_start_intersect[i] - ext_view.ox[i]) / ext_view.dx[i] + 0.5)), int(math.floor((sx_stop_intersect[i] - ext_view.ox[i]) / ext_view.dx[i] + 0.5)), ext_view.step[i]))
        if badresult:
            return ([None, None])
        else:
            return ([slices_local, slices_ext])

    def create_view_from_slices(self, slices):
        """Create a new View describing the sub-grid selected by *slices*."""
        assert (self.allocated)
        assert (len(slices) == len(self.nx))
        view = View()
        view.allocate(self.ndim)
        view.dx = copy.deepcopy(self.dx)
        view.label = copy.deepcopy(self.label)
        view.unit = copy.deepcopy(self.unit)
        for i in range(0, self.ndim):
            view.nx[i] = abs(slices[i].stop - slices[i].start)
            # Shift the origin to the first selected sample.
            view.ox[i] = self.ox[i] + slices[i].start * self.dx[i]
            view.start[i] = 0
            view.stop[i] = view.nx[i]
            view.step[i] = slices[i].step
        return (view)

    def default_view(self, dim):
        """Reset dimension *dim*'s selection to cover the full axis."""
        assert (self.allocated)
        self.start[dim] = 0
        self.stop[dim] = self.nx[dim]
        self.step[dim] = 1

    def make_default_view(self):
        """Reset every dimension's selection to cover the full grid."""
        for n in range(0, self.ndim):
            self.default_view(n)

    def create_dict(self):
        """Serialise the view metadata into a plain dict (for json upload etc.)."""
        # BUGFIX: the original referenced the bare names `allocated` and `ndim`,
        # which raised NameError; they must be read from self.
        assert (self.allocated)
        parm = dict()
        parm["ndim"] = copy.deepcopy(self.ndim)
        parm["nx"] = copy.deepcopy(self.nx)
        parm["ox"] = copy.deepcopy(self.ox)
        parm["dx"] = copy.deepcopy(self.dx)
        parm["start"] = copy.deepcopy(self.start)
        parm["stop"] = copy.deepcopy(self.stop)
        parm["step"] = copy.deepcopy(self.step)
        parm["unit"] = copy.deepcopy(self.unit)
        parm["label"] = copy.deepcopy(self.label)
        return (parm)

    def unload_dict(self, parm):
        """Populate the view from a dictionary produced by create_dict()."""
        self.ndim = copy.deepcopy(parm["ndim"])
        self.nx = copy.deepcopy(parm["nx"])
        self.ox = copy.deepcopy(parm["ox"])
        self.dx = copy.deepcopy(parm["dx"])
        self.start = copy.deepcopy(parm["start"])
        self.stop = copy.deepcopy(parm["stop"])
        self.step = copy.deepcopy(parm["step"])
        self.unit = copy.deepcopy(parm["unit"])
        self.label = copy.deepcopy(parm["label"])

    def print_metadata(self, stream):
        """Write a human-readable dump of the view metadata to *stream*."""
        # BUGFIX: converted Python 2 `print >> stream` statements to print().
        print("ndim=", self.ndim, file=stream)
        print("ox=", self.ox, file=stream)
        print("dx=", self.dx, file=stream)
        print("nx=", self.nx, file=stream)
        print("start=", self.start, file=stream)
        print("stop=", self.stop, file=stream)
        print("step=", self.step, file=stream)
        print("unit=", self.unit, file=stream)
        print("label=", self.label, file=stream)
# .. Define Grid Class
class Grid():
    """An n-dimensional data array together with the View describing its sampling."""
    # The metadata describing the data layout.
    view = View()
    # The numpy array (or file-backed view) holding the samples.
    data = None
    # Allocation flag.
    allocated = False
    dtype = numpy.float32
    # Array memory ordering ("C" or "F").
    order = "C"

    def deallocate(self):
        """Release the data array and reset the view."""
        # BUGFIX: the original assigned the undefined name `none` and called
        # View.deallocate(), which does not exist (View provides reset()).
        self.data = None
        self.dtype = numpy.float32
        self.view.reset()
        gc.collect()
        self.allocated = False

    def allocate(self, dtype=numpy.float32, order="C"):
        """Allocate a zero-filled array shaped by the (already allocated) view."""
        assert (self.view.allocated)
        self.order = order
        self.dtype = dtype
        self.data = numpy.zeros(self.view.nx, dtype=self.dtype, order=self.order)
        self.allocated = True

    def reset(self):
        """Drop any data and start from a fresh, empty view."""
        self.view = View()
        self.data = []
        self.allocated = False
        self.dtype = numpy.float32

    def __init__(self):
        self.reset()

    def ingest_array(self, array, binary_order="C"):
        """Adopt an existing numpy array, synthesising a default view.

        The view is given a zero origin and unit spacing per dimension.
        """
        self.view.ndim = int(len(array.shape))
        self.view.allocate(len(array.shape))
        self.view.nx[:] = numpy.array(array.shape, dtype=int)
        self.view.ox[:] = numpy.zeros((self.view.ndim), dtype=numpy.float32)
        self.view.dx[:] = self.view.ox[:] + 1.0
        if binary_order != self.order:
            # NOTE(review): this path ravels with self.order rather than
            # binary_order, so it may not actually reinterpret the layout --
            # preserved as-is, confirm against callers.
            self.data = array.ravel(order=self.order).reshape(self.view.nx, order=self.order)
        else:
            self.data = array
        self.allocated = True

    def ingest_binary(self, binary_fname, dtype=numpy.float32, binary_order="C"):
        """Read raw binary samples from *binary_fname* into the view's shape.

        The view must already be allocated and sized; data stored in a
        different memory order is re-laid-out into this grid's order.
        """
        assert (self.view.allocated)
        self.dtype = dtype
        if binary_order != self.order:
            temp = numpy.fromfile(binary_fname, dtype=self.dtype).reshape(tuple(self.view.nx), order=binary_order)
            self.data = temp.ravel(order=self.order).reshape(tuple(self.view.nx), order=self.order)
        else:
            self.data = numpy.fromfile(binary_fname, dtype=self.dtype).reshape(self.view.nx, order=self.order)
# Read rsf file
def read_rsf_file(infile=None, use_memmap=False):
    """Read an RSF grid from a file path, a tag (name without ".rsf"), or stdin.

    infile     - path or tag of the RSF header file; None reads sys.stdin
    use_memmap - passed through to read_rsf() to memory-map the binary data
    Returns the populated Grid.
    """
    if infile is None:  # BUGFIX(idiom): compare to None with `is`, not `==`
        # Header comes from standard input.
        tempgrid = read_rsf(sys.stdin, use_memmap)
    else:
        input_file = str(infile).strip()
        # Accept either the literal file name or a bare tag without extension.
        if os.path.isfile(input_file):
            header_path = input_file
        else:
            header_path = input_file + ".rsf"
        # BUGFIX: `with` guarantees the header file is closed even on error.
        with open(header_path, 'r') as f:
            tempgrid = read_rsf(f, use_memmap)
    return (tempgrid)
def read_rsf(instream, use_memmap=False):
    """Read an RSF header from a file-like object and load its binary data.

    instream   - iterable of header lines containing "key=value" pairs
    use_memmap - if True the binary file is memory-mapped instead of read eagerly
    Returns an allocated Grid.
    """
    # Parse key=value pairs; all whitespace inside a line is discarded first.
    parm = dict()
    for line in instream:
        part = "".join(line.split()).partition('=')
        if (part[2] != ''):
            parm[part[0]] = dequote(part[2])
    # Count dimensions from consecutive n1, n2, ... keys.
    count = 1
    ndim = 0
    while "n" + str(count) in parm:
        ndim += 1
        count += 1
    # Element size in bytes (RSF default is 4).
    if "esize" in parm:
        esize = int(parm["esize"])
    else:
        esize = 4
    # BUGFIX(idiom): local renamed from `type`, which shadowed the builtin.
    if "type" in parm:
        rsf_type = parm["type"]
    else:
        rsf_type = None
    if "data_format" in parm:
        data_format = parm["data_format"]
    else:
        data_format = "native_float"
    # Storage form: "native" binary or ASCII fallback.
    if "form" in parm:
        form = parm["form"]
    else:
        form = "native"
    # Map the declared type / element size onto a numpy dtype.
    if ((rsf_type == "int" or data_format == "native_int") and esize == 4):
        dtype = numpy.int32
    elif ((rsf_type == "complex" or data_format == "native_complex") and esize == 8):
        dtype = numpy.complex64
    elif ((rsf_type == "short" or data_format == "native_short") and esize == 2):
        dtype = numpy.int16
    else:
        dtype = numpy.float32
    # Build the grid geometry from the o*/d*/n*/label*/unit* keys.
    ingrid = Grid()
    ingrid.view.allocate(ndim)
    for i in range(0, ndim):
        var = 'o' + str(i + 1)
        if var in parm:
            ingrid.view.ox[i] = float(parm[var])
        var = 'd' + str(i + 1)
        if var in parm:
            ingrid.view.dx[i] = float(parm[var])
        var = 'n' + str(i + 1)
        if var in parm:
            ingrid.view.nx[i] = int(parm[var])
        var = 'label' + str(i + 1)
        if var in parm:
            ingrid.view.label[i] = parm[var]
        var = 'unit' + str(i + 1)
        if var in parm:
            ingrid.view.unit[i] = parm[var]
    # The header must point at the binary data file.
    assert ("in" in parm)
    # Strip the quotes around the binary file name.
    parm["in"] = parm["in"].strip('"')
    # Now read the data (RSF binaries are Fortran-ordered on disk).
    if use_memmap:
        # Memory map: cheap for large files.
        ingrid.data = numpy.memmap(parm["in"], dtype=dtype, mode='r', order='F', shape=tuple(ingrid.view.nx))
    elif (form == "native"):
        ingrid.data = numpy.fromfile(parm["in"], dtype=dtype).reshape(ingrid.view.nx, order='F')
    else:
        # Fall back to whitespace-separated ASCII numbers.
        ingrid.data = numpy.fromfile(parm["in"], dtype=dtype, sep=" ").reshape(ingrid.view.nx, order='F')
    # This Grid now owns allocated data.
    ingrid.allocated = True
    ingrid.dtype = dtype
    return (ingrid)
| [
"numpy.fromfile",
"numpy.ones",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"numpy.ndarray"
] | [((839, 869), 'numpy.zeros', 'numpy.zeros', (['ndim'], {'dtype': 'float'}), '(ndim, dtype=float)\n', (850, 869), False, 'import numpy, os\n'), ((879, 908), 'numpy.ones', 'numpy.ones', (['ndim'], {'dtype': 'float'}), '(ndim, dtype=float)\n', (889, 908), False, 'import numpy, os\n'), ((918, 945), 'numpy.ones', 'numpy.ones', (['ndim'], {'dtype': 'int'}), '(ndim, dtype=int)\n', (928, 945), False, 'import numpy, os\n'), ((958, 986), 'numpy.zeros', 'numpy.zeros', (['ndim'], {'dtype': 'int'}), '(ndim, dtype=int)\n', (969, 986), False, 'import numpy, os\n'), ((999, 1026), 'numpy.ones', 'numpy.ones', (['ndim'], {'dtype': 'int'}), '(ndim, dtype=int)\n', (1009, 1026), False, 'import numpy, os\n'), ((1038, 1065), 'numpy.ones', 'numpy.ones', (['ndim'], {'dtype': 'int'}), '(ndim, dtype=int)\n', (1048, 1065), False, 'import numpy, os\n'), ((1077, 1112), 'numpy.ndarray', 'numpy.ndarray', (['ndim'], {'dtype': '"""object"""'}), "(ndim, dtype='object')\n", (1090, 1112), False, 'import numpy, os\n'), ((1144, 1179), 'numpy.ndarray', 'numpy.ndarray', (['ndim'], {'dtype': '"""object"""'}), "(ndim, dtype='object')\n", (1157, 1179), False, 'import numpy, os\n'), ((6385, 6446), 'numpy.zeros', 'numpy.zeros', (['self.view.nx'], {'dtype': 'self.dtype', 'order': 'self.order'}), '(self.view.nx, dtype=self.dtype, order=self.order)\n', (6396, 6446), False, 'import numpy, os\n'), ((6758, 6793), 'numpy.array', 'numpy.array', (['array.shape'], {'dtype': 'int'}), '(array.shape, dtype=int)\n', (6769, 6793), False, 'import numpy, os\n'), ((6812, 6860), 'numpy.zeros', 'numpy.zeros', (['self.view.ndim'], {'dtype': 'numpy.float32'}), '(self.view.ndim, dtype=numpy.float32)\n', (6823, 6860), False, 'import numpy, os\n'), ((7835, 7861), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', (7849, 7861), False, 'import numpy, os\n'), ((7307, 7353), 'numpy.fromfile', 'numpy.fromfile', (['binary_fname'], {'dtype': 'self.dtype'}), '(binary_fname, dtype=self.dtype)\n', (7321, 7353), 
False, 'import numpy, os\n'), ((7513, 7559), 'numpy.fromfile', 'numpy.fromfile', (['binary_fname'], {'dtype': 'self.dtype'}), '(binary_fname, dtype=self.dtype)\n', (7527, 7559), False, 'import numpy, os\n'), ((10244, 10283), 'numpy.fromfile', 'numpy.fromfile', (["parm['in']"], {'dtype': 'dtype'}), "(parm['in'], dtype=dtype)\n", (10258, 10283), False, 'import numpy, os\n'), ((10382, 10430), 'numpy.fromfile', 'numpy.fromfile', (["parm['in']"], {'dtype': 'dtype', 'sep': '""" """'}), "(parm['in'], dtype=dtype, sep=' ')\n", (10396, 10430), False, 'import numpy, os\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import nltk
import re
def load_data_and_labels(path):
    """Load SemEval-2010 Task 8 style relation-classification data.

    path - text file in which each example spans four lines:
           '<id>\t"<sentence>"', '<relation>', a comment line, and a blank line.

    Returns (x_text, labels): the cleaned sentences (entity markers rewritten
    to "<e1> ... </e11>" / "<e2> ... </e22>" so the closing tags stay
    distinguishable after tokenization) and one-hot label vectors (uint8).
    """
    data = []
    # BUGFIX: close the input file deterministically instead of relying on GC.
    with open(path) as infile:
        lines = [line.strip() for line in infile]
    for idx in range(0, len(lines), 4):
        # BUGFIX(idiom): local renamed from `id`, which shadowed the builtin.
        sent_id = lines[idx].split("\t")[0]
        relation = lines[idx + 1]
        # Drop the surrounding quote characters around the sentence.
        sentence = lines[idx].split("\t")[1][1:-1]
        # Pad the entity tags with spaces so they survive as separate tokens.
        sentence = sentence.replace("<e1>", "<e1> ").replace("</e1>", " </e11>")
        sentence = sentence.replace("<e2>", "<e2> ").replace("</e2>", " </e22>")
        sentence = clean_str(sentence)
        data.append([sent_id, sentence, relation])
    df = pd.DataFrame(data=data, columns=["id", "sentence", "relation"])
    # Fixed mapping from the 19 relation names to integer class ids.
    labelsMapping = {'Other': 0,
                     'Message-Topic(e1,e2)': 1, 'Message-Topic(e2,e1)': 2,
                     'Product-Producer(e1,e2)': 3, 'Product-Producer(e2,e1)': 4,
                     'Instrument-Agency(e1,e2)': 5, 'Instrument-Agency(e2,e1)': 6,
                     'Entity-Destination(e1,e2)': 7, 'Entity-Destination(e2,e1)': 8,
                     'Cause-Effect(e1,e2)': 9, 'Cause-Effect(e2,e1)': 10,
                     'Component-Whole(e1,e2)': 11, 'Component-Whole(e2,e1)': 12,
                     'Entity-Origin(e1,e2)': 13, 'Entity-Origin(e2,e1)': 14,
                     'Member-Collection(e1,e2)': 15, 'Member-Collection(e2,e1)': 16,
                     'Content-Container(e1,e2)': 17, 'Content-Container(e2,e1)': 18}
    df['label'] = [labelsMapping[r] for r in df['relation']]
    x_text = df['sentence'].tolist()
    y = df['label']
    labels_flat = y.values.ravel()
    # NOTE(review): the one-hot width is the number of classes actually present
    # in the data, not a fixed 19; absent classes shrink the label vectors --
    # confirm that downstream consumers expect this.
    labels_count = np.unique(labels_flat).shape[0]

    def dense_to_one_hot(labels_dense, num_classes):
        """Convert a vector of class ids into an (n, num_classes) one-hot array."""
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    labels = dense_to_one_hot(labels_flat, labels_count)
    labels = labels.astype(np.uint8)
    return x_text, labels
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches of `data`, repeated for `num_epochs` epochs.

    NOTE: the `shuffle` flag is accepted but currently has no effect -- batches
    are always produced in the original order (the per-epoch shuffling code was
    deliberately disabled by the original author).
    """
    samples = np.array(data)
    total = len(samples)
    batches_per_epoch = int((len(samples) - 1) / batch_size) + 1
    for _epoch in range(num_epochs):
        for batch_index in range(batches_per_epoch):
            lo = batch_index * batch_size
            hi = min(lo + batch_size, total)
            yield samples[lo:hi]
def clean_str(string):
    """Tokenization/string cleaning for all datasets except for SST.

    Applies the substitution table below in order, then collapses repeated
    whitespace and lower-cases the result.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    substitutions = [
        (r"[^A-Za-z0-9()<>/,!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    ]
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def get_relative_position(df, max_sentence_length=100):
    """Build space-separated relative-position strings for every sentence.

    For each row, every token's offset is encoded relative to the e1 and e2
    entity positions (shifted by max_sentence_length - 1 so values stay
    non-negative); sentences shorter than max_sentence_length are padded
    with the sentinel "999".  Returns the two lists of position strings.
    """
    pos1 = []
    pos2 = []
    base = max_sentence_length - 1
    for row_idx in range(len(df)):
        record = df.iloc[row_idx]
        tokens = nltk.word_tokenize(record['sentence'])
        e1_pos = record['e1_pos']
        e2_pos = record['e2_pos']
        first = [str(base + tok_idx - e1_pos) for tok_idx in range(len(tokens))]
        second = [str(base + tok_idx - e2_pos) for tok_idx in range(len(tokens))]
        padding = ["999"] * (max_sentence_length - len(tokens))
        pos1.append("".join(entry + " " for entry in first + padding))
        pos2.append("".join(entry + " " for entry in second + padding))
    return pos1, pos2
| [
"numpy.unique",
"nltk.word_tokenize",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"re.sub",
"numpy.arange"
] | [((1091, 1154), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['id', 'sentence', 'relation']"}), "(data=data, columns=['id', 'sentence', 'relation'])\n", (1103, 1154), True, 'import pandas as pd\n'), ((2749, 2763), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2757, 2763), True, 'import numpy as np\n'), ((3522, 3571), 're.sub', 're.sub', (['"""[^A-Za-z0-9()<>/,!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9()<>/,!?\\\\\'\\\\`]", \' \', string)\n', (3528, 3571), False, 'import re\n'), ((3581, 3610), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (3587, 3610), False, 'import re\n'), ((3622, 3653), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (3628, 3653), False, 'import re\n'), ((3665, 3696), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (3671, 3696), False, 'import re\n'), ((3708, 3739), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (3714, 3739), False, 'import re\n'), ((3751, 3780), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (3757, 3780), False, 'import re\n'), ((3792, 3823), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (3798, 3823), False, 'import re\n'), ((3835, 3861), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (3841, 3861), False, 'import re\n'), ((3873, 3899), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! 
', string)\n", (3879, 3899), False, 'import re\n'), ((3911, 3941), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (3917, 3941), False, 'import re\n'), ((3951, 3981), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (3957, 3981), False, 'import re\n'), ((3991, 4021), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? ', string)\n", (3997, 4021), False, 'import re\n'), ((4031, 4061), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (4037, 4061), False, 'import re\n'), ((2353, 2388), 'numpy.zeros', 'np.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (2361, 2388), True, 'import numpy as np\n'), ((4272, 4300), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (4290, 4300), False, 'import nltk\n'), ((1993, 2015), 'numpy.unique', 'np.unique', (['labels_flat'], {}), '(labels_flat)\n', (2002, 2015), True, 'import numpy as np\n'), ((2298, 2319), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (2307, 2319), True, 'import numpy as np\n')] |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" delta-delta op unittest"""
import tempfile
import numpy as np
import tensorflow as tf
from absl import logging
from kaldiio import WriteHelper
from delta.layers.ops import py_x_ops
class DeltaDeltaOpTest(tf.test.TestCase):
  """ delta-delta op test: checks py_x_ops.delta_delta against reference
  values precomputed with Kaldi's `add-deltas` tool."""
  def setUp(self):
    """ set up: build an 80-dim ramp feature frame and its expected
    order-2, window-2 delta expansion."""
    self.feat_dim = 80
    self.order = 2
    self.window = 2
    # One frame whose feature values are simply 0..79.
    self.data = np.arange(self.feat_dim, dtype=np.float32)
    # dump to ark to computing delta-delta by kaldi
    ark_file = tempfile.mktemp(suffix="feat.ark")
    scp_file = tempfile.mktemp(suffix="feat.scp")
    logging.info("ark, scp: {} {}".format(ark_file, scp_file))
    with WriteHelper("ark,scp:{},{}".format(ark_file, scp_file)) as writer:
      writer(str(0), self.data[None, :])
    # compute from kaldi `add-detlas` tools
    # Expected layout: [static (80) | delta (80) | delta-delta (80)];
    # the non-static sections are numerical noise for a single frame.
    self.output_true = np.array(
        [
            0.0000000e00,
            1.0000000e00,
            2.0000000e00,
            3.0000000e00,
            4.0000000e00,
            5.0000000e00,
            6.0000000e00,
            7.0000000e00,
            8.0000000e00,
            9.0000000e00,
            1.0000000e01,
            1.1000000e01,
            1.2000000e01,
            1.3000000e01,
            1.4000000e01,
            1.5000000e01,
            1.6000000e01,
            1.7000000e01,
            1.8000000e01,
            1.9000000e01,
            2.0000000e01,
            2.1000000e01,
            2.2000000e01,
            2.3000000e01,
            2.4000000e01,
            2.5000000e01,
            2.6000000e01,
            2.7000000e01,
            2.8000000e01,
            2.9000000e01,
            3.0000000e01,
            3.1000000e01,
            3.2000000e01,
            3.3000000e01,
            3.4000000e01,
            3.5000000e01,
            3.6000000e01,
            3.7000000e01,
            3.8000000e01,
            3.9000000e01,
            4.0000000e01,
            4.1000000e01,
            4.2000000e01,
            4.3000000e01,
            4.4000000e01,
            4.5000000e01,
            4.6000000e01,
            4.7000000e01,
            4.8000000e01,
            4.9000000e01,
            5.0000000e01,
            5.1000000e01,
            5.2000000e01,
            5.3000000e01,
            5.4000000e01,
            5.5000000e01,
            5.6000000e01,
            5.7000000e01,
            5.8000000e01,
            5.9000000e01,
            6.0000000e01,
            6.1000000e01,
            6.2000000e01,
            6.3000000e01,
            6.4000000e01,
            6.5000000e01,
            6.6000000e01,
            6.7000000e01,
            6.8000000e01,
            6.9000000e01,
            7.0000000e01,
            7.1000000e01,
            7.2000000e01,
            7.3000000e01,
            7.4000000e01,
            7.5000000e01,
            7.6000000e01,
            7.7000000e01,
            7.8000000e01,
            7.9000000e01,
            0.0000000e00,
            -1.4901161e-08,
            -2.9802322e-08,
            0.0000000e00,
            -5.9604645e-08,
            0.0000000e00,
            0.0000000e00,
            1.1920929e-07,
            -1.1920929e-07,
            1.1920929e-07,
            0.0000000e00,
            -2.3841858e-07,
            0.0000000e00,
            2.3841858e-07,
            2.3841858e-07,
            0.0000000e00,
            -2.3841858e-07,
            -2.3841858e-07,
            2.3841858e-07,
            2.3841858e-07,
            0.0000000e00,
            4.7683716e-07,
            -4.7683716e-07,
            4.7683716e-07,
            0.0000000e00,
            0.0000000e00,
            4.7683716e-07,
            -4.7683716e-07,
            4.7683716e-07,
            -4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            -4.7683716e-07,
            4.7683716e-07,
            -4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            -4.7683716e-07,
            4.7683716e-07,
            -4.7683716e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            0.0000000e00,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            0.0000000e00,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            0.0000000e00,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            -9.5367432e-07,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            -9.5367432e-07,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            -9.5367432e-07,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            -9.5367432e-07,
            -9.5367432e-07,
            0.0000000e00,
            9.5367432e-07,
            9.5367432e-07,
            -9.5367432e-07,
            -9.5367432e-07,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            5.9604645e-08,
            0.0000000e00,
            5.9604645e-08,
            0.0000000e00,
            0.0000000e00,
            1.1920929e-07,
            5.9604645e-08,
            0.0000000e00,
            0.0000000e00,
            1.1920929e-07,
            0.0000000e00,
            0.0000000e00,
            2.3841858e-07,
            0.0000000e00,
            2.3841858e-07,
            2.3841858e-07,
            0.0000000e00,
            1.1920929e-07,
            2.3841858e-07,
            0.0000000e00,
            2.3841858e-07,
            0.0000000e00,
            0.0000000e00,
            2.3841858e-07,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            0.0000000e00,
            4.7683716e-07,
            4.7683716e-07,
            2.3841858e-07,
            4.7683716e-07,
            4.7683716e-07,
            0.0000000e00,
            0.0000000e00,
            2.3841858e-07,
            0.0000000e00,
            4.7683716e-07,
            2.3841858e-07,
            0.0000000e00,
            4.7683716e-07,
            4.7683716e-07,
            9.5367432e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            4.7683716e-07,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            9.5367432e-07,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            0.0000000e00,
            4.7683716e-07,
            9.5367432e-07,
            4.7683716e-07,
            9.5367432e-07,
            0.0000000e00,
            4.7683716e-07,
            4.7683716e-07,
        ],
        dtype=np.float32,
    )
  def test_detla_delta(self):
    """ test delta delta: the op must preserve rank, widen the feature
    dimension by (order + 1), and match the Kaldi reference values."""
    with self.session():
      feat = tf.constant(self.data[None, :], dtype=tf.float32)
      output = py_x_ops.delta_delta(feat, order=self.order, window=self.window)
      # Rank is preserved; only the last dimension grows.
      self.assertEqual(tf.rank(output).eval(), tf.rank(feat).eval())
      self.assertEqual(output.shape, (1, self.feat_dim * (self.order + 1)))
      self.assertAllClose(output.eval(), self.output_true[None, :])
if __name__ == "__main__":
  # Run the test suite with INFO-level logging when executed directly.
  logging.set_verbosity(logging.INFO)
  tf.test.main()
| [
"tensorflow.rank",
"tensorflow.test.main",
"tempfile.mktemp",
"numpy.array",
"delta.layers.ops.py_x_ops.delta_delta",
"tensorflow.constant",
"absl.logging.set_verbosity",
"numpy.arange"
] | [((9524, 9559), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (9545, 9559), False, 'from absl import logging\n'), ((9564, 9578), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (9576, 9578), True, 'import tensorflow as tf\n'), ((1128, 1170), 'numpy.arange', 'np.arange', (['self.feat_dim'], {'dtype': 'np.float32'}), '(self.feat_dim, dtype=np.float32)\n', (1137, 1170), True, 'import numpy as np\n'), ((1247, 1281), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'suffix': '"""feat.ark"""'}), "(suffix='feat.ark')\n", (1262, 1281), False, 'import tempfile\n'), ((1301, 1335), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'suffix': '"""feat.scp"""'}), "(suffix='feat.scp')\n", (1316, 1335), False, 'import tempfile\n'), ((1606, 4079), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, \n 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,\n 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0,\n 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,\n 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0,\n 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0,\n 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 0.0, -1.4901161e-08, -2.9802322e-08,\n 0.0, -5.9604645e-08, 0.0, 0.0, 1.1920929e-07, -1.1920929e-07, \n 1.1920929e-07, 0.0, -2.3841858e-07, 0.0, 2.3841858e-07, 2.3841858e-07, \n 0.0, -2.3841858e-07, -2.3841858e-07, 2.3841858e-07, 2.3841858e-07, 0.0,\n 4.7683716e-07, -4.7683716e-07, 4.7683716e-07, 0.0, 0.0, 4.7683716e-07, \n -4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0, 4.7683716e-07, -\n 4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0, 4.7683716e-07, -\n 4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0, 9.5367432e-07, \n 9.5367432e-07, 0.0, -9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, \n 0.0, -9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, 0.0, -\n 
9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 5.9604645e-08, 0.0, \n 5.9604645e-08, 0.0, 0.0, 1.1920929e-07, 5.9604645e-08, 0.0, 0.0, \n 1.1920929e-07, 0.0, 0.0, 2.3841858e-07, 0.0, 2.3841858e-07, \n 2.3841858e-07, 0.0, 1.1920929e-07, 2.3841858e-07, 0.0, 2.3841858e-07, \n 0.0, 0.0, 2.3841858e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 4.7683716e-07, 0.0, \n 0.0, 4.7683716e-07, 4.7683716e-07, 2.3841858e-07, 4.7683716e-07, \n 4.7683716e-07, 0.0, 0.0, 2.3841858e-07, 0.0, 4.7683716e-07, \n 2.3841858e-07, 0.0, 4.7683716e-07, 4.7683716e-07, 9.5367432e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 4.7683716e-07, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 0.0, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 9.5367432e-07, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 9.5367432e-07, 4.7683716e-07, \n 9.5367432e-07, 0.0, 4.7683716e-07, 4.7683716e-07]'], {'dtype': 'np.float32'}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, \n 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0,\n 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0,\n 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,\n 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,\n 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0,\n 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 0.0, -1.4901161e-08, -\n 2.9802322e-08, 0.0, -5.9604645e-08, 0.0, 0.0, 1.1920929e-07, -\n 1.1920929e-07, 1.1920929e-07, 0.0, -2.3841858e-07, 0.0, 2.3841858e-07, \n 2.3841858e-07, 0.0, -2.3841858e-07, -2.3841858e-07, 2.3841858e-07, \n 2.3841858e-07, 0.0, 
4.7683716e-07, -4.7683716e-07, 4.7683716e-07, 0.0, \n 0.0, 4.7683716e-07, -4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0,\n 4.7683716e-07, -4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0, \n 4.7683716e-07, -4.7683716e-07, 4.7683716e-07, -4.7683716e-07, 0.0, \n 9.5367432e-07, 9.5367432e-07, 0.0, -9.5367432e-07, 0.0, 9.5367432e-07, \n 9.5367432e-07, 0.0, -9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, \n 0.0, -9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07,\n -9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 9.5367432e-07, 9.5367432e-07, -9.5367432e-07, -\n 9.5367432e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 5.9604645e-08, 0.0, \n 5.9604645e-08, 0.0, 0.0, 1.1920929e-07, 5.9604645e-08, 0.0, 0.0, \n 1.1920929e-07, 0.0, 0.0, 2.3841858e-07, 0.0, 2.3841858e-07, \n 2.3841858e-07, 0.0, 1.1920929e-07, 2.3841858e-07, 0.0, 2.3841858e-07, \n 0.0, 0.0, 2.3841858e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 4.7683716e-07, 0.0, \n 0.0, 4.7683716e-07, 4.7683716e-07, 2.3841858e-07, 4.7683716e-07, \n 4.7683716e-07, 0.0, 0.0, 2.3841858e-07, 0.0, 4.7683716e-07, \n 2.3841858e-07, 0.0, 4.7683716e-07, 4.7683716e-07, 9.5367432e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 4.7683716e-07, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 0.0, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 9.5367432e-07, 4.7683716e-07, 0.0, \n 4.7683716e-07, 0.0, 4.7683716e-07, 9.5367432e-07, 4.7683716e-07, \n 9.5367432e-07, 0.0, 4.7683716e-07, 4.7683716e-07], dtype=np.float32)\n', (1614, 4079), True, 'import numpy as np\n'), ((9124, 9173), 'tensorflow.constant', 'tf.constant', (['self.data[None, :]'], {'dtype': 'tf.float32'}), '(self.data[None, :], dtype=tf.float32)\n', (9135, 9173), True, 'import tensorflow as tf\n'), ((9195, 9259), 'delta.layers.ops.py_x_ops.delta_delta', 'py_x_ops.delta_delta', (['feat'], 
{'order': 'self.order', 'window': 'self.window'}), '(feat, order=self.order, window=self.window)\n', (9215, 9259), False, 'from delta.layers.ops import py_x_ops\n'), ((9289, 9304), 'tensorflow.rank', 'tf.rank', (['output'], {}), '(output)\n', (9296, 9304), True, 'import tensorflow as tf\n'), ((9313, 9326), 'tensorflow.rank', 'tf.rank', (['feat'], {}), '(feat)\n', (9320, 9326), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""Structured_Images_1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bc1bYBxAPGwsIONAjZDC2eRY4Bpm1W7v
"""
#!sudo apt install tesseract-ocr
#!pip install pytesseract
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
import pytesseract
import argparse
import time
import sys
from PIL import Image
from scipy.ndimage import interpolation as inter
from matplotlib import pyplot as plt
import tempfile
#from google.colab import files
#uploaded = files.upload()
def structured(img):
#image_path = "/content/8.png"
image_path = img
IMAGE_SIZE = 1800
def set_image_dpi(file_path):
im = Image.open(file_path)
length_x, width_y = im.size
factor = max(1, int(IMAGE_SIZE / length_x))
size = factor * length_x, factor * width_y
# size = (1800, 1800)
im_resized = im.resize(size, Image.ANTIALIAS)
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
im_resized.save(temp_filename, dpi=(300, 300))
return temp_filename
#img= cv2.imread(img)
#im = Image.open(img)
#show image
#im.show()
#noise removal and smoothening
BINARY_THREHOLD = 180
def image_smoothening(img):
ret1, th1 = cv2.threshold(img, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)
ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
blur = cv2.GaussianBlur(th2, (1, 1), 0)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return th3
def remove_noise_and_smooth(file_name):
img = cv2.imread(file_name, 0)
filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)
kernel = np.ones((1, 1), np.uint8)
opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
img = image_smoothening(img)
or_image = cv2.bitwise_or(img, closing)
return or_image
img_dpi = set_image_dpi(image_path)
import cv2
import numpy as np
# read the image
img = cv2.imread(img_dpi)
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT , (3,3))
smooth = cv2.morphologyEx(gray, cv2.MORPH_DILATE, kernel)
# alternate blur in place of morphology
#smooth = cv2.GaussianBlur(gray, (15,15), 0)
# divide gray by morphology image
division = cv2.divide(gray, smooth, scale=255)
# threshold
result = cv2.threshold(division, 0, 255, cv2.THRESH_OTSU )[1]
# save results
cv2.imwrite('img_thresh.png',result)
import cv2
import numpy as np
from scipy.ndimage import interpolation as inter
def correct_skew(image, delta=1, limit=5):
def determine_score(arr, angle):
data = inter.rotate(arr, angle, reshape=False, order=0)
histogram = np.sum(data, axis=1)
score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
return histogram, score
#img = cv2.imread(image, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
scores = []
angles = np.arange(-limit, limit + delta, delta)
for angle in angles:
histogram, score = determine_score(thresh, angle)
scores.append(score)
best_angle = angles[scores.index(max(scores))]
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, best_angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
borderMode=cv2.BORDER_REPLICATE)
return best_angle, rotated
img3 = cv2.imread(img_dpi)
angle, rotated = correct_skew(img3)
extractedInformation = pytesseract.image_to_string(rotated)
#print(extractedInformation)
#print(pytesseract.image_to_boxes(rotated))
return extractedInformation | [
"cv2.imwrite",
"PIL.Image.open",
"cv2.warpAffine",
"numpy.ones",
"cv2.threshold",
"cv2.divide",
"cv2.morphologyEx",
"tempfile.NamedTemporaryFile",
"numpy.sum",
"cv2.bitwise_or",
"cv2.cvtColor",
"pytesseract.image_to_string",
"scipy.ndimage.interpolation.rotate",
"cv2.getRotationMatrix2D",
... | [((2379, 2398), 'cv2.imread', 'cv2.imread', (['img_dpi'], {}), '(img_dpi)\n', (2389, 2398), False, 'import cv2\n'), ((2436, 2473), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2448, 2473), False, 'import cv2\n'), ((2513, 2562), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (2538, 2562), False, 'import cv2\n'), ((2577, 2625), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_DILATE', 'kernel'], {}), '(gray, cv2.MORPH_DILATE, kernel)\n', (2593, 2625), False, 'import cv2\n'), ((2780, 2815), 'cv2.divide', 'cv2.divide', (['gray', 'smooth'], {'scale': '(255)'}), '(gray, smooth, scale=255)\n', (2790, 2815), False, 'import cv2\n'), ((2930, 2967), 'cv2.imwrite', 'cv2.imwrite', (['"""img_thresh.png"""', 'result'], {}), "('img_thresh.png', result)\n", (2941, 2967), False, 'import cv2\n'), ((4157, 4176), 'cv2.imread', 'cv2.imread', (['img_dpi'], {}), '(img_dpi)\n', (4167, 4176), False, 'import cv2\n'), ((4248, 4284), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['rotated'], {}), '(rotated)\n', (4275, 4284), False, 'import pytesseract\n'), ((781, 802), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (791, 802), False, 'from PIL import Image\n'), ((1052, 1108), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".png"""'}), "(delete=False, suffix='.png')\n", (1079, 1108), False, 'import tempfile\n'), ((1453, 1512), 'cv2.threshold', 'cv2.threshold', (['img', 'BINARY_THREHOLD', '(255)', 'cv2.THRESH_BINARY'], {}), '(img, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)\n', (1466, 1512), False, 'import cv2\n'), ((1534, 1597), 'cv2.threshold', 'cv2.threshold', (['th1', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1547, 1597), False, 'import cv2\n'), ((1614, 1646), 
'cv2.GaussianBlur', 'cv2.GaussianBlur', (['th2', '(1, 1)', '(0)'], {}), '(th2, (1, 1), 0)\n', (1630, 1646), False, 'import cv2\n'), ((1668, 1732), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1681, 1732), False, 'import cv2\n'), ((1815, 1839), 'cv2.imread', 'cv2.imread', (['file_name', '(0)'], {}), '(file_name, 0)\n', (1825, 1839), False, 'import cv2\n'), ((1981, 2006), 'numpy.ones', 'np.ones', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (1988, 2006), True, 'import numpy as np\n'), ((2026, 2076), 'cv2.morphologyEx', 'cv2.morphologyEx', (['filtered', 'cv2.MORPH_OPEN', 'kernel'], {}), '(filtered, cv2.MORPH_OPEN, kernel)\n', (2042, 2076), False, 'import cv2\n'), ((2096, 2146), 'cv2.morphologyEx', 'cv2.morphologyEx', (['opening', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(opening, cv2.MORPH_CLOSE, kernel)\n', (2112, 2146), False, 'import cv2\n'), ((2205, 2233), 'cv2.bitwise_or', 'cv2.bitwise_or', (['img', 'closing'], {}), '(img, closing)\n', (2219, 2233), False, 'import cv2\n'), ((2849, 2897), 'cv2.threshold', 'cv2.threshold', (['division', '(0)', '(255)', 'cv2.THRESH_OTSU'], {}), '(division, 0, 255, cv2.THRESH_OTSU)\n', (2862, 2897), False, 'import cv2\n'), ((3448, 3487), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (3460, 3487), False, 'import cv2\n'), ((3620, 3659), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + delta)', 'delta'], {}), '(-limit, limit + delta, delta)\n', (3629, 3659), True, 'import numpy as np\n'), ((3929, 3977), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'best_angle', '(1.0)'], {}), '(center, best_angle, 1.0)\n', (3952, 3977), False, 'import cv2\n'), ((3997, 4090), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'flags': 'cv2.INTER_CUBIC', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(image, M, (w, h), flags=cv2.INTER_CUBIC, 
borderMode=cv2.\n BORDER_REPLICATE)\n', (4011, 4090), False, 'import cv2\n'), ((3175, 3223), 'scipy.ndimage.interpolation.rotate', 'inter.rotate', (['arr', 'angle'], {'reshape': '(False)', 'order': '(0)'}), '(arr, angle, reshape=False, order=0)\n', (3187, 3223), True, 'from scipy.ndimage import interpolation as inter\n'), ((3249, 3269), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3255, 3269), True, 'import numpy as np\n'), ((3291, 3336), 'numpy.sum', 'np.sum', (['((histogram[1:] - histogram[:-1]) ** 2)'], {}), '((histogram[1:] - histogram[:-1]) ** 2)\n', (3297, 3336), True, 'import numpy as np\n'), ((3506, 3574), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (3519, 3574), False, 'import cv2\n')] |
import numpy as np
import os
from matplotlib import pyplot as plt
import tikzplotlib
def beautify(ax):
ax.set_frame_on(True)
ax.minorticks_on()
ax.grid(True)
ax.grid(linestyle=':')
ax.tick_params(which='both', direction='in',
bottom=True, labelbottom=True,
top=True, labeltop=False,
right=True, labelright=False,
left=True, labelleft=True)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=3)
ax.autoscale(tight=True)
# ax.set_aspect('equal')
if ax.get_legend():
ax.legend(loc='best')
return ax
def plot_gaussian(mu, lmbda, color='r', label='', alpha=1.0, ax=None, artists=None):
ax = ax if ax else plt.gca()
t = np.hstack([np.arange(0, 2 * np.pi, 0.01), 0])
circle = np.vstack([np.sin(t), np.cos(t)])
ellipse = np.dot(np.linalg.cholesky(lmbda), circle)
if artists is None:
point = ax.scatter([mu[0]], [mu[1]], marker='D', color=color, s=4,
alpha=alpha)
line, = ax.plot(ellipse[0, :] + mu[0], ellipse[1, :] + mu[1],
linestyle='-', linewidth=2, color=color, label=label,
alpha=alpha)
else:
line, point = artists
point.set_offsets(mu)
point.set_alpha(alpha)
point.set_color(color)
line.set_xdata(ellipse[0, :] + mu[0])
line.set_ydata(ellipse[1, :] + mu[1])
line.set_alpha(alpha)
line.set_color(color)
return (line, point) if point else (line,)
def plot_violin_box(data, nb_cols=None,
tikz_path=None, pdf_path=None,
x_label=None, y_label=None,
title=None, x_categories=None,
violin=True, box=True, show=False):
def set_axis_style(ax, labels):
ax.get_xaxis().set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel(x_label)
if box:
fig, ax1 = plt.subplots(nrows=1, ncols=1, sharey='all')
ax1.boxplot(data, showfliers=True, showmeans=True, meanline=True)
labels = x_categories
set_axis_style(ax1, labels)
set_axis_style(ax1, labels)
ax1.set_ylabel(y_label)
tikzplotlib.save(tikz_path + '_box.tex', encoding=None)
plt.savefig(pdf_path + '_box.pdf')
if show:
plt.show()
if violin:
fig, ax2 = plt.subplots(nrows=1, ncols=1, sharey='all')
parts = ax2.violinplot(data, showmeans=False, showmedians=False, showextrema=False)
for pc in parts['bodies']:
pc.set_edgecolor('black')
pc.set_alpha(1)
quartile1, medians, quartile3 = np.percentile(data, [25, 50, 75], axis=0)
if nb_cols != 1:
inds = np.arange(1, len(medians) + 1)
else:
inds = np.arange(1, 2)
ax2.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
ax2.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
labels = x_categories
set_axis_style(ax2, labels)
set_axis_style(ax2, labels)
ax2.set_ylabel(y_label)
tikzplotlib.save(tikz_path + '_violin.tex', encoding=None)
plt.savefig(pdf_path + '_violin.pdf')
if show:
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"numpy.sin",
"numpy.cos",
"numpy.linalg.cholesky",
"numpy.percentile",
"tikzplotlib.save",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((769, 778), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (776, 778), True, 'from matplotlib import pyplot as plt\n'), ((902, 927), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['lmbda'], {}), '(lmbda)\n', (920, 927), True, 'import numpy as np\n'), ((2178, 2222), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'sharey': '"""all"""'}), "(nrows=1, ncols=1, sharey='all')\n", (2190, 2222), True, 'from matplotlib import pyplot as plt\n'), ((2441, 2496), 'tikzplotlib.save', 'tikzplotlib.save', (["(tikz_path + '_box.tex')"], {'encoding': 'None'}), "(tikz_path + '_box.tex', encoding=None)\n", (2457, 2496), False, 'import tikzplotlib\n'), ((2505, 2539), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(pdf_path + '_box.pdf')"], {}), "(pdf_path + '_box.pdf')\n", (2516, 2539), True, 'from matplotlib import pyplot as plt\n'), ((2615, 2659), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'sharey': '"""all"""'}), "(nrows=1, ncols=1, sharey='all')\n", (2627, 2659), True, 'from matplotlib import pyplot as plt\n'), ((2894, 2935), 'numpy.percentile', 'np.percentile', (['data', '[25, 50, 75]'], {'axis': '(0)'}), '(data, [25, 50, 75], axis=0)\n', (2907, 2935), True, 'import numpy as np\n'), ((3362, 3420), 'tikzplotlib.save', 'tikzplotlib.save', (["(tikz_path + '_violin.tex')"], {'encoding': 'None'}), "(tikz_path + '_violin.tex', encoding=None)\n", (3378, 3420), False, 'import tikzplotlib\n'), ((3429, 3466), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(pdf_path + '_violin.pdf')"], {}), "(pdf_path + '_violin.pdf')\n", (3440, 3466), True, 'from matplotlib import pyplot as plt\n'), ((799, 828), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (808, 828), True, 'import numpy as np\n'), ((858, 867), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (864, 867), True, 'import numpy as np\n'), ((869, 878), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (875, 878), 
True, 'import numpy as np\n'), ((2569, 2579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2577, 2579), True, 'from matplotlib import pyplot as plt\n'), ((3044, 3059), 'numpy.arange', 'np.arange', (['(1)', '(2)'], {}), '(1, 2)\n', (3053, 3059), True, 'import numpy as np\n'), ((3497, 3507), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3505, 3507), True, 'from matplotlib import pyplot as plt\n')] |
import functools
from math import log
import numpy as np
import tree
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.numpy import SMALL_NUMBER, MIN_LOG_NN_OUTPUT, \
MAX_LOG_NN_OUTPUT
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.torch_ops import atanh
from ray.rllib.utils.typing import TensorType, List
torch, nn = try_import_torch()
class TorchDistributionWrapper(ActionDistribution):
"""Wrapper class for torch.distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: TorchModelV2):
# If inputs are not a torch Tensor, make them one and make sure they
# are on the correct device.
if not isinstance(inputs, torch.Tensor):
inputs = torch.from_numpy(inputs)
if isinstance(model, TorchModelV2):
inputs = inputs.to(next(model.parameters()).device)
super().__init__(inputs, model)
# Store the last sample here.
self.last_sample = None
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
return self.dist.log_prob(actions)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return torch.distributions.kl.kl_divergence(self.dist, other.dist)
@override(ActionDistribution)
def sample(self) -> TensorType:
self.last_sample = self.dist.sample()
return self.last_sample
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
assert self.last_sample is not None
return self.logp(self.last_sample)
class TorchCategorical(TorchDistributionWrapper):
"""Wrapper class for PyTorch Categorical distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model=None, temperature=1.0):
if temperature != 1.0:
assert temperature > 0.0, \
"Categorical `temperature` must be > 0.0!"
inputs /= temperature
super().__init__(inputs, model)
self.dist = torch.distributions.categorical.Categorical(
logits=self.inputs)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.dist.probs.argmax(dim=1)
return self.last_sample
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
class TorchMultiCategorical(TorchDistributionWrapper):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
@override(TorchDistributionWrapper)
def __init__(self, inputs, model, input_lens):
super().__init__(inputs, model)
# If input_lens is np.ndarray or list, force-make it a tuple.
inputs_split = self.inputs.split(tuple(input_lens), dim=1)
self.cats = [
torch.distributions.categorical.Categorical(logits=input_)
for input_ in inputs_split
]
@override(TorchDistributionWrapper)
def sample(self):
arr = [cat.sample() for cat in self.cats]
self.last_sample = torch.stack(arr, dim=1)
return self.last_sample
@override(ActionDistribution)
def deterministic_sample(self):
arr = [torch.argmax(cat.probs, -1) for cat in self.cats]
self.last_sample = torch.stack(arr, dim=1)
return self.last_sample
@override(TorchDistributionWrapper)
def logp(self, actions):
# # If tensor is provided, unstack it into list.
if isinstance(actions, torch.Tensor):
actions = torch.unbind(actions, dim=1)
logps = torch.stack(
[cat.log_prob(act) for cat, act in zip(self.cats, actions)])
return torch.sum(logps, dim=0)
@override(ActionDistribution)
def multi_entropy(self):
return torch.stack([cat.entropy() for cat in self.cats], dim=1)
@override(TorchDistributionWrapper)
def entropy(self):
return torch.sum(self.multi_entropy(), dim=1)
@override(ActionDistribution)
def multi_kl(self, other):
return torch.stack(
[
torch.distributions.kl.kl_divergence(cat, oth_cat)
for cat, oth_cat in zip(self.cats, other.cats)
],
dim=1,
)
@override(TorchDistributionWrapper)
def kl(self, other):
return torch.sum(self.multi_kl(other), dim=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.sum(action_space.nvec)
class TorchDiagGaussian(TorchDistributionWrapper):
"""Wrapper class for PyTorch Normal distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model):
super().__init__(inputs, model)
mean, log_std = torch.chunk(self.inputs, 2, dim=1)
self.dist = torch.distributions.normal.Normal(mean, torch.exp(log_std))
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.dist.mean
return self.last_sample
@override(TorchDistributionWrapper)
def logp(self, actions):
return super().logp(actions).sum(-1)
@override(TorchDistributionWrapper)
def entropy(self):
return super().entropy().sum(-1)
@override(TorchDistributionWrapper)
def kl(self, other):
return super().kl(other).sum(-1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
class TorchSquashedGaussian(TorchDistributionWrapper):
"""A tanh-squashed Gaussian distribution defined by: mean, std, low, high.
The distribution will never return low or high exactly, but
`low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
"""
def __init__(self, inputs, model, low=-1.0, high=1.0):
"""Parameterizes the distribution via `inputs`.
Args:
low (float): The lowest possible sampling value
(excluding this value).
high (float): The highest possible sampling value
(excluding this value).
"""
super().__init__(inputs, model)
# Split inputs into mean and log(std).
mean, log_std = torch.chunk(self.inputs, 2, dim=-1)
# Clip `scale` values (coming from NN) to reasonable values.
log_std = torch.clamp(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)
std = torch.exp(log_std)
self.dist = torch.distributions.normal.Normal(mean, std)
assert np.all(np.less(low, high))
self.low = low
self.high = high
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self._squash(self.dist.mean)
return self.last_sample
@override(TorchDistributionWrapper)
def sample(self):
# Use the reparameterization version of `dist.sample` to allow for
# the results to be backprop'able e.g. in a loss term.
normal_sample = self.dist.rsample()
self.last_sample = self._squash(normal_sample)
return self.last_sample
@override(ActionDistribution)
def logp(self, x):
# Unsquash values (from [low,high] to ]-inf,inf[)
unsquashed_values = self._unsquash(x)
# Get log prob of unsquashed values from our Normal.
log_prob_gaussian = self.dist.log_prob(unsquashed_values)
# For safety reasons, clamp somehow, only then sum up.
log_prob_gaussian = torch.clamp(log_prob_gaussian, -100, 100)
log_prob_gaussian = torch.sum(log_prob_gaussian, dim=-1)
# Get log-prob for squashed Gaussian.
unsquashed_values_tanhd = torch.tanh(unsquashed_values)
log_prob = log_prob_gaussian - torch.sum(
torch.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER), dim=-1)
return log_prob
def _squash(self, raw_values):
# Returned values are within [low, high] (including `low` and `high`).
squashed = ((torch.tanh(raw_values) + 1.0) / 2.0) * \
(self.high - self.low) + self.low
return torch.clamp(squashed, self.low, self.high)
def _unsquash(self, values):
normed_values = (values - self.low) / (self.high - self.low) * 2.0 - \
1.0
# Stabilize input to atanh.
save_normed_values = torch.clamp(normed_values, -1.0 + SMALL_NUMBER,
1.0 - SMALL_NUMBER)
unsquashed = atanh(save_normed_values)
return unsquashed
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
class TorchBeta(TorchDistributionWrapper):
"""
A Beta distribution is defined on the interval [0, 1] and parameterized by
shape parameters alpha and beta (also called concentration parameters).
PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
and Gamma(n) = (n - 1)!
"""
def __init__(self, inputs, model, low=0.0, high=1.0):
super().__init__(inputs, model)
# Stabilize input parameters (possibly coming from a linear layer).
self.inputs = torch.clamp(self.inputs, log(SMALL_NUMBER),
-log(SMALL_NUMBER))
self.inputs = torch.log(torch.exp(self.inputs) + 1.0) + 1.0
self.low = low
self.high = high
alpha, beta = torch.chunk(self.inputs, 2, dim=-1)
# Note: concentration0==beta, concentration1=alpha (!)
self.dist = torch.distributions.Beta(
concentration1=alpha, concentration0=beta)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self._squash(self.dist.mean)
return self.last_sample
@override(TorchDistributionWrapper)
def sample(self):
# Use the reparameterization version of `dist.sample` to allow for
# the results to be backprop'able e.g. in a loss term.
normal_sample = self.dist.rsample()
self.last_sample = self._squash(normal_sample)
return self.last_sample
@override(ActionDistribution)
def logp(self, x):
unsquashed_values = self._unsquash(x)
return torch.sum(self.dist.log_prob(unsquashed_values), dim=-1)
def _squash(self, raw_values):
return raw_values * (self.high - self.low) + self.low
def _unsquash(self, values):
return (values - self.low) / (self.high - self.low)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
class TorchDeterministic(TorchDistributionWrapper):
"""Action distribution that returns the input values directly.
This is similar to DiagGaussian with standard deviation zero (thus only
requiring the "mean" values as NN output).
"""
@override(ActionDistribution)
def deterministic_sample(self):
return self.inputs
@override(TorchDistributionWrapper)
def sampled_action_logp(self):
return torch.zeros((self.inputs.size()[0], ), dtype=torch.float32)
@override(TorchDistributionWrapper)
def sample(self):
return self.deterministic_sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape)
class TorchMultiActionDistribution(TorchDistributionWrapper):
"""Action distribution that operates on multiple, possibly nested actions.
"""
def __init__(self, inputs, model, *, child_distributions, input_lens,
action_space):
"""Initializes a TorchMultiActionDistribution object.
Args:
inputs (torch.Tensor): A single tensor of shape [BATCH, size].
model (TorchModelV2): The TorchModelV2 object used to produce
inputs for this distribution.
child_distributions (any[torch.Tensor]): Any struct
that contains the child distribution classes to use to
instantiate the child distributions from `inputs`. This could
be an already flattened list or a struct according to
`action_space`.
input_lens (any[int]): A flat list or a nested struct of input
split lengths used to split `inputs`.
action_space (Union[gym.spaces.Dict,gym.spaces.Tuple]): The complex
and possibly nested action space.
"""
if not isinstance(inputs, torch.Tensor):
inputs = torch.from_numpy(inputs)
if isinstance(model, TorchModelV2):
inputs = inputs.to(next(model.parameters()).device)
super().__init__(inputs, model)
self.action_space_struct = get_base_struct_from_space(action_space)
self.input_lens = tree.flatten(input_lens)
flat_child_distributions = tree.flatten(child_distributions)
split_inputs = torch.split(inputs, self.input_lens, dim=1)
self.flat_child_distributions = tree.map_structure(
lambda dist, input_: dist(input_, model), flat_child_distributions,
list(split_inputs))
@override(ActionDistribution)
def logp(self, x):
if isinstance(x, np.ndarray):
x = torch.Tensor(x)
# Single tensor input (all merged).
if isinstance(x, torch.Tensor):
split_indices = []
for dist in self.flat_child_distributions:
if isinstance(dist, TorchCategorical):
split_indices.append(1)
else:
split_indices.append(dist.sample().size()[1])
split_x = list(torch.split(x, split_indices, dim=1))
# Structured or flattened (by single action component) input.
else:
split_x = tree.flatten(x)
def map_(val, dist):
# Remove extra categorical dimension.
if isinstance(dist, TorchCategorical):
val = torch.squeeze(val, dim=-1).int()
return dist.logp(val)
# Remove extra categorical dimension and take the logp of each
# component.
flat_logps = tree.map_structure(map_, split_x,
self.flat_child_distributions)
return functools.reduce(lambda a, b: a + b, flat_logps)
@override(ActionDistribution)
def kl(self, other):
kl_list = [
d.kl(o) for d, o in zip(self.flat_child_distributions,
other.flat_child_distributions)
]
return functools.reduce(lambda a, b: a + b, kl_list)
@override(ActionDistribution)
def entropy(self):
entropy_list = [d.entropy() for d in self.flat_child_distributions]
return functools.reduce(lambda a, b: a + b, entropy_list)
@override(ActionDistribution)
def sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.sample(), child_distributions)
@override(ActionDistribution)
def deterministic_sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.deterministic_sample(),
child_distributions)
@override(TorchDistributionWrapper)
def sampled_action_logp(self):
p = self.flat_child_distributions[0].sampled_action_logp()
for c in self.flat_child_distributions[1:]:
p += c.sampled_action_logp()
return p
@override(ActionDistribution)
def required_model_output_shape(self, action_space, model_config):
return np.sum(self.input_lens)
| [
"tree.flatten",
"numpy.prod",
"numpy.less",
"functools.reduce",
"ray.rllib.utils.annotations.override",
"math.log",
"numpy.sum",
"ray.rllib.utils.spaces.space_utils.get_base_struct_from_space",
"tree.unflatten_as",
"ray.rllib.utils.torch_ops.atanh",
"ray.rllib.utils.framework.try_import_torch",
... | [((570, 588), 'ray.rllib.utils.framework.try_import_torch', 'try_import_torch', ([], {}), '()\n', (586, 588), False, 'from ray.rllib.utils.framework import try_import_torch\n'), ((698, 726), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (706, 726), False, 'from ray.rllib.utils.annotations import override\n'), ((1239, 1267), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1247, 1267), False, 'from ray.rllib.utils.annotations import override\n'), ((1372, 1400), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1380, 1400), False, 'from ray.rllib.utils.annotations import override\n'), ((1479, 1507), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1487, 1507), False, 'from ray.rllib.utils.annotations import override\n'), ((1648, 1676), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1656, 1676), False, 'from ray.rllib.utils.annotations import override\n'), ((1797, 1825), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (1805, 1825), False, 'from ray.rllib.utils.annotations import override\n'), ((2082, 2110), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (2090, 2110), False, 'from ray.rllib.utils.annotations import override\n'), ((2479, 2507), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (2487, 2507), False, 'from ray.rllib.utils.annotations import override\n'), ((2657, 2685), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (2665, 2685), False, 'from ray.rllib.utils.annotations import override\n'), ((2917, 2951), 
'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (2925, 2951), False, 'from ray.rllib.utils.annotations import override\n'), ((3328, 3362), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (3336, 3362), False, 'from ray.rllib.utils.annotations import override\n'), ((3524, 3552), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (3532, 3552), False, 'from ray.rllib.utils.annotations import override\n'), ((3743, 3777), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (3751, 3777), False, 'from ray.rllib.utils.annotations import override\n'), ((4108, 4136), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4116, 4136), False, 'from ray.rllib.utils.annotations import override\n'), ((4244, 4278), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (4252, 4278), False, 'from ray.rllib.utils.annotations import override\n'), ((4362, 4390), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4370, 4390), False, 'from ray.rllib.utils.annotations import override\n'), ((4644, 4678), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (4652, 4678), False, 'from ray.rllib.utils.annotations import override\n'), ((4782, 4810), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (4790, 4810), False, 'from ray.rllib.utils.annotations import override\n'), ((5033, 5061), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5041, 5061), False, 'from 
ray.rllib.utils.annotations import override\n'), ((5286, 5314), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5294, 5314), False, 'from ray.rllib.utils.annotations import override\n'), ((5431, 5465), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (5439, 5465), False, 'from ray.rllib.utils.annotations import override\n'), ((5546, 5580), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (5554, 5580), False, 'from ray.rllib.utils.annotations import override\n'), ((5651, 5685), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (5659, 5685), False, 'from ray.rllib.utils.annotations import override\n'), ((5776, 5804), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (5784, 5804), False, 'from ray.rllib.utils.annotations import override\n'), ((7018, 7046), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (7026, 7046), False, 'from ray.rllib.utils.annotations import override\n'), ((7177, 7211), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (7185, 7211), False, 'from ray.rllib.utils.annotations import override\n'), ((7509, 7537), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (7517, 7537), False, 'from ray.rllib.utils.annotations import override\n'), ((8945, 8973), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (8953, 8973), False, 'from ray.rllib.utils.annotations import override\n'), ((10103, 10131), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), 
'(ActionDistribution)\n', (10111, 10131), False, 'from ray.rllib.utils.annotations import override\n'), ((10262, 10296), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (10270, 10296), False, 'from ray.rllib.utils.annotations import override\n'), ((10594, 10622), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (10602, 10622), False, 'from ray.rllib.utils.annotations import override\n'), ((10980, 11008), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (10988, 11008), False, 'from ray.rllib.utils.annotations import override\n'), ((11380, 11408), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (11388, 11408), False, 'from ray.rllib.utils.annotations import override\n'), ((11478, 11512), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (11486, 11512), False, 'from ray.rllib.utils.annotations import override\n'), ((11629, 11663), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (11637, 11663), False, 'from ray.rllib.utils.annotations import override\n'), ((11753, 11781), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (11761, 11781), False, 'from ray.rllib.utils.annotations import override\n'), ((13700, 13728), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (13708, 13728), False, 'from ray.rllib.utils.annotations import override\n'), ((14876, 14904), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (14884, 14904), False, 'from ray.rllib.utils.annotations import override\n'), ((15162, 15190), 
'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (15170, 15190), False, 'from ray.rllib.utils.annotations import override\n'), ((15362, 15390), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (15370, 15390), False, 'from ray.rllib.utils.annotations import override\n'), ((15649, 15677), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (15657, 15677), False, 'from ray.rllib.utils.annotations import override\n'), ((15998, 16032), 'ray.rllib.utils.annotations.override', 'override', (['TorchDistributionWrapper'], {}), '(TorchDistributionWrapper)\n', (16006, 16032), False, 'from ray.rllib.utils.annotations import override\n'), ((16251, 16279), 'ray.rllib.utils.annotations.override', 'override', (['ActionDistribution'], {}), '(ActionDistribution)\n', (16259, 16279), False, 'from ray.rllib.utils.annotations import override\n'), ((4891, 4916), 'numpy.sum', 'np.sum', (['action_space.nvec'], {}), '(action_space.nvec)\n', (4897, 4916), True, 'import numpy as np\n'), ((8869, 8894), 'ray.rllib.utils.torch_ops.atanh', 'atanh', (['save_normed_values'], {}), '(save_normed_values)\n', (8874, 8894), False, 'from ray.rllib.utils.torch_ops import atanh\n'), ((11862, 11889), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (11869, 11889), True, 'import numpy as np\n'), ((13293, 13333), 'ray.rllib.utils.spaces.space_utils.get_base_struct_from_space', 'get_base_struct_from_space', (['action_space'], {}), '(action_space)\n', (13319, 13333), False, 'from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space\n'), ((13361, 13385), 'tree.flatten', 'tree.flatten', (['input_lens'], {}), '(input_lens)\n', (13373, 13385), False, 'import tree\n'), ((13421, 13454), 'tree.flatten', 'tree.flatten', (['child_distributions'], {}), '(child_distributions)\n', (13433, 13454), False, 
'import tree\n'), ((14700, 14764), 'tree.map_structure', 'tree.map_structure', (['map_', 'split_x', 'self.flat_child_distributions'], {}), '(map_, split_x, self.flat_child_distributions)\n', (14718, 14764), False, 'import tree\n'), ((14821, 14869), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'flat_logps'], {}), '(lambda a, b: a + b, flat_logps)\n', (14837, 14869), False, 'import functools\n'), ((15110, 15155), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'kl_list'], {}), '(lambda a, b: a + b, kl_list)\n', (15126, 15155), False, 'import functools\n'), ((15305, 15355), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'entropy_list'], {}), '(lambda a, b: a + b, entropy_list)\n', (15321, 15355), False, 'import functools\n'), ((15443, 15517), 'tree.unflatten_as', 'tree.unflatten_as', (['self.action_space_struct', 'self.flat_child_distributions'], {}), '(self.action_space_struct, self.flat_child_distributions)\n', (15460, 15517), False, 'import tree\n'), ((15744, 15818), 'tree.unflatten_as', 'tree.unflatten_as', (['self.action_space_struct', 'self.flat_child_distributions'], {}), '(self.action_space_struct, self.flat_child_distributions)\n', (15761, 15818), False, 'import tree\n'), ((16366, 16389), 'numpy.sum', 'np.sum', (['self.input_lens'], {}), '(self.input_lens)\n', (16372, 16389), True, 'import numpy as np\n'), ((5885, 5912), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (5892, 5912), True, 'import numpy as np\n'), ((6944, 6962), 'numpy.less', 'np.less', (['low', 'high'], {}), '(low, high)\n', (6951, 6962), True, 'import numpy as np\n'), ((9054, 9081), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (9061, 9081), True, 'import numpy as np\n'), ((9686, 9703), 'math.log', 'log', (['SMALL_NUMBER'], {}), '(SMALL_NUMBER)\n', (9689, 9703), False, 'from math import log\n'), ((11089, 11116), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), 
'(action_space.shape)\n', (11096, 11116), True, 'import numpy as np\n'), ((14350, 14365), 'tree.flatten', 'tree.flatten', (['x'], {}), '(x)\n', (14362, 14365), False, 'import tree\n'), ((9740, 9757), 'math.log', 'log', (['SMALL_NUMBER'], {}), '(SMALL_NUMBER)\n', (9743, 9757), False, 'from math import log\n')] |
import numpy as np
import io
import cv2
from sklearn.externals import joblib
from face_detector import get_face_detector, find_faces
from image_bytecode import image_bytecode
def calc_hist(img):
    """Per-channel 256-bin colour histogram, each channel rescaled so its
    maximum bin equals 255.

    :param img: 3-channel image array (any colour space)
    :returns: numpy array of the three scaled histograms
    """
    channels = []
    for ch in range(3):
        hist = cv2.calcHist([img], [ch], None, [256], [0, 256])
        # Normalise so the tallest bin is exactly 255.
        hist *= 255.0 / hist.max()
        channels.append(hist)
    return np.array(channels)
# Face-detection model shared by every call to spoof().
face_model = get_face_detector()
# Pre-trained anti-spoofing classifier (used via predict_proba).
clf = joblib.load('models/face_spoofing.pkl')
# Size of the rolling buffer of recent spoof probabilities.
sample_number = 1
# NOTE: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `float` is the documented replacement and is equivalent.
measures = np.zeros(sample_number, dtype=float)
def spoof(image_obj):
    """
    Run face anti-spoofing on a single encoded image and annotate it.

    Detected faces are framed in blue; once the `measures` buffer holds no
    zeros, a "True"/"False" liveness label is drawn (red) next to the last
    face, otherwise the current label is drawn in green.

    :param image_obj: io.BytesIO instance of an encoded image
    :returns: an io.BytesIO instance of the annotated PNG image
    """
    count = 0
    flag = True
    while flag == True:
        # Decode the raw bytes into a BGR image array.
        img = np.frombuffer(image_obj.getvalue(), np.uint8)
        img = cv2.imdecode(img, cv2.IMREAD_COLOR)
        faces = find_faces(img, face_model)
        measures[count % sample_number] = 0
        # Defaults so the label drawing below never hits an undefined name
        # (the original code raised NameError when no face was found or a
        # zero remained in `measures`).
        text = "True"
        point = (0, 0)
        for x, y, x1, y1 in faces:
            roi = img[y:y1, x:x1]
            # Histograms in YCrCb and LUV colour spaces form the feature vector.
            img_ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)
            img_luv = cv2.cvtColor(roi, cv2.COLOR_BGR2LUV)
            ycrcb_hist = calc_hist(img_ycrcb)
            luv_hist = calc_hist(img_luv)
            feature_vector = np.append(ycrcb_hist.ravel(), luv_hist.ravel())
            feature_vector = feature_vector.reshape(1, len(feature_vector))
            # Probability that this face is a spoof.
            prediction = clf.predict_proba(feature_vector)
            prob = prediction[0][1]
            measures[count % sample_number] = prob
            cv2.rectangle(img, (x, y), (x1, y1), (255, 0, 0), 2)
            point = (x, y-5)
        font = cv2.FONT_HERSHEY_SIMPLEX
        if 0 not in measures:
            text = "True"
            if np.mean(measures) >= 0.79:
                text = "False"
            cv2.putText(img=img, text=text, org=point, fontFace=font, fontScale=0.9, color=(0, 0, 255),
                        thickness=2, lineType=cv2.LINE_AA)
        else:
            cv2.putText(img=img, text=text, org=point, fontFace=font, fontScale=0.9,
                        color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
        # The input image is static, so re-running the loop would reproduce
        # the same result; the original version looped forever whenever a
        # zero remained in `measures`.  Always terminate after one pass.
        flag = False
        count += 1
    img = cv2.imencode('.png', img)[1]
    img_bytes = img.tobytes()
    img_obj = io.BytesIO(img_bytes)
    return img_obj
# if __name__=="__main__":
# spoof()
| [
"cv2.rectangle",
"numpy.mean",
"cv2.calcHist",
"cv2.imencode",
"face_detector.find_faces",
"sklearn.externals.joblib.load",
"io.BytesIO",
"cv2.putText",
"numpy.array",
"numpy.zeros",
"cv2.imdecode",
"cv2.cvtColor",
"face_detector.get_face_detector"
] | [((421, 440), 'face_detector.get_face_detector', 'get_face_detector', ([], {}), '()\n', (438, 440), False, 'from face_detector import get_face_detector, find_faces\n'), ((447, 486), 'sklearn.externals.joblib.load', 'joblib.load', (['"""models/face_spoofing.pkl"""'], {}), "('models/face_spoofing.pkl')\n", (458, 486), False, 'from sklearn.externals import joblib\n'), ((517, 556), 'numpy.zeros', 'np.zeros', (['sample_number'], {'dtype': 'np.float'}), '(sample_number, dtype=np.float)\n', (525, 556), True, 'import numpy as np\n'), ((386, 405), 'numpy.array', 'np.array', (['histogram'], {}), '(histogram)\n', (394, 405), True, 'import numpy as np\n'), ((2775, 2796), 'io.BytesIO', 'io.BytesIO', (['img_bytes'], {}), '(img_bytes)\n', (2785, 2796), False, 'import io\n'), ((261, 308), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[j]', 'None', '[256]', '[0, 256]'], {}), '([img], [j], None, [256], [0, 256])\n', (273, 308), False, 'import cv2\n'), ((945, 980), 'cv2.imdecode', 'cv2.imdecode', (['img', 'cv2.IMREAD_COLOR'], {}), '(img, cv2.IMREAD_COLOR)\n', (957, 980), False, 'import cv2\n'), ((1034, 1061), 'face_detector.find_faces', 'find_faces', (['img', 'face_model'], {}), '(img, face_model)\n', (1044, 1061), False, 'from face_detector import get_face_detector, find_faces\n'), ((2702, 2727), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'img'], {}), "('.png', img)\n", (2714, 2727), False, 'import cv2\n'), ((1266, 1305), 'cv2.cvtColor', 'cv2.cvtColor', (['roi', 'cv2.COLOR_BGR2YCR_CB'], {}), '(roi, cv2.COLOR_BGR2YCR_CB)\n', (1278, 1305), False, 'import cv2\n'), ((1328, 1364), 'cv2.cvtColor', 'cv2.cvtColor', (['roi', 'cv2.COLOR_BGR2LUV'], {}), '(roi, cv2.COLOR_BGR2LUV)\n', (1340, 1364), False, 'import cv2\n'), ((1766, 1818), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x1, y1)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x1, y1), (255, 0, 0), 2)\n', (1779, 1818), False, 'import cv2\n'), ((1931, 1948), 'numpy.mean', 'np.mean', (['measures'], {}), '(measures)\n', 
(1938, 1948), True, 'import numpy as np\n'), ((2065, 2195), 'cv2.putText', 'cv2.putText', ([], {'img': 'img', 'text': 'text', 'org': 'point', 'fontFace': 'font', 'fontScale': '(0.9)', 'color': '(0, 0, 255)', 'thickness': '(2)', 'lineType': 'cv2.LINE_AA'}), '(img=img, text=text, org=point, fontFace=font, fontScale=0.9,\n color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n', (2076, 2195), False, 'import cv2\n'), ((2353, 2483), 'cv2.putText', 'cv2.putText', ([], {'img': 'img', 'text': 'text', 'org': 'point', 'fontFace': 'font', 'fontScale': '(0.9)', 'color': '(0, 255, 0)', 'thickness': '(2)', 'lineType': 'cv2.LINE_AA'}), '(img=img, text=text, org=point, fontFace=font, fontScale=0.9,\n color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)\n', (2364, 2483), False, 'import cv2\n')] |
import unittest
import numpy
import pytest
import cupy
from cupy import testing
from cupy.testing import attr
import cupyx
from cupyx import lapack
@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
    'n': [3],
    'nrhs': [None, 1, 4],
    'order': ['C', 'F'],
}))
@attr.gpu
class TestGesv(unittest.TestCase):
    """Tests for ``cupyx.lapack.gesv`` (general dense linear-system solver).

    Parameterized over dtype, right-hand-side count and memory order.
    NOTE(review): ``setUp`` depends on the exact order of the seeded
    ``testing.shaped_random`` calls — do not reorder them.
    """

    # Per-precision tolerances: 'f' = single precision, 'd' = double.
    _tol = {'f': 1e-5, 'd': 1e-12}

    def _make_array(self, shape, alpha, beta):
        """Return a random real array scaled by ``alpha`` and shifted by ``beta``."""
        a = testing.shaped_random(shape, cupy, dtype=self.dtype.char.lower(),
                                  order=self.order, scale=alpha) + beta
        return a

    def _make_matrix(self, shape, alpha, beta):
        """Return a random matrix, with an imaginary part for complex dtypes."""
        a = self._make_array(shape, alpha, beta)
        if self.dtype.char in 'FD':
            a = a + 1j * self._make_array(shape, alpha, beta)
        return a

    def setUp(self):
        """Build a diagonally dominant system ``a @ x = b`` with known ``x``."""
        self.dtype = numpy.dtype(self.dtype)
        n = self.n
        nrhs = 1 if self.nrhs is None else self.nrhs
        # Diagonally dominant matrix is used as it is stable
        alpha = 2.0 / n
        a = self._make_matrix((n, n), alpha, -alpha / 2)
        diag = cupy.diag(cupy.ones((n,), dtype=self.dtype.char.lower()))
        a[diag > 0] = 0
        a += diag
        x = self._make_matrix((n, nrhs), 0.2, 0.9)
        b = cupy.matmul(a, x)
        b_shape = [n]
        if self.nrhs is not None:
            b_shape.append(nrhs)
        b = b.reshape(b_shape)
        self.a = a
        # gesv requires Fortran order for multiple right-hand sides.
        if self.nrhs is None or self.nrhs == 1:
            self.b = b.copy(order=self.order)
        else:
            self.b = b.copy(order='F')
        self.x_ref = x.reshape(b_shape)
        self.tol = self._tol[self.dtype.char.lower()]

    def test_gesv(self):
        """Solve in place and compare against the known solution."""
        lapack.gesv(self.a, self.b)
        cupy.testing.assert_allclose(self.b, self.x_ref,
                                     rtol=self.tol, atol=self.tol)

    def test_invalid_cases(self):
        """gesv must reject batched, non-square, wrong-dtype and non-contiguous inputs."""
        if self.nrhs is None or self.nrhs == 1:
            raise unittest.SkipTest()
        ng_a = self.a.reshape(1, self.n, self.n)
        with pytest.raises(ValueError):
            lapack.gesv(ng_a, self.b)
        ng_b = self.b.reshape(1, self.n, self.nrhs)
        with pytest.raises(ValueError):
            lapack.gesv(self.a, ng_b)
        ng_a = cupy.ones((self.n, self.n+1), dtype=self.dtype)
        with pytest.raises(ValueError):
            lapack.gesv(ng_a, self.b)
        ng_a = cupy.ones((self.n+1, self.n+1), dtype=self.dtype)
        with pytest.raises(ValueError):
            lapack.gesv(ng_a, self.b)
        ng_a = cupy.ones(self.a.shape, dtype='i')
        with pytest.raises(TypeError):
            lapack.gesv(ng_a, self.b)
        # Slicing a batch yields a non-contiguous matrix, which is rejected.
        ng_a = cupy.ones((2, self.n, self.n), dtype=self.dtype, order='F')[0]
        with pytest.raises(ValueError):
            lapack.gesv(ng_a, self.b)
        ng_b = self.b.copy(order='C')
        with pytest.raises(ValueError):
            lapack.gesv(self.a, ng_b)
@testing.parameterize(*testing.product({
    'shape': [(4, 4), (5, 4), (4, 5)],
    'nrhs': [None, 1, 4],
}))
@attr.gpu
class TestGels(unittest.TestCase):
    """Tests for ``cupyx.lapack.gels`` (least-squares solver).

    Results are compared against ``numpy.linalg.lstsq`` for square,
    over-determined and under-determined systems.
    """

    # Per-precision tolerances: 'f' = single precision, 'd' = double.
    _tol = {'f': 1e-5, 'd': 1e-12}

    @testing.for_dtypes('fdFD')
    def test_gels(self, dtype):
        """Solve a random system on GPU and compare with the CPU reference."""
        b_shape = [self.shape[0]]
        if self.nrhs is not None:
            b_shape.append(self.nrhs)
        a = testing.shaped_random(self.shape, numpy, dtype=dtype)
        b = testing.shaped_random(b_shape, numpy, dtype=dtype)
        tol = self._tol[numpy.dtype(dtype).char.lower()]
        x_lstsq = numpy.linalg.lstsq(a, b)[0]
        x_gels = lapack.gels(cupy.array(a), cupy.array(b))
        cupy.testing.assert_allclose(x_gels, x_lstsq, rtol=tol, atol=tol)
@testing.parameterize(*testing.product({
    'shape': [(3, 4, 2, 2), (5, 3, 3), (7, 7)],
    'nrhs': [None, 1, 4]
}))
@attr.gpu
class TestPosv(unittest.TestCase):
    """Tests for ``cupyx.lapack.posv`` (symmetric positive-definite solver)."""

    @testing.for_dtypes('fdFD')
    @testing.numpy_cupy_allclose(atol=1e-5)
    def test_posv(self, xp, dtype):
        """Solve a random positive-definite (possibly batched) system."""
        # TODO: cusolver does not support nrhs > 1 for potrsBatched
        if len(self.shape) > 2 and self.nrhs and self.nrhs > 1:
            pytest.skip('cusolver does not support nrhs > 1 for potrsBatched')
        a = self._create_posdef_matrix(xp, self.shape, dtype)
        b_shape = list(self.shape[:-1])
        if self.nrhs is not None:
            b_shape.append(self.nrhs)
        b = testing.shaped_random(b_shape, xp, dtype=dtype)

        if xp == cupy:
            return lapack.posv(a, b)
        else:
            # CPU reference path used by numpy_cupy_allclose.
            return numpy.linalg.solve(a, b)

    def _create_posdef_matrix(self, xp, shape, dtype):
        """Return a random Hermitian positive-definite matrix (A @ A^H + n*I)."""
        n = shape[-1]
        a = testing.shaped_random(shape, xp, dtype, scale=1)
        a = a @ a.swapaxes(-2, -1).conjugate()
        a = a + n * xp.eye(n)
        return a
# TODO: cusolver does not support nrhs > 1 for potrsBatched
@testing.parameterize(*testing.product({
    'shape': [(2, 3, 3)],
    'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
@attr.gpu
class TestXFailBatchedPosv(unittest.TestCase):
    """Checks that batched ``posv`` with nrhs > 1 fails as expected.

    cusolver's ``potrsBatched`` does not support multiple right-hand sides,
    so a CUSOLVERError must be raised.
    """

    def test_posv(self):
        """Expect a CUSOLVERError when solving a batched multi-RHS system."""
        if not cupy.cusolver.check_availability('potrsBatched'):
            pytest.skip('potrsBatched is not available')
        a = self._create_posdef_matrix(cupy, self.shape, self.dtype)

        n = a.shape[-1]
        # Use the identity as a (n x n) right-hand side, i.e. nrhs = n > 1.
        identity_matrix = cupy.eye(n, dtype=a.dtype)
        b = cupy.empty(a.shape, a.dtype)
        b[...] = identity_matrix
        with cupyx.errstate(linalg='ignore'):
            with pytest.raises(cupy.cuda.cusolver.CUSOLVERError):
                lapack.posv(a, b)

    def _create_posdef_matrix(self, xp, shape, dtype):
        """Return a random Hermitian positive-definite matrix (A @ A^H + n*I)."""
        n = shape[-1]
        a = testing.shaped_random(shape, xp, dtype, scale=1)
        a = a @ a.swapaxes(-2, -1).conjugate()
        a = a + n * xp.eye(n)
        return a
| [
"cupy.matmul",
"cupy.testing.numpy_cupy_allclose",
"cupy.array",
"cupy.eye",
"cupy.testing.for_dtypes",
"cupy.testing.assert_allclose",
"cupy.cusolver.check_availability",
"cupy.empty",
"numpy.linalg.lstsq",
"pytest.skip",
"numpy.dtype",
"cupy.testing.shaped_random",
"cupyx.lapack.posv",
"... | [((3152, 3178), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (['"""fdFD"""'], {}), "('fdFD')\n", (3170, 3178), False, 'from cupy import testing\n'), ((3853, 3879), 'cupy.testing.for_dtypes', 'testing.for_dtypes', (['"""fdFD"""'], {}), "('fdFD')\n", (3871, 3879), False, 'from cupy import testing\n'), ((3885, 3924), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)'}), '(atol=1e-05)\n', (3912, 3924), False, 'from cupy import testing\n'), ((893, 916), 'numpy.dtype', 'numpy.dtype', (['self.dtype'], {}), '(self.dtype)\n', (904, 916), False, 'import numpy\n'), ((1309, 1326), 'cupy.matmul', 'cupy.matmul', (['a', 'x'], {}), '(a, x)\n', (1320, 1326), False, 'import cupy\n'), ((1741, 1768), 'cupyx.lapack.gesv', 'lapack.gesv', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1752, 1768), False, 'from cupyx import lapack\n'), ((1777, 1855), 'cupy.testing.assert_allclose', 'cupy.testing.assert_allclose', (['self.b', 'self.x_ref'], {'rtol': 'self.tol', 'atol': 'self.tol'}), '(self.b, self.x_ref, rtol=self.tol, atol=self.tol)\n', (1805, 1855), False, 'import cupy\n'), ((2286, 2335), 'cupy.ones', 'cupy.ones', (['(self.n, self.n + 1)'], {'dtype': 'self.dtype'}), '((self.n, self.n + 1), dtype=self.dtype)\n', (2295, 2335), False, 'import cupy\n'), ((2427, 2480), 'cupy.ones', 'cupy.ones', (['(self.n + 1, self.n + 1)'], {'dtype': 'self.dtype'}), '((self.n + 1, self.n + 1), dtype=self.dtype)\n', (2436, 2480), False, 'import cupy\n'), ((2570, 2604), 'cupy.ones', 'cupy.ones', (['self.a.shape'], {'dtype': '"""i"""'}), "(self.a.shape, dtype='i')\n", (2579, 2604), False, 'import cupy\n'), ((175, 325), 'cupy.testing.product', 'testing.product', (["{'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n 'n': [3], 'nrhs': [None, 1, 4], 'order': ['C', 'F']}"], {}), "({'dtype': [numpy.float32, numpy.float64, numpy.complex64,\n numpy.complex128], 'n': [3], 'nrhs': [None, 1, 4], 'order': ['C', 'F']})\n", (190, 325), False, 
'from cupy import testing\n'), ((3329, 3382), 'cupy.testing.shaped_random', 'testing.shaped_random', (['self.shape', 'numpy'], {'dtype': 'dtype'}), '(self.shape, numpy, dtype=dtype)\n', (3350, 3382), False, 'from cupy import testing\n'), ((3395, 3445), 'cupy.testing.shaped_random', 'testing.shaped_random', (['b_shape', 'numpy'], {'dtype': 'dtype'}), '(b_shape, numpy, dtype=dtype)\n', (3416, 3445), False, 'from cupy import testing\n'), ((3616, 3681), 'cupy.testing.assert_allclose', 'cupy.testing.assert_allclose', (['x_gels', 'x_lstsq'], {'rtol': 'tol', 'atol': 'tol'}), '(x_gels, x_lstsq, rtol=tol, atol=tol)\n', (3644, 3681), False, 'import cupy\n'), ((2979, 3053), 'cupy.testing.product', 'testing.product', (["{'shape': [(4, 4), (5, 4), (4, 5)], 'nrhs': [None, 1, 4]}"], {}), "({'shape': [(4, 4), (5, 4), (4, 5)], 'nrhs': [None, 1, 4]})\n", (2994, 3053), False, 'from cupy import testing\n'), ((4358, 4405), 'cupy.testing.shaped_random', 'testing.shaped_random', (['b_shape', 'xp'], {'dtype': 'dtype'}), '(b_shape, xp, dtype=dtype)\n', (4379, 4405), False, 'from cupy import testing\n'), ((4615, 4663), 'cupy.testing.shaped_random', 'testing.shaped_random', (['shape', 'xp', 'dtype'], {'scale': '(1)'}), '(shape, xp, dtype, scale=1)\n', (4636, 4663), False, 'from cupy import testing\n'), ((3707, 3794), 'cupy.testing.product', 'testing.product', (["{'shape': [(3, 4, 2, 2), (5, 3, 3), (7, 7)], 'nrhs': [None, 1, 4]}"], {}), "({'shape': [(3, 4, 2, 2), (5, 3, 3), (7, 7)], 'nrhs': [None,\n 1, 4]})\n", (3722, 3794), False, 'from cupy import testing\n'), ((5295, 5321), 'cupy.eye', 'cupy.eye', (['n'], {'dtype': 'a.dtype'}), '(n, dtype=a.dtype)\n', (5303, 5321), False, 'import cupy\n'), ((5334, 5362), 'cupy.empty', 'cupy.empty', (['a.shape', 'a.dtype'], {}), '(a.shape, a.dtype)\n', (5344, 5362), False, 'import cupy\n'), ((5632, 5680), 'cupy.testing.shaped_random', 'testing.shaped_random', (['shape', 'xp', 'dtype'], {'scale': '(1)'}), '(shape, xp, dtype, scale=1)\n', (5653, 5680), False, 
'from cupy import testing\n'), ((4843, 4963), 'cupy.testing.product', 'testing.product', (["{'shape': [(2, 3, 3)], 'dtype': [numpy.float32, numpy.float64, numpy.\n complex64, numpy.complex128]}"], {}), "({'shape': [(2, 3, 3)], 'dtype': [numpy.float32, numpy.\n float64, numpy.complex64, numpy.complex128]})\n", (4858, 4963), False, 'from cupy import testing\n'), ((1994, 2013), 'unittest.SkipTest', 'unittest.SkipTest', ([], {}), '()\n', (2011, 2013), False, 'import unittest\n'), ((2076, 2101), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2089, 2101), False, 'import pytest\n'), ((2115, 2140), 'cupyx.lapack.gesv', 'lapack.gesv', (['ng_a', 'self.b'], {}), '(ng_a, self.b)\n', (2126, 2140), False, 'from cupyx import lapack\n'), ((2206, 2231), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2219, 2231), False, 'import pytest\n'), ((2245, 2270), 'cupyx.lapack.gesv', 'lapack.gesv', (['self.a', 'ng_b'], {}), '(self.a, ng_b)\n', (2256, 2270), False, 'from cupyx import lapack\n'), ((2347, 2372), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2360, 2372), False, 'import pytest\n'), ((2386, 2411), 'cupyx.lapack.gesv', 'lapack.gesv', (['ng_a', 'self.b'], {}), '(ng_a, self.b)\n', (2397, 2411), False, 'from cupyx import lapack\n'), ((2490, 2515), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2503, 2515), False, 'import pytest\n'), ((2529, 2554), 'cupyx.lapack.gesv', 'lapack.gesv', (['ng_a', 'self.b'], {}), '(ng_a, self.b)\n', (2540, 2554), False, 'from cupyx import lapack\n'), ((2618, 2642), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2631, 2642), False, 'import pytest\n'), ((2656, 2681), 'cupyx.lapack.gesv', 'lapack.gesv', (['ng_a', 'self.b'], {}), '(ng_a, self.b)\n', (2667, 2681), False, 'from cupyx import lapack\n'), ((2697, 2756), 'cupy.ones', 'cupy.ones', (['(2, self.n, self.n)'], {'dtype': 'self.dtype', 'order': '"""F"""'}), "((2, 
self.n, self.n), dtype=self.dtype, order='F')\n", (2706, 2756), False, 'import cupy\n'), ((2773, 2798), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2786, 2798), False, 'import pytest\n'), ((2812, 2837), 'cupyx.lapack.gesv', 'lapack.gesv', (['ng_a', 'self.b'], {}), '(ng_a, self.b)\n', (2823, 2837), False, 'from cupyx import lapack\n'), ((2889, 2914), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2902, 2914), False, 'import pytest\n'), ((2928, 2953), 'cupyx.lapack.gesv', 'lapack.gesv', (['self.a', 'ng_b'], {}), '(self.a, ng_b)\n', (2939, 2953), False, 'from cupyx import lapack\n'), ((3521, 3545), 'numpy.linalg.lstsq', 'numpy.linalg.lstsq', (['a', 'b'], {}), '(a, b)\n', (3539, 3545), False, 'import numpy\n'), ((3578, 3591), 'cupy.array', 'cupy.array', (['a'], {}), '(a)\n', (3588, 3591), False, 'import cupy\n'), ((3593, 3606), 'cupy.array', 'cupy.array', (['b'], {}), '(b)\n', (3603, 3606), False, 'import cupy\n'), ((4104, 4170), 'pytest.skip', 'pytest.skip', (['"""cusolver does not support nrhs > 1 for potrsBatched"""'], {}), "('cusolver does not support nrhs > 1 for potrsBatched')\n", (4115, 4170), False, 'import pytest\n'), ((4449, 4466), 'cupyx.lapack.posv', 'lapack.posv', (['a', 'b'], {}), '(a, b)\n', (4460, 4466), False, 'from cupyx import lapack\n'), ((4500, 4524), 'numpy.linalg.solve', 'numpy.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (4518, 4524), False, 'import numpy\n'), ((5069, 5117), 'cupy.cusolver.check_availability', 'cupy.cusolver.check_availability', (['"""potrsBatched"""'], {}), "('potrsBatched')\n", (5101, 5117), False, 'import cupy\n'), ((5131, 5175), 'pytest.skip', 'pytest.skip', (['"""potrsBatched is not available"""'], {}), "('potrsBatched is not available')\n", (5142, 5175), False, 'import pytest\n'), ((5409, 5440), 'cupyx.errstate', 'cupyx.errstate', ([], {'linalg': '"""ignore"""'}), "(linalg='ignore')\n", (5423, 5440), False, 'import cupyx\n'), ((5459, 5506), 'pytest.raises', 
'pytest.raises', (['cupy.cuda.cusolver.CUSOLVERError'], {}), '(cupy.cuda.cusolver.CUSOLVERError)\n', (5472, 5506), False, 'import pytest\n'), ((5524, 5541), 'cupyx.lapack.posv', 'lapack.posv', (['a', 'b'], {}), '(a, b)\n', (5535, 5541), False, 'from cupyx import lapack\n'), ((3470, 3488), 'numpy.dtype', 'numpy.dtype', (['dtype'], {}), '(dtype)\n', (3481, 3488), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@description: Unpack flow field and plot the contours
@contact: <EMAIL>
"""
import os
import postproc.calc as calc
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import matplotlib.colors as colors
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tkinter import Tcl
import imageio
from tqdm import tqdm
from pygifsicle import optimize
def plot_2D_fp_isocontours(data, interest, fn_save):
    """Plot iso-contours of one flow quantity around the cylinder and save them.

    Relies on the module-level globals ``title`` (figure title) and
    ``filled`` (filled vs. line contours), which are assigned in ``__main__``.

    :param data: sequence unpacking to (X, Y, u, v, w, p)
    :param interest: field to plot: 'p', 'u', 'v', 'mag' or 'vort'
    :param fn_save: output file path
    """
    plt.style.use(['science'])
    fig, ax = plt.subplots(figsize=(9, 4))
    ax.set_xlabel(r'$x/D$')
    ax.set_ylabel(r'$y/D$')
    cmap = sns.color_palette("icefire", as_cmap=True)
    plt.title(title)
    divider = make_axes_locatable(ax)
    # Plot the window of interest
    ax.set_xlim(-2.5, 6)
    ax.set_ylim(-3, 3)

    X, Y = data[0:2]
    u, v, w = data[2:-1]
    p = data[-1][0]
    # Now plot what we are interested in
    if interest == 'p':
        vals = p*2
        cmap = sns.color_palette("PRGn_r", as_cmap=True)
    elif interest == 'u':
        vals = u
    elif interest == 'v':
        vals = np.mean(v, axis=2)
    elif interest == 'mag':
        # Magnitude of the span-averaged in-plane velocity.
        U, V = np.mean(u, axis=2), np.mean(v, axis=2)
        vals = np.sqrt(V ** 2 + U ** 2)
        # vals = vals * data.iter_correction(30)
    elif interest == 'vort':
        U, V = np.mean(u, axis=2), np.mean(v, axis=2)
        vals = calc.vortZ(U, V)
        # vals = -data.p * data.length_scale  # Need to scale by length scale
        cmap = sns.color_palette("seismic", as_cmap=True)

    # Grey disc marking the cylinder cross-section.
    grey_color = '#dedede'
    circle = patches.Circle((0, 0), radius=0.5, linewidth=0.2, edgecolor='black', facecolor=grey_color)
    ax.add_patch(circle)

    lim = [np.min(vals), np.max(vals)]
    # lim = [0, 1.4]
    # lim = [-0.2, 0.2]
    lim = [-1.9, 1.]  # hand-tuned colour limits override the data range above
    norm = colors.Normalize(vmin=lim[0], vmax=lim[1])
    # Fallback number of levels; previously undefined (NameError) whenever
    # ``step`` was set to None.
    lvls = 121
    step = 0.01
    if step is not None:
        lvls = np.arange(lim[0], lim[1]+step, step)
    else:
        lvls = np.linspace(lim[0], lim[1], lvls)

    if filled:
        cs = ax.contourf(X, Y, np.transpose(vals),
                         levels=lvls, vmin=lim[0], vmax=lim[1],
                         norm=norm, cmap=cmap, extend='both')
        ax_cb = divider.new_horizontal(size="5%", pad=0.05)
        fig.add_axes(ax_cb)
        plt.colorbar(cs, cax=ax_cb)
        ax_cb.yaxis.tick_right()
        # ax_cb.yaxis.set_major_formatter(FormatStrFormatter('%1.1f'))
    else:
        cs = ax.contour(X, Y, np.transpose(vals),
                        levels=lvls, vmin=lim[0], vmax=lim[1],
                        colors=['k'], linewidths=0.4)
    ax.set_aspect(1)

    plt.savefig(fn_save, dpi=300)
    plt.close()
def save_frames(data, folder, interest):
    """Render every snapshot in ``data`` as a numbered PNG inside ``folder``.

    :param data: iterable of snapshots (each convertible with np.array)
    :param folder: destination directory for the frame images
    :param interest: field name forwarded to plot_2D_fp_isocontours
    """
    for frame_id, snapshot in tqdm(enumerate(data), desc='Plotting frames'):
        field = np.array(snapshot).T
        out_path = os.path.join(folder, str(frame_id) + '.png')
        plot_2D_fp_isocontours(field, interest, out_path)
def animate(data, folder, interest):
    """Render all frames into ``folder`` and assemble every 4th into a GIF.

    :param data: iterable of snapshots, forwarded to save_frames
    :param folder: directory that receives the frames and the GIF
    :param interest: field name used for plotting and the GIF filename
    """
    save_frames(data, folder, interest)
    # Dictionary-style sort (Tcl 'lsort -dict') keeps '2.png' before '10.png'.
    frame_names = Tcl().call('lsort', '-dict', os.listdir(folder))
    gif_path = folder + '/flow'+interest+'.gif'
    writer = imageio.get_writer(gif_path, mode='I', duration=0.15)
    with writer:
        for frame_name in tqdm(frame_names[::4], desc='Loop images'):
            frame = imageio.imread(os.path.join(folder, frame_name))
            writer.append_data(frame)
    # Shrink the GIF in place.
    optimize(gif_path)
class SnapShots:
    """Container holding time-averaged flow-field components.

    Averages the stacked snapshots once and exposes the coordinate grids
    (X, Y), velocity components (u, v, w), their span-averages (U, V) and
    the mean pressure field (p).
    """

    def __init__(self, snap):
        self.snaps = snap
        # Average over the snapshot axis after transposing the stack.
        time_mean = np.mean(np.array(self.snaps).T, axis=1)
        self.X = time_mean[0]
        self.Y = time_mean[1]
        self.u, self.v, self.w = time_mean[2:-1]
        # Span-averaged in-plane velocity components.
        self.U = np.mean(self.u, axis=2)
        self.V = np.mean(self.v, axis=2)
        self.p = np.mean(time_mean[-1], axis=0)
if __name__ == "__main__":
snaps = np.load('snapshots/flow_snaps.npy', allow_pickle=True)
data_root = '/home/masseyjmo/Workspace/Lotus/solver/postproc/circular_cylinder/figures/animations'
interest = 'p'
filled = True
title = '$ p $'
animate(snaps, os.path.join(data_root, 'frames_' + interest), interest)
mean_ = np.mean(np.array(snaps).T, axis=1)
fn_save = os.path.join(data_root + '/sim_' + interest + '.pdf')
plot_2D_fp_isocontours(np.array(snaps).T[:, 102], interest, fn_save)
| [
"numpy.sqrt",
"numpy.array",
"imageio.get_writer",
"numpy.arange",
"numpy.mean",
"os.listdir",
"seaborn.color_palette",
"matplotlib.pyplot.style.use",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.min",
"tkinter.Tcl",
"mat... | [((513, 539), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['science']"], {}), "(['science'])\n", (526, 539), True, 'import matplotlib.pyplot as plt\n'), ((554, 582), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (566, 582), True, 'import matplotlib.pyplot as plt\n'), ((650, 692), 'seaborn.color_palette', 'sns.color_palette', (['"""icefire"""'], {'as_cmap': '(True)'}), "('icefire', as_cmap=True)\n", (667, 692), True, 'import seaborn as sns\n'), ((697, 713), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (706, 713), True, 'import matplotlib.pyplot as plt\n'), ((728, 751), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (747, 751), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((1609, 1703), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(0.5)', 'linewidth': '(0.2)', 'edgecolor': '"""black"""', 'facecolor': 'grey_color'}), "((0, 0), radius=0.5, linewidth=0.2, edgecolor='black',\n facecolor=grey_color)\n", (1623, 1703), True, 'import matplotlib.patches as patches\n'), ((1842, 1884), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'lim[0]', 'vmax': 'lim[1]'}), '(vmin=lim[0], vmax=lim[1])\n', (1858, 1884), True, 'import matplotlib.colors as colors\n'), ((2680, 2709), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn_save'], {'dpi': '(300)'}), '(fn_save, dpi=300)\n', (2691, 2709), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2725), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2723, 2725), True, 'import matplotlib.pyplot as plt\n'), ((3099, 3117), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (3109, 3117), False, 'import os\n'), ((3462, 3480), 'pygifsicle.optimize', 'optimize', (['gif_path'], {}), '(gif_path)\n', (3470, 3480), False, 'from pygifsicle import optimize\n'), ((3856, 3910), 'numpy.load', 'np.load', 
(['"""snapshots/flow_snaps.npy"""'], {'allow_pickle': '(True)'}), "('snapshots/flow_snaps.npy', allow_pickle=True)\n", (3863, 3910), True, 'import numpy as np\n'), ((4212, 4265), 'os.path.join', 'os.path.join', (["(data_root + '/sim_' + interest + '.pdf')"], {}), "(data_root + '/sim_' + interest + '.pdf')\n", (4224, 4265), False, 'import os\n'), ((1001, 1042), 'seaborn.color_palette', 'sns.color_palette', (['"""PRGn_r"""'], {'as_cmap': '(True)'}), "('PRGn_r', as_cmap=True)\n", (1018, 1042), True, 'import seaborn as sns\n'), ((1737, 1749), 'numpy.min', 'np.min', (['vals'], {}), '(vals)\n', (1743, 1749), True, 'import numpy as np\n'), ((1751, 1763), 'numpy.max', 'np.max', (['vals'], {}), '(vals)\n', (1757, 1763), True, 'import numpy as np\n'), ((1959, 1997), 'numpy.arange', 'np.arange', (['lim[0]', '(lim[1] + step)', 'step'], {}), '(lim[0], lim[1] + step, step)\n', (1968, 1997), True, 'import numpy as np\n'), ((2021, 2054), 'numpy.linspace', 'np.linspace', (['lim[0]', 'lim[1]', 'lvls'], {}), '(lim[0], lim[1], lvls)\n', (2032, 2054), True, 'import numpy as np\n'), ((2344, 2371), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs'], {'cax': 'ax_cb'}), '(cs, cax=ax_cb)\n', (2356, 2371), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3301), 'imageio.get_writer', 'imageio.get_writer', (['gif_path'], {'mode': '"""I"""', 'duration': '(0.15)'}), "(gif_path, mode='I', duration=0.15)\n", (3266, 3301), False, 'import imageio\n'), ((3337, 3377), 'tqdm.tqdm', 'tqdm', (['fn_images[::4]'], {'desc': '"""Loop images"""'}), "(fn_images[::4], desc='Loop images')\n", (3341, 3377), False, 'from tqdm import tqdm\n'), ((3787, 3814), 'numpy.mean', 'np.mean', (['mean_t[-1]'], {'axis': '(0)'}), '(mean_t[-1], axis=0)\n', (3794, 3814), True, 'import numpy as np\n'), ((4093, 4138), 'os.path.join', 'os.path.join', (['data_root', "('frames_' + interest)"], {}), "(data_root, 'frames_' + interest)\n", (4105, 4138), False, 'import os\n'), ((2102, 2120), 'numpy.transpose', 'np.transpose', 
(['vals'], {}), '(vals)\n', (2114, 2120), True, 'import numpy as np\n'), ((2516, 2534), 'numpy.transpose', 'np.transpose', (['vals'], {}), '(vals)\n', (2528, 2534), True, 'import numpy as np\n'), ((2850, 2864), 'numpy.array', 'np.array', (['snap'], {}), '(snap)\n', (2858, 2864), True, 'import numpy as np\n'), ((3134, 3139), 'tkinter.Tcl', 'Tcl', ([], {}), '()\n', (3137, 3139), False, 'from tkinter import Tcl\n'), ((3721, 3744), 'numpy.mean', 'np.mean', (['self.u'], {'axis': '(2)'}), '(self.u, axis=2)\n', (3728, 3744), True, 'import numpy as np\n'), ((3746, 3769), 'numpy.mean', 'np.mean', (['self.v'], {'axis': '(2)'}), '(self.v, axis=2)\n', (3753, 3769), True, 'import numpy as np\n'), ((4171, 4186), 'numpy.array', 'np.array', (['snaps'], {}), '(snaps)\n', (4179, 4186), True, 'import numpy as np\n'), ((1127, 1145), 'numpy.mean', 'np.mean', (['v'], {'axis': '(2)'}), '(v, axis=2)\n', (1134, 1145), True, 'import numpy as np\n'), ((3581, 3601), 'numpy.array', 'np.array', (['self.snaps'], {}), '(self.snaps)\n', (3589, 3601), True, 'import numpy as np\n'), ((4293, 4308), 'numpy.array', 'np.array', (['snaps'], {}), '(snaps)\n', (4301, 4308), True, 'import numpy as np\n'), ((1243, 1267), 'numpy.sqrt', 'np.sqrt', (['(V ** 2 + U ** 2)'], {}), '(V ** 2 + U ** 2)\n', (1250, 1267), True, 'import numpy as np\n'), ((3425, 3455), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (3437, 3455), False, 'import os\n'), ((1189, 1207), 'numpy.mean', 'np.mean', (['u'], {'axis': '(2)'}), '(u, axis=2)\n', (1196, 1207), True, 'import numpy as np\n'), ((1209, 1227), 'numpy.mean', 'np.mean', (['v'], {'axis': '(2)'}), '(v, axis=2)\n', (1216, 1227), True, 'import numpy as np\n'), ((1415, 1431), 'postproc.calc.vortZ', 'calc.vortZ', (['U', 'V'], {}), '(U, V)\n', (1425, 1431), True, 'import postproc.calc as calc\n'), ((1525, 1567), 'seaborn.color_palette', 'sns.color_palette', (['"""seismic"""'], {'as_cmap': '(True)'}), "('seismic', as_cmap=True)\n", (1542, 1567), 
True, 'import seaborn as sns\n'), ((1361, 1379), 'numpy.mean', 'np.mean', (['u'], {'axis': '(2)'}), '(u, axis=2)\n', (1368, 1379), True, 'import numpy as np\n'), ((1381, 1399), 'numpy.mean', 'np.mean', (['v'], {'axis': '(2)'}), '(v, axis=2)\n', (1388, 1399), True, 'import numpy as np\n')] |
import pytorch_lightning as pl
import matplotlib.pyplot as plt
import torch
import os
from os import path
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from drought_impact_forecasting.models.utils.utils import mean_prediction, last_prediction, ENS
import wandb
class WandbTrain_callback(pl.Callback):
    """Lightning callback that logs training/validation losses and sample
    predictions to Weights & Biases during training.

    Creates per-channel (r/g/b/i) prediction folders under the active wandb
    run directory and registers the custom step/epoch metrics used below.
    """
    def __init__(self, print_preds = True):
        # If True, log ground-truth and predicted delta images each epoch.
        self.print_preds = print_preds
        # Cached validation batch used for visualizations (set lazily).
        self.print_sample = None
        self.print_table = None
        # Per-batch losses, reset at each epoch boundary.
        self.step_train_loss = []
        self.step_validation_loss = []
        # Folder layout: runtime_pred/{r,g,b,i,img} inside the wandb run dir.
        self.runtime_prediction = os.path.join(wandb.run.dir,"runtime_pred")
        self.r_pred = os.path.join(self.runtime_prediction,"r")
        self.g_pred = os.path.join(self.runtime_prediction,"g")
        self.b_pred = os.path.join(self.runtime_prediction,"b")
        self.i_pred = os.path.join(self.runtime_prediction,"i")
        self.img_pred = os.path.join(self.runtime_prediction,"img")
        self.channel_list = [self.r_pred, self.g_pred, self.b_pred, self.i_pred]
        for dir_path in [self.runtime_prediction,
                        self.r_pred, self.g_pred, self.b_pred, self.i_pred, self.img_pred]:
            if not path.isdir(dir_path):
                os.mkdir(dir_path)
        # Register custom x-axes so wandb plots losses against the right scale.
        wandb.define_metric("step")
        wandb.define_metric("epoch")
        wandb.define_metric('batch_training_loss', step_metric = "step")
        wandb.define_metric('epoch_training_loss', step_metric = "epoch")
        #self.log_ENS_baseline(val_1_data)
    def log_ENS_baseline(self, data):
        """Compute and log ENS baseline scores for the mean- and
        last-frame prediction baselines over the whole dataset.

        Each row of the score matrices holds the 5 ENS components
        (overall score + 4 sub-scores) for one sample.
        """
        scores_mean = np.zeros((data.__len__(), 5))
        scores_last = np.zeros((data.__len__(), 5))
        for i in range(data.__len__()):
            all_data = data.__getitem__(i)
            T = all_data.size()[3]
            t0 = round(all_data.shape[-1]/3) #t0 is the length of the context part
            # for last/mean baseline we don't need weather
            context = all_data[:5, :, :, :t0].unsqueeze(0) # b, c, h, w, t
            target = all_data[:5, :, :, t0:].unsqueeze(0) # b, c, h, w, t
            preds_mean = mean_prediction(context, mask_channel=True).permute(0, 3, 1, 2, 4)
            preds_last = last_prediction(context, mask_channel=4).permute(0, 3, 1, 2, 4)
            _, part_scores_mean = ENS(prediction = preds_mean, target = target)
            _, part_scores_last = ENS(prediction = preds_last, target = target)
            scores_mean[i, :] = part_scores_mean
            scores_last[i, :] = part_scores_last
        avg_scores_mean = np.mean(scores_mean, axis = 0)
        avg_scores_last = np.mean(scores_last, axis = 0)
        wandb.log({
            'baseline_ENS_mean': avg_scores_mean[0],
            'baseline_mad_mean': avg_scores_mean[1],
            'baseline_ssim_mean': avg_scores_mean[2],
            'baseline_ols_mean': avg_scores_mean[3],
            'baseline_emd_mean': avg_scores_mean[4],
            'baseline_ENS_last': avg_scores_last[0],
            'baseline_mad_last': avg_scores_last[1],
            'baseline_ssim_last': avg_scores_last[2],
            'baseline_ols_last': avg_scores_last[3],
            'baseline_emd_last': avg_scores_last[4]
        })
    def on_train_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
        """Record the batch loss and log it against the global step."""
        tr_loss = float(outputs['loss'])
        self.step_train_loss.append(tr_loss)
        trainer.logger.experiment.log({
            'step': trainer.global_step,
            'batch_training_loss': tr_loss
        })
        return super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", unused = None) -> None:
        """Log the epoch-average training loss and current learning rate."""
        # compute the avg training loss
        e_loss = sum(self.step_train_loss)/len(self.step_train_loss)
        # resetting the per-batch training loss
        self.step_train_loss = []
        lr = trainer.lr_schedulers[0]['scheduler'].optimizer.param_groups[0]['lr']
        pl_module.log('epoch_training_loss', e_loss, on_epoch=True, on_step=False)
        pl_module.log('lr', lr, on_epoch=True, on_step=False)
        #torch.save(trainer.model.state_dict(), os.path.join(self.runtime_model_folder, "model_"+str(trainer.current_epoch)+".torch"))
        return super().on_train_epoch_end(trainer, pl_module)
    def on_validation_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
        """Accumulate validation scores; cache the first batch seen for
        visualization and log its ground truth once."""
        self.step_validation_loss.append(outputs)
        # assigning the picture
        if self.print_preds:
            if self.print_sample is None:
                self.print_sample = batch[0:,...]
                self.log_groundtruth(trainer.model, self.print_sample)
        return super().on_validation_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Aggregate per-batch validation scores into epoch-level ENS metrics.

        Skipped during Lightning's sanity-check pass. The combined ENS score
        (index 0) is the harmonic mean of the four sub-scores; it is zero if
        any sub-score is zero.
        """
        if not trainer.sanity_checking:
            batch_loss = np.vstack(self.step_validation_loss)
            # for ndvi loss, we have an extra weight parameter
            if batch_loss.shape[1] == 6:
                batch_loss[:,:5] = batch_loss[:,:5] * batch_loss[:,5:]
            v_loss = np.mean(batch_loss, axis = 0)
            if np.min(v_loss[1:]) == 0:
                v_loss[0] = 0
            else:
                # Harmonic mean of mad/ssim/ols/emd sub-scores.
                v_loss[0] = 4 / (1 / v_loss[1] + 1 / v_loss[2] + 1 / v_loss[3] + 1 / v_loss[4])
            trainer.logger.experiment.log({
                'epoch': trainer.current_epoch,
                'epoch_validation_ENS': v_loss[0],
                'epoch_validation_mad': v_loss[1],
                'epoch_validation_ssim': v_loss[2],
                'epoch_validation_ols': v_loss[3],
                'epoch_validation_emd': v_loss[4]
            })
            if self.print_preds:
                self.log_predictions(trainer.model, self.print_sample, trainer.current_epoch)
            # resetting the per-batch validation loss
            self.step_validation_loss = []
            return {"epoch_validation_ENS" : v_loss[0]}
        # resetting the per-batch validation loss
        self.step_validation_loss = []
    def on_test_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
        """No-op: defer to the parent implementation."""
        return super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
    def log_predictions(self, model, sample, epoch):
        """Predict one step from 10 context frames of the cached sample and
        log the per-channel predicted deltas as wandb images."""
        _, delta_preds, _ = model(sample[:1, :, :, :, :10])
        # Channel order is reversed (np.flip on the channel axis) for display.
        delta = np.flip(delta_preds[0, :4, :, :, 0].cpu().numpy().transpose(1, 2, 0).astype(float), -1)
        #delta_gt = np.flip(((self.sample[:4, :, :, 9] - means[0])[0]).cpu().numpy().transpose(1, 2, 0).astype(float), -1)
        figs = []
        for i, c in enumerate(self.channel_list):
            fig, ax = plt.subplots()
            divider = make_axes_locatable(ax)
            cax = divider.append_axes('right', size='5%', pad=0.05)
            im = ax.imshow(delta[:, :, i], cmap='inferno')
            fig.colorbar(im, cax=cax, orientation='vertical')
            plt.savefig(c + "/epoch_" + str(epoch) + ".png")
            plt.close()
            figs.append(wandb.Image(plt.imread(c + "/epoch_" + str(epoch) + ".png"),
                        caption = "epoch: {0} c: {1}".format(epoch, c[-1])))
            # NOTE(review): fig was already closed by plt.close() above;
            # this second close is a harmless no-op.
            plt.close(fig)
        wandb.log({"epoch": epoch, "pred_imgs": figs})
    def log_groundtruth(self, model, sample):
        """Log per-channel ground-truth deltas (frame 9 minus the model's
        baseline) for the cached sample, tagged with epoch -1."""
        _, _, baselines = model(sample[:1, :, :, :, :10])
        delta_gt = np.flip(((sample[:1,:4, :, :, 9] - baselines[...,0])[0]).cpu().numpy().transpose(1, 2, 0).astype(float), -1)
        figs = []
        for i, c in enumerate(self.channel_list):
            fig, ax = plt.subplots()
            divider = make_axes_locatable(ax)
            cax = divider.append_axes('right', size='5%', pad=0.05)
            im = ax.imshow(delta_gt[:, :, i], cmap='inferno')
            fig.colorbar(im, cax=cax, orientation='vertical')
            fig.savefig(c + "/gt.png")
            figs.append(wandb.Image(plt.imread(c + "/gt.png"),
                        caption = "ground truth c: {0}".format(c[-1])))
            plt.close(fig)
        #self.print_table = wandb.Table(columns=["id", "r", "g", "b", "i"], data = [figs])
        wandb.log({"epoch": -1,"pred_imgs": figs})
class WandbTest_callback(pl.Callback):
    """Callback that appends per-sample test scores to a CSV file inside
    the active wandb run directory.

    The CSV is named after the model artifact, the epoch (zero-padded to
    three digits) and the first three characters of the test-set name.
    """
    def __init__(self, wandb_name_model_to_test, epoch, test_set) -> None:
        if ':' in wandb_name_model_to_test:
            # Full wandb artifact path, e.g. "entity/project:vX/name.ckpt":
            # keep only the file stem (drop the 5-char extension).
            artifact_file = wandb_name_model_to_test.split(":")[1].split("/")[1]
            self.wandb_name_model_to_test = artifact_file[:-5]
        else:
            # Already a plain model name.
            self.wandb_name_model_to_test = wandb_name_model_to_test
        self.epoch = epoch
        self.test_set = test_set
        super().__init__()
    def on_test_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
        """Append one CSV row per sample: MAD, SSIM, OLS, EMD, ENS."""
        csv_name = (
            "scores_" + self.wandb_name_model_to_test
            + f"_{str(self.epoch).zfill(3)}_{self.test_set[:3]}.csv"
        )
        with open(os.path.join(wandb.run.dir, csv_name), 'a') as f:
            for row in range(len(outputs)):
                # outputs columns: 0=ENS, 1=MAD, 2=SSIM, 3=OLS, 4=EMD;
                # written in the order MAD, SSIM, OLS, EMD, ENS.
                fields = [str(outputs[row, col]) for col in (1, 2, 3, 4, 0)]
                f.write(",".join(fields) + '\n')
        return super().on_test_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
    def on_train_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch, batch_idx: int, dataloader_idx: int) -> None:
        """No-op: defer to the parent implementation."""
        return super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx, dataloader_idx)
class Prediction_Callback(pl.Callback):
    """Legacy callback that saves the model state every epoch and logs
    sample predictions (per-channel deltas + RGB image) to disk and wandb.

    All artifacts are written under ./model_instances/model_<timestamp>/.
    """
    def __init__(self, ms_cut, train_dir, test_dir, dataset, print_predictions, timestamp):
        # First dataset sample, reused for all runtime visualizations.
        self.sample = dataset.__getitem__(0)
        self.print_predictions = print_predictions
        # Manually tracked epoch counter (incremented in on_train_epoch_end).
        self.epoch = 0
        self.instance_folder = os.getcwd() + "/model_instances/model_" + timestamp
        self.runtime_model_folder = self.instance_folder + "/runtime_model"
        self.runtime_prediction = self.instance_folder + "/runtime_pred"
        self.r_pred = self.runtime_prediction + "/r"
        self.g_pred = self.runtime_prediction + "/g"
        self.b_pred = self.runtime_prediction + "/b"
        self.i_pred = self.runtime_prediction + "/i"
        self.img_pred = self.runtime_prediction + "/img"
        # set up prediction directory structure if necessary
        for dir_path in [self.instance_folder,
                        self.runtime_model_folder,self.runtime_prediction,
                        self.r_pred,self.g_pred,self.b_pred,self.i_pred,self.img_pred]:
            if not path.isdir(dir_path):
                os.mkdir(dir_path)
        if not os.path.isfile(self.instance_folder + "/scores.csv"):
            with open(self.instance_folder + "/scores.csv", 'w') as filehandle:
                filehandle.write("mad, ssim, ols, emd, score\n")
        self.channel_list = [self.r_pred, self.g_pred, self.b_pred, self.i_pred]
    def on_train_epoch_end(
        self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", unused: "Optional" = None
    ) -> None:
        """Checkpoint the model and, if enabled, render/log predictions
        for the cached sample (ground truth only in epoch 0)."""
        torch.save(trainer.model.state_dict(), self.runtime_model_folder + "/model_"+str(self.epoch)+".torch")
        if self.print_predictions:
            # take 10 context and predict 1 (index from )
            preds, delta_preds, means = trainer.model(torch.unsqueeze(self.sample[:, :, :, :10], dim=0))
            '''
            metrics = trainer.callback_metrics
            metrics = {k:float(v) for k, v in metrics.items()}
            metrics['train_loss'] = [float(metrics['train_loss'])]
            metrics['lr'] = [float(metrics['lr'])]
            metrics['online_val_loss'] = [float(metrics['online_val_loss'])]
            '''
            # Channel axis reversed (np.flip) for display colour order.
            pre_pred = np.flip(preds[0][0, :3, :, :].cpu().numpy().transpose(1, 2, 0).astype(float), -1)
            delta = np.flip(delta_preds[0][0, :4, :, :].cpu().numpy().transpose(1, 2, 0).astype(float), -1)
            # NOTE(review): this delta_gt is recomputed (unflipped) in the
            # epoch-0 branch below; this first value is never used.
            delta_gt = np.flip(((self.sample[:4, :, :, 9] - means[0])[0]).cpu().numpy().transpose(1, 2, 0).astype(float), -1)
            ims = []
            for i, c in enumerate(self.channel_list):
                plt.imshow(delta[:, :, i])
                plt.colorbar()
                plt.savefig(c + "/epoch_" + str(self.epoch) + ".png")
                plt.close()
                ims.append(wandb.Image(plt.imread(c + "/epoch_" + str(self.epoch) + ".png"),
                    caption = "epoch: {0} c: {1}".format(self.epoch, c[-1])))
            wandb.log({"Runtime Predictions":ims})
            # values need to be between 0 and 1
            cor_pred = np.clip(pre_pred, 0, 1)
            '''
            if self.epoch == 0:
                with open(self.instance_folder + "/metrics.json", 'w') as fp:
                    json.dump(metrics, fp)
            else:
                with open(self.instance_folder + "/metrics.json", "r+") as fp:
                    data = json.load(fp)
                    data['train_loss'] = data['train_loss'] + metrics['train_loss']
                    data['lr'] = data['lr'] + metrics['lr']
                    data['online_val_loss'] = data['online_val_loss'] + metrics['online_val_loss']
                    fp.seek(0)
                    json.dump(data, fp) '''
            plt.imsave(self.img_pred + "/epoch_" + str(self.epoch) + ".png", cor_pred)
            ims = []
            # store different rgb values of delta separately
            if self.epoch == 0:
                plt.imsave(self.img_pred + "/gt.png", np.clip(np.flip(self.sample[:3, :, :, 9].detach().cpu().numpy().
                    transpose(1, 2, 0).astype(float), -1),0,1))
                # ground truth delta
                delta_gt = (self.sample[:4, :, :, 9] - means[0])[0]
                for i, c in enumerate(self.channel_list):
                    plt.imshow(np.flip(delta_gt.detach().cpu().numpy().transpose(1, 2, 0).astype(float), -1)[:, :, i])
                    plt.colorbar()
                    plt.savefig(c + "/gt.png")
                    plt.close()
                    # NOTE(review): caption uses placeholder {1} only, so
                    # self.epoch is passed but never rendered.
                    ims.append(wandb.Image(plt.imread(c + "/gt.png"),
                        caption = "ground truth c: {1}".format(self.epoch, c[-1])))
                wandb.log({"Runtime Predictions":ims})
                ims = []
                # NOTE(review): these delta images were already logged above;
                # this epoch-0 loop logs the same files a second time.
                for i, c in enumerate(self.channel_list):
                    plt.imshow(delta[:, :, i])
                    plt.colorbar()
                    plt.savefig(c + "/epoch_" + str(self.epoch) + ".png")
                    plt.close()
                    ims.append(wandb.Image(plt.imread(c + "/epoch_" + str(self.epoch) + ".png"),
                        caption = "epoch: {0} c: {1}".format(self.epoch, c[-1])))
                wandb.log({"Runtime Predictions":ims})
            # in the very first epoch, store ground truth
        self.epoch += 1
    def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """No-op: defer to the parent implementation."""
        return super().on_train_end(trainer, pl_module)
| [
"numpy.clip",
"wandb.log",
"drought_impact_forecasting.models.utils.utils.ENS",
"matplotlib.pyplot.imshow",
"numpy.mean",
"drought_impact_forecasting.models.utils.utils.last_prediction",
"torch.unsqueeze",
"wandb.define_metric",
"drought_impact_forecasting.models.utils.utils.mean_prediction",
"mat... | [((592, 635), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""runtime_pred"""'], {}), "(wandb.run.dir, 'runtime_pred')\n", (604, 635), False, 'import os\n'), ((658, 700), 'os.path.join', 'os.path.join', (['self.runtime_prediction', '"""r"""'], {}), "(self.runtime_prediction, 'r')\n", (670, 700), False, 'import os\n'), ((722, 764), 'os.path.join', 'os.path.join', (['self.runtime_prediction', '"""g"""'], {}), "(self.runtime_prediction, 'g')\n", (734, 764), False, 'import os\n'), ((786, 828), 'os.path.join', 'os.path.join', (['self.runtime_prediction', '"""b"""'], {}), "(self.runtime_prediction, 'b')\n", (798, 828), False, 'import os\n'), ((850, 892), 'os.path.join', 'os.path.join', (['self.runtime_prediction', '"""i"""'], {}), "(self.runtime_prediction, 'i')\n", (862, 892), False, 'import os\n'), ((916, 960), 'os.path.join', 'os.path.join', (['self.runtime_prediction', '"""img"""'], {}), "(self.runtime_prediction, 'img')\n", (928, 960), False, 'import os\n'), ((1270, 1297), 'wandb.define_metric', 'wandb.define_metric', (['"""step"""'], {}), "('step')\n", (1289, 1297), False, 'import wandb\n'), ((1306, 1334), 'wandb.define_metric', 'wandb.define_metric', (['"""epoch"""'], {}), "('epoch')\n", (1325, 1334), False, 'import wandb\n'), ((1344, 1406), 'wandb.define_metric', 'wandb.define_metric', (['"""batch_training_loss"""'], {'step_metric': '"""step"""'}), "('batch_training_loss', step_metric='step')\n", (1363, 1406), False, 'import wandb\n'), ((1417, 1480), 'wandb.define_metric', 'wandb.define_metric', (['"""epoch_training_loss"""'], {'step_metric': '"""epoch"""'}), "('epoch_training_loss', step_metric='epoch')\n", (1436, 1480), False, 'import wandb\n'), ((2564, 2592), 'numpy.mean', 'np.mean', (['scores_mean'], {'axis': '(0)'}), '(scores_mean, axis=0)\n', (2571, 2592), True, 'import numpy as np\n'), ((2621, 2649), 'numpy.mean', 'np.mean', (['scores_last'], {'axis': '(0)'}), '(scores_last, axis=0)\n', (2628, 2649), True, 'import numpy as np\n'), ((2660, 
3107), 'wandb.log', 'wandb.log', (["{'baseline_ENS_mean': avg_scores_mean[0], 'baseline_mad_mean':\n avg_scores_mean[1], 'baseline_ssim_mean': avg_scores_mean[2],\n 'baseline_ols_mean': avg_scores_mean[3], 'baseline_emd_mean':\n avg_scores_mean[4], 'baseline_ENS_last': avg_scores_last[0],\n 'baseline_mad_last': avg_scores_last[1], 'baseline_ssim_last':\n avg_scores_last[2], 'baseline_ols_last': avg_scores_last[3],\n 'baseline_emd_last': avg_scores_last[4]}"], {}), "({'baseline_ENS_mean': avg_scores_mean[0], 'baseline_mad_mean':\n avg_scores_mean[1], 'baseline_ssim_mean': avg_scores_mean[2],\n 'baseline_ols_mean': avg_scores_mean[3], 'baseline_emd_mean':\n avg_scores_mean[4], 'baseline_ENS_last': avg_scores_last[0],\n 'baseline_mad_last': avg_scores_last[1], 'baseline_ssim_last':\n avg_scores_last[2], 'baseline_ols_last': avg_scores_last[3],\n 'baseline_emd_last': avg_scores_last[4]})\n", (2669, 3107), False, 'import wandb\n'), ((8001, 8047), 'wandb.log', 'wandb.log', (["{'epoch': epoch, 'pred_imgs': figs}"], {}), "({'epoch': epoch, 'pred_imgs': figs})\n", (8010, 8047), False, 'import wandb\n'), ((8948, 8991), 'wandb.log', 'wandb.log', (["{'epoch': -1, 'pred_imgs': figs}"], {}), "({'epoch': -1, 'pred_imgs': figs})\n", (8957, 8991), False, 'import wandb\n'), ((2300, 2341), 'drought_impact_forecasting.models.utils.utils.ENS', 'ENS', ([], {'prediction': 'preds_mean', 'target': 'target'}), '(prediction=preds_mean, target=target)\n', (2303, 2341), False, 'from drought_impact_forecasting.models.utils.utils import mean_prediction, last_prediction, ENS\n'), ((2380, 2421), 'drought_impact_forecasting.models.utils.utils.ENS', 'ENS', ([], {'prediction': 'preds_last', 'target': 'target'}), '(prediction=preds_last, target=target)\n', (2383, 2421), False, 'from drought_impact_forecasting.models.utils.utils import mean_prediction, last_prediction, ENS\n'), ((5388, 5424), 'numpy.vstack', 'np.vstack', (['self.step_validation_loss'], {}), '(self.step_validation_loss)\n', (5397, 
5424), True, 'import numpy as np\n'), ((5638, 5665), 'numpy.mean', 'np.mean', (['batch_loss'], {'axis': '(0)'}), '(batch_loss, axis=0)\n', (5645, 5665), True, 'import numpy as np\n'), ((7455, 7469), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7467, 7469), True, 'import matplotlib.pyplot as plt\n'), ((7492, 7515), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (7511, 7515), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((7778, 7789), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7787, 7789), True, 'import matplotlib.pyplot as plt\n'), ((7977, 7991), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7986, 7991), True, 'import matplotlib.pyplot as plt\n'), ((8371, 8385), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8383, 8385), True, 'import matplotlib.pyplot as plt\n'), ((8408, 8431), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (8427, 8431), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((8824, 8838), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8833, 8838), True, 'import matplotlib.pyplot as plt\n'), ((11483, 11535), 'os.path.isfile', 'os.path.isfile', (["(self.instance_folder + '/scores.csv')"], {}), "(self.instance_folder + '/scores.csv')\n", (11497, 11535), False, 'import os\n'), ((13389, 13428), 'wandb.log', 'wandb.log', (["{'Runtime Predictions': ims}"], {}), "({'Runtime Predictions': ims})\n", (13398, 13428), False, 'import wandb\n'), ((13500, 13523), 'numpy.clip', 'np.clip', (['pre_pred', '(0)', '(1)'], {}), '(pre_pred, 0, 1)\n', (13507, 13523), True, 'import numpy as np\n'), ((15689, 15728), 'wandb.log', 'wandb.log', (["{'Runtime Predictions': ims}"], {}), "({'Runtime Predictions': ims})\n", (15698, 15728), False, 'import wandb\n'), ((1204, 1224), 'os.path.isdir', 'path.isdir', (['dir_path'], {}), '(dir_path)\n', (1214, 1224), 
False, 'from os import path\n'), ((1242, 1260), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (1250, 1260), False, 'import os\n'), ((5683, 5701), 'numpy.min', 'np.min', (['v_loss[1:]'], {}), '(v_loss[1:])\n', (5689, 5701), True, 'import numpy as np\n'), ((10639, 10650), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10648, 10650), False, 'import os\n'), ((11402, 11422), 'os.path.isdir', 'path.isdir', (['dir_path'], {}), '(dir_path)\n', (11412, 11422), False, 'from os import path\n'), ((11440, 11458), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (11448, 11458), False, 'import os\n'), ((12176, 12225), 'torch.unsqueeze', 'torch.unsqueeze', (['self.sample[:, :, :, :10]'], {'dim': '(0)'}), '(self.sample[:, :, :, :10], dim=0)\n', (12191, 12225), False, 'import torch\n'), ((13009, 13035), 'matplotlib.pyplot.imshow', 'plt.imshow', (['delta[:, :, i]'], {}), '(delta[:, :, i])\n', (13019, 13035), True, 'import matplotlib.pyplot as plt\n'), ((13052, 13066), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13064, 13066), True, 'import matplotlib.pyplot as plt\n'), ((13153, 13164), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13162, 13164), True, 'import matplotlib.pyplot as plt\n'), ((15198, 15237), 'wandb.log', 'wandb.log', (["{'Runtime Predictions': ims}"], {}), "({'Runtime Predictions': ims})\n", (15207, 15237), False, 'import wandb\n'), ((15329, 15355), 'matplotlib.pyplot.imshow', 'plt.imshow', (['delta[:, :, i]'], {}), '(delta[:, :, i])\n', (15339, 15355), True, 'import matplotlib.pyplot as plt\n'), ((15372, 15386), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (15384, 15386), True, 'import matplotlib.pyplot as plt\n'), ((15473, 15484), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15482, 15484), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2152), 'drought_impact_forecasting.models.utils.utils.mean_prediction', 'mean_prediction', (['context'], {'mask_channel': '(True)'}), 
'(context, mask_channel=True)\n', (2124, 2152), False, 'from drought_impact_forecasting.models.utils.utils import mean_prediction, last_prediction, ENS\n'), ((2201, 2241), 'drought_impact_forecasting.models.utils.utils.last_prediction', 'last_prediction', (['context'], {'mask_channel': '(4)'}), '(context, mask_channel=4)\n', (2216, 2241), False, 'from drought_impact_forecasting.models.utils.utils import mean_prediction, last_prediction, ENS\n'), ((8700, 8725), 'matplotlib.pyplot.imread', 'plt.imread', (["(c + '/gt.png')"], {}), "(c + '/gt.png')\n", (8710, 8725), True, 'import matplotlib.pyplot as plt\n'), ((14914, 14928), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14926, 14928), True, 'import matplotlib.pyplot as plt\n'), ((14949, 14975), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(c + '/gt.png')"], {}), "(c + '/gt.png')\n", (14960, 14975), True, 'import matplotlib.pyplot as plt\n'), ((14996, 15007), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15005, 15007), True, 'import matplotlib.pyplot as plt\n'), ((15051, 15076), 'matplotlib.pyplot.imread', 'plt.imread', (["(c + '/gt.png')"], {}), "(c + '/gt.png')\n", (15061, 15076), True, 'import matplotlib.pyplot as plt\n')] |
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import time
import collections
import argparse
import numpy as np
import sklearn.neighbors
import tensorflow as tf
import keras
import dca_modpp.io
import Cell_BLAST as cb
import utils
# Number of random reference-cell pairs drawn to build the empirical
# distribution of latent-space distances (used for p-value lookup).
N_EMPIRICAL = 10000
# A prediction is "ambiguous" unless the winning label exceeds this
# fraction of the accepted neighbor hits.
MAJORITY_THRESHOLD = 0.5
def parse_args():
    """Parse command-line options, pin a GPU, and configure the TF session.

    Side effects: sets CUDA_VISIBLE_DEVICES (auto-picking the least-loaded
    GPU unless -d/--device is given) and installs a growth-enabled
    TensorFlow session as the Keras backend session.

    Returns the parsed argparse namespace.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", dest="model", type=str, required=True)
    ap.add_argument("-r", "--ref", dest="ref", type=str, required=True)
    ap.add_argument("-q", "--query", dest="query", type=str, required=True)
    ap.add_argument("-o", "--output", dest="output", type=str, required=True)
    ap.add_argument("-a", "--annotation", dest="annotation", type=str, default="cell_ontology_class")
    ap.add_argument("--n-neighbors", dest="n_neighbors", type=int, default=10)
    ap.add_argument("--min-hits", dest="min_hits", type=int, default=2)
    ap.add_argument("-c", "--cutoff", dest="cutoff", type=float, nargs="+", default=[0.1])
    ap.add_argument("-s", "--seed", dest="seed", type=int, default=None)
    ap.add_argument("-d", "--device", dest="device", type=str, default=None)
    ap.add_argument("--subsample-ref", dest="subsample_ref", type=int, default=None)
    ap.add_argument("--clean", dest="clean", type=str, default=None)
    args = ap.parse_args()
    # Choose the GPU: explicit device if given, otherwise the one with
    # the lowest current memory usage.
    if args.device is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = utils.pick_gpu_lowest_memory()
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    keras.backend.set_session(tf.Session(config=tf_config))
    return args
def main(cmd_args):
    """Run a Cell-BLAST-style query against a trained latent-space model.

    Pipeline: load reference data, project reference and query cells into
    the model's latent space, build an empirical null distribution of
    latent distances, then predict each query cell's label by a p-value
    filtered k-NN majority vote. Results (predictions per cutoff, neighbor
    indices/distances, p-values, per-cell runtime) are written to
    ``cmd_args.output`` as hybrid HDF5 paths.

    Fix: ``dtype=np.str`` was removed in NumPy 1.24 — use the builtin
    ``str`` (the two were always equivalent).
    """
    print("Reading data...")
    # np.str alias is deprecated/removed; builtin str is the correct dtype.
    genes = np.loadtxt(os.path.join(cmd_args.model, "genes.txt"), dtype=str)
    ref = cb.data.ExprDataSet.read_dataset(cmd_args.ref)
    ref = utils.clean_dataset(
        ref, cmd_args.clean
    ).to_anndata() if cmd_args.clean else ref.to_anndata()
    # Optional deterministic subsampling of the reference.
    ref = ref[np.random.RandomState(cmd_args.seed).choice(
        ref.shape[0], cmd_args.subsample_ref, replace=False
    ), :] if cmd_args.subsample_ref is not None else ref
    ref_label = ref.obs[cmd_args.annotation].values
    ref = dca_modpp.io.normalize(
        ref, genes, filter_min_counts=False, size_factors=10000,
        normalize_input=False, logtrans_input=True
    )
    print("Loading model...")
    # NOTE: also set in parse_args; repeated here so main() works standalone.
    os.environ["CUDA_VISIBLE_DEVICES"] = utils.pick_gpu_lowest_memory() \
        if cmd_args.device is None else cmd_args.device
    model = keras.models.load_model(os.path.join(cmd_args.model, "model.h5"))
    print("Projecting to latent space...")
    ref_latent = model.predict({
        "count": ref.X,
        "size_factors": ref.obs.size_factors
    })
    nn = sklearn.neighbors.NearestNeighbors().fit(ref_latent)
    print("Building empirical distribution...")
    # Sorted distances between N_EMPIRICAL random reference-cell pairs.
    np.random.seed(cmd_args.seed)
    idx1 = np.random.choice(ref_latent.shape[0], size=N_EMPIRICAL)
    idx2 = np.random.choice(ref_latent.shape[0], size=N_EMPIRICAL)
    empirical = np.sort(np.sqrt(np.sum(np.square(
        ref_latent[idx1] - ref_latent[idx2]
    ), axis=1)))
    print("Querying...")
    query = cb.data.ExprDataSet.read_dataset(cmd_args.query)
    query = query[:, np.union1d(query.var_names, genes)]
    query = utils.clean_dataset(
        query, cmd_args.clean
    ).to_anndata() if cmd_args.clean else query.to_anndata()
    start_time = time.time()
    query = dca_modpp.io.normalize(
        query, genes, filter_min_counts=False, size_factors=10000,
        normalize_input=False, logtrans_input=True
    )
    query_latent = model.predict({
        "count": query.X,
        "size_factors": query.obs.size_factors
    })
    nnd, nni = nn.kneighbors(query_latent, n_neighbors=cmd_args.n_neighbors)
    # Empirical p-value of each neighbor distance. This does not depend on
    # the cutoff, so compute it once (vectorized searchsorted) instead of
    # per element inside the cutoff loop.
    pval = (np.searchsorted(empirical, nnd) / empirical.size).astype(np.float32)
    time_per_cell = None
    prediction_dict = collections.defaultdict(list)
    for cutoff in cmd_args.cutoff:
        for i in range(nnd.shape[0]):
            # Labels of neighbors whose distance is significant at `cutoff`.
            uni, count = np.unique(ref_label[
                nni[i][pval[i] < cutoff]
            ], return_counts=True)
            total_count = count.sum()
            if total_count < cmd_args.min_hits:
                # Too few significant hits: reject the cell.
                prediction_dict[cutoff].append("rejected")
                continue
            argmax = np.argmax(count)
            if count[argmax] / total_count <= MAJORITY_THRESHOLD:
                # No label wins a clear majority of the hits.
                prediction_dict[cutoff].append("ambiguous")
                continue
            prediction_dict[cutoff].append(uni[argmax])
        prediction_dict[cutoff] = np.array(prediction_dict[cutoff])
        if time_per_cell is None:
            time_per_cell = (
                time.time() - start_time
            ) * 1000 / len(prediction_dict[cutoff])
    print("Time per cell: %.3fms" % time_per_cell)
    print("Saving results...")
    if os.path.exists(cmd_args.output):
        os.remove(cmd_args.output)
    for cutoff in prediction_dict:
        cb.data.write_hybrid_path(prediction_dict[cutoff], "%s//prediction/%s" % (
            cmd_args.output, str(cutoff)
        ))
    cb.data.write_hybrid_path(nni, "//".join((cmd_args.output, "nni")))
    cb.data.write_hybrid_path(nnd, "//".join((cmd_args.output, "nnd")))
    cb.data.write_hybrid_path(pval, "//".join((cmd_args.output, "pval")))
    cb.data.write_hybrid_path(time_per_cell, "//".join((cmd_args.output, "time")))
# Script entry point: parse CLI arguments (which also pins a GPU and sets
# up the TF session) and run the query pipeline.
if __name__ == "__main__":
    main(parse_args())
| [
"utils.clean_dataset",
"numpy.union1d",
"numpy.array",
"utils.pick_gpu_lowest_memory",
"numpy.random.RandomState",
"os.remove",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.searchsorted",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.ConfigProto",
"numpy.random.choice",
"nu... | [((360, 385), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (383, 385), False, 'import argparse\n'), ((1562, 1578), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1576, 1578), True, 'import tensorflow as tf\n'), ((1841, 1887), 'Cell_BLAST.data.ExprDataSet.read_dataset', 'cb.data.ExprDataSet.read_dataset', (['cmd_args.ref'], {}), '(cmd_args.ref)\n', (1873, 1887), True, 'import Cell_BLAST as cb\n'), ((2896, 2925), 'numpy.random.seed', 'np.random.seed', (['cmd_args.seed'], {}), '(cmd_args.seed)\n', (2910, 2925), True, 'import numpy as np\n'), ((2937, 2992), 'numpy.random.choice', 'np.random.choice', (['ref_latent.shape[0]'], {'size': 'N_EMPIRICAL'}), '(ref_latent.shape[0], size=N_EMPIRICAL)\n', (2953, 2992), True, 'import numpy as np\n'), ((3004, 3059), 'numpy.random.choice', 'np.random.choice', (['ref_latent.shape[0]'], {'size': 'N_EMPIRICAL'}), '(ref_latent.shape[0], size=N_EMPIRICAL)\n', (3020, 3059), True, 'import numpy as np\n'), ((3209, 3257), 'Cell_BLAST.data.ExprDataSet.read_dataset', 'cb.data.ExprDataSet.read_dataset', (['cmd_args.query'], {}), '(cmd_args.query)\n', (3241, 3257), True, 'import Cell_BLAST as cb\n'), ((3456, 3467), 'time.time', 'time.time', ([], {}), '()\n', (3465, 3467), False, 'import time\n'), ((3831, 3861), 'numpy.empty_like', 'np.empty_like', (['nnd', 'np.float32'], {}), '(nnd, np.float32)\n', (3844, 3861), True, 'import numpy as np\n'), ((3909, 3938), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3932, 3938), False, 'import collections\n'), ((4991, 5022), 'os.path.exists', 'os.path.exists', (['cmd_args.output'], {}), '(cmd_args.output)\n', (5005, 5022), False, 'import os\n'), ((1460, 1490), 'utils.pick_gpu_lowest_memory', 'utils.pick_gpu_lowest_memory', ([], {}), '()\n', (1488, 1490), False, 'import utils\n'), ((1652, 1677), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1662, 1677), True, 'import tensorflow as 
tf\n'), ((1774, 1815), 'os.path.join', 'os.path.join', (['cmd_args.model', '"""genes.txt"""'], {}), "(cmd_args.model, 'genes.txt')\n", (1786, 1815), False, 'import os\n'), ((2461, 2491), 'utils.pick_gpu_lowest_memory', 'utils.pick_gpu_lowest_memory', ([], {}), '()\n', (2489, 2491), False, 'import utils\n'), ((2586, 2626), 'os.path.join', 'os.path.join', (['cmd_args.model', '"""model.h5"""'], {}), "(cmd_args.model, 'model.h5')\n", (2598, 2626), False, 'import os\n'), ((4710, 4743), 'numpy.array', 'np.array', (['prediction_dict[cutoff]'], {}), '(prediction_dict[cutoff])\n', (4718, 4743), True, 'import numpy as np\n'), ((5032, 5058), 'os.remove', 'os.remove', (['cmd_args.output'], {}), '(cmd_args.output)\n', (5041, 5058), False, 'import os\n'), ((3279, 3313), 'numpy.union1d', 'np.union1d', (['query.var_names', 'genes'], {}), '(query.var_names, genes)\n', (3289, 3313), True, 'import numpy as np\n'), ((4164, 4230), 'numpy.unique', 'np.unique', (['ref_label[nni[i][pval[i] < cutoff]]'], {'return_counts': '(True)'}), '(ref_label[nni[i][pval[i] < cutoff]], return_counts=True)\n', (4173, 4230), True, 'import numpy as np\n'), ((4452, 4468), 'numpy.argmax', 'np.argmax', (['count'], {}), '(count)\n', (4461, 4468), True, 'import numpy as np\n'), ((1898, 1938), 'utils.clean_dataset', 'utils.clean_dataset', (['ref', 'cmd_args.clean'], {}), '(ref, cmd_args.clean)\n', (1917, 1938), False, 'import utils\n'), ((3099, 3145), 'numpy.square', 'np.square', (['(ref_latent[idx1] - ref_latent[idx2])'], {}), '(ref_latent[idx1] - ref_latent[idx2])\n', (3108, 3145), True, 'import numpy as np\n'), ((3327, 3369), 'utils.clean_dataset', 'utils.clean_dataset', (['query', 'cmd_args.clean'], {}), '(query, cmd_args.clean)\n', (3346, 3369), False, 'import utils\n'), ((4084, 4121), 'numpy.searchsorted', 'np.searchsorted', (['empirical', 'nnd[i, j]'], {}), '(empirical, nnd[i, j])\n', (4099, 4121), True, 'import numpy as np\n'), ((2020, 2056), 'numpy.random.RandomState', 'np.random.RandomState', 
(['cmd_args.seed'], {}), '(cmd_args.seed)\n', (2041, 2056), True, 'import numpy as np\n'), ((4824, 4835), 'time.time', 'time.time', ([], {}), '()\n', (4833, 4835), False, 'import time\n')] |
import numpy
from pyscf.pbc import tools as pyscf_tools
from pyscf.pbc.lib.kpts_helper import is_zero, member
from pyscfad.lib import numpy as jnp
from pyscfad.lib import ops
def _ewald_exxdiv_for_G0(cell, kpts, dms, vk, kpts_band=None):
    """Apply the Ewald exchange-divergence (G=0) correction to vk.

    Adds madelung * S.dm.S to each exchange matrix, where S is the overlap
    matrix at the corresponding k-point. Uses functional jax-style updates
    (ops.index_add) and returns the corrected vk; the input vk is not
    modified in place.

    Branches:
    - kpts is None: Gamma-point only.
    - kpts has shape (3,): a single k-point; correct only when kpts_band
      is absent or equals kpts.
    - multiple k-points with kpts_band absent/equal: correct every k-point.
    - otherwise: correct only band k-points that match a kpt in kpts.
    """
    s = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts)
    madelung = pyscf_tools.pbc.madelung(cell, kpts)
    if kpts is None:
        for i,dm in enumerate(dms):
            #vk[i] += madelung * reduce(numpy.dot, (s, dm, s))
            vk = ops.index_add(vk, ops.index[i],
                          madelung * jnp.dot(s, jnp.dot(dm, s)))
    elif numpy.shape(kpts) == (3,):
        # Single k-point given as a flat 3-vector.
        if kpts_band is None or is_zero(kpts_band-kpts):
            for i,dm in enumerate(dms):
                #vk[i] += madelung * reduce(numpy.dot, (s, dm, s))
                vk = ops.index_add(vk, ops.index[i],
                              madelung * jnp.dot(s, jnp.dot(dm, s)))
    elif kpts_band is None or numpy.array_equal(kpts, kpts_band):
        for k in range(len(kpts)):
            for i,dm in enumerate(dms):
                #vk[i,k] += madelung * reduce(numpy.dot, (s[k], dm[k], s[k]))
                vk = ops.index_add(vk, ops.index[i,k],
                              madelung * jnp.dot(s[k], jnp.dot(dm[k], s[k])))
    else:
        # Band k-points differ from SCF k-points: correct only those band
        # points (index kp) that coincide with some SCF k-point (index k).
        for k, kpt in enumerate(kpts):
            for kp in member(kpt, kpts_band.reshape(-1,3)):
                for i,dm in enumerate(dms):
                    #vk[i,kp] += madelung * reduce(numpy.dot, (s[k], dm[k], s[k]))
                    vk = ops.index_add(vk, ops.index[i,kp],
                                  madelung * jnp.dot(s[k], jnp.dot(dm[k], s[k])))
    return vk
| [
"pyscf.pbc.tools.pbc.madelung",
"pyscfad.lib.numpy.dot",
"pyscf.pbc.lib.kpts_helper.is_zero",
"numpy.array_equal",
"numpy.shape"
] | [((311, 347), 'pyscf.pbc.tools.pbc.madelung', 'pyscf_tools.pbc.madelung', (['cell', 'kpts'], {}), '(cell, kpts)\n', (335, 347), True, 'from pyscf.pbc import tools as pyscf_tools\n'), ((596, 613), 'numpy.shape', 'numpy.shape', (['kpts'], {}), '(kpts)\n', (607, 613), False, 'import numpy\n'), ((655, 680), 'pyscf.pbc.lib.kpts_helper.is_zero', 'is_zero', (['(kpts_band - kpts)'], {}), '(kpts_band - kpts)\n', (662, 680), False, 'from pyscf.pbc.lib.kpts_helper import is_zero, member\n'), ((944, 978), 'numpy.array_equal', 'numpy.array_equal', (['kpts', 'kpts_band'], {}), '(kpts, kpts_band)\n', (961, 978), False, 'import numpy\n'), ((570, 584), 'pyscfad.lib.numpy.dot', 'jnp.dot', (['dm', 's'], {}), '(dm, s)\n', (577, 584), True, 'from pyscfad.lib import numpy as jnp\n'), ((897, 911), 'pyscfad.lib.numpy.dot', 'jnp.dot', (['dm', 's'], {}), '(dm, s)\n', (904, 911), True, 'from pyscfad.lib import numpy as jnp\n'), ((1248, 1268), 'pyscfad.lib.numpy.dot', 'jnp.dot', (['dm[k]', 's[k]'], {}), '(dm[k], s[k])\n', (1255, 1268), True, 'from pyscfad.lib import numpy as jnp\n'), ((1627, 1647), 'pyscfad.lib.numpy.dot', 'jnp.dot', (['dm[k]', 's[k]'], {}), '(dm[k], s[k])\n', (1634, 1647), True, 'from pyscfad.lib import numpy as jnp\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict
import numpy as np
def _create_coco_gt_results(dataset):
from mmdet.core import bbox2result
from mmtrack.core import track2result
results = defaultdict(list)
for img_info in dataset.data_infos:
ann = dataset.get_ann_info(img_info)
scores = np.ones((ann['bboxes'].shape[0], 1), dtype=np.float)
bboxes = np.concatenate((ann['bboxes'], scores), axis=1)
bbox_results = bbox2result(bboxes, ann['labels'], len(dataset.CLASSES))
track_results = track2result(bboxes, ann['labels'],
ann['instance_ids'].astype(np.int),
len(dataset.CLASSES))
results['bbox_results'].append(bbox_results)
results['track_results'].append(track_results)
return results
| [
"collections.defaultdict",
"numpy.ones",
"numpy.concatenate"
] | [((240, 257), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (251, 257), False, 'from collections import defaultdict\n'), ((360, 412), 'numpy.ones', 'np.ones', (["(ann['bboxes'].shape[0], 1)"], {'dtype': 'np.float'}), "((ann['bboxes'].shape[0], 1), dtype=np.float)\n", (367, 412), True, 'import numpy as np\n'), ((430, 477), 'numpy.concatenate', 'np.concatenate', (["(ann['bboxes'], scores)"], {'axis': '(1)'}), "((ann['bboxes'], scores), axis=1)\n", (444, 477), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import mne
from pathlib import Path
from .utils import (read_with_deepdish, compute_zero_crossings,
compute_svd_entropy)
def svd_entropy(emg_data):
"""Get the fft value of emg signal from 8 electrodes.
Parameters
----------
emg_data : array
An array of EMG data.
Returns
-------
int
Number of zeros crosses of all 8 channels.
"""
svd = compute_svd_entropy(emg_data)
return svd
def get_emg_feature(emg_data, config):
"""Create emg feature set.
Parameters
----------
emg_data : array
A 8 channel array of emg.
config : yaml
The configuration file.
Returns
-------
array
An array of calculated emg features.
"""
df = pd.DataFrame(np.empty((0, len(config['emg_features']))),
columns=config['emg_features'])
for i in range(emg_data.shape[0]):
zero_crosses = np.mean(compute_zero_crossings(emg_data[i, :, :]))
diff = np.diff(emg_data[i, :, :], axis=1)
slope_zero_crosses = np.mean(compute_zero_crossings(diff))
svd = np.mean(svd_entropy(emg_data[i, :, :]))
rms = np.sqrt(
np.sum(emg_data[i, :, :]**2, axis=1) / emg_data[i, :, :].shape[1])
rms = np.mean(rms)
data = np.vstack((zero_crosses, slope_zero_crosses, svd, rms)).T
temp = pd.DataFrame(data, columns=config['emg_features'])
df = pd.concat([df, temp], ignore_index=True, sort=False)
return df
def create_emg_features(config):
"""Create EMG feature dataset
Parameters
----------
config : yaml
The configuration file.
Returns
-------
dataframe
Pandas dataframe.
"""
read_path = Path(__file__).parents[2] / config['raw_haptic_dataset']
data = read_with_deepdish(read_path)
emg_feature = pd.DataFrame(np.empty((0, len(config['emg_features']))),
columns=config['emg_features'])
channels = [
'emg_0', 'emg_1', 'emg_2', 'emg_3', 'emg_4', 'emg_5', 'emg_6', 'emg_7'
]
for subject in config['subjects']:
for hand in config['hand_type']:
for control in config['control_type']:
emg_data = data[subject]['haptic'][hand][control]
id = mne.pick_channels(emg_data.ch_names, channels)
df = get_emg_feature(emg_data.get_data()[:, id, :], config)
df['subject'] = subject
df['hand_type'] = hand
df['control_type'] = control
emg_feature = pd.concat([emg_feature, df],
ignore_index=True,
sort=False)
return emg_feature
| [
"numpy.mean",
"pathlib.Path",
"numpy.diff",
"numpy.sum",
"numpy.vstack",
"mne.pick_channels",
"pandas.DataFrame",
"pandas.concat"
] | [((1040, 1074), 'numpy.diff', 'np.diff', (['emg_data[i, :, :]'], {'axis': '(1)'}), '(emg_data[i, :, :], axis=1)\n', (1047, 1074), True, 'import numpy as np\n'), ((1312, 1324), 'numpy.mean', 'np.mean', (['rms'], {}), '(rms)\n', (1319, 1324), True, 'import numpy as np\n'), ((1413, 1463), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "config['emg_features']"}), "(data, columns=config['emg_features'])\n", (1425, 1463), True, 'import pandas as pd\n'), ((1477, 1529), 'pandas.concat', 'pd.concat', (['[df, temp]'], {'ignore_index': '(True)', 'sort': '(False)'}), '([df, temp], ignore_index=True, sort=False)\n', (1486, 1529), True, 'import pandas as pd\n'), ((1340, 1395), 'numpy.vstack', 'np.vstack', (['(zero_crosses, slope_zero_crosses, svd, rms)'], {}), '((zero_crosses, slope_zero_crosses, svd, rms))\n', (1349, 1395), True, 'import numpy as np\n'), ((1231, 1269), 'numpy.sum', 'np.sum', (['(emg_data[i, :, :] ** 2)'], {'axis': '(1)'}), '(emg_data[i, :, :] ** 2, axis=1)\n', (1237, 1269), True, 'import numpy as np\n'), ((1783, 1797), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1787, 1797), False, 'from pathlib import Path\n'), ((2339, 2385), 'mne.pick_channels', 'mne.pick_channels', (['emg_data.ch_names', 'channels'], {}), '(emg_data.ch_names, channels)\n', (2356, 2385), False, 'import mne\n'), ((2617, 2676), 'pandas.concat', 'pd.concat', (['[emg_feature, df]'], {'ignore_index': '(True)', 'sort': '(False)'}), '([emg_feature, df], ignore_index=True, sort=False)\n', (2626, 2676), True, 'import pandas as pd\n')] |
'''
Class to steer Bayesian analysis and produce plots.
'''
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy
import statistics
import os
import sys
import pickle
import argparse
from src.design import Design
from src import emulator, mcmc, init
import run_analysis_base
################################################################
class MergeResults(run_analysis_base.RunAnalysisBase):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, config_file, model, output_dir, **kwargs):
# Initialize base class
super(MergeResults, self).__init__(config_file, model, output_dir, **kwargs)
self.output_dir_holdout = os.path.join(self.output_dir_base, '{}/holdout'.format(model))
self.plot_dir = os.path.join(self.output_dir_base, model)
#---------------------------------------------------------------
# Run analysis
#---------------------------------------------------------------
def run_analysis(self):
# Initialize data and model from files
self.initialize()
# Initialize pickled config settings
init.Init(self.workdir).Initialize(self)
# Emulator validation: Store lists of true RAA, emulator RAA at each holdout point
SystemCount = len(self.AllData["systems"])
true_raa_aggregated = [[] for i in range(SystemCount)]
emulator_raa_mean_aggregated = [[] for i in range(SystemCount)]
emulator_raa_stdev_aggregated = [[] for i in range(SystemCount)]
# Store a list of the chi2 of the holdout residual
self.avg_residuals = []
# Store list of closure test result
T_qhat_closure_result_list = []
T_qhat_closure_result_list2 = []
T_qhat_closure_truth_list = []
E_qhat_closure_result_list = []
E_qhat_closure_result_list2 = []
E_qhat_closure_truth_list = []
theta_closure_list = []
theta_closure_result_dict = {}
theta_closure_result2_dict = {}
for name in self.Names:
theta_closure_result_dict[name] = []
theta_closure_result2_dict[name] = []
n_design_points = len(next(os.walk(self.output_dir_holdout))[1])
print('iterating through {} results'.format(n_design_points))
for i in range(0, n_design_points):
# Load pkl file of results
result_path = os.path.join(self.output_dir_holdout, '{}/result.pkl'.format(i))
if not os.path.exists(result_path):
print('Warning: {} does not exist'.format(result_path))
continue
with open(result_path, 'rb') as f:
result_dict = pickle.load(f)
# Holdout test
true_raa = result_dict['true_raa']
emulator_raa_mean = result_dict['emulator_raa_mean']
emulator_raa_stdev = result_dict['emulator_raa_stdev']
[true_raa_aggregated[i].append(true_raa[i]) for i in range(SystemCount)]
[emulator_raa_mean_aggregated[i].append(emulator_raa_mean[i]) for i in range(SystemCount)]
[emulator_raa_stdev_aggregated[i].append(emulator_raa_stdev[i]) for i in range(SystemCount)]
# Closure test
# qhat vs T
T_array = result_dict['T_array']
T_qhat_truth = result_dict['T_qhat_truth']
T_qhat_mean = result_dict['T_qhat_mean']
T_qhat_closure = result_dict['T_qhat_closure']
T_qhat_closure2 = result_dict['T_qhat_closure2']
T_qhat_closure_result_list.append(T_qhat_closure)
T_qhat_closure_result_list2.append(T_qhat_closure2)
T_qhat_closure_truth_list.append(T_qhat_truth)
# qhat vs E
E_array = result_dict['E_array']
E_qhat_truth = result_dict['E_qhat_truth']
E_qhat_mean = result_dict['E_qhat_mean']
E_qhat_closure = result_dict['E_qhat_closure']
E_qhat_closure2 = result_dict['E_qhat_closure2']
E_qhat_closure_result_list.append(E_qhat_closure)
E_qhat_closure_result_list2.append(E_qhat_closure2)
E_qhat_closure_truth_list.append(E_qhat_truth)
# ABCD closure
theta = result_dict['theta']
theta_closure_list.append(theta)
for name in self.Names:
theta_closure_result_dict[name].append(result_dict['{}_closure'.format(name)])
theta_closure_result2_dict[name].append(result_dict['{}_closure2'.format(name)])
# Plot summary of holdout tests
#self.plot_avg_residuals()
self.plot_emulator_validation(true_raa_aggregated, emulator_raa_mean_aggregated, emulator_raa_stdev_aggregated)
self.plot_emulator_uncertainty_validation(true_raa_aggregated, emulator_raa_mean_aggregated, emulator_raa_stdev_aggregated)
# Plot summary of qhat closure tests
self.plot_closure_summary_qhat(T_array, T_qhat_closure_result_list,
T_qhat_closure_truth_list, type='T', CR='90')
self.plot_closure_summary_qhat(T_array, T_qhat_closure_result_list2,
T_qhat_closure_truth_list, type='T', CR='60')
self.plot_closure_summary_qhat(E_array, E_qhat_closure_result_list,
E_qhat_closure_truth_list, type='E', CR='90')
self.plot_closure_summary_qhat(E_array, E_qhat_closure_result_list2,
E_qhat_closure_truth_list, type='E', CR='60')
# Print theta closure summary
for i,name in enumerate(self.Names):
self.plot_closure_summary_theta(i, name, theta_closure_list, theta_closure_result_dict, CR='90')
self.plot_closure_summary_theta(i, name, theta_closure_list, theta_closure_result2_dict, CR='60')
#---------------------------------------------------------------
# Plot summary of closure tests
#
# theta_closure_list is a list (per design point) of theta values
#
# theta_closure_result_dict is a dictionary (per ABCD) of lists (per design point)
# [{A: [True, True, ...]}, {B: [True, False, ...]}, ... ]
#
#---------------------------------------------------------------
def plot_closure_summary_theta(self, i, name, theta_closure_list, theta_closure_result_dict, CR='90'):
theta_i_list = [theta[i] for theta in theta_closure_list]
qhat_list = [self.qhat(T=0.3, E=100, parameters=theta) for theta in theta_closure_list]
success_list = theta_closure_result_dict[name]
# Construct 2D histogram of qhat vs theta[i],
# where amplitude is fraction of successful closure tests
theta_i_range = self.ranges_transformed[i]
xbins = np.linspace(theta_i_range[0], theta_i_range[1], num=8)
xwidth = (theta_i_range[0]+theta_i_range[1])/(7*2)
ybins = [0, 0.5, 1, 2, 3, 4, 5, 6, 8, 10, 15]
ybins_center = [(ybins[i+1]+ybins[i])/2 for i in range(len(ybins)-1)]
x = np.array(theta_i_list)
y = np.array(qhat_list)
z = np.array(success_list)
# Histogram of fraction of successes
self.N_per_bin = 1
H, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z, statistic=np.mean,
bins=[xbins, ybins])
XX, YY = np.meshgrid(xedges, yedges)
fig = plt.figure(figsize = (11,9))
ax1=plt.subplot(111)
plot1 = ax1.pcolormesh(XX, YY, H.T)
fig.colorbar(plot1, ax=ax1)
# Histogram of efficiency uncertainty
Herr, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z,
statistic=self.efficiency_uncertainty_bayesian,
bins=[xbins, ybins])
plt.xlabel(name, size=14)
plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{T=300\;\rm{MeV}, E=100\;\rm{GeV}}$', size=14)
plt.title('Fraction of closure tests contained in {}% CR'.format(CR), size=14)
mean = np.mean(z)
self.N_per_bin = 1
unc = self.efficiency_uncertainty_bayesian(z)
ax1.legend(title='mean: {:0.2f}{}{:0.2f}'.format(mean, r'$\pm$', unc),
title_fontsize=14, loc='upper right')
for i in range(len(xbins)-1):
for j in range(len(ybins)-1):
zval = H[i][j]
zerr = Herr[i][j]
if np.isnan(zval) or np.isnan(zerr):
continue
ax1.text(xbins[i]+xwidth, ybins_center[j], '{:0.2f}{}{:0.2f}'.format(zval, r'$\pm$',zerr),
size=8, ha='center', va='center',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
# Save
plt.savefig('{}/Closure_Summary2D_{}_{}.pdf'.format(self.plot_dir, name, CR), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Plot summary of closure tests
#
# qhat_closure_result_list is a list (per design point) of lists (of T values)
# [ [True, True, ...], [True, False, ...], ... ] where each sublist is a given design point
# qhat_closure_truth_list is a list (per design point) of lists (of T values)
# [ [qhat_T1, qhat_T2, ...], [qhat_T1, qhat_T2, ...], ... ] where each sublist is a given design point
#---------------------------------------------------------------
def plot_closure_summary_qhat(self, x_array, qhat_closure_result_list,
qhat_closure_truth_list, type='T', CR='90'):
# Construct 2D histogram of <qhat of design point> vs T,
# where amplitude is fraction of successful closure tests
# For each T and design point, compute <qhat of design point>,
# T, and the fraction of successful closure tests
x_list = []
qhat_mean_list = []
success_list = []
for i,x in enumerate(x_array):
for j,design in enumerate(qhat_closure_result_list):
qhat_mean = statistics.mean(qhat_closure_truth_list[j])
success = qhat_closure_result_list[j][i]
x_list.append(x)
qhat_mean_list.append(qhat_mean)
success_list.append(success)
# Now draw the mean success rate in 2D
if type is 'T':
xbins = np.linspace(0.15, 0.5, num=8)
xwidth = 0.025
self.N_per_bin = 50/7 # We have multiple T points per bin
if type is 'E':
xbins = np.linspace(20, 200, num=10)
xwidth = 10
self.N_per_bin = 50/9 # We have multiple E points per bin
ybins = [0, 0.5, 1, 2, 3, 4, 5, 6, 8, 10, 15]
ybins_center = [(ybins[i+1]+ybins[i])/2 for i in range(len(ybins)-1)]
x = np.array(x_list)
y = np.array(qhat_mean_list)
z = np.array(success_list)
# Histogram of fraction of successes
H, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z, statistic=np.mean,
bins=[xbins, ybins])
H = np.ma.masked_invalid(H) # masking where there was no data
XX, YY = np.meshgrid(xedges, yedges)
fig = plt.figure(figsize = (11,9))
ax1=plt.subplot(111)
plot1 = ax1.pcolormesh(XX, YY, H.T)
fig.colorbar(plot1, ax=ax1)
# Histogram of binomial uncertainty
Herr, xedges, yedges, binnumber= scipy.stats.binned_statistic_2d(x, y, z,
statistic=self.efficiency_uncertainty_bayesian,
bins=[xbins, ybins])
Herr = np.ma.masked_invalid(Herr)
plt.xlabel('{} (GeV)'.format(type), size=14)
if type is 'T':
plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{E=100\;\rm{GeV}}$', size=14)
if type is 'E':
plt.ylabel(r'$\left< \hat{q}/T^3 \right>_{T=300\;\rm{MeV}}$', size=14)
plt.title('Fraction of closure tests contained in {}% CR'.format(CR), size=14)
mean = np.mean(z)
self.N_per_bin = 50 # Here, we take just one point per curve
unc = self.efficiency_uncertainty_bayesian(z)
ax1.legend(title='mean: {:0.2f}{}{:0.2f}'.format(mean, r'$\pm$', unc),
title_fontsize=14, loc='upper right')
for i in range(len(xbins)-1):
for j in range(len(ybins)-1):
zval = H[i][j]
zerr = Herr[i][j]
if np.isnan(zval) or np.isnan(zerr):
continue
ax1.text(xbins[i]+xwidth, ybins_center[j], '{:0.2f}{}{:0.2f}'.format(zval, r'$\pm$',zerr),
size=8, ha='center', va='center',
bbox=dict(boxstyle='round', facecolor='white', edgecolor='0.3'))
# Save
plt.savefig('{}/Closure_Summary2D_{}_{}.pdf'.format(self.plot_dir, type, CR), dpi = 192)
plt.close('all')
#---------------------------------------------------------------
# Compute binomial uncertainty from a list of True/False values
# [True, True, False, True, ...]
#---------------------------------------------------------------
def efficiency_uncertainty_binomial(self, success_list):
length = len(success_list)
sum = np.sum(success_list)
mean = 1.*sum/length
# We have multiple T points per bin, which would underestimate the uncertainty
# since neighboring points are highly correlated
real_length = length / self.N_per_bin
variance = real_length*mean*(1-mean)
sigma = np.sqrt(variance)
return sigma/real_length
#---------------------------------------------------------------
# Compute bayesian uncertainty on efficiency from a list of True/False values
# [True, True, False, True, ...]
# http://phys.kent.edu/~smargeti/STAR/D0/Ullrich-Errors.pdf
#---------------------------------------------------------------
def efficiency_uncertainty_bayesian(self, success_list):
length = len(success_list)
sum = np.sum(success_list)
mean = 1.*sum/length
# We have multiple T points per bin, which would underestimate the uncertainty
# since neighboring points are highly correlated
real_length = length / self.N_per_bin
k = mean*real_length
n = real_length
variance = (k+1)*(k+2)/((n+2)*(n+3)) - (k+1)*(k+1)/((n+2)*(n+2))
return np.sqrt(variance)
#---------------------------------------------------------------
# Plot emulator validation
#
# true_raa and emulator_raa are lists (per system) of lists (per design point) of lists
# e.g. true_raa[i] = [[RAA_0, RAA_1,...], [RAA_0, RAA_1, ...], ...]
#
#---------------------------------------------------------------
def plot_emulator_validation(self, true_raa, emulator_raa_mean, emulator_raa_stdev):
# Loop through emulators
for cent in range(0,2):
# Construct a figure with two plots
plt.figure(1, figsize=(10, 6))
ax_scatter = plt.axes([0.1, 0.13, 0.6, 0.8]) # [left, bottom, width, height]
ax_residual = plt.axes([0.81, 0.13, 0.15, 0.8])
markers = ['o', 's', 'D']
SystemCount = len(self.AllData["systems"])
for i in range(SystemCount):
system = self.AllData['systems'][i]
if 'AuAu' in system:
if cent == 0:
system_label = 'Au-Au \;200\; GeV, 0-10\%'
if cent == 1:
system_label = 'Au-Au \;200\; GeV, 40-50\%'
else:
if '2760' in system:
if cent == 0:
system_label = 'Pb-Pb \;2.76\; TeV, 0-5\%'
if cent == 1:
system_label = 'Pb-Pb \;2.76\; TeV, 30-40\%'
elif '5020' in system:
if cent == 0:
system_label = 'Pb-Pb \;5.02\; TeV, 0-10\%'
if cent == 1:
system_label = 'Pb-Pb \;5.02\; TeV, 30-50\%'
#color = sns.color_palette('colorblind')[i]
color = self.colors[i]
# Optionally: Remove outlier points from emulator validation plot
remove_outliers = False
if remove_outliers:
if self.model == 'LBT':
remove = [79, 124, 135]
if self.model == 'MATTER':
remove = [59, 60, 61, 62]
if self.model == 'MATTER+LBT1':
remove = [0, 2, 5, 12, 17, 28, 31, 34, 37, 46, 50, 56, 63, 65, 69]
if self.model == 'MATTER+LBT2':
remove = [2, 3, 14, 19, 20, 21, 27, 28, 33, 56]
for index in sorted(remove, reverse=True):
del true_raa[i][index]
del emulator_raa_mean[i][index]
del emulator_raa_stdev[i][index]
true_raa_flat_i = [item for sublist in true_raa[i] for item in sublist[cent]]
emulator_raa_mean_flat_i = [item for sublist in emulator_raa_mean[i] for item in sublist[cent]]
emulator_raa_stdev_flat_i = [item for sublist in emulator_raa_stdev[i] for item in sublist[cent]]
# Get RAA points
true_raa_i = np.array(true_raa_flat_i)
emulator_raa_mean_i = np.array(emulator_raa_mean_flat_i)
emulator_raa_stdev_i = np.array(emulator_raa_stdev_flat_i)
normalized_residual_i = np.divide(true_raa_i-emulator_raa_mean_i, emulator_raa_stdev_i)
# Draw scatter plot
ax_scatter.scatter(true_raa_i, emulator_raa_mean_i, s=5, marker=markers[i],
color=color, alpha=0.7, label=r'$\rm{{{}}}$'.format(system_label), linewidth=0)
#ax_scatter.set_ylim([0, 1.19])
#ax_scatter.set_xlim([0, 1.19])
ax_scatter.set_xlabel(r'$R_{\rm{AA}}^{\rm{true}}$', fontsize=20)
ax_scatter.set_ylabel(r'$R_{\rm{AA}}^{\rm{emulator}}$', fontsize=20)
ax_scatter.legend(title=self.model, title_fontsize=16,
loc='upper left', fontsize=14, markerscale=5)
plt.setp(ax_scatter.get_xticklabels(), fontsize=14)
plt.setp(ax_scatter.get_yticklabels(), fontsize=14)
# Draw line with slope 1
ax_scatter.plot([0,1], [0,1], sns.xkcd_rgb['almost black'], alpha=0.3,
linewidth=3, linestyle='--')
# Print mean value of emulator uncertainty
stdev_mean_relative = np.divide(emulator_raa_stdev_i, true_raa_i)
stdev_mean = np.mean(stdev_mean_relative)
text = r'$\left< \sigma_{{\rm{{emulator}}}}^{{\rm{{{}}}}} \right> = {:0.1f}\%$'.format(system_label, 100*stdev_mean)
ax_scatter.text(0.4, 0.17-0.09*i, text, fontsize=16)
# Draw normalization residuals
max = 3
bins = np.linspace(-max, max, 30)
x = (bins[1:] + bins[:-1])/2
h = ax_residual.hist(normalized_residual_i, color=color, histtype='step',
orientation='horizontal', linewidth=3, alpha=0.8, density=True, bins=bins)
ax_residual.scatter(h[0], x, color=color, s=10, marker=markers[i])
ax_residual.set_ylabel(r'$\left(R_{\rm{AA}}^{\rm{true}} - R_{\rm{AA}}^{\rm{emulator}}\right) / \sigma_{\rm{emulator}}$',
fontsize=20)
plt.setp(ax_residual.get_xticklabels(), fontsize=14)
plt.setp(ax_residual.get_yticklabels(), fontsize=14)
# Print out indices of points that deviate significantly
if remove_outliers:
stdev = np.std(normalized_residual_i)
for j,true_sublist in enumerate(true_raa[i]):
emulator_sublist = emulator_raa_mean[i][j]
for k,true_raa_value in enumerate(true_sublist):
emulator_raa_value = emulator_sublist[k]
normalized_residual = (true_raa_value-emulator_raa_value)/true_raa_value
if np.abs(normalized_residual) > 3*stdev:
print('Index {} has poor emulator validation...'.format(j))
plt.savefig('{}/EmulatorValidation_{}.pdf'.format(self.plot_dir, cent))
plt.close('all')
#---------------------------------------------------------------
# Plot emulator uncertainty validation
#
# true_raa and emulator_raa are lists (per system) of lists (per design point) of lists
# e.g. true_raa[i] = [[RAA_0, RAA_1,...], [RAA_0, RAA_1, ...], ...]
#
#---------------------------------------------------------------
def plot_emulator_uncertainty_validation(self, true_raa, emulator_raa_mean, emulator_raa_stdev):
# Loop through emulators
for cent in range(0,2):
# Construct a figure with two plots
plt.figure(1, figsize=(10, 6))
ax_scatter = plt.axes([0.1, 0.13, 0.6, 0.8]) # [left, bottom, width, height]
ax_residual = plt.axes([0.81, 0.13, 0.15, 0.8])
SystemCount = len(self.AllData["systems"])
for i in range(SystemCount):
system = self.AllData['systems'][i]
if 'AuAu' in system:
if cent == 0:
system_label = 'Au-Au \;200\; GeV, 0-10\%'
if cent == 1:
system_label = 'Au-Au \;200\; GeV, 40-50\%'
else:
if '2760' in system:
if cent == 0:
system_label = 'Pb-Pb \;2.76\; TeV, 0-5\%'
if cent == 1:
system_label = 'Pb-Pb \;2.76\; TeV, 30-40\%'
elif '5020' in system:
if cent == 0:
system_label = 'Pb-Pb \;5.02\; TeV, 0-10\%'
if cent == 1:
system_label = 'Pb-Pb \;5.02\; TeV, 30-50\%'
#color = sns.color_palette('colorblind')[i]
color = self.colors[i]
# Optionally: Remove outlier points from emulator validation plot
remove_outliers = False
if remove_outliers:
if self.model == 'LBT':
remove = [79, 124, 135]
if self.model == 'MATTER':
remove = [59, 60, 61, 62]
if self.model == 'MATTER+LBT1':
remove = [0, 2, 5, 12, 17, 28, 31, 34, 37, 46, 50, 56, 63, 65, 69]
if self.model == 'MATTER+LBT2':
remove = [2, 3, 14, 19, 20, 21, 27, 28, 33, 56]
for index in sorted(remove, reverse=True):
del true_raa[i][index]
del emulator_raa_mean[i][index]
del emulator_raa_stdev[i][index]
true_raa_flat_i = [item for sublist in true_raa[i] for item in sublist[cent]]
emulator_raa_mean_flat_i = [item for sublist in emulator_raa_mean[i] for item in sublist[cent]]
emulator_raa_stdev_flat_i = [item for sublist in emulator_raa_stdev[i] for item in sublist[cent]]
# Get RAA points
true_raa_i = np.array(true_raa_flat_i)
emulator_raa_mean_i = np.array(emulator_raa_mean_flat_i)
emulator_raa_stdev_i = np.array(emulator_raa_stdev_flat_i)
normalized_residual_i = np.divide(true_raa_i-emulator_raa_mean_i, emulator_raa_stdev_i)
# Draw scatter plot
ax_scatter.scatter(true_raa_i, emulator_raa_stdev_i, s=5,
color=color, alpha=0.7, label=r'$\rm{{{}}}$'.format(system_label), linewidth=0)
#ax_scatter.set_ylim([0, 1.19])
#ax_scatter.set_xlim([0, 1.19])
ax_scatter.set_xlabel(r'$R_{\rm{AA}}^{\rm{true}}$', fontsize=18)
ax_scatter.set_ylabel(r'$\sigma_{\rm{emulator}}$', fontsize=18)
ax_scatter.legend(title=self.model, title_fontsize=16,
loc='upper left', fontsize=14, markerscale=5)
# Draw normalization residuals
max = 3
bins = np.linspace(-max, max, 30)
ax_residual.hist(normalized_residual_i, color=color, histtype='step',
orientation='horizontal', linewidth=3, alpha=0.8, density=True, bins=bins)
ax_residual.set_ylabel(r'$\left(R_{\rm{AA}}^{\rm{true}} - R_{\rm{AA}}^{\rm{emulator}}\right) / \sigma_{\rm{emulator}}$',
fontsize=16)
# Print out indices of points that deviate significantly
if remove_outliers:
stdev = np.std(normalized_residual_i)
for j,true_sublist in enumerate(true_raa[i]):
emulator_sublist = emulator_raa_mean[i][j]
for k,true_raa_value in enumerate(true_sublist):
emulator_raa_value = emulator_sublist[k]
normalized_residual = (true_raa_value-emulator_raa_value)/true_raa_value
if np.abs(normalized_residual) > 3*stdev:
print('Index {} has poor emulator validation...'.format(j))
plt.savefig('{}/EmulatorUncertaintyValidation_{}.pdf'.format(self.plot_dir, cent))
plt.close('all')
##################################################################
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description='Jetscape STAT analysis')
parser.add_argument('-o', '--outputdir', action='store',
type=str, metavar='outputdir',
default='./STATGallery')
parser.add_argument('-c', '--configFile', action='store',
type=str, metavar='configFile',
default='analysis_config.yaml',
help='Path of config file')
parser.add_argument('-m', '--model', action='store',
type=str, metavar='model',
default='LBT',
help='model')
# Parse the arguments
args = parser.parse_args()
print('')
print('Configuring MergeResults...')
# If invalid configFile is given, exit
if not os.path.exists(args.configFile):
print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
sys.exit(0)
analysis = MergeResults(config_file = args.configFile, model=args.model,
output_dir=args.outputdir)
analysis.run_model()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sys.exit",
"numpy.divide",
"os.walk",
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.ma.masked_invalid",
"numpy.meshgrid",
"scipy.stats.bi... | [((27411, 27472), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Jetscape STAT analysis"""'}), "(description='Jetscape STAT analysis')\n", (27434, 27472), False, 'import argparse\n'), ((975, 1016), 'os.path.join', 'os.path.join', (['self.output_dir_base', 'model'], {}), '(self.output_dir_base, model)\n', (987, 1016), False, 'import os\n'), ((7266, 7320), 'numpy.linspace', 'np.linspace', (['theta_i_range[0]', 'theta_i_range[1]'], {'num': '(8)'}), '(theta_i_range[0], theta_i_range[1], num=8)\n', (7277, 7320), True, 'import numpy as np\n'), ((7528, 7550), 'numpy.array', 'np.array', (['theta_i_list'], {}), '(theta_i_list)\n', (7536, 7550), True, 'import numpy as np\n'), ((7563, 7582), 'numpy.array', 'np.array', (['qhat_list'], {}), '(qhat_list)\n', (7571, 7582), True, 'import numpy as np\n'), ((7595, 7617), 'numpy.array', 'np.array', (['success_list'], {}), '(success_list)\n', (7603, 7617), True, 'import numpy as np\n'), ((7729, 7814), 'scipy.stats.binned_statistic_2d', 'scipy.stats.binned_statistic_2d', (['x', 'y', 'z'], {'statistic': 'np.mean', 'bins': '[xbins, ybins]'}), '(x, y, z, statistic=np.mean, bins=[xbins, ybins]\n )\n', (7760, 7814), False, 'import scipy\n'), ((7897, 7924), 'numpy.meshgrid', 'np.meshgrid', (['xedges', 'yedges'], {}), '(xedges, yedges)\n', (7908, 7924), True, 'import numpy as np\n'), ((7940, 7967), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 9)'}), '(figsize=(11, 9))\n', (7950, 7967), True, 'import matplotlib.pyplot as plt\n'), ((7981, 7997), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (7992, 7997), True, 'import matplotlib.pyplot as plt\n'), ((8166, 8280), 'scipy.stats.binned_statistic_2d', 'scipy.stats.binned_statistic_2d', (['x', 'y', 'z'], {'statistic': 'self.efficiency_uncertainty_bayesian', 'bins': '[xbins, ybins]'}), '(x, y, z, statistic=self.\n efficiency_uncertainty_bayesian, bins=[xbins, ybins])\n', (8197, 8280), False, 'import 
scipy\n'), ((8431, 8456), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['name'], {'size': '(14)'}), '(name, size=14)\n', (8441, 8456), True, 'import matplotlib.pyplot as plt\n'), ((8465, 8567), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\left< \\\\hat{q}/T^3 \\\\right>_{T=300\\\\;\\\\rm{MeV}, E=100\\\\;\\\\rm{GeV}}$"""'], {'size': '(14)'}), "(\n '$\\\\left< \\\\hat{q}/T^3 \\\\right>_{T=300\\\\;\\\\rm{MeV}, E=100\\\\;\\\\rm{GeV}}$',\n size=14)\n", (8475, 8567), True, 'import matplotlib.pyplot as plt\n'), ((8656, 8666), 'numpy.mean', 'np.mean', (['z'], {}), '(z)\n', (8663, 8666), True, 'import numpy as np\n'), ((9501, 9517), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9510, 9517), True, 'import matplotlib.pyplot as plt\n'), ((11520, 11536), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (11528, 11536), True, 'import numpy as np\n'), ((11549, 11573), 'numpy.array', 'np.array', (['qhat_mean_list'], {}), '(qhat_mean_list)\n', (11557, 11573), True, 'import numpy as np\n'), ((11586, 11608), 'numpy.array', 'np.array', (['success_list'], {}), '(success_list)\n', (11594, 11608), True, 'import numpy as np\n'), ((11701, 11786), 'scipy.stats.binned_statistic_2d', 'scipy.stats.binned_statistic_2d', (['x', 'y', 'z'], {'statistic': 'np.mean', 'bins': '[xbins, ybins]'}), '(x, y, z, statistic=np.mean, bins=[xbins, ybins]\n )\n', (11732, 11786), False, 'import scipy\n'), ((11864, 11887), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['H'], {}), '(H)\n', (11884, 11887), True, 'import numpy as np\n'), ((11939, 11966), 'numpy.meshgrid', 'np.meshgrid', (['xedges', 'yedges'], {}), '(xedges, yedges)\n', (11950, 11966), True, 'import numpy as np\n'), ((11982, 12009), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 9)'}), '(figsize=(11, 9))\n', (11992, 12009), True, 'import matplotlib.pyplot as plt\n'), ((12023, 12039), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (12034, 12039), True, 'import 
matplotlib.pyplot as plt\n'), ((12214, 12328), 'scipy.stats.binned_statistic_2d', 'scipy.stats.binned_statistic_2d', (['x', 'y', 'z'], {'statistic': 'self.efficiency_uncertainty_bayesian', 'bins': '[xbins, ybins]'}), '(x, y, z, statistic=self.\n efficiency_uncertainty_bayesian, bins=[xbins, ybins])\n', (12245, 12328), False, 'import scipy\n'), ((12485, 12511), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Herr'], {}), '(Herr)\n', (12505, 12511), True, 'import numpy as np\n'), ((12899, 12909), 'numpy.mean', 'np.mean', (['z'], {}), '(z)\n', (12906, 12909), True, 'import numpy as np\n'), ((13793, 13809), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13802, 13809), True, 'import matplotlib.pyplot as plt\n'), ((14169, 14189), 'numpy.sum', 'np.sum', (['success_list'], {}), '(success_list)\n', (14175, 14189), True, 'import numpy as np\n'), ((14488, 14505), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (14495, 14505), True, 'import numpy as np\n'), ((14985, 15005), 'numpy.sum', 'np.sum', (['success_list'], {}), '(success_list)\n', (14991, 15005), True, 'import numpy as np\n'), ((15384, 15401), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (15391, 15401), True, 'import numpy as np\n'), ((28226, 28257), 'os.path.exists', 'os.path.exists', (['args.configFile'], {}), '(args.configFile)\n', (28240, 28257), False, 'import os\n'), ((28346, 28357), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (28354, 28357), False, 'import sys\n'), ((11077, 11106), 'numpy.linspace', 'np.linspace', (['(0.15)', '(0.5)'], {'num': '(8)'}), '(0.15, 0.5, num=8)\n', (11088, 11106), True, 'import numpy as np\n'), ((11248, 11276), 'numpy.linspace', 'np.linspace', (['(20)', '(200)'], {'num': '(10)'}), '(20, 200, num=10)\n', (11259, 11276), True, 'import numpy as np\n'), ((12610, 12684), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\left< \\\\hat{q}/T^3 \\\\right>_{E=100\\\\;\\\\rm{GeV}}$"""'], {'size': '(14)'}), "('$\\\\left< 
\\\\hat{q}/T^3 \\\\right>_{E=100\\\\;\\\\rm{GeV}}$', size=14)\n", (12620, 12684), True, 'import matplotlib.pyplot as plt\n'), ((12717, 12791), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\left< \\\\hat{q}/T^3 \\\\right>_{T=300\\\\;\\\\rm{MeV}}$"""'], {'size': '(14)'}), "('$\\\\left< \\\\hat{q}/T^3 \\\\right>_{T=300\\\\;\\\\rm{MeV}}$', size=14)\n", (12727, 12791), True, 'import matplotlib.pyplot as plt\n'), ((15972, 16002), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(10, 6)'}), '(1, figsize=(10, 6))\n', (15982, 16002), True, 'import matplotlib.pyplot as plt\n'), ((16028, 16059), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.1, 0.13, 0.6, 0.8]'], {}), '([0.1, 0.13, 0.6, 0.8])\n', (16036, 16059), True, 'import matplotlib.pyplot as plt\n'), ((16118, 16151), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.81, 0.13, 0.15, 0.8]'], {}), '([0.81, 0.13, 0.15, 0.8])\n', (16126, 16151), True, 'import matplotlib.pyplot as plt\n'), ((21866, 21882), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (21875, 21882), True, 'import matplotlib.pyplot as plt\n'), ((22477, 22507), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(10, 6)'}), '(1, figsize=(10, 6))\n', (22487, 22507), True, 'import matplotlib.pyplot as plt\n'), ((22533, 22564), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.1, 0.13, 0.6, 0.8]'], {}), '([0.1, 0.13, 0.6, 0.8])\n', (22541, 22564), True, 'import matplotlib.pyplot as plt\n'), ((22623, 22656), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.81, 0.13, 0.15, 0.8]'], {}), '([0.81, 0.13, 0.15, 0.8])\n', (22631, 22656), True, 'import matplotlib.pyplot as plt\n'), ((27260, 27276), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (27269, 27276), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1372), 'src.init.Init', 'init.Init', (['self.workdir'], {}), '(self.workdir)\n', (1358, 1372), False, 'from src import emulator, mcmc, init\n'), ((2718, 2745), 'os.path.exists', 'os.path.exists', 
(['result_path'], {}), '(result_path)\n', (2732, 2745), False, 'import os\n'), ((2922, 2936), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2933, 2936), False, 'import pickle\n'), ((10724, 10767), 'statistics.mean', 'statistics.mean', (['qhat_closure_truth_list[j]'], {}), '(qhat_closure_truth_list[j])\n', (10739, 10767), False, 'import statistics\n'), ((18503, 18528), 'numpy.array', 'np.array', (['true_raa_flat_i'], {}), '(true_raa_flat_i)\n', (18511, 18528), True, 'import numpy as np\n'), ((18567, 18601), 'numpy.array', 'np.array', (['emulator_raa_mean_flat_i'], {}), '(emulator_raa_mean_flat_i)\n', (18575, 18601), True, 'import numpy as np\n'), ((18641, 18676), 'numpy.array', 'np.array', (['emulator_raa_stdev_flat_i'], {}), '(emulator_raa_stdev_flat_i)\n', (18649, 18676), True, 'import numpy as np\n'), ((18717, 18782), 'numpy.divide', 'np.divide', (['(true_raa_i - emulator_raa_mean_i)', 'emulator_raa_stdev_i'], {}), '(true_raa_i - emulator_raa_mean_i, emulator_raa_stdev_i)\n', (18726, 18782), True, 'import numpy as np\n'), ((19912, 19955), 'numpy.divide', 'np.divide', (['emulator_raa_stdev_i', 'true_raa_i'], {}), '(emulator_raa_stdev_i, true_raa_i)\n', (19921, 19955), True, 'import numpy as np\n'), ((19985, 20013), 'numpy.mean', 'np.mean', (['stdev_mean_relative'], {}), '(stdev_mean_relative)\n', (19992, 20013), True, 'import numpy as np\n'), ((20325, 20351), 'numpy.linspace', 'np.linspace', (['(-max)', 'max', '(30)'], {}), '(-max, max, 30)\n', (20336, 20351), True, 'import numpy as np\n'), ((24957, 24982), 'numpy.array', 'np.array', (['true_raa_flat_i'], {}), '(true_raa_flat_i)\n', (24965, 24982), True, 'import numpy as np\n'), ((25021, 25055), 'numpy.array', 'np.array', (['emulator_raa_mean_flat_i'], {}), '(emulator_raa_mean_flat_i)\n', (25029, 25055), True, 'import numpy as np\n'), ((25095, 25130), 'numpy.array', 'np.array', (['emulator_raa_stdev_flat_i'], {}), '(emulator_raa_stdev_flat_i)\n', (25103, 25130), True, 'import numpy as np\n'), ((25171, 
25236), 'numpy.divide', 'np.divide', (['(true_raa_i - emulator_raa_mean_i)', 'emulator_raa_stdev_i'], {}), '(true_raa_i - emulator_raa_mean_i, emulator_raa_stdev_i)\n', (25180, 25236), True, 'import numpy as np\n'), ((25978, 26004), 'numpy.linspace', 'np.linspace', (['(-max)', 'max', '(30)'], {}), '(-max, max, 30)\n', (25989, 26004), True, 'import numpy as np\n'), ((2408, 2440), 'os.walk', 'os.walk', (['self.output_dir_holdout'], {}), '(self.output_dir_holdout)\n', (2415, 2440), False, 'import os\n'), ((9061, 9075), 'numpy.isnan', 'np.isnan', (['zval'], {}), '(zval)\n', (9069, 9075), True, 'import numpy as np\n'), ((9079, 9093), 'numpy.isnan', 'np.isnan', (['zerr'], {}), '(zerr)\n', (9087, 9093), True, 'import numpy as np\n'), ((13346, 13360), 'numpy.isnan', 'np.isnan', (['zval'], {}), '(zval)\n', (13354, 13360), True, 'import numpy as np\n'), ((13364, 13378), 'numpy.isnan', 'np.isnan', (['zerr'], {}), '(zerr)\n', (13372, 13378), True, 'import numpy as np\n'), ((21182, 21211), 'numpy.std', 'np.std', (['normalized_residual_i'], {}), '(normalized_residual_i)\n', (21188, 21211), True, 'import numpy as np\n'), ((26565, 26594), 'numpy.std', 'np.std', (['normalized_residual_i'], {}), '(normalized_residual_i)\n', (26571, 26594), True, 'import numpy as np\n'), ((21619, 21646), 'numpy.abs', 'np.abs', (['normalized_residual'], {}), '(normalized_residual)\n', (21625, 21646), True, 'import numpy as np\n'), ((27002, 27029), 'numpy.abs', 'np.abs', (['normalized_residual'], {}), '(normalized_residual)\n', (27008, 27029), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import unittest
import os
import numpy as np
from scipy.io import netcdf
from booz_xform import Booz_xform
TEST_DIR = os.path.join(os.path.dirname(__file__), 'test_files')
class RegressionTest(unittest.TestCase):
    """Regression test comparing Booz_xform output to reference boozmn files."""

    def test_regression(self):
        """For each configuration, rerun the transform with the reference
        file's resolution parameters and surfaces, then check all Fourier
        arrays and the rewritten boozmn file against the reference values.
        """
        configurations = ['circular_tokamak',
                          'up_down_asymmetric_tokamak',
                          'li383_1.4m',
                          'LandremanSenguptaPlunk_section5p3']
        for configuration in configurations:
            wout_filename = 'wout_' + configuration + '.nc'
            boozmn_filename = 'boozmn_' + configuration + '.nc'
            boozmn_new_filename = 'boozmn_new_' + configuration + '.nc'
            # Reference output produced by the fortran version.
            f = netcdf.netcdf_file(os.path.join(TEST_DIR, boozmn_filename),
                                   'r', mmap=False)
            b = Booz_xform()
            b.read_wout(os.path.join(TEST_DIR, wout_filename))
            # Transfer parameters from the reference file to the new
            # calculation
            b.mboz = f.variables['mboz_b'][()]
            b.nboz = f.variables['nboz_b'][()]
            # jlist in the file is 1-based and offset; convert to 0-based
            # surface indices expected by compute_surfs.
            b.compute_surfs = f.variables['jlist'][()] - 2
            b.run()
            # Compare 2D arrays
            vars = ['bmnc_b', 'rmnc_b', 'zmns_b', 'numns_b', 'gmnc_b']
            # Non-stellarator-symmetric configurations carry extra arrays.
            asym = bool(f.variables['lasym__logical__'][()])
            if asym:
                vars += ['bmns_b', 'rmns_b', 'zmnc_b', 'numnc_b', 'gmns_b']
            rtol = 1e-12
            atol = 1e-12
            for var in vars:
                # gmnc_b is misspelled in the fortran version
                var_ref = var
                if var == 'gmnc_b':
                    var_ref = 'gmn_b'
                # Handle the issue that we now use the variable nu,
                # whereas the boozmn format uses the variable
                # p = -nu.
                sign = 1
                if var[:2] == 'nu':
                    sign = -1
                    var_ref = 'p' + var[2:]
                # Reference values:
                arr1 = f.variables[var_ref][()]
                # Newly computed values:
                arr2 = getattr(b, var).transpose()
                print('abs diff in ' + var + ':', np.max(np.abs(arr1 - sign * arr2)))
                np.testing.assert_allclose(arr1, sign * arr2,
                                           rtol=rtol, atol=atol)
            # Now compare some values written to the boozmn files.
            b.write_boozmn(boozmn_new_filename)
            f2 = netcdf.netcdf_file(boozmn_new_filename)
            vars = f.variables.keys()
            # These variables will not match:
            exclude = ['rmax_b', 'rmin_b', 'betaxis_b', 'version', 'pres_b', 'beta_b', 'phip_b']
            for var in vars:
                if var in exclude:
                    continue
                # Reference values:
                arr1 = f.variables[var][()]
                # Newly computed values:
                arr2 = f2.variables[var][()]
                print('abs diff in ' + var + ':', np.max(np.abs(arr1 - arr2)))
                np.testing.assert_allclose(arr1, arr2,
                                           rtol=rtol, atol=atol)
            f.close()
if __name__ == '__main__':
unittest.main()
| [
"numpy.abs",
"numpy.testing.assert_allclose",
"booz_xform.Booz_xform",
"os.path.join",
"os.path.dirname",
"unittest.main",
"scipy.io.netcdf.netcdf_file"
] | [((156, 181), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import os\n'), ((3287, 3302), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3300, 3302), False, 'import unittest\n'), ((870, 882), 'booz_xform.Booz_xform', 'Booz_xform', ([], {}), '()\n', (880, 882), False, 'from booz_xform import Booz_xform\n'), ((2540, 2579), 'scipy.io.netcdf.netcdf_file', 'netcdf.netcdf_file', (['boozmn_new_filename'], {}), '(boozmn_new_filename)\n', (2558, 2579), False, 'from scipy.io import netcdf\n'), ((761, 800), 'os.path.join', 'os.path.join', (['TEST_DIR', 'boozmn_filename'], {}), '(TEST_DIR, boozmn_filename)\n', (773, 800), False, 'import os\n'), ((907, 944), 'os.path.join', 'os.path.join', (['TEST_DIR', 'wout_filename'], {}), '(TEST_DIR, wout_filename)\n', (919, 944), False, 'import os\n'), ((2296, 2363), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['arr1', '(sign * arr2)'], {'rtol': 'rtol', 'atol': 'atol'}), '(arr1, sign * arr2, rtol=rtol, atol=atol)\n', (2322, 2363), True, 'import numpy as np\n'), ((3116, 3176), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['arr1', 'arr2'], {'rtol': 'rtol', 'atol': 'atol'}), '(arr1, arr2, rtol=rtol, atol=atol)\n', (3142, 3176), True, 'import numpy as np\n'), ((2251, 2277), 'numpy.abs', 'np.abs', (['(arr1 - sign * arr2)'], {}), '(arr1 - sign * arr2)\n', (2257, 2277), True, 'import numpy as np\n'), ((3078, 3097), 'numpy.abs', 'np.abs', (['(arr1 - arr2)'], {}), '(arr1 - arr2)\n', (3084, 3097), True, 'import numpy as np\n')] |
"""Configuration variables for img_pipe."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
# Volume dimensions in voxels — presumably a FreeSurfer-conformed 256^3
# volume; TODO confirm against the imaging pipeline.
VOXEL_SIZES = np.array([256, 256, 256])
# Axis extents for the three orthogonal slice views (one row per view).
IMG_RANGES = [[0, VOXEL_SIZES[1], 0, VOXEL_SIZES[2]],
              [0, VOXEL_SIZES[0], 0, VOXEL_SIZES[2]],
              [0, VOXEL_SIZES[0], 0, VOXEL_SIZES[1]]]
# Anatomical axis labels matching each slice view in IMG_RANGES.
IMG_LABELS = [['Inferior', 'Posterior'],
              ['Inferior', 'Left'],
              ['Posterior', 'Left']]
# Canvas size (pixels) for electrode plotting.
ELEC_PLOT_SIZE = np.array([1024, 1024])
# Zoom increment per scroll step in the viewer.
ZOOM_STEP_SIZE = 5
# Minimum CT intensity considered for electrode localization.
CT_MIN_VAL = 1000
# Upper bound on the number of electrode groups.
MAX_N_GROUPS = 25
# Distinct RGB colors (0-1 floats) used to distinguish electrode groups.
UNIQUE_COLORS = [(0.1, 0.42, 0.43), (0.9, 0.34, 0.62), (0.47, 0.51, 0.3),
                 (0.47, 0.55, 0.99), (0.79, 0.68, 0.06), (0.34, 0.74, 0.05),
                 (0.58, 0.87, 0.13), (0.86, 0.98, 0.4), (0.92, 0.91, 0.66),
                 (0.77, 0.38, 0.34), (0.9, 0.37, 0.1), (0.2, 0.62, 0.9),
                 (0.22, 0.65, 0.64), (0.14, 0.94, 0.8), (0.34, 0.31, 0.68),
                 (0.59, 0.28, 0.74), (0.46, 0.19, 0.94), (0.37, 0.93, 0.7),
                 (0.56, 0.86, 0.55), (0.67, 0.69, 0.44)]
N_COLORS = len(UNIQUE_COLORS)
# Discrete colormap built from the group colors above.
ELECTRODE_CMAP = LinearSegmentedColormap.from_list(
    'elec_colors', UNIQUE_COLORS, N=N_COLORS)
# Map from human-readable atlas name to FreeSurfer parcellation suffix.
ATLAS_DICT = {'desikan-killiany': 'aparc',
              'DKT': 'aparc.DKTatlas',
              'destrieux': 'aparc.a2009s'}
# Template subjects available for warping/plotting.
TEMPLATES = ['V1_average', 'cvs_avg35', 'cvs_avg35_inMNI152', 'fsaverage',
             'fsaverage3', 'fsaverage4', 'fsaverage5', 'fsaverage6',
             'fsaverage_sym']
# Hemisphere name -> FreeSurfer hemisphere prefix.
HEMI_DICT = {'Left': 'lh', 'Right': 'rh'}
# e.g. 'Left-Pial' -> 'lh.pial', for each hemisphere/surface combination.
CORTICAL_SURFACES = {f'{hemi}-{roi}': f'{HEMI_DICT[hemi]}.{roi.lower()}'
                     for hemi in ('Left', 'Right')
                     for roi in ('Pial', 'Inflated', 'White')}
# Subcortical structure label indices — presumably FreeSurfer aseg IDs;
# TODO confirm against the FreeSurfer color lookup table.
SUBCORTICAL_INDICES = [4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 18, 26,
                       28, 43, 44, 49, 50, 51, 52, 53, 54, 58, 60]
| [
"numpy.array",
"matplotlib.colors.LinearSegmentedColormap.from_list"
] | [((189, 214), 'numpy.array', 'np.array', (['[256, 256, 256]'], {}), '([256, 256, 256])\n', (197, 214), True, 'import numpy as np\n'), ((508, 530), 'numpy.array', 'np.array', (['[1024, 1024]'], {}), '([1024, 1024])\n', (516, 530), True, 'import numpy as np\n'), ((1143, 1218), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""elec_colors"""', 'UNIQUE_COLORS'], {'N': 'N_COLORS'}), "('elec_colors', UNIQUE_COLORS, N=N_COLORS)\n", (1176, 1218), False, 'from matplotlib.colors import LinearSegmentedColormap\n')] |
import argparse
import sys
import numpy as np
import itertools
# visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('classic')
import numpy as np
import pandas as pd
def _plot_neuron_histogram(outputs, neuron_index):
    """Plot and save a histogram (with KDE) of one neuron's outputs.

    Args:
        outputs: 2-D array indexed as outputs[neuron] after transposing.
        neuron_index: row index of the neuron to plot.

    Side effects: writes 'Histogram of <i> Neurons output' to disk and
    clears the current figure.
    """
    sns.distplot(outputs[neuron_index], hist=True, kde=True,
                 bins=40, color='darkblue',
                 hist_kws={'edgecolor': 'black'},
                 kde_kws={'linewidth': 4})
    title = 'Histogram of ' + str(neuron_index) + ' Neurons output'
    plt.title(title)
    plt.xlabel('output')
    plt.ylabel('occurrences')  # fixed typo: label previously read 'occurences'
    plt.savefig(title)
    plt.clf()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Data Collector for neuron outputs')
    parser.add_argument('neuron_output_data_filepath', type=str,
                        help='where is the neuron output data?')
    parser.add_argument('neuron1', type=int, help='1st neuron to analyze')
    parser.add_argument('neuron2', type=int, help='2nd neuron to analyze')
    args = parser.parse_args()

    # After transposing, row i holds the output trace of neuron i.
    neuron_output = np.load(args.neuron_output_data_filepath)
    neuron_output = np.transpose(neuron_output)

    # Joint (dual-variable) distribution of the two requested neurons.
    col1 = 'neuron ' + str(args.neuron1) + ' stat distribution'
    col2 = 'neuron ' + str(args.neuron2) + ' stat distribution'
    data = pd.DataFrame(
        np.column_stack((neuron_output[args.neuron1],
                         neuron_output[args.neuron2])),
        columns=[col1, col2])
    with sns.axes_style('white'):
        sns.jointplot(col1, col2, data, kind='kde')
    plt.savefig(fname='Distribution of ' + str(args.neuron1) + ' and '
                + str(args.neuron2) + ' Neurons output')
    plt.clf()

    # Single-variable distributions (previously two duplicated inline copies).
    _plot_neuron_histogram(neuron_output, args.neuron1)
    _plot_neuron_histogram(neuron_output, args.neuron2)
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"numpy.column_stack",
"matplotlib.pyplot.style.use",
"seaborn.axes_style",
"numpy.transpose",
"numpy.load"
] | [((144, 168), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""classic"""'], {}), "('classic')\n", (157, 168), True, 'import matplotlib.pyplot as plt\n'), ((252, 324), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Data Collector for neuron outputs"""'}), "(description='Data Collector for neuron outputs')\n", (275, 324), False, 'import argparse\n'), ((632, 673), 'numpy.load', 'np.load', (['args.neuron_output_data_filepath'], {}), '(args.neuron_output_data_filepath)\n', (639, 673), True, 'import numpy as np\n'), ((693, 719), 'numpy.transpose', 'np.transpose', (['neuronOutput'], {}), '(neuronOutput)\n', (705, 719), True, 'import numpy as np\n'), ((835, 908), 'numpy.column_stack', 'np.column_stack', (['(neuronOutput[args.neuron1], neuronOutput[args.neuron2])'], {}), '((neuronOutput[args.neuron1], neuronOutput[args.neuron2]))\n', (850, 908), True, 'import numpy as np\n'), ((1361, 1370), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1368, 1370), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""output"""'], {}), "('output')\n", (1677, 1687), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1716), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""occurences"""'], {}), "('occurences')\n", (1702, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1803), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1801, 1803), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2086), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""output"""'], {}), "('output')\n", (2076, 2086), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""occurences"""'], {}), "('occurences')\n", (2101, 2115), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1090), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (1081, 1090), True, 'import seaborn as sns\n')] |
from typing import Dict, List, Optional
import numpy as np
from numpy import ndarray
from emmental.utils.utils import prob_to_pred
def precision_scorer(
    golds: ndarray,
    probs: Optional[ndarray],
    preds: ndarray,
    uids: Optional[List[str]] = None,
    pos_label: int = 1,
) -> Dict[str, float]:
    """Compute precision of the predictions.

    Args:
        golds(ndarray): Ground truth values.
        probs(ndarray or None): Predicted probabilities (unused here).
        preds(ndarray): Predicted values.
        uids(list, optional): Unique ids, defaults to None.
        pos_label(int, optional): The positive class label, defaults to 1.

    Returns:
        dict: ``{"precision": value}``.
    """
    # Probabilistic gold labels are collapsed to hard class labels first.
    if len(golds.shape) > 1:
        golds = prob_to_pred(golds)

    predicted_pos = preds == pos_label
    actual_pos = golds == pos_label

    true_pos = np.sum(predicted_pos & actual_pos)
    false_pos = np.sum(predicted_pos & ~actual_pos)

    total_pos = true_pos + false_pos
    # Guard against division by zero when nothing was predicted positive.
    return {"precision": true_pos / total_pos if total_pos > 0 else 0.0}
| [
"numpy.where",
"numpy.sum",
"numpy.logical_not",
"emmental.utils.utils.prob_to_pred"
] | [((736, 777), 'numpy.where', 'np.where', (['(preds == pos_label)', '(True)', '(False)'], {}), '(preds == pos_label, True, False)\n', (744, 777), True, 'import numpy as np\n'), ((791, 832), 'numpy.where', 'np.where', (['(golds == pos_label)', '(True)', '(False)'], {}), '(golds == pos_label, True, False)\n', (799, 832), True, 'import numpy as np\n'), ((842, 867), 'numpy.sum', 'np.sum', (['(pred_pos * gt_pos)'], {}), '(pred_pos * gt_pos)\n', (848, 867), True, 'import numpy as np\n'), ((701, 720), 'emmental.utils.utils.prob_to_pred', 'prob_to_pred', (['golds'], {}), '(golds)\n', (713, 720), False, 'from emmental.utils.utils import prob_to_pred\n'), ((895, 917), 'numpy.logical_not', 'np.logical_not', (['gt_pos'], {}), '(gt_pos)\n', (909, 917), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('ggplot')
# Number of samples kept in each series' sliding window.
size = 100
# Fixed x coordinates of the window: `size` evenly spaced points in [0, 1).
x_vec = np.linspace(0, 1, size + 1)[:-1]
# Per-series sample buffers, created on demand by live_plot().
y_vecs = []
# Per-series line artists; [] marks a series not yet drawn.
lines = []
def live_plotter(x_vec, y1_data, line1, title='', pause_time=1e-2,
                 y_limits=(-5, 105)):
    """Create or refresh a live line plot.

    Args:
        x_vec: fixed x coordinates of the plotted points.
        y1_data: current y values to display.
        line1: the line artist from a previous call, or [] on first call.
        title: figure title (applied only when the figure is created).
        pause_time: seconds to pause so the GUI backend can redraw.
        y_limits: (lower, upper) y-axis bounds; the default matches the
            previously hard-coded range, now generalized to a parameter.

    Returns:
        The line artist, to be passed back in on the next call.
    """
    if line1 == []:
        # First call: enable interactive mode and build the figure/line.
        plt.ion()
        fig = plt.figure(figsize=(15, 3))
        ax = fig.add_subplot(111)
        line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
        plt.ylabel('Y Label')
        plt.title(title)
        plt.show()
    # Subsequent calls only push new y data into the existing artist.
    line1.set_ydata(y1_data)
    plt.ylim(list(y_limits))
    plt.pause(pause_time)
    return line1
def live_plot(*args):
    """Append one new sample per series and refresh the live plot.

    Each positional argument is the newest value of one series; buffers
    and line artists live in module globals and are grown on demand.
    """
    global x_vec, y_vecs, lines, size
    # Grow the per-series buffers/artists to cover every passed series.
    while len(y_vecs) < len(args):
        y_vecs.append(np.zeros(size))
        lines.append([])
    for idx, value in enumerate(args):
        # Slide the window: drop the oldest sample, append the newest.
        y_vecs[idx] = np.append(y_vecs[idx][1:], value)
        try:
            lines[idx] = live_plotter(x_vec, y_vecs[idx], lines[idx])
        except Exception:
            # Best-effort redraw: keep collecting data if the GUI backend
            # raises, but no longer swallow KeyboardInterrupt/SystemExit
            # as the original bare `except:` did.
            pass
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use",
"numpy.append",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ion"
] | [((52, 75), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (65, 75), True, 'import matplotlib.pyplot as plt\n'), ((96, 123), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(size + 1)'], {}), '(0, 1, size + 1)\n', (107, 123), True, 'import numpy as np\n'), ((506, 525), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-5, 105]'], {}), '([-5, 105])\n', (514, 525), True, 'import matplotlib.pyplot as plt\n'), ((530, 551), 'matplotlib.pyplot.pause', 'plt.pause', (['pause_time'], {}), '(pause_time)\n', (539, 551), True, 'import matplotlib.pyplot as plt\n'), ((249, 258), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (256, 258), True, 'import matplotlib.pyplot as plt\n'), ((273, 300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (283, 300), True, 'import matplotlib.pyplot as plt\n'), ((407, 428), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Label"""'], {}), "('Y Label')\n", (417, 428), True, 'import matplotlib.pyplot as plt\n'), ((437, 453), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (446, 453), True, 'import matplotlib.pyplot as plt\n'), ((462, 472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (470, 472), True, 'import matplotlib.pyplot as plt\n'), ((775, 802), 'numpy.append', 'np.append', (['y_vecs[i][1:]', 'v'], {}), '(y_vecs[i][1:], v)\n', (784, 802), True, 'import numpy as np\n'), ((684, 698), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (692, 698), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import logging
import logging.config
import time
from config import get_logging_config, args, evaluation_logfile, train_dir
from config import config as net_config
from paths import CKPT_ROOT
from utils import decode_bboxes, batch_iou
from skimage.transform import resize as imresize
from resnet import ResNet
from boxer import PriorBoxGrid
from voc_loader import VOCLoader
# Configure logging for this run before the module-level logger is used.
logging.config.dictConfig(get_logging_config(args.run_name))
log = logging.getLogger()
def main(argv=None):  # pylint: disable=unused-argument
    """Benchmark single-image inference speed of the detection network.

    Builds the TF1 graph (ResNet-50 trunk, multibox detection head with
    per-class NMS, and a segmentation head), restores a checkpoint, runs
    200 VOC test images through it, and logs mean/std latency and FPS.
    Timings of the first 10 iterations are discarded as warm-up.
    """
    net = ResNet
    depth = 50
    loader = VOCLoader('07', 'test')
    net = net(config=net_config, depth=depth, training=False)
    num_classes = 21
    batch_size = args.batch_size
    img_size = args.image_size
    # Single-image input placeholder (batch dimension fixed to 1).
    image_ph = tf.placeholder(shape=[1, img_size, img_size, 3],
                              dtype=tf.float32, name='img_ph')
    net.create_trunk(image_ph)
    bboxer = PriorBoxGrid(net_config)
    net.create_multibox_head(num_classes)
    confidence = tf.nn.softmax(tf.squeeze(net.outputs['confidence']))
    location = tf.squeeze(net.outputs['location'])
    # Decode regressed offsets against the prior-box tiling.
    good_bboxes = decode_bboxes(location, bboxer.tiling)
    detection_list = []
    score_list = []
    # Per-class post-processing (class 0 is background and skipped):
    # confidence threshold -> top-k -> non-max suppression.
    for i in range(1, num_classes):
        class_mask = tf.greater(confidence[:, i], args.conf_thresh)
        class_scores = tf.boolean_mask(confidence[:, i], class_mask)
        class_bboxes = tf.boolean_mask(good_bboxes, class_mask)
        K = tf.minimum(tf.size(class_scores), args.top_k_nms)
        _, top_k_inds = tf.nn.top_k(class_scores, K)
        top_class_scores = tf.gather(class_scores, top_k_inds)
        top_class_bboxes = tf.gather(class_bboxes, top_k_inds)
        final_inds = tf.image.non_max_suppression(top_class_bboxes,
                                                  top_class_scores,
                                                  max_output_size=50,
                                                  iou_threshold=args.nms_thresh)
        final_class_bboxes = tf.gather(top_class_bboxes, final_inds)
        final_scores = tf.gather(top_class_scores, final_inds)
        detection_list.append(final_class_bboxes)
        score_list.append(final_scores)
    net.create_segmentation_head(num_classes)
    # Per-pixel class labels via argmax over the segmentation logits.
    segmentation = tf.cast(tf.argmax(tf.squeeze(net.outputs['segmentation']),
                                      axis=-1), tf.int32)
    times = []
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_path = train_dir + '/model.ckpt-%i000' % args.ckpt
        log.debug("Restoring checkpoint %s" % ckpt_path)
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, ckpt_path)
        for i in range(200):
            im = loader.load_image(loader.get_filenames()[i])
            im = imresize(im, (img_size, img_size))
            im = im.reshape((1, img_size, img_size, 3))
            st = time.time()
            sess.run([detection_list, score_list, segmentation], feed_dict={image_ph: im})
            et = time.time()
            # Skip the first iterations to exclude warm-up overhead.
            if i > 10:
                times.append(et-st)
    m = np.mean(times)
    s = np.std(times)
    fps = 1/m
    log.info("Mean={0:.2f}ms; Std={1:.2f}ms; FPS={2:.1f}".format(m*1000, s*1000, fps))
if __name__ == '__main__':
    tf.app.run()
| [
"logging.getLogger",
"tensorflow.boolean_mask",
"voc_loader.VOCLoader",
"tensorflow.app.run",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.greater",
"tensorflow.image.non_max_suppression",
"tensorflow.ConfigProto",
"tensorflow.size",
"utils.decode_bboxes",
"tensorflow.global_variables",... | [((488, 507), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (505, 507), False, 'import logging\n'), ((447, 480), 'config.get_logging_config', 'get_logging_config', (['args.run_name'], {}), '(args.run_name)\n', (465, 480), False, 'from config import get_logging_config, args, evaluation_logfile, train_dir\n'), ((612, 635), 'voc_loader.VOCLoader', 'VOCLoader', (['"""07"""', '"""test"""'], {}), "('07', 'test')\n", (621, 635), False, 'from voc_loader import VOCLoader\n'), ((800, 886), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, img_size, img_size, 3]', 'dtype': 'tf.float32', 'name': '"""img_ph"""'}), "(shape=[1, img_size, img_size, 3], dtype=tf.float32, name=\n 'img_ph')\n", (814, 886), True, 'import tensorflow as tf\n'), ((956, 980), 'boxer.PriorBoxGrid', 'PriorBoxGrid', (['net_config'], {}), '(net_config)\n', (968, 980), False, 'from boxer import PriorBoxGrid\n'), ((1109, 1144), 'tensorflow.squeeze', 'tf.squeeze', (["net.outputs['location']"], {}), "(net.outputs['location'])\n", (1119, 1144), True, 'import tensorflow as tf\n'), ((1164, 1202), 'utils.decode_bboxes', 'decode_bboxes', (['location', 'bboxer.tiling'], {}), '(location, bboxer.tiling)\n', (1177, 1202), False, 'from utils import decode_bboxes, batch_iou\n'), ((3282, 3296), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (3289, 3296), True, 'import numpy as np\n'), ((3305, 3318), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (3311, 3318), True, 'import numpy as np\n'), ((3454, 3466), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3464, 3466), True, 'import tensorflow as tf\n'), ((1055, 1092), 'tensorflow.squeeze', 'tf.squeeze', (["net.outputs['confidence']"], {}), "(net.outputs['confidence'])\n", (1065, 1092), True, 'import tensorflow as tf\n'), ((1304, 1350), 'tensorflow.greater', 'tf.greater', (['confidence[:, i]', 'args.conf_thresh'], {}), '(confidence[:, i], args.conf_thresh)\n', (1314, 1350), True, 'import 
tensorflow as tf\n'), ((1375, 1420), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['confidence[:, i]', 'class_mask'], {}), '(confidence[:, i], class_mask)\n', (1390, 1420), True, 'import tensorflow as tf\n'), ((1444, 1484), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['good_bboxes', 'class_mask'], {}), '(good_bboxes, class_mask)\n', (1459, 1484), True, 'import tensorflow as tf\n'), ((1572, 1600), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['class_scores', 'K'], {}), '(class_scores, K)\n', (1583, 1600), True, 'import tensorflow as tf\n'), ((1628, 1663), 'tensorflow.gather', 'tf.gather', (['class_scores', 'top_k_inds'], {}), '(class_scores, top_k_inds)\n', (1637, 1663), True, 'import tensorflow as tf\n'), ((1691, 1726), 'tensorflow.gather', 'tf.gather', (['class_bboxes', 'top_k_inds'], {}), '(class_bboxes, top_k_inds)\n', (1700, 1726), True, 'import tensorflow as tf\n'), ((1749, 1868), 'tensorflow.image.non_max_suppression', 'tf.image.non_max_suppression', (['top_class_bboxes', 'top_class_scores'], {'max_output_size': '(50)', 'iou_threshold': 'args.nms_thresh'}), '(top_class_bboxes, top_class_scores,\n max_output_size=50, iou_threshold=args.nms_thresh)\n', (1777, 1868), True, 'import tensorflow as tf\n'), ((2050, 2089), 'tensorflow.gather', 'tf.gather', (['top_class_bboxes', 'final_inds'], {}), '(top_class_bboxes, final_inds)\n', (2059, 2089), True, 'import tensorflow as tf\n'), ((2113, 2152), 'tensorflow.gather', 'tf.gather', (['top_class_scores', 'final_inds'], {}), '(top_class_scores, final_inds)\n', (2122, 2152), True, 'import tensorflow as tf\n'), ((1509, 1530), 'tensorflow.size', 'tf.size', (['class_scores'], {}), '(class_scores)\n', (1516, 1530), True, 'import tensorflow as tf\n'), ((2328, 2367), 'tensorflow.squeeze', 'tf.squeeze', (["net.outputs['segmentation']"], {}), "(net.outputs['segmentation'])\n", (2338, 2367), True, 'import tensorflow as tf\n'), ((2617, 2650), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), 
'()\n', (2648, 2650), True, 'import tensorflow as tf\n'), ((2805, 2826), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2824, 2826), True, 'import tensorflow as tf\n'), ((2975, 3009), 'skimage.transform.resize', 'imresize', (['im', '(img_size, img_size)'], {}), '(im, (img_size, img_size))\n', (2983, 3009), True, 'from skimage.transform import resize as imresize\n'), ((3083, 3094), 'time.time', 'time.time', ([], {}), '()\n', (3092, 3094), False, 'import time\n'), ((3203, 3214), 'time.time', 'time.time', ([], {}), '()\n', (3212, 3214), False, 'import time\n'), ((2476, 2545), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (2490, 2545), True, 'import tensorflow as tf\n')] |
from __future__ import division, print_function, absolute_import
from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
def test_hypergeom_logpmf():
    # The hypergeometric log-pmf obeys the symmetries
    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K).
    k, N, K, n = 5, 50, 10, 5
    reference = hypergeom.logpmf(k, N, K, n)
    equivalent_args = [
        (n - k, N, N - K, n),
        (K - k, N, K, N - n),
        (k, N, n, K),
    ]
    for params in equivalent_args:
        assert_almost_equal(reference, hypergeom.logpmf(*params), decimal=12)

    # With a single draw (n = 1), the hypergeometric distribution reduces
    # to a Bernoulli distribution with success probability K/N.
    k, N, K, n = 1, 10, 7, 1
    assert_almost_equal(hypergeom.logpmf(k, N, K, n),
                        bernoulli.logpmf(k, K / N), decimal=12)
def test_boltzmann_upper_bound():
    """Boltzmann distribution with a finite upper bound N on the support."""
    support = np.arange(-3, 5)

    # N = 1: the whole probability mass sits on k == 0.
    pmf_vals = boltzmann.pmf(support, 0.123, 1)
    assert_equal(pmf_vals, support == 0)

    # N = 3 with lambda = ln(2): masses proportional to 4, 2, 1 on k=0,1,2.
    lam = np.log(2)
    pmf_vals = boltzmann.pmf(support, lam, 3)
    assert_allclose(pmf_vals, [0, 0, 0, 4/7, 2/7, 1/7, 0, 0], rtol=1e-13)

    cdf_vals = boltzmann.cdf(support, lam, 3)
    assert_allclose(cdf_vals, [0, 0, 0, 4/7, 6/7, 1, 1, 1], rtol=1e-13)
def test_betabinom_a_and_b_unity():
    # Limiting case: betabinom(n, 1, 1) coincides with the discrete
    # uniform distribution on {0, ..., n}.
    n = 20
    support = np.arange(n + 1)
    pmf_vals = betabinom(n, 1, 1).pmf(support)
    uniform_pmf = np.full(n + 1, 1 / (n + 1))
    assert_almost_equal(pmf_vals, uniform_pmf)
def test_betabinom_bernoulli():
    """betabinom(1, a, b) matches bernoulli(a / (a + b)) on {0, 1}."""
    alpha, beta = 2.3, 0.63
    outcomes = np.arange(2)
    assert_almost_equal(betabinom(1, alpha, beta).pmf(outcomes),
                        bernoulli(alpha / (alpha + beta)).pmf(outcomes))
| [
"scipy.stats.boltzmann.cdf",
"numpy.testing.assert_equal",
"numpy.repeat",
"scipy.stats.betabinom",
"numpy.testing.assert_allclose",
"numpy.log",
"numpy.testing.assert_almost_equal",
"scipy.stats.bernoulli.logpmf",
"scipy.stats.hypergeom.logpmf",
"scipy.stats.boltzmann.pmf",
"numpy.arange",
"s... | [((402, 430), 'scipy.stats.hypergeom.logpmf', 'hypergeom.logpmf', (['k', 'N', 'K', 'n'], {}), '(k, N, K, n)\n', (418, 430), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((445, 481), 'scipy.stats.hypergeom.logpmf', 'hypergeom.logpmf', (['(n - k)', 'N', '(N - K)', 'n'], {}), '(n - k, N, N - K, n)\n', (461, 481), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((496, 532), 'scipy.stats.hypergeom.logpmf', 'hypergeom.logpmf', (['(K - k)', 'N', 'K', '(N - n)'], {}), '(K - k, N, K, N - n)\n', (512, 532), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((547, 575), 'scipy.stats.hypergeom.logpmf', 'hypergeom.logpmf', (['k', 'N', 'n', 'K'], {}), '(k, N, n, K)\n', (563, 575), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((580, 629), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['logpmf1', 'logpmf2'], {'decimal': '(12)'}), '(logpmf1, logpmf2, decimal=12)\n', (599, 629), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((634, 683), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['logpmf1', 'logpmf3'], {'decimal': '(12)'}), '(logpmf1, logpmf3, decimal=12)\n', (653, 683), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((688, 737), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['logpmf1', 'logpmf4'], {'decimal': '(12)'}), '(logpmf1, logpmf4, decimal=12)\n', (707, 737), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((873, 901), 'scipy.stats.hypergeom.logpmf', 'hypergeom.logpmf', (['k', 'N', 'K', 'n'], {}), '(k, N, K, n)\n', (889, 901), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((925, 951), 'scipy.stats.bernoulli.logpmf', 'bernoulli.logpmf', (['k', '(K / N)'], {}), '(k, K / N)\n', (941, 951), False, 'from scipy.stats import betabinom, 
hypergeom, bernoulli, boltzmann\n'), ((954, 1021), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hypergeom_logpmf', 'bernoulli_logpmf'], {'decimal': '(12)'}), '(hypergeom_logpmf, bernoulli_logpmf, decimal=12)\n', (973, 1021), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1066, 1082), 'numpy.arange', 'np.arange', (['(-3)', '(5)'], {}), '(-3, 5)\n', (1075, 1082), True, 'import numpy as np\n'), ((1102, 1128), 'scipy.stats.boltzmann.pmf', 'boltzmann.pmf', (['k', '(0.123)', 'N'], {}), '(k, 0.123, N)\n', (1115, 1128), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((1155, 1180), 'numpy.testing.assert_equal', 'assert_equal', (['p', 'expected'], {}), '(p, expected)\n', (1167, 1180), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1192, 1201), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1198, 1201), True, 'import numpy as np\n'), ((1220, 1244), 'scipy.stats.boltzmann.pmf', 'boltzmann.pmf', (['k', 'lam', 'N'], {}), '(k, lam, N)\n', (1233, 1244), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((1295, 1335), 'numpy.testing.assert_allclose', 'assert_allclose', (['p', 'expected'], {'rtol': '(1e-13)'}), '(p, expected, rtol=1e-13)\n', (1310, 1335), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1345, 1369), 'scipy.stats.boltzmann.cdf', 'boltzmann.cdf', (['k', 'lam', 'N'], {}), '(k, lam, N)\n', (1358, 1369), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((1418, 1458), 'numpy.testing.assert_allclose', 'assert_allclose', (['c', 'expected'], {'rtol': '(1e-13)'}), '(c, expected, rtol=1e-13)\n', (1433, 1458), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1618, 1634), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (1627, 1634), True, 'import numpy as np\n'), ((1684, 1713), 
'numpy.repeat', 'np.repeat', (['(1 / (n + 1))', '(n + 1)'], {}), '(1 / (n + 1), n + 1)\n', (1693, 1713), True, 'import numpy as np\n'), ((1718, 1750), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['p', 'expected'], {}), '(p, expected)\n', (1737, 1750), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1892, 1904), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1901, 1904), True, 'import numpy as np\n'), ((1988, 2020), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['p', 'expected'], {}), '(p, expected)\n', (2007, 2020), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n'), ((1643, 1661), 'scipy.stats.betabinom', 'betabinom', (['n', '(1)', '(1)'], {}), '(n, 1, 1)\n', (1652, 1661), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((1913, 1931), 'scipy.stats.betabinom', 'betabinom', (['(1)', 'a', 'b'], {}), '(1, a, b)\n', (1922, 1931), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n'), ((1954, 1976), 'scipy.stats.bernoulli', 'bernoulli', (['(a / (a + b))'], {}), '(a / (a + b))\n', (1963, 1976), False, 'from scipy.stats import betabinom, hypergeom, bernoulli, boltzmann\n')] |
################################################################################
# -- Default set of parameters
################################################################################
import numpy as np; import pylab as pl; import time, sys, os
import matplotlib
## the number of cores to be used for simulations
n_cores = 4
# define the NEST path if it's needed
#nest_path = '/Users/sadra/NEST/nest/ins/lib/python3.4/site-packages/'
#nest_path = '/Users/sadra/NEST/lib/python3.5/site-packages/'
#if os.path.exists(nest_path):
# sys.path.append(nest_path)
#------------- neuron params
# All base constants below are in SI units (V, F, S, s); the simulator dict
# further down converts them to mV / pF / nS / ms.
# resting potential (mV)
Ur = -70.e-3
# reversal potential of exc. (mV)
Ue = 0.e-3
# reversal potential of inh. (mV)
Ui = -75.e-3
# threshold voltage (mV)
Uth = -50.e-3
# reset potential (mV)
Ureset = -60.e-3
# membrane capacitance (F)
C = 120e-12
# leak conductance (S); corresponds to a 140 MOhm input resistance
Gl = 1./140e6
# sample Exc and Inh conductances (nS)
Be, Bi = .1, -.2
# range of Exc and Inh conductances (nS)
Be_rng = np.array([0.01, .05, .1, .15, .2, .25])
Bi_rng = np.array([-.1, -.2, -.3, -.4, -.5])
# background and stimulus conductances (nS)
Be_bkg = .1
Be_stim = .1
# exc. synaptic time constant (s)
tau_e = 1.e-3
# inh. synaptic time constant (s)
tau_i = 1.e-3
# refractory time (s)
t_ref = 2e-3
# conductance-based alpha-synapses neuron model
# Parameter dict for the simulator (presumably NEST, given the paths above --
# confirm); SI values are converted here: V -> mV (*1000), F -> pF (*1e12),
# S -> nS (*1e9), s -> ms (*1000).
neuron_params_default = \
{'C_m': C*1e12,
 'E_L': Ur*1000.,
 'E_ex': Ue*1000.,
 'E_in': Ui*1000.,
 'I_e': 0.0, # 130.0 for slow spiking...
 'V_m': Ur*1000.,
 'V_reset': Ureset*1000.,
 'V_th': Uth*1000.,
 'g_L': Gl*1e9,
 't_ref': t_ref*1000.,
 'tau_syn_ex': tau_e*1000.,
 'tau_syn_in': tau_i*1000.}
# -- simulation params
#default synaptic delay (ms)
delay_default = .1
# time resolution of simulations (ms)
dt = .1
# background rate (sp/s)
r_bkg = 10000.-400.
# rate of perturbation (sp/s); negative means the perturbation removes input
r_stim = -400.
# transient time to discard the data (ms)
Ttrans = 500.
# simulation time before perturbation (ms)
Tblank= 500.
# simulation time of perturbation (ms)
Tstim = 500.
# time after perturbation (presumably ms, like the other durations -- confirm)
Tpost = 500
# number of trials
Ntrials = 5
# N: total population size (Exc + Inh)
# frac: fraction of Inh neurons
def set_total_population_size(N_total, frac = .2):
    """Set the module-level population sizes.

    N_total -- total number of neurons (Exc + Inh)
    frac    -- fraction of the population that is inhibitory

    Updates the globals N (total), NI (inhibitory) and NE (excitatory)
    and reports the resulting split.
    """
    global N, NE, NI
    N = N_total
    NI = int(frac*N)   # inhibitory subpopulation (rounded down)
    NE = N - NI        # the remainder is excitatory
    print("Setting %s cells total, %s E cells, %s I cells"%(N,NE,NI))
# Initialise the population sizes with the default only if they were not
# already defined (e.g. by an earlier run in an interactive session).
if not 'N' in locals():
    set_total_population_size(2000)
# range of the size of Inh perturbations
# (fractions of the inhibitory population NI, truncated to integers)
nn_stim_rng = (np.array([0.1, .25, .5, .75, 1])*NI).astype('int')
# single cell type
# (presumably the simulator's adaptive-exponential conductance-based
# integrate-and-fire model name -- confirm against the simulator docs)
cell_type = 'aeif_cond_alpha'
# -- default settings for plotting figures
# (comment out for conventional Python format)
# Global matplotlib/pylab styling.  Wrapped in try/except because a failure
# here is purely cosmetic and must not abort a simulation run.
matplotlib.rc('font', serif='sans-serif')
# single font size applied to all text elements below
SIZE = 15
try:
    pl.rc('font', size=SIZE)          # controls default text sizes
    pl.rc('axes', titlesize=SIZE)     # fontsize of the axes title
    pl.rc('axes', labelsize=SIZE)     # fontsize of the x and y labels
    pl.rc('xtick', labelsize=SIZE)    # fontsize of the tick labels
    pl.rc('ytick', labelsize=SIZE)    # fontsize of the tick labels
    pl.rc('legend', fontsize=SIZE)    # legend fontsize
    pl.rc('figure', titlesize=SIZE)  # fontsize of the figure title
except:
    pass # Just cosmetic...
# half-frame axes
def HalfFrame(ax):
    """Strip the top/right frame of a matplotlib axes; keep ticks bottom/left."""
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
################################################################################
################################################################################
################################################################################
| [
"numpy.array",
"matplotlib.rc",
"pylab.rc"
] | [((997, 1041), 'numpy.array', 'np.array', (['[0.01, 0.05, 0.1, 0.15, 0.2, 0.25]'], {}), '([0.01, 0.05, 0.1, 0.15, 0.2, 0.25])\n', (1005, 1041), True, 'import numpy as np\n'), ((1046, 1086), 'numpy.array', 'np.array', (['[-0.1, -0.2, -0.3, -0.4, -0.5]'], {}), '([-0.1, -0.2, -0.3, -0.4, -0.5])\n', (1054, 1086), True, 'import numpy as np\n'), ((2775, 2816), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'serif': '"""sans-serif"""'}), "('font', serif='sans-serif')\n", (2788, 2816), False, 'import matplotlib\n'), ((2837, 2861), 'pylab.rc', 'pl.rc', (['"""font"""'], {'size': 'SIZE'}), "('font', size=SIZE)\n", (2842, 2861), True, 'import pylab as pl\n'), ((2897, 2926), 'pylab.rc', 'pl.rc', (['"""axes"""'], {'titlesize': 'SIZE'}), "('axes', titlesize=SIZE)\n", (2902, 2926), True, 'import pylab as pl\n'), ((2961, 2990), 'pylab.rc', 'pl.rc', (['"""axes"""'], {'labelsize': 'SIZE'}), "('axes', labelsize=SIZE)\n", (2966, 2990), True, 'import pylab as pl\n'), ((3029, 3059), 'pylab.rc', 'pl.rc', (['"""xtick"""'], {'labelsize': 'SIZE'}), "('xtick', labelsize=SIZE)\n", (3034, 3059), True, 'import pylab as pl\n'), ((3095, 3125), 'pylab.rc', 'pl.rc', (['"""ytick"""'], {'labelsize': 'SIZE'}), "('ytick', labelsize=SIZE)\n", (3100, 3125), True, 'import pylab as pl\n'), ((3161, 3191), 'pylab.rc', 'pl.rc', (['"""legend"""'], {'fontsize': 'SIZE'}), "('legend', fontsize=SIZE)\n", (3166, 3191), True, 'import pylab as pl\n'), ((3215, 3246), 'pylab.rc', 'pl.rc', (['"""figure"""'], {'titlesize': 'SIZE'}), "('figure', titlesize=SIZE)\n", (3220, 3246), True, 'import pylab as pl\n'), ((2583, 2618), 'numpy.array', 'np.array', (['[0.1, 0.25, 0.5, 0.75, 1]'], {}), '([0.1, 0.25, 0.5, 0.75, 1])\n', (2591, 2618), True, 'import numpy as np\n')] |
import numpy as np
def empirical_top_ranking_probs(orderings):
    """Return, for each item, the fraction of orderings that rank it first.

    orderings -- (n, m) array of n observed orderings whose entries are the
    1-based item labels; column 0 holds the top-ranked item of each ordering.

    Returns a list of m probabilities, one per item 1..m.
    """
    n_samples, n_items = orderings.shape
    top_ranked = orderings[:, 0]
    return [np.count_nonzero(top_ranked == item) / n_samples
            for item in range(1, n_items + 1)]
def empirical_better_than(orderings):
    """Return the (m, m) matrix of empirical pairwise win probabilities.

    orderings -- (n, m) array of n observed orderings whose entries are the
    1-based item labels; earlier columns mean better ranks.

    Returns probs where probs[i, j] is the fraction of orderings in which
    item i + 1 is ranked ahead of item j + 1.  The diagonal stays zero.

    Bug fix: the result is an item-by-item matrix indexed by (i, j) with both
    in range(m), so it must be allocated as (m, m); the original allocated
    (n, m), which raises an IndexError whenever n < m and leaves spurious
    all-zero rows when n > m.
    """
    def _index_of(arr, elem):
        # First position of `elem` in `arr`, or -1 if absent
        # (preserves the original indexOf semantics).
        for idx, val in enumerate(arr):
            if val == elem:
                return idx
        return -1
    n, m = orderings.shape
    probs = np.zeros((m, m))  # was np.zeros((n, m)) -- see docstring
    for i in range(m):
        for j in range(m):
            if i != j:
                wins = 0
                for order in orderings:
                    if _index_of(order, i + 1) < _index_of(order, j + 1):
                        wins += 1
                probs[i, j] = wins / n
    return probs
def empirical_top_k(orderings):
    """Return the (m, m) matrix of empirical top-k membership probabilities.

    orderings -- (n, m) array of n observed orderings whose entries are the
    1-based item labels; earlier columns mean better ranks.

    Returns probs where probs[k, j] is the fraction of orderings in which
    item j + 1 appears among the first k + 1 positions.

    Bug fix: the result matrix is indexed by (rank cutoff, item), both in
    range(m), so it must be allocated as (m, m); the original allocated
    (n, m), which raises an IndexError when n < m and leaves spurious
    all-zero rows when n > m.
    """
    def _index_of(arr, elem):
        # First position of `elem` in `arr`, or -1 if absent; an absent item
        # therefore always counts as inside the top k, matching the original
        # indexOf semantics.
        for idx, val in enumerate(arr):
            if val == elem:
                return idx
        return -1
    n, m = orderings.shape
    probs = np.zeros((m, m))  # was np.zeros((n, m)) -- see docstring
    for k in range(m):
        for j in range(m):
            hits = 0
            for order in orderings:
                if _index_of(order, j + 1) <= k:
                    hits += 1
            probs[k, j] = hits / n
    return probs
"numpy.zeros"
] | [((461, 477), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (469, 477), True, 'import numpy as np\n'), ((920, 936), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (928, 936), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from bokeh.models import Band, HoverTool
from tqdm import tqdm
import timeit
import warnings
from copy import deepcopy
from scipy.stats import norm
import time
import multiprocessing
from joblib import Parallel, delayed
from copy import deepcopy, copy
from bokeh.plotting import ColumnDataSource, figure
import scipy
from scipy import interp
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from ..utils import binary_metrics, dict_median, smooth
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
import numpy as np
import pandas as pd
from bokeh.models import Band, HoverTool
from tqdm import tqdm
import timeit
from copy import deepcopy
from scipy.stats import norm
import time
import multiprocessing
from joblib import Parallel, delayed
from copy import deepcopy, copy
from bokeh.plotting import ColumnDataSource, figure
import scipy
from scipy import interp
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from ..utils import binary_evaluation
def roc(Y, stat, test=None, bootnum=100, legend=True, grid_line=False, label_font_size="10pt", xlabel="1 - Specificity", ylabel="Sensitivity", width=320, height=315, method='BCA', plot='data', legend_basic=False):
    """Build a bokeh ROC-curve figure, optionally with bootstrap confidence bands.

    Parameters
    ----------
    Y : array-like of binary class labels.
    stat : array-like of predicted scores for Y.
    test : None, or a (y_true, y_score) pair drawn as a second "Test" curve.
    bootnum : number of stratified bootstrap resamples; <= 1 disables the
        confidence bands entirely.
    method : 'BCA', 'Per' or 'CPer' -- confidence-interval method; dispatches
        to the bca_method/per_method/cper_method helpers defined elsewhere in
        this module.
    plot : 'data' draws the observed ROC curve as the midline, 'median' keeps
        the bootstrap midline; anything else raises ValueError.
    legend_basic : when True, only "Train"/"Test" labels are used and the
        function returns (fig, stat_ib) instead of just fig.

    Remaining keyword arguments control figure cosmetics.
    NOTE(review): label_font_size is accepted but never used in this body.

    Returns
    -------
    A bokeh figure, or (figure, stat_ib) when legend_basic is True, where
    stat_ib collects binary_evaluation summary statistics (with bootstrap
    CIs when bootnum > 1).
    """
    # Set positive
    # (pick pos_label so the ROC is computed in the better-than-chance direction)
    auc_check = roc_auc_score(Y, stat)
    if auc_check > 0.5:
        pos = 1
    else:
        pos = 0
    # Set Linspace for FPR
    fpr_linspace = np.linspace(0, 1, 1000) # Make it 1000
    # Calculate for STAT
    fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
    auc_stat = metrics.auc(fpr_stat, tpr_stat)
    # Drop intermediates when fpr = 0
    tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
    tpr_list = tpr_stat
    # tpr0_stat = tpr_stat[fpr_stat == 0][-1]
    # tpr_stat = np.concatenate([[tpr0_stat], tpr_stat[fpr_stat > 0]])
    # fpr_stat = np.concatenate([[0], fpr_stat[fpr_stat > 0]])
    # # Vertical averaging
    # idx = [np.abs(i - fpr_stat).argmin() for i in fpr_linspace]
    # tpr_list = np.array(tpr_stat[idx])
    # Summary statistics on the full training data (dict -> plain list).
    binary_stats_train_dict = binary_evaluation(Y, stat)
    binary_stats_train = []
    for key, value in binary_stats_train_dict.items():
        binary_stats_train.append(value)
    binary_stats_train = np.array(binary_stats_train)
    binary_stats_train_boot = []
    tpr_bootstat = []
    # Bootstrap loop: ROC and summary stats per stratified resample.
    if bootnum > 1:
        for i in range(bootnum):
            bootidx = resample(list(range(len(Y))), stratify=Y) # Default stratified
            # Get Yscore and Y for each bootstrap and calculate
            Yscore_boot = stat[bootidx]
            Ytrue_boot = Y[bootidx]
            fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
            auc_boot = metrics.auc(fpr_boot, tpr_boot)
            if auc_boot < 0.5:
                fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
            bstat_loop = binary_evaluation(Ytrue_boot, Yscore_boot)
            bstat_list = []
            for key, value in bstat_loop.items():
                bstat_list.append(value)
            binary_stats_train_boot.append(bstat_list)
            # Drop intermediates when fpr = 0
            tpr0_boot = tpr_boot[fpr_boot == 0][-1]
            tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
            fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
            # Vertical averaging
            idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
            tpr_bootstat.append(np.array(tpr_boot[idx]))
        binary_stats_train_boot = np.array(binary_stats_train_boot)
    # Jackknife (leave-one-out) pass -- only needed for the BCA intervals.
    if bootnum > 1:
        if method == 'BCA':
            binary_stats_jack_boot = []
            jackidx = []
            base = np.arange(0, len(Y))
            for i in base:
                jack_delete = np.delete(base, i)
                jackidx.append(jack_delete)
            tpr_jackstat = []
            for i in jackidx:
                # Get Yscore and Y for each bootstrap and calculate
                Yscore_jack = stat[i]
                Ytrue_jack = Y[i]
                fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
                auc_jack = metrics.auc(fpr_jack, tpr_jack)
                # NOTE(review): this condition reads auc_boot (left over from the
                # bootstrap loop above); auc_jack computed just above is unused.
                # Looks like a copy-paste slip -- confirm intent.
                if auc_boot < 0.5:
                    fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=abs(1 - pos), drop_intermediate=False)
                jstat_loop = binary_evaluation(Ytrue_jack, Yscore_jack)
                jstat_list = []
                for key, value in jstat_loop.items():
                    jstat_list.append(value)
                binary_stats_jack_boot.append(jstat_list)
                # Drop intermediates when fpr = 0
                # NOTE(review): tpr_boot/fpr_boot here also look like they should
                # be tpr_jack/fpr_jack -- confirm.
                tpr0_jack = tpr_boot[fpr_boot == 0][-1]
                tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
                fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
                # Vertical averaging
                idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
                tpr_jackstat.append(np.array(tpr_jack[idx]))
            binary_stats_jack_boot = np.array(binary_stats_jack_boot)
    # Confidence intervals: dispatch on the requested interval method.
    if bootnum > 1:
        if method == 'BCA':
            tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
            tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
            stat_ib = bca_method(binary_stats_train_boot, binary_stats_train, binary_stats_jack_boot)
        elif method == 'Per':
            tpr_ib = per_method(tpr_bootstat, tpr_list)
            tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
            stat_ib = per_method(binary_stats_train_boot, binary_stats_train)
            stat_ib = list(stat_ib)
        elif method == 'CPer':
            tpr_ib = cper_method(tpr_bootstat, tpr_list)
            tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
            stat_ib = cper_method(binary_stats_train_boot, binary_stats_train)
            stat_ib = list(stat_ib)
        else:
            # NOTE(review): the message mentions 'Perc' but the accepted
            # spelling checked above is 'Per'.
            raise ValueError("bootmethod has to be 'BCA', 'Perc', or 'CPer'.")
        #stat_ib = np.array(stat_ib).T
        #print(stat_ib)
        # ROC up
        # for i in range(len(tpr_ib.T)):
        #     for j in range(1, len(tpr_ib)):
        #         if tpr_ib[j, i] < tpr_ib[j - 1, i]:
        #             tpr_ib[j, i] = tpr_ib[j - 1, i]
        # Get tpr mid
        if method != 'Per':
            tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
            for i in range(len(stat_ib)):
                stat_ib[i][2] = binary_stats_train[i]
    else:
        # No bootstrap: all three columns (low/upp/mid) carry the same curve.
        tpr_ib = []
        tpr_ib.append(tpr_list)
        tpr_ib.append(tpr_list)
        tpr_ib.append(tpr_list)
        tpr_ib = np.array(tpr_ib)
        tpr_ib = tpr_ib.T
        tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
        tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0) # Add end 1
        binary_stats_train_dict = binary_evaluation(Y, stat)
        binary_stats_train = []
        for key, value in binary_stats_train_dict.items():
            binary_stats_train.append(value)
        stat_ib = []
        stat_ib.append(binary_stats_train)
        stat_ib.append(binary_stats_train)
        stat_ib.append(binary_stats_train)
    # Test if available
    if test is not None:
        test_y = test[0]
        test_ypred = test[1]
        fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
        auc_test = metrics.auc(fpr_test, tpr_test)
        binary_stats_test_dict = binary_evaluation(test_y, test_ypred)
        binary_stats_test = []
        for key, value in binary_stats_test_dict.items():
            binary_stats_test.append(value)
        stat_ib.append(binary_stats_test)
        # Drop intermediates when fpr = 0
        tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
        tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
        tpr_test = np.concatenate([tpr_test, [1]])
        # Drop intermediates when fpr = 0
        # tpr0_test = tpr_test[fpr_test == 0][-1]
        # tpr_test = np.concatenate([[tpr0_test], tpr_test[fpr_test > 0]])
        # fpr_test = np.concatenate([[0], fpr_test[fpr_test > 0]])
        # # Vertical averaging
        # idx_test = [np.abs(i - fpr_test).argmin() for i in fpr_linspace]
        # tpr_test = tpr_test[idx_test]
        # tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
    fpr_linspace = np.insert(fpr_linspace, 0, 0) # Add starting 0
    fpr_linspace = np.concatenate((fpr_linspace, [1])) # Add end 1
    # if 'data' plot original data instead of median
    if plot == 'data':
        tpr_list_linspace = np.concatenate([[0], tpr_list]) # Add starting 0
        tpr_list_linspace = np.concatenate([tpr_list_linspace, [1]]) # Add starting 0
        tpr_ib[:, 2] = tpr_list_linspace
    elif plot == 'median':
        pass
    else:
        raise ValueError("plot must be 'data' or 'median'")
    # Check upper limit / lower limit
    # (clamp the band so the midline never escapes its own interval)
    for i in tpr_ib:
        if i[0] > i[2]:
            i[0] = i[2]
        if i[1] < i[2]:
            i[1] = i[2]
    # Calculate AUC
    auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
    auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
    auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
    auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
    # Plot
    spec = 1 - fpr_linspace
    ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
    # NOTE(review): ci_oob is computed but never used below.
    ci_oob = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
    fig = figure(title="",
                 plot_width=width,
                 plot_height=height,
                 x_axis_label=xlabel,
                 y_axis_label=ylabel,
                 x_range=(-0.06, 1.06),
                 y_range=(-0.06, 1.06))
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1) # Equal Distribution Line
    # Plot IB
    data_ib = {"x": fpr_linspace,
               "y": tpr_ib[:, 2],
               "lowci": tpr_ib[:, 0],
               "uppci": tpr_ib[:, 1],
               "spec": spec,
               "ci": ci_ib}
    source_ib = ColumnDataSource(data=data_ib)
    # Line IB
    if bootnum > 1:
        if legend_basic == True:
            legend_ib = "Train"
        else:
            legend_ib = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
        figline_ib = fig.line("x",
                              "y",
                              color="green",
                              line_width=2.5,
                              alpha=0.7,
                              legend=legend_ib,
                              source=source_ib)
        fig.add_tools(HoverTool(renderers=[figline_ib],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
        # CI Band IB
        figband_ib = Band(base="x",
                          lower="lowci",
                          upper="uppci",
                          level="underlay",
                          fill_alpha=0.1,
                          line_width=0.5,
                          line_color="black",
                          fill_color="green",
                          source=source_ib)
        fig.add_layout(figband_ib)
    else:
        if legend_basic == True:
            legend_ib = "Train"
        else:
            legend_ib = "Train (AUC = {:.2f})".format(auc_ib[2])
        figline_ib = fig.line("x",
                              "y",
                              color="green",
                              line_width=2.5,
                              alpha=0.7,
                              legend=legend_ib,
                              source=source_ib)
        fig.add_tools(HoverTool(renderers=[figline_ib],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
    # Line Test
    if test is not None:
        if legend_basic == True:
            legend_oob = "Test"
        else:
            legend_oob = "Test (AUC = {:.2f})".format(auc_test)
        # Plot IB
        data_test = {"x": fpr_linspace,
                     "y": tpr_test,
                     "spec": spec}
        source_test = ColumnDataSource(data=data_test)
        figline_test = fig.line("x",
                                "y",
                                color="orange",
                                line_width=2.5,
                                alpha=0.7,
                                legend=legend_oob,
                                source=source_test)
        fig.add_tools(HoverTool(renderers=[figline_test],
                                tooltips=[("Specificity", "@spec{1.111}"),
                                          ("Sensitivity", "@y{1.111}"), ]))
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False
    fig.legend.visible = False
    # Legend: either bokeh's own legend, or hand-drawn label boxes.
    if legend == True:
        if legend_basic == True:
            fig.legend.visible = True
            fig.legend.location = "bottom_right"
        else:
            if test is None:
                oob_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
                oob_text_add = Label(x=0.38, y=0.02,
                                     text=oob_text, render_mode='css', text_font_size= '9pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=1,line_color='black')
                fig.circle(0.34,0.06,color='green',size=8)
            else:
                ib_text = "Train (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
                oob_text = "Test (AUC = {:.2f})".format(auc_test)
                ib_text_add = Label(x=0.38, y=0.10,
                                    text=ib_text, render_mode='canvas', text_font_size= '9pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.38, y=0.02,
                                     text=oob_text, render_mode='canvas', text_font_size= '9pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1,line_color='black')
                fig.circle(0.34,0.14,color='green',size=8)
                fig.circle(0.34,0.06,color='purple',size=8)
    if legend_basic == True:
        return fig, stat_ib
    else:
        return fig
def roc_boot(Y,
stat,
bootstat,
bootstat_oob,
bootidx,
bootidx_oob,
method,
smoothval=0,
jackstat=None,
jackidx=None,
xlabel="1 - Specificity",
ylabel="Sensitivity",
width=320,
height=315,
label_font_size="10pt",
legend=True,
grid_line=False,
plot_num=0,
plot='data',
test=None,
legend_basic=False,
train=None,
ci_only=False):
# Set positive
auc_check = roc_auc_score(Y, stat)
if auc_check > 0.5:
pos = 1
else:
pos = 0
# Set Linspace for FPR
fpr_linspace = np.linspace(0, 1, 1000) # Make it 1000
# Calculate for STAT
fpr_stat, tpr_stat, _ = metrics.roc_curve(Y, stat, pos_label=pos, drop_intermediate=False)
auc_stat = metrics.auc(fpr_stat, tpr_stat)
tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
tpr_list = tpr_stat
# Calculate for BOOTSTAT (IB)
pos_loop = []
tpr_bootstat = []
for i in range(len(bootidx)):
# Get Yscore and Y for each bootstrap and calculate
Yscore_boot = bootstat[i]
Ytrue_boot = Y[bootidx[i]]
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate=False)
auc_boot = metrics.auc(fpr_boot, tpr_boot)
if auc_boot > 0.5:
pos_loop.append(pos)
else:
fpr_boot, tpr_boot, _ = metrics.roc_curve(Ytrue_boot, Yscore_boot, pos_label=abs(1 - pos), drop_intermediate=False)
pos_loop.append(abs(1 - pos))
# Drop intermediates when fpr = 0
tpr0_boot = tpr_boot[fpr_boot == 0][-1]
tpr_boot = np.concatenate([[tpr0_boot], tpr_boot[fpr_boot > 0]])
fpr_boot = np.concatenate([[0], fpr_boot[fpr_boot > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_boot).argmin() for i in fpr_linspace]
tpr_bootstat.append(np.array(tpr_boot[idx]))
# tpr_boot = interp(fpr_linspace, fpr_boot, tpr_boot)
# tpr_bootstat.append(tpr_boot)
if method == 'BCA':
tpr_jackstat = []
for i in range(len(jackidx)):
# Get Yscore and Y for each bootstrap and calculate
Yscore_jack = jackstat[i]
Ytrue_jack = Y[jackidx[i]]
fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate=False)
auc_jack = metrics.auc(fpr_jack, tpr_jack)
# if auc_jack < 0.5:
# fpr_jack, tpr_jack, _ = metrics.roc_curve(Ytrue_jack, Yscore_jack, pos_label=abs(1 - pos), drop_intermediate=False)
# Drop intermediates when fpr = 0
tpr0_jack = tpr_jack[fpr_jack == 0][-1]
tpr_jack = np.concatenate([[tpr0_jack], tpr_jack[fpr_jack > 0]])
fpr_jack = np.concatenate([[0], fpr_jack[fpr_jack > 0]])
# Vertical averaging
idx = [np.abs(i - fpr_jack).argmin() for i in fpr_linspace]
tpr_jackstat.append(np.array(tpr_jack[idx]))
#save_stat = [tpr_bootstat, tpr_list, tpr_jackstat, fpr_linspace]
if method == 'BCA':
tpr_ib = bca_method(tpr_bootstat, tpr_list, tpr_jackstat)
if method == 'Per':
tpr_ib = per_method(tpr_bootstat, tpr_list)
if method == 'CPer':
tpr_ib = cper_method(tpr_bootstat, tpr_list)
tpr_ib = np.array(tpr_ib)
# ROC up
if method != 'Per':
for i in range(len(tpr_ib.T)):
for j in range(1, len(tpr_ib)):
if tpr_ib[j, i] < tpr_ib[j - 1, i]:
tpr_ib[j, i] = tpr_ib[j - 1, i]
# # Check upper limit / lower limit
if method != 'Per':
for i in range(len(tpr_ib)):
if tpr_ib[i][0] > tpr_list[i]:
tpr_ib[i][0] = tpr_list[i]
if tpr_ib[i][1] < tpr_list[i]:
tpr_ib[i][1] = tpr_list[i]
tpr_ib = np.concatenate((np.zeros((1, 3)), tpr_ib), axis=0) # Add starting 0
tpr_ib = np.concatenate((tpr_ib, np.ones((1, 3))), axis=0) # Add end 1
# Get tpr mid
if method != 'Per':
tpr_ib[:, 2] = (tpr_ib[:, 0] + tpr_ib[:, 1]) / 2
#print('testing.')
# Calculate for OOB
auc_bootstat_oob = []
tpr_bootstat_oob = []
for i in range(len(bootidx_oob)):
# Get Yscore and Y for each bootstrap oob and calculate
Yscore_boot_oob = bootstat_oob[i]
Ytrue_boot_oob = Y[bootidx_oob[i]]
fpr_boot_oob, tpr_boot_oob, _ = metrics.roc_curve(Ytrue_boot_oob, Yscore_boot_oob, pos_label=pos, drop_intermediate=False)
auc_boot_oob = metrics.auc(fpr_boot_oob, tpr_boot_oob)
# if auc_boot_oob < 0.5:
# fpr_boot_oob, tpr_boot_oob, _ = metrics.roc_curve(Ytrue_boot_oob, Yscore_boot_oob, pos_label=abs(1-pos_loop[i]), drop_intermediate=False)
auc_boot_oob = metrics.auc(fpr_boot_oob, tpr_boot_oob)
auc_bootstat_oob.append(auc_boot_oob)
# Drop intermediates when fpr = 0
tpr0_boot_oob = tpr_boot_oob[fpr_boot_oob == 0][-1]
tpr_boot_oob = np.concatenate([[tpr0_boot_oob], tpr_boot_oob[fpr_boot_oob > 0]])
fpr_boot_oob = np.concatenate([[0], fpr_boot_oob[fpr_boot_oob > 0]])
# Vertical averaging
idx_oob = [np.abs(i - fpr_boot_oob).argmin() for i in fpr_linspace]
tpr_bootstat_oob.append(np.array(tpr_boot_oob[idx_oob]))
#tpr_boot_oob = interp(fpr_linspace, fpr_boot_oob, tpr_boot_oob)
#tpr_bootstat_oob.append(tpr_boot_oob)
# Get CI for tpr
tpr_oob_lowci = np.percentile(tpr_bootstat_oob, 2.5, axis=0)
tpr_oob_medci = np.percentile(tpr_bootstat_oob, 50, axis=0)
tpr_oob_uppci = np.percentile(tpr_bootstat_oob, 97.5, axis=0)
tpr_oob = np.array([tpr_oob_lowci, tpr_oob_uppci, tpr_oob_medci]).T
#tpr_oob = per_method(tpr_bootstat_oob, tpr_list)
auc_oob = per_method(auc_bootstat_oob, auc_stat)
tpr_oob = np.concatenate((np.zeros((1, 3)), tpr_oob), axis=0) # Add starting 0
tpr_oob = np.concatenate((tpr_oob, np.ones((1, 3))), axis=0) # Add end 1
# ROC up
if method != 'Per':
for i in range(len(tpr_oob.T)):
for j in range(1, len(tpr_oob)):
if tpr_oob[j, i] < tpr_oob[j - 1, i]:
tpr_oob[j, i] = tpr_oob[j - 1, i]
# Test if available
if test is not None:
test_y = test[0]
test_ypred = test[1]
fpr_test, tpr_test, _ = metrics.roc_curve(test_y, test_ypred, pos_label=pos, drop_intermediate=False)
auc_test = metrics.auc(fpr_test, tpr_test)
# Drop intermediates when fpr = 0
# tpr0_test= tpr_test[fpr_test == 0][-1]
# tpr_test = np.concatenate([[tpr0_test], tpr_test[fpr_test > 0]])
# fpr_test = np.concatenate([[0], fpr_test[fpr_test > 0]])
# # Vertical averaging
# idx_test = [np.abs(i - fpr_test).argmin() for i in fpr_linspace]
# tpr_test = tpr_test[idx_test]
tpr_test = interp(fpr_linspace, fpr_test, tpr_test)
tpr_test = np.insert(tpr_test, 0, 0) # Add starting 0
tpr_test = np.concatenate((tpr_test,[1]))
tpr_oob[:, 2] = tpr_test
# if 'data' plot original data instead of median
if train is not None:
fpr_stat, tpr_stat, _ = metrics.roc_curve(train[0], train[1], pos_label=pos, drop_intermediate=False)
tpr_stat = interp(fpr_linspace, fpr_stat, tpr_stat)
tpr_list = tpr_stat
if plot == 'data':
tpr_list_linspace = np.concatenate([[0], tpr_list]) # Add starting 0
tpr_list_linspace = np.concatenate([tpr_list_linspace,[1]]) # Add starting 0
tpr_ib[:,2] = tpr_list_linspace
elif plot == 'median':
pass
else:
pass
# else:
# raise ValueError("plot must be 'data' or 'median'")
fpr_linspace = np.insert(fpr_linspace, 0, 0) # Add starting 0
fpr_linspace = np.concatenate((fpr_linspace, [1])) # Add end 1
# Calculate AUC
auc_ib_low = metrics.auc(fpr_linspace, tpr_ib[:, 0])
auc_ib_upp = metrics.auc(fpr_linspace, tpr_ib[:, 1])
auc_ib_mid = metrics.auc(fpr_linspace, tpr_ib[:, 2])
auc_ib = np.array([auc_ib_low, auc_ib_upp, auc_ib_mid])
auc_oob_low = metrics.auc(fpr_linspace, tpr_oob[:, 0])
auc_oob_upp = metrics.auc(fpr_linspace, tpr_oob[:, 1])
auc_oob_mid = metrics.auc(fpr_linspace, tpr_oob[:, 2])
auc_oob = np.array([auc_oob_low, auc_oob_upp, auc_oob_mid])
# print(auc_ib)
# print(auc_oob)
# print("AUC IB {} ({},{})".format(auc_ib[2], auc_ib[0], auc_ib[1]))
# print("AUC OOB {} ({},{})".format(auc_oob[2], auc_oob[0], auc_oob[1]))
# Smooth if set
if smoothval > 1:
tpr_ib[:, 0] = smooth(tpr_ib[:, 0], smoothval)
tpr_ib[:, 1] = smooth(tpr_ib[:, 1], smoothval)
tpr_ib[:, 2] = smooth(tpr_ib[:, 2], smoothval)
tpr_oob[:, 0] = smooth(tpr_oob[:, 0], smoothval)
tpr_oob[:, 1] = smooth(tpr_oob[:, 1], smoothval)
tpr_oob[:, 2] = smooth(tpr_oob[:, 2], smoothval)
tpr_test = smooth(tpr_test, smoothval)
# Plot
spec = 1 - fpr_linspace
ci_ib = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
ci_oob = (tpr_ib[:, 1] - tpr_ib[:, 0]) / 2
fig = figure(title="",
plot_width=width,
plot_height=height,
x_axis_label=xlabel,
y_axis_label=ylabel,
x_range=(-0.06, 1.06),
y_range=(-0.06, 1.06))
fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
# Plot IB
data_ib = {"x": fpr_linspace,
"y": tpr_ib[:, 2],
"lowci": tpr_ib[:, 0],
"uppci": tpr_ib[:, 1],
"spec": spec,
"ci": ci_ib}
source_ib = ColumnDataSource(data=data_ib)
# Line IB
if plot_num in [0, 1, 2, 4]:
if legend_basic == True:
legend_text = "Train"
else:
legend_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0]) / 2)
if ci_only == False:
figline_ib = fig.line("x",
"y",
color="green",
line_width=2.5,
alpha=0.7,
legend=legend_text,
source=source_ib)
fig.add_tools(HoverTool(renderers=[figline_ib],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# CI Band IB
figband_ib = Band(base="x",
lower="lowci",
upper="uppci",
level="underlay",
fill_alpha=0.1,
line_width=0.5,
line_color="black",
fill_color="green",
source=source_ib)
fig.add_layout(figband_ib)
figlegend_ib = fig.rect([10],[20],[5],[5], color="green", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="IB (95% CI)")
# Plot OOB
data_oob = {"x": fpr_linspace,
"y": tpr_oob[:, 2],
"lowci": tpr_oob[:, 0],
"uppci": tpr_oob[:, 1],
"spec": spec,
"ci": ci_oob}
source_oob = ColumnDataSource(data=data_oob)
# Line OOB
if plot_num in [0, 1, 3, 4]:
if legend_basic == True:
legend_text = "Test"
else:
legend_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0]) / 2)
if ci_only == False:
figline = fig.line("x",
"y",
color="orange",
line_width=2.5,
alpha=0.7,
legend=legend_text,
source=source_oob)
fig.add_tools(HoverTool(renderers=[figline],
tooltips=[("Specificity", "@spec{1.111}"),
("Sensitivity", "@y{1.111} (+/- @ci{1.111})"), ]))
# CI Band OOB
figband_oob = Band(base="x",
lower="lowci",
upper="uppci",
level="underlay",
fill_alpha=0.1,
line_width=0.5,
line_color="black",
fill_color="orange",
source=source_oob)
fig.add_layout(figband_oob)
figlegend_ib = fig.rect([10],[20],[5],[5], color="orange", fill_alpha=0.1, line_width=0.5, line_color="grey", legend="OOB (95% CI)")
# Line Test
# if test is not None:
# if legend_basic == True:
# legend_text = "Test"
# else:
# legend_text = "Test (AUC = {:.2f})".format(auc_test)
# # Plot IB
# data_test = {"x": fpr_linspace,
# "y": tpr_test,
# "spec": spec}
# source_test = ColumnDataSource(data=data_test)
# figline_test = fig.line("x",
# "y",
# color="purple",
# line_width=2.5,
# alpha=0.8,
# legend=legend_text,
# line_dash="dashed",
# source=source_test)
# fig.add_tools(HoverTool(renderers=[figline_test],
# tooltips=[("Specificity", "@spec{1.111}"),
# ("Sensitivity", "@y{1.111}"), ]))
if grid_line == False:
fig.xgrid.visible = False
fig.ygrid.visible = False
# Legend Manually because of bokeh issue
ib_text = "IB (AUC = {:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text = "OOB (AUC = {:.2f} +/- {:.2f})".format(auc_oob[2], (auc_oob[1] - auc_oob[0])/2)
fig.legend.visible = False
if legend_basic == True:
fig.legend.location = "bottom_right"
fig.legend.visible = True
else:
if test is not None:
if legend == True:
ib_text_add = Label(x=0.38, y=0.18,
text=ib_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.38, y=0.10,
text=oob_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(oob_text_add)
test_text = "Test (AUC = {:.2f})".format(auc_test)
test_text_add = Label(x=0.38, y=0.02,
text=test_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(test_text_add)
fig.quad(top=0.28, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.34,0.22,color='green',size=8)
fig.circle(0.34,0.14,color='orange',size=8)
fig.circle(0.34,0.06,color='purple',size=8)
else:
if legend == True:
if plot_num in [0,1,4]:
if width == 320:
ib_text_add = Label(x=0.38, y=0.10,
text=ib_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.38, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '9pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.20, bottom=0, left=0.30, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.34,0.14,color='green',size=8)
fig.circle(0.34,0.06,color='orange',size=8)
elif width == 475:
ib_text_add = Label(x=0.52, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.52, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.17,color='green',size=8)
fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
ib_text_add = Label(x=0.22, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
oob_text_add = Label(x=0.22, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.17,0.18,color='green',size=8)
fig.circle(0.17,0.08,color='orange',size=8)
elif width == 237:
ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.38, y=0.28,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.38, y=0.19,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.4, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.30,color='green',size=8)
fig.circle(0.27,0.10,color='orange',size=8)
elif width == 190:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.30,color='green',size=8)
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 2:
if width == 475:
ib_text_add = Label(x=0.52, y=0.03,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.05,color='green',size=8)
elif width == 316:
ib_text_add = Label(x=0.30, y=0.02,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(ib_text_add)
fig.quad(top=0.10, bottom=0, left=0.20, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.25,0.05,color='green',size=8)
elif width == 237:
ib_text_1 = "IB (AUC = {:.2f}".format(auc_ib[2])
ib_text_2 = "+/- {:.2f})".format((auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.38, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.38, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.10,color='green',size=8)
elif width == 190:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 158:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f}+/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
elif width == 135:
ib_text_1 = "IB (AUC ="
ib_text_2 = "{:.2f} +/- {:.2f})".format(auc_ib[2], (auc_ib[1] - auc_ib[0])/2)
ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_1)
ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(ib_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='green',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
elif plot_num == 3:
if width == 475:
oob_text_add = Label(x=0.52, y=0.03,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4, line_color='lightgrey')
fig.circle(0.47,0.05,color='orange',size=8)
# fig.circle(0.47,0.07,color='orange',size=8)
elif width == 316:
oob_text_add = Label(x=0.22, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
fig.add_layout(oob_text_add)
fig.quad(top=0.10, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.17,0.05,color='orange',size=8)
elif width == 237:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f}+/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.2, bottom=0, left=0.20, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.27,0.10,color='orange',size=8)
elif width == 190:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 158:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
elif width == 135:
oob_text_1 = "OOB (AUC ="
oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_oob[2],(auc_oob[1] - auc_oob[0])/2)
oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_1)
oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
fig.add_layout(oob_text_add_2)
fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
fig.circle(0.20,0.10,color='orange',size=8)
else:
fig.legend.location = "bottom_right"
fig.legend.visible = True
if train is None:
return fig, auc_ib, auc_oob
else:
return fig, auc_ib, auc_oob
def roc_cv(Y_predfull, Y_predcv, Ytrue, width=450, height=350, xlabel="1-Specificity", ylabel="Sensitivity", legend=True, label_font_size="13pt", show_title=True, title_font_size="13pt", title="", plot_num=0, grid_line=False):
    """Build a bokeh ROC figure comparing a full model against its CV folds.

    The "FULL" curve comes from ``Y_predfull`` scored against ``Ytrue``.
    The "CV" curve is the per-point median of the fold curves in ``Y_predcv``
    (vertical averaging onto the FULL curve's fpr grid), drawn with a 95%
    band from the 2.5th/97.5th percentiles across folds.

    Parameters
    ----------
    Y_predfull : array-like
        Predicted scores from the model fitted on all data.
    Y_predcv : sequence of array-like
        One predicted-score vector per CV fold/resample, each aligned to Ytrue.
    Ytrue : array-like
        Binary ground-truth labels.
    width, height : int
        Figure size in pixels. NOTE(review): the manually drawn legend below
        is positioned by hard-coded width values (475/316/237/190/158/135);
        other widths fall back to bokeh's built-in legend.
    xlabel, ylabel, title : str
        Axis labels and initial title text.
    legend : bool
        If True, draw the manually positioned legend (workaround for a bokeh
        legend issue noted below).
    label_font_size, title_font_size : str
        Bokeh font-size strings, e.g. "13pt".
    show_title : bool
        If True, replace the title with an AUC FULL / AUC CV summary.
    plot_num : int
        Curve selection: 0/1/4 draw both curves, 2 draws FULL only,
        3 draws CV only.
    grid_line : bool
        If False, hide the background grid.

    Returns
    -------
    bokeh figure
        The assembled ROC plot.

    Notes
    -----
    Relies on module-level names imported elsewhere in this file:
    ``metrics``, ``roc_auc_score``, ``np``, ``figure``, ``ColumnDataSource``,
    ``HoverTool``, ``Band``, ``Label``.
    """
    # Orient the positive label so the FULL curve sits above the diagonal.
    auc_check = roc_auc_score(Ytrue, Y_predfull)
    if auc_check > 0.5:
        pos = 1
    else:
        pos = 0
    fprf, tprf, thresholdf = metrics.roc_curve(Ytrue, Y_predfull, pos_label=pos, drop_intermediate=False)
    specf = 1 - fprf
    auc_full = metrics.auc(fprf, tprf)
    auc_full_hover = [auc_full] * len(tprf)
    # Figure
    data = {"x": fprf, "y": tprf, "spec": specf, "aucfull": auc_full_hover}
    source = ColumnDataSource(data=data)
    fig = figure(title=title, plot_width=width, plot_height=height, x_axis_label=xlabel, y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))
    # Figure: add line
    # fig.line([0, 1], [0, 1], color="black", line_dash="dashed", line_width=2.5, legend="Equal Distribution Line")
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", alpha=0.8, line_width=1)
    if plot_num in [0, 1, 2, 4]:
        figline = fig.line("x", "y", color="green", line_width=2.5, alpha=0.8, legend="FULL (AUC = {:.2f})".format(auc_full), source=source)
        fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111}")]))
    else:
        pass
    # ADD CV
    # bootstrap using vertical averaging
    # fpr, tpr with drop_intermediates for fpr = 0 (useful for plot... since we plot specificity on x-axis, we don't need intermediates when fpr=0)
    fpr = fprf
    tpr = tprf
    tpr0 = tpr[fpr == 0][-1]
    tpr = np.concatenate([[tpr0], tpr[fpr > 0]])
    fpr = np.concatenate([[0], fpr[fpr > 0]])
    tpr_boot = []
    boot_stats = []  # NOTE(review): filled nowhere below; appears to be dead.
    auc_cv = []
    for i in range(len(Y_predcv)):
        # Resample and get tpr, fpr
        Yscore_res = Y_predcv[i]
        fpr_res, tpr_res, threshold_res = metrics.roc_curve(Ytrue, Yscore_res, pos_label=pos, drop_intermediate=False)
        auc_cv.append(metrics.auc(fpr_res, tpr_res))
        # Drop intermediates when fpr=0
        tpr0_res = tpr_res[fpr_res == 0][-1]
        tpr_res = np.concatenate([[tpr0_res], tpr_res[fpr_res > 0]])
        fpr_res = np.concatenate([[0], fpr_res[fpr_res > 0]])
        # Vertical averaging... use closest fpr_res to fpr, and append the corresponding tpr
        # (the comprehension's `i` is local to it and does not clobber the fold index)
        idx = [np.abs(i - fpr_res).argmin() for i in fpr]
        tpr_list = tpr_res[idx]
        tpr_boot.append(tpr_list)
    # Get CI for tpr
    tpr_lowci = np.percentile(tpr_boot, 2.5, axis=0)
    tpr_uppci = np.percentile(tpr_boot, 97.5, axis=0)
    tpr_medci = np.percentile(tpr_boot, 50, axis=0)
    # Add the starting 0
    tpr = np.insert(tpr, 0, 0)
    fpr = np.insert(fpr, 0, 0)
    tpr_lowci = np.insert(tpr_lowci, 0, 0)
    tpr_uppci = np.insert(tpr_uppci, 0, 0)
    tpr_medci = np.insert(tpr_medci, 0, 0)
    # Get CI for cv
    auc_lowci = np.percentile(auc_cv, 2.5, axis=0)
    auc_uppci = np.percentile(auc_cv, 97.5, axis=0)
    auc_medci = np.percentile(auc_cv, 50, axis=0)
    auc_ci = (auc_uppci - auc_lowci) / 2
    # NOTE(review): auc_ci_hover, auc_med_hover and tpr_ci below are computed
    # but never used later in this function.
    auc_ci_hover = [auc_ci] * len(tpr_medci)
    auc_med_hover = [auc_medci] * len(tpr_medci)
    # Concatenate tpr_ci
    tpr_ci = np.array([tpr_lowci, tpr_uppci, tpr_medci])
    # specificity and ci-interval for HoverTool
    spec2 = 1 - fpr
    ci2 = (tpr_uppci - tpr_lowci) / 2
    data2 = {"x": fpr, "y": tpr_medci, "lowci": tpr_lowci, "uppci": tpr_uppci, "spec": spec2, "ci": ci2}
    source2 = ColumnDataSource(data=data2)
    if plot_num in [0, 1, 3, 4]:
        figline = fig.line("x", "y", color="orange", line_width=2.5, alpha=0.8, legend="CV (AUC = {:.2f} +/- {:.2f})".format(auc_medci, auc_ci,), source=source2)
        fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111} (+/- @ci{1.111})")]))
        # Figure: add 95CI band
        figband = Band(base="x", lower="lowci", upper="uppci", level="underlay", fill_alpha=0.1, line_width=0.5, line_color="black", fill_color="orange", source=source2)
        fig.add_layout(figband)
    else:
        pass
    # Change font size
    if show_title is True:
        fig.title.text = "AUC FULL ({}) & AUC CV ({} +/- {})".format(np.round(auc_full, 2), np.round(auc_medci, 2), np.round(auc_ci, 2))
        fig.title.text_font_size = title_font_size
    fig.xaxis.axis_label_text_font_size = label_font_size
    fig.yaxis.axis_label_text_font_size = label_font_size
    # Extra padding
    fig.min_border_left = 20
    fig.min_border_right = 20
    fig.min_border_top = 20
    fig.min_border_bottom = 20
    # Edit legend
    fig.legend.location = "bottom_right"
    # fig.legend.label_text_font_size = "1pt"
    # fig.legend.label_text_font = "1pt"
    # if legend is False:
    #     fig.legend.visible = False
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False
    # Legend Manually because of bokeh issue
    auc_full = np.round(auc_full, 2)
    auc_cv1 = np.round(auc_medci, 2)
    auc_cv2 = np.round(auc_ci, 2)
    ib_text = "FULL (AUC = {:.2f})".format(auc_full)
    oob_text = "CV (AUC = {:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
    fig.legend.visible = False
    # Hand-placed legend: coordinates and font sizes are tuned per exact
    # figure width; unknown widths fall through to bokeh's native legend.
    if legend == True:
        if plot_num in [0,1,4]:
            # Both curves: green FULL marker + orange CV marker.
            if width == 475:
                ib_text_add = Label(x=0.52, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.52, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.17,color='green',size=8)
                fig.circle(0.47,0.07,color='orange',size=8)
            elif width == 316:
                ib_text_add = Label(x=0.30, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.30, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.25,0.18,color='green',size=8)
                fig.circle(0.25,0.08,color='orange',size=8)
            elif width == 237:
                ib_text_add = Label(x=0.30, y=0.15,
text=ib_text, render_mode='canvas', text_font_size= '6.4pt')
                fig.add_layout(ib_text_add)
                oob_text_add = Label(x=0.30, y=0.05,
text=oob_text, render_mode='canvas', text_font_size= '6.4pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.25, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.25,0.18,color='green',size=8)
                fig.circle(0.25,0.08,color='orange',size=8)
            elif width == 190:
                # Narrow figures split each legend entry across two lines.
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1, line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 158:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 135:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                ib_text_add_1 = Label(x=0.28, y=0.32,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.23,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.47, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.30,color='green',size=8)
                fig.circle(0.20,0.10,color='orange',size=8)
            else:
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
        elif plot_num == 2:
            # FULL curve only.
            if width == 475:
                ib_text_add = Label(x=0.52, y=0.03,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.05,color='green',size=8)
            elif width == 316:
                ib_text_add = Label(x=0.40, y=0.02,
text=ib_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(ib_text_add)
                fig.quad(top=0.12, bottom=0, left=0.30, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.35,0.05, color='green',size=8)
            elif width == 237:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.38, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.38, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.27,0.10,color='green',size=8)
            elif width == 190:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            elif width == 158:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0,
text=ib_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            elif width == 135:
                ib_text_1 = "FULL (AUC ="
                ib_text_2 = "{:.2f})".format(auc_full)
                ib_text_add_1 = Label(x=0.28, y=0.09,
text=ib_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_1)
                ib_text_add_2 = Label(x=0.28, y=0.00,
text=ib_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(ib_text_add_2)
                fig.quad(top=0.25, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='green',size=8)
            else:
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
        elif plot_num == 3:
            # CV curve only.
            if width == 475:
                oob_text_add = Label(x=0.52, y=0.03,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.10, bottom=0, left=0.42, right=1, color='white', alpha=0.4,line_color='lightgrey')
                fig.circle(0.47,0.05,color='orange',size=8)
                # fig.circle(0.47,0.07,color='orange',size=8)
            elif width == 316:
                oob_text_add = Label(x=0.27, y=0.02,
text=oob_text, render_mode='canvas', text_font_size= '10pt')
                fig.add_layout(oob_text_add)
                fig.quad(top=0.11, bottom=0, left=0.17, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.22,0.05,color='orange',size=8)
            elif width == 237:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.38, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.38, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.21, bottom=0, left=0.20, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.27,0.10,color='orange',size=8)
            elif width == 190:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6.8pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 158:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '6pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            elif width == 135:
                oob_text_1 = "CV (AUC ="
                oob_text_2 = "{:.2f} +/- {:.2f})".format(auc_cv1, auc_cv2)
                oob_text_add_1 = Label(x=0.28, y=0.09,
text=oob_text_1, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_1)
                oob_text_add_2 = Label(x=0.28, y=0.00,
text=oob_text_2, render_mode='canvas', text_font_size= '5pt')
                fig.add_layout(oob_text_add_2)
                fig.quad(top=0.24, bottom=0, left=0.12, right=1, color='white', alpha=1,line_color='lightgrey')
                fig.circle(0.20,0.10,color='orange',size=8)
            else:
                fig.legend.location = "bottom_right"
                fig.legend.visible = True
    return fig
def per_method(bootstat, stat):
    """Calculates bootstrap confidence intervals using the percentile bootstrap interval.

    Parameters
    ----------
    bootstat : list
        Bootstrap replicates of the statistic, one entry per resample.
    stat : np.ndarray
        Observed statistic; its ``ndim`` selects the handling path
        (0 = scalar, 1 = per-component, 2 = recurse per column).

    Returns
    -------
    np.ndarray
        ``[lower (2.5%), upper (97.5%), mid (50%)]`` bounds, one row per
        component (a single triple for a scalar statistic).
    """
    if stat.ndim == 1:
        # Vectorised over components: rows are resamples, columns are
        # components, so one percentile call replaces the per-component loop.
        # Transpose to keep the original (ncomp, 3) = [lower, upper, mid] layout.
        boot_ci = np.percentile(np.asarray(bootstat), [2.5, 97.5, 50], axis=0).T
    elif stat.ndim == 0:
        boot_ci = np.array(
            [
                np.percentile(bootstat, 2.5),
                np.percentile(bootstat, 97.5),
                np.percentile(bootstat, 50),
            ]
        )
    # Recursive component (to get ndim = 1, and append)
    else:
        ncomp = stat.shape[1]
        boot_ci = []
        for k in range(ncomp):
            bootstat_k = [item[:, k] for item in bootstat]
            boot_ci.append(per_method(bootstat_k, stat[:, k]))
        boot_ci = np.array(boot_ci)
    return boot_ci
def cper_method(bootstat, stat):
    """Calculates bootstrap confidence intervals using the bias-corrected bootstrap interval.

    Parameters
    ----------
    bootstat : list
        Bootstrap replicates of the statistic, one entry per resample.
    stat : np.ndarray
        Observed statistic; ndim == 1 is handled directly, higher
        dimensions recurse per column.

    Returns
    -------
    np.ndarray
        ``[lower, upper, mid]`` bias-corrected bounds, one row per component.
    """
    if stat.ndim == 1:
        nboot = len(bootstat)
        zalpha = norm.ppf(0.05 / 2)
        obs = stat  # Observed mean
        boot_arr = np.asarray(bootstat)  # rows: resamples, cols: components
        # Proportion of resamples where the bootstrap statistic meets or
        # exceeds the observed one (vectorised count replaces double loop).
        prop = (boot_arr >= obs).sum(axis=0) / nboot
        z0 = -norm.ppf(prop)  # bias-correction factor
        # Bias-adjusted percentile levels (new alpha)
        pct1 = 100 * norm.cdf(2 * z0 + zalpha)  # lower
        pct2 = 100 * norm.cdf(2 * z0 - zalpha)  # upper
        pct3 = 100 * norm.cdf(2 * z0)           # mid
        boot_ci = []
        for i in range(len(pct1)):
            bootstat_i = boot_arr[:, i]
            boot_ci.append(
                [
                    np.percentile(bootstat_i, pct1[i]),
                    np.percentile(bootstat_i, pct2[i]),
                    np.percentile(bootstat_i, pct3[i]),
                ]
            )
        boot_ci = np.array(boot_ci)
    # Recursive component (to get ndim = 1, and append)
    else:
        ncomp = stat.shape[1]
        boot_ci = []
        for k in range(ncomp):
            bootstat_k = [item[:, k] for item in bootstat]
            boot_ci.append(cper_method(bootstat_k, stat[:, k]))
        boot_ci = np.array(boot_ci)
    return boot_ci
def bca_method(bootstat, stat, jackstat):
    """Calculates bootstrap confidence intervals using the bias-corrected and accelerated bootstrap interval.

    Parameters
    ----------
    bootstat : list
        Bootstrap replicates of the statistic, one entry per resample.
    stat : np.ndarray
        Observed statistic; ndim == 1 handled directly, higher dims recurse.
    jackstat : list
        Jackknife (leave-one-out) replicates, used for the acceleration term.

    Returns
    -------
    np.ndarray
        ``[lower, upper, mid]`` BCa bounds, one row per component.
    """
    if stat.ndim == 1:
        nboot = len(bootstat)
        zalpha = norm.ppf(0.05 / 2)
        obs = stat  # Observed mean
        meansum = np.zeros((1, len(obs))).flatten()
        for i in range(len(obs)):
            for j in range(len(bootstat)):
                if bootstat[j][i] >= obs[i]:
                    meansum[i] = meansum[i] + 1
        prop = meansum / nboot  # Proportion of times boot mean > obs mean
        z0 = -norm.ppf(prop, loc=0, scale=1)  # bias-correction factor
        # Acceleration from jackknife skewness
        jmean = np.mean(jackstat, axis=0)
        num = np.sum((jmean - jackstat) ** 3, axis=0)
        den = np.sum((jmean - jackstat) ** 2, axis=0)
        ahat = num / (6 * den ** (3 / 2))
        # Ignore warnings here; NaN levels surface as ValueError in the
        # percentile calls below and are handled by the BC fallback.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            zL = z0 + norm.ppf(0.05 / 2, loc=0, scale=1)
            pct1 = 100 * norm.cdf((z0 + zL / (1 - ahat * zL)))
            zU = z0 + norm.ppf((1 - 0.05 / 2), loc=0, scale=1)
            pct2 = 100 * norm.cdf((z0 + zU / (1 - ahat * zU)))
            zM = z0 + norm.ppf((0.5), loc=0, scale=1)
            pct3 = 100 * norm.cdf((z0 + zM / (1 - ahat * zM)))
        boot_ci = []
        for i in range(len(pct1)):
            bootstat_i = [item[i] for item in bootstat]
            try:
                append_low = np.percentile(bootstat_i, pct1[i])
                append_upp = np.percentile(bootstat_i, pct2[i])
                append_mid = np.percentile(bootstat_i, pct3[i])
            except ValueError:
                # Fall back to BC (CPerc) as there is no skewness information.
                pct1 = 100 * norm.cdf((2 * z0 + zalpha))
                pct2 = 100 * norm.cdf((2 * z0 - zalpha))
                # BUGFIX: previously this assignment overwrote pct2 and
                # append_mid read pct2, making mid equal to the upper bound.
                pct3 = 100 * norm.cdf((2 * z0))
                append_low = np.percentile(bootstat_i, pct1[i])
                append_upp = np.percentile(bootstat_i, pct2[i])
                append_mid = np.percentile(bootstat_i, pct3[i])
            boot_ci.append([append_low, append_upp, append_mid])
        # Convert for consistency with per_method / cper_method return types.
        boot_ci = np.array(boot_ci)
    # Recursive component (to get ndim = 1, and append)
    else:
        ncomp = stat.shape[1]
        boot_ci = []
        for k in range(ncomp):
            var = [item[:, k] for item in bootstat]
            var_jstat = [item[:, k] for item in jackstat]
            boot_ci.append(bca_method(var, stat[:, k], var_jstat))
        boot_ci = np.array(boot_ci)
    return boot_ci
def get_sens_spec(Ytrue, Yscore, cuttoff_val):
    """Get sensitivity and specificity from cutoff value."""
    # Binarise scores at the cut-off: score > cut-off -> positive (1).
    predicted = np.where(np.array(Yscore) > cuttoff_val, 1, 0)
    tn, fp, fn, tp = metrics.confusion_matrix(Ytrue, predicted).ravel()
    # Sensitivity = true-positive rate; specificity = true-negative rate.
    return tp / (tp + fn), tn / (tn + fp)
def get_sens_cuttoff(Ytrue, Yscore, specificity_val):
    """Get sensitivity and cuttoff value from specificity."""
    target_fpr = 1 - specificity_val
    fpr, sensitivity, thresholds = metrics.roc_curve(
        Ytrue, Yscore, pos_label=1, drop_intermediate=False
    )
    # Index of the FPR entry closest to the requested one.
    idx = np.abs(fpr - target_fpr).argmin()
    # Guard against a perfect ROC curve: at specificity == 1 the closest
    # point may report sensitivity 0 even though (1, 1) lies on the curve,
    # in which case allow sensitivity = 1 rather than 0.
    if specificity_val == 1 and sensitivity[idx] == 0:
        for fpr_i, sens_i in zip(fpr, sensitivity):
            if fpr_i == 1 and sens_i == 1:
                return 1, 0.5
    return sensitivity[idx], thresholds[idx]
def get_spec_sens_cuttoff(Ytrue, Yscore, metric, val):
    """Return specificity, sensitivity, cutoff value provided the metric and value used.

    Parameters
    ----------
    Ytrue : array-like
        True binary labels.
    Yscore : array-like
        Predicted scores.
    metric : str
        Either "specificity" (``val`` is the target specificity) or
        "cutoffscore" (``val`` is the score cut-off).
    val : float
        Value interpreted according to ``metric``.

    Raises
    ------
    ValueError
        If ``metric`` is not one of the recognised names.
    """
    if metric == "specificity":
        specificity = val
        sensitivity, threshold = get_sens_cuttoff(Ytrue, Yscore, val)
    elif metric == "cutoffscore":
        threshold = val
        sensitivity, specificity = get_sens_spec(Ytrue, Yscore, val)
    else:
        # Previously an unknown metric fell through to an UnboundLocalError
        # at the return statement; fail with a clear message instead.
        raise ValueError(
            "metric must be 'specificity' or 'cutoffscore', got {!r}".format(metric)
        )
    return specificity, sensitivity, threshold
def get_stats(Ytrue, Yscore, specificity, parametric):
    """Calculates binary metrics given the specificity."""
    # Translate the requested specificity into a score cut-off first,
    # then evaluate the binary metrics at that cut-off.
    sensitivity, cutoffscore = get_sens_cuttoff(Ytrue, Yscore, specificity)
    return binary_metrics(Ytrue, Yscore, cut_off=cutoffscore, parametric=parametric)
| [
"bokeh.plotting.figure",
"sklearn.metrics.auc",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.metrics.roc_curve",
"bokeh.models.Band",
"scipy.stats.norm.cdf",
"numpy.mean",
"scipy.interp",
"bokeh.models.Label",
"numpy.delete",
"numpy.linspace",
"numpy.concatenate",
"warnings.sim... | [((1406, 1428), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y', 'stat'], {}), '(Y, stat)\n', (1419, 1428), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((1542, 1565), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (1553, 1565), True, 'import numpy as np\n'), ((1636, 1702), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Y', 'stat'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Y, stat, pos_label=pos, drop_intermediate=False)\n', (1653, 1702), False, 'from sklearn import metrics\n'), ((1718, 1749), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_stat', 'tpr_stat'], {}), '(fpr_stat, tpr_stat)\n', (1729, 1749), False, 'from sklearn import metrics\n'), ((1804, 1844), 'scipy.interp', 'interp', (['fpr_linspace', 'fpr_stat', 'tpr_stat'], {}), '(fpr_linspace, fpr_stat, tpr_stat)\n', (1810, 1844), False, 'from scipy import interp\n'), ((2390, 2418), 'numpy.array', 'np.array', (['binary_stats_train'], {}), '(binary_stats_train)\n', (2398, 2418), True, 'import numpy as np\n'), ((3688, 3721), 'numpy.array', 'np.array', (['binary_stats_train_boot'], {}), '(binary_stats_train_boot)\n', (3696, 3721), True, 'import numpy as np\n'), ((8393, 8422), 'numpy.insert', 'np.insert', (['fpr_linspace', '(0)', '(0)'], {}), '(fpr_linspace, 0, 0)\n', (8402, 8422), True, 'import numpy as np\n'), ((8461, 8496), 'numpy.concatenate', 'np.concatenate', (['(fpr_linspace, [1])'], {}), '((fpr_linspace, [1]))\n', (8475, 8496), True, 'import numpy as np\n'), ((9075, 9114), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 0]'], {}), '(fpr_linspace, tpr_ib[:, 0])\n', (9086, 9114), False, 'from sklearn import metrics\n'), ((9132, 9171), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 1]'], {}), '(fpr_linspace, tpr_ib[:, 1])\n', (9143, 9171), False, 'from sklearn import metrics\n'), ((9189, 9228), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 
2]'], {}), '(fpr_linspace, tpr_ib[:, 2])\n', (9200, 9228), False, 'from sklearn import metrics\n'), ((9242, 9288), 'numpy.array', 'np.array', (['[auc_ib_low, auc_ib_upp, auc_ib_mid]'], {}), '([auc_ib_low, auc_ib_upp, auc_ib_mid])\n', (9250, 9288), True, 'import numpy as np\n'), ((9433, 9579), 'bokeh.plotting.figure', 'figure', ([], {'title': '""""""', 'plot_width': 'width', 'plot_height': 'height', 'x_axis_label': 'xlabel', 'y_axis_label': 'ylabel', 'x_range': '(-0.06, 1.06)', 'y_range': '(-0.06, 1.06)'}), "(title='', plot_width=width, plot_height=height, x_axis_label=xlabel,\n y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))\n", (9439, 9579), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((10026, 10056), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data_ib'}), '(data=data_ib)\n', (10042, 10056), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((14982, 15004), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y', 'stat'], {}), '(Y, stat)\n', (14995, 15004), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((15118, 15141), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (15129, 15141), True, 'import numpy as np\n'), ((15212, 15278), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Y', 'stat'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Y, stat, pos_label=pos, drop_intermediate=False)\n', (15229, 15278), False, 'from sklearn import metrics\n'), ((15294, 15325), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_stat', 'tpr_stat'], {}), '(fpr_stat, tpr_stat)\n', (15305, 15325), False, 'from sklearn import metrics\n'), ((15341, 15381), 'scipy.interp', 'interp', (['fpr_linspace', 'fpr_stat', 'tpr_stat'], {}), '(fpr_linspace, fpr_stat, tpr_stat)\n', (15347, 15381), False, 'from scipy import interp\n'), ((17843, 17859), 'numpy.array', 'np.array', (['tpr_ib'], {}), '(tpr_ib)\n', (17851, 17859), True, 'import numpy as 
np\n'), ((19986, 20030), 'numpy.percentile', 'np.percentile', (['tpr_bootstat_oob', '(2.5)'], {'axis': '(0)'}), '(tpr_bootstat_oob, 2.5, axis=0)\n', (19999, 20030), True, 'import numpy as np\n'), ((20051, 20094), 'numpy.percentile', 'np.percentile', (['tpr_bootstat_oob', '(50)'], {'axis': '(0)'}), '(tpr_bootstat_oob, 50, axis=0)\n', (20064, 20094), True, 'import numpy as np\n'), ((20115, 20160), 'numpy.percentile', 'np.percentile', (['tpr_bootstat_oob', '(97.5)'], {'axis': '(0)'}), '(tpr_bootstat_oob, 97.5, axis=0)\n', (20128, 20160), True, 'import numpy as np\n'), ((22193, 22222), 'numpy.insert', 'np.insert', (['fpr_linspace', '(0)', '(0)'], {}), '(fpr_linspace, 0, 0)\n', (22202, 22222), True, 'import numpy as np\n'), ((22261, 22296), 'numpy.concatenate', 'np.concatenate', (['(fpr_linspace, [1])'], {}), '((fpr_linspace, [1]))\n', (22275, 22296), True, 'import numpy as np\n'), ((22348, 22387), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 0]'], {}), '(fpr_linspace, tpr_ib[:, 0])\n', (22359, 22387), False, 'from sklearn import metrics\n'), ((22405, 22444), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 1]'], {}), '(fpr_linspace, tpr_ib[:, 1])\n', (22416, 22444), False, 'from sklearn import metrics\n'), ((22462, 22501), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_ib[:, 2]'], {}), '(fpr_linspace, tpr_ib[:, 2])\n', (22473, 22501), False, 'from sklearn import metrics\n'), ((22515, 22561), 'numpy.array', 'np.array', (['[auc_ib_low, auc_ib_upp, auc_ib_mid]'], {}), '([auc_ib_low, auc_ib_upp, auc_ib_mid])\n', (22523, 22561), True, 'import numpy as np\n'), ((22580, 22620), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_oob[:, 0]'], {}), '(fpr_linspace, tpr_oob[:, 0])\n', (22591, 22620), False, 'from sklearn import metrics\n'), ((22639, 22679), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_oob[:, 1]'], {}), '(fpr_linspace, tpr_oob[:, 1])\n', (22650, 22679), False, 'from sklearn import 
metrics\n'), ((22698, 22738), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_linspace', 'tpr_oob[:, 2]'], {}), '(fpr_linspace, tpr_oob[:, 2])\n', (22709, 22738), False, 'from sklearn import metrics\n'), ((22753, 22802), 'numpy.array', 'np.array', (['[auc_oob_low, auc_oob_upp, auc_oob_mid]'], {}), '([auc_oob_low, auc_oob_upp, auc_oob_mid])\n', (22761, 22802), True, 'import numpy as np\n'), ((23565, 23711), 'bokeh.plotting.figure', 'figure', ([], {'title': '""""""', 'plot_width': 'width', 'plot_height': 'height', 'x_axis_label': 'xlabel', 'y_axis_label': 'ylabel', 'x_range': '(-0.06, 1.06)', 'y_range': '(-0.06, 1.06)'}), "(title='', plot_width=width, plot_height=height, x_axis_label=xlabel,\n y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))\n", (23571, 23711), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((24131, 24161), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data_ib'}), '(data=data_ib)\n', (24147, 24161), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((25788, 25819), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data_oob'}), '(data=data_oob)\n', (25804, 25819), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((46157, 46189), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Ytrue', 'Y_predfull'], {}), '(Ytrue, Y_predfull)\n', (46170, 46189), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score\n'), ((46286, 46362), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue', 'Y_predfull'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue, Y_predfull, pos_label=pos, drop_intermediate=False)\n', (46303, 46362), False, 'from sklearn import metrics\n'), ((46399, 46422), 'sklearn.metrics.auc', 'metrics.auc', (['fprf', 'tprf'], {}), '(fprf, tprf)\n', (46410, 46422), False, 'from sklearn import metrics\n'), ((46571, 46598), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data'}), 
'(data=data)\n', (46587, 46598), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((46609, 46759), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'plot_width': 'width', 'plot_height': 'height', 'x_axis_label': 'xlabel', 'y_axis_label': 'ylabel', 'x_range': '(-0.06, 1.06)', 'y_range': '(-0.06, 1.06)'}), '(title=title, plot_width=width, plot_height=height, x_axis_label=\n xlabel, y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))\n', (46615, 46759), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((47581, 47619), 'numpy.concatenate', 'np.concatenate', (['[[tpr0], tpr[fpr > 0]]'], {}), '([[tpr0], tpr[fpr > 0]])\n', (47595, 47619), True, 'import numpy as np\n'), ((47630, 47665), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr[fpr > 0]]'], {}), '([[0], fpr[fpr > 0]])\n', (47644, 47665), True, 'import numpy as np\n'), ((48468, 48504), 'numpy.percentile', 'np.percentile', (['tpr_boot', '(2.5)'], {'axis': '(0)'}), '(tpr_boot, 2.5, axis=0)\n', (48481, 48504), True, 'import numpy as np\n'), ((48521, 48558), 'numpy.percentile', 'np.percentile', (['tpr_boot', '(97.5)'], {'axis': '(0)'}), '(tpr_boot, 97.5, axis=0)\n', (48534, 48558), True, 'import numpy as np\n'), ((48575, 48610), 'numpy.percentile', 'np.percentile', (['tpr_boot', '(50)'], {'axis': '(0)'}), '(tpr_boot, 50, axis=0)\n', (48588, 48610), True, 'import numpy as np\n'), ((48647, 48667), 'numpy.insert', 'np.insert', (['tpr', '(0)', '(0)'], {}), '(tpr, 0, 0)\n', (48656, 48667), True, 'import numpy as np\n'), ((48678, 48698), 'numpy.insert', 'np.insert', (['fpr', '(0)', '(0)'], {}), '(fpr, 0, 0)\n', (48687, 48698), True, 'import numpy as np\n'), ((48715, 48741), 'numpy.insert', 'np.insert', (['tpr_lowci', '(0)', '(0)'], {}), '(tpr_lowci, 0, 0)\n', (48724, 48741), True, 'import numpy as np\n'), ((48758, 48784), 'numpy.insert', 'np.insert', (['tpr_uppci', '(0)', '(0)'], {}), '(tpr_uppci, 0, 0)\n', (48767, 48784), True, 'import numpy as np\n'), ((48801, 
48827), 'numpy.insert', 'np.insert', (['tpr_medci', '(0)', '(0)'], {}), '(tpr_medci, 0, 0)\n', (48810, 48827), True, 'import numpy as np\n'), ((48865, 48899), 'numpy.percentile', 'np.percentile', (['auc_cv', '(2.5)'], {'axis': '(0)'}), '(auc_cv, 2.5, axis=0)\n', (48878, 48899), True, 'import numpy as np\n'), ((48916, 48951), 'numpy.percentile', 'np.percentile', (['auc_cv', '(97.5)'], {'axis': '(0)'}), '(auc_cv, 97.5, axis=0)\n', (48929, 48951), True, 'import numpy as np\n'), ((48968, 49001), 'numpy.percentile', 'np.percentile', (['auc_cv', '(50)'], {'axis': '(0)'}), '(auc_cv, 50, axis=0)\n', (48981, 49001), True, 'import numpy as np\n'), ((49176, 49219), 'numpy.array', 'np.array', (['[tpr_lowci, tpr_uppci, tpr_medci]'], {}), '([tpr_lowci, tpr_uppci, tpr_medci])\n', (49184, 49219), True, 'import numpy as np\n'), ((49447, 49475), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data2'}), '(data=data2)\n', (49463, 49475), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((50935, 50956), 'numpy.round', 'np.round', (['auc_full', '(2)'], {}), '(auc_full, 2)\n', (50943, 50956), True, 'import numpy as np\n'), ((50971, 50993), 'numpy.round', 'np.round', (['auc_medci', '(2)'], {}), '(auc_medci, 2)\n', (50979, 50993), True, 'import numpy as np\n'), ((51008, 51027), 'numpy.round', 'np.round', (['auc_ci', '(2)'], {}), '(auc_ci, 2)\n', (51016, 51027), True, 'import numpy as np\n'), ((71035, 71105), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue', 'Yscore'], {'pos_label': '(1)', 'drop_intermediate': '(False)'}), '(Ytrue, Yscore, pos_label=1, drop_intermediate=False)\n', (71052, 71105), False, 'from sklearn import metrics\n'), ((6708, 6724), 'numpy.array', 'np.array', (['tpr_ib'], {}), '(tpr_ib)\n', (6716, 6724), True, 'import numpy as np\n'), ((7370, 7447), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['test_y', 'test_ypred'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(test_y, test_ypred, pos_label=pos, 
drop_intermediate=False)\n', (7387, 7447), False, 'from sklearn import metrics\n'), ((7465, 7496), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_test', 'tpr_test'], {}), '(fpr_test, tpr_test)\n', (7476, 7496), False, 'from sklearn import metrics\n'), ((7790, 7830), 'scipy.interp', 'interp', (['fpr_linspace', 'fpr_test', 'tpr_test'], {}), '(fpr_linspace, fpr_test, tpr_test)\n', (7796, 7830), False, 'from scipy import interp\n'), ((7848, 7873), 'numpy.insert', 'np.insert', (['tpr_test', '(0)', '(0)'], {}), '(tpr_test, 0, 0)\n', (7857, 7873), True, 'import numpy as np\n'), ((7909, 7940), 'numpy.concatenate', 'np.concatenate', (['[tpr_test, [1]]'], {}), '([tpr_test, [1]])\n', (7923, 7940), True, 'import numpy as np\n'), ((8613, 8644), 'numpy.concatenate', 'np.concatenate', (['[[0], tpr_list]'], {}), '([[0], tpr_list])\n', (8627, 8644), True, 'import numpy as np\n'), ((8689, 8729), 'numpy.concatenate', 'np.concatenate', (['[tpr_list_linspace, [1]]'], {}), '([tpr_list_linspace, [1]])\n', (8703, 8729), True, 'import numpy as np\n'), ((10836, 10998), 'bokeh.models.Band', 'Band', ([], {'base': '"""x"""', 'lower': '"""lowci"""', 'upper': '"""uppci"""', 'level': '"""underlay"""', 'fill_alpha': '(0.1)', 'line_width': '(0.5)', 'line_color': '"""black"""', 'fill_color': '"""green"""', 'source': 'source_ib'}), "(base='x', lower='lowci', upper='uppci', level='underlay', fill_alpha=\n 0.1, line_width=0.5, line_color='black', fill_color='green', source=\n source_ib)\n", (10840, 10998), False, 'from bokeh.models import Band, HoverTool\n'), ((12220, 12252), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'data_test'}), '(data=data_test)\n', (12236, 12252), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((15676, 15763), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue_boot', 'Yscore_boot'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate\n =False)\n', (15693, 15763), 
False, 'from sklearn import metrics\n'), ((15778, 15809), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_boot', 'tpr_boot'], {}), '(fpr_boot, tpr_boot)\n', (15789, 15809), False, 'from sklearn import metrics\n'), ((16160, 16213), 'numpy.concatenate', 'np.concatenate', (['[[tpr0_boot], tpr_boot[fpr_boot > 0]]'], {}), '([[tpr0_boot], tpr_boot[fpr_boot > 0]])\n', (16174, 16213), True, 'import numpy as np\n'), ((16233, 16278), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr_boot[fpr_boot > 0]]'], {}), '([[0], fpr_boot[fpr_boot > 0]])\n', (16247, 16278), True, 'import numpy as np\n'), ((18938, 19032), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue_boot_oob', 'Yscore_boot_oob'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue_boot_oob, Yscore_boot_oob, pos_label=pos,\n drop_intermediate=False)\n', (18955, 19032), False, 'from sklearn import metrics\n'), ((19052, 19091), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_boot_oob', 'tpr_boot_oob'], {}), '(fpr_boot_oob, tpr_boot_oob)\n', (19063, 19091), False, 'from sklearn import metrics\n'), ((19298, 19337), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_boot_oob', 'tpr_boot_oob'], {}), '(fpr_boot_oob, tpr_boot_oob)\n', (19309, 19337), False, 'from sklearn import metrics\n'), ((19510, 19575), 'numpy.concatenate', 'np.concatenate', (['[[tpr0_boot_oob], tpr_boot_oob[fpr_boot_oob > 0]]'], {}), '([[tpr0_boot_oob], tpr_boot_oob[fpr_boot_oob > 0]])\n', (19524, 19575), True, 'import numpy as np\n'), ((19599, 19652), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr_boot_oob[fpr_boot_oob > 0]]'], {}), '([[0], fpr_boot_oob[fpr_boot_oob > 0]])\n', (19613, 19652), True, 'import numpy as np\n'), ((20175, 20230), 'numpy.array', 'np.array', (['[tpr_oob_lowci, tpr_oob_uppci, tpr_oob_medci]'], {}), '([tpr_oob_lowci, tpr_oob_uppci, tpr_oob_medci])\n', (20183, 20230), True, 'import numpy as np\n'), ((20856, 20933), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['test_y', 'test_ypred'], {'pos_label': 'pos', 
'drop_intermediate': '(False)'}), '(test_y, test_ypred, pos_label=pos, drop_intermediate=False)\n', (20873, 20933), False, 'from sklearn import metrics\n'), ((20951, 20982), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_test', 'tpr_test'], {}), '(fpr_test, tpr_test)\n', (20962, 20982), False, 'from sklearn import metrics\n'), ((21368, 21408), 'scipy.interp', 'interp', (['fpr_linspace', 'fpr_test', 'tpr_test'], {}), '(fpr_linspace, fpr_test, tpr_test)\n', (21374, 21408), False, 'from scipy import interp\n'), ((21426, 21451), 'numpy.insert', 'np.insert', (['tpr_test', '(0)', '(0)'], {}), '(tpr_test, 0, 0)\n', (21435, 21451), True, 'import numpy as np\n'), ((21486, 21517), 'numpy.concatenate', 'np.concatenate', (['(tpr_test, [1])'], {}), '((tpr_test, [1]))\n', (21500, 21517), True, 'import numpy as np\n'), ((21658, 21735), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['train[0]', 'train[1]'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(train[0], train[1], pos_label=pos, drop_intermediate=False)\n', (21675, 21735), False, 'from sklearn import metrics\n'), ((21753, 21793), 'scipy.interp', 'interp', (['fpr_linspace', 'fpr_stat', 'tpr_stat'], {}), '(fpr_linspace, fpr_stat, tpr_stat)\n', (21759, 21793), False, 'from scipy import interp\n'), ((21869, 21900), 'numpy.concatenate', 'np.concatenate', (['[[0], tpr_list]'], {}), '([[0], tpr_list])\n', (21883, 21900), True, 'import numpy as np\n'), ((21945, 21985), 'numpy.concatenate', 'np.concatenate', (['[tpr_list_linspace, [1]]'], {}), '([tpr_list_linspace, [1]])\n', (21959, 21985), True, 'import numpy as np\n'), ((25008, 25170), 'bokeh.models.Band', 'Band', ([], {'base': '"""x"""', 'lower': '"""lowci"""', 'upper': '"""uppci"""', 'level': '"""underlay"""', 'fill_alpha': '(0.1)', 'line_width': '(0.5)', 'line_color': '"""black"""', 'fill_color': '"""green"""', 'source': 'source_ib'}), "(base='x', lower='lowci', upper='uppci', level='underlay', fill_alpha=\n 0.1, line_width=0.5, line_color='black', 
fill_color='green', source=\n source_ib)\n", (25012, 25170), False, 'from bokeh.models import Band, HoverTool\n'), ((26651, 26815), 'bokeh.models.Band', 'Band', ([], {'base': '"""x"""', 'lower': '"""lowci"""', 'upper': '"""uppci"""', 'level': '"""underlay"""', 'fill_alpha': '(0.1)', 'line_width': '(0.5)', 'line_color': '"""black"""', 'fill_color': '"""orange"""', 'source': 'source_oob'}), "(base='x', lower='lowci', upper='uppci', level='underlay', fill_alpha=\n 0.1, line_width=0.5, line_color='black', fill_color='orange', source=\n source_oob)\n", (26655, 26815), False, 'from bokeh.models import Band, HoverTool\n'), ((47866, 47942), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue', 'Yscore_res'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue, Yscore_res, pos_label=pos, drop_intermediate=False)\n', (47883, 47942), False, 'from sklearn import metrics\n'), ((48099, 48149), 'numpy.concatenate', 'np.concatenate', (['[[tpr0_res], tpr_res[fpr_res > 0]]'], {}), '([[tpr0_res], tpr_res[fpr_res > 0]])\n', (48113, 48149), True, 'import numpy as np\n'), ((48168, 48211), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr_res[fpr_res > 0]]'], {}), '([[0], fpr_res[fpr_res > 0]])\n', (48182, 48211), True, 'import numpy as np\n'), ((49868, 50029), 'bokeh.models.Band', 'Band', ([], {'base': '"""x"""', 'lower': '"""lowci"""', 'upper': '"""uppci"""', 'level': '"""underlay"""', 'fill_alpha': '(0.1)', 'line_width': '(0.5)', 'line_color': '"""black"""', 'fill_color': '"""orange"""', 'source': 'source2'}), "(base='x', lower='lowci', upper='uppci', level='underlay', fill_alpha=\n 0.1, line_width=0.5, line_color='black', fill_color='orange', source=\n source2)\n", (49872, 50029), False, 'from bokeh.models import Band, HoverTool\n'), ((65335, 65352), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (65343, 65352), True, 'import numpy as np\n'), ((66229, 66247), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.05 / 2)'], {}), '(0.05 / 2)\n', (66237, 66247), 
False, 'from scipy.stats import norm\n'), ((67144, 67161), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (67152, 67161), True, 'import numpy as np\n'), ((67551, 67568), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (67559, 67568), True, 'import numpy as np\n'), ((67815, 67833), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.05 / 2)'], {}), '(0.05 / 2)\n', (67823, 67833), False, 'from scipy.stats import norm\n'), ((68249, 68274), 'numpy.mean', 'np.mean', (['jackstat'], {'axis': '(0)'}), '(jackstat, axis=0)\n', (68256, 68274), True, 'import numpy as np\n'), ((68289, 68328), 'numpy.sum', 'np.sum', (['((jmean - jackstat) ** 3)'], {'axis': '(0)'}), '((jmean - jackstat) ** 3, axis=0)\n', (68295, 68328), True, 'import numpy as np\n'), ((68343, 68382), 'numpy.sum', 'np.sum', (['((jmean - jackstat) ** 2)'], {'axis': '(0)'}), '((jmean - jackstat) ** 2, axis=0)\n', (68349, 68382), True, 'import numpy as np\n'), ((70460, 70477), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (70468, 70477), True, 'import numpy as np\n'), ((2769, 2856), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue_boot', 'Yscore_boot'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue_boot, Yscore_boot, pos_label=pos, drop_intermediate\n =False)\n', (2786, 2856), False, 'from sklearn import metrics\n'), ((2871, 2902), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_boot', 'tpr_boot'], {}), '(fpr_boot, tpr_boot)\n', (2882, 2902), False, 'from sklearn import metrics\n'), ((3388, 3441), 'numpy.concatenate', 'np.concatenate', (['[[tpr0_boot], tpr_boot[fpr_boot > 0]]'], {}), '([[tpr0_boot], tpr_boot[fpr_boot > 0]])\n', (3402, 3441), True, 'import numpy as np\n'), ((3461, 3506), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr_boot[fpr_boot > 0]]'], {}), '([[0], fpr_boot[fpr_boot > 0]])\n', (3475, 3506), True, 'import numpy as np\n'), ((5131, 5163), 'numpy.array', 'np.array', (['binary_stats_jack_boot'], {}), '(binary_stats_jack_boot)\n', (5139, 
5163), True, 'import numpy as np\n'), ((10592, 10720), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline_ib]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')\n ]"}), "(renderers=[figline_ib], tooltips=[('Specificity', '@spec{1.111}'),\n ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')])\n", (10601, 10720), False, 'from bokeh.models import Band, HoverTool\n'), ((11707, 11835), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline_ib]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')\n ]"}), "(renderers=[figline_ib], tooltips=[('Specificity', '@spec{1.111}'),\n ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')])\n", (11716, 11835), False, 'from bokeh.models import Band, HoverTool\n'), ((12565, 12678), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline_test]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111}')]"}), "(renderers=[figline_test], tooltips=[('Specificity',\n '@spec{1.111}'), ('Sensitivity', '@y{1.111}')])\n", (12574, 12678), False, 'from bokeh.models import Band, HoverTool\n'), ((16405, 16428), 'numpy.array', 'np.array', (['tpr_boot[idx]'], {}), '(tpr_boot[idx])\n', (16413, 16428), True, 'import numpy as np\n'), ((16799, 16886), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue_jack', 'Yscore_jack'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate\n =False)\n', (16816, 16886), False, 'from sklearn import metrics\n'), ((16905, 16936), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_jack', 'tpr_jack'], {}), '(fpr_jack, tpr_jack)\n', (16916, 16936), False, 'from sklearn import metrics\n'), ((17226, 17279), 'numpy.concatenate', 'np.concatenate', (['[[tpr0_jack], tpr_jack[fpr_jack > 0]]'], {}), '([[tpr0_jack], tpr_jack[fpr_jack > 0]])\n', (17240, 17279), True, 'import numpy as np\n'), ((17303, 17348), 'numpy.concatenate', 
'np.concatenate', (['[[0], fpr_jack[fpr_jack > 0]]'], {}), '([[0], fpr_jack[fpr_jack > 0]])\n', (17317, 17348), True, 'import numpy as np\n'), ((18380, 18396), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (18388, 18396), True, 'import numpy as np\n'), ((18470, 18485), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (18477, 18485), True, 'import numpy as np\n'), ((19791, 19822), 'numpy.array', 'np.array', (['tpr_boot_oob[idx_oob]'], {}), '(tpr_boot_oob[idx_oob])\n', (19799, 19822), True, 'import numpy as np\n'), ((20371, 20387), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (20379, 20387), True, 'import numpy as np\n'), ((20464, 20479), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (20471, 20479), True, 'import numpy as np\n'), ((47180, 47289), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111}')]"}), "(renderers=[figline], tooltips=[('Specificity', '@spec{1.111}'), (\n 'Sensitivity', '@y{1.111}')])\n", (47189, 47289), False, 'from bokeh.models import Band, HoverTool\n'), ((47965, 47994), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_res', 'tpr_res'], {}), '(fpr_res, tpr_res)\n', (47976, 47994), False, 'from sklearn import metrics\n'), ((49694, 49820), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')\n ]"}), "(renderers=[figline], tooltips=[('Specificity', '@spec{1.111}'), (\n 'Sensitivity', '@y{1.111} (+/- @ci{1.111})')])\n", (49703, 49820), False, 'from bokeh.models import Band, HoverTool\n'), ((50194, 50215), 'numpy.round', 'np.round', (['auc_full', '(2)'], {}), '(auc_full, 2)\n', (50202, 50215), True, 'import numpy as np\n'), ((50217, 50239), 'numpy.round', 'np.round', (['auc_medci', '(2)'], {}), '(auc_medci, 2)\n', (50225, 50239), True, 'import numpy as np\n'), ((50241, 50260), 'numpy.round', 'np.round', 
(['auc_ci', '(2)'], {}), '(auc_ci, 2)\n', (50249, 50260), True, 'import numpy as np\n'), ((65123, 65153), 'numpy.percentile', 'np.percentile', (['bootstat_i', '(2.5)'], {}), '(bootstat_i, 2.5)\n', (65136, 65153), True, 'import numpy as np\n'), ((65177, 65208), 'numpy.percentile', 'np.percentile', (['bootstat_i', '(97.5)'], {}), '(bootstat_i, 97.5)\n', (65190, 65208), True, 'import numpy as np\n'), ((65230, 65259), 'numpy.percentile', 'np.percentile', (['bootstat_i', '(50)'], {}), '(bootstat_i, 50)\n', (65243, 65259), True, 'import numpy as np\n'), ((65397, 65425), 'numpy.percentile', 'np.percentile', (['bootstat', '(2.5)'], {}), '(bootstat, 2.5)\n', (65410, 65425), True, 'import numpy as np\n'), ((65445, 65474), 'numpy.percentile', 'np.percentile', (['bootstat', '(97.5)'], {}), '(bootstat, 97.5)\n', (65458, 65474), True, 'import numpy as np\n'), ((65492, 65519), 'numpy.percentile', 'np.percentile', (['bootstat', '(50)'], {}), '(bootstat, 50)\n', (65505, 65519), True, 'import numpy as np\n'), ((65585, 65602), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (65593, 65602), True, 'import numpy as np\n'), ((65990, 66007), 'numpy.array', 'np.array', (['boot_ci'], {}), '(boot_ci)\n', (65998, 66007), True, 'import numpy as np\n'), ((66595, 66609), 'scipy.stats.norm.ppf', 'norm.ppf', (['prop'], {}), '(prop)\n', (66603, 66609), False, 'from scipy.stats import norm\n'), ((66652, 66677), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0 + zalpha)'], {}), '(2 * z0 + zalpha)\n', (66660, 66677), False, 'from scipy.stats import norm\n'), ((66701, 66726), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0 - zalpha)'], {}), '(2 * z0 - zalpha)\n', (66709, 66726), False, 'from scipy.stats import norm\n'), ((66750, 66766), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0)'], {}), '(2 * z0)\n', (66758, 66766), False, 'from scipy.stats import norm\n'), ((66906, 66940), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct1[i]'], {}), '(bootstat_i, pct1[i])\n', (66919, 66940), 
True, 'import numpy as np\n'), ((66966, 67000), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct3[i]'], {}), '(bootstat_i, pct3[i])\n', (66979, 67000), True, 'import numpy as np\n'), ((67026, 67060), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct2[i]'], {}), '(bootstat_i, pct2[i])\n', (67039, 67060), True, 'import numpy as np\n'), ((68181, 68211), 'scipy.stats.norm.ppf', 'norm.ppf', (['prop'], {'loc': '(0)', 'scale': '(1)'}), '(prop, loc=0, scale=1)\n', (68189, 68211), False, 'from scipy.stats import norm\n'), ((68515, 68540), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (68538, 68540), False, 'import warnings\n'), ((68554, 68585), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (68575, 68585), False, 'import warnings\n'), ((70636, 70652), 'numpy.array', 'np.array', (['Yscore'], {}), '(Yscore)\n', (70644, 70652), True, 'import numpy as np\n'), ((70695, 70740), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['Ytrue', 'Yscore_round'], {}), '(Ytrue, Yscore_round)\n', (70719, 70740), False, 'from sklearn import metrics\n'), ((71116, 71134), 'numpy.abs', 'np.abs', (['(fpr - fpr0)'], {}), '(fpr - fpr0)\n', (71122, 71134), True, 'import numpy as np\n'), ((3633, 3656), 'numpy.array', 'np.array', (['tpr_boot[idx]'], {}), '(tpr_boot[idx])\n', (3641, 3656), True, 'import numpy as np\n'), ((3911, 3929), 'numpy.delete', 'np.delete', (['base', 'i'], {}), '(base, i)\n', (3920, 3929), True, 'import numpy as np\n'), ((4179, 4266), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['Ytrue_jack', 'Yscore_jack'], {'pos_label': 'pos', 'drop_intermediate': '(False)'}), '(Ytrue_jack, Yscore_jack, pos_label=pos, drop_intermediate\n =False)\n', (4196, 4266), False, 'from sklearn import metrics\n'), ((4283, 4314), 'sklearn.metrics.auc', 'metrics.auc', (['fpr_jack', 'tpr_jack'], {}), '(fpr_jack, tpr_jack)\n', (4294, 4314), False, 'from sklearn import metrics\n'), ((4820, 4873), 
'numpy.concatenate', 'np.concatenate', (['[[tpr0_jack], tpr_jack[fpr_jack > 0]]'], {}), '([[tpr0_jack], tpr_jack[fpr_jack > 0]])\n', (4834, 4873), True, 'import numpy as np\n'), ((4895, 4940), 'numpy.concatenate', 'np.concatenate', (['[[0], fpr_jack[fpr_jack > 0]]'], {}), '([[0], fpr_jack[fpr_jack > 0]])\n', (4909, 4940), True, 'import numpy as np\n'), ((6780, 6796), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (6788, 6796), True, 'import numpy as np\n'), ((6872, 6887), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (6879, 6887), True, 'import numpy as np\n'), ((13181, 13258), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.02)', 'text': 'oob_text', 'render_mode': '"""css"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.02, text=oob_text, render_mode='css', text_font_size='9pt')\n", (13186, 13258), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((13697, 13775), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.1)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.1, text=ib_text, render_mode='canvas', text_font_size='9pt')\n", (13702, 13775), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((13878, 13963), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.02)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.02, text=oob_text, render_mode='canvas', text_font_size='9pt'\n )\n", (13883, 13963), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((17487, 17510), 'numpy.array', 'np.array', (['tpr_jack[idx]'], {}), '(tpr_jack[idx])\n', (17495, 17510), True, 'import numpy as np\n'), ((24760, 24888), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline_ib]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')\n ]"}), "(renderers=[figline_ib], 
tooltips=[('Specificity', '@spec{1.111}'),\n ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')])\n", (24769, 24888), False, 'from bokeh.models import Band, HoverTool\n'), ((26403, 26529), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[figline]', 'tooltips': "[('Specificity', '@spec{1.111}'), ('Sensitivity', '@y{1.111} (+/- @ci{1.111})')\n ]"}), "(renderers=[figline], tooltips=[('Specificity', '@spec{1.111}'), (\n 'Sensitivity', '@y{1.111} (+/- @ci{1.111})')])\n", (26412, 26529), False, 'from bokeh.models import Band, HoverTool\n'), ((28689, 28768), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.18)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.18, text=ib_text, render_mode='canvas', text_font_size='9pt')\n", (28694, 28768), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((28872, 28951), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.1)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.1, text=oob_text, render_mode='canvas', text_font_size='9pt')\n", (28877, 28951), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((29109, 29195), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.02)', 'text': 'test_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.02, text=test_text, render_mode='canvas', text_font_size=\n '9pt')\n", (29114, 29195), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((51299, 51384), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.15)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.15, text=ib_text, render_mode='canvas', text_font_size='10pt'\n )\n", (51304, 51384), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((51491, 51577), 'bokeh.models.Label', 'Label', ([], {'x': 
'(0.52)', 'y': '(0.05)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.05, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (51496, 51577), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((68608, 68642), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.05 / 2)'], {'loc': '(0)', 'scale': '(1)'}), '(0.05 / 2, loc=0, scale=1)\n', (68616, 68642), False, 'from scipy.stats import norm\n'), ((68668, 68703), 'scipy.stats.norm.cdf', 'norm.cdf', (['(z0 + zL / (1 - ahat * zL))'], {}), '(z0 + zL / (1 - ahat * zL))\n', (68676, 68703), False, 'from scipy.stats import norm\n'), ((68728, 68766), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - 0.05 / 2)'], {'loc': '(0)', 'scale': '(1)'}), '(1 - 0.05 / 2, loc=0, scale=1)\n', (68736, 68766), False, 'from scipy.stats import norm\n'), ((68794, 68829), 'scipy.stats.norm.cdf', 'norm.cdf', (['(z0 + zU / (1 - ahat * zU))'], {}), '(z0 + zU / (1 - ahat * zU))\n', (68802, 68829), False, 'from scipy.stats import norm\n'), ((68854, 68883), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.5)'], {'loc': '(0)', 'scale': '(1)'}), '(0.5, loc=0, scale=1)\n', (68862, 68883), False, 'from scipy.stats import norm\n'), ((68911, 68946), 'scipy.stats.norm.cdf', 'norm.cdf', (['(z0 + zM / (1 - ahat * zM))'], {}), '(z0 + zM / (1 - ahat * zM))\n', (68919, 68946), False, 'from scipy.stats import norm\n'), ((69292, 69326), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct1[i]'], {}), '(bootstat_i, pct1[i])\n', (69305, 69326), True, 'import numpy as np\n'), ((69356, 69390), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct2[i]'], {}), '(bootstat_i, pct2[i])\n', (69369, 69390), True, 'import numpy as np\n'), ((69420, 69454), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct3[i]'], {}), '(bootstat_i, pct3[i])\n', (69433, 69454), True, 'import numpy as np\n'), ((5073, 5096), 'numpy.array', 'np.array', (['tpr_jack[idx]'], {}), '(tpr_jack[idx])\n', (5081, 
5096), True, 'import numpy as np\n'), ((5314, 5330), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (5322, 5330), True, 'import numpy as np\n'), ((16324, 16344), 'numpy.abs', 'np.abs', (['(i - fpr_boot)'], {}), '(i - fpr_boot)\n', (16330, 16344), True, 'import numpy as np\n'), ((19702, 19726), 'numpy.abs', 'np.abs', (['(i - fpr_boot_oob)'], {}), '(i - fpr_boot_oob)\n', (19708, 19726), True, 'import numpy as np\n'), ((48321, 48340), 'numpy.abs', 'np.abs', (['(i - fpr_res)'], {}), '(i - fpr_res)\n', (48327, 48340), True, 'import numpy as np\n'), ((51950, 52029), 'bokeh.models.Label', 'Label', ([], {'x': '(0.3)', 'y': '(0.15)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.3, y=0.15, text=ib_text, render_mode='canvas', text_font_size='10pt')\n", (51955, 52029), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((52142, 52227), 'bokeh.models.Label', 'Label', ([], {'x': '(0.3)', 'y': '(0.05)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.3, y=0.05, text=oob_text, render_mode='canvas', text_font_size='10pt'\n )\n", (52147, 52227), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((57208, 57293), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.03)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.03, text=ib_text, render_mode='canvas', text_font_size='10pt'\n )\n", (57213, 57293), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((69734, 69768), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct1[i]'], {}), '(bootstat_i, pct1[i])\n', (69747, 69768), True, 'import numpy as np\n'), ((69798, 69832), 'numpy.percentile', 'np.percentile', (['bootstat_i', 'pct2[i]'], {}), '(bootstat_i, pct2[i])\n', (69811, 69832), True, 'import numpy as np\n'), ((69862, 69896), 'numpy.percentile', 'np.percentile', (['bootstat_i', 
'pct2[i]'], {}), '(bootstat_i, pct2[i])\n', (69875, 69896), True, 'import numpy as np\n'), ((3552, 3572), 'numpy.abs', 'np.abs', (['(i - fpr_boot)'], {}), '(i - fpr_boot)\n', (3558, 3572), True, 'import numpy as np\n'), ((5584, 5600), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (5592, 5600), True, 'import numpy as np\n'), ((17402, 17422), 'numpy.abs', 'np.abs', (['(i - fpr_jack)'], {}), '(i - fpr_jack)\n', (17408, 17422), True, 'import numpy as np\n'), ((29657, 29735), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.1)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.1, text=ib_text, render_mode='canvas', text_font_size='9pt')\n", (29662, 29735), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((29856, 29941), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.02)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""9pt"""'}), "(x=0.38, y=0.02, text=oob_text, render_mode='canvas', text_font_size='9pt'\n )\n", (29861, 29941), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((52599, 52684), 'bokeh.models.Label', 'Label', ([], {'x': '(0.3)', 'y': '(0.15)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.4pt"""'}), "(x=0.3, y=0.15, text=ib_text, render_mode='canvas', text_font_size='6.4pt'\n )\n", (52604, 52684), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((52792, 52878), 'bokeh.models.Label', 'Label', ([], {'x': '(0.3)', 'y': '(0.05)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.4pt"""'}), "(x=0.3, y=0.05, text=oob_text, render_mode='canvas', text_font_size=\n '6.4pt')\n", (52797, 52878), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((57604, 57683), 'bokeh.models.Label', 'Label', ([], {'x': '(0.4)', 'y': '(0.02)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 
'text_font_size': '"""10pt"""'}), "(x=0.4, y=0.02, text=ib_text, render_mode='canvas', text_font_size='10pt')\n", (57609, 57683), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((60928, 61014), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.03)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.03, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (60933, 61014), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((69572, 69597), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0 + zalpha)'], {}), '(2 * z0 + zalpha)\n', (69580, 69597), False, 'from scipy.stats import norm\n'), ((69629, 69654), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0 - zalpha)'], {}), '(2 * z0 - zalpha)\n', (69637, 69654), False, 'from scipy.stats import norm\n'), ((69686, 69702), 'scipy.stats.norm.cdf', 'norm.cdf', (['(2 * z0)'], {}), '(2 * z0)\n', (69694, 69702), False, 'from scipy.stats import norm\n'), ((4990, 5010), 'numpy.abs', 'np.abs', (['(i - fpr_jack)'], {}), '(i - fpr_jack)\n', (4996, 5010), True, 'import numpy as np\n'), ((5866, 5882), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (5874, 5882), True, 'import numpy as np\n'), ((30332, 30417), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.15)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.15, text=ib_text, render_mode='canvas', text_font_size='10pt'\n )\n", (30337, 30417), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((30536, 30622), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.05)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.05, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (30541, 30622), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((37542, 37627), 
'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.03)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.03, text=ib_text, render_mode='canvas', text_font_size='10pt'\n )\n", (37547, 37627), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((53468, 53556), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '6.8pt')\n", (53473, 53556), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((53666, 53754), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (53671, 53754), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((53865, 53954), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (53870, 53954), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((54066, 54154), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (54071, 54154), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((58102, 58190), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n 
'6.8pt')\n", (58107, 58190), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((58300, 58387), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (58305, 58387), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((61392, 61478), 'bokeh.models.Label', 'Label', ([], {'x': '(0.27)', 'y': '(0.02)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.27, y=0.02, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (61397, 61478), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((31025, 31110), 'bokeh.models.Label', 'Label', ([], {'x': '(0.22)', 'y': '(0.15)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.22, y=0.15, text=ib_text, render_mode='canvas', text_font_size='10pt'\n )\n", (31030, 31110), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((31229, 31315), 'bokeh.models.Label', 'Label', ([], {'x': '(0.22)', 'y': '(0.05)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.22, y=0.05, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (31234, 31315), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((37964, 38043), 'bokeh.models.Label', 'Label', ([], {'x': '(0.3)', 'y': '(0.02)', 'text': 'ib_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.3, y=0.02, text=ib_text, render_mode='canvas', text_font_size='10pt')\n", (37969, 38043), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((41667, 41753), 'bokeh.models.Label', 'Label', ([], {'x': '(0.52)', 'y': '(0.03)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 
'text_font_size': '"""10pt"""'}), "(x=0.52, y=0.03, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (41672, 41753), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((54747, 54833), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '6pt')\n", (54752, 54833), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((54943, 55029), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (54948, 55029), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((55140, 55227), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6pt')\n", (55145, 55227), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((55339, 55425), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (55344, 55425), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((58801, 58889), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '6.8pt')\n", (58806, 58889), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((58999, 59086), 
'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (59004, 59086), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((61912, 62001), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (61917, 62001), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((62113, 62201), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (62118, 62201), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((32018, 32106), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.28)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.28, text=ib_text_1, render_mode='canvas', text_font_size=\n '6.8pt')\n", (32023, 32106), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((32228, 32316), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.19)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.19, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (32233, 32316), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((32439, 32528), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.09, text=oob_text_1, render_mode='canvas', 
text_font_size\n ='6.8pt')\n", (32444, 32528), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((32652, 32740), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (32657, 32740), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((38539, 38627), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '6.8pt')\n", (38544, 38627), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((38749, 38836), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (38754, 38836), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((42161, 42247), 'bokeh.models.Label', 'Label', ([], {'x': '(0.22)', 'y': '(0.02)', 'text': 'oob_text', 'render_mode': '"""canvas"""', 'text_font_size': '"""10pt"""'}), "(x=0.22, y=0.02, text=oob_text, render_mode='canvas', text_font_size=\n '10pt')\n", (42166, 42247), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((56017, 56103), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '5pt')\n", (56022, 56103), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((56213, 56299), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 
'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '5pt')\n", (56218, 56299), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((56410, 56497), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='5pt')\n", (56415, 56497), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((56609, 56695), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '5pt')\n", (56614, 56695), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((59500, 59586), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '6pt')\n", (59505, 59586), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((59696, 59774), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0, text=ib_text_2, render_mode='canvas', text_font_size='6pt')\n", (59701, 59774), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((62637, 62726), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (62642, 62726), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), 
((62838, 62926), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (62843, 62926), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((33437, 33525), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '6.8pt')\n", (33442, 33525), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((33647, 33735), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (33652, 33735), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((33858, 33947), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (33863, 33947), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((34071, 34159), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (34076, 34159), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((39320, 39408), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, 
render_mode='canvas', text_font_size=\n '6.8pt')\n", (39325, 39408), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((39530, 39617), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (39535, 39617), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((42737, 42826), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (42742, 42826), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((42950, 43038), 'bokeh.models.Label', 'Label', ([], {'x': '(0.38)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.38, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (42955, 43038), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((60192, 60278), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '5pt')\n", (60197, 60278), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((60388, 60473), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size='5pt'\n )\n", (60393, 60473), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((63364, 63451), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 
'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6pt')\n", (63369, 63451), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((63563, 63649), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (63568, 63649), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((34857, 34943), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '6pt')\n", (34862, 34943), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((35065, 35151), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (35070, 35151), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((35274, 35361), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6pt')\n", (35279, 35361), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((35485, 35571), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (35490, 35571), False, 'from bokeh.models import ColumnDataSource, 
Range1d, LabelSet, Label\n'), ((40101, 40187), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '6pt')\n", (40106, 40187), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((40309, 40387), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0, text=ib_text_2, render_mode='canvas', text_font_size='6pt')\n", (40314, 40387), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((43530, 43619), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6.8pt')\n", (43535, 43619), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((43743, 43831), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6.8pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6.8pt')\n", (43748, 43831), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((64085, 64172), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='5pt')\n", (64090, 64172), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((64284, 64370), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, 
render_mode='canvas', text_font_size=\n '5pt')\n", (64289, 64370), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((36269, 36355), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.32)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.32, text=ib_text_1, render_mode='canvas', text_font_size=\n '5pt')\n", (36274, 36355), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((36477, 36563), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.23)', 'text': 'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.23, text=ib_text_2, render_mode='canvas', text_font_size=\n '5pt')\n", (36482, 36563), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((36686, 36773), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='5pt')\n", (36691, 36773), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((36897, 36983), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '5pt')\n", (36902, 36983), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((40877, 40963), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'ib_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=ib_text_1, render_mode='canvas', text_font_size=\n '5pt')\n", (40882, 40963), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((41085, 41170), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 
'ib_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=ib_text_2, render_mode='canvas', text_font_size='5pt'\n )\n", (41090, 41170), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((44326, 44413), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='6pt')\n", (44331, 44413), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((44537, 44623), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""6pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '6pt')\n", (44542, 44623), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((45117, 45204), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.09)', 'text': 'oob_text_1', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.09, text=oob_text_1, render_mode='canvas', text_font_size\n ='5pt')\n", (45122, 45204), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n'), ((45328, 45414), 'bokeh.models.Label', 'Label', ([], {'x': '(0.28)', 'y': '(0.0)', 'text': 'oob_text_2', 'render_mode': '"""canvas"""', 'text_font_size': '"""5pt"""'}), "(x=0.28, y=0.0, text=oob_text_2, render_mode='canvas', text_font_size=\n '5pt')\n", (45333, 45414), False, 'from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label\n')] |
import os
import six
import numpy
from chainerchem.dataset.indexers.numpy_tuple_dataset_feature_indexer import NumpyTupleDatasetFeatureIndexer # NOQA
class NumpyTupleDataset(object):
    """Dataset of a tuple of datasets.

    It combines multiple datasets into one dataset. Each example is
    represented by a tuple whose ``i``-th item corresponds to the ``i``-th
    dataset, and each dataset is expected to be an instance of
    ``numpy.ndarray``.

    Args:
        datasets: Underlying datasets. The ``i``-th one is used for the
            ``i``-th item of each example. All datasets must have the same
            length.

    Raises:
        ValueError: If no datasets are given or their lengths differ.
    """
    def __init__(self, *datasets):
        if not datasets:
            raise ValueError('no datasets are given')
        length = len(datasets[0])
        for i, dataset in enumerate(datasets):
            if len(dataset) != length:
                raise ValueError(
                    'dataset of the index {} has a wrong length'.format(i))
        self._datasets = datasets
        self._length = length
        # Accessor object backing the `features` property below.
        self._features_indexer = NumpyTupleDatasetFeatureIndexer(self)
    def __getitem__(self, index):
        batches = [dataset[index] for dataset in self._datasets]
        if isinstance(index, slice):
            # Re-group the per-dataset slices into a list of per-example
            # tuples. Builtin `range` replaces `six.moves.range`: identical
            # behavior on Python 3, no third-party dependency.
            length = len(batches[0])
            return [tuple([batch[i] for batch in batches])
                    for i in range(length)]
        else:
            return tuple(batches)
    def __len__(self):
        return self._length
    def get_datasets(self):
        """Return the underlying tuple of datasets."""
        return self._datasets
    @property
    def features(self):
        """Extract features according to the specified index.

        - axis 0 is used to specify dataset id (`i`-th dataset)
        - axis 1 is used to specify feature index

        .. admonition:: Example

           >>> import numpy
           >>> from chainerchem.datasets import NumpyTupleDataset
           >>> x = numpy.array([0, 1, 2], dtype=numpy.float32)
           >>> t = x * x
           >>> numpy_tuple_dataset = NumpyTupleDataset(x, t)
           >>> targets = numpy_tuple_dataset.features[:, 1]
           >>> print('targets', targets)  # We can extract only target value
           targets [0, 1, 4]
        """
        return self._features_indexer
    @classmethod
    def save(cls, filepath, numpy_tuple_dataset):
        """Save the dataset to ``filepath`` in npz format.

        Args:
            filepath (str): filepath to save dataset. It is recommended to
                end with the '.npz' extension.
            numpy_tuple_dataset (NumpyTupleDataset): dataset instance

        Raises:
            TypeError: If ``numpy_tuple_dataset`` is not a
                ``NumpyTupleDataset``.
        """
        if not isinstance(numpy_tuple_dataset, NumpyTupleDataset):
            raise TypeError('numpy_tuple_dataset is not instance of '
                            'NumpyTupleDataset, got {}'
                            .format(type(numpy_tuple_dataset)))
        numpy.savez(filepath, *numpy_tuple_dataset._datasets)
    @classmethod
    def load(cls, filepath):
        """Load a dataset previously written by :meth:`save`.

        Returns ``None`` when ``filepath`` does not exist.
        """
        if not os.path.exists(filepath):
            return None
        load_data = numpy.load(filepath)
        result = []
        i = 0
        # `numpy.savez` stores positional arrays under 'arr_0', 'arr_1', ...;
        # collect them in order until the sequence ends.
        while 'arr_{}'.format(i) in load_data.keys():
            result.append(load_data['arr_{}'.format(i)])
            i += 1
        return NumpyTupleDataset(*result)
| [
"os.path.exists",
"numpy.savez",
"six.moves.range",
"chainerchem.dataset.indexers.numpy_tuple_dataset_feature_indexer.NumpyTupleDatasetFeatureIndexer",
"numpy.load"
] | [((1084, 1121), 'chainerchem.dataset.indexers.numpy_tuple_dataset_feature_indexer.NumpyTupleDatasetFeatureIndexer', 'NumpyTupleDatasetFeatureIndexer', (['self'], {}), '(self)\n', (1115, 1121), False, 'from chainerchem.dataset.indexers.numpy_tuple_dataset_feature_indexer import NumpyTupleDatasetFeatureIndexer\n'), ((2881, 2934), 'numpy.savez', 'numpy.savez', (['filepath', '*numpy_tuple_dataset._datasets'], {}), '(filepath, *numpy_tuple_dataset._datasets)\n', (2892, 2934), False, 'import numpy\n'), ((3067, 3087), 'numpy.load', 'numpy.load', (['filepath'], {}), '(filepath)\n', (3077, 3087), False, 'import numpy\n'), ((2997, 3021), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (3011, 3021), False, 'import os\n'), ((1384, 1407), 'six.moves.range', 'six.moves.range', (['length'], {}), '(length)\n', (1399, 1407), False, 'import six\n')] |
import os
import json
import torch
import numpy as np
from torch.utils.data import Dataset
import random
def uniform_sample(input_feature, sample_len):
    """Sample ``sample_len`` rows from ``input_feature`` at evenly spaced indices.

    input_feature - 2-D array whose first axis (frames) is resampled.
    sample_len - Number of rows to draw; must be > 0.

    If the input already has at most ``sample_len`` rows (and more than one),
    all rows are returned unchanged. A single-row input is duplicated so the
    output always has at least two rows.
    """
    input_len = input_feature.shape[0]
    assert sample_len > 0, "WRONG SAMPLE_LEN {}, THIS PARAM MUST BE GREATER THAN 0.".format(sample_len)
    if sample_len >= input_len > 1:
        sample_idxs = np.arange(input_len)
    else:
        if input_len == 1:
            # Duplicate the single frame so at least two rows come back.
            sample_len = 2
        sample_scale = input_len / sample_len
        sample_idxs = np.floor(np.arange(sample_len) * sample_scale)
    # BUGFIX: `np.int` was removed in NumPy 1.20+; the builtin `int` is the
    # drop-in replacement (same C long semantics for index arrays).
    return input_feature[sample_idxs.astype(int), :]
def random_sample(input_feature, sample_len):
    """Sample ``sample_len`` rows from ``input_feature`` at random, order-preserving indices.

    input_feature - 2-D array whose first axis (frames) is resampled.
    sample_len - Number of rows to draw; must be > 0.

    Shorter inputs are sampled with replacement; longer inputs are split into
    ``sample_len`` windows with one random index drawn per window, so the
    samples stay roughly uniform in time. Equal lengths return the input as-is.
    """
    input_len = input_feature.shape[0]
    assert sample_len > 0, "WRONG SAMPLE_LEN {}, THIS PARAM MUST BE GREATER THAN 0.".format(sample_len)
    if input_len < sample_len:
        # Too few frames: draw with replacement, keep temporal order.
        sample_idxs = np.random.choice(input_len, sample_len, replace=True)
        sample_idxs = np.sort(sample_idxs)
    elif input_len > sample_len:
        # Window boundaries (float); one random index per window.
        sample_idxs = np.arange(sample_len) * input_len / sample_len
        for i in range(sample_len - 1):
            # BUGFIX: `np.int(...)` was removed in NumPy 1.20+; builtin
            # `int(...)` performs the same truncation.
            sample_idxs[i] = np.random.choice(range(int(sample_idxs[i]), int(sample_idxs[i + 1] + 1)))
        sample_idxs[-1] = np.random.choice(np.arange(sample_idxs[-2], input_len))
    else:
        sample_idxs = np.arange(input_len)
    # BUGFIX: `np.int` removed in NumPy 1.20+; use builtin `int` dtype.
    return input_feature[sample_idxs.astype(int), :]
def consecutive_sample(input_feature, sample_len):
    """Sample one consecutive window of ``sample_len`` rows.

    input_feature - 2-D array whose first axis (frames) is sampled.
    sample_len - Window length; must be > 0.

    Inputs shorter than ``sample_len`` are zero-padded at the end; otherwise
    a random window start is chosen uniformly over all valid positions.
    """
    input_len = input_feature.shape[0]
    assert sample_len > 0, "WRONG SAMPLE_LEN {}, THIS PARAM MUST BE GREATER THAN 0.".format(sample_len)
    if input_len >= sample_len:
        # BUGFIX: valid starts are 0 .. input_len - sample_len inclusive, so
        # draw from `input_len - sample_len + 1` choices. The original
        # `np.random.choice(input_len - sample_len)` raised ValueError when
        # input_len == sample_len (choice over 0 items) and could never pick
        # the last possible window.
        sample_idx = np.random.choice(input_len - sample_len + 1)
        return input_feature[sample_idx:(sample_idx + sample_len), :]
    else:
        empty_features = np.zeros((sample_len - input_len, input_feature.shape[1]))
        return np.concatenate((input_feature, empty_features), axis=0)
class ACMDataset(Dataset):
    """Video-level feature dataset for weakly-supervised action localization.

    Each item is a pre-extracted per-frame feature sequence loaded from
    ``<data_dir>/<train|test>/<video>.npy``, temporally resampled to
    ``args.sample_segments_num`` rows, paired with a multi-hot action label
    vector built from ``<data_dir>/gt.json``.
    """
    def __init__(self, args, phase="train", sample="random"):
        # phase: "train" uses the train split/features; anything else -> test.
        # sample: "random" picks random per-window indices, else uniform.
        self.phase = phase
        self.sample = sample
        self.data_dir = args.data_dir
        self.sample_segments_num = args.sample_segments_num
        # Ground-truth annotations keyed by video name.
        with open(os.path.join(self.data_dir, "gt.json")) as gt_f:
            self.gt_dict = json.load(gt_f)["database"]
        if self.phase == "train":
            self.feature_dir = os.path.join(self.data_dir, "train")
            self.data_list = list(open(os.path.join(self.data_dir, "split_train.txt")))
            self.data_list = [item.strip() for item in self.data_list]
        else:
            self.feature_dir = os.path.join(self.data_dir, "test")
            self.data_list = list(open(os.path.join(self.data_dir, "split_test.txt")))
            self.data_list = [item.strip() for item in self.data_list]
        self.class_name_lst = args.class_name_lst
        # Map class name -> column index of the multi-hot label vector.
        self.action_class_idx_dict = {action_cls: idx for idx, action_cls in enumerate(self.class_name_lst)}
        self.action_class_num = args.action_cls_num
        self.get_label()
    def get_label(self):
        """Build ``self.label_dict``: video name -> multi-hot label vector."""
        self.label_dict = {}
        for item_name in self.data_list:
            item_anns_list = self.gt_dict[item_name]["annotations"]
            item_label = np.zeros(self.action_class_num)
            for ann in item_anns_list:
                ann_label = ann["label"]
                item_label[self.action_class_idx_dict[ann_label]] = 1.0
            self.label_dict[item_name] = item_label
    def __len__(self):
        return len(self.data_list)
    def __getitem__(self, idx):
        """Return ``(features, label)`` in train phase, otherwise
        ``(name, features, label, original_frame_count, duration)``."""
        vid_name = self.data_list[idx]
        vid_label = self.label_dict[vid_name]
        vid_duration = self.gt_dict[vid_name]["duration"]
        con_vid_feature = np.load(os.path.join(self.feature_dir, vid_name + ".npy"))
        vid_len = con_vid_feature.shape[0]
        # Resample the full sequence to a fixed number of segments.
        if self.sample == "random":
            con_vid_spd_feature = random_sample(con_vid_feature, self.sample_segments_num)
        else:
            con_vid_spd_feature = uniform_sample(con_vid_feature, self.sample_segments_num)
        con_vid_spd_feature = torch.as_tensor(con_vid_spd_feature.astype(np.float32))
        vid_label_t = torch.as_tensor(vid_label.astype(np.float32))
        if self.phase == "train":
            return con_vid_spd_feature, vid_label_t
        else:
            return vid_name, con_vid_spd_feature, vid_label_t, vid_len, vid_duration
def build_dataset(args, phase="train", sample="random"):
    """Factory: construct an :class:`ACMDataset` for the given phase/sampling mode."""
    dataset = ACMDataset(args, phase=phase, sample=sample)
    return dataset
def build_ftcl_dataset(args, phase="train", sample="random"):
    """Factory: construct an :class:`FTCLDataset` for the given phase/sampling mode."""
    dataset = FTCLDataset(args, phase=phase, sample=sample)
    return dataset
class FTCLDataset(Dataset):
    """Pair-based variant of the feature dataset for contrastive training.

    In the train phase each item is a pair of videos: either a positive pair
    (identical multi-hot labels) or a negative pair (disjoint label sets).
    In the test phase items look like :class:`ACMDataset` test items.
    """
    def __init__(self, args, phase="train", sample="random"):
        # phase: "train" uses the train split/features; anything else -> test.
        # sample: "random" picks random per-window indices, else uniform.
        self.phase = phase
        self.sample = sample
        self.data_dir = args.data_dir
        self.sample_segments_num = args.sample_segments_num
        # Ground-truth annotations keyed by video name.
        with open(os.path.join(self.data_dir, "gt.json")) as gt_f:
            self.gt_dict = json.load(gt_f)["database"]
        if self.phase == "train":
            self.feature_dir = os.path.join(self.data_dir, "train")
            self.data_list = list(open(os.path.join(self.data_dir, "split_train.txt")))
            self.data_list = [item.strip() for item in self.data_list]
        else:
            self.feature_dir = os.path.join(self.data_dir, "test")
            self.data_list = list(open(os.path.join(self.data_dir, "split_test.txt")))
            self.data_list = [item.strip() for item in self.data_list]
        self.class_name_lst = args.class_name_lst
        # Map class name -> column index of the multi-hot label vector.
        self.action_class_idx_dict = {action_cls: idx for idx, action_cls in enumerate(self.class_name_lst)}
        self.action_class_num = args.action_cls_num
        self.label_dict = {}
        self.get_label()
        self.pos_pair_list = []
        self.neg_pair_list = []
        self.pair_list = []
        self.get_pair()
        self.feature_list = []
        self.get_feature()
    def get_label(self):
        """Populate ``self.label_dict``: video name -> multi-hot label vector."""
        for vid_name in self.data_list:
            vid_anns_list = self.gt_dict[vid_name]["annotations"]
            vid_label = np.zeros(self.action_class_num)
            for ann in vid_anns_list:
                ann_label = ann["label"]
                vid_label[self.action_class_idx_dict[ann_label]] = 1.0
            self.label_dict[vid_name] = vid_label
    def get_group(self):
        # NOTE(review): `self.group_list` is never initialized anywhere in
        # this class, so calling this method raises AttributeError. It is not
        # called from __init__ and appears to be dead code — confirm before use.
        for idx in range(len(self.data_list)):
            vid_name = self.data_list[idx]
            group_idx = np.argwhere(self.label_dict[vid_name] == 1)
            for class_idx in group_idx:
                self.group_list[class_idx[0]].append(idx)
    def get_pair(self):
        """Build ``self.pair_list``: negative pairs subsampled to the positive
        pair count, followed by all positive pairs."""
        for i in range(len(self.data_list)):
            label_0 = np.array(self.label_dict[self.data_list[i]])
            for j in range(i + 1, len(self.data_list)):
                label_1 = np.array(self.label_dict[self.data_list[j]])
                if (label_0 == label_1).all():
                    # Identical label vectors -> positive pair.
                    self.pos_pair_list.append([i, j])
                elif np.sum(label_0 * label_1) == 0:
                    # No shared action class -> negative pair.
                    self.neg_pair_list.append([i, j])
        # Balance negatives against positives.
        # NOTE(review): random.sample raises ValueError if there are fewer
        # negative than positive pairs — presumably never true for this data.
        self.neg_pair_list = random.sample(self.neg_pair_list, len(self.pos_pair_list))
        self.neg_pair_list.extend(self.pos_pair_list)
        self.pair_list = self.neg_pair_list
    def get_feature(self):
        """Preload and temporally resample every video's feature matrix."""
        for vid_name in self.data_list:
            con_vid_feature = np.load(os.path.join(self.feature_dir, f'{vid_name}.npy'))
            if self.sample == "random":
                con_vid_spd_feature = random_sample(con_vid_feature, self.sample_segments_num)
            else:
                con_vid_spd_feature = uniform_sample(con_vid_feature, self.sample_segments_num)
            self.feature_list.append(con_vid_spd_feature)
    def __len__(self):
        if self.phase == 'train':
            return len(self.pair_list)
        else:
            return len(self.data_list)
    def __getitem__(self, idx):
        """Train phase: ``(feat_1, feat_2, label_1, label_2)`` for pair `idx`.
        Test phase: ``(name, features, label, frame_count, duration)``."""
        if self.phase == 'train':
            idx_1, idx_2 = self.pair_list[idx]
            feature_1 = torch.as_tensor(self.feature_list[idx_1].astype(np.float32))
            feature_2 = torch.as_tensor(self.feature_list[idx_2].astype(np.float32))
            label_1 = torch.as_tensor(self.label_dict[self.data_list[idx_1]].astype(np.float32))
            label_2 = torch.as_tensor(self.label_dict[self.data_list[idx_2]].astype(np.float32))
            return feature_1, feature_2, label_1, label_2
        else:
            vid_name = self.data_list[idx]
            vid_label = self.label_dict[vid_name]
            vid_duration = self.gt_dict[vid_name]["duration"]
            con_vid_feature = self.feature_list[idx]
            vid_len = con_vid_feature.shape[0]
            # NOTE(review): feature_list entries were already resampled to
            # sample_segments_num in get_feature, so this resamples a second
            # time — looks redundant (vid_len is also the sampled length here,
            # not the original frame count); confirm intent.
            if self.sample == "random":
                con_vid_spd_feature = random_sample(con_vid_feature, self.sample_segments_num)
            else:
                con_vid_spd_feature = uniform_sample(con_vid_feature, self.sample_segments_num)
            con_vid_spd_feature = torch.as_tensor(con_vid_spd_feature.astype(np.float32))
            vid_label_t = torch.as_tensor(vid_label.astype(np.float32))
            return vid_name, con_vid_spd_feature, vid_label_t, vid_len, vid_duration
| [
"numpy.random.choice",
"numpy.sort",
"numpy.floor",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"numpy.sum",
"numpy.concatenate",
"json.load",
"numpy.int",
"numpy.arange"
] | [((369, 389), 'numpy.arange', 'np.arange', (['input_len'], {}), '(input_len)\n', (378, 389), True, 'import numpy as np\n'), ((587, 608), 'numpy.floor', 'np.floor', (['sample_idxs'], {}), '(sample_idxs)\n', (595, 608), True, 'import numpy as np\n'), ((921, 974), 'numpy.random.choice', 'np.random.choice', (['input_len', 'sample_len'], {'replace': '(True)'}), '(input_len, sample_len, replace=True)\n', (937, 974), True, 'import numpy as np\n'), ((998, 1018), 'numpy.sort', 'np.sort', (['sample_idxs'], {}), '(sample_idxs)\n', (1005, 1018), True, 'import numpy as np\n'), ((1729, 1769), 'numpy.random.choice', 'np.random.choice', (['(input_len - sample_len)'], {}), '(input_len - sample_len)\n', (1745, 1769), True, 'import numpy as np\n'), ((527, 548), 'numpy.arange', 'np.arange', (['sample_len'], {}), '(sample_len)\n', (536, 548), True, 'import numpy as np\n'), ((1391, 1411), 'numpy.arange', 'np.arange', (['input_len'], {}), '(input_len)\n', (1400, 1411), True, 'import numpy as np\n'), ((1905, 1963), 'numpy.zeros', 'np.zeros', (['(sample_len - input_len, input_feature.shape[1])'], {}), '((sample_len - input_len, input_feature.shape[1]))\n', (1913, 1963), True, 'import numpy as np\n'), ((1980, 2035), 'numpy.concatenate', 'np.concatenate', (['(input_feature, empty_features)'], {'axis': '(0)'}), '((input_feature, empty_features), axis=0)\n', (1994, 2035), True, 'import numpy as np\n'), ((2488, 2524), 'os.path.join', 'os.path.join', (['self.data_dir', '"""train"""'], {}), "(self.data_dir, 'train')\n", (2500, 2524), False, 'import os\n'), ((2733, 2768), 'os.path.join', 'os.path.join', (['self.data_dir', '"""test"""'], {}), "(self.data_dir, 'test')\n", (2745, 2768), False, 'import os\n'), ((3374, 3405), 'numpy.zeros', 'np.zeros', (['self.action_class_num'], {}), '(self.action_class_num)\n', (3382, 3405), True, 'import numpy as np\n'), ((3896, 3945), 'os.path.join', 'os.path.join', (['self.feature_dir', "(vid_name + '.npy')"], {}), "(self.feature_dir, vid_name + '.npy')\n", 
(3908, 3945), False, 'import os\n'), ((5254, 5290), 'os.path.join', 'os.path.join', (['self.data_dir', '"""train"""'], {}), "(self.data_dir, 'train')\n", (5266, 5290), False, 'import os\n'), ((5499, 5534), 'os.path.join', 'os.path.join', (['self.data_dir', '"""test"""'], {}), "(self.data_dir, 'test')\n", (5511, 5534), False, 'import os\n'), ((6320, 6351), 'numpy.zeros', 'np.zeros', (['self.action_class_num'], {}), '(self.action_class_num)\n', (6328, 6351), True, 'import numpy as np\n'), ((6707, 6750), 'numpy.argwhere', 'np.argwhere', (['(self.label_dict[vid_name] == 1)'], {}), '(self.label_dict[vid_name] == 1)\n', (6718, 6750), True, 'import numpy as np\n'), ((6949, 6993), 'numpy.array', 'np.array', (['self.label_dict[self.data_list[i]]'], {}), '(self.label_dict[self.data_list[i]])\n', (6957, 6993), True, 'import numpy as np\n'), ((1318, 1355), 'numpy.arange', 'np.arange', (['sample_idxs[-2]', 'input_len'], {}), '(sample_idxs[-2], input_len)\n', (1327, 1355), True, 'import numpy as np\n'), ((2314, 2352), 'os.path.join', 'os.path.join', (['self.data_dir', '"""gt.json"""'], {}), "(self.data_dir, 'gt.json')\n", (2326, 2352), False, 'import os\n'), ((2391, 2406), 'json.load', 'json.load', (['gt_f'], {}), '(gt_f)\n', (2400, 2406), False, 'import json\n'), ((5080, 5118), 'os.path.join', 'os.path.join', (['self.data_dir', '"""gt.json"""'], {}), "(self.data_dir, 'gt.json')\n", (5092, 5118), False, 'import os\n'), ((5157, 5172), 'json.load', 'json.load', (['gt_f'], {}), '(gt_f)\n', (5166, 5172), False, 'import json\n'), ((7078, 7122), 'numpy.array', 'np.array', (['self.label_dict[self.data_list[j]]'], {}), '(self.label_dict[self.data_list[j]])\n', (7086, 7122), True, 'import numpy as np\n'), ((7638, 7687), 'os.path.join', 'os.path.join', (['self.feature_dir', 'f"""{vid_name}.npy"""'], {}), "(self.feature_dir, f'{vid_name}.npy')\n", (7650, 7687), False, 'import os\n'), ((1076, 1097), 'numpy.arange', 'np.arange', (['sample_len'], {}), '(sample_len)\n', (1085, 1097), True, 
'import numpy as np\n'), ((2565, 2611), 'os.path.join', 'os.path.join', (['self.data_dir', '"""split_train.txt"""'], {}), "(self.data_dir, 'split_train.txt')\n", (2577, 2611), False, 'import os\n'), ((2809, 2854), 'os.path.join', 'os.path.join', (['self.data_dir', '"""split_test.txt"""'], {}), "(self.data_dir, 'split_test.txt')\n", (2821, 2854), False, 'import os\n'), ((5331, 5377), 'os.path.join', 'os.path.join', (['self.data_dir', '"""split_train.txt"""'], {}), "(self.data_dir, 'split_train.txt')\n", (5343, 5377), False, 'import os\n'), ((5575, 5620), 'os.path.join', 'os.path.join', (['self.data_dir', '"""split_test.txt"""'], {}), "(self.data_dir, 'split_test.txt')\n", (5587, 5620), False, 'import os\n'), ((1217, 1239), 'numpy.int', 'np.int', (['sample_idxs[i]'], {}), '(sample_idxs[i])\n', (1223, 1239), True, 'import numpy as np\n'), ((1241, 1271), 'numpy.int', 'np.int', (['(sample_idxs[i + 1] + 1)'], {}), '(sample_idxs[i + 1] + 1)\n', (1247, 1271), True, 'import numpy as np\n'), ((7248, 7273), 'numpy.sum', 'np.sum', (['(label_0 * label_1)'], {}), '(label_0 * label_1)\n', (7254, 7273), True, 'import numpy as np\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.one_hot_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class OneHotTest(tf.test.TestCase):
  """Tests for tf.one_hot across dtypes, axes, batch shapes, and error cases."""
  def _testOneHot(self, truth, use_gpu=False, expected_err_re=None,
                  raises=None, **inputs):
    """Run tf.one_hot(**inputs) and check against `truth`.

    If `raises` is given, assert construction raises that exception type.
    If `expected_err_re` is given, assert evaluation fails with a matching
    op error. Otherwise compare the evaluated result to `truth`.
    """
    with self.test_session(use_gpu=use_gpu):
      if raises is not None:
        with self.assertRaises(raises):
          tf.one_hot(**inputs)
      else:
        ans = tf.one_hot(**inputs)
        if expected_err_re is None:
          tf_ans = ans.eval()
          self.assertAllEqual(tf_ans, truth)
          # Static shape inference must agree with the evaluated shape.
          self.assertEqual(tf_ans.shape, ans.get_shape())
        else:
          with self.assertRaisesOpError(expected_err_re):
            ans.eval()
  def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
    """Run _testOneHot on both GPU and CPU."""
    self._testOneHot(truth, True, expected_err_re, raises, **inputs)
    self._testOneHot(truth, False, expected_err_re, raises, **inputs)
  def _testBasic(self, dtype):
    """1-D indices with explicit on/off values; out-of-range index -> all off."""
    indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)
    truth = np.asarray(
        [[1.0, -1.0, -1.0],
         [-1.0, -1.0, 1.0],
         [-1.0, -1.0, -1.0],
         [-1.0, 1.0, -1.0]],
        dtype=dtype)
    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        dtype=dtype,
        truth=truth)
    # axis == 0
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        axis=0,
        dtype=dtype,
        truth=truth.T)  # Output is transpose version in this case
  def _testDefaultBasic(self, dtype):
    """1-D indices with the default on=1/off=0 values."""
    indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
    depth = 3
    truth = np.asarray(
        [[1.0, 0.0, 0.0],
         [0.0, 0.0, 1.0],
         [0.0, 0.0, 0.0],
         [0.0, 1.0, 0.0]],
        dtype=dtype)
    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        truth=truth)
    # axis == 0
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        axis=0,
        truth=truth.T)  # Output is transpose version in this case
  def testFloatBasic(self):
    self._testBasic(np.float32)
    self._testDefaultBasic(np.float32)
  def testDoubleBasic(self):
    self._testBasic(np.float64)
    self._testDefaultBasic(np.float64)
  def testInt32Basic(self):
    self._testBasic(np.int32)
    self._testDefaultBasic(np.int32)
  def testInt64Basic(self):
    self._testBasic(np.int64)
    self._testDefaultBasic(np.int64)
  def testComplex64Basic(self):
    self._testBasic(np.complex64)
    self._testDefaultBasic(np.complex64)
  def testComplex128Basic(self):
    self._testBasic(np.complex128)
    self._testDefaultBasic(np.complex128)
  def _testBatch(self, dtype):
    """2-D (batched) indices with explicit on/off values."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)
    truth = np.asarray(
        [[[1.0, -1.0, -1.0],
          [-1.0, -1.0, 1.0],
          [-1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0]],
         [[-1.0, 1.0, -1.0],
          [1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0],
          [-1.0, -1.0, -1.0]]],
        dtype=dtype)
    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        dtype=dtype,
        truth=truth)
    # axis == 1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch
  def _testDefaultValuesBatch(self, dtype):
    """Batched indices with the default on=1/off=0 values."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3
    truth = np.asarray(
        [[[1.0, 0.0, 0.0],
          [0.0, 0.0, 1.0],
          [0.0, 0.0, 0.0],
          [0.0, 1.0, 0.0]],
         [[0.0, 1.0, 0.0],
          [1.0, 0.0, 0.0],
          [0.0, 1.0, 0.0],
          [0.0, 0.0, 0.0]]],
        dtype=dtype)
    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        dtype=dtype,
        truth=truth)
    # axis == 1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch
  def _testValueTypeBatch(self, dtype):
    """Batched indices; on/off passed before depth (keyword-order variant)."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)
    truth = np.asarray(
        [[[1.0, -1.0, -1.0],
          [-1.0, -1.0, 1.0],
          [-1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0]],
         [[-1.0, 1.0, -1.0],
          [1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0],
          [-1.0, -1.0, -1.0]]],
        dtype=dtype)
    # axis == -1
    self._testBothOneHot(
        indices=indices,
        on_value=on_value,
        off_value=off_value,
        depth=depth,
        dtype=dtype,
        truth=truth)
    # axis == 1
    self._testBothOneHot(
        indices=indices,
        on_value=on_value,
        off_value=off_value,
        depth=depth,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch
  def testFloatBatch(self):
    self._testBatch(np.float32)
    self._testDefaultValuesBatch(np.float32)
    self._testValueTypeBatch(np.float32)
  def testDoubleBatch(self):
    self._testBatch(np.float64)
    self._testDefaultValuesBatch(np.float64)
    self._testValueTypeBatch(np.float64)
  def testInt32Batch(self):
    self._testBatch(np.int32)
    self._testDefaultValuesBatch(np.int32)
    self._testValueTypeBatch(np.int32)
  def testInt64Batch(self):
    self._testBatch(np.int64)
    self._testDefaultValuesBatch(np.int64)
    self._testValueTypeBatch(np.int64)
  def testComplexBatch(self):
    self._testBatch(np.complex64)
    # self._testDefaultValuesBatch(np.complex64)
    self._testValueTypeBatch(np.complex64)
  def testSimpleCases(self):
    """Plain Python list indices with default, explicit-dtype, and custom values."""
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1.0, 0.0, 0.0],
         [0.0, 1.0, 0.0],
         [0.0, 0.0, 1.0]],
        dtype=np.float32)
    self._testBothOneHot(indices=indices, depth=depth, truth=truth)
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, dtype=np.int32,
                         truth=truth)
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, -1, -1],
         [-1, 1, -1],
         [-1, -1, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, on_value=1,
                         off_value=-1, truth=truth)
  def testSingleValueGiven(self):
    """The un-supplied on/off value must fall back to its default."""
    # Only on_value provided
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)
    # Only off_value provided
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.float32)
    self._testBothOneHot(indices=indices, depth=depth,
                         off_value=0.0, truth=truth)
  def testString(self):
    """String on/off values given as numpy arrays, tf constants, and bytes."""
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[b"1.0", b"0.0", b"0.0"],
         [b"0.0", b"1.0", b"0.0"],
         [b"0.0", b"0.0", b"1.0"]])
    on_value = np.asarray(b"1.0")
    off_value = np.asarray(b"0.0")
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)
    on_value = tf.constant(b"1.0")
    off_value = tf.constant(b"0.0")
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)
    on_value = b"1.0"
    off_value = b"0.0"
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)
  def testIndicesTypes(self):
    """Indices supplied with every supported tf and numpy integer type."""
    tf_types = [tf.uint8, tf.int32, tf.int64]
    np_types = [np.int32, np.int64]
    for itype in tf_types + np_types:
      # Note: to keep the tests simple in the case of uint8 the index -1 below
      # maps to 255 which is out of the depth range, just like -1.
      if itype in tf_types:
        indices = tf.constant([[0, 2, -1, 1],
                              [1, 0, 1, -1]],
                             dtype=itype)
      elif itype in np_types:
        indices = np.asarray([[0, 2, -1, 1],
                              [1, 0, 1, -1]],
                             dtype=itype)
      depth = 3
      on_value = np.asarray(1.0, dtype=np.float32)
      off_value = np.asarray(-1.0, dtype=np.float32)
      truth = np.asarray(
          [[[1.0, -1.0, -1.0],
            [-1.0, -1.0, 1.0],
            [-1.0, -1.0, -1.0],
            [-1.0, 1.0, -1.0]],
           [[-1.0, 1.0, -1.0],
            [1.0, -1.0, -1.0],
            [-1.0, 1.0, -1.0],
            [-1.0, -1.0, -1.0]]],
          dtype=np.float32)
      # axis == -1
      self._testBothOneHot(
          indices=indices,
          on_value=on_value,
          off_value=off_value,
          depth=depth,
          truth=truth)
      # axis == 1
      self._testBothOneHot(
          indices=indices,
          on_value=on_value,
          off_value=off_value,
          depth=depth,
          axis=1,
          truth=[truth[0].T, truth[1].T])  # Do not transpose the batch
  def testPrefixDimOverflow(self):
    """Large leading dimension (65536 rows) for each index type."""
    for itype in [tf.int32, tf.int64, tf.uint8]:
      prefix_dim_size = 65536
      depth = 2
      x = [i % depth for i in range(prefix_dim_size)]
      indices = tf.constant(x, dtype=itype)
      truth = np.zeros((prefix_dim_size, depth), np.float32)
      for i in range(prefix_dim_size):
        truth[i, x[i]] = 1.0
      self._testBothOneHot(
          indices=indices,
          depth=depth,
          on_value=1.0,
          off_value=0.0,
          truth=truth)
  def testOnOffMismatchTypeError(self):
    """Mismatched on_value/off_value dtypes must raise TypeError."""
    indices = [0, 1, 2]
    depth = 3
    on_value = np.asarray(1.0, np.float64)
    off_value = np.asarray(0.0, np.float32)
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        truth=None,
        raises=TypeError)
  def testDtypeMismatchTypeError(self):
    """on/off value dtype disagreeing with the requested dtype must raise."""
    indices = [0, 1, 2]
    depth = 3
    on_value = np.asarray(1.0, np.float32)
    off_value = np.asarray(0.0, np.float32)
    dtype = np.int32
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        dtype=dtype,
        truth=None,
        raises=TypeError)
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=off_value,
        dtype=dtype,
        truth=None,
        raises=TypeError)
if __name__ == "__main__":
  # Run every test case in this module under TensorFlow's test runner.
  tf.test.main()
| [
"tensorflow.one_hot",
"numpy.asarray",
"tensorflow.test.main",
"numpy.zeros",
"tensorflow.constant"
] | [((12395, 12409), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (12407, 12409), True, 'import tensorflow as tf\n'), ((1759, 1800), 'numpy.asarray', 'np.asarray', (['[0, 2, -1, 1]'], {'dtype': 'np.int64'}), '([0, 2, -1, 1], dtype=np.int64)\n', (1769, 1800), True, 'import numpy as np\n'), ((1830, 1858), 'numpy.asarray', 'np.asarray', (['(1.0)'], {'dtype': 'dtype'}), '(1.0, dtype=dtype)\n', (1840, 1858), True, 'import numpy as np\n'), ((1875, 1904), 'numpy.asarray', 'np.asarray', (['(-1.0)'], {'dtype': 'dtype'}), '(-1.0, dtype=dtype)\n', (1885, 1904), True, 'import numpy as np\n'), ((1918, 2024), 'numpy.asarray', 'np.asarray', (['[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]]'], {'dtype': 'dtype'}), '([[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-1.0,\n 1.0, -1.0]], dtype=dtype)\n', (1928, 2024), True, 'import numpy as np\n'), ((2555, 2596), 'numpy.asarray', 'np.asarray', (['[0, 2, -1, 1]'], {'dtype': 'np.int64'}), '([0, 2, -1, 1], dtype=np.int64)\n', (2565, 2596), True, 'import numpy as np\n'), ((2624, 2722), 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]'], {'dtype': 'dtype'}), '([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, \n 0.0]], dtype=dtype)\n', (2634, 2722), True, 'import numpy as np\n'), ((3723, 3781), 'numpy.asarray', 'np.asarray', (['[[0, 2, -1, 1], [1, 0, 1, -1]]'], {'dtype': 'np.int64'}), '([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)\n', (3733, 3781), True, 'import numpy as np\n'), ((3862, 3890), 'numpy.asarray', 'np.asarray', (['(1.0)'], {'dtype': 'dtype'}), '(1.0, dtype=dtype)\n', (3872, 3890), True, 'import numpy as np\n'), ((3907, 3936), 'numpy.asarray', 'np.asarray', (['(-1.0)'], {'dtype': 'dtype'}), '(-1.0, dtype=dtype)\n', (3917, 3936), True, 'import numpy as np\n'), ((3950, 4143), 'numpy.asarray', 'np.asarray', (['[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0]], [[-1.0, 1.0, 
-1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -1.0], [-1.0,\n -1.0, -1.0]]]'], {'dtype': 'dtype'}), '([[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-\n 1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0], [-1.0, -1.0, -1.0]]], dtype=dtype)\n', (3960, 4143), True, 'import numpy as np\n'), ((4719, 4777), 'numpy.asarray', 'np.asarray', (['[[0, 2, -1, 1], [1, 0, 1, -1]]'], {'dtype': 'np.int64'}), '([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)\n', (4729, 4777), True, 'import numpy as np\n'), ((4856, 5031), 'numpy.asarray', 'np.asarray', (['[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], [[\n 0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]]'], {'dtype': 'dtype'}), '([[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, \n 0.0]], [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, \n 0.0]]], dtype=dtype)\n', (4866, 5031), True, 'import numpy as np\n'), ((5563, 5621), 'numpy.asarray', 'np.asarray', (['[[0, 2, -1, 1], [1, 0, 1, -1]]'], {'dtype': 'np.int64'}), '([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)\n', (5573, 5621), True, 'import numpy as np\n'), ((5703, 5731), 'numpy.asarray', 'np.asarray', (['(1.0)'], {'dtype': 'dtype'}), '(1.0, dtype=dtype)\n', (5713, 5731), True, 'import numpy as np\n'), ((5748, 5777), 'numpy.asarray', 'np.asarray', (['(-1.0)'], {'dtype': 'dtype'}), '(-1.0, dtype=dtype)\n', (5758, 5777), True, 'import numpy as np\n'), ((5791, 5984), 'numpy.asarray', 'np.asarray', (['[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -1.0], [-1.0,\n -1.0, -1.0]]]'], {'dtype': 'dtype'}), '([[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-\n 1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0], [-1.0, -1.0, -1.0]]], dtype=dtype)\n', (5801, 5984), True, 'import numpy as np\n'), ((7365, 7451), 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0], 
[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.\n float32)\n', (7375, 7451), True, 'import numpy as np\n'), ((7592, 7653), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)\n', (7602, 7653), True, 'import numpy as np\n'), ((7841, 7908), 'numpy.asarray', 'np.asarray', (['[[1, -1, -1], [-1, 1, -1], [-1, -1, 1]]'], {'dtype': 'np.int32'}), '([[1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=np.int32)\n', (7851, 7908), True, 'import numpy as np\n'), ((8168, 8229), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)\n', (8178, 8229), True, 'import numpy as np\n'), ((8417, 8480), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)\n', (8427, 8480), True, 'import numpy as np\n'), ((8690, 8784), 'numpy.asarray', 'np.asarray', (["[[b'1.0', b'0.0', b'0.0'], [b'0.0', b'1.0', b'0.0'], [b'0.0', b'0.0', b'1.0']]"], {}), "([[b'1.0', b'0.0', b'0.0'], [b'0.0', b'1.0', b'0.0'], [b'0.0',\n b'0.0', b'1.0']])\n", (8700, 8784), True, 'import numpy as np\n'), ((8817, 8835), 'numpy.asarray', 'np.asarray', (["b'1.0'"], {}), "(b'1.0')\n", (8827, 8835), True, 'import numpy as np\n'), ((8852, 8870), 'numpy.asarray', 'np.asarray', (["b'0.0'"], {}), "(b'0.0')\n", (8862, 8870), True, 'import numpy as np\n'), ((9038, 9057), 'tensorflow.constant', 'tf.constant', (["b'1.0'"], {}), "(b'1.0')\n", (9049, 9057), True, 'import tensorflow as tf\n'), ((9074, 9093), 'tensorflow.constant', 'tf.constant', (["b'0.0'"], {}), "(b'0.0')\n", (9085, 9093), True, 'import tensorflow as tf\n'), ((11547, 11574), 'numpy.asarray', 'np.asarray', (['(1.0)', 'np.float64'], {}), '(1.0, np.float64)\n', (11557, 11574), True, 'import numpy as np\n'), ((11591, 
11618), 'numpy.asarray', 'np.asarray', (['(0.0)', 'np.float32'], {}), '(0.0, np.float32)\n', (11601, 11618), True, 'import numpy as np\n'), ((11912, 11939), 'numpy.asarray', 'np.asarray', (['(1.0)', 'np.float32'], {}), '(1.0, np.float32)\n', (11922, 11939), True, 'import numpy as np\n'), ((11956, 11983), 'numpy.asarray', 'np.asarray', (['(0.0)', 'np.float32'], {}), '(0.0, np.float32)\n', (11966, 11983), True, 'import numpy as np\n'), ((10098, 10131), 'numpy.asarray', 'np.asarray', (['(1.0)'], {'dtype': 'np.float32'}), '(1.0, dtype=np.float32)\n', (10108, 10131), True, 'import numpy as np\n'), ((10150, 10184), 'numpy.asarray', 'np.asarray', (['(-1.0)'], {'dtype': 'np.float32'}), '(-1.0, dtype=np.float32)\n', (10160, 10184), True, 'import numpy as np\n'), ((10200, 10398), 'numpy.asarray', 'np.asarray', (['[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -1.0], [-1.0,\n -1.0, -1.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0], [-\n 1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, -\n 1.0], [-1.0, -1.0, -1.0]]], dtype=np.float32)\n', (10210, 10398), True, 'import numpy as np\n'), ((11144, 11171), 'tensorflow.constant', 'tf.constant', (['x'], {'dtype': 'itype'}), '(x, dtype=itype)\n', (11155, 11171), True, 'import tensorflow as tf\n'), ((11187, 11233), 'numpy.zeros', 'np.zeros', (['(prefix_dim_size, depth)', 'np.float32'], {}), '((prefix_dim_size, depth), np.float32)\n', (11195, 11233), True, 'import numpy as np\n'), ((1207, 1227), 'tensorflow.one_hot', 'tf.one_hot', ([], {}), '(**inputs)\n', (1217, 1227), True, 'import tensorflow as tf\n'), ((9783, 9839), 'tensorflow.constant', 'tf.constant', (['[[0, 2, -1, 1], [1, 0, 1, -1]]'], {'dtype': 'itype'}), '([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)\n', (9794, 9839), True, 'import tensorflow as tf\n'), ((1160, 1180), 'tensorflow.one_hot', 'tf.one_hot', ([], {}), 
'(**inputs)\n', (1170, 1180), True, 'import tensorflow as tf\n'), ((9949, 10004), 'numpy.asarray', 'np.asarray', (['[[0, 2, -1, 1], [1, 0, 1, -1]]'], {'dtype': 'itype'}), '([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)\n', (9959, 10004), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
towerstruc.py
Created by <NAME> on 2012-01-20.
Copyright (c) NREL. All rights reserved.
HISTORY: 2012 created
-7/2014: R.D. Bugs found in the call to shellBucklingEurocode from towerwithFrame3DD. Fixed.
Also set_as_top added.
-10/2014: R.D. Merged back with some changes Andrew did on his end.
-12/2014: A.N. fixed some errors from the merge (redundant drag calc). pep8 compliance. removed several unneccesary variables and imports (including set_as_top)
- 6/2015: A.N. major rewrite. removed pBEAM. can add spring stiffness anywhere. can add mass anywhere.
can use different material props throughout.
- 7/2015 : R.D. modified to use commonse modules.
- 1/2018 : G.B. modified for easier use with other modules, reducing user input burden, and shifting more to commonse
"""
from __future__ import print_function
import numpy as np
from openmdao.api import Component, Group, Problem, IndepVarComp
from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag
from commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind
from commonse.tube import CylindricalShellProperties
from commonse.utilities import assembleI, unassembleI, nodal2sectional
from commonse import gravity, eps, NFREQ
from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD
#from fusedwind.turbine.tower import TowerFromCSProps
#from fusedwind.interface import implement_base
import commonse.UtilizationSupplement as Util
# -----------------
# Components
# -----------------
class TowerDiscretization(Component):
    """Check that the discretized tower reaches the desired hub height.

    Exposes the mismatch between the desired hub height and the last tower
    node so an optimizer can drive it to zero.
    """
    def __init__(self):
        super(TowerDiscretization, self).__init__()
        # BUG FIX: the original desc read 'diameter at tower base', a
        # copy-paste error -- this parameter is the desired hub height.
        self.add_param('hub_height', val=0.0, units='m', desc='desired hub height above ground')
        self.add_param('z_end', val=0.0, units='m', desc='Last node point on tower')

        self.add_output('height_constraint', val=0.0, units='m', desc='mismatch between tower height and desired hub_height')

    def solve_nonlinear(self, params, unknowns, resids):
        # Constraint = desired hub height minus actual top-node elevation
        unknowns['height_constraint'] = params['hub_height'] - params['z_end']

    def linearize(self, params, unknowns, resids):
        # The constraint is linear, so the Jacobian is constant.
        J = {}
        J['height_constraint','hub_height'] = 1
        J['height_constraint','z_end'] = -1
        return J
class TowerMass(Component):
    """Relabel cylinder mass/cost/inertia outputs as tower properties.

    The tower structure is computed by cylinder components elsewhere; this
    component forwards those results under tower-specific names and supplies
    analytic (identity/zero) derivatives.
    """
    def __init__(self, nPoints):
        super(TowerMass, self).__init__()

        self.add_param('cylinder_mass', val=np.zeros(nPoints-1), units='kg', desc='Total cylinder mass')
        self.add_param('cylinder_cost', val=0.0, units='USD', desc='Total cylinder cost')
        self.add_param('cylinder_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of cylinder')
        self.add_param('cylinder_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the cylinder')
        self.add_param('cylinder_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of cylinder about base [xx yy zz xy xz yz]')

        self.add_output('tower_raw_cost', val=0.0, units='USD', desc='Total tower cost')
        self.add_output('tower_mass', val=0.0, units='kg', desc='Total tower mass')
        self.add_output('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
        self.add_output('tower_section_center_of_mass', val=np.zeros(nPoints-1), units='m', desc='z position of center of mass of each can in the tower')
        self.add_output('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')

    def solve_nonlinear(self, params, unknowns, resids):
        # Straight pass-through: tower quantities equal cylinder quantities
        unknowns['tower_raw_cost']       = params['cylinder_cost']
        unknowns['tower_mass']           = params['cylinder_mass'].sum()
        unknowns['tower_center_of_mass'] = params['cylinder_center_of_mass']
        unknowns['tower_section_center_of_mass'] = params['cylinder_section_center_of_mass']
        unknowns['tower_I_base']         = params['cylinder_I_base']

    def linearize(self, params, unknowns, resids):
        npts    = len(params['cylinder_section_center_of_mass'])
        zeroPts = np.zeros(npts)
        zero6   = np.zeros(6)

        J = {}
        # BUG FIX: 'cylinder_mass' is an input, so it lives in params; the
        # original unknowns['cylinder_mass'] lookup raised a KeyError here.
        J['tower_mass','cylinder_mass'] = np.ones(len(params['cylinder_mass']))
        J['tower_mass','cylinder_cost'] = 0.0
        J['tower_mass','cylinder_center_of_mass'] = 0.0
        J['tower_mass','cylinder_section_center_of_mass'] = zeroPts
        J['tower_mass','cylinder_I_base'] = zero6

        J['tower_raw_cost','cylinder_mass'] = np.zeros(len(params['cylinder_mass']))
        J['tower_raw_cost','cylinder_cost'] = 1.0
        J['tower_raw_cost','cylinder_center_of_mass'] = 0.0
        J['tower_raw_cost','cylinder_section_center_of_mass'] = zeroPts
        J['tower_raw_cost','cylinder_I_base'] = zero6

        J['tower_center_of_mass','cylinder_mass'] = 0.0
        J['tower_center_of_mass','cylinder_cost'] = 0.0
        J['tower_center_of_mass','cylinder_center_of_mass'] = 1.0
        J['tower_center_of_mass','cylinder_section_center_of_mass'] = zeroPts
        J['tower_center_of_mass','cylinder_I_base'] = zero6

        J['tower_section_center_of_mass','cylinder_mass'] = 0.0
        J['tower_section_center_of_mass','cylinder_cost'] = 0.0
        J['tower_section_center_of_mass','cylinder_center_of_mass'] = 0.0
        J['tower_section_center_of_mass','cylinder_section_center_of_mass'] = np.eye(npts)
        J['tower_section_center_of_mass','cylinder_I_base'] = np.zeros((npts,6))

        # BUG FIX: tower_I_base is a direct copy of cylinder_I_base and does
        # not depend on cylinder_mass, so this (6 x npts) block is zero (the
        # original assigned a constant 1.0 here).
        J['tower_I_base','cylinder_mass'] = np.zeros((6,npts))
        J['tower_I_base','cylinder_cost'] = 0.0
        J['tower_I_base','cylinder_center_of_mass'] = 0.0
        J['tower_I_base','cylinder_section_center_of_mass'] = np.zeros((6,npts))
        J['tower_I_base','cylinder_I_base'] = np.eye(len(params['cylinder_I_base']))

        return J
class TurbineMass(Component):
    """Combine RNA and tower mass properties into whole-turbine totals.

    Sums the masses, mass-weights the centers of mass, and moves the RNA
    inertia down to the tower base via the parallel-axis theorem.
    """
    def __init__(self):
        super(TurbineMass, self).__init__()

        self.add_param('hubH', val=0.0, units='m', desc='Hub-height')
        self.add_param('rna_mass', val=0.0, units='kg', desc='Total tower mass')
        self.add_param('rna_I', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of rna about tower top [xx yy zz xy xz yz]')
        self.add_param('rna_cg', np.zeros((3,)), units='m', desc='xyz-location of rna cg relative to tower top')
        self.add_param('tower_mass', val=0.0, units='kg', desc='Total tower mass')
        self.add_param('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of mass of tower')
        self.add_param('tower_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')

        self.add_output('turbine_mass', val=0.0, units='kg', desc='Total mass of tower+rna')
        self.add_output('turbine_center_of_mass', val=np.zeros((3,)), units='m', desc='xyz-position of tower+rna center of mass')
        self.add_output('turbine_I_base', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia of tower about base [xx yy zz xy xz yz]')

        # Finite-difference derivatives (central differences, relative step)
        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'
        self.deriv_options['step_calc'] = 'relative'
        self.deriv_options['step_size'] = 1e-5

    def solve_nonlinear(self, params, unknowns, resids):
        m_rna   = params['rna_mass']
        m_tower = params['tower_mass']
        m_total = m_rna + m_tower
        unknowns['turbine_mass'] = m_total

        # Centers of mass, both measured from the tower base
        r_rna   = params['rna_cg'] + np.array([0.0, 0.0, params['hubH']])
        r_tower = np.array([0.0, 0.0, params['tower_center_of_mass']])
        unknowns['turbine_center_of_mass'] = (m_rna*r_rna + m_tower*r_tower) / m_total

        # Parallel-axis theorem: shift the RNA inertia from its cg location to
        # the tower base before summing it with the tower's own base inertia.
        R = r_rna
        I_rna_base = assembleI(params['rna_I']) + m_rna*(np.dot(R, R)*np.eye(3) - np.outer(R, R))
        unknowns['turbine_I_base'] = unassembleI(assembleI(params['tower_I_base']) + I_rna_base)
class TowerPreFrame(Component):
    """Assemble boundary conditions, added mass, and point loads for the frame solver.

    Translates the RNA mass properties and loads, together with the base
    boundary condition (rigid for land-based towers, soil springs for
    monopiles), into the flat index/value arrays that CylinderFrame3DD expects.
    """
    def __init__(self, nFull):
        super(TowerPreFrame, self).__init__()

        self.add_param('z', np.zeros(nFull), units='m', desc='location along tower. start at bottom and go to top')

        # extra mass
        self.add_param('mass', 0.0, units='kg', desc='added mass')
        self.add_param('mI', np.zeros((6,)), units='kg*m**2', desc='mass moment of inertia about some point p [xx yy zz xy xz yz]')
        self.add_param('mrho', np.zeros((3,)), units='m', desc='xyz-location of p relative to node')

        # point loads
        self.add_param('rna_F', np.zeros((3,)), units='N', desc='rna force')
        self.add_param('rna_M', np.zeros((3,)), units='N*m', desc='rna moment')

        # Monopile handling
        self.add_param('k_monopile', np.zeros(6), units='N/m', desc='Stiffness BCs for ocean soil. Only used if monoflag inputis True')
        self.add_param('monopile', False, desc='Flag for monopile BCs', pass_by_obj=True)

        # spring reaction data. Use float('inf') for rigid constraints.
        nK = 1
        self.add_output('kidx', np.zeros(nK, dtype=np.int_), desc='indices of z where external stiffness reactions should be applied.', pass_by_obj=True)
        self.add_output('kx', np.zeros(nK), units='m', desc='spring stiffness in x-direction', pass_by_obj=True)
        self.add_output('ky', np.zeros(nK), units='m', desc='spring stiffness in y-direction', pass_by_obj=True)
        self.add_output('kz', np.zeros(nK), units='m', desc='spring stiffness in z-direction', pass_by_obj=True)
        self.add_output('ktx', np.zeros(nK), units='m', desc='spring stiffness in theta_x-rotation', pass_by_obj=True)
        self.add_output('kty', np.zeros(nK), units='m', desc='spring stiffness in theta_y-rotation', pass_by_obj=True)
        self.add_output('ktz', np.zeros(nK), units='m', desc='spring stiffness in theta_z-rotation', pass_by_obj=True)

        # extra mass
        nMass = 1
        self.add_output('midx', np.zeros(nMass, dtype=np.int_), desc='indices where added mass should be applied.', pass_by_obj=True)
        self.add_output('m', np.zeros(nMass), units='kg', desc='added mass')
        self.add_output('mIxx', np.zeros(nMass), units='kg*m**2', desc='x mass moment of inertia about some point p')
        self.add_output('mIyy', np.zeros(nMass), units='kg*m**2', desc='y mass moment of inertia about some point p')
        self.add_output('mIzz', np.zeros(nMass), units='kg*m**2', desc='z mass moment of inertia about some point p')
        self.add_output('mIxy', np.zeros(nMass), units='kg*m**2', desc='xy mass moment of inertia about some point p')
        self.add_output('mIxz', np.zeros(nMass), units='kg*m**2', desc='xz mass moment of inertia about some point p')
        self.add_output('mIyz', np.zeros(nMass), units='kg*m**2', desc='yz mass moment of inertia about some point p')
        self.add_output('mrhox', np.zeros(nMass), units='m', desc='x-location of p relative to node')
        self.add_output('mrhoy', np.zeros(nMass), units='m', desc='y-location of p relative to node')
        self.add_output('mrhoz', np.zeros(nMass), units='m', desc='z-location of p relative to node')

        # point loads (if addGravityLoadForExtraMass=True be sure not to double count by adding those force here also)
        nPL = 1
        self.add_output('plidx', np.zeros(nPL, dtype=np.int_), desc='indices where point loads should be applied.', pass_by_obj=True)
        self.add_output('Fx', np.zeros(nPL), units='N', desc='point force in x-direction')
        self.add_output('Fy', np.zeros(nPL), units='N', desc='point force in y-direction')
        self.add_output('Fz', np.zeros(nPL), units='N', desc='point force in z-direction')
        self.add_output('Mxx', np.zeros(nPL), units='N*m', desc='point moment about x-axis')
        self.add_output('Myy', np.zeros(nPL), units='N*m', desc='point moment about y-axis')
        self.add_output('Mzz', np.zeros(nPL), units='N*m', desc='point moment about z-axis')

    def solve_nonlinear(self, params, unknowns, resids):
        # Prepare for reactions: rigid at tower base
        unknowns['kidx'] = np.array([ 0 ], dtype=np.int_)
        if params['monopile']:
            # Soil springs at the base.  NOTE(review): the interleaved indexing
            # assumes k_monopile is ordered [x, theta_x, y, theta_y, z, theta_z]
            # (the TowerSoil 'k' output) -- confirm against that component.
            kmono = params['k_monopile']
            unknowns['kx']  = np.array([ kmono[0] ])
            unknowns['ky']  = np.array([ kmono[2] ])
            unknowns['kz']  = np.array([ kmono[4] ])
            unknowns['ktx'] = np.array([ kmono[1] ])
            unknowns['kty'] = np.array([ kmono[3] ])
            unknowns['ktz'] = np.array([ kmono[5] ])
        else:
            # Land-based tower: fully rigid base in all six DOFs
            unknowns['kx']  = np.array([ np.inf ])
            unknowns['ky']  = np.array([ np.inf ])
            unknowns['kz']  = np.array([ np.inf ])
            unknowns['ktx'] = np.array([ np.inf ])
            unknowns['kty'] = np.array([ np.inf ])
            unknowns['ktz'] = np.array([ np.inf ])

        # Prepare RNA for "extra node mass" at the tower-top node
        unknowns['midx']  = np.array([ len(params['z'])-1 ], dtype=np.int_)
        unknowns['m']     = np.array([ params['mass'] ])
        unknowns['mIxx']  = np.array([ params['mI'][0] ])
        unknowns['mIyy']  = np.array([ params['mI'][1] ])
        unknowns['mIzz']  = np.array([ params['mI'][2] ])
        unknowns['mIxy']  = np.array([ params['mI'][3] ])
        unknowns['mIxz']  = np.array([ params['mI'][4] ])
        unknowns['mIyz']  = np.array([ params['mI'][5] ])
        unknowns['mrhox'] = np.array([ params['mrho'][0] ])
        unknowns['mrhoy'] = np.array([ params['mrho'][1] ])
        unknowns['mrhoz'] = np.array([ params['mrho'][2] ])

        # Prepare point forces at the RNA (tower-top) node
        unknowns['plidx'] = np.array([ len(params['z'])-1 ], dtype=np.int_)
        unknowns['Fx']    = np.array([ params['rna_F'][0] ])
        unknowns['Fy']    = np.array([ params['rna_F'][1] ])
        unknowns['Fz']    = np.array([ params['rna_F'][2] ])
        unknowns['Mxx']   = np.array([ params['rna_M'][0] ])
        unknowns['Myy']   = np.array([ params['rna_M'][1] ])
        unknowns['Mzz']   = np.array([ params['rna_M'][2] ])

    def list_deriv_vars(self):
        # NOTE(review): mrhox/mrhoy/mrhoz depend on 'mrho' but are not listed
        # as derivative outputs -- confirm whether that is intentional.
        inputs = ('mass', 'mI', 'mrho', 'rna_F', 'rna_M')
        outputs = ('m', 'mIxx', 'mIyy', 'mIzz', 'mIxy', 'mIxz', 'mIyz', 'Fx', 'Fy', 'Fz', 'Mxx', 'Myy', 'Mzz')
        return inputs, outputs

    def linearize(self, params, unknowns, resids):
        J = {}
        inp,out = self.list_deriv_vars()
        for o in out:
            for i in inp:
                # BUG FIX: np.size handles the scalar 'mass' input, for which
                # the original len(params[i]) raised a TypeError.
                J[o,i] = np.zeros( (len(unknowns[o]), np.size(params[i])) )

        J['m','mass']    = 1.0
        J['mIxx','mI']   = np.eye(6)[0,:]
        J['mIyy','mI']   = np.eye(6)[1,:]
        J['mIzz','mI']   = np.eye(6)[2,:]
        J['mIxy','mI']   = np.eye(6)[3,:]
        J['mIxz','mI']   = np.eye(6)[4,:]
        J['mIyz','mI']   = np.eye(6)[5,:]
        J['Fx','rna_F']  = np.eye(3)[0,:]
        # BUG FIX: the y-components previously selected row 2 (the z-row) of
        # the identity; Fy and Myy depend on index 1 of rna_F / rna_M.
        J['Fy','rna_F']  = np.eye(3)[1,:]
        J['Fz','rna_F']  = np.eye(3)[2,:]
        J['Mxx','rna_M'] = np.eye(3)[0,:]
        J['Myy','rna_M'] = np.eye(3)[1,:]
        J['Mzz','rna_M'] = np.eye(3)[2,:]

        # BUG FIX: the original never returned J, so OpenMDAO received None.
        return J
class TowerPostFrame(Component):
    """Post-process frame-analysis results into utilization constraints.

    Takes the section stresses and internal forces computed by the frame
    solver and evaluates von Mises stress utilization, Eurocode shell
    buckling, global buckling, and fatigue damage, plus packs the first two
    natural frequencies into a single output array.
    """
    def __init__(self, nFull, nDEL):
        super(TowerPostFrame, self).__init__()

        # effective geometry -- used for handbook methods to estimate hoop stress, buckling, fatigue
        self.add_param('z', np.zeros(nFull), units='m', desc='location along tower. start at bottom and go to top')
        self.add_param('d', np.zeros(nFull), units='m', desc='effective tower diameter for section')
        self.add_param('t', np.zeros(nFull-1), units='m', desc='effective shell thickness for section')
        self.add_param('L_reinforced', 0.0, units='m', desc='buckling length')

        # Material properties
        self.add_param('E', 0.0, units='N/m**2', desc='modulus of elasticity')

        # Processed Frame3DD outputs
        self.add_param('Fz', np.zeros(nFull-1), units='N', desc='Axial foce in vertical z-direction in cylinder structure.')
        self.add_param('Mxx', np.zeros(nFull-1), units='N*m', desc='Moment about x-axis in cylinder structure.')
        self.add_param('Myy', np.zeros(nFull-1), units='N*m', desc='Moment about y-axis in cylinder structure.')
        self.add_param('axial_stress', val=np.zeros(nFull-1), units='N/m**2', desc='axial stress in tower elements')
        self.add_param('shear_stress', val=np.zeros(nFull-1), units='N/m**2', desc='shear stress in tower elements')
        self.add_param('hoop_stress' , val=np.zeros(nFull-1), units='N/m**2', desc='hoop stress in tower elements')

        # safety factors
        self.add_param('gamma_f', 1.35, desc='safety factor on loads')
        self.add_param('gamma_m', 1.1, desc='safety factor on materials')
        self.add_param('gamma_n', 1.0, desc='safety factor on consequence of failure')
        self.add_param('gamma_b', 1.1, desc='buckling safety factor')
        self.add_param('sigma_y', 0.0, units='N/m**2', desc='yield stress')
        self.add_param('gamma_fatigue', 1.755, desc='total safety factor for fatigue')

        # fatigue parameters
        self.add_param('life', 20.0, desc='fatigue life of tower')
        self.add_param('m_SN', 4, desc='slope of S/N curve', pass_by_obj=True)
        self.add_param('DC', 80.0, desc='standard value of stress')
        self.add_param('z_DEL', np.zeros(nDEL), desc='absolute z coordinates of corresponding fatigue parameters', pass_by_obj=True)
        self.add_param('M_DEL', np.zeros(nDEL), desc='fatigue parameters at corresponding z coordinates', pass_by_obj=True)

        # Frequencies
        self.add_param('f1', 0.0, units='Hz', desc='First natural frequency')
        self.add_param('f2', 0.0, units='Hz', desc='Second natural frequency')

        # outputs
        self.add_output('structural_frequencies', np.zeros(NFREQ), units='Hz', desc='First and second natural frequency')
        self.add_output('top_deflection', 0.0, units='m', desc='Deflection of tower top in yaw-aligned +x direction')
        self.add_output('stress', np.zeros(nFull-1), desc='Von Mises stress utilization along tower at specified locations. incudes safety factor.')
        self.add_output('shell_buckling', np.zeros(nFull-1), desc='Shell buckling constraint. Should be < 1 for feasibility. Includes safety factors')
        self.add_output('global_buckling', np.zeros(nFull-1), desc='Global buckling constraint. Should be < 1 for feasibility. Includes safety factors')
        self.add_output('damage', np.zeros(nFull-1), desc='Fatigue damage at each tower section')
        self.add_output('turbine_F', val=np.zeros(3), units='N', desc='Total force on tower+rna')
        self.add_output('turbine_M', val=np.zeros(3), units='N*m', desc='Total x-moment on tower+rna measured at base')

        # Derivatives: finite differences (central, relative step)
        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'
        self.deriv_options['step_calc'] = 'relative'
        self.deriv_options['step_size'] = 1e-5

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluate stress, buckling, and fatigue utilization constraints."""
        # Unpack some variables; scalar material/geometry properties are
        # broadcast to one value per element to match the stress arrays.
        axial_stress = params['axial_stress']
        shear_stress = params['shear_stress']
        hoop_stress = params['hoop_stress']
        sigma_y = params['sigma_y'] * np.ones(axial_stress.shape)
        E = params['E'] * np.ones(axial_stress.shape)
        L_reinforced = params['L_reinforced'] * np.ones(axial_stress.shape)
        # Convert nodal diameters/z to per-section (element) values
        d,_ = nodal2sectional(params['d'])
        z_section,_ = nodal2sectional(params['z'])

        # Frequencies: only the first two slots are filled here
        unknowns['structural_frequencies'] = np.zeros(NFREQ)
        unknowns['structural_frequencies'][0] = params['f1']
        unknowns['structural_frequencies'][1] = params['f2']

        # von mises stress (utilization includes load/material/consequence factors)
        unknowns['stress'] = Util.vonMisesStressUtilization(axial_stress, hoop_stress, shear_stress,
                                                           params['gamma_f']*params['gamma_m']*params['gamma_n'], sigma_y)

        # shell buckling (Eurocode handbook method)
        unknowns['shell_buckling'] = Util.shellBucklingEurocode(d, params['t'], axial_stress, hoop_stress,
                                                               shear_stress, L_reinforced, E, sigma_y, params['gamma_f'], params['gamma_b'])

        # global buckling
        tower_height = params['z'][-1] - params['z'][0]
        # Resultant bending moment magnitude per element
        M = np.sqrt(params['Mxx']**2 + params['Myy']**2)
        unknowns['global_buckling'] = Util.bucklingGL(d, params['t'], params['Fz'], M, tower_height, E,
                                                     sigma_y, params['gamma_f'], params['gamma_b'])

        # fatigue: number of cycles over the design life (seconds in a year * years)
        N_DEL = 365.0*24.0*3600.0*params['life'] * np.ones(len(params['t']))
        unknowns['damage'] = np.zeros(N_DEL.shape)

        # Only evaluate fatigue damage if damage-equivalent loads were supplied
        if any(params['M_DEL']):
            # Interpolate the DEL moments onto the section midpoints
            M_DEL = np.interp(z_section, params['z_DEL'], params['M_DEL'])

            unknowns['damage'] = Util.fatigue(M_DEL, N_DEL, d, params['t'], params['m_SN'],
                                              params['DC'], params['gamma_fatigue'], stress_factor=1.0, weld_factor=True)
# -----------------
# Assembly
# -----------------
class TowerLeanSE(Group):
    """Tower geometry, mass, and manufacturability group (no load analysis).

    Wires together the cylinder discretization and mass components, the
    hub-height constraint, geometric manufacturability constraints, and the
    combined turbine (tower + RNA) mass properties.

    Parameters
    ----------
    nPoints : int
        number of tower geometry control points
    nFull : int
        number of nodes in the refined (full) discretization
    """
    def __init__(self, nPoints, nFull):
        super(TowerLeanSE, self).__init__()

        # BUG FIX: floor division keeps nRefine an integer on Python 3 (plain
        # '/' on two ints returns a float there, which would break the node
        # counting inside CylinderDiscretization).  Behavior on Python 2 is
        # unchanged.
        nRefine = (nFull-1)//(nPoints-1)

        # Independent variables that are unique to TowerSE
        self.add('tower_section_height', IndepVarComp('tower_section_height', np.zeros(nPoints-1)), promotes=['*'])
        self.add('tower_outer_diameter', IndepVarComp('tower_outer_diameter', np.zeros(nPoints)), promotes=['*'])
        self.add('tower_wall_thickness', IndepVarComp('tower_wall_thickness', np.zeros(nPoints-1)), promotes=['*'])
        self.add('tower_outfitting_factor', IndepVarComp('tower_outfitting_factor', 0.0), promotes=['*'])
        self.add('tower_buckling_length', IndepVarComp('tower_buckling_length', 0.0), promotes=['*'])

        # All the static components
        self.add('geometry', CylinderDiscretization(nPoints, nRefine), promotes=['*'])
        self.add('tgeometry', TowerDiscretization(), promotes=['hub_height','height_constraint'])

        self.add('cm', CylinderMass(nFull), promotes=['material_density','z_full','d_full','t_full',
                                                      'material_cost_rate','labor_cost_rate','painting_cost_rate'])
        self.add('tm', TowerMass(nFull), promotes=['tower_mass','tower_center_of_mass','tower_I_base','tower_raw_cost'])
        self.add('gc', Util.GeometricConstraints(nPoints), promotes=['min_d_to_t','max_taper','manufacturability','weldability'])
        self.add('turb', TurbineMass(), promotes=['turbine_mass','rna_mass', 'rna_cg', 'rna_I'])

        # Connections for geometry and mass
        self.connect('tower_section_height', 'section_height')
        self.connect('tower_outer_diameter', ['diameter', 'gc.d'])
        self.connect('tower_wall_thickness', ['wall_thickness', 'gc.t'])
        self.connect('tower_outfitting_factor', 'cm.outfitting_factor')
        # The last control point is the tower top; it must match hub_height
        self.connect('z_param', 'tgeometry.z_end', src_indices=[nPoints-1])
        self.connect('hub_height', 'turb.hubH')

        # Cylinder mass results feed the tower mass relabeling component
        self.connect('cm.mass', 'tm.cylinder_mass')
        self.connect('cm.cost', 'tm.cylinder_cost')
        self.connect('cm.center_of_mass', 'tm.cylinder_center_of_mass')
        self.connect('cm.section_center_of_mass','tm.cylinder_section_center_of_mass')
        self.connect('cm.I_base','tm.cylinder_I_base')

        # Tower mass properties feed the combined turbine mass component
        self.connect('tower_mass', 'turb.tower_mass')
        self.connect('tower_center_of_mass', 'turb.tower_center_of_mass')
        self.connect('tower_I_base', 'turb.tower_I_base')
class TowerSE(Group):
def __init__(self, nLC, nPoints, nFull, nDEL, wind=''):
super(TowerSE, self).__init__()
# Independent variables that are unique to TowerSE
self.add('tower_M_DEL', IndepVarComp('tower_M_DEL', np.zeros(nDEL), pass_by_obj=True), promotes=['*'])
self.add('tower_z_DEL', IndepVarComp('tower_z_DEL', np.zeros(nDEL), pass_by_obj=True), promotes=['*'])
self.add('tower_add_gravity', IndepVarComp('tower_add_gravity', True, pass_by_obj=True), promotes=['*'])
self.add('tower_force_discretization', IndepVarComp('tower_force_discretization', 5.0), promotes=['*'])
self.add('monopile', IndepVarComp('monopile', False, pass_by_obj=True), promotes=['*'])
self.add('suctionpile_depth', IndepVarComp('suctionpile_depth', 0.0), promotes=['*'])
self.add('soil_G', IndepVarComp('soil_G', 0.0), promotes=['*'])
self.add('soil_nu', IndepVarComp('soil_nu', 0.0), promotes=['*'])
self.add('geom', TowerLeanSE(nPoints, nFull), promotes=['*'])
self.add('props', CylindricalShellProperties(nFull))
self.add('soil', TowerSoil())
# Connections for geometry and mass
self.connect('d_full', 'props.d')
self.connect('t_full', 'props.t')
self.connect('d_full', 'soil.d0', src_indices=[0])
self.connect('suctionpile_depth', 'soil.depth')
self.connect('soil_G', 'soil.G')
self.connect('soil_nu', 'soil.nu')
# Add in all Components that drive load cases
# Note multiple load cases have to be handled by replicating components and not groups/assemblies.
# Replicating Groups replicates the IndepVarComps which doesn't play nicely in OpenMDAO
for iLC in range(nLC):
lc = '' if nLC==1 else str(iLC+1)
if wind is None or wind.lower() in ['power', 'powerwind', '']:
self.add('wind'+lc, PowerWind(nFull), promotes=['z0'])
elif wind.lower() == 'logwind':
self.add('wind'+lc, LogWind(nFull), promotes=['z0'])
else:
raise ValueError('Unknown wind type, '+wind)
self.add('wave'+lc, LinearWaves(nFull), promotes=['z_floor'])
self.add('windLoads'+lc, CylinderWindDrag(nFull), promotes=['cd_usr'])
self.add('waveLoads'+lc, CylinderWaveDrag(nFull), promotes=['cm','cd_usr'])
self.add('distLoads'+lc, AeroHydroLoads(nFull))#, promotes=['yaw'])
self.add('pre'+lc, TowerPreFrame(nFull), promotes=['monopile','k_monopile'])
self.add('tower'+lc, CylinderFrame3DD(nFull, 1, 1, 1), promotes=['E','G','tol','Mmethod','geom','lump','shear',
'nM','shift','sigma_y'])
self.add('post'+lc, TowerPostFrame(nFull, nDEL), promotes=['E','sigma_y','DC','life','m_SN',
'gamma_b','gamma_f','gamma_fatigue','gamma_m','gamma_n'])
self.connect('z_full', ['wind'+lc+'.z', 'wave'+lc+'.z', 'windLoads'+lc+'.z', 'waveLoads'+lc+'.z', 'distLoads'+lc+'.z', 'pre'+lc+'.z', 'tower'+lc+'.z', 'post'+lc+'.z'])
self.connect('d_full', ['windLoads'+lc+'.d', 'waveLoads'+lc+'.d', 'tower'+lc+'.d', 'post'+lc+'.d'])
self.connect('rna_mass', 'pre'+lc+'.mass')
self.connect('rna_cg', 'pre'+lc+'.mrho')
self.connect('rna_I', 'pre'+lc+'.mI')
self.connect('material_density', 'tower'+lc+'.rho')
self.connect('pre'+lc+'.kidx', 'tower'+lc+'.kidx')
self.connect('pre'+lc+'.kx', 'tower'+lc+'.kx')
self.connect('pre'+lc+'.ky', 'tower'+lc+'.ky')
self.connect('pre'+lc+'.kz', 'tower'+lc+'.kz')
self.connect('pre'+lc+'.ktx', 'tower'+lc+'.ktx')
self.connect('pre'+lc+'.kty', 'tower'+lc+'.kty')
self.connect('pre'+lc+'.ktz', 'tower'+lc+'.ktz')
self.connect('pre'+lc+'.midx', 'tower'+lc+'.midx')
self.connect('pre'+lc+'.m', 'tower'+lc+'.m')
self.connect('pre'+lc+'.mIxx', 'tower'+lc+'.mIxx')
self.connect('pre'+lc+'.mIyy', 'tower'+lc+'.mIyy')
self.connect('pre'+lc+'.mIzz', 'tower'+lc+'.mIzz')
self.connect('pre'+lc+'.mIxy', 'tower'+lc+'.mIxy')
self.connect('pre'+lc+'.mIxz', 'tower'+lc+'.mIxz')
self.connect('pre'+lc+'.mIyz', 'tower'+lc+'.mIyz')
self.connect('pre'+lc+'.mrhox', 'tower'+lc+'.mrhox')
self.connect('pre'+lc+'.mrhoy', 'tower'+lc+'.mrhoy')
self.connect('pre'+lc+'.mrhoz', 'tower'+lc+'.mrhoz')
self.connect('pre'+lc+'.plidx', 'tower'+lc+'.plidx')
self.connect('pre'+lc+'.Fx', 'tower'+lc+'.Fx')
self.connect('pre'+lc+'.Fy', 'tower'+lc+'.Fy')
self.connect('pre'+lc+'.Fz', 'tower'+lc+'.Fz')
self.connect('pre'+lc+'.Mxx', 'tower'+lc+'.Mxx')
self.connect('pre'+lc+'.Myy', 'tower'+lc+'.Myy')
self.connect('pre'+lc+'.Mzz', 'tower'+lc+'.Mzz')
self.connect('tower_force_discretization', 'tower'+lc+'.dx')
self.connect('tower_add_gravity', 'tower'+lc+'.addGravityLoadForExtraMass')
self.connect('t_full', ['tower'+lc+'.t','post'+lc+'.t'])
self.connect('soil.k', 'k_monopile')
self.connect('tower'+lc+'.f1', 'post'+lc+'.f1')
self.connect('tower'+lc+'.f2', 'post'+lc+'.f2')
self.connect('tower'+lc+'.Fz_out', 'post'+lc+'.Fz')
self.connect('tower'+lc+'.Mxx_out', 'post'+lc+'.Mxx')
self.connect('tower'+lc+'.Myy_out', 'post'+lc+'.Myy')
self.connect('tower'+lc+'.axial_stress', 'post'+lc+'.axial_stress')
self.connect('tower'+lc+'.shear_stress', 'post'+lc+'.shear_stress')
self.connect('tower'+lc+'.hoop_stress_euro', 'post'+lc+'.hoop_stress')
# connections to wind1
self.connect('z0', 'wave'+lc+'.z_surface')
#self.connect('z_floor', 'waveLoads'+lc+'.wlevel')
# connections to windLoads1
self.connect('wind'+lc+'.U', 'windLoads'+lc+'.U')
#self.connect('wind'+lc+'.beta', 'windLoads'+lc+'.beta')
# connections to waveLoads1
self.connect('wave'+lc+'.U', 'waveLoads'+lc+'.U')
self.connect('wave'+lc+'.A', 'waveLoads'+lc+'.A')
#self.connect('wave'+lc+'.beta', 'waveLoads'+lc+'.beta')
self.connect('wave'+lc+'.p', 'waveLoads'+lc+'.p')
# connections to distLoads1
self.connect('windLoads'+lc+'.windLoads_Px', 'distLoads'+lc+'.windLoads_Px')
self.connect('windLoads'+lc+'.windLoads_Py', 'distLoads'+lc+'.windLoads_Py')
self.connect('windLoads'+lc+'.windLoads_Pz', 'distLoads'+lc+'.windLoads_Pz')
self.connect('windLoads'+lc+'.windLoads_qdyn', 'distLoads'+lc+'.windLoads_qdyn')
self.connect('windLoads'+lc+'.windLoads_beta', 'distLoads'+lc+'.windLoads_beta')
#self.connect('windLoads'+lc+'.windLoads_Px0', 'distLoads'+lc+'.windLoads_Px0')
#self.connect('windLoads'+lc+'.windLoads_Py0', 'distLoads'+lc+'.windLoads_Py0')
#self.connect('windLoads'+lc+'.windLoads_Pz0', 'distLoads'+lc+'.windLoads_Pz0')
#self.connect('windLoads'+lc+'.windLoads_qdyn0', 'distLoads'+lc+'.windLoads_qdyn0')
#self.connect('windLoads'+lc+'.windLoads_beta0', 'distLoads'+lc+'.windLoads_beta0')
self.connect('windLoads'+lc+'.windLoads_z', 'distLoads'+lc+'.windLoads_z')
self.connect('windLoads'+lc+'.windLoads_d', 'distLoads'+lc+'.windLoads_d')
self.connect('waveLoads'+lc+'.waveLoads_Px', 'distLoads'+lc+'.waveLoads_Px')
self.connect('waveLoads'+lc+'.waveLoads_Py', 'distLoads'+lc+'.waveLoads_Py')
self.connect('waveLoads'+lc+'.waveLoads_Pz', 'distLoads'+lc+'.waveLoads_Pz')
self.connect('waveLoads'+lc+'.waveLoads_pt', 'distLoads'+lc+'.waveLoads_qdyn')
self.connect('waveLoads'+lc+'.waveLoads_beta', 'distLoads'+lc+'.waveLoads_beta')
#self.connect('waveLoads'+lc+'.waveLoads_Px0', 'distLoads'+lc+'.waveLoads_Px0')
#self.connect('waveLoads'+lc+'.waveLoads_Py0', 'distLoads'+lc+'.waveLoads_Py0')
#self.connect('waveLoads'+lc+'.waveLoads_Pz0', 'distLoads'+lc+'.waveLoads_Pz0')
#self.connect('waveLoads'+lc+'.waveLoads_qdyn0', 'distLoads'+lc+'.waveLoads_qdyn0')
#self.connect('waveLoads'+lc+'.waveLoads_beta0', 'distLoads'+lc+'.waveLoads_beta0')
self.connect('waveLoads'+lc+'.waveLoads_z', 'distLoads'+lc+'.waveLoads_z')
self.connect('waveLoads'+lc+'.waveLoads_d', 'distLoads'+lc+'.waveLoads_d')
# Tower connections
self.connect('tower_buckling_length', ['tower'+lc+'.L_reinforced', 'post'+lc+'.L_reinforced'])
self.connect('tower_M_DEL', 'post'+lc+'.M_DEL')
self.connect('tower_z_DEL', 'post'+lc+'.z_DEL')
self.connect('props.Az', 'tower'+lc+'.Az')
self.connect('props.Asx', 'tower'+lc+'.Asx')
self.connect('props.Asy', 'tower'+lc+'.Asy')
self.connect('props.Jz', 'tower'+lc+'.Jz')
self.connect('props.Ixx', 'tower'+lc+'.Ixx')
self.connect('props.Iyy', 'tower'+lc+'.Iyy')
self.connect('distLoads'+lc+'.Px', 'tower'+lc+'.Px')
self.connect('distLoads'+lc+'.Py', 'tower'+lc+'.Py')
self.connect('distLoads'+lc+'.Pz', 'tower'+lc+'.Pz')
self.connect('distLoads'+lc+'.qdyn', 'tower'+lc+'.qdyn')
# Derivatives
self.deriv_options['type'] = 'fd'
self.deriv_options['form'] = 'central'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['step_size'] = 1e-5
if __name__ == '__main__':
# --- tower setup ------
from commonse.environment import PowerWind
from commonse.environment import LogWind
# --- geometry ----
h_param = np.diff(np.array([0.0, 43.8, 87.6]))
d_param = np.array([6.0, 4.935, 3.87])
t_param = 1.3*np.array([0.025, 0.021])
z_foundation = 0.0
L_reinforced = 30.0 # [m] buckling length
theta_stress = 0.0
yaw = 0.0
Koutfitting = 1.07
# --- material props ---
E = 210e9
G = 80.8e9
rho = 8500.0
sigma_y = 450.0e6
# --- extra mass ----
m = np.array([285598.8])
mIxx = 1.14930678e+08
mIyy = 2.20354030e+07
mIzz = 1.87597425e+07
mIxy = 0.0
mIxz = 5.03710467e+05
mIyz = 0.0
mI = np.array([mIxx, mIyy, mIzz, mIxy, mIxz, mIyz])
mrho = np.array([-1.13197635, 0.0, 0.50875268])
# -----------
# --- wind ---
wind_zref = 90.0
wind_z0 = 0.0
shearExp = 0.2
cd_usr = None
# ---------------
# --- wave ---
hmax = 0.0
T = 1.0
cm = 1.0
monopile = False
suction_depth = 0.0
soilG = 140e6
soilnu = 0.4
# ---------------
# two load cases. TODO: use a case iterator
# # --- loading case 1: max Thrust ---
wind_Uref1 = 11.73732
Fx1 = 1284744.19620519
Fy1 = 0.
Fz1 = -2914124.84400512
Mxx1 = 3963732.76208099
Myy1 = -2275104.79420872
Mzz1 = -346781.68192839
# # ---------------
# # --- loading case 2: max wind speed ---
wind_Uref2 = 70.0
Fx2 = 930198.60063279
Fy2 = 0.
Fz2 = -2883106.12368949
Mxx2 = -1683669.22411597
Myy2 = -2522475.34625363
Mzz2 = 147301.97023764
# # ---------------
# --- safety factors ---
gamma_f = 1.35
gamma_m = 1.3
gamma_n = 1.0
gamma_b = 1.1
# ---------------
# --- fatigue ---
z_DEL = np.array([0.000, 1.327, 3.982, 6.636, 9.291, 11.945, 14.600, 17.255, 19.909, 22.564, 25.218, 27.873, 30.527, 33.182, 35.836, 38.491, 41.145, 43.800, 46.455, 49.109, 51.764, 54.418, 57.073, 59.727, 62.382, 65.036, 67.691, 70.345, 73.000, 75.655, 78.309, 80.964, 83.618, 86.273, 87.600])
M_DEL = 1e3*np.array([8.2940E+003, 8.1518E+003, 7.8831E+003, 7.6099E+003, 7.3359E+003, 7.0577E+003, 6.7821E+003, 6.5119E+003, 6.2391E+003, 5.9707E+003, 5.7070E+003, 5.4500E+003, 5.2015E+003, 4.9588E+003, 4.7202E+003, 4.4884E+003, 4.2577E+003, 4.0246E+003, 3.7942E+003, 3.5664E+003, 3.3406E+003, 3.1184E+003, 2.8977E+003, 2.6811E+003, 2.4719E+003, 2.2663E+003, 2.0673E+003, 1.8769E+003, 1.7017E+003, 1.5479E+003, 1.4207E+003, 1.3304E+003, 1.2780E+003, 1.2673E+003, 1.2761E+003])
nDEL = len(z_DEL)
gamma_fatigue = 1.35*1.3*1.0
life = 20.0
m_SN = 4
# ---------------
# --- constraints ---
min_d_to_t = 120.0
max_taper = 0.2
# ---------------
# # V_max = 80.0 # tip speed
# # D = 126.0
# # .freq1p = V_max / (D/2) / (2*pi) # convert to Hz
nPoints = len(d_param)
nFull = 5*(nPoints-1) + 1
wind = 'PowerWind'
nLC = 2
prob = Problem(root=TowerSE(nLC, nPoints, nFull, nDEL, wind=wind))
prob.setup()
if wind=='PowerWind':
prob['wind1.shearExp'] = prob['wind2.shearExp'] = shearExp
# assign values to params
# --- geometry ----
prob['hub_height'] = h_param.sum()
prob['foundation_height'] = 0.0
prob['tower_section_height'] = h_param
prob['tower_outer_diameter'] = d_param
prob['tower_wall_thickness'] = t_param
prob['tower_buckling_length'] = L_reinforced
prob['tower_outfitting_factor'] = Koutfitting
prob['distLoads1.yaw'] = prob['distLoads2.yaw'] = yaw
prob['monopile'] = monopile
prob['suctionpile_depth'] = suction_depth
prob['soil_G'] = soilG
prob['soil_nu'] = soilnu
# --- material props ---
prob['E'] = E
prob['G'] = G
prob['material_density'] = rho
prob['sigma_y'] = sigma_y
# --- extra mass ----
prob['rna_mass'] = m
prob['rna_I'] = mI
prob['rna_cg'] = mrho
# -----------
# --- wind & wave ---
prob['wind1.zref'] = prob['wind2.zref'] = wind_zref
prob['z0'] = wind_z0
prob['cd_usr'] = cd_usr
prob['windLoads1.rho'] = prob['windLoads2.rho'] = 1.225
prob['windLoads1.mu'] = prob['windLoads2.mu'] = 1.7934e-5
prob['wave1.rho'] = prob['wave2.rho'] = prob['waveLoads1.rho'] = prob['waveLoads2.rho'] = 1025.0
prob['waveLoads1.mu'] = prob['waveLoads2.mu'] = 1.3351e-3
prob['windLoads1.beta'] = prob['windLoads2.beta'] = prob['waveLoads1.beta'] = prob['waveLoads2.beta'] = 0.0
prob['wave1.hmax'] = prob['wave2.hmax'] = hmax
prob['wave1.T'] = prob['wave2.T'] = T
#prob['waveLoads1.U0'] = prob['waveLoads1.A0'] = prob['waveLoads1.beta0'] = prob['waveLoads2.U0'] = prob['waveLoads2.A0'] = prob['waveLoads2.beta0'] = 0.0
# ---------------
# --- safety factors ---
prob['gamma_f'] = gamma_f
prob['gamma_m'] = gamma_m
prob['gamma_n'] = gamma_n
prob['gamma_b'] = gamma_b
prob['gamma_fatigue'] = gamma_fatigue
# ---------------
prob['DC'] = 80.0
prob['shear'] = True
prob['geom'] = False
prob['tower_force_discretization'] = 5.0
prob['nM'] = 2
prob['Mmethod'] = 1
prob['lump'] = 0
prob['tol'] = 1e-9
prob['shift'] = 0.0
# --- fatigue ---
prob['tower_z_DEL'] = z_DEL
prob['tower_M_DEL'] = M_DEL
prob['life'] = life
prob['m_SN'] = m_SN
# ---------------
# --- constraints ---
prob['min_d_to_t'] = min_d_to_t
prob['max_taper'] = max_taper
# ---------------
# # --- loading case 1: max Thrust ---
prob['wind1.Uref'] = wind_Uref1
prob['pre1.rna_F'] = np.array([Fx1, Fy1, Fz1])
prob['pre1.rna_M'] = np.array([Mxx1, Myy1, Mzz1])
# # ---------------
# # --- loading case 2: max Wind Speed ---
prob['wind2.Uref'] = wind_Uref2
prob['pre2.rna_F'] = np.array([Fx2, Fy2, Fz2])
prob['pre2.rna_M' ] = np.array([Mxx2, Myy2, Mzz2])
# # --- run ---
prob.run()
z,_ = nodal2sectional(prob['z_full'])
print('zs=', z)
print('ds=', prob['d_full'])
print('ts=', prob['t_full'])
print('mass (kg) =', prob['tower_mass'])
print('cg (m) =', prob['tower_center_of_mass'])
print('weldability =', prob['weldability'])
print('manufacturability =', prob['manufacturability'])
print('\nwind: ', prob['wind1.Uref'])
print('f1 (Hz) =', prob['tower1.f1'])
print('top_deflection1 (m) =', prob['post1.top_deflection'])
print('stress1 =', prob['post1.stress'])
print('GL buckling =', prob['post1.global_buckling'])
print('Shell buckling =', prob['post1.shell_buckling'])
print('damage =', prob['post1.damage'])
print('\nwind: ', prob['wind2.Uref'])
print('f1 (Hz) =', prob['tower2.f1'])
print('top_deflection2 (m) =', prob['post2.top_deflection'])
print('stress2 =', prob['post2.stress'])
print('GL buckling =', prob['post2.global_buckling'])
print('Shell buckling =', prob['post2.shell_buckling'])
print('damage =', prob['post2.damage'])
stress1 = np.copy( prob['post1.stress'] )
shellBuckle1 = np.copy( prob['post1.shell_buckling'] )
globalBuckle1 = np.copy( prob['post1.global_buckling'] )
damage1 = np.copy( prob['post1.damage'] )
stress2 = prob['post2.stress']
shellBuckle2 = prob['post2.shell_buckling']
globalBuckle2 = prob['post2.global_buckling']
damage2 = prob['post2.damage']
import matplotlib.pyplot as plt
plt.figure(figsize=(5.0, 3.5))
plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=3)
plt.plot(stress1, z, label='stress 1')
plt.plot(stress2, z, label='stress 2')
plt.plot(shellBuckle1, z, label='shell buckling 1')
plt.plot(shellBuckle2, z, label='shell buckling 2')
plt.plot(globalBuckle1, z, label='global buckling 1')
plt.plot(globalBuckle2, z, label='global buckling 2')
plt.plot(damage1, z, label='damage 1')
plt.plot(damage2, z, label='damage 2')
plt.legend(bbox_to_anchor=(1.05, 1.0), loc=2)
plt.xlabel('utilization')
plt.ylabel('height along tower (m)')
#plt.figure(2)
#plt.plot(prob['d_full']/2.+max(prob['d_full']), z, 'ok')
#plt.plot(prob['d_full']/-2.+max(prob['d_full']), z, 'ok')
#fig = plt.figure(3)
#ax1 = fig.add_subplot(121)
#ax2 = fig.add_subplot(122)
#ax1.plot(prob['wind1.U'], z)
#ax2.plot(prob['wind2.U'], z)
#plt.tight_layout()
plt.show()
print(prob['tower1.base_F'])
print(prob['tower1.base_M'])
print(prob['tower2.base_F'])
print(prob['tower2.base_M'])
# ------------
"""
if optimize:
# --- optimizer imports ---
from pyopt_driver.pyopt_driver import pyOptDriver
from openmdao.lib.casehandlers.api import DumpCaseRecorder
# ----------------------
# --- Setup Pptimizer ---
tower.replace('driver', pyOptDriver())
tower.driver.optimizer = 'SNOPT'
tower.driver.options = {'Major feasibility tolerance': 1e-6,
'Minor feasibility tolerance': 1e-6,
'Major optimality tolerance': 1e-5,
'Function precision': 1e-8}
# ----------------------
# --- Objective ---
tower.driver.add_objective('tower1.mass / 300000')
# ----------------------
# --- Design Variables ---
tower.driver.add_parameter('z_param[1]', low=0.0, high=87.0)
tower.driver.add_parameter('d_param[:-1]', low=3.87, high=20.0)
tower.driver.add_parameter('t_param', low=0.005, high=0.2)
# ----------------------
# --- recorder ---
tower.recorders = [DumpCaseRecorder()]
# ----------------------
# --- Constraints ---
tower.driver.add_constraint('tower.stress <= 1.0')
tower.driver.add_constraint('tower.global_buckling <= 1.0')
tower.driver.add_constraint('tower.shell_buckling <= 1.0')
tower.driver.add_constraint('tower.damage <= 1.0')
tower.driver.add_constraint('gc.weldability <= 0.0')
tower.driver.add_constraint('gc.manufacturability <= 0.0')
freq1p = 0.2 # 1P freq in Hz
tower.driver.add_constraint('tower.f1 >= 1.1*%f' % freq1p)
# ----------------------
# --- run opt ---
tower.run()
# ---------------
"""
| [
"commonse.UtilizationSupplement.bucklingGL",
"numpy.sqrt",
"commonse.environment.TowerSoil",
"commonse.vertical_cylinder.CylinderMass",
"matplotlib.pyplot.ylabel",
"commonse.utilities.nodal2sectional",
"commonse.environment.LogWind",
"commonse.UtilizationSupplement.vonMisesStressUtilization",
"openm... | [((34104, 34132), 'numpy.array', 'np.array', (['[6.0, 4.935, 3.87]'], {}), '([6.0, 4.935, 3.87])\n', (34112, 34132), True, 'import numpy as np\n'), ((34439, 34459), 'numpy.array', 'np.array', (['[285598.8]'], {}), '([285598.8])\n', (34447, 34459), True, 'import numpy as np\n'), ((34603, 34649), 'numpy.array', 'np.array', (['[mIxx, mIyy, mIzz, mIxy, mIxz, mIyz]'], {}), '([mIxx, mIyy, mIzz, mIxy, mIxz, mIyz])\n', (34611, 34649), True, 'import numpy as np\n'), ((34661, 34701), 'numpy.array', 'np.array', (['[-1.13197635, 0.0, 0.50875268]'], {}), '([-1.13197635, 0.0, 0.50875268])\n', (34669, 34701), True, 'import numpy as np\n'), ((35712, 36001), 'numpy.array', 'np.array', (['[0.0, 1.327, 3.982, 6.636, 9.291, 11.945, 14.6, 17.255, 19.909, 22.564, \n 25.218, 27.873, 30.527, 33.182, 35.836, 38.491, 41.145, 43.8, 46.455, \n 49.109, 51.764, 54.418, 57.073, 59.727, 62.382, 65.036, 67.691, 70.345,\n 73.0, 75.655, 78.309, 80.964, 83.618, 86.273, 87.6]'], {}), '([0.0, 1.327, 3.982, 6.636, 9.291, 11.945, 14.6, 17.255, 19.909, \n 22.564, 25.218, 27.873, 30.527, 33.182, 35.836, 38.491, 41.145, 43.8, \n 46.455, 49.109, 51.764, 54.418, 57.073, 59.727, 62.382, 65.036, 67.691,\n 70.345, 73.0, 75.655, 78.309, 80.964, 83.618, 86.273, 87.6])\n', (35720, 36001), True, 'import numpy as np\n'), ((39515, 39540), 'numpy.array', 'np.array', (['[Fx1, Fy1, Fz1]'], {}), '([Fx1, Fy1, Fz1])\n', (39523, 39540), True, 'import numpy as np\n'), ((39566, 39594), 'numpy.array', 'np.array', (['[Mxx1, Myy1, Mzz1]'], {}), '([Mxx1, Myy1, Mzz1])\n', (39574, 39594), True, 'import numpy as np\n'), ((39730, 39755), 'numpy.array', 'np.array', (['[Fx2, Fy2, Fz2]'], {}), '([Fx2, Fy2, Fz2])\n', (39738, 39755), True, 'import numpy as np\n'), ((39782, 39810), 'numpy.array', 'np.array', (['[Mxx2, Myy2, Mzz2]'], {}), '([Mxx2, Myy2, Mzz2])\n', (39790, 39810), True, 'import numpy as np\n'), ((39858, 39889), 'commonse.utilities.nodal2sectional', 'nodal2sectional', (["prob['z_full']"], {}), "(prob['z_full'])\n", 
(39873, 39889), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((40910, 40939), 'numpy.copy', 'np.copy', (["prob['post1.stress']"], {}), "(prob['post1.stress'])\n", (40917, 40939), True, 'import numpy as np\n'), ((40961, 40998), 'numpy.copy', 'np.copy', (["prob['post1.shell_buckling']"], {}), "(prob['post1.shell_buckling'])\n", (40968, 40998), True, 'import numpy as np\n'), ((41021, 41059), 'numpy.copy', 'np.copy', (["prob['post1.global_buckling']"], {}), "(prob['post1.global_buckling'])\n", (41028, 41059), True, 'import numpy as np\n'), ((41076, 41105), 'numpy.copy', 'np.copy', (["prob['post1.damage']"], {}), "(prob['post1.damage'])\n", (41083, 41105), True, 'import numpy as np\n'), ((41323, 41353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.0, 3.5)'}), '(figsize=(5.0, 3.5))\n', (41333, 41353), True, 'import matplotlib.pyplot as plt\n'), ((41358, 41412), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 3)', '(0, 0)'], {'colspan': '(2)', 'rowspan': '(3)'}), '((3, 3), (0, 0), colspan=2, rowspan=3)\n', (41374, 41412), True, 'import matplotlib.pyplot as plt\n'), ((41417, 41455), 'matplotlib.pyplot.plot', 'plt.plot', (['stress1', 'z'], {'label': '"""stress 1"""'}), "(stress1, z, label='stress 1')\n", (41425, 41455), True, 'import matplotlib.pyplot as plt\n'), ((41460, 41498), 'matplotlib.pyplot.plot', 'plt.plot', (['stress2', 'z'], {'label': '"""stress 2"""'}), "(stress2, z, label='stress 2')\n", (41468, 41498), True, 'import matplotlib.pyplot as plt\n'), ((41503, 41554), 'matplotlib.pyplot.plot', 'plt.plot', (['shellBuckle1', 'z'], {'label': '"""shell buckling 1"""'}), "(shellBuckle1, z, label='shell buckling 1')\n", (41511, 41554), True, 'import matplotlib.pyplot as plt\n'), ((41559, 41610), 'matplotlib.pyplot.plot', 'plt.plot', (['shellBuckle2', 'z'], {'label': '"""shell buckling 2"""'}), "(shellBuckle2, z, label='shell buckling 2')\n", (41567, 41610), True, 'import matplotlib.pyplot as plt\n'), 
((41615, 41668), 'matplotlib.pyplot.plot', 'plt.plot', (['globalBuckle1', 'z'], {'label': '"""global buckling 1"""'}), "(globalBuckle1, z, label='global buckling 1')\n", (41623, 41668), True, 'import matplotlib.pyplot as plt\n'), ((41673, 41726), 'matplotlib.pyplot.plot', 'plt.plot', (['globalBuckle2', 'z'], {'label': '"""global buckling 2"""'}), "(globalBuckle2, z, label='global buckling 2')\n", (41681, 41726), True, 'import matplotlib.pyplot as plt\n'), ((41731, 41769), 'matplotlib.pyplot.plot', 'plt.plot', (['damage1', 'z'], {'label': '"""damage 1"""'}), "(damage1, z, label='damage 1')\n", (41739, 41769), True, 'import matplotlib.pyplot as plt\n'), ((41774, 41812), 'matplotlib.pyplot.plot', 'plt.plot', (['damage2', 'z'], {'label': '"""damage 2"""'}), "(damage2, z, label='damage 2')\n", (41782, 41812), True, 'import matplotlib.pyplot as plt\n'), ((41817, 41862), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1.0)', 'loc': '(2)'}), '(bbox_to_anchor=(1.05, 1.0), loc=2)\n', (41827, 41862), True, 'import matplotlib.pyplot as plt\n'), ((41867, 41892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""utilization"""'], {}), "('utilization')\n", (41877, 41892), True, 'import matplotlib.pyplot as plt\n'), ((41897, 41933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""height along tower (m)"""'], {}), "('height along tower (m)')\n", (41907, 41933), True, 'import matplotlib.pyplot as plt\n'), ((42266, 42276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (42274, 42276), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4353), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (4347, 4353), True, 'import numpy as np\n'), ((4370, 4381), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (4378, 4381), True, 'import numpy as np\n'), ((5622, 5634), 'numpy.eye', 'np.eye', (['npts'], {}), '(npts)\n', (5628, 5634), True, 'import numpy as np\n'), ((5697, 5716), 'numpy.zeros', 'np.zeros', (['(npts, 6)'], {}), '((npts, 6))\n', (5705, 5716), 
True, 'import numpy as np\n'), ((5933, 5952), 'numpy.zeros', 'np.zeros', (['(6, npts)'], {}), '((6, npts))\n', (5941, 5952), True, 'import numpy as np\n'), ((7766, 7818), 'numpy.array', 'np.array', (["[0.0, 0.0, params['tower_center_of_mass']]"], {}), "([0.0, 0.0, params['tower_center_of_mass']])\n", (7774, 7818), True, 'import numpy as np\n'), ((7989, 8022), 'commonse.utilities.assembleI', 'assembleI', (["params['tower_I_base']"], {}), "(params['tower_I_base'])\n", (7998, 8022), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((8168, 8196), 'commonse.utilities.unassembleI', 'unassembleI', (['(I_tower + I_rna)'], {}), '(I_tower + I_rna)\n', (8179, 8196), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((12403, 12431), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int_'}), '([0], dtype=np.int_)\n', (12411, 12431), True, 'import numpy as np\n'), ((13317, 13343), 'numpy.array', 'np.array', (["[params['mass']]"], {}), "([params['mass']])\n", (13325, 13343), True, 'import numpy as np\n'), ((13374, 13401), 'numpy.array', 'np.array', (["[params['mI'][0]]"], {}), "([params['mI'][0]])\n", (13382, 13401), True, 'import numpy as np\n'), ((13432, 13459), 'numpy.array', 'np.array', (["[params['mI'][1]]"], {}), "([params['mI'][1]])\n", (13440, 13459), True, 'import numpy as np\n'), ((13490, 13517), 'numpy.array', 'np.array', (["[params['mI'][2]]"], {}), "([params['mI'][2]])\n", (13498, 13517), True, 'import numpy as np\n'), ((13548, 13575), 'numpy.array', 'np.array', (["[params['mI'][3]]"], {}), "([params['mI'][3]])\n", (13556, 13575), True, 'import numpy as np\n'), ((13606, 13633), 'numpy.array', 'np.array', (["[params['mI'][4]]"], {}), "([params['mI'][4]])\n", (13614, 13633), True, 'import numpy as np\n'), ((13664, 13691), 'numpy.array', 'np.array', (["[params['mI'][5]]"], {}), "([params['mI'][5]])\n", (13672, 13691), True, 'import numpy as np\n'), ((13722, 13751), 'numpy.array', 'np.array', 
(["[params['mrho'][0]]"], {}), "([params['mrho'][0]])\n", (13730, 13751), True, 'import numpy as np\n'), ((13782, 13811), 'numpy.array', 'np.array', (["[params['mrho'][1]]"], {}), "([params['mrho'][1]])\n", (13790, 13811), True, 'import numpy as np\n'), ((13842, 13871), 'numpy.array', 'np.array', (["[params['mrho'][2]]"], {}), "([params['mrho'][2]])\n", (13850, 13871), True, 'import numpy as np\n'), ((14022, 14052), 'numpy.array', 'np.array', (["[params['rna_F'][0]]"], {}), "([params['rna_F'][0]])\n", (14030, 14052), True, 'import numpy as np\n'), ((14083, 14113), 'numpy.array', 'np.array', (["[params['rna_F'][1]]"], {}), "([params['rna_F'][1]])\n", (14091, 14113), True, 'import numpy as np\n'), ((14144, 14174), 'numpy.array', 'np.array', (["[params['rna_F'][2]]"], {}), "([params['rna_F'][2]])\n", (14152, 14174), True, 'import numpy as np\n'), ((14205, 14235), 'numpy.array', 'np.array', (["[params['rna_M'][0]]"], {}), "([params['rna_M'][0]])\n", (14213, 14235), True, 'import numpy as np\n'), ((14266, 14296), 'numpy.array', 'np.array', (["[params['rna_M'][1]]"], {}), "([params['rna_M'][1]])\n", (14274, 14296), True, 'import numpy as np\n'), ((14327, 14357), 'numpy.array', 'np.array', (["[params['rna_M'][2]]"], {}), "([params['rna_M'][2]])\n", (14335, 14357), True, 'import numpy as np\n'), ((19762, 19790), 'commonse.utilities.nodal2sectional', 'nodal2sectional', (["params['d']"], {}), "(params['d'])\n", (19777, 19790), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((19814, 19842), 'commonse.utilities.nodal2sectional', 'nodal2sectional', (["params['z']"], {}), "(params['z'])\n", (19829, 19842), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((19911, 19926), 'numpy.zeros', 'np.zeros', (['NFREQ'], {}), '(NFREQ)\n', (19919, 19926), True, 'import numpy as np\n'), ((20114, 20258), 'commonse.UtilizationSupplement.vonMisesStressUtilization', 'Util.vonMisesStressUtilization', (['axial_stress', 
'hoop_stress', 'shear_stress', "(params['gamma_f'] * params['gamma_m'] * params['gamma_n'])", 'sigma_y'], {}), "(axial_stress, hoop_stress, shear_stress, \n params['gamma_f'] * params['gamma_m'] * params['gamma_n'], sigma_y)\n", (20144, 20258), True, 'import commonse.UtilizationSupplement as Util\n'), ((20335, 20491), 'commonse.UtilizationSupplement.shellBucklingEurocode', 'Util.shellBucklingEurocode', (['d', "params['t']", 'axial_stress', 'hoop_stress', 'shear_stress', 'L_reinforced', 'E', 'sigma_y', "params['gamma_f']", "params['gamma_b']"], {}), "(d, params['t'], axial_stress, hoop_stress,\n shear_stress, L_reinforced, E, sigma_y, params['gamma_f'], params[\n 'gamma_b'])\n", (20361, 20491), True, 'import commonse.UtilizationSupplement as Util\n'), ((20642, 20690), 'numpy.sqrt', 'np.sqrt', (["(params['Mxx'] ** 2 + params['Myy'] ** 2)"], {}), "(params['Mxx'] ** 2 + params['Myy'] ** 2)\n", (20649, 20690), True, 'import numpy as np\n'), ((20725, 20841), 'commonse.UtilizationSupplement.bucklingGL', 'Util.bucklingGL', (['d', "params['t']", "params['Fz']", 'M', 'tower_height', 'E', 'sigma_y', "params['gamma_f']", "params['gamma_b']"], {}), "(d, params['t'], params['Fz'], M, tower_height, E, sigma_y,\n params['gamma_f'], params['gamma_b'])\n", (20740, 20841), True, 'import commonse.UtilizationSupplement as Util\n'), ((21017, 21038), 'numpy.zeros', 'np.zeros', (['N_DEL.shape'], {}), '(N_DEL.shape)\n', (21025, 21038), True, 'import numpy as np\n'), ((34061, 34088), 'numpy.array', 'np.array', (['[0.0, 43.8, 87.6]'], {}), '([0.0, 43.8, 87.6])\n', (34069, 34088), True, 'import numpy as np\n'), ((34151, 34175), 'numpy.array', 'np.array', (['[0.025, 0.021]'], {}), '([0.025, 0.021])\n', (34159, 34175), True, 'import numpy as np\n'), ((36014, 36317), 'numpy.array', 'np.array', (['[8294.0, 8151.8, 7883.1, 7609.9, 7335.9, 7057.7, 6782.1, 6511.9, 6239.1, \n 5970.7, 5707.0, 5450.0, 5201.5, 4958.8, 4720.2, 4488.4, 4257.7, 4024.6,\n 3794.2, 3566.4, 3340.6, 3118.4, 2897.7, 2681.1, 
2471.9, 2266.3, 2067.3,\n 1876.9, 1701.7, 1547.9, 1420.7, 1330.4, 1278.0, 1267.3, 1276.1]'], {}), '([8294.0, 8151.8, 7883.1, 7609.9, 7335.9, 7057.7, 6782.1, 6511.9, \n 6239.1, 5970.7, 5707.0, 5450.0, 5201.5, 4958.8, 4720.2, 4488.4, 4257.7,\n 4024.6, 3794.2, 3566.4, 3340.6, 3118.4, 2897.7, 2681.1, 2471.9, 2266.3,\n 2067.3, 1876.9, 1701.7, 1547.9, 1420.7, 1330.4, 1278.0, 1267.3, 1276.1])\n', (36022, 36317), True, 'import numpy as np\n'), ((3057, 3071), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (3065, 3071), True, 'import numpy as np\n'), ((3654, 3668), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (3662, 3668), True, 'import numpy as np\n'), ((6363, 6377), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (6371, 6377), True, 'import numpy as np\n'), ((6503, 6517), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (6511, 6517), True, 'import numpy as np\n'), ((6827, 6841), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (6835, 6841), True, 'import numpy as np\n'), ((7197, 7211), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (7205, 7211), True, 'import numpy as np\n'), ((7710, 7746), 'numpy.array', 'np.array', (["[0.0, 0.0, params['hubH']]"], {}), "([0.0, 0.0, params['hubH']])\n", (7718, 7746), True, 'import numpy as np\n'), ((8041, 8067), 'commonse.utilities.assembleI', 'assembleI', (["params['rna_I']"], {}), "(params['rna_I'])\n", (8050, 8067), False, 'from commonse.utilities import assembleI, unassembleI, nodal2sectional\n'), ((8365, 8380), 'numpy.zeros', 'np.zeros', (['nFull'], {}), '(nFull)\n', (8373, 8380), True, 'import numpy as np\n'), ((8571, 8585), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (8579, 8585), True, 'import numpy as np\n'), ((8705, 8719), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (8713, 8719), True, 'import numpy as np\n'), ((8830, 8844), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (8838, 8844), True, 'import numpy as np\n'), ((8907, 8921), 'numpy.zeros', 
'np.zeros', (['(3,)'], {}), '((3,))\n', (8915, 8921), True, 'import numpy as np\n'), ((9021, 9032), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (9029, 9032), True, 'import numpy as np\n'), ((9340, 9367), 'numpy.zeros', 'np.zeros', (['nK'], {'dtype': 'np.int_'}), '(nK, dtype=np.int_)\n', (9348, 9367), True, 'import numpy as np\n'), ((9492, 9504), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (9500, 9504), True, 'import numpy as np\n'), ((9605, 9617), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (9613, 9617), True, 'import numpy as np\n'), ((9718, 9730), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (9726, 9730), True, 'import numpy as np\n'), ((9832, 9844), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (9840, 9844), True, 'import numpy as np\n'), ((9951, 9963), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (9959, 9963), True, 'import numpy as np\n'), ((10070, 10082), 'numpy.zeros', 'np.zeros', (['nK'], {}), '(nK)\n', (10078, 10082), True, 'import numpy as np\n'), ((10238, 10268), 'numpy.zeros', 'np.zeros', (['nMass'], {'dtype': 'np.int_'}), '(nMass, dtype=np.int_)\n', (10246, 10268), True, 'import numpy as np\n'), ((10369, 10384), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10377, 10384), True, 'import numpy as np\n'), ((10449, 10464), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10457, 10464), True, 'import numpy as np\n'), ((10567, 10582), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10575, 10582), True, 'import numpy as np\n'), ((10685, 10700), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10693, 10700), True, 'import numpy as np\n'), ((10803, 10818), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10811, 10818), True, 'import numpy as np\n'), ((10922, 10937), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (10930, 10937), True, 'import numpy as np\n'), ((11041, 11056), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (11049, 11056), True, 'import numpy 
as np\n'), ((11161, 11176), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (11169, 11176), True, 'import numpy as np\n'), ((11263, 11278), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (11271, 11278), True, 'import numpy as np\n'), ((11365, 11380), 'numpy.zeros', 'np.zeros', (['nMass'], {}), '(nMass)\n', (11373, 11380), True, 'import numpy as np\n'), ((11603, 11631), 'numpy.zeros', 'np.zeros', (['nPL'], {'dtype': 'np.int_'}), '(nPL, dtype=np.int_)\n', (11611, 11631), True, 'import numpy as np\n'), ((11734, 11747), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (11742, 11747), True, 'import numpy as np\n'), ((11825, 11838), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (11833, 11838), True, 'import numpy as np\n'), ((11916, 11929), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (11924, 11929), True, 'import numpy as np\n'), ((12008, 12021), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (12016, 12021), True, 'import numpy as np\n'), ((12101, 12114), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (12109, 12114), True, 'import numpy as np\n'), ((12194, 12207), 'numpy.zeros', 'np.zeros', (['nPL'], {}), '(nPL)\n', (12202, 12207), True, 'import numpy as np\n'), ((12537, 12557), 'numpy.array', 'np.array', (['[kmono[0]]'], {}), '([kmono[0]])\n', (12545, 12557), True, 'import numpy as np\n'), ((12591, 12611), 'numpy.array', 'np.array', (['[kmono[2]]'], {}), '([kmono[2]])\n', (12599, 12611), True, 'import numpy as np\n'), ((12645, 12665), 'numpy.array', 'np.array', (['[kmono[4]]'], {}), '([kmono[4]])\n', (12653, 12665), True, 'import numpy as np\n'), ((12699, 12719), 'numpy.array', 'np.array', (['[kmono[1]]'], {}), '([kmono[1]])\n', (12707, 12719), True, 'import numpy as np\n'), ((12753, 12773), 'numpy.array', 'np.array', (['[kmono[3]]'], {}), '([kmono[3]])\n', (12761, 12773), True, 'import numpy as np\n'), ((12807, 12827), 'numpy.array', 'np.array', (['[kmono[5]]'], {}), '([kmono[5]])\n', (12815, 12827), True, 'import 
numpy as np\n'), ((12875, 12893), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (12883, 12893), True, 'import numpy as np\n'), ((12927, 12945), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (12935, 12945), True, 'import numpy as np\n'), ((12979, 12997), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (12987, 12997), True, 'import numpy as np\n'), ((13031, 13049), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (13039, 13049), True, 'import numpy as np\n'), ((13083, 13101), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (13091, 13101), True, 'import numpy as np\n'), ((13135, 13153), 'numpy.array', 'np.array', (['[np.inf]'], {}), '([np.inf])\n', (13143, 13153), True, 'import numpy as np\n'), ((14903, 14912), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (14909, 14912), True, 'import numpy as np\n'), ((14945, 14954), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (14951, 14954), True, 'import numpy as np\n'), ((14987, 14996), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (14993, 14996), True, 'import numpy as np\n'), ((15029, 15038), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (15035, 15038), True, 'import numpy as np\n'), ((15071, 15080), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (15077, 15080), True, 'import numpy as np\n'), ((15113, 15122), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (15119, 15122), True, 'import numpy as np\n'), ((15155, 15164), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15161, 15164), True, 'import numpy as np\n'), ((15197, 15206), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15203, 15206), True, 'import numpy as np\n'), ((15239, 15248), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15245, 15248), True, 'import numpy as np\n'), ((15281, 15290), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15287, 15290), True, 'import numpy as np\n'), ((15323, 15332), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15329, 15332), True, 'import numpy as np\n'), 
((15365, 15374), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (15371, 15374), True, 'import numpy as np\n'), ((15637, 15652), 'numpy.zeros', 'np.zeros', (['nFull'], {}), '(nFull)\n', (15645, 15652), True, 'import numpy as np\n'), ((15753, 15768), 'numpy.zeros', 'np.zeros', (['nFull'], {}), '(nFull)\n', (15761, 15768), True, 'import numpy as np\n'), ((15854, 15873), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (15862, 15873), True, 'import numpy as np\n'), ((16186, 16205), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16194, 16205), True, 'import numpy as np\n'), ((16312, 16331), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16320, 16331), True, 'import numpy as np\n'), ((16425, 16444), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16433, 16444), True, 'import numpy as np\n'), ((17625, 17639), 'numpy.zeros', 'np.zeros', (['nDEL'], {}), '(nDEL)\n', (17633, 17639), True, 'import numpy as np\n'), ((17758, 17772), 'numpy.zeros', 'np.zeros', (['nDEL'], {}), '(nDEL)\n', (17766, 17772), True, 'import numpy as np\n'), ((18107, 18122), 'numpy.zeros', 'np.zeros', (['NFREQ'], {}), '(NFREQ)\n', (18115, 18122), True, 'import numpy as np\n'), ((18331, 18350), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (18339, 18350), True, 'import numpy as np\n'), ((18489, 18508), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (18497, 18508), True, 'import numpy as np\n'), ((18643, 18662), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (18651, 18662), True, 'import numpy as np\n'), ((18789, 18808), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (18797, 18808), True, 'import numpy as np\n'), ((19570, 19597), 'numpy.ones', 'np.ones', (['axial_stress.shape'], {}), '(axial_stress.shape)\n', (19577, 19597), True, 'import numpy as np\n'), ((19635, 19662), 'numpy.ones', 'np.ones', (['axial_stress.shape'], {}), 
'(axial_stress.shape)\n', (19642, 19662), True, 'import numpy as np\n'), ((19711, 19738), 'numpy.ones', 'np.ones', (['axial_stress.shape'], {}), '(axial_stress.shape)\n', (19718, 19738), True, 'import numpy as np\n'), ((21092, 21146), 'numpy.interp', 'np.interp', (['z_section', "params['z_DEL']", "params['M_DEL']"], {}), "(z_section, params['z_DEL'], params['M_DEL'])\n", (21101, 21146), True, 'import numpy as np\n'), ((21181, 21319), 'commonse.UtilizationSupplement.fatigue', 'Util.fatigue', (['M_DEL', 'N_DEL', 'd', "params['t']", "params['m_SN']", "params['DC']", "params['gamma_fatigue']"], {'stress_factor': '(1.0)', 'weld_factor': '(True)'}), "(M_DEL, N_DEL, d, params['t'], params['m_SN'], params['DC'],\n params['gamma_fatigue'], stress_factor=1.0, weld_factor=True)\n", (21193, 21319), True, 'import commonse.UtilizationSupplement as Util\n'), ((22026, 22070), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tower_outfitting_factor"""', '(0.0)'], {}), "('tower_outfitting_factor', 0.0)\n", (22038, 22070), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((22130, 22172), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tower_buckling_length"""', '(0.0)'], {}), "('tower_buckling_length', 0.0)\n", (22142, 22172), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((22256, 22296), 'commonse.vertical_cylinder.CylinderDiscretization', 'CylinderDiscretization', (['nPoints', 'nRefine'], {}), '(nPoints, nRefine)\n', (22278, 22296), False, 'from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD\n'), ((22444, 22463), 'commonse.vertical_cylinder.CylinderMass', 'CylinderMass', (['nFull'], {}), '(nFull)\n', (22456, 22463), False, 'from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD\n'), ((22782, 22816), 'commonse.UtilizationSupplement.GeometricConstraints', 'Util.GeometricConstraints', (['nPoints'], {}), '(nPoints)\n', (22807, 22816), True, 
'import commonse.UtilizationSupplement as Util\n'), ((24405, 24462), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tower_add_gravity"""', '(True)'], {'pass_by_obj': '(True)'}), "('tower_add_gravity', True, pass_by_obj=True)\n", (24417, 24462), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((24527, 24574), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tower_force_discretization"""', '(5.0)'], {}), "('tower_force_discretization', 5.0)\n", (24539, 24574), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((24621, 24670), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""monopile"""', '(False)'], {'pass_by_obj': '(True)'}), "('monopile', False, pass_by_obj=True)\n", (24633, 24670), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((24726, 24764), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""suctionpile_depth"""', '(0.0)'], {}), "('suctionpile_depth', 0.0)\n", (24738, 24764), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((24809, 24836), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""soil_G"""', '(0.0)'], {}), "('soil_G', 0.0)\n", (24821, 24836), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((24882, 24910), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""soil_nu"""', '(0.0)'], {}), "('soil_nu', 0.0)\n", (24894, 24910), False, 'from openmdao.api import Component, Group, Problem, IndepVarComp\n'), ((25025, 25058), 'commonse.tube.CylindricalShellProperties', 'CylindricalShellProperties', (['nFull'], {}), '(nFull)\n', (25051, 25058), False, 'from commonse.tube import CylindricalShellProperties\n'), ((25085, 25096), 'commonse.environment.TowerSoil', 'TowerSoil', ([], {}), '()\n', (25094, 25096), False, 'from commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind\n'), ((2586, 2607), 'numpy.zeros', 'np.zeros', (['(nPoints - 1)'], {}), '(nPoints - 1)\n', (2594, 2607), 
True, 'import numpy as np\n'), ((2918, 2939), 'numpy.zeros', 'np.zeros', (['(nPoints - 1)'], {}), '(nPoints - 1)\n', (2926, 2939), True, 'import numpy as np\n'), ((3520, 3541), 'numpy.zeros', 'np.zeros', (['(nPoints - 1)'], {}), '(nPoints - 1)\n', (3528, 3541), True, 'import numpy as np\n'), ((7079, 7093), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (7087, 7093), True, 'import numpy as np\n'), ((16551, 16570), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16559, 16570), True, 'import numpy as np\n'), ((16668, 16687), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16676, 16687), True, 'import numpy as np\n'), ((16785, 16804), 'numpy.zeros', 'np.zeros', (['(nFull - 1)'], {}), '(nFull - 1)\n', (16793, 16804), True, 'import numpy as np\n'), ((18894, 18905), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (18902, 18905), True, 'import numpy as np\n'), ((18992, 19003), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19000, 19003), True, 'import numpy as np\n'), ((21714, 21735), 'numpy.zeros', 'np.zeros', (['(nPoints - 1)'], {}), '(nPoints - 1)\n', (21722, 21735), True, 'import numpy as np\n'), ((21830, 21847), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (21838, 21847), True, 'import numpy as np\n'), ((21944, 21965), 'numpy.zeros', 'np.zeros', (['(nPoints - 1)'], {}), '(nPoints - 1)\n', (21952, 21965), True, 'import numpy as np\n'), ((24205, 24219), 'numpy.zeros', 'np.zeros', (['nDEL'], {}), '(nDEL)\n', (24213, 24219), True, 'import numpy as np\n'), ((24316, 24330), 'numpy.zeros', 'np.zeros', (['nDEL'], {}), '(nDEL)\n', (24324, 24330), True, 'import numpy as np\n'), ((26153, 26171), 'commonse.environment.LinearWaves', 'LinearWaves', (['nFull'], {}), '(nFull)\n', (26164, 26171), False, 'from commonse.environment import WindBase, WaveBase, LinearWaves, TowerSoil, PowerWind, LogWind\n'), ((26232, 26255), 'commonse.WindWaveDrag.CylinderWindDrag', 'CylinderWindDrag', (['nFull'], {}), '(nFull)\n', 
(26248, 26255), False, 'from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag\n'), ((26315, 26338), 'commonse.WindWaveDrag.CylinderWaveDrag', 'CylinderWaveDrag', (['nFull'], {}), '(nFull)\n', (26331, 26338), False, 'from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag\n'), ((26403, 26424), 'commonse.WindWaveDrag.AeroHydroLoads', 'AeroHydroLoads', (['nFull'], {}), '(nFull)\n', (26417, 26424), False, 'from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag\n'), ((26569, 26601), 'commonse.vertical_cylinder.CylinderFrame3DD', 'CylinderFrame3DD', (['nFull', '(1)', '(1)', '(1)'], {}), '(nFull, 1, 1, 1)\n', (26585, 26601), False, 'from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass, CylinderFrame3DD\n'), ((8115, 8129), 'numpy.outer', 'np.outer', (['R', 'R'], {}), '(R, R)\n', (8123, 8129), True, 'import numpy as np\n'), ((25893, 25909), 'commonse.environment.PowerWind', 'PowerWind', (['nFull'], {}), '(nFull)\n', (25902, 25909), False, 'from commonse.environment import PowerWind\n'), ((8090, 8102), 'numpy.dot', 'np.dot', (['R', 'R'], {}), '(R, R)\n', (8096, 8102), True, 'import numpy as np\n'), ((8103, 8112), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8109, 8112), True, 'import numpy as np\n'), ((26008, 26022), 'commonse.environment.LogWind', 'LogWind', (['nFull'], {}), '(nFull)\n', (26015, 26022), False, 'from commonse.environment import LogWind\n')] |
import pytest
import numpy as np
import sparse
from sparse import DOK
from sparse._utils import assert_eq
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7])
def test_random_shape_nnz(shape, density):
    """A random DOK array has the requested shape and an nnz close to density*size."""
    arr = sparse.random(shape, density, format="dok")
    assert isinstance(arr, DOK)
    assert arr.shape == shape
    nnz_target = density * np.prod(shape)
    # nnz is an integer, so it can only land on the floor or ceil of the target.
    assert np.floor(nnz_target) <= arr.nnz <= np.ceil(nnz_target)
def test_convert_to_coo():
    """A DOK array converts to COO without changing its contents."""
    dok_arr = sparse.random((2, 3, 4), 0.5, format="dok")
    coo_arr = sparse.COO(dok_arr)
    assert_eq(dok_arr, coo_arr)
def test_convert_from_coo():
    """A COO array converts to DOK without changing its contents."""
    coo_arr = sparse.random((2, 3, 4), 0.5, format="coo")
    dok_arr = DOK(coo_arr)
    assert_eq(coo_arr, dok_arr)
def test_convert_from_numpy():
    """A dense ndarray converts to an equal DOK array."""
    dense = np.random.rand(2, 3, 4)
    dok_arr = DOK(dense)
    assert_eq(dense, dok_arr)
def test_convert_to_numpy():
    """todense() reproduces the DOK contents as a dense ndarray."""
    dok_arr = sparse.random((2, 3, 4), 0.5, format="dok")
    dense = dok_arr.todense()
    assert_eq(dense, dok_arr)
@pytest.mark.parametrize(
    "shape, data",
    [
        (2, {0: 1}),
        ((2, 3), {(0, 1): 3, (1, 2): 4}),
        ((2, 3, 4), {(0, 1): 3, (1, 2, 3): 4, (1, 1): [6, 5, 4, 1]}),
    ],
)
def test_construct(shape, data):
    """A DOK built from a coordinate dict equals a dense array filled the same way."""
    dok_arr = DOK(shape, data)
    dense = np.zeros(shape, dtype=dok_arr.dtype)
    for coord, val in data.items():
        dense[coord] = val
    assert_eq(dense, dok_arr)
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize("density", [0.1, 0.3, 0.5, 0.7])
def test_getitem(shape, density):
    """Random element reads from a DOK match the dense equivalent."""
    dok_arr = sparse.random(shape, density, format="dok")
    dense = dok_arr.todense()
    # Probe as many random coordinates as there are stored values.
    for _ in range(dok_arr.nnz):
        flat_idx = np.random.randint(np.prod(shape))
        coord = np.unravel_index(flat_idx, shape)
        assert np.isclose(dok_arr[coord], dense[coord])
@pytest.mark.parametrize(
    "shape, index, value",
    [
        ((2,), slice(None), np.random.rand()),
        ((2,), slice(1, 2), np.random.rand()),
        ((2,), slice(0, 2), np.random.rand(2)),
        ((2,), 1, np.random.rand()),
        ((2, 3), (0, slice(None)), np.random.rand()),
        ((2, 3), (0, slice(1, 3)), np.random.rand()),
        ((2, 3), (1, slice(None)), np.random.rand(3)),
        ((2, 3), (0, slice(1, 3)), np.random.rand(2)),
        ((2, 3), (0, slice(2, 0, -1)), np.random.rand(2)),
        ((2, 3), (slice(None), 1), np.random.rand()),
        ((2, 3), (slice(None), 1), np.random.rand(2)),
        ((2, 3), (slice(1, 2), 1), np.random.rand()),
        ((2, 3), (slice(1, 2), 1), np.random.rand(1)),
        ((2, 3), (0, 2), np.random.rand()),
    ],
)
def test_setitem(shape, index, value):
    """Assignment through any supported index matches the same dense assignment."""
    dok_arr = sparse.random(shape, 0.5, format="dok")
    dense = dok_arr.todense()
    dok_arr[index] = value
    dense[index] = value
    assert_eq(dense, dok_arr)
def test_default_dtype():
    """With no data supplied, a DOK array defaults to float64."""
    arr = DOK((5,))
    assert arr.dtype == np.float64
def test_int_dtype():
    """The dtype is promoted to the widest integer type among stored values."""
    values = {1: np.uint8(1), 2: np.uint16(2)}
    arr = DOK((5,), values)
    assert arr.dtype == np.uint16
def test_float_dtype():
    """Mixed int/float values promote the dtype to the float type."""
    values = {1: np.uint8(1), 2: np.float32(2)}
    arr = DOK((5,), values)
    assert arr.dtype == np.float32
def test_set_zero():
    """Overwriting a stored value with zero removes it from the container."""
    arr = DOK((1,), dtype=np.uint8)
    arr[0] = 1
    arr[0] = 0
    assert arr[0] == 0
    # Explicitly storing zero must not count toward nnz.
    assert arr.nnz == 0
@pytest.mark.parametrize("format", ["coo", "dok"])
def test_asformat(format):
    """asformat() yields an equal array in the requested storage format."""
    arr = sparse.random((2, 3, 4), density=0.5, format="dok")
    converted = arr.asformat(format)
    assert_eq(arr, converted)
def test_coo_fv_interface():
    """An array with a nonzero fill value round-trips through DOK and COO."""
    fv_arr = sparse.full((5, 5), fill_value=1 + np.random.rand())
    dok_arr = sparse.DOK(fv_arr)
    assert_eq(fv_arr, dok_arr)
    coo_arr = sparse.COO(dok_arr)
    assert_eq(fv_arr, coo_arr)
def test_empty_dok_dtype():
    """An empty DOK keeps its requested dtype when converted to COO."""
    empty = sparse.DOK(5, dtype=np.uint8)
    coo = sparse.COO(empty)
    assert coo.dtype == empty.dtype
| [
"numpy.uint8",
"numpy.prod",
"numpy.ceil",
"numpy.isclose",
"numpy.random.rand",
"numpy.floor",
"pytest.mark.parametrize",
"sparse.DOK",
"sparse.random",
"numpy.zeros",
"numpy.unravel_index",
"numpy.uint16",
"numpy.float32",
"sparse.COO",
"sparse._utils.assert_eq"
] | [((111, 170), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2,), (2, 3), (2, 3, 4)]'], {}), "('shape', [(2,), (2, 3), (2, 3, 4)])\n", (134, 170), False, 'import pytest\n'), ((172, 228), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""density"""', '[0.1, 0.3, 0.5, 0.7]'], {}), "('density', [0.1, 0.3, 0.5, 0.7])\n", (195, 228), False, 'import pytest\n'), ((977, 1132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape, data"""', '[(2, {(0): 1}), ((2, 3), {(0, 1): 3, (1, 2): 4}), ((2, 3, 4), {(0, 1): 3, (\n 1, 2, 3): 4, (1, 1): [6, 5, 4, 1]})]'], {}), "('shape, data', [(2, {(0): 1}), ((2, 3), {(0, 1): 3,\n (1, 2): 4}), ((2, 3, 4), {(0, 1): 3, (1, 2, 3): 4, (1, 1): [6, 5, 4, 1]})])\n", (1000, 1132), False, 'import pytest\n'), ((1338, 1397), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(2,), (2, 3), (2, 3, 4)]'], {}), "('shape', [(2,), (2, 3), (2, 3, 4)])\n", (1361, 1397), False, 'import pytest\n'), ((1399, 1455), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""density"""', '[0.1, 0.3, 0.5, 0.7]'], {}), "('density', [0.1, 0.3, 0.5, 0.7])\n", (1422, 1455), False, 'import pytest\n'), ((3151, 3200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""format"""', "['coo', 'dok']"], {}), "('format', ['coo', 'dok'])\n", (3174, 3200), False, 'import pytest\n'), ((280, 323), 'sparse.random', 'sparse.random', (['shape', 'density'], {'format': '"""dok"""'}), "(shape, density, format='dok')\n", (293, 323), False, 'import sparse\n'), ((534, 577), 'sparse.random', 'sparse.random', (['(2, 3, 4)', '(0.5)'], {'format': '"""dok"""'}), "((2, 3, 4), 0.5, format='dok')\n", (547, 577), False, 'import sparse\n'), ((587, 601), 'sparse.COO', 'sparse.COO', (['s1'], {}), '(s1)\n', (597, 601), False, 'import sparse\n'), ((607, 624), 'sparse._utils.assert_eq', 'assert_eq', (['s1', 's2'], {}), '(s1, s2)\n', (616, 624), False, 'from sparse._utils import assert_eq\n'), ((665, 708), 
'sparse.random', 'sparse.random', (['(2, 3, 4)', '(0.5)'], {'format': '"""coo"""'}), "((2, 3, 4), 0.5, format='coo')\n", (678, 708), False, 'import sparse\n'), ((718, 725), 'sparse.DOK', 'DOK', (['s1'], {}), '(s1)\n', (721, 725), False, 'from sparse import DOK\n'), ((731, 748), 'sparse._utils.assert_eq', 'assert_eq', (['s1', 's2'], {}), '(s1, s2)\n', (740, 748), False, 'from sparse._utils import assert_eq\n'), ((790, 813), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (804, 813), True, 'import numpy as np\n'), ((822, 828), 'sparse.DOK', 'DOK', (['x'], {}), '(x)\n', (825, 828), False, 'from sparse import DOK\n'), ((834, 849), 'sparse._utils.assert_eq', 'assert_eq', (['x', 's'], {}), '(x, s)\n', (843, 849), False, 'from sparse._utils import assert_eq\n'), ((889, 932), 'sparse.random', 'sparse.random', (['(2, 3, 4)', '(0.5)'], {'format': '"""dok"""'}), "((2, 3, 4), 0.5, format='dok')\n", (902, 932), False, 'import sparse\n'), ((958, 973), 'sparse._utils.assert_eq', 'assert_eq', (['x', 's'], {}), '(x, s)\n', (967, 973), False, 'from sparse._utils import assert_eq\n'), ((1210, 1226), 'sparse.DOK', 'DOK', (['shape', 'data'], {}), '(shape, data)\n', (1213, 1226), False, 'from sparse import DOK\n'), ((1235, 1265), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 's.dtype'}), '(shape, dtype=s.dtype)\n', (1243, 1265), True, 'import numpy as np\n'), ((1319, 1334), 'sparse._utils.assert_eq', 'assert_eq', (['x', 's'], {}), '(x, s)\n', (1328, 1334), False, 'from sparse._utils import assert_eq\n'), ((1498, 1541), 'sparse.random', 'sparse.random', (['shape', 'density'], {'format': '"""dok"""'}), "(shape, density, format='dok')\n", (1511, 1541), False, 'import sparse\n'), ((2559, 2598), 'sparse.random', 'sparse.random', (['shape', '(0.5)'], {'format': '"""dok"""'}), "(shape, 0.5, format='dok')\n", (2572, 2598), False, 'import sparse\n'), ((2667, 2682), 'sparse._utils.assert_eq', 'assert_eq', (['x', 's'], {}), '(x, s)\n', (2676, 2682), False, 
'from sparse._utils import assert_eq\n'), ((2719, 2728), 'sparse.DOK', 'DOK', (['(5,)'], {}), '((5,))\n', (2722, 2728), False, 'from sparse import DOK\n'), ((2841, 2856), 'sparse.DOK', 'DOK', (['(5,)', 'data'], {}), '((5,), data)\n', (2844, 2856), False, 'from sparse import DOK\n'), ((2971, 2986), 'sparse.DOK', 'DOK', (['(5,)', 'data'], {}), '((5,), data)\n', (2974, 2986), False, 'from sparse import DOK\n'), ((3052, 3077), 'sparse.DOK', 'DOK', (['(1,)'], {'dtype': 'np.uint8'}), '((1,), dtype=np.uint8)\n', (3055, 3077), False, 'from sparse import DOK\n'), ((3236, 3287), 'sparse.random', 'sparse.random', (['(2, 3, 4)'], {'density': '(0.5)', 'format': '"""dok"""'}), "((2, 3, 4), density=0.5, format='dok')\n", (3249, 3287), False, 'import sparse\n'), ((3321, 3337), 'sparse._utils.assert_eq', 'assert_eq', (['s', 's2'], {}), '(s, s2)\n', (3330, 3337), False, 'from sparse._utils import assert_eq\n'), ((3440, 3454), 'sparse.DOK', 'sparse.DOK', (['s1'], {}), '(s1)\n', (3450, 3454), False, 'import sparse\n'), ((3459, 3476), 'sparse._utils.assert_eq', 'assert_eq', (['s1', 's2'], {}), '(s1, s2)\n', (3468, 3476), False, 'from sparse._utils import assert_eq\n'), ((3486, 3500), 'sparse.COO', 'sparse.COO', (['s2'], {}), '(s2)\n', (3496, 3500), False, 'import sparse\n'), ((3505, 3522), 'sparse._utils.assert_eq', 'assert_eq', (['s1', 's3'], {}), '(s1, s3)\n', (3514, 3522), False, 'from sparse._utils import assert_eq\n'), ((3561, 3590), 'sparse.DOK', 'sparse.DOK', (['(5)'], {'dtype': 'np.uint8'}), '(5, dtype=np.uint8)\n', (3571, 3590), False, 'import sparse\n'), ((3599, 3612), 'sparse.COO', 'sparse.COO', (['d'], {}), '(d)\n', (3609, 3612), False, 'import sparse\n'), ((413, 427), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (420, 427), True, 'import numpy as np\n'), ((439, 461), 'numpy.floor', 'np.floor', (['expected_nnz'], {}), '(expected_nnz)\n', (447, 461), True, 'import numpy as np\n'), ((474, 495), 'numpy.ceil', 'np.ceil', (['expected_nnz'], {}), '(expected_nnz)\n', 
(481, 495), True, 'import numpy as np\n'), ((1652, 1680), 'numpy.unravel_index', 'np.unravel_index', (['idx', 'shape'], {}), '(idx, shape)\n', (1668, 1680), True, 'import numpy as np\n'), ((1697, 1723), 'numpy.isclose', 'np.isclose', (['s[idx]', 'x[idx]'], {}), '(s[idx], x[idx])\n', (1707, 1723), True, 'import numpy as np\n'), ((2802, 2813), 'numpy.uint8', 'np.uint8', (['(1)'], {}), '(1)\n', (2810, 2813), True, 'import numpy as np\n'), ((2818, 2830), 'numpy.uint16', 'np.uint16', (['(2)'], {}), '(2)\n', (2827, 2830), True, 'import numpy as np\n'), ((2931, 2942), 'numpy.uint8', 'np.uint8', (['(1)'], {}), '(1)\n', (2939, 2942), True, 'import numpy as np\n'), ((2947, 2960), 'numpy.float32', 'np.float32', (['(2)'], {}), '(2)\n', (2957, 2960), True, 'import numpy as np\n'), ((1622, 1636), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1629, 1636), True, 'import numpy as np\n'), ((1813, 1829), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1827, 1829), True, 'import numpy as np\n'), ((1860, 1876), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1874, 1876), True, 'import numpy as np\n'), ((1907, 1924), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1921, 1924), True, 'import numpy as np\n'), ((1945, 1961), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1959, 1961), True, 'import numpy as np\n'), ((1999, 2015), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2013, 2015), True, 'import numpy as np\n'), ((2053, 2069), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2067, 2069), True, 'import numpy as np\n'), ((2107, 2124), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (2121, 2124), True, 'import numpy as np\n'), ((2162, 2179), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (2176, 2179), True, 'import numpy as np\n'), ((2221, 2238), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (2235, 2238), True, 'import numpy as np\n'), ((2276, 2292), 
'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2290, 2292), True, 'import numpy as np\n'), ((2330, 2347), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (2344, 2347), True, 'import numpy as np\n'), ((2385, 2401), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2399, 2401), True, 'import numpy as np\n'), ((2439, 2456), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2453, 2456), True, 'import numpy as np\n'), ((2484, 2500), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2498, 2500), True, 'import numpy as np\n'), ((3413, 3429), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3427, 3429), True, 'import numpy as np\n')] |
""" Testing the glm module
"""
import numpy as np
from numpy.testing import assert_almost_equal
from nose.tools import assert_true
import numpy.random as nr
from ..mixed_effects_stat import (
one_sample_ttest, one_sample_ftest, two_sample_ttest, two_sample_ftest,
generate_data, t_stat, mfx_stat)
def test_mfx():
    """Exercise the generic mixed-effects model through mfx_stat."""
    n_samples, n_tests = 20, 100
    np.random.seed(1)
    # Simulated first-level variances and data under a null (zero) effect.
    variance = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, variance)
    design = np.random.randn(20, 3)
    # t statistic of the first regressor only.
    t1, = mfx_stat(data, variance, design, 1, return_t=True,
                   return_f=False, return_effect=False,
                   return_var=False)
    assert_true(t1.shape == (n_tests,))
    assert_true(t1.mean() < 5 / np.sqrt(n_tests))
    assert_true((t1.var() < 2) and (t1.var() > .5))
    # Rescaling the design columns must leave the t statistic unchanged.
    t2, = mfx_stat(data, variance, design * np.random.rand(3), 1)
    assert_almost_equal(t1, t2)
    # For a single contrast, F is the square of t.
    f, = mfx_stat(data, variance, design, 1, return_t=False, return_f=True)
    assert_almost_equal(t1 ** 2, f)
    # Estimated second-level variance must be positive.
    v2, = mfx_stat(data, variance, design, 1, return_t=False, return_var=True)
    assert_true((v2 > 0).all())
    fx, = mfx_stat(data, variance, design, 1, return_t=False, return_effect=True)
    assert_true(fx.shape == (n_tests,))
def test_t_test():
    """Check that the one-sample t statistic runs and has a sane scale on null data."""
    n_samples, n_tests = 15, 100
    data = nr.randn(n_samples, n_tests)
    t = t_stat(data)
    assert_true(t.shape == (n_tests,))
    # Bug fix: the original read np.abs(t.mean() < bound), taking the absolute
    # value of a boolean comparison.  The intent is |mean(t)| < bound.
    assert_true(np.abs(t.mean()) < 5 / np.sqrt(n_tests))
    assert_true(t.var() < 2)
    assert_true(t.var() > .5)
def test_two_sample_ttest():
    """Check that the two-sample mixed-effects t test runs and is well scaled."""
    n_samples, n_tests = 15, 4
    np.random.seed(1)
    # generate some data under a null (zero) effect
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones(n_samples), 0, 1, vardata)
    # group labels: 5 subjects in one group, 10 in the other
    u = np.concatenate((np.ones(5), np.zeros(10)))
    t2 = two_sample_ttest(data, vardata, u, n_iter=5)
    assert t2.shape == (n_tests,)
    # Bug fix: original read np.abs(t2.mean() < bound) -- abs of a boolean.
    # The intent is |mean(t2)| < bound.
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # verbose mode must yield the same statistic
    t3 = two_sample_ttest(data, vardata, u, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)
def test_two_sample_ftest():
    """Check that the two-sample mixed-effects F test runs and is well scaled."""
    n_samples, n_tests = 15, 4
    np.random.seed(1)
    # generate some data under a null (zero) effect
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
    # group labels: 5 subjects in one group, 10 in the other
    u = np.concatenate((np.ones(5), np.zeros(10)))
    t2 = two_sample_ftest(data, vardata, u, n_iter=5)
    assert t2.shape == (n_tests,)
    # Bug fix: original read np.abs(t2.mean() < bound) -- abs of a boolean.
    # The intent is |mean(t2)| < bound.
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # verbose mode must yield the same statistic
    t3 = two_sample_ftest(data, vardata, u, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)
def test_mfx_ttest():
    """Check that the one-sample mixed-effects t test runs and is well scaled."""
    n_samples, n_tests = 15, 100
    np.random.seed(1)
    # generate some data under a null (zero) effect
    vardata = np.random.rand(n_samples, n_tests)
    data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
    t2 = one_sample_ttest(data, vardata, n_iter=5)
    assert t2.shape == (n_tests,)
    # Bug fix: original read np.abs(t2.mean() < bound) -- abs of a boolean.
    # The intent is |mean(t2)| < bound.
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
    assert t2.var() < 2
    assert t2.var() > .5
    # verbose mode must yield the same statistic
    t3 = one_sample_ttest(data, vardata, n_iter=5, verbose=1)
    assert_almost_equal(t2, t3)
def test_mfx_ftest():
    """Check that the one-sample mixed-effects F test runs and is well scaled."""
    n_samples, n_tests = 15, 100
    np.random.seed(1)
    # Simulated first-level variances and null data.
    first_level_var = np.random.rand(n_samples, n_tests)
    null_data = generate_data(np.ones((n_samples, 1)), 0, 1, first_level_var)
    f = one_sample_ftest(null_data, first_level_var, n_iter=5)
    assert f.shape == (n_tests,)
    # Under the null, the F statistic should hover around 1.
    assert np.abs(f.mean() - 1) < 1
    assert f.var() < 10
    assert f.var() > .2
if __name__ == "__main__":
    # Allow running this test module directly through the nose runner.
    import nose
    nose.run(argv=['', __file__])
| [
"numpy.sqrt",
"numpy.random.rand",
"numpy.ones",
"nose.tools.assert_true",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.randn",
"nose.run"
] | [((411, 428), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (425, 428), True, 'import numpy as np\n'), ((468, 502), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (482, 502), True, 'import numpy as np\n'), ((568, 590), 'numpy.random.randn', 'np.random.randn', (['(20)', '(3)'], {}), '(20, 3)\n', (583, 590), True, 'import numpy as np\n'), ((771, 806), 'nose.tools.assert_true', 'assert_true', (['(t1.shape == (n_tests,))'], {}), '(t1.shape == (n_tests,))\n', (782, 806), False, 'from nose.tools import assert_true\n'), ((965, 992), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['t1', 't2'], {}), '(t1, t2)\n', (984, 992), False, 'from numpy.testing import assert_almost_equal\n'), ((1059, 1090), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(t1 ** 2)', 'f'], {}), '(t1 ** 2, f)\n', (1078, 1090), False, 'from numpy.testing import assert_almost_equal\n'), ((1260, 1295), 'nose.tools.assert_true', 'assert_true', (['(fx.shape == (n_tests,))'], {}), '(fx.shape == (n_tests,))\n', (1271, 1295), False, 'from nose.tools import assert_true\n'), ((1401, 1429), 'numpy.random.randn', 'nr.randn', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (1409, 1429), True, 'import numpy.random as nr\n'), ((1455, 1489), 'nose.tools.assert_true', 'assert_true', (['(t.shape == (n_tests,))'], {}), '(t.shape == (n_tests,))\n', (1466, 1489), False, 'from nose.tools import assert_true\n'), ((1729, 1746), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1743, 1746), True, 'import numpy as np\n'), ((1791, 1825), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (1805, 1825), True, 'import numpy as np\n'), ((2266, 2293), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['t2', 't3'], {}), '(t2, t3)\n', (2285, 2293), False, 'from numpy.testing import assert_almost_equal\n'), ((2411, 2428), 'numpy.random.seed', 
'np.random.seed', (['(1)'], {}), '(1)\n', (2425, 2428), True, 'import numpy as np\n'), ((2473, 2507), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (2487, 2507), True, 'import numpy as np\n'), ((2953, 2980), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['t2', 't3'], {}), '(t2, t3)\n', (2972, 2980), False, 'from numpy.testing import assert_almost_equal\n'), ((3093, 3110), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3107, 3110), True, 'import numpy as np\n'), ((3155, 3189), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (3169, 3189), True, 'import numpy as np\n'), ((3578, 3605), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['t2', 't3'], {}), '(t2, t3)\n', (3597, 3605), False, 'from numpy.testing import assert_almost_equal\n'), ((3718, 3735), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3732, 3735), True, 'import numpy as np\n'), ((3780, 3814), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_tests'], {}), '(n_samples, n_tests)\n', (3794, 3814), True, 'import numpy as np\n'), ((4146, 4175), 'nose.run', 'nose.run', ([], {'argv': "['', __file__]"}), "(argv=['', __file__])\n", (4154, 4175), False, 'import nose\n'), ((525, 548), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (532, 548), True, 'import numpy as np\n'), ((1851, 1869), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (1858, 1869), True, 'import numpy as np\n'), ((2533, 2556), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (2540, 2556), True, 'import numpy as np\n'), ((3215, 3238), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (3222, 3238), True, 'import numpy as np\n'), ((3840, 3863), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (3847, 3863), True, 'import numpy as np\n'), ((939, 956), 'numpy.random.rand', 
'np.random.rand', (['(3)'], {}), '(3)\n', (953, 956), True, 'import numpy as np\n'), ((1953, 1963), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1960, 1963), True, 'import numpy as np\n'), ((1965, 1977), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1973, 1977), True, 'import numpy as np\n'), ((2640, 2650), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2647, 2650), True, 'import numpy as np\n'), ((2652, 2664), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (2660, 2664), True, 'import numpy as np\n'), ((839, 855), 'numpy.sqrt', 'np.sqrt', (['n_tests'], {}), '(n_tests)\n', (846, 855), True, 'import numpy as np\n'), ((2102, 2118), 'numpy.sqrt', 'np.sqrt', (['n_tests'], {}), '(n_tests)\n', (2109, 2118), True, 'import numpy as np\n'), ((2789, 2805), 'numpy.sqrt', 'np.sqrt', (['n_tests'], {}), '(n_tests)\n', (2796, 2805), True, 'import numpy as np\n'), ((3417, 3433), 'numpy.sqrt', 'np.sqrt', (['n_tests'], {}), '(n_tests)\n', (3424, 3433), True, 'import numpy as np\n'), ((1529, 1545), 'numpy.sqrt', 'np.sqrt', (['n_tests'], {}), '(n_tests)\n', (1536, 1545), True, 'import numpy as np\n')] |
#Copyright 2021 Fortior Blockchain, LLLP
# Import Numpy
import numpy
# sigmoid function expit()
import scipy.special
# Library for plotting arrays
import matplotlib.pyplot
# Neural Network Class Definition
class neuralNetwork:
    """A three-layer feed-forward neural network trained by backpropagation."""

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Record layer sizes, draw random initial weights, and set the learning rate."""
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Initial link weights sampled from N(0, 1/sqrt(fan-in)); wih maps
        # input->hidden, who maps hidden->output.
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5),
                                        (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                        (self.onodes, self.hnodes))
        self.lr = learningrate
        # Sigmoid squashing function applied after each layer.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """Run one forward pass and update both weight matrices by gradient descent."""
        x = numpy.array(inputs_list, ndmin=2).T
        y = numpy.array(targets_list, ndmin=2).T
        # Forward pass: hidden layer, then output layer.
        hidden_out = self.activation_function(numpy.dot(self.wih, x))
        final_out = self.activation_function(numpy.dot(self.who, hidden_out))
        # Errors: direct at the output; apportioned by link weight at the hidden layer.
        err_out = y - final_out
        err_hidden = numpy.dot(self.who.T, err_out)
        # Weight updates; out * (1 - out) is the sigmoid derivative.
        self.who += self.lr * numpy.dot(err_out * final_out * (1.0 - final_out),
                                        numpy.transpose(hidden_out))
        self.wih += self.lr * numpy.dot(err_hidden * hidden_out * (1.0 - hidden_out),
                                        numpy.transpose(x))

    def query(self, inputs_list):
        """Forward-propagate inputs_list and return the output-layer activations."""
        x = numpy.array(inputs_list, ndmin=2).T
        hidden_out = self.activation_function(numpy.dot(self.wih, x))
        return self.activation_function(numpy.dot(self.who, hidden_out))
# Number of input, hidden and output nodes
input_nodes = 144
# Hidden
# Hidden_nodes = 100
hidden_nodes = 10
# Ouput
# Output_nodes = 10
output_nodes = 2
# Learning rate is ~0.1
learning_rate = 0.099
# Create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# Load the mnist training data CSV file into a list
training_data_file = open("ALGO-USD.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# Train neural network
# Go through all records in the training data set
for record in training_data_list:
# Split the record by the ',' commas
all_values = record.split(',')
# Scale and shift the inputs
##################
inputs = (numpy.asfarray(all_values[1:]) / 144.0 * 0.99) + 0.01
##################
# Create the target output values(all 0.01, except the desired label which is 0.99)
targets = numpy.zeros(output_nodes) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
# Load the mnist test data csv file into a list
test_data_file = open("ALGO-USD.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# Get the first test record
all_values = test_data_list[0].split(',')
# Fit input data
##################
image_array = numpy.asfarray(all_values[1:]).reshape((12,12))
##################
# Test the neural network
# Scorecard for how well the network performs, initially empty
scorecard = []
# Go through all the records in the test data set
for record in test_data_list:
# Split the record by the ',' commas
all_values = record.split(',')
# Correct answer is first value
correct_label = int(all_values[0])
print(correct_label, "correct label")
# Scale and shift inputs
##################
inputs = (numpy.asfarray(all_values[1:])/ 144.0 * 0.99)+0.01
##################
# Query the network
outputs = n.query(inputs)
# The index of the highest value corresponds to the label
label = numpy.argmax(outputs)
print(label, "network's answer")
# append correct or incorrect to list
if (label == correct_label):
# Network's answer matches correct answer, add 1 to scorecard
scorecard.append(1)
else:
# Network's answer doesn't match correct answer, add 0 to scorecard
scorecard.append(0)
pass
pass
print(scorecard)
# Calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)
print("performace =", scorecard_array.sum() / scorecard_array.size)
| [
"numpy.asarray",
"numpy.argmax",
"numpy.asfarray",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.transpose"
] | [((5759, 5783), 'numpy.asarray', 'numpy.asarray', (['scorecard'], {}), '(scorecard)\n', (5772, 5783), False, 'import numpy\n'), ((5287, 5308), 'numpy.argmax', 'numpy.argmax', (['outputs'], {}), '(outputs)\n', (5299, 5308), False, 'import numpy\n'), ((1376, 1403), 'numpy.dot', 'numpy.dot', (['self.wih', 'inputs'], {}), '(self.wih, inputs)\n', (1385, 1403), False, 'import numpy\n'), ((1609, 1644), 'numpy.dot', 'numpy.dot', (['self.who', 'hidden_outputs'], {}), '(self.who, hidden_outputs)\n', (1618, 1644), False, 'import numpy\n'), ((1997, 2033), 'numpy.dot', 'numpy.dot', (['self.who.T', 'output_errors'], {}), '(self.who.T, output_errors)\n', (2006, 2033), False, 'import numpy\n'), ((2746, 2773), 'numpy.dot', 'numpy.dot', (['self.wih', 'inputs'], {}), '(self.wih, inputs)\n', (2755, 2773), False, 'import numpy\n'), ((2977, 3012), 'numpy.dot', 'numpy.dot', (['self.who', 'hidden_outputs'], {}), '(self.who, hidden_outputs)\n', (2986, 3012), False, 'import numpy\n'), ((4119, 4144), 'numpy.zeros', 'numpy.zeros', (['output_nodes'], {}), '(output_nodes)\n', (4130, 4144), False, 'import numpy\n'), ((4566, 4596), 'numpy.asfarray', 'numpy.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (4580, 4596), False, 'import numpy\n'), ((1207, 1240), 'numpy.array', 'numpy.array', (['inputs_list'], {'ndmin': '(2)'}), '(inputs_list, ndmin=2)\n', (1218, 1240), False, 'import numpy\n'), ((1261, 1295), 'numpy.array', 'numpy.array', (['targets_list'], {'ndmin': '(2)'}), '(targets_list, ndmin=2)\n', (1272, 1295), False, 'import numpy\n'), ((2631, 2664), 'numpy.array', 'numpy.array', (['inputs_list'], {'ndmin': '(2)'}), '(inputs_list, ndmin=2)\n', (2642, 2664), False, 'import numpy\n'), ((2220, 2251), 'numpy.transpose', 'numpy.transpose', (['hidden_outputs'], {}), '(hidden_outputs)\n', (2235, 2251), False, 'import numpy\n'), ((2440, 2463), 'numpy.transpose', 'numpy.transpose', (['inputs'], {}), '(inputs)\n', (2455, 2463), False, 'import numpy\n'), ((3935, 3965), 'numpy.asfarray', 
'numpy.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (3949, 3965), False, 'import numpy\n'), ((5079, 5109), 'numpy.asfarray', 'numpy.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (5093, 5109), False, 'import numpy\n')] |
import numpy
import upy2
from upy2 import u, U
from upy2.typesetting import ScientificTypesetter
U(2).default()
ScientificTypesetter(stddevs=2, precision=2).default()
ua = 1 +- u(0.1)
print(10 * ua)
print(ua * 10)
ub = 1 +- u(0.2)
with ScientificTypesetter(stddevs=2, precision=4):
print(ua * ub)
print(numpy.sqrt(0.2 ** 2 + 0.1 ** 2))
| [
"numpy.sqrt",
"upy2.u",
"upy2.typesetting.ScientificTypesetter",
"upy2.U"
] | [((241, 285), 'upy2.typesetting.ScientificTypesetter', 'ScientificTypesetter', ([], {'stddevs': '(2)', 'precision': '(4)'}), '(stddevs=2, precision=4)\n', (261, 285), False, 'from upy2.typesetting import ScientificTypesetter\n'), ((98, 102), 'upy2.U', 'U', (['(2)'], {}), '(2)\n', (99, 102), False, 'from upy2 import u, U\n'), ((113, 157), 'upy2.typesetting.ScientificTypesetter', 'ScientificTypesetter', ([], {'stddevs': '(2)', 'precision': '(2)'}), '(stddevs=2, precision=2)\n', (133, 157), False, 'from upy2.typesetting import ScientificTypesetter\n'), ((179, 185), 'upy2.u', 'u', (['(0.1)'], {}), '(0.1)\n', (180, 185), False, 'from upy2 import u, U\n'), ((228, 234), 'upy2.u', 'u', (['(0.2)'], {}), '(0.2)\n', (229, 234), False, 'from upy2 import u, U\n'), ((316, 347), 'numpy.sqrt', 'numpy.sqrt', (['(0.2 ** 2 + 0.1 ** 2)'], {}), '(0.2 ** 2 + 0.1 ** 2)\n', (326, 347), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
# File: imagenet_utils.py
import cv2
import numpy as np
import tqdm
import multiprocessing
import tensorflow as tf
from abc import abstractmethod
from tensorpack import ModelDesc
from tensorpack.input_source import QueueInput, StagingInput
from tensorpack.dataflow import (
imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ,
BatchData, MultiThreadMapData)
from tensorpack.predict import PredictConfig, FeedfreePredictor
from tensorpack.utils.stats import RatioCounter
from tensorpack.models import regularize_cost
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils import logger
class GoogleNetResize(imgaug.ImageAugmentor):
"""
crop 8%~100% of the original image
See `Going Deeper with Convolutions` by Google.
"""
def __init__(self, crop_area_fraction=0.08,
aspect_ratio_low=0.75, aspect_ratio_high=1.333,
target_shape=224):
self._init(locals())
def _augment(self, img, _):
h, w = img.shape[:2]
area = h * w
for _ in range(10):
targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
ww = int(np.sqrt(targetArea * aspectR) + 0.5)
hh = int(np.sqrt(targetArea / aspectR) + 0.5)
if self.rng.uniform() < 0.5:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = 0 if w == ww else self.rng.randint(0, w - ww)
y1 = 0 if h == hh else self.rng.randint(0, h - hh)
out = img[y1:y1 + hh, x1:x1 + ww]
out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=cv2.INTER_CUBIC)
return out
out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
out = imgaug.CenterCrop(self.target_shape).augment(out)
return out
def fbresnet_augmentor(isTrain):
"""
Augmentor used in fb.resnet.torch, for BGR images in range [0,255].
"""
if isTrain:
augmentors = [
GoogleNetResize(),
# It's OK to remove the following augs if your CPU is not fast enough.
# Removing brightness/contrast/saturation does not have a significant effect on accuracy.
# Removing lighting leads to a tiny drop in accuracy.
imgaug.RandomOrderAug(
[imgaug.BrightnessScale((0.6, 1.4), clip=False),
imgaug.Contrast((0.6, 1.4), clip=False),
imgaug.Saturation(0.4, rgb=False),
# rgb-bgr conversion for the constants copied from fb.resnet.torch
imgaug.Lighting(0.1,
eigval=np.asarray(
[0.2175, 0.0188, 0.0045][::-1]) * 255.0,
eigvec=np.array(
[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype='float32')[::-1, ::-1]
)]),
imgaug.Flip(horiz=True),
]
else:
augmentors = [
imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
imgaug.CenterCrop((224, 224)),
]
return augmentors
def get_imagenet_dataflow(
datadir, name, batch_size,
augmentors, parallel=None):
"""
See explanations in the tutorial:
http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
"""
assert name in ['train', 'val', 'test']
assert datadir is not None
assert isinstance(augmentors, list)
isTrain = name == 'train'
if parallel is None:
parallel = min(40, multiprocessing.cpu_count() // 2) # assuming hyperthreading
if isTrain:
ds = dataset.ILSVRC12(datadir, name, shuffle=True)
ds = AugmentImageComponent(ds, augmentors, copy=False)
if parallel < 16:
logger.warn("DataFlow may become the bottleneck when too few processes are used.")
ds = PrefetchDataZMQ(ds, parallel)
ds = BatchData(ds, batch_size, remainder=False)
else:
ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
aug = imgaug.AugmentorList(augmentors)
def mapf(dp):
fname, cls = dp
im = cv2.imread(fname, cv2.IMREAD_COLOR)
im = aug.augment(im)
return im, cls
ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
ds = BatchData(ds, batch_size, remainder=True)
ds = PrefetchDataZMQ(ds, 1)
return ds
def eval_on_ILSVRC12(model, sessinit, dataflow):
pred_config = PredictConfig(
model=model,
session_init=sessinit,
input_names=['input', 'label'],
output_names=['wrong-top1', 'wrong-top5']
)
acc1, acc5 = RatioCounter(), RatioCounter()
# This does not have a visible improvement over naive predictor,
# but will have an improvement if image_dtype is set to float32.
pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
for _ in tqdm.trange(dataflow.size()):
top1, top5 = pred()
batch_size = top1.shape[0]
acc1.feed(top1.sum(), batch_size)
acc5.feed(top5.sum(), batch_size)
print("Top1 Error: {}".format(acc1.ratio))
print("Top5 Error: {}".format(acc5.ratio))
class ImageNetModel(ModelDesc):
image_shape = 224
"""
uint8 instead of float32 is used as input type to reduce copy overhead.
It might hurt the performance a liiiitle bit.
The pretrained models were trained with float32.
"""
image_dtype = tf.uint8
"""
Either 'NCHW' or 'NHWC'
"""
data_format = 'NCHW'
"""
Whether the image is BGR or RGB. If using DataFlow, then it should be BGR.
"""
image_bgr = True
weight_decay = 1e-4
"""
To apply on normalization parameters, use '.*/W|.*/gamma|.*/beta'
"""
weight_decay_pattern = '.*/W'
"""
Scale the loss, for whatever reasons (e.g., gradient averaging, fp16 training, etc)
"""
loss_scale = 1.
"""
Label smoothing (See tf.losses.softmax_cross_entropy)
"""
label_smoothing = 0.
def inputs(self):
return [tf.placeholder(self.image_dtype, [None, self.image_shape, self.image_shape, 3], 'input'),
tf.placeholder(tf.int32, [None], 'label')]
def build_graph(self, image, label):
image = self.image_preprocess(image)
assert self.data_format in ['NCHW', 'NHWC']
if self.data_format == 'NCHW':
image = tf.transpose(image, [0, 3, 1, 2])
logits = self.get_logits(image)
loss = ImageNetModel.compute_loss_and_error(
logits, label, label_smoothing=self.label_smoothing)
if self.weight_decay > 0:
wd_loss = regularize_cost(self.weight_decay_pattern,
tf.contrib.layers.l2_regularizer(self.weight_decay),
name='l2_regularize_loss')
add_moving_summary(loss, wd_loss)
total_cost = tf.add_n([loss, wd_loss], name='cost')
else:
total_cost = tf.identity(loss, name='cost')
add_moving_summary(total_cost)
if self.loss_scale != 1.:
logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
return total_cost * self.loss_scale
else:
return total_cost
@abstractmethod
def get_logits(self, image):
"""
Args:
image: 4D tensor of ``self.input_shape`` in ``self.data_format``
Returns:
Nx#class logits
"""
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def image_preprocess(self, image):
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = [0.485, 0.456, 0.406] # rgb
std = [0.229, 0.224, 0.225]
if self.image_bgr:
mean = mean[::-1]
std = std[::-1]
image_mean = tf.constant(mean, dtype=tf.float32) * 255.
image_std = tf.constant(std, dtype=tf.float32) * 255.
image = (image - image_mean) / image_std
return image
@staticmethod
def compute_loss_and_error(logits, label, label_smoothing=0.):
if label_smoothing == 0.:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
else:
nclass = logits.shape[-1]
loss = tf.losses.softmax_cross_entropy(
tf.one_hot(label, nclass),
logits, label_smoothing=label_smoothing)
loss = tf.reduce_mean(loss, name='xentropy-loss')
def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
with tf.name_scope('prediction_incorrect'):
x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
return tf.cast(x, tf.float32, name=name)
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
return loss
if __name__ == '__main__':
import argparse
from tensorpack.dataflow import TestDataSpeed
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True)
parser.add_argument('--batch', type=int, default=32)
parser.add_argument('--aug', choices=['train', 'val'], default='val')
args = parser.parse_args()
if args.aug == 'val':
augs = fbresnet_augmentor(False)
elif args.aug == 'train':
augs = fbresnet_augmentor(True)
df = get_imagenet_dataflow(
args.data, 'train', args.batch, augs)
# For val augmentor, Should get >100 it/s (i.e. 3k im/s) here on a decent E5 server.
TestDataSpeed(df).start()
| [
"tensorpack.dataflow.BatchData",
"numpy.sqrt",
"tensorpack.dataflow.dataset.ILSVRC12",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.contrib.layers.l2_regularizer",
"tensorpack.dataflow.PrefetchDataZMQ",
"tensorpack.dataflow.imgaug.Flip",
"multiprocessing.cpu_count",
"tensorflow.n... | [((4850, 4978), 'tensorpack.predict.PredictConfig', 'PredictConfig', ([], {'model': 'model', 'session_init': 'sessinit', 'input_names': "['input', 'label']", 'output_names': "['wrong-top1', 'wrong-top5']"}), "(model=model, session_init=sessinit, input_names=['input',\n 'label'], output_names=['wrong-top1', 'wrong-top5'])\n", (4863, 4978), False, 'from tensorpack.predict import PredictConfig, FeedfreePredictor\n'), ((9896, 9921), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9919, 9921), False, 'import argparse\n'), ((3978, 4023), 'tensorpack.dataflow.dataset.ILSVRC12', 'dataset.ILSVRC12', (['datadir', 'name'], {'shuffle': '(True)'}), '(datadir, name, shuffle=True)\n', (3994, 4023), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4037, 4086), 'tensorpack.dataflow.AugmentImageComponent', 'AugmentImageComponent', (['ds', 'augmentors'], {'copy': '(False)'}), '(ds, augmentors, copy=False)\n', (4058, 4086), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4221, 4250), 'tensorpack.dataflow.PrefetchDataZMQ', 'PrefetchDataZMQ', (['ds', 'parallel'], {}), '(ds, parallel)\n', (4236, 4250), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4264, 4306), 'tensorpack.dataflow.BatchData', 'BatchData', (['ds', 'batch_size'], {'remainder': '(False)'}), '(ds, batch_size, remainder=False)\n', (4273, 4306), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4330, 4381), 'tensorpack.dataflow.dataset.ILSVRC12Files', 'dataset.ILSVRC12Files', (['datadir', 'name'], {'shuffle': '(False)'}), '(datadir, name, shuffle=False)\n', (4351, 4381), False, 'from tensorpack.dataflow import imgaug, dataset, 
AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4396, 4428), 'tensorpack.dataflow.imgaug.AugmentorList', 'imgaug.AugmentorList', (['augmentors'], {}), '(augmentors)\n', (4416, 4428), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4606, 4675), 'tensorpack.dataflow.MultiThreadMapData', 'MultiThreadMapData', (['ds', 'parallel', 'mapf'], {'buffer_size': '(2000)', 'strict': '(True)'}), '(ds, parallel, mapf, buffer_size=2000, strict=True)\n', (4624, 4675), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4689, 4730), 'tensorpack.dataflow.BatchData', 'BatchData', (['ds', 'batch_size'], {'remainder': '(True)'}), '(ds, batch_size, remainder=True)\n', (4698, 4730), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4744, 4766), 'tensorpack.dataflow.PrefetchDataZMQ', 'PrefetchDataZMQ', (['ds', '(1)'], {}), '(ds, 1)\n', (4759, 4766), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((5030, 5044), 'tensorpack.utils.stats.RatioCounter', 'RatioCounter', ([], {}), '()\n', (5042, 5044), False, 'from tensorpack.utils.stats import RatioCounter\n'), ((5046, 5060), 'tensorpack.utils.stats.RatioCounter', 'RatioCounter', ([], {}), '()\n', (5058, 5060), False, 'from tensorpack.utils.stats import RatioCounter\n'), ((7940, 8006), 'tensorflow.get_variable', 'tf.get_variable', (['"""learning_rate"""'], {'initializer': '(0.1)', 'trainable': '(False)'}), "('learning_rate', initializer=0.1, trainable=False)\n", (7955, 8006), True, 'import tensorflow as tf\n'), ((8015, 8061), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate-summary"""', 'lr'], {}), "('learning_rate-summary', lr)\n", (8032, 8061), True, 'import 
tensorflow as tf\n'), ((8077, 8131), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr', '(0.9)'], {'use_nesterov': '(True)'}), '(lr, 0.9, use_nesterov=True)\n', (8103, 8131), True, 'import tensorflow as tf\n'), ((9157, 9199), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {'name': '"""xentropy-loss"""'}), "(loss, name='xentropy-loss')\n", (9171, 9199), True, 'import tensorflow as tf\n'), ((3253, 3276), 'tensorpack.dataflow.imgaug.Flip', 'imgaug.Flip', ([], {'horiz': '(True)'}), '(horiz=True)\n', (3264, 3276), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((3333, 3380), 'tensorpack.dataflow.imgaug.ResizeShortestEdge', 'imgaug.ResizeShortestEdge', (['(256)', 'cv2.INTER_CUBIC'], {}), '(256, cv2.INTER_CUBIC)\n', (3358, 3380), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((3394, 3423), 'tensorpack.dataflow.imgaug.CenterCrop', 'imgaug.CenterCrop', (['(224, 224)'], {}), '((224, 224))\n', (3411, 3423), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((4125, 4212), 'tensorpack.utils.logger.warn', 'logger.warn', (['"""DataFlow may become the bottleneck when too few processes are used."""'], {}), "(\n 'DataFlow may become the bottleneck when too few processes are used.')\n", (4136, 4212), False, 'from tensorpack.utils import logger\n'), ((4497, 4532), 'cv2.imread', 'cv2.imread', (['fname', 'cv2.IMREAD_COLOR'], {}), '(fname, cv2.IMREAD_COLOR)\n', (4507, 4532), False, 'import cv2\n'), ((5255, 5275), 'tensorpack.input_source.QueueInput', 'QueueInput', (['dataflow'], {}), '(dataflow)\n', (5265, 5275), False, 'from tensorpack.input_source import QueueInput, StagingInput\n'), ((6456, 6548), 'tensorflow.placeholder', 'tf.placeholder', (['self.image_dtype', '[None, self.image_shape, 
self.image_shape, 3]', '"""input"""'], {}), "(self.image_dtype, [None, self.image_shape, self.image_shape,\n 3], 'input')\n", (6470, 6548), True, 'import tensorflow as tf\n'), ((6562, 6603), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""label"""'], {}), "(tf.int32, [None], 'label')\n", (6576, 6603), True, 'import tensorflow as tf\n'), ((6803, 6836), 'tensorflow.transpose', 'tf.transpose', (['image', '[0, 3, 1, 2]'], {}), '(image, [0, 3, 1, 2])\n', (6815, 6836), True, 'import tensorflow as tf\n'), ((7264, 7297), 'tensorpack.tfutils.summary.add_moving_summary', 'add_moving_summary', (['loss', 'wd_loss'], {}), '(loss, wd_loss)\n', (7282, 7297), False, 'from tensorpack.tfutils.summary import add_moving_summary\n'), ((7323, 7361), 'tensorflow.add_n', 'tf.add_n', (['[loss, wd_loss]'], {'name': '"""cost"""'}), "([loss, wd_loss], name='cost')\n", (7331, 7361), True, 'import tensorflow as tf\n'), ((7401, 7431), 'tensorflow.identity', 'tf.identity', (['loss'], {'name': '"""cost"""'}), "(loss, name='cost')\n", (7412, 7431), True, 'import tensorflow as tf\n'), ((7444, 7474), 'tensorpack.tfutils.summary.add_moving_summary', 'add_moving_summary', (['total_cost'], {}), '(total_cost)\n', (7462, 7474), False, 'from tensorpack.tfutils.summary import add_moving_summary\n'), ((8185, 8218), 'tensorflow.name_scope', 'tf.name_scope', (['"""image_preprocess"""'], {}), "('image_preprocess')\n", (8198, 8218), True, 'import tensorflow as tf\n'), ((8862, 8937), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'label'}), '(logits=logits, labels=label)\n', (8908, 8937), True, 'import tensorflow as tf\n'), ((9430, 9463), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {'name': 'name'}), '(x, tf.float32, name=name)\n', (9437, 9463), True, 'import tensorflow as tf\n'), ((9566, 9612), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['wrong'], {'name': '"""train-error-top1"""'}), 
"(wrong, name='train-error-top1')\n", (9580, 9612), True, 'import tensorflow as tf\n'), ((9716, 9762), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['wrong'], {'name': '"""train-error-top5"""'}), "(wrong, name='train-error-top5')\n", (9730, 9762), True, 'import tensorflow as tf\n'), ((10442, 10459), 'tensorpack.dataflow.TestDataSpeed', 'TestDataSpeed', (['df'], {}), '(df)\n', (10455, 10459), False, 'from tensorpack.dataflow import TestDataSpeed\n'), ((1689, 1780), 'cv2.resize', 'cv2.resize', (['out', '(self.target_shape, self.target_shape)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(out, (self.target_shape, self.target_shape), interpolation=cv2.\n INTER_CUBIC)\n', (1699, 1780), False, 'import cv2\n'), ((1817, 1885), 'tensorpack.dataflow.imgaug.ResizeShortestEdge', 'imgaug.ResizeShortestEdge', (['self.target_shape'], {'interp': 'cv2.INTER_CUBIC'}), '(self.target_shape, interp=cv2.INTER_CUBIC)\n', (1842, 1885), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((1913, 1949), 'tensorpack.dataflow.imgaug.CenterCrop', 'imgaug.CenterCrop', (['self.target_shape'], {}), '(self.target_shape)\n', (1930, 1949), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((3888, 3915), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3913, 3915), False, 'import multiprocessing\n'), ((7134, 7185), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.weight_decay'], {}), '(self.weight_decay)\n', (7166, 7185), True, 'import tensorflow as tf\n'), ((8297, 8323), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (8304, 8323), True, 'import tensorflow as tf\n'), ((8536, 8571), 'tensorflow.constant', 'tf.constant', (['mean'], {'dtype': 'tf.float32'}), '(mean, dtype=tf.float32)\n', (8547, 8571), True, 'import tensorflow as tf\n'), 
((8603, 8637), 'tensorflow.constant', 'tf.constant', (['std'], {'dtype': 'tf.float32'}), '(std, dtype=tf.float32)\n', (8614, 8637), True, 'import tensorflow as tf\n'), ((9058, 9083), 'tensorflow.one_hot', 'tf.one_hot', (['label', 'nclass'], {}), '(label, nclass)\n', (9068, 9083), True, 'import tensorflow as tf\n'), ((9300, 9337), 'tensorflow.name_scope', 'tf.name_scope', (['"""prediction_incorrect"""'], {}), "('prediction_incorrect')\n", (9313, 9337), True, 'import tensorflow as tf\n'), ((1279, 1308), 'numpy.sqrt', 'np.sqrt', (['(targetArea * aspectR)'], {}), '(targetArea * aspectR)\n', (1286, 1308), True, 'import numpy as np\n'), ((1337, 1366), 'numpy.sqrt', 'np.sqrt', (['(targetArea / aspectR)'], {}), '(targetArea / aspectR)\n', (1344, 1366), True, 'import numpy as np\n'), ((2478, 2524), 'tensorpack.dataflow.imgaug.BrightnessScale', 'imgaug.BrightnessScale', (['(0.6, 1.4)'], {'clip': '(False)'}), '((0.6, 1.4), clip=False)\n', (2500, 2524), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((2543, 2582), 'tensorpack.dataflow.imgaug.Contrast', 'imgaug.Contrast', (['(0.6, 1.4)'], {'clip': '(False)'}), '((0.6, 1.4), clip=False)\n', (2558, 2582), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((2601, 2634), 'tensorpack.dataflow.imgaug.Saturation', 'imgaug.Saturation', (['(0.4)'], {'rgb': '(False)'}), '(0.4, rgb=False)\n', (2618, 2634), False, 'from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData, MultiThreadMapData\n'), ((9374, 9409), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'label', 'topk'], {}), '(logits, label, topk)\n', (9388, 9409), True, 'import tensorflow as tf\n'), ((2798, 2840), 'numpy.asarray', 'np.asarray', (['[0.2175, 0.0188, 0.0045][::-1]'], {}), '([0.2175, 0.0188, 0.0045][::-1])\n', (2808, 2840), True, 'import numpy as np\n'), 
((2928, 3043), 'numpy.array', 'np.array', (['[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, -0.6948, \n 0.4203]]'], {'dtype': '"""float32"""'}), "([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, \n -0.6948, 0.4203]], dtype='float32')\n", (2936, 3043), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1"
NUM_GPU = len(GPU_GROUP.strip().split(','))
LR = 1e-3
SAVE_WEIGHTS_INTE = 11725 * 2
DECAY_EPOCH = [8, 11, 20]
MAX_EPOCH = 12
WARM_EPOCH = 1 / 16.
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'DIOR-R'
CLASS_NUM = 20
# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2.]
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
VERSION = 'RetinaNet_DIOR_R_RSDet_2x_20201128'
"""
RSDet-8p
FLOPs: 662229097; Trainable params: 32615676
cls : Expressway-Service-area|| Recall: 0.8589861751152074 || Precison: 0.05740330130574033|| AP: 0.7077386868799547
F1:0.7330546132257147 P:0.8273464658169177 R:0.6580645161290323
cls : tenniscourt|| Recall: 0.8789323164918971 || Precison: 0.3033607520564042|| AP: 0.7857012794326934
F1:0.8193770274071432 P:0.8458750181238219 R:0.7944981615143675
cls : windmill|| Recall: 0.6927951967978653 || Precison: 0.13584930342076001|| AP: 0.5286659144596662
F1:0.6188888440654919 P:0.7193990278391516 R:0.543028685790527
cls : Expressway-toll-station|| Recall: 0.625 || Precison: 0.03401898734177215|| AP: 0.5344552347901361
F1:0.6186084650574205 P:0.8308823529411765 R:0.49273255813953487
cls : golffield|| Recall: 0.9060869565217391 || Precison: 0.06413096996553422|| AP: 0.7574888643713931
F1:0.7864028348881296 P:0.8901098901098901 R:0.7043478260869566
cls : harbor|| Recall: 0.5806763285024155 || Precison: 0.023446338704014358|| AP: 0.29782467987141364
F1:0.37752234753283576 P:0.4247181964573269 R:0.3397745571658615
cls : dam|| Recall: 0.6728624535315985 || Precison: 0.02104406464364609|| AP: 0.2691142236145363
F1:0.362026081181577 P:0.44565217391304346 R:0.3048327137546468
cls : trainstation|| Recall: 0.6444007858546169 || Precison: 0.021516662293361324|| AP: 0.3815880593128638
F1:0.4496993516423597 P:0.5654761904761905 R:0.37328094302554027
cls : baseballfield|| Recall: 0.7743156668608038 || Precison: 0.26243584682195026|| AP: 0.6811438546180969
F1:0.7429230938374606 P:0.9455445544554455 R:0.6118229470005824
cls : vehicle|| Recall: 0.3068318318318318 || Precison: 0.13679418951032568|| AP: 0.2672063508440936
F1:0.3553452081061398 P:0.622696502444528 R:0.24861111111111112
cls : stadium|| Recall: 0.8482142857142857 || Precison: 0.07377685736474243|| AP: 0.6276089125330554
F1:0.6325454169113612 P:0.725 R:0.5610119047619048
cls : chimney|| Recall: 0.7730358874878759 || Precison: 0.04247042523713098|| AP: 0.7260324941088926
F1:0.8350750722792715 P:0.9680306905370843 R:0.7342386032977691
cls : airplane|| Recall: 0.6311495372625426 || Precison: 0.35695592286501376|| AP: 0.567026002527116
F1:0.6346504549275178 P:0.8086349924585219 R:0.5222844617632733
cls : ship|| Recall: 0.707923605979651 || Precison: 0.3610889639476393|| AP: 0.6159131184716269
F1:0.669633251073358 P:0.7577269627023728 R:0.5998976865798897
cls : bridge|| Recall: 0.4322132097334878 || Precison: 0.03649349378730066|| AP: 0.25033393470283555
F1:0.33802340126740676 P:0.43087971274685816 R:0.27809965237543455
cls : overpass|| Recall: 0.6307519640852974 || Precison: 0.0517138256268691|| AP: 0.4578515515288977
F1:0.5263468055036801 P:0.6678170836928387 R:0.43434343434343436
cls : groundtrackfield|| Recall: 0.9262599469496021 || Precison: 0.08814175374829623|| AP: 0.7454750733336922
F1:0.753787919975915 P:0.7564102564102564 R:0.7511936339522547
cls : airport|| Recall: 0.6606606606606606 || Precison: 0.026690931149529876|| AP: 0.3422431949515288
F1:0.4727842036814644 P:0.5450980392156862 R:0.4174174174174174
cls : basketballcourt|| Recall: 0.9044734389561976 || Precison: 0.09671632866610194|| AP: 0.8244285852702582
F1:0.8769870555164442 P:0.941711229946524 R:0.820596458527493
cls : storagetank|| Recall: 0.4953555070416506 || Precison: 0.42391383984174663|| AP: 0.4284867911249676
F1:0.5612463042262482 P:0.7862707288854609 R:0.43636830615127775
mAP is : 0.5398163403373859
"""
| [
"numpy.array",
"alpharotate.utils.pretrain_zoo.PretrainModelZoo"
] | [((763, 781), 'alpharotate.utils.pretrain_zoo.PretrainModelZoo', 'PretrainModelZoo', ([], {}), '()\n', (779, 781), False, 'from alpharotate.utils.pretrain_zoo import PretrainModelZoo\n'), ((532, 563), 'numpy.array', 'np.array', (['DECAY_EPOCH', 'np.int32'], {}), '(DECAY_EPOCH, np.int32)\n', (540, 563), True, 'import numpy as np\n')] |
import os
from nose.tools import assert_raises, assert_true, assert_equal
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_array_less)
from mne.transforms import apply_trans, rotation, translation, scaling
from mne.coreg import (fit_matched_points, fit_point_cloud,
_point_cloud_error, _decimate_points,
create_default_subject, scale_mri,
_is_mri_subject, scale_labels, scale_source_space,
read_elp)
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.utils import requires_mne_fs_in_env, _TempDir, run_subprocess
from functools import reduce
# Scratch directory shared by every test in this module (mne's _TempDir;
# presumably cleans itself up when garbage-collected — confirm against mne docs).
tempdir = _TempDir()
def test_read_elp():
    """Read the KIT test ELP file and verify its points."""
    elp_path = os.path.join(kit_data_dir, 'test_elp.txt')
    pts = read_elp(elp_path)
    # The fixture file is expected to contain exactly eight 3-D points.
    assert_equal(pts.shape, (8, 3))
    assert_array_equal(pts[0], [1.3930, 13.1613, -4.6967])
@requires_mne_fs_in_env
def test_scale_mri():
    """Test creating fsaverage and scaling it"""
    # create fsaverage
    create_default_subject(subjects_dir=tempdir)
    is_mri = _is_mri_subject('fsaverage', tempdir)
    assert_true(is_mri, "Creating fsaverage failed")
    fid_path = os.path.join(tempdir, 'fsaverage', 'bem',
                            'fsaverage-fiducials.fif')
    # Deleting the fiducials file and re-running with update=True should
    # restore it without rebuilding the whole subject.
    os.remove(fid_path)
    create_default_subject(update=True, subjects_dir=tempdir)
    assert_true(os.path.exists(fid_path), "Updating fsaverage")
    # create source space
    path = os.path.join(tempdir, 'fsaverage', 'bem', 'fsaverage-ico-6-src.fif')
    if not os.path.exists(path):
        # Build the ico-6 source space with the MNE command-line tool; the
        # decorator above guarantees the MNE environment is available.
        cmd = ['mne_setup_source_space', '--subject', 'fsaverage', '--ico',
               '6']
        env = os.environ.copy()
        env['SUBJECTS_DIR'] = tempdir
        run_subprocess(cmd, env=env)
    # scale fsaverage
    # Non-uniform scale factors [1, .2, .8] (presumably x/y/z; the True
    # argument looks like an overwrite flag — confirm against scale_mri docs).
    scale_mri('fsaverage', 'flachkopf', [1, .2, .8], True, subjects_dir=tempdir)
    is_mri = _is_mri_subject('flachkopf', tempdir)
    assert_true(is_mri, "Scaling fsaverage failed")
    src_path = os.path.join(tempdir, 'flachkopf', 'bem',
                            'flachkopf-ico-6-src.fif')
    assert_true(os.path.exists(src_path), "Source space was not scaled")
    scale_labels('flachkopf', subjects_dir=tempdir)
    # scale source space separately
    # Removing the scaled source space first proves scale_source_space
    # recreates it on its own rather than relying on scale_mri's output.
    os.remove(src_path)
    scale_source_space('flachkopf', 'ico-6', subjects_dir=tempdir)
    assert_true(os.path.exists(src_path), "Source space was not scaled")
def test_fit_matched_points():
    """Recover known transforms of a matched point set with fit_matched_points."""
    tgt_pts = np.random.uniform(size=(6, 3))

    def _roundtrip(trans, msg, **fit_kwargs):
        # Distort the targets, fit the transform back, and check that
        # applying the estimate to the distorted points recovers the targets.
        src = apply_trans(trans, tgt_pts)
        trans_est = fit_matched_points(src, tgt_pts, out='trans', **fit_kwargs)
        assert_array_almost_equal(tgt_pts, apply_trans(trans_est, src), 2, msg)
        return src

    # rotation only
    _roundtrip(rotation(2, 6, 3),
               "fit_matched_points with rotation", translate=False)
    # rotation & scaling
    _roundtrip(np.dot(rotation(2, 6, 3), scaling(.5, .5, .5)),
               "fit_matched_points with rotation and scaling.",
               translate=False, scale=1)
    # rotation & translation
    _roundtrip(np.dot(translation(2, -6, 3), rotation(2, 6, 3)),
               "fit_matched_points with rotation and translation.")
    # rotation & translation & scaling
    src_pts = _roundtrip(
        reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
                        scaling(.5, .5, .5))),
        "fit_matched_points with rotation, translation and scaling.",
        scale=1)

    # test exceeding tolerance
    tgt_pts[0, :] += 20
    assert_raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
def test_fit_point_cloud():
    """Recover known transforms by fitting points to a point cloud."""
    # evenly spaced target points on a sphere
    u = np.linspace(0, np.pi, 150)
    v = np.linspace(0, np.pi, 150)
    x = np.outer(np.cos(u), np.sin(v)).reshape((-1, 1))
    y = np.outer(np.sin(u), np.sin(v)).reshape((-1, 1))
    z = np.outer(np.ones(np.size(u)), np.cos(v)).reshape((-1, 1)) * 3
    tgt_pts = _decimate_points(np.hstack((x, y, z)), .05)

    # pick some points to fit
    some_tgt_pts = tgt_pts[::362]

    def _fit_and_check(trans, translate, scale, msg):
        # Distort the sample points, fit them back to the full cloud, and
        # require a small residual point-cloud error.
        src = apply_trans(trans, some_tgt_pts)
        trans_est = fit_point_cloud(src, tgt_pts, rotate=True,
                                    translate=translate, scale=scale,
                                    out='trans')
        err = _point_cloud_error(apply_trans(trans_est, src), tgt_pts)
        assert_array_less(err, .1, msg)

    # rotation only
    _fit_and_check(rotation(1.5, .3, -0.4), False, 0,
                   "fit_point_cloud with rotation.")
    # rotation and translation
    _fit_and_check(np.dot(rotation(0.5, .3, -0.4), translation(.3, .2, -.2)),
                   True, 0,
                   "fit_point_cloud with rotation and translation.")
    # rotation and 1 scale parameter
    _fit_and_check(np.dot(rotation(0.5, .3, -0.4), scaling(1.5, 1.5, 1.5)),
                   False, 1,
                   "fit_point_cloud with rotation and 1 scaling parameter.")
    # rotation and 3 scale parameter
    _fit_and_check(np.dot(rotation(0.5, .3, -0.4), scaling(1.5, 1.7, 1.1)),
                   False, 3,
                   "fit_point_cloud with rotation and 3 scaling parameters.")
| [
"mne.coreg.scale_labels",
"mne.transforms.scaling",
"numpy.hstack",
"nose.tools.assert_true",
"nose.tools.assert_raises",
"mne.transforms.apply_trans",
"numpy.sin",
"nose.tools.assert_equal",
"mne.coreg.create_default_subject",
"os.remove",
"numpy.testing.assert_array_less",
"os.path.exists",
... | [((738, 748), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (746, 748), False, 'from mne.utils import requires_mne_fs_in_env, _TempDir, run_subprocess\n'), ((818, 860), 'os.path.join', 'os.path.join', (['kit_data_dir', '"""test_elp.txt"""'], {}), "(kit_data_dir, 'test_elp.txt')\n", (830, 860), False, 'import os\n'), ((874, 888), 'mne.coreg.read_elp', 'read_elp', (['path'], {}), '(path)\n', (882, 888), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((893, 927), 'nose.tools.assert_equal', 'assert_equal', (['points.shape', '(8, 3)'], {}), '(points.shape, (8, 3))\n', (905, 927), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((932, 988), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['points[0]', '[1.393, 13.1613, -4.6967]'], {}), '(points[0], [1.393, 13.1613, -4.6967])\n', (950, 988), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((1114, 1158), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'subjects_dir': 'tempdir'}), '(subjects_dir=tempdir)\n', (1136, 1158), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((1172, 1209), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""fsaverage"""', 'tempdir'], {}), "('fsaverage', tempdir)\n", (1187, 1209), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((1214, 1262), 'nose.tools.assert_true', 'assert_true', (['is_mri', '"""Creating fsaverage failed"""'], {}), "(is_mri, 'Creating fsaverage failed')\n", (1225, 1262), False, 'from 
nose.tools import assert_raises, assert_true, assert_equal\n'), ((1279, 1347), 'os.path.join', 'os.path.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-fiducials.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')\n", (1291, 1347), False, 'import os\n'), ((1380, 1399), 'os.remove', 'os.remove', (['fid_path'], {}), '(fid_path)\n', (1389, 1399), False, 'import os\n'), ((1404, 1461), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'update': '(True)', 'subjects_dir': 'tempdir'}), '(update=True, subjects_dir=tempdir)\n', (1426, 1461), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((1564, 1632), 'os.path.join', 'os.path.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-ico-6-src.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-ico-6-src.fif')\n", (1576, 1632), False, 'import os\n'), ((1896, 1974), 'mne.coreg.scale_mri', 'scale_mri', (['"""fsaverage"""', '"""flachkopf"""', '[1, 0.2, 0.8]', '(True)'], {'subjects_dir': 'tempdir'}), "('fsaverage', 'flachkopf', [1, 0.2, 0.8], True, subjects_dir=tempdir)\n", (1905, 1974), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((1986, 2023), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""flachkopf"""', 'tempdir'], {}), "('flachkopf', tempdir)\n", (2001, 2023), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((2028, 2075), 'nose.tools.assert_true', 'assert_true', (['is_mri', '"""Scaling fsaverage failed"""'], {}), "(is_mri, 'Scaling fsaverage failed')\n", (2039, 2075), False, 'from 
nose.tools import assert_raises, assert_true, assert_equal\n'), ((2091, 2159), 'os.path.join', 'os.path.join', (['tempdir', '"""flachkopf"""', '"""bem"""', '"""flachkopf-ico-6-src.fif"""'], {}), "(tempdir, 'flachkopf', 'bem', 'flachkopf-ico-6-src.fif')\n", (2103, 2159), False, 'import os\n'), ((2265, 2312), 'mne.coreg.scale_labels', 'scale_labels', (['"""flachkopf"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', subjects_dir=tempdir)\n", (2277, 2312), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((2354, 2373), 'os.remove', 'os.remove', (['src_path'], {}), '(src_path)\n', (2363, 2373), False, 'import os\n'), ((2378, 2440), 'mne.coreg.scale_source_space', 'scale_source_space', (['"""flachkopf"""', '"""ico-6"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', 'ico-6', subjects_dir=tempdir)\n", (2396, 2440), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((2633, 2663), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(6, 3)'}), '(size=(6, 3))\n', (2650, 2663), True, 'import numpy as np\n'), ((2697, 2714), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (2705, 2714), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((2729, 2756), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (2740, 2756), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((2773, 2839), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'translate': '(False)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, translate=False, out='trans')\n", (2791, 2839), False, 'from mne.coreg import fit_matched_points, 
fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((2889, 2920), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (2900, 2920), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((2925, 3011), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation"""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation')\n", (2950, 3011), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((3140, 3167), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (3151, 3167), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3184, 3259), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'translate': '(False)', 'scale': '(1)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, translate=False, scale=1, out='trans')\n", (3202, 3259), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((3306, 3337), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (3317, 3337), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3342, 3441), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation and scaling."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation and scaling.')\n", (3367, 3441), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((3576, 3603), 
'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (3587, 3603), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3620, 3669), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'out': '"""trans"""'}), "(src_pts, tgt_pts, out='trans')\n", (3638, 3669), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((3684, 3715), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (3695, 3715), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3720, 3823), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation and translation."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation and translation.')\n", (3745, 3823), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((4032, 4059), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (4043, 4059), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((4076, 4134), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'scale': '(1)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, scale=1, out='trans')\n", (4094, 4134), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((4149, 4180), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (4160, 4180), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((4185, 
4297), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation, translation and scaling."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation, translation and scaling.')\n", (4210, 4297), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((4387, 4460), 'nose.tools.assert_raises', 'assert_raises', (['RuntimeError', 'fit_matched_points', 'tgt_pts', 'src_pts'], {'tol': '(10)'}), '(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)\n', (4400, 4460), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((4618, 4644), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(150)'], {}), '(0, np.pi, 150)\n', (4629, 4644), True, 'import numpy as np\n'), ((4653, 4679), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(150)'], {}), '(0, np.pi, 150)\n', (4664, 4679), True, 'import numpy as np\n'), ((4878, 4898), 'numpy.hstack', 'np.hstack', (['(x, y, z)'], {}), '((x, y, z))\n', (4887, 4898), True, 'import numpy as np\n'), ((4913, 4944), 'mne.coreg._decimate_points', '_decimate_points', (['tgt_pts', '(0.05)'], {}), '(tgt_pts, 0.05)\n', (4929, 4944), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((5042, 5066), 'mne.transforms.rotation', 'rotation', (['(1.5)', '(0.3)', '(-0.4)'], {}), '(1.5, 0.3, -0.4)\n', (5050, 5066), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5080, 5112), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'some_tgt_pts'], {}), '(trans, some_tgt_pts)\n', (5091, 5112), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5129, 5218), 'mne.coreg.fit_point_cloud', 'fit_point_cloud', (['src_pts', 'tgt_pts'], {'rotate': '(True)', 'translate': 
'(False)', 'scale': '(0)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, rotate=True, translate=False, scale=0,\n out='trans')\n", (5144, 5218), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((5261, 5292), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (5272, 5292), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5303, 5339), 'mne.coreg._point_cloud_error', '_point_cloud_error', (['est_pts', 'tgt_pts'], {}), '(est_pts, tgt_pts)\n', (5321, 5339), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((5344, 5405), 'numpy.testing.assert_array_less', 'assert_array_less', (['err', '(0.1)', '"""fit_point_cloud with rotation."""'], {}), "(err, 0.1, 'fit_point_cloud with rotation.')\n", (5361, 5405), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((5521, 5553), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'some_tgt_pts'], {}), '(trans, some_tgt_pts)\n', (5532, 5553), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5570, 5659), 'mne.coreg.fit_point_cloud', 'fit_point_cloud', (['src_pts', 'tgt_pts'], {'rotate': '(True)', 'translate': '(True)', 'scale': '(0)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, rotate=True, translate=True, scale=0, out\n ='trans')\n", (5585, 5659), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((5701, 5732), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), 
'(trans_est, src_pts)\n', (5712, 5732), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5743, 5779), 'mne.coreg._point_cloud_error', '_point_cloud_error', (['est_pts', 'tgt_pts'], {}), '(est_pts, tgt_pts)\n', (5761, 5779), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((5784, 5861), 'numpy.testing.assert_array_less', 'assert_array_less', (['err', '(0.1)', '"""fit_point_cloud with rotation and translation."""'], {}), "(err, 0.1, 'fit_point_cloud with rotation and translation.')\n", (5801, 5861), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((6006, 6038), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'some_tgt_pts'], {}), '(trans, some_tgt_pts)\n', (6017, 6038), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6055, 6144), 'mne.coreg.fit_point_cloud', 'fit_point_cloud', (['src_pts', 'tgt_pts'], {'rotate': '(True)', 'translate': '(False)', 'scale': '(1)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, rotate=True, translate=False, scale=1,\n out='trans')\n", (6070, 6144), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((6187, 6218), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (6198, 6218), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6229, 6265), 'mne.coreg._point_cloud_error', '_point_cloud_error', (['est_pts', 'tgt_pts'], {}), '(est_pts, tgt_pts)\n', (6247, 6265), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, 
scale_labels, scale_source_space, read_elp\n'), ((6270, 6359), 'numpy.testing.assert_array_less', 'assert_array_less', (['err', '(0.1)', '"""fit_point_cloud with rotation and 1 scaling parameter."""'], {}), "(err, 0.1,\n 'fit_point_cloud with rotation and 1 scaling parameter.')\n", (6287, 6359), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_array_less\n'), ((6500, 6532), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'some_tgt_pts'], {}), '(trans, some_tgt_pts)\n', (6511, 6532), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6549, 6638), 'mne.coreg.fit_point_cloud', 'fit_point_cloud', (['src_pts', 'tgt_pts'], {'rotate': '(True)', 'translate': '(False)', 'scale': '(3)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, rotate=True, translate=False, scale=3,\n out='trans')\n", (6564, 6638), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((6681, 6712), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (6692, 6712), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6723, 6759), 'mne.coreg._point_cloud_error', '_point_cloud_error', (['est_pts', 'tgt_pts'], {}), '(est_pts, tgt_pts)\n', (6741, 6759), False, 'from mne.coreg import fit_matched_points, fit_point_cloud, _point_cloud_error, _decimate_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, read_elp\n'), ((6764, 6854), 'numpy.testing.assert_array_less', 'assert_array_less', (['err', '(0.1)', '"""fit_point_cloud with rotation and 3 scaling parameters."""'], {}), "(err, 0.1,\n 'fit_point_cloud with rotation and 3 scaling parameters.')\n", (6781, 6854), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal, 
assert_array_less\n'), ((1478, 1502), 'os.path.exists', 'os.path.exists', (['fid_path'], {}), '(fid_path)\n', (1492, 1502), False, 'import os\n'), ((1644, 1664), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1658, 1664), False, 'import os\n'), ((1776, 1793), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1791, 1793), False, 'import os\n'), ((1840, 1868), 'mne.utils.run_subprocess', 'run_subprocess', (['cmd'], {'env': 'env'}), '(cmd, env=env)\n', (1854, 1868), False, 'from mne.utils import requires_mne_fs_in_env, _TempDir, run_subprocess\n'), ((2204, 2228), 'os.path.exists', 'os.path.exists', (['src_path'], {}), '(src_path)\n', (2218, 2228), False, 'import os\n'), ((2457, 2481), 'os.path.exists', 'os.path.exists', (['src_path'], {}), '(src_path)\n', (2471, 2481), False, 'import os\n'), ((3086, 3103), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (3094, 3103), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3105, 3127), 'mne.transforms.scaling', 'scaling', (['(0.5)', '(0.5)', '(0.5)'], {}), '(0.5, 0.5, 0.5)\n', (3112, 3127), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3520, 3541), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (3531, 3541), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3543, 3560), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (3551, 3560), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5456, 5480), 'mne.transforms.rotation', 'rotation', (['(0.5)', '(0.3)', '(-0.4)'], {}), '(0.5, 0.3, -0.4)\n', (5464, 5480), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5481, 5508), 'mne.transforms.translation', 'translation', (['(0.3)', '(0.2)', '(-0.2)'], {}), '(0.3, 0.2, -0.2)\n', (5492, 5508), False, 'from mne.transforms 
import apply_trans, rotation, translation, scaling\n'), ((5943, 5967), 'mne.transforms.rotation', 'rotation', (['(0.5)', '(0.3)', '(-0.4)'], {}), '(0.5, 0.3, -0.4)\n', (5951, 5967), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((5968, 5990), 'mne.transforms.scaling', 'scaling', (['(1.5)', '(1.5)', '(1.5)'], {}), '(1.5, 1.5, 1.5)\n', (5975, 5990), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6437, 6461), 'mne.transforms.rotation', 'rotation', (['(0.5)', '(0.3)', '(-0.4)'], {}), '(0.5, 0.3, -0.4)\n', (6445, 6461), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((6462, 6484), 'mne.transforms.scaling', 'scaling', (['(1.5)', '(1.7)', '(1.1)'], {}), '(1.5, 1.7, 1.1)\n', (6469, 6484), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3921, 3942), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (3932, 3942), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3944, 3967), 'mne.transforms.rotation', 'rotation', (['(1.5)', '(0.3)', '(1.4)'], {}), '(1.5, 0.3, 1.4)\n', (3952, 3967), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((3996, 4018), 'mne.transforms.scaling', 'scaling', (['(0.5)', '(0.5)', '(0.5)'], {}), '(0.5, 0.5, 0.5)\n', (4003, 4018), False, 'from mne.transforms import apply_trans, rotation, translation, scaling\n'), ((4698, 4707), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (4704, 4707), True, 'import numpy as np\n'), ((4709, 4718), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (4715, 4718), True, 'import numpy as np\n'), ((4754, 4763), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (4760, 4763), True, 'import numpy as np\n'), ((4765, 4774), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (4771, 4774), True, 'import numpy as np\n'), ((4831, 4840), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (4837, 4840), 
True, 'import numpy as np\n'), ((4818, 4828), 'numpy.size', 'np.size', (['u'], {}), '(u)\n', (4825, 4828), True, 'import numpy as np\n')] |
import sys
import numpy as np
from PySide2 import QtCore
from PySide2.QtCore import QObject, Slot, QAbstractItemModel, QModelIndex
from PySide2.QtGui import QStandardItemModel, QStandardItem
from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, \
QTreeView, QListView, QTableView
class MainWindow(QMainWindow):
    """Top-level window fixed at 60% x 90% of the available desktop area."""

    def __init__(self, app: QApplication):
        """Size the window relative to the desktop geometry.

        app - the running QApplication, used to query the available geometry.
        """
        super(MainWindow, self).__init__()
        geometry = app.desktop().availableGeometry(self)
        # QWidget.setFixedSize takes integer pixels; the raw float products
        # raise a TypeError under strict Qt bindings, so truncate explicitly.
        self.setFixedSize(int(geometry.width() * 0.6),
                          int(geometry.height() * 0.9))
class CentralWidget(QWidget):
    """Widget stacking a list, table, and tree view over one shared model."""

    def __init__(self, model):
        """Store *model* and build the three synchronized views.

        model - an item model shared by all three views, so edits in one
                view are reflected in the others.
        """
        super(CentralWidget, self).__init__()
        self.model = model
        self.init_UI()

    def init_UI(self):
        """Lay out a QListView, QTableView, and QTreeView vertically."""
        vbox_layout = QVBoxLayout()
        # Named list_view/table_view/tree_view so the builtin `list` is not
        # shadowed (the original used `list` as a local name).
        list_view = QListView()
        list_view.setModel(self.model)
        table_view = QTableView()
        table_view.setModel(self.model)
        tree_view = QTreeView()
        tree_view.setModel(self.model)
        vbox_layout.addWidget(list_view)
        vbox_layout.addWidget(table_view)
        vbox_layout.addWidget(tree_view)
        self.setLayout(vbox_layout)
def add_data(item: QStandardItem, array: np.ndarray):
    """Populate *item* with one child cell per field/record of *array*.

    Each dtype field becomes a column and each record a row; fields that are
    themselves structured get their sub-fields appended as extra rows below
    the corresponding cell.
    """
    dtype = array.dtype
    for col, field in enumerate(dtype.names):
        nested_names = dtype[field].names
        for row, value in enumerate(array[field]):
            cell = QStandardItem(str(value))
            item.setChild(row, col, cell)
            if nested_names is None:
                continue
            for sub in nested_names:
                cell.appendRow(QStandardItem(sub + ":" + str(value[sub])))
if __name__ == '__main__':
    # Build the application and a window sized from the current desktop.
    app = QApplication(sys.argv)
    main = MainWindow(app)

    # Structured dtype exercising a nested compound field ("angle") and a
    # fixed-shape array field ("radius").
    record_dtype = np.dtype([
        ("angle", [
            ("theta", "d"),
            ("phi", "d"),
        ]),
        ("id", "i"),
        ("parent_id", "i"),
        ("energy", "d"),
        ("radius", "d", (10, 10)),
    ])
    array = np.ones(10, dtype=record_dtype)

    model = QStandardItemModel()
    add_data(model.invisibleRootItem(), array)

    main.setCentralWidget(CentralWidget(model))
    main.show()
    app.exec_()
| [
"numpy.ones",
"PySide2.QtWidgets.QTableView",
"PySide2.QtWidgets.QTreeView",
"PySide2.QtWidgets.QListView",
"PySide2.QtWidgets.QApplication",
"numpy.dtype",
"PySide2.QtWidgets.QVBoxLayout",
"PySide2.QtGui.QStandardItemModel"
] | [((1747, 1769), 'PySide2.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1759, 1769), False, 'from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QTreeView, QListView, QTableView\n'), ((1808, 1943), 'numpy.dtype', 'np.dtype', (["[('angle', [('theta', 'd'), ('phi', 'd')]), ('id', 'i'), ('parent_id', 'i'),\n ('energy', 'd'), ('radius', 'd', (10, 10))]"], {}), "([('angle', [('theta', 'd'), ('phi', 'd')]), ('id', 'i'), (\n 'parent_id', 'i'), ('energy', 'd'), ('radius', 'd', (10, 10))])\n", (1816, 1943), True, 'import numpy as np\n'), ((1996, 2017), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'dt'}), '(10, dtype=dt)\n', (2003, 2017), True, 'import numpy as np\n'), ((2031, 2051), 'PySide2.QtGui.QStandardItemModel', 'QStandardItemModel', ([], {}), '()\n', (2049, 2051), False, 'from PySide2.QtGui import QStandardItemModel, QStandardItem\n'), ((801, 814), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (812, 814), False, 'from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QTreeView, QListView, QTableView\n'), ((831, 842), 'PySide2.QtWidgets.QListView', 'QListView', ([], {}), '()\n', (840, 842), False, 'from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QTreeView, QListView, QTableView\n'), ((894, 906), 'PySide2.QtWidgets.QTableView', 'QTableView', ([], {}), '()\n', (904, 906), False, 'from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QTreeView, QListView, QTableView\n'), ((958, 969), 'PySide2.QtWidgets.QTreeView', 'QTreeView', ([], {}), '()\n', (967, 969), False, 'from PySide2.QtWidgets import QMainWindow, QAction, QApplication, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QTreeView, QListView, QTableView\n')] |
import boto3
import uuid
import json
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import requests
dynamodb = boto3.resource('dynamodb')
def lambda_handler(event, context):
    """Render the waveform of a WAV file as a JPEG image.

    Reads 'Animal_cut.wav' from the working directory, plots its raw
    samples against time with matplotlib and saves the figure as
    'foo.jpeg'.  Only mono files are supported; a stereo input makes the
    handler exit early.

    :param event: Lambda event payload (currently unused).
    :param context: Lambda runtime context (currently unused).
    """
    #download song from s3
    #song =wave.open({song from s3}, "r")
    #song_id = file ext of song from s3
    spf = wave.open("Animal_cut.wav", "r")
    # Extract Raw Audio from Wav File
    signal = spf.readframes(-1)
    # np.fromstring was deprecated (and later removed) for binary input;
    # np.frombuffer is the supported way to reinterpret the raw frames.
    signal = np.frombuffer(signal, dtype=np.int16)
    fs = spf.getframerate()
    # If Stereo
    if spf.getnchannels() == 2:
        print("Just mono files")
        sys.exit(0)
    Time = np.linspace(0, len(signal) / fs, num=len(signal))
    plt.figure(1)
    plt.title("Signal Wave...")
    plt.plot(Time, signal)
    plt.show()
    # plt.savefig returns None, so binding it to a name was meaningless.
    plt.savefig('foo.jpeg')
    #plt.savefig('song_id.jpeg')
    #upload image to s3
    return #result
def set_ipfs_image(file_name):
    """Download an artwork from S3, pin it on IPFS and return its hash.

    :param file_name: key of the object in the 'sudocoins-art-bucket' bucket
    :return: dict with a single key ``ipfs_image`` holding the IPFS hash
    """
    local_path = "/tmp/" + file_name
    # Fetch the artwork from S3 into the Lambda's writable /tmp area.
    bucket_client = boto3.resource('s3').meta.client
    bucket_client.download_file('sudocoins-art-bucket', file_name, local_path)
    with open(local_path, 'rb') as artwork:
        # Pin the file through Infura's IPFS HTTP API.
        reply = requests.post('https://ipfs.infura.io:5001/api/v0/add',
                              files={'file': artwork},
                              auth=('<KEY>', '<KEY>'))
        payload = json.loads(reply.text)
        return {"ipfs_image": payload['Hash']}
| [
"wave.open",
"requests.post",
"matplotlib.pyplot.savefig",
"json.loads",
"matplotlib.pyplot.plot",
"boto3.resource",
"matplotlib.pyplot.figure",
"sys.exit",
"matplotlib.pyplot.title",
"numpy.fromstring",
"matplotlib.pyplot.show"
] | [((139, 165), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (153, 165), False, 'import boto3\n'), ((324, 356), 'wave.open', 'wave.open', (['"""Animal_cut.wav"""', '"""r"""'], {}), "('Animal_cut.wav', 'r')\n", (333, 356), False, 'import wave\n'), ((441, 471), 'numpy.fromstring', 'np.fromstring', (['signal', '"""Int16"""'], {}), "(signal, 'Int16')\n", (454, 471), True, 'import numpy as np\n'), ((670, 683), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (680, 683), True, 'import matplotlib.pyplot as plt\n'), ((688, 715), 'matplotlib.pyplot.title', 'plt.title', (['"""Signal Wave..."""'], {}), "('Signal Wave...')\n", (697, 715), True, 'import matplotlib.pyplot as plt\n'), ((720, 742), 'matplotlib.pyplot.plot', 'plt.plot', (['Time', 'signal'], {}), '(Time, signal)\n', (728, 742), True, 'import matplotlib.pyplot as plt\n'), ((747, 757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (755, 757), True, 'import matplotlib.pyplot as plt\n'), ((775, 798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""foo.jpeg"""'], {}), "('foo.jpeg')\n", (786, 798), True, 'import matplotlib.pyplot as plt\n'), ((919, 939), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (933, 939), False, 'import boto3\n'), ((590, 601), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (598, 601), False, 'import sys\n'), ((1150, 1248), 'requests.post', 'requests.post', (['"""https://ipfs.infura.io:5001/api/v0/add"""'], {'files': 'files', 'auth': "('<KEY>', '<KEY>')"}), "('https://ipfs.infura.io:5001/api/v0/add', files=files, auth=(\n '<KEY>', '<KEY>'))\n", (1163, 1248), False, 'import requests\n'), ((1298, 1323), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1308, 1323), False, 'import json\n')] |
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Divider, Size
from scipy.optimize import curve_fit
import pandas as pd
# This script reads in the output of
# 'nigericin_monensin_combo_by_cell.py', which processes the lifetime
# image data for the nigericin and monensin calibration. This script
# then generates a pH titration curve and fits it to a 4- parameter
# logistic function.
# generate some paths
# The script lives three directory levels below the manuscript root; the
# data file and the matplotlib style sheet are resolved relative to it.
current_dir = Path.cwd()
manuscript_path = current_dir.parents[2]
data_path = manuscript_path / 'source_data' / 'nigericin_monensin_U2OS' / 'nigericin_monensin_cell_means.csv'
# basic plot setup
plt.style.use(manuscript_path / 'figures' / 'default.mplstyle')
# Default color cycle of the style sheet, reused so all artists match.
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# load in the results
results = pd.read_csv(data_path)
# Calculate some rudimentary stats
# Mean and standard deviation of the per-cell mean lifetime at each buffer pH.
gb_pH = results.groupby(['buffer_pH'])
tau_means = gb_pH['mean_tau_ns'].mean().tolist()
tau_stds = gb_pH['mean_tau_ns'].std().tolist()
fig1 = plt.figure(figsize=(3,3), dpi=300)
# generate fixed size axes, 1.5 inches
# Divider layout: h = [left margin, axes width], v = [bottom margin, axes height].
h = [Size.Fixed(1.0), Size.Fixed(1.5)]
v = [Size.Fixed(0.7), Size.Fixed(1.5)]
divider = Divider(fig1, (0, 0, 1, 1), h, v, aspect=False)
axs1 = fig1.add_axes(divider.get_position(),
                     axes_locator=divider.new_locator(nx=1, ny=1))
# fit to a 4 parameter logistic function
def logistic4(pH, min_val, hill, pKa, max_val):
    """Four-parameter logistic (4PL) model of fluorescence lifetime vs. pH.

    Evaluates max_val + (min_val - max_val) / (1 + (pH / pKa) ** hill),
    which transitions between min_val and max_val around pKa with
    steepness set by the Hill coefficient.
    """
    span = min_val - max_val
    denominator = 1.0 + (pH / pKa) ** hill
    return max_val + span / denominator
# Set up a dataframe to store the fit outputs.
# NB: `columns` must be a sequence, not a set -- the original passed a set
# literal, which yields an arbitrary, run-to-run column order in the CSV.
fit_output = pd.DataFrame(columns=['condition', 'model', 'temperature', 'min_val',
                               'min_val_SD', 'max_val', 'max_val_SD', 'hill',
                               'hill_SD', 'pKa', 'pKa_SD'])
# perform the fitting
pH_range = gb_pH['mean_tau_ns'].mean().index.tolist()
# Starting guesses, in logistic4's parameter order: [min_val, hill, pKa, max_val].
init_params = [1.7, 15, 5, 3.5]
popt, pcov = curve_fit(logistic4,
                      xdata = pH_range,
                      ydata = tau_means,
                      p0 = init_params,
                      maxfev=10000)
# Record the fitted parameters for this condition/model combination.
fit_output.at[0, 'condition'] = 'U2OS'
fit_output.at[0, 'temperature'] = 35
fit_output.at[0, 'model'] = 'mean_arrival'
min_val, hill, pKa, max_val = popt
fit_output.at[0, 'min_val'] = min_val
fit_output.at[0, 'max_val'] = max_val
fit_output.at[0, 'hill'] = hill
fit_output.at[0, 'pKa'] = pKa
# One-sigma parameter uncertainties from the diagonal of the covariance matrix.
perr = np.sqrt(np.diag(pcov))
fit_output.at[0, 'min_val_SD'] = perr[0]
fit_output.at[0, 'max_val_SD'] = perr[3]
fit_output.at[0, 'hill_SD'] = perr[1]
fit_output.at[0, 'pKa_SD'] = perr[2]
# now plot the means and the fit curves
pH_plotting = np.linspace(4, 7.5, num=500)
axs1.plot(pH_plotting, logistic4(pH_plotting, min_val, hill, pKa, max_val),
          label='', marker=None, markersize=0, color=cycle[0])
# medians +/- stdev
axs1.errorbar(pH_range, tau_means, tau_stds, linewidth=0, elinewidth=1,
              markersize=4, marker='.', capthick=1, color=cycle[0])
axs1.spines['right'].set_visible(False)
axs1.spines['top'].set_visible(False)
axs1.set_ylabel('Lifetime (ns)')
axs1.set_xlabel('Buffer pH')
axs1.set_ylim(1.6, 3.6)
axs1.set_yticks(np.linspace(1.6, 3.6, 6))
axs1.set_title('U2OS Cells')
fig1.savefig('nigericin_monensin_means_whole_cell.pdf',
             bbox_inches='tight', transparent=True)
fit_output.to_csv('U2OS_4PL_fits.csv')
# print the standard deviation by pH for inclusion in main text
print('standard deviation by pH')
print(results.groupby('buffer_pH')['mean_tau_ns'].std())
plt.show()
| [
"scipy.optimize.curve_fit",
"pandas.read_csv",
"pathlib.Path.cwd",
"mpl_toolkits.axes_grid1.Size.Fixed",
"matplotlib.pyplot.style.use",
"numpy.diag",
"matplotlib.pyplot.figure",
"numpy.linspace",
"mpl_toolkits.axes_grid1.Divider",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((486, 496), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (494, 496), False, 'from pathlib import Path\n'), ((668, 731), 'matplotlib.pyplot.style.use', 'plt.style.use', (["(manuscript_path / 'figures' / 'default.mplstyle')"], {}), "(manuscript_path / 'figures' / 'default.mplstyle')\n", (681, 731), True, 'import matplotlib.pyplot as plt\n'), ((823, 845), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (834, 845), True, 'import pandas as pd\n'), ((1025, 1060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)', 'dpi': '(300)'}), '(figsize=(3, 3), dpi=300)\n', (1035, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1234), 'mpl_toolkits.axes_grid1.Divider', 'Divider', (['fig1', '(0, 0, 1, 1)', 'h', 'v'], {'aspect': '(False)'}), '(fig1, (0, 0, 1, 1), h, v, aspect=False)\n', (1194, 1234), False, 'from mpl_toolkits.axes_grid1 import Divider, Size\n'), ((1566, 1715), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "{'condition', 'model', 'temperature', 'min_val', 'min_val_SD', 'max_val',\n 'max_val_SD', 'hill', 'hill_SD', 'pKa', 'pKa_SD'}"}), "(columns={'condition', 'model', 'temperature', 'min_val',\n 'min_val_SD', 'max_val', 'max_val_SD', 'hill', 'hill_SD', 'pKa', 'pKa_SD'})\n", (1578, 1715), True, 'import pandas as pd\n'), ((1902, 1989), 'scipy.optimize.curve_fit', 'curve_fit', (['logistic4'], {'xdata': 'pH_range', 'ydata': 'tau_means', 'p0': 'init_params', 'maxfev': '(10000)'}), '(logistic4, xdata=pH_range, ydata=tau_means, p0=init_params,\n maxfev=10000)\n', (1911, 1989), False, 'from scipy.optimize import curve_fit\n'), ((2618, 2646), 'numpy.linspace', 'np.linspace', (['(4)', '(7.5)'], {'num': '(500)'}), '(4, 7.5, num=500)\n', (2629, 2646), True, 'import numpy as np\n'), ((3487, 3497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3495, 3497), True, 'import matplotlib.pyplot as plt\n'), ((1104, 1119), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(1.0)'], {}), '(1.0)\n', (1114, 
1119), False, 'from mpl_toolkits.axes_grid1 import Divider, Size\n'), ((1121, 1136), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(1.5)'], {}), '(1.5)\n', (1131, 1136), False, 'from mpl_toolkits.axes_grid1 import Divider, Size\n'), ((1143, 1158), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(0.7)'], {}), '(0.7)\n', (1153, 1158), False, 'from mpl_toolkits.axes_grid1 import Divider, Size\n'), ((1160, 1175), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(1.5)'], {}), '(1.5)\n', (1170, 1175), False, 'from mpl_toolkits.axes_grid1 import Divider, Size\n'), ((2391, 2404), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (2398, 2404), True, 'import numpy as np\n'), ((3127, 3151), 'numpy.linspace', 'np.linspace', (['(1.6)', '(3.6)', '(6)'], {}), '(1.6, 3.6, 6)\n', (3138, 3151), True, 'import numpy as np\n')] |
import os.path
import PIL.Image as pimg
import nibabel as nib
import numpy as np
import torch
from torch.autograd import Variable
from in_out.image_functions import rescale_image_intensities, points_to_voxels_transform
from support.utilities.general_settings import Settings
class Image:
    """
    A 2D or 3D intensity image embedded in physical space.
    Stores the voxel intensities as a numpy array (with a torch copy used for
    automatic differentiation), the voxel-to-world affine matrix, the image
    corner positions and the axis-aligned bounding box they span.
    (The previous docstring here was copy-pasted from the Landmark class and
    did not describe this class.)
    """
    ####################################################################################################################
    ### Constructor:
    ####################################################################################################################
    # Constructor.
    def __init__(self):
        self.type = 'Image'  # Deformable-object type tag.
        self.is_modified = True  # Dirty flag: True until update() refreshes derived data.
        self.affine = None  # Voxel-to-world transform (nibabel convention, see set_affine).
        self.corner_points = None  # Coordinates of the image corners.
        self.bounding_box = None  # (dimension, 2) array of per-axis [min, max].
        self.downsampling_factor = 1  # Downsampling applied to the point grid.
        self.intensities = None  # Numpy array.
        self.intensities_torch = None  # Torch copy of the intensities.
        self.intensities_dtype = None  # Original dtype, used when writing to disk.
    # Clone.
    def clone(self):
        """Return a deep copy of this image (numpy arrays copied, torch tensor cloned)."""
        clone = Image()
        clone.is_modified = True
        clone.affine = np.copy(self.affine)
        clone.corner_points = np.copy(self.corner_points)
        clone.bounding_box = np.copy(self.bounding_box)
        clone.downsampling_factor = self.downsampling_factor
        clone.intensities = np.copy(self.intensities)
        clone.intensities_torch = self.intensities_torch.clone()
        clone.intensities_dtype = self.intensities_dtype
        return clone
    ####################################################################################################################
    ### Encapsulation methods:
    ####################################################################################################################
    def get_number_of_points(self):
        # An image has no discrete point count in the landmark sense.
        raise RuntimeError("Not implemented for Image yet.")
    def set_affine(self, affine_matrix):
        """
        The affine matrix A is a 4x4 matrix that gives the correspondence between the voxel coordinates and their
        spatial positions in the 3D space: (x, y, z, 1) = A (u, v, w, 1).
        See the nibabel documentation for further details (the same attribute name is used here).
        """
        self.affine = affine_matrix
    def set_intensities(self, intensities):
        """Replace the intensity array and mark derived attributes as stale."""
        self.is_modified = True
        self.intensities = intensities
    def get_intensities(self):
        return self.intensities
    def get_intensities_torch(self):
        return self.intensities_torch
    def get_points(self):
        """
        Build the (possibly downsampled) grid of point coordinates spanned by
        the image corners, with the coordinate axis moved to the last position.
        """
        image_shape = self.intensities.shape
        dimension = Settings().dimension
        axes = []
        for d in range(dimension):
            # Corner index 2 ** d differs from corner 0 only along axis d,
            # so those two corners bound the coordinate range of that axis.
            axe = np.linspace(self.corner_points[0, d], self.corner_points[2 ** d, d],
                              image_shape[d] // self.downsampling_factor)
            axes.append(axe)
        points = np.array(np.meshgrid(*axes, indexing='ij')[:])
        # Successive swaps move the leading coordinate axis to the end.
        for d in range(dimension):
            points = np.swapaxes(points, d, d + 1)
        return points
    # @jit(parallel=True)
    def get_deformed_intensities(self, deformed_points, intensities):
        """
        Torch input / output.
        Bilinear (2D) or trilinear (3D) interpolation of `intensities` at the
        voxel positions corresponding to `deformed_points`.
        NOTE(review): the integer indices are clipped to the image border
        rather than zero-padded, despite the original "zero-padding" comment
        -- confirm the intended boundary behavior.
        """
        dimension = Settings().dimension
        image_shape = self.intensities.shape
        # Map the world-space points back to voxel coordinates via the affine.
        deformed_voxels = points_to_voxels_transform(deformed_points, self.affine)
        if dimension == 2:
            if not self.downsampling_factor == 1:
                # Upsample the downsampled deformed grid back to full image resolution.
                shape = deformed_points.shape
                deformed_voxels = torch.nn.Upsample(size=self.intensities.shape, mode='bilinear', align_corners=True)(
                    deformed_voxels.permute(2, 0, 1).contiguous().view(
                        1, shape[2], shape[0], shape[1]))[0].permute(1, 2, 0).contiguous()
            u, v = deformed_voxels.view(-1, 2)[:, 0], deformed_voxels.view(-1, 2)[:, 1]
            # Integer voxel indices below each sample position, clipped to the image.
            u1 = np.floor(u.data.cpu().numpy()).astype(int)
            v1 = np.floor(v.data.cpu().numpy()).astype(int)
            u1 = np.clip(u1, 0, image_shape[0] - 1)
            v1 = np.clip(v1, 0, image_shape[1] - 1)
            u2 = np.clip(u1 + 1, 0, image_shape[0] - 1)
            v2 = np.clip(v1 + 1, 0, image_shape[1] - 1)
            # Fractional parts f* and their complements g* act as interpolation weights.
            fu = u - Variable(torch.from_numpy(u1).type(Settings().tensor_scalar_type))
            fv = v - Variable(torch.from_numpy(v1).type(Settings().tensor_scalar_type))
            gu = Variable(torch.from_numpy(u1 + 1).type(Settings().tensor_scalar_type)) - u
            gv = Variable(torch.from_numpy(v1 + 1).type(Settings().tensor_scalar_type)) - v
            deformed_intensities = (intensities[u1, v1] * gu * gv +
                                    intensities[u1, v2] * gu * fv +
                                    intensities[u2, v1] * fu * gv +
                                    intensities[u2, v2] * fu * fv).view(image_shape)
        elif dimension == 3:
            if not self.downsampling_factor == 1:
                # Upsample the downsampled deformed grid back to full image resolution.
                shape = deformed_points.shape
                deformed_voxels = torch.nn.Upsample(size=self.intensities.shape, mode='trilinear', align_corners=True)(
                    deformed_voxels.permute(3, 0, 1, 2).contiguous().view(
                        1, shape[3], shape[0], shape[1], shape[2]))[0].permute(1, 2, 3, 0).contiguous()
            u, v, w = deformed_voxels.view(-1, 3)[:, 0], \
                      deformed_voxels.view(-1, 3)[:, 1], \
                      deformed_voxels.view(-1, 3)[:, 2]
            u1_numpy = np.floor(u.data.cpu().numpy()).astype(int)
            v1_numpy = np.floor(v.data.cpu().numpy()).astype(int)
            w1_numpy = np.floor(w.data.cpu().numpy()).astype(int)
            # Clipped lower (u1, v1, w1) and upper (u2, v2, w2) voxel indices.
            u1 = torch.from_numpy(np.clip(u1_numpy, 0, image_shape[0] - 1)).type(Settings().tensor_integer_type)
            v1 = torch.from_numpy(np.clip(v1_numpy, 0, image_shape[1] - 1)).type(Settings().tensor_integer_type)
            w1 = torch.from_numpy(np.clip(w1_numpy, 0, image_shape[2] - 1)).type(Settings().tensor_integer_type)
            u2 = torch.from_numpy(np.clip(u1_numpy + 1, 0, image_shape[0] - 1)).type(Settings().tensor_integer_type)
            v2 = torch.from_numpy(np.clip(v1_numpy + 1, 0, image_shape[1] - 1)).type(Settings().tensor_integer_type)
            w2 = torch.from_numpy(np.clip(w1_numpy + 1, 0, image_shape[2] - 1)).type(Settings().tensor_integer_type)
            # Fractional parts f* and their complements g* act as interpolation weights.
            fu = u - Variable(torch.from_numpy(u1_numpy).type(Settings().tensor_scalar_type))
            fv = v - Variable(torch.from_numpy(v1_numpy).type(Settings().tensor_scalar_type))
            fw = w - Variable(torch.from_numpy(w1_numpy).type(Settings().tensor_scalar_type))
            gu = Variable(torch.from_numpy(u1_numpy + 1).type(Settings().tensor_scalar_type)) - u
            gv = Variable(torch.from_numpy(v1_numpy + 1).type(Settings().tensor_scalar_type)) - v
            gw = Variable(torch.from_numpy(w1_numpy + 1).type(Settings().tensor_scalar_type)) - w
            deformed_intensities = (intensities[u1, v1, w1] * gu * gv * gw +
                                    intensities[u1, v1, w2] * gu * gv * fw +
                                    intensities[u1, v2, w1] * gu * fv * gw +
                                    intensities[u1, v2, w2] * gu * fv * fw +
                                    intensities[u2, v1, w1] * fu * gv * gw +
                                    intensities[u2, v1, w2] * fu * gv * fw +
                                    intensities[u2, v2, w1] * fu * fv * gw +
                                    intensities[u2, v2, w2] * fu * fv * fw).view(image_shape)
        else:
            raise RuntimeError('Incorrect dimension of the ambient space: %d' % dimension)
        return deformed_intensities
    ####################################################################################################################
    ### Public methods:
    ####################################################################################################################
    # Update the relevant information.
    def update(self):
        """Refresh corner points, bounding box and the torch intensity tensor if stale."""
        if self.is_modified:
            self._update_corner_point_positions()
            self.update_bounding_box()
            self.intensities_torch = Variable(torch.from_numpy(
                self.intensities).type(Settings().tensor_scalar_type)).contiguous()
            self.is_modified = False
    def update_bounding_box(self):
        """
        Compute a tight bounding box that contains all the 2/3D-embedded image data.
        """
        dimension = Settings().dimension
        self.bounding_box = np.zeros((dimension, 2))
        for d in range(dimension):
            self.bounding_box[d, 0] = np.min(self.corner_points[:, d])
            self.bounding_box[d, 1] = np.max(self.corner_points[:, d])
    def write(self, name, intensities=None):
        """
        Write the (rescaled) intensities to Settings().output_dir under `name`.
        The extension selects the backend: .png (PIL), .nii (nibabel) or .npy (numpy).
        """
        if intensities is None:
            intensities = self.get_intensities()
        intensities_rescaled = rescale_image_intensities(intensities, self.intensities_dtype)
        if name.find(".png") > 0:
            pimg.fromarray(intensities_rescaled).save(os.path.join(Settings().output_dir, name))
        elif name.find(".nii") > 0:
            img = nib.Nifti1Image(intensities_rescaled, self.affine)
            nib.save(img, os.path.join(Settings().output_dir, name))
        elif name.find(".npy") > 0:
            np.save(os.path.join(Settings().output_dir, name), intensities_rescaled)
        else:
            raise ValueError('Writing images with the given extension "%s" is not coded yet.' % name)
    ####################################################################################################################
    ### Utility methods:
    ####################################################################################################################
    def _update_corner_point_positions(self):
        """Recompute the voxel-space coordinates of the image corners from the intensity shape."""
        dimension = Settings().dimension
        if dimension == 2:
            corner_points = np.zeros((4, 2))
            umax, vmax = np.subtract(self.intensities.shape, (1, 1))
            corner_points[0] = np.array([0, 0])
            corner_points[1] = np.array([umax, 0])
            corner_points[2] = np.array([0, vmax])
            corner_points[3] = np.array([umax, vmax])
        elif dimension == 3:
            corner_points = np.zeros((8, 3))
            umax, vmax, wmax = np.subtract(self.intensities.shape, (1, 1, 1))
            corner_points[0] = np.array([0, 0, 0])
            corner_points[1] = np.array([umax, 0, 0])
            corner_points[2] = np.array([0, vmax, 0])
            corner_points[3] = np.array([umax, vmax, 0])
            corner_points[4] = np.array([0, 0, wmax])
            corner_points[5] = np.array([umax, 0, wmax])
            corner_points[6] = np.array([0, vmax, wmax])
            corner_points[7] = np.array([umax, vmax, wmax])
        #################################
        # VERSION FOR IMAGE + MESH DATA #
        #################################
        # dimension = Settings().dimension
        # if dimension == 2:
        #     corner_points = np.zeros((4, 2))
        #     umax, vmax = np.subtract(self.intensities.shape, (1, 1))
        #     corner_points[0] = np.dot(self.affine[0:2, 0:2], np.array([0, 0])) + self.affine[0:2, 2]
        #     corner_points[1] = np.dot(self.affine[0:2, 0:2], np.array([umax, 0])) + self.affine[0:2, 2]
        #     corner_points[2] = np.dot(self.affine[0:2, 0:2], np.array([0, vmax])) + self.affine[0:2, 2]
        #     corner_points[3] = np.dot(self.affine[0:2, 0:2], np.array([umax, vmax])) + self.affine[0:2, 2]
        #
        # elif dimension == 3:
        #     corner_points = np.zeros((8, 3))
        #     umax, vmax, wmax = np.subtract(self.intensities.shape, (1, 1, 1))
        #     corner_points[0] = np.dot(self.affine[0:3, 0:3], np.array([0, 0, 0])) + self.affine[0:3, 3]
        #     corner_points[1] = np.dot(self.affine[0:3, 0:3], np.array([umax, 0, 0])) + self.affine[0:3, 3]
        #     corner_points[2] = np.dot(self.affine[0:3, 0:3], np.array([0, vmax, 0])) + self.affine[0:3, 3]
        #     corner_points[3] = np.dot(self.affine[0:3, 0:3], np.array([umax, vmax, 0])) + self.affine[0:3, 3]
        #     corner_points[4] = np.dot(self.affine[0:3, 0:3], np.array([0, 0, wmax])) + self.affine[0:3, 3]
        #     corner_points[5] = np.dot(self.affine[0:3, 0:3], np.array([umax, 0, wmax])) + self.affine[0:3, 3]
        #     corner_points[6] = np.dot(self.affine[0:3, 0:3], np.array([0, vmax, wmax])) + self.affine[0:3, 3]
        #     corner_points[7] = np.dot(self.affine[0:3, 0:3], np.array([umax, vmax, wmax])) + self.affine[0:3, 3]
        else:
            raise RuntimeError('Invalid dimension: %d' % dimension)
        self.corner_points = corner_points
| [
"numpy.clip",
"support.utilities.general_settings.Settings",
"numpy.copy",
"PIL.Image.fromarray",
"numpy.min",
"numpy.subtract",
"numpy.max",
"numpy.swapaxes",
"in_out.image_functions.rescale_image_intensities",
"numpy.zeros",
"numpy.linspace",
"numpy.array",
"nibabel.Nifti1Image",
"torch.... | [((1275, 1295), 'numpy.copy', 'np.copy', (['self.affine'], {}), '(self.affine)\n', (1282, 1295), True, 'import numpy as np\n'), ((1326, 1353), 'numpy.copy', 'np.copy', (['self.corner_points'], {}), '(self.corner_points)\n', (1333, 1353), True, 'import numpy as np\n'), ((1383, 1409), 'numpy.copy', 'np.copy', (['self.bounding_box'], {}), '(self.bounding_box)\n', (1390, 1409), True, 'import numpy as np\n'), ((1500, 1525), 'numpy.copy', 'np.copy', (['self.intensities'], {}), '(self.intensities)\n', (1507, 1525), True, 'import numpy as np\n'), ((3530, 3586), 'in_out.image_functions.points_to_voxels_transform', 'points_to_voxels_transform', (['deformed_points', 'self.affine'], {}), '(deformed_points, self.affine)\n', (3556, 3586), False, 'from in_out.image_functions import rescale_image_intensities, points_to_voxels_transform\n'), ((8763, 8787), 'numpy.zeros', 'np.zeros', (['(dimension, 2)'], {}), '((dimension, 2))\n', (8771, 8787), True, 'import numpy as np\n'), ((9125, 9187), 'in_out.image_functions.rescale_image_intensities', 'rescale_image_intensities', (['intensities', 'self.intensities_dtype'], {}), '(intensities, self.intensities_dtype)\n', (9150, 9187), False, 'from in_out.image_functions import rescale_image_intensities, points_to_voxels_transform\n'), ((2778, 2788), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (2786, 2788), False, 'from support.utilities.general_settings import Settings\n'), ((2871, 2988), 'numpy.linspace', 'np.linspace', (['self.corner_points[0, d]', 'self.corner_points[2 ** d, d]', '(image_shape[d] // self.downsampling_factor)'], {}), '(self.corner_points[0, d], self.corner_points[2 ** d, d], \n image_shape[d] // self.downsampling_factor)\n', (2882, 2988), True, 'import numpy as np\n'), ((3164, 3193), 'numpy.swapaxes', 'np.swapaxes', (['points', 'd', '(d + 1)'], {}), '(points, d, d + 1)\n', (3175, 3193), True, 'import numpy as np\n'), ((3438, 3448), 'support.utilities.general_settings.Settings', 
'Settings', ([], {}), '()\n', (3446, 3448), False, 'from support.utilities.general_settings import Settings\n'), ((4222, 4256), 'numpy.clip', 'np.clip', (['u1', '(0)', '(image_shape[0] - 1)'], {}), '(u1, 0, image_shape[0] - 1)\n', (4229, 4256), True, 'import numpy as np\n'), ((4274, 4308), 'numpy.clip', 'np.clip', (['v1', '(0)', '(image_shape[1] - 1)'], {}), '(v1, 0, image_shape[1] - 1)\n', (4281, 4308), True, 'import numpy as np\n'), ((4326, 4364), 'numpy.clip', 'np.clip', (['(u1 + 1)', '(0)', '(image_shape[0] - 1)'], {}), '(u1 + 1, 0, image_shape[0] - 1)\n', (4333, 4364), True, 'import numpy as np\n'), ((4382, 4420), 'numpy.clip', 'np.clip', (['(v1 + 1)', '(0)', '(image_shape[1] - 1)'], {}), '(v1 + 1, 0, image_shape[1] - 1)\n', (4389, 4420), True, 'import numpy as np\n'), ((8714, 8724), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (8722, 8724), False, 'from support.utilities.general_settings import Settings\n'), ((8861, 8893), 'numpy.min', 'np.min', (['self.corner_points[:, d]'], {}), '(self.corner_points[:, d])\n', (8867, 8893), True, 'import numpy as np\n'), ((8932, 8964), 'numpy.max', 'np.max', (['self.corner_points[:, d]'], {}), '(self.corner_points[:, d])\n', (8938, 8964), True, 'import numpy as np\n'), ((10067, 10077), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (10075, 10077), False, 'from support.utilities.general_settings import Settings\n'), ((10143, 10159), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (10151, 10159), True, 'import numpy as np\n'), ((10185, 10228), 'numpy.subtract', 'np.subtract', (['self.intensities.shape', '(1, 1)'], {}), '(self.intensities.shape, (1, 1))\n', (10196, 10228), True, 'import numpy as np\n'), ((10260, 10276), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10268, 10276), True, 'import numpy as np\n'), ((10308, 10327), 'numpy.array', 'np.array', (['[umax, 0]'], {}), '([umax, 0])\n', (10316, 10327), True, 'import numpy as np\n'), 
((10359, 10378), 'numpy.array', 'np.array', (['[0, vmax]'], {}), '([0, vmax])\n', (10367, 10378), True, 'import numpy as np\n'), ((10410, 10432), 'numpy.array', 'np.array', (['[umax, vmax]'], {}), '([umax, vmax])\n', (10418, 10432), True, 'import numpy as np\n'), ((3070, 3103), 'numpy.meshgrid', 'np.meshgrid', (['*axes'], {'indexing': '"""ij"""'}), "(*axes, indexing='ij')\n", (3081, 3103), True, 'import numpy as np\n'), ((9374, 9424), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['intensities_rescaled', 'self.affine'], {}), '(intensities_rescaled, self.affine)\n', (9389, 9424), True, 'import nibabel as nib\n'), ((10491, 10507), 'numpy.zeros', 'np.zeros', (['(8, 3)'], {}), '((8, 3))\n', (10499, 10507), True, 'import numpy as np\n'), ((10539, 10585), 'numpy.subtract', 'np.subtract', (['self.intensities.shape', '(1, 1, 1)'], {}), '(self.intensities.shape, (1, 1, 1))\n', (10550, 10585), True, 'import numpy as np\n'), ((10617, 10636), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (10625, 10636), True, 'import numpy as np\n'), ((10668, 10690), 'numpy.array', 'np.array', (['[umax, 0, 0]'], {}), '([umax, 0, 0])\n', (10676, 10690), True, 'import numpy as np\n'), ((10722, 10744), 'numpy.array', 'np.array', (['[0, vmax, 0]'], {}), '([0, vmax, 0])\n', (10730, 10744), True, 'import numpy as np\n'), ((10776, 10801), 'numpy.array', 'np.array', (['[umax, vmax, 0]'], {}), '([umax, vmax, 0])\n', (10784, 10801), True, 'import numpy as np\n'), ((10833, 10855), 'numpy.array', 'np.array', (['[0, 0, wmax]'], {}), '([0, 0, wmax])\n', (10841, 10855), True, 'import numpy as np\n'), ((10887, 10912), 'numpy.array', 'np.array', (['[umax, 0, wmax]'], {}), '([umax, 0, wmax])\n', (10895, 10912), True, 'import numpy as np\n'), ((10944, 10969), 'numpy.array', 'np.array', (['[0, vmax, wmax]'], {}), '([0, vmax, wmax])\n', (10952, 10969), True, 'import numpy as np\n'), ((11001, 11029), 'numpy.array', 'np.array', (['[umax, vmax, wmax]'], {}), '([umax, vmax, wmax])\n', (11009, 11029), 
True, 'import numpy as np\n'), ((9235, 9271), 'PIL.Image.fromarray', 'pimg.fromarray', (['intensities_rescaled'], {}), '(intensities_rescaled)\n', (9249, 9271), True, 'import PIL.Image as pimg\n'), ((5954, 5964), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (5962, 5964), False, 'from support.utilities.general_settings import Settings\n'), ((6067, 6077), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6075, 6077), False, 'from support.utilities.general_settings import Settings\n'), ((6180, 6190), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6188, 6190), False, 'from support.utilities.general_settings import Settings\n'), ((6297, 6307), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6305, 6307), False, 'from support.utilities.general_settings import Settings\n'), ((6414, 6424), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6422, 6424), False, 'from support.utilities.general_settings import Settings\n'), ((6531, 6541), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6539, 6541), False, 'from support.utilities.general_settings import Settings\n'), ((9290, 9300), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (9298, 9300), False, 'from support.utilities.general_settings import Settings\n'), ((4452, 4472), 'torch.from_numpy', 'torch.from_numpy', (['u1'], {}), '(u1)\n', (4468, 4472), False, 'import torch\n'), ((4478, 4488), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (4486, 4488), False, 'from support.utilities.general_settings import Settings\n'), ((4540, 4560), 'torch.from_numpy', 'torch.from_numpy', (['v1'], {}), '(v1)\n', (4556, 4560), False, 'import torch\n'), ((4566, 4576), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (4574, 4576), False, 'from support.utilities.general_settings import 
Settings\n'), ((4624, 4648), 'torch.from_numpy', 'torch.from_numpy', (['(u1 + 1)'], {}), '(u1 + 1)\n', (4640, 4648), False, 'import torch\n'), ((4654, 4664), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (4662, 4664), False, 'from support.utilities.general_settings import Settings\n'), ((4716, 4740), 'torch.from_numpy', 'torch.from_numpy', (['(v1 + 1)'], {}), '(v1 + 1)\n', (4732, 4740), False, 'import torch\n'), ((4746, 4756), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (4754, 4756), False, 'from support.utilities.general_settings import Settings\n'), ((5907, 5947), 'numpy.clip', 'np.clip', (['u1_numpy', '(0)', '(image_shape[0] - 1)'], {}), '(u1_numpy, 0, image_shape[0] - 1)\n', (5914, 5947), True, 'import numpy as np\n'), ((6020, 6060), 'numpy.clip', 'np.clip', (['v1_numpy', '(0)', '(image_shape[1] - 1)'], {}), '(v1_numpy, 0, image_shape[1] - 1)\n', (6027, 6060), True, 'import numpy as np\n'), ((6133, 6173), 'numpy.clip', 'np.clip', (['w1_numpy', '(0)', '(image_shape[2] - 1)'], {}), '(w1_numpy, 0, image_shape[2] - 1)\n', (6140, 6173), True, 'import numpy as np\n'), ((6246, 6290), 'numpy.clip', 'np.clip', (['(u1_numpy + 1)', '(0)', '(image_shape[0] - 1)'], {}), '(u1_numpy + 1, 0, image_shape[0] - 1)\n', (6253, 6290), True, 'import numpy as np\n'), ((6363, 6407), 'numpy.clip', 'np.clip', (['(v1_numpy + 1)', '(0)', '(image_shape[1] - 1)'], {}), '(v1_numpy + 1, 0, image_shape[1] - 1)\n', (6370, 6407), True, 'import numpy as np\n'), ((6480, 6524), 'numpy.clip', 'np.clip', (['(w1_numpy + 1)', '(0)', '(image_shape[2] - 1)'], {}), '(w1_numpy + 1, 0, image_shape[2] - 1)\n', (6487, 6524), True, 'import numpy as np\n'), ((9464, 9474), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (9472, 9474), False, 'from support.utilities.general_settings import Settings\n'), ((6594, 6620), 'torch.from_numpy', 'torch.from_numpy', (['u1_numpy'], {}), '(u1_numpy)\n', (6610, 6620), False, 'import 
torch\n'), ((6626, 6636), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6634, 6636), False, 'from support.utilities.general_settings import Settings\n'), ((6688, 6714), 'torch.from_numpy', 'torch.from_numpy', (['v1_numpy'], {}), '(v1_numpy)\n', (6704, 6714), False, 'import torch\n'), ((6720, 6730), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6728, 6730), False, 'from support.utilities.general_settings import Settings\n'), ((6782, 6808), 'torch.from_numpy', 'torch.from_numpy', (['w1_numpy'], {}), '(w1_numpy)\n', (6798, 6808), False, 'import torch\n'), ((6814, 6824), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6822, 6824), False, 'from support.utilities.general_settings import Settings\n'), ((6872, 6902), 'torch.from_numpy', 'torch.from_numpy', (['(u1_numpy + 1)'], {}), '(u1_numpy + 1)\n', (6888, 6902), False, 'import torch\n'), ((6908, 6918), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (6916, 6918), False, 'from support.utilities.general_settings import Settings\n'), ((6970, 7000), 'torch.from_numpy', 'torch.from_numpy', (['(v1_numpy + 1)'], {}), '(v1_numpy + 1)\n', (6986, 7000), False, 'import torch\n'), ((7006, 7016), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (7014, 7016), False, 'from support.utilities.general_settings import Settings\n'), ((7068, 7098), 'torch.from_numpy', 'torch.from_numpy', (['(w1_numpy + 1)'], {}), '(w1_numpy + 1)\n', (7084, 7098), False, 'import torch\n'), ((7104, 7114), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (7112, 7114), False, 'from support.utilities.general_settings import Settings\n'), ((8410, 8444), 'torch.from_numpy', 'torch.from_numpy', (['self.intensities'], {}), '(self.intensities)\n', (8426, 8444), False, 'import torch\n'), ((8467, 8477), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (8475, 8477), 
False, 'from support.utilities.general_settings import Settings\n'), ((9563, 9573), 'support.utilities.general_settings.Settings', 'Settings', ([], {}), '()\n', (9571, 9573), False, 'from support.utilities.general_settings import Settings\n'), ((3746, 3833), 'torch.nn.Upsample', 'torch.nn.Upsample', ([], {'size': 'self.intensities.shape', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=self.intensities.shape, mode='bilinear',\n align_corners=True)\n", (3763, 3833), False, 'import torch\n'), ((5233, 5321), 'torch.nn.Upsample', 'torch.nn.Upsample', ([], {'size': 'self.intensities.shape', 'mode': '"""trilinear"""', 'align_corners': '(True)'}), "(size=self.intensities.shape, mode='trilinear',\n align_corners=True)\n", (5250, 5321), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. <NAME> (<EMAIL>),
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import numpy
import os.path
import logging
import rmgpy.constants as constants
################################################################################
def checkConformerEnergy(Vlist,path):
    """
    Check to see that the starting energy of the species in the potential energy scan calculation
    is not 0.5 kcal/mol (or more) higher than any other energies in the scan. If so, print and
    log a warning message.
    """
    Vlist = numpy.array(Vlist, numpy.float64)
    # Energy gap between the first scan point and the global minimum of the
    # scan, converted from Hartree (E_h per particle) to kJ/mol.
    Vdiff = (Vlist[0] - numpy.min(Vlist))*constants.E_h*constants.Na/1000
    if Vdiff >= 2: #we choose 2 kJ/mol to be the critical energy
        # Lazy %-style arguments: the message is only formatted if the
        # warning is actually emitted (and avoids eager string concatenation).
        logging.warning(
            'the species corresponding to %s is different in energy from the lowest energy conformer by %0.2f kJ/mol. This can cause significant errors in your computed rate constants. ',
            os.path.basename(path), Vdiff)
| [
"numpy.array",
"numpy.min"
] | [((1903, 1936), 'numpy.array', 'numpy.array', (['Vlist', 'numpy.float64'], {}), '(Vlist, numpy.float64)\n', (1914, 1936), False, 'import numpy\n'), ((1961, 1977), 'numpy.min', 'numpy.min', (['Vlist'], {}), '(Vlist)\n', (1970, 1977), False, 'import numpy\n')] |
"""
map
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
import numpy
from mcedit2.command import SimpleRevisionCommand
from mcedit2.ui.import_map import Ui_importMapDialog
from mcedit2.ui.panels.map import Ui_mapWidget
from mcedit2.util.resources import resourcePath
from mcedit2.util.screen import centerWidgetInScreen
from mceditlib.anvil.adapter import AnvilMapData
from mceditlib.exceptions import LevelFormatError
log = logging.getLogger(__name__)
class MapListModel(QtCore.QAbstractListModel):
    """Qt list model exposing every map item of the world being edited.
    Each row is one map ID: DisplayRole provides a human-readable label,
    DecorationRole a rendered preview image, and MapIDRole the raw ID.
    """
    # Custom item-data role used to fetch the raw map ID from an index.
    MapIDRole = Qt.UserRole
    def __init__(self, editorSession):
        """
        :param editorSession: session whose worldEditor provides map storage
        """
        super(MapListModel, self).__init__()
        self.editorSession = editorSession
        # Sorted so the view lists maps in a stable, ascending order.
        self.mapIDs = sorted(self.editorSession.worldEditor.listMaps())
    def rowCount(self, index):
        # One row per known map ID.
        return len(self.mapIDs)
    def data(self, index, role=Qt.DisplayRole):
        row = index.row()
        if not 0 <= row < len(self.mapIDs):
            return None
        mapID = self.mapIDs[row]
        if role == Qt.DisplayRole:
            return "Map #%s" % mapID
        if role == Qt.DecorationRole:
            # Thumbnail preview; may be None if the map data is unreadable.
            return self.imageForMapID(mapID)
        if role == self.MapIDRole:
            return mapID
    def getMap(self, mapID):
        """Return the map data object for mapID from the world editor."""
        return self.editorSession.worldEditor.getMap(mapID)
    def deleteMap(self, mapID):
        """Remove mapID from both this model and the underlying world."""
        self.mapIDs.remove(mapID)
        self.editorSession.worldEditor.deleteMap(mapID)
    def imageForMapID(self, mapID):
        """Render the map's colors into a QImage, or None on invalid data."""
        try:
            mapData = self.getMap(mapID)
        except LevelFormatError as e:
            log.exception("Invalid map for ID %s (while getting map image)", mapID)
            return None
        colorsRGBA = mapData.getColorsAsRGBA()
        # Swap to BGRA byte order before handing the buffer to Format_ARGB32.
        colorsBGRA = rgbaToBgra(colorsRGBA)
        image = QtGui.QImage(colorsBGRA, mapData.width, mapData.height, QtGui.QImage.Format_ARGB32)
        return image
def rgbaToBgra(colors):
    """Swap the red and blue channels of an RGBA color array, returning a
    C-contiguous BGRA array. The transform is its own inverse, so the same
    function converts BGRA back to RGBA (see the ``bgraToRgba`` alias).
    """
    reversed_channels = colors[..., ::-1]            # [r, g, b, a] -> [a, b, g, r]
    bgra = numpy.roll(reversed_channels, -1, -1)     # -> [b, g, r, a]
    return numpy.ascontiguousarray(bgra)
bgraToRgba = rgbaToBgra
class MapPanel(QtGui.QWidget, Ui_mapWidget):
    """Tool window for viewing, importing, and deleting the world's map items.
    The left pane lists the maps; the right pane shows a scaled preview of
    the selected map.
    """
    def __init__(self, editorSession):
        """
        :type editorSession: mcedit2.editorsession.EditorSession
        :rtype: MapPanel
        """
        super(MapPanel, self).__init__(QtGui.qApp.mainWindow, f=Qt.Tool)
        self.editorSession = editorSession
        self.pixmapItem = None
        self.mapListModel = None
        self.setupUi(self)
        # Checkable toolbar/menu action that shows or hides this panel.
        icon = QtGui.QIcon(resourcePath("mcedit2/assets/mcedit2/icons/edit_map.png"))
        action = QtGui.QAction(icon, self.tr("Edit Maps"), self)
        action.setCheckable(True)
        action.triggered.connect(self.toggleView)
        self._toggleViewAction = action
        self.reloadModel()
        self.mapListView.clicked.connect(self.mapListClicked)
        self.splitter.splitterMoved.connect(self.updatePixmapSize)
        # Give the first splitter pane twice the stretch of the second.
        self.splitter.setStretchFactor(0, 2)
        self.splitter.setStretchFactor(1, 1)
        self.importImageButton.clicked.connect(self.importImage)
        self.deleteMapButton.clicked.connect(self.deleteMap)
        self.currentlyEditingLabel.setVisible(False)
        self.displayFirstMap()
    def displayFirstMap(self):
        """Select and display the first map in the list, if any exist."""
        if len(self.mapListModel.mapIDs):
            index = self.mapListModel.index(0, 0)
            self.mapListView.setCurrentIndex(index)
            self.displayMapID(index.data(MapListModel.MapIDRole))
    def closeEvent(self, event):
        # Closing the tool window behaves like toggling it hidden.
        self.toggleView()
    def toggleViewAction(self):
        """Return the checkable QAction that shows/hides this panel."""
        return self._toggleViewAction
    def toggleView(self):
        """Show the panel (centered on screen) if hidden, otherwise hide it."""
        if self.isHidden():
            centerWidgetInScreen(self, 0.8)
            self.show()
            self._toggleViewAction.setChecked(True)
        else:
            self.hide()
            self._toggleViewAction.setChecked(False)
    def mapListClicked(self, index):
        # Clicking a list entry selects that map for display.
        mapID = index.data(MapListModel.MapIDRole)
        self.displayMapID(mapID)
    def displayMapID(self, mapID):
        """Fill the info labels and preview for mapID; clear them if the map
        is missing or its data is invalid."""
        if mapID is None:
            mapData = None
        else:
            try:
                mapData = self.mapListModel.getMap(mapID)
            except LevelFormatError as e:
                log.exception("Invalid data for map ID %s (while getting map info)", mapID)
                mapData = None
        if mapData is None:
            self.widthLabel.setText("(N/A)")
            self.heightLabel.setText("(N/A)")
            self.dimensionLabel.setText("(N/A)")
            self.scaleLabel.setText("(N/A)")
            self.mapGraphicsView.setScene(None)
        else:
            self.widthLabel.setText(str(mapData.width))
            self.heightLabel.setText(str(mapData.height))
            self.dimensionLabel.setText(str(mapData.dimension))
            self.scaleLabel.setText(str(mapData.scale))
            self.updateScene(mapID)
    def updateScene(self, mapID):
        """Rebuild the preview scene with the rendered image of mapID."""
        scene = QtGui.QGraphicsScene()
        image = self.mapListModel.imageForMapID(mapID)
        pixmap = QtGui.QPixmap.fromImage(image)
        self.pixmapItem = scene.addPixmap(pixmap)
        self.mapGraphicsView.setScene(scene)
        self.mapGraphicsView.fitInView(self.pixmapItem, Qt.KeepAspectRatio)
    def resizeEvent(self, event):
        self.updatePixmapSize()
    def showEvent(self, event):
        self.updatePixmapSize()
    def updatePixmapSize(self):
        # Keep the preview scaled to the viewport whenever geometry changes.
        if self.pixmapItem:
            self.mapGraphicsView.fitInView(self.pixmapItem, Qt.KeepAspectRatio)
    def importImage(self):
        """Prompt for an image file and import it as one or more map items."""
        result = QtGui.QFileDialog.getOpenFileName(self, self.tr("Choose an image file"),
                                           ".",  # TODO: remember the last-used directory
                                           "Image files (*.gif;*.png;*.bmp;*.jpg)")
        if result:
            filename = result[0]
            if filename:
                colorTable = AnvilMapData.colorTable  # TODO: dispatch through WorldEditor instead of hardcoding Anvil
                dialog = ImportMapDialog(filename, colorTable)
                dialog.exec_()
                if dialog.result():
                    convertedImages = dialog.getConvertedImages()
                    command = MapImportCommand(self.editorSession, self.tr("Import Image as Map"))
                    with command.begin():
                        for x, y, image in convertedImages:
                            # Each converted tile is a 128x128 indexed image whose
                            # pixel values are map color indexes.
                            colors = numpy.fromstring(image.bits(), dtype=numpy.uint8)
                            colors.shape = 128, 128
                            newMap = self.editorSession.worldEditor.createMap()
                            newMap.colors[:] = colors
                            newMap.save()
                    self.reloadModel()
                    self.displayMapID(newMap.mapID)
    def deleteMap(self):
        """Delete the currently selected map inside an undoable command."""
        idx = self.mapListView.currentIndex()
        if idx.isValid():
            mapID = self.mapListModel.data(idx, MapListModel.MapIDRole)
            with self.editorSession.beginSimpleCommand(self.tr("Delete Map {0}").format(mapID)):
                self.mapListModel.deleteMap(mapID)
                self.reloadModel()
                self.displayFirstMap()
    def reloadModel(self):
        """Recreate the list model from the world's current set of maps."""
        self.mapListModel = MapListModel(self.editorSession)
        self.mapListView.setModel(self.mapListModel)
class MapImportCommand(SimpleRevisionCommand):
    """Revision command used when importing images as maps (see
    MapPanel.importImage); inherits all behavior from SimpleRevisionCommand."""
    pass
class ImportMapDialog(QtGui.QDialog, Ui_importMapDialog):
    """Dialog for importing an image file as one or more 128x128 map tiles.
    One pane shows the source image with red grid lines marking tile
    boundaries; the other previews each tile converted to the map palette.
    """
    def __init__(self, imageFilename, colorTable):
        """
        :param imageFilename: path of the image file to import
        :param colorTable: sequence of RGBA map colors (index -> color)
        """
        super(ImportMapDialog, self).__init__()
        self.setupUi(self)
        self.filename = imageFilename
        # Convert to ARGB to ensure alpha channel
        image = QtGui.QImage(imageFilename)
        self.image = image.convertToFormat(QtGui.QImage.Format_ARGB32)
        self.pixmap = QtGui.QPixmap.fromImage(image)
        self.lines = []               # grid-line items drawn over the source image
        self.previewGroupItems = []   # pixmap/outline items in the preview scene
        self.convertedImages = []     # (tileX, tileY, QImage) conversion results
        # Build a 256-entry palette in BGRA word order; unused slots stay 255.
        self.colorTable = [255] * 256
        colorTable = numpy.array(colorTable)
        colorTableBGRA = numpy.ascontiguousarray(numpy.roll(colorTable, 1, -1)[..., ::-1])
        colorTableBGRA.shape = colorTableBGRA.size
        colorTableBGRA.dtype = numpy.uint32
        self.colorTable[:len(colorTable)] = list(colorTableBGRA)
        # Any change to the mosaic settings re-renders both scenes.
        self.importAsMosaicGroup.toggled.connect(self.updateScenes)
        self.expandImageCheckbox.toggled.connect(self.updateScenes)
        self.tilesWideSpinbox.valueChanged.connect(self.updateScenes)
        self.tilesHighSpinbox.valueChanged.connect(self.updateScenes)
        self.imageScene = QtGui.QGraphicsScene()
        self.pixmapItem = self.imageScene.addPixmap(self.pixmap)
        self.imageGraphicsView.setScene(self.imageScene)
        self.previewScene = QtGui.QGraphicsScene()
        self.previewGroup = QtGui.QGraphicsItemGroup()
        self.previewScene.addItem(self.previewGroup)
        self.previewGraphicsView.setScene(self.previewScene)
        self.updateScenes()
    def updateScenes(self):
        """Redraw the tile grid over the source image and rebuild the
        converted-tile preview from the current dialog settings."""
        # Clear items left over from the previous update.
        for lineItem in self.lines:
            self.imageScene.removeItem(lineItem)
        self.lines[:] = []
        for item in self.previewGroupItems:
            self.previewGroup.removeFromGroup(item)
        self.previewGroupItems[:] = []
        self.convertedImages[:] = []
        tilesWide = self.tilesWideSpinbox.value()
        tilesHigh = self.tilesHighSpinbox.value()
        #if self.importAsMosaicGroup.isChecked() and tilesWide > 1 or tilesHigh > 1:
        imageWidth = self.pixmap.width()
        imageHeight = self.pixmap.height()
        xSpacing = imageWidth / tilesWide
        ySpacing = imageHeight / tilesHigh
        expandImage = self.expandImageCheckbox.isChecked()
        if not expandImage:
            # Without stretching, use square tiles of the larger spacing.
            xSpacing = ySpacing = max(xSpacing, ySpacing)
        # Draw red grid lines marking the tile boundaries on the source image.
        for x in range(1, tilesWide):
            if x * xSpacing > imageWidth:
                break
            line = QtGui.QGraphicsLineItem(x * xSpacing, 0, x * xSpacing, imageHeight)
            line.setPen(QtGui.QPen(Qt.red))
            self.imageScene.addItem(line)
            self.lines.append(line)
        for y in range(1, tilesHigh):
            if y * ySpacing > imageHeight:
                break
            line = QtGui.QGraphicsLineItem(0, y * ySpacing, imageWidth, y * ySpacing)
            line.setPen(QtGui.QPen(Qt.red))
            self.imageScene.addItem(line)
            self.lines.append(line)
        # Collect the (column, row) positions of tiles inside the image bounds.
        tilePositions = []
        for x in range(0, tilesWide):
            for y in range(0, tilesHigh):
                if x * xSpacing > imageWidth or y * ySpacing > imageHeight:
                    continue
                tilePositions.append((x, y))
        image = self.image
        tileSize = 128
        tileSpacing = 6
        tileOffset = tileSize + tileSpacing
        for x, y in tilePositions:
            # Cut out each tile, scale it to 128x128, and palettize it with
            # the map color table.
            tileImage = image.copy(x * xSpacing, y * ySpacing, xSpacing, ySpacing)
            scaledImage = tileImage.scaled(QtCore.QSize(tileSize, tileSize),
                                           Qt.KeepAspectRatio if not expandImage else Qt.IgnoreAspectRatio)
            convertedImage = scaledImage.convertToFormat(QtGui.QImage.Format_Indexed8, self.colorTable)
            convertedPixmap = QtGui.QPixmap.fromImage(convertedImage)
            self.convertedImages.append((x, y, convertedImage))
            convertedPixmapItem = QtGui.QGraphicsPixmapItem(convertedPixmap)
            convertedPixmapItem.setPos(x * tileOffset, y * tileOffset)
            self.previewGroup.addToGroup(convertedPixmapItem)
            self.previewGroupItems.append(convertedPixmapItem)
            # Outline each tile in the preview with a black rectangle.
            rectItem = QtGui.QGraphicsRectItem(x*tileOffset, y*tileOffset, tileSize, tileSize)
            rectItem.setPen(QtGui.QPen(Qt.black))
            self.previewGroup.addToGroup(rectItem)
            self.previewGroupItems.append(rectItem)
        #
        # else:
        #
        #     image = self.pixmap.toImage()
        #     scaledImage = image.scaled(QtCore.QSize(128, 128), Qt.KeepAspectRatio)
        #     convertedImage = scaledImage.convertToFormat(QtGui.QImage.Format_Indexed8, self.colorTable)
        #     convertedPixmap = QtGui.QPixmap.fromImage(convertedImage)
        #     convertedPixmapItem = self.previewScene.addPixmap(convertedPixmap)
        #     self.previewGroup.addToGroup(convertedPixmapItem)
        #     self.mosaicTiles.append(convertedPixmapItem)
        self.updateImageSize()
    def updateImageSize(self):
        # Fit both views to their contents.
        self.imageGraphicsView.fitInView(self.pixmapItem, Qt.KeepAspectRatio)
        self.previewGraphicsView.fitInView(self.previewGroup, Qt.KeepAspectRatio)
    def resizeEvent(self, event):
        self.updateImageSize()
    def showEvent(self, event):
        self.updateImageSize()
    def getConvertedImages(self):
        """Return a copy of the list of (tileX, tileY, converted QImage)."""
        return list(self.convertedImages)
| [
"logging.getLogger",
"PySide.QtGui.QPixmap.fromImage",
"numpy.roll",
"PySide.QtCore.QSize",
"mcedit2.util.screen.centerWidgetInScreen",
"PySide.QtGui.QGraphicsPixmapItem",
"mcedit2.util.resources.resourcePath",
"numpy.array",
"PySide.QtGui.QGraphicsLineItem",
"PySide.QtGui.QGraphicsItemGroup",
"... | [((546, 573), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (563, 573), False, 'import logging\n'), ((1865, 1953), 'PySide.QtGui.QImage', 'QtGui.QImage', (['colorsBGRA', 'mapData.width', 'mapData.height', 'QtGui.QImage.Format_ARGB32'], {}), '(colorsBGRA, mapData.width, mapData.height, QtGui.QImage.\n Format_ARGB32)\n', (1877, 1953), False, 'from PySide import QtGui, QtCore\n'), ((4941, 4963), 'PySide.QtGui.QGraphicsScene', 'QtGui.QGraphicsScene', ([], {}), '()\n', (4961, 4963), False, 'from PySide import QtGui, QtCore\n'), ((5037, 5067), 'PySide.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['image'], {}), '(image)\n', (5060, 5067), False, 'from PySide import QtGui, QtCore\n'), ((7637, 7664), 'PySide.QtGui.QImage', 'QtGui.QImage', (['imageFilename'], {}), '(imageFilename)\n', (7649, 7664), False, 'from PySide import QtGui, QtCore\n'), ((7758, 7788), 'PySide.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['image'], {}), '(image)\n', (7781, 7788), False, 'from PySide import QtGui, QtCore\n'), ((7944, 7967), 'numpy.array', 'numpy.array', (['colorTable'], {}), '(colorTable)\n', (7955, 7967), False, 'import numpy\n'), ((8523, 8545), 'PySide.QtGui.QGraphicsScene', 'QtGui.QGraphicsScene', ([], {}), '()\n', (8543, 8545), False, 'from PySide import QtGui, QtCore\n'), ((8698, 8720), 'PySide.QtGui.QGraphicsScene', 'QtGui.QGraphicsScene', ([], {}), '()\n', (8718, 8720), False, 'from PySide import QtGui, QtCore\n'), ((8749, 8775), 'PySide.QtGui.QGraphicsItemGroup', 'QtGui.QGraphicsItemGroup', ([], {}), '()\n', (8773, 8775), False, 'from PySide import QtGui, QtCore\n'), ((2030, 2055), 'numpy.roll', 'numpy.roll', (['colors', '(1)', '(-1)'], {}), '(colors, 1, -1)\n', (2040, 2055), False, 'import numpy\n'), ((2530, 2587), 'mcedit2.util.resources.resourcePath', 'resourcePath', (['"""mcedit2/assets/mcedit2/icons/edit_map.png"""'], {}), "('mcedit2/assets/mcedit2/icons/edit_map.png')\n", (2542, 2587), False, 'from 
mcedit2.util.resources import resourcePath\n'), ((3680, 3711), 'mcedit2.util.screen.centerWidgetInScreen', 'centerWidgetInScreen', (['self', '(0.8)'], {}), '(self, 0.8)\n', (3700, 3711), False, 'from mcedit2.util.screen import centerWidgetInScreen\n'), ((9858, 9925), 'PySide.QtGui.QGraphicsLineItem', 'QtGui.QGraphicsLineItem', (['(x * xSpacing)', '(0)', '(x * xSpacing)', 'imageHeight'], {}), '(x * xSpacing, 0, x * xSpacing, imageHeight)\n', (9881, 9925), False, 'from PySide import QtGui, QtCore\n'), ((10170, 10236), 'PySide.QtGui.QGraphicsLineItem', 'QtGui.QGraphicsLineItem', (['(0)', '(y * ySpacing)', 'imageWidth', '(y * ySpacing)'], {}), '(0, y * ySpacing, imageWidth, y * ySpacing)\n', (10193, 10236), False, 'from PySide import QtGui, QtCore\n'), ((11174, 11213), 'PySide.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['convertedImage'], {}), '(convertedImage)\n', (11197, 11213), False, 'from PySide import QtGui, QtCore\n'), ((11313, 11355), 'PySide.QtGui.QGraphicsPixmapItem', 'QtGui.QGraphicsPixmapItem', (['convertedPixmap'], {}), '(convertedPixmap)\n', (11338, 11355), False, 'from PySide import QtGui, QtCore\n'), ((11576, 11651), 'PySide.QtGui.QGraphicsRectItem', 'QtGui.QGraphicsRectItem', (['(x * tileOffset)', '(y * tileOffset)', 'tileSize', 'tileSize'], {}), '(x * tileOffset, y * tileOffset, tileSize, tileSize)\n', (11599, 11651), False, 'from PySide import QtGui, QtCore\n'), ((8017, 8046), 'numpy.roll', 'numpy.roll', (['colorTable', '(1)', '(-1)'], {}), '(colorTable, 1, -1)\n', (8027, 8046), False, 'import numpy\n'), ((9950, 9968), 'PySide.QtGui.QPen', 'QtGui.QPen', (['Qt.red'], {}), '(Qt.red)\n', (9960, 9968), False, 'from PySide import QtGui, QtCore\n'), ((10261, 10279), 'PySide.QtGui.QPen', 'QtGui.QPen', (['Qt.red'], {}), '(Qt.red)\n', (10271, 10279), False, 'from PySide import QtGui, QtCore\n'), ((10898, 10930), 'PySide.QtCore.QSize', 'QtCore.QSize', (['tileSize', 'tileSize'], {}), '(tileSize, tileSize)\n', (10910, 10930), False, 'from PySide 
import QtGui, QtCore\n'), ((11676, 11696), 'PySide.QtGui.QPen', 'QtGui.QPen', (['Qt.black'], {}), '(Qt.black)\n', (11686, 11696), False, 'from PySide import QtGui, QtCore\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
'''
MIT License
Copyright (c) 2019 <NAME>, <NAME>, and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from skimage import measure
import numpy as np
import torch
from .sdf import create_grid, eval_grid_octree, eval_grid
from skimage import measure
from numpy.linalg import inv
def reconstruction(net, cuda, calib_tensor,
                   resolution, b_min, b_max, thresh=0.5,
                   use_octree=False, num_samples=10000, transform=None):
    '''
    Reconstruct meshes from sdf predicted by the network.
    :param net: a BasePixImpNet object. call image filter beforehead.
    :param cuda: cuda device
    :param calib_tensor: calibration tensor
    :param resolution: resolution of the grid cell
    :param b_min: bounding box corner [x_min, y_min, z_min] (currently unused)
    :param b_max: bounding box corner [x_max, y_max, z_max] (currently unused)
    :param thresh: iso-surface level passed to marching cubes
    :param use_octree: whether to use octree acceleration
    :param num_samples: how many points to query each gpu iteration
    :param transform: unused; kept for interface compatibility
    :return: (verts, faces, normals, values) from marching cubes, or -1 on failure.
    '''
    # First we create a grid by resolution
    # and transforming matrix for grid coordinates to real world xyz
    coords, mat = create_grid(resolution, resolution, resolution)
                  #b_min, b_max, transform=transform)
    calib = calib_tensor[0].cpu().numpy()
    calib_inv = inv(calib)
    # Map grid coordinates into world space via the inverse calibration
    # (homogeneous coordinates: append a column of ones, drop it afterwards).
    coords = coords.reshape(3,-1).T
    coords = np.matmul(np.concatenate([coords, np.ones((coords.shape[0],1))], 1), calib_inv.T)[:, :3]
    coords = coords.T.reshape(3,resolution,resolution,resolution)
    # Then we define the lambda function for cell evaluation
    def eval_func(points):
        points = np.expand_dims(points, axis=0)
        points = np.repeat(points, 1, axis=0)
        samples = torch.from_numpy(points).to(device=cuda).float()
        net.query(samples, calib_tensor)
        pred = net.get_preds()[0][0]
        return pred.detach().cpu().numpy()
    # Then we evaluate the grid
    if use_octree:
        sdf = eval_grid_octree(coords, eval_func, num_samples=num_samples)
    else:
        sdf = eval_grid(coords, eval_func, num_samples=num_samples)
    # Finally we do marching cubes
    try:
        verts, faces, normals, values = measure.marching_cubes_lewiner(sdf, thresh)
        # transform verts into world coordinate system
        trans_mat = np.matmul(calib_inv, mat)
        verts = np.matmul(trans_mat[:3, :3], verts.T) + trans_mat[:3, 3:4]
        verts = verts.T
        # in case mesh has flip transformation
        if np.linalg.det(trans_mat[:3, :3]) < 0.0:
            faces = faces[:,::-1]
        return verts, faces, normals, values
    except Exception as e:
        # Previously a bare ``except:`` swallowed every exception, including
        # KeyboardInterrupt, and gave no hint of the cause. Catch only real
        # errors and report what went wrong; callers check for the -1 sentinel.
        print('error cannot marching cubes:', e)
        return -1
def save_obj_mesh(mesh_path, verts, faces=None):
    """Write a Wavefront OBJ file containing vertices and (optionally) faces.

    :param mesh_path: output .obj file path
    :param verts: iterable of [x, y, z] vertex positions
    :param faces: optional numpy array of 0-based [i, j, k] vertex indices;
        degenerate faces (with a repeated vertex) are skipped.
    """
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original leaked the open file on error).
    with open(mesh_path, 'w') as file:
        for v in verts:
            file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
        if faces is not None:
            for f in faces:
                # Skip degenerate faces that reference the same vertex twice.
                if f[0] == f[1] or f[1] == f[2] or f[0] == f[2]:
                    continue
                # OBJ indices are 1-based; winding order is flipped (0, 2, 1).
                f_plus = f + 1
                file.write('f %d %d %d\n' % (f_plus[0], f_plus[2], f_plus[1]))
def save_obj_mesh_with_color(mesh_path, verts, faces, colors):
    """Write an OBJ file with per-vertex RGB colors appended to each ``v`` line.

    :param mesh_path: output .obj file path
    :param verts: iterable of [x, y, z] vertex positions
    :param faces: numpy array of 0-based [i, j, k] vertex indices
    :param colors: per-vertex [r, g, b] values, parallel to ``verts``
    """
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original leaked the open file on error).
    with open(mesh_path, 'w') as file:
        for idx, v in enumerate(verts):
            c = colors[idx]
            file.write('v %.4f %.4f %.4f %.4f %.4f %.4f\n' % (v[0], v[1], v[2], c[0], c[1], c[2]))
        for f in faces:
            # OBJ indices are 1-based; winding order is flipped (0, 2, 1).
            f_plus = f + 1
            file.write('f %d %d %d\n' % (f_plus[0], f_plus[2], f_plus[1]))
def save_obj_mesh_with_uv(mesh_path, verts, faces, uvs):
    """Write an OBJ file with per-vertex UV texture coordinates.

    Each vertex emits a ``v`` line followed by a ``vt`` line; faces reference
    vertex and texture indices jointly as ``f v/vt v/vt v/vt``.

    :param mesh_path: output .obj file path
    :param verts: iterable of [x, y, z] vertex positions
    :param faces: numpy array of 0-based [i, j, k] vertex indices
    :param uvs: per-vertex [u, v] texture coordinates, parallel to ``verts``
    """
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original leaked the open file on error).
    with open(mesh_path, 'w') as file:
        for idx, v in enumerate(verts):
            vt = uvs[idx]
            file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
            file.write('vt %.4f %.4f\n' % (vt[0], vt[1]))
        for f in faces:
            # OBJ indices are 1-based; winding order is flipped (0, 2, 1).
            f_plus = f + 1
            file.write('f %d/%d %d/%d %d/%d\n' % (f_plus[0], f_plus[0],
                                                  f_plus[2], f_plus[2],
                                                  f_plus[1], f_plus[1]))
| [
"numpy.repeat",
"numpy.ones",
"skimage.measure.marching_cubes_lewiner",
"numpy.linalg.det",
"torch.from_numpy",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.expand_dims"
] | [((2369, 2379), 'numpy.linalg.inv', 'inv', (['calib'], {}), '(calib)\n', (2372, 2379), False, 'from numpy.linalg import inv\n'), ((2690, 2720), 'numpy.expand_dims', 'np.expand_dims', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (2704, 2720), True, 'import numpy as np\n'), ((2738, 2766), 'numpy.repeat', 'np.repeat', (['points', '(1)'], {'axis': '(0)'}), '(points, 1, axis=0)\n', (2747, 2766), True, 'import numpy as np\n'), ((3254, 3297), 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['sdf', 'thresh'], {}), '(sdf, thresh)\n', (3284, 3297), False, 'from skimage import measure\n'), ((3373, 3398), 'numpy.matmul', 'np.matmul', (['calib_inv', 'mat'], {}), '(calib_inv, mat)\n', (3382, 3398), True, 'import numpy as np\n'), ((3415, 3452), 'numpy.matmul', 'np.matmul', (['trans_mat[:3, :3]', 'verts.T'], {}), '(trans_mat[:3, :3], verts.T)\n', (3424, 3452), True, 'import numpy as np\n'), ((3556, 3588), 'numpy.linalg.det', 'np.linalg.det', (['trans_mat[:3, :3]'], {}), '(trans_mat[:3, :3])\n', (3569, 3588), True, 'import numpy as np\n'), ((2463, 2492), 'numpy.ones', 'np.ones', (['(coords.shape[0], 1)'], {}), '((coords.shape[0], 1))\n', (2470, 2492), True, 'import numpy as np\n'), ((2785, 2809), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (2801, 2809), False, 'import torch\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Calculates statistical significance for predictions from two experiments.
"""
import argparse
import csv
import json
from typing import Callable, List, Optional, Tuple
import numpy as np
import pandas as pd
from statsmodels.stats.contingency_tables import mcnemar
from dataflow.core.dialogue import TurnId
from dataflow.core.io import load_jsonl_file
from dataflow.onmt_helpers.evaluate_onmt_predictions import evaluate_dialogue
def get_report_dataframes(
    exp0_prediction_report_df: pd.DataFrame, exp1_prediction_report_df: pd.DataFrame,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Returns the turn-level and dialogue-level report dataframes.

    NOTE: both input dataframes are re-indexed *in place* on
    (dialogueId, turnIndex).
    """
    exp0_prediction_report_df.set_index(
        ["dialogueId", "turnIndex"], inplace=True, drop=True
    )
    exp1_prediction_report_df.set_index(
        ["dialogueId", "turnIndex"], inplace=True, drop=True
    )
    # Outer join keeps every turn from either report; lsuffix/rsuffix rename
    # the paired isCorrect columns to isCorrect_0 / isCorrect_1.
    turn_report_df = exp0_prediction_report_df.join(
        exp1_prediction_report_df.loc[:, ["isCorrect"]],
        how="outer",
        lsuffix="_0",
        rsuffix="_1",
    )
    # Both reports must cover exactly the same set of turns, otherwise the
    # outer join would introduce NaNs or extra rows.
    assert not turn_report_df.isnull().any().any()
    assert (
        len(turn_report_df)
        == len(exp0_prediction_report_df)
        == len(exp1_prediction_report_df)
    )
    rows = []
    for dialogue_id, df_for_dialogue in turn_report_df.groupby("dialogueId"):
        # Score each experiment's turns for this dialogue separately.
        dialogue_scores0 = evaluate_dialogue(
            turns=[
                (turn_index, row.get("isCorrect_0"))
                for (_, turn_index), row in df_for_dialogue.iterrows()
            ]
        )
        dialogue_scores1 = evaluate_dialogue(
            turns=[
                (turn_index, row.get("isCorrect_1"))
                for (_, turn_index), row in df_for_dialogue.iterrows()
            ]
        )
        rows.append(
            {
                "dialogueId": dialogue_id,
                # Presumably num_correct_dialogues is 0 or 1 here, since a
                # single dialogue is scored at a time — TODO confirm against
                # evaluate_dialogue.
                "isCorrect_0": dialogue_scores0.num_correct_dialogues > 0,
                "isCorrect_1": dialogue_scores1.num_correct_dialogues > 0,
                # "prefix" = number of turns before the first error.
                "prefix_0": dialogue_scores0.num_turns_before_first_error,
                "prefix_1": dialogue_scores1.num_turns_before_first_error,
            }
        )
    dialogue_report_df = pd.DataFrame(rows)
    return turn_report_df, dialogue_report_df
def run_mcnemar_test(report_df: pd.DataFrame) -> Tuple[float, float]:
    """Run McNemar's test on the paired isCorrect_0/isCorrect_1 columns.

    Builds the 2x2 contingency table of (exp0 correct?, exp1 correct?)
    counts and returns the test statistic and its p-value.
    """
    correct0 = report_df.loc[:, "isCorrect_0"]
    correct1 = report_df.loc[:, "isCorrect_1"]
    both = (correct0 & correct1).sum()
    only_exp0 = (correct0 & ~correct1).sum()
    only_exp1 = (~correct0 & correct1).sum()
    neither = (~correct0 & ~correct1).sum()
    outcome = mcnemar(((both, only_exp0), (only_exp1, neither)))
    return outcome.statistic, outcome.pvalue
def run_paired_permutation_test(
    xs: List[int],
    ys: List[int],
    samples: int = 10000,
    statistic: Callable[[List[int]], float] = np.mean,  # type: ignore
) -> float:
    """Runs the two-sample permutation test to check whether the paired data xs and ys are from the same distribution (null hypothesis).

    Under the null hypothesis, randomly swapping paired elements should not
    change the chosen statistic, so the p-value is the fraction of random
    swaps whose statistic gap is at least the observed one.

    Args:
        xs: the data from distribution F1
        ys: the data from distribution F2
        samples: the number of samples for the Monte Carlo sampling
        statistic: the statistic to be used for the test (default is the mean)
    Returns:
        the p-value of the null hypothesis (two-tailed)
    """

    def _gap(aa: List[int], bb: List[int]) -> float:
        # Absolute difference of the statistic — two-tailed comparison.
        return np.abs(statistic(aa) - statistic(bb))

    size = len(xs)
    observed = _gap(xs, ys)  # observed difference
    hits = 0
    for _ in range(samples):
        # Flip a coin per pair; swap the elements of the flipped pairs.
        swap_mask = np.random.randint(0, 2, size).astype(bool)
        permuted_xs = np.select([swap_mask, ~swap_mask], [xs, ys])
        permuted_ys = np.select([~swap_mask, swap_mask], [xs, ys])
        if observed <= _gap(permuted_xs, permuted_ys):
            hits += 1
    # fraction of random samples that achieved at least the observed difference
    return hits / float(samples)
def main(
    exp0_prediction_report_tsv: str,
    exp1_prediction_report_tsv: str,
    datum_ids_jsonl: Optional[str],
    scores_json: str,
) -> None:
    """Loads the two prediction report files and calculates statistical significance.

    For the turn-level and dialogue-level accuracy, we use the McNemar test.
    For the dialogue-level prefix length (i.e., the number of turns before the first error), we use the two-sample permutation test.

    If `datum_ids_jsonl` is given, we only use the subset of turns specified in the file. In this case, only turn-level
    metrics are used since it doesn't make sense to compute dialogue-level metrics with only a subset of turns.
    """
    # QUOTE_ALL + keep_default_na=False preserves empty strings instead of
    # converting them into NaN.
    exp0_prediction_report_df = pd.read_csv(
        exp0_prediction_report_tsv,
        sep="\t",
        encoding="utf-8",
        quoting=csv.QUOTE_ALL,
        na_values=None,
        keep_default_na=False,
    )
    assert not exp0_prediction_report_df.isnull().any().any()
    exp1_prediction_report_df = pd.read_csv(
        exp1_prediction_report_tsv,
        sep="\t",
        encoding="utf-8",
        quoting=csv.QUOTE_ALL,
        na_values=None,
        keep_default_na=False,
    )
    assert not exp1_prediction_report_df.isnull().any().any()
    turn_report_df, dialogue_report_df = get_report_dataframes(
        exp0_prediction_report_df=exp0_prediction_report_df,
        exp1_prediction_report_df=exp1_prediction_report_df,
    )
    if not datum_ids_jsonl:
        # Full-data mode: turn-level and dialogue-level McNemar tests plus a
        # permutation test on the dialogue prefix lengths.
        turn_statistic, turn_pvalue = run_mcnemar_test(turn_report_df)
        dialogue_statistic, dialogue_pvalue = run_mcnemar_test(dialogue_report_df)
        prefix_pvalue = run_paired_permutation_test(
            xs=dialogue_report_df.loc[:, "prefix_0"].tolist(),
            ys=dialogue_report_df.loc[:, "prefix_1"].tolist(),
        )
        with open(scores_json, "w") as fp:
            fp.write(
                json.dumps(
                    {
                        "turn": {"statistic": turn_statistic, "pvalue": turn_pvalue},
                        "dialogue": {
                            "statistic": dialogue_statistic,
                            "pvalue": dialogue_pvalue,
                        },
                        "prefix": {"pvalue": prefix_pvalue},
                    },
                    indent=2,
                )
            )
            fp.write("\n")
    else:
        # Subset mode: restrict the turn-level report to the listed turns.
        datum_ids = set(
            load_jsonl_file(data_jsonl=datum_ids_jsonl, cls=TurnId, verbose=False)
        )
        mask_datum_id = [
            TurnId(dialogue_id=dialogue_id, turn_index=turn_index) in datum_ids
            for (dialogue_id, turn_index), row in exp1_prediction_report_df.iterrows()
        ]
        turn_report_df = turn_report_df.loc[mask_datum_id]
        # NOTE: We only compute turn-level statistics since it doesn't make sense to compute dialogue-level metrics
        # with only a subset of turns.
        turn_statistic, turn_pvalue = run_mcnemar_test(turn_report_df)
        with open(scores_json, "w") as fp:
            fp.write(
                json.dumps(
                    {"turn": {"statistic": turn_statistic, "pvalue": turn_pvalue}},
                    indent=2,
                )
            )
            fp.write("\n")
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
    """Register this script's command-line arguments on the given parser."""
    report_flags = [
        (
            "--exp0_prediction_report_tsv",
            "the prediction report tsv file for one experiment exp0",
        ),
        (
            "--exp1_prediction_report_tsv",
            "the prediction report tsv file for the other experiment exp1",
        ),
    ]
    for flag, description in report_flags:
        argument_parser.add_argument(flag, help=description)
    argument_parser.add_argument(
        "--datum_ids_jsonl", default=None, help="if set, only evaluate on these turns",
    )
    argument_parser.add_argument("--scores_json", help="output scores json file")
if __name__ == "__main__":
    # Script entry point: parse the command-line arguments and run the
    # significance analysis. RawTextHelpFormatter keeps the module docstring's
    # line breaks in --help output.
    cmdline_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
    )
    add_arguments(cmdline_parser)
    args = cmdline_parser.parse_args()
    print("Semantic Machines\N{TRADE MARK SIGN} software.")
    main(
        exp0_prediction_report_tsv=args.exp0_prediction_report_tsv,
        exp1_prediction_report_tsv=args.exp1_prediction_report_tsv,
        datum_ids_jsonl=args.datum_ids_jsonl,
        scores_json=args.scores_json,
    )
| [
"statsmodels.stats.contingency_tables.mcnemar",
"dataflow.core.dialogue.TurnId",
"dataflow.core.io.load_jsonl_file",
"numpy.select",
"argparse.ArgumentParser",
"pandas.read_csv",
"json.dumps",
"numpy.random.randint",
"pandas.DataFrame"
] | [((2312, 2330), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (2324, 2330), True, 'import pandas as pd\n'), ((2858, 2884), 'statsmodels.stats.contingency_tables.mcnemar', 'mcnemar', (['contingency_table'], {}), '(contingency_table)\n', (2865, 2884), False, 'from statsmodels.stats.contingency_tables import mcnemar\n'), ((4880, 5014), 'pandas.read_csv', 'pd.read_csv', (['exp0_prediction_report_tsv'], {'sep': '"""\t"""', 'encoding': '"""utf-8"""', 'quoting': 'csv.QUOTE_ALL', 'na_values': 'None', 'keep_default_na': '(False)'}), "(exp0_prediction_report_tsv, sep='\\t', encoding='utf-8', quoting\n =csv.QUOTE_ALL, na_values=None, keep_default_na=False)\n", (4891, 5014), True, 'import pandas as pd\n'), ((5160, 5294), 'pandas.read_csv', 'pd.read_csv', (['exp1_prediction_report_tsv'], {'sep': '"""\t"""', 'encoding': '"""utf-8"""', 'quoting': 'csv.QUOTE_ALL', 'na_values': 'None', 'keep_default_na': '(False)'}), "(exp1_prediction_report_tsv, sep='\\t', encoding='utf-8', quoting\n =csv.QUOTE_ALL, na_values=None, keep_default_na=False)\n", (5171, 5294), True, 'import pandas as pd\n'), ((8051, 8147), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (8074, 8147), False, 'import argparse\n'), ((6576, 6646), 'dataflow.core.io.load_jsonl_file', 'load_jsonl_file', ([], {'data_jsonl': 'datum_ids_jsonl', 'cls': 'TurnId', 'verbose': '(False)'}), '(data_jsonl=datum_ids_jsonl, cls=TurnId, verbose=False)\n', (6591, 6646), False, 'from dataflow.core.io import load_jsonl_file\n'), ((3820, 3846), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (3837, 3846), True, 'import numpy as np\n'), ((3917, 3953), 'numpy.select', 'np.select', (['[swaps, ~swaps]', '[xs, ys]'], {}), '([swaps, ~swaps], [xs, ys])\n', (3926, 3953), True, 'import numpy as np\n'), ((3996, 4032), 
'numpy.select', 'np.select', (['[~swaps, swaps]', '[xs, ys]'], {}), '([~swaps, swaps], [xs, ys])\n', (4005, 4032), True, 'import numpy as np\n'), ((6054, 6258), 'json.dumps', 'json.dumps', (["{'turn': {'statistic': turn_statistic, 'pvalue': turn_pvalue}, 'dialogue':\n {'statistic': dialogue_statistic, 'pvalue': dialogue_pvalue}, 'prefix':\n {'pvalue': prefix_pvalue}}"], {'indent': '(2)'}), "({'turn': {'statistic': turn_statistic, 'pvalue': turn_pvalue},\n 'dialogue': {'statistic': dialogue_statistic, 'pvalue': dialogue_pvalue\n }, 'prefix': {'pvalue': prefix_pvalue}}, indent=2)\n", (6064, 6258), False, 'import json\n'), ((6695, 6749), 'dataflow.core.dialogue.TurnId', 'TurnId', ([], {'dialogue_id': 'dialogue_id', 'turn_index': 'turn_index'}), '(dialogue_id=dialogue_id, turn_index=turn_index)\n', (6701, 6749), False, 'from dataflow.core.dialogue import TurnId\n'), ((7227, 7315), 'json.dumps', 'json.dumps', (["{'turn': {'statistic': turn_statistic, 'pvalue': turn_pvalue}}"], {'indent': '(2)'}), "({'turn': {'statistic': turn_statistic, 'pvalue': turn_pvalue}},\n indent=2)\n", (7237, 7315), False, 'import json\n')] |
""" Local outlier factor.
Reference:
<NAME>, <NAME>, <NAME>, and <NAME>. LOF: identifying density-based local outliers.
In Proceedings of the 2000 ACM SIGMOD international conference on Management of data, vol. 29, no. 2. ACM, 2000, pp. 93–104.
"""
# Authors: <NAME>, 2018.
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
from .BaseDetector import BaseDetector
from .utils.validation import check_X_y
# -------------
# CLASSES
# -------------
class LOF(BaseDetector):
    """ Local outlier factor (LOF).

    Parameters
    ----------
    k : int (default=10)
        Number of nearest neighbors.

    contamination : float (default=0.1)
        Estimate of the expected percentage of anomalies in the data.

    metric : string (default=euclidean)
        Distance metric for the distance computation.

    tol : float (default=1e-8)
        Small constant guarding against division by zero when the anomaly
        scores are rescaled to [0, 1].

    verbose : bool (default=False)
        Verbosity flag (stored; not otherwise used by this class).

    Comments
    --------
    - This method DOES NOT EASILY extend to OUT-OF-SAMPLE setting!
    - The number of neighbors cannot be larger than the number of instances in
      the data: automatically correct if necessary.
    """

    def __init__(self, k=10, contamination=0.1, metric='euclidean',
                 tol=1e-8, verbose=False):
        super(LOF, self).__init__()

        self.k = int(k)
        self.contamination = float(contamination)
        self.metric = str(metric)
        self.tol = float(tol)
        self.verbose = bool(verbose)

    def fit_predict(self, X, y=None):
        """ Fit the model to the training set X and returns the anomaly score
            of the instances in X.

        :param X : np.array(), shape (n_samples, n_features)
            The samples to compute anomaly score w.r.t. the training samples.
        :param y : np.array(), shape (n_samples), default = None
            Labels for examples in X.

        :returns y_score : np.array(), shape (n_samples)
            Anomaly score for the examples in X.
        :returns y_pred : np.array(), shape (n_samples)
            Returns -1 for inliers and +1 for anomalies/outliers.
        """
        X, y = check_X_y(X, y)

        return self.fit(X, y).predict(X)

    def fit(self, X, y=None):
        """ Fit the model using data in X.

        :param X : np.array(), shape (n_samples, n_features)
            The samples to compute anomaly score w.r.t. the training samples.
        :param y : np.array(), shape (n_samples), default = None
            Labels for examples in X.

        :returns self : object
        """
        X, y = check_X_y(X, y)
        n, _ = X.shape

        # number of neighbors cannot exceed the number of samples
        nn = self._check_valid_number_of_neighbors(n)
        self.clf = LocalOutlierFactor(n_neighbors=nn, contamination=self.contamination, metric=self.metric)
        self.clf.fit(X)

        return self

    def predict(self, X):
        """ Compute the anomaly score + predict the label of instances in X.

        :returns y_score : np.array(), shape (n_samples)
            Anomaly score for the examples in X.
        :returns y_pred : np.array(), shape (n_samples)
            Returns -1 for inliers and +1 for anomalies/outliers.
        """
        X, y = check_X_y(X, None)
        n, _ = X.shape

        # Shifted opposite of the Local Outlier Factor of X (higher = more
        # anomalous).
        # NOTE(review): `_decision_function` is a private sklearn API that was
        # removed in later sklearn releases (replaced by `score_samples` /
        # `negative_outlier_factor_`) -- verify against the pinned version.
        lof_score = self.clf._decision_function(X) * -1

        # rescale the scores to [0, 1]; self.tol guards against division by
        # zero when every sample receives an identical LOF score
        lo = lof_score.min()
        span = max(lof_score.max() - lo, self.tol)
        y_score = (lof_score - lo) / span

        # prediction threshold + absolute predictions; clamp the index so
        # that contamination == 0.0 does not index one past the end
        cutoff = min(int(n * (1.0 - self.contamination)), n - 1)
        self.threshold = np.sort(y_score)[cutoff]
        y_pred = np.ones(n, dtype=float)
        y_pred[y_score < self.threshold] = -1

        return y_score, y_pred

    def _check_valid_number_of_neighbors(self, n_samples):
        """ Check if the number of nearest neighbors is valid and correct.

        :param n_samples : int
            Number of samples in the data.

        :returns : int
            `self.k`, capped at `n_samples`.
        """
        return min(n_samples, self.k)
| [
"numpy.ones",
"numpy.sort",
"sklearn.neighbors.LocalOutlierFactor"
] | [((2595, 2688), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': 'nn', 'contamination': 'self.contamination', 'metric': 'self.metric'}), '(n_neighbors=nn, contamination=self.contamination, metric\n =self.metric)\n', (2613, 2688), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((3539, 3562), 'numpy.ones', 'np.ones', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (3546, 3562), True, 'import numpy as np\n'), ((3468, 3484), 'numpy.sort', 'np.sort', (['y_score'], {}), '(y_score)\n', (3475, 3484), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.