code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from calculatearea import AreaEstimator
from math import sqrt
area_estimator = AreaEstimator(filename='rancho_rd.jpg', # name of the image in the directory
color_range=((0, 0, 118), (100, 100, 255)),
default_area_color=(255, 20, 160)) # BGR purple
# First pass: broad color window; return the raw masked pixel count.
A_px = area_estimator.get_area(return_pixels=True)
# Calibrate feet-per-pixel from the known plot area.
# NOTE(review): 30.56 acres = 1,331,193.6 sq ft; the constant 131193.8 looks
# off by a factor of 10 — confirm against the source of the plot size.
ft_per_pixel = sqrt(131193.8 / A_px) # plot size - 30.56 acres, img size - 447x588 px
print(ft_per_pixel)
# area_estimator.show_images()
# with tighter restraints
# Second pass: narrow the color window and change the overlay color.
area_estimator.color_lower_limit = np.array([0, 0, 130], np.uint8)
area_estimator.color_upper_limit = np.array([50, 50, 255], np.uint8)
area_estimator.area_color = (100, 200, 200)
# NOTE(review): this call omits return_pixels=True — verify the units here
# match the first pass before trusting the second ft_per_pixel estimate.
A_px = area_estimator.get_area()
area_estimator.show_images()
# guess feet per pixel
ft_per_pixel = sqrt(131193.8 / A_px)
print(ft_per_pixel)
| [
"calculatearea.AreaEstimator",
"numpy.array",
"math.sqrt"
] | [((100, 222), 'calculatearea.AreaEstimator', 'AreaEstimator', ([], {'filename': '"""rancho_rd.jpg"""', 'color_range': '((0, 0, 118), (100, 100, 255))', 'default_area_color': '(255, 20, 160)'}), "(filename='rancho_rd.jpg', color_range=((0, 0, 118), (100, 100,\n 255)), default_area_color=(255, 20, 160))\n", (113, 222), False, 'from calculatearea import AreaEstimator\n'), ((400, 421), 'math.sqrt', 'sqrt', (['(131193.8 / A_px)'], {}), '(131193.8 / A_px)\n', (404, 421), False, 'from math import sqrt\n'), ((586, 617), 'numpy.array', 'np.array', (['[0, 0, 130]', 'np.uint8'], {}), '([0, 0, 130], np.uint8)\n', (594, 617), True, 'import numpy as np\n'), ((653, 686), 'numpy.array', 'np.array', (['[50, 50, 255]', 'np.uint8'], {}), '([50, 50, 255], np.uint8)\n', (661, 686), True, 'import numpy as np\n'), ((833, 854), 'math.sqrt', 'sqrt', (['(131193.8 / A_px)'], {}), '(131193.8 / A_px)\n', (837, 854), False, 'from math import sqrt\n')] |
import cv2
import numpy as np
import random
import matplotlib.pyplot as plt
def similarity_transform(image, center, dim, angle=0.0, scale=1.0, clip=True):
    """Rotate/translate/scale *image* about *center*.

    A similarity transform (rotation + translation + uniform scaling)
    preserves angles and parallelism.

    :param image: Input image array with three dimensions.
    :param center: Rotation center (x, y).
    :param dim: Output size used when *clip* is truthy.
    :param angle: Rotation angle in degrees.
    :param scale: Isotropic scale factor.
    :param clip: If truthy, keep the requested output size (corners of the
        rotated image may be cut off); otherwise grow the canvas so the whole
        rotated image fits.
    :return: The warped image.
    """
    M = cv2.getRotationMatrix2D(center, angle, scale)
    # NOTE(review): numpy image shape is (rows, cols, depth); the w/h names
    # here follow the original code — verify for non-square images.
    (w, h, d) = image.shape
    # Fix: the original tested `clip == True` / `elif clip == False`, which
    # left `new_image` unbound (NameError at return) for any non-bool value.
    if clip:
        new_image = cv2.warpAffine(image, M, dim)
    else:
        cos = np.abs(M[0, 0])
        sin = np.abs(M[0, 1])
        # Bounding-box size of the rotated image.
        nw = w * cos + h * sin
        nh = w * sin + h * cos
        # Shift so the rotation center maps to the center of the new canvas.
        M[0, 2] += (nw / 2) - center[0]
        M[1, 2] += (nh / 2) - center[1]
        new_image = cv2.warpAffine(image, M, (int(nw), int(nh)))
    return new_image
def affine_transform(image, pts1, pts2):
    """Warp *image* with the affine map defined by three point pairs.

    An affine transform preserves parallelism; three correspondences
    determine the 2x3 matrix.
    NOTE(review): image.shape is (rows, cols, channels), so the output size
    tuple swaps width/height for non-square images — kept as in the original.
    """
    rows, cols, _ = image.shape
    warp_matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(image, warp_matrix, (rows, cols))
def perspective_transform(image, pts1, pts2):
    """Warp *image* with the homography defined by four point pairs.

    A perspective transform preserves straight lines; four correspondences
    determine the 3x3 (8-dof) matrix. The output canvas is twice the input
    size in each dimension.
    """
    rows, cols, _ = image.shape
    homography = cv2.getPerspectiveTransform(pts1, pts2)
    return cv2.warpPerspective(image, homography, (2 * rows, 2 * cols))
def transform_img(img,center,img_dim):
    """Interactively apply a similarity/affine/perspective transform to *img*.

    Prompts on stdin for the transform kind and its parameters, shows the
    original and transformed images in OpenCV windows, and blocks on a
    keypress (ESC closes all windows).

    :param img: Input image array, three-dimensional.
    :param center: Default rotation center for the similarity transform.
    :param img_dim: Output size passed to the similarity transform.
    :return: The transformed image.
    """
    # NOTE: 'type' shadows the builtin of the same name.
    type = int(input("please specify transform type: 0 for similarity transform, 1 for affine transform, 2 for perspective transform: "))
    if type==0:
        # Collect similarity-transform parameters, falling back to defaults
        # when the user just hits enter.
        center_x = input("please specify translation center x, using default hit enter: ")
        center_y = input("please specify translation center y, using default hit enter: ")
        if center_x=="":
            center_x=center[0]
        if center_y == "":
            center_y = center[1]
        center = (int(center_x), int(center_y))
        print(center)
        dim=img_dim
        angle = input("please specify image rotation angle, using default hit enter: ")
        if angle=="":
            angle=0
        angle=float(angle)
        scale =input("please specify image scaling factor, default hit enter: ")
        if scale=="":
            scale=1
        scale=float(scale)
        clip_flag=int(input("please specify if the image will be clipped, by default clipped input 1, if not input 0: "))
        if clip_flag==1:
            clip=True
        else:
            clip=False
        new_image=similarity_transform(img, center, dim, angle, scale, clip)
        cv2.imshow("original", img)
        cv2.imshow("similarity transform", new_image)
    if type==1:
        option=input ("please provide 3 pairs of corresponding points, using default values as example input 0: ")
        # NOTE(review): pts1/pts2 are only defined when option == 0; any other
        # input raises NameError below — custom point entry is not implemented.
        if int(option)==0:
            w, h, d = img.shape
            pts1 = np.float32([[0, 0], [w - 1, 0], [0, h - 1]])
            pts2 = np.float32([[w * 0.2, h * 0.1], [w * 0.9, h * 0.2], [w * 0.1, h * 0.9]])
        new_image=affine_transform(img,pts1,pts2)
        cv2.imshow("original", img)
        cv2.imshow("affine transform", new_image)
        #using matrix transformation to verify
        new_matrix = dotProduct(img, pts1, pts2, type)
    if type==2:
        option=input ("please provide 4 pairs of corresponding points, using default values as example input 0: ")
        # NOTE(review): same caveat — pts1/pts2 require option == 0.
        if int(option)==0:
            # warp:
            # Draw random source/destination corners within `random_margin`
            # pixels of the image corners.
            random_margin = 60
            width=img.shape[0]
            height=img.shape[1]
            x1 = random.randint(-random_margin, random_margin)
            y1 = random.randint(-random_margin, random_margin)
            x2 = random.randint(width - random_margin - 1, width - 1)
            y2 = random.randint(-random_margin, random_margin)
            x3 = random.randint(width - random_margin - 1, width - 1)
            y3 = random.randint(height - random_margin - 1, height - 1)
            x4 = random.randint(-random_margin, random_margin)
            y4 = random.randint(height - random_margin - 1, height - 1)
            dx1 = random.randint(-random_margin, random_margin)
            dy1 = random.randint(-random_margin, random_margin)
            dx2 = random.randint(width - random_margin - 1, width - 1)
            dy2 = random.randint(-random_margin, random_margin)
            dx3 = random.randint(width - random_margin - 1, width - 1)
            dy3 = random.randint(height - random_margin - 1, height - 1)
            dx4 = random.randint(-random_margin, random_margin)
            dy4 = random.randint(height - random_margin - 1, height - 1)
            pts1 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
            pts2 = np.float32([[dx1, dy1], [dx2, dy2], [dx3, dy3], [dx4, dy4]])
        new_image=perspective_transform(img,pts1,pts2)
        cv2.imshow("original", img)
        cv2.imshow("perspective transform", new_image)
        #using matrix transformation to verify
        new_matrix = dotProduct(img, pts1, pts2, type)
    # Block until a keypress; ESC (27) closes every OpenCV window.
    key = cv2.waitKey(0)
    if key == 27:
        cv2.destroyAllWindows()
    return new_image
def quit_figure(event):
    """Matplotlib key handler: close the event's figure when 'q' is pressed."""
    if event.key != 'q':
        return
    plt.close(event.canvas.figure)
def dotProduct(img, pts1, pts2, type):
    """Verify the OpenCV transform matrix by applying it manually, and plot
    both point sets (source in yellow, destination in green) in image
    coordinates.

    :param img: Unused; kept for interface compatibility with callers.
    :param pts1: Source points, float32, shape (3, 2) affine / (4, 2) perspective.
    :param pts2: Destination points, same shape as pts1.
    :param type: 1 for affine, 2 for perspective.
    :return: The manually transformed source points, shape (N, 2).
    :raises ValueError: If `type` is neither 1 nor 2.
    """
    if type == 1:
        M = cv2.getAffineTransform(pts1, pts2)
        one = np.ones((3, 1))
    elif type == 2:
        M = cv2.getPerspectiveTransform(pts1, pts2)
        one = np.ones((4, 1))
    else:
        # Fix: the original fell through with M/one unbound (NameError).
        raise ValueError("type must be 1 (affine) or 2 (perspective)")
    # Homogeneous coordinates: append a column of ones.
    pts1_add = np.append(pts1, one, axis=1)
    transform_pts1 = np.dot(pts1_add, np.transpose(M))
    if type == 2:
        # Fix: a homography's output is homogeneous — divide by the last
        # coordinate before comparing with the 2-D destination points.
        transform_pts1 = transform_pts1[:, :2] / transform_pts1[:, 2:3]
    # Fix: the original compared `transform_pts1.all() == pts2.all()`, which
    # collapses each array to a single truth value; compare element-wise.
    if np.allclose(transform_pts1, pts2, atol=1e-3):
        print("M matrix confirmed")
    for idx, point in enumerate(pts1):
        xi_0, yi_0 = point
        xi_1, yi_1 = pts1[(idx + 1) % len(pts1)]
        plt.plot([xi_0, xi_1], [yi_0, yi_1], color='yellow')
    for idx, point in enumerate(pts2):
        xi_0, yi_0 = point
        xi_1, yi_1 = pts2[(idx + 1) % len(pts2)]
        plt.plot([xi_0, xi_1], [yi_0, yi_1], color='green')
    ax = plt.gca()  # get the current axis
    ax.xaxis.set_ticks_position('top')  # put x axis to the top
    ax.invert_yaxis()  # invert y axis so the plot matches image coordinates
    # Fix: connect the key handler BEFORE the blocking show() call so that
    # pressing 'q' actually closes the figure.
    cid = plt.gcf().canvas.mpl_connect('key_press_event', quit_figure)
    plt.show()
    return transform_pts1
| [
"cv2.imshow",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"cv2.waitKey",
"random.randint",
"numpy.abs",
"cv2.warpAffine",
"numpy.ones",
"cv2.getPerspectiveTransform",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"cv2.getAffineTr... | [((265, 310), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', 'scale'], {}), '(center, angle, scale)\n', (288, 310), False, 'import cv2\n'), ((777, 811), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (799, 811), False, 'import cv2\n'), ((849, 881), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (863, 881), False, 'import cv2\n'), ((1063, 1102), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1090, 1102), False, 'import cv2\n'), ((1116, 1161), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(2 * w, 2 * h)'], {}), '(image, M, (2 * w, 2 * h))\n', (1135, 1161), False, 'import cv2\n'), ((4778, 4792), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4789, 4792), False, 'import cv2\n'), ((5225, 5253), 'numpy.append', 'np.append', (['pts1', 'one'], {'axis': '(1)'}), '(pts1, one, axis=1)\n', (5234, 5253), True, 'import numpy as np\n'), ((5841, 5850), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5848, 5850), True, 'import matplotlib.pyplot as plt\n'), ((6066, 6076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6074, 6076), True, 'import matplotlib.pyplot as plt\n'), ((370, 399), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 'dim'], {}), '(image, M, dim)\n', (384, 399), False, 'import cv2\n'), ((2338, 2365), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (2348, 2365), False, 'import cv2\n'), ((2373, 2418), 'cv2.imshow', 'cv2.imshow', (['"""similarity transform"""', 'new_image'], {}), "('similarity transform', new_image)\n", (2383, 2418), False, 'import cv2\n'), ((2827, 2854), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (2837, 2854), False, 'import cv2\n'), ((2863, 2904), 'cv2.imshow', 'cv2.imshow', (['"""affine transform"""', 'new_image'], {}), "('affine 
transform', new_image)\n", (2873, 2904), False, 'import cv2\n'), ((4581, 4608), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (4591, 4608), False, 'import cv2\n'), ((4617, 4663), 'cv2.imshow', 'cv2.imshow', (['"""perspective transform"""', 'new_image'], {}), "('perspective transform', new_image)\n", (4627, 4663), False, 'import cv2\n'), ((4819, 4842), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4840, 4842), False, 'import cv2\n'), ((4923, 4953), 'matplotlib.pyplot.close', 'plt.close', (['event.canvas.figure'], {}), '(event.canvas.figure)\n', (4932, 4953), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5064), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (5052, 5064), False, 'import cv2\n'), ((5082, 5097), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (5089, 5097), True, 'import numpy as np\n'), ((5133, 5172), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (5160, 5172), False, 'import cv2\n'), ((5190, 5205), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (5197, 5205), True, 'import numpy as np\n'), ((5364, 5379), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (5376, 5379), True, 'import numpy as np\n'), ((5598, 5650), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi_0, xi_1]', '[yi_0, yi_1]'], {'color': '"""yellow"""'}), "([xi_0, xi_1], [yi_0, yi_1], color='yellow')\n", (5606, 5650), True, 'import matplotlib.pyplot as plt\n'), ((5779, 5830), 'matplotlib.pyplot.plot', 'plt.plot', (['[xi_0, xi_1]', '[yi_0, yi_1]'], {'color': '"""green"""'}), "([xi_0, xi_1], [yi_0, yi_1], color='green')\n", (5787, 5830), True, 'import matplotlib.pyplot as plt\n'), ((432, 447), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (438, 447), True, 'import numpy as np\n'), ((457, 472), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (463, 472), True, 'import numpy as np\n'), 
((2631, 2675), 'numpy.float32', 'np.float32', (['[[0, 0], [w - 1, 0], [0, h - 1]]'], {}), '([[0, 0], [w - 1, 0], [0, h - 1]])\n', (2641, 2675), True, 'import numpy as np\n'), ((2695, 2767), 'numpy.float32', 'np.float32', (['[[w * 0.2, h * 0.1], [w * 0.9, h * 0.2], [w * 0.1, h * 0.9]]'], {}), '([[w * 0.2, h * 0.1], [w * 0.9, h * 0.2], [w * 0.1, h * 0.9]])\n', (2705, 2767), True, 'import numpy as np\n'), ((3300, 3345), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (3314, 3345), False, 'import random\n'), ((3363, 3408), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (3377, 3408), False, 'import random\n'), ((3426, 3478), 'random.randint', 'random.randint', (['(width - random_margin - 1)', '(width - 1)'], {}), '(width - random_margin - 1, width - 1)\n', (3440, 3478), False, 'import random\n'), ((3496, 3541), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (3510, 3541), False, 'import random\n'), ((3559, 3611), 'random.randint', 'random.randint', (['(width - random_margin - 1)', '(width - 1)'], {}), '(width - random_margin - 1, width - 1)\n', (3573, 3611), False, 'import random\n'), ((3629, 3683), 'random.randint', 'random.randint', (['(height - random_margin - 1)', '(height - 1)'], {}), '(height - random_margin - 1, height - 1)\n', (3643, 3683), False, 'import random\n'), ((3701, 3746), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (3715, 3746), False, 'import random\n'), ((3764, 3818), 'random.randint', 'random.randint', (['(height - random_margin - 1)', '(height - 1)'], {}), '(height - random_margin - 1, height - 1)\n', (3778, 3818), False, 'import random\n'), ((3838, 3883), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, 
random_margin)\n', (3852, 3883), False, 'import random\n'), ((3902, 3947), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (3916, 3947), False, 'import random\n'), ((3966, 4018), 'random.randint', 'random.randint', (['(width - random_margin - 1)', '(width - 1)'], {}), '(width - random_margin - 1, width - 1)\n', (3980, 4018), False, 'import random\n'), ((4037, 4082), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (4051, 4082), False, 'import random\n'), ((4101, 4153), 'random.randint', 'random.randint', (['(width - random_margin - 1)', '(width - 1)'], {}), '(width - random_margin - 1, width - 1)\n', (4115, 4153), False, 'import random\n'), ((4172, 4226), 'random.randint', 'random.randint', (['(height - random_margin - 1)', '(height - 1)'], {}), '(height - random_margin - 1, height - 1)\n', (4186, 4226), False, 'import random\n'), ((4245, 4290), 'random.randint', 'random.randint', (['(-random_margin)', 'random_margin'], {}), '(-random_margin, random_margin)\n', (4259, 4290), False, 'import random\n'), ((4309, 4363), 'random.randint', 'random.randint', (['(height - random_margin - 1)', '(height - 1)'], {}), '(height - random_margin - 1, height - 1)\n', (4323, 4363), False, 'import random\n'), ((4384, 4436), 'numpy.float32', 'np.float32', (['[[x1, y1], [x2, y2], [x3, y3], [x4, y4]]'], {}), '([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])\n', (4394, 4436), True, 'import numpy as np\n'), ((4456, 4516), 'numpy.float32', 'np.float32', (['[[dx1, dy1], [dx2, dy2], [dx3, dy3], [dx4, dy4]]'], {}), '([[dx1, dy1], [dx2, dy2], [dx3, dy3], [dx4, dy4]])\n', (4466, 4516), True, 'import numpy as np\n'), ((6091, 6100), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6098, 6100), True, 'import matplotlib.pyplot as plt\n')] |
from abc import abstractmethod, ABC
import torch
from dpm.distributions import (
Distribution, Normal, Data,
GumbelSoftmax, ConditionalModel,
Categorical
)
from dpm.distributions import MixtureModel
from dpm.train import train
from dpm.criterion import cross_entropy, ELBO
from torch.nn import Softmax, ModuleList
from functools import partial
import numpy as np
class GaussianMixtureModel(Distribution):
    """Equal-weight Gaussian mixture density, trained by cross-entropy.

    Wraps a ``MixtureModel`` of ``n_components`` Normal components with
    randomly initialized means and identity covariances.
    """

    def __init__(self, n_components=2, n_dims=1):
        super().__init__()
        self.n_components = n_components
        self.n_dims = n_dims
        components = [Normal(torch.randn(n_dims), torch.eye(n_dims))
                      for _ in range(n_components)]
        uniform_weights = [1.0 / n_components] * n_components
        self.model = MixtureModel(components, uniform_weights)

    def log_prob(self, value):
        """Log density of ``value`` under the mixture."""
        return self.model.log_prob(value)

    def sample(self, batch_size):
        """Draw ``batch_size`` samples from the mixture."""
        return self.model.sample(batch_size)

    def fit(self, x, **kwargs):
        """Fit the mixture to data ``x``; returns the training statistics."""
        return train(Data(x), self.model, cross_entropy, **kwargs)

    def predict(self, x):
        """Return the index of the highest-log-density component per point."""
        per_component = torch.stack([component.log_prob(x)
                                     for component in self.model.models])
        _, labels = per_component.max(dim=0)
        return labels
class VariationalCategorical(ConditionalModel):
    """Conditional network emitting straight-through Gumbel-Softmax samples
    over mixture components (amortized categorical posterior).
    """
    has_latents = True

    def __init__(self, conditional_kwargs=None):
        """
        :param conditional_kwargs: Overrides merged over the preset
            ``ConditionalModel`` kwargs. Defaults to no overrides.
        """
        # Fix: the original used a mutable default argument ({}); use a None
        # sentinel so callers never share (or mutate) the default dict.
        preset_kwargs = {'input_dim': 1, 'hidden_sizes': [24, 24], 'activation': 'ReLU',
                         'output_shapes': [2], 'output_activations': [Softmax(dim=-1)],
                         'distribution': partial(GumbelSoftmax, temperature=1.0,
                                                 hard=True, learnable=False)}
        preset_kwargs.update(conditional_kwargs or {})
        super().__init__(**preset_kwargs)
class VariationalGaussianMixtureModel(Distribution):
    """Gaussian mixture trained by variational inference.

    A ``VariationalCategorical`` network amortizes the posterior over the
    discrete component assignment Z; the prior over Z is a fixed uniform
    Categorical and training maximizes the ELBO.
    """
    has_latents = True

    def __init__(self, n_components=2, n_dims=1, variational_kwargs=None, elbo_kwargs=None):
        """
        :param n_components: Number of Gaussian components.
        :param n_dims: Data dimensionality.
        :param variational_kwargs: Overrides for the variational network kwargs.
        :param elbo_kwargs: Overrides for the ELBO criterion kwargs.
        """
        super().__init__()
        # Fix: the original used mutable default arguments ({}) AND mutated
        # variational_kwargs in place, altering the caller's dict; copy first.
        variational_kwargs = dict(variational_kwargs or {})
        elbo_kwargs = dict(elbo_kwargs or {})
        self.n_components = n_components
        self.n_dims = n_dims
        self.normals = ModuleList([Normal(torch.randn(n_dims), torch.eye(n_dims))
                                    for _ in range(n_components)])
        variational_kwargs.update({'input_dim': n_dims,
                                   'output_shapes': [n_components]})
        self.variational_kwargs = variational_kwargs
        self.elbo_kwargs = elbo_kwargs
        self.categorical = VariationalCategorical(variational_kwargs)
        self.criterion = ELBO(self.categorical, **elbo_kwargs)
        self.prior = Categorical([1.0 / n_components for _ in range(n_components)],
                                 learnable=False)

    def log_prob(self, X, Z=None, n_iter=10):
        """Estimate log p(X, Z). When ``Z`` is not given, it is estimated by
        averaging ``n_iter`` draws from the variational posterior q(Z|X).
        """
        if Z is None:
            Z = self.categorical.sample(X)
            for _ in range(n_iter - 1):
                Z = Z + self.categorical.sample(X)
            # Monte-Carlo average of one-hot draws -> soft assignment.
            Z = Z / n_iter
        latent_probs = self.prior.log_prob(Z)
        log_probs = torch.stack([sub_model.log_prob(X)
                                 for sub_model in self.normals], dim=1)
        # Weight each component's log-density by the (soft) assignment.
        return (log_probs * Z).sum(dim=-1) + latent_probs

    def sample(self, batch_size):
        """Draw samples: pick one component per row from the prior, then take
        that component's sample for the row.
        """
        indices = self.prior.sample(batch_size).view(-1).long()
        samples = torch.stack([sub_model.sample(batch_size)
                               for sub_model in self.normals])
        return samples[indices, np.arange(batch_size)]

    def fit(self, x, **kwargs):
        """Train on data ``x`` by maximizing the ELBO; returns training stats."""
        return train(Data(x), self, self.criterion, **kwargs)

    def predict(self, x):
        """Hard-assign each point in ``x`` to its highest-log-density component."""
        log_probs = torch.stack([sub_model.log_prob(x)
                                 for sub_model in self.normals])
        _, labels = log_probs.max(dim=0)
        return labels

    def parameters(self):
        """Yield only the generative parameters, skipping the variational
        categorical network's.

        NOTE(review): presumably the ELBO criterion updates the categorical's
        parameters itself — confirm against dpm.criterion.ELBO.
        """
        for name, param in self.named_parameters(recurse=True):
            if 'categorical' in name:
                continue
            yield param
# EOF
| [
"dpm.train.train",
"torch.nn.Softmax",
"torch.eye",
"functools.partial",
"dpm.criterion.ELBO",
"dpm.distributions.Data",
"torch.randn",
"numpy.arange"
] | [((1003, 1010), 'dpm.distributions.Data', 'Data', (['x'], {}), '(x)\n', (1007, 1010), False, 'from dpm.distributions import Distribution, Normal, Data, GumbelSoftmax, ConditionalModel, Categorical\n'), ((1027, 1075), 'dpm.train.train', 'train', (['data', 'self.model', 'cross_entropy'], {}), '(data, self.model, cross_entropy, **kwargs)\n', (1032, 1075), False, 'from dpm.train import train\n'), ((2571, 2608), 'dpm.criterion.ELBO', 'ELBO', (['self.categorical'], {}), '(self.categorical, **elbo_kwargs)\n', (2575, 2608), False, 'from dpm.criterion import cross_entropy, ELBO\n'), ((3664, 3671), 'dpm.distributions.Data', 'Data', (['x'], {}), '(x)\n', (3668, 3671), False, 'from dpm.distributions import Distribution, Normal, Data, GumbelSoftmax, ConditionalModel, Categorical\n'), ((3687, 3730), 'dpm.train.train', 'train', (['data', 'self', 'self.criterion'], {}), '(data, self, self.criterion, **kwargs)\n', (3692, 3730), False, 'from dpm.train import train\n'), ((1646, 1713), 'functools.partial', 'partial', (['GumbelSoftmax'], {'temperature': '(1.0)', 'hard': '(True)', 'learnable': '(False)'}), '(GumbelSoftmax, temperature=1.0, hard=True, learnable=False)\n', (1653, 1713), False, 'from functools import partial\n'), ((1588, 1603), 'torch.nn.Softmax', 'Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1595, 1603), False, 'from torch.nn import Softmax, ModuleList\n'), ((3592, 3613), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (3601, 3613), True, 'import numpy as np\n'), ((609, 628), 'torch.randn', 'torch.randn', (['n_dims'], {}), '(n_dims)\n', (620, 628), False, 'import torch\n'), ((630, 647), 'torch.eye', 'torch.eye', (['n_dims'], {}), '(n_dims)\n', (639, 647), False, 'import torch\n'), ((2155, 2174), 'torch.randn', 'torch.randn', (['n_dims'], {}), '(n_dims)\n', (2166, 2174), False, 'import torch\n'), ((2176, 2193), 'torch.eye', 'torch.eye', (['n_dims'], {}), '(n_dims)\n', (2185, 2193), False, 'import torch\n')] |
""" PyTorch dataset classes for molecular data. """
import itertools
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from rdkit import Chem
# noinspection PyUnresolvedReferences
from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem
from scipy import sparse
from torch.utils.data import Dataset
from conformation.distance_matrix import distmat_to_vec
from conformation.graph_data import Data
def to_one_hot(x: int, vals: Union[List, range]) -> List:
    """
    Return a one-hot vector.

    :param x: Data integer.
    :param vals: List of possible data values.
    :return: One-hot vector as a list of ints: 1 at the position of ``x`` in
        ``vals``, 0 elsewhere (all zeros when ``x`` is absent).
    """
    # Fix: emit 0/1 ints rather than bools so the result is a true one-hot
    # vector (bool == int in Python, so downstream arithmetic is unchanged).
    return [int(x == v) for v in vals]
class TestDataset(Dataset):
    """
    Minimal dataset wrapping a pre-built tensor, optionally paired with a
    fixed condition tensor returned alongside every sample.
    """

    def __init__(self, data: torch.Tensor, condition: torch.Tensor = None):
        """
        :param data: Tensor whose first dimension indexes samples.
        :param condition: Optional condition tensor returned with each sample.
        """
        # Fix: the original called super(Dataset, self).__init__(), which
        # skips Dataset in the MRO; plain super() is the correct idiom.
        super().__init__()
        self.data = data
        self.condition = condition

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        sample = self.data[idx]
        if self.condition is not None:
            return sample, self.condition
        else:
            return sample
class BasicDataset(Dataset):
    """
    Dataset class for loading non-molecular data organized as numpy arrays.

    Each metadata entry's 'path' is loaded with ``torch.load``; when
    ``condition`` is True, the entry's 'condition' file is loaded as well.
    """

    def __init__(self, metadata: List[Dict[str, str]], condition: bool = False):
        """
        :param metadata: One dict per sample with a 'path' key (and a
            'condition' key when ``condition`` is True).
        :param condition: Whether to also load and return the condition tensor.
        """
        # Fix: the original called super(Dataset, self).__init__(), which
        # skips Dataset in the MRO; plain super() is the correct idiom.
        super().__init__()
        self.metadata = metadata
        self.condition = condition

    def __len__(self) -> int:
        return len(self.metadata)

    def __getitem__(self, idx: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        data = torch.load(self.metadata[idx]['path'])
        if self.condition:
            condition = torch.load(self.metadata[idx]['condition'])
            return data, condition
        else:
            return data
class MolDataset(Dataset):
    """
    Dataset class for loading atomic pairwise distance information for molecules.
    """

    def __init__(self, metadata: List[Dict[str, str]]):
        """
        :param metadata: One dict per molecule; 'path' keys the distance file.
        """
        # Fix: the original called super(Dataset, self).__init__(), which
        # skips Dataset in the MRO; plain super() is the correct idiom.
        super().__init__()
        self.metadata = metadata

    def __len__(self) -> int:
        return len(self.metadata)

    def __getitem__(self, idx: int) -> torch.Tensor:
        # distmat_to_vec returns (something, vector); only the vector is used.
        _, data = distmat_to_vec(self.metadata[idx]['path'])
        data = torch.from_numpy(data)
        # noinspection PyTypeChecker
        data = data.type(torch.float32)
        return data

    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, len(self))
class GraphDataset(Dataset):
"""
Dataset class for loading molecular graphs and pairwise distance targets.
"""
# noinspection PyUnresolvedReferences
def __init__(self, metadata: List[Dict[str, str]], atom_types: List[int] = None, bond_types: List[float] = None,
target: bool = True, max_path_length: int = 10, atomic_num: bool = True, partial_charge: bool = True,
mmff_atom_types_one_hot: bool = True, valence_types: List[int] = None, valence: bool = True,
aromatic: bool = True, hybridization: bool = True, assign_stereo: bool = True,
charge_types: List[int] = None, formal_charge: bool = True, r_covalent: bool = True,
r_vanderwals: bool = True, default_valence: bool = True, max_ring_size: int = 8,
rings: bool = True, chirality: bool = True, mmff94_atom_types: List[int] = None,
hybridization_types: List[Chem.HybridizationType] = None,
chi_types: List[rdchem.ChiralType] = None, improved_architecture: bool = False, max_atoms: int = 26,
degree_types: List[int] = None, degree: bool = True, num_hydrogen_types: List[int] = None,
num_hydrogen: bool = True, num_radical_electron_types: List[int] = None,
num_radical_electron: bool = True, conjugated: bool = True, bond_type: bool = True,
bond_ring: bool = True, bond_stereo: bool = True, bond_stereo_types: List[int] = None,
shortest_path: bool = True, same_ring: bool = True, autoencoder: bool = False):
"""
Custom dataset for molecular graphs.
:param metadata: Metadata contents.
:param atom_types: List of allowed atomic numbers.
:param bond_types: List of allowed bond types.
:param target: Whether or not to load target data from metadata into Data() object.
:param max_path_length: Maximum shortest path length between any two atoms in a molecule in the dataset.
:param partial_charge: Whether or not to include Gasteiger Charge as a vertex feature.\
:param mmff_atom_types_one_hot: Whether or not to include MMFF94 atom types as vertex features.
:param valence_types: List of allowed total valence numbers.
:param valence: Whether or not to include total valence as a vertex feature.
:param aromatic: Whether or not to include aromaticity as a vertex feature.
:param hybridization: Whether or not to include hybridization as a vertex feature.
:param assign_stereo: Whether or not to include stereochemistry information.
:param charge_types: Formal charge types.
:param formal_charge: Whether or not to include formal charge as a vertex feature.
:param r_covalent: Whether or not to include covalent radius as a vertex feature.
:param r_vanderwals: Whether or not to include vanderwals radius as a vertex feature.
:param default_valence: Whether or not to include default valence as a vertex feature.
:param max_ring_size: Maximum ring size.
:param rings: Whether or not to include ring size as a vertex feature.
:param chirality: Whether or not to include chirality as a vertex feature.
:param mmff94_atom_types: MMFF94 atom types.
:param hybridization_types: Hybridization types.
:param chi_types: Chiral tag types.
:param improved_architecture: Whether or not to use Jonas improved relational architecture.
:param max_atoms: Maximum number of atoms for a given molecule in the dataset (improved_architecture = True)
:param degree_types: Atomic degree types.
:param degree: Whether or not to include degree as a vertex feature.
:param num_hydrogen_types: List of allowed number of H atoms (including neighbors).
:param num_hydrogen: Whether or not to include number of (neighboring) Hs as a vertex feature.
:param num_radical_electron_types: List of allowed number of radical electrons.
:param num_radical_electron: Whether or not to include number of radical electrons as a vertex feature.
:param conjugated: Whether or not to include conjugated as an edge feature.
:param bond_type: Whether or not to include bond type as an edge feature.
:param bond_ring: Whether or not to include bond being in ring as an edge feature.
:param bond_stereo: Whether or not to include bond stereo as an edge feature.
:param bond_stereo_types: List of bond stereo types.
:param shortest_path: Whether or not to include shortest path length as a bond feature.
:param same_ring: Whether or not to include same ring as bond feature.
:param autoencoder: Whether or not to prepare data for autoencoder training.
"""
super(Dataset, self).__init__()
if bond_types is None:
self.bond_types = [0., 1., 1.5, 2., 3.]
else:
self.bond_types = bond_types
if atom_types is None:
self.atom_types = [1, 6, 7, 8, 9]
else:
self.atom_types = atom_types
self.metadata = metadata
self.target = target
self.max_path_length = max_path_length
self.atomic_num = atomic_num
self.partial_charge = partial_charge
if mmff94_atom_types is None:
self.mmff94_atom_types = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 37, 38, 39, 40, 42, 43, 44, 46, 48, 59, 62, 63, 64,
65, 66, 70, 71, 72, 74, 75, 78]
else:
self.mmff94_atom_types = mmff94_atom_types
self.mmff_atom_types_one_hot = mmff_atom_types_one_hot
if valence_types is None:
self.valence_types = [1, 2, 3, 4, 5, 6]
else:
self.valence_types = valence_types
self.valence = valence
self.aromatic = aromatic
if hybridization_types is None:
self.hybridization_types = [Chem.HybridizationType.S,
Chem.HybridizationType.SP,
Chem.HybridizationType.SP2,
Chem.HybridizationType.SP3,
Chem.HybridizationType.SP3D,
Chem.HybridizationType.SP3D2,
Chem.HybridizationType.UNSPECIFIED]
else:
self.hybridization_types = hybridization_types
self.hybridization = hybridization
self.assign_stereo = assign_stereo
if charge_types is None:
self.charge_types = [-1, 0, 1]
else:
self.charge_types = charge_types
self.formal_charge = formal_charge
self.r_covalent = r_covalent
self.r_vanderwals = r_vanderwals
self.default_valence = default_valence
self.max_ring_size = max_ring_size
self.rings = rings
if chi_types is None:
self.chi_types = list(rdchem.ChiralType.values.values())
else:
self.chi_types = chi_types
self.chirality = chirality
self.improved_architecture = improved_architecture
self.max_atoms = max_atoms
if degree_types is None:
self.degree_types = [1, 2, 3, 4]
else:
self.degree_types = degree_types
self.degree = degree
if num_hydrogen_types is None:
self.num_hydrogen_types = [0, 1, 2, 3]
else:
self.num_hydrogen_types = num_hydrogen_types
self.num_hydrogen = num_hydrogen
if num_radical_electron_types is None:
self.num_radical_electron_types = [0, 1, 2]
else:
self.num_radical_electron_types = num_radical_electron_types
self.num_radical_electron = num_radical_electron
self.conjugated = conjugated
self.bond_type = bond_type
self.bond_ring = bond_ring
self.bond_stereo = bond_stereo
if bond_stereo_types is None:
self.bond_stereo_types = list(rdchem.BondStereo.values.values())
else:
self.bond_stereo_types = bond_stereo_types
self.shortest_path = shortest_path
self.same_ring = same_ring
self.autoencoder = autoencoder
def __len__(self) -> int:
return len(self.metadata)
    def __getitem__(self, idx) -> Union[Data, Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
        """
        Output a data object with node features, edge connectivity, and (optionally) target.
        :param idx: Which item to load.
        :return: Data() object, or — when ``improved_architecture`` is set — a tuple of
            padded vertex features, padded edge features, atom mask, and padded target.
        """
        data = Data()
        # Molecule from binary
        # noinspection PyUnresolvedReferences
        mol = Chem.Mol(open(self.metadata[idx]['binary'], "rb").read())
        num_atoms = mol.GetNumAtoms()
        # Target
        if self.target:
            # Target: 1-D tensor representing average inter-atomic distance for each edge
            target = np.load(self.metadata[idx]['target'])
            data.y = torch.tensor(target, dtype=torch.float)
        # Compute edge connectivity in COO format corresponding to a complete graph on num_nodes
        complete_graph = np.ones([num_atoms, num_atoms])  # Create an auxiliary complete graph
        complete_graph = np.triu(complete_graph, k=1)  # Compute an upper triangular matrix of the complete graph
        complete_graph = sparse.csc_matrix(complete_graph)  # Compute a csc style sparse matrix from this graph
        row, col = complete_graph.nonzero()  # Extract the row and column indices corresponding to non-zero entries
        row = torch.tensor(row, dtype=torch.long)
        col = torch.tensor(col, dtype=torch.long)
        data.edge_index = torch.stack([row, col])  # Edge connectivity in COO format (all possible edges)
        # Edge features
        # One feature vector per unordered atom pair (a, b), in the same order as edge_index
        edge_features = []
        edge_count = 0
        for a, b in itertools.combinations(list(np.arange(num_atoms)), 2):
            bond_feature = []
            bond = mol.GetBondBetweenAtoms(int(a), int(b))
            if bond is None:
                # Non-bonded pair: a leading 1 flag followed by an all-zero bond-type one-hot
                if self.bond_type:
                    bond_feature += [1] + [0]*len(self.bond_types)
                if self.conjugated:
                    bond_feature += [0]
                if self.bond_ring:
                    bond_feature += [0]
                if self.bond_stereo:
                    bond_feature += [0]*len(self.bond_stereo_types)
                if self.shortest_path:
                    # Graph (topological) distance between the two atoms, one-hot encoded
                    path_len = len(rdmolops.GetShortestPath(mol, int(a), int(b))) - 1
                    bond_feature += to_one_hot(path_len - 1, range(self.max_path_length))
                if self.same_ring:
                    # 1 if the two atoms share at least one ring, else 0
                    ring_info = list(mol.GetRingInfo().AtomRings())
                    membership = [int(a) in r and int(b) in r for r in ring_info]
                    if sum(membership) > 0:
                        bond_feature += [1]
                    else:
                        bond_feature += [0]
                if self.autoencoder:
                    # `target` is only bound when self.target is True; the autoencoder
                    # option implicitly requires it
                    # noinspection PyUnboundLocalVariable
                    bond_feature += [target[:, 0][edge_count]]
            else:
                if self.bond_type:
                    # Leading 0 flag marks a bonded pair, then one-hot bond order
                    bond_feature += [0]
                    bond_feature += to_one_hot(bond.GetBondTypeAsDouble(), self.bond_types)
                if self.conjugated:
                    bond_feature += [bond.GetIsConjugated()]
                if self.bond_ring:
                    bond_feature += [bond.IsInRing()]
                if self.bond_stereo:
                    bond_feature += to_one_hot(bond.GetStereo(), self.bond_stereo_types)
                if self.shortest_path:
                    path_len = len(rdmolops.GetShortestPath(mol, int(a), int(b))) - 1
                    bond_feature += to_one_hot(path_len - 1, range(self.max_path_length))
                if self.same_ring:
                    ring_info = list(mol.GetRingInfo().AtomRings())
                    membership = [int(a) in r and int(b) in r for r in ring_info]
                    if sum(membership) > 0:
                        bond_feature += [1]
                    else:
                        bond_feature += [0]
                if self.autoencoder:
                    bond_feature += [target[:, 0][edge_count]]
            edge_count += 1
            edge_features.append(bond_feature)
        data.edge_attr = torch.tensor(edge_features, dtype=torch.float)
        # Vertex features
        # List to hold all vertex features
        vertex_features = []
        pt = Chem.GetPeriodicTable()
        if self.partial_charge:
            rdPartialCharges.ComputeGasteigerCharges(mol)
        mmff_p = None
        if self.mmff_atom_types_one_hot:
            # AllChem.EmbedMolecule(mol, maxAttempts=100000)
            # AllChem.MMFFOptimizeMolecule(mol)
            mmff_p = rdForceFieldHelpers.MMFFGetMoleculeProperties(mol)
        if self.assign_stereo:
            rdmolops.AssignStereochemistryFrom3D(mol)
        for i in range(num_atoms):
            atom = mol.GetAtomWithIdx(i)
            atom_feature = []
            if self.atomic_num:
                atom_feature += to_one_hot(atom.GetAtomicNum(), self.atom_types)
            if self.valence:
                atom_feature += to_one_hot(atom.GetTotalValence(), self.valence_types)
            if self.aromatic:
                atom_feature += [atom.GetIsAromatic()]
            if self.hybridization:
                atom_feature += to_one_hot(atom.GetHybridization(), self.hybridization_types)
            if self.partial_charge:
                gc = float(atom.GetProp('_GasteigerCharge'))
                # Gasteiger charges can come back NaN/inf; clamp those to 0
                if not np.isfinite(gc):
                    gc = 0.0
                atom_feature += [gc]
            if self.formal_charge:
                atom_feature += to_one_hot(atom.GetFormalCharge(), self.charge_types)
            if self.r_covalent:
                atom_feature += [pt.GetRcovalent(atom.GetAtomicNum())]
            if self.r_vanderwals:
                atom_feature += [pt.GetRvdw(atom.GetAtomicNum())]
            if self.default_valence:
                atom_feature += to_one_hot(pt.GetDefaultValence(atom.GetAtomicNum()), self.valence_types)
            if self.rings:
                # Membership indicator for every ring size from 3 up to max_ring_size
                atom_feature += [atom.IsInRingSize(r) for r in range(3, self.max_ring_size + 1)]
            if self.chirality:
                atom_feature += to_one_hot(atom.GetChiralTag(), self.chi_types)
            if self.mmff_atom_types_one_hot:
                if mmff_p is None:
                    atom_feature += [0] * len(self.mmff94_atom_types)
                else:
                    atom_feature += to_one_hot(mmff_p.GetMMFFAtomType(i), self.mmff94_atom_types)
            if self.degree:
                atom_feature += to_one_hot(atom.GetDegree(), self.degree_types)
            if self.num_hydrogen:
                atom_feature += to_one_hot(atom.GetTotalNumHs(), self.num_hydrogen_types)
            if self.num_radical_electron:
                atom_feature += to_one_hot(atom.GetNumRadicalElectrons(), self.num_radical_electron_types)
            vertex_features.append(atom_feature)
        data.x = torch.tensor(vertex_features, dtype=torch.float)
        # UID
        data.uid = torch.tensor([int(self.metadata[idx]['uid'])])
        if self.improved_architecture:
            # Dense, fixed-size tensors (padded to max_atoms) instead of a Data object
            # Vertex features
            v_in = data.x
            padding = torch.zeros([self.max_atoms, v_in.shape[1]])
            padding[:v_in.shape[0], :] = v_in
            v_in = padding
            # Mask
            mask = torch.tensor([1. if x < num_atoms else 0. for x in range(self.max_atoms)])
            # Edge features
            # Scatter the COO edge attributes into a symmetric dense matrix
            k = 0
            e_in = torch.zeros([num_atoms, num_atoms, data.edge_attr.shape[1]])
            for i, j in itertools.combinations(np.arange(num_atoms), 2):
                e_in[i, j, :] = data.edge_attr[k, :]
                e_in[j, i, :] = data.edge_attr[k, :]
                k += 1
            padding = torch.zeros([self.max_atoms, self.max_atoms, data.edge_attr.shape[1]])
            padding[:e_in.shape[0], :e_in.shape[0], :] = e_in
            e_in = padding
            # Target
            target = data.y
            padding = torch.zeros([self.max_atoms*self.max_atoms - self.max_atoms, data.y.shape[1]])
            padding[:target.shape[0], :] = target
            target = padding
            return v_in, e_in, mask, target
        else:
            return data
def __repr__(self) -> str:
return '{}({})'.format(self.__class__.__name__, len(self))
class CNFDataset(Dataset):
    """
    Dataset class for loading atomic pairwise distance information for molecules for a conditional normalizing flow.

    Each item is a (distance vector, condition matrix, # distances) triple,
    zero-padded to fixed sizes so items can be batched directly.
    """

    def __init__(self, metadata: List[Dict[str, str]], padding_dim: int = 528, condition_dim: int = 256):
        """
        :param metadata: Metadata; each entry must provide a 'path' key (pairwise
            distance matrix file) and a 'condition' key (saved numpy condition matrix).
        :param padding_dim: Padding size for all distance vectors and conditions.
        :param condition_dim: Dimensionality of the hidden size for the condition matrix.
        """
        # Bug fix: the original called super(Dataset, self).__init__(), which
        # skips Dataset's own initializer and dispatches to Dataset's *parent*.
        # Zero-argument super() resolves the MRO from this class, as intended.
        super().__init__()
        self.metadata = metadata
        self.padding_dim = padding_dim
        self.condition_dim = condition_dim

    def __len__(self) -> int:
        """Number of molecules in the dataset."""
        return len(self.metadata)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param idx: # of data item to retrieve.
        :return: Padded distance vector, condition matrix, and # of pairwise distances in the molecule.
        """
        # Load the pairwise distance matrix as a flat vector
        _, data = distmat_to_vec(self.metadata[idx]['path'])
        dist_vec = torch.from_numpy(data)
        # noinspection PyTypeChecker
        dist_vec = dist_vec.type(torch.float32)
        # Number of pairwise distances before padding
        num_dist = torch.tensor(dist_vec.shape[0])
        # Zero-pad the pairwise distances vector up to padding_dim
        padding = torch.zeros(self.padding_dim)
        padding[:dist_vec.shape[0]] = dist_vec
        dist_vec = padding
        # Load the condition matrix
        condition = np.load(self.metadata[idx]['condition'])
        condition = torch.from_numpy(condition)
        # noinspection PyTypeChecker
        condition = condition.type(torch.float32)
        # Zero-pad the condition matrix rows up to padding_dim
        padding = torch.zeros([self.padding_dim, self.condition_dim])
        padding[0:condition.shape[0], :] = condition
        condition = padding
        return dist_vec, condition, num_dist

    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, len(self))
| [
"torch.from_numpy",
"numpy.isfinite",
"numpy.arange",
"rdkit.Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties",
"rdkit.Chem.rdchem.BondStereo.values.values",
"rdkit.Chem.rdmolops.AssignStereochemistryFrom3D",
"numpy.ones",
"rdkit.Chem.GetPeriodicTable",
"conformation.graph_data.Data",
"conformat... | [((1794, 1832), 'torch.load', 'torch.load', (["self.metadata[idx]['path']"], {}), "(self.metadata[idx]['path'])\n", (1804, 1832), False, 'import torch\n'), ((2418, 2460), 'conformation.distance_matrix.distmat_to_vec', 'distmat_to_vec', (["self.metadata[idx]['path']"], {}), "(self.metadata[idx]['path'])\n", (2432, 2460), False, 'from conformation.distance_matrix import distmat_to_vec\n'), ((2477, 2499), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (2493, 2499), False, 'import torch\n'), ((11666, 11672), 'conformation.graph_data.Data', 'Data', ([], {}), '()\n', (11670, 11672), False, 'from conformation.graph_data import Data\n'), ((12250, 12281), 'numpy.ones', 'np.ones', (['[num_atoms, num_atoms]'], {}), '([num_atoms, num_atoms])\n', (12257, 12281), True, 'import numpy as np\n'), ((12346, 12374), 'numpy.triu', 'np.triu', (['complete_graph'], {'k': '(1)'}), '(complete_graph, k=1)\n', (12353, 12374), True, 'import numpy as np\n'), ((12461, 12494), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['complete_graph'], {}), '(complete_graph)\n', (12478, 12494), False, 'from scipy import sparse\n'), ((12680, 12715), 'torch.tensor', 'torch.tensor', (['row'], {'dtype': 'torch.long'}), '(row, dtype=torch.long)\n', (12692, 12715), False, 'import torch\n'), ((12731, 12766), 'torch.tensor', 'torch.tensor', (['col'], {'dtype': 'torch.long'}), '(col, dtype=torch.long)\n', (12743, 12766), False, 'import torch\n'), ((12794, 12817), 'torch.stack', 'torch.stack', (['[row, col]'], {}), '([row, col])\n', (12805, 12817), False, 'import torch\n'), ((15559, 15605), 'torch.tensor', 'torch.tensor', (['edge_features'], {'dtype': 'torch.float'}), '(edge_features, dtype=torch.float)\n', (15571, 15605), False, 'import torch\n'), ((15725, 15748), 'rdkit.Chem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (15746, 15748), False, 'from rdkit import Chem\n'), ((18431, 18479), 'torch.tensor', 'torch.tensor', (['vertex_features'], {'dtype': 
'torch.float'}), '(vertex_features, dtype=torch.float)\n', (18443, 18479), False, 'import torch\n'), ((20948, 20990), 'conformation.distance_matrix.distmat_to_vec', 'distmat_to_vec', (["self.metadata[idx]['path']"], {}), "(self.metadata[idx]['path'])\n", (20962, 20990), False, 'from conformation.distance_matrix import distmat_to_vec\n'), ((21011, 21033), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (21027, 21033), False, 'import torch\n'), ((21210, 21241), 'torch.tensor', 'torch.tensor', (['dist_vec.shape[0]'], {}), '(dist_vec.shape[0])\n', (21222, 21241), False, 'import torch\n'), ((21308, 21337), 'torch.zeros', 'torch.zeros', (['self.padding_dim'], {}), '(self.padding_dim)\n', (21319, 21337), False, 'import torch\n'), ((21474, 21514), 'numpy.load', 'np.load', (["self.metadata[idx]['condition']"], {}), "(self.metadata[idx]['condition'])\n", (21481, 21514), True, 'import numpy as np\n'), ((21536, 21563), 'torch.from_numpy', 'torch.from_numpy', (['condition'], {}), '(condition)\n', (21552, 21563), False, 'import torch\n'), ((21710, 21761), 'torch.zeros', 'torch.zeros', (['[self.padding_dim, self.condition_dim]'], {}), '([self.padding_dim, self.condition_dim])\n', (21721, 21761), False, 'import torch\n'), ((1888, 1931), 'torch.load', 'torch.load', (["self.metadata[idx]['condition']"], {}), "(self.metadata[idx]['condition'])\n", (1898, 1931), False, 'import torch\n'), ((12024, 12061), 'numpy.load', 'np.load', (["self.metadata[idx]['target']"], {}), "(self.metadata[idx]['target'])\n", (12031, 12061), True, 'import numpy as np\n'), ((12084, 12123), 'torch.tensor', 'torch.tensor', (['target'], {'dtype': 'torch.float'}), '(target, dtype=torch.float)\n', (12096, 12123), False, 'import torch\n'), ((15797, 15842), 'rdkit.Chem.rdPartialCharges.ComputeGasteigerCharges', 'rdPartialCharges.ComputeGasteigerCharges', (['mol'], {}), '(mol)\n', (15837, 15842), False, 'from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem\n'), 
((16043, 16093), 'rdkit.Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties', 'rdForceFieldHelpers.MMFFGetMoleculeProperties', (['mol'], {}), '(mol)\n', (16088, 16093), False, 'from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem\n'), ((16141, 16182), 'rdkit.Chem.rdmolops.AssignStereochemistryFrom3D', 'rdmolops.AssignStereochemistryFrom3D', (['mol'], {}), '(mol)\n', (16177, 16182), False, 'from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem\n'), ((18687, 18731), 'torch.zeros', 'torch.zeros', (['[self.max_atoms, v_in.shape[1]]'], {}), '([self.max_atoms, v_in.shape[1]])\n', (18698, 18731), False, 'import torch\n'), ((18994, 19054), 'torch.zeros', 'torch.zeros', (['[num_atoms, num_atoms, data.edge_attr.shape[1]]'], {}), '([num_atoms, num_atoms, data.edge_attr.shape[1]])\n', (19005, 19054), False, 'import torch\n'), ((19284, 19354), 'torch.zeros', 'torch.zeros', (['[self.max_atoms, self.max_atoms, data.edge_attr.shape[1]]'], {}), '([self.max_atoms, self.max_atoms, data.edge_attr.shape[1]])\n', (19295, 19354), False, 'import torch\n'), ((19522, 19607), 'torch.zeros', 'torch.zeros', (['[self.max_atoms * self.max_atoms - self.max_atoms, data.y.shape[1]]'], {}), '([self.max_atoms * self.max_atoms - self.max_atoms, data.y.shape[1]]\n )\n', (19533, 19607), False, 'import torch\n'), ((9965, 9998), 'rdkit.Chem.rdchem.ChiralType.values.values', 'rdchem.ChiralType.values.values', ([], {}), '()\n', (9996, 9998), False, 'from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem\n'), ((11049, 11082), 'rdkit.Chem.rdchem.BondStereo.values.values', 'rdchem.BondStereo.values.values', ([], {}), '()\n', (11080, 11082), False, 'from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem\n'), ((13004, 13024), 'numpy.arange', 'np.arange', (['num_atoms'], {}), '(num_atoms)\n', (13013, 13024), True, 'import numpy as np\n'), ((19103, 19123), 'numpy.arange', 
'np.arange', (['num_atoms'], {}), '(num_atoms)\n', (19112, 19123), True, 'import numpy as np\n'), ((16878, 16893), 'numpy.isfinite', 'np.isfinite', (['gc'], {}), '(gc)\n', (16889, 16893), True, 'import numpy as np\n')] |
import sys
import os
import cv2
import glob
import math
from time import sleep, time
import matplotlib
matplotlib.use('agg')
import numpy as np
from time import time
from nnlib import nnlib
import matplotlib.pyplot as plt
from facelib import S3FDExtractor, LandmarksExtractor
class SlimFace(object):
    """Face-slimming pipeline: S3FD face detection plus facial landmark
    extraction, followed by a local translation warp that thins the cheek,
    'humerus' (cheekbone) and chin regions of the first detected face."""

    def __init__(self):
        # Frame counter; the first processed frame always triggers a full
        # detection + landmark refresh.
        self.frame = 0
        # Last face bounding boxes from S3FD (used to decide whether to refresh).
        self.rects = None
        self.lms_update = False
        # Cached landmarks reused while the face has not moved much.
        self.landmarks = None
        # Per-part [left, right] warp strengths; *_copy keeps the user-set baseline.
        self.slim_strength = {}
        self.slim_strength_copy = {}
        # Per-part warp geometry: [end point, [r, x, y] left, [r, x, y] right].
        self.landmark_coordinate_list = {}
        # Per-part list of pixels inside the warp circles.
        self.slim_part_pixel_list = {}
        # All (i, j) pixel coordinates of the working (half-resolution) frame.
        self.im_pixel_list = []
        # Running strength compensation applied when the face turns sideways.
        self.cp = 0
        device_config = nnlib.DeviceConfig(cpu_only=False,
                                           force_gpu_idx=0,
                                           allow_growth=True)
        # S3FD
        nnlib.import_all(device_config)
        S3FD_model_path = os.path.join('facelib', 'S3FD.h5')
        S3FD_model = nnlib.keras.models.load_model(S3FD_model_path)
        self.s3fd_model = S3FDExtractor(S3FD_model)
        nnlib.import_all(device_config)
        self.landmark_model = LandmarksExtractor(nnlib.keras)
        self.landmark_model.manual_init()

    def generate_landmark_coordinate(self, lms, l1, l2, r1, r2, end):
        """
        :param lms: all landmark positions
        :param l1: index of the top point of the left-side warp region
        :param l2: index of the bottom point of the left-side warp region
        :param r1: index of the top point of the right-side warp region
        :param r2: index of the bottom point of the right-side warp region
        :param end: index of the warp target (end/center) point
            (original author's note here said "forgot" — confirm semantics)
        :return: [end point, [left radius, left x, left y], [right radius, right x, right y]]
        """
        left_landmark = lms[l1]
        left_landmark_button = lms[l2]
        right_landmark = lms[r1]
        right_landmark_button = lms[r2]
        endPt = lms[end]
        # Warp radius on each side = distance between that side's top and bottom landmarks.
        left_r = math.sqrt(
            (left_landmark[0] - left_landmark_button[0]) * (left_landmark[0] - left_landmark_button[0]) +
            (left_landmark[1] - left_landmark_button[1]) * (left_landmark[1] - left_landmark_button[1]))
        right_r = math.sqrt(
            (right_landmark[0] - right_landmark_button[0]) * (right_landmark[0] - right_landmark_button[0]) +
            (right_landmark[1] - right_landmark_button[1]) * (right_landmark[1] - right_landmark_button[1]))
        return [endPt, [left_r, left_landmark[0], left_landmark[1]], [right_r, right_landmark[0], right_landmark[1]]]

    def generate_face_pixel(self, r, pixel_coordinate, landmark_x, landmark_y, face_part):
        """
        Append ``pixel_coordinate`` to ``slim_part_pixel_list[face_part]`` when it
        lies inside the circle of radius ``r`` centred on (landmark_x, landmark_y).
        :param r: warp radius for one side of the face
        :param pixel_coordinate: (i, j) pixel position to test
        :param landmark_x: circle centre, x coordinate
        :param landmark_y: circle centre, y coordinate
        :param face_part: one of 'cheek', 'humerus', 'chin'
        """
        i, j = pixel_coordinate
        radius = float(r * r)
        distance = (i - landmark_x) * (i - landmark_x) + (j - landmark_y) * (j - landmark_y)
        # Cheap bounding-box rejection before the exact (squared) circle test.
        if math.fabs(i - landmark_x) <= r and math.fabs(j - landmark_y) <= r:
            if (distance < radius):
                self.slim_part_pixel_list[face_part].append(pixel_coordinate)

    def generate_slim_part_params(self, lms, face_part):
        # Compute the warp geometry for one face part and collect the affected pixels.
        if face_part == "cheek":
            landmark_coordinate = self.generate_landmark_coordinate(lms, 3, 5, 13, 11, 33)  # Original key Point: 3, 5, 13, 15, 30
        elif face_part == "humerus":
            landmark_coordinate = self.generate_landmark_coordinate(lms, 1, 17, 15, 26, 27)
        elif face_part == "chin":
            landmark_coordinate = self.generate_landmark_coordinate(lms, 5, 7, 11, 9, 33)  # Original key Point: 5, 7, 11, 13, 33
        self.landmark_coordinate_list[face_part] = landmark_coordinate
        endPt, left_point, right_point = self.landmark_coordinate_list[face_part]
        left_r, left_startX, left_startY = left_point
        right_r, right_startX, right_startY = right_point
        # Side effect only: the comprehensions fill slim_part_pixel_list[face_part].
        [self.generate_face_pixel(left_r, pixel, left_startX, left_startY, face_part) for pixel in self.im_pixel_list]
        [self.generate_face_pixel(right_r, pixel, right_startX, right_startY, face_part) for pixel in self.im_pixel_list]

    def set_slim_strength(self, cheek_strength, humerus_strength, chin_strength):
        # Store [left, right] strength pairs; *_copy holds the unmodified baseline
        # that change_slim_power() compensates from.
        self.slim_strength_copy['cheek'] = [cheek_strength, cheek_strength]
        self.slim_strength_copy['humerus'] = [humerus_strength, humerus_strength]
        self.slim_strength_copy['chin'] = [chin_strength, chin_strength]
        self.slim_strength['cheek'] = [cheek_strength, cheek_strength]
        self.slim_strength['humerus'] = [humerus_strength, humerus_strength]
        self.slim_strength['chin'] = [chin_strength, chin_strength]

    def update_slim_part_params(self, lms):
        # Rebuild warp geometry and pixel sets for all three face parts.
        self.slim_part_pixel_list = {'humerus': [], 'cheek': [], 'chin': []}
        self.landmark_coordinate_list = {}
        self.generate_slim_part_params(lms, 'cheek')
        self.generate_slim_part_params(lms, 'humerus')
        self.generate_slim_part_params(lms, 'chin')
        return self.slim_part_pixel_list, self.landmark_coordinate_list

    def compare_rects_change(self, rects):
        """Return True when the detected face box moved enough (relative to the
        face size) to justify re-extracting landmarks.

        NOTE(review): when either rect list is empty this falls through and
        implicitly returns None, which the caller treats as "no update" —
        confirm this is intended.
        """
        # r, r1 = self.rects[0], rects[0]
        # ux, uy = math.fabs(r[0] - r1[0]), math.fabs(r[1] - r1[1])
        # update_status = (False if ux + uy < 30 or math.sqrt(ux * ux + uy * uy) < 60 else True)
        # return update_status
        if len(self.rects) > 0 and len(rects) > 0:
            r, r1 = self.rects[0], rects[0]
            # Movement threshold scales with the face width (960 presumably the
            # working frame width — confirm).
            face_proportion = math.fabs(r[0] - r[2]) / 960 * 300
            ux, uy = math.fabs(r[0] - r1[0]), math.fabs(r[1] - r1[1])
            print("r r1 ux uy face_proportion", r, r1, ux, uy, face_proportion)
            # update_status = (False if ux + uy < 30 or math.sqrt(ux * ux + uy * uy) < 60 else True)
            update_status = (
                False if ux + uy < face_proportion or math.sqrt(ux * ux + uy * uy) < face_proportion else True)
            # update_status = (False if ux + uy < 30 else True)
            # print("update_status")
            return update_status

    def get_landmark(self, im):
        """Detect the face and return (rects, current landmarks, cached/updated
        landmarks, whether the cache was refreshed)."""
        self.frame += 1
        if self.frame == 1:
            # First frame: always detect and cache.
            rects = self.s3fd_model.extract(im)
            lms = self.landmark_model.extract(im, rects[:1])
            c_lms = lms
            self.rects = rects
            self.landmarks = lms
            self.lms_update = True
        else:
            rects = self.s3fd_model.extract(im)
            update_status = self.compare_rects_change(rects)
            if not update_status:
                # Face barely moved: keep the cached landmarks for warping but
                # still extract fresh ones (c_lms) for strength compensation.
                c_lms = self.landmark_model.extract(im, rects[:1])
                lms = self.landmarks
                self.lms_update = False
            else:
                lms = self.landmark_model.extract(im, rects[:1])
                c_lms = lms
                self.rects = rects
                self.landmarks = lms
                self.lms_update = True
        print("landmark", lms)
        return rects, c_lms, lms, self.lms_update

    def BilinearInsert(self, im, x, y):
        """Bilinearly interpolate the pixel value of ``im`` at float position (x, y).

        NOTE(review): np.float was removed in NumPy 1.24 (use float/np.float64);
        astype(np.int8) for pixel data looks like it should be np.uint8 — confirm.
        On any exception this prints and implicitly returns None.
        """
        try:
            x1, y1 = int(x), int(y)
            x2, y2 = x1 + 1, y1 + 1
            part1 = im[y1, x1].astype(np.float) * (float(x2) - x) * (float(y2) - y)
            part2 = im[y1, x2].astype(np.float) * (x - float(x1)) * (float(y2) - y)
            part3 = im[y2, x1].astype(np.float) * (float(x2) - x) * (y - float(y1))
            part4 = im[y2, x2].astype(np.float) * (x - float(x1)) * (y - float(y1))
            insertValue = part1 + part2 + part3 + part4
            return insertValue.astype(np.int8)
        except Exception as e:
            print(e)

    def slim_face(self, i, j, s, im, copy_im, landmark_center_point, landmark_coordinate):
        """Local translation warp for a single pixel (i, j): pull it toward the
        end point with strength ``s`` when it falls inside the warp circle.

        :param i: pixel x coordinate
        :param j: pixel y coordinate
        :param s: warp strength (sign controls push/pull direction)
        :param im: source image sampled from
        :param copy_im: destination image written to (mutated in place)
        :param landmark_center_point: warp end point (endX, endY)
        :param landmark_coordinate: (radius, circle centre x, circle centre y)
        :return: ``copy_im``
        """
        endX, endY = landmark_center_point
        r, landmark_x, landmark_y = landmark_coordinate
        dradius = float(r * r)
        ddmc = (endX - landmark_x) * (endX - landmark_x) + (endY - landmark_y) * (endY - landmark_y)
        distance = (i - landmark_x) * (i - landmark_x) + (j - landmark_y) * (j - landmark_y)
        if (distance < dradius):
            # Displacement falls off quadratically from the circle centre.
            ratio = (dradius - distance) / (dradius - distance + ddmc)
            ratio = s * ratio * ratio
            UX, UY = i - ratio * (endX - landmark_x), j - ratio * (endY - landmark_y)
            # NOTE(review): writes are hard-limited to coordinates <= 255 —
            # presumably matched to the working frame size; confirm.
            if j <= 255 and i <= 255:
                copy_im[j, i] = self.BilinearInsert(im, UX, UY)
            else:
                print(f"Ignore wrong coordinate: [{j} , {i}]")
        return copy_im

    def localTranslationWarp(self, im, slim_part):
        """Apply the left and right warp circles of ``slim_part`` to ``im``.

        NOTE(review): if the first loop iteration raises TypeError, ``cheek_im``
        is unbound and the except branch raises NameError instead of returning —
        confirm intended error handling.
        """
        try:
            copyImg = im.copy()
            endPt, leftPt, rightPt = self.landmark_coordinate_list[slim_part]
            if slim_part == 'cheek':
                print(slim_part, self.slim_strength[slim_part])
            for pixel in self.slim_part_pixel_list[slim_part]:
                i, j = pixel
                # Left then right circle; both mutate copyImg in place.
                c = self.slim_face(i, j, self.slim_strength[slim_part][0], im, copyImg, endPt, leftPt)
                cheek_im = self.slim_face(i, j, self.slim_strength[slim_part][1], im, c, endPt, rightPt)
            return copyImg
        except TypeError as e:
            return cheek_im

    def change_slim_power(self, lms, rects):
        """Compensate the cheek warp strength when the face is turned sideways,
        detected via asymmetric distances from each cheek landmark to a central
        landmark (index 30; presumably the nose tip — confirm)."""
        x1, x2 = rects[0][0], rects[0][2]
        l = lms[3]
        r = lms[13]
        c = lms[30]
        r_ux, r_uy = math.fabs(r[0] - c[0]), math.fabs(r[1] - c[1])
        l_ux, l_uy = math.fabs(l[0] - c[0]), math.fabs(l[1] - c[1])
        r_face = math.sqrt(r_ux * r_ux + r_uy * r_uy)
        l_face = math.sqrt(l_ux * l_ux + l_uy * l_uy)
        compare_face = r_face - l_face
        face_width = math.fabs(x1 - x2)
        # Asymmetry ratio; above 0.15 the face is considered turned.
        f = math.fabs(compare_face) / face_width
        if f > 0.15:
            cp = 0
            defult_strength = self.slim_strength_copy['cheek'][0]
            if defult_strength > 0:
                # Reduce positive strength, clamped against overshoot and
                # against changing more than 0.3 per frame.
                cp = - (f - 0.15) * 100 * 0.3
                if defult_strength + cp < 0.3:
                    cp = - math.fabs(defult_strength + math.fabs(defult_strength / 4))
                if math.fabs(cp - self.cp) > 0.3:
                    cp = self.cp - 0.3
            elif defult_strength < 0:
                # Mirror logic for negative strengths.
                cp = (f - 0.15) * 100 * 0.3
                if defult_strength + cp > - 0.3:
                    cp = math.fabs(defult_strength + math.fabs(defult_strength / 4))
                if cp - self.cp > 0.3:
                    cp = self.cp + 0.3
            self.cp = cp
            # print("left >>>", cp)
            # Apply the compensation to whichever side is farther from center.
            if compare_face > 0:
                current_power = self.slim_strength_copy['cheek'][0] + cp
                self.slim_strength['cheek'][0] = current_power
            elif compare_face < 0:
                current_power = self.slim_strength_copy['cheek'][1]
                self.slim_strength['cheek'][1] = current_power + cp

    def slim_handler(self, im):
        """Slim one frame: work at half resolution, warp cheek/humerus/chin in
        sequence, and return the result scaled back to the original size."""
        h, w, _ = im.shape
        im = cv2.resize(im, (int(w / 2), int(h / 2)))
        # NOTE(review): im_pixel_list is appended to on every call without being
        # cleared, so it grows per frame — verify this is intended.
        [[self.im_pixel_list.append((i, j)) for j in range(int(h / 2) - 2)] for i in range(int(w / 2) - 2)]
        rects, c_lms, landmarks, lms_update = self.get_landmark(im)
        lms = landmarks[0].tolist()
        # Recompute warp geometry only when the landmark cache was refreshed.
        (self.update_slim_part_params(lms) if lms_update else False)
        lms2 = c_lms[0].tolist()
        self.change_slim_power(lms2, rects)
        cheek_im = self.localTranslationWarp(im, 'cheek')
        humerus_im = self.localTranslationWarp(cheek_im, 'humerus')
        chin_im = self.localTranslationWarp(humerus_im, 'chin')
        im = cv2.resize(chin_im, (w, h))
        return im
def put_frame():
    """Read ./media/jcdemo.mp4 frame by frame, slim each frame with the
    module-level ``slim`` instance, and write the results to
    data_output/slim/<n>.jpg.

    Requires the global ``slim`` (a SlimFace) to be constructed first.
    """
    count = 0
    start_time = time()
    cap = cv2.VideoCapture('./media/jcdemo.mp4')
    try:
        while cap.isOpened():
            print("put count", count)
            count += 1
            ret, im = cap.read()
            if count == 1:
                slim.set_slim_strength(cheek_strength=-2.0, humerus_strength=-0.2, chin_strength=1.5)
                # slim.slim_strength_copy = slim.slim_strength
            if not ret:
                # End of stream: report elapsed processing time.
                print(time() - start_time)
                break
            res = slim.slim_handler(im)
            cv2.imwrite("data_output/slim/{}.jpg".format(count), res)
            # out.write(res)
    finally:
        # Bug fix: the capture handle was never released (resource leak).
        cap.release()
def put_img(cheek_strength, humerus_strength, chin_strength):
    """Slim the 12 bundled test images and write before/after comparisons.

    Reads data_input/test_face{1..12}.png, applies the module-level ``slim``
    pipeline, writes a side-by-side original/result image to data_output/,
    and previews each result in a window.

    :param cheek_strength: cheek warp strength (number or numeric string)
    :param humerus_strength: cheekbone warp strength (number or numeric string)
    :param chin_strength: chin warp strength (number or numeric string)
    """
    # Bug fix: these may arrive as strings (straight from sys.argv); SlimFace
    # later does arithmetic on them, so coerce to float here.
    cheek_strength = float(cheek_strength)
    humerus_strength = float(humerus_strength)
    chin_strength = float(chin_strength)
    for i in range(12):
        im = cv2.imread("data_input/test_face{}.png".format(i+1))
        slim.set_slim_strength(cheek_strength, humerus_strength, chin_strength)
        res_im = slim.slim_handler(im)
        # Original on the left, slimmed frame on the right.
        image = np.concatenate((im, res_im), axis=1)
        cv2.imwrite("./data_output/deformation_face{}.jpg".format(i), image)
        cv2.imshow("face", image)
        cv2.waitKey(1)
if __name__ == "__main__":
    # file_list = os.listdir('.')
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # out = cv2.VideoWriter("ssslim.avi", fourcc, 24.0, (1920, 1080))
    # put_frame()
    slim = SlimFace()
    if len(sys.argv) == 1:
        # Defaults used when no strengths are given on the command line.
        cheek_strength = 1.2
        humerus_strength = 0.1
        chin_strength = 1.6
    else:
        # Bug fix: sys.argv values are strings, but SlimFace performs
        # arithmetic on these strengths — coerce them to float.
        _, cheek_strength, humerus_strength, chin_strength = sys.argv
        cheek_strength = float(cheek_strength)
        humerus_strength = float(humerus_strength)
        chin_strength = float(chin_strength)
    print("Deformation strength: ", cheek_strength, humerus_strength, chin_strength)
    put_img(cheek_strength, humerus_strength, chin_strength)
| [
"nnlib.nnlib.import_all",
"facelib.S3FDExtractor",
"matplotlib.use",
"facelib.LandmarksExtractor",
"os.path.join",
"math.sqrt",
"cv2.imshow",
"nnlib.nnlib.keras.models.load_model",
"cv2.waitKey",
"nnlib.nnlib.DeviceConfig",
"math.fabs",
"cv2.VideoCapture",
"numpy.concatenate",
"cv2.resize"... | [((103, 124), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (117, 124), False, 'import matplotlib\n'), ((11258, 11264), 'time.time', 'time', ([], {}), '()\n', (11262, 11264), False, 'from time import time\n'), ((11275, 11313), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./media/jcdemo.mp4"""'], {}), "('./media/jcdemo.mp4')\n", (11291, 11313), False, 'import cv2\n'), ((666, 736), 'nnlib.nnlib.DeviceConfig', 'nnlib.DeviceConfig', ([], {'cpu_only': '(False)', 'force_gpu_idx': '(0)', 'allow_growth': '(True)'}), '(cpu_only=False, force_gpu_idx=0, allow_growth=True)\n', (684, 736), False, 'from nnlib import nnlib\n'), ((847, 878), 'nnlib.nnlib.import_all', 'nnlib.import_all', (['device_config'], {}), '(device_config)\n', (863, 878), False, 'from nnlib import nnlib\n'), ((905, 939), 'os.path.join', 'os.path.join', (['"""facelib"""', '"""S3FD.h5"""'], {}), "('facelib', 'S3FD.h5')\n", (917, 939), False, 'import os\n'), ((961, 1007), 'nnlib.nnlib.keras.models.load_model', 'nnlib.keras.models.load_model', (['S3FD_model_path'], {}), '(S3FD_model_path)\n', (990, 1007), False, 'from nnlib import nnlib\n'), ((1034, 1059), 'facelib.S3FDExtractor', 'S3FDExtractor', (['S3FD_model'], {}), '(S3FD_model)\n', (1047, 1059), False, 'from facelib import S3FDExtractor, LandmarksExtractor\n'), ((1069, 1100), 'nnlib.nnlib.import_all', 'nnlib.import_all', (['device_config'], {}), '(device_config)\n', (1085, 1100), False, 'from nnlib import nnlib\n'), ((1131, 1162), 'facelib.LandmarksExtractor', 'LandmarksExtractor', (['nnlib.keras'], {}), '(nnlib.keras)\n', (1149, 1162), False, 'from facelib import S3FDExtractor, LandmarksExtractor\n'), ((1685, 1889), 'math.sqrt', 'math.sqrt', (['((left_landmark[0] - left_landmark_button[0]) * (left_landmark[0] -\n left_landmark_button[0]) + (left_landmark[1] - left_landmark_button[1]) *\n (left_landmark[1] - left_landmark_button[1]))'], {}), '((left_landmark[0] - left_landmark_button[0]) * (left_landmark[0] -\n 
left_landmark_button[0]) + (left_landmark[1] - left_landmark_button[1]) *\n (left_landmark[1] - left_landmark_button[1]))\n', (1694, 1889), False, 'import math\n'), ((1926, 2139), 'math.sqrt', 'math.sqrt', (['((right_landmark[0] - right_landmark_button[0]) * (right_landmark[0] -\n right_landmark_button[0]) + (right_landmark[1] - right_landmark_button[\n 1]) * (right_landmark[1] - right_landmark_button[1]))'], {}), '((right_landmark[0] - right_landmark_button[0]) * (right_landmark[\n 0] - right_landmark_button[0]) + (right_landmark[1] -\n right_landmark_button[1]) * (right_landmark[1] - right_landmark_button[1]))\n', (1935, 2139), False, 'import math\n'), ((9161, 9197), 'math.sqrt', 'math.sqrt', (['(r_ux * r_ux + r_uy * r_uy)'], {}), '(r_ux * r_ux + r_uy * r_uy)\n', (9170, 9197), False, 'import math\n'), ((9215, 9251), 'math.sqrt', 'math.sqrt', (['(l_ux * l_ux + l_uy * l_uy)'], {}), '(l_ux * l_ux + l_uy * l_uy)\n', (9224, 9251), False, 'import math\n'), ((9312, 9330), 'math.fabs', 'math.fabs', (['(x1 - x2)'], {}), '(x1 - x2)\n', (9321, 9330), False, 'import math\n'), ((11162, 11189), 'cv2.resize', 'cv2.resize', (['chin_im', '(w, h)'], {}), '(chin_im, (w, h))\n', (11172, 11189), False, 'import cv2\n'), ((12095, 12131), 'numpy.concatenate', 'np.concatenate', (['(im, res_im)'], {'axis': '(1)'}), '((im, res_im), axis=1)\n', (12109, 12131), True, 'import numpy as np\n'), ((12217, 12242), 'cv2.imshow', 'cv2.imshow', (['"""face"""', 'image'], {}), "('face', image)\n", (12227, 12242), False, 'import cv2\n'), ((12251, 12265), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (12262, 12265), False, 'import cv2\n'), ((9029, 9051), 'math.fabs', 'math.fabs', (['(r[0] - c[0])'], {}), '(r[0] - c[0])\n', (9038, 9051), False, 'import math\n'), ((9053, 9075), 'math.fabs', 'math.fabs', (['(r[1] - c[1])'], {}), '(r[1] - c[1])\n', (9062, 9075), False, 'import math\n'), ((9097, 9119), 'math.fabs', 'math.fabs', (['(l[0] - c[0])'], {}), '(l[0] - c[0])\n', (9106, 9119), False, 'import 
math\n'), ((9121, 9143), 'math.fabs', 'math.fabs', (['(l[1] - c[1])'], {}), '(l[1] - c[1])\n', (9130, 9143), False, 'import math\n'), ((9343, 9366), 'math.fabs', 'math.fabs', (['compare_face'], {}), '(compare_face)\n', (9352, 9366), False, 'import math\n'), ((2706, 2731), 'math.fabs', 'math.fabs', (['(i - landmark_x)'], {}), '(i - landmark_x)\n', (2715, 2731), False, 'import math\n'), ((2741, 2766), 'math.fabs', 'math.fabs', (['(j - landmark_y)'], {}), '(j - landmark_y)\n', (2750, 2766), False, 'import math\n'), ((5300, 5323), 'math.fabs', 'math.fabs', (['(r[0] - r1[0])'], {}), '(r[0] - r1[0])\n', (5309, 5323), False, 'import math\n'), ((5325, 5348), 'math.fabs', 'math.fabs', (['(r[1] - r1[1])'], {}), '(r[1] - r1[1])\n', (5334, 5348), False, 'import math\n'), ((5244, 5266), 'math.fabs', 'math.fabs', (['(r[0] - r[2])'], {}), '(r[0] - r[2])\n', (5253, 5266), False, 'import math\n'), ((9721, 9744), 'math.fabs', 'math.fabs', (['(cp - self.cp)'], {}), '(cp - self.cp)\n', (9730, 9744), False, 'import math\n'), ((11640, 11646), 'time.time', 'time', ([], {}), '()\n', (11644, 11646), False, 'from time import time\n'), ((5614, 5642), 'math.sqrt', 'math.sqrt', (['(ux * ux + uy * uy)'], {}), '(ux * ux + uy * uy)\n', (5623, 5642), False, 'import math\n'), ((9670, 9700), 'math.fabs', 'math.fabs', (['(defult_strength / 4)'], {}), '(defult_strength / 4)\n', (9679, 9700), False, 'import math\n'), ((9976, 10006), 'math.fabs', 'math.fabs', (['(defult_strength / 4)'], {}), '(defult_strength / 4)\n', (9985, 10006), False, 'import math\n')] |
import librosa
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import tensor as T
import numpy as np
from torch.nn.functional import conv1d
import torchopenl3.core
class CustomSTFT(nn.Module):
    """
    STFT implemented like kapre 0.1.4.

    The DFT is realized as two 1-D convolutions (one for the real and one
    for the imaginary part) whose fixed kernels are Hann-windowed cosine /
    sine basis functions, so the layer is differentiable end to end.

    Attributes
    ----------
    n_dft: int
        The window size for the STFT
    n_hop: int
        The hop (or stride) size; defaults to ``n_dft // 2``
    power_spectrogram: float
        2.0 to get power spectrogram, 1.0 to get amplitude spectrogram.
    return_decibel_spectrogram: bool
        Whether to return in decibel or not, i.e. returns
        log10(amplitude spectrogram) if True

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of spectrograms.

    Examples
    --------
        >>> stftlayer = CustomSTFT(n_dft = 512, n_hop = 242,
                        power_spectrogram = 2.0,
                        return_decibel_spectrogram=False)
        >>> stftlayer = speclayer(x)
    """

    def __init__(
        self,
        n_dft=512,
        n_hop=None,
        power_spectrogram=2.0,
        return_decibel_spectrogram=False,
    ):
        super().__init__()
        if n_hop is None:
            n_hop = n_dft // 2
        self.n_dft = n_dft
        self.n_hop = n_hop
        self.power_spectrogram = float(power_spectrogram)
        self.return_decibel_spectrogram = return_decibel_spectrogram
        dft_real_kernels, dft_imag_kernels = self.get_stft_kernels(self.n_dft)
        # get_stft_kernels returns arrays of shape (n_dft, 1, 1, n_freq);
        # squeeze(1) + swapaxes(0, 2) turns them into (n_freq, 1, n_dft),
        # i.e. conv1d weights with one output channel per frequency bin.
        self.register_buffer(
            "dft_real_kernels",
            T(dft_real_kernels, requires_grad=False, dtype=torch.float32)
            .squeeze(1)
            .swapaxes(0, 2),
        )
        self.register_buffer(
            "dft_imag_kernels",
            T(dft_imag_kernels, requires_grad=False, dtype=torch.float32)
            .squeeze(1)
            .swapaxes(0, 2),
        )

    def forward(self, x):
        """
        Convert a batch of waveforms to STFT forms.

        Parameters
        ----------
        x : torch tensor
            Waveform batch; the DFT kernels have in_channels == 1, so x is
            expected single-channel -- presumably (batch, 1, samples).
        """
        # Manually move the kernel buffers if the input is on CUDA but the
        # module was never moved with .to()/.cuda().
        if x.is_cuda and not self.dft_real_kernels.is_cuda:
            self.dft_real_kernels = self.dft_real_kernels.cuda()
            self.dft_imag_kernels = self.dft_imag_kernels.cuda()
        output_real = conv1d(
            x, self.dft_real_kernels, stride=self.n_hop, padding=0
        ).unsqueeze(3)
        output_imag = conv1d(
            x, self.dft_imag_kernels, stride=self.n_hop, padding=0
        ).unsqueeze(3)
        # Squared magnitude: |STFT|^2 = Re^2 + Im^2 (power spectrogram).
        output = output_real.pow(2) + output_imag.pow(2)
        if self.power_spectrogram != 2.0:
            # Rescale to |STFT|^power (e.g. power 1.0 -> amplitude spectrogram).
            output = torch.pow(torch.sqrt(output), self.power_spectrogram)
        if self.return_decibel_spectrogram:
            output = self.amplitude_to_decibel(output)
        return output

    def get_stft_kernels(self, n_dft):
        """
        Get the STFT kernels.
        Implemented similar to kapre=0.1.4

        Returns a pair of float32 arrays of shape (n_dft, 1, 1, n_dft//2 + 1)
        holding the Hann-windowed cosine (real) and negative sine (imaginary)
        DFT basis vectors.
        """
        nb_filter = int(n_dft // 2 + 1)
        # prepare DFT filters
        timesteps = np.array(range(n_dft))
        w_ks = np.arange(nb_filter) * 2 * np.pi / float(n_dft)
        dft_real_kernels = np.cos(w_ks.reshape(-1, 1) * timesteps.reshape(1, -1))
        dft_imag_kernels = -np.sin(w_ks.reshape(-1, 1) * timesteps.reshape(1, -1))
        # windowing DFT filters
        dft_window = librosa.filters.get_window(
            "hann", n_dft, fftbins=True
        )  # _hann(n_dft, sym=False)
        dft_window = dft_window.astype(np.float32)
        dft_window = dft_window.reshape((1, -1))
        dft_real_kernels = np.multiply(dft_real_kernels, dft_window)
        dft_imag_kernels = np.multiply(dft_imag_kernels, dft_window)
        dft_real_kernels = dft_real_kernels.transpose()
        dft_imag_kernels = dft_imag_kernels.transpose()
        dft_real_kernels = dft_real_kernels[:, np.newaxis, np.newaxis, :]
        dft_imag_kernels = dft_imag_kernels[:, np.newaxis, np.newaxis, :]
        return (
            dft_real_kernels.astype(np.float32),
            dft_imag_kernels.astype(np.float32),
        )

    def amplitude_to_decibel(self, x, amin=1e-10, dynamic_range=80.0):
        """
        Convert (linear) amplitude to decibel (log10(x)).
        Implemented similar to kapre=0.1.4

        Values are clamped to ``amin`` before the log, shifted so each
        example peaks at 0 dB, and floored at ``-dynamic_range`` dB.
        """
        # 10 * log10(x), computed as 10 * ln(x) / ln(10).
        log_spec = (
            10 * torch.log(torch.clamp(x, min=amin)) / np.log(10).astype(np.float32)
        )
        if x.ndim > 1:
            # Normalize per example: reduce over every non-batch axis.
            axis = tuple(range(x.ndim)[1:])
        else:
            axis = None
        log_spec = log_spec - torch.amax(log_spec, dim=axis, keepdims=True)
        log_spec = torch.clamp(log_spec, min=-1 * dynamic_range)
        return log_spec
class CustomMelSTFT(CustomSTFT):
    """
    MelSTFT implemented like kapre 0.1.4.

    Computes the power STFT via :class:`CustomSTFT` and projects it onto a
    fixed (non-trainable) librosa mel filterbank.

    Attributes
    ----------
    sr: int
        Sample rate of the input audio.
    n_dft: int
        The window size for the STFT
    n_hop: int
        The hop (or stride) size
    n_mels: int
        Number of mel bands.
    power_melgram: float
        2.0 to get power melgram, 1.0 to get amplitude melgram.
    return_decibel_melgram: bool
        Whether to return in decibel or not, i.e. returns
        log10(amplitude melgram) if True

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of mel spectrograms.

    Examples
    --------
        >>> stftlayer = CustomMelSTFT(sr=48000, n_dft=2048, n_hop=242,
                        power_melgram=1.0,
                        return_decibel_melgram=True)
        >>> stftlayer = speclayer(x)
    """

    def __init__(
        self,
        sr,
        n_dft=512,
        n_hop=None,
        n_mels=128,
        htk=True,
        power_melgram=1.0,
        return_decibel_melgram=False,
        padding="same",
    ):
        # Parent always computes the raw power spectrogram; mel projection,
        # power rescaling and dB conversion are applied here afterwards.
        super().__init__(
            n_dft=n_dft,
            n_hop=n_hop,
            power_spectrogram=2.0,
            return_decibel_spectrogram=False,
        )
        self.padding = padding
        self.sr = sr
        self.power_melgram = power_melgram
        self.return_decibel_melgram = return_decibel_melgram
        # Fixed mel filterbank (n_mels x frequency bins).
        mel_basis = librosa.filters.mel(
            sr=sr,
            n_fft=n_dft,
            n_mels=n_mels,
            fmin=0,
            fmax=sr // 2,
            htk=htk,
            norm=1,
        )
        self.register_buffer("mel_basis", T(mel_basis, requires_grad=False))

    def forward(self, x):
        # Manual device sync, same as in CustomSTFT.forward.
        if x.is_cuda and not self.dft_real_kernels.is_cuda:
            self.dft_real_kernels = self.dft_real_kernels.cuda()
            self.dft_imag_kernels = self.dft_imag_kernels.cuda()
            self.mel_basis = self.mel_basis.cuda()
        if self.padding == "same":
            x = self.custom_pad(x)
        output = super().forward(x)
        # Project the power spectrogram onto the mel basis.
        output = torch.matmul(self.mel_basis, output.squeeze(-1)).unsqueeze(-1)
        if self.power_melgram != 2.0:
            output = torch.pow(torch.sqrt(output), self.power_melgram)
        if self.return_decibel_melgram:
            output = self.amplitude_to_decibel(output)
        return output

    def custom_pad(self, x):
        """
        Pad sequence.
        Implemented similar to keras version used in kapre=0.1.4

        NOTE(review): uses ``in_width = self.sr``, i.e. assumes the input is
        exactly one second of audio -- confirm with callers.
        """
        filter_width = self.n_dft
        strides = self.n_hop
        in_width = self.sr
        # Keras 'same' padding: total pad so output width == ceil(in/stride).
        if in_width % strides == 0:
            pad_along_width = max(filter_width - strides, 0)
        else:
            pad_along_width = max(filter_width - (in_width % strides), 0)
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        x = torch.nn.ZeroPad2d((pad_left, pad_right, 0, 0))(x)
        return x
class PytorchOpenl3(nn.Module):
    """OpenL3-style audio embedding network.

    A spectrogram frontend (selected by ``input_repr``) followed by four
    conv/conv/pool blocks and a final pooling whose kernel (selected via
    ``embedding_size``) flattens to either a 512- or 6144-dim embedding.

    NOTE(review): an unrecognized ``input_repr`` leaves ``self.speclayer``
    undefined and forward() will fail -- confirm valid values are enforced
    upstream.
    """

    def __init__(self, content_type, input_repr, embedding_size):
        # Note: content_type is unused
        super(PytorchOpenl3, self).__init__()
        # Final pooling kernel per (input representation, embedding size).
        self.AUDIO_POOLING_SIZES = {
            "linear": {512: (32, 24), 6144: (8, 8)},
            "mel128": {512: (16, 24), 6144: (4, 8)},
            "mel256": {512: (32, 24), 6144: (8, 8)},
        }
        # Spectrogram frontend.
        if input_repr == "linear":
            self.speclayer = CustomSTFT(
                n_dft=512,
                n_hop=242,
                power_spectrogram=1.0,
                return_decibel_spectrogram=True,
            )
        elif input_repr == "mel128":
            self.speclayer = CustomMelSTFT(
                sr=48000,
                n_dft=2048,
                n_hop=242,
                n_mels=128,
                htk=True,
                power_melgram=1.0,
                return_decibel_melgram=True,
                padding="same",
            )
        elif input_repr == "mel256":
            self.speclayer = CustomMelSTFT(
                sr=48000,
                n_dft=2048,
                n_hop=242,
                n_mels=256,
                htk=True,
                power_melgram=1.0,
                return_decibel_melgram=True,
                padding="same",
            )
        self.input_repr = input_repr
        self.embedding_size = embedding_size
        # Block 1: 1 -> 64 -> 64 channels.
        self.batch_normalization_1 = self.__batch_normalization(
            2,
            "batch_normalization_1",
            num_features=1,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_1 = self.__conv(
            2,
            name="conv2d_1",
            in_channels=1,
            out_channels=64,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        self.batch_normalization_2 = self.__batch_normalization(
            2,
            "batch_normalization_2",
            num_features=64,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_2 = self.__conv(
            2,
            name="conv2d_2",
            in_channels=64,
            out_channels=64,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        # Block 2: 64 -> 128 -> 128 channels.
        self.batch_normalization_3 = self.__batch_normalization(
            2,
            "batch_normalization_3",
            num_features=64,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_3 = self.__conv(
            2,
            name="conv2d_3",
            in_channels=64,
            out_channels=128,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        self.batch_normalization_4 = self.__batch_normalization(
            2,
            "batch_normalization_4",
            num_features=128,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_4 = self.__conv(
            2,
            name="conv2d_4",
            in_channels=128,
            out_channels=128,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        # Block 3: 128 -> 256 -> 256 channels.
        self.batch_normalization_5 = self.__batch_normalization(
            2,
            "batch_normalization_5",
            num_features=128,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_5 = self.__conv(
            2,
            name="conv2d_5",
            in_channels=128,
            out_channels=256,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        self.batch_normalization_6 = self.__batch_normalization(
            2,
            "batch_normalization_6",
            num_features=256,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_6 = self.__conv(
            2,
            name="conv2d_6",
            in_channels=256,
            out_channels=256,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        # Block 4: 256 -> 512 -> 512 channels; last conv is the embedding.
        self.batch_normalization_7 = self.__batch_normalization(
            2,
            "batch_normalization_7",
            num_features=256,
            eps=0.001,
            momentum=0.99,
        )
        self.conv2d_7 = self.__conv(
            2,
            name="conv2d_7",
            in_channels=256,
            out_channels=512,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )
        self.batch_normalization_8 = self.__batch_normalization(
            2,
            "batch_normalization_8",
            num_features=512,
            eps=0.001,
            momentum=0.99,
        )
        self.audio_embedding_layer = self.__conv(
            2,
            name="audio_embedding_layer",
            in_channels=512,
            out_channels=512,
            kernel_size=(3, 3),
            stride=(1, 1),
            groups=1,
            bias=True,
        )

    def forward(self, x, keep_all_outputs=False):
        """Embed waveforms ``x``.

        Returns the flattened embedding, or -- when ``keep_all_outputs`` is
        True -- the list of every intermediate activation (for debugging /
        layer-by-layer comparison).
        """
        if keep_all_outputs:
            all_outputs = []
        # Frontend: spectrogram of shape (batch, freq, time, 1) -> NCHW.
        x = self.speclayer(x)
        x = x.squeeze(-1).unsqueeze(1)
        if keep_all_outputs:
            all_outputs.append(x)
        batch_normalization_1 = self.batch_normalization_1(x)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_1)
        # Explicit F.pad before each conv reproduces keras 'same' padding.
        conv2d_1_pad = F.pad(batch_normalization_1, (1, 1, 1, 1))
        conv2d_1 = self.conv2d_1(conv2d_1_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_1)
        batch_normalization_2 = self.batch_normalization_2(conv2d_1)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_2)
        activation_1 = F.relu(batch_normalization_2)
        if keep_all_outputs:
            all_outputs.append(activation_1)
        conv2d_2_pad = F.pad(activation_1, (1, 1, 1, 1))
        conv2d_2 = self.conv2d_2(conv2d_2_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_2)
        batch_normalization_3 = self.batch_normalization_3(conv2d_2)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_3)
        activation_2 = F.relu(batch_normalization_3)
        if keep_all_outputs:
            all_outputs.append(activation_2)
        max_pooling2d_1 = F.max_pool2d(
            activation_2,
            kernel_size=(2, 2),
            stride=(2, 2),
            padding=0,
            ceil_mode=False,
        )
        if keep_all_outputs:
            all_outputs.append(max_pooling2d_1)
        conv2d_3_pad = F.pad(max_pooling2d_1, (1, 1, 1, 1))
        conv2d_3 = self.conv2d_3(conv2d_3_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_3)
        batch_normalization_4 = self.batch_normalization_4(conv2d_3)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_4)
        activation_3 = F.relu(batch_normalization_4)
        if keep_all_outputs:
            all_outputs.append(activation_3)
        conv2d_4_pad = F.pad(activation_3, (1, 1, 1, 1))
        conv2d_4 = self.conv2d_4(conv2d_4_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_4)
        batch_normalization_5 = self.batch_normalization_5(conv2d_4)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_5)
        activation_4 = F.relu(batch_normalization_5)
        if keep_all_outputs:
            all_outputs.append(activation_4)
        max_pooling2d_2 = F.max_pool2d(
            activation_4,
            kernel_size=(2, 2),
            stride=(2, 2),
            padding=0,
            ceil_mode=False,
        )
        if keep_all_outputs:
            all_outputs.append(max_pooling2d_2)
        conv2d_5_pad = F.pad(max_pooling2d_2, (1, 1, 1, 1))
        conv2d_5 = self.conv2d_5(conv2d_5_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_5)
        batch_normalization_6 = self.batch_normalization_6(conv2d_5)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_6)
        activation_5 = F.relu(batch_normalization_6)
        if keep_all_outputs:
            all_outputs.append(activation_5)
        conv2d_6_pad = F.pad(activation_5, (1, 1, 1, 1))
        conv2d_6 = self.conv2d_6(conv2d_6_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_6)
        batch_normalization_7 = self.batch_normalization_7(conv2d_6)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_7)
        activation_6 = F.relu(batch_normalization_7)
        if keep_all_outputs:
            all_outputs.append(activation_6)
        max_pooling2d_3 = F.max_pool2d(
            activation_6,
            kernel_size=(2, 2),
            stride=(2, 2),
            padding=0,
            ceil_mode=False,
        )
        if keep_all_outputs:
            all_outputs.append(max_pooling2d_3)
        conv2d_7_pad = F.pad(max_pooling2d_3, (1, 1, 1, 1))
        conv2d_7 = self.conv2d_7(conv2d_7_pad)
        if keep_all_outputs:
            all_outputs.append(conv2d_7)
        batch_normalization_8 = self.batch_normalization_8(conv2d_7)
        if keep_all_outputs:
            all_outputs.append(batch_normalization_8)
        activation_7 = F.relu(batch_normalization_8)
        if keep_all_outputs:
            all_outputs.append(activation_7)
        audio_embedding_layer_pad = F.pad(activation_7, (1, 1, 1, 1))
        audio_embedding_layer = self.audio_embedding_layer(audio_embedding_layer_pad)
        if keep_all_outputs:
            all_outputs.append(audio_embedding_layer)
        # Final pooling collapses the remaining spatial grid; its kernel is
        # chosen so the flattened result has self.embedding_size elements.
        max_pooling2d_4 = F.max_pool2d(
            audio_embedding_layer,
            kernel_size=self.AUDIO_POOLING_SIZES[self.input_repr][self.embedding_size],
            stride=self.AUDIO_POOLING_SIZES[self.input_repr][self.embedding_size],
            padding=0,
            ceil_mode=False,
        )
        if keep_all_outputs:
            all_outputs.append(max_pooling2d_4)
        # Might just use view ?
        # Channel-last flatten (NCHW -> NHWC -> flat) to match the keras layout.
        squeeze = (
            max_pooling2d_4.swapaxes(1, 2)
            .swapaxes(2, 3)
            .reshape((max_pooling2d_4.shape[0], -1))
        )
        if keep_all_outputs:
            all_outputs.append(squeeze)
            return all_outputs
        else:
            return squeeze

    def __batch_normalization(self, dim, name, **kwargs):
        # Factory for BatchNorm of the given dimensionality; `name` is unused.
        if dim == 0 or dim == 1:
            layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2:
            layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3:
            layer = nn.BatchNorm3d(**kwargs)
        else:
            raise NotImplementedError()
        return layer

    def __conv(self, dim, name, **kwargs):
        # Factory for Conv layers of the given dimensionality; `name` is unused.
        if dim == 1:
            layer = nn.Conv1d(**kwargs)
        elif dim == 2:
            layer = nn.Conv2d(**kwargs)
        elif dim == 3:
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()
        return layer
def load_audio_embedding_model(input_repr, content_type, embedding_size):
    """Thin wrapper that delegates model construction to ``torchopenl3.core``."""
    core_loader = torchopenl3.core.load_audio_embedding_model
    return core_loader(input_repr, content_type, embedding_size)
| [
"torch.nn.ZeroPad2d",
"torch.nn.functional.conv1d",
"torch.sqrt",
"numpy.log",
"torch.nn.BatchNorm1d",
"torch.nn.functional.pad",
"numpy.arange",
"torch.nn.BatchNorm2d",
"numpy.multiply",
"torch.nn.BatchNorm3d",
"torch.nn.Conv3d",
"librosa.filters.mel",
"torch.nn.functional.relu",
"torch.n... | [((3277, 3332), 'librosa.filters.get_window', 'librosa.filters.get_window', (['"""hann"""', 'n_dft'], {'fftbins': '(True)'}), "('hann', n_dft, fftbins=True)\n", (3303, 3332), False, 'import librosa\n'), ((3509, 3550), 'numpy.multiply', 'np.multiply', (['dft_real_kernels', 'dft_window'], {}), '(dft_real_kernels, dft_window)\n', (3520, 3550), True, 'import numpy as np\n'), ((3578, 3619), 'numpy.multiply', 'np.multiply', (['dft_imag_kernels', 'dft_window'], {}), '(dft_imag_kernels, dft_window)\n', (3589, 3619), True, 'import numpy as np\n'), ((4522, 4567), 'torch.clamp', 'torch.clamp', (['log_spec'], {'min': '(-1 * dynamic_range)'}), '(log_spec, min=-1 * dynamic_range)\n', (4533, 4567), False, 'import torch\n'), ((5934, 6031), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': 'sr', 'n_fft': 'n_dft', 'n_mels': 'n_mels', 'fmin': '(0)', 'fmax': '(sr // 2)', 'htk': 'htk', 'norm': '(1)'}), '(sr=sr, n_fft=n_dft, n_mels=n_mels, fmin=0, fmax=sr // 2,\n htk=htk, norm=1)\n', (5953, 6031), False, 'import librosa\n'), ((12977, 13019), 'torch.nn.functional.pad', 'F.pad', (['batch_normalization_1', '(1, 1, 1, 1)'], {}), '(batch_normalization_1, (1, 1, 1, 1))\n', (12982, 13019), True, 'import torch.nn.functional as F\n'), ((13312, 13341), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_2'], {}), '(batch_normalization_2)\n', (13318, 13341), True, 'import torch.nn.functional as F\n'), ((13439, 13472), 'torch.nn.functional.pad', 'F.pad', (['activation_1', '(1, 1, 1, 1)'], {}), '(activation_1, (1, 1, 1, 1))\n', (13444, 13472), True, 'import torch.nn.functional as F\n'), ((13765, 13794), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_3'], {}), '(batch_normalization_3)\n', (13771, 13794), True, 'import torch.nn.functional as F\n'), ((13895, 13988), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['activation_2'], {'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'padding': '(0)', 'ceil_mode': '(False)'}), '(activation_2, kernel_size=(2, 2), 
stride=(2, 2), padding=0,\n ceil_mode=False)\n', (13907, 13988), True, 'import torch.nn.functional as F\n'), ((14156, 14192), 'torch.nn.functional.pad', 'F.pad', (['max_pooling2d_1', '(1, 1, 1, 1)'], {}), '(max_pooling2d_1, (1, 1, 1, 1))\n', (14161, 14192), True, 'import torch.nn.functional as F\n'), ((14485, 14514), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_4'], {}), '(batch_normalization_4)\n', (14491, 14514), True, 'import torch.nn.functional as F\n'), ((14612, 14645), 'torch.nn.functional.pad', 'F.pad', (['activation_3', '(1, 1, 1, 1)'], {}), '(activation_3, (1, 1, 1, 1))\n', (14617, 14645), True, 'import torch.nn.functional as F\n'), ((14938, 14967), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_5'], {}), '(batch_normalization_5)\n', (14944, 14967), True, 'import torch.nn.functional as F\n'), ((15068, 15161), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['activation_4'], {'kernel_size': '(2, 2)', 'stride': '(2, 2)', 'padding': '(0)', 'ceil_mode': '(False)'}), '(activation_4, kernel_size=(2, 2), stride=(2, 2), padding=0,\n ceil_mode=False)\n', (15080, 15161), True, 'import torch.nn.functional as F\n'), ((15329, 15365), 'torch.nn.functional.pad', 'F.pad', (['max_pooling2d_2', '(1, 1, 1, 1)'], {}), '(max_pooling2d_2, (1, 1, 1, 1))\n', (15334, 15365), True, 'import torch.nn.functional as F\n'), ((15658, 15687), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_6'], {}), '(batch_normalization_6)\n', (15664, 15687), True, 'import torch.nn.functional as F\n'), ((15785, 15818), 'torch.nn.functional.pad', 'F.pad', (['activation_5', '(1, 1, 1, 1)'], {}), '(activation_5, (1, 1, 1, 1))\n', (15790, 15818), True, 'import torch.nn.functional as F\n'), ((16111, 16140), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_7'], {}), '(batch_normalization_7)\n', (16117, 16140), True, 'import torch.nn.functional as F\n'), ((16241, 16334), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['activation_6'], {'kernel_size': 
'(2, 2)', 'stride': '(2, 2)', 'padding': '(0)', 'ceil_mode': '(False)'}), '(activation_6, kernel_size=(2, 2), stride=(2, 2), padding=0,\n ceil_mode=False)\n', (16253, 16334), True, 'import torch.nn.functional as F\n'), ((16502, 16538), 'torch.nn.functional.pad', 'F.pad', (['max_pooling2d_3', '(1, 1, 1, 1)'], {}), '(max_pooling2d_3, (1, 1, 1, 1))\n', (16507, 16538), True, 'import torch.nn.functional as F\n'), ((16831, 16860), 'torch.nn.functional.relu', 'F.relu', (['batch_normalization_8'], {}), '(batch_normalization_8)\n', (16837, 16860), True, 'import torch.nn.functional as F\n'), ((16971, 17004), 'torch.nn.functional.pad', 'F.pad', (['activation_7', '(1, 1, 1, 1)'], {}), '(activation_7, (1, 1, 1, 1))\n', (16976, 17004), True, 'import torch.nn.functional as F\n'), ((17200, 17420), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['audio_embedding_layer'], {'kernel_size': 'self.AUDIO_POOLING_SIZES[self.input_repr][self.embedding_size]', 'stride': 'self.AUDIO_POOLING_SIZES[self.input_repr][self.embedding_size]', 'padding': '(0)', 'ceil_mode': '(False)'}), '(audio_embedding_layer, kernel_size=self.AUDIO_POOLING_SIZES[\n self.input_repr][self.embedding_size], stride=self.AUDIO_POOLING_SIZES[\n self.input_repr][self.embedding_size], padding=0, ceil_mode=False)\n', (17212, 17420), True, 'import torch.nn.functional as F\n'), ((4457, 4502), 'torch.amax', 'torch.amax', (['log_spec'], {'dim': 'axis', 'keepdims': '(True)'}), '(log_spec, dim=axis, keepdims=True)\n', (4467, 4502), False, 'import torch\n'), ((6165, 6198), 'torch.tensor', 'T', (['mel_basis'], {'requires_grad': '(False)'}), '(mel_basis, requires_grad=False)\n', (6166, 6198), True, 'from torch import tensor as T\n'), ((7403, 7450), 'torch.nn.ZeroPad2d', 'torch.nn.ZeroPad2d', (['(pad_left, pad_right, 0, 0)'], {}), '((pad_left, pad_right, 0, 0))\n', (7421, 7450), False, 'import torch\n'), ((17999, 18023), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ([], {}), '(**kwargs)\n', (18013, 18023), True, 'import torch.nn as 
nn\n'), ((18320, 18339), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {}), '(**kwargs)\n', (18329, 18339), True, 'import torch.nn as nn\n'), ((2229, 2291), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.dft_real_kernels'], {'stride': 'self.n_hop', 'padding': '(0)'}), '(x, self.dft_real_kernels, stride=self.n_hop, padding=0)\n', (2235, 2291), False, 'from torch.nn.functional import conv1d\n'), ((2349, 2411), 'torch.nn.functional.conv1d', 'conv1d', (['x', 'self.dft_imag_kernels'], {'stride': 'self.n_hop', 'padding': '(0)'}), '(x, self.dft_imag_kernels, stride=self.n_hop, padding=0)\n', (2355, 2411), False, 'from torch.nn.functional import conv1d\n'), ((2578, 2596), 'torch.sqrt', 'torch.sqrt', (['output'], {}), '(output)\n', (2588, 2596), False, 'import torch\n'), ((6727, 6745), 'torch.sqrt', 'torch.sqrt', (['output'], {}), '(output)\n', (6737, 6745), False, 'import torch\n'), ((18067, 18091), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {}), '(**kwargs)\n', (18081, 18091), True, 'import torch.nn as nn\n'), ((18383, 18402), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {}), '(**kwargs)\n', (18392, 18402), True, 'import torch.nn as nn\n'), ((3010, 3030), 'numpy.arange', 'np.arange', (['nb_filter'], {}), '(nb_filter)\n', (3019, 3030), True, 'import numpy as np\n'), ((4253, 4277), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'amin'}), '(x, min=amin)\n', (4264, 4277), False, 'import torch\n'), ((4281, 4291), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (4287, 4291), True, 'import numpy as np\n'), ((18135, 18159), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', ([], {}), '(**kwargs)\n', (18149, 18159), True, 'import torch.nn as nn\n'), ((18446, 18465), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {}), '(**kwargs)\n', (18455, 18465), True, 'import torch.nn as nn\n'), ((1525, 1586), 'torch.tensor', 'T', (['dft_real_kernels'], {'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(dft_real_kernels, requires_grad=False, dtype=torch.float32)\n', (1526, 1586), True, 'from torch import tensor 
as T\n'), ((1724, 1785), 'torch.tensor', 'T', (['dft_imag_kernels'], {'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(dft_imag_kernels, requires_grad=False, dtype=torch.float32)\n', (1725, 1785), True, 'from torch import tensor as T\n')] |
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
from basicsr.utils.registry import ARCH_REGISTRY
import torch
class add_attn(nn.Module):
    """Additive attention gate: modulates feature map ``x`` by a gating signal ``g``."""

    def __init__(self, x_channels, g_channels=256):
        super(add_attn, self).__init__()
        # Output projection applied to the gated features.
        self.W = nn.Sequential(
            nn.Conv2d(x_channels, x_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(x_channels),
        )
        # theta downsamples x by 2; phi projects g to the same channel count.
        self.theta = nn.Conv2d(x_channels, x_channels, kernel_size=2, stride=2, padding=0, bias=False)
        self.phi = nn.Conv2d(g_channels, x_channels, kernel_size=1, stride=1, padding=0, bias=True)
        # psi collapses the joint features to a single attention logit channel.
        self.psi = nn.Conv2d(x_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, x, g):
        """Return ``(gated_features, attention_map)``; the map lies in [0, 1]."""
        assert x.size(0) == g.size(0)
        projected_x = self.theta(x)
        projected_g = F.interpolate(
            self.phi(g), size=projected_x.size()[2:], mode='bilinear', align_corners=False
        )
        joint = F.relu(projected_x + projected_g, inplace=True)
        attention = torch.sigmoid(self.psi(joint))
        attention = F.interpolate(
            attention, size=x.size()[2:], mode='bilinear', align_corners=False
        )
        gated = attention.expand_as(x) * x
        return self.W(gated), attention
class unetCat(nn.Module):
    """Decoder-side upsample-and-concatenate block with a spectral-normed conv."""

    def __init__(self, dim_in, dim_out):
        super(unetCat, self).__init__()
        norm = spectral_norm
        self.convU = norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1, bias=False))

    def forward(self, input_1, input_2):
        """Upsample ``input_2`` x2, convolve, pad ``input_1`` to match, concat on channels."""
        upsampled = F.interpolate(input_2, scale_factor=2, mode='bilinear', align_corners=False)
        output_2 = F.leaky_relu(self.convU(upsampled), negative_slope=0.2, inplace=True)
        offset = output_2.size()[2] - input_1.size()[2]
        # Same amount of padding on all four sides (assumes an even offset).
        pad = [offset // 2] * 4
        output_1 = F.pad(input_1, pad)
        return torch.cat([output_1, output_2], 1)
class UNetDiscriminatorSN(nn.Module):
    """Defines a U-Net discriminator with spectral normalization (SN).

    NOTE(review): this ``forward`` only runs the encoder, gating and
    attention blocks and returns the three attention maps (used for the
    visualization script below); the decoder layers (cat_*, conv4-conv9)
    are constructed but never called here -- presumably kept so full
    discriminator checkpoints still load with ``strict=True``.
    """

    def __init__(self, num_in_ch, num_feat=64):
        super(UNetDiscriminatorSN, self).__init__()
        norm = spectral_norm
        # Encoder: stride-2 convs halve the spatial size at each level.
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 3, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 3, 2, 1, bias=False))
        # Center
        self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 3, 2, 1, bias=False))
        # NOTE(review): kernel_size=1 with padding=1 grows the spatial dims
        # by 2; the attention blocks interpolate the gating signal to the
        # right size anyway, but confirm this padding is intentional.
        self.gating = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 1, 1, 1, bias=False))
        # attention Blocks
        self.attn_1 = add_attn(x_channels=num_feat * 4, g_channels=num_feat * 4)
        self.attn_2 = add_attn(x_channels=num_feat * 2, g_channels=num_feat * 4)
        self.attn_3 = add_attn(x_channels=num_feat, g_channels=num_feat * 4)
        # Cat
        self.cat_1 = unetCat(dim_in=num_feat * 8, dim_out=num_feat * 4)
        self.cat_2 = unetCat(dim_in=num_feat * 4, dim_out=num_feat * 2)
        self.cat_3 = unetCat(dim_in=num_feat * 2, dim_out=num_feat)
        # upsample
        self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
        # extra
        self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)

    def forward(self, x):
        """Return the three attention maps ``(ly1, ly2, ly3)`` for input ``x``."""
        x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
        x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
        x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
        x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
        gated = F.leaky_relu(self.gating(x3), negative_slope=0.2, inplace=True)
        # Attention
        attn1, ly1 = self.attn_1(x2, gated)
        attn2, ly2 = self.attn_2(x1, gated)
        attn3, ly3 = self.attn_3(x0, gated)
        return (ly1, ly2, ly3)
class multiscale(nn.Module):
    """Runs ``num_D`` UNet discriminators on progressively downsampled copies of the input."""

    def __init__(self, num_in_ch, num_feat=64, num_D=2):
        super(multiscale, self).__init__()
        self.num_D = num_D
        # One independent discriminator per scale, stored as layer0..layerN-1.
        for idx in range(num_D):
            setattr(self, 'layer' + str(idx), UNetDiscriminatorSN(num_in_ch, num_feat=num_feat))
        self.downsample = nn.AvgPool2d(4, stride=2, padding=[1, 1])

    def singleD_forward(self, model, input):
        # Kept as a hook point for subclasses that need per-discriminator logic.
        return model(input)

    def forward(self, input):
        """Return a list of per-scale outputs, coarsest discriminator first."""
        result = []
        current = input
        for idx in range(self.num_D):
            model = getattr(self, 'layer' + str(self.num_D - 1 - idx))
            result.append(self.singleD_forward(model, current))
            if idx != self.num_D - 1:
                current = self.downsample(current)
        return result
if __name__ == "__main__":
    # Visualization script: loads every 5000-iteration discriminator
    # checkpoint and dumps its attention maps as JET heat maps.
    # (Removed unused `torchsummary.summary` / `PIL.Image` imports and the
    # unused `p = uNet.state_dict()` call from the original.)
    import argparse
    import os
    import shutil

    import cv2
    import numpy as np
    import torchvision.transforms as transforms

    parser = argparse.ArgumentParser()
    parser.add_argument('--img_path', default=r"..\inputs\1.png", help='image path')
    parser.add_argument('--model_path', default=r"..\experiments\pretrained_models\multi", help='multi model list path')
    parser.add_argument('--save_path', default=r".\Visual", help='path to save the heat map')
    parser.add_argument('--Disc_num', default=2, help='path to save the heat map')
    args = parser.parse_args()

    dNum = args.Disc_num
    uNet = multiscale(3, num_feat=64, num_D=dNum)

    img = cv2.imread(args.img_path)
    save_dir = args.save_path

    # Start from an empty output directory.
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.mkdir(save_dir)

    for i in range(5000, 200000, 5000):
        ckpt_path = args.model_path + r"\net_d_" + str(i) + ".pth"
        state = torch.load(ckpt_path)
        uNet.load_state_dict(state["params"], strict=True)

        inp = transforms.ToTensor()(img).unsqueeze(0)
        AList = uNet(inp)

        DiscNum = 1
        for attn_layers in AList:
            # Each discriminator yields three attention maps (ly1, ly2, ly3);
            # save each as a color-mapped heat map.
            for layer_idx, attn in enumerate(attn_layers, start=1):
                heat = np.squeeze(attn.detach().numpy()) * 255
                heat = cv2.applyColorMap(np.uint8(heat), cv2.COLORMAP_JET)
                save_path = (save_dir + "\\A" + str(layer_idx) + "_D" + str(DiscNum)
                             + "_" + str(i) + ".png")
                cv2.imwrite(save_path, heat)
            DiscNum += 1
| [
"numpy.uint8",
"os.path.exists",
"torch.nn.BatchNorm2d",
"cv2.imwrite",
"argparse.ArgumentParser",
"torch.load",
"torch.nn.Conv2d",
"shutil.rmtree",
"torch.nn.AvgPool2d",
"numpy.squeeze",
"os.mkdir",
"torch.nn.functional.interpolate",
"torch.nn.functional.relu",
"torch.nn.functional.pad",
... | [((5411, 5436), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5434, 5436), False, 'import argparse\n'), ((6116, 6135), 'cv2.imread', 'cv2.imread', (['imgpath'], {}), '(imgpath)\n', (6126, 6135), False, 'import cv2\n'), ((573, 659), 'torch.nn.Conv2d', 'nn.Conv2d', (['x_channels', 'x_channels'], {'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)', 'bias': '(False)'}), '(x_channels, x_channels, kernel_size=2, stride=2, padding=0, bias=\n False)\n', (582, 659), True, 'from torch import nn as nn\n'), ((675, 760), 'torch.nn.Conv2d', 'nn.Conv2d', (['g_channels', 'x_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(g_channels, x_channels, kernel_size=1, stride=1, padding=0, bias=True\n )\n', (684, 760), True, 'from torch import nn as nn\n'), ((775, 863), 'torch.nn.Conv2d', 'nn.Conv2d', (['x_channels'], {'out_channels': '(1)', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(x_channels, out_channels=1, kernel_size=1, stride=1, padding=0,\n bias=True)\n', (784, 863), True, 'from torch import nn as nn\n'), ((1181, 1218), 'torch.nn.functional.relu', 'F.relu', (['(theta_x + phi_g)'], {'inplace': '(True)'}), '(theta_x + phi_g, inplace=True)\n', (1187, 1218), True, 'from torch.nn import functional as F\n'), ((1289, 1377), 'torch.nn.functional.interpolate', 'F.interpolate', (['sigm_psi_f'], {'size': 'input_size[2:]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(sigm_psi_f, size=input_size[2:], mode='bilinear',\n align_corners=False)\n", (1302, 1377), True, 'from torch.nn import functional as F\n'), ((1765, 1841), 'torch.nn.functional.interpolate', 'F.interpolate', (['input_2'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(input_2, scale_factor=2, mode='bilinear', align_corners=False)\n", (1778, 1841), True, 'from torch.nn import functional as F\n'), ((2055, 2078), 'torch.nn.functional.pad', 'F.pad', (['input_1', 'padding'], 
{}), '(input_1, padding)\n', (2060, 2078), True, 'from torch.nn import functional as F\n'), ((2091, 2125), 'torch.cat', 'torch.cat', (['[output_1, output_2]', '(1)'], {}), '([output_1, output_2], 1)\n', (2100, 2125), False, 'import torch\n'), ((2408, 2474), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_in_ch', 'num_feat'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)\n', (2417, 2474), True, 'from torch import nn as nn\n'), ((3798, 3829), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_feat', '(1)', '(3)', '(1)', '(1)'], {}), '(num_feat, 1, 3, 1, 1)\n', (3807, 3829), True, 'from torch import nn as nn\n'), ((4763, 4804), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(4)'], {'stride': '(2)', 'padding': '[1, 1]'}), '(4, stride=2, padding=[1, 1])\n', (4775, 4804), True, 'from torch import nn as nn\n'), ((6180, 6204), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (6194, 6204), False, 'import os\n'), ((6214, 6232), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (6222, 6232), False, 'import os\n'), ((6251, 6274), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (6264, 6274), False, 'import shutil\n'), ((6283, 6301), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (6291, 6301), False, 'import os\n'), ((6411, 6427), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (6421, 6427), False, 'import torch\n'), ((453, 522), 'torch.nn.Conv2d', 'nn.Conv2d', (['x_channels', 'x_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(x_channels, x_channels, kernel_size=1, stride=1, padding=0)\n', (462, 522), True, 'from torch import nn as nn\n'), ((524, 550), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['x_channels'], {}), '(x_channels)\n', (538, 550), True, 'from torch import nn as nn\n'), ((1635, 1682), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(dim_in, dim_out, 3, 1, 1, 
bias=False)\n', (1644, 1682), True, 'from torch import nn as nn\n'), ((2502, 2556), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_feat', '(num_feat * 2)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(num_feat, num_feat * 2, 3, 2, 1, bias=False)\n', (2511, 2556), True, 'from torch import nn as nn\n'), ((2584, 2642), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 2)', '(num_feat * 4)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(num_feat * 2, num_feat * 4, 3, 2, 1, bias=False)\n', (2593, 2642), True, 'from torch import nn as nn\n'), ((2688, 2746), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 4)', '(num_feat * 8)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(num_feat * 4, num_feat * 8, 3, 2, 1, bias=False)\n', (2697, 2746), True, 'from torch import nn as nn\n'), ((2776, 2834), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 8)', '(num_feat * 4)', '(1)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat * 8, num_feat * 4, 1, 1, 1, bias=False)\n', (2785, 2834), True, 'from torch import nn as nn\n'), ((3376, 3434), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 8)', '(num_feat * 4)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)\n', (3385, 3434), True, 'from torch import nn as nn\n'), ((3462, 3520), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 4)', '(num_feat * 2)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)\n', (3471, 3520), True, 'from torch import nn as nn\n'), ((3548, 3602), 'torch.nn.Conv2d', 'nn.Conv2d', (['(num_feat * 2)', 'num_feat', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat * 2, num_feat, 3, 1, 1, bias=False)\n', (3557, 3602), True, 'from torch import nn as nn\n'), ((3647, 3697), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_feat', 'num_feat', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat, num_feat, 3, 1, 1, bias=False)\n', (3656, 3697), True, 'from torch import nn as nn\n'), ((3725, 3775), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_feat', 'num_feat', 
'(3)', '(1)', '(1)'], {'bias': '(False)'}), '(num_feat, num_feat, 3, 1, 1, bias=False)\n', (3734, 3775), True, 'from torch import nn as nn\n'), ((6531, 6552), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6550, 6552), True, 'import torchvision.transforms as transforms\n'), ((6780, 6794), 'numpy.squeeze', 'np.squeeze', (['A1'], {}), '(A1)\n', (6790, 6794), True, 'import numpy as np\n'), ((6983, 7009), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'A1'], {}), '(save_path, A1)\n', (6994, 7009), False, 'import cv2\n'), ((7078, 7092), 'numpy.squeeze', 'np.squeeze', (['A2'], {}), '(A2)\n', (7088, 7092), True, 'import numpy as np\n'), ((7280, 7306), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'A2'], {}), '(save_path, A2)\n', (7291, 7306), False, 'import cv2\n'), ((7375, 7389), 'numpy.squeeze', 'np.squeeze', (['A3'], {}), '(A3)\n', (7385, 7389), True, 'import numpy as np\n'), ((7577, 7603), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'A3'], {}), '(save_path, A3)\n', (7588, 7603), False, 'import cv2\n'), ((6856, 6868), 'numpy.uint8', 'np.uint8', (['A1'], {}), '(A1)\n', (6864, 6868), True, 'import numpy as np\n'), ((7154, 7166), 'numpy.uint8', 'np.uint8', (['A2'], {}), '(A2)\n', (7162, 7166), True, 'import numpy as np\n'), ((7451, 7463), 'numpy.uint8', 'np.uint8', (['A3'], {}), '(A3)\n', (7459, 7463), True, 'import numpy as np\n')] |
"""
Runs a model on a single node across N-gpus.
"""
import os
from argparse import ArgumentParser
import pathlib
import numpy as np
import torch
import pytorch_lightning as pl
from lightning_models import LightningModel
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler
# from lr_logger import LearningRateLogger
from notifications import notify
from callbacks import CSVLogger
from lr_finder import LRFinder
def get_last_version(exp_path):
    """Return the highest experiment version number under *exp_path*.

    Version directories are expected to be named ``version_<int>``.
    Returns ``None`` when no such directory exists.
    """
    candidates = list(pathlib.Path(exp_path).glob('version*'))
    if not candidates:
        return None
    return max(int(p.name.split('version_')[-1]) for p in candidates)
def get_last_epoch(ckpt_path):
    """Return the newest epoch number among ``*.ckpt`` files in *ckpt_path*.

    Checkpoints are expected to be named ``epoch=<int>.ckpt``.
    Returns ``None`` when the directory holds no checkpoints.
    """
    candidates = list(pathlib.Path(ckpt_path).glob('*.ckpt'))
    if not candidates:
        return None
    return max(int(p.stem.split('=')[-1]) for p in candidates)
def get_prof_index(prof_path):
    """Return the next free profiler-log index in *prof_path*.

    Logs are expected to be named ``profile_<int>.log``; the result is one
    past the highest existing index, or 0 for an empty directory.
    """
    candidates = list(pathlib.Path(prof_path).glob('*.log'))
    if not candidates:
        return 0
    return max(int(p.stem.split('_')[-1]) for p in candidates) + 1
def parse_range(value):
    """Parse a ``[start,stop,num]`` string into ``(float, float, int)``.

    Surrounding brackets or parentheses are stripped; any extra fields
    beyond the third are ignored.
    """
    fields = value.strip('[]()').split(',')
    return float(fields[0]), float(fields[1]), int(fields[2])
def parse_list(value):
    """Parse a comma-separated ``[a,b,...]`` string into a Python list.

    Each token is evaluated as a Python literal when possible; tokens
    that are not valid literals are kept as raw strings.
    """
    import ast

    def _coerce(token):
        try:
            return ast.literal_eval(token)
        except ValueError:
            return token

    return [_coerce(token) for token in value.strip('[]()').split(',')]
def main(hparams):
    """Run one full training session with the given hyperparameters.

    Creates the experiment directory, optionally resumes from a previous
    checkpoint, wires up logging/checkpointing/profiling, optionally runs
    an LR-range test, then fits the model.

    :param hparams: Namespace produced by the CLI parser of this script.
    :return: the (possibly updated) hparams namespace.
    """
    # Set random seed before anything starts
    hparams.seed = pl.seed_everything(hparams.seed)
    save_dir = pathlib.Path(hparams.model_path) / hparams.exp_name
    exp_path = save_dir / hparams.arch
    exp_path.mkdir(parents=True, exist_ok=True)
    if hparams.cont is not None:
        # BUGFIX: argparse always yields a string, so the old
        # `type(hparams.cont) == int` branch was dead and
        # `--continue_from 3` was silently treated like 'last'.
        # Parse numeric strings as explicit version numbers.
        cont = str(hparams.cont)
        if cont.lstrip('-').isdigit():
            version = int(cont)
        else:  # e.g. 'last'
            version = get_last_version(exp_path)
        if version is not None:
            ckpt_path = exp_path / f'version_{version}' / 'checkpoints'
            epoch = get_last_epoch(ckpt_path)
            resume_path = ckpt_path / f'epoch={epoch}.ckpt'
            hparams.last_epoch = epoch
    else:
        version = None
    if version is None:
        # Fresh run: no checkpoint to resume from.
        epoch = None
        resume_path = None
        hparams.last_epoch = -1
    # ------------------------
    # 1 INIT TRAINER
    # ------------------------
    # ---- Early Stopping ----
    # A patience of None means "never stop early" (patience == max epochs).
    if hparams.patience is None:
        hparams.patience = hparams.epochs
    early_stop_callback = EarlyStopping(
        monitor='val_loss',
        min_delta=0.0,
        patience=hparams.patience,
        verbose=True,
        mode='min'
    )
    # ---- Logger ----
    logger = TensorBoardLogger(
        save_dir=save_dir,
        name=hparams.arch,
        version=None
    )
    actual_path = save_dir / logger.name / f'version_{logger.version}'
    # ---- Optional Profiler ----
    if hparams.profiler is not None:
        prof_path = actual_path / 'profiles'
        prof_path.mkdir(parents=True, exist_ok=True)
        prof_index = get_prof_index(prof_path)
        prof_filename = prof_path / f'profile_{prof_index}.log'
        if hparams.profiler == 'simple' or hparams.profiler == '':
            profiler = SimpleProfiler(output_filename=prof_filename)
        elif hparams.profiler == 'advanced':
            profiler = AdvancedProfiler(output_filename=prof_filename)
        else:
            message = f'Wrong profiler choice [{hparams.profiler}]. \
                Supported profilers include \
                [simple (True/empty), advanced]'
            raise ValueError(message)
    else:
        profiler = False
    # ---- Model Checkpoint ----
    ckpt_path = actual_path / 'checkpoints'
    ckpt_path.mkdir(parents=True, exist_ok=True)
    checkpoint_callback = ModelCheckpoint(
        filepath=ckpt_path, monitor='val_loss',
        verbose=False, save_last=True,
        save_top_k=5, save_weights_only=False,
        mode='auto', period=1, prefix=''
    )
    # -------- Custom Callbacks --------
    csv_logger = CSVLogger()
    callback_list = [csv_logger]
    # -------- Trainer --------
    trainer = Trainer(
        weights_summary='full',
        weights_save_path=save_dir,
        gpus=hparams.gpus,
        max_epochs=hparams.epochs,
        precision=hparams.precision,
        fast_dev_run=hparams.debug,
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        profiler=profiler,
        limit_train_batches=hparams.train_subset,
        limit_val_batches=hparams.val_subset,
        resume_from_checkpoint=resume_path,
        callbacks=callback_list,
        benchmark=True,  # optimized CUDA convolution algorithm
        progress_bar_refresh_rate=int(not hparams.silent),
    )
    # -----------------------------
    # 2 FIND INITIAL LEARNING RATE
    # -----------------------------
    if hparams.lr_finder or hparams.scheduler == 'clr':
        mode = 'exponential'
        finder = LRFinder(hparams, LightningModel,
                          num_epochs=hparams.lr_epochs, mode=mode,
                          min_lr=hparams.lr_min, max_lr=hparams.lr_max)
        finder.fit()
        # Set learning rate bounds from the finder's suggestion
        res, _, _ = finder.suggestion()
        hparams.learning_rate = res.best_lr
        hparams.base_lr = res.best_lr
        hparams.max_lr = res.max_lr
        print('######### Learning Rate Finder #########')
        print(f"LR range = ({finder.min_lr:.3e}, {finder.max_lr:.3e})")
        print(f"Sugg. (Best LR = {res.best_lr:.3e})")
        print(f"Sugg. (Min. LR = {res.min_lr:.3e})")
        print(f"Sugg. (Max. LR = {res.max_lr:.3e})")
        print('######### Learning Rate Finder #########')
        # Save LR Finder results
        finder_path = actual_path / 'lr_finder'
        finder.save(finder_path)
        if hparams.lr_plot:
            # Plot (matplotlib imported lazily: only needed here)
            import matplotlib.pyplot as plt
            finder.plot(save_path=finder_path)
            finder.plot_grad(save_path=finder_path)
            if hparams.lr_show_plot:
                plt.show()
        # Try to release memory
        del finder
    # ------------------------
    # 3 INIT LIGHTNING MODEL
    # ------------------------
    if version is None:
        model = LightningModel(hparams)
    else:
        model = LightningModel.load_from_checkpoint(resume_path)
    # ------------------------
    # 4 START TRAINING
    # ------------------------
    trainer.fit(model)
    return hparams
if __name__ == '__main__':
    # ------------------------
    # TRAINING ARGUMENTS
    # ------------------------
    # these are project-wide arguments
    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = ArgumentParser(add_help=False)
    # Experiment params
    parent_parser.add_argument(
        '--model_path',
        dest='model_path',
        default='saved_models',
        help='Where the trained model will be saved \
            (Default: ./saved_models).'
    )
    parent_parser.add_argument(
        '--exp_name',
        dest='exp_name',
        default='resnet_cifar10',
        help='Experiment name (Default: resnet_cifar10).'
    )
    parent_parser.add_argument(
        '--seed',
        dest='seed',
        type=int,
        help='Random seed (Default: None).'
    )
    parent_parser.add_argument(
        '--continue_from',
        dest='cont',
        help='Continue training from a previous checkpoint. \
            Possible values: [last (str), a version number (int)].'
    )
    # GPU params
    parent_parser.add_argument(
        '--gpus', '-g',
        dest='gpus',
        type=int,
        default=0,
        help='Which (or how many) GPUs to train on. \
            Possible value types: [list, str, int].'
    )
    parent_parser.add_argument(
        '--precision',
        dest='precision',
        type=int,
        default=32,
        help='Choose float precision. Possible values: \
            [32 (default), 16] bits.'
    )
    # Boolean params
    parent_parser.add_argument(
        '--debug',
        dest='debug',
        action='store_true',
        help='Runs 1 batch of train, test and val to find any bugs \
            (ie: a sort of unit test).'
    )
    parent_parser.add_argument(
        '--profiler',
        dest='profiler',
        nargs='?', const='',
        help='Activate profiler. Possible values: \
            [None (default), simple, advanced].'
    )
    parent_parser.add_argument(
        '--silent',
        dest='silent',
        action='store_true',
        help='Silence the progress bar output. This is useful \
            when running on Google Colab, since it freezes \
            your web browser when too much information is printed.'
    )
    parent_parser.add_argument(
        '--notify',
        dest='telegram',
        action='store_true',
        help='Notify start and stop of training via Telegram. \
            A telegram.json file must be provided on this folder.'
    )
    # Dataset params
    parent_parser.add_argument(
        '--train_subset',
        dest='train_subset',
        type=float, default=1.0,
        help='Fraction of training dataset to use. \
            (Default: 1.0).'
    )
    parent_parser.add_argument(
        '--val_subset',
        dest='val_subset',
        type=float, default=1.0,
        help='Fraction of validation dataset to use. \
            (Default: 1.0).'
    )
    # Learning Rate Finder params
    parent_parser.add_argument(
        '--lr_finder',
        dest='lr_finder',
        action='store_true',
        help='Get initial learning rate via a LR Finder.'
    )
    parent_parser.add_argument(
        '--lr_min',
        dest='lr_min',
        type=float, default=1e-7,
        help='Minimum learning rate value for the LR Finder.'
    )
    parent_parser.add_argument(
        '--lr_max',
        dest='lr_max',
        type=float, default=1,
        help='Maximum learning rate value for the LR Finder.'
    )
    parent_parser.add_argument(
        '--lr_epochs',
        dest='lr_epochs',
        type=int, default=10,
        help='Number of epochs to run the with LR Finder. \
            (Default: 10).'
    )
    parent_parser.add_argument(
        '--lr_plot',
        dest='lr_plot',
        action='store_true',
        help='Plot LR Finder results.'
    )
    parent_parser.add_argument(
        '--lr_show_plot',
        dest='lr_show_plot',
        action='store_true',
        help='Show LR Finder results plot.'
    )
    # Grid search experiment params
    parent_parser.add_argument(
        '--param-name',
        dest='param_name',
        default='learning_rate',
        help="Name of the experiment's hyperparameter."
    )
    parent_parser.add_argument(
        '--param-range',
        dest='param_range',
        type=str,
        default='[0.001,0.15,5]',
        help='Hyperparameter linspace range in \
            format [start,stop,num_samples]. \
            Ignored if param_list is set.'
    )
    parent_parser.add_argument(
        '--param-list',
        dest='param_list',
        type=str,
        help='Hyperparameter value list (overides param-range).'
    )
    parent_parser.add_argument(
        '--param-index',
        dest='param_index',
        type=int,
        default=0,
        help='Hyperparameter initial index \
            (continue from a partial experiment).'
    )
    # each LightningModule defines arguments relevant to it
    parser = LightningModel.add_model_specific_args(parent_parser, root_dir)
    hyperparams = parser.parse_args()
    # ---------------------
    # RUN TRAINING
    # ---------------------
    # Helper function for the telegram notifier
    @notify()
    def call_main(hparams):
        return main(hparams)
    # Initialize experiment list: one training run per hyperparameter value,
    # taken either from an explicit list or from a linspace range.
    if hyperparams.param_list is None:
        start, stop, num = parse_range(hyperparams.param_range)
        params = np.linspace(start, stop, num)
    else:
        params = parse_list(hyperparams.param_list)
        num = len(params)
    seed = hyperparams.seed
    start_index = hyperparams.param_index
    # vars() returns the namespace's live __dict__, so writing through
    # hyperdict below also updates hyperparams itself.
    hyperdict = vars(hyperparams)
    for i in range(start_index, num):
        # Reset seed
        hyperparams.seed = seed
        # Change hyperparameter for the ith exp.
        hyperparams.param_index = i
        if hyperparams.param_name in hyperdict.keys():
            hyperdict[hyperparams.param_name] = params[i]
        # Run trainer
        if hyperparams.telegram:
            call_main(hyperparams)
        else:
            main(hyperparams)
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"pytorch_lightning.callbacks.EarlyStopping",
"lightning_models.LightningModel",
"argparse.ArgumentParser",
"pathlib.Path",
"pytorch_lightning.seed_everything",
"pytorch_lightning.profiler.AdvancedProfiler",
"lightning_models.LightningModel.add_model_speci... | [((650, 672), 'pathlib.Path', 'pathlib.Path', (['exp_path'], {}), '(exp_path)\n', (662, 672), False, 'import pathlib\n'), ((1015, 1038), 'pathlib.Path', 'pathlib.Path', (['ckpt_path'], {}), '(ckpt_path)\n', (1027, 1038), False, 'import pathlib\n'), ((1365, 1388), 'pathlib.Path', 'pathlib.Path', (['prof_path'], {}), '(prof_path)\n', (1377, 1388), False, 'import pathlib\n'), ((2307, 2339), 'pytorch_lightning.seed_everything', 'pl.seed_everything', (['hparams.seed'], {}), '(hparams.seed)\n', (2325, 2339), True, 'import pytorch_lightning as pl\n'), ((3298, 3403), 'pytorch_lightning.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0)', 'patience': 'hparams.patience', 'verbose': '(True)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0.0, patience=hparams.patience,\n verbose=True, mode='min')\n", (3311, 3403), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n'), ((3494, 3563), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', ([], {'save_dir': 'save_dir', 'name': 'hparams.arch', 'version': 'None'}), '(save_dir=save_dir, name=hparams.arch, version=None)\n', (3511, 3563), False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((4625, 4792), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'ckpt_path', 'monitor': '"""val_loss"""', 'verbose': '(False)', 'save_last': '(True)', 'save_top_k': '(5)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(1)', 'prefix': '""""""'}), "(filepath=ckpt_path, monitor='val_loss', verbose=False,\n save_last=True, save_top_k=5, save_weights_only=False, mode='auto',\n period=1, prefix='')\n", (4640, 4792), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n'), ((4883, 4894), 'callbacks.CSVLogger', 'CSVLogger', ([], {}), '()\n', (4892, 4894), False, 'from callbacks import CSVLogger\n'), ((7605, 
7635), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (7619, 7635), False, 'from argparse import ArgumentParser\n'), ((12387, 12450), 'lightning_models.LightningModel.add_model_specific_args', 'LightningModel.add_model_specific_args', (['parent_parser', 'root_dir'], {}), '(parent_parser, root_dir)\n', (12425, 12450), False, 'from lightning_models import LightningModel\n'), ((12619, 12627), 'notifications.notify', 'notify', ([], {}), '()\n', (12625, 12627), False, 'from notifications import notify\n'), ((2356, 2388), 'pathlib.Path', 'pathlib.Path', (['hparams.model_path'], {}), '(hparams.model_path)\n', (2368, 2388), False, 'import pathlib\n'), ((5849, 5973), 'lr_finder.LRFinder', 'LRFinder', (['hparams', 'LightningModel'], {'num_epochs': 'hparams.lr_epochs', 'mode': 'mode', 'min_lr': 'hparams.lr_min', 'max_lr': 'hparams.lr_max'}), '(hparams, LightningModel, num_epochs=hparams.lr_epochs, mode=mode,\n min_lr=hparams.lr_min, max_lr=hparams.lr_max)\n', (5857, 5973), False, 'from lr_finder import LRFinder\n'), ((7141, 7164), 'lightning_models.LightningModel', 'LightningModel', (['hparams'], {}), '(hparams)\n', (7155, 7164), False, 'from lightning_models import LightningModel\n'), ((7191, 7239), 'lightning_models.LightningModel.load_from_checkpoint', 'LightningModel.load_from_checkpoint', (['resume_path'], {}), '(resume_path)\n', (7226, 7239), False, 'from lightning_models import LightningModel\n'), ((7557, 7583), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7573, 7583), False, 'import os\n'), ((12839, 12868), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'num'], {}), '(start, stop, num)\n', (12850, 12868), True, 'import numpy as np\n'), ((2004, 2027), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (2020, 2027), False, 'import ast\n'), ((4040, 4085), 'pytorch_lightning.profiler.SimpleProfiler', 'SimpleProfiler', ([], {'output_filename': 'prof_filename'}), 
'(output_filename=prof_filename)\n', (4054, 4085), False, 'from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler\n'), ((4154, 4201), 'pytorch_lightning.profiler.AdvancedProfiler', 'AdvancedProfiler', ([], {'output_filename': 'prof_filename'}), '(output_filename=prof_filename)\n', (4170, 4201), False, 'from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler\n'), ((6945, 6955), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6953, 6955), True, 'import matplotlib.pyplot as plt\n')] |
import json
import logging
import math
import os
import pathlib
import subprocess
import numpy as np
import shapely.geometry
import shapely.affinity
import venn7.bezier
ROOT = pathlib.Path(os.path.realpath(__file__)).parent
class VennDiagram:
    """A simple symmetric monotone Venn diagram. The diagram is encoded discretely
    using a set of "row swaps." Creation of path data is performed on the fly.
    See README for more info.
    Parameters
    ----------
    n : int
        The order of the Venn diagram. Must be prime.
    matrix_encoding_string : str
        A string containing whitespace-separated rows of the "matrix encoding."
        See README for example.
    name : str, optional
        Human-readable display name.
    renderer_args : dict, optional
        Keyword arguments forwarded to VennDiagramRenderer.
    """
    def __init__(self, n, matrix_encoding_string, name=None, renderer_args=None):
        self.name = name
        self.n = n
        self.row_swaps = self.parse_matrix_encoding_string(
            matrix_encoding_string
        )
        self.flattened_row_swaps = [y for x in self.row_swaps for y in x]
        self.renderer_args = renderer_args
        if self.renderer_args is None:
            self.renderer_args = {}
        self.validate_basic()
        self.validate_venn()
    def parse_matrix_encoding_string(self, matrix_encoding_string):
        """Parse the 0/1 matrix text into a list of row-swap lists.

        Entry ``k`` of the result lists the 1-based row indices whose
        column ``k`` of the matrix contains a 1.
        """
        rows = matrix_encoding_string.strip().splitlines()
        matrix = [[int(c) for c in line.strip()] for line in rows]
        row_swaps = []
        for column in range(len(matrix[0])):
            entry = []
            for row in range(len(matrix)):
                if matrix[row][column] == 1:
                    entry.append(row + 1)
            row_swaps.append(entry)
        return row_swaps
    def validate_basic(self):
        """Check for basic errors in the matrix flattened_row_swaps."""
        n = self.n
        expected_length = (2 ** n - 2) // n
        if len(self.flattened_row_swaps) != expected_length:
            raise ValueError(
                f"Wrong length: flattened_row_swaps should be of length {expected_length}"
            )
        # Seeding with the last element makes this a circular check:
        # the sequence's last and first entries may not coincide either.
        last_x = self.flattened_row_swaps[-1]
        for x in self.flattened_row_swaps:
            if last_x == x:
                raise ValueError(
                    "Immediate repetitions are not allowed in flattened_row_swaps"
                )
            last_x = x
        for k in range(1, n - 1):
            expected = math.comb(n, k) // n
            count = 0
            for x in self.flattened_row_swaps:
                if x == k:
                    count += 1
            if count != expected:
                raise ValueError(f"Expected {expected} instances of {k}")
    def validate_venn(self):
        """Check that this is in fact a Venn diagram."""
        n = self.n
        # I am not sure if this validation code is correct, sorry
        ranks = [False] * (2 ** n)
        ranks[0] = ranks[-1] = True
        p = list(range(n))
        for swap_row in self.full_flattened_row_swaps():
            a = swap_row
            b = swap_row - 1
            p[a], p[b] = p[b], p[a]
            rank = sum([2 ** x for x in p[swap_row:]])
            if ranks[rank]:
                raise ValueError(f"Duplicate rank {rank}")
            ranks[rank] = True
        if not all(ranks):
            raise ValueError(f"Not all ranks represented")
    def full_flattened_row_swaps(self):
        """Return the flattened_row_swaps duplicated n times."""
        full_flattened_row_swaps = []
        for i in range(self.n):
            full_flattened_row_swaps += self.flattened_row_swaps
        return full_flattened_row_swaps
    def get_spline(self, index=0):
        """Render one curve as a BezierSpline.

        NOTE(review): ``index`` is currently not forwarded to the
        renderer, so every index yields curve 0 — callers rotate the
        result themselves (see check_regions).
        """
        renderer = VennDiagramRenderer(self, **self.renderer_args)
        return renderer.get_spline()
    def get_polygon(self, index=0):
        """Get the shape of a single curve as a polygon."""
        spline = self.get_spline(index)
        resolution = 10  # sample points per Bezier segment
        points = []
        for bezier in spline.beziers:
            for i in range(resolution):
                points.append(bezier(i / resolution))
        return points
    def check_regions(self):
        """Approximate this Venn diagram with polygons and use Shapely to check
        that the diagram is valid (every one of the 2^n - 1 regions is
        non-empty)."""
        original_curve = shapely.geometry.Polygon(self.get_polygon())
        curves = []
        for i in range(self.n):
            angle = 2 * math.pi * i / self.n
            curve = shapely.affinity.rotate(
                original_curve, angle, origin=(0, 0), use_radians=True
            )
            curves.append(curve)
        # Region at index 0 is an empty set.
        regions = [[]]
        for rank in range(1, 2 ** self.n):
            # Bit i of `rank` decides whether curve i is included in
            # the intersection or subtracted from it.
            curves_included = []
            curves_excluded = []
            tmp_rank = rank
            for i in range(self.n):
                if tmp_rank % 2 == 0:
                    curves_excluded.append(curves[i])
                else:
                    curves_included.append(curves[i])
                tmp_rank //= 2
            region = curves_included[0]
            for curve in curves_included[1:]:
                region = region.intersection(curve)
            for curve in curves_excluded:
                region = region.difference(curve)
            assert not region.is_empty
    def export_json(self):
        """Serialize the diagram (curve + per-region SVG paths) to a dict.

        Region paths are computed by piping the curve through the
        external ``venn_boolean.js`` Node script.
        """
        result = {
            "name": self.name,
            "n": self.n,
            "curve": self.get_spline().as_svg_path(),
        }
        process = subprocess.run(
            ["node", str(ROOT / "venn_boolean.js")],
            check=True,
            capture_output=True,
            input=json.dumps(result),
            encoding="utf-8",
        )
        regions = json.loads(process.stdout)
        processed_regions = [""]
        for region in regions[1:]:
            path = venn7.bezier.BezierPath.from_svg_path(region)
            path = path.remove_tiny_segments(threshold=1)
            processed_regions.append(path.as_svg_path())
        result["regions"] = processed_regions
        return result
    def plot(self):
        """Quick matplotlib preview of all n curves."""
        import matplotlib.pyplot as plt
        import matplotlib.patches
        import matplotlib.collections
        fig, ax = plt.subplots()
        # BUGFIX: was `range(diagram.n)`, relying on a module-level
        # global named `diagram`; use this instance's order instead.
        polygons = [
            matplotlib.patches.Polygon(self.get_polygon(i)) for i in range(self.n)
        ]
        patches = matplotlib.collections.PatchCollection(polygons, alpha=0.2)
        ax.add_collection(patches)
        plt.xlim(-100, 100)
        plt.ylim(-100, 100)
        plt.show()
class VennDiagramRenderer:
    """A class that renders discrete Venn diagrams to splines."""
    def __init__(
        self,
        venn_diagram,
        inner_radius=30,
        spacing=5,
        tension_diagonal=1.0,
        tension_default=1.0,
        extra_outer_spacing=0,
    ):
        # Copy the discrete encoding; rendering never mutates the diagram.
        self.n = venn_diagram.n
        self.row_swaps = venn_diagram.row_swaps
        # inner_radius: radius of row 0; spacing: radial gap between rows.
        self.inner_radius = inner_radius
        self.spacing = spacing
        self.tension_diagonal = tension_diagonal
        self.tension_default = tension_default
        # Extra radial padding for the innermost/outermost rows.
        self.extra_outer_spacing = extra_outer_spacing
        # Avoid perfectly coincident endpoints, which causes
        # issues for Boolean ops.
        self.fudge_factor = 1e-4
    def _get_radius_of_row(self, row, use_extra_outer_spacing=True):
        # Map a discrete row index to a radial distance, optionally
        # pushing the extreme rows further in/out.
        adjusted_row = row
        if use_extra_outer_spacing:
            if row <= 1:
                adjusted_row -= self.extra_outer_spacing
            if row >= self.n - 1:
                adjusted_row += self.extra_outer_spacing
        result = self.inner_radius + self.spacing * adjusted_row
        return result
    def _get_curve_points_on_cylinder(self, index):
        """Get the set of control points (not Bezier but Metafont control
        points) if the Venn diagram were unraveled on a cylinder. All these
        points lie on a grid.
        Each point is of the form (x, y, type). x is the circular coordinate
        which wraps around from 0 to len(self.row_swaps). y is the other,
        non-circular component which ranges from 0 to self.n - 1 inclusive.
        type is a string used to tag points with information about the point.
        This method generates two:
        - intersection_+ means that the curve is going up at this point.
        - intersection_- means that the curve is going down at this point.
        """
        # NOTE(review): tuples below are appended as (row, column, type);
        # confirm against the (x, y, type) wording above.
        points = []
        row, column = 0, index * len(self.row_swaps)
        for i in range(self.n):
            for swap_rows in self.row_swaps:
                if row + 1 in swap_rows:
                    points.append((row + 1, column, "intersection_+"))
                    row += 1
                elif row in swap_rows:
                    points.append((row, column, "intersection_-"))
                    row -= 1
                column += 1
        return points
    def _add_arc_points(self, points):
        """Given a set of control points on the cylinder, find pairs of points
        that are horizontal and insert new arc points to help round out the
        curve in that region. It is assumed that all points are intersection type.
        """
        squash_factor = len(self.row_swaps)
        result = []
        for i in range(len(points)):
            r1, c1, type_1 = point = points[i]
            r2, c2, type_2 = points[(i + 1) % len(points)]
            result.append(point)
            if r1 == r2:
                radius = (c2 - c1) % len(self.n * self.row_swaps) * 0.5
                column = c1 + radius
                if type_1 == "intersection_+" and type_2 == "intersection_-":
                    arc_direction = 1
                    type_ = "arc_+"
                elif type_1 == "intersection_-" and type_2 == "intersection_+":
                    arc_direction = -1
                    type_ = "arc_-"
                else:
                    # Two same-direction points at equal height should not
                    # occur in a valid monotone encoding.
                    raise RuntimeError
                vertical_radius = arc_direction * radius * 0.5
                ratio = 0.6
                # NOTE(review): arc insertion is currently disabled; the
                # branch above still validates point types.
                #result.append((r1 + vertical_radius, column, type_))
        return result
    def _get_tensions(self, points):
        """Given a set of control points on the cylinder, determine whether
        each pair of points is diagonal or horizontal. If they are diagonal and
        both are of "intersection" type, their tension is set to
        ``tension_diagonal``. Otherwise, their tension is ``tension_default``.
        Collect a list of all tensions and return it.
        """
        tensions = []
        for i in range(len(points)):
            r1, c1, type_1 = points[i]
            r2, c2, type_2 = points[(i + 1) % len(points)]
            if (
                type_1.startswith("intersection_") and type_2.startswith("intersection_")
                and type_1 == type_2
            ):
                tensions.append(self.tension_diagonal)
            else:
                tensions.append(self.tension_default)
        return tensions
    def _convert_cylinder_points_to_polar(self, cylinder_points):
        # Wrap the cylinder grid onto concentric circles: row -> radius,
        # column -> angle around the full circle.
        polar_points = []
        for row, column, __ in cylinder_points:
            radius = self._get_radius_of_row(row)
            theta = column * 2 * math.pi / (self.n * len(self.row_swaps))
            x = radius * math.cos(theta)
            y = radius * math.sin(theta)
            polar_points.append((x, y))
        return polar_points
    def _normalize_rotation_and_scaling(self, spline):
        """Given a spline, rotate and uniformly scale it so that its furthest
        point from the origin is transformed to (0, -50)."""
        x, y = spline.get_furthest_point_from((0, 0))
        angle = np.arctan2(y, x)
        scale = np.hypot(x, y)
        return spline.transform(
            venn7.bezier.get_rotation_matrix(-np.pi * 0.5 - angle) * 50 / scale
        )
    def _get_angles(self, cylinder_points):
        # Tangent direction at each control point: the circular tangent,
        # tilted up/down depending on whether the curve is rising or falling.
        result = []
        for row, column, type_ in cylinder_points:
            tangent_angle = 2 * np.pi * column / (self.n * len(self.row_swaps)) + np.pi / 2
            angle = tangent_angle
            dy = self.spacing
            dx = self._get_radius_of_row(row) * 2 * np.pi / (self.n * len(self.row_swaps))
            tilt_angle = np.arctan2(dy, dx)
            if type_ == "intersection_+":
                angle -= tilt_angle
            elif type_ == "intersection_-":
                angle += tilt_angle
            angle = angle % (2 * np.pi)
            result.append(angle)
        return result
    def get_spline(self, index=0):
        """Render a single curve of the Venn diagram to a BezierSpline
        and return the result.
        Parameters
        ----------
        index : int
            Which curve to return. For a symmetric Venn diagram, indices
            other than 0 are rotations of each other.
        """
        cylinder_points = self._get_curve_points_on_cylinder(index)
        cylinder_points = self._add_arc_points(cylinder_points)
        angles = self._get_angles(cylinder_points)
        control_points = self._convert_cylinder_points_to_polar(cylinder_points)
        spline = venn7.bezier.AngleSpline(control_points, angles)
        spline = self._normalize_rotation_and_scaling(spline)
        # Nudge off exact symmetry to keep Boolean ops stable.
        spline = spline.translate(np.array([self.fudge_factor, 0]))
        return spline
DIAGRAMS_LIST = [
"victoria",
"adelaide",
"massey",
"manawatu",
"palmerston_north",
"hamilton",
"5",
]
DIAGRAMS = {
"victoria": VennDiagram(
7,
"""
010000000000
101000001000
010100010101
100010101010
000001010001
000000100000
""",
"Victoria",
),
"adelaide": VennDiagram(
7,
"""
0100000000
1010001000
0101010101
1010101010
0001010001
0000100000
""",
"Adelaide",
),
"massey": VennDiagram(
7,
"""
010000000000
101000000010
010100010101
101010101000
010101000000
001000000000
""",
"Massey",
),
"manawatu": VennDiagram(
7,
"""
00001000000000
10000000100100
01010001010001
00101010001010
00000100100100
01000000000000
""",
"Manawatu",
renderer_args={
"extra_outer_spacing": 2
},
),
"palmerston_north": VennDiagram(
7,
"""
10000000000000
00100000001010
01010100010100
10001010100010
00000001000101
00000000010000
""",
"<NAME>",
renderer_args={
"extra_outer_spacing": 1
},
),
"hamilton": VennDiagram(
7,
"""
0010000000
1000100010
0101010101
1010101010
0101000100
0000000001
""",
"Hamilton",
renderer_args={
"extra_outer_spacing": 1
},
),
"5": VennDiagram(
5,
"""
1000
0101
1010
0001
""",
"Symmetric 5-Venn diagram",
renderer_args={
"inner_radius": 10,
"spacing": 8,
"tension_diagonal": 1,
"tension_default": 1,
},
),
}
if __name__ == "__main__":
    import json
    import sys
    # Export every diagram (curve + per-region SVG paths) as a single
    # JavaScript constant so a web front end can embed it directly.
    # Usage: python thisfile.py <output.js>
    diagrams_json = {}
    diagrams_json["diagrams_list"] = DIAGRAMS_LIST
    for name, diagram in DIAGRAMS.items():
        diagrams_json[name] = diagram.export_json()
    with open(sys.argv[1], "w") as f:
        f.write("const venn_diagrams = ")
        json.dump(diagrams_json, f)
        f.write(";")
| [
"json.loads",
"json.dumps",
"numpy.hypot",
"os.path.realpath",
"numpy.array",
"math.cos",
"numpy.arctan2",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"math.sin",
"math.comb",
"matplotlib.pyplot.subplots",
"json.dump",
"matplotlib.pyplot.show"
] | [((192, 218), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (208, 218), False, 'import os\n'), ((5641, 5667), 'json.loads', 'json.loads', (['process.stdout'], {}), '(process.stdout)\n', (5651, 5667), False, 'import json\n'), ((6138, 6152), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6150, 6152), True, 'import matplotlib.pyplot as plt\n'), ((6391, 6410), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (6399, 6410), True, 'import matplotlib.pyplot as plt\n'), ((6419, 6438), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (6427, 6438), True, 'import matplotlib.pyplot as plt\n'), ((6447, 6457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6455, 6457), True, 'import matplotlib.pyplot as plt\n'), ((11555, 11571), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (11565, 11571), True, 'import numpy as np\n'), ((11588, 11602), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (11596, 11602), True, 'import numpy as np\n'), ((15521, 15548), 'json.dump', 'json.dump', (['diagrams_json', 'f'], {}), '(diagrams_json, f)\n', (15530, 15548), False, 'import json\n'), ((12114, 12132), 'numpy.arctan2', 'np.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (12124, 12132), True, 'import numpy as np\n'), ((13152, 13184), 'numpy.array', 'np.array', (['[self.fudge_factor, 0]'], {}), '([self.fudge_factor, 0])\n', (13160, 13184), True, 'import numpy as np\n'), ((2353, 2368), 'math.comb', 'math.comb', (['n', 'k'], {}), '(n, k)\n', (2362, 2368), False, 'import math\n'), ((5562, 5580), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (5572, 5580), False, 'import json\n'), ((11165, 11180), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (11173, 11180), False, 'import math\n'), ((11206, 11221), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (11214, 11221), False, 'import math\n')] |
#!/usr/bin/env python
import click as ck
import numpy as np
import pandas as pd
import gzip
import os
import torch as th
from collections import Counter
from aminoacids import MAXLEN, to_ngrams
import logging
import json
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from utils import get_goplus_defs, Ontology, NAMESPACES
logging.basicConfig(level=logging.INFO)
from deepgoel import DGELModel, load_normal_forms
from torch_utils import FastTensorDataLoader
ont = 'mf'
@ck.command()
@ck.option(
    '--data-root', '-dr', default=f'data/',
    help='Data root')
@ck.option(
    '--ont', '-ont', default='mf',
    help='Subontology')
@ck.option(
    '--data-file', '-df', default=f'swissprot.pkl',
    help='Pandas pkl file with proteins and their interpo annotations')
@ck.option(
    '--device', '-d', default='cuda:1',
    help='Device')
def main(data_root, ont, data_file, device):
    """Predict zero-shot GO-term scores for every protein in *data_file*.

    Loads the trained DGELModel for the chosen subontology, scores each
    protein against all zero-shot classes (terms with GO-plus definitions
    in the requested namespace), and prints protein/term/score triples
    whose score is at least 0.01.
    """
    terms_file = f'{data_root}/{ont}/terms.pkl'
    model_file = f'{data_root}/{ont}/deepgozero.th'
    go = Ontology(f'{data_root}/go.obo', with_rels=True)
    # Load interpro data
    df = pd.read_pickle(data_file)
    terms_df = pd.read_pickle(terms_file)
    terms = terms_df['gos'].values.flatten()
    terms_dict = {v: i for i, v in enumerate(terms)}
    ipr_df = pd.read_pickle(f'{data_root}/{ont}/interpros.pkl')
    iprs = ipr_df['interpros'].values
    iprs_dict = {v:k for k, v in enumerate(iprs)}
    nf1, nf2, nf3, nf4, rels_dict, zero_classes = load_normal_forms(
        f'{data_root}/go.norm', terms_dict)
    # Zero-shot terms: classes with a definition in the right namespace.
    defins = get_goplus_defs(f'{data_root}/definitions_go.txt')
    zero_terms = [term for term in zero_classes if term in defins and go.get_namespace(term) == NAMESPACES[ont]]
    print(len(zero_terms))
    net = DGELModel(len(iprs_dict), len(terms), len(zero_classes), len(rels_dict), device).to(device)
    # Loading best model
    print('Loading the best model')
    net.load_state_dict(th.load(model_file))
    net.eval()
    zero_terms_dict = {v: k for k, v in enumerate(zero_terms)}
    data = get_data(df, iprs_dict, zero_terms_dict)
    # data = data.to(device)
    batch_size = 1000
    data_loader = FastTensorDataLoader(*data, batch_size=batch_size, shuffle=False)
    # Embedding indices of the zero-shot classes, as one tensor.
    go_data = th.zeros(len(zero_terms), dtype=th.long).to(device)
    for i, term in enumerate(zero_terms):
        go_data[i] = zero_classes[term]
    # Score all proteins batch by batch into one (proteins x terms) matrix.
    scores = np.zeros((data[0].shape[0], len(zero_terms)), dtype=np.float32)
    for i, batch_data in enumerate(data_loader):
        batch_data, _ = batch_data
        zero_score = net.predict_zero(
            batch_data.to(device), go_data).cpu().detach().numpy()
        scores[i * batch_size: (i + 1) * batch_size] = zero_score
    for i, row in enumerate(df.itertuples()):
        for j, go_id in enumerate(zero_terms):
            if scores[i, j] >= 0.01:
                print(row.proteins, go_id, scores[i, j])
def compute_roc(labels, preds):
    """Micro-averaged ROC: flatten labels/predictions into one curve.

    Returns:
        (roc_auc, fpr, tpr) — the area under the curve plus the raw
        false/true positive rate arrays.
    """
    flat_labels = labels.flatten()
    flat_preds = preds.flatten()
    fpr, tpr, _thresholds = roc_curve(flat_labels, flat_preds)
    return auc(fpr, tpr), fpr, tpr
def compute_fmax(labels, preds):
    """Compute the protein-centric Fmax metric.

    Sweeps the prediction threshold from 0.00 to 1.00 in steps of 0.01 and
    returns the best F-measure found, plus the precision/recall values
    collected for every threshold that produced at least one true positive.

    Fix vs original: removed dead locals ``pmax``, ``rmax`` and ``patience``,
    which were initialised but never read or updated.

    Args:
        labels: binary array of shape (n_proteins, n_terms).
        preds: score array of the same shape.

    Returns:
        (fmax, precs, recs).
    """
    fmax = 0.0
    precs = []
    recs = []
    for t in range(0, 101):
        threshold = t / 100.0
        predictions = (preds >= threshold).astype(np.float32)
        tp = np.sum(labels * predictions, axis=1)
        fp = np.sum(predictions, axis=1) - tp
        fn = np.sum(labels, axis=1) - tp
        # Precision is averaged only over proteins with >= 1 true positive.
        tp_ind = tp > 0
        tp = tp[tp_ind]
        fp = fp[tp_ind]
        fn = fn[tp_ind]
        if len(tp) == 0:
            continue
        p = np.mean(tp / (tp + fp))
        # Recall is averaged over *all* proteins (len(tp_ind)), per CAFA convention.
        r = np.sum(tp / (tp + fn)) / len(tp_ind)
        precs.append(p)
        recs.append(r)
        f = 2 * p * r / (p + r)
        if fmax <= f:
            fmax = f
    return fmax, precs, recs
def get_data(df, iprs_dict, terms_dict):
    """Build one-hot InterPro feature and GO-label tensors for a protein frame.

    Args:
        df: DataFrame with ``interpros`` and ``prop_annotations`` list columns.
        iprs_dict: InterPro id -> feature column index.
        terms_dict: GO id -> label column index.

    Returns:
        (data, labels) float32 tensors of shape (len(df), len(iprs_dict)) and
        (len(df), len(terms_dict)).
    """
    n_rows = len(df)
    data = th.zeros((n_rows, len(iprs_dict)), dtype=th.float32)
    labels = th.zeros((n_rows, len(terms_dict)), dtype=th.float32)
    for row_idx, row in enumerate(df.itertuples()):
        # One-hot encode the InterPro domains known to the vocabulary.
        for ipr in row.interpros:
            col = iprs_dict.get(ipr)
            if col is not None:
                data[row_idx, col] = 1
        # Mark every annotated GO term that survives the vocabulary filter.
        for go_id in row.prop_annotations:
            col = terms_dict.get(go_id)
            if col is not None:
                labels[row_idx, col] = 1
    return data, labels
# Entry point when executed as a script; click parses the CLI options.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"pandas.read_pickle",
"numpy.mean",
"click.option",
"sklearn.metrics.auc",
"torch.load",
"numpy.sum",
"deepgoel.load_normal_forms",
"torch_utils.FastTensorDataLoader",
"utils.Ontology",
"utils.get_goplus_defs",
"click.command"
] | [((343, 382), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (362, 382), False, 'import logging\n'), ((493, 505), 'click.command', 'ck.command', ([], {}), '()\n', (503, 505), True, 'import click as ck\n'), ((507, 574), 'click.option', 'ck.option', (['"""--data-root"""', '"""-dr"""'], {'default': 'f"""data/"""', 'help': '"""Data root"""'}), "('--data-root', '-dr', default=f'data/', help='Data root')\n", (516, 574), True, 'import click as ck\n'), ((585, 645), 'click.option', 'ck.option', (['"""--ont"""', '"""-ont"""'], {'default': '"""mf"""', 'help': '"""Subontology"""'}), "('--ont', '-ont', default='mf', help='Subontology')\n", (594, 645), True, 'import click as ck\n'), ((656, 786), 'click.option', 'ck.option', (['"""--data-file"""', '"""-df"""'], {'default': 'f"""swissprot.pkl"""', 'help': '"""Pandas pkl file with proteins and their interpo annotations"""'}), "('--data-file', '-df', default=f'swissprot.pkl', help=\n 'Pandas pkl file with proteins and their interpo annotations')\n", (665, 786), True, 'import click as ck\n'), ((792, 852), 'click.option', 'ck.option', (['"""--device"""', '"""-d"""'], {'default': '"""cuda:1"""', 'help': '"""Device"""'}), "('--device', '-d', default='cuda:1', help='Device')\n", (801, 852), True, 'import click as ck\n'), ((1016, 1063), 'utils.Ontology', 'Ontology', (['f"""{data_root}/go.obo"""'], {'with_rels': '(True)'}), "(f'{data_root}/go.obo', with_rels=True)\n", (1024, 1063), False, 'from utils import get_goplus_defs, Ontology, NAMESPACES\n'), ((1099, 1124), 'pandas.read_pickle', 'pd.read_pickle', (['data_file'], {}), '(data_file)\n', (1113, 1124), True, 'import pandas as pd\n'), ((1140, 1166), 'pandas.read_pickle', 'pd.read_pickle', (['terms_file'], {}), '(terms_file)\n', (1154, 1166), True, 'import pandas as pd\n'), ((1283, 1333), 'pandas.read_pickle', 'pd.read_pickle', (['f"""{data_root}/{ont}/interpros.pkl"""'], {}), "(f'{data_root}/{ont}/interpros.pkl')\n", (1297, 
1333), True, 'import pandas as pd\n'), ((1473, 1526), 'deepgoel.load_normal_forms', 'load_normal_forms', (['f"""{data_root}/go.norm"""', 'terms_dict'], {}), "(f'{data_root}/go.norm', terms_dict)\n", (1490, 1526), False, 'from deepgoel import DGELModel, load_normal_forms\n'), ((1550, 1600), 'utils.get_goplus_defs', 'get_goplus_defs', (['f"""{data_root}/definitions_go.txt"""'], {}), "(f'{data_root}/definitions_go.txt')\n", (1565, 1600), False, 'from utils import get_goplus_defs, Ontology, NAMESPACES\n'), ((2154, 2219), 'torch_utils.FastTensorDataLoader', 'FastTensorDataLoader', (['*data'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(*data, batch_size=batch_size, shuffle=False)\n', (2174, 2219), False, 'from torch_utils import FastTensorDataLoader\n'), ((3072, 3085), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (3075, 3085), False, 'from sklearn.metrics import roc_curve, auc, matthews_corrcoef\n'), ((1933, 1952), 'torch.load', 'th.load', (['model_file'], {}), '(model_file)\n', (1940, 1952), True, 'import torch as th\n'), ((3370, 3406), 'numpy.sum', 'np.sum', (['(labels * predictions)'], {'axis': '(1)'}), '(labels * predictions, axis=1)\n', (3376, 3406), True, 'import numpy as np\n'), ((3648, 3671), 'numpy.mean', 'np.mean', (['(tp / (tp + fp))'], {}), '(tp / (tp + fp))\n', (3655, 3671), True, 'import numpy as np\n'), ((3420, 3447), 'numpy.sum', 'np.sum', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3426, 3447), True, 'import numpy as np\n'), ((3466, 3488), 'numpy.sum', 'np.sum', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (3472, 3488), True, 'import numpy as np\n'), ((3684, 3706), 'numpy.sum', 'np.sum', (['(tp / (tp + fn))'], {}), '(tp / (tp + fn))\n', (3690, 3706), True, 'import numpy as np\n')] |
import os
import torch
import create_data
from model import shape_net
import numpy as np
def align_bone_len(opt_, pre_):
    """Per-column mean absolute error after scale-aligning bone lengths.

    Each row of *opt_* is rescaled so its reference bone (column 6) matches
    the corresponding row of *pre_*; the error is then averaged over rows.

    Fix vs original: replaced the per-row Python loop with a vectorized
    broadcast and dropped the redundant copies (*pre_* was copied but never
    mutated; *opt_* was copied twice). Inputs are not modified.

    Args:
        opt_: (N, K) array of optimized relative bone lengths.
        pre_: (N, K) array of predicted relative bone lengths.

    Returns:
        (K,) array: mean absolute error per bone after per-row rescaling.
    """
    opt = np.asarray(opt_)
    pre = np.asarray(pre_)
    # Per-row scale factor that makes bone 6 of opt match bone 6 of pre.
    ratio = pre[:, 6] / opt[:, 6]
    opt_align = opt * ratio[:, None]
    return np.abs(opt_align - pre).mean(0)
def fun(_shape, _label, data_loader):
    """Summed per-bone error between bone lengths derived from *_shape*
    and the reference lengths in *_label*.

    Both tensors are detached copies, so neither input is modified and no
    gradients flow through the evaluation.
    """
    shape_params = _shape.clone().detach()
    ref_lengths = _label.detach().clone()
    # Derive relative bone lengths from the shape parameters.
    bone_lengths = data_loader.new_cal_ref_bone(shape_params)
    per_bone_err = align_bone_len(bone_lengths.cpu().numpy(), ref_lengths.cpu().numpy())
    return per_bone_err.sum()
# --- Script: regress MANO shape (beta) parameters from predicted joints ---
checkpoint = 'checkpoints'
model = shape_net.ShapeNet()
shape_net.load_checkpoint(
    model, os.path.join(checkpoint, 'ckp_siknet_synth_41.pth.tar')
)
# Inference only: freeze all network weights.
for params in model.parameters():
    params.requires_grad = False
data_set = ['rhd', 'stb', 'do', 'eo']
temp_data = create_data.DataSet(_mano_root='mano/models')
for data in data_set:
    print('*' * 20)
    print('加载' + data + '数据集')
    print('*' * 20)
    # Load predicted joints for this dataset.
    pre_path = os.path.join('out_testset/', data + '_pre_joints.npy')
    temp = np.load(pre_path)
    temp = torch.Tensor(temp)
    _x = temp_data.cal_ref_bone(temp)
    # Regress shape (beta) parameters with the frozen model and save them.
    Y = model(_x)
    Y = Y['beta']
    np.save('out_testset/' + data + '_dl.npy', Y.clone().detach().cpu().numpy())
    # Report the regression error (sum of per-bone errors).
    dl_err = fun(Y, _x, temp_data)
    print('回归误差:{}'.format(dl_err))
| [
"numpy.abs",
"model.shape_net.ShapeNet",
"torch.Tensor",
"os.path.join",
"create_data.DataSet",
"numpy.load"
] | [((688, 708), 'model.shape_net.ShapeNet', 'shape_net.ShapeNet', ([], {}), '()\n', (706, 708), False, 'from model import shape_net\n'), ((923, 968), 'create_data.DataSet', 'create_data.DataSet', ([], {'_mano_root': '"""mano/models"""'}), "(_mano_root='mano/models')\n", (942, 968), False, 'import create_data\n'), ((747, 802), 'os.path.join', 'os.path.join', (['checkpoint', '"""ckp_siknet_synth_41.pth.tar"""'], {}), "(checkpoint, 'ckp_siknet_synth_41.pth.tar')\n", (759, 802), False, 'import os\n'), ((1088, 1142), 'os.path.join', 'os.path.join', (['"""out_testset/"""', "(data + '_pre_joints.npy')"], {}), "('out_testset/', data + '_pre_joints.npy')\n", (1100, 1142), False, 'import os\n'), ((1154, 1171), 'numpy.load', 'np.load', (['pre_path'], {}), '(pre_path)\n', (1161, 1171), True, 'import numpy as np\n'), ((1183, 1201), 'torch.Tensor', 'torch.Tensor', (['temp'], {}), '(temp)\n', (1195, 1201), False, 'import torch\n'), ((326, 349), 'numpy.abs', 'np.abs', (['(opt_align - pre)'], {}), '(opt_align - pre)\n', (332, 349), True, 'import numpy as np\n')] |
import numpy as np
# AoC 2021 day 7: read the single comma-separated line of crab positions.
raw = open("inputs/7.txt","r").readline()
input_array= [int(i) for i in np.asarray(raw.split(","))]
# Example from the problem statement (expected answers: 37 / 168).
test_array = [16,1,2,0,4,2,7,1,2,14]
def alignCrabsPartOne(input):
    """AoC 2021 day 7 part one: align crabs with linear fuel cost (1 per step).

    Prints the cheapest alignment position(s) and the fuel spent, and returns
    the minimal fuel so callers/tests can use the value.

    Fixes vs original: ``np.amin`` was recomputed three times; the reported
    "position" was actually the cost-array *index*, which is wrong whenever
    ``min(input) > 0``. (``input`` is kept as the parameter name for backward
    compatibility even though it shadows the builtin.)
    """
    lo = min(input)
    # Total fuel for every candidate target position lo..max(input).
    costs = np.array([sum(abs(pos - target) for pos in input)
                      for target in range(lo, max(input) + 1)])
    best = int(costs.min())
    # Offset indices by lo so the printed value is the actual position.
    positions = ''.join(str(idx + lo) for idx in np.where(costs == best)[0])
    print("The crabs align on", positions, "and spend", best, "fuel")
    return best
def alignCrabsPartTwo(input):
    """AoC 2021 day 7 part two: triangular fuel cost, d*(d+1)/2 for distance d.

    Prints the cheapest alignment position(s) and the fuel spent, and returns
    the minimal fuel as an int.

    Fixes vs original: ``np.amin`` was recomputed three times; the reported
    "position" was actually the cost-array index (wrong when min(input) > 0).
    """
    lo = min(input)
    # Triangular fuel cost for every candidate target position.
    costs = np.array([sum(abs(pos - target) * (abs(pos - target) + 1) / 2 for pos in input)
                      for target in range(lo, max(input) + 1)])
    best = int(costs.min())
    positions = ''.join(str(idx + lo) for idx in np.where(costs == best)[0])
    print("The crabs align on", positions, "and spend", best, "fuel")
    return best
# Run both parts on the example and on the puzzle input.
alignCrabsPartOne(test_array)
alignCrabsPartOne(input_array)
alignCrabsPartTwo(test_array)
alignCrabsPartTwo(input_array)
"numpy.amin"
] | [((439, 460), 'numpy.amin', 'np.amin', (['result_array'], {}), '(result_array)\n', (446, 460), True, 'import numpy as np\n'), ((784, 805), 'numpy.amin', 'np.amin', (['result_array'], {}), '(result_array)\n', (791, 805), True, 'import numpy as np\n'), ((398, 419), 'numpy.amin', 'np.amin', (['result_array'], {}), '(result_array)\n', (405, 419), True, 'import numpy as np\n'), ((739, 760), 'numpy.amin', 'np.amin', (['result_array'], {}), '(result_array)\n', (746, 760), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import random
import numpy as np
import pytest
try:
import torch
import torch.distributed
import habitat_sim.utils.datasets_download as data_downloader
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.config.default import get_config
baseline_installed = True
except ImportError:
baseline_installed = False
def setup_function(test_trainers):
    """Pytest per-test setup hook: make sure the rearrange-task assets exist."""
    # Download the needed datasets; "--no-replace" presumably skips files
    # that are already present, so repeated runs are cheap.
    data_downloader.main(["--uids", "rearrange_task_assets", "--no-replace"])
@pytest.mark.skipif(
    not baseline_installed, reason="baseline sub-module not installed"
)
@pytest.mark.parametrize(
    "config_path,num_updates,target_reward",
    [
        ("habitat_baselines/config/rearrange/ddppo_reach_state.yaml", 40, 5.0),
    ],
)
def test_trainers(config_path, num_updates, target_reward):
    """Run a short DD-PPO training job and check the final windowed reward
    reaches *target_reward*."""
    # Remove the checkpoints from previous tests
    for f in glob.glob("data/test_checkpoints/test_training/*"):
        os.remove(f)
    # Setup the training: cap updates, disable the step limit, and redirect
    # checkpoints into the test folder.
    config = get_config(
        config_path,
        [
            "NUM_UPDATES",
            num_updates,
            "TOTAL_NUM_STEPS",
            -1.0,
            "CHECKPOINT_FOLDER",
            "data/test_checkpoints/test_training",
        ],
    )
    # Seed every RNG involved so the short run is reproducible.
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
    torch.cuda.manual_seed(config.TASK_CONFIG.SEED)
    torch.backends.cudnn.deterministic = True
    if config.FORCE_TORCH_SINGLE_THREADED and torch.cuda.is_available():
        torch.set_num_threads(1)
    assert (
        config.TRAINER_NAME == "ddppo"
    ), "This test can only be used with ddppo trainer"
    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)
    # Train
    trainer.train()
    # Gather the data: per-stat delta over the episode window (or the single
    # value when the window holds only one entry).
    deltas = {
        k: ((v[-1] - v[0]).sum().item() if len(v) > 1 else v[0].sum().item())
        for k, v in trainer.window_episode_stats.items()
    }
    deltas["count"] = max(deltas["count"], 1.0)
    reward = deltas["reward"] / deltas["count"]
    # Make sure the final reward is greater than the target
    assert (
        reward >= target_reward
    ), f"reward for task {config_path} was {reward} but is expected to be at least {target_reward}"
| [
"torch.manual_seed",
"habitat_baselines.config.default.get_config",
"random.seed",
"torch.set_num_threads",
"pytest.mark.parametrize",
"habitat_baselines.common.baseline_registry.baseline_registry.get_trainer",
"habitat_sim.utils.datasets_download.main",
"torch.cuda.is_available",
"numpy.random.seed... | [((761, 852), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not baseline_installed)'], {'reason': '"""baseline sub-module not installed"""'}), "(not baseline_installed, reason=\n 'baseline sub-module not installed')\n", (779, 852), False, 'import pytest\n'), ((855, 998), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""config_path,num_updates,target_reward"""', "[('habitat_baselines/config/rearrange/ddppo_reach_state.yaml', 40, 5.0)]"], {}), "('config_path,num_updates,target_reward', [(\n 'habitat_baselines/config/rearrange/ddppo_reach_state.yaml', 40, 5.0)])\n", (878, 998), False, 'import pytest\n'), ((684, 757), 'habitat_sim.utils.datasets_download.main', 'data_downloader.main', (["['--uids', 'rearrange_task_assets', '--no-replace']"], {}), "(['--uids', 'rearrange_task_assets', '--no-replace'])\n", (704, 757), True, 'import habitat_sim.utils.datasets_download as data_downloader\n'), ((1142, 1192), 'glob.glob', 'glob.glob', (['"""data/test_checkpoints/test_training/*"""'], {}), "('data/test_checkpoints/test_training/*')\n", (1151, 1192), False, 'import glob\n'), ((1253, 1396), 'habitat_baselines.config.default.get_config', 'get_config', (['config_path', "['NUM_UPDATES', num_updates, 'TOTAL_NUM_STEPS', -1.0, 'CHECKPOINT_FOLDER',\n 'data/test_checkpoints/test_training']"], {}), "(config_path, ['NUM_UPDATES', num_updates, 'TOTAL_NUM_STEPS', -\n 1.0, 'CHECKPOINT_FOLDER', 'data/test_checkpoints/test_training'])\n", (1263, 1396), False, 'from habitat_baselines.config.default import get_config\n'), ((1502, 1538), 'random.seed', 'random.seed', (['config.TASK_CONFIG.SEED'], {}), '(config.TASK_CONFIG.SEED)\n', (1513, 1538), False, 'import random\n'), ((1543, 1582), 'numpy.random.seed', 'np.random.seed', (['config.TASK_CONFIG.SEED'], {}), '(config.TASK_CONFIG.SEED)\n', (1557, 1582), True, 'import numpy as np\n'), ((1587, 1629), 'torch.manual_seed', 'torch.manual_seed', (['config.TASK_CONFIG.SEED'], {}), '(config.TASK_CONFIG.SEED)\n', 
(1604, 1629), False, 'import torch\n'), ((1634, 1681), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.TASK_CONFIG.SEED'], {}), '(config.TASK_CONFIG.SEED)\n', (1656, 1681), False, 'import torch\n'), ((1962, 2012), 'habitat_baselines.common.baseline_registry.baseline_registry.get_trainer', 'baseline_registry.get_trainer', (['config.TRAINER_NAME'], {}), '(config.TRAINER_NAME)\n', (1991, 2012), False, 'from habitat_baselines.common.baseline_registry import baseline_registry\n'), ((1202, 1214), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1211, 1214), False, 'import os\n'), ((1774, 1799), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1797, 1799), False, 'import torch\n'), ((1809, 1833), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1830, 1833), False, 'import torch\n')] |
import _pickle, numpy as np, itertools as it
from time import perf_counter
# from cppimport import import_hook
#
# # import cppimport
#
# # cppimport.set_quiet(False)
#
import rpxdock as rp
from rpxdock.bvh import bvh_test
from rpxdock.bvh import BVH, bvh
import rpxdock.homog as hm
def test_bvh_isect_cpp():
   """Smoke test: run the C++-side BVH intersection self-test."""
   assert bvh_test.TEST_bvh_test_isect()
def test_bvh_isect_fixed():
   """bvh_isect_fixed must agree with the naive clash check on random point
   clouds (no poses involved); aggregate timings are printed at the end.

   Fix vs original: removed dead locals ``pos1``/``pos2`` — bvh_isect_fixed
   takes no poses, so they were never used.
   """
   # print()
   mindist = 0.01
   totbvh, totnaive = 0, 0
   for i in range(10):
      # Offset xyz1 so the clouds only partially overlap.
      xyz1 = np.random.rand(1000, 3) + [0.9, 0.9, 0]
      xyz2 = np.random.rand(1000, 3)
      tcre = perf_counter()
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      tcre = perf_counter() - tcre
      assert len(bvh1) == 1000
      tbvh = perf_counter()
      clash1 = bvh.bvh_isect_fixed(bvh1, bvh2, mindist)
      tbvh = perf_counter() - tbvh
      tn = perf_counter()
      clash2 = bvh.naive_isect_fixed(bvh1, bvh2, mindist)
      tn = perf_counter() - tn
      assert clash1 == clash2
      # print(f"{i:3} clash {clash1:1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
      totbvh += tbvh
      totnaive += tn
   print("total times", totbvh, totnaive / totbvh, totnaive)
def test_bvh_isect():
   """Posed BVH intersection: scalar and vectorized bvh_isect must agree with
   the naive check over random geometries and random rigid transforms."""
   t = rp.Timer().start()
   N1, N2 = 10, 10
   N = N1 * N2
   mindist = 0.04
   nclash = 0
   for outer in range(N1):
      xyz1 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(1250, 3) - [0.5, 0.5, 0.5]
      pos1 = hm.rand_xform(N2, cart_sd=0.8)
      pos2 = hm.rand_xform(N2, cart_sd=0.8)
      t.checkpoint('init')
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      t.checkpoint('BVH')
      clash = list()
      for inner in range(N2):
         clash1 = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],
                                mindist=mindist)
         t.checkpoint('bvh_isect')
         clash2 = bvh.naive_isect(bvh1, bvh2, pos1[inner], pos2[inner], mindist)
         t.checkpoint('naive_isect')
         assert clash1 == clash2
         clash.append(clash1)
      # The vectorized call must reproduce the per-pose scalar results.
      clashvec = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
      t.checkpoint('bvh_isect_vec')
      assert np.all(clashvec == clash)
      nclash += sum(clash)
      assert clashvec[1] == bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2[1], mindist)
      # Mixed scalar/vector pose arguments must also be accepted.
      bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2[1], mindist)  # ?? make sure api works?
      bvh.bvh_isect_vec(bvh1, bvh2, pos1[1], pos2, mindist)
   print(
      f"Ngeom {N1:,} Npos {N2:,} isect {nclash/N:4.2f} bvh: {int(N/t.sum.bvh_isect):,}/s",
      f"bvh_vec {int(N/t.sum.bvh_isect_vec):,} fastnaive {int(N/t.sum.naive_isect):,}/s",
      f"ratio {int(t.sum.naive_isect/t.sum.bvh_isect_vec):,}x",
   )
def test_bvh_isect_fixed_range():
   """bvh_isect_fixed_range_vec must match bvh_isect_vec, both on the full
   point set and when restricted to indices [250, 749] (compared against
   BVHs built from only that slice).

   Fix vs original: removed dead locals ``N`` and ``nclash``, which were
   assigned but never used.
   """
   N1, N2 = 10, 10
   mindist = 0.04
   for outer in range(N1):
      xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      # BVHs over only the middle slice, as ground truth for the range query.
      bvh1_half = BVH(xyz1[250:750])
      bvh2_half = BVH(xyz2[250:750])
      pos1 = hm.rand_xform(N2, cart_sd=0.5)
      pos2 = hm.rand_xform(N2, cart_sd=0.5)
      isect1 = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
      isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist)
      assert np.all(isect1 == isect2)
      bounds = [250], [749], [250], [749]
      isect1 = bvh.bvh_isect_vec(bvh1_half, bvh2_half, pos1, pos2, mindist)
      isect2, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
      assert np.all(isect1 == isect2)
def test_bvh_min_cpp():
   """Smoke test: run the C++-side BVH minimum-distance self-test."""
   assert bvh_test.TEST_bvh_test_min()
def test_bvh_min_dist_fixed():
   """bvh_min_dist_fixed must return the true closest pair (checked against
   the reported indices) and agree with the naive implementation."""
   xyz1 = np.random.rand(5000, 3) + [0.9, 0.9, 0.0]
   xyz2 = np.random.rand(5000, 3)
   tcre = perf_counter()
   bvh1 = BVH(xyz1)
   bvh2 = BVH(xyz2)
   tcre = perf_counter() - tcre
   tbvh = perf_counter()
   d, i1, i2 = bvh.bvh_min_dist_fixed(bvh1, bvh2)
   tbvh = perf_counter() - tbvh
   # Recompute the distance from the returned indices as a sanity check.
   dtest = np.linalg.norm(xyz1[i1] - xyz2[i2])
   assert np.allclose(d, dtest, atol=1e-6)
   # tnp = perf_counter()
   # dnp = np.min(np.linalg.norm(xyz1[:, None] - xyz2[None], axis=2))
   # tnp = perf_counter() - tnp
   tn = perf_counter()
   dn = bvh.naive_min_dist_fixed(bvh1, bvh2)
   tn = perf_counter() - tn
   print()
   print("from bvh: ", d)
   print("from naive:", dn)
   assert np.allclose(dn, d, atol=1e-6)
   print(f"tnaivecpp {tn:5f} tbvh {tbvh:5f} tbvhcreate {tcre:5f}")
   print("bvh acceleration vs naive", tn / tbvh)
   # assert tn / tbvh > 100
def test_bvh_min_dist():
   """Posed minimum distance: bvh_min_dist must match a recomputation from
   its returned indices, the naive implementation, and the vectorized call."""
   xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   tcre = perf_counter()
   bvh1 = BVH(xyz1)
   bvh2 = BVH(xyz2)
   tcre = perf_counter() - tcre
   # print()
   totbvh, totnaive = 0, 0
   N = 10
   pos1 = hm.rand_xform(N, cart_sd=1)
   pos2 = hm.rand_xform(N, cart_sd=1)
   dis = list()
   for i in range(N):
      tbvh = perf_counter()
      d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
      tbvh = perf_counter() - tbvh
      # Recompute the distance from the returned closest-pair indices.
      dtest = np.linalg.norm(pos1[i] @ hm.hpoint(xyz1[i1]) - pos2[i] @ hm.hpoint(xyz2[i2]))
      assert np.allclose(d, dtest, atol=1e-6)
      tn = perf_counter()
      dn = bvh.naive_min_dist(bvh1, bvh2, pos1[i], pos2[i])
      tn = perf_counter() - tn
      assert np.allclose(dn, d, atol=1e-6)
      dis.append((d, i1, i2))
      # print(
      #    f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
      #    np.linalg.norm(pos1[:3, 3]),
      #    dtest - d,
      # )
      totnaive += tn
      totbvh += tbvh
   # Vectorized call must reproduce every scalar result exactly.
   d, i1, i2 = bvh.bvh_min_dist_vec(bvh1, bvh2, pos1, pos2)
   for a, b, c, x in zip(d, i1, i2, dis):
      assert a == x[0]
      assert b == x[1]
      assert c == x[2]
   print(
      "total times",
      totbvh / N * 1000,
      "ms",
      totnaive / totbvh,
      totnaive,
      f"tcre {tcre:2.4f}",
   )
def test_bvh_min_dist_floormin():
   """Same as test_bvh_min_dist but with a single random pose per iteration
   (scalar pose arguments instead of stacked arrays)."""
   xyz1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   xyz2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   tcre = perf_counter()
   bvh1 = BVH(xyz1)
   bvh2 = BVH(xyz2)
   tcre = perf_counter() - tcre
   # print()
   totbvh, totnaive = 0, 0
   N = 10
   for i in range(N):
      pos1 = hm.rand_xform(cart_sd=1)
      pos2 = hm.rand_xform(cart_sd=1)
      tbvh = perf_counter()
      d, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
      tbvh = perf_counter() - tbvh
      # Recompute the distance from the returned closest-pair indices.
      dtest = np.linalg.norm(pos1 @ hm.hpoint(xyz1[i1]) - pos2 @ hm.hpoint(xyz2[i2]))
      assert np.allclose(d, dtest, atol=1e-6)
      tn = perf_counter()
      dn = bvh.naive_min_dist(bvh1, bvh2, pos1, pos2)
      tn = perf_counter() - tn
      assert np.allclose(dn, d, atol=1e-6)
      # print(
      #    f"tnaivecpp {tn:1.6f} tbvh {tbvh:1.6f} tcpp/tbvh {tn/tbvh:8.1f}",
      #    np.linalg.norm(pos1[:3, 3]),
      #    dtest - d,
      # )
      totnaive += tn
      totbvh += tbvh
   print(
      "total times",
      totbvh / N * 1000,
      "ms",
      totnaive / totbvh,
      totnaive,
      f"tcre {tcre:2.4f}",
   )
def test_bvh_slide_single_inline():
   """Slide single-point BVHs along the x axis: hand-checked cases first,
   then randomized collinear cases where contact distance must equal 2*rad."""
   bvh1 = BVH([[-10, 0, 0]])
   bvh2 = BVH([[0, 0, 0]])
   d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
   assert d == 8
   # moves xyz1 to -2,0,0
   # should always come in from "infinity" from -direction
   bvh1 = BVH([[10, 0, 0]])
   bvh2 = BVH([[0, 0, 0]])
   d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=1.0, dirn=[1, 0, 0])
   assert d == -12
   # also moves xyz1 to -2,0,0
   for i in range(100):
      np.random.seed(i)
      # Random collinear setup: direction, radius, and both points on the x axis.
      dirn = np.array([np.random.randn(), 0, 0])
      dirn /= np.linalg.norm(dirn)
      rad = np.abs(np.random.randn() / 10)
      xyz1 = np.array([[np.random.randn(), 0, 0]])
      xyz2 = np.array([[np.random.randn(), 0, 0]])
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
      # After sliding, the spheres of radius rad must be exactly in contact.
      xyz1 += d * dirn
      assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
def test_bvh_slide_single():
   """Slide randomly placed single-point BVHs along a random direction: on
   contact, the spheres must touch exactly; on a miss, the perpendicular
   offset must exceed the contact diameter.

   Fix vs original: the final print divided ``nmiss`` by 1000, but the loop
   runs 100 iterations — the reported miss fraction was off by 10x.
   """
   nmiss = 0
   niter = 100
   for i in range(niter):
      # np.random.seed(i)
      dirn = np.random.randn(3)
      dirn /= np.linalg.norm(dirn)
      rad = np.abs(np.random.randn())
      xyz1 = np.random.randn(1, 3)
      xyz2 = np.random.randn(1, 3)
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      d = bvh.bvh_slide(bvh1, bvh2, np.eye(4), np.eye(4), rad=rad, dirn=dirn)
      if d < 9e8:
         # Contact found: after sliding, the spheres touch exactly.
         xyz1 += d * dirn
         assert np.allclose(np.linalg.norm(xyz1 - xyz2), 2 * rad, atol=1e-4)
      else:
         # Sentinel value: no contact possible along dirn — the component of
         # the offset perpendicular to dirn must exceed 2*rad.
         nmiss += 1
         delta = xyz2 - xyz1
         d0 = delta.dot(dirn)
         dperp2 = np.sum(delta * delta) - d0 * d0
         target_d2 = 4 * rad**2
         assert target_d2 < dperp2
   print("nmiss", nmiss, nmiss / niter)
def test_bvh_slide_single_xform():
   """Like test_bvh_slide_single, but with random rigid transforms applied to
   both single-point BVHs."""
   nmiss = 0
   for i in range(1000):
      dirn = np.random.randn(3)
      dirn /= np.linalg.norm(dirn)
      rad = np.abs(np.random.randn() * 2.0)
      xyz1 = np.random.randn(1, 3)
      xyz2 = np.random.randn(1, 3)
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      pos1 = hm.rand_xform()
      pos2 = hm.rand_xform()
      d = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)
      if d < 9e8:
         # Contact found: posed points must be exactly 2*rad apart after the slide.
         p1 = (pos1 @ hm.hpoint(xyz1[0]))[:3] + d * dirn
         p2 = (pos2 @ hm.hpoint(xyz2[0]))[:3]
         assert np.allclose(np.linalg.norm(p1 - p2), 2 * rad, atol=1e-4)
      else:
         # Miss: perpendicular offset of the posed points must exceed 2*rad.
         nmiss += 1
         p2 = pos2 @ hm.hpoint(xyz2[0])
         p1 = pos1 @ hm.hpoint(xyz1[0])
         delta = p2 - p1
         d0 = delta[:3].dot(dirn)
         dperp2 = np.sum(delta * delta) - d0 * d0
         target_d2 = 4 * rad**2
         assert target_d2 < dperp2
   print("nmiss", nmiss, nmiss / 1000)
def test_bvh_slide_whole():
   """Slide whole random point clouds: each scalar bvh_slide result is
   verified against bvh_min_dist (contact at exactly 2*radius, or a true
   miss), and the vectorized bvh_slide_vec must reproduce all scalar results.

   Fix vs original: the ``dn, i, j = bvh.bvh_min_dist(...)`` unpacks clobbered
   both enclosing loop indices ``i`` and ``j`` — renamed to ``i1``/``i2``.
   """
   # timings wtih -Ofast
   # slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
   # np.random.seed(0)
   N1, N2 = 2, 10
   totbvh, totbvhf, totmin = 0, 0, 0
   nmiss = 0
   for j in range(N1):
      xyz1 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(5000, 3) - [0.5, 0.5, 0.5]
      # tcre = perf_counter()
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      # bvh1f = BVH_32bit(xyz1)
      # bvh2f = BVH_32bit(xyz2)
      # tcre = perf_counter() - tcre
      pos1 = hm.rand_xform(N2, cart_sd=0.5)
      pos2 = hm.rand_xform(N2, cart_sd=0.5)
      dirn = np.random.randn(3)
      dirn /= np.linalg.norm(dirn)
      radius = 0.001 + np.random.rand() / 10
      slides = list()
      for i in range(N2):
         tbvh = perf_counter()
         dslide = bvh.bvh_slide(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)
         tbvh = perf_counter() - tbvh
         tbvhf = perf_counter()
         # dslide = bvh.bvh_slide_32bit(bvh1f, bvh2f, pos1[i], pos2[i], radius, dirn)
         tbvhf = perf_counter() - tbvhf
         slides.append(dslide)
         if dslide > 9e8:
            # Sentinel: no contact possible — min dist must exceed 2*radius.
            tn = perf_counter()
            dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, pos1[i], pos2[i])
            tn = perf_counter() - tn
            assert dn > 2 * radius
            nmiss += 1
         else:
            # Apply the slide, then the clouds must be in exact contact.
            tmp = hm.htrans(dirn * dslide) @ pos1[i]
            tn = perf_counter()
            dn, i1, i2 = bvh.bvh_min_dist(bvh1, bvh2, tmp, pos2[i])
            tn = perf_counter() - tn
            if not np.allclose(dn, 2 * radius, atol=1e-6):
               print(dn, 2 * radius)
            assert np.allclose(dn, 2 * radius, atol=1e-6)
         totmin += tn
         totbvh += tbvh
         totbvhf += tbvhf
      slides2 = bvh.bvh_slide_vec(bvh1, bvh2, pos1, pos2, radius, dirn)
      assert np.allclose(slides, slides2)
   N = N1 * N2
   print(
      f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
      # f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
      f"fracmiss: {nmiss/N}",
   )
def test_collect_pairs_simple():
   """Hand-checked pair collection on two 2-point BVHs: identity pose, then
   +y and -y translations shift which points fall within mindist."""
   print("test_collect_pairs_simple")
   # Output buffers, pre-filled with -1 so unwritten slots are obvious.
   bufbvh = -np.ones((100, 2), dtype="i4")
   bufnai = -np.ones((100, 2), dtype="i4")
   bvh1 = BVH([[0, 0, 0], [0, 2, 0]])
   bvh2 = BVH([[0.9, 0, 0], [0.9, 2, 0]])
   assert len(bvh1) == 2
   mindist = 1.0
   pos1 = np.eye(4)
   pos2 = np.eye(4)
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert not o
   print(pbvh.shape)
   assert len(pbvh) == 2 and nnai == 2
   assert np.all(pbvh == [[0, 0], [1, 1]])
   assert np.all(bufnai[:nnai] == [[0, 0], [1, 1]])
   # Shift bvh1 up by 2: only its point 0 pairs with bvh2's point 1.
   pos1 = hm.htrans([0, 2, 0])
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert not o
   assert len(pbvh) == 1 and nnai == 1
   assert np.all(pbvh == [[0, 1]])
   assert np.all(bufnai[:nnai] == [[0, 1]])
   # Shift bvh1 down by 2: only its point 1 pairs with bvh2's point 0.
   pos1 = hm.htrans([0, -2, 0])
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert not o
   assert len(pbvh) == 1 and nnai == 1
   assert np.all(pbvh == [[1, 0]])
   assert np.all(bufnai[:nnai] == [[1, 0]])
def test_collect_pairs_simple_selection():
   """Same as test_collect_pairs_simple but with selection masks: reported
   pair indices refer to the ORIGINAL coordinate arrays, not the masked set."""
   print("test_collect_pairs_simple_selection")
   bufbvh = -np.ones((100, 2), dtype="i4")
   bufnai = -np.ones((100, 2), dtype="i4")
   crd1 = [[0, 0, 0], [0, 0, 0], [0, 2, 0], [0, 0, 0]]
   crd2 = [[0, 0, 0], [0.9, 0, 0], [0, 0, 0], [0.9, 2, 0]]
   # Masks select alternating points: bvh1 keeps 0 and 2, bvh2 keeps 1 and 3.
   mask1 = [1, 0, 1, 0]
   mask2 = [0, 1, 0, 1]
   bvh1 = BVH(crd1, mask1)
   bvh2 = BVH(crd2, mask2)
   assert len(bvh1) == 2
   assert np.allclose(bvh1.radius(), 1.0, atol=1e-6)
   assert np.allclose(bvh1.center(), [0, 1, 0], atol=1e-6)
   mindist = 1.0
   pos1 = np.eye(4)
   pos2 = np.eye(4)
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   assert not o
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert len(pbvh) == 2 and nnai == 2
   assert np.all(pbvh == [[0, 1], [2, 3]])
   assert np.all(bufnai[:nnai] == [[0, 1], [2, 3]])
   pos1 = hm.htrans([0, 2, 0])
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   assert not o
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert len(pbvh) == 1 and nnai == 1
   assert np.all(pbvh == [[0, 3]])
   assert np.all(bufnai[:nnai] == [[0, 3]])
   pos1 = hm.htrans([0, -2, 0])
   pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufbvh)
   assert not o
   nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1, pos2, mindist, bufnai)
   assert len(pbvh) == 1 and nnai == 1
   assert np.all(pbvh == [[2, 1]])
   assert np.all(bufnai[:nnai] == [[2, 1]])
def test_collect_pairs():
   """Pair collection on random clouds: bvh_collect_pairs must match the
   naive version, bvh_count_pairs, the vectorized calls, and every reported
   pair must actually be within mindist."""
   N1, N2 = 1, 50
   N = N1 * N2
   Npts = 500
   # NOTE(review): this first tuple is immediately shadowed by the next line,
   # and totnai below is incremented by 1 *and* by tnai each iteration — the
   # printed naive timing is skewed; looks like a leftover. Print-only.
   totbvh, totbvhf, totmin = 0, 0, 0
   totbvh, totnai, totct, ntot = 0, 0, 0, 0
   bufbvh = -np.ones((Npts * Npts, 2), dtype="i4")
   bufnai = -np.ones((Npts * Npts, 2), dtype="i4")
   for j in range(N1):
      xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      # Sample pose pairs whose centers are a moderate distance apart.
      pos1, pos2 = list(), list()
      while 1:
         x1 = hm.rand_xform(cart_sd=0.5)
         x2 = hm.rand_xform(cart_sd=0.5)
         d = np.linalg.norm(x1[:, 3] - x2[:, 3])
         if 0.8 < d < 1.3:
            pos1.append(x1)
            pos2.append(x2)
            if len(pos1) == N2:
               break
      pos1 = np.stack(pos1)
      pos2 = np.stack(pos2)
      pairs = list()
      mindist = 0.002 + np.random.rand() / 10
      for i in range(N2):
         tbvh = perf_counter()
         pbvh, o = bvh.bvh_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufbvh)
         tbvh = perf_counter() - tbvh
         assert not o
         tnai = perf_counter()
         nnai = bvh.naive_collect_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist, bufnai)
         tnai = perf_counter() - tnai
         tct = perf_counter()
         nct = bvh.bvh_count_pairs(bvh1, bvh2, pos1[i], pos2[i], mindist)
         tct = perf_counter() - tct
         ntot += nct
         assert nct == len(pbvh)
         totnai += 1
         pairs.append(pbvh.copy())
         totbvh += tbvh
         totnai += tnai
         totct += tct
         assert len(pbvh) == nnai
         if len(pbvh) == 0:
            continue
         # Sort both pair lists the same way before elementwise comparison.
         o = np.lexsort((pbvh[:, 1], pbvh[:, 0]))
         pbvh[:] = pbvh[:][o]
         o = np.lexsort((bufnai[:nnai, 1], bufnai[:nnai, 0]))
         bufnai[:nnai] = bufnai[:nnai][o]
         assert np.all(pbvh == bufnai[:nnai])
         # Every reported pair must truly be within mindist once posed.
         pair1 = pos1[i] @ hm.hpoint(xyz1[pbvh[:, 0]])[..., None]
         pair2 = pos2[i] @ hm.hpoint(xyz2[pbvh[:, 1]])[..., None]
         dpair = np.linalg.norm(pair2 - pair1, axis=1)
         assert np.max(dpair) <= mindist
      pcount = bvh.bvh_count_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
      assert np.all(pcount == [len(x) for x in pairs])
      # Vectorized collection returns one flat array plus per-pose [lb, ub) ranges.
      pairs2, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
      for i, p in enumerate(pairs):
         lb, ub = lbub[i]
         assert np.all(pairs2[lb:ub] == pairs[i])
      # Mixed scalar/vector pose arguments must broadcast.
      x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[:3], pos2[0], mindist)
      assert len(y) == 3
      x, y = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1[0], pos2[:5], mindist)
      assert len(y) == 5
   print(
      f"collect test {N:,} iter bvh {int(N/totbvh):,}/s naive {int(N/totnai):,}/s ratio {totnai/totbvh:7.2f} count-only {int(N/totct):,}/s avg cnt {ntot/N}"
   )
def test_collect_pairs_range():
   """Range-restricted pair collection: with no bounds it must match
   bvh_collect_pairs_vec; with index bounds every reported pair must fall
   inside the bounds and agree with post-hoc filtering of the full result."""
   N1, N2 = 1, 500
   N = N1 * N2
   Npts = 1000
   for j in range(N1):
      xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      # Sample pose pairs whose centers are a moderate distance apart.
      pos1, pos2 = list(), list()
      while 1:
         x1 = hm.rand_xform(cart_sd=0.5)
         x2 = hm.rand_xform(cart_sd=0.5)
         d = np.linalg.norm(x1[:, 3] - x2[:, 3])
         if 0.8 < d < 1.3:
            pos1.append(x1)
            pos2.append(x2)
            if len(pos1) == N2:
               break
      pos1 = np.stack(pos1)
      pos2 = np.stack(pos2)
      pairs = list()
      mindist = 0.002 + np.random.rand() / 10
      pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
      assert np.all(lbub == rlbub)
      assert np.all(pairs == rpairs)
      # Restrict the first-structure indices to [250, 750].
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [250],
                                                      [750])
      assert len(rlbub) == len(pos1)
      assert np.all(rpairs[:, 0] >= 250)
      assert np.all(rpairs[:, 0] <= 750)
      filt_pairs = pairs[np.logical_and(pairs[:, 0] >= 250, pairs[:, 0] <= 750)]
      # assert np.all(filt_pairs == rpairs)  # sketchy???
      assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
      # Restrict both sides: indices 600-1000 on bvh1 and 100-400 on bvh2.
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, [600],
                                                      [1000], -1, [100], [400], -1)
      assert len(rlbub) == len(pos1)
      assert np.all(rpairs[:, 0] >= 600)
      assert np.all(rpairs[:, 0] <= 1000)
      assert np.all(rpairs[:, 1] >= 100)
      assert np.all(rpairs[:, 1] <= 400)
      filt_pairs = pairs[(pairs[:, 0] >= 600) * (pairs[:, 0] <= 1000) * (pairs[:, 1] >= 100) *
                         (pairs[:, 1] <= 400)]
      assert np.all(filt_pairs == rpairs)  # sketchy???
      assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_collect_pairs_range_sym():
   """Range-restricted pair collection with a symmetry period: the third/sixth
   bound arguments repeat each [lb, ub] window modulo that period, so the
   accepted index windows tile across the structure."""
   # np.random.seed(132)
   N1, N2 = 5, 100
   N = N1 * N2
   Npts = 1000
   for j in range(N1):
      xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      # Sample pose pairs whose centers are a moderate distance apart.
      pos1, pos2 = list(), list()
      while 1:
         x1 = hm.rand_xform(cart_sd=0.5)
         x2 = hm.rand_xform(cart_sd=0.5)
         d = np.linalg.norm(x1[:, 3] - x2[:, 3])
         if 0.8 < d < 1.3:
            pos1.append(x1)
            pos2.append(x2)
            if len(pos1) == N2:
               break
      pos1 = np.stack(pos1)
      pos2 = np.stack(pos2)
      pairs = list()
      mindist = 0.002 + np.random.rand() / 10
      pairs, lbub = bvh.bvh_collect_pairs_vec(bvh1, bvh2, pos1, pos2, mindist)
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist)
      assert np.all(lbub == rlbub)
      assert np.all(pairs == rpairs)
      # Window [100, 400] repeated with period len(xyz1)//2 = 500 -> also [600, 900].
      bounds = [100], [400], len(xyz1) // 2
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
      assert len(rlbub) == len(pos1)
      assert np.all(
         np.logical_or(np.logical_and(100 <= rpairs[:, 0], rpairs[:, 0] <= 400),
                       np.logical_and(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)))
      filt_pairs = pairs[np.logical_or(np.logical_and(100 <= pairs[:, 0], pairs[:, 0] <= 400),
                                       np.logical_and(600 <= pairs[:, 0], pairs[:, 0] <= 900))]
      assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
      # Add a second-side window [20, 180] with period len(xyz1)//5 = 200.
      bounds = [100], [400], len(xyz1) // 2, [20], [180], len(xyz1) // 5
      rpairs, rlbub = bvh.bvh_collect_pairs_range_vec(bvh1, bvh2, pos1, pos2, mindist, *bounds)
      def awful(p):
         # Membership predicate for the tiled index windows described above.
         return np.logical_and(
            np.logical_or(np.logical_and(100 <= p[:, 0], p[:, 0] <= 400),
                          np.logical_and(600 <= p[:, 0], p[:, 0] <= 900)),
            np.logical_or(
               np.logical_and(+20 <= p[:, 1], p[:, 1] <= 180),
               np.logical_or(
                  np.logical_and(220 <= p[:, 1], p[:, 1] <= 380),
                  np.logical_or(
                     np.logical_and(420 <= p[:, 1], p[:, 1] <= 580),
                     np.logical_or(np.logical_and(620 <= p[:, 1], p[:, 1] <= 780),
                                   np.logical_and(820 <= p[:, 1], p[:, 1] <= 980))))))
      assert len(rlbub) == len(pos1)
      assert np.all(awful(rpairs))
      filt_pairs = pairs[awful(pairs)]
      assert np.all(filt_pairs == rpairs)  # sketchy???
      assert np.allclose(np.unique(filt_pairs, axis=1), np.unique(rpairs, axis=1))
def test_slide_collect_pairs():
   """Slide bvh2 along a random direction into contact, then verify the result.

   For each random pose pair: bvh_slide returns the slide distance along a
   random unit direction (a sentinel > 9e8 means contact is impossible).
   When no contact is possible the min distance must exceed the sphere
   diameter; after a successful slide the min distance must equal exactly
   2 * radius, and every pair reported by bvh_collect_pairs on the point
   subsets must lie within the pair cutoff.
   """
   # timings with -Ofast
   # slide test 10,000 iter bvhslide float: 16,934/s double: 16,491/s bvhmin 17,968/s fracmiss: 0.0834
   # np.random.seed(0)
   N1, N2 = 2, 50
   Npts = 5000
   totbvh, totbvhf, totcol, totmin = 0, 0, 0, 0
   nhit = 0
   # preallocated output buffer for bvh_collect_pairs
   buf = -np.ones((Npts * Npts, 2), dtype="i4")
   for j in range(N1):
      xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
      # pair collection runs on a 1/5 subset of the points
      xyzcol1 = xyz1[:int(Npts / 5)]
      xyzcol2 = xyz2[:int(Npts / 5)]
      # tcre = perf_counter()
      bvh1 = BVH(xyz1)
      bvh2 = BVH(xyz2)
      bvhcol1 = BVH(xyzcol1)
      bvhcol2 = BVH(xyzcol2)
      # tcre = perf_counter() - tcre
      for i in range(N2):
         dirn = np.random.randn(3)
         dirn /= np.linalg.norm(dirn)
         radius = 0.001 + np.random.rand() / 10
         pairdis = 3 * radius
         pos1 = hm.rand_xform(cart_sd=0.5)
         pos2 = hm.rand_xform(cart_sd=0.5)
         tbvh = perf_counter()
         dslide = bvh.bvh_slide(bvh1, bvh2, pos1, pos2, radius, dirn)
         tbvh = perf_counter() - tbvh
         if dslide > 9e8:
            # sentinel: no contact possible along dirn, so the clouds must
            # already be farther apart than the sphere diameter
            tn = perf_counter()
            # NOTE: was "dn, i, j = ..." which clobbered the loop variables
            dn, imin, jmin = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
            tn = perf_counter() - tn
            assert dn > 2 * radius
         else:
            nhit += 1
            pos1 = hm.htrans(dirn * dslide) @ pos1
            tn = perf_counter()
            dn, imin, jmin = bvh.bvh_min_dist(bvh1, bvh2, pos1, pos2)
            tn = perf_counter() - tn
            if not np.allclose(dn, 2 * radius, atol=1e-6):
               print(dn, 2 * radius)
            # after sliding into contact, spheres of the given radius just touch
            assert np.allclose(dn, 2 * radius, atol=1e-6)
            tcol = perf_counter()
            pair, o = bvh.bvh_collect_pairs(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)
            assert not o  # output buffer must not overflow
            if len(pair) > 0:
               tcol = perf_counter() - tcol
               totcol += tcol
               # recompute every reported pair distance and confirm the cutoff
               pair1 = pos1 @ hm.hpoint(xyzcol1[pair[:, 0]])[..., None]
               pair2 = pos2 @ hm.hpoint(xyzcol2[pair[:, 1]])[..., None]
               dpair = np.linalg.norm(pair2 - pair1, axis=1)
               assert np.max(dpair) <= pairdis
         totmin += tn
         totbvh += tbvh
   N = N1 * N2
   print(
      f"slide test {N:,} iter bvhslide double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
      # f"slide test {N:,} iter bvhslide float: {int(N/totbvhf):,}/s double: {int(N/totbvh):,}/s bvhmin {int(N/totmin):,}/s",
      f"fracmiss: {nhit/N} collect {int(nhit/totcol):,}/s",
   )
def test_bvh_accessors():
   """BVH com() and centers() must reproduce the input coordinates."""
   pts = np.random.rand(10, 3) - [0.5, 0.5, 0.5]
   tree = BVH(pts)
   # center of mass equals the plain coordinate mean
   assert np.allclose(tree.com()[:3], np.mean(pts, axis=0))
   # every input point coincides with one of the reported centers
   centers = tree.centers()
   dists = np.linalg.norm(centers[:, :3] - pts[:, None], axis=2)
   assert np.allclose(np.min(dists, axis=1), 0)
def random_walk(N):
x = np.random.randn(N, 3).astype("f").cumsum(axis=0)
x -= x.mean(axis=0)
return 0.5 * x / x.std()
def test_bvh_isect_range(body=None, cart_sd=0.3, N2=10, mindist=0.02):
   """Check isect_range against the naive reference and the vectorized form.

   body: optional Body whose bvh_bb is tested against itself; otherwise random
   walks are generated.  For every pose pair, isect_range_single must agree
   with naive_isect_range; the vectorized isect_range must reproduce the
   per-pose results; and trimming each pose to its returned (lb, ub) range
   must eliminate the clash wherever a valid range exists.
   """
   N1 = 1 if body else 2
   N = N1 * N2
   totbvh, totnaive, totbvh0, nhit = 0, 0, 0, 0
   for ibvh in range(N1):
      if body:
         bvh1, bvh2 = body.bvh_bb, body.bvh_bb
      else:
         # xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
         # xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
         xyz1 = random_walk(1000)
         xyz2 = random_walk(1000)
         tcre = perf_counter()
         bvh1 = BVH(xyz1)
         bvh2 = BVH(xyz2)
         tcre = perf_counter() - tcre
      pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
      pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
      ranges = list()
      for i in range(N2):
         tbvh0 = perf_counter()
         c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist)
         tbvh0 = perf_counter() - tbvh0
         # if not c:
         #    continue
         if c:
            nhit += 1
         # single-pose range query vs the slow-but-trusted naive version
         tbvh = perf_counter()
         range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
                                        mindist=mindist)
         tbvh = perf_counter() - tbvh
         tn = perf_counter()
         range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], mindist)
         assert range1 == range2
         tn = perf_counter() - tn
         ranges.append(range1)
         # print(f"{str(range1):=^80}")
         # body.move_to(pos1).dump_pdb("test1.pdb")
         # body.move_to(pos2).dump_pdb("test2.pdb")
         # return
         # print(f"{i:3} range {range1} {tn / tbvh:8.2f}, {tn:1.6f}, {tbvh:1.6f}")
         totbvh += tbvh
         totnaive += tn
         totbvh0 += tbvh0
      # vectorized version must reproduce every per-pose (lb, ub)
      lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
      ranges = np.array(ranges)
      assert np.all(lb == ranges[:, 0])
      assert np.all(ub == ranges[:, 1])
      # where a valid range exists (both bounds >= 0), trimming removes the clash
      ok = np.logical_and(lb >= 0, ub >= 0)
      isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
      assert not np.any(isect[ok])
   print(
      f"iscet {nhit:,} hit of {N:,} iter bvh: {int(nhit/totbvh):,}/s fastnaive {int(nhit/totnaive):,}/s",
      f"ratio {int(totnaive/totbvh):,}x isect-only: {totbvh/totbvh0:3.3f}x",
   )
def test_bvh_isect_range_ids():
   """Exercise isect_range on BVHs whose points carry repeated object ids.

   Points are grouped into Nids equal consecutive blocks, with Nids swept over
   divisors of Npts.  The (lb, ub) returned by isect_range index ids, not
   points; trimming each pose to its range must remove the clash reported by
   bvh_isect_vec.
   """
   N1 = 50
   N2 = 100
   N = N1 * N2
   # Nids = 100
   cart_sd = 0.3
   mindist = 0.03
   Npts = 1000
   factors = [1000, 500, 250, 200, 125, 100, 50, 40, 25, 20, 10, 8, 5, 4, 2, 1]
   # Npts = 6
   # factors = [3]
   # mindist = 0.3
   # N1 = 1
   # ids must divide the point count evenly
   assert all(Npts % f == 0 for f in factors)
   for ibvh in range(N1):
      # for ibvh in [5]:
      # np.random.seed(ibvh)
      # print(ibvh)
      Nids = factors[ibvh % len(factors)]
      # xyz1 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
      # xyz2 = np.random.rand(2000, 3) - [0.5, 0.5, 0.5]
      xyz1 = random_walk(Npts)
      xyz2 = random_walk(Npts)
      tcre = perf_counter()
      # assign Npts/Nids consecutive points to each object id
      bvh1 = BVH(xyz1, [], np.repeat(np.arange(Nids), Npts / Nids))
      bvh2 = BVH(xyz2, [], np.repeat(np.arange(Nids), Npts / Nids))
      tcre = perf_counter() - tcre
      pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
      pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
      # pos1 = pos1[99:]
      # pos2 = pos2[99:]
      # print(bvh1.vol_lb())
      # print(bvh1.vol_ub())
      # print(bvh1.obj_id())
      # assert 0
      # assert bvh1.max_id() == Nids - 1
      # assert bvh1.min_lb() == 0
      # assert bvh1.max_ub() == Nids - 1
      lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist)
      # keep only poses for which a valid trim range exists (lb == -1 means none)
      pos1 = pos1[lb != -1]
      pos2 = pos2[lb != -1]
      ub = ub[lb != -1]
      lb = lb[lb != -1]
      # print(lb, ub)
      assert np.all(0 <= lb) and np.all(lb - 1 <= ub) and np.all(ub < Nids)
      isectall = bvh.bvh_isect_vec(bvh1, bvh2, pos1, pos2, mindist)
      # a pose clashes iff its trim range is a proper subrange of [0, Nids)
      assert np.all(isectall == np.logical_or(lb > 0, ub < Nids - 1))
      isect, clash = bvh.bvh_isect_fixed_range_vec(bvh1, bvh2, pos1, pos2, mindist, lb, ub)
      # diagnostic dump if any trimmed pose still clashes
      if np.any(isect):
         print(np.where(isect)[0])
         print('lb', lb[isect])
         print('ub', ub[isect])
         print('cA', clash[isect, 0])
         print('cB', clash[isect, 1])
      # print('is', isect.astype('i') * 100)
      # print('isectlbub', np.sum(isect), np.sum(isect) / len(isect))
      assert not np.any(isect[lb <= ub])
def test_bvh_isect_range_lb_ub(body=None, cart_sd=0.3, N1=3, N2=20, mindist=0.02):
   """Check the isect_range trim limits maxtrim / maxtrim_lb / maxtrim_ub.

   Runs every combination of enabled (-1 means unlimited) trim limits.  When
   a valid range is returned it must respect the requested limits; when the
   limits make a trim impossible (range1[0] < 0), the pose must genuinely
   clash.  The vectorized isect_range must reproduce the per-pose results.
   """
   N1 = 1 if body else N1
   N = N1 * N2
   Npts = 1000
   nhit, nrangefail = 0, 0
   # every on/off combination of the three trim limits
   args = [
      rp.Bunch(maxtrim=a, maxtrim_lb=b, maxtrim_ub=c) for a in (-1, 400) for b in (-1, 300)
      for c in (-1, 300)
   ]
   for ibvh, arg in it.product(range(N1), args):
      if body:
         bvh1, bvh2 = body.bvh_bb, body.bvh_bb
      else:
         # xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
         # xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
         xyz1 = random_walk(Npts)
         xyz2 = random_walk(Npts)
         bvh1 = BVH(xyz1)
         bvh2 = BVH(xyz2)
      pos1 = hm.rand_xform(N2, cart_sd=cart_sd)
      pos2 = hm.rand_xform(N2, cart_sd=cart_sd)
      ranges = list()
      for i in range(N2):
         c = bvh.bvh_isect(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist)
         if c: nhit += 1
         range1 = bvh.isect_range_single(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],
                                        mindist=mindist, **arg)
         ranges.append(range1)
         if range1[0] < 0:
            # no trim satisfying the limits exists; only legal if there was a clash
            nrangefail += 1
            assert c
            continue
         # each enabled limit must be honored by the returned range
         assert (arg.maxtrim < 0) or (np.diff(range1) + 1 >= Npts - arg.maxtrim)
         assert (arg.maxtrim_lb < 0) or (range1[0] <= arg.maxtrim_lb)
         assert (arg.maxtrim_ub < 0) or (range1[1] + 1 >= Npts - arg.maxtrim_ub)
         # mostly covered elsewhere, and quite slow
         # range2 = bvh.naive_isect_range(bvh1, bvh2, pos1[i], pos2[i], mindist)
         # assert range1 == range2
      # vectorized version must reproduce every per-pose (lb, ub)
      lb, ub = bvh.isect_range(bvh1, bvh2, pos1, pos2, mindist, **arg)
      ranges = np.array(ranges)
      assert np.all(lb == ranges[:, 0])
      assert np.all(ub == ranges[:, 1])
   print(f"iscet {nhit:,} hit of {N:,} iter, frangefail {nrangefail/nhit}", )
def test_bvh_pickle(tmpdir):
   """Round-trip two BVHs through pickle and confirm queries are unchanged."""
   pts1 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   pts2 = np.random.rand(1000, 3) - [0.5, 0.5, 0.5]
   tree1, tree2 = BVH(pts1), BVH(pts2)
   xf1 = hm.rand_xform(cart_sd=1)
   xf2 = hm.rand_xform(cart_sd=1)
   tbvh = perf_counter()
   d, i1, i2 = bvh.bvh_min_dist(tree1, tree2, xf1, xf2)
   rng = bvh.isect_range_single(tree1, tree2, xf1, xf2, mindist=d + 0.01)
   # write both trees to disk, then read them back
   with open(tmpdir + "/1", "wb") as f:
      _pickle.dump(tree1, f)
   with open(tmpdir + "/2", "wb") as f:
      _pickle.dump(tree2, f)
   with open(tmpdir + "/1", "rb") as f:
      tree1b = _pickle.load(f)
   with open(tmpdir + "/2", "rb") as f:
      tree2b = _pickle.load(f)
   # structure and accessor results survive the round trip
   assert len(tree1) == len(tree1b)
   assert len(tree2) == len(tree2b)
   assert np.allclose(tree1.com(), tree1b.com())
   assert np.allclose(tree1.centers(), tree1b.centers())
   assert np.allclose(tree2.com(), tree2b.com())
   assert np.allclose(tree2.centers(), tree2b.centers())
   # queries against the reloaded trees match the originals exactly
   db, i1b, i2b = bvh.bvh_min_dist(tree1b, tree2b, xf1, xf2)
   assert np.allclose(d, db)
   assert i1 == i1b
   assert i2 == i2b
   rngb = bvh.isect_range_single(tree1b, tree2b, xf1, xf2, mindist=d + 0.01)
   assert rngb == rng
def test_bvh_threading_isect_may_fail():
   """Compare serial bvh_isect against a ThreadPoolExecutor fan-out.

   The speedup assertion is disabled (timing-dependent, hence "may_fail" in
   the name), but the threaded result must still match the serial one exactly.
   """
   from concurrent.futures import ThreadPoolExecutor
   from itertools import repeat
   reps = 1
   npos = 1000
   Npts = 1000
   xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
   xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
   bvh1 = BVH(xyz1)
   bvh2 = BVH(xyz2)
   mindist = 0.1
   tottmain, tottthread = 0, 0
   nt = 2
   # context-manage the pool so worker threads are joined on exit
   # (the original leaked the executor and never shut it down)
   with ThreadPoolExecutor(nt) as exe:
      for i in range(reps):
         pos1 = hm.rand_xform(npos, cart_sd=0.5)
         pos2 = hm.rand_xform(npos, cart_sd=0.5)
         # serial reference
         t = perf_counter()
         isect = np.array(
            [bvh.bvh_isect(bvh1, bvh2, p1, p2, mindist) for p1, p2 in zip(pos1, pos2)])
         tmain = perf_counter() - t
         tottmain += tmain
         # fan the same work out over nt threads, one pose chunk each
         t = perf_counter()
         futures = exe.map(
            bvh.bvh_isect_vec,
            repeat(bvh1),
            repeat(bvh2),
            np.split(pos1, nt),
            np.split(pos2, nt),
            repeat(mindist),
         )
         isect2 = np.concatenate(list(futures))
         tthread = perf_counter() - t
         tottthread += tthread
         print("fisect", np.sum(isect2) / len(isect2))
         # threading must not change results
         assert np.allclose(isect, isect2)
         # print("bvh_isect", i, tmain / tthread, ">= 1.1")
         # assert tmain / tthread > 1.1
   print("bvh_isect", tottmain / tottthread)
def test_bvh_threading_mindist_may_fail():
   """Compare serial bvh_min_dist against a ThreadPoolExecutor fan-out.

   The speedup assertion is disabled (timing-dependent, hence "may_fail" in
   the name), but the threaded result must still match the serial one exactly.
   """
   from concurrent.futures import ThreadPoolExecutor
   from itertools import repeat
   reps = 1
   npos = 100
   Npts = 1000
   xyz1 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
   xyz2 = np.random.rand(Npts, 3) - [0.5, 0.5, 0.5]
   bvh1 = BVH(xyz1)
   bvh2 = BVH(xyz2)
   tottmain, tottthread = 0, 0
   nt = 2
   # context-manage the pool so worker threads are joined on exit
   # (the original leaked the executor and never shut it down)
   with ThreadPoolExecutor(nt) as exe:
      for i in range(reps):
         pos1 = hm.rand_xform(npos, cart_sd=0.7)
         pos2 = hm.rand_xform(npos, cart_sd=0.7)
         # serial reference: one (dist, i1, i2) row per pose pair
         t = perf_counter()
         mindist = np.array(
            [bvh.bvh_min_dist(bvh1, bvh2, p1, p2) for p1, p2 in zip(pos1, pos2)])
         tmain = perf_counter() - t
         tottmain += tmain
         # fan the same work out over nt threads, one pose chunk each
         t = perf_counter()
         futures = exe.map(
            bvh.bvh_min_dist_vec,
            repeat(bvh1),
            repeat(bvh2),
            np.split(pos1, nt),
            np.split(pos2, nt),
         )
         mindist2 = np.concatenate(list(futures), axis=1).T
         tthread = perf_counter() - t
         tottthread += tthread
         # threading must not change results
         assert np.allclose(mindist, mindist2)
         # print("bvh_min_dist", i, tmain / tthread, ">= 1.1")
         # assert tmain / tthread > 1.1
   print("bvh_min_dist", tottmain / tottthread)
if __name__ == "__main__":
   # Ad-hoc runner: execute selected tests directly (without pytest) by
   # uncommenting the desired calls below.
   # from rpxdock.body import Body
   # b = Body("rpxdock/data/pdb/DHR14.pdb")
   # test_bvh_isect_range(b, cart_sd=15, N2=500, mindist=3.5)
   # test_bvh_isect_cpp()
   # test_bvh_isect_fixed()
   test_bvh_isect()
   # test_bvh_isect_fixed_range()
   # test_bvh_min_cpp()
   # test_bvh_min_dist_fixed()
   # test_bvh_min_dist()
   # test_bvh_min_dist_floormin()
   # test_bvh_slide_single_inline()
   # test_bvh_slide_single()
   # test_bvh_slide_single_xform()
   # test_bvh_slide_whole()
   # test_collect_pairs_simple()
   # test_collect_pairs_simple_selection()
   # test_collect_pairs()
   # test_collect_pairs_range()
   # test_collect_pairs_range_sym()
   # test_slide_collect_pairs()
   # test_bvh_accessors()
   # test_bvh_isect_range()
   # test_bvh_isect_range_ids()
   # test_bvh_isect_range_lb_ub(N1=10, N2=20)
   # import tempfile
   # test_bvh_pickle(tempfile.mkdtemp())
   # test_bvh_threading_mindist_may_fail()
   # test_bvh_threading_isect_may_fail()
| [
"rpxdock.bvh.bvh.naive_isect_range",
"rpxdock.bvh.bvh.bvh_collect_pairs_range_vec",
"numpy.random.rand",
"rpxdock.bvh.bvh.naive_isect_fixed",
"rpxdock.bvh.bvh.bvh_isect_fixed_range_vec",
"rpxdock.bvh.bvh.bvh_count_pairs_vec",
"_pickle.dump",
"numpy.array",
"rpxdock.bvh.BVH",
"numpy.linalg.norm",
... | [((321, 351), 'rpxdock.bvh.bvh_test.TEST_bvh_test_isect', 'bvh_test.TEST_bvh_test_isect', ([], {}), '()\n', (349, 351), False, 'from rpxdock.bvh import bvh_test\n'), ((3652, 3680), 'rpxdock.bvh.bvh_test.TEST_bvh_test_min', 'bvh_test.TEST_bvh_test_min', ([], {}), '()\n', (3678, 3680), False, 'from rpxdock.bvh import bvh_test\n'), ((3775, 3798), 'numpy.random.rand', 'np.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (3789, 3798), True, 'import _pickle, numpy as np, itertools as it\n'), ((3809, 3823), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3821, 3823), False, 'from time import perf_counter\n'), ((3834, 3843), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (3837, 3843), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3854, 3863), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (3857, 3863), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3907, 3921), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3919, 3921), False, 'from time import perf_counter\n'), ((3937, 3971), 'rpxdock.bvh.bvh.bvh_min_dist_fixed', 'bvh.bvh_min_dist_fixed', (['bvh1', 'bvh2'], {}), '(bvh1, bvh2)\n', (3959, 3971), False, 'from rpxdock.bvh import BVH, bvh\n'), ((4015, 4050), 'numpy.linalg.norm', 'np.linalg.norm', (['(xyz1[i1] - xyz2[i2])'], {}), '(xyz1[i1] - xyz2[i2])\n', (4029, 4050), True, 'import _pickle, numpy as np, itertools as it\n'), ((4061, 4094), 'numpy.allclose', 'np.allclose', (['d', 'dtest'], {'atol': '(1e-06)'}), '(d, dtest, atol=1e-06)\n', (4072, 4094), True, 'import _pickle, numpy as np, itertools as it\n'), ((4232, 4246), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4244, 4246), False, 'from time import perf_counter\n'), ((4255, 4291), 'rpxdock.bvh.bvh.naive_min_dist_fixed', 'bvh.naive_min_dist_fixed', (['bvh1', 'bvh2'], {}), '(bvh1, bvh2)\n', (4279, 4291), False, 'from rpxdock.bvh import BVH, bvh\n'), ((4397, 4427), 'numpy.allclose', 'np.allclose', (['dn', 'd'], {'atol': '(1e-06)'}), '(dn, d, atol=1e-06)\n', (4408, 
4427), True, 'import _pickle, numpy as np, itertools as it\n'), ((4713, 4727), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4725, 4727), False, 'from time import perf_counter\n'), ((4738, 4747), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (4741, 4747), False, 'from rpxdock.bvh import BVH, bvh\n'), ((4758, 4767), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (4761, 4767), False, 'from rpxdock.bvh import BVH, bvh\n'), ((4860, 4887), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N'], {'cart_sd': '(1)'}), '(N, cart_sd=1)\n', (4873, 4887), True, 'import rpxdock.homog as hm\n'), ((4898, 4925), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N'], {'cart_sd': '(1)'}), '(N, cart_sd=1)\n', (4911, 4925), True, 'import rpxdock.homog as hm\n'), ((5636, 5680), 'rpxdock.bvh.bvh.bvh_min_dist_vec', 'bvh.bvh_min_dist_vec', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (5656, 5680), False, 'from rpxdock.bvh import BVH, bvh\n'), ((6084, 6098), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6096, 6098), False, 'from time import perf_counter\n'), ((6109, 6118), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (6112, 6118), False, 'from rpxdock.bvh import BVH, bvh\n'), ((6129, 6138), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (6132, 6138), False, 'from rpxdock.bvh import BVH, bvh\n'), ((7117, 7135), 'rpxdock.bvh.BVH', 'BVH', (['[[-10, 0, 0]]'], {}), '([[-10, 0, 0]])\n', (7120, 7135), False, 'from rpxdock.bvh import BVH, bvh\n'), ((7146, 7162), 'rpxdock.bvh.BVH', 'BVH', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (7149, 7162), False, 'from rpxdock.bvh import BVH, bvh\n'), ((7356, 7373), 'rpxdock.bvh.BVH', 'BVH', (['[[10, 0, 0]]'], {}), '([[10, 0, 0]])\n', (7359, 7373), False, 'from rpxdock.bvh import BVH, bvh\n'), ((7384, 7400), 'rpxdock.bvh.BVH', 'BVH', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (7387, 7400), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12280, 12307), 'rpxdock.bvh.BVH', 'BVH', (['[[0, 0, 0], 
[0, 2, 0]]'], {}), '([[0, 0, 0], [0, 2, 0]])\n', (12283, 12307), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12318, 12349), 'rpxdock.bvh.BVH', 'BVH', (['[[0.9, 0, 0], [0.9, 2, 0]]'], {}), '([[0.9, 0, 0], [0.9, 2, 0]])\n', (12321, 12349), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12403, 12412), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12409, 12412), True, 'import _pickle, numpy as np, itertools as it\n'), ((12423, 12432), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12429, 12432), True, 'import _pickle, numpy as np, itertools as it\n'), ((12446, 12508), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (12467, 12508), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12519, 12583), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufnai)\n', (12542, 12583), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12670, 12702), 'numpy.all', 'np.all', (['(pbvh == [[0, 0], [1, 1]])'], {}), '(pbvh == [[0, 0], [1, 1]])\n', (12676, 12702), True, 'import _pickle, numpy as np, itertools as it\n'), ((12713, 12754), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[0, 0], [1, 1]])'], {}), '(bufnai[:nnai] == [[0, 0], [1, 1]])\n', (12719, 12754), True, 'import _pickle, numpy as np, itertools as it\n'), ((12766, 12786), 'rpxdock.homog.htrans', 'hm.htrans', (['[0, 2, 0]'], {}), '([0, 2, 0])\n', (12775, 12786), True, 'import rpxdock.homog as hm\n'), ((12800, 12862), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (12821, 12862), False, 'from rpxdock.bvh import BVH, bvh\n'), ((12873, 12937), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, 
bvh2, pos1, pos2, mindist, bufnai)\n', (12896, 12937), False, 'from rpxdock.bvh import BVH, bvh\n'), ((13003, 13027), 'numpy.all', 'np.all', (['(pbvh == [[0, 1]])'], {}), '(pbvh == [[0, 1]])\n', (13009, 13027), True, 'import _pickle, numpy as np, itertools as it\n'), ((13038, 13071), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[0, 1]])'], {}), '(bufnai[:nnai] == [[0, 1]])\n', (13044, 13071), True, 'import _pickle, numpy as np, itertools as it\n'), ((13083, 13104), 'rpxdock.homog.htrans', 'hm.htrans', (['[0, -2, 0]'], {}), '([0, -2, 0])\n', (13092, 13104), True, 'import rpxdock.homog as hm\n'), ((13118, 13180), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (13139, 13180), False, 'from rpxdock.bvh import BVH, bvh\n'), ((13191, 13255), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufnai)\n', (13214, 13255), False, 'from rpxdock.bvh import BVH, bvh\n'), ((13321, 13345), 'numpy.all', 'np.all', (['(pbvh == [[1, 0]])'], {}), '(pbvh == [[1, 0]])\n', (13327, 13345), True, 'import _pickle, numpy as np, itertools as it\n'), ((13356, 13389), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[1, 0]])'], {}), '(bufnai[:nnai] == [[1, 0]])\n', (13362, 13389), True, 'import _pickle, numpy as np, itertools as it\n'), ((13740, 13756), 'rpxdock.bvh.BVH', 'BVH', (['crd1', 'mask1'], {}), '(crd1, mask1)\n', (13743, 13756), False, 'from rpxdock.bvh import BVH, bvh\n'), ((13767, 13783), 'rpxdock.bvh.BVH', 'BVH', (['crd2', 'mask2'], {}), '(crd2, mask2)\n', (13770, 13783), False, 'from rpxdock.bvh import BVH, bvh\n'), ((13949, 13958), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (13955, 13958), True, 'import _pickle, numpy as np, itertools as it\n'), ((13969, 13978), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (13975, 13978), True, 'import _pickle, numpy 
as np, itertools as it\n'), ((13992, 14054), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (14013, 14054), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14081, 14145), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufnai)\n', (14104, 14145), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14195, 14227), 'numpy.all', 'np.all', (['(pbvh == [[0, 1], [2, 3]])'], {}), '(pbvh == [[0, 1], [2, 3]])\n', (14201, 14227), True, 'import _pickle, numpy as np, itertools as it\n'), ((14238, 14279), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[0, 1], [2, 3]])'], {}), '(bufnai[:nnai] == [[0, 1], [2, 3]])\n', (14244, 14279), True, 'import _pickle, numpy as np, itertools as it\n'), ((14291, 14311), 'rpxdock.homog.htrans', 'hm.htrans', (['[0, 2, 0]'], {}), '([0, 2, 0])\n', (14300, 14311), True, 'import rpxdock.homog as hm\n'), ((14325, 14387), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (14346, 14387), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14414, 14478), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufnai)\n', (14437, 14478), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14528, 14552), 'numpy.all', 'np.all', (['(pbvh == [[0, 3]])'], {}), '(pbvh == [[0, 3]])\n', (14534, 14552), True, 'import _pickle, numpy as np, itertools as it\n'), ((14563, 14596), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[0, 3]])'], {}), '(bufnai[:nnai] == [[0, 3]])\n', (14569, 14596), True, 'import _pickle, numpy as np, itertools as it\n'), ((14608, 14629), 'rpxdock.homog.htrans', 'hm.htrans', (['[0, -2, 0]'], {}), '([0, 
-2, 0])\n', (14617, 14629), True, 'import rpxdock.homog as hm\n'), ((14643, 14705), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufbvh)\n', (14664, 14705), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14732, 14796), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1, pos2, mindist, bufnai)\n', (14755, 14796), False, 'from rpxdock.bvh import BVH, bvh\n'), ((14846, 14870), 'numpy.all', 'np.all', (['(pbvh == [[2, 1]])'], {}), '(pbvh == [[2, 1]])\n', (14852, 14870), True, 'import _pickle, numpy as np, itertools as it\n'), ((14881, 14914), 'numpy.all', 'np.all', (['(bufnai[:nnai] == [[2, 1]])'], {}), '(bufnai[:nnai] == [[2, 1]])\n', (14887, 14914), True, 'import _pickle, numpy as np, itertools as it\n'), ((25132, 25140), 'rpxdock.bvh.BVH', 'BVH', (['xyz'], {}), '(xyz)\n', (25135, 25140), False, 'from rpxdock.bvh import BVH, bvh\n'), ((25227, 25274), 'numpy.linalg.norm', 'np.linalg.norm', (['(p[:, :3] - xyz[:, None])'], {'axis': '(2)'}), '(p[:, :3] - xyz[:, None], axis=2)\n', (25241, 25274), True, 'import _pickle, numpy as np, itertools as it\n'), ((31800, 31809), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (31803, 31809), False, 'from rpxdock.bvh import BVH, bvh\n'), ((31820, 31829), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (31823, 31829), False, 'from rpxdock.bvh import BVH, bvh\n'), ((31840, 31864), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(1)'}), '(cart_sd=1)\n', (31853, 31864), True, 'import rpxdock.homog as hm\n'), ((31875, 31899), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(1)'}), '(cart_sd=1)\n', (31888, 31899), True, 'import rpxdock.homog as hm\n'), ((31910, 31924), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (31922, 31924), False, 'from time import perf_counter\n'), ((31940, 
31980), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (31956, 31980), False, 'from rpxdock.bvh import BVH, bvh\n'), ((31990, 32054), 'rpxdock.bvh.bvh.isect_range_single', 'bvh.isect_range_single', (['bvh1', 'bvh2', 'pos1', 'pos2'], {'mindist': '(d + 0.01)'}), '(bvh1, bvh2, pos1, pos2, mindist=d + 0.01)\n', (32012, 32054), False, 'from rpxdock.bvh import BVH, bvh\n'), ((32640, 32682), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1b', 'bvh2b', 'pos1', 'pos2'], {}), '(bvh1b, bvh2b, pos1, pos2)\n', (32656, 32682), False, 'from rpxdock.bvh import BVH, bvh\n'), ((32693, 32711), 'numpy.allclose', 'np.allclose', (['d', 'db'], {}), '(d, db)\n', (32704, 32711), True, 'import _pickle, numpy as np, itertools as it\n'), ((32762, 32828), 'rpxdock.bvh.bvh.isect_range_single', 'bvh.isect_range_single', (['bvh1b', 'bvh2b', 'pos1', 'pos2'], {'mindist': '(d + 0.01)'}), '(bvh1b, bvh2b, pos1, pos2, mindist=d + 0.01)\n', (32784, 32828), False, 'from rpxdock.bvh import BVH, bvh\n'), ((33136, 33145), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (33139, 33145), False, 'from rpxdock.bvh import BVH, bvh\n'), ((33156, 33165), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (33159, 33165), False, 'from rpxdock.bvh import BVH, bvh\n'), ((33234, 33256), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['nt'], {}), '(nt)\n', (33252, 33256), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((34466, 34475), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (34469, 34475), False, 'from rpxdock.bvh import BVH, bvh\n'), ((34486, 34495), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (34489, 34495), False, 'from rpxdock.bvh import BVH, bvh\n'), ((34547, 34569), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['nt'], {}), '(nt)\n', (34565, 34569), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((530, 553), 'numpy.random.rand', 
'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (544, 553), True, 'import _pickle, numpy as np, itertools as it\n'), ((567, 581), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (579, 581), False, 'from time import perf_counter\n'), ((595, 604), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (598, 604), False, 'from rpxdock.bvh import BVH, bvh\n'), ((618, 627), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (621, 627), False, 'from rpxdock.bvh import BVH, bvh\n'), ((708, 734), 'rpxdock.homog.htrans', 'hm.htrans', (['[0.9, 0.9, 0.9]'], {}), '([0.9, 0.9, 0.9])\n', (717, 734), True, 'import rpxdock.homog as hm\n'), ((748, 757), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (754, 757), True, 'import _pickle, numpy as np, itertools as it\n'), ((772, 786), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (784, 786), False, 'from time import perf_counter\n'), ((802, 842), 'rpxdock.bvh.bvh.bvh_isect_fixed', 'bvh.bvh_isect_fixed', (['bvh1', 'bvh2', 'mindist'], {}), '(bvh1, bvh2, mindist)\n', (821, 842), False, 'from rpxdock.bvh import BVH, bvh\n'), ((890, 904), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (902, 904), False, 'from time import perf_counter\n'), ((920, 962), 'rpxdock.bvh.bvh.naive_isect_fixed', 'bvh.naive_isect_fixed', (['bvh1', 'bvh2', 'mindist'], {}), '(bvh1, bvh2, mindist)\n', (941, 962), False, 'from rpxdock.bvh import BVH, bvh\n'), ((1481, 1511), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.8)'}), '(N2, cart_sd=0.8)\n', (1494, 1511), True, 'import rpxdock.homog as hm\n'), ((1525, 1555), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.8)'}), '(N2, cart_sd=0.8)\n', (1538, 1555), True, 'import rpxdock.homog as hm\n'), ((1597, 1606), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (1600, 1606), False, 'from rpxdock.bvh import BVH, bvh\n'), ((1620, 1629), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (1623, 1629), False, 'from rpxdock.bvh import BVH, 
bvh\n'), ((2081, 2131), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (2098, 2131), False, 'from rpxdock.bvh import BVH, bvh\n'), ((2181, 2206), 'numpy.all', 'np.all', (['(clashvec == clash)'], {}), '(clashvec == clash)\n', (2187, 2206), True, 'import _pickle, numpy as np, itertools as it\n'), ((2326, 2379), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1', 'pos2[1]', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2[1], mindist)\n', (2343, 2379), False, 'from rpxdock.bvh import BVH, bvh\n'), ((2413, 2466), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1[1]', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1[1], pos2, mindist)\n', (2430, 2466), False, 'from rpxdock.bvh import BVH, bvh\n'), ((2981, 2990), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (2984, 2990), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3004, 3013), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (3007, 3013), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3032, 3050), 'rpxdock.bvh.BVH', 'BVH', (['xyz1[250:750]'], {}), '(xyz1[250:750])\n', (3035, 3050), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3069, 3087), 'rpxdock.bvh.BVH', 'BVH', (['xyz2[250:750]'], {}), '(xyz2[250:750])\n', (3072, 3087), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3101, 3131), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.5)'}), '(N2, cart_sd=0.5)\n', (3114, 3131), True, 'import rpxdock.homog as hm\n'), ((3145, 3175), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.5)'}), '(N2, cart_sd=0.5)\n', (3158, 3175), True, 'import rpxdock.homog as hm\n'), ((3192, 3242), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (3209, 3242), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3265, 3327), 
'rpxdock.bvh.bvh.bvh_isect_fixed_range_vec', 'bvh.bvh_isect_fixed_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (3294, 3327), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3341, 3365), 'numpy.all', 'np.all', (['(isect1 == isect2)'], {}), '(isect1 == isect2)\n', (3347, 3365), True, 'import _pickle, numpy as np, itertools as it\n'), ((3424, 3484), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1_half', 'bvh2_half', 'pos1', 'pos2', 'mindist'], {}), '(bvh1_half, bvh2_half, pos1, pos2, mindist)\n', (3441, 3484), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3507, 3578), 'rpxdock.bvh.bvh.bvh_isect_fixed_range_vec', 'bvh.bvh_isect_fixed_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', '*bounds'], {}), '(bvh1, bvh2, pos1, pos2, mindist, *bounds)\n', (3536, 3578), False, 'from rpxdock.bvh import BVH, bvh\n'), ((3592, 3616), 'numpy.all', 'np.all', (['(isect1 == isect2)'], {}), '(isect1 == isect2)\n', (3598, 3616), True, 'import _pickle, numpy as np, itertools as it\n'), ((3723, 3746), 'numpy.random.rand', 'np.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (3737, 3746), True, 'import _pickle, numpy as np, itertools as it\n'), ((3874, 3888), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3886, 3888), False, 'from time import perf_counter\n'), ((3982, 3996), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3994, 3996), False, 'from time import perf_counter\n'), ((4300, 4314), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4312, 4314), False, 'from time import perf_counter\n'), ((4609, 4632), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (4623, 4632), True, 'import _pickle, numpy as np, itertools as it\n'), ((4661, 4684), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (4675, 4684), True, 'import _pickle, numpy as np, itertools as it\n'), ((4778, 4792), 'time.perf_counter', 'perf_counter', ([], {}), 
'()\n', (4790, 4792), False, 'from time import perf_counter\n'), ((4978, 4992), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4990, 4992), False, 'from time import perf_counter\n'), ((5011, 5057), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]'], {}), '(bvh1, bvh2, pos1[i], pos2[i])\n', (5027, 5057), False, 'from rpxdock.bvh import BVH, bvh\n'), ((5198, 5231), 'numpy.allclose', 'np.allclose', (['d', 'dtest'], {'atol': '(1e-06)'}), '(d, dtest, atol=1e-06)\n', (5209, 5231), True, 'import _pickle, numpy as np, itertools as it\n'), ((5243, 5257), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5255, 5257), False, 'from time import perf_counter\n'), ((5269, 5317), 'rpxdock.bvh.bvh.naive_min_dist', 'bvh.naive_min_dist', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]'], {}), '(bvh1, bvh2, pos1[i], pos2[i])\n', (5287, 5317), False, 'from rpxdock.bvh import BVH, bvh\n'), ((5362, 5392), 'numpy.allclose', 'np.allclose', (['dn', 'd'], {'atol': '(1e-06)'}), '(dn, d, atol=1e-06)\n', (5373, 5392), True, 'import _pickle, numpy as np, itertools as it\n'), ((5980, 6003), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (5994, 6003), True, 'import _pickle, numpy as np, itertools as it\n'), ((6032, 6055), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (6046, 6055), True, 'import _pickle, numpy as np, itertools as it\n'), ((6149, 6163), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6161, 6163), False, 'from time import perf_counter\n'), ((6256, 6280), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(1)'}), '(cart_sd=1)\n', (6269, 6280), True, 'import rpxdock.homog as hm\n'), ((6294, 6318), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(1)'}), '(cart_sd=1)\n', (6307, 6318), True, 'import rpxdock.homog as hm\n'), ((6333, 6347), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6345, 6347), False, 'from time import 
perf_counter\n'), ((6366, 6406), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (6382, 6406), False, 'from rpxdock.bvh import BVH, bvh\n'), ((6541, 6574), 'numpy.allclose', 'np.allclose', (['d', 'dtest'], {'atol': '(1e-06)'}), '(d, dtest, atol=1e-06)\n', (6552, 6574), True, 'import _pickle, numpy as np, itertools as it\n'), ((6586, 6600), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6598, 6600), False, 'from time import perf_counter\n'), ((6612, 6654), 'rpxdock.bvh.bvh.naive_min_dist', 'bvh.naive_min_dist', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (6630, 6654), False, 'from rpxdock.bvh import BVH, bvh\n'), ((6699, 6729), 'numpy.allclose', 'np.allclose', (['dn', 'd'], {'atol': '(1e-06)'}), '(dn, d, atol=1e-06)\n', (6710, 6729), True, 'import _pickle, numpy as np, itertools as it\n'), ((7196, 7205), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7202, 7205), True, 'import _pickle, numpy as np, itertools as it\n'), ((7207, 7216), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7213, 7216), True, 'import _pickle, numpy as np, itertools as it\n'), ((7434, 7443), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7440, 7443), True, 'import _pickle, numpy as np, itertools as it\n'), ((7445, 7454), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7451, 7454), True, 'import _pickle, numpy as np, itertools as it\n'), ((7562, 7579), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (7576, 7579), True, 'import _pickle, numpy as np, itertools as it\n'), ((7643, 7663), 'numpy.linalg.norm', 'np.linalg.norm', (['dirn'], {}), '(dirn)\n', (7657, 7663), True, 'import _pickle, numpy as np, itertools as it\n'), ((7822, 7831), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (7825, 7831), False, 'from rpxdock.bvh import BVH, bvh\n'), ((7845, 7854), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (7848, 7854), False, 'from rpxdock.bvh import BVH, bvh\n'), 
((8137, 8155), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (8152, 8155), True, 'import _pickle, numpy as np, itertools as it\n'), ((8170, 8190), 'numpy.linalg.norm', 'np.linalg.norm', (['dirn'], {}), '(dirn)\n', (8184, 8190), True, 'import _pickle, numpy as np, itertools as it\n'), ((8242, 8263), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (8257, 8263), True, 'import _pickle, numpy as np, itertools as it\n'), ((8277, 8298), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (8292, 8298), True, 'import _pickle, numpy as np, itertools as it\n'), ((8312, 8321), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (8315, 8321), False, 'from rpxdock.bvh import BVH, bvh\n'), ((8335, 8344), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (8338, 8344), False, 'from rpxdock.bvh import BVH, bvh\n'), ((8879, 8897), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (8894, 8897), True, 'import _pickle, numpy as np, itertools as it\n'), ((8912, 8932), 'numpy.linalg.norm', 'np.linalg.norm', (['dirn'], {}), '(dirn)\n', (8926, 8932), True, 'import _pickle, numpy as np, itertools as it\n'), ((8990, 9011), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (9005, 9011), True, 'import _pickle, numpy as np, itertools as it\n'), ((9025, 9046), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (9040, 9046), True, 'import _pickle, numpy as np, itertools as it\n'), ((9060, 9069), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (9063, 9069), False, 'from rpxdock.bvh import BVH, bvh\n'), ((9083, 9092), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (9086, 9092), False, 'from rpxdock.bvh import BVH, bvh\n'), ((9106, 9121), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {}), '()\n', (9119, 9121), True, 'import rpxdock.homog as hm\n'), ((9135, 9150), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {}), '()\n', (9148, 9150), True, 
'import rpxdock.homog as hm\n'), ((9161, 9218), 'rpxdock.bvh.bvh.bvh_slide', 'bvh.bvh_slide', (['bvh1', 'bvh2', 'pos1', 'pos2'], {'rad': 'rad', 'dirn': 'dirn'}), '(bvh1, bvh2, pos1, pos2, rad=rad, dirn=dirn)\n', (9174, 9218), False, 'from rpxdock.bvh import BVH, bvh\n'), ((10166, 10175), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (10169, 10175), False, 'from rpxdock.bvh import BVH, bvh\n'), ((10189, 10198), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (10192, 10198), False, 'from rpxdock.bvh import BVH, bvh\n'), ((10313, 10343), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.5)'}), '(N2, cart_sd=0.5)\n', (10326, 10343), True, 'import rpxdock.homog as hm\n'), ((10357, 10387), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': '(0.5)'}), '(N2, cart_sd=0.5)\n', (10370, 10387), True, 'import rpxdock.homog as hm\n'), ((10401, 10419), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (10416, 10419), True, 'import _pickle, numpy as np, itertools as it\n'), ((10434, 10454), 'numpy.linalg.norm', 'np.linalg.norm', (['dirn'], {}), '(dirn)\n', (10448, 10454), True, 'import _pickle, numpy as np, itertools as it\n'), ((11732, 11787), 'rpxdock.bvh.bvh.bvh_slide_vec', 'bvh.bvh_slide_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'radius', 'dirn'], {}), '(bvh1, bvh2, pos1, pos2, radius, dirn)\n', (11749, 11787), False, 'from rpxdock.bvh import BVH, bvh\n'), ((11801, 11829), 'numpy.allclose', 'np.allclose', (['slides', 'slides2'], {}), '(slides, slides2)\n', (11812, 11829), True, 'import _pickle, numpy as np, itertools as it\n'), ((12197, 12226), 'numpy.ones', 'np.ones', (['(100, 2)'], {'dtype': '"""i4"""'}), "((100, 2), dtype='i4')\n", (12204, 12226), True, 'import _pickle, numpy as np, itertools as it\n'), ((12240, 12269), 'numpy.ones', 'np.ones', (['(100, 2)'], {'dtype': '"""i4"""'}), "((100, 2), dtype='i4')\n", (12247, 12269), True, 'import _pickle, numpy as np, itertools as it\n'), ((13495, 13524), 'numpy.ones', 
'np.ones', (['(100, 2)'], {'dtype': '"""i4"""'}), "((100, 2), dtype='i4')\n", (13502, 13524), True, 'import _pickle, numpy as np, itertools as it\n'), ((13538, 13567), 'numpy.ones', 'np.ones', (['(100, 2)'], {'dtype': '"""i4"""'}), "((100, 2), dtype='i4')\n", (13545, 13567), True, 'import _pickle, numpy as np, itertools as it\n'), ((15083, 15120), 'numpy.ones', 'np.ones', (['(Npts * Npts, 2)'], {'dtype': '"""i4"""'}), "((Npts * Npts, 2), dtype='i4')\n", (15090, 15120), True, 'import _pickle, numpy as np, itertools as it\n'), ((15134, 15171), 'numpy.ones', 'np.ones', (['(Npts * Npts, 2)'], {'dtype': '"""i4"""'}), "((Npts * Npts, 2), dtype='i4')\n", (15141, 15171), True, 'import _pickle, numpy as np, itertools as it\n'), ((15318, 15327), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (15321, 15327), False, 'from rpxdock.bvh import BVH, bvh\n'), ((15341, 15350), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (15344, 15350), False, 'from rpxdock.bvh import BVH, bvh\n'), ((15681, 15695), 'numpy.stack', 'np.stack', (['pos1'], {}), '(pos1)\n', (15689, 15695), True, 'import _pickle, numpy as np, itertools as it\n'), ((15709, 15723), 'numpy.stack', 'np.stack', (['pos2'], {}), '(pos2)\n', (15717, 15723), True, 'import _pickle, numpy as np, itertools as it\n'), ((17037, 17093), 'rpxdock.bvh.bvh.bvh_count_pairs_vec', 'bvh.bvh_count_pairs_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (17060, 17093), False, 'from rpxdock.bvh import BVH, bvh\n'), ((17171, 17229), 'rpxdock.bvh.bvh.bvh_collect_pairs_vec', 'bvh.bvh_collect_pairs_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (17196, 17229), False, 'from rpxdock.bvh import BVH, bvh\n'), ((17356, 17421), 'rpxdock.bvh.bvh.bvh_collect_pairs_vec', 'bvh.bvh_collect_pairs_vec', (['bvh1', 'bvh2', 'pos1[:3]', 'pos2[0]', 'mindist'], {}), '(bvh1, bvh2, pos1[:3], pos2[0], mindist)\n', (17381, 17421), False, 'from rpxdock.bvh import 
BVH, bvh\n'), ((17460, 17525), 'rpxdock.bvh.bvh.bvh_collect_pairs_vec', 'bvh.bvh_collect_pairs_vec', (['bvh1', 'bvh2', 'pos1[0]', 'pos2[:5]', 'mindist'], {}), '(bvh1, bvh2, pos1[0], pos2[:5], mindist)\n', (17485, 17525), False, 'from rpxdock.bvh import BVH, bvh\n'), ((17952, 17961), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (17955, 17961), False, 'from rpxdock.bvh import BVH, bvh\n'), ((17975, 17984), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (17978, 17984), False, 'from rpxdock.bvh import BVH, bvh\n'), ((18314, 18328), 'numpy.stack', 'np.stack', (['pos1'], {}), '(pos1)\n', (18322, 18328), True, 'import _pickle, numpy as np, itertools as it\n'), ((18342, 18356), 'numpy.stack', 'np.stack', (['pos2'], {}), '(pos2)\n', (18350, 18356), True, 'import _pickle, numpy as np, itertools as it\n'), ((18445, 18503), 'rpxdock.bvh.bvh.bvh_collect_pairs_vec', 'bvh.bvh_collect_pairs_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (18470, 18503), False, 'from rpxdock.bvh import BVH, bvh\n'), ((18526, 18590), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (18557, 18590), False, 'from rpxdock.bvh import BVH, bvh\n'), ((18604, 18625), 'numpy.all', 'np.all', (['(lbub == rlbub)'], {}), '(lbub == rlbub)\n', (18610, 18625), True, 'import _pickle, numpy as np, itertools as it\n'), ((18639, 18662), 'numpy.all', 'np.all', (['(pairs == rpairs)'], {}), '(pairs == rpairs)\n', (18645, 18662), True, 'import _pickle, numpy as np, itertools as it\n'), ((18686, 18764), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', '[250]', '[750]'], {}), '(bvh1, bvh2, pos1, pos2, mindist, [250], [750])\n', (18717, 18764), False, 'from rpxdock.bvh import BVH, bvh\n'), ((18869, 18896), 'numpy.all', 'np.all', (['(rpairs[:, 0] >= 250)'], {}), 
'(rpairs[:, 0] >= 250)\n', (18875, 18896), True, 'import _pickle, numpy as np, itertools as it\n'), ((18910, 18937), 'numpy.all', 'np.all', (['(rpairs[:, 0] <= 750)'], {}), '(rpairs[:, 0] <= 750)\n', (18916, 18937), True, 'import _pickle, numpy as np, itertools as it\n'), ((19183, 19289), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', '[600]', '[1000]', '(-1)', '[100]', '[400]', '(-1)'], {}), '(bvh1, bvh2, pos1, pos2, mindist, [600], [\n 1000], -1, [100], [400], -1)\n', (19214, 19289), False, 'from rpxdock.bvh import BVH, bvh\n'), ((19389, 19416), 'numpy.all', 'np.all', (['(rpairs[:, 0] >= 600)'], {}), '(rpairs[:, 0] >= 600)\n', (19395, 19416), True, 'import _pickle, numpy as np, itertools as it\n'), ((19430, 19458), 'numpy.all', 'np.all', (['(rpairs[:, 0] <= 1000)'], {}), '(rpairs[:, 0] <= 1000)\n', (19436, 19458), True, 'import _pickle, numpy as np, itertools as it\n'), ((19472, 19499), 'numpy.all', 'np.all', (['(rpairs[:, 1] >= 100)'], {}), '(rpairs[:, 1] >= 100)\n', (19478, 19499), True, 'import _pickle, numpy as np, itertools as it\n'), ((19513, 19540), 'numpy.all', 'np.all', (['(rpairs[:, 1] <= 400)'], {}), '(rpairs[:, 1] <= 400)\n', (19519, 19540), True, 'import _pickle, numpy as np, itertools as it\n'), ((19696, 19724), 'numpy.all', 'np.all', (['(filt_pairs == rpairs)'], {}), '(filt_pairs == rpairs)\n', (19702, 19724), True, 'import _pickle, numpy as np, itertools as it\n'), ((20079, 20088), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (20082, 20088), False, 'from rpxdock.bvh import BVH, bvh\n'), ((20102, 20111), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (20105, 20111), False, 'from rpxdock.bvh import BVH, bvh\n'), ((20441, 20455), 'numpy.stack', 'np.stack', (['pos1'], {}), '(pos1)\n', (20449, 20455), True, 'import _pickle, numpy as np, itertools as it\n'), ((20469, 20483), 'numpy.stack', 'np.stack', (['pos2'], {}), '(pos2)\n', (20477, 20483), True, 'import 
_pickle, numpy as np, itertools as it\n'), ((20572, 20630), 'rpxdock.bvh.bvh.bvh_collect_pairs_vec', 'bvh.bvh_collect_pairs_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (20597, 20630), False, 'from rpxdock.bvh import BVH, bvh\n'), ((20653, 20717), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (20684, 20717), False, 'from rpxdock.bvh import BVH, bvh\n'), ((20731, 20752), 'numpy.all', 'np.all', (['(lbub == rlbub)'], {}), '(lbub == rlbub)\n', (20737, 20752), True, 'import _pickle, numpy as np, itertools as it\n'), ((20766, 20789), 'numpy.all', 'np.all', (['(pairs == rpairs)'], {}), '(pairs == rpairs)\n', (20772, 20789), True, 'import _pickle, numpy as np, itertools as it\n'), ((20857, 20930), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', '*bounds'], {}), '(bvh1, bvh2, pos1, pos2, mindist, *bounds)\n', (20888, 20930), False, 'from rpxdock.bvh import BVH, bvh\n'), ((21522, 21595), 'rpxdock.bvh.bvh.bvh_collect_pairs_range_vec', 'bvh.bvh_collect_pairs_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', '*bounds'], {}), '(bvh1, bvh2, pos1, pos2, mindist, *bounds)\n', (21553, 21595), False, 'from rpxdock.bvh import BVH, bvh\n'), ((22381, 22409), 'numpy.all', 'np.all', (['(filt_pairs == rpairs)'], {}), '(filt_pairs == rpairs)\n', (22387, 22409), True, 'import _pickle, numpy as np, itertools as it\n'), ((22796, 22833), 'numpy.ones', 'np.ones', (['(Npts * Npts, 2)'], {'dtype': '"""i4"""'}), "((Npts * Npts, 2), dtype='i4')\n", (22803, 22833), True, 'import _pickle, numpy as np, itertools as it\n'), ((23084, 23093), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (23087, 23093), False, 'from rpxdock.bvh import BVH, bvh\n'), ((23107, 23116), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (23110, 23116), 
False, 'from rpxdock.bvh import BVH, bvh\n'), ((23133, 23145), 'rpxdock.bvh.BVH', 'BVH', (['xyzcol1'], {}), '(xyzcol1)\n', (23136, 23145), False, 'from rpxdock.bvh import BVH, bvh\n'), ((23162, 23174), 'rpxdock.bvh.BVH', 'BVH', (['xyzcol2'], {}), '(xyzcol2)\n', (23165, 23174), False, 'from rpxdock.bvh import BVH, bvh\n'), ((25085, 25106), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (25099, 25106), True, 'import _pickle, numpy as np, itertools as it\n'), ((25176, 25196), 'numpy.mean', 'np.mean', (['xyz'], {'axis': '(0)'}), '(xyz, axis=0)\n', (25183, 25196), True, 'import _pickle, numpy as np, itertools as it\n'), ((25297, 25317), 'numpy.min', 'np.min', (['dmat'], {'axis': '(1)'}), '(dmat, axis=1)\n', (25303, 25317), True, 'import _pickle, numpy as np, itertools as it\n'), ((26034, 26068), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (26047, 26068), True, 'import rpxdock.homog as hm\n'), ((26082, 26116), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (26095, 26116), True, 'import rpxdock.homog as hm\n'), ((27174, 27222), 'rpxdock.bvh.bvh.isect_range', 'bvh.isect_range', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (27189, 27222), False, 'from rpxdock.bvh import BVH, bvh\n'), ((27238, 27254), 'numpy.array', 'np.array', (['ranges'], {}), '(ranges)\n', (27246, 27254), True, 'import _pickle, numpy as np, itertools as it\n'), ((27268, 27294), 'numpy.all', 'np.all', (['(lb == ranges[:, 0])'], {}), '(lb == ranges[:, 0])\n', (27274, 27294), True, 'import _pickle, numpy as np, itertools as it\n'), ((27308, 27334), 'numpy.all', 'np.all', (['(ub == ranges[:, 1])'], {}), '(ub == ranges[:, 1])\n', (27314, 27334), True, 'import _pickle, numpy as np, itertools as it\n'), ((27347, 27379), 'numpy.logical_and', 'np.logical_and', (['(lb >= 0)', '(ub >= 0)'], {}), '(lb >= 0, ub >= 0)\n', 
(27361, 27379), True, 'import _pickle, numpy as np, itertools as it\n'), ((27401, 27471), 'rpxdock.bvh.bvh.bvh_isect_fixed_range_vec', 'bvh.bvh_isect_fixed_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'lb', 'ub'], {}), '(bvh1, bvh2, pos1, pos2, mindist, lb, ub)\n', (27430, 27471), False, 'from rpxdock.bvh import BVH, bvh\n'), ((28367, 28381), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (28379, 28381), False, 'from time import perf_counter\n'), ((28566, 28600), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (28579, 28600), True, 'import rpxdock.homog as hm\n'), ((28614, 28648), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (28627, 28648), True, 'import rpxdock.homog as hm\n'), ((28937, 28985), 'rpxdock.bvh.bvh.isect_range', 'bvh.isect_range', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (28952, 28985), False, 'from rpxdock.bvh import BVH, bvh\n'), ((29208, 29258), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist)\n', (29225, 29258), False, 'from rpxdock.bvh import BVH, bvh\n'), ((29351, 29421), 'rpxdock.bvh.bvh.bvh_isect_fixed_range_vec', 'bvh.bvh_isect_fixed_range_vec', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist', 'lb', 'ub'], {}), '(bvh1, bvh2, pos1, pos2, mindist, lb, ub)\n', (29380, 29421), False, 'from rpxdock.bvh import BVH, bvh\n'), ((29432, 29445), 'numpy.any', 'np.any', (['isect'], {}), '(isect)\n', (29438, 29445), True, 'import _pickle, numpy as np, itertools as it\n'), ((29964, 30011), 'rpxdock.Bunch', 'rp.Bunch', ([], {'maxtrim': 'a', 'maxtrim_lb': 'b', 'maxtrim_ub': 'c'}), '(maxtrim=a, maxtrim_lb=b, maxtrim_ub=c)\n', (29972, 30011), True, 'import rpxdock as rp\n'), ((30457, 30491), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, 
cart_sd=cart_sd)\n', (30470, 30491), True, 'import rpxdock.homog as hm\n'), ((30505, 30539), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['N2'], {'cart_sd': 'cart_sd'}), '(N2, cart_sd=cart_sd)\n', (30518, 30539), True, 'import rpxdock.homog as hm\n'), ((31409, 31464), 'rpxdock.bvh.bvh.isect_range', 'bvh.isect_range', (['bvh1', 'bvh2', 'pos1', 'pos2', 'mindist'], {}), '(bvh1, bvh2, pos1, pos2, mindist, **arg)\n', (31424, 31464), False, 'from rpxdock.bvh import BVH, bvh\n'), ((31480, 31496), 'numpy.array', 'np.array', (['ranges'], {}), '(ranges)\n', (31488, 31496), True, 'import _pickle, numpy as np, itertools as it\n'), ((31510, 31536), 'numpy.all', 'np.all', (['(lb == ranges[:, 0])'], {}), '(lb == ranges[:, 0])\n', (31516, 31536), True, 'import _pickle, numpy as np, itertools as it\n'), ((31550, 31576), 'numpy.all', 'np.all', (['(ub == ranges[:, 1])'], {}), '(ub == ranges[:, 1])\n', (31556, 31576), True, 'import _pickle, numpy as np, itertools as it\n'), ((31696, 31719), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (31710, 31719), True, 'import _pickle, numpy as np, itertools as it\n'), ((31748, 31771), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (31762, 31771), True, 'import _pickle, numpy as np, itertools as it\n'), ((32104, 32127), '_pickle.dump', '_pickle.dump', (['bvh1', 'out'], {}), '(bvh1, out)\n', (32116, 32127), False, 'import _pickle, numpy as np, itertools as it\n'), ((32176, 32199), '_pickle.dump', '_pickle.dump', (['bvh2', 'out'], {}), '(bvh2, out)\n', (32188, 32199), False, 'import _pickle, numpy as np, itertools as it\n'), ((32256, 32273), '_pickle.load', '_pickle.load', (['out'], {}), '(out)\n', (32268, 32273), False, 'import _pickle, numpy as np, itertools as it\n'), ((32330, 32347), '_pickle.load', '_pickle.load', (['out'], {}), '(out)\n', (32342, 32347), False, 'import _pickle, numpy as np, itertools as it\n'), ((33032, 33055), 'numpy.random.rand', 'np.random.rand', (['Npts', 
'(3)'], {}), '(Npts, 3)\n', (33046, 33055), True, 'import _pickle, numpy as np, itertools as it\n'), ((33084, 33107), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (33098, 33107), True, 'import _pickle, numpy as np, itertools as it\n'), ((33297, 33329), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['npos'], {'cart_sd': '(0.5)'}), '(npos, cart_sd=0.5)\n', (33310, 33329), True, 'import rpxdock.homog as hm\n'), ((33343, 33375), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['npos'], {'cart_sd': '(0.5)'}), '(npos, cart_sd=0.5)\n', (33356, 33375), True, 'import rpxdock.homog as hm\n'), ((33389, 33420), 'numpy.empty', 'np.empty', (['(Npts, 2)'], {'dtype': '"""i4"""'}), "((Npts, 2), dtype='i4')\n", (33397, 33420), True, 'import _pickle, numpy as np, itertools as it\n'), ((33431, 33445), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (33443, 33445), False, 'from time import perf_counter\n'), ((33545, 33556), 'numpy.array', 'np.array', (['_'], {}), '(_)\n', (33553, 33556), True, 'import _pickle, numpy as np, itertools as it\n'), ((33625, 33639), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (33637, 33639), False, 'from time import perf_counter\n'), ((33846, 33882), 'numpy.concatenate', 'np.concatenate', (['[f for f in futures]'], {}), '([f for f in futures])\n', (33860, 33882), True, 'import _pickle, numpy as np, itertools as it\n'), ((34013, 34039), 'numpy.allclose', 'np.allclose', (['isect', 'isect2'], {}), '(isect, isect2)\n', (34024, 34039), True, 'import _pickle, numpy as np, itertools as it\n'), ((34362, 34385), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (34376, 34385), True, 'import _pickle, numpy as np, itertools as it\n'), ((34414, 34437), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (34428, 34437), True, 'import _pickle, numpy as np, itertools as it\n'), ((34610, 34642), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['npos'], {'cart_sd': '(0.7)'}), 
'(npos, cart_sd=0.7)\n', (34623, 34642), True, 'import rpxdock.homog as hm\n'), ((34656, 34688), 'rpxdock.homog.rand_xform', 'hm.rand_xform', (['npos'], {'cart_sd': '(0.7)'}), '(npos, cart_sd=0.7)\n', (34669, 34688), True, 'import rpxdock.homog as hm\n'), ((34702, 34733), 'numpy.empty', 'np.empty', (['(Npts, 2)'], {'dtype': '"""i4"""'}), "((Npts, 2), dtype='i4')\n", (34710, 34733), True, 'import _pickle, numpy as np, itertools as it\n'), ((34744, 34758), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (34756, 34758), False, 'from time import perf_counter\n'), ((34854, 34865), 'numpy.array', 'np.array', (['_'], {}), '(_)\n', (34862, 34865), True, 'import _pickle, numpy as np, itertools as it\n'), ((34934, 34948), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (34946, 34948), False, 'from time import perf_counter\n'), ((35258, 35288), 'numpy.allclose', 'np.allclose', (['mindist', 'mindist2'], {}), '(mindist, mindist2)\n', (35269, 35288), True, 'import _pickle, numpy as np, itertools as it\n'), ((477, 500), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (491, 500), True, 'import _pickle, numpy as np, itertools as it\n'), ((641, 655), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (653, 655), False, 'from time import perf_counter\n'), ((856, 870), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (868, 870), False, 'from time import perf_counter\n'), ((974, 988), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (986, 988), False, 'from time import perf_counter\n'), ((1243, 1253), 'rpxdock.Timer', 'rp.Timer', ([], {}), '()\n', (1251, 1253), True, 'import rpxdock as rp\n'), ((1371, 1394), 'numpy.random.rand', 'np.random.rand', (['(1250)', '(3)'], {}), '(1250, 3)\n', (1385, 1394), True, 'import _pickle, numpy as np, itertools as it\n'), ((1426, 1449), 'numpy.random.rand', 'np.random.rand', (['(1250)', '(3)'], {}), '(1250, 3)\n', (1440, 1449), True, 'import _pickle, numpy as np, itertools as it\n'), ((1726, 
1818), 'rpxdock.bvh.bvh.bvh_isect', 'bvh.bvh_isect', ([], {'bvh1': 'bvh1', 'bvh2': 'bvh2', 'pos1': 'pos1[inner]', 'pos2': 'pos2[inner]', 'mindist': 'mindist'}), '(bvh1=bvh1, bvh2=bvh2, pos1=pos1[inner], pos2=pos2[inner],\n mindist=mindist)\n', (1739, 1818), False, 'from rpxdock.bvh import BVH, bvh\n'), ((1900, 1962), 'rpxdock.bvh.bvh.naive_isect', 'bvh.naive_isect', (['bvh1', 'bvh2', 'pos1[inner]', 'pos2[inner]', 'mindist'], {}), '(bvh1, bvh2, pos1[inner], pos2[inner], mindist)\n', (1915, 1962), False, 'from rpxdock.bvh import BVH, bvh\n'), ((2263, 2319), 'rpxdock.bvh.bvh.bvh_isect_vec', 'bvh.bvh_isect_vec', (['bvh1', 'bvh2', 'pos1[1]', 'pos2[1]', 'mindist'], {}), '(bvh1, bvh2, pos1[1], pos2[1], mindist)\n', (2280, 2319), False, 'from rpxdock.bvh import BVH, bvh\n'), ((2871, 2894), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (2885, 2894), True, 'import _pickle, numpy as np, itertools as it\n'), ((2926, 2949), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (2940, 2949), True, 'import _pickle, numpy as np, itertools as it\n'), ((5071, 5085), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5083, 5085), False, 'from time import perf_counter\n'), ((5329, 5343), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (5341, 5343), False, 'from time import perf_counter\n'), ((6420, 6434), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6432, 6434), False, 'from time import perf_counter\n'), ((6666, 6680), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6678, 6680), False, 'from time import perf_counter\n'), ((7891, 7900), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7897, 7900), True, 'import _pickle, numpy as np, itertools as it\n'), ((7902, 7911), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7908, 7911), True, 'import _pickle, numpy as np, itertools as it\n'), ((7981, 8008), 'numpy.linalg.norm', 'np.linalg.norm', (['(xyz1 - xyz2)'], {}), '(xyz1 - xyz2)\n', (7995, 8008), True, 
'import _pickle, numpy as np, itertools as it\n'), ((8210, 8227), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (8225, 8227), True, 'import _pickle, numpy as np, itertools as it\n'), ((8381, 8390), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8387, 8390), True, 'import _pickle, numpy as np, itertools as it\n'), ((8392, 8401), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8398, 8401), True, 'import _pickle, numpy as np, itertools as it\n'), ((10026, 10049), 'numpy.random.rand', 'np.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (10040, 10049), True, 'import _pickle, numpy as np, itertools as it\n'), ((10081, 10104), 'numpy.random.rand', 'np.random.rand', (['(5000)', '(3)'], {}), '(5000, 3)\n', (10095, 10104), True, 'import _pickle, numpy as np, itertools as it\n'), ((10564, 10578), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10576, 10578), False, 'from time import perf_counter\n'), ((10597, 10654), 'rpxdock.bvh.bvh.bvh_slide', 'bvh.bvh_slide', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]', 'radius', 'dirn'], {}), '(bvh1, bvh2, pos1[i], pos2[i], radius, dirn)\n', (10610, 10654), False, 'from rpxdock.bvh import BVH, bvh\n'), ((10710, 10724), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10722, 10724), False, 'from time import perf_counter\n'), ((15208, 15231), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (15222, 15231), True, 'import _pickle, numpy as np, itertools as it\n'), ((15263, 15286), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (15277, 15286), True, 'import _pickle, numpy as np, itertools as it\n'), ((15415, 15441), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (15428, 15441), True, 'import rpxdock.homog as hm\n'), ((15456, 15482), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (15469, 15482), True, 'import rpxdock.homog as hm\n'), ((15496, 15531), 
'numpy.linalg.norm', 'np.linalg.norm', (['(x1[:, 3] - x2[:, 3])'], {}), '(x1[:, 3] - x2[:, 3])\n', (15510, 15531), True, 'import _pickle, numpy as np, itertools as it\n'), ((15833, 15847), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (15845, 15847), False, 'from time import perf_counter\n'), ((15867, 15935), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]', 'mindist', 'bufbvh'], {}), '(bvh1, bvh2, pos1[i], pos2[i], mindist, bufbvh)\n', (15888, 15935), False, 'from rpxdock.bvh import BVH, bvh\n'), ((16013, 16027), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (16025, 16027), False, 'from time import perf_counter\n'), ((16044, 16114), 'rpxdock.bvh.bvh.naive_collect_pairs', 'bvh.naive_collect_pairs', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]', 'mindist', 'bufnai'], {}), '(bvh1, bvh2, pos1[i], pos2[i], mindist, bufnai)\n', (16067, 16114), False, 'from rpxdock.bvh import BVH, bvh\n'), ((16169, 16183), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (16181, 16183), False, 'from time import perf_counter\n'), ((16199, 16257), 'rpxdock.bvh.bvh.bvh_count_pairs', 'bvh.bvh_count_pairs', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]', 'mindist'], {}), '(bvh1, bvh2, pos1[i], pos2[i], mindist)\n', (16218, 16257), False, 'from rpxdock.bvh import BVH, bvh\n'), ((16575, 16611), 'numpy.lexsort', 'np.lexsort', (['(pbvh[:, 1], pbvh[:, 0])'], {}), '((pbvh[:, 1], pbvh[:, 0]))\n', (16585, 16611), True, 'import _pickle, numpy as np, itertools as it\n'), ((16655, 16703), 'numpy.lexsort', 'np.lexsort', (['(bufnai[:nnai, 1], bufnai[:nnai, 0])'], {}), '((bufnai[:nnai, 1], bufnai[:nnai, 0]))\n', (16665, 16703), True, 'import _pickle, numpy as np, itertools as it\n'), ((16762, 16791), 'numpy.all', 'np.all', (['(pbvh == bufnai[:nnai])'], {}), '(pbvh == bufnai[:nnai])\n', (16768, 16791), True, 'import _pickle, numpy as np, itertools as it\n'), ((16942, 16979), 'numpy.linalg.norm', 'np.linalg.norm', (['(pair2 - pair1)'], {'axis': 
'(1)'}), '(pair2 - pair1, axis=1)\n', (16956, 16979), True, 'import _pickle, numpy as np, itertools as it\n'), ((17308, 17341), 'numpy.all', 'np.all', (['(pairs2[lb:ub] == pairs[i])'], {}), '(pairs2[lb:ub] == pairs[i])\n', (17314, 17341), True, 'import _pickle, numpy as np, itertools as it\n'), ((17842, 17865), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (17856, 17865), True, 'import _pickle, numpy as np, itertools as it\n'), ((17897, 17920), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (17911, 17920), True, 'import _pickle, numpy as np, itertools as it\n'), ((18048, 18074), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (18061, 18074), True, 'import rpxdock.homog as hm\n'), ((18089, 18115), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (18102, 18115), True, 'import rpxdock.homog as hm\n'), ((18129, 18164), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1[:, 3] - x2[:, 3])'], {}), '(x1[:, 3] - x2[:, 3])\n', (18143, 18164), True, 'import _pickle, numpy as np, itertools as it\n'), ((18963, 19017), 'numpy.logical_and', 'np.logical_and', (['(pairs[:, 0] >= 250)', '(pairs[:, 0] <= 750)'], {}), '(pairs[:, 0] >= 250, pairs[:, 0] <= 750)\n', (18977, 19017), True, 'import _pickle, numpy as np, itertools as it\n'), ((19102, 19131), 'numpy.unique', 'np.unique', (['filt_pairs'], {'axis': '(1)'}), '(filt_pairs, axis=1)\n', (19111, 19131), True, 'import _pickle, numpy as np, itertools as it\n'), ((19133, 19158), 'numpy.unique', 'np.unique', (['rpairs'], {'axis': '(1)'}), '(rpairs, axis=1)\n', (19142, 19158), True, 'import _pickle, numpy as np, itertools as it\n'), ((19764, 19793), 'numpy.unique', 'np.unique', (['filt_pairs'], {'axis': '(1)'}), '(filt_pairs, axis=1)\n', (19773, 19793), True, 'import _pickle, numpy as np, itertools as it\n'), ((19795, 19820), 'numpy.unique', 'np.unique', (['rpairs'], {'axis': '(1)'}), '(rpairs, 
axis=1)\n', (19804, 19820), True, 'import _pickle, numpy as np, itertools as it\n'), ((19969, 19992), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (19983, 19992), True, 'import _pickle, numpy as np, itertools as it\n'), ((20024, 20047), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (20038, 20047), True, 'import _pickle, numpy as np, itertools as it\n'), ((20175, 20201), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (20188, 20201), True, 'import rpxdock.homog as hm\n'), ((20216, 20242), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (20229, 20242), True, 'import rpxdock.homog as hm\n'), ((20256, 20291), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1[:, 3] - x2[:, 3])'], {}), '(x1[:, 3] - x2[:, 3])\n', (20270, 20291), True, 'import _pickle, numpy as np, itertools as it\n'), ((21368, 21397), 'numpy.unique', 'np.unique', (['filt_pairs'], {'axis': '(1)'}), '(filt_pairs, axis=1)\n', (21377, 21397), True, 'import _pickle, numpy as np, itertools as it\n'), ((21399, 21424), 'numpy.unique', 'np.unique', (['rpairs'], {'axis': '(1)'}), '(rpairs, axis=1)\n', (21408, 21424), True, 'import _pickle, numpy as np, itertools as it\n'), ((22449, 22478), 'numpy.unique', 'np.unique', (['filt_pairs'], {'axis': '(1)'}), '(filt_pairs, axis=1)\n', (22458, 22478), True, 'import _pickle, numpy as np, itertools as it\n'), ((22480, 22505), 'numpy.unique', 'np.unique', (['rpairs'], {'axis': '(1)'}), '(rpairs, axis=1)\n', (22489, 22505), True, 'import _pickle, numpy as np, itertools as it\n'), ((22870, 22893), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (22884, 22893), True, 'import _pickle, numpy as np, itertools as it\n'), ((22925, 22948), 'numpy.random.rand', 'np.random.rand', (['Npts', '(3)'], {}), '(Npts, 3)\n', (22939, 22948), True, 'import _pickle, numpy as np, itertools as it\n'), ((23254, 23272), 
'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (23269, 23272), True, 'import _pickle, numpy as np, itertools as it\n'), ((23290, 23310), 'numpy.linalg.norm', 'np.linalg.norm', (['dirn'], {}), '(dirn)\n', (23304, 23310), True, 'import _pickle, numpy as np, itertools as it\n'), ((23405, 23431), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (23418, 23431), True, 'import rpxdock.homog as hm\n'), ((23448, 23474), 'rpxdock.homog.rand_xform', 'hm.rand_xform', ([], {'cart_sd': '(0.5)'}), '(cart_sd=0.5)\n', (23461, 23474), True, 'import rpxdock.homog as hm\n'), ((23492, 23506), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23504, 23506), False, 'from time import perf_counter\n'), ((23525, 23576), 'rpxdock.bvh.bvh.bvh_slide', 'bvh.bvh_slide', (['bvh1', 'bvh2', 'pos1', 'pos2', 'radius', 'dirn'], {}), '(bvh1, bvh2, pos1, pos2, radius, dirn)\n', (23538, 23576), False, 'from rpxdock.bvh import BVH, bvh\n'), ((25915, 25929), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (25927, 25929), False, 'from time import perf_counter\n'), ((25946, 25955), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (25949, 25955), False, 'from rpxdock.bvh import BVH, bvh\n'), ((25972, 25981), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (25975, 25981), False, 'from rpxdock.bvh import BVH, bvh\n'), ((26183, 26197), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26195, 26197), False, 'from time import perf_counter\n'), ((26211, 26296), 'rpxdock.bvh.bvh.bvh_isect', 'bvh.bvh_isect', ([], {'bvh1': 'bvh1', 'bvh2': 'bvh2', 'pos1': 'pos1[i]', 'pos2': 'pos2[i]', 'mindist': 'mindist'}), '(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i], mindist=mindist\n )\n', (26224, 26296), False, 'from rpxdock.bvh import BVH, bvh\n'), ((26428, 26442), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26440, 26442), False, 'from time import perf_counter\n'), ((26461, 26554), 'rpxdock.bvh.bvh.isect_range_single', 
'bvh.isect_range_single', ([], {'bvh1': 'bvh1', 'bvh2': 'bvh2', 'pos1': 'pos1[i]', 'pos2': 'pos2[i]', 'mindist': 'mindist'}), '(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],\n mindist=mindist)\n', (26483, 26554), False, 'from rpxdock.bvh import BVH, bvh\n'), ((26645, 26659), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26657, 26659), False, 'from time import perf_counter\n'), ((26678, 26738), 'rpxdock.bvh.bvh.naive_isect_range', 'bvh.naive_isect_range', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]', 'mindist'], {}), '(bvh1, bvh2, pos1[i], pos2[i], mindist)\n', (26699, 26738), False, 'from rpxdock.bvh import BVH, bvh\n'), ((27489, 27506), 'numpy.any', 'np.any', (['isect[ok]'], {}), '(isect[ok])\n', (27495, 27506), True, 'import _pickle, numpy as np, itertools as it\n'), ((28531, 28545), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (28543, 28545), False, 'from time import perf_counter\n'), ((29127, 29142), 'numpy.all', 'np.all', (['(0 <= lb)'], {}), '(0 <= lb)\n', (29133, 29142), True, 'import _pickle, numpy as np, itertools as it\n'), ((29147, 29167), 'numpy.all', 'np.all', (['(lb - 1 <= ub)'], {}), '(lb - 1 <= ub)\n', (29153, 29167), True, 'import _pickle, numpy as np, itertools as it\n'), ((29172, 29189), 'numpy.all', 'np.all', (['(ub < Nids)'], {}), '(ub < Nids)\n', (29178, 29189), True, 'import _pickle, numpy as np, itertools as it\n'), ((29755, 29778), 'numpy.any', 'np.any', (['isect[lb <= ub]'], {}), '(isect[lb <= ub])\n', (29761, 29778), True, 'import _pickle, numpy as np, itertools as it\n'), ((30407, 30416), 'rpxdock.bvh.BVH', 'BVH', (['xyz1'], {}), '(xyz1)\n', (30410, 30416), False, 'from rpxdock.bvh import BVH, bvh\n'), ((30433, 30442), 'rpxdock.bvh.BVH', 'BVH', (['xyz2'], {}), '(xyz2)\n', (30436, 30442), False, 'from rpxdock.bvh import BVH, bvh\n'), ((30601, 30686), 'rpxdock.bvh.bvh.bvh_isect', 'bvh.bvh_isect', ([], {'bvh1': 'bvh1', 'bvh2': 'bvh2', 'pos1': 'pos1[i]', 'pos2': 'pos2[i]', 'mindist': 'mindist'}), '(bvh1=bvh1, bvh2=bvh2, 
pos1=pos1[i], pos2=pos2[i], mindist=mindist\n )\n', (30614, 30686), False, 'from rpxdock.bvh import BVH, bvh\n'), ((30725, 30825), 'rpxdock.bvh.bvh.isect_range_single', 'bvh.isect_range_single', ([], {'bvh1': 'bvh1', 'bvh2': 'bvh2', 'pos1': 'pos1[i]', 'pos2': 'pos2[i]', 'mindist': 'mindist'}), '(bvh1=bvh1, bvh2=bvh2, pos1=pos1[i], pos2=pos2[i],\n mindist=mindist, **arg)\n', (30747, 30825), False, 'from rpxdock.bvh import BVH, bvh\n'), ((33457, 33499), 'rpxdock.bvh.bvh.bvh_isect', 'bvh.bvh_isect', (['bvh1', 'bvh2', 'p1', 'p2', 'mindist'], {}), '(bvh1, bvh2, p1, p2, mindist)\n', (33470, 33499), False, 'from rpxdock.bvh import BVH, bvh\n'), ((33571, 33585), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (33583, 33585), False, 'from time import perf_counter\n'), ((33702, 33714), 'itertools.repeat', 'repeat', (['bvh1'], {}), '(bvh1)\n', (33708, 33714), False, 'from itertools import repeat\n'), ((33725, 33737), 'itertools.repeat', 'repeat', (['bvh2'], {}), '(bvh2)\n', (33731, 33737), False, 'from itertools import repeat\n'), ((33748, 33766), 'numpy.split', 'np.split', (['pos1', 'nt'], {}), '(pos1, nt)\n', (33756, 33766), True, 'import _pickle, numpy as np, itertools as it\n'), ((33777, 33795), 'numpy.split', 'np.split', (['pos2', 'nt'], {}), '(pos2, nt)\n', (33785, 33795), True, 'import _pickle, numpy as np, itertools as it\n'), ((33806, 33821), 'itertools.repeat', 'repeat', (['mindist'], {}), '(mindist)\n', (33812, 33821), False, 'from itertools import repeat\n'), ((33899, 33913), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (33911, 33913), False, 'from time import perf_counter\n'), ((34770, 34806), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'p1', 'p2'], {}), '(bvh1, bvh2, p1, p2)\n', (34786, 34806), False, 'from rpxdock.bvh import BVH, bvh\n'), ((34880, 34894), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (34892, 34894), False, 'from time import perf_counter\n'), ((35014, 35026), 'itertools.repeat', 'repeat', 
(['bvh1'], {}), '(bvh1)\n', (35020, 35026), False, 'from itertools import repeat\n'), ((35037, 35049), 'itertools.repeat', 'repeat', (['bvh2'], {}), '(bvh2)\n', (35043, 35049), False, 'from itertools import repeat\n'), ((35060, 35078), 'numpy.split', 'np.split', (['pos1', 'nt'], {}), '(pos1, nt)\n', (35068, 35078), True, 'import _pickle, numpy as np, itertools as it\n'), ((35089, 35107), 'numpy.split', 'np.split', (['pos2', 'nt'], {}), '(pos2, nt)\n', (35097, 35107), True, 'import _pickle, numpy as np, itertools as it\n'), ((35134, 35178), 'numpy.concatenate', 'np.concatenate', (['[f for f in futures]'], {'axis': '(1)'}), '([f for f in futures], axis=1)\n', (35148, 35178), True, 'import _pickle, numpy as np, itertools as it\n'), ((35197, 35211), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (35209, 35211), False, 'from time import perf_counter\n'), ((7603, 7620), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (7618, 7620), True, 'import _pickle, numpy as np, itertools as it\n'), ((7683, 7700), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (7698, 7700), True, 'import _pickle, numpy as np, itertools as it\n'), ((8495, 8522), 'numpy.linalg.norm', 'np.linalg.norm', (['(xyz1 - xyz2)'], {}), '(xyz1 - xyz2)\n', (8509, 8522), True, 'import _pickle, numpy as np, itertools as it\n'), ((8653, 8674), 'numpy.sum', 'np.sum', (['(delta * delta)'], {}), '(delta * delta)\n', (8659, 8674), True, 'import _pickle, numpy as np, itertools as it\n'), ((8952, 8969), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (8967, 8969), True, 'import _pickle, numpy as np, itertools as it\n'), ((9368, 9391), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1 - p2)'], {}), '(p1 - p2)\n', (9382, 9391), True, 'import _pickle, numpy as np, itertools as it\n'), ((9466, 9484), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz2[0]'], {}), '(xyz2[0])\n', (9475, 9484), True, 'import rpxdock.homog as hm\n'), ((9506, 9524), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz1[0]'], 
{}), '(xyz1[0])\n', (9515, 9524), True, 'import rpxdock.homog as hm\n'), ((9602, 9623), 'numpy.sum', 'np.sum', (['(delta * delta)'], {}), '(delta * delta)\n', (9608, 9623), True, 'import _pickle, numpy as np, itertools as it\n'), ((10478, 10494), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10492, 10494), True, 'import _pickle, numpy as np, itertools as it\n'), ((10671, 10685), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10683, 10685), False, 'from time import perf_counter\n'), ((10828, 10842), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10840, 10842), False, 'from time import perf_counter\n'), ((10925, 10939), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (10937, 10939), False, 'from time import perf_counter\n'), ((10963, 11009), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1[i]', 'pos2[i]'], {}), '(bvh1, bvh2, pos1[i], pos2[i])\n', (10979, 11009), False, 'from rpxdock.bvh import BVH, bvh\n'), ((11190, 11204), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (11202, 11204), False, 'from time import perf_counter\n'), ((11228, 11270), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'tmp', 'pos2[i]'], {}), '(bvh1, bvh2, tmp, pos2[i])\n', (11244, 11270), False, 'from rpxdock.bvh import BVH, bvh\n'), ((11423, 11462), 'numpy.allclose', 'np.allclose', (['dn', '(2 * radius)'], {'atol': '(1e-06)'}), '(dn, 2 * radius, atol=1e-06)\n', (11434, 11462), True, 'import _pickle, numpy as np, itertools as it\n'), ((15769, 15785), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15783, 15785), True, 'import _pickle, numpy as np, itertools as it\n'), ((15952, 15966), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (15964, 15966), False, 'from time import perf_counter\n'), ((16131, 16145), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (16143, 16145), False, 'from time import perf_counter\n'), ((16273, 16287), 'time.perf_counter', 'perf_counter', ([], {}), 
'()\n', (16285, 16287), False, 'from time import perf_counter\n'), ((16996, 17009), 'numpy.max', 'np.max', (['dpair'], {}), '(dpair)\n', (17002, 17009), True, 'import _pickle, numpy as np, itertools as it\n'), ((18402, 18418), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (18416, 18418), True, 'import _pickle, numpy as np, itertools as it\n'), ((20529, 20545), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (20543, 20545), True, 'import _pickle, numpy as np, itertools as it\n'), ((21012, 21068), 'numpy.logical_and', 'np.logical_and', (['(100 <= rpairs[:, 0])', '(rpairs[:, 0] <= 400)'], {}), '(100 <= rpairs[:, 0], rpairs[:, 0] <= 400)\n', (21026, 21068), True, 'import _pickle, numpy as np, itertools as it\n'), ((21093, 21149), 'numpy.logical_and', 'np.logical_and', (['(600 <= rpairs[:, 0])', '(rpairs[:, 0] <= 900)'], {}), '(600 <= rpairs[:, 0], rpairs[:, 0] <= 900)\n', (21107, 21149), True, 'import _pickle, numpy as np, itertools as it\n'), ((21191, 21245), 'numpy.logical_and', 'np.logical_and', (['(100 <= pairs[:, 0])', '(pairs[:, 0] <= 400)'], {}), '(100 <= pairs[:, 0], pairs[:, 0] <= 400)\n', (21205, 21245), True, 'import _pickle, numpy as np, itertools as it\n'), ((21286, 21340), 'numpy.logical_and', 'np.logical_and', (['(600 <= pairs[:, 0])', '(pairs[:, 0] <= 900)'], {}), '(600 <= pairs[:, 0], pairs[:, 0] <= 900)\n', (21300, 21340), True, 'import _pickle, numpy as np, itertools as it\n'), ((23593, 23607), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23605, 23607), False, 'from time import perf_counter\n'), ((23659, 23673), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23671, 23673), False, 'from time import perf_counter\n'), ((23697, 23737), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (23713, 23737), False, 'from rpxdock.bvh import BVH, bvh\n'), ((23915, 23929), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23927, 23929), False, 'from 
time import perf_counter\n'), ((23953, 23993), 'rpxdock.bvh.bvh.bvh_min_dist', 'bvh.bvh_min_dist', (['bvh1', 'bvh2', 'pos1', 'pos2'], {}), '(bvh1, bvh2, pos1, pos2)\n', (23969, 23993), False, 'from rpxdock.bvh import BVH, bvh\n'), ((24146, 24185), 'numpy.allclose', 'np.allclose', (['dn', '(2 * radius)'], {'atol': '(1e-06)'}), '(dn, 2 * radius, atol=1e-06)\n', (24157, 24185), True, 'import _pickle, numpy as np, itertools as it\n'), ((24205, 24219), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (24217, 24219), False, 'from time import perf_counter\n'), ((24242, 24307), 'rpxdock.bvh.bvh.bvh_collect_pairs', 'bvh.bvh_collect_pairs', (['bvhcol1', 'bvhcol2', 'pos1', 'pos2', 'pairdis', 'buf'], {}), '(bvhcol1, bvhcol2, pos1, pos2, pairdis, buf)\n', (24263, 24307), False, 'from rpxdock.bvh import BVH, bvh\n'), ((25998, 26012), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26010, 26012), False, 'from time import perf_counter\n'), ((26309, 26323), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26321, 26323), False, 'from time import perf_counter\n'), ((26608, 26622), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26620, 26622), False, 'from time import perf_counter\n'), ((26786, 26800), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (26798, 26800), False, 'from time import perf_counter\n'), ((28419, 28434), 'numpy.arange', 'np.arange', (['Nids'], {}), '(Nids)\n', (28428, 28434), True, 'import _pickle, numpy as np, itertools as it\n'), ((28487, 28502), 'numpy.arange', 'np.arange', (['Nids'], {}), '(Nids)\n', (28496, 28502), True, 'import _pickle, numpy as np, itertools as it\n'), ((29291, 29327), 'numpy.logical_or', 'np.logical_or', (['(lb > 0)', '(ub < Nids - 1)'], {}), '(lb > 0, ub < Nids - 1)\n', (29304, 29327), True, 'import _pickle, numpy as np, itertools as it\n'), ((33969, 33983), 'numpy.sum', 'np.sum', (['isect2'], {}), '(isect2)\n', (33975, 33983), True, 'import _pickle, numpy as np, itertools as it\n'), ((5132, 5151), 
'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz1[i1]'], {}), '(xyz1[i1])\n', (5141, 5151), True, 'import rpxdock.homog as hm\n'), ((5164, 5183), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz2[i2]'], {}), '(xyz2[i2])\n', (5173, 5183), True, 'import rpxdock.homog as hm\n'), ((6478, 6497), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz1[i1]'], {}), '(xyz1[i1])\n', (6487, 6497), True, 'import rpxdock.homog as hm\n'), ((6507, 6526), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz2[i2]'], {}), '(xyz2[i2])\n', (6516, 6526), True, 'import rpxdock.homog as hm\n'), ((7731, 7748), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (7746, 7748), True, 'import _pickle, numpy as np, itertools as it\n'), ((7782, 7799), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (7797, 7799), True, 'import _pickle, numpy as np, itertools as it\n'), ((9316, 9334), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz2[0]'], {}), '(xyz2[0])\n', (9325, 9334), True, 'import rpxdock.homog as hm\n'), ((11027, 11041), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (11039, 11041), False, 'from time import perf_counter\n'), ((11138, 11162), 'rpxdock.homog.htrans', 'hm.htrans', (['(dirn * dslide)'], {}), '(dirn * dslide)\n', (11147, 11162), True, 'import rpxdock.homog as hm\n'), ((11288, 11302), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (11300, 11302), False, 'from time import perf_counter\n'), ((11327, 11366), 'numpy.allclose', 'np.allclose', (['dn', '(2 * radius)'], {'atol': '(1e-06)'}), '(dn, 2 * radius, atol=1e-06)\n', (11338, 11366), True, 'import _pickle, numpy as np, itertools as it\n'), ((16820, 16847), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz1[pbvh[:, 0]]'], {}), '(xyz1[pbvh[:, 0]])\n', (16829, 16847), True, 'import rpxdock.homog as hm\n'), ((16886, 16913), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz2[pbvh[:, 1]]'], {}), '(xyz2[pbvh[:, 1]])\n', (16895, 16913), True, 'import rpxdock.homog as hm\n'), ((21675, 21721), 'numpy.logical_and', 'np.logical_and', (['(100 <= p[:, 
0])', '(p[:, 0] <= 400)'], {}), '(100 <= p[:, 0], p[:, 0] <= 400)\n', (21689, 21721), True, 'import _pickle, numpy as np, itertools as it\n'), ((21749, 21795), 'numpy.logical_and', 'np.logical_and', (['(600 <= p[:, 0])', '(p[:, 0] <= 900)'], {}), '(600 <= p[:, 0], p[:, 0] <= 900)\n', (21763, 21795), True, 'import _pickle, numpy as np, itertools as it\n'), ((21840, 21886), 'numpy.logical_and', 'np.logical_and', (['(+20 <= p[:, 1])', '(p[:, 1] <= 180)'], {}), '(+20 <= p[:, 1], p[:, 1] <= 180)\n', (21854, 21886), True, 'import _pickle, numpy as np, itertools as it\n'), ((23337, 23353), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (23351, 23353), True, 'import _pickle, numpy as np, itertools as it\n'), ((23755, 23769), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23767, 23769), False, 'from time import perf_counter\n'), ((23866, 23890), 'rpxdock.homog.htrans', 'hm.htrans', (['(dirn * dslide)'], {}), '(dirn * dslide)\n', (23875, 23890), True, 'import rpxdock.homog as hm\n'), ((24011, 24025), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (24023, 24025), False, 'from time import perf_counter\n'), ((24050, 24089), 'numpy.allclose', 'np.allclose', (['dn', '(2 * radius)'], {'atol': '(1e-06)'}), '(dn, 2 * radius, atol=1e-06)\n', (24061, 24089), True, 'import _pickle, numpy as np, itertools as it\n'), ((24604, 24641), 'numpy.linalg.norm', 'np.linalg.norm', (['(pair2 - pair1)'], {'axis': '(1)'}), '(pair2 - pair1, axis=1)\n', (24618, 24641), True, 'import _pickle, numpy as np, itertools as it\n'), ((25350, 25371), 'numpy.random.randn', 'np.random.randn', (['N', '(3)'], {}), '(N, 3)\n', (25365, 25371), True, 'import _pickle, numpy as np, itertools as it\n'), ((29462, 29477), 'numpy.where', 'np.where', (['isect'], {}), '(isect)\n', (29470, 29477), True, 'import _pickle, numpy as np, itertools as it\n'), ((9259, 9277), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyz1[0]'], {}), '(xyz1[0])\n', (9268, 9277), True, 'import rpxdock.homog as hm\n'), ((21936, 
21982), 'numpy.logical_and', 'np.logical_and', (['(220 <= p[:, 1])', '(p[:, 1] <= 380)'], {}), '(220 <= p[:, 1], p[:, 1] <= 380)\n', (21950, 21982), True, 'import _pickle, numpy as np, itertools as it\n'), ((24385, 24399), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (24397, 24399), False, 'from time import perf_counter\n'), ((24664, 24677), 'numpy.max', 'np.max', (['dpair'], {}), '(dpair)\n', (24670, 24677), True, 'import _pickle, numpy as np, itertools as it\n'), ((31030, 31045), 'numpy.diff', 'np.diff', (['range1'], {}), '(range1)\n', (31037, 31045), True, 'import _pickle, numpy as np, itertools as it\n'), ((22038, 22084), 'numpy.logical_and', 'np.logical_and', (['(420 <= p[:, 1])', '(p[:, 1] <= 580)'], {}), '(420 <= p[:, 1], p[:, 1] <= 580)\n', (22052, 22084), True, 'import _pickle, numpy as np, itertools as it\n'), ((24467, 24497), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyzcol1[pair[:, 0]]'], {}), '(xyzcol1[pair[:, 0]])\n', (24476, 24497), True, 'import rpxdock.homog as hm\n'), ((24539, 24569), 'rpxdock.homog.hpoint', 'hm.hpoint', (['xyzcol2[pair[:, 1]]'], {}), '(xyzcol2[pair[:, 1]])\n', (24548, 24569), True, 'import rpxdock.homog as hm\n'), ((22121, 22167), 'numpy.logical_and', 'np.logical_and', (['(620 <= p[:, 1])', '(p[:, 1] <= 780)'], {}), '(620 <= p[:, 1], p[:, 1] <= 780)\n', (22135, 22167), True, 'import _pickle, numpy as np, itertools as it\n'), ((22204, 22250), 'numpy.logical_and', 'np.logical_and', (['(820 <= p[:, 1])', '(p[:, 1] <= 980)'], {}), '(820 <= p[:, 1], p[:, 1] <= 980)\n', (22218, 22250), True, 'import _pickle, numpy as np, itertools as it\n')] |
import os
import copy
import logging
import time
import numpy as np
# The solver deliberately works with NaN-filled grids, so divide/invalid
# floating-point conditions are expected; silence the numpy warnings.
np.seterr(divide='ignore', invalid='ignore')
from prune import prune
# Log to both a file in the current working directory and the console.
# NOTE(review): "%(msg)s" is the *raw* (pre-formatting) LogRecord attribute;
# the conventional key is "%(message)s". Output is identical here because all
# call sites pre-format with str.format, but confirm before passing lazy args.
logging_path = os.path.join(os.getcwd(), "text_log.log")
logging.basicConfig(level=logging.INFO,
                    format="%(levelname)s - %(asctime)s - %(msg)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    handlers=[
                        logging.FileHandler(logging_path), # For debug
                        logging.StreamHandler(),
                    ])
def magic_series(grid):
    """ True when the one-hot grid encodes a self-describing series,
    i.e. series[k] == sum(series[i] == k) for every k.
    Expects a complete (NaN-free) one-hot grid where grid[v, p] == 1
    means series[p] == v. """
    logging.debug("Grid:\n{}".format(grid))
    occurrences = grid.sum(1)       # row k: how many positions hold value k
    series = np.where(grid.T)[1]    # decode the series, one value per column
    magic = occurrences == series
    logging.debug("Magic check:\n{}".format(magic))
    return magic.all()
class Domain(object):
    """ 2-D NaN/boolean domain for the magic (self-describing) series problem.

    The domain is a (length x length) grid where column p encodes the
    candidate values for series position p: grid[v, p] is 1 when
    series[p] == v, 0 when value v is excluded, and NaN while undecided.

    Fix: `estimate` previously built the reducer via eval("np.nan%s" % f);
    it now iterates the np.nanmin / np.nanmax callables directly (same
    order, same results, no dynamic code execution).
    """

    def __init__(self, length):
        from prune import prune_sum_eq_len
        self.length = length
        self.numbers = np.linspace(0, length-1, length)
        self.grid = np.empty((length, length))
        self.grid[:] = np.nan
        # series[0] == 0 is self-contradictory (it would claim "no zeros"
        # while position 0 itself held a zero), so exclude it up front.
        self.grid[0,0] = False
        prune_sum_eq_len(self)

    def __str__(self):
        return str(self.grid)

    def __repr__(self):
        return str(self.to_numbers())

    def __setitem__(self, position, value):
        """ Assign `value` to series position (writes a one-hot column);
        any non-integer index writes raw grid entries instead. """
        if isinstance(position, (int, np.integer)):
            self.grid[:, position] = \
                np.array(range(self.length)) == value
        else:
            self.grid[position] = value

    def __getitem__(self, index):
        return self.grid[index]

    def __iter__(self):
        """ Generate available values from most right column
        with available values """
        logging.debug("Grid:\n{}".format(self))
        missing_values = self.missing_values()
        logging.debug("Iter missing values:\n{}".format(str(missing_values)))
        missing_mask = (~np.isnan(missing_values))
        if missing_mask.any():
            # Right-most column that still has at least one candidate.
            position = np.where(missing_mask.max(0))[0][-1]
            available = missing_values[:, position]
            logging.debug("Iter available: {}".format(available))
            for value in available[~np.isnan(available)]:
                yield value, position

    def to_numbers(self):
        """ Convert 2-D domain into 1-D array (NaN where undecided). """
        numbers = np.dot(self.numbers, self.grid)
        return numbers

    def missing_values(self):
        """ Make matrix holding the candidate value at each undecided cell
        and NaN everywhere else. """
        mask = np.isnan(self.grid)
        values = mask * self.numbers.reshape((-1, 1))
        return np.where(mask, values, np.nan)

    def estimate(self, how="sum"):
        """ Estimate min or max (position), if `sum`,
        (index * position), if `mult`
        for each missing position.

        Returns [lower_bound, upper_bound] matrices with NaN at
        already-decided cells. """
        mask = np.isnan(self.grid)
        if how == "mult":
            values = np.multiply(*np.mgrid[:self.length, :self.length])
            missing_values = np.where(mask, values, np.nan)
            num = np.multiply(self.to_numbers(), self.numbers)
        else:
            missing_values = self.missing_values()
            num = self.to_numbers()
        # Contribution of the already-decided cells plus this candidate.
        clean_bound = np.nan_to_num(missing_values) - \
            np.nan_to_num(num) + np.nansum(num)
        clean_bound = np.where(mask, clean_bound, np.nan)
        out = list()
        # Direct callables instead of eval("np.nan%s" % f); order (min
        # first, max second) matches the original loop over ("min", "max").
        for reducer in (np.nanmin, np.nanmax):
            est = reducer(missing_values, 0)
            all_sum = np.nansum(est)
            out.append(clean_bound + all_sum - est)
        return out

    def prune(self):
        """ Prune unfeasible values from domain (delegates to prune.prune). """
        return prune(self)

    def search(self):
        """ Walk through domain and try each available item;
        return the set of repr() strings of all magic series found. """
        results = set()
        for value, position in self:
            temp_domain = copy.deepcopy(self)
            logging.debug("value: {}, position: {}".format(value, position))
            temp_domain[position] = value
            feasible = temp_domain.prune()
            if not feasible:
                continue
            # If no missing values, test and save
            if feasible and not np.isnan(temp_domain.to_numbers()).any():
                logging.debug("Full series: {}".format(repr(temp_domain)))
                if temp_domain.magic_series():
                    results.add(repr(temp_domain))
            # Save all previous results
            results |= temp_domain.search()
        return results

    def magic_series(self):
        """ Check magic """
        return magic_series(self.grid)

    def feasibility_test(self):
        """ Test domain feasibility
        ((b == 1) & (x == v)) | ((b == 0) & (x != v)) """
        values = np.repeat(
            np.linspace(0, self.length-1, self.length).reshape((-1,1)),
            self.length, axis=1)
        feasibility = \
            np.logical_or(
                np.isnan(self.grid), # missing
                np.equal( # or feasible
                    self.grid,
                    np.equal(
                        values,
                        np.repeat([self.to_numbers()], self.length, axis=0))))
        return feasibility
def main():
    """ Solve the magic-series problem for lengths 4..49, logging each
    result with its wall-clock time. Interruptible with Ctrl-C. """
    try:
        for i in range(4, 50):
            t0 = time.time()
            d = Domain(i)
            result = d.search()
            logging.info("{} finished in {} sec: {}"\
                         .format(i, round(time.time() - t0, 5), result))
    except KeyboardInterrupt:
        # Fix: drop the unused `as e` binding; Ctrl-C just stops the sweep.
        pass


if __name__ == "__main__":
    main()
| [
"numpy.multiply",
"logging.StreamHandler",
"copy.deepcopy",
"numpy.where",
"time.time",
"os.getcwd",
"numpy.linspace",
"prune.prune_sum_eq_len",
"numpy.empty",
"numpy.dot",
"numpy.isnan",
"logging.FileHandler",
"numpy.nansum",
"numpy.seterr",
"numpy.nan_to_num",
"prune.prune"
] | [((69, 113), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (78, 113), True, 'import numpy as np\n'), ((168, 179), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (177, 179), False, 'import os\n'), ((871, 905), 'numpy.linspace', 'np.linspace', (['(0)', '(length - 1)', 'length'], {}), '(0, length - 1, length)\n', (882, 905), True, 'import numpy as np\n'), ((924, 950), 'numpy.empty', 'np.empty', (['(length, length)'], {}), '((length, length))\n', (932, 950), True, 'import numpy as np\n'), ((1020, 1042), 'prune.prune_sum_eq_len', 'prune_sum_eq_len', (['self'], {}), '(self)\n', (1036, 1042), False, 'from prune import prune_sum_eq_len\n'), ((2222, 2253), 'numpy.dot', 'np.dot', (['self.numbers', 'self.grid'], {}), '(self.numbers, self.grid)\n', (2228, 2253), True, 'import numpy as np\n'), ((2386, 2405), 'numpy.isnan', 'np.isnan', (['self.grid'], {}), '(self.grid)\n', (2394, 2405), True, 'import numpy as np\n'), ((2475, 2505), 'numpy.where', 'np.where', (['mask', 'values', 'np.nan'], {}), '(mask, values, np.nan)\n', (2483, 2505), True, 'import numpy as np\n'), ((2695, 2714), 'numpy.isnan', 'np.isnan', (['self.grid'], {}), '(self.grid)\n', (2703, 2714), True, 'import numpy as np\n'), ((3165, 3200), 'numpy.where', 'np.where', (['mask', 'clean_bound', 'np.nan'], {}), '(mask, clean_bound, np.nan)\n', (3173, 3200), True, 'import numpy as np\n'), ((3509, 3520), 'prune.prune', 'prune', (['self'], {}), '(self)\n', (3514, 3520), False, 'from prune import prune\n'), ((346, 379), 'logging.FileHandler', 'logging.FileHandler', (['logging_path'], {}), '(logging_path)\n', (365, 379), False, 'import logging\n'), ((401, 424), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (422, 424), False, 'import logging\n'), ((622, 638), 'numpy.where', 'np.where', (['grid.T'], {}), '(grid.T)\n', (630, 638), True, 'import numpy as np\n'), ((1796, 1820), 'numpy.isnan', 'np.isnan', (['missing_values'], 
{}), '(missing_values)\n', (1804, 1820), True, 'import numpy as np\n'), ((2763, 2813), 'numpy.multiply', 'np.multiply', (['*np.mgrid[:self.length, :self.length]'], {}), '(*np.mgrid[:self.length, :self.length])\n', (2774, 2813), True, 'import numpy as np\n'), ((2843, 2873), 'numpy.where', 'np.where', (['mask', 'values', 'np.nan'], {}), '(mask, values, np.nan)\n', (2851, 2873), True, 'import numpy as np\n'), ((3128, 3142), 'numpy.nansum', 'np.nansum', (['num'], {}), '(num)\n', (3137, 3142), True, 'import numpy as np\n'), ((3334, 3348), 'numpy.nansum', 'np.nansum', (['est'], {}), '(est)\n', (3343, 3348), True, 'import numpy as np\n'), ((3695, 3714), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3708, 3714), False, 'import copy\n'), ((4767, 4786), 'numpy.isnan', 'np.isnan', (['self.grid'], {}), '(self.grid)\n', (4775, 4786), True, 'import numpy as np\n'), ((5108, 5119), 'time.time', 'time.time', ([], {}), '()\n', (5117, 5119), False, 'import time\n'), ((3061, 3090), 'numpy.nan_to_num', 'np.nan_to_num', (['missing_values'], {}), '(missing_values)\n', (3074, 3090), True, 'import numpy as np\n'), ((3107, 3125), 'numpy.nan_to_num', 'np.nan_to_num', (['num'], {}), '(num)\n', (3120, 3125), True, 'import numpy as np\n'), ((2067, 2086), 'numpy.isnan', 'np.isnan', (['available'], {}), '(available)\n', (2075, 2086), True, 'import numpy as np\n'), ((4607, 4651), 'numpy.linspace', 'np.linspace', (['(0)', '(self.length - 1)', 'self.length'], {}), '(0, self.length - 1, self.length)\n', (4618, 4651), True, 'import numpy as np\n'), ((5265, 5276), 'time.time', 'time.time', ([], {}), '()\n', (5274, 5276), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 09:06:40 2021

@author: subhash

Load a LAS drone point cloud, visualize it with Open3D, and voxelize it.
"""

import numpy as np
import matplotlib.pyplot as plt
import laspy as lp

input_path = "C:\\"
dataname = "2020_Drone_M"

# Read the LAS file (laspy 1.x API).
point_cloud=lp.file.File(input_path+dataname+".las", mode="r")
print(type(point_cloud))
print(point_cloud)

# Stack the per-axis coordinate and colour channels into (N, 3) arrays.
points = np.vstack((point_cloud.x, point_cloud.y, point_cloud.z)).transpose()
colors = np.vstack((point_cloud.red, point_cloud.green, point_cloud.blue)).transpose()

# # pip install pptk
# import pptk
# v = pptk.viewer(points)
# v.attributes(colors/65535)
# v.color_map('cool')
# v.set(point_size=0.001,bg_color=[0,0,0,0],show_axis=0,show_grid=0)

import open3d as o3d

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
print(pcd.points)
print(type(pcd.points))
print(np.asarray(pcd.points).size)
# LAS colours are 16-bit integers; Open3D expects floats in [0, 1].
pcd.colors = o3d.utility.Vector3dVector(colors/65535)
# # pcd.normals = o3d.utility.Vector3dVector(normals) no normals in data set .las?
o3d.visualization.draw_geometries([pcd])

# Fix: the original split this statement in two, leaving a bare
# `create_from_point_cloud(...)` call that raised NameError.
voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.40)
o3d.visualization.draw_geometries([voxel_grid])
| [
"laspy.file.File",
"numpy.asarray",
"open3d.visualization.draw_geometries",
"numpy.vstack",
"open3d.geometry.PointCloud",
"open3d.utility.Vector3dVector"
] | [((215, 269), 'laspy.file.File', 'lp.file.File', (["(input_path + dataname + '.las')"], {'mode': '"""r"""'}), "(input_path + dataname + '.las', mode='r')\n", (227, 269), True, 'import laspy as lp\n'), ((695, 720), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (718, 720), True, 'import open3d as o3d\n'), ((734, 768), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (760, 768), True, 'import open3d as o3d\n'), ((859, 901), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(colors / 65535)'], {}), '(colors / 65535)\n', (885, 901), True, 'import open3d as o3d\n'), ((983, 1023), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pcd]'], {}), '([pcd])\n', (1016, 1023), True, 'import open3d as o3d\n'), ((1106, 1153), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[voxel_grid]'], {}), '([voxel_grid])\n', (1139, 1153), True, 'import open3d as o3d\n'), ((322, 378), 'numpy.vstack', 'np.vstack', (['(point_cloud.x, point_cloud.y, point_cloud.z)'], {}), '((point_cloud.x, point_cloud.y, point_cloud.z))\n', (331, 378), True, 'import numpy as np\n'), ((400, 465), 'numpy.vstack', 'np.vstack', (['(point_cloud.red, point_cloud.green, point_cloud.blue)'], {}), '((point_cloud.red, point_cloud.green, point_cloud.blue))\n', (409, 465), True, 'import numpy as np\n'), ((817, 839), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (827, 839), True, 'import numpy as np\n')] |
import numpy as np
from scipy import integrate
from floris.utils.tools import valid_ops as vops
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MISCELLANEOUS #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MAIN #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def __Xie_Archer_wake(x, y, z, v_inflow, D_r, C_t, z_hub):
beta = (1. + np.sqrt(1 - C_t)) / (2. * np.sqrt(1 - C_t)) # C_t < 0.9
epsilon = 0.2 * np.sqrt(beta)
k_y = 0.025
k_z = 0.0175
sigma_y_D_r = k_y * (x / D_r) + epsilon
sigma_z_D_r = k_z * (x / D_r) + epsilon
# r2 = (z - z_hub)**2 + (y)**2
v_deficit = (1. - np.sqrt(1. - (C_t / (8 * sigma_y_D_r * sigma_z_D_r)))) * \
np.exp(- (((z - z_hub)**2) / (2 * (sigma_z_D_r * D_r)**2)) -
(((y**2) / (2 * (sigma_y_D_r * D_r)**2))))
v_wake = v_inflow * (1 - v_deficit)
return sigma_y_D_r, sigma_z_D_r, v_deficit, v_wake
class XieArcherWake(object):
def __init__(self, loc, inflow, C_t, D_r, z_hub, T_m=None, I_w=None,
I_a=0.077):
self.ref_loc = loc # (x_axis, y_axis)
self.v_inflow = inflow
self.C_thrust = C_t
self.d_rotor = D_r
self.z_hub = z_hub
self.I_a = I_a
self.epsilon = 0.2 * np.sqrt(
(1. + np.sqrt(1 - self.C_thrust)) / (2. * np.sqrt(1 - self.C_thrust)))
self.T_m = None if T_m is None else vops.find_and_load_model(T_m, "tim")
self.I_wake = None if T_m is None else I_w
# self.k_star = 0.033 if T_m is None else 0.3837 * I_w + 0.003678
self.k_y = 0.025 if T_m is None else (0.025 * I_w) / I_a
self.k_z = 0.0175 if T_m is None else (0.0175 * I_w) / I_a
def wake_sigma_Dr(self, k, x):
return k * (x / self.d_rotor) + self.epsilon
def deficit_constant(self, sigma_y_Dr, sigma_z_Dr):
a = 1. - np.sqrt(1. - (self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr))) if self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr) <= 1. else 1.
b, c = -1. / (2 * (sigma_z_Dr * self.d_rotor)**2), -1. / (2 * (sigma_y_Dr * self.d_rotor)**2)
return a, b, c
def wake_integrand(self, sigma_y_Dr, sigma_z_Dr, d_spanwise):
A, B, C = self.deficit_constant(sigma_y_Dr, sigma_z_Dr)
return lambda r, t: A * np.exp(
(C * (r * np.cos(t) + d_spanwise)**2) + (B * (r * np.sin(t))**2)) * r
def wake_velocity(self, x, y, z):
sigma_y_Dr, sigma_z_Dr = self.wake_sigma_Dr(self.k_y, x), self.wake_sigma_Dr(self.k_z, x)
v_deficit = (1. - np.sqrt(1. - (self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr)))) * \
np.exp(- (((z - self.z_hub)**2) / (2 * (sigma_z_Dr * self.d_rotor)**2)) - (((y**2) / (2 * (sigma_y_Dr * self.d_rotor)**2))))
return self.v_inflow * (1 - v_deficit)
@staticmethod
def wake_intersection(d_spanwise, y_wake, z_wake, down_d_rotor):
return vops.wake_overlap_ellipse(d_spanwise, y_wake, z_wake, down_d_rotor)
def wake_loss(self, down_loc, down_d_rotor, down_z_hub=None, eq=None):
assert self.ref_loc[1] >= down_loc[1], "Reference WT must be upstream downstream WT!"
d_streamwise, d_spanwise = \
np.abs(self.ref_loc[1] - down_loc[1]), np.abs(self.ref_loc[0] - down_loc[0])
m, n, = 25., 3. # application scope of the model and control the calculation
sigma_y_Dr, sigma_z_Dr = \
self.wake_sigma_Dr(self.k_y, d_streamwise), self.wake_sigma_Dr(self.k_z, d_streamwise)
if d_spanwise > n * sigma_y_Dr * self.d_rotor or d_streamwise > m * self.d_rotor:
return 0., 0.
else:
# f = lambda r, t: A * np.exp(B * ((r * np.cos(t) + d_spanwise)**2 + (r * np.sin(t))**2)) * r
integral_velocity, _ = integrate.dblquad(
self.wake_integrand(sigma_y_Dr, sigma_z_Dr, d_spanwise),
0, 2 * np.pi, lambda r: 0, lambda r: down_d_rotor / 2)
intersect_ratio = self.wake_intersection(
d_spanwise, 2 * sigma_y_Dr * self.d_rotor, 2 * sigma_z_Dr * self.d_rotor, down_d_rotor) \
if self.T_m is not None else 0.
I_add = self.T_m(self.C_thrust, self.I_wake, d_streamwise / self.d_rotor) \
if self.T_m is not None else 0.
return integral_velocity / (0.25 * np.pi * down_d_rotor**2), I_add * intersect_ratio
if __name__ == "__main__":
m = 20.
n = 0.
test = XieArcherWake(np.array([0., 80. * m]), 8, 0.8, 80, 70)
std_y = test.wake_sigma_Dr(0.025, 80. * m) * 80
std_z = test.wake_sigma_Dr(0.0175, 80. * m) * 80
print(std_y)
print(std_z)
# print(test.wake_loss(np.array([std_y * n, 0.]), 80))
| [
"numpy.abs",
"numpy.sqrt",
"floris.utils.tools.valid_ops.wake_overlap_ellipse",
"numpy.exp",
"numpy.array",
"floris.utils.tools.valid_ops.find_and_load_model",
"numpy.cos",
"numpy.sin"
] | [((741, 754), 'numpy.sqrt', 'np.sqrt', (['beta'], {}), '(beta)\n', (748, 754), True, 'import numpy as np\n'), ((1001, 1107), 'numpy.exp', 'np.exp', (['(-((z - z_hub) ** 2 / (2 * (sigma_z_D_r * D_r) ** 2)) - y ** 2 / (2 * (\n sigma_y_D_r * D_r) ** 2))'], {}), '(-((z - z_hub) ** 2 / (2 * (sigma_z_D_r * D_r) ** 2)) - y ** 2 / (2 *\n (sigma_y_D_r * D_r) ** 2))\n', (1007, 1107), True, 'import numpy as np\n'), ((3187, 3254), 'floris.utils.tools.valid_ops.wake_overlap_ellipse', 'vops.wake_overlap_ellipse', (['d_spanwise', 'y_wake', 'z_wake', 'down_d_rotor'], {}), '(d_spanwise, y_wake, z_wake, down_d_rotor)\n', (3212, 3254), True, 'from floris.utils.tools import valid_ops as vops\n'), ((4746, 4771), 'numpy.array', 'np.array', (['[0.0, 80.0 * m]'], {}), '([0.0, 80.0 * m])\n', (4754, 4771), True, 'import numpy as np\n'), ((664, 680), 'numpy.sqrt', 'np.sqrt', (['(1 - C_t)'], {}), '(1 - C_t)\n', (671, 680), True, 'import numpy as np\n'), ((690, 706), 'numpy.sqrt', 'np.sqrt', (['(1 - C_t)'], {}), '(1 - C_t)\n', (697, 706), True, 'import numpy as np\n'), ((934, 986), 'numpy.sqrt', 'np.sqrt', (['(1.0 - C_t / (8 * sigma_y_D_r * sigma_z_D_r))'], {}), '(1.0 - C_t / (8 * sigma_y_D_r * sigma_z_D_r))\n', (941, 986), True, 'import numpy as np\n'), ((1706, 1742), 'floris.utils.tools.valid_ops.find_and_load_model', 'vops.find_and_load_model', (['T_m', '"""tim"""'], {}), "(T_m, 'tim')\n", (1730, 1742), True, 'from floris.utils.tools import valid_ops as vops\n'), ((2908, 3036), 'numpy.exp', 'np.exp', (['(-((z - self.z_hub) ** 2 / (2 * (sigma_z_Dr * self.d_rotor) ** 2)) - y ** 2 /\n (2 * (sigma_y_Dr * self.d_rotor) ** 2))'], {}), '(-((z - self.z_hub) ** 2 / (2 * (sigma_z_Dr * self.d_rotor) ** 2)) - \n y ** 2 / (2 * (sigma_y_Dr * self.d_rotor) ** 2))\n', (2914, 3036), True, 'import numpy as np\n'), ((3479, 3516), 'numpy.abs', 'np.abs', (['(self.ref_loc[1] - down_loc[1])'], {}), '(self.ref_loc[1] - down_loc[1])\n', (3485, 3516), True, 'import numpy as np\n'), ((3518, 3555), 'numpy.abs', 
'np.abs', (['(self.ref_loc[0] - down_loc[0])'], {}), '(self.ref_loc[0] - down_loc[0])\n', (3524, 3555), True, 'import numpy as np\n'), ((2163, 2223), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr))'], {}), '(1.0 - self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr))\n', (2170, 2223), True, 'import numpy as np\n'), ((2829, 2889), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr))'], {}), '(1.0 - self.C_thrust / (8 * sigma_y_Dr * sigma_z_Dr))\n', (2836, 2889), True, 'import numpy as np\n'), ((1588, 1614), 'numpy.sqrt', 'np.sqrt', (['(1 - self.C_thrust)'], {}), '(1 - self.C_thrust)\n', (1595, 1614), True, 'import numpy as np\n'), ((1624, 1650), 'numpy.sqrt', 'np.sqrt', (['(1 - self.C_thrust)'], {}), '(1 - self.C_thrust)\n', (1631, 1650), True, 'import numpy as np\n'), ((2646, 2655), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2652, 2655), True, 'import numpy as np\n'), ((2606, 2615), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2612, 2615), True, 'import numpy as np\n')] |
import time
import numpy as np
import copy
import matplotlib.pyplot as plt
import scipy.stats
import sklearn.metrics
import sklearn.utils.validation
def accuracy(y, p_pred):
"""
Computes the accuracy.
Parameters
----------
y : array-like
Ground truth labels.
p_pred : array-like
Array of confidence estimates.
Returns
-------
accuracy : float
"""
return sklearn.metrics.accuracy_score(y_true=y, y_pred=np.argmax(p_pred, axis=1))
def error(y, p_pred):
"""
Computes the classification error.
Parameters
----------
y : array-like
Ground truth labels.
p_pred : array-like
Array of confidence estimates.
Returns
-------
error : float
"""
return 1 - accuracy(y=y, p_pred=p_pred)
def odds_correctness(y, p_pred):
"""
Computes the odds of making a correct prediction.
Parameters
----------
y : array-like
Ground truth labels.
p_pred : array-like
Array of confidence estimates.
Returns
-------
odds : float
"""
return accuracy(y=y, p_pred=p_pred) / error(y=y, p_pred=p_pred)
def expected_calibration_error(y, p_pred, n_bins=100, n_classes=None, p=1):
"""
Computes the expected calibration error ECE_p.
Computes the empirical p-expected calibration error for a vector of confidence
estimates by binning.
Parameters
----------
y : array-like
Ground truth labels.
p_pred : array-like
Array of confidence estimates.
n_bins : int, default=15
Number of bins of :math:`[\\frac{1}{n_{\\text{classes}},1]` for the confidence estimates.
n_classes : int default=None
Number of classes. Estimated from `y` and `y_pred` if not given.
p : int, default=1
Power of the calibration error, :math:`1 \\leq p \\leq \\infty`.
Returns
-------
float
Expected calibration error
"""
# Check input
y = sklearn.utils.validation.column_or_1d(y)
y_pred = np.argmax(p_pred, axis=1)
y_pred = sklearn.utils.validation.column_or_1d(y_pred)
if n_classes is None:
n_classes = np.unique(np.concatenate([y, y_pred])).shape[0]
# Compute bin means
bin_range = [1 / n_classes, 1]
bins = np.linspace(bin_range[0], bin_range[1], n_bins + 1)
# Find prediction confidence
p_max = np.max(p_pred, axis=1)
# Compute empirical accuracy
empirical_acc = scipy.stats.binned_statistic(p_max, (y_pred == y).astype(int),
bins=n_bins, range=bin_range)[0]
nanindices = np.where(np.logical_not(np.isnan(empirical_acc)))[0]
# Perfect calibration
calibrated_acc = np.linspace(bin_range[0] + bin_range[1] / (2 * n_bins), bin_range[1] - bin_range[1] / (2 * n_bins),
n_bins)
# Expected calibration error
weights_ece = np.histogram(p_max, bins)[0][nanindices]
if p < np.inf:
ece = np.average(abs(empirical_acc[nanindices] - calibrated_acc[nanindices]) ** p,
weights=weights_ece)
elif np.isinf(p):
ece = np.max(abs(empirical_acc[nanindices] - calibrated_acc[nanindices]))
return ece
def sharpness(y, p_pred, ddof=1):
"""
Computes the empirical sharpness of a classifier.
Computes the empirical sharpness of a classifier by computing the sample variance of a
vector of confidence estimates.
Parameters
----------
y : array-like
Ground truth labels. Dummy argument for consistent cross validation.
p_pred : array-like
Array of confidence estimates
ddof : int, optional, default=1
Degrees of freedom for the variance estimator.
Returns
-------
float
Sharpness
"""
# Number of classes
n_classes = np.shape(p_pred)[1]
# Find prediction confidence
p_max = np.max(p_pred, axis=1)
# Compute sharpness
sharp = np.var(p_max, ddof=ddof) * 4 * n_classes ** 2 / (n_classes - 1) ** 2
return sharp
def overconfidence(y, p_pred):
"""
Computes the overconfidence of a classifier.
Computes the empirical overconfidence of a classifier on a test sample by evaluating
the average confidence on the false predictions.
Parameters
----------
y : array-like
Ground truth labels
p_pred : array-like
Array of confidence estimates
Returns
-------
float
Overconfidence
"""
# Find prediction and confidence
y_pred = np.argmax(p_pred, axis=1)
p_max = np.max(p_pred, axis=1)
return np.average(p_max[y_pred != y])
def underconfidence(y, p_pred):
"""
Computes the underconfidence of a classifier.
Computes the empirical underconfidence of a classifier on a test sample by evaluating
the average uncertainty on the correct predictions.
Parameters
----------
y : array-like
Ground truth labels
p_pred : array-like
Array of confidence estimates
Returns
-------
float
Underconfidence
"""
# Find prediction and confidence
y_pred = np.argmax(p_pred, axis=1)
p_max = np.max(p_pred, axis=1)
return np.average(1 - p_max[y_pred == y])
def ratio_over_underconfidence(y, p_pred):
"""
Computes the ratio of over- and underconfidence of a classifier.
Computes the empirical ratio of over- and underconfidence of a classifier on a test sample.
Parameters
----------
y : array-like
Ground truth labels
p_pred : array-like
Array of confidence estimates
Returns
-------
float
Ratio of over- and underconfidence
"""
return overconfidence(y=y, p_pred=p_pred) / underconfidence(y = y, p_pred=p_pred)
def average_confidence(y, p_pred):
"""
Computes the average confidence in the prediction
Parameters
----------
y : array-like
Ground truth labels. Here a dummy variable for cross validation.
p_pred : array-like
Array of confidence estimates.
Returns
-------
avg_conf:float
Average confidence in prediction.
"""
return np.mean(np.max(p_pred, axis=1))
def weighted_abs_conf_difference(y, p_pred):
"""
Computes the weighted absolute difference between over and underconfidence.
Parameters
----------
y : array-like
Ground truth labels. Here a dummy variable for cross validation.
p_pred : array-like
Array of confidence estimates.
Returns
-------
weighted_abs_diff: float
Accuracy weighted absolute difference between over and underconfidence.
"""
y_pred = np.argmax(p_pred, axis=1)
of = overconfidence(y, p_pred)
uf = underconfidence(y, p_pred)
return abs((1 - np.average(y == y_pred)) * of - np.average(y == y_pred) * uf)
def brier_score(y, p_pred):
"""
Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1). The Brier loss is composed of refinement loss and
calibration loss.
Note: We interface the `sklearn.metrics.brier_score_loss` method here to provide a consistent method signature.
Parameters
----------
y : array-like
Ground truth labels. Here a dummy variable for cross validation.
p_pred : array-like
Array of confidence estimates.
Returns
-------
score : float
Brier score
"""
p = np.clip(p_pred[:, 1], a_min=0, a_max=1)
return sklearn.metrics.brier_score_loss(y, p)
def precision(y, p_pred, **kwargs):
"""
Computes the precision.
Parameters
----------
y
p_pred
Returns
-------
"""
y_pred = np.argmax(p_pred, axis=1)
return sklearn.metrics.precision_score(y_true=y, y_pred=y_pred, **kwargs)
def recall(y, p_pred, **kwargs):
"""
Computes the recall.
Parameters
----------
y
p_pred
Returns
-------
"""
y_pred = np.argmax(p_pred, axis=1)
return sklearn.metrics.recall_score(y_true=y, y_pred=y_pred, **kwargs)
class MultiScorer:
"""
Use this class to encapsulate and/or aggregate multiple scoring functions so that it can be passed as an argument
for scoring in scikit's cross_val_score function. Instances of this class are also callables, with signature as
needed by `cross_val_score`. Evaluating multiple scoring function in this way versus scikit learns native way in the
`cross_validate` function avoids the unnecessary overhead of predicting anew for each scorer. This class is slightly
adapted from <NAME>'s implementation [1]_.
.. [1] https://github.com/StKyr/multiscorer
"""
def __init__(self, metrics, plots):
"""
Create a new instance of MultiScorer.
Parameters
----------
metrics: dict
The metrics to be used by the scorer.
The dictionary must have as key a name (str) for the metric and as value a tuple containing the metric
function itself and a dict literal of the additional named arguments to be passed to the function. The
metric function should be one of the `sklearn.metrics` function or any other callable with the same
signature: `metric(y_true, p_pred, **kwargs)`.
plots: dict
Plots to be generated for each CV run.
"""
self.metrics = metrics
self.plots = plots
self.results = {}
self._called = False
self.n_folds = 0
for metric in metrics.keys():
self.results[metric] = []
self.results["cal_time"] = []
def __call__(self, estimator, X, y):
"""
To be called by for evaluation from sklearn's GridSearchCV or cross_val_score. Parameters are as they are
defined in the respective documentation.
Returns
-------
dummy: float
A dummy value of 0.5 just for compatibility reasons.
"""
self.n_folds += 1
# Predict probabilities
start_time = time.time()
p_pred = estimator.predict_proba(X)
cal_time = time.time() - start_time
# Compute metrics
for key in self.metrics.keys():
# Evaluate metric and save
metric, kwargs = self.metrics[key]
self.results[key].append(metric(y, p_pred, **kwargs))
self.results["cal_time"].append(cal_time)
# Generate plots
for key in self.plots.keys():
# Evaluate plots and save
plot_fun, kwargs = self.plots[key]
# Plots in CV runs
# TODO: make this safe for no filename argument
kwargs_copy = copy.deepcopy(kwargs)
kwargs_copy["filename"] = kwargs.get("filename", "") + "_" + str(self.n_folds)
plot_fun(y=y, p_pred=p_pred, **kwargs_copy)
# TODO: plot latent function
plt.close("all")
# Set evaluation to true
self._called = True
# Return dummy value
return 0.5
def get_metric_names(self):
"""
Get all the metric names as given when initialized.
Returns
-------
metric_names: list
A list containing the given names (str) of the metrics
"""
return self.metrics.keys()
def get_results(self, metric=None, fold='all'):
"""
Get the results of a specific or all the metrics.
This method should be called after the object itself has been called so that the metrics are applied.
Parameters
----------
metric: str or None (default)
The given name of a metric to return its result(s). If omitted the results of all metrics will be returned.
fold: int in range [1, number_of_folds] or 'all' (Default)
Get the metric(s) results for the specific fold.
The number of folds corresponds to the number of times the instance is called.
If its value is a number, either the score of a single metric for that fold or a dictionary of the (single)
scores for that fold will be returned, depending on the value of `metric` parameter. If its value is 'all',
either a list of a single metric or a dictionary containing the lists of scores for all folds will be
returned, depending on the value of `metric` parameter.
Returns
-------
metric_result_for_one_fold
The result of the designated metric function for the specific fold, if `metric` parameter was not omitted
and an integer value was given to `fold` parameter. If the value of `metric` does not correspond to a
metric name, `None` will be returned.
all_metric_results_for_one_fold: dict
A dict having as keys the names of the metrics and as values their results for the specific fold.
This will be returned only if `metric` parameter was omitted and an integer value was given to `fold`
parameter.
metric_results_for_all_folds: list
A list of length number_of_folds containing the results of all folds for the specific metric, if `metric`
parameter was not omitted and value 'all' was given to `fold`. If the value of `metric` does not correspond
to a metric name, `None` will be returned.
all_metric_results_for_all_folds: dict of lists
A dict having as keys the names of the metrics and as values lists (of length number_of_folds) of their
results for all folds. This will be returned only if `metric` parameter was omitted and 'all' value was
given to `fold` parameter.
Raises
------
UserWarning
If this method is called before the instance is called for evaluation.
ValueError
If the value for `fold` parameter is not appropriate.
"""
if not self._called:
raise UserWarning('Evaluation has not been performed yet.')
if isinstance(fold, str) and fold == 'all':
if metric is None:
return self.results
else:
return self.results[metric]
elif isinstance(fold, int):
if fold not in range(1, self.n_folds + 1):
raise ValueError('Invalid fold index: ' + str(fold))
if metric is None:
res = dict()
for key in self.results.keys():
res[key] = self.results[key][fold - 1]
return res
else:
return self.results[metric][fold - 1]
else:
raise ValueError('Unexpected fold value: %s' % (str(fold)))
| [
"numpy.clip",
"numpy.histogram",
"numpy.average",
"numpy.argmax",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.isnan",
"numpy.concatenate",
"copy.deepcopy",
"numpy.shape",
"numpy.isinf",
"time.time",
"numpy.var"
] | [((2036, 2061), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (2045, 2061), True, 'import numpy as np\n'), ((2286, 2337), 'numpy.linspace', 'np.linspace', (['bin_range[0]', 'bin_range[1]', '(n_bins + 1)'], {}), '(bin_range[0], bin_range[1], n_bins + 1)\n', (2297, 2337), True, 'import numpy as np\n'), ((2384, 2406), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (2390, 2406), True, 'import numpy as np\n'), ((2724, 2836), 'numpy.linspace', 'np.linspace', (['(bin_range[0] + bin_range[1] / (2 * n_bins))', '(bin_range[1] - bin_range[1] / (2 * n_bins))', 'n_bins'], {}), '(bin_range[0] + bin_range[1] / (2 * n_bins), bin_range[1] - \n bin_range[1] / (2 * n_bins), n_bins)\n', (2735, 2836), True, 'import numpy as np\n'), ((3907, 3929), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (3913, 3929), True, 'import numpy as np\n'), ((4543, 4568), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (4552, 4568), True, 'import numpy as np\n'), ((4581, 4603), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (4587, 4603), True, 'import numpy as np\n'), ((4616, 4646), 'numpy.average', 'np.average', (['p_max[y_pred != y]'], {}), '(p_max[y_pred != y])\n', (4626, 4646), True, 'import numpy as np\n'), ((5143, 5168), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (5152, 5168), True, 'import numpy as np\n'), ((5181, 5203), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (5187, 5203), True, 'import numpy as np\n'), ((5216, 5250), 'numpy.average', 'np.average', (['(1 - p_max[y_pred == y])'], {}), '(1 - p_max[y_pred == y])\n', (5226, 5250), True, 'import numpy as np\n'), ((6680, 6705), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (6689, 6705), True, 'import numpy as np\n'), ((8062, 8101), 'numpy.clip', 'np.clip', (['p_pred[:, 1]'], {'a_min': 
'(0)', 'a_max': '(1)'}), '(p_pred[:, 1], a_min=0, a_max=1)\n', (8069, 8101), True, 'import numpy as np\n'), ((8321, 8346), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (8330, 8346), True, 'import numpy as np\n'), ((8588, 8613), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (8597, 8613), True, 'import numpy as np\n'), ((3123, 3134), 'numpy.isinf', 'np.isinf', (['p'], {}), '(p)\n', (3131, 3134), True, 'import numpy as np\n'), ((3841, 3857), 'numpy.shape', 'np.shape', (['p_pred'], {}), '(p_pred)\n', (3849, 3857), True, 'import numpy as np\n'), ((6180, 6202), 'numpy.max', 'np.max', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (6186, 6202), True, 'import numpy as np\n'), ((10673, 10684), 'time.time', 'time.time', ([], {}), '()\n', (10682, 10684), False, 'import time\n'), ((11526, 11542), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11535, 11542), True, 'import matplotlib.pyplot as plt\n'), ((468, 493), 'numpy.argmax', 'np.argmax', (['p_pred'], {'axis': '(1)'}), '(p_pred, axis=1)\n', (477, 493), True, 'import numpy as np\n'), ((2917, 2942), 'numpy.histogram', 'np.histogram', (['p_max', 'bins'], {}), '(p_max, bins)\n', (2929, 2942), True, 'import numpy as np\n'), ((10748, 10759), 'time.time', 'time.time', ([], {}), '()\n', (10757, 10759), False, 'import time\n'), ((11308, 11329), 'copy.deepcopy', 'copy.deepcopy', (['kwargs'], {}), '(kwargs)\n', (11321, 11329), False, 'import copy\n'), ((2647, 2670), 'numpy.isnan', 'np.isnan', (['empirical_acc'], {}), '(empirical_acc)\n', (2655, 2670), True, 'import numpy as np\n'), ((3967, 3991), 'numpy.var', 'np.var', (['p_max'], {'ddof': 'ddof'}), '(p_max, ddof=ddof)\n', (3973, 3991), True, 'import numpy as np\n'), ((6830, 6853), 'numpy.average', 'np.average', (['(y == y_pred)'], {}), '(y == y_pred)\n', (6840, 6853), True, 'import numpy as np\n'), ((2177, 2204), 'numpy.concatenate', 'np.concatenate', (['[y, y_pred]'], {}), 
'([y, y_pred])\n', (2191, 2204), True, 'import numpy as np\n'), ((6798, 6821), 'numpy.average', 'np.average', (['(y == y_pred)'], {}), '(y == y_pred)\n', (6808, 6821), True, 'import numpy as np\n')] |
"""
DATAQ 4108 Device Level code
author: <NAME>
Date: November 2017- April 2019
fully python3 compatible.
The main purpose of this module is to provide useful interface between DI-4108 and a server system level code that does all numbercrynching. This modules only job is to attend DI-4108 and insure that all data is collected and stored in a circular buffer.
The communication in this module is done via XLI module developed by <NAME>. This module is based on python sockets.
"""#!/usr/bin/env python3
"""
Simple IOC based on caproto library.
It has
"""
from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run
import caproto
from textwrap import dedent
from pdb import pm
from numpy import random, array, zeros, ndarray, nan, isnan
import sys
if sys.version_info[0] == 3:
if sys.version_info[1] <= 7:
from time import gmtime, strftime, time, sleep, clock
else:
from time import gmtime, strftime, time, sleep
from time import perf_counter as clock
else:
from time import gmtime, strftime, time, sleep, clock
import numpy as np
logging_shape = (1080,600, 3)
pre_shape = (216,768, 3)
depre_shape = (216,768, 3)
period_shape = (216,768, 3)
arr_shape = (64,10)
class Server(PVGroup):
"""
An IOC with three uncoupled read/writable PVs
Scalar PVs
----------
CPU
MEMORY
BATTERY
Vectors PVs
-----------
"""
arr_logging = np.zeros(logging_shape).flatten() + 128
image_logging = pvproperty(value=arr_logging, dtype = int, max_length = logging_shape[0]*logging_shape[1]*logging_shape[2])
arr_pre = np.zeros(pre_shape).flatten() + 255
image_pre = pvproperty(value=arr_pre, dtype = int, max_length = pre_shape[0]*pre_shape[1]*pre_shape[2])
arr_depre = np.zeros(depre_shape).flatten() + 255
image_depre = pvproperty(value=arr_depre, dtype = int, max_length = depre_shape[0]*depre_shape[1]*depre_shape[2])
arr_period = np.zeros(period_shape).flatten() + 255
image_period = pvproperty(value=arr_period, dtype = int, max_length = period_shape[0]*period_shape[1]*period_shape[2])
t1 = pvproperty(value=1.0)
dt = pvproperty(value=1.0, precision = 3)
server_name = pvproperty(value='event_handler_mock')
sample_pressure = pvproperty(value=0.0, read_only = True, dtype = float, precision = 3)
target_pressure = pvproperty(value=0.0, read_only = True, dtype = float, precision = 3)
pump_counter = pvproperty(value=0) #done
valves_per_pump_current = pvproperty(value=1.0, precision = 2) #done
valves_per_pump_total = pvproperty(value=1.0, precision = 2) #done
ctrl_operating_mode = pvproperty(value=1)
ctrl_shutdown_state = pvproperty(value=0,dtype = int)
ctrl_pump_state = pvproperty(value=0,dtype = int)
ctrl_disable_pump_state = pvproperty(value=0,dtype = int)
ctrl_pre_state = pvproperty(value=0,dtype = int)
ctrl_depre_state = pvproperty(value=0,dtype = int)
table_pressure_after_pre = pvproperty(value=1.0, precision = 3)
table_pressure_before_depre = pvproperty(value=1.0, precision = 3)
table_time_to_switch_pre = pvproperty(value=1.0, precision = 3)
table_time_to_switch_depre = pvproperty(value=1.0, precision = 3)
table_rise_slope = pvproperty(value=1.0, precision = 3)
table_fall_slope = pvproperty(value=1.0, precision = 3)
table_pulse_width_pre = pvproperty(value=1.0, precision = 3)
table_pulse_width_depre = pvproperty(value=1.0, precision = 3)
table_delay = pvproperty(value=1.0, precision = 3)
table_period = pvproperty(value=1.0, precision = 3)
table_valve_counter_pre = pvproperty(value=0, dtype = int)
table_valve_counter_depre = pvproperty(value=0, dtype = int)
warning_text = pvproperty(value='this is a warning/faults field')
timeout_period = pvproperty(value=60.0, dtype = float, units = 's')
target_pressure_coefficient = pvproperty(value=0.92, dtype = float)
transfer_tube = pvproperty(value=120, dtype = float, units = 'inches')
log_history = pvproperty(value=False, dtype = bool)
save_traces = pvproperty(value=False, dtype = bool)
show_sample_sensors = pvproperty(value=False, dtype = bool)
global_valve_counter_pre = pvproperty(value=1123, dtype = int)
global_valve_counter_pre_reset = pvproperty(value=False, dtype = bool)
#
global_valve_counter_depre = pvproperty(value=2123, dtype = int)
global_valve_counter_depre_reset = pvproperty(value=False, dtype = bool)
#
global_pump_counter= pvproperty(value=3123, dtype = int)
global_pump_counter_reset = pvproperty(value=False, dtype = bool)
medium= pvproperty(value='mineral spirits')
dio = pvproperty(value=0, dtype = int)
packet_counter = pvproperty(value=0, dtype = int)
@sample_pressure.startup
async def sample_pressure(self, instance, async_lib):
# This method will be called when the server starts up.
self.io_pull_queue = async_lib.ThreadsafeQueue()
self.io_push_queue = async_lib.ThreadsafeQueue()
self.device.io_push_queue = self.io_push_queue
self.device.io_pull_queue = self.io_pull_queue
# Loop and grab items from the response queue one at a time
while True:
io_dict = await self.io_push_queue.async_get()
# Propagate the keypress to the EPICS PV, triggering any monitors
# along the way
for key in list(io_dict.keys()):
if key == 'sample_pressure':
await self.sample_pressure.write(io_dict[key])
elif key == 'target_pressure':
await self.target_pressure.write(io_dict[key])
elif key == 'valves_per_pump_current':
await self.valves_per_pump_current.write(io_dict[key])
elif key == 'valves_per_pump_total':
await self.valves_per_pump_total.write(io_dict[key])
elif key == 'pump_counter':
await self.pump_counter.write(io_dict[key])
#depressurization event
elif key == 'table_time_to_switch_depre':
await self.table_time_to_switch_depre.write(io_dict[key])
elif key == 'table_fall_slope':
await self.table_fall_slope.write(io_dict[key])
elif key == 'table_valve_counter_depre':
await self.table_valve_counter_depre.write(io_dict[key])
elif key == 'image_depre':
await self.image_depre.write(io_dict[key])
#pressurization event
elif key == 'table_time_to_switch_pre':
await self.table_time_to_switch_pre.write(io_dict[key])
elif key == 'table_rise_slope':
await self.table_rise_slope.write(io_dict[key])
elif key == 'table_valve_counter_pre':
await self.table_valve_counter_pre.write(io_dict[key])
elif key == 'image_pre':
await self.image_pre.write(io_dict[key])
#period event
elif key == 'image_period':
await self.image_period.write(io_dict[key])
elif key == 'table_pulse_width_depre':
await self.table_pulse_width_depre.write(io_dict[key])
elif key == 'table_pulse_width_pre':
await self.table_pulse_width_pre.write(io_dict[key])
elif key == 'table_delay':
await self.table_delay.write(io_dict[key])
elif key == 'table_period':
await self.table_period.write(io_dict[key])
elif key == 'dio':
print(f' writing DIO: {io_dict[key]})')
await self.dio.write(io_dict[key])
elif key == 'packet_counter':
await self.packet_counter.write(io_dict[key])
if __name__ == '__main__':
ioc_options, run_options = ioc_arg_parser(
default_prefix='event_handler_mock:',
desc=dedent(Server.__doc__))
ioc = Server(**ioc_options)
ioc.device = device
run(ioc.pvdb, **run_options)
| [
"caproto.server.pvproperty",
"textwrap.dedent",
"numpy.zeros",
"caproto.server.run"
] | [((1536, 1647), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': 'arr_logging', 'dtype': 'int', 'max_length': '(logging_shape[0] * logging_shape[1] * logging_shape[2])'}), '(value=arr_logging, dtype=int, max_length=logging_shape[0] *\n logging_shape[1] * logging_shape[2])\n', (1546, 1647), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((1714, 1809), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': 'arr_pre', 'dtype': 'int', 'max_length': '(pre_shape[0] * pre_shape[1] * pre_shape[2])'}), '(value=arr_pre, dtype=int, max_length=pre_shape[0] * pre_shape[1] *\n pre_shape[2])\n', (1724, 1809), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((1882, 1985), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': 'arr_depre', 'dtype': 'int', 'max_length': '(depre_shape[0] * depre_shape[1] * depre_shape[2])'}), '(value=arr_depre, dtype=int, max_length=depre_shape[0] *\n depre_shape[1] * depre_shape[2])\n', (1892, 1985), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2061, 2168), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': 'arr_period', 'dtype': 'int', 'max_length': '(period_shape[0] * period_shape[1] * period_shape[2])'}), '(value=arr_period, dtype=int, max_length=period_shape[0] *\n period_shape[1] * period_shape[2])\n', (2071, 2168), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2177, 2198), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)'}), '(value=1.0)\n', (2187, 2198), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2209, 2243), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (2219, 2243), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2267, 2305), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '"""event_handler_mock"""'}), 
"(value='event_handler_mock')\n", (2277, 2305), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2331, 2394), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0.0)', 'read_only': '(True)', 'dtype': 'float', 'precision': '(3)'}), '(value=0.0, read_only=True, dtype=float, precision=3)\n', (2341, 2394), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2424, 2487), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0.0)', 'read_only': '(True)', 'dtype': 'float', 'precision': '(3)'}), '(value=0.0, read_only=True, dtype=float, precision=3)\n', (2434, 2487), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2516, 2535), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)'}), '(value=0)\n', (2526, 2535), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2573, 2607), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(2)'}), '(value=1.0, precision=2)\n', (2583, 2607), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2645, 2679), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(2)'}), '(value=1.0, precision=2)\n', (2655, 2679), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2717, 2736), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1)'}), '(value=1)\n', (2727, 2736), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2764, 2794), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (2774, 2794), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2819, 2849), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (2829, 2849), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, 
run\n'), ((2882, 2912), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (2892, 2912), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2936, 2966), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (2946, 2966), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((2992, 3022), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (3002, 3022), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3060, 3094), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3070, 3094), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3132, 3166), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3142, 3166), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3201, 3235), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3211, 3235), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3272, 3306), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3282, 3306), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3333, 3367), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3343, 3367), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3394, 3428), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3404, 3428), False, 'from caproto.server import 
pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3460, 3494), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3470, 3494), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3528, 3562), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3538, 3562), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3584, 3618), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3594, 3618), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3641, 3675), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1.0)', 'precision': '(3)'}), '(value=1.0, precision=3)\n', (3651, 3675), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3709, 3739), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (3719, 3739), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3775, 3805), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (3785, 3805), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3830, 3880), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '"""this is a warning/faults field"""'}), "(value='this is a warning/faults field')\n", (3840, 3880), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3907, 3953), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(60.0)', 'dtype': 'float', 'units': '"""s"""'}), "(value=60.0, dtype=float, units='s')\n", (3917, 3953), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((3993, 4028), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0.92)', 
'dtype': 'float'}), '(value=0.92, dtype=float)\n', (4003, 4028), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4052, 4102), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(120)', 'dtype': 'float', 'units': '"""inches"""'}), "(value=120, dtype=float, units='inches')\n", (4062, 4102), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4126, 4161), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4136, 4161), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4183, 4218), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4193, 4218), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4248, 4283), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4258, 4283), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4320, 4353), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(1123)', 'dtype': 'int'}), '(value=1123, dtype=int)\n', (4330, 4353), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4394, 4429), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4404, 4429), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4473, 4506), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(2123)', 'dtype': 'int'}), '(value=2123, dtype=int)\n', (4483, 4506), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4549, 4584), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4559, 4584), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, 
run\n'), ((4620, 4653), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(3123)', 'dtype': 'int'}), '(value=3123, dtype=int)\n', (4630, 4653), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4689, 4724), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(False)', 'dtype': 'bool'}), '(value=False, dtype=bool)\n', (4699, 4724), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4742, 4777), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '"""mineral spirits"""'}), "(value='mineral spirits')\n", (4752, 4777), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4791, 4821), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (4801, 4821), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((4848, 4878), 'caproto.server.pvproperty', 'pvproperty', ([], {'value': '(0)', 'dtype': 'int'}), '(value=0, dtype=int)\n', (4858, 4878), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((8379, 8407), 'caproto.server.run', 'run', (['ioc.pvdb'], {}), '(ioc.pvdb, **run_options)\n', (8382, 8407), False, 'from caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\n'), ((8288, 8310), 'textwrap.dedent', 'dedent', (['Server.__doc__'], {}), '(Server.__doc__)\n', (8294, 8310), False, 'from textwrap import dedent\n'), ((1475, 1498), 'numpy.zeros', 'np.zeros', (['logging_shape'], {}), '(logging_shape)\n', (1483, 1498), True, 'import numpy as np\n'), ((1661, 1680), 'numpy.zeros', 'np.zeros', (['pre_shape'], {}), '(pre_shape)\n', (1669, 1680), True, 'import numpy as np\n'), ((1825, 1846), 'numpy.zeros', 'np.zeros', (['depre_shape'], {}), '(depre_shape)\n', (1833, 1846), True, 'import numpy as np\n'), ((2002, 2024), 'numpy.zeros', 'np.zeros', (['period_shape'], {}), '(period_shape)\n', (2010, 2024), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict, deque
from typing import Any, Dict, List, Optional
import random
import json
import attr
import contextlib
import numpy as np
import tqdm
import torch
from torch.optim.lr_scheduler import LambdaLR
from torch.jit import Final
from habitat import Config, logger
from habitat.utils.visualizations import maps
from habitat.utils.visualizations.utils import (
observations_to_image,
save_semantic_frame
)
from habitat_baselines.common.base_trainer import BaseRLTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.env_utils import construct_envs
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.auxiliary_tasks import get_aux_task_classes
from habitat_baselines.rl.ppo.curiosity import ForwardCuriosity
from habitat_baselines.rl.models.rednet import load_rednet
from habitat_baselines.common.rollout_storage import RolloutStorage
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.common.utils import (
batch_obs,
batch_list,
generate_video,
linear_decay,
is_fp16_autocast_supported,
is_fp16_supported,
)
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.rl.ppo import (
PPO
)
from habitat_baselines.rl.ppo.encoder_dict import (
get_vision_encoder_inputs
)
class Diagnostics:
    """Namespace of string constants naming the diagnostics that evaluation
    code can be asked to record (episode stats, maps, activations, ...)."""
    basic = "basic" # dummy to record episode stats (for t-test)
    actions = "actions"
    gps = "gps"
    heading = "heading"
    weights = "weights"
    top_down_map = "top_down_map"
    episode_info = "episode_info"
    episode_info_full = "episode_info_full"
    # The following three diagnostics are pretty large. Be careful.
    """ internal_activations:
        Records for dynamical analysis. Holds N lists, one per episode.
        Each list is of episode length T, and has:
        - belief hidden states # T x K x H
        - fused hidden state # T x H
        - sensor embeddings # T x H
        - policy logits # T x A
        - critic values # T x 1
    """
    internal_activations = "internal_activations"
    """ observations:
        Sensor observations, pre-embedding. Non-visual inputs only. # key, T x H
    """
    observations = "observations"
    """ observations:
        Sensor observations, pre-embedding, pre-preprocessing. Visual inputs only. # key, THWC
    """
    visual_observations = "visual_observations"
    # Note, we don't record preprocessed visual observations, but we prob don't need them.
    # Following three are typically for probing
    """ d2g:
        Per timestep distance to closest goal (as used in the reward sensor)
    """
    d2g = "d2g"
    """ room_cat:
        Per timestep room category
    """
    room_cat = "room_cat"
    """ visit_count:
        Per timestep current tile visit count, from coverage reward
    """
    visit_count = "visit_count"
    collisions_t = "collisions_t" # collisions per timestep to distinguish debris spawn
    coverage_t = "coverage_t" # coverage per timestep to distinguish debris spawn
    sge_t = "sge_t" # SGE per timestep to distinguish debris spawn
@baseline_registry.register_trainer(name="ppo")
class PPOTrainer(BaseRLTrainer):
r"""Trainer class for PPO algorithm
Paper: https://arxiv.org/abs/1707.06347.
"""
supported_tasks = ["Nav-v0"]
def __init__(self, config=None):
super().__init__(config)
self.actor_critic = None
self.agent = None
self.envs = None
self.obs_space = None
self.obs_transforms = []
if config is not None:
# logger.info(f"config: {config}")
self.checkpoint_prefix = config.TENSORBOARD_DIR.split('/')[-1]
self._static_encoder = False
self._encoder = None
self.count_steps = 0
if self.config.RL.fp16_mode not in ("off", "autocast", "mixed"):
raise RuntimeError(
f"Unknown fp16 mode '{self.config.RL.fp16_mode}'"
)
if self.config.RL.fp16_mode != "off" and not torch.cuda.is_available():
logger.warn(
"FP16 requires CUDA but CUDA is not available, setting to off"
)
self._fp16_mixed = self.config.RL.fp16_mode == "mixed"
self._fp16_autocast = self.config.RL.fp16_mode == "autocast"
if self._fp16_mixed and not is_fp16_supported():
raise RuntimeError(
"FP16 requires PyTorch >= 1.6.0, please update your PyTorch"
)
if self._fp16_autocast and not is_fp16_autocast_supported():
raise RuntimeError(
"FP16 autocast requires PyTorch >= 1.7.1, please update your PyTorch"
)
def _setup_auxiliary_tasks(self, aux_cfg, ppo_cfg, task_cfg, observation_space=None, is_eval=False, policy_encoders=["rgb", "depth"]):
r"""
policy_encoders: If an auxiliary sensor is not used for the policy, we will make one
"""
aux_task_strings = [task.lower() for task in aux_cfg.tasks]
if "semanticcpca" in aux_task_strings and ppo_cfg.POLICY.USE_SEMANTICS:
raise Exception("I don't think using a separate semantic cpca task and feeding semantics into our main encoder are compatible")
# Differentiate instances of tasks by adding letters
aux_counts = {}
for i, x in enumerate(aux_task_strings):
if x in aux_counts:
aux_task_strings[i] = f"{aux_task_strings[i]}_{aux_counts[x]}"
aux_counts[x] += 1
else:
aux_counts[x] = 1
logger.info(f"Auxiliary tasks: {aux_task_strings}")
num_recurrent_memories = 1
# Currently we have two places for policy name.. not good. Will delete once baselines are run
if self.config.RL.PPO.policy != "BASELINE":
raise Exception("I don't think you meant to set this policy")
policy = baseline_registry.get_policy(ppo_cfg.POLICY.name)
hidden_sizes = None
if policy.IS_MULTIPLE_BELIEF:
proposed_num_beliefs = ppo_cfg.POLICY.BELIEFS.NUM_BELIEFS
num_recurrent_memories = len(aux_cfg.tasks) if proposed_num_beliefs == -1 else proposed_num_beliefs
if policy.IS_RECURRENT:
num_recurrent_memories += 1
init_aux_tasks = []
encoder_insts = {}
if not is_eval:
task_classes, encoder_classes = get_aux_task_classes(aux_cfg) # supervised is a dict
for encoder in encoder_classes: # This is a dict of other encoders we want
if encoder in policy_encoders: # If it already exists
pass
encoder_insts[encoder] = encoder_classes[encoder](observation_space, ppo_cfg.hidden_size).to(self.device)
for i, task in enumerate(aux_cfg.tasks):
task_class = task_classes[i]
# * We previously constructed the extra encoders during aux task setup, but they are best constructed separately beforehand (for attachment to aux task AND policy)
# * We don't actually need it here, so we disable for now
# req_sensors = {
# name: encoder_insts[name] for name in task_class.get_required_sensors(aux_cfg[task])
# }
# Currently the tasks which need a given encoder hold the module itself.
hidden_size = None # will go to sensible default
aux_module = task_class(
ppo_cfg, aux_cfg[task], task_cfg, self.device, \
observation_space=observation_space,
# sensor_encoders=req_sensors,
hidden_size=hidden_size).to(self.device)
init_aux_tasks.append(aux_module)
return init_aux_tasks, num_recurrent_memories, aux_task_strings, encoder_insts
    def _setup_curiosity(self, ppo_cfg, task_cfg, embedding_size):
        # Factory for the forward-prediction curiosity module; kept as a
        # method so subclasses can substitute a different implementation.
        # `embedding_size` comes from the actor-critic's visual embedding.
        return ForwardCuriosity(ppo_cfg, task_cfg, embedding_size)
    def get_ppo_class(self):
        # Hook returning the PPO implementation to instantiate; subclasses
        # can override it to swap the updater without copying setup code.
        return PPO
    @property
    def obs_space(self):
        # Lazily derive the observation space from the first env when it has
        # not been set explicitly (e.g. after applying obs transforms).
        if self._obs_space is None and self.envs is not None:
            self._obs_space = self.envs.observation_spaces[0]
        return self._obs_space
    @obs_space.setter
    def obs_space(self, new_obs_space):
        # An explicit assignment overrides the lazily-derived env space.
        self._obs_space = new_obs_space
def _setup_actor_critic_agent(
self,
ppo_cfg: Config,
task_cfg: Config,
aux_cfg: Config = None,
aux_tasks=[],
policy_encoders=["rgb", "depth"],
aux_encoders=None,
) -> None:
r"""Sets up actor critic and agent for PPO.
Args:
ppo_cfg: config node with relevant params
Returns:
None
"""
if len(aux_tasks) != 0 and len(aux_tasks) != len(aux_cfg.tasks):
raise Exception(f"Policy specifies {len(aux_cfg.tasks)} tasks but {len(aux_tasks)} were initialized.")
logger.add_filehandler(self.config.LOG_FILE)
observation_space = self.obs_space
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
self.obs_space = observation_space
# Default policy settings for object nav
is_objectnav = "ObjectNav" in task_cfg.TYPE or self.config.MOCK_OBJECTNAV
additional_sensors = []
embed_goal = False
if is_objectnav:
additional_sensors = ["gps", "compass"]
embed_goal = True
# TODO move `ppo_cfg.policy` to `config.RL.POLICY`
policy = baseline_registry.get_policy(ppo_cfg.POLICY.name)
self.actor_critic = policy(
observation_space=self.obs_space,
action_space=self.envs.action_spaces[0],
hidden_size=ppo_cfg.hidden_size,
goal_sensor_uuid=task_cfg.GOAL_SENSOR_UUID,
num_tasks=len(aux_cfg.tasks), # we pass this is in to support eval, where no aux modules are made
additional_sensors=additional_sensors,
embed_goal=embed_goal,
device=self.device,
config=ppo_cfg.POLICY,
policy_encoders=policy_encoders,
num_policy_heads=self._get_policy_head_count(),
mock_objectnav=self.config.MOCK_OBJECTNAV
)
# It's difficult to completely JIT this
# if policy.IS_JITTABLE and ppo_cfg.POLICY.jit:
# self.actor_critic = torch.jit.script(self.actor_critic)
self.actor_critic.to(self.device)
curiosity_module = None
if ppo_cfg.CURIOSITY.USE_CURIOSITY:
curiosity_module = \
self._setup_curiosity(ppo_cfg, task_cfg, self.actor_critic.embedding_size)
if self._fp16_mixed:
for name, module in self.actor_critic.named_modules():
if "running_mean_and_var" not in name:
module.to(dtype=torch.float16)
self.agent = self.get_ppo_class()(
actor_critic=self.actor_critic,
clip_param=ppo_cfg.clip_param,
ppo_epoch=ppo_cfg.ppo_epoch,
num_mini_batch=ppo_cfg.num_mini_batch,
value_loss_coef=ppo_cfg.value_loss_coef,
aux_loss_coef=ppo_cfg.aux_loss_coef,
entropy_coef=ppo_cfg.entropy_coef,
lr=ppo_cfg.lr,
eps=ppo_cfg.eps,
max_grad_norm=ppo_cfg.max_grad_norm,
aux_tasks=aux_tasks,
aux_cfg=aux_cfg,
curiosity_cfg=ppo_cfg.CURIOSITY,
curiosity_module=curiosity_module,
use_normalized_advantage=ppo_cfg.use_normalized_advantage,
aux_encoders=aux_encoders,
aux_map=ppo_cfg.POLICY.BELIEFS.AUX_MAP, # TODO reroute
importance_weight=self.config.RL.REWARD_FUSION.SPLIT.IMPORTANCE_WEIGHT,
fp16_autocast=self._fp16_autocast,
fp16_mixed=self._fp16_mixed,
).to(self.device)
self.load_pretrained_weights()
self.agent.script()
self.semantic_predictor = None
if self.config.RL.POLICY.TRAIN_PRED_SEMANTICS:
self.semantic_predictor = load_rednet(
self.device,
ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT,
resize=True # since we train on half-vision
)
self.semantic_predictor.eval()
    def load_pretrained_weights(self):
        """Optionally seed the RGBD visual encoder (and obs-normalization
        statistics) from a pre-trained checkpoint before training."""
        # Load a pre-trained visual encoder. (This is hardcoded to support rgbd loading)
        # Note - this will be overwritten by checkpoints if we're not training from scratch
        if not self.config.RL.PPO.POLICY.pretrained_encoder:
            return
        if self.config.RL.PPO.POLICY.USE_SEMANTICS:
            # Merged with semantic - we can't load separate rgbd weight
            return
        pretrained_state = torch.load(
            self.config.RL.PPO.POLICY.pretrained_weights, map_location="cpu"
        )
        # Separate normalization statistics from the rest of the state dict,
        # reshaping mean/var to the (1, 4, 1, 1) layout used here.
        spliced_mean_and_var = {
            k.split(".")[-1]: v for k, v in pretrained_state["state_dict"].items()
            if "running_mean_and_var" in k
        }
        modified_mean_and_var = {
            k: v.view(1, 4, 1, 1)
            for k, v in spliced_mean_and_var.items()
            if "_var" in k or "_mean" in k
        }
        spliced_state = {
            k: v for k, v in pretrained_state["state_dict"].items()
            if "running_mean_and_var" not in k
        }
        # We try twice (with different prefixes) due to some compatibility issues in checkpoints and model versions
        # This first version uses DDPPO weights - in other models, the visual encoder belongs on the "Net" (what we call the core/belief)
        ve_str = 'actor_critic.net.visual_encoder.'
        visual_dict = {
            k[len(ve_str):]: v
            for k, v in spliced_state.items()
            if k.startswith(ve_str)
        }
        # The encoder registry key is the stringified sensor list.
        rgbd_module = self.actor_critic.visual_encoders.encoders["['depth', 'rgb']"][0]
        if len(visual_dict) > 0:
            # NOTE(review): this early return skips loading
            # running_mean_and_var for DDPPO-style checkpoints — confirm
            # that is intentional.
            rgbd_module.load_state_dict(visual_dict)
            return
        # This second version is for when we load with our own weights - where the visual encoder belongs to the policy (as a shared base)
        ve_str = 'actor_critic.visual_encoder.'
        visual_dict = {
            k[len(ve_str):]: v
            for k, v in spliced_state.items()
            if k.startswith(ve_str)
        }
        # NOTE(review): if neither prefix matched, this loads an empty dict
        # into a strict load_state_dict and will raise.
        rgbd_module.load_state_dict(visual_dict)
        self.actor_critic.running_mean_and_var.load_state_dict(modified_mean_and_var)
def save_checkpoint(
self, file_name: str, extra_state: Optional[Dict] = None
) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
def _cast(t: torch.Tensor):
if t.dtype == torch.float16:
return t.to(dtype=torch.float32)
else:
return t
checkpoint = {
"state_dict": {
k: _cast(v) for k, v in self.agent.state_dict().items()
},
# FIXME optim state, should I cast it?
"optim_state": self.agent.optim_state_dict(),
# "optim_state": {
# k: _cast(v) for k, v in self.agent.optimizer.state_dict().items()
# },
# "state_dict": self.agent.state_dict(),
# "optim_state": self.agent.optimizer.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
os.makedirs(self.config.CHECKPOINT_FOLDER, exist_ok=True)
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
    def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
        r"""Load checkpoint of specified path as a dict.

        Extra args/kwargs are forwarded to ``torch.load`` (e.g. map_location).

        Args:
            checkpoint_path: path of target checkpoint
            *args: additional positional args
            **kwargs: additional keyword args
        Returns:
            dict containing checkpoint info
        """
        return torch.load(checkpoint_path, *args, **kwargs)
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _get_policy_head_count(self):
reward_keys = self.config.RL.POLICIES
if reward_keys[0] == "none" and len(reward_keys) == 1:
return 1
if self.config.RL.REWARD_FUSION.STRATEGY == "SPLIT":
return 2
return 1
def _build_rewards(
self,
env_rewards,
metrics,
):
r"""
In order to support more complex reward operations, we treat rewards as normal metrics.
The env still returns rewards as per gym API, but env reward should be combined with measures as configured.
Typically, the env reward will just contain slack.
Args:
env_rewards: [b] env reward
metrics: dict of [b] (reward) measures.
Note these sizes don't have extra feature dims since rewards are expected to be scalars.
Return:
env_rewards: k x b, where k is the number of policy heads (typically 1)
"""
# extract the reward metrics
reward_keys = self.config.RL.POLICIES
if reward_keys[0] == "none" and len(reward_keys) == 1:
return env_rewards.unsqueeze(0)
strategy = self.config.RL.REWARD_FUSION.STRATEGY
if strategy == "SUM":
return (env_rewards + sum(metrics[p] for p in reward_keys)).unsqueeze(0)
reward_a = sum(metrics[p] for p in reward_keys[:-1]) + env_rewards
reward_b = metrics[reward_keys[-1]]
if self.config.RL.REWARD_FUSION.ENV_ON_ALL:
reward_b = reward_b + env_rewards
if self.config.RL.REWARD_FUSION.STRATEGY == "RAMP":
# Ramps from a to b
ramp_factor = min(1, max(0, (
self.count_steps - self.config.RL.REWARD_FUSION.RAMP.START
) / (
self.config.RL.REWARD_FUSION.RAMP.END - self.config.RL.REWARD_FUSION.RAMP.START
)))
return (reward_a * (1 - ramp_factor) + reward_b * ramp_factor).unsqueeze(0)
elif self.config.RL.REWARD_FUSION.STRATEGY == "SPLIT":
return torch.stack([reward_a, reward_b], dim=0)
raise NotImplementedError
def _collect_rollout_step(
self, rollouts, current_episode_reward, current_episode_env_reward, running_episode_stats, prior_obs_state=None
):
pth_time = 0.0
env_time = 0.0
ppo_cfg = self.config.RL.PPO
curiosity_cfg = ppo_cfg.CURIOSITY
t_sample_action = time.time()
# sample actions
with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
behavioral_index = 0
if self._get_policy_head_count() > 1 and self.count_steps > self.config.RL.REWARD_FUSION.SPLIT.TRANSITION:
behavioral_index = 1
(
values,
actions,
actions_log_probs,
recurrent_hidden_states,
obs,
) = self.actor_critic.act(
step_observation,
rollouts.get_recurrent_states()[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
return_features=True,
behavioral_index=behavioral_index
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
env_time += time.time() - t_step_env
t_update_stats = time.time()
# Hardcoded
def map_to_full_metric(m):
if m == 'reached':
return ['coverage', 'reached']
elif m == 'visit_count':
return ['coverage', 'visit_count']
elif m == "mini_reached":
return ['coverage', 'mini_reached']
else:
return [m]
TRACKED_METRICS = [map_to_full_metric(m) for m in self.config.RL.PPO.ROLLOUT.METRICS]
tracked_metrics = batch_list(infos, device=self.device, whitelist=TRACKED_METRICS)
batch = batch_obs(observations, device=self.device)
if self.semantic_predictor is not None:
batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
if ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT == "weights/rednet_semmap_mp3d_40.pth":
batch["semantic"] -= 1
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
rewards = torch.tensor(
rewards, dtype=torch.float, device=current_episode_reward.device
)
POLICY_METRICS = [map_to_full_metric(m) for m in self.config.RL.POLICIES if m is not "none"] # Careful not to duplicate this
policy_metrics = batch_list(infos, device=rewards.device, whitelist=POLICY_METRICS)
rewards = self._build_rewards(rewards, policy_metrics)
rewards = rewards.unsqueeze(-1) # b x k -> b x k x 1
# reward [k x b x 1] * masks [b x 1] -> [k x b x 1]
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=current_episode_reward.device,
)
current_episode_env_reward += rewards
curiosity_obs = None
if curiosity_cfg.USE_CURIOSITY:
# ! Curiosity not supported for multi-rewards. Assuming bonus belongs to first dimension
curiosity_obs = obs[curiosity_cfg.VISION_KEY]
if prior_obs_state is not None:
with torch.no_grad():
# Pass in the state after seeing the prior observation (our input state)
prior_state = rollouts.get_recurrent_states()[rollouts.step] if curiosity_cfg.USE_BELIEF else None
fp_error = self.agent.get_curiosity_error(
prior_obs_state,
curiosity_obs,
rollouts.prev_actions[rollouts.step],
beliefs=prior_state
)
curiosity_reward = torch.log(fp_error + 1.0).unsqueeze(1).to(rewards.device) * curiosity_cfg.REWARD_SCALE
# If the episode has ended (mask is 0), prev and current obs are not in same scene, zero reward
curiosity_reward = curiosity_reward * masks # b x 1
rewards[:,0] = rewards[:, 0] + curiosity_reward
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward # only add reward at episode end?
running_episode_stats["env_reward"] += (1 - masks) * current_episode_env_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
current_episode_env_reward *= masks
if self._static_encoder:
if self._fp16_mixed:
raise Exception("Not implemented")
with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
batch["visual_features"] = self._encoder(batch)
if self._get_policy_head_count() == 1: # Single-policy agents don't return the policy dimension.
values = values.unsqueeze(1)
actions_log_probs = actions_log_probs.unsqueeze(1)
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs, # b x k x 1
values, # b x k x 1
rewards, # k x b x 1
masks,
tracked_metrics
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs, curiosity_obs
    def _update_agent(self, ppo_cfg, rollouts):
        r"""Compute returns for the filled rollout and run one agent update.

        Returns:
            Tuple of (update_time, value_loss, action_loss, dist_entropy,
            aux_task_losses, aux_dist_entropy, aux_weights, inv_curiosity,
            fwd_curiosity).
        """
        t_update_model = time.time()
        with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
            last_observation = {
                k: v[rollouts.step] for k, v in rollouts.observations.items()
            }
            # Bootstrap the value of the final observation for returns.
            next_value = self.actor_critic.get_value(
                last_observation,
                rollouts.get_recurrent_states()[rollouts.step],
                rollouts.prev_actions[rollouts.step],
                rollouts.masks[rollouts.step],
            ).detach()
            # With two heads (SPLIT fusion), switch the behavioral head once
            # past the configured transition step.
            behavioral_index = 0
            if self._get_policy_head_count() > 1 and self.count_steps > self.config.RL.REWARD_FUSION.SPLIT.TRANSITION:
                behavioral_index = 1
            # Clip bounds for importance weights; falls back to the PPO clip
            # range when SPLIT_IW_BOUNDS is not configured.
            iw_clipped = ppo_cfg.SPLIT_IW_BOUNDS if hasattr(ppo_cfg, 'SPLIT_IW_BOUNDS') else \
                [1.0 - ppo_cfg.clip_param, 1.0 + ppo_cfg.clip_param]
            rollouts.compute_returns(
                next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau,
                behavioral_index=behavioral_index,
                importance_weight=self.config.RL.REWARD_FUSION.SPLIT.IMPORTANCE_WEIGHT,
                weight_clip=iw_clipped,
            )
        (
            value_loss,
            action_loss,
            dist_entropy,
            aux_task_losses,
            aux_dist_entropy,
            aux_weights,
            inv_curiosity,
            fwd_curiosity,
        ) = self.agent.update(
            rollouts,
            ppo_cfg.gamma,
            behavioral_index=behavioral_index
        )
        rollouts.after_update()
        return (
            time.time() - t_update_model,
            value_loss,
            action_loss,
            dist_entropy,
            aux_task_losses,
            aux_dist_entropy,
            aux_weights,
            inv_curiosity,
            fwd_curiosity,
        )
def _make_deltas(self, window_episode_stats):
deltas = {
k: (
(v[-1] - v[0]).flatten(start_dim=-2).sum(dim=-1) # k x b x 1 OR b x 1 -> k or 1
if len(v) > 1
else v[0].flatten(start_dim=-2).sum(dim=-1)
)
for k, v in window_episode_stats.items()
}
# Get items, and flatten rewards to report multi-policy
flat_deltas = {}
for k, v in deltas.items():
if len(v.size()) > 0:
flat_deltas[k] = v[0].item()
for i in range(1, v.size(0)):
flat_deltas[f"{k}_{i}"] = v[i].item()
else:
flat_deltas[k] = v.item()
flat_deltas["count"] = max(flat_deltas["count"], 1.0)
return flat_deltas
    def train(self, ckpt_path="", ckpt=-1, start_updates=0) -> None:
        r"""Main method for training PPO.

        Constructs the vectorized environments, the actor-critic agent and
        rollout storage, then alternates rollout collection with PPO
        updates, periodically logging metrics and saving checkpoints.

        Args:
            ckpt_path: checkpoint file to resume from ("" for a fresh run).
            ckpt: checkpoint index to resume from (-1 for a fresh run).
            start_updates: update counter to resume at.

        Returns:
            None
        """
        self.envs = construct_envs(
            self.config, get_env_class(self.config.ENV_NAME)
        )
        self.device = (
            torch.device("cuda", self.config.TORCH_GPU_ID)
            if torch.cuda.is_available()
            else torch.device("cpu")
        )
        ppo_cfg = self.config.RL.PPO
        task_cfg = self.config.TASK_CONFIG.TASK
        policy_encoders_map = get_vision_encoder_inputs(ppo_cfg)
        """
        Initialize auxiliary tasks
        """
        aux_cfg = self.config.RL.AUX_TASKS
        # NOTE(review): `observation_space` is not defined in this method;
        # presumably it resolves from an enclosing/module scope — confirm it
        # is set before train() runs.
        init_aux_tasks, num_recurrent_memories, aux_task_strings, aux_encoder_insts = \
            self._setup_auxiliary_tasks(aux_cfg, ppo_cfg, task_cfg,
                observation_space=observation_space, policy_encoders=policy_encoders_map)
        self._setup_actor_critic_agent(
            ppo_cfg, task_cfg, aux_cfg,
            init_aux_tasks,
            aux_encoders=aux_encoder_insts,
            policy_encoders=policy_encoders_map
        )
        rollouts = RolloutStorage(
            ppo_cfg.num_steps,
            self.envs.num_envs,
            self.obs_space,
            self.envs.action_spaces[0],
            ppo_cfg.hidden_size,
            num_recurrent_memories=num_recurrent_memories,
            num_policy_heads=self._get_policy_head_count(),
            metrics=ppo_cfg.ROLLOUT.METRICS
        )
        rollouts.to(self.device)
        if self._fp16_mixed:
            rollouts.to_fp16()
        # Seed the rollout buffer with the first observation.
        observations = self.envs.reset()
        batch = batch_obs(observations, device=self.device)
        if self.semantic_predictor is not None:
            batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
        batch = apply_obs_transforms_batch(batch, self.obs_transforms)
        for sensor in rollouts.observations:
            rollouts.observations[sensor][0].copy_(batch[sensor])
        # batch and observations may contain shared PyTorch CUDA
        # tensors. We must explicitly clear them here otherwise
        # they will be kept in memory for the entire duration of training!
        batch = None
        observations = None
        logger.info(
            "agent number of parameters: {}".format(
                sum(param.numel() for param in self.agent.actor_critic.parameters())
            )
        )
        logger.info(
            "all parameters: {}".format(
                sum(param.numel() for param in self.agent.get_parameters())
            )
        )
        reward_count = self._get_policy_head_count()
        current_episode_env_reward = torch.zeros(reward_count, self.envs.num_envs, 1) # num policies x envs x 1? (last dim is just a quirk, I think)
        current_episode_reward = torch.zeros(reward_count, self.envs.num_envs, 1) # Include intrinsic rewards
        running_episode_stats = dict(
            count=torch.zeros(self.envs.num_envs, 1),
            reward=torch.zeros(reward_count, self.envs.num_envs, 1),
            env_reward=torch.zeros(reward_count, self.envs.num_envs, 1)
        )
        # Sliding window of per-episode stats for smoothed reporting.
        window_episode_stats = defaultdict(
            lambda: deque(maxlen=ppo_cfg.reward_window_size)
        )
        t_start = time.time()
        env_time = 0
        pth_time = 0
        self.count_steps = 0
        elapsed_steps = 0
        count_checkpoints = 0
        # Optionally resume from a checkpoint (weights, optimizer, step count).
        if ckpt != -1:
            logger.info(
                f"Resuming runs at checkpoint {ckpt}. Timing statistics are not tracked properly."
            )
            assert ppo_cfg.use_linear_lr_decay is False and ppo_cfg.use_linear_clip_decay is False, "Resuming with decay not supported"
            count_checkpoints = ckpt + 1
            self.count_steps = start_updates * ppo_cfg.num_steps * self.config.NUM_PROCESSES # default estimate
            ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
            # ! We may be changing the architecture, thus we sometimes load checkpoints without all the weights we need.
            is_warm_start = ckpt_path == self.config.RL.POLICY.PRETRAINED_CKPT
            self.agent.load_state_dict(ckpt_dict["state_dict"], strict=not is_warm_start)
            if "optim_state" in ckpt_dict:
                self.agent.load_optim_state(ckpt_dict["optim_state"], is_warm_start=is_warm_start)
            else:
                logger.warn("No optimizer state loaded, results may be funky")
            if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
                self.count_steps = ckpt_dict["extra_state"]["step"]
        lr_scheduler = LambdaLR(
            optimizer=self.agent.optimizer,
            lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),
        )
        with TensorboardWriter(
            self.config.TENSORBOARD_DIR,
            flush_secs=self.flush_secs,
            purge_step=self.count_steps,
        ) as writer:
            for update in range(start_updates, self.config.NUM_UPDATES):
                if ppo_cfg.use_linear_lr_decay:
                    lr_scheduler.step()
                if ppo_cfg.use_linear_clip_decay:
                    self.agent.clip_param = ppo_cfg.clip_param * linear_decay(
                        update, self.config.NUM_UPDATES
                    )
                prior_obs_state = None # For curiosity
                # Collect one rollout of experience across all envs.
                for step in range(ppo_cfg.num_steps):
                    (
                        delta_pth_time,
                        delta_env_time,
                        delta_steps,
                        prior_obs_state
                    ) = self._collect_rollout_step(
                        rollouts, current_episode_reward, current_episode_env_reward, running_episode_stats, prior_obs_state=prior_obs_state
                    )
                    pth_time += delta_pth_time
                    env_time += delta_env_time
                    self.count_steps += delta_steps
                    elapsed_steps += delta_steps
                # Optimize the agent on the collected rollout.
                (
                    delta_pth_time,
                    value_losses,
                    action_losses,
                    dist_entropy,
                    aux_task_losses,
                    aux_dist_entropy,
                    aux_weights,
                    inv_curiosity,
                    fwd_curiosity
                ) = self._update_agent(ppo_cfg, rollouts)
                pth_time += delta_pth_time
                for k, v in running_episode_stats.items():
                    window_episode_stats[k].append(v.clone())
                deltas = self._make_deltas(window_episode_stats)
                self.report_train_metrics(writer, {
                    "aux_entropy": aux_dist_entropy,
                    "inv_curiosity_loss": inv_curiosity,
                    "fwd_curiosity_loss": fwd_curiosity,
                },
                    deltas, dist_entropy, [value_losses, action_losses], aux_task_losses, self.count_steps, elapsed_steps, update,
                    env_time, pth_time, t_start, window_episode_stats,
                    aux_weights, aux_task_strings)
                # checkpoint model
                if update % self.config.CHECKPOINT_INTERVAL == 0:
                    self.save_checkpoint(
                        f"{self.checkpoint_prefix}.{count_checkpoints}.pth", dict(step=self.count_steps)
                    )
                    count_checkpoints += 1
        self.envs.close()
    def project_out(
        self, states, projection_path='weights/base-full_timesteps.pth'
    ):
        r"""Remove each belief's component along a probe axis, then re-insert
        a fixed target point on that axis.

        Lazily loads probe axes (last column is the intercept; see probes.py)
        from ``projection_path``, builds one rank-1 projection matrix per
        belief via the outer product of each unit axis, and caches both the
        matrices (``self._projections``) and the per-axis insertion point for
        ``self.config.EVAL.PROJECT_OUT`` (``self._project_in``).

        Used in experimentation with the time dimension to see if chaotic
        behavior / early stops can be suppressed.

        Args:
            states: recurrent hidden states, layers x batch x beliefs x hidden.
                Only ``states[0]`` is modified — a 1-layer GRU is assumed.
            projection_path: torch checkpoint holding the probe axes.

        Returns:
            ``states`` with layer 0 projected in place.
        """
        if self._projections is None:
            axes = torch.load(projection_path, map_location=self.device).float() # k x h, see probes.py
            intercepts = axes[:, -1]
            axes = axes[:, :-1]
            norms = axes.norm(dim=1) # k
            norm_axes = axes / axes.norm(dim=1, keepdim=True) # k x h
            # Point on each axis corresponding to the configured target value.
            self._project_in = ((self.config.EVAL.PROJECT_OUT - intercepts)/ norms).unsqueeze(1) * norm_axes # k x 1 * k x h -> k x h
            # https://statisticaloddsandends.wordpress.com/2018/02/02/projection-matrix-for-a-1-dimensional-subspace/
            projection_matrices = []
            for axis in norm_axes: # h
                projection_matrices.append(torch.outer(axis, axis.T))
            self._projections = torch.stack(projection_matrices, dim=0).float() # k x h x h
        projected_states = []
        for i in range(0, states.size(-2)):
            # states[0] is just the layer of the RNN, we use a 1 layer GRU.
            projected = torch.matmul(self._projections[i].unsqueeze(0), states[0, :, i].float().unsqueeze(2)).squeeze(-1) # 1 x h x h @ b x h x 1
            # b x h
            project_out = states[0, :, i] - projected
            projected_states.append(project_out) # b x h
        project_out = torch.stack(projected_states, dim=1)
        states[0] = project_out + self._project_in # b x k x h + k x h
        return states
    @torch.no_grad()
    def _simple_eval(
        self,
        ckpt_dict: dict,
        config: Config
    ):
        r"""Evaluate a checkpoint with a minimal, EvalAI-style loop.

        Builds the policy directly (without the PPO agent wrapper), steps the
        environments until ``TEST_EPISODE_COUNT`` episodes finish, and logs
        per-episode stats averaged over all episodes. No videos or extra
        diagnostics are produced.

        Args:
            ckpt_dict: loaded checkpoint (expects "state_dict", optionally
                "extra_state" with "step").
            config: evaluation config to build envs and the policy from.
        """
        # Match EvalAI docker while still mostly following default eval structure (parity with local eval is proven up to 300 episodes)
        # * this was originally written trying to identify mismatch between local val and evalai test-std.
        # * no bug was found; the issue is likely distribution shift.
        # Config
        aux_cfg = config.RL.AUX_TASKS
        ppo_cfg = config.RL.PPO
        task_cfg = config.TASK_CONFIG.TASK
        # Load spaces (via env)
        self.envs = construct_envs(config, get_env_class(config.ENV_NAME))
        # ! Agent setup
        policy_encoders = get_vision_encoder_inputs(ppo_cfg)
        observation_space = self.obs_space
        self.obs_transforms = get_active_obs_transforms(self.config)
        observation_space = apply_obs_transforms_obs_space(
            observation_space, self.obs_transforms
        )
        is_objectnav = "ObjectNav" in task_cfg.TYPE or self.config.MOCK_OBJECTNAV
        additional_sensors = []
        embed_goal = False
        if is_objectnav:
            additional_sensors = ["gps", "compass"]
            embed_goal = True
        def _get_policy_head_count(config):
            # Local copy of the head-count logic so this method only needs `config`.
            reward_keys = config.RL.POLICIES
            if reward_keys[0] == "none" and len(reward_keys) == 1:
                return 1
            if config.RL.REWARD_FUSION.STRATEGY == "SPLIT":
                return 2
            return 1
        policy_class = baseline_registry.get_policy(ppo_cfg.POLICY.name)
        self.actor_critic = policy_class(
            observation_space=observation_space,
            action_space=self.envs.action_spaces[0],
            hidden_size=ppo_cfg.hidden_size,
            goal_sensor_uuid=task_cfg.GOAL_SENSOR_UUID,
            num_tasks=len(aux_cfg.tasks), # we pass this is in to support eval, where no aux modules are made
            additional_sensors=additional_sensors,
            embed_goal=embed_goal,
            device=self.device,
            config=ppo_cfg.POLICY,
            policy_encoders=policy_encoders,
            num_policy_heads=_get_policy_head_count(config),
            mock_objectnav=config.MOCK_OBJECTNAV
        ).to(self.device)
        self.num_recurrent_memories = self.actor_critic.net.num_tasks
        if self.actor_critic.IS_MULTIPLE_BELIEF:
            proposed_num_beliefs = ppo_cfg.POLICY.BELIEFS.NUM_BELIEFS
            self.num_recurrent_memories = len(aux_cfg.tasks) if proposed_num_beliefs == -1 else proposed_num_beliefs
            if self.actor_critic.IS_RECURRENT:
                self.num_recurrent_memories += 1
        # Strip the "actor_critic." prefix so weights load into the bare policy.
        self.actor_critic.load_state_dict(
            {
                k.replace("actor_critic.", ""): v
                for k, v in ckpt_dict["state_dict"].items()
                if "actor_critic" in k
            }
        )
        self.actor_critic.eval()
        self.semantic_predictor = None
        if ppo_cfg.POLICY.USE_SEMANTICS:
            self.semantic_predictor = load_rednet(
                self.device,
                ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT,
                resize=True # since we train on half-vision
            )
            self.semantic_predictor.eval()
        # Select the behavioral head: past the split transition step, head 1 acts.
        self.behavioral_index = 0
        if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
            count_steps = ckpt_dict["extra_state"]["step"]
            if _get_policy_head_count(config) > 1 and count_steps > config.RL.REWARD_FUSION.SPLIT.TRANSITION:
                self.behavioral_index = 1
        # Load other items
        test_recurrent_hidden_states = torch.zeros(
            self.actor_critic.num_recurrent_layers,
            self.config.NUM_PROCESSES,
            self.num_recurrent_memories,
            ppo_cfg.hidden_size,
            device=self.device,
        )
        not_done_masks = torch.zeros(
            self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.bool
        )
        prev_actions = torch.zeros(
            self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long
        )
        # * Do eval
        number_of_eval_episodes = self.config.TEST_EPISODE_COUNT
        if number_of_eval_episodes == -1:
            number_of_eval_episodes = sum(self.envs.number_of_episodes)
        else:
            total_num_eps = sum(self.envs.number_of_episodes)
            if total_num_eps < number_of_eval_episodes:
                logger.warn(
                    f"Config specified {number_of_eval_episodes} eval episodes"
                    f", dataset only has {total_num_eps}."
                )
                logger.warn(f"Evaluating with {total_num_eps} instead.")
                number_of_eval_episodes = total_num_eps
        observations = self.envs.reset()
        batch = batch_obs(observations, device=self.device) # Note docker appears to get a single observation as opposed to a list (1 proc)
        if self.semantic_predictor is not None:
            batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
        batch = apply_obs_transforms_batch(batch, self.obs_transforms)
        current_episode_reward = torch.zeros(
            self.envs.num_envs, 1, device=self.device
        )
        stats_episodes = dict() # dict of dicts that stores stats per episode
        total_stats = []
        dones_per_ep = dict()
        pbar = tqdm.tqdm(total=number_of_eval_episodes)
        self.step = 0
        self.ep = 0
        # Step all envs until enough episodes have completed.
        while (
            len(stats_episodes) < number_of_eval_episodes
            and self.envs.num_envs > 0
        ):
            current_episodes = self.envs.current_episodes()
            with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
                # Match EvalAI settings
                if config.EVAL.restrict_gps:
                    batch["gps"][:,1] = 0
                deterministic = hasattr(self.config.EVAL, "DETERMINISTIC") and self.config.EVAL.DETERMINISTIC
                (
                    _,
                    actions,
                    _,
                    test_recurrent_hidden_states,
                    *_
                ) = self.actor_critic.act(
                    batch,
                    test_recurrent_hidden_states,
                    prev_actions,
                    not_done_masks,
                    deterministic=deterministic,
                    behavioral_index=self.behavioral_index,
                )
                prev_actions.copy_(actions)
            # Optionally project beliefs onto fixed probe hyperplanes (analysis mode).
            if self.config.EVAL.PROJECT_OUT >= 0:
                test_recurrent_hidden_states = self.project_out(test_recurrent_hidden_states, projection_path=self.config.EVAL.PROJECTION_PATH)
            self.step += 1
            outputs = self.envs.step([a[0].item() for a in actions])
            observations, rewards, dones, infos = [
                list(x) for x in zip(*outputs)
            ]
            batch = batch_obs(observations, device=self.device)
            if self.semantic_predictor is not None:
                batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
            batch = apply_obs_transforms_batch(batch, self.obs_transforms)
            not_done_masks = torch.tensor(
                [[False] if done else [True] for done in dones],
                dtype=torch.bool,
                device=self.device,
            )
            rewards = torch.tensor(
                rewards, dtype=torch.float, device=self.device
            ).unsqueeze(1)
            current_episode_reward += rewards
            next_episodes = self.envs.current_episodes()
            envs_to_pause = []
            n_envs = self.envs.num_envs
            for i in range(n_envs):
                next_k = (
                    next_episodes[i].scene_id,
                    next_episodes[i].episode_id,
                )
                # Pause envs whose next episode has already been evaluated once.
                if dones_per_ep.get(next_k, 0) == 1:
                    envs_to_pause.append(i) # wait for the rest
                if not_done_masks[i].item() == 0:
                    # Episode finished: record its stats.
                    episode_stats = dict()
                    episode_stats["reward"] = current_episode_reward[i].item()
                    current_episode_reward[i] = 0
                    episode_stats.update(
                        self._extract_scalars_from_info(infos[i])
                    )
                    # use scene_id + episode_id as unique id for storing stats
                    k = (
                        current_episodes[i].scene_id,
                        current_episodes[i].episode_id,
                    )
                    dones_per_ep[k] = dones_per_ep.get(k, 0) + 1
                    stats_episodes[
                        (
                            current_episodes[i].scene_id,
                            current_episodes[i].episode_id,
                            dones_per_ep[k],
                        )
                    ] = episode_stats
                    pbar.update()
                    print(f'{self.ep} reset {self.step}')
                    self.step = 0
                    self.ep += 1
                # episode continues
            (
                self.envs,
                test_recurrent_hidden_states,
                not_done_masks,
                current_episode_reward,
                prev_actions,
                batch,
                _,
                _,
            ) = self._pause_envs(
                envs_to_pause,
                self.envs,
                test_recurrent_hidden_states,
                not_done_masks,
                current_episode_reward,
                prev_actions,
                batch,
                {},
                _,
            )
        # Report results
        num_episodes = len(stats_episodes)
        aggregated_stats = dict()
        for stat_key in next(iter(stats_episodes.values())).keys():
            aggregated_stats[stat_key] = (
                sum([v[stat_key] for v in stats_episodes.values()])
                / num_episodes
            )
        for k, v in aggregated_stats.items():
            logger.info(f"Average episode {k}: {v:.8f}")
        if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
            step_id = ckpt_dict["extra_state"]["step"]
            logger.info(f"\n Step ID (update): {step_id}")
        self.envs.close()
    def _eval_checkpoint(
        self,
        checkpoint_path: str,
        writer: TensorboardWriter,
        checkpoint_index: int = 0,
        log_diagnostics=[],
        output_dir='.',
        label='.',
        num_eval_runs=1,
        skip_log=False,
        simple_eval=False,
    ) -> None:
        r"""Evaluates a single checkpoint.

        Steps the eval environments until ``TEST_EPISODE_COUNT`` episodes
        (times ``num_eval_runs``) finish, aggregating per-episode stats and
        optionally producing videos and per-timestep diagnostics dumps.

        Args:
            checkpoint_path: path of checkpoint
            writer: tensorboard writer object for logging to tensorboard
            checkpoint_index: index of cur checkpoint for logging
            log_diagnostics: Diagnostics members to record per timestep/episode.
            output_dir: directory for diagnostics dumps.
            label: tag used in video/diagnostic filenames.
            num_eval_runs: how many times to evaluate each episode.
            skip_log: suppress tensorboard scalar reporting.
            simple_eval: delegate to the minimal EvalAI-parity loop instead.
        Returns:
            None
        """
        # NOTE(review): `log_diagnostics=[]` is a mutable default argument;
        # it appears to only be read here, but consider a None sentinel.
        self._projections = None
        ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
        # Config
        if self.config.EVAL.USE_CKPT_CONFIG:
            config = self._setup_eval_config(ckpt_dict["config"])
        else:
            config = self.config.clone()
        aux_cfg = config.RL.AUX_TASKS
        ppo_cfg = config.RL.PPO
        task_cfg = config.TASK_CONFIG.TASK
        config.defrost()
        config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
        config.freeze()
        if simple_eval:
            self._simple_eval(ckpt_dict, config)
            return
        # Add additional measurements
        config.defrost()
        config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
        if len(self.config.VIDEO_OPTION) > 0:
            config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
        config.freeze()
        # Load spaces (via env)
        self.envs = construct_envs(config, get_env_class(config.ENV_NAME))
        # ! Agent setup
        policy_encoders = get_vision_encoder_inputs(ppo_cfg)
        # pass in aux config if we're doing attention
        self._setup_actor_critic_agent(
            ppo_cfg, task_cfg, aux_cfg,
            policy_encoders=policy_encoders
        )
        self.actor_critic = self.agent.actor_critic # We don't use PPO info
        # Strip the "actor_critic." prefix so weights load into the bare policy.
        self.actor_critic.load_state_dict(
            {
                k.replace("actor_critic.", ""): v
                for k, v in ckpt_dict["state_dict"].items()
                if "actor_critic" in k
            }
        )
        self.actor_critic.eval()
        logger.info(
            "agent number of trainable parameters: {}".format(
                sum(
                    param.numel()
                    for param in self.agent.parameters()
                    if param.requires_grad
                )
            )
        )
        self.semantic_predictor = None
        if ppo_cfg.POLICY.USE_SEMANTICS and not ppo_cfg.POLICY.EVAL_GT_SEMANTICS:
            self.semantic_predictor = load_rednet(
                self.device, ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT, resize=True, # since we train on half-vision
                stabilize=ppo_cfg.POLICY.EVAL_SEMANTICS_STABILIZE
                # ! TODO sub no resize back in, rn it's a no-op
                # self.device, ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT, resize=not self.config.RL.POLICY.FULL_VISION
            )
            self.semantic_predictor.eval()
        # Select the behavioral head: past the split transition step, head 1 acts.
        self.behavioral_index = 0
        if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
            self.count_steps = ckpt_dict["extra_state"]["step"]
            if self._get_policy_head_count() > 1 and self.count_steps > self.config.RL.REWARD_FUSION.SPLIT.TRANSITION:
                self.behavioral_index = 1
        # Load other items
        test_recurrent_hidden_states = torch.zeros(
            self.actor_critic.num_recurrent_layers,
            self.config.NUM_PROCESSES,
            ppo_cfg.hidden_size,
            device=self.device,
        )
        if self.actor_critic.IS_MULTIPLE_BELIEF:
            # ! This can be skipped once we have belief specification
            _, num_recurrent_memories, _, _ = self._setup_auxiliary_tasks(aux_cfg, ppo_cfg, task_cfg, is_eval=True)
            test_recurrent_hidden_states = test_recurrent_hidden_states.unsqueeze(2).repeat(1, 1, num_recurrent_memories, 1)
        not_done_masks = torch.zeros(
            self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.bool
        )
        prev_actions = torch.zeros(
            self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long
        )
        number_of_eval_episodes = self.config.TEST_EPISODE_COUNT
        if number_of_eval_episodes == -1:
            number_of_eval_episodes = sum(self.envs.number_of_episodes)
        else:
            total_num_eps = sum(self.envs.number_of_episodes)
            if total_num_eps < number_of_eval_episodes:
                logger.warn(
                    f"Config specified {number_of_eval_episodes} eval episodes"
                    f", dataset only has {total_num_eps}."
                )
                logger.warn(f"Evaluating with {total_num_eps} instead.")
                number_of_eval_episodes = total_num_eps
        observations = self.envs.reset()
        batch = batch_obs(observations, device=self.device)
        if self.semantic_predictor is not None:
            # batch["gt_semantic"] = batch["semantic"]
            batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
            if ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT == "weights/rednet_semmap_mp3d_40.pth":
                batch["semantic"] -= 1
        batch = apply_obs_transforms_batch(batch, self.obs_transforms)
        current_episode_reward = torch.zeros(
            self.envs.num_envs, 1, device=self.device
        )
        stats_episodes = dict() # dict of dicts that stores stats per episode
        total_stats = []
        dones_per_ep = dict()
        # Video and logging
        aux_task_strings = self.config.RL.AUX_TASKS.tasks
        rgb_frames = [
            [] for _ in range(self.config.NUM_PROCESSES)
        ] # type: List[List[np.ndarray]]
        is_full_eval = len(log_diagnostics) > 0 # len(self.config.VIDEO_OPTION) == 0 and
        if len(self.config.VIDEO_OPTION) > 0:
            os.makedirs(self.config.VIDEO_DIR, exist_ok=True)
            video_indices = range(self.config.TEST_EPISODE_COUNT)
            print(f"Videos: {video_indices}")
        # Logging more extensive evaluation stats for analysis
        per_timestep_diagnostics = [d for d in log_diagnostics if d in [
            Diagnostics.actions, Diagnostics.gps, Diagnostics.heading,
            Diagnostics.weights, Diagnostics.internal_activations,
            Diagnostics.observations, Diagnostics.visual_observations,
            Diagnostics.room_cat, Diagnostics.d2g, Diagnostics.visit_count,
            Diagnostics.coverage_t, Diagnostics.collisions_t, Diagnostics.sge_t
        ]]
        d_stats = {}
        if len(per_timestep_diagnostics) > 0:
            for d in per_timestep_diagnostics:
                d_stats[d] = [
                    [] for _ in range(self.config.NUM_PROCESSES)
                ] # stored as nested list envs x timesteps x k (# tasks)
        pbar = tqdm.tqdm(total=number_of_eval_episodes * num_eval_runs)
        # Step all envs until enough episodes (times eval runs) have completed.
        while (
            len(stats_episodes) < number_of_eval_episodes * num_eval_runs
            and self.envs.num_envs > 0
        ):
            current_episodes = self.envs.current_episodes()
            with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
                weights_output = None
                # Capture per-belief fusion weights when videos/diagnostics need them.
                if (len(self.config.VIDEO_OPTION) > 0 or Diagnostics.weights in log_diagnostics) and \
                    self.actor_critic.IS_MULTIPLE_BELIEF and self.actor_critic.LATE_FUSION:
                    num_modules = ppo_cfg.POLICY.BELIEFS.NUM_BELIEFS
                    if num_modules == -1:
                        num_modules = len(aux_task_strings)
                    aux_task_strings = aux_task_strings[:num_modules]
                    weights_output = torch.empty(self.envs.num_envs, num_modules)
                # Match EvalAI settings
                if config.EVAL.restrict_gps:
                    batch["gps"][:,1] = 0
                deterministic = hasattr(self.config.EVAL, "DETERMINISTIC") and self.config.EVAL.DETERMINISTIC
                (
                    value,
                    actions,
                    action_log_probs,
                    test_recurrent_hidden_states,
                    *other_outputs
                ) = self.actor_critic.act(
                    batch,
                    test_recurrent_hidden_states,
                    prev_actions,
                    not_done_masks,
                    deterministic=deterministic,
                    weights_output=weights_output,
                    behavioral_index=self.behavioral_index,
                    return_all_activations=Diagnostics.internal_activations in log_diagnostics,
                )
                prev_actions.copy_(actions)
            # Optionally project beliefs onto fixed probe hyperplanes (analysis mode).
            if self.config.EVAL.PROJECT_OUT >= 0:
                test_recurrent_hidden_states = self.project_out(test_recurrent_hidden_states, self.config.EVAL.PROJECTION_PATH)
            outputs = self.envs.step([a[0].item() for a in actions])
            observations, rewards, dones, infos = [
                list(x) for x in zip(*outputs)
            ]
            # Record per-timestep diagnostics for each env.
            if len(log_diagnostics) > 0:
                for i in range(self.envs.num_envs):
                    if Diagnostics.actions in log_diagnostics:
                        d_stats[Diagnostics.actions][i].append(prev_actions[i].item())
                    if Diagnostics.weights in log_diagnostics:
                        aux_weights = None if weights_output is None else weights_output[i]
                        if aux_weights is not None:
                            d_stats[Diagnostics.weights][i].append(aux_weights.half().tolist())
                    if Diagnostics.internal_activations in log_diagnostics:
                        fused_features, fused_sensors, logits = other_outputs
                        d_stats[Diagnostics.internal_activations][i].append({
                            "beliefs": test_recurrent_hidden_states[-1, i].half().cpu(),
                            "fused_belief": fused_features[i].half().cpu(),
                            "fused_obs": fused_sensors[i, 0].half().cpu(), # b k h -> h
                            "action_logits": logits[i].half().cpu(),
                            "critic_values": value[i].half().cpu()
                        })
                    if Diagnostics.observations in log_diagnostics:
                        d_stats[Diagnostics.observations][i].append({
                            key: batch[key][i].cpu() for key in ['compass', 'gps'] # [H]
                        })
                    if Diagnostics.visual_observations in log_diagnostics:
                        d_stats[Diagnostics.visual_observations][i].append({
                            key: batch[key][i].cpu() for key in ['rgb', 'depth', 'semantic'] # HWC
                        })
                    if Diagnostics.sge_t in log_diagnostics:
                        d_stats[Diagnostics.sge_t][i].append(infos[i]['goal_vis'])
                    if Diagnostics.collisions_t in log_diagnostics:
                        d_stats[Diagnostics.collisions_t][i].append(infos[i]['collisions']['count'])
                    if Diagnostics.coverage_t in log_diagnostics:
                        d_stats[Diagnostics.coverage_t][i].append({
                            'mini_reached': infos[i]['coverage']['mini_reached'],
                            'reached': infos[i]['coverage']['reached'],
                        })
                    if Diagnostics.visit_count in log_diagnostics:
                        d_stats[Diagnostics.visit_count][i].append(infos[i]['coverage']['visit_count'])
                    if Diagnostics.room_cat in log_diagnostics:
                        d_stats[Diagnostics.room_cat][i].append({
                            'room_cat': infos[i]['region_level']['room_cat'],
                        })
                    if Diagnostics.d2g in log_diagnostics:
                        d_stats[Diagnostics.d2g][i].append(infos[i]['distance_to_goal'])
            batch = batch_obs(observations, device=self.device)
            if self.semantic_predictor is not None:
                # batch["gt_semantic"] = batch["semantic"]
                batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
                if ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT == "weights/rednet_semmap_mp3d_40.pth":
                    batch["semantic"] -= 1
                if len(self.config.VIDEO_OPTION) > 0:
                    for i in range(batch["semantic"].size(0)):
                        # observations[i]['gt_semantic'] = observations[i]['semantic']
                        observations[i]['semantic'] = batch["semantic"][i].cpu().numpy()
            batch = apply_obs_transforms_batch(batch, self.obs_transforms)
            not_done_masks = torch.tensor(
                [[False] if done else [True] for done in dones],
                dtype=torch.bool,
                device=self.device,
            )
            rewards = torch.tensor(
                rewards, dtype=torch.float, device=self.device
            ).unsqueeze(1)
            current_episode_reward += rewards
            next_episodes = self.envs.current_episodes()
            envs_to_pause = []
            n_envs = self.envs.num_envs
            for i in range(n_envs):
                next_k = (
                    next_episodes[i].scene_id,
                    next_episodes[i].episode_id,
                )
                # Pause envs whose next episode was already run num_eval_runs times.
                if dones_per_ep.get(next_k, 0) == num_eval_runs:
                    envs_to_pause.append(i) # wait for the rest
                if not_done_masks[i].item() == 0:
                    # Episode finished: record stats, dump video/diagnostics.
                    episode_stats = dict()
                    episode_stats["reward"] = current_episode_reward[i].item()
                    current_episode_reward[i] = 0
                    episode_stats.update(
                        self._extract_scalars_from_info(infos[i])
                    )
                    # use scene_id + episode_id as unique id for storing stats
                    k = (
                        current_episodes[i].scene_id,
                        current_episodes[i].episode_id,
                    )
                    dones_per_ep[k] = dones_per_ep.get(k, 0) + 1
                    stats_episodes[
                        (
                            current_episodes[i].scene_id,
                            current_episodes[i].episode_id,
                            dones_per_ep[k],
                        )
                    ] = episode_stats
                    if dones_per_ep.get(k, 0) == 1 and len(self.config.VIDEO_OPTION) > 0 and len(stats_episodes) in video_indices:
                        logger.info(f"Generating video {len(stats_episodes)}")
                        category = getattr(current_episodes[i], "object_category", "")
                        if category != "":
                            category += "_"
                        try:
                            if checkpoint_index == -1:
                                ckpt_file = checkpoint_path.split('/')[-1]
                                split_info = ckpt_file.split('.')
                                checkpoint_index = split_info[1]
                            proj_stem = self.config.EVAL.PROJECTION_PATH.split('/')[-1].split('_')[-2]
                            proj_str = f"proj-{proj_stem}-{self.config.EVAL.PROJECT_OUT}" if self.config.EVAL.PROJECT_OUT >= 0 else ""
                            generate_video(
                                video_option=self.config.VIDEO_OPTION,
                                video_dir=self.config.VIDEO_DIR,
                                images=rgb_frames[i],
                                episode_id=current_episodes[i].episode_id,
                                checkpoint_idx=checkpoint_index,
                                metrics=self._extract_scalars_from_info(infos[i]),
                                tag=f"{proj_str}{category}{label}_{current_episodes[i].scene_id.split('/')[-1]}",
                                tb_writer=writer,
                            )
                        except Exception as e:
                            logger.warning(str(e))
                        rgb_frames[i] = []
                    if len(log_diagnostics) > 0:
                        diagnostic_info = dict()
                        for metric in per_timestep_diagnostics:
                            if isinstance(d_stats[metric][i][0], dict):
                                diagnostic_info[metric] = batch_obs(d_stats[metric][i], dtype=torch.half)
                            else:
                                diagnostic_info[metric] = torch.tensor(d_stats[metric][i])
                            d_stats[metric][i] = []
                        # TODO We want to stack this too
                        if Diagnostics.top_down_map in log_diagnostics:
                            top_down_map = infos[i]["top_down_map"]["map"]
                            top_down_map = maps.colorize_topdown_map(
                                top_down_map, fog_of_war_mask=None
                            )
                            diagnostic_info.update(dict(top_down_map=top_down_map))
                        if Diagnostics.episode_info in log_diagnostics:
                            ep_info = attr.asdict(current_episodes[i])
                            if Diagnostics.episode_info_full not in log_diagnostics:
                                del ep_info['goals']
                                del ep_info['shortest_paths']
                                del ep_info['_shortest_path_cache']
                            diagnostic_info.update(dict(
                                episode_info=ep_info,
                            ))
                        total_stats.append(
                            dict(
                                stats=episode_stats,
                                did_stop=bool(prev_actions[i] == 0),
                                info=diagnostic_info,
                            )
                        )
                    pbar.update()
                # episode continues
                else:
                    if len(self.config.VIDEO_OPTION) > 0:
                        aux_weights = None if weights_output is None else weights_output[i]
                        frame = observations_to_image(observations[i], infos[i], current_episode_reward[i].item(), aux_weights, aux_task_strings)
                        rgb_frames[i].append(frame)
                    if Diagnostics.gps in log_diagnostics:
                        d_stats[Diagnostics.gps][i].append(observations[i]["gps"].tolist())
                    if Diagnostics.heading in log_diagnostics:
                        d_stats[Diagnostics.heading][i].append(observations[i]["heading"].tolist())
            (
                self.envs,
                test_recurrent_hidden_states,
                not_done_masks,
                current_episode_reward,
                prev_actions,
                batch,
                d_stats,
                rgb_frames,
            ) = self._pause_envs(
                envs_to_pause,
                self.envs,
                test_recurrent_hidden_states,
                not_done_masks,
                current_episode_reward,
                prev_actions,
                batch,
                d_stats,
                rgb_frames,
            )
        # Report results
        num_episodes = len(stats_episodes)
        aggregated_stats = dict()
        for stat_key in next(iter(stats_episodes.values())).keys():
            aggregated_stats[stat_key] = (
                sum([v[stat_key] for v in stats_episodes.values()])
                / num_episodes
            )
        for k, v in aggregated_stats.items():
            logger.info(f"Average episode {k}: {v:.4f}")
        if "extra_state" in ckpt_dict and "step" in ckpt_dict["extra_state"]:
            step_id = ckpt_dict["extra_state"]["step"]
            logger.info(f"\n Step ID (update): {step_id}")
        # NOTE(review): 2184 looks like the size of a specific val split — TODO confirm.
        if label != "train" and num_episodes == 2184 and not skip_log:
            writer.add_scalars(
                "eval_reward",
                {"average reward": aggregated_stats["reward"]},
                step_id,
            )
            metrics = {k: v for k, v in aggregated_stats.items() if k != "reward"}
            if len(metrics) > 0:
                writer.add_scalars("eval_metrics", metrics, step_id)
                logger.info("eval_metrics")
                logger.info(metrics)
        if len(log_diagnostics) > 0:
            proj_str = f"proj-{self.config.EVAL.PROJECT_OUT}" if self.config.EVAL.PROJECT_OUT >= 0 else ""
            os.makedirs(output_dir, exist_ok=True)
            if Diagnostics.top_down_map in log_diagnostics:
                torch.save(total_stats, os.path.join(output_dir, f'{label}.pth'))
            else:
                meta_stats = {
                    'step_id': step_id,
                    'payload': total_stats
                }
                torch.save(meta_stats, os.path.join(output_dir, f'{proj_str}{label}.pth'))
        self.envs.close()
def report_train_metrics(
self,
writer,
stats,
deltas,
entropy: torch.tensor,
losses: List[torch.tensor],
aux_losses,
count_steps,
elapsed_steps,
update,
env_time,
pth_time,
t_start,
window_episode_stats,
aux_weights,
aux_task_strings
):
r"""
Add stats (torch values that we're too lazy to aggregate),
Add losses
Add deltas (stats properly averaged across episodes)
To TB + logger.
Extracted since DDPPO trainer has the same code.
args:
deltas: dictionary of scalars
"""
for stat_key, stat_val in stats.items():
writer.add_scalar(stat_key, stat_val, count_steps)
# Check for other metrics that haven't been logged yet
metrics = {
k: v / deltas["count"]
for k, v in deltas.items()
if k not in {"count"}
}
if len(entropy.size()) > 0:
strs = ["entropy"] + [f"entropy_{i}" for i in range(1, entropy.size(0))]
for s, e in zip(strs, entropy):
metrics[s] = e.item()
else:
metrics["entropy"] = entropy.item()
if len(metrics) > 0:
writer.add_scalars("metrics", metrics, count_steps)
value_losses = losses[0]
policy_losses = losses[1]
losses_strs = ["value", "policy"]
if len(value_losses.size()) > 0:
for i in range(1, value_losses.size(0)):
losses_strs.extend([f"value_{i}", f"policy_{i}"])
losses = [val.item() for pair in zip(value_losses, policy_losses) for val in pair] + aux_losses
losses_strs.extend(aux_task_strings)
writer.add_scalars(
"losses",
{k: l for l, k in zip(losses, losses_strs)},
count_steps,
)
if aux_weights is not None:
writer.add_scalars(
"aux_weights",
{k: l for l, k in zip(aux_weights, aux_task_strings)},
count_steps,
)
# Log stats
if update > 0 and update % self.config.LOG_INTERVAL == 0:
formatted_losses = [f"{s}: {l:.3g}" for s, l in zip(losses_strs, losses)]
logger.info(
"update: {}\t {} \t aux_entropy {:.3g}\t inv curious {:.3g} fwd curious {:.3g}".format(
update, formatted_losses, stats["aux_entropy"], stats["inv_curiosity_loss"], stats["fwd_curiosity_loss"]
)
)
logger.info(
"update: {}\tfps: {:.3f}\t".format(
update, elapsed_steps / (time.time() - t_start)
)
)
logger.info(
"update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\t"
"frames: {}".format(
update, env_time, pth_time, count_steps
)
)
logger.info(
"Average window size: {} {}".format(
len(window_episode_stats["count"]),
" ".join(
"{}: {:.3f}".format(k, v / deltas["count"])
for k, v in deltas.items()
if k != "count"
),
)
)
| [
"habitat.logger.warn",
"habitat.logger.add_filehandler",
"habitat.utils.visualizations.maps.colorize_topdown_map",
"habitat_baselines.rl.ppo.encoder_dict.get_vision_encoder_inputs",
"habitat_baselines.common.environments.get_env_class",
"torch.cuda.is_available",
"habitat_baselines.common.baseline_regis... | [((3519, 3565), 'habitat_baselines.common.baseline_registry.baseline_registry.register_trainer', 'baseline_registry.register_trainer', ([], {'name': '"""ppo"""'}), "(name='ppo')\n", (3553, 3565), False, 'from habitat_baselines.common.baseline_registry import baseline_registry\n'), ((38227, 38242), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (38240, 38242), False, 'import torch\n'), ((5984, 6035), 'habitat.logger.info', 'logger.info', (['f"""Auxiliary tasks: {aux_task_strings}"""'], {}), "(f'Auxiliary tasks: {aux_task_strings}')\n", (5995, 6035), False, 'from habitat import Config, logger\n'), ((6318, 6367), 'habitat_baselines.common.baseline_registry.baseline_registry.get_policy', 'baseline_registry.get_policy', (['ppo_cfg.POLICY.name'], {}), '(ppo_cfg.POLICY.name)\n', (6346, 6367), False, 'from habitat_baselines.common.baseline_registry import baseline_registry\n'), ((8345, 8396), 'habitat_baselines.rl.ppo.curiosity.ForwardCuriosity', 'ForwardCuriosity', (['ppo_cfg', 'task_cfg', 'embedding_size'], {}), '(ppo_cfg, task_cfg, embedding_size)\n', (8361, 8396), False, 'from habitat_baselines.rl.ppo.curiosity import ForwardCuriosity\n'), ((9349, 9393), 'habitat.logger.add_filehandler', 'logger.add_filehandler', (['self.config.LOG_FILE'], {}), '(self.config.LOG_FILE)\n', (9371, 9393), False, 'from habitat import Config, logger\n'), ((9468, 9506), 'habitat_baselines.common.obs_transformers.get_active_obs_transforms', 'get_active_obs_transforms', (['self.config'], {}), '(self.config)\n', (9493, 9506), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((9535, 9605), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_obs_space', 'apply_obs_transforms_obs_space', (['observation_space', 'self.obs_transforms'], {}), '(observation_space, self.obs_transforms)\n', (9565, 9605), False, 'from 
habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((10047, 10096), 'habitat_baselines.common.baseline_registry.baseline_registry.get_policy', 'baseline_registry.get_policy', (['ppo_cfg.POLICY.name'], {}), '(ppo_cfg.POLICY.name)\n', (10075, 10096), False, 'from habitat_baselines.common.baseline_registry import baseline_registry\n'), ((13282, 13358), 'torch.load', 'torch.load', (['self.config.RL.PPO.POLICY.pretrained_weights'], {'map_location': '"""cpu"""'}), "(self.config.RL.PPO.POLICY.pretrained_weights, map_location='cpu')\n", (13292, 13358), False, 'import torch\n'), ((16063, 16120), 'os.makedirs', 'os.makedirs', (['self.config.CHECKPOINT_FOLDER'], {'exist_ok': '(True)'}), '(self.config.CHECKPOINT_FOLDER, exist_ok=True)\n', (16074, 16120), False, 'import os\n'), ((16617, 16661), 'torch.load', 'torch.load', (['checkpoint_path', '*args'], {}), '(checkpoint_path, *args, **kwargs)\n', (16627, 16661), False, 'import torch\n'), ((17765, 17782), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (17776, 17782), False, 'from collections import defaultdict, deque\n'), ((20421, 20432), 'time.time', 'time.time', ([], {}), '()\n', (20430, 20432), False, 'import time\n'), ((21453, 21464), 'time.time', 'time.time', ([], {}), '()\n', (21462, 21464), False, 'import time\n'), ((21680, 21691), 'time.time', 'time.time', ([], {}), '()\n', (21689, 21691), False, 'import time\n'), ((22169, 22233), 'habitat_baselines.common.utils.batch_list', 'batch_list', (['infos'], {'device': 'self.device', 'whitelist': 'TRACKED_METRICS'}), '(infos, device=self.device, whitelist=TRACKED_METRICS)\n', (22179, 22233), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((22251, 22294), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 'self.device'}), 
'(observations, device=self.device)\n', (22260, 22294), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((22574, 22628), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (22600, 22628), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((22648, 22726), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float', 'device': 'current_episode_reward.device'}), '(rewards, dtype=torch.float, device=current_episode_reward.device)\n', (22660, 22726), False, 'import torch\n'), ((22907, 22973), 'habitat_baselines.common.utils.batch_list', 'batch_list', (['infos'], {'device': 'rewards.device', 'whitelist': 'POLICY_METRICS'}), '(infos, device=rewards.device, whitelist=POLICY_METRICS)\n', (22917, 22973), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((23175, 23297), 'torch.tensor', 'torch.tensor', (['[([0.0] if done else [1.0]) for done in dones]'], {'dtype': 'torch.float', 'device': 'current_episode_reward.device'}), '([([0.0] if done else [1.0]) for done in dones], dtype=torch.\n float, device=current_episode_reward.device)\n', (23187, 23297), False, 'import torch\n'), ((26342, 26353), 'time.time', 'time.time', ([], {}), '()\n', (26351, 26353), False, 'import time\n'), ((29516, 29550), 'habitat_baselines.rl.ppo.encoder_dict.get_vision_encoder_inputs', 'get_vision_encoder_inputs', (['ppo_cfg'], {}), '(ppo_cfg)\n', (29541, 29550), False, 'from habitat_baselines.rl.ppo.encoder_dict import get_vision_encoder_inputs\n'), ((30637, 30680), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 
'self.device'}), '(observations, device=self.device)\n', (30646, 30680), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((30831, 30885), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (30857, 30885), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((31690, 31738), 'torch.zeros', 'torch.zeros', (['reward_count', 'self.envs.num_envs', '(1)'], {}), '(reward_count, self.envs.num_envs, 1)\n', (31701, 31738), False, 'import torch\n'), ((31835, 31883), 'torch.zeros', 'torch.zeros', (['reward_count', 'self.envs.num_envs', '(1)'], {}), '(reward_count, self.envs.num_envs, 1)\n', (31846, 31883), False, 'import torch\n'), ((32289, 32300), 'time.time', 'time.time', ([], {}), '()\n', (32298, 32300), False, 'import time\n'), ((38089, 38125), 'torch.stack', 'torch.stack', (['projected_states'], {'dim': '(1)'}), '(projected_states, dim=1)\n', (38100, 38125), False, 'import torch\n'), ((38937, 38971), 'habitat_baselines.rl.ppo.encoder_dict.get_vision_encoder_inputs', 'get_vision_encoder_inputs', (['ppo_cfg'], {}), '(ppo_cfg)\n', (38962, 38971), False, 'from habitat_baselines.rl.ppo.encoder_dict import get_vision_encoder_inputs\n'), ((39046, 39084), 'habitat_baselines.common.obs_transformers.get_active_obs_transforms', 'get_active_obs_transforms', (['self.config'], {}), '(self.config)\n', (39071, 39084), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((39113, 39183), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_obs_space', 'apply_obs_transforms_obs_space', (['observation_space', 'self.obs_transforms'], {}), 
'(observation_space, self.obs_transforms)\n', (39143, 39183), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((39767, 39816), 'habitat_baselines.common.baseline_registry.baseline_registry.get_policy', 'baseline_registry.get_policy', (['ppo_cfg.POLICY.name'], {}), '(ppo_cfg.POLICY.name)\n', (39795, 39816), False, 'from habitat_baselines.common.baseline_registry import baseline_registry\n'), ((41899, 42057), 'torch.zeros', 'torch.zeros', (['self.actor_critic.num_recurrent_layers', 'self.config.NUM_PROCESSES', 'self.num_recurrent_memories', 'ppo_cfg.hidden_size'], {'device': 'self.device'}), '(self.actor_critic.num_recurrent_layers, self.config.\n NUM_PROCESSES, self.num_recurrent_memories, ppo_cfg.hidden_size, device\n =self.device)\n', (41910, 42057), False, 'import torch\n'), ((42145, 42224), 'torch.zeros', 'torch.zeros', (['self.config.NUM_PROCESSES', '(1)'], {'device': 'self.device', 'dtype': 'torch.bool'}), '(self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.bool)\n', (42156, 42224), False, 'import torch\n'), ((42270, 42349), 'torch.zeros', 'torch.zeros', (['self.config.NUM_PROCESSES', '(1)'], {'device': 'self.device', 'dtype': 'torch.long'}), '(self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long)\n', (42281, 42349), False, 'import torch\n'), ((43078, 43121), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 'self.device'}), '(observations, device=self.device)\n', (43087, 43121), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((43352, 43406), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (43378, 43406), False, 'from habitat_baselines.common.obs_transformers 
import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((43441, 43495), 'torch.zeros', 'torch.zeros', (['self.envs.num_envs', '(1)'], {'device': 'self.device'}), '(self.envs.num_envs, 1, device=self.device)\n', (43452, 43495), False, 'import torch\n'), ((43669, 43709), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'number_of_eval_episodes'}), '(total=number_of_eval_episodes)\n', (43678, 43709), False, 'import tqdm\n'), ((50298, 50332), 'habitat_baselines.rl.ppo.encoder_dict.get_vision_encoder_inputs', 'get_vision_encoder_inputs', (['ppo_cfg'], {}), '(ppo_cfg)\n', (50323, 50332), False, 'from habitat_baselines.rl.ppo.encoder_dict import get_vision_encoder_inputs\n'), ((52133, 52257), 'torch.zeros', 'torch.zeros', (['self.actor_critic.num_recurrent_layers', 'self.config.NUM_PROCESSES', 'ppo_cfg.hidden_size'], {'device': 'self.device'}), '(self.actor_critic.num_recurrent_layers, self.config.\n NUM_PROCESSES, ppo_cfg.hidden_size, device=self.device)\n', (52144, 52257), False, 'import torch\n'), ((52698, 52777), 'torch.zeros', 'torch.zeros', (['self.config.NUM_PROCESSES', '(1)'], {'device': 'self.device', 'dtype': 'torch.bool'}), '(self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.bool)\n', (52709, 52777), False, 'import torch\n'), ((52823, 52902), 'torch.zeros', 'torch.zeros', (['self.config.NUM_PROCESSES', '(1)'], {'device': 'self.device', 'dtype': 'torch.long'}), '(self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long)\n', (52834, 52902), False, 'import torch\n'), ((53610, 53653), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 'self.device'}), '(observations, device=self.device)\n', (53619, 53653), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((53989, 54043), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 
'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (54015, 54043), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((54078, 54132), 'torch.zeros', 'torch.zeros', (['self.envs.num_envs', '(1)'], {'device': 'self.device'}), '(self.envs.num_envs, 1, device=self.device)\n', (54089, 54132), False, 'import torch\n'), ((55614, 55670), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': '(number_of_eval_episodes * num_eval_runs)'}), '(total=number_of_eval_episodes * num_eval_runs)\n', (55623, 55670), False, 'import tqdm\n'), ((4471, 4546), 'habitat.logger.warn', 'logger.warn', (['"""FP16 requires CUDA but CUDA is not available, setting to off"""'], {}), "('FP16 requires CUDA but CUDA is not available, setting to off')\n", (4482, 4546), False, 'from habitat import Config, logger\n'), ((6821, 6850), 'habitat_baselines.common.auxiliary_tasks.get_aux_task_classes', 'get_aux_task_classes', (['aux_cfg'], {}), '(aux_cfg)\n', (6841, 6850), False, 'from habitat_baselines.common.auxiliary_tasks import get_aux_task_classes\n'), ((12594, 12672), 'habitat_baselines.rl.models.rednet.load_rednet', 'load_rednet', (['self.device'], {'ckpt': 'ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT', 'resize': '(True)'}), '(self.device, ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT, resize=True)\n', (12605, 12672), False, 'from habitat_baselines.rl.models.rednet import load_rednet\n'), ((16165, 16219), 'os.path.join', 'os.path.join', (['self.config.CHECKPOINT_FOLDER', 'file_name'], {}), '(self.config.CHECKPOINT_FOLDER, file_name)\n', (16177, 16219), False, 'import os\n'), ((20471, 20486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20484, 20486), False, 'import torch\n'), ((21401, 21412), 'time.time', 'time.time', ([], {}), '()\n', (21410, 21412), False, 'import time\n'), ((21630, 21641), 'time.time', 'time.time', ([], {}), '()\n', (21639, 21641), False, 'import 
time\n'), ((26169, 26180), 'time.time', 'time.time', ([], {}), '()\n', (26178, 26180), False, 'import time\n'), ((26367, 26382), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26380, 26382), False, 'import torch\n'), ((29181, 29216), 'habitat_baselines.common.environments.get_env_class', 'get_env_class', (['self.config.ENV_NAME'], {}), '(self.config.ENV_NAME)\n', (29194, 29216), False, 'from habitat_baselines.common.environments import get_env_class\n'), ((29326, 29351), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (29349, 29351), False, 'import torch\n'), ((29264, 29310), 'torch.device', 'torch.device', (['"""cuda"""', 'self.config.TORCH_GPU_ID'], {}), "('cuda', self.config.TORCH_GPU_ID)\n", (29276, 29310), False, 'import torch\n'), ((29369, 29388), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (29381, 29388), False, 'import torch\n'), ((32463, 32568), 'habitat.logger.info', 'logger.info', (['f"""Resuming runs at checkpoint {ckpt}. Timing statistics are not tracked properly."""'], {}), "(\n f'Resuming runs at checkpoint {ckpt}. 
Timing statistics are not tracked properly.'\n )\n", (32474, 32568), False, 'from habitat import Config, logger\n'), ((33809, 33916), 'habitat_baselines.common.tensorboard_utils.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs', 'purge_step': 'self.count_steps'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs,\n purge_step=self.count_steps)\n', (33826, 33916), False, 'from habitat_baselines.common.tensorboard_utils import TensorboardWriter\n'), ((38854, 38884), 'habitat_baselines.common.environments.get_env_class', 'get_env_class', (['config.ENV_NAME'], {}), '(config.ENV_NAME)\n', (38867, 38884), False, 'from habitat_baselines.common.environments import get_env_class\n'), ((41292, 41370), 'habitat_baselines.rl.models.rednet.load_rednet', 'load_rednet', (['self.device'], {'ckpt': 'ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT', 'resize': '(True)'}), '(self.device, ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT, resize=True)\n', (41303, 41370), False, 'from habitat_baselines.rl.models.rednet import load_rednet\n'), ((45250, 45293), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 'self.device'}), '(observations, device=self.device)\n', (45259, 45293), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((45456, 45510), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (45482, 45510), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((45541, 45647), 'torch.tensor', 'torch.tensor', (['[([False] if done else [True]) for done in dones]'], {'dtype': 'torch.bool', 'device': 'self.device'}), '([([False] if done else [True]) for done in dones], dtype=torch\n 
.bool, device=self.device)\n', (45553, 45647), False, 'import torch\n'), ((48397, 48441), 'habitat.logger.info', 'logger.info', (['f"""Average episode {k}: {v:.8f}"""'], {}), "(f'Average episode {k}: {v:.8f}')\n", (48408, 48441), False, 'from habitat import Config, logger\n'), ((48588, 48637), 'habitat.logger.info', 'logger.info', (['f"""\n Step ID (update): {step_id}"""'], {}), '(f"""\n Step ID (update): {step_id}""")\n', (48599, 48637), False, 'from habitat import Config, logger\n'), ((50215, 50245), 'habitat_baselines.common.environments.get_env_class', 'get_env_class', (['config.ENV_NAME'], {}), '(config.ENV_NAME)\n', (50228, 50245), False, 'from habitat_baselines.common.environments import get_env_class\n'), ((51304, 51438), 'habitat_baselines.rl.models.rednet.load_rednet', 'load_rednet', (['self.device'], {'ckpt': 'ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT', 'resize': '(True)', 'stabilize': 'ppo_cfg.POLICY.EVAL_SEMANTICS_STABILIZE'}), '(self.device, ckpt=ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT, resize=\n True, stabilize=ppo_cfg.POLICY.EVAL_SEMANTICS_STABILIZE)\n', (51315, 51438), False, 'from habitat_baselines.rl.models.rednet import load_rednet\n'), ((54647, 54696), 'os.makedirs', 'os.makedirs', (['self.config.VIDEO_DIR'], {'exist_ok': '(True)'}), '(self.config.VIDEO_DIR, exist_ok=True)\n', (54658, 54696), False, 'import os\n'), ((60784, 60827), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['observations'], {'device': 'self.device'}), '(observations, device=self.device)\n', (60793, 60827), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((61479, 61533), 'habitat_baselines.common.obs_transformers.apply_obs_transforms_batch', 'apply_obs_transforms_batch', (['batch', 'self.obs_transforms'], {}), '(batch, self.obs_transforms)\n', (61505, 61533), False, 'from habitat_baselines.common.obs_transformers import apply_obs_transforms_batch, 
apply_obs_transforms_obs_space, get_active_obs_transforms\n'), ((61564, 61670), 'torch.tensor', 'torch.tensor', (['[([False] if done else [True]) for done in dones]'], {'dtype': 'torch.bool', 'device': 'self.device'}), '([([False] if done else [True]) for done in dones], dtype=torch\n .bool, device=self.device)\n', (61576, 61670), False, 'import torch\n'), ((68558, 68602), 'habitat.logger.info', 'logger.info', (['f"""Average episode {k}: {v:.4f}"""'], {}), "(f'Average episode {k}: {v:.4f}')\n", (68569, 68602), False, 'from habitat import Config, logger\n'), ((68749, 68798), 'habitat.logger.info', 'logger.info', (['f"""\n Step ID (update): {step_id}"""'], {}), '(f"""\n Step ID (update): {step_id}""")\n', (68760, 68798), False, 'from habitat import Config, logger\n'), ((69455, 69493), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (69466, 69493), False, 'import os\n'), ((4432, 4457), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4455, 4457), False, 'import torch\n'), ((4747, 4766), 'habitat_baselines.common.utils.is_fp16_supported', 'is_fp16_supported', ([], {}), '()\n', (4764, 4766), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((4931, 4959), 'habitat_baselines.common.utils.is_fp16_autocast_supported', 'is_fp16_autocast_supported', ([], {}), '()\n', (4957, 4959), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((20034, 20074), 'torch.stack', 'torch.stack', (['[reward_a, reward_b]'], {'dim': '(0)'}), '([reward_a, reward_b], dim=0)\n', (20045, 20074), False, 'import torch\n'), ((20488, 20513), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (20511, 20513), False, 'import torch\n'), ((20542, 20563), 'contextlib.suppress', 'contextlib.suppress', ([], 
{}), '()\n', (20561, 20563), False, 'import contextlib\n'), ((25148, 25196), 'torch.zeros_like', 'torch.zeros_like', (["running_episode_stats['count']"], {}), "(running_episode_stats['count'])\n", (25164, 25196), False, 'import torch\n'), ((25512, 25527), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25525, 25527), False, 'import torch\n'), ((26384, 26409), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (26407, 26409), False, 'import torch\n'), ((26438, 26459), 'contextlib.suppress', 'contextlib.suppress', ([], {}), '()\n', (26457, 26459), False, 'import contextlib\n'), ((27899, 27910), 'time.time', 'time.time', ([], {}), '()\n', (27908, 27910), False, 'import time\n'), ((31968, 32002), 'torch.zeros', 'torch.zeros', (['self.envs.num_envs', '(1)'], {}), '(self.envs.num_envs, 1)\n', (31979, 32002), False, 'import torch\n'), ((32023, 32071), 'torch.zeros', 'torch.zeros', (['reward_count', 'self.envs.num_envs', '(1)'], {}), '(reward_count, self.envs.num_envs, 1)\n', (32034, 32071), False, 'import torch\n'), ((32096, 32144), 'torch.zeros', 'torch.zeros', (['reward_count', 'self.envs.num_envs', '(1)'], {}), '(reward_count, self.envs.num_envs, 1)\n', (32107, 32144), False, 'import torch\n'), ((32219, 32259), 'collections.deque', 'deque', ([], {'maxlen': 'ppo_cfg.reward_window_size'}), '(maxlen=ppo_cfg.reward_window_size)\n', (32224, 32259), False, 'from collections import defaultdict, deque\n'), ((33420, 33482), 'habitat.logger.warn', 'logger.warn', (['"""No optimizer state loaded, results may be funky"""'], {}), "('No optimizer state loaded, results may be funky')\n", (33431, 33482), False, 'from habitat import Config, logger\n'), ((42721, 42838), 'habitat.logger.warn', 'logger.warn', (['f"""Config specified {number_of_eval_episodes} eval episodes, dataset only has {total_num_eps}."""'], {}), "(\n f'Config specified {number_of_eval_episodes} eval episodes, dataset only has {total_num_eps}.'\n )\n", (42732, 42838), False, 'from habitat 
import Config, logger\n'), ((42907, 42963), 'habitat.logger.warn', 'logger.warn', (['f"""Evaluating with {total_num_eps} instead."""'], {}), "(f'Evaluating with {total_num_eps} instead.')\n", (42918, 42963), False, 'from habitat import Config, logger\n'), ((43955, 43970), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (43968, 43970), False, 'import torch\n'), ((53253, 53370), 'habitat.logger.warn', 'logger.warn', (['f"""Config specified {number_of_eval_episodes} eval episodes, dataset only has {total_num_eps}."""'], {}), "(\n f'Config specified {number_of_eval_episodes} eval episodes, dataset only has {total_num_eps}.'\n )\n", (53264, 53370), False, 'from habitat import Config, logger\n'), ((53439, 53495), 'habitat.logger.warn', 'logger.warn', (['f"""Evaluating with {total_num_eps} instead."""'], {}), "(f'Evaluating with {total_num_eps} instead.')\n", (53450, 53495), False, 'from habitat import Config, logger\n'), ((55889, 55904), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (55902, 55904), False, 'import torch\n'), ((69234, 69261), 'habitat.logger.info', 'logger.info', (['"""eval_metrics"""'], {}), "('eval_metrics')\n", (69245, 69261), False, 'from habitat import Config, logger\n'), ((69278, 69298), 'habitat.logger.info', 'logger.info', (['metrics'], {}), '(metrics)\n', (69289, 69298), False, 'from habitat import Config, logger\n'), ((23678, 23693), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23691, 23693), False, 'import torch\n'), ((24942, 25014), 'torch.tensor', 'torch.tensor', (['v'], {'dtype': 'torch.float', 'device': 'current_episode_reward.device'}), '(v, dtype=torch.float, device=current_episode_reward.device)\n', (24954, 25014), False, 'import torch\n'), ((25529, 25554), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (25552, 25554), False, 'import torch\n'), ((25583, 25604), 'contextlib.suppress', 'contextlib.suppress', ([], {}), '()\n', (25602, 25604), False, 'import contextlib\n'), ((33743, 33783), 
'habitat_baselines.common.utils.linear_decay', 'linear_decay', (['x', 'self.config.NUM_UPDATES'], {}), '(x, self.config.NUM_UPDATES)\n', (33755, 33783), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((36883, 36936), 'torch.load', 'torch.load', (['projection_path'], {'map_location': 'self.device'}), '(projection_path, map_location=self.device)\n', (36893, 36936), False, 'import torch\n'), ((37520, 37545), 'torch.outer', 'torch.outer', (['axis', 'axis.T'], {}), '(axis, axis.T)\n', (37531, 37545), False, 'import torch\n'), ((37579, 37618), 'torch.stack', 'torch.stack', (['projection_matrices'], {'dim': '(0)'}), '(projection_matrices, dim=0)\n', (37590, 37618), False, 'import torch\n'), ((43972, 43997), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (43995, 43997), False, 'import torch\n'), ((44026, 44047), 'contextlib.suppress', 'contextlib.suppress', ([], {}), '()\n', (44045, 44047), False, 'import contextlib\n'), ((45727, 45787), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float', 'device': 'self.device'}), '(rewards, dtype=torch.float, device=self.device)\n', (45739, 45787), False, 'import torch\n'), ((55906, 55931), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (55929, 55931), False, 'import torch\n'), ((55960, 55981), 'contextlib.suppress', 'contextlib.suppress', ([], {}), '()\n', (55979, 55981), False, 'import contextlib\n'), ((56494, 56538), 'torch.empty', 'torch.empty', (['self.envs.num_envs', 'num_modules'], {}), '(self.envs.num_envs, num_modules)\n', (56505, 56538), False, 'import torch\n'), ((61750, 61810), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float', 'device': 'self.device'}), '(rewards, dtype=torch.float, device=self.device)\n', (61762, 61810), False, 'import torch\n'), ((69594, 69634), 'os.path.join', 'os.path.join', (['output_dir', 
'f"""{label}.pth"""'], {}), "(output_dir, f'{label}.pth')\n", (69606, 69634), False, 'import os\n'), ((69825, 69875), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{proj_str}{label}.pth"""'], {}), "(output_dir, f'{proj_str}{label}.pth')\n", (69837, 69875), False, 'import os\n'), ((17513, 17523), 'numpy.size', 'np.size', (['v'], {}), '(v)\n', (17520, 17523), True, 'import numpy as np\n'), ((34249, 34294), 'habitat_baselines.common.utils.linear_decay', 'linear_decay', (['update', 'self.config.NUM_UPDATES'], {}), '(update, self.config.NUM_UPDATES)\n', (34261, 34294), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((65749, 65810), 'habitat.utils.visualizations.maps.colorize_topdown_map', 'maps.colorize_topdown_map', (['top_down_map'], {'fog_of_war_mask': 'None'}), '(top_down_map, fog_of_war_mask=None)\n', (65774, 65810), False, 'from habitat.utils.visualizations import maps\n'), ((66067, 66099), 'attr.asdict', 'attr.asdict', (['current_episodes[i]'], {}), '(current_episodes[i])\n', (66078, 66099), False, 'import attr\n'), ((72644, 72655), 'time.time', 'time.time', ([], {}), '()\n', (72653, 72655), False, 'import time\n'), ((65276, 65323), 'habitat_baselines.common.utils.batch_obs', 'batch_obs', (['d_stats[metric][i]'], {'dtype': 'torch.half'}), '(d_stats[metric][i], dtype=torch.half)\n', (65285, 65323), False, 'from habitat_baselines.common.utils import batch_obs, batch_list, generate_video, linear_decay, is_fp16_autocast_supported, is_fp16_supported\n'), ((65416, 65448), 'torch.tensor', 'torch.tensor', (['d_stats[metric][i]'], {}), '(d_stats[metric][i])\n', (65428, 65448), False, 'import torch\n'), ((24217, 24242), 'torch.log', 'torch.log', (['(fp_error + 1.0)'], {}), '(fp_error + 1.0)\n', (24226, 24242), False, 'import torch\n')] |
"""A module that carry out the utility's used by the AI algorithms
"""
import pickle
from collections import deque
from typing import Deque
import numpy as np
import tensorflow.keras as tk
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
import ai.config as config
from model.action import Action
from model.game import Game
from model.piece import Color, Type
from .model import softmax_cross_entropy_with_logits, build_alphazero_model
# Default archive location for self-play datasets (not referenced directly in this module).
archive_folder = 'data/alphazero/datasets'
# Folder holding model weight checkpoints; used by load_best_model() and save_model().
weights_folder = 'data/alphazero/weights/'
def to_label(action: Action):
    """Converts the action python object to a label string.

    The label encodes the source square and the diagonal displacement, in
    the form ``"src_r,src_c+dir_r,dir_c"``.

    :param action: the move to encode, exposing ``src`` and ``dst`` cells
    :return: the string label for the move
    """
    src_r, src_c = action.src.r, action.src.c
    delta_r = action.dst.r - src_r
    delta_c = action.dst.c - src_c
    return f"{src_r},{src_c}+{delta_r},{delta_c}"
def valid(x, y, n, m):
    """Check if the coordinates are in bounds.

    :param x: row coordinate
    :param y: column coordinate
    :param n: number of rows on the board
    :param m: number of columns on the board
    :return: True when (x, y) lies inside the n-by-m board, False otherwise
    """
    if x < 0 or x >= n:
        return False
    if y < 0 or y >= m:
        return False
    return True
def get_action_space(board_size=10):
    """Generates all possible actions for a piece in the game of checkers.

    Every label pairs a source square with a diagonal displacement of any
    length that stays on the board, formatted as ``"r,c+dr,dc"``.

    :param board_size: side length of the (square) board
    :return: list of all action label strings
    """
    labels = []
    diagonal_steps = ((1, 1), (1, -1), (-1, -1), (-1, 1))
    for src_r in range(board_size):
        for src_c in range(board_size):
            for step_r, step_c in diagonal_steps:
                for distance in range(1, board_size):
                    delta_r = step_r * distance
                    delta_c = step_c * distance
                    # bounds check written inline so the function is self-contained
                    if 0 <= src_r + delta_r < board_size and 0 <= src_c + delta_c < board_size:
                        labels.append(f"{src_r},{src_c}+{delta_r},{delta_c}")
    return labels
def load_best_model() -> tk.models.Model:
    """Loads the current version of the AlphaZero model.

    Builds the two-headed network architecture, restores the weights of
    ``config.CURRENT_VERSION`` when one exists, and compiles it with the
    AlphaZero value/policy loss.

    :return: the compiled AlphaZero neural network
    """
    with tf.device('/device:GPU:0'):
        print(f'loading version {config.CURRENT_VERSION}')
        model = build_alphazero_model((10, 10, 30), len(get_action_space()), 8, 64, config.REG_CONST)
        if config.CURRENT_VERSION is not None:
            model.load_weights(weights_folder + 'alphazero' + f" {config.CURRENT_VERSION:0>3}" + '.h5')
        # 'learning_rate' replaces the deprecated 'lr' keyword of Keras optimizers.
        model.compile(loss={'value_head': 'mean_squared_error',
                            'policy_head': softmax_cross_entropy_with_logits},
                      optimizer=tk.optimizers.Adam(learning_rate=config.LEARNING_RATE),
                      loss_weights={'value_head': 0.5, 'policy_head': 0.5})
        return model
def save_model(model: tk.models.Model, name='alphazero', version=1):
    """Persist the model weights under a zero-padded version file name.

    :param model: the network whose weights should be written
    :param name: base file name, defaults to 'alphazero'
    :param version: version number encoded into the file name
    """
    file_name = f"{name} {version:0>3}.h5"
    model.save_weights(weights_folder + file_name)
class GameState:
    """A lightweight, hashable snapshot of a checkers game.

    Captures the pieces, turn and no-progress counter of a game so that
    positions can be compared and used as dictionary keys, and can rebuild
    a playable game object on demand.
    """

    def __init__(self, game: Game):
        """Record the mutable parts of *game* needed to reconstruct it."""
        self.white_pieces = game.white_pieces
        self.black_pieces = game.black_pieces
        self.turn = game.current_turn
        self.no_progress = game.no_progress
        self.board_length = game.grid.n
        self.board_width = game.grid.m
        self.game_class = game.__class__

    def get_game(self) -> Game:
        """Rebuild a playable game object from this snapshot."""
        return self.game_class.build(self.white_pieces, self.black_pieces, self.turn, self.no_progress)

    def get_all_possible_states(self):
        """Expand this snapshot into all reachable successor snapshots.

        :return: tuple of (paths, list of successor GameState objects)
        """
        paths, successors = self.get_game().get_all_possible_states()
        return paths, [GameState(successor) for successor in successors]

    def get_all_possible_paths(self):
        """Delegate to the underlying game's legal-path enumeration."""
        return self.get_game().get_all_possible_paths()

    def get_player_turn(self):
        """Return whose turn it is in this snapshot."""
        return self.turn

    def __eq__(self, other):
        # Equal iff same pieces (in order), same turn and same progress count.
        if not isinstance(other, GameState):
            return False
        if self.turn != other.turn or self.no_progress != other.no_progress:
            return False
        return self.white_pieces + self.black_pieces == other.white_pieces + other.black_pieces

    def __hash__(self):
        # Hash over the same fields that __eq__ compares.
        return hash(tuple(self.white_pieces + self.black_pieces) + (self.turn, self.no_progress))
class StateStack:
    """A bounded stack of the most recent game states (the playing history).

    Keeps at most ``max_len`` turns and can encode them into the stacked
    board-plane image consumed by the AlphaZero network, where each turn
    occupies ``max_features`` planes.
    """
    def __init__(self):
        # Most recently pushed state (top of the stack); None while empty.
        self.head = None
        self.max_len = 5  # number of past turns kept in the history window
        self.max_features = 6  # planes per turn: 2 men + 2 kings + 1 turn flag + 1 no-progress count
        self.dq: Deque[GameState] = deque(maxlen=self.max_len)
    def get_input_shape(self):
        # (rows, cols, total planes) of the network input tensor.
        return self.head.board_length, self.head.board_width, self.max_features * self.max_len
    def get_deep_representation_stack(self):
        """Encode the stored history as one numpy image of stacked planes.

        The most recent state occupies the first ``max_features`` planes;
        older states follow. Missing history (fewer than ``max_len`` states)
        leaves the remaining planes zeroed.

        :return: numpy array of shape ``get_input_shape()``
        """
        # initialize the image stack with zeros
        ret = np.zeros(self.get_input_shape())
        # for each turn history we mask it as a numpy array (newest first)
        for idx, state in enumerate(reversed(self.dq)):
            # we join the board pieces in one list for the ease of implementation
            pieces = state.white_pieces + state.black_pieces
            # calculates the base index of this turn's planes (max_features = 6 planes per turn)
            idx *= self.max_features
            for piece in pieces:
                row = piece.cell.r
                column = piece.cell.c
                color_idx = 0 if piece.color == Color.WHITE else 1
                # NOTE(review): dead == -1 is masked as -1 and dead == 0 as +1;
                # presumably -1 marks a piece captured mid-jump — confirm with the Piece model.
                if piece.dead == -1:
                    value = -1
                elif piece.dead == 0:
                    value = 1
                else:
                    continue
                if piece.type == Type.KING:
                    # Mask the king pieces in (3, 4) planes for the (white, black) players respectively
                    ret[row][column][color_idx + idx + 2] = value
                else:
                    # Mask the pawn pieces in (1, 2) planes for the (white, black) players respectively
                    ret[row][column][color_idx + idx] = value
            # Mask the turn flag in the plane (5) of the turn planes
            ret[0][0][idx + 4] = state.turn
            # Mask progress count in last plane
            ret[0][0][idx + 5] = state.no_progress
        return ret
    def push(self, state: GameState):
        """Push *state* on top of the stack, evicting the oldest when full."""
        self.dq.append(state)
        self.head = state
    def pop(self):
        """Remove and return the top state; head becomes the new top.

        NOTE(review): raises IndexError when the pop empties the stack,
        because the new head is read from the (now empty) deque.
        """
        ret = self.dq.pop()
        self.head = self.dq[len(self.dq) - 1]
        return ret
    def pop_left(self):
        # Removes the oldest state; does NOT update head.
        return self.dq.popleft()
    def push_left(self, state: GameState):
        # Inserts a state at the bottom of the stack; does NOT update head.
        self.dq.appendleft(state)
    def __repr__(self):
        return self.dq.__repr__()
    def __len__(self):
        return len(self.dq)
class ActionEncoder(LabelEncoder):
    """A utility to transform the action labels to unique integers and vice versa.

    Extends ``LabelEncoder`` with a record of the fitted action-space shape.
    """
    def __init__(self):
        super().__init__()
        # shape of the action-space array passed to fit(); 0 until fitted
        self.space_shape = 0
    def fit(self, action_space_list):
        """Fit the encoder on the full list of possible actions.

        NOTE(review): unlike sklearn's ``LabelEncoder.fit`` this returns
        ``None`` rather than ``self`` — confirm no caller relies on chaining.
        """
        self.space_shape = np.array(action_space_list).shape
        super().fit_transform(action_space_list)
class SampleBuilder:
    """Accumulates per-move training samples and stamps them with the game outcome."""
    def __init__(self):
        # Bounded long-term sample buffer; oldest samples are evicted automatically.
        self.samples = deque(maxlen=21000)
        # Moves of the game currently in progress, awaiting an outcome.
        self.moves = []
    @staticmethod
    def _dataset_path(version: int, path: str) -> str:
        # Builds e.g. "<path>/dataset 0007.pk" for a given dataset version.
        return ''.join([path, "/dataset ", str(version).zfill(4), ".pk"])
    def add_move(self, state_stack: StateStack, pi: np.array):
        """Record one move: the observed state stack and its policy vector."""
        self.moves.append({'state': state_stack, 'policy': pi})
    def commit_sample(self, value: int, pov: int):
        """Stamp every pending move with the game outcome and archive it.

        :param value: the evaluation of the end game
        :param pov: the evaluation point of view
        :return:
        """
        # The outcome sign is flipped for moves played by the opponent of `pov`.
        for move in self.moves:
            move['value'] = value if move['state'].head.turn == pov else -value
            self.samples.append(move)
        self.moves.clear()
    def save(self, version: int, path: str):
        """Write this builder as a dataset file into the archive folder.

        :param path: archive folder
        :param version: the dataset version
        :return:
        """
        with open(self._dataset_path(version, path), "wb") as f:
            pickle.dump(self, f)
    @staticmethod
    def load(version: int, path: str):
        """Load a specific dataset version from the archive folder.

        :param path: archive folder
        :param version: the dataset version
        :return:
        """
        # NOTE: unpickling executes arbitrary code; only load trusted files.
        with open(SampleBuilder._dataset_path(version, path), "rb") as f:
            return pickle.load(f)
| [
"tensorflow.device",
"pickle.dump",
"collections.deque",
"pickle.load",
"tensorflow.keras.optimizers.Adam",
"numpy.array"
] | [((1885, 1911), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (1894, 1911), True, 'import tensorflow as tf\n'), ((4474, 4500), 'collections.deque', 'deque', ([], {'maxlen': 'self.max_len'}), '(maxlen=self.max_len)\n', (4479, 4500), False, 'from collections import deque\n'), ((7036, 7055), 'collections.deque', 'deque', ([], {'maxlen': '(21000)'}), '(maxlen=21000)\n', (7041, 7055), False, 'from collections import deque\n'), ((6883, 6910), 'numpy.array', 'np.array', (['action_space_list'], {}), '(action_space_list)\n', (6891, 6910), True, 'import numpy as np\n'), ((7924, 7944), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (7935, 7944), False, 'import pickle\n'), ((8280, 8294), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8291, 8294), False, 'import pickle\n'), ((2403, 2446), 'tensorflow.keras.optimizers.Adam', 'tk.optimizers.Adam', ([], {'lr': 'config.LEARNING_RATE'}), '(lr=config.LEARNING_RATE)\n', (2421, 2446), True, 'import tensorflow.keras as tk\n')] |
"""Provides high-level DNDarray initialization functions"""
import numpy as np
import torch
import warnings
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type, Union, List
from .communication import MPI, sanitize_comm, Communication
from .devices import Device
from .dndarray import DNDarray
from .memory import sanitize_memory_layout
from .sanitation import sanitize_in, sanitize_sequence
from .stride_tricks import sanitize_axis, sanitize_shape
from .types import datatype
from . import devices
from . import types
__all__ = [
"arange",
"array",
"asarray",
"empty",
"empty_like",
"eye",
"full",
"full_like",
"linspace",
"logspace",
"meshgrid",
"ones",
"ones_like",
"zeros",
"zeros_like",
]
def arange(
    *args: Union[int, float],
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Union[str, Device]] = None,
    comm: Optional[Communication] = None
) -> DNDarray:
    """
    Return evenly spaced values from the half-open interval ``[start, stop)``.

    Accepts one to three positional arguments, mirroring Python's built-in
    ``range``: ``arange(stop)``, ``arange(start, stop)`` or
    ``arange(start, stop, step)``. For floating point arguments the length of
    the result is :math:`\\lceil(stop-start)/step\\rceil`; due to floating
    point round-off the last element may lie beyond ``stop`` by machine
    epsilon. Prefer :func:`linspace` for non-integer steps.

    Parameters
    ----------
    start : scalar, optional
        Start of the interval (inclusive), defaults to 0.
    stop : scalar
        End of the interval (exclusive, up to rounding effects).
    step : scalar, optional
        Spacing between consecutive values, defaults to 1. If ``step`` is
        given as a positional argument, ``start`` must also be given.
    dtype : datatype, optional
        The type of the output array; inferred from the positional arguments
        when omitted.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str, optional
        Specifies the device the array shall be allocated on, defaults to globally set default device.
    comm : Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.

    See Also
    --------
    :func:`linspace` : Evenly spaced numbers with careful handling of endpoints.

    Examples
    --------
    >>> ht.arange(3)
    DNDarray([0, 1, 2], dtype=ht.int32, device=cpu:0, split=None)
    >>> ht.arange(3.0)
    DNDarray([0., 1., 2.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.arange(3, 7)
    DNDarray([3, 4, 5, 6], dtype=ht.int32, device=cpu:0, split=None)
    >>> ht.arange(3, 7, 2)
    DNDarray([3, 5], dtype=ht.int32, device=cpu:0, split=None)
    """
    # default dtype: int32 (not numpy's int64) when every positional argument
    # is an int, float32 otherwise
    if dtype is None:
        dtype = types.int32 if all(isinstance(arg, int) for arg in args) else types.float32
    # unpack (start, stop, step) and the total element count from the
    # one-, two- or three-argument call forms
    if len(args) == 1:
        start, step = 0, 1
        stop = int(np.ceil(args[0]))
        num = stop
    elif len(args) == 2:
        start, stop = args
        step = 1
        num = int(np.ceil(stop - start))
    elif len(args) == 3:
        start, stop, step = args
        num = int(np.ceil((stop - start) / step))
    else:
        raise TypeError(
            "function takes minimum one and at most 3 positional arguments ({} given)".format(
                len(args)
            )
        )
    # sanitize device and communicator, then determine this rank's chunk
    device = devices.sanitize_device(device)
    comm = sanitize_comm(comm)
    gshape = (num,)
    split = sanitize_axis(gshape, split)
    offset, lshape, _ = comm.chunk(gshape, split)
    # every process only materializes its own chunk of the global range
    local_start = start + offset * step
    local_stop = local_start + lshape[0] * step
    data = torch.arange(local_start, local_stop, step, device=device.torch_device)
    htype = types.canonical_heat_type(dtype)
    data = data.type(htype.torch_type())
    # an arange chunked by comm.chunk is balanced by construction
    return DNDarray(data, gshape, htype, split, device, comm, True)
def array(
    obj: Iterable,
    dtype: Optional[Type[datatype]] = None,
    copy: bool = True,
    ndmin: int = 0,
    order: str = "C",
    split: Optional[int] = None,
    is_split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
) -> DNDarray:
    """
    Create a :class:`~heat.core.dndarray.DNDarray`.

    Parameters
    ----------
    obj : array_like
        A tensor or array, any object exposing the array interface, an object whose ``__array__`` method returns an
        array, or any (nested) sequence.
    dtype : datatype, optional
        The desired data-type for the array. If not given, then the type will be determined as the minimum type required
        to hold the objects in the sequence. This argument can only be used to ‘upcast’ the array. For downcasting, use
        the :func:`~heat.core.dndarray.astype` method.
    copy : bool, optional
        If ``True`` (default), then the object is copied. Otherwise, a copy will only be made if obj is a nested
        sequence or if a copy is needed to satisfy any of the other requirements, e.g. ``dtype``.
    ndmin : int, optional
        Specifies the minimum number of dimensions that the resulting array should have. Ones will, if needed, be
        attached to the shape if ``ndim > 0`` and prefaced in case of ``ndim < 0`` to meet the requirement.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
        meaning the array will be stored in row-major order (C-like). If ``order=‘F’``, the array will be stored in
        column-major order (Fortran-like).
    split : int or None, optional
        The axis along which the passed array content ``obj`` is split and distributed in memory. Mutually exclusive
        with ``is_split``.
    is_split : int or None, optional
        Specifies the axis along which the local data portions, passed in obj, are split across all machines. Useful for
        interfacing with other distributed-memory code. The shape of the global array is automatically inferred.
        Mutually exclusive with ``split``.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on (i.e. globally set default
        device).
    comm : Communication, optional
        Handle to the nodes holding distributed array chunks.

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.array([1, 2, 3])
    DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
    >>> ht.array([1, 2, 3.0])
    DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.array([[1, 2], [3, 4]])
    DNDarray([[1, 2],
              [3, 4]], dtype=ht.int64, device=cpu:0, split=None)
    >>> ht.array([1, 2, 3], ndmin=2)
    DNDarray([[1],
              [2],
              [3]], dtype=ht.int64, device=cpu:0, split=None)
    >>> ht.array([1, 2, 3], dtype=float)
    DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.array([1, 2, 3, 4], split=0)
    DNDarray([1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=0)
    >>> if ht.MPI_WORLD.rank == 0
    >>> a = ht.array([1, 2], is_split=0)
    >>> else:
    >>> a = ht.array([3, 4], is_split=0)
    >>> a
    DNDarray([1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=0)
    >>> a = np.arange(2 * 3).reshape(2, 3)
    >>> a
    array([[ 0, 1, 2],
           [ 3, 4, 5]])
    >>> a.strides
    (24, 8)
    >>> b = ht.array(a)
    >>> b
    DNDarray([[0, 1, 2],
              [3, 4, 5]], dtype=ht.int64, device=cpu:0, split=None)
    >>> b.strides
    (24, 8)
    >>> b.larray.storage()
    0
    1
    2
    3
    4
    5
    [torch.LongStorage of size 6]
    >>> c = ht.array(a, order='F')
    >>> c
    DNDarray([[0, 1, 2],
              [3, 4, 5]], dtype=ht.int64, device=cpu:0, split=None)
    >>> c.strides
    (8, 16)
    >>> c.larray.storage()
    0
    3
    1
    4
    2
    5
    [torch.LongStorage of size 6]
    >>> a = np.arange(4 * 3).reshape(4, 3)
    >>> a.strides
    (24, 8)
    >>> b = ht.array(a, order='F', split=0)
    >>> b
    DNDarray([[ 0, 1, 2],
              [ 3, 4, 5],
              [ 6, 7, 8],
              [ 9, 10, 11]], dtype=ht.int64, device=cpu:0, split=0)
    >>> b.strides
    [0/2] (8, 16)
    [1/2] (8, 16)
    >>> b.larray.storage()
    [0/2] 0
          3
          1
          4
          2
          5
          [torch.LongStorage of size 6]
    [1/2] 6
          9
          7
          10
          8
          11
          [torch.LongStorage of size 6]
    """
    # fast path: the array already exists and every requested attribute matches; no copy needed
    if (
        isinstance(obj, DNDarray)
        and not copy
        and (dtype is None or dtype == obj.dtype)
        and (split is None or split == obj.split)
        and (is_split is None or is_split == obj.split)
        and (device is None or device == obj.device)
    ):
        return obj
    # extract the internal tensor in case of a heat tensor
    if isinstance(obj, DNDarray):
        obj = obj.larray
    # sanitize the data type
    if dtype is not None:
        dtype = types.canonical_heat_type(dtype)
    # sanitize device
    if device is not None:
        device = devices.sanitize_device(device)
    # initialize the local torch tensor, copying if requested
    if bool(copy):
        if isinstance(obj, torch.Tensor):
            # TODO: watch out. At the moment clone() implies losing the underlying memory layout.
            # pytorch fix in progress
            obj = obj.clone().detach()
        else:
            try:
                obj = torch.tensor(
                    obj,
                    device=device.torch_device
                    if device is not None
                    else devices.get_device().torch_device,
                )
            except RuntimeError:
                raise TypeError("invalid data of type {}".format(type(obj)))
    else:
        if not isinstance(obj, DNDarray):
            # as_tensor shares memory with obj when possible (no-copy path)
            obj = torch.as_tensor(
                obj,
                device=device.torch_device
                if device is not None
                else devices.get_device().torch_device,
            )
    # infer dtype from obj if not explicitly given
    if dtype is None:
        dtype = types.canonical_heat_type(obj.dtype)
    else:
        torch_dtype = dtype.torch_type()
        if obj.dtype != torch_dtype:
            obj = obj.type(torch_dtype)
    # infer device from obj if not explicitly given
    if device is None:
        device = devices.sanitize_device(obj.device.type)
    if str(obj.device) != device.torch_device:
        warnings.warn(
            "Array 'obj' is not on device '{}'. It will be moved to it.".format(device), UserWarning
        )
        obj = obj.to(device.torch_device)
    # sanitize minimum number of dimensions
    if not isinstance(ndmin, int):
        raise TypeError("expected ndmin to be int, but was {}".format(type(ndmin)))
    # reshape the object to encompass additional dimensions:
    # positive ndmin appends trailing 1-dims, negative ndmin prepends leading 1-dims
    ndmin_abs = abs(ndmin) - len(obj.shape)
    if ndmin_abs > 0 and ndmin > 0:
        obj = obj.reshape(obj.shape + ndmin_abs * (1,))
    if ndmin_abs > 0 > ndmin:
        obj = obj.reshape(ndmin_abs * (1,) + obj.shape)
    # sanitize the split axes, ensure mutual exclusiveness
    split = sanitize_axis(obj.shape, split)
    is_split = sanitize_axis(obj.shape, is_split)
    if split is not None and is_split is not None:
        raise ValueError("split and is_split are mutually exclusive parameters")
    # sanitize comm object
    comm = sanitize_comm(comm)
    # determine the local and the global shape. If split is None, they are identical
    gshape = list(obj.shape)
    lshape = gshape.copy()
    balanced = True
    # content shall be split, chunk the passed data object up
    if split is not None:
        _, _, slices = comm.chunk(gshape, split)
        obj = obj[slices].clone()
        obj = sanitize_memory_layout(obj, order=order)
    # check with the neighboring rank whether the local shape would fit into a global shape
    elif is_split is not None:
        gshape = np.array(gshape)
        lshape = np.array(lshape)
        obj = sanitize_memory_layout(obj, order=order)
        # pass the local shape one rank to the right so each rank can compare with its left neighbor
        if comm.rank < comm.size - 1:
            comm.Isend(lshape, dest=comm.rank + 1)
        if comm.rank != 0:
            # look into the message of the neighbor to see whether the shape length fits
            status = MPI.Status()
            comm.Probe(source=comm.rank - 1, status=status)
            length = status.Get_count() // lshape.dtype.itemsize
            # the number of shape elements does not match with the 'left' rank;
            # mark the mismatch by poisoning gshape[is_split] with the most negative int
            if length != len(lshape):
                discard_buffer = np.empty(length)
                comm.Recv(discard_buffer, source=comm.rank - 1)
                gshape[is_split] = np.iinfo(gshape.dtype).min
            else:
                # check whether the individual shape elements match
                comm.Recv(gshape, source=comm.rank - 1)
                for i in range(length):
                    if i == is_split:
                        continue
                    elif lshape[i] != gshape[i] and lshape[i] - 1 != gshape[i]:
                        gshape[is_split] = np.iinfo(gshape.dtype).min
        # sum up the elements along the split dimension; any poisoned rank drives the sum negative
        reduction_buffer = np.array(gshape[is_split])
        comm.Allreduce(MPI.IN_PLACE, reduction_buffer, MPI.SUM)
        if reduction_buffer < 0:
            raise ValueError("unable to construct tensor, shape of local data chunk does not match")
        ttl_shape = np.array(obj.shape)
        ttl_shape[is_split] = lshape[is_split]
        comm.Allreduce(MPI.IN_PLACE, ttl_shape, MPI.SUM)
        gshape[is_split] = ttl_shape[is_split]
        split = is_split
        # compare to calculated balanced lshape (cf. dndarray.is_balanced())
        gshape = tuple(int(ele) for ele in gshape)
        lshape = tuple(int(ele) for ele in lshape)
        _, _, chk = comm.chunk(gshape, split)
        test_lshape = tuple([x.stop - x.start for x in chk])
        match = 1 if test_lshape == lshape else 0
        gmatch = comm.allreduce(match, MPI.SUM)
        if gmatch != comm.size:
            balanced = False
    elif split is None and is_split is None:
        obj = sanitize_memory_layout(obj, order=order)
    return DNDarray(obj, tuple(gshape), dtype, split, device, comm, balanced)
def asarray(
    obj: Iterable,
    dtype: Optional[Type[datatype]] = None,
    order: str = "C",
    is_split: Optional[bool] = None,
    device: Optional[Union[str, Device]] = None,
) -> DNDarray:
    """
    Convert ``obj`` to a :class:`~heat.core.dndarray.DNDarray`, avoiding a copy
    whenever possible.

    If ``obj`` is already a ``DNDarray`` or ``Tensor`` with matching ``dtype``
    and ``device``, or an ``ndarray`` of the corresponding ``dtype`` on the
    CPU, the underlying data is reused rather than copied.

    Parameters
    ----------
    obj : iterable
        Input data in any form convertible to an array: lists, tuples, nested
        sequences, ndarrays, tensors.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).
    is_split : None or int, optional
        Axis along which the local data portions in ``obj`` are already split
        across machines; the global shape is inferred automatically.
    device : str, ht.Device or None, optional
        Device the array shall be allocated on; inferred from the input data
        by default.

    Examples
    --------
    >>> ht.asarray([1, 2])
    DNDarray([1, 2], dtype=ht.int64, device=cpu:0, split=None)
    >>> a = ht.array([1, 2, 3, 4], dtype=ht.float32)
    >>> ht.asarray(a, dtype=ht.float32) is a
    True
    >>> ht.asarray(a, dtype=ht.float64) is a
    False
    """
    # Delegate to ``array`` with copying disabled; ``array`` itself decides
    # when a copy is unavoidable (dtype/device mismatch, nested sequences).
    no_copy_kwargs = dict(dtype=dtype, copy=False, order=order, is_split=is_split, device=device)
    return array(obj, **no_copy_kwargs)
def empty(
    shape: Union[int, Sequence[int]],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Return a new, uninitialized :class:`~heat.core.dndarray.DNDarray` of the
    given shape and data type, optionally distributed along ``split``.

    Parameters
    ----------
    shape : int or Sequence[int,...]
        Desired shape of the output array, e.g. 1 or (1, 2, 3,).
    dtype : datatype
        The desired HeAT data type for the array.
    split: int, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Device the array shall be allocated on, defaults to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.empty(3)
    DNDarray([0., 0., 0.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.empty(3, dtype=ht.int)
    DNDarray([59140784, 0, 59136816], dtype=ht.int32, device=cpu:0, split=None)
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # torch.empty supplies the (uninitialized) local chunks on every rank
    return __factory(shape, dtype, split, torch.empty, device, comm, order)
def empty_like(
    a: DNDarray,
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Return a new, uninitialized :class:`~heat.core.dndarray.DNDarray` with the
    same shape, type and distribution as ``a``; any of these attributes may be
    overridden explicitly.

    Parameters
    ----------
    a : DNDarray
        The shape and data-type of ``a`` define these same attributes of the returned array.
    dtype : datatype, optional
        Overrides the data type of the result.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Device the array shall be allocated on, defaults to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> x = ht.ones((2, 3,))
    >>> ht.empty_like(x).shape
    (2, 3)
    """
    # delegate attribute inference (shape/dtype/split) to the shared ...-like factory
    return __factory_like(a, dtype, split, empty, device, comm, order=order)
def eye(
    shape: Union[int, Sequence[int]],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Return a new 2-D :class:`~heat.core.dndarray.DNDarray` with ones on the
    diagonal and zeros elsewhere (an identity matrix).

    Parameters
    ----------
    shape : int or Sequence[int,...]
        A single number yields a square matrix of that size; otherwise the
        first value is the number of rows and the second the number of columns.
    dtype : datatype, optional
        Overrides the data type of the result.
    split : int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Device the array shall be allocated on, defaults to the globally set default device.
    comm : Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.eye(2)
    DNDarray([[1., 0.],
              [0., 1.]], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.eye((2, 3), dtype=ht.int32)
    DNDarray([[1, 0, 0],
              [0, 1, 0]], dtype=ht.int32, device=cpu:0, split=None)
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # normalize the requested shape to a (rows, columns) pair
    gshape = (shape, shape) if isinstance(shape, int) else shape
    if len(gshape) == 1:
        gshape = gshape * 2
    split = sanitize_axis(gshape, split)
    device = devices.sanitize_device(device)
    comm = sanitize_comm(comm)
    offset, lshape, _ = comm.chunk(gshape, split)
    # allocate the local chunk as all-zeros, then set the diagonal entries
    data = torch.zeros(
        lshape, dtype=types.canonical_heat_type(dtype).torch_type(), device=device.torch_device
    )
    for k in range(min(lshape)):
        # translate the k-th local diagonal entry to local coordinates:
        # the split dimension is already local, the other one needs the chunk offset
        row = k if split == 0 else k + offset
        col = k if split == 1 else k + offset
        if row >= lshape[0] or col >= lshape[1]:
            break
        data[row][col] = 1
    data = sanitize_memory_layout(data, order=order)
    return DNDarray(
        data, gshape, types.canonical_heat_type(data.dtype), split, device, comm, True
    )
def __factory(
    shape: Union[int, Sequence[int]],
    dtype: Type[datatype],
    split: Optional[int],
    local_factory: Callable,
    device: Device,
    comm: Communication,
    order: str,
) -> DNDarray:
    """
    Shared factory for HeAT :class:`~heat.core.dndarray.DNDarray` initialization.

    Parameters
    ----------
    shape : int or Sequence[ints,...]
        Desired global shape of the output array.
    dtype : datatype
        The desired HeAT data type for the array.
    split : int or None
        The axis along which the array is split and distributed.
    local_factory : callable
        Function that creates the local PyTorch tensor chunk (e.g. ``torch.zeros``).
    device : Device
        Device the array shall be allocated on, defaults to the globally set default device.
    comm : Communication
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.
    """
    # validate and normalize all user-facing arguments first
    shape = sanitize_shape(shape)
    split = sanitize_axis(shape, split)
    dtype = types.canonical_heat_type(dtype)
    device = devices.sanitize_device(device)
    comm = sanitize_comm(comm)
    # each rank allocates only its local chunk of the global shape
    _, lshape, _ = comm.chunk(shape, split)
    local_data = local_factory(lshape, dtype=dtype.torch_type(), device=device.torch_device)
    local_data = sanitize_memory_layout(local_data, order=order)
    return DNDarray(local_data, shape, dtype, split, device, comm, balanced=True)
def __factory_like(
    a: DNDarray,
    dtype: Type[datatype],
    split: Optional[int],
    factory: Callable,
    device: Device,
    comm: Communication,
    order: str = "C",
    **kwargs
) -> DNDarray:
    """
    Abstracted '...-like' factory function for HeAT :class:`~heat.core.dndarray.DNDarray` initialization

    Parameters
    ----------
    a : DNDarray
        The shape and data-type of ``a`` define these same attributes of the returned array.
    dtype : datatype
        The desired HeAT data type for the array.
    split: int or None, optional
        The axis along which the array is split and distributed, defaults to no distribution).
    factory : function
        Function that creates a DNDarray.
    device : str
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
        default device.
    comm: Communication
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
        meaning the array will be stored in row-major order (C-like). If ``order=‘F’``, the array will be stored in
        column-major order (Fortran-like).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # determine the global shape of the object to create
    # attempt in this order: shape property, length of object or default shape (1,)
    try:
        shape = a.shape
    except AttributeError:
        try:
            shape = (len(a),)
        except TypeError:
            shape = (1,)
    # infer the data type, otherwise default to float32
    if dtype is None:
        try:
            dtype = types.heat_type_of(a)
        except TypeError:
            dtype = types.float32
    # infer split axis; strings are explicitly treated as unsplit scalars
    if split is None:
        try:
            split = a.split if not isinstance(a, str) else None
        except AttributeError:
            # do not split at all
            pass
    # use the default communicator, if not set
    comm = sanitize_comm(comm)
    return factory(shape, dtype=dtype, split=split, device=device, comm=comm, order=order, **kwargs)
def full(
    shape: Union[int, Sequence[int]],
    fill_value: Union[int, float],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Return a new :class:`~heat.core.dndarray.DNDarray` of the given shape and
    type, with every element set to ``fill_value``.

    Parameters
    ----------
    shape : int or Sequence[int,...]
        Shape of the new array, e.g., (2, 3) or 2.
    fill_value : scalar
        Fill value.
    dtype : datatype, optional
        The desired data-type for the array
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Device the array shall be allocated on, defaults to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.full((2, 2), ht.inf)
    DNDarray([[inf, inf],
              [inf, inf]], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.full((2, 2), 10)
    DNDarray([[10., 10.],
              [10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    def _filled_factory(*args, **kwargs):
        # bind the requested fill value into torch's local chunk factory
        return torch.full(*args, fill_value=fill_value, **kwargs)

    # Will be redundant with PyTorch 1.7
    if isinstance(fill_value, complex):
        dtype = types.complex64
    return __factory(shape, dtype, split, _filled_factory, device, comm, order=order)
def full_like(
    a: DNDarray,
    fill_value: Union[int, float],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Return a :class:`~heat.core.dndarray.DNDarray` filled with ``fill_value``,
    with the same shape, type and distribution as ``a`` unless overridden.

    Parameters
    ----------
    a : DNDarray
        The shape and data-type of ``a`` define these same attributes of the returned array.
    fill_value : scalar
        Fill value.
    dtype : datatype, optional
        Overrides the data type of the result.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Device the array shall be allocated on, defaults to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Memory layout: row-major (``'C'``, default) or column-major (``'F'``).

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> x = ht.zeros((2, 3,))
    >>> ht.full_like(x, 1.0)
    DNDarray([[1., 1., 1.],
              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    # attribute inference is shared with all other ...-like factories
    like_kwargs = dict(fill_value=fill_value, order=order)
    return __factory_like(a, dtype, split, full, device, comm, **like_kwargs)
def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    num: int = 50,
    endpoint: bool = True,
    retstep: bool = False,
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
) -> Tuple[DNDarray, float]:
    """
    Returns num evenly spaced samples, calculated over the interval ``[start, stop]``. The endpoint of the interval can
    optionally be excluded. There are num equally spaced samples in the closed interval ``[start, stop]`` or the
    half-open interval ``[start, stop)`` (depending on whether endpoint is ``True`` or ``False``).
    If ``retstep`` is ``True``, a tuple ``(samples, step)`` is returned instead of the samples alone.

    Parameters
    ----------
    start: scalar or scalar-convertible
        The starting value of the sample interval, maybe a sequence if convertible to scalar
    stop: scalar or scalar-convertible
        The end value of the sample interval, unless is set to False. In that case, the sequence consists of all but the
        last of ``num+1`` evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint
        is ``False``.
    num: int, optional
        Number of samples to generate, defaults to 50. Must be positive.
    endpoint: bool, optional
        If ``True``, stop is the last sample, otherwise, it is not included.
    retstep: bool, optional
        If ``True``, return (samples, step), where step is the spacing between samples.
    dtype: dtype, optional
        The type of the output array.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
        default device.
    comm : Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.

    Raises
    ------
    ValueError
        If ``num`` is not a positive integer.

    Examples
    --------
    >>> ht.linspace(2.0, 3.0, num=5)
    DNDarray([2.0000, 2.2500, 2.5000, 2.7500, 3.0000], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.linspace(2.0, 3.0, num=5, endpoint=False)
    DNDarray([2.0000, 2.2000, 2.4000, 2.6000, 2.8000], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.linspace(2.0, 3.0, num=5, retstep=True)
    (DNDarray([2.0000, 2.2500, 2.5000, 2.7500, 3.0000], dtype=ht.float32, device=cpu:0, split=None), 0.25)
    """
    # sanitize input parameters
    start = float(start)
    stop = float(stop)
    num = int(num)
    if num <= 0:
        # Bug fix: the condition rejects num == 0 as well, so the sample count
        # must be strictly positive (the old message said "non-negative").
        raise ValueError(
            "number of samples 'num' must be a positive integer, but was {}".format(num)
        )
    # max(1, ...) guards against division by zero for num == 1
    step = (stop - start) / max(1, num - 1 if endpoint else num)
    # sanitize device and comm
    device = devices.sanitize_device(device)
    comm = sanitize_comm(comm)
    # infer local and global shapes
    gshape = (num,)
    split = sanitize_axis(gshape, split)
    offset, lshape, _ = comm.chunk(gshape, split)
    balanced = True
    # compose the local tensor: each rank generates only its own chunk,
    # shifted by its offset into the global sequence
    start += offset * step
    stop = start + lshape[0] * step - step
    data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
    if dtype is not None:
        data = data.type(types.canonical_heat_type(dtype).torch_type())
    # construct the resulting global tensor
    ht_tensor = DNDarray(
        data, gshape, types.canonical_heat_type(data.dtype), split, device, comm, balanced
    )
    if retstep:
        return ht_tensor, step
    return ht_tensor
def logspace(
    start: Union[int, float],
    stop: Union[int, float],
    num: int = 50,
    endpoint: bool = True,
    base: float = 10.0,
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
) -> DNDarray:
    """
    Return numbers spaced evenly on a log scale: in linear space, the sequence
    runs from ``base**start`` to ``base**stop`` (inclusive when ``endpoint`` is
    ``True``).

    Parameters
    ----------
    start : scalar or scalar-convertible
        ``base**start`` is the starting value of the sequence.
    stop : scalar or scalar-convertible
        ``base**stop`` is the final value of the sequence, unless `endpoint` is ``False``. In that case, ``num+1``
        values are spaced over the interval in log-space, of which all but the last (a sequence of length ``num``) are
        returned.
    num : int, optional
        Number of samples to generate.
    endpoint : bool, optional
        If ``True``, `stop` is the last sample. Otherwise, it is not included.
    base : float, optional
        The base of the log space. The step size between the elements in :math:`ln(samples) / ln(base)` (or
        :math:`base(samples)`) is uniform.
    dtype : datatype, optional
        The type of the output array. If ``dtype`` is not given, infer the data type from the other input arguments.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
        default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.

    See Also
    --------
    :func:`arange` : Step-size based variant of :func:`linspace`.
    :func:`linspace` : Samples uniformly spaced in linear space instead of log space.

    Examples
    --------
    >>> ht.logspace(2.0, 3.0, num=4)
    DNDarray([ 100.0000,  215.4434,  464.1590, 1000.0000], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.logspace(2.0, 3.0, num=4, endpoint=False)
    DNDarray([100.0000, 177.8279, 316.2278, 562.3413], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.logspace(2.0, 3.0, num=4, base=2.0)
    DNDarray([4.0000, 5.0397, 6.3496, 8.0000], dtype=ht.float32, device=cpu:0, split=None)
    """
    # Generate the exponents uniformly in linear space, then exponentiate.
    exponents = linspace(
        start, stop, num=num, endpoint=endpoint, split=split, device=device, comm=comm
    )
    samples = pow(base, exponents)
    if dtype is not None:
        samples = samples.astype(dtype, copy=False)
    return samples
def meshgrid(*arrays: Sequence[DNDarray], indexing: str = "xy") -> List[DNDarray]:
    """
    Returns coordinate matrices from coordinate vectors.

    Parameters
    ----------
    arrays : Sequence[ DNDarray ]
        one-dimensional arrays representing grid coordinates. If exactly one vector is distributed, the returned matrices will
        be distributed along the axis equal to the index of this vector in the input list.
    indexing : str, optional
        Cartesian 'xy' or matrix 'ij' indexing of output. It is ignored if zero or one one-dimensional arrays are provided. Default: 'xy' .

    Raises
    ------
    ValueError
        If `indexing` is not 'xy' or 'ij'.
    ValueError
        If more than one input vector is distributed.

    Examples
    --------
    >>> x = ht.arange(4)
    >>> y = ht.arange(3)
    >>> xx, yy = ht.meshgrid(x,y)
    >>> xx
    DNDarray([[0, 1, 2, 3],
              [0, 1, 2, 3],
              [0, 1, 2, 3]], dtype=ht.int32, device=cpu:0, split=None)
    >>> yy
    DNDarray([[0, 0, 0, 0],
              [1, 1, 1, 1],
              [2, 2, 2, 2]], dtype=ht.int32, device=cpu:0, split=None)
    """
    # Index of the (single) distributed input vector, or None if all are local.
    splitted = None
    if indexing not in ["xy", "ij"]:
        raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
    if len(arrays) == 0:
        return []
    arrays = sanitize_sequence(arrays)
    for idx, array in enumerate(arrays):
        sanitize_in(array)
        if array.split is not None:
            # At most one input may be distributed; the output grids inherit
            # their split axis from its position in the input list.
            if splitted is not None:
                raise ValueError("split != None are not supported.")
            splitted = idx
    # pytorch does not support the indexing keyword: switch vectors
    if indexing == "xy" and len(arrays) > 1:
        arrays[0], arrays[1] = arrays[1], arrays[0]
        # Keep the distribution on the vector that changed position: the split
        # axis has to follow the swap so the local torch.meshgrid chunks line up.
        if splitted == 0:
            arrays[0] = arrays[0].resplit(0)
            arrays[1] = arrays[1].resplit(None)
        elif splitted == 1:
            arrays[0] = arrays[0].resplit(None)
            arrays[1] = arrays[1].resplit(0)
    # Build the local grid pieces from each rank's local torch tensors.
    grids = torch.meshgrid(*(array.larray for array in arrays))
    # pytorch does not support indexing keyword: switch back
    if indexing == "xy" and len(arrays) > 1:
        grids = list(grids)
        grids[0], grids[1] = grids[1], grids[0]
    # Global shape of every output grid: one axis per input vector.
    shape = tuple(array.size for array in arrays)
    return list(
        DNDarray(
            array=grid,
            gshape=shape,
            dtype=types.heat_type_of(grid),
            split=splitted,
            device=devices.sanitize_device(grid.device.type),
            comm=sanitize_comm(None),
            balanced=True,
        )
        for grid in grids
    )
def ones(
    shape: Union[int, Sequence[int]],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Create a new :class:`~heat.core.dndarray.DNDarray` of the given shape and
    data type in which every element is one. The array may be distributed
    across multiple nodes along the given split axis.

    Parameters
    ----------
    shape : int or Sequence[int,...]
        Desired shape of the output array, e.g. 1 or (1, 2, 3,).
    dtype : datatype, optional
        The desired HeAT data type for the array.
    split : int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults
        to the globally set default device.
    comm : Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Memory layout of the new array: row-major
        (C-like) for ``'C'`` (the default), column-major (Fortran-like) for ``'F'``.

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.ones(3)
    DNDarray([1., 1., 1.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.ones(3, dtype=ht.int)
    DNDarray([1, 1, 1], dtype=ht.int32, device=cpu:0, split=None)
    >>> ht.ones((2, 3,))
    DNDarray([[1., 1., 1.],
              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # The torch constructor supplies the local chunks of the global array.
    local_factory = torch.ones
    return __factory(shape, dtype, split, local_factory, device, comm, order=order)
def ones_like(
    a: DNDarray,
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Create a new :class:`~heat.core.dndarray.DNDarray` of ones with the same
    shape, type and distribution as ``a``. Data type and distribution strategy
    may be overridden explicitly.

    Parameters
    ----------
    a : DNDarray
        Template array; the result copies its shape and (by default) its dtype.
    dtype : datatype, optional
        Overrides the data type of the result.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults
        to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Memory layout of the new array: row-major
        (C-like) for ``'C'`` (the default), column-major (Fortran-like) for ``'F'``.

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> x = ht.zeros((2, 3,))
    >>> ht.ones_like(x)
    DNDarray([[1., 1., 1.],
              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    # Delegate to the generic "like" factory; ``ones`` produces the payload.
    like_kwargs = {"order": order}
    return __factory_like(a, dtype, split, ones, device, comm, **like_kwargs)
def zeros(
    shape: Union[int, Sequence[int]],
    dtype: Type[datatype] = types.float32,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Create a new :class:`~heat.core.dndarray.DNDarray` of the given shape and
    data type in which every element is zero. The array may be distributed
    across multiple nodes along the given split axis.

    Parameters
    ----------
    shape : int or Sequence[int,...]
        Desired shape of the output array, e.g. 1 or (1, 2, 3,).
    dtype : datatype
        The desired HeAT data type for the array.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults
        to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Memory layout of the new array: row-major
        (C-like) for ``'C'`` (the default), column-major (Fortran-like) for ``'F'``.

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> ht.zeros(3)
    DNDarray([0., 0., 0.], dtype=ht.float32, device=cpu:0, split=None)
    >>> ht.zeros(3, dtype=ht.int)
    DNDarray([0, 0, 0], dtype=ht.int32, device=cpu:0, split=None)
    >>> ht.zeros((2, 3,))
    DNDarray([[0., 0., 0.],
              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # The torch constructor supplies the local chunks of the global array.
    local_factory = torch.zeros
    return __factory(shape, dtype, split, local_factory, device, comm, order=order)
def zeros_like(
    a: DNDarray,
    dtype: Optional[Type[datatype]] = None,
    split: Optional[int] = None,
    device: Optional[Device] = None,
    comm: Optional[Communication] = None,
    order: str = "C",
) -> DNDarray:
    """
    Create a new :class:`~heat.core.dndarray.DNDarray` of zeros with the same
    shape, type and distribution as ``a``. Data type and distribution strategy
    may be overridden explicitly.

    Parameters
    ----------
    a : DNDarray
        Template array; the result copies its shape and (by default) its dtype.
    dtype : datatype, optional
        Overrides the data type of the result.
    split: int or None, optional
        The axis along which the array is split and distributed; ``None`` means no distribution.
    device : str or Device, optional
        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults
        to the globally set default device.
    comm: Communication, optional
        Handle to the nodes holding distributed parts or copies of this array.
    order: str, optional
        Options: ``'C'`` or ``'F'``. Memory layout of the new array: row-major
        (C-like) for ``'C'`` (the default), column-major (Fortran-like) for ``'F'``.

    Raises
    ------
    NotImplementedError
        If order is one of the NumPy options ``'K'`` or ``'A'``.

    Examples
    --------
    >>> x = ht.ones((2, 3,))
    >>> ht.zeros_like(x)
    DNDarray([[0., 0., 0.],
              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
    """
    # TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released.
    # Delegate to the generic "like" factory; ``zeros`` produces the payload.
    like_kwargs = {"order": order}
    return __factory_like(a, dtype, split, zeros, device, comm, **like_kwargs)
| [
"numpy.ceil",
"torch.full",
"numpy.iinfo",
"numpy.array",
"torch.arange",
"torch.meshgrid",
"numpy.empty",
"torch.linspace"
] | [((4971, 5030), 'torch.arange', 'torch.arange', (['start', 'stop', 'step'], {'device': 'device.torch_device'}), '(start, stop, step, device=device.torch_device)\n', (4983, 5030), False, 'import torch\n'), ((36374, 36440), 'torch.linspace', 'torch.linspace', (['start', 'stop', 'lshape[0]'], {'device': 'device.torch_device'}), '(start, stop, lshape[0], device=device.torch_device)\n', (36388, 36440), False, 'import torch\n'), ((41686, 41737), 'torch.meshgrid', 'torch.meshgrid', (['*(array.larray for array in arrays)'], {}), '(*(array.larray for array in arrays))\n', (41700, 41737), False, 'import torch\n'), ((31125, 31175), 'torch.full', 'torch.full', (['*args'], {'fill_value': 'fill_value'}), '(*args, fill_value=fill_value, **kwargs)\n', (31135, 31175), False, 'import torch\n'), ((3929, 3945), 'numpy.ceil', 'np.ceil', (['args[0]'], {}), '(args[0])\n', (3936, 3945), True, 'import numpy as np\n'), ((13404, 13420), 'numpy.array', 'np.array', (['gshape'], {}), '(gshape)\n', (13412, 13420), True, 'import numpy as np\n'), ((13438, 13454), 'numpy.array', 'np.array', (['lshape'], {}), '(lshape)\n', (13446, 13454), True, 'import numpy as np\n'), ((14654, 14680), 'numpy.array', 'np.array', (['gshape[is_split]'], {}), '(gshape[is_split])\n', (14662, 14680), True, 'import numpy as np\n'), ((14899, 14918), 'numpy.array', 'np.array', (['obj.shape'], {}), '(obj.shape)\n', (14907, 14918), True, 'import numpy as np\n'), ((4182, 4203), 'numpy.ceil', 'np.ceil', (['(stop - start)'], {}), '(stop - start)\n', (4189, 4203), True, 'import numpy as np\n'), ((4410, 4440), 'numpy.ceil', 'np.ceil', (['((stop - start) / step)'], {}), '((stop - start) / step)\n', (4417, 4440), True, 'import numpy as np\n'), ((14024, 14040), 'numpy.empty', 'np.empty', (['length'], {}), '(length)\n', (14032, 14040), True, 'import numpy as np\n'), ((14140, 14162), 'numpy.iinfo', 'np.iinfo', (['gshape.dtype'], {}), '(gshape.dtype)\n', (14148, 14162), True, 'import numpy as np\n'), ((14543, 14565), 'numpy.iinfo', 
'np.iinfo', (['gshape.dtype'], {}), '(gshape.dtype)\n', (14551, 14565), True, 'import numpy as np\n')] |
# Find angles in degrees of skeleton segments
import os
import cv2
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import color_palette
from plantcv.plantcv._debug import _debug
def segment_angle(segmented_img, objects, label="default"):
    """ Calculate angle of segments (in degrees) by fitting a linear regression line to segments.

        Inputs:
        segmented_img = Segmented image to plot slope lines and angles on
        objects       = List of contours
        label         = optional label parameter, modifies the variable name of observations recorded

        Returns:
        labeled_img    = Segmented debugging image with angles labeled

        :param segmented_img: numpy.ndarray
        :param objects: list
        :param label: str
        :return labeled_img: numpy.ndarray
        """
    label_coord_x = []
    label_coord_y = []
    segment_angles = []
    labeled_img = segmented_img.copy()
    # Use a previously saved color scale if available
    rand_color = color_palette(num=len(objects), saved=True)
    for i, cnt in enumerate(objects):
        # Find bounds for regression lines to get drawn
        rect = cv2.minAreaRect(cnt)
        pts = cv2.boxPoints(rect)
        df = pd.DataFrame(pts, columns=('x', 'y'))
        x_max = int(df['x'].max())
        x_min = int(df['x'].min())
        # Find line fit to each segment: (vx, vy) is the direction vector,
        # (x, y) a point on the fitted line.
        [vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
        slope = -vy / vx
        left_list = int(((x - x_min) * slope) + y)
        right_list = int(((x - x_max) * slope) + y)
        if slope > 1000000 or slope < -1000000:
            # Near-vertical segment: the slope line cannot be rendered sensibly.
            print("Slope of contour with ID#", i, "is", slope, "and cannot be plotted.")
        else:
            # Draw slope lines
            cv2.line(labeled_img, (x_max - 1, right_list), (x_min, left_list), rand_color[i], 1)
        # Store coordinates for labels (always, so all lists stay in sync)
        label_coord_x.append(objects[i][0][0][0])
        label_coord_y.append(objects[i][0][0][1])
        # Calculate degrees from slopes
        segment_angles.append(np.arctan(slope[0]) * 180 / np.pi)
    segment_ids = []
    # Second pass: draw the angle text labels. Iterate by index only; the
    # previous version unpacked an unused contour variable from enumerate().
    for i in range(len(segment_angles)):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        text = "{:.2f}".format(segment_angles[i])
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_ids.append(i)
    outputs.add_observation(sample=label, variable='segment_angle', trait='segment angle',
                            method='plantcv.plantcv.morphology.segment_angle', scale='degrees', datatype=list,
                            value=segment_angles, label=segment_ids)
    _debug(visual=labeled_img, filename=os.path.join(params.debug_outdir, f"{params.device}_segmented_angles.png"))
    return labeled_img
| [
"cv2.boxPoints",
"cv2.line",
"os.path.join",
"plantcv.plantcv.outputs.add_observation",
"cv2.putText",
"cv2.minAreaRect",
"pandas.DataFrame",
"cv2.fitLine",
"numpy.arctan"
] | [((2615, 2834), 'plantcv.plantcv.outputs.add_observation', 'outputs.add_observation', ([], {'sample': 'label', 'variable': '"""segment_angle"""', 'trait': '"""segment angle"""', 'method': '"""plantcv.plantcv.morphology.segment_angle"""', 'scale': '"""degrees"""', 'datatype': 'list', 'value': 'segment_angles', 'label': 'segment_ids'}), "(sample=label, variable='segment_angle', trait=\n 'segment angle', method='plantcv.plantcv.morphology.segment_angle',\n scale='degrees', datatype=list, value=segment_angles, label=segment_ids)\n", (2638, 2834), False, 'from plantcv.plantcv import outputs\n'), ((1197, 1217), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (1212, 1217), False, 'import cv2\n'), ((1232, 1251), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (1245, 1251), False, 'import cv2\n'), ((1265, 1302), 'pandas.DataFrame', 'pd.DataFrame', (['pts'], {'columns': "('x', 'y')"}), "(pts, columns=('x', 'y'))\n", (1277, 1302), True, 'import pandas as pd\n'), ((1439, 1490), 'cv2.fitLine', 'cv2.fitLine', (['objects[i]', 'cv2.DIST_L2', '(0)', '(0.01)', '(0.01)'], {}), '(objects[i], cv2.DIST_L2, 0, 0.01, 0.01)\n', (1450, 1490), False, 'import cv2\n'), ((2349, 2528), 'cv2.putText', 'cv2.putText', ([], {'img': 'labeled_img', 'text': 'text', 'org': '(w, h)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': 'params.text_size', 'color': '(150, 150, 150)', 'thickness': 'params.text_thickness'}), '(img=labeled_img, text=text, org=(w, h), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=params.text_size, color=(150, 150, 150),\n thickness=params.text_thickness)\n', (2360, 2528), False, 'import cv2\n'), ((1814, 1902), 'cv2.line', 'cv2.line', (['labeled_img', '(x_max - 1, right_list)', '(x_min, left_list)', 'rand_color[i]', '(1)'], {}), '(labeled_img, (x_max - 1, right_list), (x_min, left_list),\n rand_color[i], 1)\n', (1822, 1902), False, 'import cv2\n'), ((2923, 2997), 'os.path.join', 'os.path.join', (['params.debug_outdir', 
'f"""{params.device}_segmented_angles.png"""'], {}), "(params.debug_outdir, f'{params.device}_segmented_angles.png')\n", (2935, 2997), False, 'import os\n'), ((2110, 2129), 'numpy.arctan', 'np.arctan', (['slope[0]'], {}), '(slope[0])\n', (2119, 2129), True, 'import numpy as np\n')] |
import numpy as np
import time
from sklearn.utils import check_random_state
from scipy.special import expit
def sigmoid(x):
return expit(np.clip(x, -30, 30))
class RestrictedBoltzmannMachine:
    """Bernoulli-Bernoulli RBM trained with persistent contrastive divergence.

    Parameters
    ----------
    n_hidden_variables : int
        Number of hidden units.
    learning_rate : float
        Step size of the momentum gradient updates.
    batch_size : int
        Mini-batch size for stochastic gradient descent.
    n_epochs : int
        Number of passes over the training data.
    mu : float
        Momentum coefficient.
    pcd_steps : int
        Gibbs steps per update of the persistent chains.
    random_state : None, int or numpy RandomState
        Seed / generator controlling initialization and sampling.
    verbose : int
        Verbosity level (0 = silent).
    """

    def __init__(self, n_hidden_variables, learning_rate=0.1, batch_size=20,
                 n_epochs=15, mu=0.5, pcd_steps=1, random_state=None, verbose=0):
        self.n_hidden = n_hidden_variables
        self.random_state = random_state
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.mu = mu
        self.pcd_steps = pcd_steps
        self.verbose = verbose

    def fit(self, X):
        """Train on binary data ``X`` of shape (n_samples, n_visible)."""
        self.random_state_ = check_random_state(self.random_state)
        # Bug fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24; use the builtin ``bool`` instead.
        X = np.asarray(X, dtype=bool)
        self.n_visible_ = X.shape[1]
        self.init_parameters()
        self.stochastic_gradient_descent(X)
        return self

    def init_parameters(self):
        """Initialize weights, biases, momentum buffers and the PCD chains."""
        sdev = 1.0 / np.sqrt(self.n_visible_)
        dim = (self.n_hidden, self.n_visible_)
        self.W_ = self.random_state_.normal(0, sdev, size=dim)
        self.b_ = np.zeros(self.n_visible_)
        self.c_ = np.zeros(self.n_hidden)
        # Momentum accumulators, one per parameter group.
        self.VW_ = np.zeros(self.W_.shape)
        self.Vb_ = np.zeros(self.b_.shape)
        self.Vc_ = np.zeros(self.c_.shape)
        # Persistent fantasy particles; lazily initialized from the first batch.
        self.V = None

    def stochastic_gradient_descent(self, X):
        """Run mini-batch SGD with momentum over ``self.n_epochs`` epochs."""
        bs = self.batch_size
        now = time.time()
        for epoch in range(self.n_epochs):
            # Reshuffle the samples each epoch.
            s = self.random_state_.permutation(X.shape[0])
            X_s = X[s]
            for i in range(0, X_s.shape[0], bs):
                self.gradient_descent_step(X_s[i: i + bs])
                if self.verbose > 2:
                    print('Epoch {0} ({1:.1f}%).'.format(epoch + 1,
                          100*float(i)/(X_s.shape[0] - 1)))
            if self.verbose > 0:
                now, last = time.time(), now
                print('Epoch {0} ({1:.01f}s).'.format(epoch + 1, now - last))
                if self.verbose > 3:
                    print('Average reconstruction error: {0:.3f}.'.format(
                        self.reconstruction_error(X[0: 10*bs])))

    def gradient_descent_step(self, X):
        """Perform one momentum update from mini-batch ``X`` using PCD."""
        if self.V is None:
            # Seed the persistent chains with the first mini-batch.
            self.V = np.array(X, dtype=bool)
        # Advance each persistent chain by ``pcd_steps`` Gibbs steps.
        for i, vi in enumerate(self.V):
            self.V[i] = self.sample(vi, 1, thinning=self.pcd_steps - 1)[0]
        neg_W = np.zeros(self.W_.shape)
        neg_b = np.zeros(self.b_.shape)
        neg_c = np.zeros(self.c_.shape)
        """Note: we skip the division by self.V.shape[0] = self.X.shape[0],
        since it would be cancelled by a multiplication by self.X.shape[0]
        before updating the partial derivatives. """
        for vi in self.V:
            a = sigmoid(self.W_.dot(vi) + self.c_)
            neg_W += a.reshape(-1, 1)*vi.reshape(1, -1)
            neg_b += vi
            neg_c += a
        # Positive phase: expectations under the data distribution.
        partial_W = np.zeros(self.W_.shape)
        partial_b = np.zeros(self.b_.shape)
        partial_c = np.zeros(self.c_.shape)
        for xi in X:
            a = sigmoid(self.W_.dot(xi) + self.c_)
            partial_W += a.reshape(-1, 1)*xi.reshape(1, -1)
            partial_b += xi
            partial_c += a
        partial_W -= neg_W
        partial_b -= neg_b
        partial_c -= neg_c
        # Momentum update: V <- mu*V + lr*gradient, then apply.
        self.VW_ = self.mu*self.VW_ + self.learning_rate*partial_W
        self.Vb_ = self.mu*self.Vb_ + self.learning_rate*partial_b
        self.Vc_ = self.mu*self.Vc_ + self.learning_rate*partial_c
        self.W_ += self.VW_
        self.b_ += self.Vb_
        self.c_ += self.Vc_

    def sample(self, x, sample_size, thinning=0):
        """Draw ``sample_size`` visible configurations by Gibbs sampling from ``x``.

        ``thinning`` extra Gibbs steps are run between kept samples.
        """
        v = np.array(x)
        samples = []
        # Bug fix: use the seeded RNG instead of the global ``np.random`` so
        # that ``random_state`` actually makes sampling reproducible.
        rng = self.random_state_
        for _ in range(sample_size):
            for _ in range(thinning + 1):
                a = sigmoid(self.W_.dot(v) + self.c_)
                thresholds = rng.random_sample(self.n_hidden)
                h = (a > thresholds)
                a = sigmoid(self.W_.T.dot(h) + self.b_)
                thresholds = rng.random_sample(self.n_visible_)
                v = (a > thresholds)
            samples.append(v)
        return np.array(samples, dtype=bool)

    def reconstruction_error(self, X):
        """Mean bitwise disagreement between ``X`` and one-step reconstructions."""
        X = np.asarray(X, dtype=bool)
        e = 0.0
        for xi in X:
            vi = self.sample(xi, 1)[0]
            e += (xi != vi).sum()
        e /= X.size
        return e
return e | [
"numpy.clip",
"sklearn.utils.check_random_state",
"numpy.sqrt",
"numpy.random.random",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"time.time"
] | [((143, 162), 'numpy.clip', 'np.clip', (['x', '(-30)', '(30)'], {}), '(x, -30, 30)\n', (150, 162), True, 'import numpy as np\n'), ((700, 737), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (718, 737), False, 'from sklearn.utils import check_random_state\n'), ((750, 778), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'np.bool'}), '(X, dtype=np.bool)\n', (760, 778), True, 'import numpy as np\n'), ((1153, 1178), 'numpy.zeros', 'np.zeros', (['self.n_visible_'], {}), '(self.n_visible_)\n', (1161, 1178), True, 'import numpy as np\n'), ((1197, 1220), 'numpy.zeros', 'np.zeros', (['self.n_hidden'], {}), '(self.n_hidden)\n', (1205, 1220), True, 'import numpy as np\n'), ((1249, 1272), 'numpy.zeros', 'np.zeros', (['self.W_.shape'], {}), '(self.W_.shape)\n', (1257, 1272), True, 'import numpy as np\n'), ((1292, 1315), 'numpy.zeros', 'np.zeros', (['self.b_.shape'], {}), '(self.b_.shape)\n', (1300, 1315), True, 'import numpy as np\n'), ((1335, 1358), 'numpy.zeros', 'np.zeros', (['self.c_.shape'], {}), '(self.c_.shape)\n', (1343, 1358), True, 'import numpy as np\n'), ((1489, 1500), 'time.time', 'time.time', ([], {}), '()\n', (1498, 1500), False, 'import time\n'), ((2546, 2569), 'numpy.zeros', 'np.zeros', (['self.W_.shape'], {}), '(self.W_.shape)\n', (2554, 2569), True, 'import numpy as np\n'), ((2586, 2609), 'numpy.zeros', 'np.zeros', (['self.b_.shape'], {}), '(self.b_.shape)\n', (2594, 2609), True, 'import numpy as np\n'), ((2626, 2649), 'numpy.zeros', 'np.zeros', (['self.c_.shape'], {}), '(self.c_.shape)\n', (2634, 2649), True, 'import numpy as np\n'), ((3084, 3107), 'numpy.zeros', 'np.zeros', (['self.W_.shape'], {}), '(self.W_.shape)\n', (3092, 3107), True, 'import numpy as np\n'), ((3128, 3151), 'numpy.zeros', 'np.zeros', (['self.b_.shape'], {}), '(self.b_.shape)\n', (3136, 3151), True, 'import numpy as np\n'), ((3172, 3195), 'numpy.zeros', 'np.zeros', (['self.c_.shape'], {}), '(self.c_.shape)\n', (3180, 
3195), True, 'import numpy as np\n'), ((3868, 3879), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3876, 3879), True, 'import numpy as np\n'), ((4377, 4409), 'numpy.array', 'np.array', (['samples'], {'dtype': 'np.bool'}), '(samples, dtype=np.bool)\n', (4385, 4409), True, 'import numpy as np\n'), ((4470, 4498), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'np.bool'}), '(X, dtype=np.bool)\n', (4480, 4498), True, 'import numpy as np\n'), ((999, 1023), 'numpy.sqrt', 'np.sqrt', (['self.n_visible_'], {}), '(self.n_visible_)\n', (1006, 1023), True, 'import numpy as np\n'), ((2358, 2384), 'numpy.array', 'np.array', (['X'], {'dtype': 'np.bool'}), '(X, dtype=np.bool)\n', (2366, 2384), True, 'import numpy as np\n'), ((4072, 4103), 'numpy.random.random', 'np.random.random', (['self.n_hidden'], {}), '(self.n_hidden)\n', (4088, 4103), True, 'import numpy as np\n'), ((4243, 4276), 'numpy.random.random', 'np.random.random', (['self.n_visible_'], {}), '(self.n_visible_)\n', (4259, 4276), True, 'import numpy as np\n'), ((1980, 1991), 'time.time', 'time.time', ([], {}), '()\n', (1989, 1991), False, 'import time\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import booz_xform as bx
wout_filename = 'test_files/wout_li383_1.4m.nc'

# Run the Boozer-coordinate transformation on a single flux surface of the
# VMEC equilibrium stored in the wout file.
b = bx.Booz_xform()
b.read_wout(wout_filename)
b.compute_surfs = [47]
b.run()

# First figure: default (filled) surface plot of the transformed field.
bx.surfplot(b)
plt.tight_layout()

# Second figure: unfilled contours at fixed levels from 1.3 to 2.0 in
# steps of 0.05, using the jet colormap.
plt.figure()
contour_levels = np.arange(1.3, 2.0, 0.05)
bx.surfplot(b, fill=False, cmap=plt.cm.jet, levels=contour_levels)
plt.tight_layout()

plt.show()
| [
"booz_xform.surfplot",
"booz_xform.Booz_xform",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((152, 167), 'booz_xform.Booz_xform', 'bx.Booz_xform', ([], {}), '()\n', (165, 167), True, 'import booz_xform as bx\n'), ((226, 240), 'booz_xform.surfplot', 'bx.surfplot', (['b'], {}), '(b)\n', (237, 240), True, 'import booz_xform as bx\n'), ((241, 259), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (257, 259), True, 'import matplotlib.pyplot as plt\n'), ((261, 273), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (271, 273), True, 'import matplotlib.pyplot as plt\n'), ((352, 370), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (368, 370), True, 'import matplotlib.pyplot as plt\n'), ((372, 382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (380, 382), True, 'import matplotlib.pyplot as plt\n'), ((325, 350), 'numpy.arange', 'np.arange', (['(1.3)', '(2.0)', '(0.05)'], {}), '(1.3, 2.0, 0.05)\n', (334, 350), True, 'import numpy as np\n')] |
import numpy as np
from flask import request
from chatbot.common.Debug import flush
from chatbot.common.Talk import parse, get_ml_vars, get_ml_model, create_input
from chatbot.api.helpers.responses.Talk import TalkResponse
from chatbot.models.TalkLog import TalkLogModel
from chatbot.api.domain.repositories.TalkLogReposiroty import ITalkLogRepository
class TalkService:
    """Matches a user query against a bot's FAQ classifier and logs the exchange."""

    def __init__(self, talk_log_repository: ITalkLogRepository):
        self.talk_log_repository = talk_log_repository

    def think(
            self,
            bot_id: int,
            query: str,
            top_count: int = 5,
            threshold: float = 0.5):
        """Score ``query`` against the bot's model and rank the FAQ candidates.

        Returns a 3-tuple ``(faq_id, top_faq_ids, top_faq_info_list)``:
        when the best candidate's score exceeds ``threshold``, its ``faq_id``
        is returned and the candidate id list is ``None``; otherwise
        ``faq_id`` is ``None`` and the top candidate ids are returned.
        """
        # Load the trained variables and model for this bot.
        # (renamed from ``vars``/``input`` to avoid shadowing builtins)
        ml_vars = get_ml_vars(bot_id)
        model = get_ml_model(bot_id)
        info, word_set = parse(query)
        model_input = np.array(
            [create_input(word_to_id=ml_vars['word_to_id'], word_set=word_set)]
        )
        result = model.predict(model_input)[0]
        # Rank class indices by descending score.
        rank = np.argsort(-result)

        top_faq_ids = []
        top_faq_info_list = []
        # Slicing handles models with fewer classes than ``top_count`` — the
        # previous ``range(top_count)`` indexing raised IndexError in that case.
        for target_index in rank[:top_count]:
            # Copy before annotating with the score so that cached faq_info
            # dicts returned by get_ml_vars are not mutated across requests.
            # NOTE(review): assumes get_ml_vars may cache — confirm.
            faq_info = dict(ml_vars['faq_info_list'][target_index])
            faq_info['score'] = float(result[target_index])
            top_faq_ids.append(faq_info['faq_id'])
            top_faq_info_list.append(faq_info)

        # Robustness: a model with no classes yields no candidates at all.
        if not top_faq_info_list:
            return None, top_faq_ids, top_faq_info_list

        # Debugging aid, disabled by default:
        # self.dump(ml_vars, info, word_set, model_input, result, rank,
        #           top_faq_info_list[0])

        if top_faq_info_list[0]['score'] > threshold:
            return top_faq_info_list[0]['faq_id'], None, top_faq_info_list
        return None, top_faq_ids, top_faq_info_list

    def dump(self, vars, info, word_set, input, result, rank, top):
        """Print intermediate classification state for debugging."""
        flush('info : {}'.format(info))
        flush('word_set : {}'.format(word_set))
        flush('input : {}'.format(input))
        flush('input.shape : {}'.format(input.shape))
        flush('word_to_id : {}'.format(vars['word_to_id']))
        flush('result: {}'.format(result))
        flush('result type: {}'.format(type(result)))
        flush('argmax ; {}'.format(np.argmax(result)))
        flush('argsort ; {}'.format(np.argsort(result)))
        flush('rank : {}'.format(rank))
        flush('top : {}'.format(top))
        for no, i in enumerate(np.argsort(-result)):
            i = int(i)
            if no > 5:
                break
            flush('no {}: index: {} score: {:5.2f}'.format(
                no + 1, i, (result[i] * 100)))

    def add_talk_log(
            self,
            request: request,
            response: TalkResponse,
            top_faq_info_list: list,
            bot_id: int,
            session_id: str):
        """Persist one request/response exchange via the talk-log repository."""
        talk_log = TalkLogModel(
            request=request,
            response=response,
            top_faq_info_list=top_faq_info_list,
            bot_id=bot_id,
            session_id=session_id)
        return self.talk_log_repository.add(talk_log)
| [
"chatbot.common.Talk.get_ml_model",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"chatbot.common.Talk.get_ml_vars",
"chatbot.models.TalkLog.TalkLogModel",
"chatbot.common.Talk.parse",
"chatbot.common.Talk.create_input"
] | [((679, 698), 'chatbot.common.Talk.get_ml_vars', 'get_ml_vars', (['bot_id'], {}), '(bot_id)\n', (690, 698), False, 'from chatbot.common.Talk import parse, get_ml_vars, get_ml_model, create_input\n'), ((715, 735), 'chatbot.common.Talk.get_ml_model', 'get_ml_model', (['bot_id'], {}), '(bot_id)\n', (727, 735), False, 'from chatbot.common.Talk import parse, get_ml_vars, get_ml_model, create_input\n'), ((762, 774), 'chatbot.common.Talk.parse', 'parse', (['query'], {}), '(query)\n', (767, 774), False, 'from chatbot.common.Talk import parse, get_ml_vars, get_ml_model, create_input\n'), ((791, 853), 'chatbot.common.Talk.create_input', 'create_input', ([], {'word_to_id': "vars['word_to_id']", 'word_set': 'word_set'}), "(word_to_id=vars['word_to_id'], word_set=word_set)\n", (803, 853), False, 'from chatbot.common.Talk import parse, get_ml_vars, get_ml_model, create_input\n'), ((870, 887), 'numpy.array', 'np.array', (['[input]'], {}), '([input])\n', (878, 887), True, 'import numpy as np\n'), ((1000, 1019), 'numpy.argsort', 'np.argsort', (['(-result)'], {}), '(-result)\n', (1010, 1019), True, 'import numpy as np\n'), ((2798, 2926), 'chatbot.models.TalkLog.TalkLogModel', 'TalkLogModel', ([], {'request': 'request', 'response': 'response', 'top_faq_info_list': 'top_faq_info_list', 'bot_id': 'bot_id', 'session_id': 'session_id'}), '(request=request, response=response, top_faq_info_list=\n top_faq_info_list, bot_id=bot_id, session_id=session_id)\n', (2810, 2926), False, 'from chatbot.models.TalkLog import TalkLogModel\n'), ((2382, 2401), 'numpy.argsort', 'np.argsort', (['(-result)'], {}), '(-result)\n', (2392, 2401), True, 'import numpy as np\n'), ((2195, 2212), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (2204, 2212), True, 'import numpy as np\n'), ((2251, 2269), 'numpy.argsort', 'np.argsort', (['result'], {}), '(result)\n', (2261, 2269), True, 'import numpy as np\n')] |
from interface import default, Interface
import numpy as np
import pandas as pd
from zipline.utils.sentinel import sentinel
DEFAULT_FX_RATE = sentinel('DEFAULT_FX_RATE')
class FXRateReader(Interface):
def get_rates(self, rate, quote, bases, dts):
"""
Get rates to convert ``bases`` into ``quote``.
Parameters
----------
rate : str
Rate type to load. Readers intended for use with the Pipeline API
should support at least ``zipline.data.fx.DEFAULT_FX_RATE``, which
will be used by default for Pipeline API terms that don't specify a
specific rate.
quote : str
Currency code of the currency to convert into.
bases : np.array[object]
Array of codes of the currencies to convert from. A single currency
may appear multiple times.
dts : pd.DatetimeIndex
Datetimes for which to load rates. Must be sorted in ascending
order and localized to UTC.
Returns
-------
rates : np.array
Array of shape ``(len(dts), len(bases))`` containing foreign
exchange rates mapping currencies from ``bases`` to ``quote``.
The row at index i corresponds to the dt in dts[i].
The column at index j corresponds to the base currency in bases[j].
"""
@default
def get_rate_scalar(self, rate, quote, base, dt):
"""Scalar version of ``get_rates``.
Parameters
----------
rate : str
Rate type to load. Readers intended for use with the Pipeline API
should support at least ``zipline.data.fx.DEFAULT_FX_RATE``, which
will be used by default for Pipeline API terms that don't specify a
specific rate.
quote : str
Currency code of the currency to convert into.
base : str
Currency code of the currency to convert from.
dt : np.datetime64 or pd.Timestamp
Datetime on which to load rate.
Returns
-------
rate : np.float64
Exchange rate from base -> quote on dt.
"""
rates_array = self.get_rates(
rate,
quote,
bases=np.array([base], dtype=object),
dts=pd.DatetimeIndex([dt], tz='UTC'),
)
return rates_array[0, 0]
| [
"pandas.DatetimeIndex",
"numpy.array",
"zipline.utils.sentinel.sentinel"
] | [((145, 172), 'zipline.utils.sentinel.sentinel', 'sentinel', (['"""DEFAULT_FX_RATE"""'], {}), "('DEFAULT_FX_RATE')\n", (153, 172), False, 'from zipline.utils.sentinel import sentinel\n'), ((2280, 2310), 'numpy.array', 'np.array', (['[base]'], {'dtype': 'object'}), '([base], dtype=object)\n', (2288, 2310), True, 'import numpy as np\n'), ((2328, 2360), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[dt]'], {'tz': '"""UTC"""'}), "([dt], tz='UTC')\n", (2344, 2360), True, 'import pandas as pd\n')] |
"""This module specifies classes that model an application traffic pattern.
"""
from numpy.random import default_rng
import random
__all__ = [
"Application",
"SingleConstantApplication",
"SingleRandomApplication",
"MultiConstantApplication",
"MultiRandomApplication",
"MultiPoissonApplication",
]
class Application:
"""Generic class for quantum applications.
Parameters
----------
name : str
A name to identify this application.
Properties
----------
name : str
A name to identify this application.
"""
def __init__(self, name):
self.name = name
def get_pairs(self, timeslot):
"""Return the list of (A, B, max) tuples for a given timeslot.
Parameters
----------
timeslot : int
The timeslot for which we request the information
Returns
-------
list of three-element tuples
The first two elemente are the two end-points that wish to
establish an end-to-end entanglement; the third element is the
maximum number of qubits required by the application.
"""
raise NotImplementedError("Class Application should be inherited")
class SingleApplication(Application):
"""Abstract class to be used by applications returning a single pair.
Also, the number of qubits is always the same, as set in the ctor.
Parameters
----------
name : str
A name to identify this application.
max_qubits : int
The maximum number of qubits required.
Raises
------
ValueError
The maximum number of qubits required is negative.
"""
def __init__(self, name, max_qubits):
super().__init__(name)
if max_qubits < 0:
raise ValueError("Cannot have negative number of qubits specified")
self._max_qubits = max_qubits
def get_pairs(self, timeslot):
# timeslot is unused
pair = self._get_single_pair()
return [(pair[0], pair[1], self._max_qubits)]
def _get_single_pair(self):
raise NotImplementedError("Class SingleApplication should be inherited")
class MultiApplication(Application):
"""Abstract class to be used by applications returning multiple pairs.
Also, the number of qubits is always the same, as set in the ctor.
Parameters
----------
name : str
A name to identify this application.
max_qubits : int
The maximum number of qubits required.
Raises
------
ValueError
The maximum number of qubits required is negative.
"""
def __init__(self, name, max_qubits):
super().__init__(name)
if max_qubits < 0:
raise ValueError("Cannot have negative number of qubits specified")
self._max_qubits = max_qubits
def get_pairs(self, timeslot):
# timeslot is unused
return self._get_pairs()
def _get_pairs(self):
raise NotImplementedError("Class MultiApplication should be inherited")
class SingleConstantApplication(SingleApplication):
"""Return always the same pair, all with the same maximum number of qubits.
Parameters
----------
name : str
A name to identify this application.
alice : str
The name of the first end-point
bob : str
The name of the second end-point
max_qubits : int
The maximum number of qubits required.
"""
def __init__(self, name, alice, bob, max_qubits):
super().__init__(name, max_qubits)
if alice == bob:
raise ValueError(
f"Cannot use the same name in SinglePairConstantApplication: {alice}"
)
self._alice = alice
self._bob = bob
def _get_single_pair(self):
return [self._alice, self._bob]
class SingleRandomApplication(SingleApplication):
"""Return a random pair from a set, all with the same maximum number of qubits.
The `timeslot` parameter in `get_pairs` is ignored, hence multiple calls
to method with the same value of `timeslot` will result, in general,
in a different result.
Parameters
----------
name : str
A name to identify this application.
node_names : iterable
The possible names from which to extract the pair
max_qubits : int
The maximum number of qubits required.
"""
def __init__(self, name, node_names, max_qubits):
super().__init__(name, max_qubits)
self._node_names = set(node_names)
if len(self._node_names) <= 1:
raise ValueError(
(
"Invalid cardinality of set of names passed to "
f"SingleRandomPairs: {len(self._node_names)}"
)
)
def _get_single_pair(self):
return random.sample(self._node_names, 2)
class MultiConstantApplication(MultiApplication):
"""Return always the same pairs, all with the same maximum number of qubits.
Parameters
----------
name : str
A name to identify this application.
pairs : list
The list of pairs to be returned. Must be non-empty.
max_qubits : int
The maximum number of qubits required.
"""
def __init__(self, name, pairs, max_qubits):
super().__init__(name, max_qubits)
if not pairs:
raise ValueError(
"Cannot initialize MultiConstantApplication with an empty list of pairs"
)
self._pairs = []
for pair in pairs:
if pair[0] == pair[1]:
raise ValueError(f"The two end-points cannot be the same: {pair[0]}")
self._pairs.append([pair[0], pair[1], max_qubits])
def _get_pairs(self):
return list(self._pairs)
class MultiRandomApplication(MultiConstantApplication):
"""Return a random list of pairs from a set.
All pairs returned have the same maximum number of qubits.
The `timeslot` parameter in `get_pairs` is ignored, hence multiple calls
to method with the same value of `timeslot` will result, in general,
in a different result.
Parameters
----------
name : str
A name to identify this application.
pairs : list
The list of pairs to be returned. Must be non-empty.
cardinality : int
How many pairs to return. Must be smaller than or equal to the number
of pairs passed as argument to the ctor.
max_qubits : int
The maximum number of qubits required.
"""
def __init__(self, name, pairs, cardinality, max_qubits):
super().__init__(name, pairs, max_qubits)
if cardinality > len(pairs):
raise ValueError(
(
f"In MultiRandomApplication cardinality is too "
f"high ({cardinality}) compared to the number of "
f"pairs available ({len(pairs)})"
)
)
self._cardinality = cardinality
def _get_pairs(self):
return random.sample(self._pairs, self._cardinality)
class MultiPoissonApplication(MultiConstantApplication):
"""Return a random list of pairs from a set.
All pairs returned have thxe same maximum number of qubits.
The `timeslot` parameter in `get_pairs` is ignored, hence multiple calls
to method with the same value of `timeslot` will result, in general,
in a different result.
Parameters
----------
name : str
A name to identify this application.
pairs : list
The list of pairs to be returned. Must be non-empty.
cardinality : int
The average number of pairs to return.
max_qubits : int
The maximum number of qubits required.
seed : int
The seed to initialize the internal RNG.
"""
def __init__(self, name, pairs, cardinality, max_qubits, seed):
super().__init__(name, pairs, max_qubits)
if cardinality < 0:
raise ValueError(
f"Average number of pairs cannot be negative: {cardinality}"
)
self._rng = default_rng(seed)
self._cardinality = cardinality
def _get_pairs(self):
how_many = self._rng.poisson(self._cardinality, None)
return random.choices(self._pairs, k=how_many)
| [
"random.sample",
"random.choices",
"numpy.random.default_rng"
] | [((4863, 4897), 'random.sample', 'random.sample', (['self._node_names', '(2)'], {}), '(self._node_names, 2)\n', (4876, 4897), False, 'import random\n'), ((7074, 7119), 'random.sample', 'random.sample', (['self._pairs', 'self._cardinality'], {}), '(self._pairs, self._cardinality)\n', (7087, 7119), False, 'import random\n'), ((8140, 8157), 'numpy.random.default_rng', 'default_rng', (['seed'], {}), '(seed)\n', (8151, 8157), False, 'from numpy.random import default_rng\n'), ((8302, 8341), 'random.choices', 'random.choices', (['self._pairs'], {'k': 'how_many'}), '(self._pairs, k=how_many)\n', (8316, 8341), False, 'import random\n')] |
import os
import random
import numpy as np
import logging
import argparse
import collections
import open3d as o3d
import sys
print(os.path.abspath(__file__))
sys.path.append(".")
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
from util import config
from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs
# from util.voxelize import voxelize
from util.dataset import SHREC2022
from util.data_util import collate_fn
import pdb
random.seed(123)
np.random.seed(123)
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Point Cloud Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/shrec2022_pointtransformer.yaml', help='config file')
parser.add_argument('opts', help='see config/shrec2022_pointtransformer.yaml for all options',
default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def main():
global args, logger
args = get_parser()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
logger = get_logger()
logger.info(args)
assert args.classes > 1
logger.info("=> creating model ...")
logger.info("Classes: {}".format(args.classes))
if args.arch == 'pointtransformer_seg':
from model.pointtransformer_seg import pointtransformer_seg as Model
else:
raise Exception('architecture not supported yet'.format(args.arch))
if args.curvatureM:
args.fea_dim += 1
if args.curvatureG:
args.fea_dim += 1
if args.curvatureMAX:
args.fea_dim += 1
if args.curvatureMIN:
args.fea_dim += 1
model = Model(c=args.fea_dim, k=args.classes).cuda()
logger.info(model)
if os.path.isfile(args.model_path):
logger.info("=> loading checkpoint '{}'".format(args.model_path))
checkpoint = torch.load(args.model_path)
state_dict = checkpoint['state_dict']
new_state_dict = collections.OrderedDict()
for k, v in state_dict.items():
name = k[7:]
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=True)
logger.info("=> loaded checkpoint '{}' (epoch {})".format(args.model_path, checkpoint['epoch']))
args.epoch = checkpoint['epoch']
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
# criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label).cuda()
# names = [line.rstrip('\n') for line in open(args.names_path)]
# test(model, criterion, names)
test(model)
# def data_prepare():
# if args.data_name == 's3dis':
# data_list = sorted(os.listdir(args.data_root))
# data_list = [item[:-4] for item in data_list if 'Area_{}'.format(args.test_area) in item]
# else:
# raise Exception('dataset not supported yet'.format(args.data_name))
# print("Totally {} samples in val set.".format(len(data_list)))
#
# return data_list
# def data_load(data_name):
# data_path = os.path.join(args.data_root, data_name + '.npy')
# data = np.load(data_path) # xyzrgbl, N*7
# coord, feat, label = data[:, :3], data[:, 3:6], data[:, 6]
#
# idx_data = [] # length equals to the maximum number of points in any voxel
# if args.voxel_size:
# coord_min = np.min(coord, 0)
# coord -= coord_min
# idx_sort, count = voxelize(coord, args.voxel_size, mode=1)
# for i in range(count.max()):
# idx_select = np.cumsum(np.insert(count, 0, 0)[0:-1]) + i % count
# idx_part = idx_sort[idx_select]
# idx_data.append(idx_part)
# else:
# idx_data.append(np.arange(label.shape[0]))
#
# return coord, feat, label, idx_data
# def input_normalize(coord, feat):
# coord_min = np.min(coord, 0)
# coord -= coord_min
# feat = feat / 255.
#
# return coord, feat
# colors
colors = {0: [202, 202, 202], 1: [255, 0, 0], 2: [0, 255, 0], 3: [0, 0, 255], 4: [255, 204, 153],
5: [204, 0, 204], 6: [247, 255, 0], 7: [255, 0, 255], 8: [0, 255, 255], 9: [204, 229, 255]}
THRESHOLD = 2.0
# def test(model, criterion, names):
def test(model):
logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
# batch_time = AverageMeter()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
# args.batch_size_test = 10
model.eval()
check_makedirs(args.save_folder)
# test_data = SHREC2022(data_root=args.data_root, split='val', transform=None, shuffle_index=False)
test_data = SHREC2022(data_root=args.data_root, split='val', transform=None, shuffle_index=False,
curvatureM=args.curvatureM, curvatureG=args.curvatureG,
curvatureMAX=args.curvatureMAX, curvatureMIN=args.curvatureMIN)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size_test,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False,
collate_fn=collate_fn)
pred_save, label_save = [], []
# data_list = data_prepare()
# for idx, item in enumerate(data_list):
# end = time.time()
# pred_save_path = os.path.join(args.save_folder, '{}_{}_pred.npy'.format(item, args.epoch))
# label_save_path = os.path.join(args.save_folder, '{}_{}_label.npy'.format(item, args.epoch))
# if os.path.isfile(pred_save_path) and os.path.isfile(label_save_path):
# logger.info('{}/{}: {}, loaded pred and label.'.format(idx + 1, len(data_list), item))
# pred, label = np.load(pred_save_path), np.load(label_save_path)
# else:
# coord, feat, label, idx_data = data_load(item)
# pred = torch.zeros((label.size, args.classes)).cuda()
# idx_size = len(idx_data) # length equals to the maximum number of points in any voxel
# idx_list, coord_list, feat_list, offset_list = [], [], [], []
# for i in range(idx_size):
# logger.info(
# '{}/{}: {}/{}/{}, {}'.format(idx + 1, len(data_list), i + 1, idx_size, idx_data[0].shape[0], item))
# idx_part = idx_data[i]
# coord_part, feat_part = coord[idx_part], feat[idx_part]
# if args.voxel_max and coord_part.shape[0] > args.voxel_max:
# coord_p, idx_uni, cnt = np.random.rand(coord_part.shape[0]) * 1e-3, np.array([]), 0
# while idx_uni.size != idx_part.shape[0]:
# init_idx = np.argmin(coord_p)
# dist = np.sum(np.power(coord_part - coord_part[init_idx], 2), 1)
# idx_crop = np.argsort(dist)[:args.voxel_max]
# coord_sub, feat_sub, idx_sub = coord_part[idx_crop], feat_part[idx_crop], idx_part[idx_crop]
# dist = dist[idx_crop]
# delta = np.square(1 - dist / np.max(dist))
# coord_p[idx_crop] += delta
# coord_sub, feat_sub = input_normalize(coord_sub, feat_sub)
# idx_list.append(idx_sub), coord_list.append(coord_sub), feat_list.append(
# feat_sub), offset_list.append(idx_sub.size)
# idx_uni = np.unique(np.concatenate((idx_uni, idx_sub)))
# else:
# coord_part, feat_part = input_normalize(coord_part, feat_part)
# idx_list.append(idx_part), coord_list.append(coord_part), \
# feat_list.append(feat_part), offset_list.append(idx_part.size)
#
# batch_num = int(np.ceil(len(idx_list) / args.batch_size_test))
# for i in range(batch_num):
# s_i, e_i = i * args.batch_size_test, min((i + 1) * args.batch_size_test, len(idx_list))
# idx_part, coord_part, feat_part, offset_part = idx_list[s_i:e_i], coord_list[s_i:e_i], \
# feat_list[s_i:e_i], offset_list[s_i:e_i]
# idx_part = np.concatenate(idx_part)
# coord_part = torch.FloatTensor(np.concatenate(coord_part)).cuda(non_blocking=True)
# feat_part = torch.FloatTensor(np.concatenate(feat_part)).cuda(non_blocking=True)
# offset_part = torch.IntTensor(np.cumsum(offset_part)).cuda(non_blocking=True)
#
# with torch.no_grad():
# pred_part = model([coord_part, feat_part, offset_part]) # (n, k)
# torch.cuda.empty_cache()
# pred[idx_part, :] += pred_part
# logger.info(
# 'Test: {}/{}, {}/{}, {}/{}'.format(idx + 1, len(data_list), e_i,
# len(idx_list), args.voxel_max, idx_part.shape[0]))
# # loss = criterion(pred, torch.LongTensor(label).cuda(non_blocking=True)) # for reference
# pred = pred.max(1)[1].data.cpu().numpy()
for i, (coord, feat, label, offset, pid) in enumerate(test_loader): # (n, 3), (n,), (b,), (b,)
coord, feat, label, offset = coord.cuda(non_blocking=True), feat.cuda(non_blocking=True), \
label.cuda(non_blocking=True), offset.cuda(non_blocking=True)
# change to binary segmentation
if args.classes == 2:
label[label != 0] = 1
with torch.no_grad():
output = model([coord, feat, offset])
# pred = output.max(1)[1].cpu().numpy()
pred = (output[:, 1] - output[:, 0] > THRESHOLD).cpu().numpy().astype(int)
label = label.cpu().numpy()
# calculation 1: add per room predictions
intersection, union, target = intersectionAndUnion(pred, label, args.classes, args.ignore_label)
intersection_meter.update(intersection)
union_meter.update(union)
target_meter.update(target)
accuracy = sum(intersection) / (sum(target) + 1e-10)
# logger.info('Test: [{}/{}] '
# 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
# 'Accuracy {accuracy:.4f}.'.format(i + 1, len(test_loader), batch_time=batch_time,
# accuracy=accuracy))
logger.info('Test: [{}/{}] Accuracy {accuracy:.4f}.'.format(i + 1, len(test_loader), accuracy=accuracy))
pred_save.append(pred)
label_save.append(label)
# np.save(pred_save_path, pred)
# np.save(label_save_path, label)
# save prediction
for j in range(offset.shape[0]):
if j == 0:
st, ed = 0, offset[j]
else:
st, ed = offset[j - 1], offset[j]
s_coord, s_label, s_pred, s_pid = coord[st:ed].cpu().numpy(), label[st:ed], pred[st:ed], pid[j].item()
filename = os.path.join(args.save_folder, str(s_pid) + '_pred.ply')
save_prediction(s_coord, s_label, s_pred, filename)
# with open(os.path.join(args.save_folder, "pred.pickle"), 'wb') as handle:
# pickle.dump({'pred': pred_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(args.save_folder, "label.pickle"), 'wb') as handle:
# pickle.dump({'label': label_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)
# calculation 1
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU1 = np.mean(iou_class)
mAcc1 = np.mean(accuracy_class)
allAcc1 = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
# calculation 2
intersection, union, target = intersectionAndUnion(np.concatenate(pred_save),
np.concatenate(label_save), args.classes, args.ignore_label)
iou_class = intersection / (union + 1e-10)
accuracy_class = intersection / (target + 1e-10)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection) / (sum(target) + 1e-10)
logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
logger.info('Val1 result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU1, mAcc1, allAcc1))
for i in range(args.classes):
# logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i],
# names[i]))
logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))
logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
def save_prediction(coord, label, pred, filename):
pred_file = filename
label_file = filename.replace('pred', 'label')
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(coord)
point_colors = np.stack([colors[v] for v in pred], axis=0) / 255.0
pcd.colors = o3d.utility.Vector3dVector(point_colors)
o3d.io.write_point_cloud(pred_file, pcd)
point_colors = np.stack([colors[v] for v in label], axis=0) / 255.0
pcd.colors = o3d.utility.Vector3dVector(point_colors)
o3d.io.write_point_cloud(label_file, pcd)
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"logging.StreamHandler",
"util.common_util.intersectionAndUnion",
"sys.path.append",
"util.config.load_cfg_from_cfg_file",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.stack",
"numpy.random.seed",
"numpy.concatenate",
"util.common_util.AverageMeter",
"model.pointtransfo... | [((160, 180), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (175, 180), False, 'import sys\n'), ((491, 507), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (502, 507), False, 'import random\n'), ((508, 527), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (522, 527), True, 'import numpy as np\n'), ((133, 158), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (148, 158), False, 'import os\n'), ((561, 646), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Point Cloud Semantic Segmentation"""'}), "(description='PyTorch Point Cloud Semantic Segmentation'\n )\n", (584, 646), False, 'import argparse\n'), ((998, 1040), 'util.config.load_cfg_from_cfg_file', 'config.load_cfg_from_cfg_file', (['args.config'], {}), '(args.config)\n', (1027, 1040), False, 'from util import config\n'), ((1209, 1239), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (1226, 1239), False, 'import logging\n'), ((1288, 1311), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1309, 1311), False, 'import logging\n'), ((2320, 2351), 'os.path.isfile', 'os.path.isfile', (['args.model_path'], {}), '(args.model_path)\n', (2334, 2351), False, 'import os\n'), ((4914, 4928), 'util.common_util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4926, 4928), False, 'from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs\n'), ((4947, 4961), 'util.common_util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4959, 4961), False, 'from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs\n'), ((4981, 4995), 'util.common_util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4993, 4995), False, 'from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs\n'), ((5050, 5082), 'util.common_util.check_makedirs', 'check_makedirs', (['args.save_folder'], {}), 
'(args.save_folder)\n', (5064, 5082), False, 'from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs\n'), ((5204, 5418), 'util.dataset.SHREC2022', 'SHREC2022', ([], {'data_root': 'args.data_root', 'split': '"""val"""', 'transform': 'None', 'shuffle_index': '(False)', 'curvatureM': 'args.curvatureM', 'curvatureG': 'args.curvatureG', 'curvatureMAX': 'args.curvatureMAX', 'curvatureMIN': 'args.curvatureMIN'}), "(data_root=args.data_root, split='val', transform=None,\n shuffle_index=False, curvatureM=args.curvatureM, curvatureG=args.\n curvatureG, curvatureMAX=args.curvatureMAX, curvatureMIN=args.curvatureMIN)\n", (5213, 5418), False, 'from util.dataset import SHREC2022\n'), ((5481, 5659), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'args.batch_size_test', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(True)', 'drop_last': '(False)', 'collate_fn': 'collate_fn'}), '(test_data, batch_size=args.batch_size_test,\n shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=\n False, collate_fn=collate_fn)\n', (5508, 5659), False, 'import torch\n'), ((12027, 12045), 'numpy.mean', 'np.mean', (['iou_class'], {}), '(iou_class)\n', (12034, 12045), True, 'import numpy as np\n'), ((12058, 12081), 'numpy.mean', 'np.mean', (['accuracy_class'], {}), '(accuracy_class)\n', (12065, 12081), True, 'import numpy as np\n'), ((12488, 12506), 'numpy.mean', 'np.mean', (['iou_class'], {}), '(iou_class)\n', (12495, 12506), True, 'import numpy as np\n'), ((12518, 12541), 'numpy.mean', 'np.mean', (['accuracy_class'], {}), '(accuracy_class)\n', (12525, 12541), True, 'import numpy as np\n'), ((13370, 13395), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (13393, 13395), True, 'import open3d as o3d\n'), ((13413, 13446), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['coord'], {}), '(coord)\n', (13439, 13446), True, 'import open3d as o3d\n'), ((13536, 
13576), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['point_colors'], {}), '(point_colors)\n', (13562, 13576), True, 'import open3d as o3d\n'), ((13581, 13621), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['pred_file', 'pcd'], {}), '(pred_file, pcd)\n', (13605, 13621), True, 'import open3d as o3d\n'), ((13712, 13752), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['point_colors'], {}), '(point_colors)\n', (13738, 13752), True, 'import open3d as o3d\n'), ((13757, 13798), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['label_file', 'pcd'], {}), '(label_file, pcd)\n', (13781, 13798), True, 'import open3d as o3d\n'), ((1085, 1127), 'util.config.merge_cfg_from_list', 'config.merge_cfg_from_list', (['cfg', 'args.opts'], {}), '(cfg, args.opts)\n', (1111, 1127), False, 'from util import config\n'), ((1430, 1452), 'logging.Formatter', 'logging.Formatter', (['fmt'], {}), '(fmt)\n', (1447, 1452), False, 'import logging\n'), ((2448, 2475), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (2458, 2475), False, 'import torch\n'), ((2547, 2572), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2570, 2572), False, 'import collections\n'), ((10261, 10327), 'util.common_util.intersectionAndUnion', 'intersectionAndUnion', (['pred', 'label', 'args.classes', 'args.ignore_label'], {}), '(pred, label, args.classes, args.ignore_label)\n', (10281, 10327), False, 'from util.common_util import AverageMeter, intersectionAndUnion, check_makedirs\n'), ((12234, 12259), 'numpy.concatenate', 'np.concatenate', (['pred_save'], {}), '(pred_save)\n', (12248, 12259), True, 'import numpy as np\n'), ((12316, 12342), 'numpy.concatenate', 'np.concatenate', (['label_save'], {}), '(label_save)\n', (12330, 12342), True, 'import numpy as np\n'), ((13467, 13510), 'numpy.stack', 'np.stack', (['[colors[v] for v in pred]'], {'axis': '(0)'}), '([colors[v] for v in pred], axis=0)\n', (13475, 
13510), True, 'import numpy as np\n'), ((13642, 13686), 'numpy.stack', 'np.stack', (['[colors[v] for v in label]'], {'axis': '(0)'}), '([colors[v] for v in label], axis=0)\n', (13650, 13686), True, 'import numpy as np\n'), ((2244, 2281), 'model.pointtransformer_seg.pointtransformer_seg', 'Model', ([], {'c': 'args.fea_dim', 'k': 'args.classes'}), '(c=args.fea_dim, k=args.classes)\n', (2249, 2281), True, 'from model.pointtransformer_seg import pointtransformer_seg as Model\n'), ((9937, 9952), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9950, 9952), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 13:20:36 2020
@author: ambar
"""
'''
Imports
'''
import glob
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astropy.nddata import CCDData
import astropy.units as u
import ccdproc
import os
##############################################################
'''
Function Definitions
'''
# function creating a master bias and saving it as 'tmaster_bias.fits'
def createmasterbias():
    """Median-combine all bias frames into a trimmed master bias.

    Reads every frame matching 'HD115709/bias/r*.fit', trims extension 4 to
    the module-level ``sciencewindow`` and writes the pixel-wise median stack
    to 'tmaster_bias.fits' (the cube is sized by the module-level ``science0``).

    Raises:
        RuntimeError: if a frame's extension 4 is not from chip 'A5382-1-7'.
    """
    biaslist = sorted(glob.glob('HD115709/bias/r*.fit'))
    print('Number of biases used =', len(biaslist))
    # Open the first bias only to learn the data dtype for the stack cube.
    with fits.open(biaslist[0]) as hdub:
        bias0 = hdub[4].data
        print(biaslist[0], 'is open, shape:', bias0.shape)
    print(biaslist[0], 'is closed')
    # Cube sized to the (trimmed) science frame, one plane per bias frame.
    biascube = np.zeros((science0.shape[0], science0.shape[1], len(biaslist)),
                        dtype=bias0.dtype)
    print('biascube created with shape :', biascube.shape)
    # Crop and stack biases (biasc = counter, biasn = name).
    for biasc, biasn in enumerate(biaslist):
        print('Open :', biasn)
        with fits.open(biasn) as hdu:
            if hdu[4].header['CHIPNAME'] != 'A5382-1-7':
                # BUGFIX: the original printed a message and then evaluated the
                # bare name 'exit' (a no-op), silently leaving a zero plane in
                # the cube; abort explicitly instead.
                raise RuntimeError(
                    '%s returned wrong chipname for selected extension' % biasn)
            bias = CCDData(hdu[4].data, unit=u.adu)
            tbias = ccdproc.trim_image(bias, fits_section=sciencewindow)
            biascube[:, :, biasc] = tbias.data
    # Pixel-wise median over all frames, written back to disk.
    mbias = np.nanmedian(biascube, 2)
    ccdmbias = CCDData(mbias, unit=u.adu)
    ccdmbias.write('tmaster_bias.fits', overwrite=True)
    print('master bias shape :', ccdmbias.data.shape)
# function creating a master flat and saving it as 'tmaster_flat.fits'
# note that this is not normalized; the function correcting the science frames normalizes internally
def createmasterflat():
    """Median-combine bias-corrected SII flats into 'tmaster_flat.fits'.

    Each frame matching 'HD115709/flat_SII/r*.fit' is trimmed (extension 4)
    to the module-level ``sciencewindow`` and bias-corrected with the
    module-level ``ccdmbias_use`` before stacking.  The result is NOT
    normalized; the science-correction step normalizes internally.

    Raises:
        RuntimeError: if a frame's extension 4 is not from chip 'A5382-1-7'.
    """
    flatlist = sorted(glob.glob('HD115709/flat_SII/r*.fit'))
    print('Number of flats used =', len(flatlist))
    # Open the first flat only to learn the data dtype for the stack cube.
    with fits.open(flatlist[0]) as hduf:
        flat0 = hduf[4].data
        print(flatlist[0], 'is open, shape:', flat0.shape)
    print(flatlist[0], 'is closed')
    # Cube sized to the (trimmed) science frame, one plane per flat frame.
    flatcube = np.zeros((science0.shape[0], science0.shape[1], len(flatlist)),
                        dtype=flat0.dtype)
    # Convert, trim and populate the cube after bias correction.
    for flatc, flatn in enumerate(flatlist):
        print('Open :', flatn)
        with fits.open(flatn) as hdu:
            if hdu[4].header['CHIPNAME'] != 'A5382-1-7':
                # BUGFIX: the original printed a message and then evaluated the
                # bare name 'exit' (a no-op), silently leaving a zero plane in
                # the cube; abort explicitly instead.
                raise RuntimeError(
                    '%s returned wrong chipname for selected extension' % flatn)
            ccdflat = CCDData(hdu[4].data, unit=u.adu)
            ccdtflat = ccdproc.trim_image(ccdflat, fits_section=sciencewindow)
            ccdcflat = ccdproc.subtract_bias(ccdtflat, ccdmbias_use)
            flatcube[:, :, flatc] = ccdcflat.data
    # Pixel-wise median over all frames, written back to disk.
    mflat = np.nanmedian(flatcube, 2)
    ccdmflat = CCDData(mflat, unit=u.adu)
    ccdmflat.write('tmaster_flat.fits', overwrite=True)
#Corrects science images and saves them to the folder 'Corrected_Science'
#(the folder is created automatically if it does not exist)
def correctscience():
    """Bias-subtract and flat-field every science frame.

    Each frame in the module-level ``sciencelist`` is corrected (extension 1)
    with the module-level masters ``ccdmbias_use`` / ``ccdmflat_use`` (the
    flat is normalized by its median) and written to
    'Corrected_Science/<last 12 chars of the source name>'.

    Raises:
        RuntimeError: if a frame's extension 1 is not from chip 'A5382-1-7'.
    """
    # Create the output folder up front; exist_ok avoids the check/create race.
    os.makedirs('Corrected_Science', exist_ok=True)
    for sciencen in sciencelist:
        with fits.open(sciencen) as hdu:
            print('open', sciencen)
            if hdu[1].header['CHIPNAME'] != 'A5382-1-7':
                # BUGFIX: the original printed a message and then evaluated the
                # bare name 'exit' (a no-op), silently skipping the frame;
                # abort explicitly instead.
                raise RuntimeError(
                    '%s returned wrong chipname for selected extension' % sciencen)
            ccdscience = CCDData(hdu[1].data, unit=u.adu)
            cccdscience = ccdproc.flat_correct(
                ccdproc.subtract_bias(ccdscience, ccdmbias_use),
                ccdmflat_use, norm_value=np.nanmedian(ccdmflat_use))
            path = 'Corrected_Science/' + sciencen[-12:]
            cccdscience.write(path, overwrite=True)
##############################################################
'''
Main
'''
# Open one science frame to get the reference shape and trim window.
sciencelist = sorted(glob.glob('HD115709/SII/r*.fit'))
hdus = fits.open(sciencelist[0])
science0 = hdus[1].data
print(sciencelist[0], 'is open, shape:', science0.shape)
#sciencewindow=hdus[1].header['RTDATSEC']
sciencewindow = '[288:1617,1046:2509]'
print(sciencewindow)
hdus.close()
print(sciencelist[0], 'is closed')
# Load an existing master bias into 'ccdmbias_use'; build it if missing/unreadable.
try:
    #get master bias data
    hdumb = fits.open('tmaster_bias.fits')
except Exception:
    # BUGFIX: was a bare 'except:' (which also swallows KeyboardInterrupt
    # and SystemExit); the load code was also duplicated in both branches.
    createmasterbias()
    hdumb = fits.open('tmaster_bias.fits')
ccdmbias_use = CCDData(hdumb[0].data, unit=u.adu)
print('tmaster_bias.fits is open, shape:', ccdmbias_use.shape)
hdumb.close()
print('tmaster_bias.fits is closed')
print('bias loaded in cdmbias_use')
# Load an existing master flat into 'ccdmflat_use'; build it if missing/unreadable.
try:
    #get master flat data
    hdumf = fits.open('tmaster_flat.fits')
except Exception:
    createmasterflat()
    hdumf = fits.open('tmaster_flat.fits')
ccdmflat_use = CCDData(hdumf[0].data, unit=u.adu)
print('tmaster_flat.fits is open,shape:', ccdmflat_use.shape)
hdumf.close()
print('tmaster_flat.fits is closed')
print('flat loaded in ccdmflat_use')
print(np.nanmedian(ccdmflat_use))
correctscience()
| [
"ccdproc.trim_image",
"os.path.exists",
"numpy.nanmedian",
"os.makedirs",
"astropy.nddata.CCDData",
"astropy.io.fits.open",
"ccdproc.subtract_bias",
"glob.glob"
] | [((4345, 4370), 'astropy.io.fits.open', 'fits.open', (['sciencelist[0]'], {}), '(sciencelist[0])\n', (4354, 4370), False, 'from astropy.io import fits\n'), ((653, 675), 'astropy.io.fits.open', 'fits.open', (['biaslist[0]'], {}), '(biaslist[0])\n', (662, 675), False, 'from astropy.io import fits\n'), ((1625, 1650), 'numpy.nanmedian', 'np.nanmedian', (['biascube', '(2)'], {}), '(biascube, 2)\n', (1637, 1650), True, 'import numpy as np\n'), ((1663, 1689), 'astropy.nddata.CCDData', 'CCDData', (['mbias'], {'unit': 'u.adu'}), '(mbias, unit=u.adu)\n', (1670, 1689), False, 'from astropy.nddata import CCDData\n'), ((2173, 2195), 'astropy.io.fits.open', 'fits.open', (['flatlist[0]'], {}), '(flatlist[0])\n', (2182, 2195), False, 'from astropy.io import fits\n'), ((3156, 3181), 'numpy.nanmedian', 'np.nanmedian', (['flatcube', '(2)'], {}), '(flatcube, 2)\n', (3168, 3181), True, 'import numpy as np\n'), ((3194, 3220), 'astropy.nddata.CCDData', 'CCDData', (['mflat'], {'unit': 'u.adu'}), '(mflat, unit=u.adu)\n', (3201, 3220), False, 'from astropy.nddata import CCDData\n'), ((4306, 4338), 'glob.glob', 'glob.glob', (['"""HD115709/SII/r*.fit"""'], {}), "('HD115709/SII/r*.fit')\n", (4315, 4338), False, 'import glob\n'), ((4727, 4757), 'astropy.io.fits.open', 'fits.open', (['"""tmaster_bias.fits"""'], {}), "('tmaster_bias.fits')\n", (4736, 4757), False, 'from astropy.io import fits\n'), ((4775, 4809), 'astropy.nddata.CCDData', 'CCDData', (['hdumb[0].data'], {'unit': 'u.adu'}), '(hdumb[0].data, unit=u.adu)\n', (4782, 4809), False, 'from astropy.nddata import CCDData\n'), ((5393, 5423), 'astropy.io.fits.open', 'fits.open', (['"""tmaster_flat.fits"""'], {}), "('tmaster_flat.fits')\n", (5402, 5423), False, 'from astropy.io import fits\n'), ((5441, 5475), 'astropy.nddata.CCDData', 'CCDData', (['hdumf[0].data'], {'unit': 'u.adu'}), '(hdumf[0].data, unit=u.adu)\n', (5448, 5475), False, 'from astropy.nddata import CCDData\n'), ((5935, 5961), 'numpy.nanmedian', 'np.nanmedian', 
(['ccdmflat_use'], {}), '(ccdmflat_use)\n', (5947, 5961), True, 'import numpy as np\n'), ((511, 544), 'glob.glob', 'glob.glob', (['"""HD115709/bias/r*.fit"""'], {}), "('HD115709/bias/r*.fit')\n", (520, 544), False, 'import glob\n'), ((1181, 1207), 'astropy.io.fits.open', 'fits.open', (['biaslist[biasc]'], {}), '(biaslist[biasc])\n', (1190, 1207), False, 'from astropy.io import fits\n'), ((2040, 2077), 'glob.glob', 'glob.glob', (['"""HD115709/flat_SII/r*.fit"""'], {}), "('HD115709/flat_SII/r*.fit')\n", (2049, 2077), False, 'import glob\n'), ((2639, 2665), 'astropy.io.fits.open', 'fits.open', (['flatlist[flatc]'], {}), '(flatlist[flatc])\n', (2648, 2665), False, 'from astropy.io import fits\n'), ((3493, 3525), 'astropy.io.fits.open', 'fits.open', (['sciencelist[sciencec]'], {}), '(sciencelist[sciencec])\n', (3502, 3525), False, 'from astropy.io import fits\n'), ((5016, 5046), 'astropy.io.fits.open', 'fits.open', (['"""tmaster_bias.fits"""'], {}), "('tmaster_bias.fits')\n", (5025, 5046), False, 'from astropy.io import fits\n'), ((5064, 5098), 'astropy.nddata.CCDData', 'CCDData', (['hdumb[0].data'], {'unit': 'u.adu'}), '(hdumb[0].data, unit=u.adu)\n', (5071, 5098), False, 'from astropy.nddata import CCDData\n'), ((5681, 5711), 'astropy.io.fits.open', 'fits.open', (['"""tmaster_flat.fits"""'], {}), "('tmaster_flat.fits')\n", (5690, 5711), False, 'from astropy.io import fits\n'), ((5729, 5763), 'astropy.nddata.CCDData', 'CCDData', (['hdumf[0].data'], {'unit': 'u.adu'}), '(hdumf[0].data, unit=u.adu)\n', (5736, 5763), False, 'from astropy.nddata import CCDData\n'), ((1276, 1308), 'astropy.nddata.CCDData', 'CCDData', (['hdu[4].data'], {'unit': 'u.adu'}), '(hdu[4].data, unit=u.adu)\n', (1283, 1308), False, 'from astropy.nddata import CCDData\n'), ((1326, 1378), 'ccdproc.trim_image', 'ccdproc.trim_image', (['bias'], {'fits_section': 'sciencewindow'}), '(bias, fits_section=sciencewindow)\n', (1344, 1378), False, 'import ccdproc\n'), ((2737, 2769), 'astropy.nddata.CCDData', 
'CCDData', (['hdu[4].data'], {'unit': 'u.adu'}), '(hdu[4].data, unit=u.adu)\n', (2744, 2769), False, 'from astropy.nddata import CCDData\n'), ((2790, 2845), 'ccdproc.trim_image', 'ccdproc.trim_image', (['ccdflat'], {'fits_section': 'sciencewindow'}), '(ccdflat, fits_section=sciencewindow)\n', (2808, 2845), False, 'import ccdproc\n'), ((2866, 2911), 'ccdproc.subtract_bias', 'ccdproc.subtract_bias', (['ccdtflat', 'ccdmbias_use'], {}), '(ccdtflat, ccdmbias_use)\n', (2887, 2911), False, 'import ccdproc\n'), ((3632, 3664), 'astropy.nddata.CCDData', 'CCDData', (['hdu[1].data'], {'unit': 'u.adu'}), '(hdu[1].data, unit=u.adu)\n', (3639, 3664), False, 'from astropy.nddata import CCDData\n'), ((3683, 3718), 'os.path.exists', 'os.path.exists', (['"""Corrected_Science"""'], {}), "('Corrected_Science')\n", (3697, 3718), False, 'import os\n'), ((3736, 3768), 'os.makedirs', 'os.makedirs', (['"""Corrected_Science"""'], {}), "('Corrected_Science')\n", (3747, 3768), False, 'import os\n'), ((3814, 3861), 'ccdproc.subtract_bias', 'ccdproc.subtract_bias', (['ccdscience', 'ccdmbias_use'], {}), '(ccdscience, ccdmbias_use)\n', (3835, 3861), False, 'import ccdproc\n'), ((3885, 3911), 'numpy.nanmedian', 'np.nanmedian', (['ccdmflat_use'], {}), '(ccdmflat_use)\n', (3897, 3911), True, 'import numpy as np\n')] |
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import numpy as np
import os
import pandas as pd
import json
from keras.applications.densenet import preprocess_input as densenet_preprocess_input
# from keras.applications.resnet50 import preprocess_input as resnet_preprocess_input
# WATCH OUT!!! you should use different preprocess_input function
def preprocess_input(x):
    """Rescale pixel values from [0, 255] to [-1, 1].

    The augmented assignments mutate numpy-array inputs in place (the
    mutated array is also returned).  NOTE(review): this assumes float
    input -- in-place true division would fail on an integer-dtype
    array; confirm the generator always supplies float batches.
    """
    x /= 127.5
    x -= 1.
    return x
# Generators: the Inception-ResNet/Xception/ResNet paths use the local
# [-1, 1] scaling; DenseNet uses its own keras preprocessing.
test_gen = ImageDataGenerator(
    preprocessing_function=preprocess_input)
test_gen_densenet = ImageDataGenerator(
    preprocessing_function=densenet_preprocess_input)
# NOTE(review): the "resnet" generator also uses the local [-1, 1] scaling,
# not keras.applications.resnet50.preprocess_input -- confirm intentional
# (see the commented-out import above).
test_gen_resnet = ImageDataGenerator(
    preprocessing_function=preprocess_input)
# One deterministic iterator per model (shuffle=False, batch_size=1) so
# prediction rows line up with `filenames` below.
test_generator1 = test_gen.flow_from_directory(
    "./data/test", (299, 299), shuffle=False, batch_size=1)
test_generator2 = test_gen.flow_from_directory(
    "./data/test", (299, 299), shuffle=False, batch_size=1)
test_generator_densenet = test_gen_densenet.flow_from_directory(
    "./data/test", (299, 299), shuffle=False, batch_size=1)
test_generator_resnet = test_gen_resnet.flow_from_directory(
    "./data/test", (224, 224), shuffle=False, batch_size=1)
# Load the four trained snapshot models of the ensemble from disk.
model_IR105 = load_model('./backup/inception_resnet_model_epoch45.h5')
model_X20 = load_model('./backup/xception_model_epoch40.h5')
model_D60 = load_model('./backup/densenet_model_60.h5')
model_R50 = load_model('./backup/resnet_model_epoch45.h5')
# Per-model class probabilities over the test set.
pred_IR105 = model_IR105.predict_generator(test_generator1, max_queue_size=10,
                                           workers=1, use_multiprocessing=False, verbose=0)
pred_X20 = model_X20.predict_generator(test_generator2, max_queue_size=10,
                                       workers=1, use_multiprocessing=False, verbose=0)
pred_D60 = model_D60.predict_generator(test_generator_densenet, max_queue_size=10,
                                       workers=1, use_multiprocessing=False, verbose=0)
pred_R50 = model_R50.predict_generator(test_generator_resnet, max_queue_size=10,
                                       workers=1, use_multiprocessing=False, verbose=0)
# Persist the raw probability arrays for offline ensembling.
np.save("inception_resnet_pred.npy", pred_IR105)
np.save("xception_pred.npy", pred_X20)
np.save("densenet_pred.npy", pred_D60)
np.save("resnet_pred.npy", pred_R50)
# save the filenames
with open('filenames.csv', 'w') as f:
    for name in test_generator1.filenames:
        f.write(name.split('/')[-1] + '\n')
# make the lookuptable
train_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = train_gen.flow_from_directory(
    "./data/train", (224, 224), shuffle=False)
lookup_table = train_generator.class_indices
# Invert class-name -> index into index -> class-name so predictions
# can be decoded back to labels.
lookup_table = dict((v, k) for k, v in lookup_table.items())
with open("lookuptable.json", "w") as f:
    json.dump(lookup_table, f)
| [
"json.dump",
"numpy.save",
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator"
] | [((513, 572), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (531, 572), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((599, 667), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'densenet_preprocess_input'}), '(preprocessing_function=densenet_preprocess_input)\n', (617, 667), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((692, 751), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (710, 751), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1236, 1292), 'keras.models.load_model', 'load_model', (['"""./backup/inception_resnet_model_epoch45.h5"""'], {}), "('./backup/inception_resnet_model_epoch45.h5')\n", (1246, 1292), False, 'from keras.models import Model, load_model\n'), ((1305, 1353), 'keras.models.load_model', 'load_model', (['"""./backup/xception_model_epoch40.h5"""'], {}), "('./backup/xception_model_epoch40.h5')\n", (1315, 1353), False, 'from keras.models import Model, load_model\n'), ((1366, 1409), 'keras.models.load_model', 'load_model', (['"""./backup/densenet_model_60.h5"""'], {}), "('./backup/densenet_model_60.h5')\n", (1376, 1409), False, 'from keras.models import Model, load_model\n'), ((1422, 1468), 'keras.models.load_model', 'load_model', (['"""./backup/resnet_model_epoch45.h5"""'], {}), "('./backup/resnet_model_epoch45.h5')\n", (1432, 1468), False, 'from keras.models import Model, load_model\n'), ((2119, 2167), 'numpy.save', 'np.save', (['"""inception_resnet_pred.npy"""', 'pred_IR105'], {}), "('inception_resnet_pred.npy', pred_IR105)\n", (2126, 2167), True, 'import numpy as np\n'), ((2168, 2206), 'numpy.save', 'np.save', (['"""xception_pred.npy"""', 'pred_X20'], {}), 
"('xception_pred.npy', pred_X20)\n", (2175, 2206), True, 'import numpy as np\n'), ((2207, 2245), 'numpy.save', 'np.save', (['"""densenet_pred.npy"""', 'pred_D60'], {}), "('densenet_pred.npy', pred_D60)\n", (2214, 2245), True, 'import numpy as np\n'), ((2246, 2282), 'numpy.save', 'np.save', (['"""resnet_pred.npy"""', 'pred_R50'], {}), "('resnet_pred.npy', pred_R50)\n", (2253, 2282), True, 'import numpy as np\n'), ((2466, 2525), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (2484, 2525), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2775, 2801), 'json.dump', 'json.dump', (['lookup_table', 'f'], {}), '(lookup_table, f)\n', (2784, 2801), False, 'import json\n')] |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific
"""Class to test kernel density estimation for point clouds"""
import os
import sys
import numpy as np
from sklearn.neighbors import KernelDensity
import tensorflow as tf
from absl.testing import parameterized
from tensorflow_graphics.util import test_case
from pylib.pc import PointCloud
from pylib.pc import Grid
from pylib.pc import KDEMode
from pylib.pc import Neighborhood
from pylib.pc.tests import utils
class ComputePDFTest(test_case.TestCase):
  """Compares the point-cloud KDE implementation against scikit-learn."""

  @parameterized.parameters(
    (4, 100, 10, 0.2, 0.1, 2),
    (4, 100, 10, 0.7, 0.1, 2),
    (4, 100, 10, np.sqrt(2), 0.1, 2),
    (4, 100, 10, 0.2, 0.1, 3),
    (4, 100, 10, 0.7, 0.1, 3),
    (4, 100, 10, np.sqrt(3), 0.1, 3),
    (4, 100, 10, 0.2, 0.1, 4),
    (4, 100, 10, np.sqrt(4), 0.1, 4)
  )
  def test_compute_pdf(self,
                       batch_size,
                       num_points,
                       num_samples_per_batch,
                       cell_size,
                       bandwidth,
                       dimension):
    """Checks `compute_pdf` (constant KDE mode) against sklearn.

    Builds a random segmented point cloud, draws sample points from it and
    compares the library-computed densities with a per-batch sklearn
    KernelDensity fit on the cell-size-normalized points.
    """
    cell_sizes = np.float32(np.repeat(cell_size, dimension))
    bandwidths = np.float32(np.repeat(bandwidth, dimension))
    points, batch_ids = utils._create_random_point_cloud_segmented(
        batch_size, batch_size * num_points, dimension,
        equal_sized_batches=True)
    # Sample query points by choosing existing cloud points per batch.
    samples = np.full((batch_size * num_samples_per_batch, dimension),
                      0.0, dtype=float)
    for i in range(batch_size):
      cur_choice = np.random.choice(num_points, num_samples_per_batch,
                                    replace=True)
      samples[num_samples_per_batch * i:num_samples_per_batch * (i + 1), :] = \
          points[cur_choice + i * num_points]
    samples_batch_ids = np.repeat(np.arange(0, batch_size),
                                  num_samples_per_batch)
    point_cloud = PointCloud(points, batch_ids, batch_size)
    grid = Grid(point_cloud, cell_sizes)
    point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size)
    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
    neighborhood.compute_pdf(bandwidths, KDEMode.constant)
    # Densities computed by the implementation under test.
    pdf_tf = neighborhood._pdf
    sorted_points = grid._sorted_points.numpy()
    sorted_batch_ids = grid._sorted_batch_ids.numpy()
    neighbor_ids = neighborhood._neighbors
    # Re-compute the densities with sklearn, one fit per batch (the grid's
    # sorted order groups points by batch id).
    pdf_real = []
    accum_points = []
    prev_batch_i = -1
    for pt_i, batch_i in enumerate(sorted_batch_ids):
      if batch_i != prev_batch_i:
        if len(accum_points) > 0:
          test_points = np.array(accum_points)
          kde_skl = KernelDensity(bandwidth=bandwidth)
          kde_skl.fit(test_points)
          log_pdf = kde_skl.score_samples(test_points)
          pdf = np.exp(log_pdf)
          if len(pdf_real) > 0:
            pdf_real = np.concatenate((pdf_real, pdf), axis=0)
          else:
            pdf_real = pdf
        accum_points = [sorted_points[pt_i] / cell_size]
        prev_batch_i = batch_i
      else:
        accum_points.append(sorted_points[pt_i] / cell_size)
    # Flush the last accumulated batch.
    test_points = np.array(accum_points)
    kde_skl = KernelDensity(bandwidth=bandwidth)
    kde_skl.fit(test_points)
    log_pdf = kde_skl.score_samples(test_points)
    pdf = np.exp(log_pdf)
    if len(pdf_real) > 0:
      pdf_real = np.concatenate((pdf_real, pdf), axis=0)
    else:
      pdf_real = pdf
    # NOTE(review): normalizes by the size of the *last* batch only;
    # presumably valid because equal_sized_batches=True above -- confirm.
    pdf_tf = np.asarray(pdf_tf / float(len(accum_points)))
    pdf_skl = np.asarray(pdf_real)[neighbor_ids[:, 0]]
    self.assertAllClose(pdf_tf, pdf_skl)

  @parameterized.parameters(
    (1, 200, 1, 4, 2),
    (1, 200, 1, 4, 3),
    (1, 100, 1, 4, 4)
  )
  def test_compute_pdf_jacobian(self,
                                batch_size,
                                num_points,
                                num_samples,
                                radius,
                                dimension):
    """Checks the Jacobian of the normalized PDF w.r.t. the input points."""
    cell_sizes = np.float32(np.repeat(radius, dimension))
    bandwidths = np.float32(np.repeat(radius, dimension))
    points, batch_ids = utils._create_random_point_cloud_segmented(
        batch_size, batch_size * num_points, dimension,
        equal_sized_batches=True)
    # Sample query points by choosing existing cloud points per batch.
    samples = np.full((batch_size * num_samples, dimension), 0.0, dtype=float)
    for i in range(batch_size):
      cur_choice = np.random.choice(num_points, num_samples, replace=True)
      samples[num_samples * i:num_samples * (i + 1), :] = \
          points[cur_choice + i * num_points]
    samples_batch_ids = np.repeat(np.arange(0, batch_size), num_samples)

    def compute_pdf(points_in):
      # Rebuilds the neighborhood from scratch so the Jacobian is taken
      # through the whole pipeline.
      point_cloud = PointCloud(points_in, batch_ids, batch_size)
      grid = Grid(point_cloud, cell_sizes)
      point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size)
      neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
      neighborhood.compute_pdf(bandwidths, KDEMode.constant, normalize=True)
      # account for influence of neighborhood size
      _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
      max_num_nb = tf.cast(tf.reduce_max(counts), tf.float32)
      return neighborhood._pdf / max_num_nb

    self.assert_jacobian_is_correct_fn(
        compute_pdf, [np.float32(points)], atol=1e-4, delta=1e-3)
# Run the parameterized test suite when executed as a script.
if __name__ == '__main__':
  test_case.main()
| [
"numpy.sqrt",
"pylib.pc.tests.utils._create_random_point_cloud_segmented",
"tensorflow.unique_with_counts",
"numpy.array",
"numpy.arange",
"numpy.repeat",
"pylib.pc.PointCloud",
"numpy.asarray",
"sklearn.neighbors.KernelDensity",
"numpy.exp",
"numpy.concatenate",
"tensorflow_graphics.util.test... | [((3976, 4062), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(1, 200, 1, 4, 2)', '(1, 200, 1, 4, 3)', '(1, 100, 1, 4, 4)'], {}), '((1, 200, 1, 4, 2), (1, 200, 1, 4, 3), (1, 100, 1, \n 4, 4))\n', (4000, 4062), False, 'from absl.testing import parameterized\n'), ((5707, 5723), 'tensorflow_graphics.util.test_case.main', 'test_case.main', ([], {}), '()\n', (5721, 5723), False, 'from tensorflow_graphics.util import test_case\n'), ((1671, 1791), 'pylib.pc.tests.utils._create_random_point_cloud_segmented', 'utils._create_random_point_cloud_segmented', (['batch_size', '(batch_size * num_points)', 'dimension'], {'equal_sized_batches': '(True)'}), '(batch_size, batch_size *\n num_points, dimension, equal_sized_batches=True)\n', (1713, 1791), False, 'from pylib.pc.tests import utils\n'), ((1819, 1893), 'numpy.full', 'np.full', (['(batch_size * num_samples_per_batch, dimension)', '(0.0)'], {'dtype': 'float'}), '((batch_size * num_samples_per_batch, dimension), 0.0, dtype=float)\n', (1826, 1893), True, 'import numpy as np\n'), ((2331, 2372), 'pylib.pc.PointCloud', 'PointCloud', (['points', 'batch_ids', 'batch_size'], {}), '(points, batch_ids, batch_size)\n', (2341, 2372), False, 'from pylib.pc import PointCloud\n'), ((2384, 2413), 'pylib.pc.Grid', 'Grid', (['point_cloud', 'cell_sizes'], {}), '(point_cloud, cell_sizes)\n', (2388, 2413), False, 'from pylib.pc import Grid\n'), ((2441, 2491), 'pylib.pc.PointCloud', 'PointCloud', (['samples', 'samples_batch_ids', 'batch_size'], {}), '(samples, samples_batch_ids, batch_size)\n', (2451, 2491), False, 'from pylib.pc import PointCloud\n'), ((2511, 2562), 'pylib.pc.Neighborhood', 'Neighborhood', (['grid', 'cell_sizes', 'point_cloud_samples'], {}), '(grid, cell_sizes, point_cloud_samples)\n', (2523, 2562), False, 'from pylib.pc import Neighborhood\n'), ((3526, 3548), 'numpy.array', 'np.array', (['accum_points'], {}), '(accum_points)\n', (3534, 3548), True, 'import numpy as 
np\n'), ((3563, 3597), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': 'bandwidth'}), '(bandwidth=bandwidth)\n', (3576, 3597), False, 'from sklearn.neighbors import KernelDensity\n'), ((3686, 3701), 'numpy.exp', 'np.exp', (['log_pdf'], {}), '(log_pdf)\n', (3692, 3701), True, 'import numpy as np\n'), ((4469, 4589), 'pylib.pc.tests.utils._create_random_point_cloud_segmented', 'utils._create_random_point_cloud_segmented', (['batch_size', '(batch_size * num_points)', 'dimension'], {'equal_sized_batches': '(True)'}), '(batch_size, batch_size *\n num_points, dimension, equal_sized_batches=True)\n', (4511, 4589), False, 'from pylib.pc.tests import utils\n'), ((4617, 4681), 'numpy.full', 'np.full', (['(batch_size * num_samples, dimension)', '(0.0)'], {'dtype': 'float'}), '((batch_size * num_samples, dimension), 0.0, dtype=float)\n', (4624, 4681), True, 'import numpy as np\n'), ((1553, 1584), 'numpy.repeat', 'np.repeat', (['cell_size', 'dimension'], {}), '(cell_size, dimension)\n', (1562, 1584), True, 'import numpy as np\n'), ((1614, 1645), 'numpy.repeat', 'np.repeat', (['bandwidth', 'dimension'], {}), '(bandwidth, dimension)\n', (1623, 1645), True, 'import numpy as np\n'), ((1967, 2032), 'numpy.random.choice', 'np.random.choice', (['num_points', 'num_samples_per_batch'], {'replace': '(True)'}), '(num_points, num_samples_per_batch, replace=True)\n', (1983, 2032), True, 'import numpy as np\n'), ((2229, 2253), 'numpy.arange', 'np.arange', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (2238, 2253), True, 'import numpy as np\n'), ((3745, 3784), 'numpy.concatenate', 'np.concatenate', (['(pdf_real, pdf)'], {'axis': '(0)'}), '((pdf_real, pdf), axis=0)\n', (3759, 3784), True, 'import numpy as np\n'), ((3890, 3910), 'numpy.asarray', 'np.asarray', (['pdf_real'], {}), '(pdf_real)\n', (3900, 3910), True, 'import numpy as np\n'), ((1084, 1094), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1091, 1094), True, 'import numpy as np\n'), ((1184, 1194), 
'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1191, 1194), True, 'import numpy as np\n'), ((1253, 1263), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (1260, 1263), True, 'import numpy as np\n'), ((4357, 4385), 'numpy.repeat', 'np.repeat', (['radius', 'dimension'], {}), '(radius, dimension)\n', (4366, 4385), True, 'import numpy as np\n'), ((4415, 4443), 'numpy.repeat', 'np.repeat', (['radius', 'dimension'], {}), '(radius, dimension)\n', (4424, 4443), True, 'import numpy as np\n'), ((4733, 4788), 'numpy.random.choice', 'np.random.choice', (['num_points', 'num_samples'], {'replace': '(True)'}), '(num_points, num_samples, replace=True)\n', (4749, 4788), True, 'import numpy as np\n'), ((4929, 4953), 'numpy.arange', 'np.arange', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (4938, 4953), True, 'import numpy as np\n'), ((5020, 5064), 'pylib.pc.PointCloud', 'PointCloud', (['points_in', 'batch_ids', 'batch_size'], {}), '(points_in, batch_ids, batch_size)\n', (5030, 5064), False, 'from pylib.pc import PointCloud\n'), ((5078, 5107), 'pylib.pc.Grid', 'Grid', (['point_cloud', 'cell_sizes'], {}), '(point_cloud, cell_sizes)\n', (5082, 5107), False, 'from pylib.pc import Grid\n'), ((5137, 5187), 'pylib.pc.PointCloud', 'PointCloud', (['samples', 'samples_batch_ids', 'batch_size'], {}), '(samples, samples_batch_ids, batch_size)\n', (5147, 5187), False, 'from pylib.pc import PointCloud\n'), ((5209, 5260), 'pylib.pc.Neighborhood', 'Neighborhood', (['grid', 'cell_sizes', 'point_cloud_samples'], {}), '(grid, cell_sizes, point_cloud_samples)\n', (5221, 5260), False, 'from pylib.pc import Neighborhood\n'), ((5410, 5462), 'tensorflow.unique_with_counts', 'tf.unique_with_counts', (['neighborhood._neighbors[:, 1]'], {}), '(neighborhood._neighbors[:, 1])\n', (5431, 5462), True, 'import tensorflow as tf\n'), ((5490, 5511), 'tensorflow.reduce_max', 'tf.reduce_max', (['counts'], {}), '(counts)\n', (5503, 5511), True, 'import tensorflow as tf\n'), ((5632, 5650), 'numpy.float32', 
'np.float32', (['points'], {}), '(points)\n', (5642, 5650), True, 'import numpy as np\n'), ((3008, 3030), 'numpy.array', 'np.array', (['accum_points'], {}), '(accum_points)\n', (3016, 3030), True, 'import numpy as np\n'), ((3051, 3085), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': 'bandwidth'}), '(bandwidth=bandwidth)\n', (3064, 3085), False, 'from sklearn.neighbors import KernelDensity\n'), ((3192, 3207), 'numpy.exp', 'np.exp', (['log_pdf'], {}), '(log_pdf)\n', (3198, 3207), True, 'import numpy as np\n'), ((3263, 3302), 'numpy.concatenate', 'np.concatenate', (['(pdf_real, pdf)'], {'axis': '(0)'}), '((pdf_real, pdf), axis=0)\n', (3277, 3302), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from ..registry import ROIPOOLING
@ROIPOOLING.register_module
class RoIPooling(nn.Module):
    """Landmark-centered RoI pooling.

    For every landmark, crops a fixed-size region of interest from the
    feature maps with an affine grid + bilinear sampling, max-pools the
    crop to a single vector, and fuses all landmark vectors with a
    linear layer.
    """

    def __init__(self,
                 pool_plane,
                 inter_channels,
                 outchannels,
                 crop_size=7,
                 img_size=(224, 224),
                 num_lms=8,
                 roi_size=2):
        super(RoIPooling, self).__init__()
        self.maxpool = nn.MaxPool2d(pool_plane)
        self.linear = nn.Sequential(
            nn.Linear(num_lms * inter_channels, outchannels), nn.ReLU(True),
            nn.Dropout())

        self.inter_channels = inter_channels
        self.outchannels = outchannels
        self.num_lms = num_lms
        self.crop_size = crop_size
        assert img_size[0] == img_size[
            1], 'img width should equal to img height'
        self.img_size = img_size[0]
        self.roi_size = roi_size

        # Scale entries of the affine matrix: RoI extent relative to the
        # feature-map extent (identical for x and y).
        self.a = self.roi_size / float(self.crop_size)
        self.b = self.roi_size / float(self.crop_size)

    def forward(self, features, landmarks):
        """Batch-wise RoI pooling.

        Args:
            features (tensor): feature maps to be pooled; assumed
                (batch, inter_channels, crop_size, crop_size) -- confirm
                with the caller.
            landmarks (tensor): landmark coordinates in image space,
                shape (batch, num_lms * 2).

        Returns:
            Tensor of shape (batch, outchannels).
        """
        batch_size = features.size(0)

        # Map landmark coordinates from image space to feature-map space.
        landmarks = landmarks / self.img_size * self.crop_size
        landmarks = landmarks.view(batch_size, self.num_lms, 2)

        # Constant 2x2 scale part of the affine matrix, shared by every
        # sample.  BUGFIX: built on features' device/dtype instead of a
        # hard-coded .cuda(), so the module also runs on CPU.
        scale_mat = features.new_tensor([[self.a, 0.0], [0.0, self.b]])
        ab = scale_mat.unsqueeze(0).expand(batch_size, -1, -1)
        size = torch.Size(
            (batch_size, features.size(1), self.roi_size, self.roi_size))

        pooled = []
        for lm_idx in range(self.num_lms):
            # Translation part: landmark position in normalized [-1, 1] coords.
            tx = -1 + 2 * landmarks[:, lm_idx, 0] / float(self.crop_size)
            ty = -1 + 2 * landmarks[:, lm_idx, 1] / float(self.crop_size)
            t_xy = torch.stack((tx, ty)).view(batch_size, 2, 1)
            theta = torch.cat((ab, t_xy), 2)

            flowfield = nn.functional.affine_grid(theta, size)
            one_pooled = nn.functional.grid_sample(
                features,
                flowfield.to(torch.float32),
                mode='bilinear',
                padding_mode='border')
            one_pooled = self.maxpool(one_pooled).view(batch_size,
                                                       self.inter_channels)
            pooled.append(one_pooled)

        # Concatenate per-landmark vectors and fuse them.
        pooled = torch.stack(pooled, dim=1).view(batch_size, -1)
        pooled = self.linear(pooled)
        return pooled

    def init_weights(self):
        """Kaiming init for convs, constants for BN, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.functional.affine_grid",
"torch.stack",
"torch.nn.init.kaiming_normal_",
"torch.from_numpy",
"numpy.stack",
"numpy.array",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.Linear",
"torch.cat"
] | [((489, 513), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_plane'], {}), '(pool_plane)\n', (501, 513), True, 'import torch.nn as nn\n'), ((1699, 1719), 'numpy.stack', 'np.stack', (['ab'], {'axis': '(0)'}), '(ab, axis=0)\n', (1707, 1719), True, 'import numpy as np\n'), ((563, 611), 'torch.nn.Linear', 'nn.Linear', (['(num_lms * inter_channels)', 'outchannels'], {}), '(num_lms * inter_channels, outchannels)\n', (572, 611), True, 'import torch.nn as nn\n'), ((613, 626), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (620, 626), True, 'import torch.nn as nn\n'), ((640, 652), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (650, 652), True, 'import torch.nn as nn\n'), ((1621, 1657), 'numpy.array', 'np.array', (['[[self.a, 0], [0, self.b]]'], {}), '([[self.a, 0], [0, self.b]])\n', (1629, 1657), True, 'import numpy as np\n'), ((2151, 2175), 'torch.cat', 'torch.cat', (['(ab, t_xy)', '(2)'], {}), '((ab, t_xy), 2)\n', (2160, 2175), False, 'import torch\n'), ((2201, 2239), 'torch.nn.functional.affine_grid', 'nn.functional.affine_grid', (['theta', 'size'], {}), '(theta, size)\n', (2226, 2239), True, 'import torch.nn as nn\n'), ((2634, 2660), 'torch.stack', 'torch.stack', (['pooled'], {'dim': '(1)'}), '(pooled, dim=1)\n', (2645, 2660), False, 'import torch\n'), ((2860, 2930), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (2883, 2930), True, 'import torch.nn as nn\n'), ((2086, 2107), 'torch.stack', 'torch.stack', (['(tx, ty)'], {}), '((tx, ty))\n', (2097, 2107), False, 'import torch\n'), ((3011, 3039), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3028, 3039), True, 'import torch.nn as nn\n'), ((3104, 3134), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3121, 3134), True, 'import torch.nn as nn\n'), ((3151, 3179), 
'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3168, 3179), True, 'import torch.nn as nn\n'), ((1733, 1753), 'torch.from_numpy', 'torch.from_numpy', (['ab'], {}), '(ab)\n', (1749, 1753), False, 'import torch\n'), ((3239, 3273), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (3254, 3273), True, 'import torch.nn as nn\n'), ((3290, 3318), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3307, 3318), True, 'import torch.nn as nn\n')] |
"""
Fake data generator.
"""
import datetime
import os
from typing import Dict
import collections
import numpy as np
import pandas as pd
# Generic type definitions.
# Parameters of a normal distribution used to draw one metric: mean, std-dev,
# the optional name of the column the draw is multiplied by, and the number of
# decimals kept in the result (0 -> cast to integer).
ndist_params = collections.namedtuple('ndist_params', ('mu', 'sigma', 'derives_from', 'decimals'))
#
# Generator settings
#
# Base paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Input files.
AD_GROUP_NAMES_FILE = os.path.join(DATA_DIR, 'gen_ad_groups.csv')
WEEKLY_PERF_FILE = os.path.join(DATA_DIR, 'gen_weekly_perf.csv')
WEEKDAY_PERF_FILE = os.path.join(DATA_DIR, 'gen_weekday_perf.csv')
# Settings for the random generator.
# Metrics with 'derives_from' are drawn as per-unit rates of that base metric
# (e.g. Clicks per Impression, Cost per Click).
METRICS_RAND_SETTINGS: Dict[str, ndist_params] = {
    'Impressions': ndist_params(mu=200, sigma=40, derives_from=None, decimals=0),
    'Clicks': ndist_params(mu=0.1, sigma=0.01, derives_from='Impressions', decimals=0),
    'Cost': ndist_params(mu=5, sigma=1, derives_from='Clicks', decimals=2),
    'Conversions': ndist_params(mu=0.1, sigma=0.02, derives_from='Clicks', decimals=0),
    'ConversionsValue': ndist_params(mu=1500, sigma=500, derives_from='Conversions', decimals=2),
}
HIGH_QUALITY_SCORE_SETTINGS = ndist_params(mu=7, sigma=2, derives_from=None, decimals=0)
LOW_QUALITY_SCORE_SETTINGS = ndist_params(mu=4, sigma=2, derives_from=None, decimals=0)
KEYWORD_IMPRESSIONS_SETTINGS = ndist_params(mu=500, sigma=300, derives_from=None, decimals=0)
# Simulated days without credit: all metrics are forced to zero on these dates.
DAYS_WITHOUT_CREDIT = {
    datetime.datetime(2018, 3, 17),
    datetime.datetime(2018, 3, 18),
}
# Output files.
AD_GROUP_DATA_FILE = os.path.join(DATA_DIR, 'data_ad_group_performance.xlsx')
QUALITY_SCORE_DATA_FILE = os.path.join(DATA_DIR, 'data_keywords_quality_score.xlsx')
def load_weekday_perf(filename) -> pd.DataFrame:
    """
    Read the CSV file describing per-weekday performance coefficients.
    :param filename: Path of the CSV file.
    :return: DataFrame with the file contents (first row used as header).
    """
    frame = pd.read_csv(filename, header=0)
    return frame
def load_weekly_perf(filename) -> pd.DataFrame:
    """
    Read the CSV file with weekly performance coefficients and parse the
    'iso_week' column (format like '2018W10-1') into datetimes.
    :param filename: Path of the CSV file.
    :return: DataFrame whose 'iso_week' column holds parsed timestamps.
    """
    frame = pd.read_csv(filename, header=0)
    frame['iso_week'] = pd.to_datetime(frame['iso_week'], format='%YW%W-%w')
    return frame
def load_ad_groups(filename) -> pd.DataFrame:
    """
    Read the CSV file listing the ad groups.
    :param filename: Path of the CSV file.
    :return: DataFrame with the file contents (first row used as header).
    """
    frame = pd.read_csv(filename, header=0)
    return frame
def generate_ad_group_performance(ad_groups: pd.DataFrame, weekly_perf: pd.DataFrame, weekday_perf: pd.DataFrame) \
        -> pd.DataFrame:
    """
    Build a daily performance table for every ad group.

    The three inputs each carry a constant 'key' column, so inner-merging on
    it yields the cartesian product ad_group x week x weekday, which is then
    resolved to calendar dates and populated with random metric draws.
    :param ad_groups: Ad groups.
    :param weekly_perf: Performance coefficients for each week.
    :param weekday_perf: Performance coefficients for each week day.
    :return: Generated DataFrame.
    """
    # Cartesian product via the shared 'key' column; drop the helper key after.
    merged: pd.DataFrame = pd.merge(pd.merge(ad_groups, weekly_perf, on='key', how='inner'),
                                     weekday_perf, on='key', how='inner')
    merged.drop(columns=['key'], inplace=True)
    # Resolve each (ISO week, weekday offset) pair into a concrete date.
    merged['days_delta'] = pd.to_timedelta(merged['iso_weekday'], unit='D')
    merged['Date'] = merged['iso_week'] + merged['days_delta']
    no_credit = merged['Date'].isin(DAYS_WITHOUT_CREDIT)
    for metric_name, rand_params in METRICS_RAND_SETTINGS.items():
        drawn = generate_metric_column(metric_name, rand_params, merged)
        # Days without credit produce no traffic at all.
        drawn[no_credit] = 0
        merged[metric_name] = drawn
    columns = ['CampaignId', 'CampaignName', 'AdGroupId', 'AdGroupName', 'Date'] + \
        list(METRICS_RAND_SETTINGS.keys())
    return merged[columns]
def generate_metric_column(column_name, rand_params, result):
    """
    Generate a series of random values for the specified metric.

    If the result set contains any of the following columns, they are used as
    multiplicative coefficients for the random draw:
    * f'weekly_perf_{column_name}'
    * f'weekday_perf_{column_name}'
    * f'ad_group_perf_{column_name}'
    :param column_name: Name of the metric column.
    :param rand_params: Parameters of the random distribution (ndist_params).
    :param result: Result set supplying the row count and optional coefficients.
    :return: Generated series.
    """
    values = np.random.normal(rand_params.mu, rand_params.sigma, size=len(result))
    # Negative draws make no sense for count/amount metrics.
    values[values < 0] = 0
    if rand_params.derives_from:
        # Interpret the draw as a per-unit rate of the base metric.
        values *= result[rand_params.derives_from]
    for prefix in ('weekly_perf_', 'weekday_perf_', 'ad_group_perf_'):
        coeff_column = prefix + column_name
        if coeff_column in result:
            values *= result[coeff_column]
    if rand_params.decimals == 0:
        return values.astype(np.int64)
    return values.round(rand_params.decimals)
def generate_quality_score(rand_params: ndist_params, size):
    """
    Draw Quality Score values from a normal distribution, clamped to [1, 10].

    :param rand_params: Parameters of the random distribution.
    :param size: Number of rows.
    :return: Generated integer series.
    """
    scores = np.random.normal(rand_params.mu, rand_params.sigma, size=size)
    # Quality Score is bounded to the 1..10 range by definition.
    np.clip(scores, 1, 10, out=scores)
    return scores.astype(np.int64)
def generate_keywords_quality_score(ad_groups: pd.DataFrame) -> pd.DataFrame:
    """
    Generate a list of keywords with a Quality Score for each ad group.

    Ad groups with an even AdGroupId receive scores from the high-QS
    distribution; odd ones from the low-QS distribution.
    :param ad_groups: List of all ad groups (must contain the 'key' column).
    :return: Generated DataFrame.
    """
    # Build the synthetic keyword list "Keyword #1".."Keyword #199".
    keywords = pd.DataFrame({
        'key': 1,
        'kw_base': 'Keyword #',
        'kw_num': np.arange(1, 200),
    })
    keywords['Keyword'] = keywords['kw_base'] + keywords['kw_num'].astype(str)
    keywords.drop(columns=['kw_base', 'kw_num'], inplace=True)
    # Cartesian product of ad groups and keywords via the constant key.
    merged: pd.DataFrame = pd.merge(ad_groups, keywords, on='key', how='inner')
    merged.drop(columns=['key'], inplace=True)
    high_scores = generate_quality_score(HIGH_QUALITY_SCORE_SETTINGS, len(merged))
    low_scores = generate_quality_score(LOW_QUALITY_SCORE_SETTINGS, len(merged))
    is_even_group = ((merged['AdGroupId'] % 2) == 0)
    merged['QualityScore'] = np.where(is_even_group, high_scores, low_scores)
    merged['Impressions'] = generate_metric_column('Impressions', KEYWORD_IMPRESSIONS_SETTINGS, merged)
    columns = ['CampaignId', 'CampaignName', 'AdGroupId', 'AdGroupName', 'Keyword', 'Impressions', 'QualityScore']
    return merged[columns]
def main():
    """
    Generate all required data sets and write them to Excel files.
    """
    # Load the generator inputs.
    ad_groups = load_ad_groups(AD_GROUP_NAMES_FILE)
    weekly_perf = load_weekly_perf(WEEKLY_PERF_FILE)
    weekday_perf = load_weekday_perf(WEEKDAY_PERF_FILE)
    # A constant join key turns the inner merges into cartesian products.
    for frame in (ad_groups, weekly_perf, weekday_perf):
        frame['key'] = 1
    # Generate and persist the ad group daily performance.
    ad_group_performance = generate_ad_group_performance(ad_groups, weekly_perf, weekday_perf)
    ad_group_performance.to_excel(AD_GROUP_DATA_FILE, sheet_name='data', index=False)
    # Generate and persist the keywords with Quality Score.
    quality_score = generate_keywords_quality_score(ad_groups)
    quality_score.to_excel(QUALITY_SCORE_DATA_FILE, sheet_name='quality_score', index=False)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"datetime.datetime",
"numpy.random.normal",
"collections.namedtuple",
"pandas.to_timedelta",
"pandas.read_csv",
"numpy.arange",
"numpy.where",
"pandas.merge",
"os.path.join",
"os.path.abspath",
"pandas.to_datetime"
] | [((184, 271), 'collections.namedtuple', 'collections.namedtuple', (['"""ndist_params"""', "('mu', 'sigma', 'derives_from', 'decimals')"], {}), "('ndist_params', ('mu', 'sigma', 'derives_from',\n 'decimals'))\n", (206, 271), False, 'import collections\n'), ((391, 421), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data"""'], {}), "(BASE_DIR, 'data')\n", (403, 421), False, 'import os\n'), ((460, 503), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""gen_ad_groups.csv"""'], {}), "(DATA_DIR, 'gen_ad_groups.csv')\n", (472, 503), False, 'import os\n'), ((523, 568), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""gen_weekly_perf.csv"""'], {}), "(DATA_DIR, 'gen_weekly_perf.csv')\n", (535, 568), False, 'import os\n'), ((589, 635), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""gen_weekday_perf.csv"""'], {}), "(DATA_DIR, 'gen_weekday_perf.csv')\n", (601, 635), False, 'import os\n'), ((1600, 1656), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""data_ad_group_performance.xlsx"""'], {}), "(DATA_DIR, 'data_ad_group_performance.xlsx')\n", (1612, 1656), False, 'import os\n'), ((1683, 1741), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""data_keywords_quality_score.xlsx"""'], {}), "(DATA_DIR, 'data_keywords_quality_score.xlsx')\n", (1695, 1741), False, 'import os\n'), ((1492, 1522), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(3)', '(17)'], {}), '(2018, 3, 17)\n', (1509, 1522), False, 'import datetime\n'), ((1528, 1558), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(3)', '(18)'], {}), '(2018, 3, 18)\n', (1545, 1558), False, 'import datetime\n'), ((1931, 1962), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)'}), '(filename, header=0)\n', (1942, 1962), True, 'import pandas as pd\n'), ((2166, 2197), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)'}), '(filename, header=0)\n', (2177, 2197), True, 'import pandas as pd\n'), ((2228, 2286), 'pandas.to_datetime', 'pd.to_datetime', (["weekly_perf['iso_week']"], 
{'format': '"""%YW%W-%w"""'}), "(weekly_perf['iso_week'], format='%YW%W-%w')\n", (2242, 2286), True, 'import pandas as pd\n'), ((2489, 2520), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)'}), '(filename, header=0)\n', (2500, 2520), True, 'import pandas as pd\n'), ((2964, 3019), 'pandas.merge', 'pd.merge', (['ad_groups', 'weekly_perf'], {'on': '"""key"""', 'how': '"""inner"""'}), "(ad_groups, weekly_perf, on='key', how='inner')\n", (2972, 3019), True, 'import pandas as pd\n'), ((3047, 3100), 'pandas.merge', 'pd.merge', (['result', 'weekday_perf'], {'on': '"""key"""', 'how': '"""inner"""'}), "(result, weekday_perf, on='key', how='inner')\n", (3055, 3100), True, 'import pandas as pd\n'), ((3233, 3281), 'pandas.to_timedelta', 'pd.to_timedelta', (["result['iso_weekday']"], {'unit': '"""D"""'}), "(result['iso_weekday'], unit='D')\n", (3248, 3281), True, 'import pandas as pd\n'), ((5410, 5472), 'numpy.random.normal', 'np.random.normal', (['rand_params.mu', 'rand_params.sigma'], {'size': 'size'}), '(rand_params.mu, rand_params.sigma, size=size)\n', (5426, 5472), True, 'import numpy as np\n'), ((6125, 6177), 'pandas.merge', 'pd.merge', (['ad_groups', 'keywords'], {'on': '"""key"""', 'how': '"""inner"""'}), "(ad_groups, keywords, on='key', how='inner')\n", (6133, 6177), True, 'import pandas as pd\n'), ((6466, 6507), 'numpy.where', 'np.where', (['selection_cond', 'high_qs', 'low_qs'], {}), '(selection_cond, high_qs, low_qs)\n', (6474, 6507), True, 'import numpy as np\n'), ((352, 377), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (367, 377), False, 'import os\n'), ((5929, 5946), 'numpy.arange', 'np.arange', (['(1)', '(200)'], {}), '(1, 200)\n', (5938, 5946), True, 'import numpy as np\n')] |
from jina import Executor, Document, DocumentArray, requests
import numpy as np
from typing import Tuple
import os
top_k = 10
class DiskIndexer(Executor):
    """Simple indexer that keeps documents in memory and persists them to disk.

    The index is restored from ``save_path`` on start-up (if present) and
    written back when the executor is closed.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.top_k = top_k
        # Fix: the original assigned a fresh DocumentArray here and then
        # unconditionally overwrote it below — a dead store, now removed.
        if os.path.exists(self.save_path):
            self._docs = DocumentArray.load(self.save_path)
        else:
            self._docs = DocumentArray()

    @property
    def save_path(self):
        # Path of the JSON file used to persist the index; the workspace
        # directory is created on demand.
        if not os.path.exists(self.workspace):
            os.makedirs(self.workspace)
        return os.path.join(self.workspace, "apps.json")

    def close(self):
        # Persist the in-memory index on shutdown.
        self._docs.save(self.save_path)

    @requests(on="/index")
    def index(self, docs: "DocumentArray", **kwargs):
        """Append the incoming documents to the in-memory index."""
        self._docs.extend(docs)
        return docs

    @requests(on="/search")
    def search(self, docs: "DocumentArray", **kwargs):
        """Attach the top-k closest indexed documents to each query document.

        Distances are computed from the 'embedding' attribute via the
        module-level _norm/_ext_A/_ext_B/_cosine helpers.
        """
        a = np.stack(docs.get_attributes("embedding"))
        b = np.stack(self._docs.get_attributes("embedding"))
        q_emb = _ext_A(_norm(a))
        d_emb = _ext_B(_norm(b))
        dists = _cosine(q_emb, d_emb)
        idx, dist = self._get_sorted_top_k(dists, self.top_k)
        for _q, _ids, _dists in zip(docs, idx, dist):
            for _id, _dist in zip(_ids, _dists):
                d = Document(self._docs[int(_id)], copy=True)
                # d.score.value = 1 - _dist
                _q.matches.append(d)
        return docs

    @staticmethod
    def _get_sorted_top_k(
        dist: "np.array", top_k: int
    ) -> Tuple["np.ndarray", "np.ndarray"]:
        """Return per-row indices and values of the top_k smallest distances."""
        if top_k >= dist.shape[1]:
            # Fewer candidates than top_k: a full sort is sufficient.
            idx = dist.argsort(axis=1)[:, :top_k]
            dist = np.take_along_axis(dist, idx, axis=1)
        else:
            # Partition first, then sort only the selected top_k slice.
            idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
            dist = np.take_along_axis(dist, idx_ps, axis=1)
            idx_fs = dist.argsort(axis=1)
            idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
            dist = np.take_along_axis(dist, idx_fs, axis=1)
        return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B ** 2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
# def _euclidean(A_ext, B_ext):
# sqdist = A_ext.dot(B_ext).clip(min=0)
# return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
| [
"os.path.exists",
"numpy.ones",
"os.makedirs",
"os.path.join",
"jina.requests",
"jina.DocumentArray.load",
"numpy.linalg.norm",
"numpy.take_along_axis",
"jina.DocumentArray"
] | [((733, 754), 'jina.requests', 'requests', ([], {'on': '"""/index"""'}), "(on='/index')\n", (741, 754), False, 'from jina import Executor, Document, DocumentArray, requests\n'), ((867, 889), 'jina.requests', 'requests', ([], {'on': '"""/search"""'}), "(on='/search')\n", (875, 889), False, 'from jina import Executor, Document, DocumentArray, requests\n'), ((2128, 2143), 'numpy.ones', 'np.ones', (['(x, y)'], {}), '((x, y))\n', (2135, 2143), True, 'import numpy as np\n'), ((280, 295), 'jina.DocumentArray', 'DocumentArray', ([], {}), '()\n', (293, 295), False, 'from jina import Executor, Document, DocumentArray, requests\n'), ((334, 364), 'os.path.exists', 'os.path.exists', (['self.save_path'], {}), '(self.save_path)\n', (348, 364), False, 'import os\n'), ((623, 664), 'os.path.join', 'os.path.join', (['self.workspace', '"""apps.json"""'], {}), "(self.workspace, 'apps.json')\n", (635, 664), False, 'import os\n'), ((2606, 2653), 'numpy.linalg.norm', 'np.linalg.norm', (['A'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(A, ord=2, axis=1, keepdims=True)\n', (2620, 2653), True, 'import numpy as np\n'), ((391, 425), 'jina.DocumentArray.load', 'DocumentArray.load', (['self.save_path'], {}), '(self.save_path)\n', (409, 425), False, 'from jina import Executor, Document, DocumentArray, requests\n'), ((465, 480), 'jina.DocumentArray', 'DocumentArray', ([], {}), '()\n', (478, 480), False, 'from jina import Executor, Document, DocumentArray, requests\n'), ((536, 566), 'os.path.exists', 'os.path.exists', (['self.workspace'], {}), '(self.workspace)\n', (550, 566), False, 'import os\n'), ((580, 607), 'os.makedirs', 'os.makedirs', (['self.workspace'], {}), '(self.workspace)\n', (591, 607), False, 'import os\n'), ((1724, 1761), 'numpy.take_along_axis', 'np.take_along_axis', (['dist', 'idx'], {'axis': '(1)'}), '(dist, idx, axis=1)\n', (1742, 1761), True, 'import numpy as np\n'), ((1864, 1904), 'numpy.take_along_axis', 'np.take_along_axis', (['dist', 'idx_ps'], {'axis': 
'(1)'}), '(dist, idx_ps, axis=1)\n', (1882, 1904), True, 'import numpy as np\n'), ((1965, 2007), 'numpy.take_along_axis', 'np.take_along_axis', (['idx_ps', 'idx_fs'], {'axis': '(1)'}), '(idx_ps, idx_fs, axis=1)\n', (1983, 2007), True, 'import numpy as np\n'), ((2027, 2067), 'numpy.take_along_axis', 'np.take_along_axis', (['dist', 'idx_fs'], {'axis': '(1)'}), '(dist, idx_fs, axis=1)\n', (2045, 2067), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""experiments.py: experiments python program for different experiment applications"""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use("config/alt.mplstyle")
import sys
sys.path.append("models/")
sys.path.append("models/experiments/")
import os
import numpy as np
import argparse
import pandas as pd
import datetime as dt
from netCDF4 import Dataset, num2date
from dateutil import parser as dparser
import glob
import xarray
import statsmodels.api as sm
from statsmodels.formula.api import ols
from constant import *
import utils
from absorption import *
import case0
from model import Model
# Default serif font dictionaries shared by the plotting routines below.
fontT = {"family": "serif", "color": "k", "weight": "normal", "size": 8}
font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
from matplotlib import font_manager
# Font properties used for axis tick labels.
ticks_font = font_manager.FontProperties(family="serif", size=10, weight="normal")
# Global tick/label styling applied to every figure in this module.
matplotlib.rcParams["xtick.color"] = "k"
matplotlib.rcParams["ytick.color"] = "k"
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["mathtext.default"] = "default"
def coloring_axes(ax, atype="left", col="red", fmtr="%H", ivl=60):
    """Color one spine plus the y-axis of `ax` and set time ticks on x.

    :param ax: Matplotlib axes to style.
    :param atype: Which spine to color ("left", "right", ...).
    :param col: Color applied to the spine, y ticks and y label.
    :param fmtr: strftime format for the x-axis tick labels.
    :param ivl: Minute interval between major x ticks.
    :return: The same axes, for chaining.
    """
    ax.spines[atype].set_color(col)
    ax.yaxis.label.set_color(col)
    ax.tick_params(axis="y", which="both", colors=col)
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(fmtr))
    ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=ivl))
    return ax
def coloring_twaxes(ax, atype="left", col="red", twcol="k", fmtr="%H", ivl=60):
    """Style a twin axes: spine colored `col`, y ticks/label colored `twcol`.

    :param ax: Matplotlib axes to style.
    :param atype: Which spine to color ("left", "right", ...).
    :param col: Color applied to the selected spine.
    :param twcol: Color applied to the y ticks and the y label.
    :param fmtr: strftime format for the x-axis tick labels.
    :param ivl: Minute interval between major x ticks.
    :return: The same axes, for chaining.
    """
    ax.spines[atype].set_color(col)
    ax.yaxis.label.set_color(twcol)
    ax.tick_params(axis="y", which="both", colors=twcol)
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(fmtr))
    ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=ivl))
    return ax
def _case0_(args):
    """ Impact of the I0 and frequency.

    Scans modeled HF absorption over radio frequency and solar flux for the
    2015-03-11 event, compares it with the DARP empirical relation, and
    saves the figure to _images_/case0.png.
    :param args: Parsed command-line arguments (not used in this routine).
    """
    chi = np.deg2rad(np.linspace(0,90,91))
    # Solar flux levels (f0) and HF radio frequencies (fo, MHz) to scan.
    f0 = 10**np.linspace(-6,-1,31) * 1e3
    fo = 10**np.linspace(np.log10(.1), np.log10(200), 100)
    ev, start, end = dt.datetime(2015,3,11,16,22), dt.datetime(2015,3,11,15,30), dt.datetime(2015,3,11,17,30)
    l, r = 52, 53
    _f0_ = case0._Case0_(start, end)[40:53]
    # Unzip, read and re-zip the pre-computed simulation output.
    fname = "data/sim/case0.nc.gz"
    os.system("gzip -d "+fname)
    _nc = Dataset(fname.replace(".gz", ""))
    os.system("gzip "+fname.replace(".gz", ""))
    pg = utils.PointGrid("ott", ev, start, end, 30, v=False)
    _lo_,_qo_ = [],[]
    # Restrict the field and collision-frequency profiles to the [l:r) time slice.
    b = pg.igrf["B"][l:r,:]
    pg._col_.nu_FT = pg._col_.nu_FT[l:r,:]
    pg._col_.nu_av_CC = pg._col_.nu_av_CC[l:r,:]
    pg._col_.nu_av_MB = pg._col_.nu_av_MB[l:r,:]
    pg._col_.nu_SN["total"] = pg._col_.nu_SN["total"][l:r,:]
    ne = _nc.variables["ne"][l:r,:]
    # Height-integrated absorption vs frequency for the four collision models.
    for _f_ in fo:
        print(" Frequency - ", _f_, " MHz")
        u = Absorption(b, pg._col_, ne, fo=_f_*1e6)
        _lo_.append([utils.int_absorption(u.AH["SN"]["O"], pg.alts, extpoint=68, llim = 60, ulim = 110),
            utils.int_absorption(u.AH["AV_CC"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
            utils.int_absorption(u.AH["AV_MB"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
            utils.int_absorption(u.SW["FT"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110)])
        continue
    _lo_ = np.array(_lo_)
    ne = _nc.variables["ne"][40:53,:]
    nfo = np.linspace(1,70,50)
    # For each flux level, extrapolate the frequency that gives 1 dB absorption.
    for i, _ in enumerate(_f0_):
        _k_ = []
        for _f_ in nfo:
            print(" Frequency, I - ", _f_, " MHz,", _f0_[i], "W/m2")
            u = Absorption(b, pg._col_, ne[i:i+1,:], fo=_f_*1e6)
            _k_.append([utils.int_absorption(u.AH["SN"]["O"], pg.alts, extpoint=68, llim = 60, ulim = 110),
                utils.int_absorption(u.AH["AV_CC"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
                utils.int_absorption(u.AH["AV_MB"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
                utils.int_absorption(u.SW["FT"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110)])
        _k_ = np.array(_k_)[:,:,0]
        _qo_.append([10**utils.extrap1d(_k_[:,0], np.log10(nfo))([1])[0],
            10**utils.extrap1d(_k_[:,1], np.log10(nfo))([1])[0],
            10**utils.extrap1d(_k_[:,2], np.log10(nfo))([1])[0],
            10**utils.extrap1d(_k_[:,3], np.log10(nfo))([1])[0]])
    _qo_ = np.array(_qo_)
    # Empirical reference curves: Sato (1975) and the DARP relation.
    haf0 = 93.5 * (f0**0.25)
    l0 = 4.37e3 * (.22**0.5) / (fo)**2
    haf1 = 10*np.log10(f0*1e-3) + 65
    l1 = ((10*np.log10(2.2e-4) + 65)/fo)**1.5
    matplotlib.rcParams["xtick.labelsize"] = 10
    matplotlib.rcParams["ytick.labelsize"] = 10
    matplotlib.rcParams["mathtext.default"] = "default"
    font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
    fonttext = {"family": "serif", "color": "blue", "weight": "normal", "size": 10}
    #fig, axes = plt.subplots(figsize=(6, 6), nrows=2, ncols=2, dpi=150)
    #fig.subplots_adjust(hspace=.3, wspace=.1)
    fig, ax = plt.subplots(figsize=(3, 3), nrows=1, ncols=1, dpi=100)
    #ax = axes[0,0]
    #ax.loglog(f0*1e-3, haf0, "r", linewidth=1.2, label="Sato (1975)")
    #ax.loglog(f0*1e-3, haf1, "b", linewidth=1.2, label="DARP2")
    #ax.set_ylabel("HAF, MHz", fontdict=font)
    #ax.set_xlim(1e-6,1e-1)
    #ax.legend(loc=2, scatterpoints=3, fontsize=8, frameon=True)
    #ax.text(0.2, 1.05, r"(a) $\chi=0^o$", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
    #ax = axes[0,1]
    #ax.set_yticks([])
    #ax = ax.twinx()
    #ax.loglog(fo, l0, "r", linewidth=1.2, label="Sato (1975)")
    ax.loglog(fo, l1, "darkred", ls="--", linewidth=0.8, label="DARP")
    ax.set_xlim(1,200)
    ax.set_ylim(1,1e5)
    ax.set_ylabel("Absorption, dB", fontdict=font)
    #ax.legend(loc=1, scatterpoints=3, fontsize=8, frameon=True)
    ax.text(0.5, 1.05, r"$\chi=0^o$, $I_{\infty}=2.2\times 10^{-4}$ $Wm^{-2}$", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
    #ax = axes[1,0]
    #ax.loglog(_f0_, _qo_[:,0], "ro", markersize=1.2, label=r"$\beta_{ah}(\nu_{sn})$")
    #ax.loglog(_f0_, _qo_[:,1], "go", markersize=0.8, label=r"$\beta_{ah}(\nu_{av}^{cc})$")
    #ax.loglog(_f0_, _qo_[:,2], "bo", markersize=1.2, label=r"$\beta_{ah}(\nu_{av}^{mb})$")
    #ax.loglog(_f0_, _qo_[:,3], "ko", markersize=1.2, label=r"$\beta_{sw}(\nu_{me})$")
    #ax.set_ylabel("HAF, MHz", fontdict=font)
    #ax.set_xlabel(r"SXR, $Wm^{-2}$", fontdict=font)
    #ax.set_xlim(1e-6,1e-1)
    #ax.legend(loc=2, scatterpoints=3, fontsize=8, frameon=True)
    #ax.text(0.2, 1.05, r"(b) $\chi=0^o$", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
    #ax = axes[1,1]
    ax.set_xlabel("Frequency, MHz", fontdict=font)
    #ax.set_yticks([])
    #ax = ax.twinx()
    ax.loglog(fo, utils.smooth(_lo_[:,0,0], 11), "r", linewidth=1.2, label=r"$\beta_{ah}(\nu_{sn})$")
    ax.loglog(fo, utils.smooth(_lo_[:,1,0], 11), "g", linewidth=0.8, label=r"$\beta_{ah}(\nu_{av}^{cc})$")
    ax.loglog(fo, utils.smooth(_lo_[:,2,0], 11), "b", linewidth=1.2, label=r"$\beta_{ah}(\nu_{av}^{mb})$")
    ax.loglog(fo, utils.smooth(_lo_[:,3,0], 11), "k", linewidth=1.2, label=r"$\beta_{sw}(\nu_{me})$")
    ax.set_ylim(1,1e5)
    ax.set_xlim(1,200)
    ax.set_ylabel("Absorption, dB", fontdict=font)
    ax.legend(loc=1, scatterpoints=3, fontsize=8, frameon=True)
    ax.text(0.5, 1.05, r"$\chi=0^o$, $I_{\infty}=2.2\times 10^{-4}$ $Wm^{-2}$", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
    fig.savefig("_images_/case0.png", bbox_inches="tight")
    return
def _case1_(args):
    """ Impact of special event case study.

    For the September 2017 flare sequence (5th, 6th, 7th) either launches the
    background ('bgc') or flare ('flare') simulations through simulate.py, or
    ('plot') draws GOES X-ray flux and modeled vs observed HF absorption into
    _images_/case1.png.
    :param args: Parsed command-line arguments; args.prog selects the mode.
    """
    evs, rs, starts, ends = [dt.datetime(2017,9,5), dt.datetime(2017,9,6), dt.datetime(2017,9,7)],\
            ["sps","sps","sps"],\
            [dt.datetime(2017,9,5,17), dt.datetime(2017,9,6,11), dt.datetime(2017,9,7,13,30)],\
            [dt.datetime(2017,9,5,19,30), dt.datetime(2017,9,6,17), dt.datetime(2017,9,7,19)]
    # Launch one simulate.py run per event for the simulation modes.
    for ev, r, start, end in zip(evs, rs, starts, ends):
        if args.prog == "bgc":
            cmd = "python simulate.py -p bgc -r {r} -ev {ev} -s {s} -e {e} -v -fr {fr}".format(r=r,
                    ev=ev.strftime("%Y-%m-%dT%H:%M"), s=start.strftime("%Y-%m-%dT%H:%M"),
                    e=end.strftime("%Y-%m-%dT%H:%M"), fr=5.24)
            print(" "+ cmd)
            os.system(cmd)
        elif args.prog == "flare":
            cmd = "python simulate.py -p flare -r {r} -ev {ev} -s {s} -e {e} -v -fr {fr} -rm".format(r=r,
                    ev=ev.strftime("%Y-%m-%dT%H:%M"), s=start.strftime("%Y-%m-%dT%H:%M"),
                    e=end.strftime("%Y-%m-%dT%H:%M"), fr=5.24)
            print(" "+ cmd)
            os.system(cmd)
    if args.prog == "plot":
        fmt = matplotlib.dates.DateFormatter("%H")
        fig, axes = plt.subplots(figsize=(9, 5), nrows=2, ncols=3, dpi=150)
        fig.subplots_adjust(hspace=.1, wspace=.3)
        i = 0
        # Flare class labels for the three events.
        CC = ["M2.3", "X9.3", "X1.7"]
        for ev, start, end in zip(evs, starts, ends):
            # Observed absorption values for this event (model flag Y/N).
            _X_ = pd.read_csv("config/dat/case1.ev{t}.csv".format(t=i))
            _X_["dt"] = [ev + dt.timedelta(hours=h) for h in _X_.dt]
            _X_ = _X_.sort_values(by=["dt"])
            # Top row: GOES soft/hard X-ray flux.
            ax = axes[0,i]
            gos = utils.read_goes(ev)
            col = "r"
            ax = coloring_axes(ax, col="k")
            ax.semilogy(gos.date,gos.B_AVG,col,linewidth=0.75, label="SXR (.1-.8 nm)")
            ax.semilogy(gos.date,gos.A_AVG,"b",linewidth=0.75, label="HXR (.05-.4 nm)")
            if i==2: ax.legend(bbox_to_anchor=(1.4, 0.5), scatterpoints=3, ncol=1, fontsize=8, frameon=True)
            ax.set_ylim(1e-8,1e-3)
            ax.set_yticks([1e-8, 1e-7, 1e-6, 1e-5, 1e-4,1e-3])
            ax.set_xlim(start,end)
            if i==0: ax.set_ylabel(r"Solar Flux, $Wm^{-2}$",fontdict=font)
            font["color"] = "k"
            font["color"] = "darkgreen"
            ax.text(0.5,1.05,"%s, %s"%(ev.strftime("%d %b %Y"), CC[i]),horizontalalignment="center",
                    verticalalignment="center", transform=ax.transAxes,fontdict=font)
            font["color"] = "k"
            # Bottom row: modeled vs observed HF absorption.
            ax = axes[1,i]
            ax = coloring_axes(ax, col="k")
            if i==0:
                ax.set_ylabel("Observations \n HF Absorption, db",fontdict=font)
            ax.scatter(_X_[_X_.model=="N"].dt, _X_[_X_.model=="N"].db, s=3., color="gray", alpha=0.8, marker="D", label="Ionosonde")
            #ax.legend(loc=1, scatterpoints=3, fontsize=8, frameon=True)
            #ax.text(0.35, 1.05, "(a.%d) "%(i+1) + ev.strftime("%Y-%m-%d")+" UT", horizontalalignment="center",
            #        verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
            # Unzip, read and re-zip the archived simulation output.
            fname = "data/sim/{date}/flare.sps.nc.gz".format(date=ev.strftime("%Y.%m.%d.%H.%M"))
            os.system("gzip -d " + fname)
            nc = Dataset(fname.replace(".gz",""))
            os.system("gzip " + fname.replace(".gz",""))
            times = num2date(nc.variables["time"][:], nc.variables["time"].units, nc.variables["time"].calendar)
            times = np.array([x._to_real_datetime() for x in times]).astype("datetime64[ns]")
            times = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in times]
            ax.plot(times, 2*nc.variables["drap"][:], "darkred", ls="--", linewidth=0.8, label="DRAP2")
            ax.plot(times, 2*utils.int_absorption(nc.variables["abs.ah.sn.o"][:], model["alts"], extpoint=68), "r",
                    linewidth=1.2, label=r"$\beta_{ah}(\nu_{sn})$")
            ax.plot(times, 2*utils.int_absorption(nc.variables["abs.ah.av.cc.o"][:], model["alts"], extpoint=64), "g",
                    linewidth=0.8, label=r"$\beta_{ah}(\nu_{av}^{cc})$")
            ax.plot(times, 2*utils.int_absorption(nc.variables["abs.ah.av.mb.o"][:], model["alts"], extpoint=64), "b",
                    linewidth=1.2, label=r"$\beta_{ah}(\nu_{av}^{mb})$")
            ax.plot(times, 2*utils.int_absorption(nc.variables["abs.sw.ft.o"][:], model["alts"], extpoint=64), "k",
                    linewidth=0.8, label=r"$\beta_{ah}(\nu_{av}^{cc})$")
            ax.set_xlim(start, end)
            ax.scatter(_X_[_X_.model=="Y"].dt, _X_[_X_.model=="Y"].db, s=1.2, color="darkred",
                    alpha=0.8, label="Levine et al. (2019)")
            if i==2: ax.legend(bbox_to_anchor=(1.12, 0.9), scatterpoints=3, fontsize=8, frameon=True)
            #ax.text(0.5, 1.05, "(b.%d) "%(i+1) + ev.strftime("%Y-%m-%d")+" UT, @6.4 MHz", horizontalalignment="center",
            #        verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
            i += 1
        axes[1,0].set_xlabel("Time (UT)", fontdict=font)
        axes[1,1].set_xlabel("Time (UT)", fontdict=font)
        axes[1,2].set_xlabel("Time (UT)", fontdict=font)
        font["color"] = "k"
        fig.autofmt_xdate(rotation=25,ha="center")
        fig.savefig("_images_/case1.png", bbox_inches="tight")
    return
def _case2_(args):
    """ Testing electron temperature dependence.

    Sweeps the temperature ratio T90/T90_base over [0.75, 1.75] for the
    2011-09-07 event at station 'mcmu': runs the background ('bgc') or the
    per-ratio flare simulations ('flare'), or ('plot') compares modeled
    absorption with riometer data and plots RMSE versus the ratio to
    _images_/case2.png.
    :param args: Parsed command-line arguments; args.prog selects the mode.
    """
    args.event, args.rio, args.start, args.end = dt.datetime(2011,9,7,22,38), "mcmu",\
            dt.datetime(2011,9,7,22,10), dt.datetime(2011,9,7,23,20)
    start, end = dt.datetime(2011,9,7,22,30), dt.datetime(2011,9,7,23,0)
    # Temperature ratios to scan.
    TElec = np.linspace(0.75,1.75,101)
    if args.prog == "bgc":
        cmd = "python simulate.py -p bgc -r {r} -ev {ev} -s {s} -e {e} -v".format(r=args.rio,
                ev=args.event.strftime("%Y-%m-%dT%H:%M"), s=args.start.strftime("%Y-%m-%dT%H:%M"),
                e=args.end.strftime("%Y-%m-%dT%H:%M"))
        print(" "+ cmd)
        os.system(cmd)
    elif args.prog == "flare":
        # One model run per temperature ratio.
        for t in TElec:
            print(" TElec:", t)
            Model(args.rio, args.event, args)._exp_("TElec", {"TElec": t})
    elif args.prog == "plot":
        matplotlib.rcParams["xtick.labelsize"] = 6
        matplotlib.rcParams["ytick.labelsize"] = 6
        matplotlib.rcParams["mathtext.default"] = "default"
        font = {"family": "serif", "color": "black", "weight": "normal", "size": 6}
        fonttext = {"family": "serif", "color": "blue", "weight": "normal", "size": 6}
        fmt = matplotlib.dates.DateFormatter("%H:%M")
        fig, axes = plt.subplots(figsize=(6, 2), nrows=1, ncols=2, dpi=100)
        #ax = axes[0]
        #import bootstrapped.bootstrap as bs
        #import bootstrapped.stats_functions as bs_stats
        #from scipy.stats import norm
        #x = np.linspace(0.5,2,151)
        #loc = bs.bootstrap(x, stat_func=bs_stats.mean).value
        #scale = bs.bootstrap(x, stat_func=bs_stats.std).value
        #ax.plot(x, norm.pdf(x, loc=loc, scale=scale), "r", lw=0.8, alpha=0.6)
        #ax.text(0.5, 1.05, r"(a) Distribution of $T_d$", horizontalalignment="center",
        #        verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
        files = glob.glob("data/sim/{dn}/flare*TElec*".format(dn=args.event.strftime("%Y.%m.%d.%H.%M")))
        ax = axes[0]
        ax.xaxis.set_major_formatter(fmt)
        files.sort()
        X = []
        # Riometer observations restricted to the plotting window.
        _abs_ = utils.read_riometer(args.event, args.rio)
        _abs_ = _abs_[(_abs_.date > start) & (_abs_.date < end-dt.timedelta(minutes=1))]
        cmap = matplotlib.cm.get_cmap("Reds")
        # One column of modeled absorption per temperature-ratio run.
        Mx = np.zeros((int((end-start).total_seconds()/60), len(files)))
        for i,f in enumerate(files):
            os.system("gzip -d " + f)
            nc = Dataset(f.replace(".gz", ""))
            os.system("gzip " + f.replace(".gz", ""))
            times = num2date(nc.variables["time"][:], nc.variables["time"].units, nc.variables["time"].calendar)
            times = np.array([x._to_real_datetime() for x in times]).astype("datetime64[ns]")
            times = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in times]
            #if np.mod(i,20)==0: ax.plot(times, utils.smooth(utils.int_absorption(nc.variables["abs.ah.sn.o"][:],
            #    model["alts"], extpoint=68), 5), color=cmap(.2+(i/200)),
            #    linewidth=0.6, ls="--", label=r"$T_d$=%.2f"%TElec[i])
            m = pd.DataFrame()
            m["date"] = times
            m["hf_abs"] = utils.smooth(utils.int_absorption(nc.variables["abs.ah.sn.o"][:], model["alts"], extpoint=68), 5)
            m = m[(m.date >= start) & (m.date < end)]
            Mx[:,i] = m.hf_abs.tolist()
            e = utils.estimate_error(m, _abs_)
            X.append(e)
        ax.plot(_abs_.date, _abs_.hf_abs, "ko", alpha=0.4, markersize=0.1, label=r"$\beta_{R}$", lw=.4)
        # Median modeled curve with a 1.98-sigma band across all runs.
        mn, st = 1.2*np.median(Mx, axis=1), 1.98*np.std(Mx, axis=1)
        ax.plot(m.date, mn, color="r", linewidth=0.8, ls="--", label=r"$\beta_m$")
        ax.fill_between(m.date, mn - st, mn + st, color="r", alpha=0.5, label="95% CI")
        X = np.array(X)
        ax.set_xlim(start, end)
        ax.text(0.5, 1.05, r"(b) %s UT, %s @30 MHz, $T_d=\frac{T^{90}}{T^{90}_{base}}$"%(args.event.strftime("%Y-%m-%d"),
            args.rio.upper()), horizontalalignment="center",
            verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
        ax.set_ylim(-.1,2.5)
        ax.legend(loc=1, scatterpoints=3, fontsize=4, ncol=1, frameon=True)
        ax.set_xlabel("Time (UT)", fontdict=font)
        ax.set_ylabel("Absorption, dB", fontdict=font)
        # Right panel: RMSE versus temperature ratio.
        ax = axes[1]
        ax.grid(False, axis="y")
        ax.set_xlabel(r"Temperature ratio, $\frac{T^{90}}{T^{90}_{base}}$", fontdict=font)
        ax.set_yticklabels([])
        ax = ax.twinx()
        ax.plot(TElec, X, "ro", markersize=0.3, alpha=.6)
        ax.set_xlim(.75,1.75)
        ax.axvline(TElec[np.argmin(X)], ls="--", lw=0.4, color="b")
        ax.set_ylabel("RMSE", fontdict=font)
        ax.text(0.5, 1.05, "(c) Impact of Temperature on RMSE", horizontalalignment="center",
            verticalalignment="center", transform=ax.transAxes, fontdict=fonttext)
        fonttext["size"] = 4
        ax.text(TElec[np.argmin(X)], 0.745, r"$T_d$=%.2f"%TElec[np.argmin(X)], horizontalalignment="center",
            verticalalignment="center", fontdict=fonttext, rotation=90)
        fig.autofmt_xdate()
        fig.savefig("_images_/case2.png", bbox_inches="tight")
    return
def _stats_(args):
    """Estimate and plot statistics of model skill across flare events.

    If ``args.prog == "plot"``, pre-computed skill files
    (``skills.<rio>.nc``) are loaded, a per-model accuracy metric is
    regressed on solar-geometry predictors (sza, local_time, mlt, mlat)
    and plotted to ``_images_/stats.png``.  Otherwise, the archived flare
    files are post-processed into skill files via ``utils.Performance``.
    """
    # Event list: one row per (event datetime, riometer) pair.
    x = pd.read_csv("config/flare.stats.m.csv")
    x.dn = [dt.datetime.strptime(t,"%Y.%m.%d.%H.%M") for t in x.dn]
    if args.prog == "plot":
        matplotlib.rcParams["xtick.labelsize"] = 12
        matplotlib.rcParams["ytick.labelsize"] = 12
        matplotlib.rcParams["mathtext.default"] = "default"
        font = {"family": "serif", "color": "black", "weight": "normal", "size": 12}
        fonttext = {"family": "serif", "color": "blue", "weight": "normal", "size": 12}
        # One row of panels per absorption model code (sn, cc, mb, me).
        fig1, axes1 = plt.subplots(figsize=(9, 12), nrows=4, ncols=3, dpi=90, sharey="row", sharex="col")
        edist = {}
        for j, nm in enumerate(["sn","cc","mb","me"]):
            df = []
            name = "mRMSE_"+nm
            for i, row in x.iterrows():
                stn = row["rio"]
                f = "data/sim/archive/{dn}/skills.{rio}.nc".format(dn=row["dn"].strftime("%Y.%m.%d.%H.%M"), rio=stn)
                d = xarray.open_dataset(f)
                # "acc": relative RMSE improvement of this model over the
                # DRAP baseline (1 - RMSE_model / RMSE_drap).
                d.attrs.update({"acc": 1-(d.attrs[name]/d.attrs["mRMSE_dr"]),
                    name: (d.attrs[name]), "sza": np.median(d["sza"].values),
                    "local_time": np.median(d["local_time"].values), "mlt": np.mean(d["mlt"].values)})
                df.append(d.attrs)
                #print(d.attrs["dPeak"]-d.attrs["mPeak_"+nm])
            df = pd.DataFrame.from_records(df)
            # NOTE(review): `d` here is whatever dataset the inner loop
            # ended on, so the same attrs["mPeak_dr"] is subtracted from
            # every row -- confirm this is intended.
            ux = df[np.abs(df["dPeak"]-d.attrs["mPeak_dr"])<.8]
            print(np.corrcoef(ux.dPeak, ux["mPeak_dr"]), len(ux))
            # Drop rows containing NaN/Inf before model fitting.
            df = df[~df.isin([np.nan, np.inf, -np.inf]).any(1)]
            model = ols("acc ~ sza+local_time+mlt+mlat", data=df)
            #model = ols(name + "~ sza*local_time*mlt*mlat", data=df)
            response = model.fit()
            anova = sm.stats.anova_lm(response, typ=2)
            edist[nm] = df.acc.tolist()
            print(anova)
            #print(response.summary())
            ax = axes1[j, 0]
            # NOTE(review): the smoothed (sza, acc) pair is computed but
            # never plotted below -- confirm whether it should replace the
            # raw scatter.
            sza, acc = running_mean(df.sza, df.acc, dx=5)
            ax.plot(df.sza, df.acc, "ro", alpha=0.5, markersize=0.75)
            #x = _create_x_(df.cossza.tolist(), np.mean(df.lat), np.mean(df.logfmax), np.mean(df["lt"]), lp="cossza")
            #o = r.get_prediction(x[["sza","mlt",""]].values)
            #m, v = o.predicted_mean, np.sqrt(o.var_pred_mean)
            #ax.plot(df.sza, o.predicted_mean, "r-", linewidth=0.75, alpha=0.8)
            #ax.fill_between(df.sza, m - 1.98*v, m + 1.98*v, color="r", alpha=0.2)
            ax = axes1[j, 1]
            ax.plot(df.local_time, df.acc, "ro", alpha=0.5, markersize=0.75)
            ax = axes1[j, 2]
            ax.plot(df.mlt, df.acc, "ro", alpha=0.5, markersize=0.75)
        axes1[0,0].set_ylabel("RMSE", fontdict=font)
        axes1[1,0].set_ylabel("RMSE", fontdict=font)
        axes1[2,0].set_ylabel("RMSE", fontdict=font)
        axes1[3,0].set_ylabel("RMSE", fontdict=font)
        axes1[3,0].set_xlabel(r"SZA, $\chi(^o)$", fontdict=font)
        axes1[3,1].set_xlabel(r"LT, Hours", fontdict=font)
        axes1[3,2].set_xlabel(r"MLT, Hours", fontdict=font)
        from scipy import stats
        # Paired t-test between the mb and sn model accuracy distributions.
        print(stats.ttest_rel(edist["mb"], edist["sn"]))
        fig1.savefig("_images_/stats.png", bbox_inches="tight")
    else:
        # Post-process archived flare runs into per-event skill files.
        xref = pd.read_csv("config/flares.csv", parse_dates=["dn", "start", "end"])
        for i, row in x.iterrows():
            ref = xref[xref.dn==row["dn"]]
            stn = row["rio"]
            f = "data/sim/archive/{dn}/flare.{rio}.nc.gz".format(dn=row["dn"].strftime("%Y.%m.%d.%H.%M"), rio=stn)
            # Decompress, read, then re-compress the archived NetCDF file.
            os.system("gzip -d " + f)
            _x_ = Dataset(f.replace(".gz", ""))
            os.system("gzip " + f.replace(".gz", ""))
            times = num2date(_x_.variables["time"][:], _x_.variables["time"].units, _x_.variables["time"].calendar,
                    only_use_cftime_datetimes=False)
            # Convert cftime objects -> numpy datetime64 -> python datetime.
            times = np.array([x._to_real_datetime() for x in times]).astype("datetime64[ns]")
            times = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in times]
            alts = _x_.variables["alts"][:]
            # Height-integrated absorption for each model, plus DRAP.
            o = {
                "sn": utils.int_absorption(_x_.variables["abs.ah.sn.o"][:], alts, extpoint=68),
                "cc": utils.int_absorption(_x_.variables["abs.ah.av.cc.o"][:], alts, extpoint=64),
                "mb": utils.int_absorption(_x_.variables["abs.ah.av.mb.o"][:], alts, extpoint=64),
                "me": utils.int_absorption(_x_.variables["abs.sw.ft.o"][:], alts, extpoint=64),
                "dr": _x_.variables["drap"][:],
            }
            pf = utils.Performance(stn=stn, ev=row["dn"], times=times, model=o, start=ref["start"].tolist()[0],
                    end=ref["end"].tolist()[0], bar=row["bar"], alt=row["alt"])
            fname = f.replace("flare","skills")
            pf._skill_()._params_()._to_netcdf_(fname.replace(".gz",""))
    return
if __name__ == "__main__":
    # Command-line driver: parse the options, run the selected experiment
    # and remove Python bytecode caches afterwards.
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--prog", default="flare", help="Program code [bgc/flare] (default flare)")
    parser.add_argument("-r", "--rio", default="ott", help="Riometer code (default ott)")
    parser.add_argument("-ev", "--event", default=dt.datetime(2015,3,11,16,22), help="Event date (default 2015-3-11T16:22)",
            type=dparser.isoparse)
    parser.add_argument("-s", "--start", default=dt.datetime(2015,3,11,16), help="Start date (default 2015-3-11T16:00)",
            type=dparser.isoparse)
    parser.add_argument("-e", "--end", default=dt.datetime(2015,3,11,17), help="End date (default 2015-3-11T17:00)",
            type=dparser.isoparse)
    parser.add_argument("-g", "--save_goes", action="store_false", help="Save goes data (default True)")
    parser.add_argument("-sat", "--sat", type=int, default=15, help="Satellite number (default 15)")
    parser.add_argument("-rm", "--save_riom", action="store_false", help="Save riometer data (default True)")
    parser.add_argument("-ps", "--plot_summary", action="store_true", help="Plot summary report (default False)")
    parser.add_argument("-sr", "--save_result", action="store_false", help="Save results (default True)")
    parser.add_argument("-c", "--clear", action="store_true", help="Clear previous stored files (default False)")
    parser.add_argument("-irr", "--irradiance", default="EUVAC+", help="Irradiance model (default EUVAC+)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Increase output verbosity (default False)")
    parser.add_argument("-pc", "--plot_code", type=int, default=0, help="Plotting code, applicable if --prog==plot (default 0)")
    parser.add_argument("-fr", "--frequency", type=float, default=30, help="Frequency of operations in MHz (default 30 MHz)")
    parser.add_argument("-ex", "--exp", type=int, default=0, help="Experiment code [0-3] (default 0)")
    args = parser.parse_args()
    if args.verbose:
        print("\n Parameter list for simulation ")
        for k in vars(args).keys():
            print("     ", k, "->", str(vars(args)[k]))
    # BUG FIX: the original used four independent `if` statements followed by
    # one `else`, so the `else` bound only to the `args.exp == 3` test and
    # "Program not implemented" was printed for every exp != 3 -- even after a
    # valid experiment (0, 1 or 2) had already run.  An elif chain dispatches
    # exactly one branch.
    if args.exp == 0: _case0_(args)
    elif args.exp == 1: _case1_(args)
    elif args.exp == 2: _case2_(args)
    elif args.exp == 3: _stats_(args)
    else: print("\n Program not implemented")
    print("")
    # Clean up compiled bytecode caches left behind by the model packages.
    if os.path.exists("models/__pycache__"): os.system("rm -rf models/__pycache__")
    if os.path.exists("models/experiments/__pycache__"): os.system("rm -rf models/experiments/__pycache__")
| [
"numpy.log10",
"model.Model",
"pandas.read_csv",
"matplotlib.dates.MinuteLocator",
"numpy.array",
"scipy.stats.ttest_rel",
"datetime.timedelta",
"utils.smooth",
"sys.path.append",
"utils.read_goes",
"datetime.datetime",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"netCDF4.... | [((331, 352), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (345, 352), False, 'import matplotlib\n'), ((419, 455), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""config/alt.mplstyle"""'], {}), "('config/alt.mplstyle')\n", (432, 455), True, 'import matplotlib.pyplot as plt\n'), ((468, 494), 'sys.path.append', 'sys.path.append', (['"""models/"""'], {}), "('models/')\n", (483, 494), False, 'import sys\n'), ((495, 533), 'sys.path.append', 'sys.path.append', (['"""models/experiments/"""'], {}), "('models/experiments/')\n", (510, 533), False, 'import sys\n'), ((1096, 1165), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'family': '"""serif"""', 'size': '(10)', 'weight': '"""normal"""'}), "(family='serif', size=10, weight='normal')\n", (1123, 1165), False, 'from matplotlib import font_manager\n'), ((1591, 1627), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['fmtr'], {}), '(fmtr)\n', (1621, 1627), False, 'import matplotlib\n'), ((1967, 2003), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['fmtr'], {}), '(fmtr)\n', (1997, 2003), False, 'import matplotlib\n'), ((2542, 2571), 'os.system', 'os.system', (["('gzip -d ' + fname)"], {}), "('gzip -d ' + fname)\n", (2551, 2571), False, 'import os\n'), ((2671, 2722), 'utils.PointGrid', 'utils.PointGrid', (['"""ott"""', 'ev', 'start', 'end', '(30)'], {'v': '(False)'}), "('ott', ev, start, end, 30, v=False)\n", (2686, 2722), False, 'import utils\n'), ((3557, 3571), 'numpy.array', 'np.array', (['_lo_'], {}), '(_lo_)\n', (3565, 3571), True, 'import numpy as np\n'), ((3621, 3643), 'numpy.linspace', 'np.linspace', (['(1)', '(70)', '(50)'], {}), '(1, 70, 50)\n', (3632, 3643), True, 'import numpy as np\n'), ((4582, 4596), 'numpy.array', 'np.array', (['_qo_'], {}), '(_qo_)\n', (4590, 4596), True, 'import numpy as np\n'), ((5203, 5258), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3)', 'nrows': 
'(1)', 'ncols': '(1)', 'dpi': '(100)'}), '(figsize=(3, 3), nrows=1, ncols=1, dpi=100)\n', (5215, 5258), True, 'import matplotlib.pyplot as plt\n'), ((13648, 13676), 'numpy.linspace', 'np.linspace', (['(0.75)', '(1.75)', '(101)'], {}), '(0.75, 1.75, 101)\n', (13659, 13676), True, 'import numpy as np\n'), ((18640, 18679), 'pandas.read_csv', 'pd.read_csv', (['"""config/flare.stats.m.csv"""'], {}), "('config/flare.stats.m.csv')\n", (18651, 18679), True, 'import pandas as pd\n'), ((23555, 23580), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (23578, 23580), False, 'import argparse\n'), ((25878, 25914), 'os.path.exists', 'os.path.exists', (['"""models/__pycache__"""'], {}), "('models/__pycache__')\n", (25892, 25914), False, 'import os\n'), ((25962, 26010), 'os.path.exists', 'os.path.exists', (['"""models/experiments/__pycache__"""'], {}), "('models/experiments/__pycache__')\n", (25976, 26010), False, 'import os\n'), ((1697, 1731), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': 'ivl'}), '(interval=ivl)\n', (1717, 1731), True, 'import matplotlib.dates as mdates\n'), ((2073, 2107), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': 'ivl'}), '(interval=ivl)\n', (2093, 2107), True, 'import matplotlib.dates as mdates\n'), ((2208, 2230), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(91)'], {}), '(0, 90, 91)\n', (2219, 2230), True, 'import numpy as np\n'), ((2352, 2384), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)', '(22)'], {}), '(2015, 3, 11, 16, 22)\n', (2363, 2384), True, 'import datetime as dt\n'), ((2382, 2414), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(15)', '(30)'], {}), '(2015, 3, 11, 15, 30)\n', (2393, 2414), True, 'import datetime as dt\n'), ((2412, 2444), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(17)', '(30)'], {}), '(2015, 3, 11, 17, 30)\n', (2423, 2444), True, 'import datetime as dt\n'), ((2470, 2495), 
'case0._Case0_', 'case0._Case0_', (['start', 'end'], {}), '(start, end)\n', (2483, 2495), False, 'import case0\n'), ((7091, 7122), 'utils.smooth', 'utils.smooth', (['_lo_[:, 0, 0]', '(11)'], {}), '(_lo_[:, 0, 0], 11)\n', (7103, 7122), False, 'import utils\n'), ((7193, 7224), 'utils.smooth', 'utils.smooth', (['_lo_[:, 1, 0]', '(11)'], {}), '(_lo_[:, 1, 0], 11)\n', (7205, 7224), False, 'import utils\n'), ((7300, 7331), 'utils.smooth', 'utils.smooth', (['_lo_[:, 2, 0]', '(11)'], {}), '(_lo_[:, 2, 0], 11)\n', (7312, 7331), False, 'import utils\n'), ((7407, 7438), 'utils.smooth', 'utils.smooth', (['_lo_[:, 3, 0]', '(11)'], {}), '(_lo_[:, 3, 0], 11)\n', (7419, 7438), False, 'import utils\n'), ((9095, 9131), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H"""'], {}), "('%H')\n", (9125, 9131), False, 'import matplotlib\n'), ((9152, 9207), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 5)', 'nrows': '(2)', 'ncols': '(3)', 'dpi': '(150)'}), '(figsize=(9, 5), nrows=2, ncols=3, dpi=150)\n', (9164, 9207), True, 'import matplotlib.pyplot as plt\n'), ((13455, 13486), 'datetime.datetime', 'dt.datetime', (['(2011)', '(9)', '(7)', '(22)', '(38)'], {}), '(2011, 9, 7, 22, 38)\n', (13466, 13486), True, 'import datetime as dt\n'), ((13505, 13536), 'datetime.datetime', 'dt.datetime', (['(2011)', '(9)', '(7)', '(22)', '(10)'], {}), '(2011, 9, 7, 22, 10)\n', (13516, 13536), True, 'import datetime as dt\n'), ((13534, 13565), 'datetime.datetime', 'dt.datetime', (['(2011)', '(9)', '(7)', '(23)', '(20)'], {}), '(2011, 9, 7, 23, 20)\n', (13545, 13565), True, 'import datetime as dt\n'), ((13580, 13611), 'datetime.datetime', 'dt.datetime', (['(2011)', '(9)', '(7)', '(22)', '(30)'], {}), '(2011, 9, 7, 22, 30)\n', (13591, 13611), True, 'import datetime as dt\n'), ((13609, 13639), 'datetime.datetime', 'dt.datetime', (['(2011)', '(9)', '(7)', '(23)', '(0)'], {}), '(2011, 9, 7, 23, 0)\n', (13620, 13639), True, 'import datetime as dt\n'), ((13982, 
13996), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (13991, 13996), False, 'import os\n'), ((18692, 18733), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['t', '"""%Y.%m.%d.%H.%M"""'], {}), "(t, '%Y.%m.%d.%H.%M')\n", (18712, 18733), True, 'import datetime as dt\n'), ((19137, 19224), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 12)', 'nrows': '(4)', 'ncols': '(3)', 'dpi': '(90)', 'sharey': '"""row"""', 'sharex': '"""col"""'}), "(figsize=(9, 12), nrows=4, ncols=3, dpi=90, sharey='row',\n sharex='col')\n", (19149, 19224), True, 'import matplotlib.pyplot as plt\n'), ((21868, 21936), 'pandas.read_csv', 'pd.read_csv', (['"""config/flares.csv"""'], {'parse_dates': "['dn', 'start', 'end']"}), "('config/flares.csv', parse_dates=['dn', 'start', 'end'])\n", (21879, 21936), True, 'import pandas as pd\n'), ((25916, 25954), 'os.system', 'os.system', (['"""rm -rf models/__pycache__"""'], {}), "('rm -rf models/__pycache__')\n", (25925, 25954), False, 'import os\n'), ((26012, 26062), 'os.system', 'os.system', (['"""rm -rf models/experiments/__pycache__"""'], {}), "('rm -rf models/experiments/__pycache__')\n", (26021, 26062), False, 'import os\n'), ((2243, 2266), 'numpy.linspace', 'np.linspace', (['(-6)', '(-1)', '(31)'], {}), '(-6, -1, 31)\n', (2254, 2266), True, 'import numpy as np\n'), ((2296, 2309), 'numpy.log10', 'np.log10', (['(0.1)'], {}), '(0.1)\n', (2304, 2309), True, 'import numpy as np\n'), ((2310, 2323), 'numpy.log10', 'np.log10', (['(200)'], {}), '(200)\n', (2318, 2323), True, 'import numpy as np\n'), ((4279, 4292), 'numpy.array', 'np.array', (['_k_'], {}), '(_k_)\n', (4287, 4292), True, 'import numpy as np\n'), ((4680, 4700), 'numpy.log10', 'np.log10', (['(f0 * 0.001)'], {}), '(f0 * 0.001)\n', (4688, 4700), True, 'import numpy as np\n'), ((8000, 8023), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(5)'], {}), '(2017, 9, 5)\n', (8011, 8023), True, 'import datetime as dt\n'), ((8023, 8046), 'datetime.datetime', 
'dt.datetime', (['(2017)', '(9)', '(6)'], {}), '(2017, 9, 6)\n', (8034, 8046), True, 'import datetime as dt\n'), ((8046, 8069), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(7)'], {}), '(2017, 9, 7)\n', (8057, 8069), True, 'import datetime as dt\n'), ((8126, 8153), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(5)', '(17)'], {}), '(2017, 9, 5, 17)\n', (8137, 8153), True, 'import datetime as dt\n'), ((8152, 8179), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(6)', '(11)'], {}), '(2017, 9, 6, 11)\n', (8163, 8179), True, 'import datetime as dt\n'), ((8178, 8209), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(7)', '(13)', '(30)'], {}), '(2017, 9, 7, 13, 30)\n', (8189, 8209), True, 'import datetime as dt\n'), ((8226, 8257), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(5)', '(19)', '(30)'], {}), '(2017, 9, 5, 19, 30)\n', (8237, 8257), True, 'import datetime as dt\n'), ((8255, 8282), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(6)', '(17)'], {}), '(2017, 9, 6, 17)\n', (8266, 8282), True, 'import datetime as dt\n'), ((8281, 8308), 'datetime.datetime', 'dt.datetime', (['(2017)', '(9)', '(7)', '(19)'], {}), '(2017, 9, 7, 19)\n', (8292, 8308), True, 'import datetime as dt\n'), ((8688, 8702), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (8697, 8702), False, 'import os\n'), ((9596, 9615), 'utils.read_goes', 'utils.read_goes', (['ev'], {}), '(ev)\n', (9611, 9615), False, 'import utils\n'), ((11168, 11197), 'os.system', 'os.system', (["('gzip -d ' + fname)"], {}), "('gzip -d ' + fname)\n", (11177, 11197), False, 'import os\n'), ((11325, 11422), 'netCDF4.num2date', 'num2date', (["nc.variables['time'][:]", "nc.variables['time'].units", "nc.variables['time'].calendar"], {}), "(nc.variables['time'][:], nc.variables['time'].units, nc.variables[\n 'time'].calendar)\n", (11333, 11422), False, 'from netCDF4 import Dataset, num2date\n'), ((19955, 19984), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', 
(['df'], {}), '(df)\n', (19980, 19984), True, 'import pandas as pd\n'), ((20199, 20244), 'statsmodels.formula.api.ols', 'ols', (['"""acc ~ sza+local_time+mlt+mlat"""'], {'data': 'df'}), "('acc ~ sza+local_time+mlt+mlat', data=df)\n", (20202, 20244), False, 'from statsmodels.formula.api import ols\n'), ((20371, 20405), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['response'], {'typ': '(2)'}), '(response, typ=2)\n', (20388, 20405), True, 'import statsmodels.api as sm\n'), ((21736, 21777), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (["edist['mb']", "edist['sn']"], {}), "(edist['mb'], edist['sn'])\n", (21751, 21777), False, 'from scipy import stats\n'), ((22172, 22197), 'os.system', 'os.system', (["('gzip -d ' + f)"], {}), "('gzip -d ' + f)\n", (22181, 22197), False, 'import os\n'), ((22320, 22453), 'netCDF4.num2date', 'num2date', (["_x_.variables['time'][:]", "_x_.variables['time'].units", "_x_.variables['time'].calendar"], {'only_use_cftime_datetimes': '(False)'}), "(_x_.variables['time'][:], _x_.variables['time'].units, _x_.\n variables['time'].calendar, only_use_cftime_datetimes=False)\n", (22328, 22453), False, 'from netCDF4 import Dataset, num2date\n'), ((23825, 23857), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)', '(22)'], {}), '(2015, 3, 11, 16, 22)\n', (23836, 23857), True, 'import datetime as dt\n'), ((23984, 24012), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)'], {}), '(2015, 3, 11, 16)\n', (23995, 24012), True, 'import datetime as dt\n'), ((24138, 24166), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(17)'], {}), '(2015, 3, 11, 17)\n', (24149, 24166), True, 'import datetime as dt\n'), ((3147, 3225), 'utils.int_absorption', 'utils.int_absorption', (["u.AH['SN']['O']", 'pg.alts'], {'extpoint': '(68)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['SN']['O'], pg.alts, extpoint=68, llim=60, ulim=110)\n", (3167, 3225), False, 'import utils\n'), ((3244, 3329), 'utils.int_absorption', 
'utils.int_absorption', (["u.AH['AV_CC']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['AV_CC']['O'], pg.alts, extpoint=64, llim=60,\n ulim=110)\n", (3264, 3329), False, 'import utils\n'), ((3344, 3429), 'utils.int_absorption', 'utils.int_absorption', (["u.AH['AV_MB']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['AV_MB']['O'], pg.alts, extpoint=64, llim=60,\n ulim=110)\n", (3364, 3429), False, 'import utils\n'), ((3444, 3522), 'utils.int_absorption', 'utils.int_absorption', (["u.SW['FT']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.SW['FT']['O'], pg.alts, extpoint=64, llim=60, ulim=110)\n", (3464, 3522), False, 'import utils\n'), ((9037, 9051), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (9046, 9051), False, 'import os\n'), ((14538, 14577), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (14568, 14577), False, 'import matplotlib\n'), ((14598, 14653), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 2)', 'nrows': '(1)', 'ncols': '(2)', 'dpi': '(100)'}), '(figsize=(6, 2), nrows=1, ncols=2, dpi=100)\n', (14610, 14653), True, 'import matplotlib.pyplot as plt\n'), ((15452, 15493), 'utils.read_riometer', 'utils.read_riometer', (['args.event', 'args.rio'], {}), '(args.event, args.rio)\n', (15471, 15493), False, 'import utils\n'), ((15598, 15628), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""Reds"""'], {}), "('Reds')\n", (15620, 15628), False, 'import matplotlib\n'), ((17138, 17149), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (17146, 17149), True, 'import numpy as np\n'), ((19556, 19578), 'xarray.open_dataset', 'xarray.open_dataset', (['f'], {}), '(f)\n', (19575, 19578), False, 'import xarray\n'), ((20067, 20104), 'numpy.corrcoef', 'np.corrcoef', (['ux.dPeak', "ux['mPeak_dr']"], {}), "(ux.dPeak, ux['mPeak_dr'])\n", (20078, 20104), True, 'import numpy as np\n'), 
((22739, 22811), 'utils.int_absorption', 'utils.int_absorption', (["_x_.variables['abs.ah.sn.o'][:]", 'alts'], {'extpoint': '(68)'}), "(_x_.variables['abs.ah.sn.o'][:], alts, extpoint=68)\n", (22759, 22811), False, 'import utils\n'), ((22839, 22914), 'utils.int_absorption', 'utils.int_absorption', (["_x_.variables['abs.ah.av.cc.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_x_.variables['abs.ah.av.cc.o'][:], alts, extpoint=64)\n", (22859, 22914), False, 'import utils\n'), ((22942, 23017), 'utils.int_absorption', 'utils.int_absorption', (["_x_.variables['abs.ah.av.mb.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_x_.variables['abs.ah.av.mb.o'][:], alts, extpoint=64)\n", (22962, 23017), False, 'import utils\n'), ((23045, 23117), 'utils.int_absorption', 'utils.int_absorption', (["_x_.variables['abs.sw.ft.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_x_.variables['abs.sw.ft.o'][:], alts, extpoint=64)\n", (23065, 23117), False, 'import utils\n'), ((3874, 3952), 'utils.int_absorption', 'utils.int_absorption', (["u.AH['SN']['O']", 'pg.alts'], {'extpoint': '(68)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['SN']['O'], pg.alts, extpoint=68, llim=60, ulim=110)\n", (3894, 3952), False, 'import utils\n'), ((3974, 4059), 'utils.int_absorption', 'utils.int_absorption', (["u.AH['AV_CC']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['AV_CC']['O'], pg.alts, extpoint=64, llim=60,\n ulim=110)\n", (3994, 4059), False, 'import utils\n'), ((4077, 4162), 'utils.int_absorption', 'utils.int_absorption', (["u.AH['AV_MB']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.AH['AV_MB']['O'], pg.alts, extpoint=64, llim=60,\n ulim=110)\n", (4097, 4162), False, 'import utils\n'), ((4180, 4258), 'utils.int_absorption', 'utils.int_absorption', (["u.SW['FT']['O']", 'pg.alts'], {'extpoint': '(64)', 'llim': '(60)', 'ulim': '(110)'}), "(u.SW['FT']['O'], pg.alts, extpoint=64, llim=60, ulim=110)\n", (4200, 4258), False, 'import utils\n'), ((4717, 
4734), 'numpy.log10', 'np.log10', (['(0.00022)'], {}), '(0.00022)\n', (4725, 4734), True, 'import numpy as np\n'), ((9467, 9488), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': 'h'}), '(hours=h)\n', (9479, 9488), True, 'import datetime as dt\n'), ((11733, 11818), 'utils.int_absorption', 'utils.int_absorption', (["nc.variables['abs.ah.sn.o'][:]", "model['alts']"], {'extpoint': '(68)'}), "(nc.variables['abs.ah.sn.o'][:], model['alts'], extpoint=68\n )\n", (11753, 11818), False, 'import utils\n'), ((11937, 12024), 'utils.int_absorption', 'utils.int_absorption', (["nc.variables['abs.ah.av.cc.o'][:]", "model['alts']"], {'extpoint': '(64)'}), "(nc.variables['abs.ah.av.cc.o'][:], model['alts'],\n extpoint=64)\n", (11957, 12024), False, 'import utils\n'), ((12129, 12216), 'utils.int_absorption', 'utils.int_absorption', (["nc.variables['abs.ah.av.mb.o'][:]", "model['alts']"], {'extpoint': '(64)'}), "(nc.variables['abs.ah.av.mb.o'][:], model['alts'],\n extpoint=64)\n", (12149, 12216), False, 'import utils\n'), ((12321, 12406), 'utils.int_absorption', 'utils.int_absorption', (["nc.variables['abs.sw.ft.o'][:]", "model['alts']"], {'extpoint': '(64)'}), "(nc.variables['abs.sw.ft.o'][:], model['alts'], extpoint=64\n )\n", (12341, 12406), False, 'import utils\n'), ((15751, 15776), 'os.system', 'os.system', (["('gzip -d ' + f)"], {}), "('gzip -d ' + f)\n", (15760, 15776), False, 'import os\n'), ((15898, 15995), 'netCDF4.num2date', 'num2date', (["nc.variables['time'][:]", "nc.variables['time'].units", "nc.variables['time'].calendar"], {}), "(nc.variables['time'][:], nc.variables['time'].units, nc.variables[\n 'time'].calendar)\n", (15906, 15995), False, 'from netCDF4 import Dataset, num2date\n'), ((16449, 16463), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16461, 16463), True, 'import pandas as pd\n'), ((16728, 16758), 'utils.estimate_error', 'utils.estimate_error', (['m', '_abs_'], {}), '(m, _abs_)\n', (16748, 16758), False, 'import utils\n'), ((20005, 20046), 
'numpy.abs', 'np.abs', (["(df['dPeak'] - d.attrs['mPeak_dr'])"], {}), "(df['dPeak'] - d.attrs['mPeak_dr'])\n", (20011, 20046), True, 'import numpy as np\n'), ((14096, 14129), 'model.Model', 'Model', (['args.rio', 'args.event', 'args'], {}), '(args.rio, args.event, args)\n', (14101, 14129), False, 'from model import Model\n'), ((16533, 16618), 'utils.int_absorption', 'utils.int_absorption', (["nc.variables['abs.ah.sn.o'][:]", "model['alts']"], {'extpoint': '(68)'}), "(nc.variables['abs.ah.sn.o'][:], model['alts'], extpoint=68\n )\n", (16553, 16618), False, 'import utils\n'), ((16908, 16929), 'numpy.median', 'np.median', (['Mx'], {'axis': '(1)'}), '(Mx, axis=1)\n', (16917, 16929), True, 'import numpy as np\n'), ((16936, 16954), 'numpy.std', 'np.std', (['Mx'], {'axis': '(1)'}), '(Mx, axis=1)\n', (16942, 16954), True, 'import numpy as np\n'), ((17973, 17985), 'numpy.argmin', 'np.argmin', (['X'], {}), '(X)\n', (17982, 17985), True, 'import numpy as np\n'), ((18305, 18317), 'numpy.argmin', 'np.argmin', (['X'], {}), '(X)\n', (18314, 18317), True, 'import numpy as np\n'), ((19709, 19735), 'numpy.median', 'np.median', (["d['sza'].values"], {}), "(d['sza'].values)\n", (19718, 19735), True, 'import numpy as np\n'), ((19772, 19805), 'numpy.median', 'np.median', (["d['local_time'].values"], {}), "(d['local_time'].values)\n", (19781, 19805), True, 'import numpy as np\n'), ((19814, 19838), 'numpy.mean', 'np.mean', (["d['mlt'].values"], {}), "(d['mlt'].values)\n", (19821, 19838), True, 'import numpy as np\n'), ((18347, 18359), 'numpy.argmin', 'np.argmin', (['X'], {}), '(X)\n', (18356, 18359), True, 'import numpy as np\n'), ((4350, 4363), 'numpy.log10', 'np.log10', (['nfo'], {}), '(nfo)\n', (4358, 4363), True, 'import numpy as np\n'), ((4416, 4429), 'numpy.log10', 'np.log10', (['nfo'], {}), '(nfo)\n', (4424, 4429), True, 'import numpy as np\n'), ((4481, 4494), 'numpy.log10', 'np.log10', (['nfo'], {}), '(nfo)\n', (4489, 4494), True, 'import numpy as np\n'), ((4546, 4559), 
'numpy.log10', 'np.log10', (['nfo'], {}), '(nfo)\n', (4554, 4559), True, 'import numpy as np\n'), ((15557, 15580), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (15569, 15580), True, 'import datetime as dt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code accompanying the manuscript:
"Reinterpreting the relationship between number of species and
number of links connects community structure and stability"
-------
v1.0.0 (First release)
-------
For any question or comment, please contact:
<NAME>(1), <EMAIL>
(1) University of Namur, Namur, Rue de Bruxelles 61, Belgium.
Research Unit in Environmental and Evolutionary Biology (URBE);
Institute of Life-Earth-Environment, Namur;
Center for Complex Systems, Namur.
"""
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# Data and parameters
##############################################################################
#### Adjacency matrix of the network (example) ####
mat = np.array([[0,0,0,1,0,0],
                [0,0,0,0,1,1],
                [0,0,0,0,1,1],
                [1,0,0,0,1,1],
                [0,1,1,1,0,0],
                [0,1,1,1,0,0]])
# The lower triangle of mat should contain the incoming links:
# aij will be 1 when a link comes from j and goes to i
# (i being the row and j being the column).
# Example: here, the 4th species eats the 1st species
# (the link is coming from 1st to 4th)
# while the 5th and 6th species eat the 2nd, the 3rd and the 4th species.
# The run time of this example goes up to 1 minute.
# The run time increases exponentially with the number of species
# (e.g. 35 sp = 1 minute, 100 sp = 10 minutes, 110 sp = 25 minutes).
#### Parameters ####
inttype = "trophic" # Or "mutualistic".
independent = True # Should the species having no incoming links be considered
# as independent (i.e. not undergo secondary extinction)?
# False: All species might undergo secondary extinctions;
# True: Species having no incoming links do not undergo secondary extinction.
nbsimu = 10000 # Number of different decompositions (simulations) to perform.
# Network specific L~S relationship
##############################################################################
# This part of the code relates to:
# - Equations 1-2
# - Figure 1
# - Extended Data Figure 1-2
# Note that the Figure 1 presented in the manuscript consider that all species
# undergo secondary extension (scenario 1 : independent = False) while the
# example given here is without secondary extinction for the basal species
# (independent = True).
import decomposition
# Decompositions (in-silico extinction experiments)
###################################################
S, L, b, z, sseq, lseq = decomposition.experiment(mat, nbsimu, independent)
# S = Number of species.
# L = Number of links.
# b = Shape of the L~S relationship (based on Equation 2).
# z = Proportion of independent species.
# sseq = Number of species along each decomposition (row).
# lseq = Number of links along each decomposition (row).
# R2 of the prediction of the L~S relationship
###############################################
breg, areg, r2reg, r2b = decomposition.R2L(sseq, lseq, b)
# breg = Estimation of b (from L = a * S^b) based on log-log regression.
# areg = Estimation of a (from L = a * S^b) based on log-log regression.
# r2reg = R2 of the prediction of lseq along sseq (in log-space) based
# on regression.
# r2b = R2 of the prediction of lseq along sseq (in log-space) based
# on the Equation 1.
print(r2reg, r2b)
# Computing r2reg et r2b for multiple networks allows to obtain
# Extended Data Figure 1.
# Figure 1 (equivalent)
#######################
#### Unique combinaison in the L~S plane ####
dots, likely = np.unique(tuple([sseq.flatten(),
lseq.flatten()]),
axis=1, return_counts=True)
likely[0] = nbsimu
#### Dots with size proportional to their likelihood in the L~S plane ####
plt.scatter(*dots, s = likely/100, c="grey",
label = "Observed")
#### Predictions of the L~S relationship ####
plt.plot(np.linspace(0,S,S*10), areg*np.linspace(0,S,S*10)**breg,
c = "black", ls="dotted",
label = "Log-log regression")# Regression
plt.plot(np.linspace(0,S,S*10),(np.linspace(0,S,S*10)/2)**b,
c = "black",
label = r"$L = (S/2)^b$") # Equation 1
plt.xlabel("Number of species (S)")
plt.ylabel("Number of links (L)")
plt.legend()
##############################################################################
# Predicting robustness
##############################################################################
# This part of the code relates to:
#     - Equations 3-5
#     - Figure 2-3
#     - Extended Data Figure 3a
import robustness
# Average number of extinctions occurring after the removal of one species
###########################################################################
deltaS, r2lost, lost, removed = robustness.R2deltaS(sseq, S, b, z)
# deltaS = Predicted average number of extinctions after one removal (Eq. 4).
# r2lost = R2 of the predictions of the number of species lost
#          based on the number of species removed using deltaS.
# lost = Number of species lost along each decomposition (row).
# removed = Number of species removed along each decomposition (row).
print(r2lost)
# Computing r2lost for multiple networks allows one to obtain Figure 2c.
# Figure 2a (equivalent)
##########################
#### Unique combinations in the lost~removed plane ####
dots, likely = np.unique(tuple([removed.flatten(),
                                 lost.flatten()]),
                          axis=1, return_counts=True)
#### Dots with size proportional to their likelihood in the lost~removed plane ####
plt.scatter(*dots, s = likely/100, c="grey", label = "Observed")
#### Predictions of the lost~removed relationship ####
plt.plot(np.linspace(0,S,S*10), deltaS*np.linspace(0,S,S*10),
         c = "black", label = "Predicted")
plt.xlabel("Number of species removed (r)")
plt.ylabel("Number of species lost (n)")
plt.axhline(0.5*S, color = "black",
            linestyle = "dotted") # 50% of species lost
plt.legend()
# Robustness
############
rob_obs, rob_var, rob_pred = robustness.robx(sseq, S, b, z, x=0.5)
# rob_obs = Mean robustness over nbsimu network decompositions.
# rob_var = Observed variance of robustness over nbsimu network decompositions.
# rob_pred = Predicted robustness (Equation 5).
print(rob_obs, rob_pred)
# Computing the rob_obs for multiple networks allows one to obtain Figure 3.
# Extended Data Figure 3a
##########################
xs = np.round(np.linspace(0.01,1,S),2) # Various robustness thresholds
robs = []
for x in xs: # For each threshold
    robs.append([*robustness.robx(sseq, S, b, z, x)]) # Compute robustness
robs = np.array(robs)
plt.errorbar(xs, robs[:,0],
yerr = robs[:,1]**0.5/2,
c = "black", fmt='o', label = "Observed")
plt.plot(xs, robs[:,2], c = "black", label = "Predicted", zorder=-1)
plt.xlabel("x")
plt.ylabel("Robustness at threshold x")
plt.legend()
# Computing rob_obs for multiple networks allows to obtain
# Extended Data Figure 3b.
##############################################################################
# Local stability - Robustness trade-off
##############################################################################
# This part of the code relates to:
# - Equations 7-8
# - Figure 5
import local_stability
res_obs, res_var, res_pred = local_stability.realpart(mat, inttype, nbsimu = 1000)
# res_obs = Mean of the observed real part of the rightmost eigenvalues.
# res_var = Variance of the observed real part of the rightmost eigenvalues.
# res_pred = Predicted real part of the rightmost eigenvalue (Equation 7-8).
print(res_obs, res_pred)
# Computing res_obs and res_pred for multiple networks allows to obtain:
# - The R2 of Resilience~b;
# - Figure 5.
| [
"decomposition.experiment",
"robustness.robx",
"robustness.R2deltaS",
"matplotlib.pyplot.ylabel",
"local_stability.realpart",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.array",
"numpy.linspace",
"decomposition.R2L",
"matplotlib.pyplot.scatter",
... | [((821, 955), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 1,\n 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0]]'], {}), '([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [1, 0,\n 0, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0]])\n', (829, 955), True, 'import numpy as np\n'), ((2637, 2687), 'decomposition.experiment', 'decomposition.experiment', (['mat', 'nbsimu', 'independent'], {}), '(mat, nbsimu, independent)\n', (2661, 2687), False, 'import decomposition\n'), ((3077, 3109), 'decomposition.R2L', 'decomposition.R2L', (['sseq', 'lseq', 'b'], {}), '(sseq, lseq, b)\n', (3094, 3109), False, 'import decomposition\n'), ((3905, 3967), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*dots'], {'s': '(likely / 100)', 'c': '"""grey"""', 'label': '"""Observed"""'}), "(*dots, s=likely / 100, c='grey', label='Observed')\n", (3916, 3967), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4348), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of species (S)"""'], {}), "('Number of species (S)')\n", (4323, 4348), True, 'import matplotlib.pyplot as plt\n'), ((4349, 4382), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of links (L)"""'], {}), "('Number of links (L)')\n", (4359, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4383, 4395), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4393, 4395), True, 'import matplotlib.pyplot as plt\n'), ((4882, 4916), 'robustness.R2deltaS', 'robustness.R2deltaS', (['sseq', 'S', 'b', 'z'], {}), '(sseq, S, b, z)\n', (4901, 4916), False, 'import robustness\n'), ((5703, 5765), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*dots'], {'s': '(likely / 100)', 'c': '"""grey"""', 'label': '"""Observed"""'}), "(*dots, s=likely / 100, c='grey', label='Observed')\n", (5714, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5930, 5973), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of species removed (r)"""'], {}), "('Number of species 
removed (r)')\n", (5940, 5973), True, 'import matplotlib.pyplot as plt\n'), ((5974, 6014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of species lost (n)"""'], {}), "('Number of species lost (n)')\n", (5984, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6070), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.5 * S)'], {'color': '"""black"""', 'linestyle': '"""dotted"""'}), "(0.5 * S, color='black', linestyle='dotted')\n", (6026, 6070), True, 'import matplotlib.pyplot as plt\n'), ((6107, 6119), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6117, 6119), True, 'import matplotlib.pyplot as plt\n'), ((6178, 6215), 'robustness.robx', 'robustness.robx', (['sseq', 'S', 'b', 'z'], {'x': '(0.5)'}), '(sseq, S, b, z, x=0.5)\n', (6193, 6215), False, 'import robustness\n'), ((6762, 6776), 'numpy.array', 'np.array', (['robs'], {}), '(robs)\n', (6770, 6776), True, 'import numpy as np\n'), ((6779, 6877), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['xs', 'robs[:, 0]'], {'yerr': '(robs[:, 1] ** 0.5 / 2)', 'c': '"""black"""', 'fmt': '"""o"""', 'label': '"""Observed"""'}), "(xs, robs[:, 0], yerr=robs[:, 1] ** 0.5 / 2, c='black', fmt='o',\n label='Observed')\n", (6791, 6877), True, 'import matplotlib.pyplot as plt\n'), ((6903, 6968), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'robs[:, 2]'], {'c': '"""black"""', 'label': '"""Predicted"""', 'zorder': '(-1)'}), "(xs, robs[:, 2], c='black', label='Predicted', zorder=-1)\n", (6911, 6968), True, 'import matplotlib.pyplot as plt\n'), ((6972, 6987), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (6982, 6987), True, 'import matplotlib.pyplot as plt\n'), ((6988, 7027), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Robustness at threshold x"""'], {}), "('Robustness at threshold x')\n", (6998, 7027), True, 'import matplotlib.pyplot as plt\n'), ((7028, 7040), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7038, 7040), True, 'import matplotlib.pyplot as plt\n'), 
((7450, 7501), 'local_stability.realpart', 'local_stability.realpart', (['mat', 'inttype'], {'nbsimu': '(1000)'}), '(mat, inttype, nbsimu=1000)\n', (7474, 7501), False, 'import local_stability\n'), ((4039, 4064), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (4050, 4064), True, 'import numpy as np\n'), ((4191, 4216), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (4202, 4216), True, 'import numpy as np\n'), ((5834, 5859), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (5845, 5859), True, 'import numpy as np\n'), ((6580, 6603), 'numpy.linspace', 'np.linspace', (['(0.01)', '(1)', 'S'], {}), '(0.01, 1, S)\n', (6591, 6603), True, 'import numpy as np\n'), ((5864, 5889), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (5875, 5889), True, 'import numpy as np\n'), ((4067, 4092), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (4078, 4092), True, 'import numpy as np\n'), ((4214, 4239), 'numpy.linspace', 'np.linspace', (['(0)', 'S', '(S * 10)'], {}), '(0, S, S * 10)\n', (4225, 4239), True, 'import numpy as np\n'), ((6698, 6731), 'robustness.robx', 'robustness.robx', (['sseq', 'S', 'b', 'z', 'x'], {}), '(sseq, S, b, z, x)\n', (6713, 6731), False, 'import robustness\n')] |
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
import unicodedata
from word2vec_api import get_word_vector
# ****** Define functions to create average word vectors of paragraphs
def makeFeatureVec(words, index2word_set, num_features=300):
    """Average the word vectors of every in-vocabulary token in *words*.

    Args:
        words: Raw text of the paragraph to embed.
        index2word_set: Collection of words known to the embedding model.
        num_features: Dimensionality of the word vectors (default 300).

    Returns:
        A float32 numpy array of shape (num_features,): the mean of the
        vectors of all recognised tokens, or the zero vector if no token
        is in the vocabulary.
    """
    accumulated = np.zeros((num_features,), dtype="float32")
    matched = 0.
    # Sum the vectors of every token the embedding model knows about.
    for token in word_tokenize(words):
        if token in index2word_set:
            matched += 1.
            accumulated = np.add(accumulated, get_word_vector(token))
    # Guard against division by zero when nothing matched the vocabulary.
    divisor = matched if matched >= 1 else 1
    return np.divide(accumulated, divisor)
def getAvgFeatureVecs(reviews, index2word_set, WORDS, num_features=300, app=None):
    """Compute the average word-vector representation of each review.

    Each review is cleaned — punctuation removed, stop/function words
    dropped, tokens lower-cased, lemmatised and spell-corrected against
    ``WORDS`` — and then embedded via ``makeFeatureVec``.

    Args:
        reviews: Sequence of raw review strings (must support ``len``).
        index2word_set: Collection of words known to the embedding model.
        WORDS: Dict mapping word -> frequency rank; drives the Norvig-style
            spell corrector defined below.
        num_features: Dimensionality of the word vectors (default 300).
        app: Optional app object; when given, each cleaned review is logged
            through ``app.logger.debug``.

    Returns:
        A float32 numpy array of shape ``(len(reviews), num_features)``.
    """
    lemma = nltk.wordnet.WordNetLemmatizer()
    blacklist = ['/', '@', '"', ':', '!', ';', '.', ',', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself',
                 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the']
    blacklist_set = set(blacklist)
    # Translation table covering every punctuation code point in the BMP
    # (range(65535) scans U+0000..U+FFFE; astral-plane punctuation is not
    # covered — kept as-is to preserve behaviour).
    tbl = dict.fromkeys(i for i in range(
        65535) if unicodedata.category(chr(i)).startswith('P'))

    def remove_punctuation(text):
        '''method to remove punctuations from sentences'''
        return text.translate(tbl)

    def P(word):
        "Probability of `word`."
        # use inverse of rank as proxy
        # returns 0 if the word isn't in the dictionary
        return - WORDS.get(word, 0)

    def correction(word):
        "Most probable spelling correction for word."
        return max(candidates(word), key=P)

    def candidates(word):
        "Generate possible spelling corrections for word."
        return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])

    def known(words):
        "The subset of `words` that appear in the dictionary of WORDS."
        return set(w for w in words if w in WORDS)

    def edits1(word):
        "All edits that are one edit away from `word`."
        letters = 'abcdefghijklmnopqrstuvwxyz'
        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [L + R[1:] for L, R in splits if R]
        transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
        replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
        inserts = [L + c + R for L, R in splits for c in letters]
        return set(deletes + transposes + replaces + inserts)

    def edits2(word):
        "All edits that are two edits away from `word`."
        return (e2 for e1 in edits1(word) for e2 in edits1(e1))

    def clean_review(review):
        '''Strip punctuation, drop blacklisted words, lemmatise and
        spell-correct the remaining tokens of the given review.'''
        review = remove_punctuation(review)
        splits = review.split()
        cleaned = []
        for single_word in splits:
            if single_word not in blacklist_set:
                cleaned.append(correction(
                    lemma.lemmatize(single_word.lower())))
        return ' '.join(cleaned)

    # Preallocate the result matrix, for speed.
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
    # FIX: replaced the original float `counter` (indexed via int(counter))
    # with enumerate; also dropped an unused nested helper `words()` and a
    # no-op `else: pass` branch.
    for row, review in enumerate(reviews):
        cleaned_review = clean_review(review)
        if app:
            app.logger.debug(cleaned_review)
        # Embed the cleaned review as the mean of its word vectors.
        reviewFeatureVecs[row] = makeFeatureVec(
            cleaned_review, index2word_set, num_features)
    return reviewFeatureVecs
| [
"nltk.wordnet.WordNetLemmatizer",
"numpy.add",
"word2vec_api.get_word_vector",
"nltk.tokenize.word_tokenize",
"numpy.zeros",
"numpy.divide"
] | [((454, 496), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float32"""'}), "((num_features,), dtype='float32')\n", (462, 496), True, 'import numpy as np\n'), ((678, 698), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['words'], {}), '(words)\n', (691, 698), False, 'from nltk.tokenize import word_tokenize\n'), ((1045, 1074), 'numpy.divide', 'np.divide', (['featureVec', 'nwords'], {}), '(featureVec, nwords)\n', (1054, 1074), True, 'import numpy as np\n'), ((1334, 1366), 'nltk.wordnet.WordNetLemmatizer', 'nltk.wordnet.WordNetLemmatizer', ([], {}), '()\n', (1364, 1366), False, 'import nltk\n'), ((820, 841), 'word2vec_api.get_word_vector', 'get_word_vector', (['word'], {}), '(word)\n', (835, 841), False, 'from word2vec_api import get_word_vector\n'), ((868, 894), 'numpy.add', 'np.add', (['featureVec', 'vector'], {}), '(featureVec, vector)\n', (874, 894), True, 'import numpy as np\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning on regression/classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import sys
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import json
import argparse
import numpy as np
import subprocess
import multiprocessing
from scipy.stats import pearsonr
import paddle
import paddle.fluid as fluid
import reader.cls as reader
from model.xlnet import XLNetConfig
from model.classifier import create_model
from optimization import optimization
from utils.args import ArgumentGroup, print_arguments, check_cuda
from utils.init import init_pretraining_params, init_checkpoint
from utils.cards import get_cards
# Number of distributed trainer processes (1 for single-process training).
num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))

# yapf: disable
parser = argparse.ArgumentParser(__doc__)

# Model configuration and checkpoint/initialization paths.
model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
model_g.add_arg("model_config_path", str, None, "Path to the json file for bert model config.")
model_g.add_arg("dropout", float, 0.1, "Dropout rate.")
model_g.add_arg("dropatt", float, 0.1, "Attention dropout rate.")
model_g.add_arg("clamp_len", int, -1, "Clamp length.")
model_g.add_arg("summary_type", str, "last",
                "Method used to summarize a sequence into a vector.", choices=['last'])
model_g.add_arg("use_summ_proj", bool, True,
                "Whether to use projection for summarizing sequences.")
model_g.add_arg("spiece_model_file", str, None, "Sentence Piece model path.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("init_pretraining_params", str, None,
                "Init pre-training params which preforms fine-tuning from. If the "
                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")

# Parameter initialization options.
init_g = ArgumentGroup(parser, "init", "parameter initialization options.")
init_g.add_arg("init", str, "normal", "Initialization method.", choices=["normal", "uniform"])
# BUGFIX: init_std and init_range were declared with type `str` although
# their defaults and downstream use (initializer std) are floats; a value
# supplied on the command line would have arrived as a string.
init_g.add_arg("init_std", float, 0.02, "Initialization std when init is normal.")
init_g.add_arg("init_range", float, 0.1, "Initialization std when init is uniform.")

# Optimization / training-loop options.
train_g = ArgumentGroup(parser, "training", "training options.")
train_g.add_arg("epoch", int, 1000, "Number of epoches for fine-tuning.")
train_g.add_arg("learning_rate", float, 5e-5, "Learning rate used to train with warmup.")
train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
                "scheduler of learning rate.", choices=['linear_warmup_decay', 'noam_decay'])
train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
train_g.add_arg("lr_layer_decay_rate", float, 1.0, "Top layer: lr[L] = args.learning_rate. "
                "Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.")
train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
train_g.add_arg("train_batch_size", int, 8, "Total examples' number in batch for training.")
train_g.add_arg("eval_batch_size", int, 128, "Total examples' number in batch for development.")
train_g.add_arg("predict_batch_size", int, 128, "Total examples' number in batch for prediction.")
train_g.add_arg("train_steps", int, 1000, "The total steps for training.")
train_g.add_arg("warmup_steps", int, 1000, "The steps for warmup.")
train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")

# Logging options.
log_g = ArgumentGroup(parser, "logging", "logging related.")
log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
log_g.add_arg("verbose", bool, False, "Whether to output verbose log.")

# Data paths and preprocessing options.
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
data_g.add_arg("data_dir", str, None, "Path to training data.")
data_g.add_arg("predict_dir", str, None, "Path to write predict results.")
data_g.add_arg("predict_threshold", float, 0.0, "Threshold for binary prediction.")
data_g.add_arg("max_seq_length", int, 512, "Number of words of the longest seqence.")
data_g.add_arg("uncased", bool, True,
               "Whether to lower case the input text. Should be True for uncased models and False for cased models.")
data_g.add_arg("random_seed", int, 0, "Random seed.")

# Run-type switches (which phases to execute, device, task).
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
run_type_g.add_arg("shuffle", bool, True, "")
run_type_g.add_arg("task_name", str, None,
                   "The name of task to perform fine-tuning, should be in {'xnli', 'mnli', 'cola', 'mrpc'}.")
# NOTE(review): is_regression is declared as `str` but only ever used as a
# truthy flag downstream — confirm whether it should be bool before changing.
run_type_g.add_arg("is_regression", str, None, "Whether it's a regression task.")
run_type_g.add_arg("do_train", bool, True, "Whether to perform training.")
run_type_g.add_arg("do_eval", bool, True, "Whether to perform evaluation on dev data set.")
run_type_g.add_arg("do_predict", bool, True, "Whether to perform evaluation on test data set.")
run_type_g.add_arg("eval_split", str, "dev", "Could be dev or test")

parser.add_argument("--enable_ce", action='store_true', help="The flag indicating whether to run the task for continuous evaluation.")

args = parser.parse_args()
# yapf: enable.
def evaluate(exe, predict_program, test_data_loader, fetch_list, eval_phase, num_examples):
    """Run one full pass over an evaluation split and print its metrics.

    Args:
        exe: fluid Executor used to run the program.
        predict_program: Test-mode fluid Program to execute.
        test_data_loader: DataLoader feeding the evaluation batches.
        fetch_list: Variables to fetch per step, in the order
            [loss, num_seqs, logits, labels].
        eval_phase: Name of the split being evaluated (used in the log line).
        num_examples: Total number of examples in the split.

    Prints the average loss plus Pearson correlation (regression tasks)
    or accuracy (classification tasks) over the whole split.
    """
    test_data_loader.start()
    total_cost, total_num_seqs = [], []
    all_logits, all_labels = [], []
    time_begin = time.time()
    total_steps = int(num_examples / args.eval_batch_size)
    # BUGFIX: the progress log used `steps % int(total_steps / 10)`, which
    # raises ZeroDivisionError whenever the split holds fewer than
    # 10 * eval_batch_size examples. Clamp the interval to at least 1.
    log_interval = max(1, total_steps // 10)
    steps = 0
    while True:
        try:
            np_loss, np_num_seqs, np_logits, np_labels = exe.run(
                program=predict_program, fetch_list=fetch_list)
            # Weight the batch loss by the number of sequences it covered.
            total_cost.extend(np_loss * np_num_seqs)
            total_num_seqs.extend(np_num_seqs)
            all_logits.extend(np_logits)
            all_labels.extend(np_labels)
            if steps % log_interval == 0:
                print("Evaluation [{}/{}]".format(steps, total_steps))
            steps += 1
        except fluid.core.EOFException:
            # The DataLoader signals exhaustion via EOFException.
            test_data_loader.reset()
            break
    all_logits = np.array(all_logits)
    all_labels = np.array(all_labels)
    if args.is_regression:
        # Regression tasks (e.g. STS-B) are scored with Pearson correlation.
        key = "eval_pearsonr"
        eval_result, _ = pearsonr(all_logits, all_labels)
    else:
        # Classification tasks are scored with accuracy over argmax predictions.
        key = "eval_accuracy"
        pred = np.argmax(all_logits, axis=1).reshape(all_labels.shape)
        eval_result = np.sum(pred == all_labels) / float(all_labels.size)
    time_end = time.time()
    print("[%s evaluation] ave loss: %f, %s: %f, elapsed time: %f s" %
          (eval_phase, np.sum(total_cost) / np.sum(total_num_seqs), key, eval_result,
           time_end - time_begin))
def predict(exe, predict_program, test_data_loader, task_name, label_list, fetch_list):
    """Run inference over the test split and write submission files.

    Writes two files into ``args.predict_dir``:
      * ``<task_name>.tsv``         — one "index<TAB>prediction" row per example.
      * ``<task_name>.logits.json`` — the raw logits of every example.

    Args:
        exe: fluid Executor used to run the program.
        predict_program: Test-mode fluid Program to execute.
        test_data_loader: DataLoader feeding the test batches.
        task_name: Lower-cased task name, used in the output file names.
        label_list: Candidate labels for classification tasks.
        fetch_list: Variables to fetch; expected to yield the logits tensor.
    """
    test_data_loader.start()
    pred_cnt = 0
    predict_results = []
    with open(os.path.join(args.predict_dir, "{}.tsv".format(
            task_name)), "w") as fout:
        fout.write("index\tprediction\n")
        while True:
            try:
                np_logits = exe.run(program=predict_program,
                                    fetch_list=fetch_list)
                # np_logits[0]: logits of the current batch, one row per example.
                for result in np_logits[0]:
                    if pred_cnt % 1000 == 0:
                        print("Predicting submission for example: {}".format(
                            pred_cnt))
                    logits = [float(x) for x in result.flat]
                    predict_results.append(logits)
                    if len(logits) == 1:
                        # Single logit: regression output is the prediction itself.
                        label_out = logits[0]
                    elif len(logits) == 2:
                        # Binary task: decide by margin against the threshold.
                        if logits[1] - logits[0] > args.predict_threshold:
                            label_out = label_list[1]
                        else:
                            label_out = label_list[0]
                    elif len(logits) > 2:
                        # Multi-class task: pick the argmax label.
                        max_index = np.argmax(np.array(logits, dtype=np.float32))
                        label_out = label_list[max_index]
                    else:
                        raise NotImplementedError
                    fout.write("{}\t{}\n".format(pred_cnt, label_out))
                    pred_cnt += 1
            except fluid.core.EOFException:
                # The DataLoader signals exhaustion via EOFException.
                test_data_loader.reset()
                break
    predict_json_path = os.path.join(args.predict_dir, "{}.logits.json".format(
        task_name))
    with open(predict_json_path, "w") as fp:
        json.dump(predict_results, fp, indent=4)
def get_device_num():
    """Return the number of devices this process should drive.

    Under multi-process distributed training every process uses exactly
    one GPU card; otherwise the count comes from CUDA_VISIBLE_DEVICES,
    falling back to counting the lines of `nvidia-smi -L` when the
    variable is unset or empty.
    """
    # NOTE(zcd): for multi-processe training, each process use one GPU card.
    if num_trainers > 1:
        return 1
    visible = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if not visible:
        # `nvidia-smi -L` prints one line per installed GPU.
        return subprocess.check_output(['nvidia-smi', '-L']).decode().count('\n')
    return len(visible.split(','))
def main(args):
    """Build, train, evaluate and/or run prediction for an XLNet classifier.

    Driven entirely by the parsed command-line ``args``: validates the
    phase flags, constructs the fluid programs for the requested phases
    (train / eval / predict), restores parameters from a checkpoint or
    pre-trained weights, runs the training loop with periodic logging,
    checkpointing and validation, and finally performs the requested
    evaluation and prediction passes.

    Raises:
        ValueError: if no phase flag is set, if ``predict_dir`` is missing
            while predicting, or if eval/predict is requested without an
            ``init_checkpoint``.
    """
    if not (args.do_train or args.do_eval or args.do_predict):
        raise ValueError("For args `do_train`, `do_eval` and `do_predict`, at "
                         "least one of them must be True.")
    if args.do_predict and not args.predict_dir:
        raise ValueError("args 'predict_dir' should be given when doing predict")

    # NOTE(review): this assumes predict_dir is always set (os.path.exists(None)
    # would raise) — confirm whether it should be guarded by args.do_predict.
    if not os.path.exists(args.predict_dir):
        os.makedirs(args.predict_dir)

    xlnet_config = XLNetConfig(args.model_config_path)
    xlnet_config.print_config()

    # Select the execution place and how many devices this process drives.
    if args.use_cuda:
        place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
        dev_count = get_device_num()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    exe = fluid.Executor(place)

    task_name = args.task_name.lower()
    processors = {
        "mnli_matched": reader.MnliMatchedProcessor,
        "mnli_mismatched": reader.MnliMismatchedProcessor,
        'sts-b': reader.StsbProcessor,
        'imdb': reader.ImdbProcessor,
        "yelp5": reader.Yelp5Processor
    }

    processor = processors[task_name](args)
    # Regression tasks have no discrete labels.
    label_list = processor.get_labels() if not args.is_regression else None
    num_labels = len(label_list) if label_list is not None else None

    train_program = fluid.Program()
    startup_prog = fluid.Program()
    if args.random_seed is not None:
        startup_prog.random_seed = args.random_seed
        train_program.random_seed = args.random_seed

    if args.do_train:
        # NOTE: If num_trainers > 1, the shuffle_seed must be set, because
        # the order of batch data generated by reader
        # must be the same in the respective processes.
        shuffle_seed = 1 if num_trainers > 1 else None
        train_data_generator = processor.data_generator(
            batch_size=args.train_batch_size,
            is_regression=args.is_regression,
            phase='train',
            epoch=args.epoch,
            dev_count=dev_count,
            shuffle=args.shuffle)

        num_train_examples = processor.get_num_examples(phase='train')

        print("Device count: %d" % dev_count)
        print("Max num of epoches: %d" % args.epoch)
        print("Num of train examples: %d" % num_train_examples)
        print("Num of train steps: %d" % args.train_steps)
        print("Num of warmup steps: %d" % args.warmup_steps)

        # Build the training graph plus the learning-rate schedule.
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                train_data_loader, loss, logits, num_seqs, label_ids = create_model(
                    args,
                    xlnet_config=xlnet_config,
                    n_class=num_labels)
                scheduled_lr = optimization(
                    loss=loss,
                    warmup_steps=args.warmup_steps,
                    num_train_steps=args.train_steps,
                    learning_rate=args.learning_rate,
                    train_program=train_program,
                    startup_prog=startup_prog,
                    weight_decay=args.weight_decay,
                    lr_layer_decay_rate=args.lr_layer_decay_rate,
                    scheduler=args.lr_scheduler)

    if args.do_eval:
        # Separate test-mode program sharing parameters via unique_name.guard.
        dev_prog = fluid.Program()
        with fluid.program_guard(dev_prog, startup_prog):
            with fluid.unique_name.guard():
                dev_data_loader, loss, logits, num_seqs, label_ids = create_model(
                    args,
                    xlnet_config=xlnet_config,
                    n_class=num_labels)

        dev_prog = dev_prog.clone(for_test=True)
        dev_data_loader.set_batch_generator(
            processor.data_generator(
                batch_size=args.eval_batch_size,
                is_regression=args.is_regression,
                phase=args.eval_split,
                epoch=1,
                dev_count=1,
                shuffle=False), place)

    if args.do_predict:
        predict_prog = fluid.Program()
        with fluid.program_guard(predict_prog, startup_prog):
            with fluid.unique_name.guard():
                predict_data_loader, loss, logits, num_seqs, label_ids = create_model(
                    args,
                    xlnet_config=xlnet_config,
                    n_class=num_labels)

        predict_prog = predict_prog.clone(for_test=True)
        predict_data_loader.set_batch_generator(
            processor.data_generator(
                batch_size=args.predict_batch_size,
                is_regression=args.is_regression,
                phase=args.eval_split,
                epoch=1,
                dev_count=1,
                shuffle=False), place)

    exe.run(startup_prog)

    # Parameter initialization: a full checkpoint wins over pre-trained params.
    if args.do_train:
        if args.init_checkpoint and args.init_pretraining_params:
            print(
                "WARNING: args 'init_checkpoint' and 'init_pretraining_params' "
                "both are set! Only arg 'init_checkpoint' is made valid.")
        if args.init_checkpoint:
            init_checkpoint(
                exe,
                args.init_checkpoint,
                main_program=startup_prog)
        elif args.init_pretraining_params:
            init_pretraining_params(
                exe,
                args.init_pretraining_params,
                main_program=startup_prog)
    elif args.do_eval or args.do_predict:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or testing!")
        init_checkpoint(
            exe,
            args.init_checkpoint,
            main_program=startup_prog)

    if args.do_train:
        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.use_experimental_executor = args.use_fast_executor
        exec_strategy.num_threads = dev_count

        build_strategy = fluid.BuildStrategy()

        if args.use_cuda and num_trainers > 1:
            assert shuffle_seed is not None
            # NOTE(review): `dist_utils` is not among the visible imports of
            # this file — confirm it is imported elsewhere, or multi-trainer
            # runs will fail with a NameError here.
            dist_utils.prepare_for_multi_process(exe, build_strategy, train_program)
            train_data_generator = fluid.contrib.reader.distributed_batch_reader(
                train_data_generator)

        train_compiled_program = fluid.CompiledProgram(train_program).with_data_parallel(
            loss_name=loss.name, build_strategy=build_strategy)

        train_data_loader.set_batch_generator(train_data_generator, place)

    if args.do_train:
        train_data_loader.start()
        steps = 0
        total_cost, total_num_seqs, total_time = [], [], 0.0
        throughput = []
        ce_info = []
        while steps < args.train_steps:
            try:
                time_begin = time.time()
                steps += 1
                # Only fetch metrics on logging steps to keep training fast.
                if steps % args.skip_steps == 0:
                    fetch_list = [loss.name, scheduled_lr.name, num_seqs.name]
                else:
                    fetch_list = []
                outputs = exe.run(train_compiled_program, fetch_list=fetch_list)
                time_end = time.time()
                used_time = time_end - time_begin
                total_time += used_time

                if steps % args.skip_steps == 0:
                    np_loss, np_lr, np_num_seqs = outputs
                    total_cost.extend(np_loss * np_num_seqs)
                    total_num_seqs.extend(np_num_seqs)

                    if args.verbose:
                        verbose = "train data_loader queue size: %d, " % train_data_loader.queue.size(
                        )
                        verbose += "learning rate: %f" % np_lr[0]
                        print(verbose)

                    current_example, current_epoch = processor.get_train_progress(
                    )
                    log_record = "epoch: {}, progress: {}/{}, step: {}, ave loss: {}".format(
                        current_epoch, current_example, num_train_examples,
                        steps, np.sum(total_cost) / np.sum(total_num_seqs))
                    ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), used_time])
                    if steps > 0:
                        throughput.append(args.skip_steps / total_time)
                        log_record = log_record + ", speed: %f steps/s" % (args.skip_steps / total_time)
                        print(log_record)
                    else:
                        print(log_record)
                    total_cost, total_num_seqs, total_time = [], [], 0.0

                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save_persistables(exe, save_path, train_program)

                if steps % args.validation_steps == 0:
                    print("Average throughtput: %s" % (np.average(throughput)))
                    throughput = []
                    # evaluate dev set
                    if args.do_eval:
                        evaluate(exe, dev_prog, dev_data_loader,
                                 [loss.name, num_seqs.name, logits.name, label_ids.name],
                                 args.eval_split, processor.get_num_examples(phase=args.eval_split))
            except fluid.core.EOFException:
                # Data exhausted before train_steps: save a final checkpoint.
                save_path = os.path.join(args.checkpoints, "step_" + str(steps))
                fluid.io.save_persistables(exe, save_path, train_program)
                train_data_loader.reset()
                break
        if args.enable_ce:
            # Continuous-evaluation KPIs: second-to-last logged loss/time.
            card_num = get_cards()
            ce_cost = 0
            ce_time = 0
            # FIX: narrowed the original bare `except:` — ce_info[-2] can only
            # fail with IndexError when fewer than two entries were logged.
            try:
                ce_cost = ce_info[-2][0]
                ce_time = ce_info[-2][1]
            except IndexError:
                print("ce info error")
            print("kpis\ttrain_duration_%s_card%s\t%s" %
                  (args.task_name.replace("-", "_"), card_num, ce_time))
            print("kpis\ttrain_cost_%s_card%s\t%f" %
                  (args.task_name.replace("-", "_"), card_num, ce_cost))

    # final eval on dev set
    if args.do_eval:
        # FIX: pass label_ids.name for consistency with the in-training
        # evaluate() call above (fluid accepts both, but mixing styles is
        # error-prone).
        evaluate(exe, dev_prog, dev_data_loader,
                 [loss.name, num_seqs.name, logits.name, label_ids.name], args.eval_split,
                 processor.get_num_examples(phase=args.eval_split))

    # final eval on test set
    if args.do_predict:
        predict(exe, predict_prog, predict_data_loader, task_name, label_list, [logits.name])
if __name__ == '__main__':
    # FIX: dropped a redundant local `import paddle` — the module is already
    # imported at the top of the file.
    # Static-graph mode is required by the fluid APIs used above.
    paddle.enable_static()
    print_arguments(args)
    check_cuda(args.use_cuda)
    main(args)
| [
"paddle.fluid.contrib.reader.distributed_batch_reader",
"sys.setdefaultencoding",
"utils.init.init_checkpoint",
"multiprocessing.cpu_count",
"numpy.array",
"paddle.fluid.Executor",
"scipy.stats.pearsonr",
"paddle.fluid.ExecutionStrategy",
"os.path.exists",
"utils.args.print_arguments",
"argparse... | [((1447, 1479), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (1470, 1479), False, 'import argparse\n'), ((1490, 1554), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""model"""', '"""model configuration and paths."""'], {}), "(parser, 'model', 'model configuration and paths.')\n", (1503, 1554), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((2741, 2807), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""init"""', '"""parameter initialization options."""'], {}), "(parser, 'init', 'parameter initialization options.')\n", (2754, 2807), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((3094, 3148), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""training"""', '"""training options."""'], {}), "(parser, 'training', 'training options.')\n", (3107, 3148), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((4496, 4548), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""logging"""', '"""logging related."""'], {}), "(parser, 'logging', 'logging related.')\n", (4509, 4548), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((4738, 4826), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""data"""', '"""Data paths, vocab paths and data processing options"""'], {}), "(parser, 'data',\n 'Data paths, vocab paths and data processing options')\n", (4751, 4826), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((5397, 5455), 'utils.args.ArgumentGroup', 'ArgumentGroup', (['parser', '"""run_type"""', '"""running type options."""'], {}), "(parser, 'run_type', 'running type options.')\n", (5410, 5455), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((831, 861), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (853, 861), False, 'import sys\n'), ((1379, 
1419), 'os.environ.get', 'os.environ.get', (['"""PADDLE_TRAINERS_NUM"""', '(1)'], {}), "('PADDLE_TRAINERS_NUM', 1)\n", (1393, 1419), False, 'import os\n'), ((6839, 6850), 'time.time', 'time.time', ([], {}), '()\n', (6848, 6850), False, 'import time\n'), ((7558, 7578), 'numpy.array', 'np.array', (['all_logits'], {}), '(all_logits)\n', (7566, 7578), True, 'import numpy as np\n'), ((7596, 7616), 'numpy.array', 'np.array', (['all_labels'], {}), '(all_labels)\n', (7604, 7616), True, 'import numpy as np\n'), ((7932, 7943), 'time.time', 'time.time', ([], {}), '()\n', (7941, 7943), False, 'import time\n'), ((10079, 10123), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""', 'None'], {}), "('CUDA_VISIBLE_DEVICES', None)\n", (10093, 10123), False, 'import os\n'), ((10773, 10808), 'model.xlnet.XLNetConfig', 'XLNetConfig', (['args.model_config_path'], {}), '(args.model_config_path)\n', (10784, 10808), False, 'from model.xlnet import XLNetConfig\n'), ((11110, 11131), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (11124, 11131), True, 'import paddle.fluid as fluid\n'), ((11626, 11641), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (11639, 11641), True, 'import paddle.fluid as fluid\n'), ((11661, 11676), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (11674, 11676), True, 'import paddle.fluid as fluid\n'), ((20782, 20804), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (20802, 20804), False, 'import paddle\n'), ((20809, 20830), 'utils.args.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (20824, 20830), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((20835, 20860), 'utils.args.check_cuda', 'check_cuda', (['args.use_cuda'], {}), '(args.use_cuda)\n', (20845, 20860), False, 'from utils.args import ArgumentGroup, print_arguments, check_cuda\n'), ((7699, 7731), 'scipy.stats.pearsonr', 'pearsonr', (['all_logits', 'all_labels'], {}), '(all_logits, 
all_labels)\n', (7707, 7731), False, 'from scipy.stats import pearsonr\n'), ((9882, 9922), 'json.dump', 'json.dump', (['predict_results', 'fp'], {'indent': '(4)'}), '(predict_results, fp, indent=4)\n', (9891, 9922), False, 'import json\n'), ((10681, 10713), 'os.path.exists', 'os.path.exists', (['args.predict_dir'], {}), '(args.predict_dir)\n', (10695, 10713), False, 'import os\n'), ((10723, 10752), 'os.makedirs', 'os.makedirs', (['args.predict_dir'], {}), '(args.predict_dir)\n', (10734, 10752), False, 'import os\n'), ((11003, 11019), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (11017, 11019), True, 'import paddle.fluid as fluid\n'), ((13556, 13571), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (13569, 13571), True, 'import paddle.fluid as fluid\n'), ((14282, 14297), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (14295, 14297), True, 'import paddle.fluid as fluid\n'), ((16006, 16031), 'paddle.fluid.ExecutionStrategy', 'fluid.ExecutionStrategy', ([], {}), '()\n', (16029, 16031), True, 'import paddle.fluid as fluid\n'), ((16176, 16197), 'paddle.fluid.BuildStrategy', 'fluid.BuildStrategy', ([], {}), '()\n', (16195, 16197), True, 'import paddle.fluid as fluid\n'), ((7865, 7891), 'numpy.sum', 'np.sum', (['(pred == all_labels)'], {}), '(pred == all_labels)\n', (7871, 7891), True, 'import numpy as np\n'), ((12724, 12772), 'paddle.fluid.program_guard', 'fluid.program_guard', (['train_program', 'startup_prog'], {}), '(train_program, startup_prog)\n', (12743, 12772), True, 'import paddle.fluid as fluid\n'), ((13585, 13628), 'paddle.fluid.program_guard', 'fluid.program_guard', (['dev_prog', 'startup_prog'], {}), '(dev_prog, startup_prog)\n', (13604, 13628), True, 'import paddle.fluid as fluid\n'), ((14311, 14358), 'paddle.fluid.program_guard', 'fluid.program_guard', (['predict_prog', 'startup_prog'], {}), '(predict_prog, startup_prog)\n', (14330, 14358), True, 'import paddle.fluid as fluid\n'), ((15319, 15388), 
'utils.init.init_checkpoint', 'init_checkpoint', (['exe', 'args.init_checkpoint'], {'main_program': 'startup_prog'}), '(exe, args.init_checkpoint, main_program=startup_prog)\n', (15334, 15388), False, 'from utils.init import init_pretraining_params, init_checkpoint\n'), ((15852, 15921), 'utils.init.init_checkpoint', 'init_checkpoint', (['exe', 'args.init_checkpoint'], {'main_program': 'startup_prog'}), '(exe, args.init_checkpoint, main_program=startup_prog)\n', (15867, 15921), False, 'from utils.init import init_pretraining_params, init_checkpoint\n'), ((16410, 16477), 'paddle.fluid.contrib.reader.distributed_batch_reader', 'fluid.contrib.reader.distributed_batch_reader', (['train_data_generator'], {}), '(train_data_generator)\n', (16455, 16477), True, 'import paddle.fluid as fluid\n'), ((19859, 19870), 'utils.cards.get_cards', 'get_cards', ([], {}), '()\n', (19868, 19870), False, 'from utils.cards import get_cards\n'), ((7787, 7816), 'numpy.argmax', 'np.argmax', (['all_logits'], {'axis': '(1)'}), '(all_logits, axis=1)\n', (7796, 7816), True, 'import numpy as np\n'), ((10900, 10937), 'os.getenv', 'os.getenv', (['"""FLAGS_selected_gpus"""', '"""0"""'], {}), "('FLAGS_selected_gpus', '0')\n", (10909, 10937), False, 'import os\n'), ((11070, 11097), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11095, 11097), False, 'import multiprocessing\n'), ((12791, 12816), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (12814, 12816), True, 'import paddle.fluid as fluid\n'), ((12889, 12954), 'model.classifier.create_model', 'create_model', (['args'], {'xlnet_config': 'xlnet_config', 'n_class': 'num_labels'}), '(args, xlnet_config=xlnet_config, n_class=num_labels)\n', (12901, 12954), False, 'from model.classifier import create_model\n'), ((13047, 13353), 'optimization.optimization', 'optimization', ([], {'loss': 'loss', 'warmup_steps': 'args.warmup_steps', 'num_train_steps': 'args.train_steps', 'learning_rate': 
'args.learning_rate', 'train_program': 'train_program', 'startup_prog': 'startup_prog', 'weight_decay': 'args.weight_decay', 'lr_layer_decay_rate': 'args.lr_layer_decay_rate', 'scheduler': 'args.lr_scheduler'}), '(loss=loss, warmup_steps=args.warmup_steps, num_train_steps=\n args.train_steps, learning_rate=args.learning_rate, train_program=\n train_program, startup_prog=startup_prog, weight_decay=args.\n weight_decay, lr_layer_decay_rate=args.lr_layer_decay_rate, scheduler=\n args.lr_scheduler)\n', (13059, 13353), False, 'from optimization import optimization\n'), ((13647, 13672), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (13670, 13672), True, 'import paddle.fluid as fluid\n'), ((13743, 13808), 'model.classifier.create_model', 'create_model', (['args'], {'xlnet_config': 'xlnet_config', 'n_class': 'num_labels'}), '(args, xlnet_config=xlnet_config, n_class=num_labels)\n', (13755, 13808), False, 'from model.classifier import create_model\n'), ((14377, 14402), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (14400, 14402), True, 'import paddle.fluid as fluid\n'), ((14477, 14542), 'model.classifier.create_model', 'create_model', (['args'], {'xlnet_config': 'xlnet_config', 'n_class': 'num_labels'}), '(args, xlnet_config=xlnet_config, n_class=num_labels)\n', (14489, 14542), False, 'from model.classifier import create_model\n'), ((15493, 15583), 'utils.init.init_pretraining_params', 'init_pretraining_params', (['exe', 'args.init_pretraining_params'], {'main_program': 'startup_prog'}), '(exe, args.init_pretraining_params, main_program=\n startup_prog)\n', (15516, 15583), False, 'from utils.init import init_pretraining_params, init_checkpoint\n'), ((16531, 16567), 'paddle.fluid.CompiledProgram', 'fluid.CompiledProgram', (['train_program'], {}), '(train_program)\n', (16552, 16567), True, 'import paddle.fluid as fluid\n'), ((17001, 17012), 'time.time', 'time.time', ([], {}), '()\n', (17010, 17012), False, 
'import time\n'), ((17336, 17347), 'time.time', 'time.time', ([], {}), '()\n', (17345, 17347), False, 'import time\n'), ((8038, 8056), 'numpy.sum', 'np.sum', (['total_cost'], {}), '(total_cost)\n', (8044, 8056), True, 'import numpy as np\n'), ((8059, 8081), 'numpy.sum', 'np.sum', (['total_num_seqs'], {}), '(total_num_seqs)\n', (8065, 8081), True, 'import numpy as np\n'), ((18983, 19040), 'paddle.fluid.io.save_persistables', 'fluid.io.save_persistables', (['exe', 'save_path', 'train_program'], {}), '(exe, save_path, train_program)\n', (19009, 19040), True, 'import paddle.fluid as fluid\n'), ((19687, 19744), 'paddle.fluid.io.save_persistables', 'fluid.io.save_persistables', (['exe', 'save_path', 'train_program'], {}), '(exe, save_path, train_program)\n', (19713, 19744), True, 'import paddle.fluid as fluid\n'), ((10230, 10275), 'subprocess.check_output', 'subprocess.check_output', (["['nvidia-smi', '-L']"], {}), "(['nvidia-smi', '-L'])\n", (10253, 10275), False, 'import subprocess\n'), ((18249, 18267), 'numpy.sum', 'np.sum', (['total_cost'], {}), '(total_cost)\n', (18255, 18267), True, 'import numpy as np\n'), ((18270, 18292), 'numpy.sum', 'np.sum', (['total_num_seqs'], {}), '(total_num_seqs)\n', (18276, 18292), True, 'import numpy as np\n'), ((19152, 19174), 'numpy.average', 'np.average', (['throughput'], {}), '(throughput)\n', (19162, 19174), True, 'import numpy as np\n'), ((18330, 18348), 'numpy.sum', 'np.sum', (['total_cost'], {}), '(total_cost)\n', (18336, 18348), True, 'import numpy as np\n'), ((18351, 18373), 'numpy.sum', 'np.sum', (['total_num_seqs'], {}), '(total_num_seqs)\n', (18357, 18373), True, 'import numpy as np\n'), ((9343, 9377), 'numpy.array', 'np.array', (['logits'], {'dtype': 'np.float32'}), '(logits, dtype=np.float32)\n', (9351, 9377), True, 'import numpy as np\n')] |
'''
Author: <NAME> and <NAME>
Purpose: To predict aesthetic quality of image on a scale of 1 to 5.
How to use: There is a folder named test_images in the parent directory of the scripts.
            Put all the images you want to test in that folder.
            Run this code, i.e. python3 main.py
Sample Output:
farm1_262_20009074919_cdd9c88d5f_b_0_f.jpg 3
farm1_551_19542178634_a28b694bb3_b_0_f.jpg 2
farm1_546_19894951500_a19ce7092d_b_0_f.jpg 2
farm4_3825_20263660105_2e24625702_b_0_f.jpg 5
'''
from PIL import Image
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Model # basic class for specifying and training a neural network
from keras.layers import Input, Convolution2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from keras.utils import np_utils # utilities for one-hot encoding of ground truth values
import numpy as np
import random as rand
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import keras
'''
This function reads the accuracies of the three generated models.
Each accuracy is a single value read from one of the files
accuracy1.txt, accuracy2.txt and accuracy3.txt.
All of these files live in the "accuracy" folder in the parent directory of the scripts folder.
'''
def get_accuracies(accuracy_dir='../accuracy'):
    """Read the validation accuracies of the three trained models.

    Each file ``accuracy1.txt`` .. ``accuracy3.txt`` in *accuracy_dir* holds a
    single float on its first line.

    Args:
        accuracy_dir: Folder containing the accuracy files. Defaults to the
            original hard-coded location so existing callers keep working.

    Returns:
        Tuple ``(accuracy1, accuracy2, accuracy3)`` of floats.
    """
    accuracies = []
    for i in (1, 2, 3):
        # Bug fix: the original read accuracy1.txt three times, so models 2
        # and 3 were always judged with model 1's accuracy.
        filename = os.path.join(accuracy_dir, 'accuracy%d.txt' % i)
        with open(filename) as f:
            accuracies.append(float(f.readline()))
    return tuple(accuracies)
'''
This function reads all the images from test_images.
It returns a list with the names of all the test images and their matrix
representations in a numpy array.
'''
def read_input(folder='../test_images/'):
    """Load every image in *folder* as a 256x256 RGB array.

    The original implementation wrote resized JPEG copies into the input
    folder, re-opened them (incurring a lossy re-encode) and then deleted
    them; this version resizes purely in memory instead, leaving the input
    folder untouched.

    Args:
        folder: Directory containing the test images. Defaults to the
            original hard-coded location so existing callers keep working.

    Returns:
        Tuple ``(file_names, X_test)`` where ``file_names`` lists the image
        file names and ``X_test`` is a float32 array of shape
        ``(n_images, 256, 256, 3)`` scaled by its global maximum value.
    """
    file_names = os.listdir(folder)
    images = []
    for name in file_names:
        im = Image.open(os.path.join(folder, name)).resize((256, 256)).convert("RGB")
        images.append(np.asarray(im))
    X_test = np.array(images, dtype='float32')
    X_test /= np.max(X_test)
    return file_names, X_test
# Main Code begins here
if __name__ == "__main__":
    # Read Accuracies of all the models
    accuracy1, accuracy2, accuracy3 = get_accuracies()
    # Read the input data
    FILE_NAMES,X_test = read_input()
    # Reading Model 1
    model1 = load_model('../model/model1.h5')
    prediction=model1.predict( X_test, batch_size=32, verbose=0)
    y1_classes = prediction.argmax(axis=-1)
    # Reading Model 2
    model2 = load_model('../model/model2.h5')
    prediction=model2.predict( X_test, batch_size=32, verbose=0)
    y2_classes = prediction.argmax(axis=-1)
    # Reading Model 3
    model3 = load_model('../model/model3.h5')
    prediction=model3.predict( X_test, batch_size=32, verbose=0)
    y3_classes = prediction.argmax(axis=-1)
    # Prediction using ensembling: majority vote over the three models'
    # predicted classes; if all three disagree, fall back to the prediction
    # of the model with the best recorded accuracy. Printed scores are
    # 1-based (class index + 1), giving the 1..5 aesthetic scale.
    for i in range(len(y1_classes)):
        y1 = y1_classes[i]
        y2 = y2_classes[i]
        y3 = y3_classes[i]
        # Count[c] = number of models voting for class c (5 classes).
        Count = [0,0,0,0,0]
        Count[y1] += 1
        Count[y2] += 1
        Count[y3] += 1
        found = False
        for j in range(len(Count)):
            if Count[j] >= 2:
                found = True
                print(FILE_NAMES[i],"\t",j+1)
                break
        if not found:
            # No majority: trust the most accurate model.
            if max(accuracy1,accuracy2,accuracy3) == accuracy1:
                print(FILE_NAMES[i],"\t",y1+1)
            elif max(accuracy1,accuracy2,accuracy3) == accuracy2:
                print(FILE_NAMES[i],"\t",y2+1)
            else:
                print(FILE_NAMES[i],"\t",y3+1) | [
"os.listdir",
"keras.models.load_model",
"PIL.Image.open",
"os.path.join",
"numpy.max",
"numpy.array",
"os.remove"
] | [((1862, 1880), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1872, 1880), False, 'import os\n'), ((2306, 2336), 'numpy.array', 'np.array', (['ims'], {'dtype': '"""float32"""'}), "(ims, dtype='float32')\n", (2314, 2336), True, 'import numpy as np\n'), ((2348, 2362), 'numpy.max', 'np.max', (['X_test'], {}), '(X_test)\n', (2354, 2362), True, 'import numpy as np\n'), ((2620, 2652), 'keras.models.load_model', 'load_model', (['"""../model/model1.h5"""'], {}), "('../model/model1.h5')\n", (2630, 2652), False, 'from keras.models import load_model\n'), ((2787, 2819), 'keras.models.load_model', 'load_model', (['"""../model/model2.h5"""'], {}), "('../model/model2.h5')\n", (2797, 2819), False, 'from keras.models import load_model\n'), ((2954, 2986), 'keras.models.load_model', 'load_model', (['"""../model/model3.h5"""'], {}), "('../model/model3.h5')\n", (2964, 2986), False, 'from keras.models import load_model\n'), ((1943, 1977), 'PIL.Image.open', 'Image.open', (['(folder + FILE_NAMES[i])'], {}), '(folder + FILE_NAMES[i])\n', (1953, 1977), False, 'from PIL import Image\n'), ((2276, 2297), 'os.remove', 'os.remove', (['(folder + i)'], {}), '(folder + i)\n', (2285, 2297), False, 'import os\n'), ((2184, 2214), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (2196, 2214), False, 'import os\n'), ((2136, 2154), 'PIL.Image.open', 'Image.open', (['imname'], {}), '(imname)\n', (2146, 2154), False, 'from PIL import Image\n')] |
import numpy as np
import warnings
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, CategoricalHyperparameter
from ConfigSpace.conditions import EqualsCondition
from solnml.components.feature_engineering.transformations.base_transformer import *
from solnml.components.utils.text_util import build_embeddings_index, load_text_embeddings
class Text2VectorTransformation(Transformer):
    """Replace raw text fields with dense embedding features."""
    type = 50

    def __init__(self, method='weighted', alpha=1e-4):
        super().__init__("text2vector")
        self.method = method
        self.alpha = alpha
        self.input_type = [TEXT]
        self.output_type = [TEXT_EMBEDDING]
        self.compound_mode = 'replace'
        # The embedding index is loaded lazily on the first operate() call.
        self.embedding_dict = None

    @ease_trans
    def operate(self, input_datanode, target_fields=None):
        X, y = input_datanode.data
        selected = X[:, target_fields]
        if not self.embedding_dict:
            self.embedding_dict = build_embeddings_index()
        # Embed each selected text column, then join them side by side.
        columns = []
        for col in range(selected.shape[1]):
            columns.append(load_text_embeddings(
                selected[:, col], self.embedding_dict,
                method=self.method, alpha=self.alpha))
        return np.hstack(columns)

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None, optimizer='tpe'):
        method = CategoricalHyperparameter("method", ['average', 'weighted'], default_value='weighted')
        alpha = UniformFloatHyperparameter("alpha", 1e-5, 1e-3, log=True, default_value=1e-4)
        cs = ConfigurationSpace()
        cs.add_hyperparameters([method, alpha])
        # alpha is only meaningful for the weighted scheme.
        cs.add_conditions([EqualsCondition(alpha, method, 'weighted')])
        return cs
| [
"solnml.components.utils.text_util.build_embeddings_index",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"numpy.hstack",
"solnml.components.utils.text_util.load_text_embeddings",
"ConfigSpace.conditions.EqualsCondition",
"ConfigSpace.hyperparameters.CategoricalHyperparameter",
"ConfigSpace.... | [((1472, 1563), 'ConfigSpace.hyperparameters.CategoricalHyperparameter', 'CategoricalHyperparameter', (['"""method"""', "['average', 'weighted']"], {'default_value': '"""weighted"""'}), "('method', ['average', 'weighted'], default_value=\n 'weighted')\n", (1497, 1563), False, 'from ConfigSpace.hyperparameters import UniformFloatHyperparameter, CategoricalHyperparameter\n'), ((1575, 1661), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'UniformFloatHyperparameter', (['"""alpha"""', '(1e-05)', '(0.001)'], {'log': '(True)', 'default_value': '(0.0001)'}), "('alpha', 1e-05, 0.001, log=True, default_value=\n 0.0001)\n", (1601, 1661), False, 'from ConfigSpace.hyperparameters import UniformFloatHyperparameter, CategoricalHyperparameter\n'), ((1667, 1687), 'ConfigSpace.configuration_space.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (1685, 1687), False, 'from ConfigSpace.configuration_space import ConfigurationSpace\n'), ((1758, 1800), 'ConfigSpace.conditions.EqualsCondition', 'EqualsCondition', (['alpha', 'method', '"""weighted"""'], {}), "(alpha, method, 'weighted')\n", (1773, 1800), False, 'from ConfigSpace.conditions import EqualsCondition\n'), ((1001, 1025), 'solnml.components.utils.text_util.build_embeddings_index', 'build_embeddings_index', ([], {}), '()\n', (1023, 1025), False, 'from solnml.components.utils.text_util import build_embeddings_index, load_text_embeddings\n'), ((1109, 1205), 'solnml.components.utils.text_util.load_text_embeddings', 'load_text_embeddings', (['X_new[:, i]', 'self.embedding_dict'], {'method': 'self.method', 'alpha': 'self.alpha'}), '(X_new[:, i], self.embedding_dict, method=self.method,\n alpha=self.alpha)\n', (1129, 1205), False, 'from solnml.components.utils.text_util import build_embeddings_index, load_text_embeddings\n'), ((1307, 1334), 'numpy.hstack', 'np.hstack', (['(_X, emb_output)'], {}), '((_X, emb_output))\n', (1316, 1334), True, 'import numpy as np\n')] |
import glob,sys
import numpy as np
sys.path.append('../../flu/src')
import test_flu_prediction as test_flu
import matplotlib.pyplot as plt
import analysis_utils_toy_data as AU
# Every figure is saved once per entry in this list.
file_formats = ['.svg', '.pdf']
plt.rcParams.update(test_flu.mpl_params)
line_styles = ['-', '--', '-.']
cols = ['b', 'r', 'g', 'c', 'm', 'k', 'y']
# Duplicate the palette so color indexing cannot run out of range.
cols+=cols
# Input/output locations for the toy-data prediction results.
figure_folder = '../figures/'
data_dir = '../data_new'
prefix= '/20140820_'
# Parameter grids of the simulated data sets -- presumably N: population
# size, mu: mutation rate, nflip/gamma/omega: toy-model parameters
# (TODO confirm against analysis_utils_toy_data).
N_list = [20000] #,20000]
mu_list = [1e-6,2e-6, 4e-6, 8e-6, 16e-6, 32e-6, 64e-6, 128e-6]
#nflip_list = [0.02,0.04, 0.08, 0.16]
gamma_list = [1.0] #, 2.0,3.0, 5.0]
omega_list = [0.3]
nflip_list = [0.04, 0.08]
sdt_list = [1,100] #determines whether 2 genomes are sampled every generation, or 200 every 100 gen
# Load (averaged) prediction results keyed by (N, mu, nflip, sdt).
pred, norm_pred, run_stats = AU.load_prediction_data(prefix, N_list, mu_list, nflip_list,
                                                    sdt_list, return_mean=True)
valdt = 200
ssize = 200
D = 0.2
L=2000
# Index of the mean-fitness/true-fitness Spearman correlation inside the
# prediction result arrays.
mean_fitness_true_fitness_spearman_i = -4
# For every (gamma, omega, sampling-interval) combination, plot prediction
# quality (Fig. 2-S1) and prediction distance (Fig. 2-S2) against the average
# pairwise diversity of the simulated populations.
for gamma in gamma_list:
    for omega in omega_list:
        for sdt in [1,100]:
            ### PLOT FITNESS CORRELATION VS PAIRWISE DIVERSITY (Fig. 2-S1) ###
            plt.figure(figsize= (10,6))
            ax = plt.subplot(111)
            #plt.title(r'\omega='+str(omega)+',\;dt='+str(sdt)+'$')
            for di,D in enumerate([0.2, 0.5]):
                # Key into the prediction dictionaries for the current parameters.
                pred_label = ssize, gamma, D, omega, valdt
                for ni, N in enumerate(N_list):
                    for fi, nflip in enumerate(nflip_list):
                        plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
                                    [pred[(N,mu,nflip,sdt)+pred_label][0][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
                                    [pred[(N,mu,nflip,sdt)+pred_label][2][mean_fitness_true_fitness_spearman_i] for mu in mu_list],
                                    c=cols[fi], ls=line_styles[di], label = '$n_A = '+str(nflip)+',\;\Gamma='+str(D)+'$', lw=2)
            plt.ylabel("Spearman's correlation")
            plt.xlabel('average pairwise distance')
            plt.xscale('log')
            plt.legend(loc=4)
            #add panel label
            plt.text(0.02,0.9,'Fig.~2-S1', transform = plt.gca().transAxes, fontsize = 20)
            plt.xlim([0.5, 200])
            for ff in file_formats:
                plt.savefig(figure_folder+'Fig2_S1_pairwise_diversity_vs_predictability_sdt_'+str(sdt)+'_gamma_'
                            +str(gamma)+'_valdt_'+str(valdt)+ff)
            ### PLOT prediction_success VS PAIRWISE DIVERSITY (Fig. 2-S2) ###
            plt.figure(figsize= (10,6))
            ax = plt.subplot(111)
            #plt.title(r'$\gamma='+str(gamma)+',\;\omega='+str(omega)+',\;dt='+str(sdt)+'$')
            for ni, N in enumerate(N_list):
                for fi, nflip in enumerate(nflip_list):
                    for di,D in enumerate([0.2, 0.5]):
                        # Bug fix: rebuild the dictionary key for the current D.
                        # Previously the stale pred_label left over from the
                        # loop above (always D=0.5) was reused here, so both
                        # line styles plotted identical data.
                        pred_label = ssize, gamma, D, omega, valdt
                        plt.errorbar([run_stats[(N,mu,nflip,sdt)][-1] for mu in mu_list],
                                    [norm_pred[(N,mu,nflip,sdt)+pred_label][0][1] for mu in mu_list],
                                    [norm_pred[(N,mu,nflip,sdt)+pred_label][2][1] for mu in mu_list],
                                    c=cols[fi], ls=line_styles[di], label = '$n_A = '+str(nflip)+'$')
            plt.ylabel(r'distance $\bar{d}$ to future populations')
            plt.xlabel('average pairwise distance')
            #add panel label
            plt.text(0.02,0.9,'Fig.~2-S2', transform = plt.gca().transAxes, fontsize = 20)
            plt.xscale('log')
            plt.legend(loc=1)
            plt.xlim([0.5, 200])
            for ff in file_formats:
                plt.savefig(figure_folder+'Fig2_S2_pairwise_diversity_vs_distance_sdt_'+str(sdt)+'_gamma_'+str(gamma)+'_valdt_'+str(valdt)+ff)
## plot gamma versus the number of predictions that are worse than random
# reload the data without averaging over the different realizations.
pred, norm_pred, run_stats = AU.load_prediction_data(prefix, N_list, mu_list, nflip_list,
                                                    sdt_list, return_mean=False)
#for sdt in [100]:
# NOTE(review): this block reuses `sdt` and `D` left over from the loops
# above (their final values) -- confirm that is intended rather than an
# accidental dependency on loop ordering.
if len(gamma_list)>1:
    for omega in omega_list:
        ### PLOT FITNESS CORRELATION VS DSCALE ###
        plt.figure(figsize= (10,6))
        ax = plt.subplot(111)
        #plt.title(r'$\omega='+str(omega)+',\;dt='+str(sdt)+'$')
        for mi,mu in enumerate(mu_list[3:6]):
            for ni, N in enumerate(N_list):
                for fi, nflip in enumerate(nflip_list[:]):
                    # Label each nflip only once (first mu) to keep the legend compact.
                    if mi==0:
                        label_str = r'$n_A ='+str(nflip)+'$'
                    else:
                        label_str = None
                    # Fraction of realizations where the prediction is worse
                    # than the comparison column, one point per gamma.
                    plt.plot(gamma_list, [np.mean(pred[(N,mu,nflip,sdt)+(ssize, gamma, D, omega, valdt)][:,0]<
                                        pred[(N,mu,nflip,sdt)+(ssize, gamma, D, omega, valdt)][:,3])
                                        for gamma in gamma_list], lw=2, marker='o', markersize=10,
                            ls=line_styles[mi], c=cols[fi], label = label_str)
        #plt.xscale('log')
        #add panel label
        plt.text(0.02,0.9,'Fig.~2-S3', transform = plt.gca().transAxes, fontsize = 20)
        plt.xlim([0.0, 5.5])
        plt.ylabel('worse than random (out of 100)')
        plt.xlabel(r'time rescaling $\gamma$')
        plt.legend(loc=1,numpoints=1)
        for ff in file_formats:
            plt.savefig(figure_folder+'Fig2_S3_gamma_vs_predictability_sdt_'+str(sdt)+'_D_'+str(D)+'_w_'+str(omega)+'_valdt_'+str(valdt)+ff)
| [
"numpy.mean",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"analysis_utils_toy_data.load_prediction_data",
"sys.path.append",
"matplotlib.pyplot.xlim",
"matplotlib.pyp... | [((35, 67), 'sys.path.append', 'sys.path.append', (['"""../../flu/src"""'], {}), "('../../flu/src')\n", (50, 67), False, 'import glob, sys\n'), ((209, 249), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['test_flu.mpl_params'], {}), '(test_flu.mpl_params)\n', (228, 249), True, 'import matplotlib.pyplot as plt\n'), ((753, 845), 'analysis_utils_toy_data.load_prediction_data', 'AU.load_prediction_data', (['prefix', 'N_list', 'mu_list', 'nflip_list', 'sdt_list'], {'return_mean': '(True)'}), '(prefix, N_list, mu_list, nflip_list, sdt_list,\n return_mean=True)\n', (776, 845), True, 'import analysis_utils_toy_data as AU\n'), ((4035, 4128), 'analysis_utils_toy_data.load_prediction_data', 'AU.load_prediction_data', (['prefix', 'N_list', 'mu_list', 'nflip_list', 'sdt_list'], {'return_mean': '(False)'}), '(prefix, N_list, mu_list, nflip_list, sdt_list,\n return_mean=False)\n', (4058, 4128), True, 'import analysis_utils_toy_data as AU\n'), ((4318, 4345), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4328, 4345), True, 'import matplotlib.pyplot as plt\n'), ((4359, 4375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4370, 4375), True, 'import matplotlib.pyplot as plt\n'), ((5311, 5331), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 5.5]'], {}), '([0.0, 5.5])\n', (5319, 5331), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""worse than random (out of 100)"""'], {}), "('worse than random (out of 100)')\n", (5350, 5384), True, 'import matplotlib.pyplot as plt\n'), ((5393, 5431), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time rescaling $\\\\gamma$"""'], {}), "('time rescaling $\\\\gamma$')\n", (5403, 5431), True, 'import matplotlib.pyplot as plt\n'), ((5440, 5470), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)', 'numpoints': '(1)'}), '(loc=1, numpoints=1)\n', (5450, 5470), True, 'import 
matplotlib.pyplot as plt\n'), ((1072, 1099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1082, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1133), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1128, 1133), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Spearman\'s correlation"""'], {}), '("Spearman\'s correlation")\n', (2016, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2094), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""average pairwise distance"""'], {}), "('average pairwise distance')\n", (2065, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2124), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2117, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2137, 2154), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (2147, 2154), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2308), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.5, 200]'], {}), '([0.5, 200])\n', (2296, 2308), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2617, 2634), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2668), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2663, 2668), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3385), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""distance $\\\\bar{d}$ to future populations"""'], {}), "('distance $\\\\bar{d}$ to future populations')\n", (3340, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3437), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""average pairwise distance"""'], {}), "('average pairwise distance')\n", (3408, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3571, 3588), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), 
"('log')\n", (3581, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3601, 3618), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (3611, 3618), True, 'import matplotlib.pyplot as plt\n'), ((3631, 3651), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.5, 200]'], {}), '([0.5, 200])\n', (3639, 3651), True, 'import matplotlib.pyplot as plt\n'), ((5267, 5276), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5274, 5276), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2249), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as plt\n'), ((3523, 3532), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3530, 3532), True, 'import matplotlib.pyplot as plt\n'), ((4794, 4940), 'numpy.mean', 'np.mean', (['(pred[(N, mu, nflip, sdt) + (ssize, gamma, D, omega, valdt)][:, 0] < pred[(\n N, mu, nflip, sdt) + (ssize, gamma, D, omega, valdt)][:, 3])'], {}), '(pred[(N, mu, nflip, sdt) + (ssize, gamma, D, omega, valdt)][:, 0] <\n pred[(N, mu, nflip, sdt) + (ssize, gamma, D, omega, valdt)][:, 3])\n', (4801, 4940), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# wykys 2019
import numpy as np
def awgn(s: np.ndarray, snr_db: float = 20) -> np.ndarray:
    """Return *s* with additive white Gaussian noise at the given SNR.

    The noise power is derived from the average signal power so that the
    signal-to-noise ratio of the result equals ``snr_db`` (in dB).
    """
    # Average signal power, expressed in dB.
    signal_power_db = 10 * np.log10(np.mean(s ** 2))
    # Noise power is the signal power attenuated by the requested SNR.
    noise_power = 10 ** ((signal_power_db - snr_db) / 10)
    noise = np.random.normal(0, np.sqrt(noise_power), len(s))
    return s + noise
if __name__ == '__main__':
    import sig_plot
    # Demo: one period of a 1 kHz sine sampled at 100x the signal frequency,
    # shown clean and with 10 dB SNR noise added.
    freq = 1e3
    sample_rate = 100*freq
    t = np.arange(0, 1/freq, 1/sample_rate)
    clean = np.sin(2*np.pi*freq*t)
    noisy = awgn(clean, 10)
    sig_plot.splitplot([clean, noisy])
    sig_plot.show()
| [
"numpy.mean",
"numpy.log10",
"sig_plot.show",
"numpy.sqrt",
"sig_plot.splitplot",
"numpy.sin",
"numpy.arange"
] | [((137, 152), 'numpy.mean', 'np.mean', (['(s ** 2)'], {}), '(s ** 2)\n', (144, 152), True, 'import numpy as np\n'), ((527, 554), 'numpy.arange', 'np.arange', (['(0)', '(1 / f)', '(1 / fs)'], {}), '(0, 1 / f, 1 / fs)\n', (536, 554), True, 'import numpy as np\n'), ((560, 585), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t)'], {}), '(2 * np.pi * f * t)\n', (566, 585), True, 'import numpy as np\n'), ((606, 634), 'sig_plot.splitplot', 'sig_plot.splitplot', (['[s1, s2]'], {}), '([s1, s2])\n', (624, 634), False, 'import sig_plot\n'), ((639, 654), 'sig_plot.show', 'sig_plot.show', ([], {}), '()\n', (652, 654), False, 'import sig_plot\n'), ((173, 196), 'numpy.log10', 'np.log10', (['sig_avg_watts'], {}), '(sig_avg_watts)\n', (181, 196), True, 'import numpy as np\n'), ((369, 393), 'numpy.sqrt', 'np.sqrt', (['noise_avg_watts'], {}), '(noise_avg_watts)\n', (376, 393), True, 'import numpy as np\n')] |
# import scipy.signal as sig
import scipy as sp
import numpy as np
# tc = 30e-9
# caviy_tc = 10e-9
# n=1
# wc = 1/tc
fac = sp.math.factorial
# def filter_func(tc, order, t):
# wc=1/float(tc)
# return (wc*t)**(order-1)/fac(order-1)*wc*np.exp(-wc*t)
# filt = filter_func(tc, 7, np.arange(tc,20*tc, 0.1*tc))
# print filt
# # signal = np.repeat([0.,1.,0.], 10*len(filt))
# t = np.linspace(0,30e-9, 10*len(filt))
# rise_arr = 1-np.exp(-t/caviy_tc)
# fall_arr = rise_arr[-1]*np.exp(-t/caviy_tc)
# signal = np.concatenate((np.zeros(len(filt)), rise_arr, np.repeat(rise_arr[-1], 10*len(filt)), fall_arr, np.repeat(fall_arr[-1], 2*len(filt))))
# filt = filt/filt.max()
# print filt.min()
# convoluted = np.convolve(signal,filt,mode='same')
# deconv, _ = sig.deconvolve( convoluted, filt )
# # #the deconvolution has n = len(signal) - len(gauss) + 1 points
# # n = len(signal)-len(filt)+1
# # # so we need to expand it by
# # s = (len(signal)-n)/2
# # #on both sides.
# # deconv_res = np.zeros(len(signal))
# # deconv_res[s:len(signal)-s-1] = deconv
# # deconv = deconv_res
# # # now deconv contains the deconvolution
# # # expanded to the original shape (filled with zeros)
def filter_func(tc, order, t):
wc=1/float(tc)
return (wc*t)**(order-1)/fac(order-1)*wc*np.exp(-wc*t)
def deconvolve(data, tc, order):
'''
Returns deconvoluted data.
'''
t = data[0] # time values
y = data[1] # recorded signal
time = t[-1]-t[0]
resolution = t[1]-t[0]
if time <= 0:
raise UHFLIException('Time must run forwards.')
elif time < 10*tc:
raise UHFLIException('Data must be longer than 10 time constants of the filter.')
else:
filt = filter_func(tc, order, np.arange(0, 10*tc, resolution))
if filt.min() < 0e-5:
raise UHFLIException('Filter error.')
from scipy.signal import deconvolve
return deconvolve(y,filt)
# d = np.loadtxt(r'D:\data\20170612\130605_2port_30ns_isolator\130605_2port_30ns_isolator.dat').swapaxes(0,1) | [
"numpy.exp",
"scipy.signal.deconvolve",
"numpy.arange"
] | [((1305, 1320), 'numpy.exp', 'np.exp', (['(-wc * t)'], {}), '(-wc * t)\n', (1311, 1320), True, 'import numpy as np\n'), ((1868, 1887), 'scipy.signal.deconvolve', 'deconvolve', (['y', 'filt'], {}), '(y, filt)\n', (1878, 1887), False, 'from scipy.signal import deconvolve\n'), ((1719, 1752), 'numpy.arange', 'np.arange', (['(0)', '(10 * tc)', 'resolution'], {}), '(0, 10 * tc, resolution)\n', (1728, 1752), True, 'import numpy as np\n')] |
"""Implementation of a subset of the NumPy API using SymPy primitives."""
from collections import Iterable as _Iterable
import sympy as _sym
import numpy as _np
from symnum.array import (
SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array,
unary_elementwise_func as _unary_elementwise_func,
binary_broadcasting_func as _binary_broadcasting_func,
slice_iterator as _slice_iterator)
from sympy import S as _sym_singletons
# Define mappings from objects in NumPy namespace to SymPy equivalents
_constants = {
'pi': _sym_singletons.Pi,
('inf', 'infty', 'INF', 'Infinity', 'PINF'): _sym_singletons.Infinity,
('nan', 'NaN', 'NAN'): _sym_singletons.NaN,
'NINF': _sym_singletons.NegativeInfinity,
'e': _sym_singletons.Exp1,
'euler_gamma': _sym_singletons.EulerGamma,
'newaxis': None,
}
_unary_elementwise_funcs = {
'exp': _sym.exp,
'expm1': lambda x: _sym.exp(x) - 1,
'log': _sym.log,
'log2': lambda x: _sym.log(x, 2),
'log10': lambda x: _sym.log(x, 10),
'log1p': lambda x: _sym.log(1 + x),
'sin': _sym.sin,
'cos': _sym.cos,
'tan': _sym.tan,
'arcsin': _sym.asin,
'arccos': _sym.acos,
'arctan': _sym.atan,
'sinh': _sym.sinh,
'cosh': _sym.cosh,
'tanh': _sym.tanh,
'arcsinh': _sym.asinh,
'arccosh': _sym.acosh,
'arctanh': _sym.atanh,
'ceil': _sym.ceiling,
'floor': _sym.floor,
'sqrt': _sym.sqrt,
('abs', 'absolute'): _sym.Abs,
'sign': _sym.sign,
'angle': lambda x, deg=False: (
_sym.arg(x) * 180 / _sym.pi if deg else _sym.arg(x)),
('conj', 'conjugate'): _sym.conjugate,
'real': _sym.re,
'imag': _sym.im,
'logical_not': _sym.Not,
'isinf': lambda x: x == _sym_S.Infinity or x == _sym_S.NegativeInfinity,
'isposinf': lambda x: x == _sym_S.Infinity,
'isneginf': lambda x: x == _sym_S.NegativeInfinity,
'isnan': lambda x: x == _sym_S.NaN,
'isreal': lambda x: x.is_real,
'iscomplex': lambda x: not x.is_real,
'isfinite': lambda x: not (x == _sym_S.Infinity or
x == _sym_S.NegativeInfinity or x == _sym_S.NaN)
}
_binary_broadcasting_funcs = {
'arctan2': _sym.atan2,
'logical_and': _sym.And,
'logical_or': _sym.Or,
'logical_xor': _sym.Xor,
'maximum': _sym.Max,
'minimum': _sym.Min,
}
_binary_op_funcs = {
'add': lambda x1, x2: x1 + x2,
'subtract': lambda x1, x2: x1 - x2,
'multiply': lambda x1, x2: x1 * x2,
'divide': lambda x1, x2: x1 / x2,
'power': lambda x1, x2: x1**x2,
'matmul': lambda x1, x2: x1 @ x2,
}
def _wrap_numpy(numpy_name=None):
def decorator(func):
_numpy_name = func.__name__ if numpy_name is None else numpy_name
try:
func.__name__ = _numpy_name
numpy_func_doc = getattr(_np, _numpy_name).__doc__
if numpy_func_doc[0] == '\n':
numpy_func_doc = numpy_func_doc[1:]
func.__doc__ = f'symnum implementation of numpy.{_numpy_name}\n\n'
func.__doc__ += numpy_func_doc
finally:
return func
return decorator
def _wrap_unary_elementwise_func(sympy_func, numpy_name):
elementwise_func = _unary_elementwise_func(sympy_func, numpy_name, '')
@_wrap_numpy(numpy_name)
def wrapped(x, *args, **kwargs):
if len(args) > 0 or len(kwargs) > 0:
raise NotImplementedError(
f'Only first argument of {numpy_name} supported.')
else:
return elementwise_func(x)
return wrapped
def _wrap_binary_broadcasting_func(sympy_func, numpy_name):
broadcasting_func = _binary_broadcasting_func(sympy_func, numpy_name, '')
@_wrap_numpy(numpy_name)
def wrapped(x1, x2, *args, **kwargs):
if len(args) > 0 or len(kwargs) > 0:
raise NotImplementedError(
f'Only first two arguments of {numpy_name} supported.')
else:
return broadcasting_func(x1, x2)
return wrapped
def _wrap_binary_op_func(op_func, numpy_name):
@_wrap_numpy(numpy_name)
def wrapped(x1, x2, *args, **kwargs):
if len(args) > 0 or len(kwargs) > 0:
raise NotImplementedError(
f'Only first two arguments of {numpy_name} supported.')
else:
return op_func(x1, x2)
return wrapped
def _add_wrapped_funcs_to_namespace(func_mapping, namespace, wrapper):
for name_or_names, sympy_func in func_mapping.items():
if isinstance(name_or_names, tuple):
for name in name_or_names:
namespace[name] = wrapper(sympy_func, name)
else:
namespace[name_or_names] = wrapper(sympy_func, name_or_names)
def _populate_namespace(namespace):
for name_or_names, val in _constants.items():
if isinstance(name_or_names, tuple):
for name in name_or_names:
namespace[name] = val
else:
namespace[name_or_names] = val
_add_wrapped_funcs_to_namespace(
_unary_elementwise_funcs, namespace, _wrap_unary_elementwise_func)
_add_wrapped_funcs_to_namespace(
_binary_broadcasting_funcs, namespace, _wrap_binary_broadcasting_func)
_add_wrapped_funcs_to_namespace(
_binary_op_funcs, namespace, _wrap_binary_op_func)
_populate_namespace(globals())
# Array creation functions
def _flatten(iterable):
"""Recursively flatten nested iterables to a list."""
flattened = []
for el in iterable:
if isinstance(el, _Iterable):
flattened.extend(_flatten(el))
else:
flattened.append(el)
return flattened
def _contains_expr(iterable):
return any([isinstance(el, _sym.Expr) for el in _flatten(iterable)])
@_wrap_numpy()
def array(object, dtype=None):
if (_is_sympy_array(object) or isinstance(object, _sym.Expr)
or (isinstance(object, _Iterable) and _contains_expr(object))):
return _SymbolicArray(object, dtype=dtype)
else:
return _np.array(object, dtype)
@_wrap_numpy()
def eye(N, M=None, k=0):
M = N if M is None else M
return _SymbolicArray(
[1 if (j - i) == k else 0 for i in range(N) for j in range(M)], (N, M))
@_wrap_numpy()
def identity(n):
return eye(n, n, 0)
def _constant_array(val, shape):
size = _np.prod(shape)
return _SymbolicArray([val] * size, shape)
@_wrap_numpy()
def ones(shape):
return _constant_array(1, shape)
@_wrap_numpy()
def zeros(shape):
return _constant_array(0, shape)
@_wrap_numpy()
def full(shape, fill_value):
return _constant_array(fill_value, shape)
# Array reductions
@_wrap_numpy()
def sum(a, axis=None):
return a.sum(axis)
@_wrap_numpy()
def prod(a, axis=None):
return a.prod(axis)
# Array joining
@_wrap_numpy()
def concatenate(arrays, axis=0):
for i in range(len(arrays)):
if (axis > 0 and axis > arrays[i].ndim - 1) or (
axis < 0 and abs(axis) > arrays[i].ndim):
raise ValueError(
f'axis {axis} is out of bounds for array of dimension '
f'{arrays[i].ndim}')
ndim = arrays[0].ndim
for i in range(1, len(arrays)):
if arrays[i].ndim != ndim:
raise ValueError(
f'all the input arrays must have same number of dimensions, but'
f' the array at index 0 has {arrays[0].ndim} dimension(s) and '
f'the array at index {i} has {arrays[i].ndim} dimension(s)')
for d in (set(range(arrays[0].ndim)) - set([axis])):
if arrays[0].shape[d] != arrays[i].shape[d]:
raise ValueError(
f'all the input array dimensions for the concatenation axis'
f' must match exactly, but along dimension {d}, the array '
f'at index 0 has size {arrays[0].shape[d]} and the array at'
f' index {i} has size {arrays[i].shape[d]}')
array_slices = [slc for arr in arrays for slc in _slice_iterator(arr, axis)]
concatenated = array(array_slices)
if axis != 0:
concatenated = concatenated.transpose(
tuple(range(1, axis + 1)) + (0,) + tuple(range(axis + 1, ndim)))
return concatenated
| [
"numpy.prod",
"symnum.array.slice_iterator",
"symnum.array.is_sympy_array",
"numpy.array",
"sympy.log",
"symnum.array.unary_elementwise_func",
"sympy.exp",
"sympy.arg",
"symnum.array.binary_broadcasting_func",
"symnum.array.SymbolicArray"
] | [((3206, 3257), 'symnum.array.unary_elementwise_func', '_unary_elementwise_func', (['sympy_func', 'numpy_name', '""""""'], {}), "(sympy_func, numpy_name, '')\n", (3229, 3257), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as _binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((3636, 3689), 'symnum.array.binary_broadcasting_func', '_binary_broadcasting_func', (['sympy_func', 'numpy_name', '""""""'], {}), "(sympy_func, numpy_name, '')\n", (3661, 3689), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as _binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((6313, 6328), 'numpy.prod', '_np.prod', (['shape'], {}), '(shape)\n', (6321, 6328), True, 'import numpy as _np\n'), ((6340, 6375), 'symnum.array.SymbolicArray', '_SymbolicArray', (['([val] * size)', 'shape'], {}), '([val] * size, shape)\n', (6354, 6375), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as _binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((978, 992), 'sympy.log', '_sym.log', (['x', '(2)'], {}), '(x, 2)\n', (986, 992), True, 'import sympy as _sym\n'), ((1017, 1032), 'sympy.log', '_sym.log', (['x', '(10)'], {}), '(x, 10)\n', (1025, 1032), True, 'import sympy as _sym\n'), ((1057, 1072), 'sympy.log', '_sym.log', (['(1 + x)'], {}), '(1 + x)\n', (1065, 1072), True, 'import sympy as _sym\n'), ((5796, 5819), 'symnum.array.is_sympy_array', '_is_sympy_array', (['object'], {}), '(object)\n', (5811, 5819), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as 
_binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((5944, 5979), 'symnum.array.SymbolicArray', '_SymbolicArray', (['object'], {'dtype': 'dtype'}), '(object, dtype=dtype)\n', (5958, 5979), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as _binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((6005, 6029), 'numpy.array', '_np.array', (['object', 'dtype'], {}), '(object, dtype)\n', (6014, 6029), True, 'import numpy as _np\n'), ((918, 929), 'sympy.exp', '_sym.exp', (['x'], {}), '(x)\n', (926, 929), True, 'import sympy as _sym\n'), ((1578, 1589), 'sympy.arg', '_sym.arg', (['x'], {}), '(x)\n', (1586, 1589), True, 'import sympy as _sym\n'), ((7992, 8018), 'symnum.array.slice_iterator', '_slice_iterator', (['arr', 'axis'], {}), '(arr, axis)\n', (8007, 8018), True, 'from symnum.array import SymbolicArray as _SymbolicArray, is_sympy_array as _is_sympy_array, unary_elementwise_func as _unary_elementwise_func, binary_broadcasting_func as _binary_broadcasting_func, slice_iterator as _slice_iterator\n'), ((1538, 1549), 'sympy.arg', '_sym.arg', (['x'], {}), '(x)\n', (1546, 1549), True, 'import sympy as _sym\n')] |
"""
this is a simple demo of data-retrieving by ipython
all codes including %matplotlib should be coded in ipython interface
please first uncomment the code on line 13 and then run the following code in ipython
"""
import numpy as np
import pandas as pd
import pandas.io.data as web
goog = web.DataReader('GOOG', data_source='yahoo', start='3/14/2009', end='4/14/2009')
goog.tail()
goog['Log_Ret'] = np.log(goog['Close'] / goog['Close'].shift(1))
goog['Volatility'] = pd.rolling_std(goog['Log_Ret'], window=252) * np.sqrt(252)
#%matplotlib
goog[['Close','Volatility']].plot(subplots=True, color='blue', figsize=(8,6)) | [
"pandas.rolling_std",
"numpy.sqrt",
"pandas.io.data.DataReader"
] | [((290, 369), 'pandas.io.data.DataReader', 'web.DataReader', (['"""GOOG"""'], {'data_source': '"""yahoo"""', 'start': '"""3/14/2009"""', 'end': '"""4/14/2009"""'}), "('GOOG', data_source='yahoo', start='3/14/2009', end='4/14/2009')\n", (304, 369), True, 'import pandas.io.data as web\n'), ((468, 511), 'pandas.rolling_std', 'pd.rolling_std', (["goog['Log_Ret']"], {'window': '(252)'}), "(goog['Log_Ret'], window=252)\n", (482, 511), True, 'import pandas as pd\n'), ((514, 526), 'numpy.sqrt', 'np.sqrt', (['(252)'], {}), '(252)\n', (521, 526), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Author
------
<NAME>
Email
-----
<EMAIL>
Created on
----------
- Sun Jun 25 13:00:00 2017
Modifications
-------------
- Sun Jun 25 13:00:00 2017
Aims
----
- utils for computing in parallel
"""
from copy import deepcopy
import numpy as np
from ipyparallel import Client
def launch_ipcluster_dv(profile="default", targets="all", block=True, max_engines=None):
# initiate ipcluster
rc = Client(profile=profile)
# print ipcluster information
n_proc = len(rc.ids)
if targets == "all":
targets = rc.ids
dv = rc.direct_view(targets=targets)
# check number of engines
# print(rc.ids, dv.targets, targets, max_engines)
if max_engines is not None:
if len(dv.targets) > max_engines:
targets = deepcopy(dv.targets)
np.random.shuffle(targets)
targets = targets[:max_engines]
targets.sort()
dv = rc.direct_view(targets=targets)
print("===================================================")
print("@Slam: ipcluster[{}, n_engines={}/{}]".format(
profile, len(dv.targets), n_proc))
print("---------------------------------------------------")
dv.block = block
# import basic modules in ipcluster
dv.execute("import os")
dv.execute("import numpy as np")
dv.execute("from joblib import Parallel, delayed, dump, load")
# print host information
dv.execute("host_names = os.uname()[1]").get()
u_host_names, u_counts = np.unique(
dv["host_names"], return_counts=True)
for i in range(len(u_counts)):
print("host: {} x {}".format(u_host_names[i], u_counts[i]))
print("===================================================")
return dv
def reset_dv(dv):
dv.execute("import IPython\n"
"ipy=IPython.get_ipython()\n"
"ipy.run_line_magic(\"reset\", \" -f\")\n")
return
def print_time_cost(dtime, unit_max="hour"):
""" return string for delta_time """
if dtime <= 60 * 1.5:
dtime_str = "{:.3f} sec".format(dtime)
elif dtime <= 60 * 60 * 1.5:
dtime_str = "{:.3f} min".format(dtime / 60.)
elif dtime <= (60 * 60 * 24 * 3):
dtime_str = "{:.3f} hours".format(dtime / 3600.)
else:
# even larger
if unit_max == "hour":
dtime <= (60 * 60 * 24 * 3)
dtime_str = "{:.3f} hours".format(dtime / 3600.)
else:
dtime_str = "{:.3f} days".format(dtime / 86400.)
return dtime_str
| [
"numpy.random.shuffle",
"copy.deepcopy",
"numpy.unique",
"ipyparallel.Client"
] | [((429, 452), 'ipyparallel.Client', 'Client', ([], {'profile': 'profile'}), '(profile=profile)\n', (435, 452), False, 'from ipyparallel import Client\n'), ((1504, 1551), 'numpy.unique', 'np.unique', (["dv['host_names']"], {'return_counts': '(True)'}), "(dv['host_names'], return_counts=True)\n", (1513, 1551), True, 'import numpy as np\n'), ((786, 806), 'copy.deepcopy', 'deepcopy', (['dv.targets'], {}), '(dv.targets)\n', (794, 806), False, 'from copy import deepcopy\n'), ((819, 845), 'numpy.random.shuffle', 'np.random.shuffle', (['targets'], {}), '(targets)\n', (836, 845), True, 'import numpy as np\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from cycler import cycler
def test_colorcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
def test_marker_cycle():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('marker', ['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_marker_cycle_kwargs_arrays_iterators():
fig, ax = plt.subplots()
ax.set_prop_cycle(c=np.array(['r', 'g', 'y']),
marker=iter(['.', '*', 'x']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_color() for l in ax.lines] == ['r', 'g', 'y', 'r']
assert [l.get_marker() for l in ax.lines] == ['.', '*', 'x', '.']
def test_linestylecycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('ls', ['-', '--', ':']))
for _ in range(4):
ax.plot(range(10), range(10))
assert [l.get_linestyle() for l in ax.lines] == ['-', '--', ':', '-']
def test_fillcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('linestyle', ['-', '--', ':']))
for _ in range(4):
ax.fill(range(10), range(10))
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'g', 'y', 'r']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', '|-', 'xx']
assert [p.get_linestyle() for p in ax.patches] == ['-', '--', ':', '-']
def test_fillcycle_ignore():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('marker', ['.', '*', 'D']))
t = range(10)
# Should not advance the cycler, even though there is an
# unspecified property in the cycler "marker".
# "marker" is not a Polygon property, and should be ignored.
ax.fill(t, t, 'r', hatch='xx')
# Allow the cycler to advance, but specify some properties
ax.fill(t, t, hatch='O')
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in ['r', 'r', 'g', 'y']])
assert [p.get_hatch() for p in ax.patches] == ['xx', 'O', 'O', '|-']
def test_property_collision_plot():
fig, ax = plt.subplots()
ax.set_prop_cycle('linewidth', [2, 4])
t = range(10)
for c in range(1, 4):
ax.plot(t, t, lw=0.1)
ax.plot(t, t)
ax.plot(t, t)
assert [l.get_linewidth() for l in ax.lines] == [0.1, 0.1, 0.1, 2, 4]
def test_property_collision_fill():
fig, ax = plt.subplots()
ax.set_prop_cycle(linewidth=[2, 3, 4, 5, 6], facecolor='bgcmy')
t = range(10)
for c in range(1, 4):
ax.fill(t, t, lw=0.1)
ax.fill(t, t)
ax.fill(t, t)
assert ([p.get_facecolor() for p in ax.patches]
== [mpl.colors.to_rgba(c) for c in 'bgcmy'])
assert [p.get_linewidth() for p in ax.patches] == [0.1, 0.1, 0.1, 5, 6]
def test_valid_input_forms():
fig, ax = plt.subplots()
# These should not raise an error.
ax.set_prop_cycle(None)
ax.set_prop_cycle(cycler('linewidth', [1, 2]))
ax.set_prop_cycle('color', 'rgywkbcm')
ax.set_prop_cycle('lw', (1, 2))
ax.set_prop_cycle('linewidth', [1, 2])
ax.set_prop_cycle('linewidth', iter([1, 2]))
ax.set_prop_cycle('linewidth', np.array([1, 2]))
ax.set_prop_cycle('color', np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
ax.set_prop_cycle('dashes', [[], [13, 2], [8, 3, 1, 3]])
ax.set_prop_cycle(lw=[1, 2], color=['k', 'w'], ls=['-', '--'])
ax.set_prop_cycle(lw=np.array([1, 2]),
color=np.array(['k', 'w']),
ls=np.array(['-', '--']))
def test_cycle_reset():
fig, ax = plt.subplots()
# Can't really test a reset because only a cycle object is stored
# but we can test the first item of the cycle.
prop = next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(linewidth=[10, 9, 4])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got
def test_invalid_input_forms():
fig, ax = plt.subplots()
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle([1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('color', 'fish')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', 1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', {1, 2})
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(linewidth=1, color='r')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('foobar', [1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(foobar=[1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(cycler(foobar=[1, 2]))
with pytest.raises(ValueError):
ax.set_prop_cycle(cycler(color='rgb', c='cmy'))
| [
"matplotlib.colors.to_rgba",
"numpy.array",
"pytest.raises",
"cycler.cycler",
"matplotlib.pyplot.subplots"
] | [((162, 176), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (174, 176), True, 'import matplotlib.pyplot as plt\n'), ((404, 418), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (416, 418), True, 'import matplotlib.pyplot as plt\n'), ((794, 808), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (806, 808), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1175), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1173, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1423), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1421, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1976, 1990), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1988, 1990), True, 'import matplotlib.pyplot as plt\n'), ((2770, 2784), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2782, 2784), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3078), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3076, 3078), True, 'import matplotlib.pyplot as plt\n'), ((3488, 3502), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3500, 3502), True, 'import matplotlib.pyplot as plt\n'), ((4312, 4326), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4324, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4742), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4740, 4742), True, 'import matplotlib.pyplot as plt\n'), ((199, 231), 'cycler.cycler', 'cycler', (['"""color"""', "['r', 'g', 'y']"], {}), "('color', ['r', 'g', 'y'])\n", (205, 231), False, 'from cycler import cycler\n'), ((1198, 1228), 'cycler.cycler', 'cycler', (['"""ls"""', "['-', '--', ':']"], {}), "('ls', ['-', '--', ':'])\n", (1204, 1228), False, 'from cycler import cycler\n'), ((3592, 3619), 'cycler.cycler', 'cycler', (['"""linewidth"""', '[1, 2]'], {}), "('linewidth', [1, 2])\n", (3598, 3619), False, 'from cycler import cycler\n'), ((3827, 
3843), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3835, 3843), True, 'import numpy as np\n'), ((3876, 3919), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (3884, 3919), True, 'import numpy as np\n'), ((4753, 4791), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (4766, 4791), False, 'import pytest\n'), ((4831, 4869), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (4844, 4869), False, 'import pytest\n'), ((4915, 4953), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (4928, 4953), False, 'import pytest\n'), ((5008, 5046), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5021, 5046), False, 'import pytest\n'), ((5099, 5137), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5112, 5137), False, 'import pytest\n'), ((5195, 5233), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5208, 5233), False, 'import pytest\n'), ((5295, 5333), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5308, 5333), False, 'import pytest\n'), ((5388, 5426), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5401, 5426), False, 'import pytest\n'), ((5479, 5517), 'pytest.raises', 'pytest.raises', (['(TypeError, ValueError)'], {}), '((TypeError, ValueError))\n', (5492, 5517), False, 'import pytest\n'), ((5577, 5602), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5590, 5602), False, 'import pytest\n'), ((441, 469), 'cycler.cycler', 'cycler', (['"""c"""', "['r', 'g', 'y']"], {}), "('c', ['r', 'g', 'y'])\n", (447, 469), False, 'from cycler import cycler\n'), ((494, 527), 
'cycler.cycler', 'cycler', (['"""marker"""', "['.', '*', 'x']"], {}), "('marker', ['.', '*', 'x'])\n", (500, 527), False, 'from cycler import cycler\n'), ((833, 858), 'numpy.array', 'np.array', (["['r', 'g', 'y']"], {}), "(['r', 'g', 'y'])\n", (841, 858), True, 'import numpy as np\n'), ((1559, 1596), 'cycler.cycler', 'cycler', (['"""linestyle"""', "['-', '--', ':']"], {}), "('linestyle', ['-', '--', ':'])\n", (1565, 1596), False, 'from cycler import cycler\n'), ((1727, 1748), 'matplotlib.colors.to_rgba', 'mpl.colors.to_rgba', (['c'], {}), '(c)\n', (1745, 1748), True, 'import matplotlib as mpl\n'), ((2130, 2163), 'cycler.cycler', 'cycler', (['"""marker"""', "['.', '*', 'D']"], {}), "('marker', ['.', '*', 'D'])\n", (2136, 2163), False, 'from cycler import cycler\n'), ((2591, 2612), 'matplotlib.colors.to_rgba', 'mpl.colors.to_rgba', (['c'], {}), '(c)\n', (2609, 2612), True, 'import matplotlib as mpl\n'), ((3325, 3346), 'matplotlib.colors.to_rgba', 'mpl.colors.to_rgba', (['c'], {}), '(c)\n', (3343, 3346), True, 'import matplotlib as mpl\n'), ((4156, 4172), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (4164, 4172), True, 'import numpy as np\n'), ((4202, 4222), 'numpy.array', 'np.array', (["['k', 'w']"], {}), "(['k', 'w'])\n", (4210, 4222), True, 'import numpy as np\n'), ((4249, 4270), 'numpy.array', 'np.array', (["['-', '--']"], {}), "(['-', '--'])\n", (4257, 4270), True, 'import numpy as np\n'), ((5545, 5566), 'cycler.cycler', 'cycler', ([], {'foobar': '[1, 2]'}), '(foobar=[1, 2])\n', (5551, 5566), False, 'from cycler import cycler\n'), ((5630, 5658), 'cycler.cycler', 'cycler', ([], {'color': '"""rgb"""', 'c': '"""cmy"""'}), "(color='rgb', c='cmy')\n", (5636, 5658), False, 'from cycler import cycler\n'), ((1446, 1474), 'cycler.cycler', 'cycler', (['"""c"""', "['r', 'g', 'y']"], {}), "('c', ['r', 'g', 'y'])\n", (1452, 1474), False, 'from cycler import cycler\n'), ((1500, 1534), 'cycler.cycler', 'cycler', (['"""hatch"""', "['xx', 'O', '|-']"], {}), 
"('hatch', ['xx', 'O', '|-'])\n", (1506, 1534), False, 'from cycler import cycler\n'), ((2013, 2045), 'cycler.cycler', 'cycler', (['"""color"""', "['r', 'g', 'y']"], {}), "('color', ['r', 'g', 'y'])\n", (2019, 2045), False, 'from cycler import cycler\n'), ((2071, 2105), 'cycler.cycler', 'cycler', (['"""hatch"""', "['xx', 'O', '|-']"], {}), "('hatch', ['xx', 'O', '|-'])\n", (2077, 2105), False, 'from cycler import cycler\n')] |
import numpy as np
from pingle.core.policy import Policy
class RandomPolicy:
actions = []
def get_action(self, *,
observation,
previous_reward,
public_speech):
"""
Parameters
----------
observation: Observation
Observation given to the agent.
previous_reward: float
Reward given to the agent for the previous policy.
public_speech: SpeechAct
Speech acts aligned with the current observation.
"""
assert len(self.actions) > 0, \
'Should use an inheriting class which overrides actions'
return np.random.choice(self.actions)
| [
"numpy.random.choice"
] | [((681, 711), 'numpy.random.choice', 'np.random.choice', (['self.actions'], {}), '(self.actions)\n', (697, 711), True, 'import numpy as np\n')] |
# description: scan for grammar scores
import os
import h5py
import glob
import json
import logging
import numpy as np
from tronn.datalayer import H5DataLoader
from tronn.interpretation.inference import run_inference
from tronn.interpretation.motifs import get_sig_pwm_vector
from tronn.nets.preprocess_nets import mutate_sequences_single_motif
from tronn.util.h5_utils import add_pwm_names_to_h5
from tronn.util.formats import write_to_json
from tronn.util.pwms import MotifSetManager
from tronn.util.scripts import parse_multi_target_selection_strings
from tronn.util.utils import DataKeys
def run(args):
"""run delta motif interaction mutagenesis (DMIM)
"""
# setup
logger = logging.getLogger(__name__)
logger.info("Running dmim scan")
if args.tmp_dir is not None:
os.system('mkdir -p {}'.format(args.tmp_dir))
else:
args.tmp_dir = args.out_dir
# set up inference params
args.inference_params = {
"cmd_name": "mutatemotifs",
"inference_mode": True,
"premodel_fn": mutate_sequences_single_motif,
"mutate_type": args.mutate_type,
"inference_fn_name": "postprocess_mutate",
"use_filtering": True}
args.debug = False
# get a sig pwms vector
# Note that if rc pwms used, will adjust in preprocess fn
sig_pwms = get_sig_pwm_vector(
args.sig_pwms_file,
args.sig_pwms_key,
args.foreground_targets,
reduce_type="any")
args.inference_params.update({"sig_pwms": sig_pwms})
logging.info("Loaded {} pwms to perturb".format(np.sum(sig_pwms)))
# adjust filter targets based on foreground
filter_targets = parse_multi_target_selection_strings(
args.foreground_targets)
new_filter_targets = []
for keys_and_indices, params in filter_targets:
new_filter_targets += keys_and_indices
args.filter_targets += [(new_filter_targets, {"reduce_type": "any"})]
# collect a prediction sample if ensemble (for cross model quantile norm)
# always need to do this if you're repeating backprop
if args.model["name"] == "ensemble":
true_sample_size = args.sample_size
args.sample_size = 1000
run_inference(args, warm_start=True)
args.sample_size = true_sample_size
# attach prediction sample to model
args.model["params"]["prediction_sample"] = args.prediction_sample
# run inference
inference_files = run_inference(args)
# add in PWM names to the datasets
if args.infer_json is not None:
with open(args.infer_json, "r") as fp:
args.infer_json = json.load(fp)
pwm_file = args.infer_json.get("pwm_file")
pwm_list = MotifSetManager.read_pwm_file(pwm_file)
pwm_names = [pwm.name for pwm in pwm_list]
for inference_file in inference_files:
add_pwm_names_to_h5(inference_file, pwm_names)
# save out dataset json
results_data_log = "{}/dataset.{}.json".format(args.out_dir, args.subcommand_name)
results_data_loader = H5DataLoader(
data_dir=args.out_dir, data_files=inference_files, fasta=args.fasta)
dataset = results_data_loader.describe()
dataset.update({
"targets": args.targets,
"target_indices": args.target_indices})
write_to_json(dataset, results_data_log)
return None
| [
"logging.getLogger",
"tronn.interpretation.motifs.get_sig_pwm_vector",
"tronn.interpretation.inference.run_inference",
"tronn.util.scripts.parse_multi_target_selection_strings",
"tronn.datalayer.H5DataLoader",
"json.load",
"numpy.sum",
"tronn.util.formats.write_to_json",
"tronn.util.pwms.MotifSetMan... | [((699, 726), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (716, 726), False, 'import logging\n'), ((1340, 1446), 'tronn.interpretation.motifs.get_sig_pwm_vector', 'get_sig_pwm_vector', (['args.sig_pwms_file', 'args.sig_pwms_key', 'args.foreground_targets'], {'reduce_type': '"""any"""'}), "(args.sig_pwms_file, args.sig_pwms_key, args.\n foreground_targets, reduce_type='any')\n", (1358, 1446), False, 'from tronn.interpretation.motifs import get_sig_pwm_vector\n'), ((1677, 1738), 'tronn.util.scripts.parse_multi_target_selection_strings', 'parse_multi_target_selection_strings', (['args.foreground_targets'], {}), '(args.foreground_targets)\n', (1713, 1738), False, 'from tronn.util.scripts import parse_multi_target_selection_strings\n'), ((2459, 2478), 'tronn.interpretation.inference.run_inference', 'run_inference', (['args'], {}), '(args)\n', (2472, 2478), False, 'from tronn.interpretation.inference import run_inference\n'), ((3059, 3145), 'tronn.datalayer.H5DataLoader', 'H5DataLoader', ([], {'data_dir': 'args.out_dir', 'data_files': 'inference_files', 'fasta': 'args.fasta'}), '(data_dir=args.out_dir, data_files=inference_files, fasta=args.\n fasta)\n', (3071, 3145), False, 'from tronn.datalayer import H5DataLoader\n'), ((3301, 3341), 'tronn.util.formats.write_to_json', 'write_to_json', (['dataset', 'results_data_log'], {}), '(dataset, results_data_log)\n', (3314, 3341), False, 'from tronn.util.formats import write_to_json\n'), ((2211, 2247), 'tronn.interpretation.inference.run_inference', 'run_inference', (['args'], {'warm_start': '(True)'}), '(args, warm_start=True)\n', (2224, 2247), False, 'from tronn.interpretation.inference import run_inference\n'), ((2720, 2759), 'tronn.util.pwms.MotifSetManager.read_pwm_file', 'MotifSetManager.read_pwm_file', (['pwm_file'], {}), '(pwm_file)\n', (2749, 2759), False, 'from tronn.util.pwms import MotifSetManager\n'), ((1584, 1600), 'numpy.sum', 'np.sum', 
(['sig_pwms'], {}), '(sig_pwms)\n', (1590, 1600), True, 'import numpy as np\n'), ((2636, 2649), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2645, 2649), False, 'import json\n'), ((2870, 2916), 'tronn.util.h5_utils.add_pwm_names_to_h5', 'add_pwm_names_to_h5', (['inference_file', 'pwm_names'], {}), '(inference_file, pwm_names)\n', (2889, 2916), False, 'from tronn.util.h5_utils import add_pwm_names_to_h5\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import cv2
import numpy as np
import os
import argparse
import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
import network
from tensorflow.python.util import deprecation
# disable future warnings and info messages for this demo
# Silence TensorFlow's C++ and Python logging for this demo.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Command-line interface.
parser = argparse.ArgumentParser(description="Single shot depth estimator")
parser.add_argument(
    "--img", type=str, help="path to reference RGB image", required=True
)
parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
parser.add_argument("--cpu", action="store_true", help="run on cpu")
parser.add_argument(
    "--original_size", action="store_true", help="if true, restore original image size"
)
parser.add_argument(
    "--dest",
    type=str,
    help="path to result folder. If not exists, it will be created",
    default="results",
)
opts = parser.parse_args()
if opts.cpu:
    # Hide all GPUs from TensorFlow when --cpu is requested.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def create_dir(d):
    """Create a directory (and any missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of an explicit
    ``os.path.exists`` check, which avoids the check-then-create race
    condition when several processes start at the same time.

    Args:
        d: directory to create
    """
    os.makedirs(d, exist_ok=True)
def main(_):
    """Run PyDNet depth inference over one image or a folder of .png images.

    Relies on the module-level ``opts`` (parsed CLI arguments); writes one
    "<name>_depth.png" colormapped depth map per input image into opts.dest.
    """
    network_params = {"height": 320, "width": 640, "is_training": False}
    # Collect the list of input images (single file or folder of .png).
    if os.path.isfile(opts.img):
        img_list = [opts.img]
    elif os.path.isdir(opts.img):
        img_list = glob.glob(os.path.join(opts.img, "*.{}".format("png")))
        img_list = sorted(img_list)
        if len(img_list) == 0:
            raise ValueError("No {} images found in folder {}".format(".png", opts.img))
        print("=> found {} images".format(len(img_list)))
    else:
        raise Exception("No image nor folder provided")
    # Build the TF1 graph: image placeholder -> PyDNet forward -> ReLU.
    model = network.Pydnet(network_params)
    tensor_image = tf.placeholder(tf.float32, shape=(320, 640, 3))
    batch_img = tf.expand_dims(tensor_image, 0)
    tensor_depth = model.forward(batch_img)
    tensor_depth = tf.nn.relu(tensor_depth)
    # restore graph
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, opts.ckpt)
    # run graph
    for i in tqdm(range(len(img_list))):
        # preparing image: BGR -> RGB, resize to the network input, scale to [0, 1]
        img = cv2.imread(img_list[i])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w, _ = img.shape
        img = cv2.resize(img, (640, 320))
        img = img / 255.0
        # inference
        depth = sess.run(tensor_depth, feed_dict={tensor_image: img})
        depth = np.squeeze(depth)
        # min-max normalize the depth map to [0, 255] for visualization
        min_depth = depth.min()
        max_depth = depth.max()
        depth = (depth - min_depth) / (max_depth - min_depth)
        depth *= 255.0
        # preparing final depth
        if opts.original_size:
            depth = cv2.resize(depth, (w, h))
        name = os.path.basename(img_list[i]).split(".")[0]
        dest = opts.dest
        create_dir(dest)
        dest = os.path.join(dest, name + "_depth.png")
        plt.imsave(dest, depth, cmap="magma")
if __name__ == "__main__":
    # tf.app.run parses TF flags and invokes main(_).
    tf.app.run()
| [
"tensorflow.app.run",
"os.path.exists",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.compat.v1.logging.set_verbosity",
"os.path.isdir",
"network.Pydnet",
"numpy.squeeze",
"os.path.isfile",
"cv2.cvtColor",
"tensorflow.expand_dims",
"cv2.resize",
"cv... | [((880, 942), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (914, 942), True, 'import tensorflow as tf\n'), ((953, 1019), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Single shot depth estimator"""'}), "(description='Single shot depth estimator')\n", (976, 1019), False, 'import argparse\n'), ((1875, 1899), 'os.path.isfile', 'os.path.isfile', (['opts.img'], {}), '(opts.img)\n', (1889, 1899), False, 'import os\n'), ((2333, 2363), 'network.Pydnet', 'network.Pydnet', (['network_params'], {}), '(network_params)\n', (2347, 2363), False, 'import network\n'), ((2383, 2430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(320, 640, 3)'}), '(tf.float32, shape=(320, 640, 3))\n', (2397, 2430), True, 'import tensorflow as tf\n'), ((2447, 2478), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor_image', '(0)'], {}), '(tensor_image, 0)\n', (2461, 2478), True, 'import tensorflow as tf\n'), ((2542, 2566), 'tensorflow.nn.relu', 'tf.nn.relu', (['tensor_depth'], {}), '(tensor_depth)\n', (2552, 2566), True, 'import tensorflow as tf\n'), ((2600, 2616), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2614, 2616), True, 'import tensorflow as tf\n'), ((2628, 2640), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2638, 2640), True, 'import tensorflow as tf\n'), ((3621, 3633), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3631, 3633), True, 'import tensorflow as tf\n'), ((1737, 1754), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (1751, 1754), False, 'import os\n'), ((1764, 1778), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (1775, 1778), False, 'import os\n'), ((1940, 1963), 'os.path.isdir', 'os.path.isdir', (['opts.img'], {}), '(opts.img)\n', (1953, 1963), False, 'import os\n'), ((2654, 2687), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (2685, 2687), True, 'import tensorflow as tf\n'), ((2823, 2846), 'cv2.imread', 'cv2.imread', (['img_list[i]'], {}), '(img_list[i])\n', (2833, 2846), False, 'import cv2\n'), ((2861, 2897), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2873, 2897), False, 'import cv2\n'), ((2940, 2967), 'cv2.resize', 'cv2.resize', (['img', '(640, 320)'], {}), '(img, (640, 320))\n', (2950, 2967), False, 'import cv2\n'), ((3101, 3118), 'numpy.squeeze', 'np.squeeze', (['depth'], {}), '(depth)\n', (3111, 3118), True, 'import numpy as np\n'), ((3502, 3541), 'os.path.join', 'os.path.join', (['dest', "(name + '_depth.png')"], {}), "(dest, name + '_depth.png')\n", (3514, 3541), False, 'import os\n'), ((3550, 3587), 'matplotlib.pyplot.imsave', 'plt.imsave', (['dest', 'depth'], {'cmap': '"""magma"""'}), "(dest, depth, cmap='magma')\n", (3560, 3587), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3377), 'cv2.resize', 'cv2.resize', (['depth', '(w, h)'], {}), '(depth, (w, h))\n', (3362, 3377), False, 'import cv2\n'), ((3393, 3422), 'os.path.basename', 'os.path.basename', (['img_list[i]'], {}), '(img_list[i])\n', (3409, 3422), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Plot one full period of the sine function.
theta = np.linspace(0, 2 * np.pi)
plt.plot(theta, np.sin(theta))
plt.show()
| [
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((55, 80), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (66, 80), True, 'import numpy as np\n'), ((82, 91), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (88, 91), True, 'import numpy as np\n'), ((92, 106), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (100, 106), True, 'import matplotlib.pyplot as plt\n'), ((106, 116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (114, 116), True, 'import matplotlib.pyplot as plt\n')] |
"""
<NAME>
CEA Saclay - DM2S/STMF/LGLS
Mars 2021 - Stage 6 mois
We provide here a python package that can be used to graph and plot TRUSt data within jupyterlab.
This work is based on the files package (a TRUST package that reads the son files).
"""
from trustutils import files as tf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import re
from trustutils.jupyter.filelist import FileAccumulator
mon_dictionnaire = {"x": "0", "y": "1", "z": "2", "Temperature": "0"}
def saveFileAccumulator(data):
    """
    Register a result file with the global FileAccumulator.

    The accumulator is fed paths relative to the ``build/`` directory, so we
    temporarily chdir into it.

    Parameters
    ---------
    data : str
        name of the file we want to save.

    Returns
    -------
    None
    """
    origin = os.getcwd()
    os.chdir(origin + "/build/")
    try:
        FileAccumulator.active = True
        FileAccumulator.Append(data)
    finally:
        # Always restore the caller's working directory, even if the
        # accumulator raises (the original left the process inside build/).
        os.chdir(origin)
def loadText(data,index_column=0,nb_column=-1,transpose=True,dtype="float", skiprows=0):
    """
    Load a whitespace-separated text file from the ``build/`` directory and
    register it with the FileAccumulator.

    Parameters
    ---------
    data : str
        name of the file we want to load.
    index_column : int
        Index of the first column kept.
    nb_column : int
        Number of columns loaded (-1 means "all remaining columns").
    transpose : bool
        if True, return the transposed selection.
    dtype : str
        dtype passed to ``numpy.loadtxt``.
    skiprows : int
        number of leading lines skipped when reading.

    Returns
    -------
    matrix : numpy.ndarray
        the (possibly transposed) selected columns.
    """
    origin = os.getcwd()
    os.chdir(origin + "/build/")
    try:
        # -1 means "up to the last column" (open-ended slice stop).
        stop = None if nb_column == -1 else index_column + nb_column
        matrix = np.loadtxt(data, dtype=dtype, skiprows=skiprows)[index_column:stop]
        if transpose:
            matrix = matrix.T
    finally:
        # Restore the caller's working directory even on read errors
        # (the original left the process inside build/ on failure).
        os.chdir(origin)
    saveFileAccumulator(data)
    return matrix
def lastMoment(name):
    """
    Return the last time value of a probe file.

    Parameters
    ---------
    name : str
        name of the file.

    Returns
    -------
    str
        first whitespace-separated token of the last line (the last time).
    """
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(name, "r") as f:
        return f.readlines()[-1].split(" ")[0]
def timeArray(name):
    """
    Return the list of time steps of a probe file.

    Lines whose first token is '#' (header lines) are skipped.

    Parameters
    ---------
    name : str
        name of the file.

    Returns
    -------
    list of float
        the time column of the file.
    """
    time_list = []
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(name, "r") as f:
        for line in f:
            token = line.split(" ")[0]
            if token != '#':
                time_list.append(float(token))
    return time_list
class Graph:
    r"""
    Graph is a class to plot .son (probe) files.
    """
    def __init__(self,title=None,subtitle=None,nX=1,nY=1,size=15):
        """
        Constructor of the class Graph.
        Parameters
        ---------
        title : str
            title of the plot.
        subtitle : str
            subtitle of the first subplot.
        nX : int
            number of subplots along the x axis.
        nY : int
            number of subplots along the y axis.
        size : int
            image size of the plot.
        """
        self.x_label=""
        self.y_label=""
        self.nX=nX
        self.nY=nY
        self.size=size
        self.xIndice=0
        self.yIndice=0
        if title==None: self.title=""
        else: self.title=title
        if subtitle==None: self.subtitle=""
        else: self.subtitle=subtitle
        self.flag=False
        self._reset()
    def coordonee(self):
        """
        Method returning the coordinates of the current subplot.
        Parameters
        ---------
        None
        Returns
        -------
        The coordinates of the subplot (an int for 1-D layouts,
        an (x, y) tuple for 2-D layouts).
        """
        if(self.nX==1 & self.nY==1):
            # NOTE(review): `&` binds tighter than `==`, so this evaluates
            # self.nX == (1 & self.nY) == 1 -- confirm intent.
            #raise ValueError("Use plot and not plot2!!!")
            return(1)
        elif((self.nX==1)|(self.nY==1)):
            return(max(self.xIndice,self.yIndice))
        else:
            return(self.xIndice,self.yIndice)
    def _reset(self):
        """
        Method to reinitialize the figure and its subplots.
        """
        self.fig,self.axs=plt.subplots(self.nY,self.nX,figsize=(self.size*self.nX,self.size))
        self.addPlot(self.coordonee())
        if self.nX*self.nY!=1: self.fig.suptitle(self.subtitle)
    def add(self,x,y,coordonee=0,marker="-",color=None,label="",title=None):
        # TODO(review): check whether coordonee=0 works for every layout.
        """
        Method to add a curve to the plot from raw x/y data.
        Parameters
        ---------
        x : float array
            x coordinates.
        y : float array
            y coordinates.
        coordonee : int or int array
            coordinates in the subplot list.
        marker : str
            matplotlib line style of the curve.
        color : str
            color of the curve.
        label : str
            label of the curve.
        title : str
            title of the curve.
        """
        if not (title is None): self.title=title
        self.addPlot(coordonee)
        ### Plot the data ###
        if color is None:
            self.subplot.plot(x,y,marker,label=label)
        else:
            self.subplot.plot(x,y,marker,label=label,color=color)
        self.subplot.legend()
        ## Add the axis titles
        self.subplot.set(xlabel=self.x_label,ylabel=self.y_label)
        self.subplot.grid()
    def addPlot(self,coordonee,title=""):
        """
        Method to select/initialize a subplot.
        Parameters
        ---------
        coordonee : int or int tuple
            coordinates of the subplot in the grid.
        title : str
            Title of the plot.
        """
        if title!="":
            self.title=title
        if(self.nX==1 & self.nY==1):
            # NOTE(review): same `&` / `==` precedence subtlety as in
            # coordonee() -- confirm intent.
            #raise ValueError("Use plot and not plot2!!!")
            self.flag=True
        elif (self.nX==1)|(self.nY==1): self.xIndice=coordonee
        else:
            self.xIndice=coordonee[0]
            self.yIndice=coordonee[1]
        if self.flag : self.subplot=self.axs
        else: self.subplot=self.axs[self.coordonee()]
        self.subplot.grid()
        self.subplot.set_title(self.title)
    def addPoint(self,data,color=None,marker="-",var="x",start=0,end=1000000000000,label=None,param="Time"):
        """
        Method to add a curve to the plot from a point probe (sonde).
        Parameters
        ---------
        data : str
            Address of the file, relative to build/.
        color : str
            Color of the curve.
        marker : str
            symbol of the plotted line.
        var : str
            coordinate we want to plot (key of mon_dictionnaire).
        start : float
            Time start.
        end : float
            Time end.
        label : str
            title of the curve.
        param : str
            column used as the x axis (default "Time").
        """
        if label==None:
            label=data
        donne =tf.SonPOINTFile("build/" + data, None)
        saveFileAccumulator(data)
        #filelist.FileAccumulator.AppendFromProbe("build/"+data)
        ### Retrieve the variable names ###
        self.y_label=donne.getEntries()[0]
        self.x_label=donne.getXLabel()
        T=[]
        x=[]
        y=[]
        z=[]
        for i in donne.getEntries():
            T.append(donne.getValues(i)[1])
        t=donne.getValues(self.y_label)[0]
        # Parse the probe coordinates out of the first entry name.
        tmp=re.findall(r"[-+]?\d*\.\d+|\d+", donne.getEntries()[0])
        x.append(float(tmp[0]))
        y.append(float(tmp[1]))
        varTMP=len(y) ### variable tracking the vector length ###
        if len(tmp)<2:
            # NOTE(review): this branch reads tmp[2] although len(tmp)<2,
            # which would raise IndexError (addSegment uses len(tmp)>2) --
            # confirm the intended condition.
            z.append(tmp[2])
        else:
            z.append(list(np.zeros(varTMP))[0])
        df = pd.DataFrame({"x": x, "y": y,"z":z})
        dt = pd.DataFrame({"Time": t})
        ### Inner join (cartesian product via a constant key) ###
        r=pd.merge(df.assign(key=0), dt.assign(key=0), on='key').drop('key', axis=1)
        r=r.sort_values(by=['Time','x','y','z'])
        for i in range(len(T)):
            r[str(i)]=T[i]
        r=r[(r["Time"]<end)&(r["Time"]>start)]
        ### Plot the data ###
        if color is None:
            self.subplot.plot(list(r[param]),list(r[mon_dictionnaire[var]]),marker,label=label)
        else:
            self.subplot.plot(list(r[param]),list(r[mon_dictionnaire[var]]),marker,label=label,color=color)
        self.subplot.legend()
        ## Add the axis titles
        self.subplot.set(xlabel=self.x_label,ylabel=self.y_label)
    ## To be tested
    def visu(self,xmin=None,xmax=None,ymin=None,ymax=None):
        """
        Method to display the figure.
        Parameters
        ---------
        xmin : float
            Minimum of the plotted interval of x.
        xmax : float
            Maximum of the plotted interval of x.
        ymin : float
            Minimum of the plotted interval of y.
        ymax : float
            Maximum of the plotted interval of y.
        """
        if self.nY*self.nX==1:
            plt.legend()
        plt.gca().set_xlim([xmin,xmax])
        plt.gca().set_ylim([ymin,ymax])
        plt.show()
    ## addSegment
    def addSegment(self,data,antecedant,param="Time",value=None,start=0,end=1000000,color=None,marker="-",label=None,var="x",nb=1):
        """
        Method to add a curve to the plot from a segment probe (sonde).
        Parameters
        ---------
        data : str
            Address of the file, relative to build/.
        antecedant : str
            variable used as the x axis.
        param : str
            parameter we consider constant.
        value : str
            value of the parameter. If None, the last time step is used
            (or zero when param is not "Time").
        start : float
            Time start.
        end : float
            Time end.
        color : str
            Color of the curve.
        marker : str
            symbol of the plotted line.
        label : str
            title of the curve.
        var : int
            coordinate we want to plot.
        nb : float
            rounding precision (digits) used when matching `value`.
        Returns
        -------
        """
        if label==None:
            label=data
        # By default, plot the last time step.
        if(value==None) and (param=="Time"):
            value=float(lastMoment("build/" + data))
        elif(param!="Time"):
            value=0
        donne =tf.SonSEGFile("build/" + data, None)
        saveFileAccumulator(data)
        # Retrieve the variable names
        self.y_label=donne.getEntries()[0]
        self.x_label=donne.getXLabel()
        x=[]
        y=[]
        z=[]
        t=donne.getValues(self.y_label)[0]
        T=[]
        for i in donne.getEntries():
            tmp=re.findall(r"[-+]?\d*\.\d+|\d+", i)
            x.append(float(tmp[0]))
            y.append(float(tmp[1]))
            varTMP=len(y) ### variable tracking the vector length ###
            if len(tmp)>2:
                z.append(tmp[2])
            else:
                z.append(list(np.zeros(varTMP))[0])
        for i in donne.getEntries():
            T.append(list(donne.getValues(i)[1]))
        T=np.resize(np.array(T).T,(len(T)*len(T[0])))
        df = pd.DataFrame({"x": x, "y": y,"z":z})
        dt = pd.DataFrame({"Time": t})
        ### Inner join (cartesian product via a constant key) ###
        r=pd.merge(df.assign(key=0), dt.assign(key=0), on='key').drop('key', axis=1)
        r=r.sort_values(by=['Time','x','y','z'])
        tempVar=0
        for i in T:
            # NOTE(review): tempVar stays 0 (0+0=0), so only column "0" is
            # rewritten on every iteration -- confirm intent.
            r[str(tempVar)]=T
            tempVar+=tempVar
        r=r[(start<=r["Time"])&(end>=r["Time"])]
        # Plot the data
        #print(r)
        condition=(round(r[param],nb)==round(value,nb))
        #condition=(abs(r[param]-value)<0.00000000000000001)
        if(color==""):
            self.subplot.plot(list(r[condition][antecedant]),list(r[condition][mon_dictionnaire[var]]),marker,label=label)
        else :
            self.subplot.plot(list(r[condition][antecedant]),list(r[condition][mon_dictionnaire[var]]),marker,label=label,color=color)
        self.subplot.legend()
        ## Add the axis titles
        self.subplot.set(xlabel=self.x_label,ylabel=self.y_label)
    ## addSegment
    def addSegment2(self,data,value="x",color=None,marker="-",label=None):
        """
        Method to add a curve to the plot from a segment probe (sonde).
        Parameters
        ---------
        data : str
            Address of the file, relative to build/.
        value : str
            axis (key of mon_dictionnaire).
        color : str
            Color of the curve.
        marker : str
            symbol of the plotted line.
        label : str
            title of the curve.
        Returns
        -------
        """
        if label==None:
            label=data
        donne = tf.SonSEGFile("build/" +data, None)
        dat = donne.getEntriesSeg()
        x, y = donne.getValues(dat[int(mon_dictionnaire[value])])
        if(color==""):
            self.subplot.plot(x,y,marker,label=label)
        else :
            self.subplot.plot(x,y,marker,label=label,color=color)
        saveFileAccumulator(data)
        self.subplot.legend()
        ## Add the axis titles
        self.subplot.set(xlabel=self.x_label,ylabel=self.y_label)
    def label(self,x_label,y_label):
        """
        Method to change the axis labels.
        Parameters
        ---------
        x_label : str
            Label of x.
        y_label : str
            Label of y.
        Returns
        -------
        """
        self.subplot.set(xlabel=x_label,ylabel=y_label)
class Table:  # formerly "tableau"
    r"""
    Class holding a results table (a pandas DataFrame with named rows).
    """
    def __init__(self, columns):
        """
        Constructor of the class Table.

        Parameters
        ---------
        columns : list of str
            Names of the columns.
        """
        self.columns = columns
        self._reset()

    def _reset(self):
        """
        Reinitialize the table to an empty DataFrame.
        """
        self.df = pd.DataFrame(columns=self.columns)

    def addLigne(self, ligne, name):
        """
        Append one row to the table.

        Parameters
        ---------
        ligne : list
            Row values (a list containing one list of cell values).
        name : str
            Index label of the new row.
        """
        dftmp = pd.DataFrame(ligne, columns=self.columns, index=[name])
        # DataFrame.append was removed in pandas 2.0 -> use pd.concat.
        self.df = pd.concat([self.df, dftmp])

    def load(self, data, name):
        """
        Load a numeric text file from ``build/`` and append it as a row.

        Parameters
        ---------
        data : str
            File name, relative to the build directory.
        name : str
            Index label of the new row.
        """
        self.addLigne([list(np.loadtxt("build/" + data, dtype=float))], name)
        saveFileAccumulator(data)

    def loadPoint(self, data, name):
        """
        Load a point probe (.son) file and append its first value as a row.

        Parameters
        ---------
        data : str
            Probe file name, relative to the build directory.
        name : str
            Index label of the new row.
        """
        tmp = tf.SonPOINTFile("build/" + data, None)
        saveFileAccumulator(data)
        a = tmp.getValues(tmp.getEntries()[0])[1][1]
        self.addLigne([[a]], name)
| [
"trustutils.jupyter.filelist.FileAccumulator.Append",
"matplotlib.pyplot.gca",
"os.getcwd",
"os.chdir",
"trustutils.files.SonSEGFile",
"numpy.loadtxt",
"re.findall",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"trustutils.files.SonPOINTFile",
"matplotlib.pyplot.subplots",
"matplotlib.p... | [((730, 741), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (739, 741), False, 'import os\n'), ((774, 788), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (782, 788), False, 'import os\n'), ((837, 865), 'trustutils.jupyter.filelist.FileAccumulator.Append', 'FileAccumulator.Append', (['data'], {}), '(data)\n', (859, 865), False, 'from trustutils.jupyter.filelist import FileAccumulator\n'), ((875, 891), 'os.chdir', 'os.chdir', (['origin'], {}), '(origin)\n', (883, 891), False, 'import os\n'), ((1489, 1500), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1498, 1500), False, 'import os\n'), ((1533, 1547), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1541, 1547), False, 'import os\n'), ((1847, 1863), 'os.chdir', 'os.chdir', (['origin'], {}), '(origin)\n', (1855, 1863), False, 'import os\n'), ((4280, 4352), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.nY', 'self.nX'], {'figsize': '(self.size * self.nX, self.size)'}), '(self.nY, self.nX, figsize=(self.size * self.nX, self.size))\n', (4292, 4352), True, 'import matplotlib.pyplot as plt\n'), ((7157, 7195), 'trustutils.files.SonPOINTFile', 'tf.SonPOINTFile', (["('build/' + data)", 'None'], {}), "('build/' + data, None)\n", (7172, 7195), True, 'from trustutils import files as tf\n'), ((7990, 8028), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'z': z}"], {}), "({'x': x, 'y': y, 'z': z})\n", (8002, 8028), True, 'import pandas as pd\n'), ((8040, 8065), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time': t}"], {}), "({'Time': t})\n", (8052, 8065), True, 'import pandas as pd\n'), ((9470, 9480), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9478, 9480), True, 'import matplotlib.pyplot as plt\n'), ((10743, 10779), 'trustutils.files.SonSEGFile', 'tf.SonSEGFile', (["('build/' + data)", 'None'], {}), "('build/' + data, None)\n", (10756, 10779), True, 'from trustutils import files as tf\n'), ((11593, 11631), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'z': z}"], {}), 
"({'x': x, 'y': y, 'z': z})\n", (11605, 11631), True, 'import pandas as pd\n'), ((11643, 11668), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time': t}"], {}), "({'Time': t})\n", (11655, 11668), True, 'import pandas as pd\n'), ((13256, 13292), 'trustutils.files.SonSEGFile', 'tf.SonSEGFile', (["('build/' + data)", 'None'], {}), "('build/' + data, None)\n", (13269, 13292), True, 'from trustutils import files as tf\n'), ((14597, 14631), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.columns'}), '(columns=self.columns)\n', (14609, 14631), True, 'import pandas as pd\n'), ((14958, 15013), 'pandas.DataFrame', 'pd.DataFrame', (['ligne'], {'columns': 'self.columns', 'index': '[name]'}), '(ligne, columns=self.columns, index=[name])\n', (14970, 15013), True, 'import pandas as pd\n'), ((15782, 15820), 'trustutils.files.SonPOINTFile', 'tf.SonPOINTFile', (["('build/' + data)", 'None'], {}), "('build/' + data, None)\n", (15797, 15820), True, 'from trustutils import files as tf\n'), ((1769, 1817), 'numpy.loadtxt', 'np.loadtxt', (['data'], {'dtype': 'dtype', 'skiprows': 'skiprows'}), '(data, dtype=dtype, skiprows=skiprows)\n', (1779, 1817), True, 'import numpy as np\n'), ((9347, 9359), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9357, 9359), True, 'import matplotlib.pyplot as plt\n'), ((11108, 11146), 're.findall', 're.findall', (['"""[-+]?\\\\d*\\\\.\\\\d+|\\\\d+"""', 'i'], {}), "('[-+]?\\\\d*\\\\.\\\\d+|\\\\d+', i)\n", (11118, 11146), False, 'import re\n'), ((1675, 1723), 'numpy.loadtxt', 'np.loadtxt', (['data'], {'dtype': 'dtype', 'skiprows': 'skiprows'}), '(data, dtype=dtype, skiprows=skiprows)\n', (1685, 1723), True, 'import numpy as np\n'), ((11546, 11557), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (11554, 11557), True, 'import numpy as np\n'), ((9372, 9381), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9379, 9381), True, 'import matplotlib.pyplot as plt\n'), ((9416, 9425), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9423, 
9425), True, 'import matplotlib.pyplot as plt\n'), ((15380, 15420), 'numpy.loadtxt', 'np.loadtxt', (["('build/' + data)"], {'dtype': 'float'}), "('build/' + data, dtype=float)\n", (15390, 15420), True, 'import numpy as np\n'), ((7954, 7970), 'numpy.zeros', 'np.zeros', (['varTMP'], {}), '(varTMP)\n', (7962, 7970), True, 'import numpy as np\n'), ((11407, 11423), 'numpy.zeros', 'np.zeros', (['varTMP'], {}), '(varTMP)\n', (11415, 11423), True, 'import numpy as np\n')] |
#In 1
from __future__ import print_function
from __future__ import division
import pandas as pd
import numpy as np
# from matplotlib import pyplot as plt
# import seaborn as sns
# from sklearn.model_selection import train_test_split
import statsmodels.api as sm
# just for the sake of this blog post!
from warnings import filterwarnings
filterwarnings('ignore')
#In 2
# load the provided data (indexed by city, year, weekofyear)
train_features = pd.read_csv('data/dengue_features_train.csv',
                             index_col=[0,1,2])
train_labels = pd.read_csv('data/dengue_labels_train.csv',
                           index_col=[0,1,2])
#In 3
# Separate data for San Juan
sj_train_features = train_features.loc['sj']
sj_train_labels = train_labels.loc['sj']
#In 6
# Remove `week_start_date` string.
sj_train_features.drop('week_start_date', axis=1, inplace=True)
#In 7
# Null check (inspection only; result is not stored)
pd.isnull(sj_train_features).any()
#In 9
# Forward-fill remaining missing values in place.
sj_train_features.fillna(method='ffill', inplace=True)
#In 13
sj_train_features['total_cases'] = sj_train_labels.total_cases
#In 14
# compute the correlations
sj_correlations = sj_train_features.corr()
def preprocess_data(data_path, labels_path=None):
    """
    Load the dengue feature CSV and return the San Juan ('sj') slice.

    Parameters
    ----------
    data_path : str
        Path to the features CSV (indexed by city, year, weekofyear).
    labels_path : str, optional
        Path to the labels CSV; when given, labels are joined onto the
        features.

    Returns
    -------
    pandas.DataFrame
        Forward-filled selected features (plus labels) for city 'sj'.
    """
    # load data and set index to city, year, weekofyear
    df = pd.read_csv(data_path, index_col=[0, 1, 2])
    # select the features we want
    features = ['reanalysis_specific_humidity_g_per_kg',
                'reanalysis_dew_point_temp_k',
                'station_avg_temp_c']
    # fill missing values by carrying the last observation forward.
    # (fillna(method='ffill') is deprecated since pandas 2.1; this also
    # avoids inplace chained-assignment on the column subset)
    df = df[features].ffill()
    # add labels to dataframe
    if labels_path:
        labels = pd.read_csv(labels_path, index_col=[0, 1, 2])
        df = df.join(labels)
    # separate san juan (other cities are dropped by the .loc)
    sj = df.loc['sj']
    return sj
#In 20
sj_train = preprocess_data('data/dengue_features_train.csv',
                           labels_path="data/dengue_labels_train.csv")
#In 23
# NOTE(review): subtrain is the FULL training set (head(800) is disabled),
# so subtest (the tail) overlaps it -- confirm this is intended.
sj_train_subtrain = sj_train
#sj_train_subtrain = sj_train.head(800)
sj_train_subtest = sj_train.tail(sj_train.shape[0] - 800)
#In 24
from statsmodels.tools import eval_measures
import statsmodels.formula.api as smf
def get_best_model(train, test):
    """Grid-search the NegativeBinomial alpha for a GLM of total_cases,
    then refit the best model on train+test combined and return it."""
    # Step 1: specify the form of the model
    model_formula = "total_cases ~ 1 + " \
                    "reanalysis_specific_humidity_g_per_kg + " \
                    "reanalysis_dew_point_temp_k + " \
                    "station_avg_temp_c"
    # Candidate alphas: 1e-8 .. 1e-4 (log-spaced).
    grid = 10 ** np.arange(-8, -3, dtype=np.float64)
    best_alpha = []
    best_score = 1000
    # Step 2: Find the best hyper parameter, alpha
    for alpha in grid:
        model = smf.glm(formula=model_formula,
                        data=train,
                        family=sm.families.NegativeBinomial(alpha=alpha))
        results = model.fit()
        predictions = results.predict(test).astype(int)
        # Score = mean absolute error on the held-out set.
        score = eval_measures.meanabs(predictions, test.total_cases)
        if score < best_score:
            best_alpha = alpha
            best_score = score
    print('best alpha = ', best_alpha)
    print('best score = ', best_score)
    # Step 3: refit on entire dataset
    full_dataset = pd.concat([train, test])
    model = smf.glm(formula=model_formula,
                    data=full_dataset,
                    family=sm.families.NegativeBinomial(alpha=best_alpha))
    fitted_model = model.fit()
    return fitted_model
sj_best_model = get_best_model(sj_train_subtrain, sj_train_subtest)
#In 27
sj_test = preprocess_data('data/dengue_features_test.csv')
sj_predictions = sj_best_model.predict(sj_test).astype(int)
submission = pd.read_csv("data/submission_format.csv",
index_col=[0, 1, 2])
submission.total_cases = np.concatenate([sj_predictions])
submission.to_csv("data/benchmark.csv")
| [
"pandas.isnull",
"pandas.read_csv",
"statsmodels.tools.eval_measures.meanabs",
"statsmodels.api.families.NegativeBinomial",
"numpy.concatenate",
"pandas.concat",
"warnings.filterwarnings",
"numpy.arange"
] | [((343, 367), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (357, 367), False, 'from warnings import filterwarnings\n'), ((417, 483), 'pandas.read_csv', 'pd.read_csv', (['"""data/dengue_features_train.csv"""'], {'index_col': '[0, 1, 2]'}), "('data/dengue_features_train.csv', index_col=[0, 1, 2])\n", (428, 483), True, 'import pandas as pd\n'), ((527, 591), 'pandas.read_csv', 'pd.read_csv', (['"""data/dengue_labels_train.csv"""'], {'index_col': '[0, 1, 2]'}), "('data/dengue_labels_train.csv', index_col=[0, 1, 2])\n", (538, 591), True, 'import pandas as pd\n'), ((3645, 3707), 'pandas.read_csv', 'pd.read_csv', (['"""data/submission_format.csv"""'], {'index_col': '[0, 1, 2]'}), "('data/submission_format.csv', index_col=[0, 1, 2])\n", (3656, 3707), True, 'import pandas as pd\n'), ((3759, 3791), 'numpy.concatenate', 'np.concatenate', (['[sj_predictions]'], {}), '([sj_predictions])\n', (3773, 3791), True, 'import numpy as np\n'), ((1234, 1277), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'index_col': '[0, 1, 2]'}), '(data_path, index_col=[0, 1, 2])\n', (1245, 1277), True, 'import pandas as pd\n'), ((3192, 3216), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), '([train, test])\n', (3201, 3216), True, 'import pandas as pd\n'), ((865, 893), 'pandas.isnull', 'pd.isnull', (['sj_train_features'], {}), '(sj_train_features)\n', (874, 893), True, 'import pandas as pd\n'), ((1617, 1662), 'pandas.read_csv', 'pd.read_csv', (['labels_path'], {'index_col': '[0, 1, 2]'}), '(labels_path, index_col=[0, 1, 2])\n', (1628, 1662), True, 'import pandas as pd\n'), ((2454, 2489), 'numpy.arange', 'np.arange', (['(-8)', '(-3)'], {'dtype': 'np.float64'}), '(-8, -3, dtype=np.float64)\n', (2463, 2489), True, 'import numpy as np\n'), ((2896, 2948), 'statsmodels.tools.eval_measures.meanabs', 'eval_measures.meanabs', (['predictions', 'test.total_cases'], {}), '(predictions, test.total_cases)\n', (2917, 2948), False, 'from statsmodels.tools import 
eval_measures\n'), ((3326, 3372), 'statsmodels.api.families.NegativeBinomial', 'sm.families.NegativeBinomial', ([], {'alpha': 'best_alpha'}), '(alpha=best_alpha)\n', (3354, 3372), True, 'import statsmodels.api as sm\n'), ((2750, 2791), 'statsmodels.api.families.NegativeBinomial', 'sm.families.NegativeBinomial', ([], {'alpha': 'alpha'}), '(alpha=alpha)\n', (2778, 2791), True, 'import statsmodels.api as sm\n')] |
from knmy import knmy
import pandas as pd
import numpy as np
def knmi_get(start, end, stations=(240,)):
    """
    Download and clean hourly KNMI weather data.

    Parameters
    ----------
    start, end : period boundaries, in the format expected by
        ``knmy.get_hourly_data``.
    stations : sequence of int
        KNMI station numbers (default: 240, Schiphol). The default is a
        tuple instead of a mutable list on purpose.

    Returns
    -------
    pandas.DataFrame
        Hourly observations with renamed columns, NDW-aligned hours,
        numeric values and non-negative precipitation.
    """
    # knmy.get_hourly_data returns a tuple with 4 items. Immediately index
    # to [3] to get the df with weather variables.
    # BUG FIX: the original hard-coded stations=[240] here and silently
    # ignored the `stations` argument.
    knmi_data = knmy.get_hourly_data(stations=list(stations), start=start, end=end,
                                     inseason=False, variables=['ALL'], parse=True)[3]
    # Rename columns
    cols = ['weatherstation', 'date', 'hour', 'winddirection', 'windspeed_avg',
            'windspeed_10m', 'windspeed_max', 'temperature', 'temperature_min',
            'temperature_dewpoint', 'sunduration', 'sunradiation',
            'precipitationduration', 'precipitation', 'airpressure',
            'horizontalview', 'cloudcover', 'relativehumidity', 'weathercode',
            'weathercodeindicator', 'mist', 'rain', 'snow', 'storm', 'ice']
    knmi_data.columns = cols
    # Drop columns that are not used downstream
    knmi_data.drop(['winddirection', 'windspeed_10m', 'temperature_dewpoint',
                    'horizontalview', 'cloudcover', 'weathercode',
                    'weathercodeindicator'], axis=1, inplace=True)
    # Drop first row since it's actually a header, then reset index
    knmi_data.drop([0], inplace=True)
    knmi_data.reset_index(drop=True, inplace=True)
    # Subtract one hour to make data in line with NDW
    knmi_data.hour = knmi_data.hour.astype(int) - 1
    # Remove columns with NA vals
    knmi_data.dropna(axis=1, inplace=True)
    # Make dataframe numeric
    knmi_data = knmi_data.apply(pd.to_numeric)
    # Remove negative precipitation values (-1 means 0.05mm or less)
    knmi_data['precipitation'] = \
        np.where(knmi_data['precipitation'] < 0, 0, knmi_data['precipitation'])
    # Make date col a datetime object
    knmi_data['date'] = pd.to_datetime(knmi_data['date'], yearfirst=True,
                                       format='%Y%m%d').dt.date
    return knmi_data
################################## METADATA ##################################
#
# STN LON(east) LAT(north) ALT(m) NAME
# 240: 4.790 52.318 -3.30 SCHIPHOL
#
# YYYYMMDD = datum (YYYY=jaar,MM=maand,DD=dag);
# HH = tijd (HH=uur, UT.12 UT=13 MET, 14 MEZT. Uurvak 05 loopt van 04.00 UT tot 5.00 UT;
# DD = Windrichting (in graden) gemiddeld over de laatste 10 minuten van het afgelopen uur (360=noord, 90=oost, 180=zuid, 270=west, 0=windstil 990=veranderlijk. Zie http://www.knmi.nl/kennis-en-datacentrum/achtergrond/klimatologische-brochures-en-boeken;
# FH = Uurgemiddelde windsnelheid (in 0.1 m/s). Zie http://www.knmi.nl/kennis-en-datacentrum/achtergrond/klimatologische-brochures-en-boeken;
# FF = Windsnelheid (in 0.1 m/s) gemiddeld over de laatste 10 minuten van het afgelopen uur;
# FX = Hoogste windstoot (in 0.1 m/s) over het afgelopen uurvak;
# T = Temperatuur (in 0.1 graden Celsius) op 1.50 m hoogte tijdens de waarneming;
# T10N = Minimumtemperatuur (in 0.1 graden Celsius) op 10 cm hoogte in de afgelopen 6 uur;
# TD = Dauwpuntstemperatuur (in 0.1 graden Celsius) op 1.50 m hoogte tijdens de waarneming;
# SQ = Duur van de zonneschijn (in 0.1 uren) per uurvak, berekend uit globale straling (-1 for <0.05 uur);
# Q = Globale straling (in J/cm2) per uurvak;
# DR = Duur van de neerslag (in 0.1 uur) per uurvak;
# RH = Uursom van de neerslag (in 0.1 mm) (-1 voor <0.05 mm);
# P = Luchtdruk (in 0.1 hPa) herleid naar zeeniveau, tijdens de waarneming;
# VV = Horizontaal zicht tijdens de waarneming (0=minder dan 100m, 1=100-200m, 2=200-300m,..., 49=4900-5000m, 50=5-6km, 56=6-7km, 57=7-8km, ..., 79=29-30km, 80=30-35km, 81=35-40km,..., 89=meer dan 70km);
# N = Bewolking (bedekkingsgraad van de bovenlucht in achtsten), tijdens de waarneming (9=bovenlucht onzichtbaar);
# U = Relatieve vochtigheid (in procenten) op 1.50 m hoogte tijdens de waarneming;
# WW = Weercode (00-99), visueel(WW) of automatisch(WaWa) waargenomen, voor het actuele weer of het weer in het afgelopen uur. Zie http://bibliotheek.knmi.nl/scholierenpdf/weercodes_Nederland;
# IX = Weercode indicator voor de wijze van waarnemen op een bemand of automatisch station (1=bemand gebruikmakend van code uit visuele waarnemingen, 2,3=bemand en weggelaten (geen belangrijk weersverschijnsel, geen gegevens), 4=automatisch en opgenomen (gebruikmakend van code uit visuele waarnemingen), 5,6=automatisch en weggelaten (geen belangrijk weersverschijnsel, geen gegevens), 7=automatisch gebruikmakend van code uit automatische waarnemingen);
# M = Mist 0=niet voorgekomen, 1=wel voorgekomen in het voorgaande uur en/of tijdens de waarneming;
# R = Regen 0=niet voorgekomen, 1=wel voorgekomen in het voorgaande uur en/of tijdens de waarneming;
# S = Sneeuw 0=niet voorgekomen, 1=wel voorgekomen in het voorgaande uur en/of tijdens de waarneming;
# O = Onweer 0=niet voorgekomen, 1=wel voorgekomen in het voorgaande uur en/of tijdens de waarneming;
# Y = IJsvorming 0=niet voorgekomen, 1=wel voorgekomen in het voorgaande uur en/of tijdens de waarneming;
| [
"numpy.where",
"pandas.to_datetime",
"knmy.knmy.get_hourly_data"
] | [((1676, 1747), 'numpy.where', 'np.where', (["(knmi_data['precipitation'] < 0)", '(0)', "knmi_data['precipitation']"], {}), "(knmi_data['precipitation'] < 0, 0, knmi_data['precipitation'])\n", (1684, 1747), True, 'import numpy as np\n'), ((246, 355), 'knmy.knmy.get_hourly_data', 'knmy.get_hourly_data', ([], {'stations': '[240]', 'start': 'start', 'end': 'end', 'inseason': '(False)', 'variables': "['ALL']", 'parse': '(True)'}), "(stations=[240], start=start, end=end, inseason=False,\n variables=['ALL'], parse=True)\n", (266, 355), False, 'from knmy import knmy\n'), ((1810, 1876), 'pandas.to_datetime', 'pd.to_datetime', (["knmi_data['date']"], {'yearfirst': '(True)', 'format': '"""%Y%m%d"""'}), "(knmi_data['date'], yearfirst=True, format='%Y%m%d')\n", (1824, 1876), True, 'import pandas as pd\n')] |
import numpy as np
def PCA_numpy(data, n_components=2):
#1nd step is to find covarience matrix
data_vector = []
for i in range(data.shape[1]):
data_vector.append(data[:, i])
cov_matrix = np.cov(data_vector)
#2rd step is to compute eigen vectors and eigne values
eig_values, eig_vectors = np.linalg.eig(cov_matrix)
eig_values = np.reshape(eig_values, (len(cov_matrix), 1))
#Make pairs
eig_pairs = []
for i in range(len(eig_values)):
eig_pairs.append([np.abs(eig_values[i]), eig_vectors[:,i]])
eig_pairs.sort()
eig_pairs.reverse()
#This PCA is only for 2 components
reduced_data = np.hstack((eig_pairs[0][1].reshape(len(eig_pairs[0][1]),1), eig_pairs[1][1].reshape(len(eig_pairs[0][1]),1)))
return data.dot(reduced_data) | [
"numpy.abs",
"numpy.cov",
"numpy.linalg.eig"
] | [((222, 241), 'numpy.cov', 'np.cov', (['data_vector'], {}), '(data_vector)\n', (228, 241), True, 'import numpy as np\n'), ((336, 361), 'numpy.linalg.eig', 'np.linalg.eig', (['cov_matrix'], {}), '(cov_matrix)\n', (349, 361), True, 'import numpy as np\n'), ((528, 549), 'numpy.abs', 'np.abs', (['eig_values[i]'], {}), '(eig_values[i])\n', (534, 549), True, 'import numpy as np\n')] |
import typing
import numpy as np
import numba as nb
@nb.njit
def uf_build(n: int) -> np.ndarray:
return np.full(n, -1, np.int64)
@nb.njit
def uf_find(uf: np.ndarray, u: int) -> int:
if uf[u] < 0: return u
uf[u] = uf_find(uf, uf[u])
return uf[u]
@nb.njit
def uf_unite(
uf: np.ndarray,
u: int,
v: int,
) -> typing.NoReturn:
u, v = uf_find(uf, u), uf_find(uf, v)
if u == v: return
if uf[u] > uf[v]: u, v = v, u
uf[u] += uf[v]
uf[v] = u | [
"numpy.full"
] | [((112, 136), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (119, 136), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Graph Viewer demo.
Renders a phonebot into an OpenGL-based window, looping through joint
angles; i.e., we construct a PhonebotGraph and update the transforms of
the legs.
"""
import time
import numpy as np
from phonebot.core.common.math.utils import anorm
from phonebot.core.common.config import PhonebotSettings
from phonebot.core.frame_graph.phonebot_graph import PhonebotGraph
from phonebot.core.frame_graph.graph_utils import (
solve_knee_angle, solve_inverse_kinematics, get_graph_geometries,
initialize_graph_zero)
from phonebot.vis.viewer.phonebot_viewer import PhonebotViewer
from phonebot.vis.viewer.viewer_base import HandleHelper
def main():
config = PhonebotSettings()
# Set queue size to 1 since we're not changing time.
# (This will get rid of buffering artifacts)
config.queue_size = 1
graph = PhonebotGraph(config)
viewer = PhonebotViewer()
handler = HandleHelper(viewer)
# Arbitrary stamp.
stamp = time.time()
# Initialize angles to zero.
initialize_graph_zero(graph, stamp, config)
# Sweep angles for both joints, run ik and visualize results.
for hip_angle_a in np.linspace(0.0, 2 * np.pi, 20):
for hip_angle_b in np.linspace(0.0, 2 * np.pi, 20):
for leg_prefix in config.order:
hip_joint_a = f'{leg_prefix}_hip_joint_a'
hip_joint_b = f'{leg_prefix}_hip_joint_b'
knee_joint_a = f'{leg_prefix}_knee_joint_a'
knee_joint_b = f'{leg_prefix}_knee_joint_b'
foot_a = f'{leg_prefix}_foot_a'
foot_b = f'{leg_prefix}_foot_b'
graph.get_edge(knee_joint_a, hip_joint_a).update(
stamp, hip_angle_a)
graph.get_edge(knee_joint_b, hip_joint_b).update(
stamp, hip_angle_b)
knee_angle_a, knee_angle_b = solve_knee_angle(
graph, leg_prefix, stamp, config=config)
stamp = time.time()
graph.get_edge(knee_joint_a, hip_joint_a).update(
stamp, hip_angle_a)
graph.get_edge(knee_joint_b, hip_joint_b).update(
stamp, hip_angle_b)
graph.get_edge(foot_a, knee_joint_a).update(
stamp, knee_angle_a)
graph.get_edge(foot_b, knee_joint_b).update(
stamp, knee_angle_b)
pos_a = graph.get_transform(
foot_a, F'{leg_prefix}_leg_origin', stamp).position
pos_b = graph.get_transform(
foot_b, F'{leg_prefix}_leg_origin', stamp).position
print(f'foot_positions : {pos_a} == {pos_b}')
ik_solution = solve_inverse_kinematics(
graph, stamp, leg_prefix, pos_a, config=config)
print(f'angles : {anorm([hip_angle_a, hip_angle_b])}'
f' == {ik_solution}')
# Send data to asynchronous viewer.
poses, edges = get_graph_geometries(graph, stamp, tol=np.inf)
with handler.collect():
handler.poses(poses=poses)
handler.edges(poses=poses, edges=edges)
if __name__ == '__main__':
main()
| [
"phonebot.core.common.config.PhonebotSettings",
"phonebot.core.frame_graph.graph_utils.solve_knee_angle",
"phonebot.core.frame_graph.graph_utils.get_graph_geometries",
"phonebot.core.common.math.utils.anorm",
"numpy.linspace",
"phonebot.core.frame_graph.phonebot_graph.PhonebotGraph",
"phonebot.vis.viewe... | [((705, 723), 'phonebot.core.common.config.PhonebotSettings', 'PhonebotSettings', ([], {}), '()\n', (721, 723), False, 'from phonebot.core.common.config import PhonebotSettings\n'), ((868, 889), 'phonebot.core.frame_graph.phonebot_graph.PhonebotGraph', 'PhonebotGraph', (['config'], {}), '(config)\n', (881, 889), False, 'from phonebot.core.frame_graph.phonebot_graph import PhonebotGraph\n'), ((903, 919), 'phonebot.vis.viewer.phonebot_viewer.PhonebotViewer', 'PhonebotViewer', ([], {}), '()\n', (917, 919), False, 'from phonebot.vis.viewer.phonebot_viewer import PhonebotViewer\n'), ((934, 954), 'phonebot.vis.viewer.viewer_base.HandleHelper', 'HandleHelper', (['viewer'], {}), '(viewer)\n', (946, 954), False, 'from phonebot.vis.viewer.viewer_base import HandleHelper\n'), ((991, 1002), 'time.time', 'time.time', ([], {}), '()\n', (1000, 1002), False, 'import time\n'), ((1041, 1084), 'phonebot.core.frame_graph.graph_utils.initialize_graph_zero', 'initialize_graph_zero', (['graph', 'stamp', 'config'], {}), '(graph, stamp, config)\n', (1062, 1084), False, 'from phonebot.core.frame_graph.graph_utils import solve_knee_angle, solve_inverse_kinematics, get_graph_geometries, initialize_graph_zero\n'), ((1175, 1206), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(20)'], {}), '(0.0, 2 * np.pi, 20)\n', (1186, 1206), True, 'import numpy as np\n'), ((1235, 1266), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(20)'], {}), '(0.0, 2 * np.pi, 20)\n', (1246, 1266), True, 'import numpy as np\n'), ((1903, 1960), 'phonebot.core.frame_graph.graph_utils.solve_knee_angle', 'solve_knee_angle', (['graph', 'leg_prefix', 'stamp'], {'config': 'config'}), '(graph, leg_prefix, stamp, config=config)\n', (1919, 1960), False, 'from phonebot.core.frame_graph.graph_utils import solve_knee_angle, solve_inverse_kinematics, get_graph_geometries, initialize_graph_zero\n'), ((2007, 2018), 'time.time', 'time.time', ([], {}), '()\n', (2016, 2018), False, 'import 
time\n'), ((2762, 2834), 'phonebot.core.frame_graph.graph_utils.solve_inverse_kinematics', 'solve_inverse_kinematics', (['graph', 'stamp', 'leg_prefix', 'pos_a'], {'config': 'config'}), '(graph, stamp, leg_prefix, pos_a, config=config)\n', (2786, 2834), False, 'from phonebot.core.frame_graph.graph_utils import solve_knee_angle, solve_inverse_kinematics, get_graph_geometries, initialize_graph_zero\n'), ((3054, 3100), 'phonebot.core.frame_graph.graph_utils.get_graph_geometries', 'get_graph_geometries', (['graph', 'stamp'], {'tol': 'np.inf'}), '(graph, stamp, tol=np.inf)\n', (3074, 3100), False, 'from phonebot.core.frame_graph.graph_utils import solve_knee_angle, solve_inverse_kinematics, get_graph_geometries, initialize_graph_zero\n'), ((2890, 2923), 'phonebot.core.common.math.utils.anorm', 'anorm', (['[hip_angle_a, hip_angle_b]'], {}), '([hip_angle_a, hip_angle_b])\n', (2895, 2923), False, 'from phonebot.core.common.math.utils import anorm\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import shutil
import numpy as np
import pytest
import vineyard
import vineyard.io
logger = logging.getLogger('vineyard')
@pytest.fixture(scope='module')
def global_object(vineyard_ipc_socket):
client1 = vineyard.connect(vineyard_ipc_socket)
client2 = vineyard.connect(vineyard_ipc_socket)
client3 = vineyard.connect(vineyard_ipc_socket)
client4 = vineyard.connect(vineyard_ipc_socket)
data = np.ones((1, 2, 3, 4, 5))
o1 = client1.put(data)
o2 = client2.put(data)
o3 = client3.put(data)
o4 = client4.put(data)
client4.persist(o4)
client3.persist(o3)
client2.persist(o2)
client1.persist(o1)
meta = vineyard.ObjectMeta()
meta['typename'] = 'vineyard::Tuple'
meta['size_'] = 4
meta.set_global(True)
meta.add_member('__elements_-0', client1.get_meta(o1))
meta.add_member('__elements_-1', client1.get_meta(o2))
meta.add_member('__elements_-2', o3)
meta.add_member('__elements_-3', o4)
meta['__elements_-size'] = 4
tup = client1.create_metadata(meta)
client1.persist(tup)
return tup.id
def test_seriarialize_round_trip(vineyard_ipc_socket, vineyard_endpoint, global_object):
destination = '/tmp/seri-test'
shutil.rmtree(destination, ignore_errors=True)
vineyard.io.serialize(destination,
global_object,
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint)
logger.info("finish serializing object to %s", destination)
ret = vineyard.io.deserialize(destination,
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint)
logger.info("finish deserializing object from %s, as %s", destination, ret)
client = vineyard.connect(vineyard_ipc_socket)
expected = client.get(global_object)
actual = client.get(ret)
assert isinstance(expected, tuple)
assert isinstance(actual, tuple)
assert len(expected) == len(actual)
for item1, item2 in zip(expected, actual):
np.testing.assert_array_almost_equal(item1, item2)
@pytest.mark.skip("require oss")
def test_seriarialize_round_trip_on_oss(vineyard_ipc_socket, vineyard_endpoint, global_object):
accessKeyID = os.environ["ACCESS_KEY_ID"]
accessKeySecret = os.environ["SECRET_ACCESS_KEY"]
endpoint = os.environ.get("ENDPOINT", "http://oss-cn-hangzhou.aliyuncs.com")
vineyard.io.serialize('oss://grape-uk/tmp/seri-test',
global_object,
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options={
"key": accessKeyID,
"secret": accessKeySecret,
"endpoint": endpoint,
})
ret = vineyard.io.deserialize('oss://grape-uk/tmp/seri-test',
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options={
"key": accessKeyID,
"secret": accessKeySecret,
"endpoint": endpoint,
})
client = vineyard.connect(vineyard_ipc_socket)
expected = client.get(global_object)
actual = client.get(ret)
assert isinstance(expected, tuple)
assert isinstance(actual, tuple)
assert len(expected) == len(actual)
for item1, item2 in zip(expected, actual):
np.testing.assert_array_almost_equal(item1, item2)
@pytest.mark.skip(reason="require s3")
def test_seriarialize_round_trip_on_s3(vineyard_ipc_socket, vineyard_endpoint, global_object):
accessKeyID = os.environ["ACCESS_KEY_ID"]
accessKeySecret = os.environ["SECRET_ACCESS_KEY"]
region_name = os.environ.get("REGION", "us-east-1")
vineyard.io.serialize(
"s3://test-bucket/tmp/seri-test",
global_object,
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options={
"key": accessKeyID,
"secret": accessKeySecret,
"client_kwargs": {
"region_name": region_name
},
},
)
ret = vineyard.io.deserialize(
's3://test-bucket/tmp/seri-test',
vineyard_ipc_socket=vineyard_ipc_socket,
vineyard_endpoint=vineyard_endpoint,
storage_options={
"key": accessKeyID,
"secret": accessKeySecret,
"client_kwargs": {
"region_name": region_name
},
},
)
client = vineyard.connect(vineyard_ipc_socket)
expected = client.get(global_object)
actual = client.get(ret)
assert isinstance(expected, tuple)
assert isinstance(actual, tuple)
assert len(expected) == len(actual)
for item1, item2 in zip(expected, actual):
np.testing.assert_array_almost_equal(item1, item2)
| [
"logging.getLogger",
"vineyard.ObjectMeta",
"vineyard.io.deserialize",
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"pytest.mark.skip",
"os.environ.get",
"vineyard.connect",
"shutil.rmtree",
"pytest.fixture",
"vineyard.io.serialize"
] | [((770, 799), 'logging.getLogger', 'logging.getLogger', (['"""vineyard"""'], {}), "('vineyard')\n", (787, 799), False, 'import logging\n'), ((803, 833), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (817, 833), False, 'import pytest\n'), ((2838, 2869), 'pytest.mark.skip', 'pytest.mark.skip', (['"""require oss"""'], {}), "('require oss')\n", (2854, 2869), False, 'import pytest\n'), ((4442, 4479), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""require s3"""'}), "(reason='require s3')\n", (4458, 4479), False, 'import pytest\n'), ((888, 925), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (904, 925), False, 'import vineyard\n'), ((940, 977), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (956, 977), False, 'import vineyard\n'), ((992, 1029), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (1008, 1029), False, 'import vineyard\n'), ((1044, 1081), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (1060, 1081), False, 'import vineyard\n'), ((1094, 1118), 'numpy.ones', 'np.ones', (['(1, 2, 3, 4, 5)'], {}), '((1, 2, 3, 4, 5))\n', (1101, 1118), True, 'import numpy as np\n'), ((1337, 1358), 'vineyard.ObjectMeta', 'vineyard.ObjectMeta', ([], {}), '()\n', (1356, 1358), False, 'import vineyard\n'), ((1894, 1940), 'shutil.rmtree', 'shutil.rmtree', (['destination'], {'ignore_errors': '(True)'}), '(destination, ignore_errors=True)\n', (1907, 1940), False, 'import shutil\n'), ((1945, 2077), 'vineyard.io.serialize', 'vineyard.io.serialize', (['destination', 'global_object'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint'}), '(destination, global_object, vineyard_ipc_socket=\n vineyard_ipc_socket, vineyard_endpoint=vineyard_endpoint)\n', (1966, 2077), False, 'import vineyard\n'), ((2225, 
2344), 'vineyard.io.deserialize', 'vineyard.io.deserialize', (['destination'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint'}), '(destination, vineyard_ipc_socket=\n vineyard_ipc_socket, vineyard_endpoint=vineyard_endpoint)\n', (2248, 2344), False, 'import vineyard\n'), ((2502, 2539), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (2518, 2539), False, 'import vineyard\n'), ((3081, 3146), 'os.environ.get', 'os.environ.get', (['"""ENDPOINT"""', '"""http://oss-cn-hangzhou.aliyuncs.com"""'], {}), "('ENDPOINT', 'http://oss-cn-hangzhou.aliyuncs.com')\n", (3095, 3146), False, 'import os\n'), ((3151, 3397), 'vineyard.io.serialize', 'vineyard.io.serialize', (['"""oss://grape-uk/tmp/seri-test"""', 'global_object'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint', 'storage_options': "{'key': accessKeyID, 'secret': accessKeySecret, 'endpoint': endpoint}"}), "('oss://grape-uk/tmp/seri-test', global_object,\n vineyard_ipc_socket=vineyard_ipc_socket, vineyard_endpoint=\n vineyard_endpoint, storage_options={'key': accessKeyID, 'secret':\n accessKeySecret, 'endpoint': endpoint})\n", (3172, 3397), False, 'import vineyard\n'), ((3618, 3851), 'vineyard.io.deserialize', 'vineyard.io.deserialize', (['"""oss://grape-uk/tmp/seri-test"""'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint', 'storage_options': "{'key': accessKeyID, 'secret': accessKeySecret, 'endpoint': endpoint}"}), "('oss://grape-uk/tmp/seri-test', vineyard_ipc_socket\n =vineyard_ipc_socket, vineyard_endpoint=vineyard_endpoint,\n storage_options={'key': accessKeyID, 'secret': accessKeySecret,\n 'endpoint': endpoint})\n", (3641, 3851), False, 'import vineyard\n'), ((4106, 4143), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (4122, 4143), False, 'import vineyard\n'), ((4693, 4730), 'os.environ.get', 
'os.environ.get', (['"""REGION"""', '"""us-east-1"""'], {}), "('REGION', 'us-east-1')\n", (4707, 4730), False, 'import os\n'), ((4735, 5008), 'vineyard.io.serialize', 'vineyard.io.serialize', (['"""s3://test-bucket/tmp/seri-test"""', 'global_object'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint', 'storage_options': "{'key': accessKeyID, 'secret': accessKeySecret, 'client_kwargs': {\n 'region_name': region_name}}"}), "('s3://test-bucket/tmp/seri-test', global_object,\n vineyard_ipc_socket=vineyard_ipc_socket, vineyard_endpoint=\n vineyard_endpoint, storage_options={'key': accessKeyID, 'secret':\n accessKeySecret, 'client_kwargs': {'region_name': region_name}})\n", (4756, 5008), False, 'import vineyard\n'), ((5130, 5390), 'vineyard.io.deserialize', 'vineyard.io.deserialize', (['"""s3://test-bucket/tmp/seri-test"""'], {'vineyard_ipc_socket': 'vineyard_ipc_socket', 'vineyard_endpoint': 'vineyard_endpoint', 'storage_options': "{'key': accessKeyID, 'secret': accessKeySecret, 'client_kwargs': {\n 'region_name': region_name}}"}), "('s3://test-bucket/tmp/seri-test',\n vineyard_ipc_socket=vineyard_ipc_socket, vineyard_endpoint=\n vineyard_endpoint, storage_options={'key': accessKeyID, 'secret':\n accessKeySecret, 'client_kwargs': {'region_name': region_name}})\n", (5153, 5390), False, 'import vineyard\n'), ((5508, 5545), 'vineyard.connect', 'vineyard.connect', (['vineyard_ipc_socket'], {}), '(vineyard_ipc_socket)\n', (5524, 5545), False, 'import vineyard\n'), ((2784, 2834), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['item1', 'item2'], {}), '(item1, item2)\n', (2820, 2834), True, 'import numpy as np\n'), ((4388, 4438), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['item1', 'item2'], {}), '(item1, item2)\n', (4424, 4438), True, 'import numpy as np\n'), ((5790, 5840), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', 
(['item1', 'item2'], {}), '(item1, item2)\n', (5826, 5840), True, 'import numpy as np\n')] |
# /usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Loss function for Adaround """
from typing import Tuple, Union
import numpy as np
import tensorflow as tf
# Import AIMET specific modules
from aimet_common.defs import AdaroundConstants
class AdaroundHyperParameters:
"""
Hyper parameters for Adaround
"""
def __init__(self, num_iterations: int, reg_param: float, beta_range: Tuple, warm_start: float):
"""
:param num_iterations: Number of maximum iterations to adaround layer
:param reg_param: Regularization parameter, trading off between rounding loss vs reconstruction loss
:param beta_range: Start and stop parameters for annealing of rounding loss (start_beta, end_beta)
:param warm_start: Warm up period, during which rounding loss has zero effect
"""
self.num_iterations = num_iterations
self.reg_param = reg_param
self.beta_range = beta_range
self.warm_start = warm_start
class AdaroundLoss:
"""
Calculates the Reconstruction loss and Rounding loss - needed for Adaround optimization to
learn weight rounding
"""
@staticmethod
def compute_recon_loss(ada_quantized_output: tf.Tensor, orig_output: tf.Tensor, channels_index: int) -> tf.Tensor:
"""
Compute Reconstruction Loss using Squared Frobenius Norm - first part of Combined Loss
:param ada_quantized_output: Activation output from quantized wrapper module
:param orig_output: Activation output from original module
:param channels_index: channels_index across which loss will be computed
:return: reconstruction loss
"""
recon_loss = tf.reduce_mean(tf.reduce_sum(tf.math.squared_difference(ada_quantized_output, orig_output),
channels_index))
return recon_loss
@classmethod
def compute_round_loss(cls, alpha: tf.Variable, reg_param: float, warm_start: Union[bool, tf.Tensor],
beta: float) -> tf.Tensor:
"""
Compute Rounding Loss - second part of Combined Loss
:param alpha: parameter 'alpha' to be optimized, float32 tensor same shape as weight tensor
:param reg_param: Regularization parameter, trading off between rounding loss vs reconstruction loss
:param warm_start: Warm up period, during which rounding loss has zero effect
:param beta: Beta parameter
:return: rounding loss
"""
def round_loss_fn():
# compute rectified sigmoid of parameter 'alpha' which maps it between zero and one
h_alpha = tf.clip_by_value(tf.sigmoid(alpha) * (AdaroundConstants.ZETA - AdaroundConstants.GAMMA) +
AdaroundConstants.GAMMA, 0, 1)
# calculate regularization term - which ensures parameter to converge to exactly zeros and ones
# at the end of optimization
reg_term = tf.reduce_sum(tf.add(-tf.pow(tf.abs(tf.add(2 * h_alpha, -1)), beta), 1))
# calculate the rounding loss
round_loss = reg_param * reg_term
return round_loss
round_loss = tf.cond(warm_start, lambda: tf.constant(0.0, dtype=tf.float32), round_loss_fn)
return round_loss
@staticmethod
def compute_beta(max_iter: int, cur_iter: int, beta_range: Tuple, warm_start: float) -> float:
"""
Compute beta parameter used in regularization function using cosine decay
:param max_iter: total maximum number of iterations
:param cur_iter: current iteration
:param beta_range: range for beta decay (start_beta, end_beta)
:param warm_start: warm up period, during which rounding loss has zero effect
:return: parameter beta
"""
# Start and stop beta for annealing of rounding loss (start_beta, end_beta)
start_beta, end_beta = beta_range
# iteration at end of warm start period, which is 20% of max iterations
warm_start_end_iter = warm_start * max_iter
# compute relative iteration of current iteration
rel_iter = (cur_iter - warm_start_end_iter) / (max_iter - warm_start_end_iter)
beta = end_beta + 0.5 * (start_beta - end_beta) * (1 + np.cos(rel_iter * np.pi))
return beta
| [
"tensorflow.math.squared_difference",
"tensorflow.add",
"tensorflow.sigmoid",
"tensorflow.constant",
"numpy.cos"
] | [((3562, 3623), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['ada_quantized_output', 'orig_output'], {}), '(ada_quantized_output, orig_output)\n', (3588, 3623), True, 'import tensorflow as tf\n'), ((5067, 5101), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (5078, 5101), True, 'import tensorflow as tf\n'), ((6130, 6154), 'numpy.cos', 'np.cos', (['(rel_iter * np.pi)'], {}), '(rel_iter * np.pi)\n', (6136, 6154), True, 'import numpy as np\n'), ((4508, 4525), 'tensorflow.sigmoid', 'tf.sigmoid', (['alpha'], {}), '(alpha)\n', (4518, 4525), True, 'import tensorflow as tf\n'), ((4860, 4883), 'tensorflow.add', 'tf.add', (['(2 * h_alpha)', '(-1)'], {}), '(2 * h_alpha, -1)\n', (4866, 4883), True, 'import tensorflow as tf\n')] |
import os
import numpy as np
import pickle
import time
from collections import deque
from mpi4py import MPI
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter
from stable_baselines import DDPG
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.common.math_util import unscale_action, scale_action
from stable_baselines.common.vec_env import VecEnv
class DDPGfED(DDPG):
"""
Custom version of Deep Deterministic Policy Gradient (DDPG) to use with expert demonstrations.
Similar to DDPG from Demonstrations (DDPGfD).
"""
def __init__(self, policy, env, gamma=0.99, memory_policy=None, eval_env=None, nb_train_steps=50,
nb_rollout_steps=100, nb_eval_steps=100, param_noise=None, action_noise=None,
normalize_observations=False, tau=0.001, batch_size=128, param_noise_adaption_interval=50,
normalize_returns=False, enable_popart=False, observation_range=(-5., 5.), critic_l2_reg=0.,
return_range=(-np.inf, np.inf), actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1.,
render=False, render_eval=False, memory_limit=None, buffer_size=50000, random_exploration=0.0,
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1,
expert_use=False, expert_data=None, expert_batch_size=64, expert_limit_success=0.5):
super(DDPGfED, self).__init__(policy=policy, env=env, gamma=gamma, memory_policy=memory_policy,
eval_env=eval_env, nb_train_steps=nb_train_steps,
nb_rollout_steps=nb_rollout_steps, nb_eval_steps=nb_eval_steps,
param_noise=param_noise, action_noise=action_noise,
normalize_observations=normalize_observations, tau=tau, batch_size=batch_size,
param_noise_adaption_interval=param_noise_adaption_interval,
normalize_returns=normalize_returns, enable_popart=enable_popart,
observation_range=observation_range, critic_l2_reg=critic_l2_reg,
return_range=return_range, actor_lr=actor_lr, critic_lr=critic_lr,
clip_norm=clip_norm, reward_scale=reward_scale, render=render,
render_eval=render_eval, memory_limit=memory_limit, buffer_size=buffer_size,
random_exploration=random_exploration, verbose=verbose,
tensorboard_log=tensorboard_log, _init_setup_model=_init_setup_model,
policy_kwargs=policy_kwargs, full_tensorboard_log=full_tensorboard_log,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.expert_use = expert_use
if self.expert_use:
self.expert_data = expert_data
self.expert_batch_size = expert_batch_size
self.expert_batch_size_current = expert_batch_size
self.expert_limit_success = expert_limit_success
self.demo_data = None
self.demo_size = None
self._init_demo_buffer()
else:
self.expert_data = None
self.expert_batch_size = 0
self.expert_batch_size_current = 0
self.expert_limit_success = 0
def get_random_action(self):
return np.random.uniform(-1.5, 1.5, self.env.action_space.shape[0])
def _init_demo_buffer(self):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0 and self.verbose >= 1:
print("Start init demo buffer")
data_path = "{}{}.npz".format(self.expert_data[:-4], rank)
if not os.path.exists(data_path):
import shutil
shutil.copy(self.expert_data, data_path)
demo_data = np.load(data_path)
self.demo_size = len(demo_data.f.actions)
self.demo_buffer = ReplayBuffer(self.demo_size)
self.demo_data = {
"obs": demo_data["obs"].copy(),
"actions": demo_data["actions"].copy(),
"rewards": demo_data["rewards"].copy(),
"episode_starts": demo_data["episode_starts"].copy()
}
for n in range(1, self.demo_size):
obs = self.demo_data["obs"][n - 1]
self.demo_buffer.add(obs,
self.demo_data["actions"][n],
self.demo_data["rewards"][n] * self.reward_scale,
self.demo_data["obs"][n],
self.demo_data["episode_starts"][n].astype(np.float32))
if self.normalize_observations:
self.obs_rms.update(np.array([obs]))
del demo_data
os.remove(data_path)
MPI.COMM_WORLD.Barrier()
if rank == 0 and self.verbose >= 1:
print("Done init demo buffer")
def _train_step(self, step, writer, log=False):
"""
run a step of training from batch
:param step: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:param log: (bool) whether or not to log to metadata
:return: (float, float) critic loss, actor loss
"""
if self.expert_use and self.expert_batch_size_current > 0:
# Get a batch
batch_size = self.batch_size - self.expert_batch_size_current
obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=batch_size,
env=self._vec_normalize_env)
_obs, _actions, _rewards, _next_obs, _terminals = self.demo_buffer.sample(
batch_size=self.expert_batch_size_current,
env=self._vec_normalize_env)
obs = np.append(obs, _obs, axis=0)
actions = np.append(actions, _actions, axis=0)
rewards = np.append(rewards, _rewards, axis=0)
next_obs = np.append(next_obs, _next_obs, axis=0)
terminals = np.append(terminals, _terminals, axis=0)
else:
# Get a batch
obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,
env=self._vec_normalize_env)
# Reshape to match previous behavior and placeholder shape
rewards = rewards.reshape(-1, 1)
terminals = terminals.reshape(-1, 1)
if self.normalize_returns and self.enable_popart:
old_mean, old_std, target_q = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_q],
feed_dict={
self.obs_target: next_obs,
self.rewards: rewards,
self.terminals_ph: terminals
})
self.ret_rms.update(target_q.flatten())
self.sess.run(self.renormalize_q_outputs_op, feed_dict={
self.old_std: np.array([old_std]),
self.old_mean: np.array([old_mean]),
})
else:
target_q = self.sess.run(self.target_q, feed_dict={
self.obs_target: next_obs,
self.rewards: rewards,
self.terminals_ph: terminals
})
# Get all gradients and perform a synced update.
ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]
td_map = {
self.obs_train: obs,
self.actions: actions,
self.action_train_ph: actions,
self.rewards: rewards,
self.critic_target: target_q,
self.param_noise_stddev: 0 if self.param_noise is None else self.param_noise.current_stddev
}
if writer is not None:
# run loss backprop with summary if the step_id was not already logged (can happen with the right
# parameters as the step value is only an estimate)
if self.full_tensorboard_log and log and step not in self.tb_seen_steps:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, actor_grads, actor_loss, critic_grads, critic_loss = \
self.sess.run([self.summary] + ops, td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % step)
self.tb_seen_steps.append(step)
else:
summary, actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run([self.summary] + ops,
td_map)
writer.add_summary(summary, step)
else:
actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run(ops, td_map)
self.actor_optimizer.update(actor_grads, learning_rate=self.actor_lr)
self.critic_optimizer.update(critic_grads, learning_rate=self.critic_lr)
return critic_loss, actor_loss
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DDPG",
              reset_num_timesteps=True, replay_wrapper=None):
        """
        Train the DDPG agent for ``total_timesteps`` environment steps.

        The loop alternates three phases inside each of the ``log_interval``
        cycles: (1) collect ``nb_rollout_steps`` transitions into the replay
        buffer, (2) run ``nb_train_steps`` gradient updates (with MPI barriers
        keeping workers in lock-step), (3) optionally evaluate on
        ``self.eval_env``.  After every ``log_interval`` cycles the collected
        statistics are averaged across MPI workers and dumped to the logger.

        :param total_timesteps: (int) total number of environment steps to run.
        :param callback: callback object; returning False from ``on_step``
            aborts training early.
        :param log_interval: (int) number of rollout/train/eval cycles between
            two statistics dumps.
        :param tb_log_name: (str) run name used for tensorboard logging.
        :param reset_num_timesteps: (bool) whether to reset the internal
            timestep counter (set False to continue a previous run's curve).
        :param replay_wrapper: optional callable that wraps
            ``self.replay_buffer`` (e.g. HER's replay wrapper).
        :return: this model instance, after training.
        """
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)
        if replay_wrapper is not None:
            self.replay_buffer = replay_wrapper(self.replay_buffer)
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()
            # a list for tensorboard logging, to prevent logging with the same step number, if it already occured
            self.tb_seen_steps = []
            rank = MPI.COMM_WORLD.Get_rank()
            if self.verbose >= 2:
                logger.log('Using agent with the following configuration:')
                logger.log(str(self.__dict__.items()))
            eval_episode_rewards_history = deque(maxlen=100)
            episode_rewards_history = deque(maxlen=100)
            episode_successes = []
            with self.sess.as_default(), self.graph.as_default():
                # Prepare everything.
                self._reset()
                obs = self.env.reset()
                # Retrieve unnormalized observation for saving into the buffer
                if self._vec_normalize_env is not None:
                    obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                eval_obs = None
                if self.eval_env is not None:
                    eval_obs = self.eval_env.reset()
                episode_reward = 0.
                episode_step = 0
                episodes = 0
                step = 0
                total_steps = 0
                start_time = time.time()
                epoch_episode_rewards = []
                epoch_episode_steps = []
                epoch_actor_losses = []
                epoch_critic_losses = []
                epoch_adaptive_distances = []
                eval_episode_rewards = []
                eval_qs = []
                epoch_actions = []
                epoch_qs = []
                epoch_episodes = 0
                epoch = 0
                callback.on_training_start(locals(), globals())
                while True:
                    for _ in range(log_interval):
                        callback.on_rollout_start()
                        # Perform rollouts.
                        for _ in range(self.nb_rollout_steps):
                            if total_steps >= total_timesteps:
                                callback.on_training_end()
                                return self
                            # Predict next action.
                            action, q_value = self._policy(obs, apply_noise=True, compute_q=True)
                            assert action.shape == self.env.action_space.shape
                            # Execute next action.
                            if rank == 0 and self.render:
                                self.env.render()
                            # Randomly sample actions from a uniform distribution
                            # with a probability self.random_exploration (used in HER + DDPG)
                            if np.random.rand() < self.random_exploration:
                                # actions sampled from action space are from range specific to the environment
                                # but algorithm operates on tanh-squashed actions therefore simple scaling is used
                                unscaled_action = self.action_space.sample()
                                action = scale_action(self.action_space, unscaled_action)
                            else:
                                # inferred actions need to be transformed to environment action_space before stepping
                                unscaled_action = unscale_action(self.action_space, action)
                            new_obs, reward, done, info = self.env.step(unscaled_action)
                            self.num_timesteps += 1
                            if callback.on_step() is False:
                                callback.on_training_end()
                                return self
                            step += 1
                            total_steps += 1
                            if rank == 0 and self.render:
                                self.env.render()
                            # Book-keeping.
                            epoch_actions.append(action)
                            epoch_qs.append(q_value)
                            # Store only the unnormalized version
                            if self._vec_normalize_env is not None:
                                new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                                reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                            else:
                                # Avoid changing the original ones
                                obs_, new_obs_, reward_ = obs, new_obs, reward
                            self._store_transition(obs_, action, reward_, new_obs_, done)
                            obs = new_obs
                            # Save the unnormalized observation
                            if self._vec_normalize_env is not None:
                                obs_ = new_obs_
                            episode_reward += reward_
                            episode_step += 1
                            if writer is not None:
                                ep_rew = np.array([reward_]).reshape((1, -1))
                                ep_done = np.array([done]).reshape((1, -1))
                                tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done,
                                                                    writer, self.num_timesteps)
                            if done:
                                # Episode done.
                                epoch_episode_rewards.append(episode_reward)
                                episode_rewards_history.append(episode_reward)
                                epoch_episode_steps.append(episode_step)
                                episode_reward = 0.
                                episode_step = 0
                                epoch_episodes += 1
                                episodes += 1
                                maybe_is_success = info.get('is_success')
                                if maybe_is_success is not None:
                                    episode_successes.append(float(maybe_is_success))
                                self._reset()
                                if not isinstance(self.env, VecEnv):
                                    obs = self.env.reset()
                        callback.on_rollout_end()
                        # Train.
                        epoch_actor_losses = []
                        epoch_critic_losses = []
                        epoch_adaptive_distances = []
                        if total_steps % 1000 == 0 and rank == 0:
                            print("steps", total_steps, " rank", rank)
                        if total_steps % 1000 == 0 and rank == 1:
                            print("steps", total_steps, " rank", rank)
                        for t_train in range(self.nb_train_steps):
                            # Not enough samples in the replay buffer
                            # NOTE(review): 1190 looks like a hard-coded warm-up step count
                            # before any gradient update is allowed -- confirm its origin.
                            if not self.replay_buffer.can_sample(self.batch_size) or total_steps < 1190:
                                # Barrier keeps all MPI workers in lock-step even when
                                # this worker skips training this cycle.
                                MPI.COMM_WORLD.Barrier()
                                break
                            MPI.COMM_WORLD.Barrier()
                            # Adapt param noise, if necessary.
                            if len(self.replay_buffer) >= self.batch_size and \
                                    t_train % self.param_noise_adaption_interval == 0:
                                distance = self._adapt_param_noise()
                                epoch_adaptive_distances.append(distance)
                            # weird equation to deal with the fact the nb_train_steps will be different
                            # to nb_rollout_steps
                            step = (int(t_train * (self.nb_rollout_steps / self.nb_train_steps)) +
                                    self.num_timesteps - self.nb_rollout_steps)
                            critic_loss, actor_loss = self._train_step(step, writer, log=t_train == 0)
                            epoch_critic_losses.append(critic_loss)
                            epoch_actor_losses.append(actor_loss)
                            self._update_target_net()
                        # Evaluate.
                        eval_episode_rewards = []
                        eval_qs = []
                        MPI.COMM_WORLD.Barrier()
                        if self.eval_env is not None:
                            eval_episode_reward = 0.
                            for _ in range(self.nb_eval_steps):
                                if total_steps >= total_timesteps:
                                    return self
                                eval_action, eval_q = self._policy(eval_obs, apply_noise=False, compute_q=True)
                                unscaled_action = unscale_action(self.action_space, eval_action)
                                eval_obs, eval_r, eval_done, _ = self.eval_env.step(unscaled_action)
                                if self.render_eval:
                                    self.eval_env.render()
                                eval_episode_reward += eval_r
                                eval_qs.append(eval_q)
                                if eval_done:
                                    if not isinstance(self.env, VecEnv):
                                        eval_obs = self.eval_env.reset()
                                    eval_episode_rewards.append(eval_episode_reward)
                                    eval_episode_rewards_history.append(eval_episode_reward)
                                    eval_episode_reward = 0.
                    mpi_size = MPI.COMM_WORLD.Get_size()
                    # Log stats.
                    # XXX shouldn't call np.mean on variable length lists
                    duration = time.time() - start_time
                    stats = self._get_stats()
                    combined_stats = stats.copy()
                    combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
                    combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
                    combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
                    combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
                    combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
                    combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
                    combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
                    if len(epoch_adaptive_distances) != 0:
                        combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
                    combined_stats['total/duration'] = duration
                    combined_stats['total/steps_per_second'] = float(step) / float(duration)
                    combined_stats['total/episodes'] = episodes
                    combined_stats['rollout/episodes'] = epoch_episodes
                    combined_stats['rollout/actions_std'] = np.std(epoch_actions)
                    # Evaluation statistics.
                    if self.eval_env is not None:
                        combined_stats['eval/return'] = np.mean(eval_episode_rewards)
                        combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
                        combined_stats['eval/Q'] = np.mean(eval_qs)
                        combined_stats['eval/episodes'] = len(eval_episode_rewards)
                    def as_scalar(scalar):
                        """
                        check and return the input if it is a scalar, otherwise raise ValueError
                        :param scalar: (Any) the object to check
                        :return: (Number) the scalar if x is a scalar
                        """
                        if isinstance(scalar, np.ndarray):
                            assert scalar.size == 1
                            return scalar[0]
                        elif np.isscalar(scalar):
                            return scalar
                        else:
                            raise ValueError('expected scalar, got %s' % scalar)
                    # Sum every stat across workers, then divide by worker count
                    # below to obtain the cross-worker average.
                    combined_stats_sums = MPI.COMM_WORLD.allreduce(
                        np.array([as_scalar(x) for x in combined_stats.values()]))
                    combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}
                    # Total statistics.
                    # NOTE(review): ``epoch`` is initialised to 0 and never
                    # incremented in this loop, so this always logs 1 -- confirm intended.
                    combined_stats['total/epochs'] = epoch + 1
                    combined_stats['total/steps'] = step
                    for key in sorted(combined_stats.keys()):
                        logger.record_tabular(key, combined_stats[key])
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.dump_tabular()
                    logger.info('')
                    logdir = logger.get_dir()
                    if rank == 0 and logdir:
                        # Persist (pickled) environment state alongside the logs so a
                        # run can be resumed with the same env internals.
                        if hasattr(self.env, 'get_state'):
                            with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as file_handler:
                                pickle.dump(self.env.get_state(), file_handler)
                        if self.eval_env and hasattr(self.eval_env, 'get_state'):
                            with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as file_handler:
                                pickle.dump(self.eval_env.get_state(), file_handler)
def save(self, save_path, cloudpickle=False):
data = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"nb_eval_steps": self.nb_eval_steps,
"param_noise_adaption_interval": self.param_noise_adaption_interval,
"nb_train_steps": self.nb_train_steps,
"nb_rollout_steps": self.nb_rollout_steps,
"verbose": self.verbose,
"param_noise": self.param_noise,
"action_noise": self.action_noise,
"gamma": self.gamma,
"tau": self.tau,
"normalize_returns": self.normalize_returns,
"enable_popart": self.enable_popart,
"normalize_observations": self.normalize_observations,
"batch_size": self.batch_size,
"observation_range": self.observation_range,
"return_range": self.return_range,
"critic_l2_reg": self.critic_l2_reg,
"actor_lr": self.actor_lr,
"critic_lr": self.critic_lr,
"clip_norm": self.clip_norm,
"reward_scale": self.reward_scale,
"memory_limit": self.memory_limit,
"buffer_size": self.buffer_size,
"random_exploration": self.random_exploration,
"policy": self.policy,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs,
"expert_use": self.expert_use,
"expert_data": self.expert_data,
"expert_batch_size": self.expert_batch_size,
"expert_batch_size_current": self.expert_batch_size_current,
"expert_limit_success": self.expert_limit_success
}
params_to_save = self.get_parameters()
self._save_to_file(save_path,
data=data,
params=params_to_save,
cloudpickle=cloudpickle)
    @classmethod
    def load(cls, load_path, env=None, custom_objects=None, **kwargs):
        """
        Load a saved model from ``load_path``.

        :param load_path: (str or file-like) path of the saved model.
        :param env: environment to attach to the loaded model (can be None).
        :param custom_objects: (dict) objects to substitute during
            deserialization (passed through to ``_load_from_file``).
        :param kwargs: extra attributes to override on the loaded model;
            ``policy_kwargs`` must match the stored ones if provided.
        :raises ValueError: when ``policy_kwargs`` is given and differs from
            the stored value.
        """
        data, params = cls._load_from_file(load_path, custom_objects=custom_objects)
        if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:
            raise ValueError("The specified policy kwargs do not equal the stored policy kwargs. "
                             "Stored kwargs: {}, specified kwargs: {}".format(data['policy_kwargs'],
                                                                             kwargs['policy_kwargs']))
        # Build an empty shell first, then restore attributes and graph.
        model = cls(None, env, _init_setup_model=False)
        model.__dict__.update(data)
        model.__dict__.update(kwargs)
        model.set_env(env)
        model.setup_model()
        # Patch for version < v2.6.0, duplicated keys where saved
        if len(params) > len(model.get_parameter_list()):
            n_params = len(model.params)
            n_target_params = len(model.target_params)
            n_normalisation_params = len(model.obs_rms_params) + len(model.ret_rms_params)
            # Check that the issue is the one from
            # https://github.com/hill-a/stable-baselines/issues/363
            assert len(params) == 2 * (n_params + n_target_params) + n_normalisation_params, \
                "The number of parameter saved differs from the number of parameters" \
                " that should be loaded: {}!={}".format(len(params), len(model.get_parameter_list()))
            # Remove duplicates
            params_ = params[:n_params + n_target_params]
            if n_normalisation_params > 0:
                params_ += params[-n_normalisation_params:]
            params = params_
        model.load_parameters(params)
        # Re-create the demonstration buffer when expert data is in use.
        if model.expert_use:
            model._init_demo_buffer()
return model | [
"stable_baselines.common.math_util.scale_action",
"numpy.random.rand",
"stable_baselines.logger.record_tabular",
"stable_baselines.common.buffers.ReplayBuffer",
"mpi4py.MPI.COMM_WORLD.Get_size",
"numpy.array",
"tensorflow.RunMetadata",
"os.remove",
"os.path.exists",
"numpy.mean",
"collections.de... | [((3704, 3764), 'numpy.random.uniform', 'np.random.uniform', (['(-1.5)', '(1.5)', 'self.env.action_space.shape[0]'], {}), '(-1.5, 1.5, self.env.action_space.shape[0])\n', (3721, 3764), True, 'import numpy as np\n'), ((3814, 3839), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (3837, 3839), False, 'from mpi4py import MPI\n'), ((4140, 4158), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (4147, 4158), True, 'import numpy as np\n'), ((4236, 4264), 'stable_baselines.common.buffers.ReplayBuffer', 'ReplayBuffer', (['self.demo_size'], {}), '(self.demo_size)\n', (4248, 4264), False, 'from stable_baselines.common.buffers import ReplayBuffer\n'), ((5068, 5088), 'os.remove', 'os.remove', (['data_path'], {}), '(data_path)\n', (5077, 5088), False, 'import os\n'), ((5097, 5121), 'mpi4py.MPI.COMM_WORLD.Barrier', 'MPI.COMM_WORLD.Barrier', ([], {}), '()\n', (5119, 5121), False, 'from mpi4py import MPI\n'), ((4013, 4038), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (4027, 4038), False, 'import os\n'), ((4078, 4118), 'shutil.copy', 'shutil.copy', (['self.expert_data', 'data_path'], {}), '(self.expert_data, data_path)\n', (4089, 4118), False, 'import shutil\n'), ((6175, 6203), 'numpy.append', 'np.append', (['obs', '_obs'], {'axis': '(0)'}), '(obs, _obs, axis=0)\n', (6184, 6203), True, 'import numpy as np\n'), ((6226, 6262), 'numpy.append', 'np.append', (['actions', '_actions'], {'axis': '(0)'}), '(actions, _actions, axis=0)\n', (6235, 6262), True, 'import numpy as np\n'), ((6285, 6321), 'numpy.append', 'np.append', (['rewards', '_rewards'], {'axis': '(0)'}), '(rewards, _rewards, axis=0)\n', (6294, 6321), True, 'import numpy as np\n'), ((6345, 6383), 'numpy.append', 'np.append', (['next_obs', '_next_obs'], {'axis': '(0)'}), '(next_obs, _next_obs, axis=0)\n', (6354, 6383), True, 'import numpy as np\n'), ((6408, 6448), 'numpy.append', 'np.append', (['terminals', '_terminals'], {'axis': 
'(0)'}), '(terminals, _terminals, axis=0)\n', (6417, 6448), True, 'import numpy as np\n'), ((10055, 10081), 'stable_baselines.common.SetVerbosity', 'SetVerbosity', (['self.verbose'], {}), '(self.verbose)\n', (10067, 10081), False, 'from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter\n'), ((10083, 10159), 'stable_baselines.common.TensorboardWriter', 'TensorboardWriter', (['self.graph', 'self.tensorboard_log', 'tb_log_name', 'new_tb_log'], {}), '(self.graph, self.tensorboard_log, tb_log_name, new_tb_log)\n', (10100, 10159), False, 'from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter\n'), ((10392, 10417), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (10415, 10417), False, 'from mpi4py import MPI\n'), ((10628, 10645), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (10633, 10645), False, 'from collections import deque\n'), ((10684, 10701), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (10689, 10701), False, 'from collections import deque\n'), ((8667, 8718), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (8680, 8718), True, 'import tensorflow as tf\n'), ((8750, 8766), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (8764, 8766), True, 'import tensorflow as tf\n'), ((10469, 10528), 'stable_baselines.logger.log', 'logger.log', (['"""Using agent with the following configuration:"""'], {}), "('Using agent with the following configuration:')\n", (10479, 10528), False, 'from stable_baselines import logger\n'), ((11442, 11453), 'time.time', 'time.time', ([], {}), '()\n', (11451, 11453), False, 'import time\n'), ((5019, 5034), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (5027, 5034), True, 'import numpy as np\n'), ((19873, 19898), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (19896, 19898), 
False, 'from mpi4py import MPI\n'), ((20213, 20243), 'numpy.mean', 'np.mean', (['epoch_episode_rewards'], {}), '(epoch_episode_rewards)\n', (20220, 20243), True, 'import numpy as np\n'), ((20307, 20339), 'numpy.mean', 'np.mean', (['episode_rewards_history'], {}), '(episode_rewards_history)\n', (20314, 20339), True, 'import numpy as np\n'), ((20402, 20430), 'numpy.mean', 'np.mean', (['epoch_episode_steps'], {}), '(epoch_episode_steps)\n', (20409, 20430), True, 'import numpy as np\n'), ((20492, 20514), 'numpy.mean', 'np.mean', (['epoch_actions'], {}), '(epoch_actions)\n', (20499, 20514), True, 'import numpy as np\n'), ((20570, 20587), 'numpy.mean', 'np.mean', (['epoch_qs'], {}), '(epoch_qs)\n', (20577, 20587), True, 'import numpy as np\n'), ((20645, 20672), 'numpy.mean', 'np.mean', (['epoch_actor_losses'], {}), '(epoch_actor_losses)\n', (20652, 20672), True, 'import numpy as np\n'), ((20731, 20759), 'numpy.mean', 'np.mean', (['epoch_critic_losses'], {}), '(epoch_critic_losses)\n', (20738, 20759), True, 'import numpy as np\n'), ((21277, 21298), 'numpy.std', 'np.std', (['epoch_actions'], {}), '(epoch_actions)\n', (21283, 21298), True, 'import numpy as np\n'), ((23150, 23171), 'stable_baselines.logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (23169, 23171), False, 'from stable_baselines import logger\n'), ((23192, 23207), 'stable_baselines.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (23203, 23207), False, 'from stable_baselines import logger\n'), ((23237, 23253), 'stable_baselines.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (23251, 23253), False, 'from stable_baselines import logger\n'), ((7571, 7590), 'numpy.array', 'np.array', (['[old_std]'], {}), '([old_std])\n', (7579, 7590), True, 'import numpy as np\n'), ((7623, 7643), 'numpy.array', 'np.array', (['[old_mean]'], {}), '([old_mean])\n', (7631, 7643), True, 'import numpy as np\n'), ((18558, 18582), 'mpi4py.MPI.COMM_WORLD.Barrier', 'MPI.COMM_WORLD.Barrier', ([], {}), '()\n', 
(18580, 18582), False, 'from mpi4py import MPI\n'), ((20037, 20048), 'time.time', 'time.time', ([], {}), '()\n', (20046, 20048), False, 'import time\n'), ((20890, 20923), 'numpy.mean', 'np.mean', (['epoch_adaptive_distances'], {}), '(epoch_adaptive_distances)\n', (20897, 20923), True, 'import numpy as np\n'), ((21450, 21479), 'numpy.mean', 'np.mean', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (21457, 21479), True, 'import numpy as np\n'), ((21544, 21581), 'numpy.mean', 'np.mean', (['eval_episode_rewards_history'], {}), '(eval_episode_rewards_history)\n', (21551, 21581), True, 'import numpy as np\n'), ((21633, 21649), 'numpy.mean', 'np.mean', (['eval_qs'], {}), '(eval_qs)\n', (21640, 21649), True, 'import numpy as np\n'), ((22943, 22990), 'stable_baselines.logger.record_tabular', 'logger.record_tabular', (['key', 'combined_stats[key]'], {}), '(key, combined_stats[key])\n', (22964, 22990), False, 'from stable_baselines import logger\n'), ((17385, 17409), 'mpi4py.MPI.COMM_WORLD.Barrier', 'MPI.COMM_WORLD.Barrier', ([], {}), '()\n', (17407, 17409), False, 'from mpi4py import MPI\n'), ((22252, 22271), 'numpy.isscalar', 'np.isscalar', (['scalar'], {}), '(scalar)\n', (22263, 22271), True, 'import numpy as np\n'), ((23095, 23128), 'numpy.mean', 'np.mean', (['episode_successes[-100:]'], {}), '(episode_successes[-100:])\n', (23102, 23128), True, 'import numpy as np\n'), ((12930, 12946), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12944, 12946), True, 'import numpy as np\n'), ((13318, 13366), 'stable_baselines.common.math_util.scale_action', 'scale_action', (['self.action_space', 'unscaled_action'], {}), '(self.action_space, unscaled_action)\n', (13330, 13366), False, 'from stable_baselines.common.math_util import unscale_action, scale_action\n'), ((13569, 13610), 'stable_baselines.common.math_util.unscale_action', 'unscale_action', (['self.action_space', 'action'], {}), '(self.action_space, action)\n', (13583, 13610), False, 'from 
stable_baselines.common.math_util import unscale_action, scale_action\n'), ((15426, 15531), 'stable_baselines.common.tf_util.total_episode_reward_logger', 'tf_util.total_episode_reward_logger', (['self.episode_reward', 'ep_rew', 'ep_done', 'writer', 'self.num_timesteps'], {}), '(self.episode_reward, ep_rew, ep_done,\n writer, self.num_timesteps)\n', (15461, 15531), False, 'from stable_baselines.common import tf_util, SetVerbosity, TensorboardWriter\n'), ((17294, 17318), 'mpi4py.MPI.COMM_WORLD.Barrier', 'MPI.COMM_WORLD.Barrier', ([], {}), '()\n', (17316, 17318), False, 'from mpi4py import MPI\n'), ((19032, 19078), 'stable_baselines.common.math_util.unscale_action', 'unscale_action', (['self.action_space', 'eval_action'], {}), '(self.action_space, eval_action)\n', (19046, 19078), False, 'from stable_baselines.common.math_util import unscale_action, scale_action\n'), ((23396, 23433), 'os.path.join', 'os.path.join', (['logdir', '"""env_state.pkl"""'], {}), "(logdir, 'env_state.pkl')\n", (23408, 23433), False, 'import os\n'), ((23658, 23700), 'os.path.join', 'os.path.join', (['logdir', '"""eval_env_state.pkl"""'], {}), "(logdir, 'eval_env_state.pkl')\n", (23670, 23700), False, 'import os\n'), ((15281, 15300), 'numpy.array', 'np.array', (['[reward_]'], {}), '([reward_])\n', (15289, 15300), True, 'import numpy as np\n'), ((15360, 15376), 'numpy.array', 'np.array', (['[done]'], {}), '([done])\n', (15368, 15376), True, 'import numpy as np\n')] |
import random
import pandas as pd
import numpy as np
def intermediate_model(walk_length=15, num_perturbations=9):
    """Generate walks for the Intermediate Model.

    Keeps the first node of every real walk and then, ``walk_length`` times,
    picks the next node uniformly at random among the 4-neighbours
    (left/right/top/bottom) of the current grid cell.  Each generated walk is
    written out once, followed by ``num_perturbations`` shuffled copies.

    Fixes over the previous revision: the inner step loop no longer shadows
    the outer loop variable ``i``, the magic constants 15 and 9 are now
    parameters (defaults preserve the old behavior), and the output file is
    closed even when an error occurs.

    :param walk_length: number of random steps appended after the first node
        (default 15 -- per the original comment, the average real walk length).
    :param num_perturbations: number of shuffled copies written per walk.
    """
    # Reading entire grid cells.
    df = pd.read_csv('..\\cells_ny.csv')
    cell_ids = df['cell_id']
    cell_names = df['cell_names']
    # Cell names look like "C<row>:<col>"; derive the grid dimensions.
    max_row = 0
    max_col = 0
    for name in cell_names:
        val = name.split(":")
        row = int(val[0].replace("C", ""))
        col = int(val[1])
        max_row = max(max_row, row)
        max_col = max(max_col, col)
    # Reshape the flat id list into a 2D grid for neighbour lookups.
    arr = np.array(cell_ids).reshape(max_row + 1, max_col + 1)
    # Reading original walks.
    with open("..\\walks_ten.txt") as file:
        walks = file.readlines()
    # cell_id -> cell_name lookup.
    store_dict = {str(cid): name for cid, name in zip(cell_ids, cell_names)}
    with open("..\\walks_inter_10.txt", "w") as s_walk:
        shuffle_all = []
        for line in walks:
            walk = list(
                line.replace("[", "").replace("]", "").replace(" ", "").replace("\n", "").split(","))
            # Keep the first node of the real walk (strip repr quotes).
            first = walk.pop(0)
            shuffle_all.append(str(first).replace("'", ""))
            row = 0
            col = 0
            # Locate the first node in the grid; unknown ids start at (0, 0).
            if str(first) in store_dict:
                val = str(store_dict[str(first)]).split(":")
                row = int(val[0].replace("C", ""))
                col = int(val[1])
            for _ in range(walk_length):
                # Candidate neighbour indices, clamped to the grid borders.
                left = max(0, col - 1)
                right = min(max_col, col + 1)
                top = max(0, row - 1)
                bottom = min(max_row, row + 1)
                # Collect the cell ids of existing neighbours.
                current = []
                if col != left:
                    current.append(arr[row][left])
                if col != right:
                    current.append(arr[row][right])
                if row != top:
                    current.append(arr[top][col])
                if row != bottom:
                    current.append(arr[bottom][col])
                # Pick one neighbour uniformly at random and move there.
                current_ele = random.choice(current)
                shuffle_all.append(str(current_ele))
                if str(current_ele) in store_dict:
                    val = str(store_dict[str(current_ele)]).split(":")
                    row = int(val[0].replace("C", ""))
                    col = int(val[1])
            s_walk.write(str(shuffle_all) + "\n")
            # Write the shuffled perturbations of the generated walk.
            for _ in range(num_perturbations):
                random.shuffle(shuffle_all)
                s_walk.write(str(shuffle_all) + "\n")
            shuffle_all.clear()
# This function returns average of real walks.
def average_of_cell_walks():
    """Return the average number of nodes per walk in ``walks_ten.txt``."""
    with open("..\\walks_ten.txt") as file:
        walks = file.readlines()
    total_nodes = 0
    for line in walks:
        cleaned = line.replace("[", "").replace("]", "").replace(" ", "") \
                      .replace("\n", "").replace("'", "")
        total_nodes += len(cleaned.split(","))
    return total_nodes / len(walks)
# We take real walks and add (k=9) perturbations for each walk.
def k_walk_perturbations():
    """Write every real walk followed by 9 shuffled copies to walks_ten_10.txt."""
    with open("..\\walks_ten.txt") as file:
        walks = file.readlines()
    s_walk = open("..\\walks_ten_10.txt", "w")
    for line in walks:
        walk = list(line.replace("[", "").replace("]", "").replace(" ", "").replace("\n", "").split(","))
        # The original walk first, then its shuffled perturbations.
        s_walk.write(str(walk) + "\n")
        for _ in range(9):
            random.shuffle(walk)
            s_walk.write(str(walk) + "\n")
    s_walk.close()
# Script entry point: build the Intermediate Model walks.
# k_walk_perturbations()
intermediate_model()
# print(average_of_cell_walks())
| [
"numpy.array",
"random.choice",
"random.shuffle",
"pandas.read_csv"
] | [((123, 154), 'pandas.read_csv', 'pd.read_csv', (['"""..\\\\cells_ny.csv"""'], {}), "('..\\\\cells_ny.csv')\n", (134, 154), True, 'import pandas as pd\n'), ((670, 688), 'numpy.array', 'np.array', (['cell_ids'], {}), '(cell_ids)\n', (678, 688), True, 'import numpy as np\n'), ((3080, 3102), 'random.choice', 'random.choice', (['current'], {}), '(current)\n', (3093, 3102), False, 'import random\n'), ((3514, 3541), 'random.shuffle', 'random.shuffle', (['shuffle_all'], {}), '(shuffle_all)\n', (3528, 3541), False, 'import random\n'), ((4586, 4606), 'random.shuffle', 'random.shuffle', (['walk'], {}), '(walk)\n', (4600, 4606), False, 'import random\n')] |
#!/usr/bin/python2
import sys
import os
import nibabel as nib
import numpy as np
from nilearn.input_data import NiftiMasker
from scipy.stats import ttest_rel
from fg_constants import *
def load_cv_map(regressors, subj, masker):
    """Return one subject's masked cross-validated correlation map."""
    return masker.transform(
        os.path.join(MAPS_DIR, regressors, subj, 'corr_cv.nii.gz'))
def join_all_subjects(regressor, subjects, masker):
    """Stack all subjects' cross-validation maps into one (subject x voxel) matrix."""
    maps = [load_cv_map(regressor, 'sub%03d' % s, masker) for s in subjects]
    return np.vstack(maps)
def make_ttest(reg1, reg2):
    """Voxelwise paired t-test between two regressors' correlation maps.

    Correlations are Fisher z-transformed (arctanh) before testing.
    Returns the t-value map and the p-value map as images.
    """
    masker = NiftiMasker(nib.load(MASK_FILE), standardize=False)
    masker.fit()
    # NOTE(review): subjects 4 and 10 are skipped -- presumably excluded
    # deliberately; confirm.
    subjects = [1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
    z_a = np.arctanh(join_all_subjects(reg1, subjects, masker))
    z_b = np.arctanh(join_all_subjects(reg2, subjects, masker))
    t_vals, p_vals = ttest_rel(z_a, z_b)
    return masker.inverse_transform(t_vals), masker.inverse_transform(p_vals)
if __name__ == '__main__':
    if len(sys.argv) >= 3:
        reg1 = sys.argv[1]
        reg2 = sys.argv[2]
        tt, pp = make_ttest(reg1, reg2)
        # BUG FIX: the output filenames previously interpolated ``A`` and
        # ``B``, names not defined in this script (NameError unless the
        # star-import happened to provide them); use the regressors that
        # were actually tested.
        nib.save(tt, 'ttest/ttest_t_%s_vs_%s.nii.gz' % (reg1, reg2))
        nib.save(pp, 'ttest/ttest_prob_%s_vs_%s.nii.gz' % (reg1, reg2))
    else:
        # Previously the script exited silently when arguments were missing.
        sys.exit('usage: %s <regressor1> <regressor2>' % sys.argv[0])
| [
"nibabel.save",
"nibabel.load",
"os.path.join",
"scipy.stats.ttest_rel",
"numpy.vstack"
] | [((241, 299), 'os.path.join', 'os.path.join', (['MAPS_DIR', 'regressors', 'subj', '"""corr_cv.nii.gz"""'], {}), "(MAPS_DIR, regressors, subj, 'corr_cv.nii.gz')\n", (253, 299), False, 'import os\n'), ((522, 537), 'numpy.vstack', 'np.vstack', (['maps'], {}), '(maps)\n', (531, 537), True, 'import numpy as np\n'), ((870, 885), 'scipy.stats.ttest_rel', 'ttest_rel', (['a', 'b'], {}), '(a, b)\n', (879, 885), False, 'from scipy.stats import ttest_rel\n'), ((1133, 1187), 'nibabel.save', 'nib.save', (['tt', "('ttest/ttest_t_%s_vs_%s.nii.gz' % (A, B))"], {}), "(tt, 'ttest/ttest_t_%s_vs_%s.nii.gz' % (A, B))\n", (1141, 1187), True, 'import nibabel as nib\n'), ((1192, 1249), 'nibabel.save', 'nib.save', (['pp', "('ttest/ttest_prob_%s_vs_%s.nii.gz' % (A, B))"], {}), "(pp, 'ttest/ttest_prob_%s_vs_%s.nii.gz' % (A, B))\n", (1200, 1249), True, 'import nibabel as nib\n'), ((593, 612), 'nibabel.load', 'nib.load', (['MASK_FILE'], {}), '(MASK_FILE)\n', (601, 612), True, 'import nibabel as nib\n')] |
import glob
import numpy as np
import pandas as pd
from collections import OrderedDict
#from . import metrics
import metrics
from .csv_reader import csv_node
__all__ = ['tune_threshold',
'assemble_node',
'assemble_dev_threshold',
'metric_reading',
'Ensemble']
def tune_threshold(y_true, y_prob, metric="f1_score"):
    """Sweep decision thresholds over (0, 1) and return the best one.

    ``metric`` is either a callable ``metric(y_true, y_pred)`` or the name of
    a function in the ``metrics`` module.  The first threshold achieving the
    highest score wins; ties keep the earlier threshold.
    """
    if isinstance(metric, str):
        metric = getattr(metrics, metric)
    best_score, best_threshold = 0.0, 0.5
    probs = np.asarray(y_prob)
    for threshold in np.arange(0.01, 1, 0.01):
        y_pred = np.where(probs > threshold, 1, 0)
        score = metric(y_true, y_pred)
        if score > best_score:
            best_score, best_threshold = score, threshold
    print("Tuned threshold: {:.4f}".format(best_threshold))
    return best_threshold
def assemble_node(nodes, key="Y_PROBA", method="median", PIDs=None):
    """Aggregate ``key`` across nodes, one value per PID.

    ``method`` is either a callable or the name of a numpy reduction
    (e.g. "median", "mean").  ``PIDs`` defaults to the first node's PIDs.
    """
    if isinstance(method, str):
        method = getattr(np, method)
    if PIDs is None:
        PIDs = nodes[0].PID
    return np.array([method([node.data[pid][key] for node in nodes])
                     for pid in PIDs])
def assemble_dev_threshold(nodes, method="median", metric="f1_score", PIDs=None):
    """Aggregate dev-set probabilities across nodes and tune a decision threshold."""
    y_prob = assemble_node(nodes, key="Y_PROBA", method=method, PIDs=PIDs)
    y_true = nodes[0].extract("Y_TRUE", PIDs)
    return tune_threshold(y_true, y_prob, metric)
def metric_reading(y_true, y_pred, y_proba):
    """Summarize classification metrics as an ordered mapping.

    Percentage metrics (accuracies, precision/recall, F1, ROC, PRC, NDCG)
    are scaled by 100; confusion counts (TP/FP/TN/FN) are kept raw.
    When the inputs are lists, a list of readings is returned (one per item).
    """
    if isinstance(y_true, list):
        return [metric_reading(t, p, q)
                for t, p, q in zip(y_true, y_pred, y_proba)]
    scores = metrics.classification_summary(y_true, y_pred, [0, 1], y_proba, verbose=False)
    reading = OrderedDict()
    for label, key in [('Pos.Acc', 'pos_acc'), ('Neg.Acc', 'neg_acc'),
                       ('Precision', 'precision'), ('Recall', 'recall'),
                       ('F1', 'f1'), ('ROC', 'roc'),
                       ('PRC', 'prc'), ('NDCG', 'ndcg')]:
        reading[label] = scores[key] * 100.0
    for label, key in [('TP', 'tp'), ('FP', 'fp'), ('TN', 'tn'), ('FN', 'fn')]:
        reading[label] = scores[key]
    return reading
class Ensemble(object):
    def __init__(self, results_csvs, dev_csvs, pids=None):
        """Build an ensemble from test and dev prediction csv paths.

        :param results_csvs: list of test-set prediction csv paths.
        :param dev_csvs: list of dev-set prediction csv paths.
        :param pids: optional subset of PIDs to evaluate; when None the
            first result node's PIDs are used (see ``build``).
        """
        self.results_csvs = results_csvs
        self.dev_csvs = dev_csvs
        self.build(pids)
@classmethod
def from_keyword(klass, test_keyword, dev_keyword, pids=None):
test_csvs = glob.glob(test_keyword, recursive=True)
dev_csvs = glob.glob(dev_keyword, recursive=True)
return klass(test_csvs, dev_csvs, pids)
@classmethod
def from_folder(klass, results_folder, dev_folder, pids=None):
results_csvs = glob.glob("{}/**/predictions*.csv".format(results_folder), recursive=True)
dev_csvs = glob.glob("{}/**/predictions*.csv".format(dev_folder), recursive=True)
return klass(results_csvs, dev_csvs, pids)
def build(self, pids=None):
self.results = [csv_node.from_csv(x) for x in self.results_csvs]
self.devs = [csv_node.from_csv(x) for x in self.dev_csvs]
self.results = sorted(self.results, key=lambda x: x.seed)
self.devs = sorted(self.devs, key=lambda x: x.seed)
if pids is None:
self.pids = list(self.results[0].PID)
else:
self.pids = pids
try:
self.score_list = self.get_seeds_score_list()
self.score = True
except:
self.score = False
self.proba_list = self.get_seeds_proba_list()
self.pred_list = self.get_seeds_pred_list()
@property
def score_dataframe(self):
return pd.DataFrame(OrderedDict(self.score_list_head+self.score_list))
@property
def proba_dataframe(self):
return pd.DataFrame(OrderedDict(self.proba_list_head+self.proba_list))
@property
def pred_dataframe(self):
return pd.DataFrame(OrderedDict(self.pred_list_head+self.pred_list))
def get_df_by_seed(self, key="Y_PROBA"):
seeds = [x.seed for x in self.results]
probas = [x.extract(key, self.pids) for x in self.results]
df_dict = OrderedDict([("PID", self.pids)] + \
[("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)])
df = pd.DataFrame(df_dict)
return df
def get_score_by_seed(self, seed=0):
idx = [x.seed for x in self.results].index(seed)
node = self.results[idx]
y_true = node.extract("Y_TRUE")
y_pred = node.extract("Y_PRED")
y_proba = node.extract("Y_PROBA")
score = metric_reading(y_true, y_pred, y_proba)
return score
def score2pair(self, key, score):
val = ["{:.2f}".format(score[key]) for key in self.score_keys]
return (key, val)
def get_seeds_score_list(self):
seeds = [x.seed for x in self.results]
scores = [self.get_score_by_seed(x) for x in seeds]
self.score_keys = list(scores[0].keys())
self.score_list_head = [("Experiment", self.score_keys)]
df_list = []
for seed, score in zip(seeds, scores):
pair = self.score2pair("SEED_{}".format(seed), score)
df_list.append(pair)
mean_score = OrderedDict([(key, np.mean([score[key] for score in scores])) for key in self.score_keys])
std_score = OrderedDict([(key, np.std([score[key] for score in scores])) for key in self.score_keys])
df_list.append(self.score2pair("AVERAGE", mean_score))
df_list.append(self.score2pair("STD", std_score))
return df_list
def get_seeds_proba_list(self):
seeds = [x.seed for x in self.results]
probas = [x.extract("Y_PROBA", self.pids) for x in self.results]
self.proba_list_head = [("PID", self.pids)]
proba_list = [("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)]
return proba_list
def get_seeds_pred_list(self):
seeds = [x.seed for x in self.results]
preds = [x.extract("Y_PRED", self.pids) for x in self.results]
self.pred_list_head = [("PID", self.pids)]
pred_list = [("SEED_{}".format(seed), pred) for seed, pred in zip(seeds, preds)]
return pred_list
def median_vote(self, metric="f1_score"):
dev_threshold = assemble_dev_threshold(self.devs, method="median",
metric=metric, PIDs=self.devs[0].PID)
voted_y_proba = assemble_node(self.results, key="Y_PROBA",
method="median", PIDs=self.pids)
voted_y_pred = np.array([1 if p > dev_threshold else 0 for p in voted_y_proba])
y_true = self.results[0].extract("Y_TRUE", self.pids)
#df_dict = OrderedDict([("PID", self.pids),
# ("Y_PROBA", voted_y_proba),
# ("Y_PRED", voted_y_pred)])
#df = pd.DataFrame(df_dict)
proba_pair = ("MEDIAN", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MEDIAN", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MEDIAN", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
def mv_vote(self):
voted_y_proba = assemble_node(self.results, key="Y_PRED",
method="mean", PIDs=self.pids)
voted_y_pred = np.round(voted_y_proba)
y_true = self.results[0].extract("Y_TRUE", self.pids)
proba_pair = ("MV", voted_y_proba)
self.proba_list.append(proba_pair)
proba_df = pd.DataFrame(OrderedDict(self.proba_list_head+[proba_pair]))
pred_pair = ("MV", voted_y_pred)
self.pred_list.append(pred_pair)
pred_df = pd.DataFrame(OrderedDict(self.pred_list_head+[pred_pair]))
if self.score:
score = metric_reading(y_true, voted_y_pred, voted_y_proba)
score_pair = self.score2pair("MV", score)
self.score_list.append(score_pair)
score_df = pd.DataFrame(OrderedDict(self.score_list_head+[score_pair]))
else:
score_df = None
return proba_df, pred_df, score_df
| [
"numpy.mean",
"collections.OrderedDict",
"numpy.arange",
"numpy.round",
"numpy.array",
"metrics.classification_summary",
"numpy.std",
"pandas.DataFrame",
"glob.glob"
] | [((453, 477), 'numpy.arange', 'np.arange', (['(0.01)', '(1)', '(0.01)'], {}), '(0.01, 1, 0.01)\n', (462, 477), True, 'import numpy as np\n'), ((1194, 1210), 'numpy.array', 'np.array', (['probas'], {}), '(probas)\n', (1202, 1210), True, 'import numpy as np\n'), ((574, 629), 'numpy.array', 'np.array', (['[(1 if p > threshold else 0) for p in y_prob]'], {}), '([(1 if p > threshold else 0) for p in y_prob])\n', (582, 629), True, 'import numpy as np\n'), ((1769, 1847), 'metrics.classification_summary', 'metrics.classification_summary', (['y_true', 'y_pred', '[0, 1]', 'y_proba'], {'verbose': '(False)'}), '(y_true, y_pred, [0, 1], y_proba, verbose=False)\n', (1799, 1847), False, 'import metrics\n'), ((1865, 2280), 'collections.OrderedDict', 'OrderedDict', (["[('Pos.Acc', scores['pos_acc'] * 100.0), ('Neg.Acc', scores['neg_acc'] * \n 100.0), ('Precision', scores['precision'] * 100.0), ('Recall', scores[\n 'recall'] * 100.0), ('F1', scores['f1'] * 100.0), ('ROC', scores['roc'] *\n 100.0), ('PRC', scores['prc'] * 100.0), ('NDCG', scores['ndcg'] * 100.0\n ), ('TP', scores['tp']), ('FP', scores['fp']), ('TN', scores['tn']), (\n 'FN', scores['fn'])]"], {}), "([('Pos.Acc', scores['pos_acc'] * 100.0), ('Neg.Acc', scores[\n 'neg_acc'] * 100.0), ('Precision', scores['precision'] * 100.0), (\n 'Recall', scores['recall'] * 100.0), ('F1', scores['f1'] * 100.0), (\n 'ROC', scores['roc'] * 100.0), ('PRC', scores['prc'] * 100.0), ('NDCG',\n scores['ndcg'] * 100.0), ('TP', scores['tp']), ('FP', scores['fp']), (\n 'TN', scores['tn']), ('FN', scores['fn'])])\n", (1876, 2280), False, 'from collections import OrderedDict\n'), ((2887, 2926), 'glob.glob', 'glob.glob', (['test_keyword'], {'recursive': '(True)'}), '(test_keyword, recursive=True)\n', (2896, 2926), False, 'import glob\n'), ((2946, 2984), 'glob.glob', 'glob.glob', (['dev_keyword'], {'recursive': '(True)'}), '(dev_keyword, recursive=True)\n', (2955, 2984), False, 'import glob\n'), ((4730, 4751), 'pandas.DataFrame', 
'pd.DataFrame', (['df_dict'], {}), '(df_dict)\n', (4742, 4751), True, 'import pandas as pd\n'), ((7099, 7165), 'numpy.array', 'np.array', (['[(1 if p > dev_threshold else 0) for p in voted_y_proba]'], {}), '([(1 if p > dev_threshold else 0) for p in voted_y_proba])\n', (7107, 7165), True, 'import numpy as np\n'), ((8324, 8347), 'numpy.round', 'np.round', (['voted_y_proba'], {}), '(voted_y_proba)\n', (8332, 8347), True, 'import numpy as np\n'), ((4101, 4152), 'collections.OrderedDict', 'OrderedDict', (['(self.score_list_head + self.score_list)'], {}), '(self.score_list_head + self.score_list)\n', (4112, 4152), False, 'from collections import OrderedDict\n'), ((4226, 4277), 'collections.OrderedDict', 'OrderedDict', (['(self.proba_list_head + self.proba_list)'], {}), '(self.proba_list_head + self.proba_list)\n', (4237, 4277), False, 'from collections import OrderedDict\n'), ((4350, 4399), 'collections.OrderedDict', 'OrderedDict', (['(self.pred_list_head + self.pred_list)'], {}), '(self.pred_list_head + self.pred_list)\n', (4361, 4399), False, 'from collections import OrderedDict\n'), ((7555, 7603), 'collections.OrderedDict', 'OrderedDict', (['(self.proba_list_head + [proba_pair])'], {}), '(self.proba_list_head + [proba_pair])\n', (7566, 7603), False, 'from collections import OrderedDict\n'), ((7721, 7767), 'collections.OrderedDict', 'OrderedDict', (['(self.pred_list_head + [pred_pair])'], {}), '(self.pred_list_head + [pred_pair])\n', (7732, 7767), False, 'from collections import OrderedDict\n'), ((8529, 8577), 'collections.OrderedDict', 'OrderedDict', (['(self.proba_list_head + [proba_pair])'], {}), '(self.proba_list_head + [proba_pair])\n', (8540, 8577), False, 'from collections import OrderedDict\n'), ((8691, 8737), 'collections.OrderedDict', 'OrderedDict', (['(self.pred_list_head + [pred_pair])'], {}), '(self.pred_list_head + [pred_pair])\n', (8702, 8737), False, 'from collections import OrderedDict\n'), ((8004, 8052), 'collections.OrderedDict', 'OrderedDict', 
(['(self.score_list_head + [score_pair])'], {}), '(self.score_list_head + [score_pair])\n', (8015, 8052), False, 'from collections import OrderedDict\n'), ((8970, 9018), 'collections.OrderedDict', 'OrderedDict', (['(self.score_list_head + [score_pair])'], {}), '(self.score_list_head + [score_pair])\n', (8981, 9018), False, 'from collections import OrderedDict\n'), ((5739, 5780), 'numpy.mean', 'np.mean', (['[score[key] for score in scores]'], {}), '([score[key] for score in scores])\n', (5746, 5780), True, 'import numpy as np\n'), ((5850, 5890), 'numpy.std', 'np.std', (['[score[key] for score in scores]'], {}), '([score[key] for score in scores])\n', (5856, 5890), True, 'import numpy as np\n')] |
# Training a Dueling Double DQN agent to play break-out
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
import gym
import numpy as np
from gym.core import ObservationWrapper
from gym.spaces import Box
import cv2
import os
import atari_wrappers # adjust env
from framebuffer import FrameBuffer # stack 4 consec images
from replay_buffer import ReplayBuffer
ENV_NAME = "BreakoutNoFrameskip-v4"
# create break-out env
env = gym.make(ENV_NAME)
env.reset()
print("Breakout environment created!")
############# preprocess images #############
# crop the image to include only useful information
# then resize the image to 64 x 64
class PreprocessAtariObs(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and grayscales it."""
ObservationWrapper.__init__(self, env)
self.image_size = (1, 64, 64)
self.observation_space = Box(0.0, 1.0, self.image_size)
def observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize from any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
# crop the image
# remove the top part
img = img[50:]
# resize the image
img = cv2.resize(img, dsize=(self.image_size[1], self.image_size[2]))
# gray scale
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# normalize to (0, 1)
img = img.astype(np.float32) / 255.0
# add channel dimension
return img[None]
# adjust the env by some wrappers
def PrimaryAtariWrap(env, clip_rewards=True):
assert 'NoFrameskip' in env.spec.id
# This wrapper holds the same action for <skip> frames and outputs
# the maximal pixel value of 2 last frames (to handle blinking
# in some envs)
env = atari_wrappers.MaxAndSkipEnv(env, skip=4)
# This wrapper sends done=True when each life is lost
# (not all the 5 lives that are givern by the game rules).
# It should make easier for the agent to understand that losing is bad.
env = atari_wrappers.EpisodicLifeEnv(env)
# This wrapper laucnhes the ball when an episode starts.
# Without it the agent has to learn this action, too.
# Actually it can but learning would take longer.
env = atari_wrappers.FireResetEnv(env)
# This wrapper transforms rewards to {-1, 0, 1} according to their sign
if clip_rewards:
env = atari_wrappers.ClipRewardEnv(env)
# This wrapper is yours :)
env = PreprocessAtariObs(env)
return env
def make_env(clip_rewards=True, seed=None):
env = gym.make(ENV_NAME) # create raw env
if seed is not None:
env.seed(seed)
env = PrimaryAtariWrap(env, clip_rewards)
env = FrameBuffer(env, n_frames=4, dim_order='pytorch')
return env
env = make_env()
env.reset()
n_actions = env.action_space.n
state_shape = env.observation_space.shape
print("adjust env with 4 consec images stacked can be created")
############# Model #############
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def conv2d_size_out(size, kernel_size, stride):
"""
common use case:
cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride)
cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride)
to understand the shape for dense layer's input
"""
return (size - (kernel_size - 1) - 1) // stride + 1
class DuelingDQNAgent(nn.Module):
def __init__(self, state_shape, n_actions, epsilon=0):
super().__init__()
self.epsilon = epsilon
self.n_actions = n_actions
self.state_shape = state_shape
# Define your network body here. Please make sure agent is fully contained here
# nn.Flatten() can be useful
# <YOUR CODE>d
kernel_size = 3
stride = 2
self.conv1 = nn.Conv2d(4, 16, kernel_size, stride)
out_size = conv2d_size_out(state_shape[1], kernel_size, stride)
self.conv2 = nn.Conv2d(16, 32, kernel_size, stride)
out_size = conv2d_size_out(out_size, kernel_size, stride)
self.conv3 = nn.Conv2d(32, 64, kernel_size, stride)
out_size = conv2d_size_out(out_size, kernel_size, stride)
# size of the output tensor after convolution batch_size x 64 x out_size x out_size
self.linear = nn.Linear(64*out_size*out_size, 256)
# advantage
self.advantage = nn.Sequential(
nn.Linear(256, 512),
nn.ReLU(),
nn.Linear(512, self.n_actions)
)
# state value
self.value = nn.Sequential(
nn.Linear(256, 512),
nn.ReLU(),
nn.Linear(512, 1)
)
def forward(self, state_t):
"""
takes agent's observation (tensor), returns qvalues (tensor)
:param state_t: a batch of 4-frame buffers, shape = [batch_size, 4, h, w]
"""
# Use your network to compute qvalues for given state
# qvalues = <YOUR CODE>
t = self.conv1(state_t)
t = F.relu(t)
t = self.conv2(t)
t = F.relu(t)
t = self.conv3(t)
t = F.relu(t)
t = t.view(state_t.shape[0], -1)
t = self.linear(t)
t = F.relu(t)
# compute advantage and state value as different heads
advantage = self.advantage(t)
value = self.value(t)
qvalues = value + advantage - advantage.mean(dim=1, keepdim=True)
assert qvalues.requires_grad, "qvalues must be a torch tensor with grad"
assert len(
qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == n_actions
return qvalues
def get_qvalues(self, states):
"""
like forward, but works on numpy arrays, not tensors
"""
model_device = next(self.parameters()).device
states = torch.tensor(states, device=model_device, dtype=torch.float)
qvalues = self.forward(states)
return qvalues.data.cpu().numpy()
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice(
[0, 1], batch_size, p=[1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
# Evaluate the agent
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
rewards = []
for _ in range(n_games):
reward = 0.0
s = env.reset()
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(
qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done:
break
rewards.append(reward)
return np.mean(rewards)
def play_and_record(initial_state, agent, env, exp_replay, n_steps=1):
"""
Play the game for exactly n steps, record every (s,a,r,s', done) to replay buffer.
Whenever game ends, add record with done=True and reset the game.
It is guaranteed that env has done=False when passed to this function.
PLEASE DO NOT RESET ENV UNLESS IT IS "DONE"
:returns: return sum of rewards over time and the state in which the env stays
"""
s = initial_state
sum_rewards = 0
# Play the game for n_steps as per instructions above
# <YOUR CODE>
sum_rewards = 0.0
for _ in range(n_steps):
qvalues = agent.get_qvalues([s])
action = agent.sample_actions(qvalues)[0]
next_s, r, done, _ = env.step(action)
exp_replay.add(s, action, r, next_s, done)
sum_rewards += r
if done:
s = env.reset()
else:
s = next_s
return sum_rewards, s
def compute_td_loss(states, actions, rewards, next_states, is_done,
agent, target_network,
gamma=0.99,
check_shapes=False,
device=device):
""" Compute td loss using torch operations only. Use the formulae above. '''
objective of agent is
\hat Q(s_t, a_t) = r_t + \gamma Target(s_{t+1}, argmax_{a} Q(s_{t+1}, a))
"""
states = torch.tensor(states, device=device, dtype=torch.float) # shape: [batch_size, *state_shape]
# for some torch reason should not make actions a tensor
actions = torch.tensor(actions, device=device, dtype=torch.long) # shape: [batch_size]
rewards = torch.tensor(rewards, device=device, dtype=torch.float) # shape: [batch_size]
# shape: [batch_size, *state_shape]
next_states = torch.tensor(next_states, device=device, dtype=torch.float)
is_done = torch.tensor(
is_done.astype('float32'),
device=device,
dtype=torch.float
) # shape: [batch_size]
is_not_done = 1 - is_done
# get q-values for all actions in current states
predicted_qvalues = agent(states)
# compute q-values for all actions in next states
predicted_next_qvalues = target_network(next_states)
# best action in next state
next_best_actions = torch.argmax(agent(states), dim=1)
# select q-values for chosen actions
predicted_qvalues_for_actions = predicted_qvalues[range(
len(actions)), actions]
# compute the objective of the agent
next_state_values = predicted_next_qvalues[range(
len(actions)), next_best_actions]
#assert next_state_values.dim(
#) == 1 and next_state_values.shape[0] == states.shape[0], "must predict one value per state"
# compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
# at the last state use the simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
# you can multiply next state values by is_not_done to achieve this.
# target_qvalues_for_actions = <YOUR CODE>
target_qvalues_for_actions = rewards + next_state_values * is_not_done
# mean squared error loss to minimize
loss = torch.mean((predicted_qvalues_for_actions -
target_qvalues_for_actions.detach()) ** 2)
if check_shapes:
assert predicted_next_qvalues.data.dim(
) == 2, "make sure you predicted q-values for all actions in next state"
assert next_state_values.data.dim(
) == 1, "make sure you computed V(s') as maximum over just the actions axis and not all axes"
assert target_qvalues_for_actions.data.dim(
) == 1, "there's something wrong with target q-values, they must be a vector"
return loss
############# Main Loop #############
seed = 42
env = make_env(seed)
state_shape = env.observation_space.shape
n_actions = env.action_space.n
state = env.reset()
agent = DuelingDQNAgent(state_shape, n_actions, epsilon=1).to(device)
target_network = DuelingDQNAgent(state_shape, n_actions).to(device)
target_network.load_state_dict(agent.state_dict())
exp_replay = ReplayBuffer(10**4)
'''
for i in range(100):
if not utils.is_enough_ram(min_available_gb=0.1):
print("""
Less than 100 Mb RAM available.
Make sure the buffer size in not too huge.
Also check, maybe other processes consume RAM heavily.
"""
)
break
play_and_record(state, agent, env, exp_replay, n_steps=10**2)
if len(exp_replay) == 10**4:
break
print(len(exp_replay))
'''
timesteps_per_epoch = 1
batch_size = 16
total_steps = 3 * 10**6 # Debug param
decay_steps = 10**6 # Debug param
# logs and ckpt
ckpt_dir = 'logs'
ckpt_file = 'dueling_ckpt.pth'
metrics_file = 'dueling_metrics.pth'
ckpt_freq = 10*5000 # Debug param
opt = torch.optim.Adam(agent.parameters(), lr=1e-4)
init_epsilon = 1
final_epsilon = 0.1
loss_freq = 50
refresh_target_network_freq = 5000
eval_freq = 5000
max_grad_norm = 50
n_lives = 5
mean_rw_history = []
td_loss_history = []
grad_norm_history = []
initial_state_v_history = []
step = 0
print("Starts training on {}".format(next(agent.parameters()).device))
for step in range(step, total_steps + 1):
'''
if not utils.is_enough_ram():
print('less that 100 Mb RAM available, freezing')
print('make sure everythin is ok and make KeyboardInterrupt to continue')
try:
while True:
pass
except KeyboardInterrupt:
pass
'''
agent.epsilon = utils.linear_decay(init_epsilon, final_epsilon, step, decay_steps)
# play
_, state = play_and_record(state, agent, env, exp_replay, timesteps_per_epoch)
# train
#<YOUR CODE: sample batch_size of data from experience replay>
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
#loss = <YOUR CODE: compute TD loss>
loss = compute_td_loss(obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch,
agent, target_network,
gamma=0.99, check_shapes=True)
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)
opt.step()
opt.zero_grad()
if step % loss_freq == 0:
td_loss_history.append(loss.data.cpu().item())
grad_norm_history.append(grad_norm)
if step % refresh_target_network_freq == 0:
# Load agent weights into target_network
# <YOUR CODE>
target_network.load_state_dict(agent.state_dict())
if step % eval_freq == 0:
mean_rw_history.append(evaluate(
make_env(clip_rewards=True, seed=step),
agent, n_games=3 * n_lives, greedy=True)
)
initial_state_q_values = agent.get_qvalues(
[make_env(seed=step).reset()]
)
initial_state_v_history.append(np.max(initial_state_q_values))
print("buffer size = %i, epsilon = %.5f" %
(len(exp_replay), agent.epsilon))
if step % ckpt_freq==0:
print("checkpointing ...")
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
# check point model and optimizer
checkpoint = {
"step": step,
"agent": agent.state_dict(),
"epsilon": agent.epsilon,
"target_network": target_network.state_dict(),
"optimizer": opt.state_dict(),
"replay_buffer": exp_replay._storage
}
torch.save(checkpoint, os.path.join(ckpt_dir, ckpt_file))
# save the performance metric
metrics = {
"mean_rw_history": mean_rw_history,
"td_loss_history": td_loss_history,
"grad_norm_history": grad_norm_history,
"initial_state_v_history": initial_state_v_history
}
torch.save(metrics, os.path.join(ckpt_dir, metrics_file))
# check point model and optimizer
checkpoint = {
"step": step,
"agent": agent.state_dict(),
"epsilon": agent.epsilon,
"target_network": target_network.state_dict(),
"optimizer": opt.state_dict(),
"replay_buffer": exp_replay._storage
}
torch.save(checkpoint, os.path.join(ckpt_dir, ckpt_file))
# save the performance metric
metrics = {
"mean_rw_history": mean_rw_history,
"td_loss_history": td_loss_history,
"grad_norm_history": grad_norm_history,
"initial_state_v_history": initial_state_v_history
}
torch.save(metrics, os.path.join(ckpt_dir, metrics_file))
| [
"torch.nn.ReLU",
"replay_buffer.ReplayBuffer",
"torch.cuda.is_available",
"gym.make",
"utils.linear_decay",
"numpy.mean",
"os.path.exists",
"numpy.where",
"numpy.max",
"atari_wrappers.MaxAndSkipEnv",
"atari_wrappers.FireResetEnv",
"numpy.random.choice",
"atari_wrappers.ClipRewardEnv",
"ata... | [((496, 514), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (504, 514), False, 'import gym\n'), ((11607, 11628), 'replay_buffer.ReplayBuffer', 'ReplayBuffer', (['(10 ** 4)'], {}), '(10 ** 4)\n', (11619, 11628), False, 'from replay_buffer import ReplayBuffer\n'), ((2128, 2169), 'atari_wrappers.MaxAndSkipEnv', 'atari_wrappers.MaxAndSkipEnv', (['env'], {'skip': '(4)'}), '(env, skip=4)\n', (2156, 2169), False, 'import atari_wrappers\n'), ((2378, 2413), 'atari_wrappers.EpisodicLifeEnv', 'atari_wrappers.EpisodicLifeEnv', (['env'], {}), '(env)\n', (2408, 2413), False, 'import atari_wrappers\n'), ((2598, 2630), 'atari_wrappers.FireResetEnv', 'atari_wrappers.FireResetEnv', (['env'], {}), '(env)\n', (2625, 2630), False, 'import atari_wrappers\n'), ((2917, 2935), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (2925, 2935), False, 'import gym\n'), ((3058, 3107), 'framebuffer.FrameBuffer', 'FrameBuffer', (['env'], {'n_frames': '(4)', 'dim_order': '"""pytorch"""'}), "(env, n_frames=4, dim_order='pytorch')\n", (3069, 3107), False, 'from framebuffer import FrameBuffer\n'), ((7405, 7421), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (7412, 7421), True, 'import numpy as np\n'), ((8804, 8858), 'torch.tensor', 'torch.tensor', (['states'], {'device': 'device', 'dtype': 'torch.float'}), '(states, device=device, dtype=torch.float)\n', (8816, 8858), False, 'import torch\n'), ((8974, 9028), 'torch.tensor', 'torch.tensor', (['actions'], {'device': 'device', 'dtype': 'torch.long'}), '(actions, device=device, dtype=torch.long)\n', (8986, 9028), False, 'import torch\n'), ((9068, 9123), 'torch.tensor', 'torch.tensor', (['rewards'], {'device': 'device', 'dtype': 'torch.float'}), '(rewards, device=device, dtype=torch.float)\n', (9080, 9123), False, 'import torch\n'), ((9205, 9264), 'torch.tensor', 'torch.tensor', (['next_states'], {'device': 'device', 'dtype': 'torch.float'}), '(next_states, device=device, dtype=torch.float)\n', (9217, 9264), 
False, 'import torch\n'), ((13089, 13155), 'utils.linear_decay', 'utils.linear_decay', (['init_epsilon', 'final_epsilon', 'step', 'decay_steps'], {}), '(init_epsilon, final_epsilon, step, decay_steps)\n', (13107, 13155), False, 'import utils\n'), ((15816, 15849), 'os.path.join', 'os.path.join', (['ckpt_dir', 'ckpt_file'], {}), '(ckpt_dir, ckpt_file)\n', (15828, 15849), False, 'import os\n'), ((16099, 16135), 'os.path.join', 'os.path.join', (['ckpt_dir', 'metrics_file'], {}), '(ckpt_dir, metrics_file)\n', (16111, 16135), False, 'import os\n'), ((882, 920), 'gym.core.ObservationWrapper.__init__', 'ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (909, 920), False, 'from gym.core import ObservationWrapper\n'), ((993, 1023), 'gym.spaces.Box', 'Box', (['(0.0)', '(1.0)', 'self.image_size'], {}), '(0.0, 1.0, self.image_size)\n', (996, 1023), False, 'from gym.spaces import Box\n'), ((1561, 1624), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(self.image_size[1], self.image_size[2])'}), '(img, dsize=(self.image_size[1], self.image_size[2]))\n', (1571, 1624), False, 'import cv2\n'), ((1661, 1698), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1673, 1698), False, 'import cv2\n'), ((2743, 2776), 'atari_wrappers.ClipRewardEnv', 'atari_wrappers.ClipRewardEnv', (['env'], {}), '(env)\n', (2771, 2776), False, 'import atari_wrappers\n'), ((3360, 3385), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3383, 3385), False, 'import torch\n'), ((4185, 4222), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(16)', 'kernel_size', 'stride'], {}), '(4, 16, kernel_size, stride)\n', (4194, 4222), True, 'import torch.nn as nn\n'), ((4316, 4354), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', 'kernel_size', 'stride'], {}), '(16, 32, kernel_size, stride)\n', (4325, 4354), True, 'import torch.nn as nn\n'), ((4442, 4480), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', 'kernel_size', 'stride'], {}), 
'(32, 64, kernel_size, stride)\n', (4451, 4480), True, 'import torch.nn as nn\n'), ((4662, 4702), 'torch.nn.Linear', 'nn.Linear', (['(64 * out_size * out_size)', '(256)'], {}), '(64 * out_size * out_size, 256)\n', (4671, 4702), True, 'import torch.nn as nn\n'), ((5386, 5395), 'torch.nn.functional.relu', 'F.relu', (['t'], {}), '(t)\n', (5392, 5395), True, 'import torch.nn.functional as F\n'), ((5434, 5443), 'torch.nn.functional.relu', 'F.relu', (['t'], {}), '(t)\n', (5440, 5443), True, 'import torch.nn.functional as F\n'), ((5482, 5491), 'torch.nn.functional.relu', 'F.relu', (['t'], {}), '(t)\n', (5488, 5491), True, 'import torch.nn.functional as F\n'), ((5573, 5582), 'torch.nn.functional.relu', 'F.relu', (['t'], {}), '(t)\n', (5579, 5582), True, 'import torch.nn.functional as F\n'), ((6231, 6291), 'torch.tensor', 'torch.tensor', (['states'], {'device': 'model_device', 'dtype': 'torch.float'}), '(states, device=model_device, dtype=torch.float)\n', (6243, 6291), False, 'import torch\n'), ((6601, 6645), 'numpy.random.choice', 'np.random.choice', (['n_actions'], {'size': 'batch_size'}), '(n_actions, size=batch_size)\n', (6617, 6645), True, 'import numpy as np\n'), ((6719, 6781), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', 'batch_size'], {'p': '[1 - epsilon, epsilon]'}), '([0, 1], batch_size, p=[1 - epsilon, epsilon])\n', (6735, 6781), True, 'import numpy as np\n'), ((6808, 6862), 'numpy.where', 'np.where', (['should_explore', 'random_actions', 'best_actions'], {}), '(should_explore, random_actions, best_actions)\n', (6816, 6862), True, 'import numpy as np\n'), ((4780, 4799), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (4789, 4799), True, 'import torch.nn as nn\n'), ((4813, 4822), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4820, 4822), True, 'import torch.nn as nn\n'), ((4836, 4866), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'self.n_actions'], {}), '(512, self.n_actions)\n', (4845, 4866), True, 'import torch.nn as nn\n'), 
((4956, 4975), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (4965, 4975), True, 'import torch.nn as nn\n'), ((4989, 4998), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4996, 4998), True, 'import torch.nn as nn\n'), ((5012, 5029), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (5021, 5029), True, 'import torch.nn as nn\n'), ((14450, 14480), 'numpy.max', 'np.max', (['initial_state_q_values'], {}), '(initial_state_q_values)\n', (14456, 14480), True, 'import numpy as np\n'), ((14674, 14698), 'os.path.exists', 'os.path.exists', (['ckpt_dir'], {}), '(ckpt_dir)\n', (14688, 14698), False, 'import os\n'), ((14712, 14733), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {}), '(ckpt_dir)\n', (14723, 14733), False, 'import os\n'), ((15118, 15151), 'os.path.join', 'os.path.join', (['ckpt_dir', 'ckpt_file'], {}), '(ckpt_dir, ckpt_file)\n', (15130, 15151), False, 'import os\n'), ((15477, 15513), 'os.path.join', 'os.path.join', (['ckpt_dir', 'metrics_file'], {}), '(ckpt_dir, metrics_file)\n', (15489, 15513), False, 'import os\n')] |
from flask import Flask, request
from flask import render_template
import numpy as np
import pickle
import os
import matplotlib.pyplot as plt
# WSGI application object; route handlers are presumably registered further down the file (not visible in this chunk)
app = Flask(__name__)
## function to check whether board is its terminal state
def check_winner(game):
    """Inspect a 9-cell board and report the game outcome.

    *game* is a flat list where played cells hold 'X' or 'O' and empty
    cells still hold their integer position (0-8).

    Returns one of:
      'The winner is PLAYER 1'  -- three X marks in a line
      'The winner is PLAYER 2'  -- three O marks in a line
      'No winner, its a tie'    -- board full with no winning line
      ''                        -- game still in progress
    """
    winning_lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                     (0, 3, 6), (1, 4, 7), (2, 5, 8),
                     (0, 4, 8), (2, 4, 6))
    for a, b, c in winning_lines:
        trio = "{}{}{}".format(game[a], game[b], game[c])
        if trio == 'XXX':
            return 'The winner is PLAYER 1'
        if trio == 'OOO':
            return 'The winner is PLAYER 2'
    # Tie: no unplayed cell remains (every entry is a mark, and the falsy
    # position 0 is explicitly ruled out as well).
    if not any(a for a in game if type(a) != str) and 0 not in game:
        return 'No winner, its a tie'
    return ''
#################
## class defined to initialize player instance
class Sarsa_Agent:
    """Tic-tac-toe player that learns a board-state -> Q-value table via SARSA.

    The agent explores uniformly at random with probability ``exploration``
    and otherwise greedily plays the move whose resulting board state has
    the highest stored Q-value. Rewards are back-propagated through the
    visited states at the end of each game.
    """

    def __init__(self, name, exploration=0.5):
        self.name = name                     # used to name the pickled policy file ('policy_<name>')
        self.terminate = None                # terminal-state placeholder (not used by the code visible in this file)
        self.exploration_rate = exploration  # probability of taking a random move
        self.discounted_gamma = 0.7          # discount factor: shrinks credit for states far from the game's end
        self.learning_rate = 0.2             # step size of the Q-value update
        self.states = []                     # hashes of every board state visited in the current game
        self.dict_state_value = {}           # board-state hash -> learned Q-value

    def NextStep(self, available_positions, current_board_state, turn):
        """Choose the next cell to play.

        :param available_positions: indices of the currently empty cells
        :param current_board_state: numeric board (1 / -1 for marks, 0 empty)
        :param turn: numeric mark this agent would place (1 or -1)
        :return: chosen cell index
        """
        if np.random.uniform(0, 1) <= self.exploration_rate:
            # explore: pick uniformly among the free cells
            idx = np.random.choice(len(available_positions))
            action = available_positions[idx]
        else:
            # exploit: simulate each candidate move and keep the one whose
            # resulting state has the highest known Q-value (ties break to
            # the later candidate because of ">=")
            value_max = -500
            for i in available_positions:
                next_board_state = current_board_state.copy()
                next_board_state[i] = turn
                next_boardHash = self.getHash(next_board_state)
                # unknown states default to a Q-value of 0 (single lookup
                # instead of the original double .get())
                value = self.dict_state_value.get(next_boardHash, 0)
                if value >= value_max:
                    value_max = value
                    action = i
        return action

    def getHash(self, game):
        """Serialise a board state into the string key used by the Q-table."""
        return str(game)

    def addState(self, state):
        """Record a visited board-state hash for end-of-game credit assignment."""
        self.states.append(state)

    def feedReward(self, reward):
        """Back-propagate the final reward through the finished game's states.

        Walks the visited states from last to first, applying
        Q(s) += lr * (gamma * reward - Q(s)), so earlier states receive an
        increasingly discounted share of the outcome.
        """
        for st in reversed(self.states):
            self.dict_state_value.setdefault(st, 0)
            # SARSA-style update
            self.dict_state_value[st] += self.learning_rate * (
                self.discounted_gamma * reward - self.dict_state_value[st])
            reward = self.dict_state_value[st]

    def reset(self):
        """Clear the per-game state history (call between games)."""
        self.states = []

    def savePolicy(self):
        """Persist the Q-table to 'policy_<name>', merging with any existing file.

        NOTE(review): when merging, values from the existing file overwrite
        the freshly learned ones — confirm this precedence is intended.
        """
        policy_file = 'policy_' + str(self.name)
        if os.path.exists(policy_file):
            # fix: the original opened this file and never closed it;
            # 'with' guarantees the handle is released
            with open(policy_file, 'rb') as obj:
                existing_policies = pickle.load(obj)
            for i in existing_policies.keys():
                self.dict_state_value[i] = existing_policies[i]
        with open(policy_file, 'wb') as write_file:
            pickle.dump(self.dict_state_value, write_file)

    def loadPolicy(self, file):
        """Load a previously saved Q-table from *file* into this agent."""
        with open(file, 'rb') as read_file:
            self.dict_state_value = pickle.load(read_file)
#################
## get the hash address of the board state
def getHash(game):
    """Return the string key used to index a board state in a Q-table."""
    state_key = str(game)
    return state_key
### function which gives rewards for each player at the end of each game
def reward(player1, player2, winner):
    """Hand out end-of-game rewards to both players based on *winner*.

    A win pays 1 to the winner and 0 to the loser; a draw pays 0.1 to
    player 1 and 0.5 to player 2.
    """
    if winner == 'The winner is PLAYER 1':
        payouts = (1, 0)
    elif winner == 'The winner is PLAYER 2':
        payouts = (0, 1)
    else:
        # Draw: small reward for both, slightly larger for player 2.
        payouts = (0.1, 0.5)
    player1.feedReward(payouts[0])
    player2.feedReward(payouts[1])
######## function to train the agent ###########
def agent_training(player1, player2, rounds):
    """Train two SARSA agents against each other via self-play.

    Parameters
    ----------
    player1, player2 : agents exposing NextStep/addState/reset (and
        feedReward via the module-level ``reward`` helper). player1 always
        plays 'X' (board value 1) and moves first; player2 plays 'O' (-1).
    rounds : int
        Number of complete games to play.

    Bug fix: player 2's moves were previously selected by
    ``player1.NextStep``, so player 2 never acted on — or learned from —
    its own Q-table. Also removed the unused ``avail`` copy and the dead
    commented-out plotting code.
    """
    p1_wins = 0
    p2_wins = 0
    ties = 0
    for _ in range(rounds):
        board = list(range(9))           # display board: index, 'X' or 'O'
        numeric_board = np.zeros(9)      # agent-facing board: 0 / 1 / -1
        open_positions = list(range(9))  # indices still playable
        winner = ''
        last_mark = ''
        while winner == '':
            # The two players alternate within a single pass; the inner loop
            # breaks out as soon as the game reaches a terminal state.
            for player, mark, value in ((player1, 'X', 1), (player2, 'O', -1)):
                move = player.NextStep(open_positions, numeric_board, value)
                numeric_board[move] = value
                board[move] = mark
                open_positions.remove(move)
                player.addState(getHash(numeric_board))
                last_mark = mark
                winner = check_winner(board)
                if winner != '':
                    break
        # Book-keeping and reward propagation for the finished game.
        if winner == 'No winner, its a tie':
            ties += 1
        elif last_mark == 'X':
            p1_wins += 1
        else:
            p2_wins += 1
        reward(player1, player2, winner)
        player1.reset()
        player2.reset()
######## End of function to train the agent ###########
'''
####### Unblock to train the Player1 and Player2 Agents ########
##### Agent at maximum difficulty (hard) level #####
hplayer1 = Sarsa_Agent("hard_p1")
hplayer2 = Sarsa_Agent("hard_p2")
print("training for hard level")
agent_training(hplayer1, hplayer2,50000)
print("done")
print("- - - - - - - - - - - - - - - - -")
hplayer1.savePolicy()
hplayer2.savePolicy()
######################################################
##### Agent at moderate difficulty (medium) level #####
mplayer1 = Sarsa_Agent("medium_p1")
mplayer2 = Sarsa_Agent("medium_p2")
print("training for medium level")
agent_training(mplayer1, mplayer2,3000)
print("done")
print("- - - - - - - - - - - - - - - - -")
mplayer1.savePolicy()
mplayer2.savePolicy()
######################################################
##### Agent at little difficulty (easy) level #####
eplayer1 = Sarsa_Agent("easy_p1")
eplayer2 = Sarsa_Agent("easy_p2")
print("training for easy level")
agent_training(eplayer1, eplayer2,100)
print("done")
eplayer1.savePolicy()
eplayer2.savePolicy()
######################################################
################################################################
'''
# Mutable module-level game state shared by the Flask routes below.
playerselect = 'X'   # mark chosen by the human player ('X' or 'O'); set in start()
difficulty = 'easy'  # requested agent difficulty level
board = 0            # display board; replaced by a list(range(9)) in start()
pos = 0              # open positions; replaced by a list in start()
sav = 0              # numeric board; replaced by an np.zeros(9) array in start()
player1 = 0          # agent playing 'X'; created in start()
player2 = 0          # agent playing 'O'; created in start()
# player1 = Sarsa_Agent("agent", exploration= 0)
# player2 = Sarsa_Agent("agent", exploration= 0)
whichplayer = 1
w = ''
@app.route("/")
def init():
    """Serve the tic-tac-toe game page."""
    return render_template('tictactoe.html')
@app.route("/start", methods=['POST', 'GET'])
def start():
    """Reset the game state and load agent policies for the requested difficulty.

    Returns the agent's opening move index as a string, or '-1' when the
    human (playing 'X') moves first.

    Fix: an unknown or missing ``difficulty`` value previously left
    ``policy_p1``/``policy_p2`` unbound and raised NameError; it now falls
    back to the easy policies.
    """
    global playerselect, board, sav, pos, player1, player2
    playerselect = request.form.get('userselect')  # did player choose X or O
    difficulty = request.form.get('difficulty')    # easy, medium, hard
    # Fresh game state.
    board = list(range(9))  # display board
    pos = board.copy()      # open positions
    sav = np.zeros(9)       # numeric board fed to the agents
    # Map the requested difficulty onto the pickled policy files; default
    # to 'easy' instead of crashing on an unexpected value.
    policies = {
        'easy': ("policy_easy_p1", "policy_easy_p2"),
        'medium': ("policy_medium_p1", "policy_medium_p2"),
        'hard': ("policy_hard_p1", "policy_hard_p2"),
    }
    policy_p1, policy_p2 = policies.get(difficulty, policies['easy'])
    player1 = Sarsa_Agent("agent", exploration=0)
    player2 = Sarsa_Agent("agent", exploration=0)
    player1.loadPolicy(policy_p1)
    player2.loadPolicy(policy_p2)
    place = -1
    if playerselect == 'O':
        # The agent plays 'X' and therefore opens the game.
        place = player1.NextStep(pos, sav, 1)
        board[place] = 'X'
        sav[place] = 1
        pos.remove(place)
    return str(place)
@app.route("/getPlace",methods = ['POST', 'GET'])
def getPlace():
    """Apply the human's move, let the agent respond, and report the outcome.

    Returns a dict with 'place' (the agent's move index, '-1' if the game
    ended on the human's move) and 'w' (empty string, 'Human wins!',
    'Agent wins!' or the tie message).
    """
    userinput = request.form.get('userinput') # which place user has clicked
    global playerselect
    global board
    global sav
    global pos
    global player1
    global player2
    index = int(userinput)
    # The agent uses whichever mark (and numeric value) the human did not pick.
    agent = 'X' if playerselect == 'O' else 'O'
    agent_number = 1 if playerselect == 'O' else -1
    human_number = -1 if playerselect == 'O' else 1
    # player1's policy was trained as the first ('X') mover, player2's as 'O'.
    player = player1 if playerselect == 'O' else player2
    board[index] = playerselect
    sav[index] = human_number
    pos.remove(index)
    w = check_winner(board)
    # print(w)
    # Any non-tie terminal state after the human's move means the human won.
    w = 'Human wins!' if w != '' and w != 'No winner, its a tie' else w
    place = -1
    if w == '' :
        place = player.NextStep(pos,sav,agent_number) # which place system is keeping
        board[place] = agent
        sav[place] = agent_number
        pos.remove(place)
        w = check_winner(board)
        w = 'Agent wins!' if w != '' and w != 'No winner, its a tie' else w
    resp = {}
    resp['place'] = str(place)
    resp['w'] = w
    return resp
if __name__ == "__main__":
app.run() | [
"flask.render_template",
"pickle.dump",
"flask.Flask",
"pickle.load",
"flask.request.form.get",
"numpy.zeros",
"numpy.random.uniform"
] | [((149, 164), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (154, 164), False, 'from flask import Flask, request\n'), ((7943, 7976), 'flask.render_template', 'render_template', (['"""tictactoe.html"""'], {}), "('tictactoe.html')\n", (7958, 7976), False, 'from flask import render_template\n'), ((8075, 8105), 'flask.request.form.get', 'request.form.get', (['"""userselect"""'], {}), "('userselect')\n", (8091, 8105), False, 'from flask import Flask, request\n'), ((8147, 8177), 'flask.request.form.get', 'request.form.get', (['"""difficulty"""'], {}), "('difficulty')\n", (8163, 8177), False, 'from flask import Flask, request\n'), ((8332, 8343), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (8340, 8343), True, 'import numpy as np\n'), ((9135, 9164), 'flask.request.form.get', 'request.form.get', (['"""userinput"""'], {}), "('userinput')\n", (9151, 9164), False, 'from flask import Flask, request\n'), ((3835, 3881), 'pickle.dump', 'pickle.dump', (['self.dict_state_value', 'write_file'], {}), '(self.dict_state_value, write_file)\n', (3846, 3881), False, 'import pickle\n'), ((4070, 4092), 'pickle.load', 'pickle.load', (['read_file'], {}), '(read_file)\n', (4081, 4092), False, 'import pickle\n'), ((4881, 4892), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (4889, 4892), True, 'import numpy as np\n'), ((1705, 1728), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1722, 1728), True, 'import numpy as np\n'), ((3632, 3648), 'pickle.load', 'pickle.load', (['obj'], {}), '(obj)\n', (3643, 3648), False, 'import pickle\n')] |
from simupy.block_diagram import BlockDiagram
import simupy_flight
import numpy as np
from nesc_testcase_helper import plot_nesc_comparisons, int_opts, benchmark
from nesc_testcase_helper import ft_per_m, kg_per_slug
# Vehicle mass properties (given in slug / slug-ft^2, converted to SI via
# kg_per_slug and ft_per_m).
Ixx = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Iyy = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Izz = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Ixy = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
Iyz = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
Izx = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
m = 1.0*kg_per_slug #slug
# Center-of-mass / moment-reference-center offsets (all zero here).
x = 0.
y = 0.
z = 0.
# Reference aerodynamic area and lengths.
S_A = 0.1963495/(ft_per_m**2)
b_l = 1.0
c_l = 1.0
a_l = b_l
# Initial conditions: geodetic position, NED velocity, Euler angles and
# body rates (angles in degrees converted to radians, lengths in ft
# converted to m).
lat_ic = 0.*np.pi/180
long_ic = 0.*np.pi/180
h_ic = 0./ft_per_m
V_N_ic = 1000./ft_per_m
V_E_ic = 000./ft_per_m
V_D_ic = -1000./ft_per_m
psi_ic = 0.*np.pi/180
theta_ic = 0.*np.pi/180
phi_ic = 0.*np.pi/180
p_b_ic = 0.*np.pi/180
q_b_ic = 0.*np.pi/180
r_b_ic = 0.*np.pi/180
# omega_X_ic = 0.004178073*np.pi/180
# omega_Y_ic = 0.*np.pi/180
# omega_Z_ic = 0.*np.pi/180
# Rotating oblate Earth with J2 gravity, constant (zero) winds and the 1976
# standard atmosphere.
planet = simupy_flight.Planet(
    gravity=simupy_flight.earth_J2_gravity,
    winds=simupy_flight.get_constant_winds(),
    atmosphere=simupy_flight.atmosphere_1976,
    planetodetics=simupy_flight.Planetodetic(
        a=simupy_flight.earth_equitorial_radius,
        omega_p=simupy_flight.earth_rotation_rate,
        f=simupy_flight.earth_f
    )
)
# Constant-drag sphere-like vehicle (CD_b=0.1), no other aero coefficients.
vehicle = simupy_flight.Vehicle(base_aero_coeffs=simupy_flight.get_constant_aero(CD_b=0.1), m=m, I_xx=Ixx, I_yy=Iyy, I_zz=Izz, I_xy=Ixy, I_yz=Iyz, I_xz=Izx, x_com=x, y_com=y, z_com=z, x_mrc=x, y_mrc=y, z_mrc=z, S_A=S_A, a_l=a_l, b_l=b_l, c_l=c_l, d_l=0.,)
# Wire planet and vehicle outputs to each other's inputs.
BD = BlockDiagram(planet, vehicle)
BD.connect(planet, vehicle, inputs=np.arange(planet.dim_output))
BD.connect(vehicle, planet, inputs=np.arange(vehicle.dim_output))
planet.initial_condition = planet.ic_from_planetodetic(
    lamda_E=long_ic, phi_E=lat_ic, h=h_ic,
    V_N=V_N_ic, V_E=V_E_ic, V_D=V_D_ic,
    psi=psi_ic, theta=theta_ic, phi=phi_ic,
    p_B=p_b_ic, q_B=q_b_ic, r_B=r_b_ic,)
# planet.initial_condition[-3:] = omega_X_ic, omega_Y_ic, omega_Z_ic
# NOTE(review): the second-to-last state component is zeroed here —
# presumably an angular-rate override for this test case; confirm against
# the NESC case definition.
planet.initial_condition[-2] = 0.
# Run the 30 s simulation, timing it, then plot against the NESC reference.
with benchmark() as b:
    res = BD.simulate(30, integrator_options=int_opts)
    b.tfinal = res.t[-1]
plot_nesc_comparisons(res, '10')
| [
"simupy_flight.get_constant_aero",
"simupy.block_diagram.BlockDiagram",
"simupy_flight.get_constant_winds",
"nesc_testcase_helper.benchmark",
"nesc_testcase_helper.plot_nesc_comparisons",
"simupy_flight.Planetodetic",
"numpy.arange"
] | [((1585, 1614), 'simupy.block_diagram.BlockDiagram', 'BlockDiagram', (['planet', 'vehicle'], {}), '(planet, vehicle)\n', (1597, 1614), False, 'from simupy.block_diagram import BlockDiagram\n'), ((2179, 2211), 'nesc_testcase_helper.plot_nesc_comparisons', 'plot_nesc_comparisons', (['res', '"""10"""'], {}), "(res, '10')\n", (2200, 2211), False, 'from nesc_testcase_helper import plot_nesc_comparisons, int_opts, benchmark\n'), ((2080, 2091), 'nesc_testcase_helper.benchmark', 'benchmark', ([], {}), '()\n', (2089, 2091), False, 'from nesc_testcase_helper import plot_nesc_comparisons, int_opts, benchmark\n'), ((1054, 1088), 'simupy_flight.get_constant_winds', 'simupy_flight.get_constant_winds', ([], {}), '()\n', (1086, 1088), False, 'import simupy_flight\n'), ((1154, 1294), 'simupy_flight.Planetodetic', 'simupy_flight.Planetodetic', ([], {'a': 'simupy_flight.earth_equitorial_radius', 'omega_p': 'simupy_flight.earth_rotation_rate', 'f': 'simupy_flight.earth_f'}), '(a=simupy_flight.earth_equitorial_radius, omega_p\n =simupy_flight.earth_rotation_rate, f=simupy_flight.earth_f)\n', (1180, 1294), False, 'import simupy_flight\n'), ((1372, 1413), 'simupy_flight.get_constant_aero', 'simupy_flight.get_constant_aero', ([], {'CD_b': '(0.1)'}), '(CD_b=0.1)\n', (1403, 1413), False, 'import simupy_flight\n'), ((1650, 1678), 'numpy.arange', 'np.arange', (['planet.dim_output'], {}), '(planet.dim_output)\n', (1659, 1678), True, 'import numpy as np\n'), ((1715, 1744), 'numpy.arange', 'np.arange', (['vehicle.dim_output'], {}), '(vehicle.dim_output)\n', (1724, 1744), True, 'import numpy as np\n')] |
#!/usr/bin/python
#------------------------------------------------------------------------------
# Name: plotUpperLimits.py
# Author: <NAME>, 20150212
# Last Modified: 20150212
#This is to read upper limits files and plot them so another Python script
# createHTML.py, can display them at the end of a search summary.
# If they do not exist, it should gracefully quit, giving a placeholder so
# the parent script doesn't fail miserably.
#------------------------------------------------------------------------------
import numpy as np
import matplotlib as mpl
mpl.use('Agg') # This is so we can use matplotlib easily without setting $DISPLAY on remote servers
from matplotlib import pyplot as plt
import xml.etree.ElementTree as ET
from math import pow
import os
############################################################
#1) Read setup, upper limit and veto bands
############################################################
# Parse the search-setup XML and pull out the target parameters.
tree = ET.parse( open( "search_setup.xml",'r') )
root = tree.getroot()
#targetName = root[0].find("target").text
rightAscension = root[0].find("right_ascension").text
declination = root[0].find("declination").text
# Spin-down age: stored in seconds, converted here to years (365.25-day year).
tau = float( root[0].find('spindown_age').text ) / (365.25*24*3600)
# Distance: converted by dividing by 3.08567758e19 — presumably meters to
# kiloparsecs (1 kpc ~ 3.0857e19 m); confirm the stored unit.
distance = float( root[0].find('distance').text ) / 3.08567758e19
Izz = float( root[0].find('moment_of_inertia').text )
print("Right Ascension: " + rightAscension )
print("Declination: " + declination )
def h0_age( tau, distance, Izz ):
    """Return the spin-down (age-based) upper limit h0_age.

    Computed from the supernova remnant's age *tau* (years), *distance*
    (kpc) and moment of inertia *Izz* (normalized against 1e38 kg m^2).
    """
    distance_factor = 3.4 / distance
    spindown_factor = ((300.0 / tau) * (Izz / 1.0e38)) ** 0.5
    return 1.2e-24 * distance_factor * spindown_factor
# NOTE(review): this rebinding shadows the h0_age function with its result,
# so the function cannot be called again below this line.
h0_age = h0_age( tau, distance, Izz )
# Test to see if upper limits exist yet; fail gracefully and make a placeholder
# if not.
# NOTE(review): no placeholder is actually created in the missing-file case —
# the script simply does nothing; confirm whether createHTML.py tolerates that.
if os.path.isfile("upper_limit_bands.xml"):
    # Collect per-band frequency, bandwidth and h0 upper limit.
    tree = ET.parse( open( "upper_limit_bands.xml",'r') )
    root = tree.getroot()
    band_freq = []
    band_width = [] #Usually constant, but we'll collect it up anyway
    h0_ul = []
    for band in root.iter('upper_limit_band'):
        h0_ul.append( float( band.find('upper_limit_h0').text ) )
        band_freq.append( float( band.find('freq').text ) )
        band_width.append( float( band.find('band').text ) )
    h0_ul = np.array( h0_ul )
    band_freq = np.array( band_freq )
    band_width = np.array( band_width )
    # Get veto bands
    tree = ET.parse( open( "veto_bands.xml",'r') )
    root = tree.getroot()
    veto_freq = []
    veto_width = [] #Usually constant, but we'll collect it up anyway
    for band in root.iter('veto_band'):
        veto_freq.append( float( band.find('freq').text ) )
        veto_width.append( float( band.find('band').text ) )
    veto_freq = np.array( veto_freq )
    veto_width = np.array( veto_width )
    #############################################################################################
    # Plot upper limits
    #############################################################################################
    figDir=os.path.join(os.getcwd(), 'figures')
    if not os.path.isdir(figDir):
        os.mkdir(figDir)
    plt.figure(57)
    # Upper limits are plotted at band centers (freq + width/2).
    plt.plot(band_freq + 0.5*band_width, h0_ul, "-ko", label="Upper limits")
    plt.plot(band_freq + 0.5*band_width, [h0_age for x in band_freq], "-b", label="Age-based upper limit")
    yPlotMax = 1.1*np.max([ max(h0_ul), h0_age ] )
    # Vetoed bands are marked along the top edge of the plot.
    plt.plot(veto_freq, [yPlotMax for x in veto_freq], "-or", label="Vetoed bands")
    plt.axis([min(band_freq), max(band_freq), 0.9*np.min([ min(h0_ul), h0_age ]), yPlotMax ])
    xForPlot = np.linspace(min(band_freq), max(band_freq+band_width), 5) # Make 5 marks on abscissa and ordinate
    yForPlot = np.linspace(0.9*np.min([ min(h0_ul), h0_age ]) , 1.1*np.max([ max(h0_ul), h0_age ] ), 5)
    x2DecPlcs = ['%.2f' % a for a in xForPlot ]
    y2DecPlcs = ['%.3g' % a for a in yForPlot ]
    plt.xticks(xForPlot, x2DecPlcs)
    plt.yticks(yForPlot, y2DecPlcs)
    plt.title("Estimated upper limits")
    plt.xlabel("Frequency (Hz)")
    plt.ylabel("$h_0$")
    legend = plt.legend(loc='best', shadow=True)
    frame = legend.get_frame() # Some probably overly sophisticated additions to the legend
    frame.set_facecolor('0.90')
    #plt.draw()
    plt.savefig( os.path.join(figDir, "upper_limit_plot.png" ), dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="png", transparent=False, bbox_inches="tight", pad_inches=0.1, frameon=None)
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"math.pow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"numpy.array",
"matplotlib.pyplot.figure",
"os.path.isdir",
"matplotlib.pyplot.yticks",
"os.mkdir",
"... | [((583, 597), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (590, 597), True, 'import matplotlib as mpl\n'), ((1851, 1890), 'os.path.isfile', 'os.path.isfile', (['"""upper_limit_bands.xml"""'], {}), "('upper_limit_bands.xml')\n", (1865, 1890), False, 'import os\n'), ((2319, 2334), 'numpy.array', 'np.array', (['h0_ul'], {}), '(h0_ul)\n', (2327, 2334), True, 'import numpy as np\n'), ((2353, 2372), 'numpy.array', 'np.array', (['band_freq'], {}), '(band_freq)\n', (2361, 2372), True, 'import numpy as np\n'), ((2392, 2412), 'numpy.array', 'np.array', (['band_width'], {}), '(band_width)\n', (2400, 2412), True, 'import numpy as np\n'), ((2777, 2796), 'numpy.array', 'np.array', (['veto_freq'], {}), '(veto_freq)\n', (2785, 2796), True, 'import numpy as np\n'), ((2816, 2836), 'numpy.array', 'np.array', (['veto_width'], {}), '(veto_width)\n', (2824, 2836), True, 'import numpy as np\n'), ((3168, 3182), 'matplotlib.pyplot.figure', 'plt.figure', (['(57)'], {}), '(57)\n', (3178, 3182), True, 'from matplotlib import pyplot as plt\n'), ((3187, 3261), 'matplotlib.pyplot.plot', 'plt.plot', (['(band_freq + 0.5 * band_width)', 'h0_ul', '"""-ko"""'], {'label': '"""Upper limits"""'}), "(band_freq + 0.5 * band_width, h0_ul, '-ko', label='Upper limits')\n", (3195, 3261), True, 'from matplotlib import pyplot as plt\n'), ((3263, 3371), 'matplotlib.pyplot.plot', 'plt.plot', (['(band_freq + 0.5 * band_width)', '[h0_age for x in band_freq]', '"""-b"""'], {'label': '"""Age-based upper limit"""'}), "(band_freq + 0.5 * band_width, [h0_age for x in band_freq], '-b',\n label='Age-based upper limit')\n", (3271, 3371), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3500), 'matplotlib.pyplot.plot', 'plt.plot', (['veto_freq', '[yPlotMax for x in veto_freq]', '"""-or"""'], {'label': '"""Vetoed bands"""'}), "(veto_freq, [yPlotMax for x in veto_freq], '-or', label='Vetoed bands')\n", (3429, 3500), True, 'from matplotlib import pyplot as plt\n'), ((3910, 3941), 
'matplotlib.pyplot.xticks', 'plt.xticks', (['xForPlot', 'x2DecPlcs'], {}), '(xForPlot, x2DecPlcs)\n', (3920, 3941), True, 'from matplotlib import pyplot as plt\n'), ((3945, 3976), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yForPlot', 'y2DecPlcs'], {}), '(yForPlot, y2DecPlcs)\n', (3955, 3976), True, 'from matplotlib import pyplot as plt\n'), ((3981, 4016), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated upper limits"""'], {}), "('Estimated upper limits')\n", (3990, 4016), True, 'from matplotlib import pyplot as plt\n'), ((4020, 4048), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4030, 4048), True, 'from matplotlib import pyplot as plt\n'), ((4052, 4071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$h_0$"""'], {}), "('$h_0$')\n", (4062, 4071), True, 'from matplotlib import pyplot as plt\n'), ((4085, 4120), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(True)'}), "(loc='best', shadow=True)\n", (4095, 4120), True, 'from matplotlib import pyplot as plt\n'), ((1672, 1709), 'math.pow', 'pow', (['(300.0 / tau * (Izz / 1e+38))', '(0.5)'], {}), '(300.0 / tau * (Izz / 1e+38), 0.5)\n', (1675, 1709), False, 'from math import pow\n'), ((3083, 3094), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3092, 3094), False, 'import os\n'), ((3117, 3138), 'os.path.isdir', 'os.path.isdir', (['figDir'], {}), '(figDir)\n', (3130, 3138), False, 'import os\n'), ((3147, 3163), 'os.mkdir', 'os.mkdir', (['figDir'], {}), '(figDir)\n', (3155, 3163), False, 'import os\n'), ((4277, 4321), 'os.path.join', 'os.path.join', (['figDir', '"""upper_limit_plot.png"""'], {}), "(figDir, 'upper_limit_plot.png')\n", (4289, 4321), False, 'import os\n')] |
# Siconos solvers
import siconos.numerics as sn
# fclib interface
import siconos.fclib as fcl
# h5py
import h5py
import numpy as np
import scipy.linalg as la
# --- Create a friction contact problem ---
# Case 1 : from scratch
# Number of contacts
nc = 3
# W matrix: 3*nc x 3*nc Delassus operator, filled here as the identity.
w_shape = (3 * nc, 3 * nc)
W = np.zeros(w_shape, dtype=np.float64)
W.flat[...] = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
# q vector (one 3-component block per contact)
q = np.zeros(3 * nc, dtype=np.float64)
q[...] = [-1, 1, 3, -1, 1, 3, -1, 1, 3]
# Friction coeff (one per contact)
mu = [0.1, 0.1, 0.1]
fc3d = sn.FrictionContactProblem(3, nc, W, q, mu)
# Case 2 : use fclib-library, read hdf5 file
# --- Set solver options ---
# Check Friction_cst.h for a list of solvers ids.
solver_options = sn.SolverOptions(sn.SICONOS_FRICTION_3D_NSGS)#sn.SICONOS_FRICTION_3D_FPP)
# Tolerance: 100 machine epsilons.
eps = np.finfo(np.float64).eps
solver_options.dparam[0] = 100 * eps
# --- Set unknowns ---
velocity = np.zeros(3 * nc, dtype=np.float64)
reaction = np.zeros_like(velocity)
# --- Call solver driver with predefined options ---
# Solves in place: reaction and velocity are filled with the solution.
sn.fc3d_driver(fc3d, reaction, velocity, solver_options)
| [
"siconos.numerics.FrictionContactProblem",
"siconos.numerics.fc3d_driver",
"siconos.numerics.SolverOptions",
"numpy.zeros",
"numpy.finfo",
"numpy.zeros_like"
] | [((299, 334), 'numpy.zeros', 'np.zeros', (['w_shape'], {'dtype': 'np.float64'}), '(w_shape, dtype=np.float64)\n', (307, 334), True, 'import numpy as np\n'), ((610, 644), 'numpy.zeros', 'np.zeros', (['(3 * nc)'], {'dtype': 'np.float64'}), '(3 * nc, dtype=np.float64)\n', (618, 644), True, 'import numpy as np\n'), ((732, 774), 'siconos.numerics.FrictionContactProblem', 'sn.FrictionContactProblem', (['(3)', 'nc', 'W', 'q', 'mu'], {}), '(3, nc, W, q, mu)\n', (757, 774), True, 'import siconos.numerics as sn\n'), ((923, 968), 'siconos.numerics.SolverOptions', 'sn.SolverOptions', (['sn.SICONOS_FRICTION_3D_NSGS'], {}), '(sn.SICONOS_FRICTION_3D_NSGS)\n', (939, 968), True, 'import siconos.numerics as sn\n'), ((1101, 1135), 'numpy.zeros', 'np.zeros', (['(3 * nc)'], {'dtype': 'np.float64'}), '(3 * nc, dtype=np.float64)\n', (1109, 1135), True, 'import numpy as np\n'), ((1147, 1170), 'numpy.zeros_like', 'np.zeros_like', (['velocity'], {}), '(velocity)\n', (1160, 1170), True, 'import numpy as np\n'), ((1226, 1282), 'siconos.numerics.fc3d_driver', 'sn.fc3d_driver', (['fc3d', 'reaction', 'velocity', 'solver_options'], {}), '(fc3d, reaction, velocity, solver_options)\n', (1240, 1282), True, 'import siconos.numerics as sn\n'), ((1004, 1024), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (1012, 1024), True, 'import numpy as np\n')] |
# Standard library imports
from pprint import pprint
# Local application imports
from gym_snape.game import Game
from gym_snape.game.pets import Pet
from gym_snape.game.food import Food
# Third-party imports
import gym
from gym import spaces
import numpy as np
class Snape(gym.Env):
    """Gym environment wrapping a Super-Auto-Pets-like ``Game`` instance.

    Exposes one flat Discrete action space covering roll / freeze / buy /
    sell / swap / merge / end-turn, and a Dict observation space over the
    game counters plus per-slot deck and shop states.
    """
    metadata = {'render.modes': ['ansi']}
    def __init__(self, display: bool = False):
        super().__init__()
        # Create a game instance
        self.game = Game(display=display)
        # Initial opponent is no one
        self._opponent = None
        """
        N = total number of shop slots (empty and non-empty)
        M = total number of deck slots (empty and non-empty)
        From the human POV, game controls can be conceptualized as discrete
        action spaces:
        (1) Roll shop: Discrete 2
        - Pressed + no-op
        (2) Freeze shop slot: Discrete N+1
        - N slots + no-op
        (3) Buy from shop: Discrete (N*M)+1
        - N*M pairs of shop slots and deck slots + no-op
        (4) Sell from deck: Discrete M+1
        - M deck slots + no-op
        (5) Swap two deck slots: Discrete (M*M)+1
        - M*M pairs of source/destination deck slots + no-op
        (6) Merge two deck slots: Discrete (M*M)+1
        - M*M pairs of source/destination deck slots + no-op
        (7) End turn (begin combat): Discrete 2
        - Pressed + no-op
        Since we want an agent to take exactly one action at a time, we can
        combine all these sub-spaces into one large discrete action space. This
        also removes the need for no-op actions.
        So the entire action space will be:
        Discrete (1)+(N)+(N*M)+(M)+(M*M)+(M*M)+(1)
        """
        # Determine N and M
        self._n_shop_slots = len(self.game.shop)
        self._n_deck_slots = len(self.game.deck)
        # Define the ranges of each sub-space; each range starts where the
        # previous one stopped, so action ids partition [0, end_turn_action].
        self.roll_action = 0
        self.freeze_actions = range(1, 1+self._n_shop_slots)
        self.buy_actions = range(
            self.freeze_actions.stop,
            self.freeze_actions.stop + self._n_shop_slots * self._n_deck_slots
        )
        self.sell_actions = range(
            self.buy_actions.stop,
            self.buy_actions.stop + self._n_deck_slots
        )
        self.swap_actions = range(
            self.sell_actions.stop,
            self.sell_actions.stop + self._n_deck_slots * self._n_deck_slots
        )
        self.merge_actions = range(
            self.swap_actions.stop,
            self.swap_actions.stop + self._n_deck_slots * self._n_deck_slots
        )
        self.end_turn_action = self.merge_actions.stop
        # Define the entire action space
        self.action_space = spaces.Discrete(self.end_turn_action+1)
        """
        Observations can be conceptualized as multiple observation spaces:
        (1) Game turn number: Discrete MAX_INTEGER
        (2) Number of lives: Discrete N_LIVES
        (3) Number of trophies: Discrete N_TROPHIES
        (4) Amount of gold: Discrete MAX_GOLD_VALUE
        (5) Actions taken: Discrete MAX_INTEGER
        (5) Deck: Dict
        (6) Shop: Dict
        The keys of the deck dictionary will be the slot number. Corresponding
        to each slot are:
        - type: 0 if empty, 1 if pet
        - id: id number of this pet type
        - health: health points of this pet
        - health_buff: (temp) health buff points on this pet
        - attack: attack points of this pet
        - attack_buff: (temp) attack buff points on this pet
        - effect_id: id of effect type applied to this pet
        - experience: experience points
        - level: current level of this pet
        - gold_cost: resale value of this pet
        The keys of the shop dictionary will be the slot number. Corresponding
        to each slot are:
        - type: 0 if empty, 1 if pet, 2 if food
        - id: id of this item
        - health: health if pet, 0 if food
        - health_buff: (temp) health buff if pet, 0 if food
        - attack: attack if pet, 0 if food
        - attack_buff: (temp) attack buff if pet, 0 if food
        - effect_id: effect if pet, -1 if food
        - gold_cost: purchase price of this item
        - is_frozen: 0 if false, 1 if true
        """
        # The following values are taken from the game's default configuration
        INT_MAX = np.iinfo(np.int32).max
        max_gold_value = 3
        max_health = 50
        max_attack = 50
        max_experience = 3
        max_level = 3
        # Constants for slot type
        self.IS_EMPTY = 0
        self.IS_PET = 1
        self.IS_FOOD = 2
        # Constants for frozen/unfrozen
        # NOTE(review): these look inverted relative to _get_obs, which emits
        # int(slot.is_frozen) — i.e. 1 for frozen — while IS_FROZEN is 0 here.
        # Left unchanged in case external code depends on the current values.
        self.IS_FROZEN = 0
        self.NOT_FROZEN = 1
        # Define the deck subspace
        self.deck_space = spaces.Dict(dict([
            (i, spaces.Dict({
                'type': spaces.Discrete(2),
                'id': spaces.Discrete(INT_MAX),
                'health': spaces.Discrete(max_health+1),
                'health_buff': spaces.Discrete(max_health+1),
                'attack': spaces.Discrete(max_attack+1),
                'attack_buff': spaces.Discrete(max_attack+1),
                'effect_id': spaces.Discrete(INT_MAX),
                'experience': spaces.Discrete(max_experience+1),
                'level': spaces.Discrete(max_level+1),
                'gold_cost': spaces.Discrete(max_gold_value+1)
            })) for i in range(self._n_deck_slots)
        ]))
        # Define the shop subspace
        self.shop_space = spaces.Dict(dict([
            (i, spaces.Dict({
                'type': spaces.Discrete(3),
                'id': spaces.Discrete(INT_MAX),
                'health': spaces.Discrete(max_health+1),
                'health_buff': spaces.Discrete(max_health+1),
                'attack': spaces.Discrete(max_attack+1),
                'attack_buff': spaces.Discrete(max_attack+1),
                'effect_id': spaces.Discrete(INT_MAX),
                'gold_cost': spaces.Discrete(max_gold_value+1),
                'is_frozen': spaces.Discrete(2),
            })) for i in range(self._n_shop_slots)
        ]))
        # The following values are taken from the game's default configuration
        n_max_lives = 10
        n_max_trophies = 10
        gold_per_turn = 10
        # Define the entire observation space
        self.observation_space = spaces.Dict({
            'n_turns': spaces.Discrete(INT_MAX),
            'n_lives': spaces.Discrete(n_max_lives+1),
            'n_trophies': spaces.Discrete(n_max_trophies+1),
            'n_gold': spaces.Discrete(gold_per_turn+1),
            'n_actions': spaces.Discrete(INT_MAX),
            'deck': self.deck_space,
            'shop': self.shop_space
        })
        # Initial game state
        self.state = self._get_obs()
        # Check that the initial observation is valid
        if not self.observation_space.contains(self.state):
            print('Invalid initial state')
            pprint(self.state)
    def assign_opponent(self, opponent):
        """Assign an opponent (environment object) to this environment."""
        self._opponent = opponent
    def step(self, action):
        """Apply one action to the game and return (obs, reward, done, info)."""
        # Check that action is valid
        assert self.action_space.contains(action), \
            f'Action {action} is invalid; action space range is {self.action_space}'
        # Process actions: map the flat action id back to its sub-space by
        # range membership, then decode the slot index / index pair.
        if action == self.roll_action:
            self.game.roll()
        elif action in self.freeze_actions:
            index = action - self.freeze_actions.start
            self.game.freeze(index)
        elif action in self.buy_actions:
            a = action - self.buy_actions.start
            indices = divmod(a, self._n_deck_slots)
            self.game.buy(indices)
        elif action in self.sell_actions:
            index = action - self.sell_actions.start
            self.game.sell(index)
        elif action in self.swap_actions:
            a = action - self.swap_actions.start
            indices = divmod(a, self._n_deck_slots)
            self.game.swap(indices)
        elif action in self.merge_actions:
            a = action - self.merge_actions.start
            indices = divmod(a, self._n_deck_slots)
            self.game.merge(indices)
        elif action == self.end_turn_action:
            if self._opponent:
                self.game.challenge(self._opponent.game)
            else:
                raise AttributeError('opponent has not yet been assigned')
        # Get new observation and check for end-of-game
        observation = self._get_obs()
        done = self.game.game_over
        # Small negative reward for each action taken
        reward = -1
        # Extra reward for game won or lost
        if self.game.won:
            reward = 100
        elif self.game.lost:
            reward = -100
        # Diagnostic information
        info = {}
        return observation, reward, done, info
    def reset(self):
        """Discard the current game, start a fresh one, and return its observation."""
        self.game = Game() # create a new game
        return self._get_obs()
    def _get_obs(self):
        """Pack the current game state into the observation-space dict layout."""
        # Get deck state
        deck_state = {}
        for i, pet in enumerate(self.game.deck):
            if pet:
                deck_state[i] = {
                    'type': self.IS_PET,
                    'id': pet.id,
                    'health': pet.health,
                    'health_buff': pet.health_buff,
                    'attack': pet.attack,
                    'attack_buff': pet.attack_buff,
                    'effect_id': pet.effect_id,
                    'experience': pet.experience,
                    'level': pet.level,
                    'gold_cost': pet.gold_cost,
                }
            else:
                # Empty deck slot: all-zero entry.
                deck_state[i] = {
                    'type': self.IS_EMPTY,
                    'id': 0,
                    'health': 0,
                    'health_buff': 0,
                    'attack': 0,
                    'attack_buff': 0,
                    'effect_id': 0,
                    'experience': 0,
                    'level': 0,
                    'gold_cost': 0
                }
        # Get shop state
        shop_state = {}
        for i, slot in enumerate(self.game.shop):
            if isinstance(slot.item, Pet):
                pet = slot.item
                shop_state[i] = {
                    'type': self.IS_PET,
                    'id': pet.id,
                    'health': pet.health,
                    'health_buff': pet.health_buff,
                    'attack': pet.attack,
                    'attack_buff': pet.attack_buff,
                    'effect_id': pet.effect_id,
                    'gold_cost': pet.gold_cost,
                    'is_frozen': int(slot.is_frozen)
                }
            elif isinstance(slot.item, Food):
                food = slot.item
                # NOTE(review): effect_id is emitted as 0 for food, although the
                # __init__ docstring says -1 (which Discrete could not hold anyway).
                shop_state[i] = {
                    'type': self.IS_FOOD,
                    'id': food.id,
                    'health': food.health,
                    'health_buff': 0,
                    'attack': food.attack,
                    'attack_buff': 0,
                    'effect_id': 0,
                    'gold_cost': food.gold_cost,
                    'is_frozen': int(slot.is_frozen)
                }
            else:
                shop_state[i] = {
                    'type': self.IS_EMPTY,
                    'id': 0,
                    'health': 0,
                    'health_buff': 0,
                    'attack': 0,
                    'attack_buff': 0,
                    'effect_id': 0,
                    'gold_cost': 0,
                    'is_frozen': int(slot.is_frozen)
                }
        # Return observation
        observation = {
            'n_turns': self.game.turn,
            'n_lives': self.game.lives,
            'n_trophies': self.game.trophies,
            'n_gold': self.game.gold,
            'n_actions': self.game.actions_taken,
            'deck': deck_state,
            'shop': shop_state
        }
        return observation
    def render(self, mode='ansi'):
        """Render the game; only the 'ansi' (plain text) mode is implemented."""
        if mode == 'ansi':
            print(str(self.game))
        else:
            super().render(mode=mode)
    def close(self):
        """No resources to release."""
        pass
| [
"gym_snape.game.Game",
"numpy.iinfo",
"gym.spaces.Discrete",
"pprint.pprint"
] | [((461, 482), 'gym_snape.game.Game', 'Game', ([], {'display': 'display'}), '(display=display)\n', (465, 482), False, 'from gym_snape.game import Game\n'), ((2785, 2826), 'gym.spaces.Discrete', 'spaces.Discrete', (['(self.end_turn_action + 1)'], {}), '(self.end_turn_action + 1)\n', (2800, 2826), False, 'from gym import spaces\n'), ((9148, 9154), 'gym_snape.game.Game', 'Game', ([], {}), '()\n', (9152, 9154), False, 'from gym_snape.game import Game\n'), ((4541, 4559), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (4549, 4559), True, 'import numpy as np\n'), ((7154, 7172), 'pprint.pprint', 'pprint', (['self.state'], {}), '(self.state)\n', (7160, 7172), False, 'from pprint import pprint\n'), ((6584, 6608), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), '(INT_MAX)\n', (6599, 6608), False, 'from gym import spaces\n'), ((6633, 6665), 'gym.spaces.Discrete', 'spaces.Discrete', (['(n_max_lives + 1)'], {}), '(n_max_lives + 1)\n', (6648, 6665), False, 'from gym import spaces\n'), ((6691, 6726), 'gym.spaces.Discrete', 'spaces.Discrete', (['(n_max_trophies + 1)'], {}), '(n_max_trophies + 1)\n', (6706, 6726), False, 'from gym import spaces\n'), ((6748, 6782), 'gym.spaces.Discrete', 'spaces.Discrete', (['(gold_per_turn + 1)'], {}), '(gold_per_turn + 1)\n', (6763, 6782), False, 'from gym import spaces\n'), ((6807, 6831), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), '(INT_MAX)\n', (6822, 6831), False, 'from gym import spaces\n'), ((5029, 5047), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (5044, 5047), False, 'from gym import spaces\n'), ((5071, 5095), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), '(INT_MAX)\n', (5086, 5095), False, 'from gym import spaces\n'), ((5123, 5154), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_health + 1)'], {}), '(max_health + 1)\n', (5138, 5154), False, 'from gym import spaces\n'), ((5185, 5216), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_health + 1)'], {}), 
'(max_health + 1)\n', (5200, 5216), False, 'from gym import spaces\n'), ((5242, 5273), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_attack + 1)'], {}), '(max_attack + 1)\n', (5257, 5273), False, 'from gym import spaces\n'), ((5304, 5335), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_attack + 1)'], {}), '(max_attack + 1)\n', (5319, 5335), False, 'from gym import spaces\n'), ((5364, 5388), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), '(INT_MAX)\n', (5379, 5388), False, 'from gym import spaces\n'), ((5420, 5455), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_experience + 1)'], {}), '(max_experience + 1)\n', (5435, 5455), False, 'from gym import spaces\n'), ((5480, 5510), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_level + 1)'], {}), '(max_level + 1)\n', (5495, 5510), False, 'from gym import spaces\n'), ((5539, 5574), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_gold_value + 1)'], {}), '(max_gold_value + 1)\n', (5554, 5574), False, 'from gym import spaces\n'), ((5771, 5789), 'gym.spaces.Discrete', 'spaces.Discrete', (['(3)'], {}), '(3)\n', (5786, 5789), False, 'from gym import spaces\n'), ((5813, 5837), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), '(INT_MAX)\n', (5828, 5837), False, 'from gym import spaces\n'), ((5865, 5896), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_health + 1)'], {}), '(max_health + 1)\n', (5880, 5896), False, 'from gym import spaces\n'), ((5927, 5958), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_health + 1)'], {}), '(max_health + 1)\n', (5942, 5958), False, 'from gym import spaces\n'), ((5984, 6015), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_attack + 1)'], {}), '(max_attack + 1)\n', (5999, 6015), False, 'from gym import spaces\n'), ((6046, 6077), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_attack + 1)'], {}), '(max_attack + 1)\n', (6061, 6077), False, 'from gym import spaces\n'), ((6106, 6130), 'gym.spaces.Discrete', 'spaces.Discrete', (['INT_MAX'], {}), 
'(INT_MAX)\n', (6121, 6130), False, 'from gym import spaces\n'), ((6161, 6196), 'gym.spaces.Discrete', 'spaces.Discrete', (['(max_gold_value + 1)'], {}), '(max_gold_value + 1)\n', (6176, 6196), False, 'from gym import spaces\n'), ((6225, 6243), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (6240, 6243), False, 'from gym import spaces\n')] |
from __future__ import print_function
import subprocess
import tempfile
import numpy as np
import warnings
import astropy.units as u
_quantity = u.Quantity
from collections import defaultdict
import os
import sys
from . import utils
from . import synthspec
from .utils import QuantityOff,ImmutableDict,unitless,grouper
from .base_class import RadiativeTransferApproximator
from astropy import units as u
from astropy import constants
from astropy import log
import astropy.table
PYVERSION = 3 if sys.version_info >= (3,0) else 2
__all__ = ['pyradex', 'write_input', 'parse_outfile', 'call_radex', 'Radex',
'density_distribution']
def pyradex(executable='radex', minfreq=100, maxfreq=130,
            collider_densities={'H2':1}, debug=False, delete_tempfile=True,
            return_dict=False, **kwargs):
    """
    Run the standalone RADEX executable for one parameter set and return the
    parsed results.

    Parameters
    ----------
    executable : str
        Full path to the RADEX executable
    minfreq : float
        Lowest frequency line to store, in GHz
        (note: any astropy.unit spectroscopic unit is also allowed)
    maxfreq : float
        Highest frequency line to store
    collider_densities : dict
        Collider names and their number densities
        If the molecule specified has both o-H2 and p-H2, you will get a
        WARNING if you specify 'H2'
        An ortho/para example:
            collider_densities = {'oH2':900, 'pH2':100}
        which will yield H2 = 1000

    See write_input for additional parameters

    Returns
    -------
    An astropy table containing the RADEX returns

    .. WARNING:: If RADEX spits out *******, it will be replaced with -999
    """
    warnings.warn("pyradex is deprecated: Use pyradex.Radex instead if you can.")

    inp_file, out_file = write_input(minfreq=minfreq, maxfreq=maxfreq,
                                     delete_tempfile=delete_tempfile,
                                     collider_densities=collider_densities,
                                     **kwargs)

    log_file = call_radex(executable, inp_file.name, debug=debug,
                          delete_tempfile=delete_tempfile)

    check_logfile(log_file.name)

    result = parse_outfile(out_file.name, return_dict=return_dict)

    if debug:
        # echo both files so the user can reproduce the run by hand
        for label, handle in (("Input:", inp_file), ("Output:", out_file)):
            with open(handle.name, 'r') as stream:
                print(label)
                print(stream.read())

    for handle in (inp_file, out_file, log_file):
        handle.close()

    return result
def check_logfile(logfilename):
    """
    Inspect a RADEX log file and emit a Python warning if RADEX reported
    that it assumed a thermal ortho/para H2 ratio.
    """
    with open(logfilename, 'r') as log_stream:
        contents = log_stream.read()
    if "Warning: Assuming thermal o/p ratio" in contents:
        warnings.warn("Assumed thermal o/p ratio since only H2 was given but collider file has o- and p- H2")
def write_input(temperature=10, column=1e12, collider_densities={'H2':1},
                bw=0.01, tbg=2.73, species='co', velocity_gradient=1.0, minfreq=1,
                maxfreq=10, delete_tempfile=True):
    """
    Write radex.inp file parameters.

    Parameters
    ----------
    temperature : float
        Kinetic temperature (K)
    column : float
        Column density of the molecule (cm^-2)
    collider_densities : dict
        Collider names and their number densities (cm^-3).  Colliders below
        RADEX's minimum density of 1e-3 are silently dropped.  The caller's
        dict is never modified.
    bw : float
        Unused; retained for backward compatibility.
    tbg : float
        Temperature of the background radiation (e.g. CMB)
    species : str
        Name of the molecule (specifically, the prefix for the file name,
        e.g. for "co.dat", species='co').  Case sensitive!
    velocity_gradient : float
        Velocity gradient per pc in km/s
    minfreq, maxfreq : float or Quantity
        Frequency range of lines to report (GHz if given unitless)
    delete_tempfile : bool
        Automatically delete the temporary files on close?

    Returns
    -------
    (infile, outfile)
        NamedTemporaryFile handles: the written input file and the (empty)
        file RADEX will write its results to.
    """
    if hasattr(minfreq, 'unit'):
        minfreq = unitless(minfreq.to('GHz', u.spectral()))
    if hasattr(maxfreq, 'unit'):
        maxfreq = unitless(maxfreq.to('GHz', u.spectral()))

    # BUGFIX: the original popped sub-threshold colliders from the dict while
    # iterating over .keys(), which raises RuntimeError on Python 3 and also
    # mutated the caller's dict (and the shared mutable default argument).
    # Filter into a private copy instead; RADEX doesn't allow densities < 1e-3.
    collider_densities = {name: dens
                          for name, dens in collider_densities.items()
                          if not dens < 1e-3}

    infile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
    outfile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
    infile.write(species+'.dat\n')
    infile.write(outfile.name+'\n')
    infile.write(str(minfreq)+' '+str(maxfreq)+'\n')
    infile.write(str(temperature)+'\n')
    infile.write('%s\n' % len(collider_densities))
    for name, dens in collider_densities.items():
        infile.write('%s\n' % name)
        infile.write(str(dens)+'\n')
    infile.write(str(tbg)+'\n')
    infile.write(str(column)+'\n')
    infile.write(str(velocity_gradient)+'\n')
    # a trailing 0 tells RADEX there are no further runs; end the input file
    infile.write('0\n')
    infile.flush()
    return infile, outfile
def call_radex(executable, inpfilename, debug=False, delete_tempfile=True):
    """
    Execute RADEX with ``inpfilename`` on stdin, capturing stdout into a
    temporary log file whose handle is returned.
    """
    logfile = tempfile.NamedTemporaryFile(mode='w', delete=delete_tempfile)
    command = '{radex} < {inpfile} > {logfile}'.format(radex=executable,
                                                      inpfile=inpfilename,
                                                      logfile=logfile.name)
    if debug:
        print("Command:", command)

    returncode = subprocess.call(command, shell=True)
    if returncode != 0:
        # dump the captured log so the failure is visible to the caller
        print("RADEX returned error code %i" % returncode)
        with open(logfile.name, 'r') as log_stream:
            print(log_stream.read())

    return logfile
# Column names of the parsed RADEX output table, in file order.
header_names = ['J_up','J_low','E_UP','FREQ', 'WAVE', 'T_EX', 'TAU', 'T_R', 'POP_UP', 'POP_LOW', 'FLUX_Kkms', 'FLUX_Inu']
# Astropy unit for each column; None marks dimensionless columns.
header_units = [None, None, u.K, u.GHz, u.um, u.K, None, u.K, None, None, u.K*u.km/u.s, u.erg/u.cm**2/u.s]
# dtype used when building each astropy table column.
dtypes = [str, str, float, float, float, float, float, float, float, float, float, float]
def parse_outfile(filename, return_dict=False):
    """
    Parse a RADEX output file.

    Returns an astropy table of the line data (with the '*'-prefixed header
    lines stored as table metadata), or, if ``return_dict`` is set, a plain
    dict of string-valued columns plus the iteration counts under 'niter'.
    Cells RADEX printed as '*****' are replaced with '-999'.
    """
    with open(filename, 'r') as fh:
        alllines = fh.readlines()

    header = {L.split(":")[0][2:].strip(): L.split(":")[1].strip()
              for L in alllines if L[0] == '*'}

    def _is_data(line):
        # skip header, iteration-count, and column-title rows
        return (line[0] != '*' and 'iterat' not in line
                and 'GHz' not in line and 'TAU' not in line)

    lines = [line.replace("--", " ") for line in alllines if _is_data(line)]
    niter = [line.split(" ")[3] for line in alllines if 'iterat' in line]

    data_list = [['-999' if '*' in token else token for token in line.split()]
                 for line in lines]
    if not data_list:
        raise ValueError("No lines included?")
    data_in_columns = list(map(list, zip(*data_list)))

    if return_dict:
        data = dict(zip(header_names, data_in_columns))
        data['niter'] = niter
        return data

    columns = [astropy.table.Column(data=col, name=name.lower(), unit=unit, dtype=dtype)
               for col, name, unit, dtype in zip(data_in_columns, header_names,
                                                 header_units, dtypes)]
    return astropy.table.Table(columns, meta=header)
class Radex(RadiativeTransferApproximator):
def __call__(self, return_table=True, **kwargs):
# reset the parameters appropriately
self.set_params(**kwargs)
# No need to re-validate: it is already done when self.temperature is
# set in __init__
niter = self.run_radex(reload_molfile=False, validate_colliders=False)
if return_table:
return self.get_table()
else:
return niter
    def __init__(self,
                 collider_densities=None,
                 density=None,
                 total_density=None,
                 temperature=None,
                 species='co',
                 column=None,
                 column_per_bin=None,
                 tbackground=2.7315,
                 deltav=1.0,
                 abundance=None,
                 datapath=None,
                 escapeProbGeom='lvg',
                 outfile='radex.out',
                 logfile='radex.log',
                 debug=False,
                 mu=2.8,
                 source_area=None,
                 ):
        """
        Direct wrapper of the radex FORTRAN code

        Parameters
        ----------
        collider_densities: dict
            Dictionary giving the volume densities of the collider(s) in units
            of cm^-3. Valid entries are h2,oh2,ph2,e,He,H,H+. The keys are
            case-insensitive.
        density: float
        total_density: float
            (optional) Alternative to ``collider_densities``: can specify a
            single number indicating the total density of H2. This should
            not be used when electrons or H atoms are the intended collider.
            These keywords are synonymous and therefore only one can be used.
        temperature: float
            Local gas temperature in K
        species: str
            A string specifying a valid chemical species. This is used to look
            up the specified molecule
        column: float
        column_per_bin : float
            The column density of the molecule of interest per bin, where
            a bin is (deltav km/s * 1 pc). These keywords are synonymous and
            therefore only one can be specified.
        abundance: float
            The molecule's abundance relative to the total collider density in
            each velocity bin, i.e. column = abundance * density * length * dv.
            If both abundance and column are specified, abundance is ignored.
        tbackground: float
            Background radiation temperature (e.g., CMB)
        deltav: float
            The FWHM line width (really, the single-zone velocity width to
            scale the column density by: this is most sensibly interpreted as a
            velocity gradient (dv/length))
        datapath: str
            Path to the molecular data files. If it is not specified, defaults
            to the current directory, OR the shell variable RADEX_DATAPATH if
            it is specified.
        outfile: str
            Output file name
        logfile: str
            Log file name
        escapeProbGeom: 'lvg','sphere','slab'
            Which escape probability method to use
        mu: float
            Mean mass per particle in AMU. Set to 2.8 for H2+Helium mix
        source_area: float / unit
            The emitting area of the source on the sky in steradians
        """
        log.debug("Importing radex fortran module")
        # import here (not at module level) so importing pyradex does not
        # require the compiled fortran extension to be present
        from pyradex.radex import radex
        self.radex = radex
        self.mu = mu
        if os.getenv('RADEX_DATAPATH') and datapath is None:
            datapath = os.getenv('RADEX_DATAPATH')
        log.debug(f"Datapath={datapath}")
        if datapath is not None:
            self.datapath = datapath
            # round-trip check: the datapath setter writes into a fixed-width
            # fortran buffer, which can silently truncate long paths
            if self.datapath != os.path.expanduser(datapath):
                raise ValueError("Data path %s was not successfully stored;"
                                 " instead %s was." % (datapath,self.datapath))
        log.debug(f"Setting species to {species}")
        self.species = species
        if self.molpath == b'':
            raise ValueError("Must set a species name.")
        if not os.path.exists(self.molpath):
            raise ValueError("Must specify a valid path to a molecular data file "
                             "else RADEX will crash."
                             " Current path is {0}".format(self.molpath))

        if sum(x is not None for x in (collider_densities,density,total_density)) > 1:
            raise ValueError("Can only specify one of density, total_density,"
                             " and collider_densities")

        if sum(x is not None for x in (column,column_per_bin)) > 1:
            raise ValueError("Can only specify one of column, column_per_bin.")

        # exactly two of {column, density, abundance} must be given; the third
        # is derived from the other two
        n_specifications = sum(x is not None for x in (column, column_per_bin,
                                                       collider_densities,
                                                       density, total_density,
                                                       abundance))
        if (n_specifications > 2):
            raise ValueError("Can only specify two of column, density, and abundance.")
        if (n_specifications < 2):
            raise ValueError("Must specify two of column, density, and abundance.")

        # "_locked_parameter" names the derived quantity; "_is_locked" guards
        # the setters against recursively re-deriving each other
        self._locked_parameter = 'density'
        self._is_locked = True

        log.debug(f"Setting temperature to {temperature}")
        # This MUST happen before density is set, otherwise OPR will be
        # incorrectly set.
        self.radex.cphys.tkin = unitless(temperature)
        log.debug(f"Temperature = {self.radex.cphys.tkin}")

        # density warnings will occur if a generic (number-based) density is
        # used. It can be suppressed more directly by using a dictionary-style
        # density
        self._suppress_density_warning = False

        log.debug("Setting up collider densities")
        if collider_densities:
            self.density = collider_densities
            self._suppress_density_warning = True
            self._is_locked = False
            if total_density:
                log.warn("`total_density` was specified, but `collider_densities` "
                         "was used instead. Set `collider_densities=None` if you "
                         "want to use `total_density`.")
        elif total_density:
            self.density = total_density
            self._suppress_density_warning = True
            self._is_locked = False
        elif density:
            self.density = density
            self._suppress_density_warning = True
            self._is_locked = False
        else:
            # no density given: it will be derived from column and abundance
            self._locked_parameter = 'column'
            self._is_locked = True

        log.debug("Completed collider densities; setting up outfile/logfile")
        self.outfile = outfile
        self.logfile = logfile
        self.escapeProbGeom = escapeProbGeom
        self.deltav = deltav
        log.debug("Setting parameters for the first time")
        self._set_parameters()

        if column_per_bin is not None:
            self.column_per_bin = column_per_bin
        elif column is not None:
            self.column_per_bin = column
        else:
            self._locked_parameter = 'density'
            self._is_locked = False

        if abundance:
            self.abundance = abundance

        self._validate_colliders()

        # This has to happen here, because the colliders are read in at
        # this point and rates interpolated
        self.temperature = temperature

        self.tbg = tbackground

        self.debug = debug

        self.source_area = source_area

        self._suppress_density_warning = False

    # Internal unit conventions used throughout the class:
    _u_brightness = (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1 * u.sr**-1)  # specific intensity
    _u_sc = u.cm**-2  # column density
    _u_cc = u.cm**-3  # volume density
    _u_gradient = u.cm**-2 / (u.km/u.s) / u.pc  # column per velocity gradient
    _u_kms = u.km/u.s
    _u_cms = u.cm/u.s
    def set_params(self, density=None, collider_densities=None,
                   column=None, column_per_bin=None, temperature=None,
                   abundance=None, species=None, deltav=None, tbg=None,
                   escapeProbGeom=None):
        """
        Set any number of parameters in one call; parameters left as None
        are unchanged.  The internal ordering matters: the kinetic
        temperature is written into the fortran commons *before* densities
        so the thermal ortho/para ratio is computed correctly.
        """
        if species is not None:
            self.species = species

        if deltav is not None:
            self.deltav = deltav

        # This MUST happen before density is set, otherwise OPR will be
        # incorrectly set.
        if temperature is not None:
            self.radex.cphys.tkin = unitless(temperature)

        # if the density is a specified parameter, we only want to warn that it
        # is being set once
        self._suppress_density_warning = False
        if collider_densities is not None:
            self.density = collider_densities
            self._suppress_density_warning = True
        elif density is not None:
            if collider_densities is not None:
                raise ValueError('Can specify only one of density,'
                                 ' collider_densities')
            self.density = density
            self._suppress_density_warning = True

        if column is not None:
            self.column = column
        elif column_per_bin is not None:
            if column is not None:
                raise ValueError("Can specify only one of column,"
                                 "column per bin")
            self.column_per_bin = column_per_bin

        if temperature is not None:
            self.temperature = temperature

        if abundance is not None:
            self.abundance = abundance

        if tbg is not None:
            self.tbg = tbg

        if escapeProbGeom is not None:
            self.escapeProbGeom = escapeProbGeom

        # the density warning should occur for any other future settings
        self._suppress_density_warning = False

    @property
    def locked_parameter(self):
        """Name of the parameter currently derived from the other two
        (one of 'column', 'abundance', 'density')."""
        return self._locked_parameter

    def _lock_param(self, parname):
        # Remember which parameter was locked before switching the lock to
        # ``parname`` so setters can restore the previous choice afterwards.
        if not hasattr(self, '_previous_locked_parameter') or (hasattr(self, '_locked_parameter') and
                                                               self._previous_locked_parameter != self._locked_parameter):
            self._previous_locked_parameter = self._locked_parameter
        self._locked_parameter = parname
def _set_parameters(self):
#self.radex.cphys.cdmol = self.column
#self.radex.cphys.tkin = self.temperature
if hasattr(self.deltav, 'to'):
self.radex.cphys.deltav = unitless(self.deltav.to(self._u_cms))
else:
self.radex.cphys.deltav = self.deltav * (self._u_cms.to(self._u_kms))
# these parameters are only used for outputs and therefore can be ignored
self.radex.freq.fmin = 0
self.radex.freq.fmax = 1e10
if not hasattr(self, 'miniter'):
self.miniter = 10
if not hasattr(self, 'maxiter'):
self.maxiter = 200
_all_valid_colliders = {'H2':'H2',
'PH2':'pH2',
'OH2':'oH2',
'E':'e',
'H':'H',
'HE':'He',
'H+':'H+'}
@property
def density(self):
d = {'H2':self.radex.cphys.density[0],
'pH2':self.radex.cphys.density[1],
'oH2':self.radex.cphys.density[2],
'e':self.radex.cphys.density[3],
'H':self.radex.cphys.density[4],
'He':self.radex.cphys.density[5],
'H+':self.radex.cphys.density[6]}
for k in d:
d[k] = u.Quantity(d[k], self._u_cc)
return ImmutableDict(d)
    @density.setter
    def density(self, collider_density):
        """
        Set collider densities from a single number (assumed to be n(H2))
        or a dict of name -> density in cm^-3.  Handles the ortho/para H2
        bookkeeping, re-reads the molecular data file, then re-synchronizes
        whichever of column/abundance is currently derived from density.
        """
        # NOTE(review): this mapping is unused below; the slot indices are
        # hard-coded instead.
        collider_ids = {'H2': 0,
                        'PH2': 1,
                        'OH2': 2,
                        'E': 3,
                        'H': 4,
                        'HE': 5,
                        'H+': 6}
        self._use_thermal_opr = False

        if isinstance(collider_density, (float,int,_quantity,np.ndarray)):
            if not self._suppress_density_warning:
                log.warn("Assuming the density is n(H_2).")
            collider_density = {'H2': collider_density}

        # normalize keys to upper case and strip units
        collider_densities = defaultdict(lambda: 0)
        for k in collider_density:
            collider_densities[k.upper()] = unitless(u.Quantity(collider_density[k], self._u_cc))
            if k.upper() not in self._all_valid_colliders:
                raise ValueError('Collider %s is not one of the valid colliders: %s' %
                                 (k,self._all_valid_colliders))

        if (('OH2' in collider_densities and collider_densities['OH2'] !=0) or
            ('PH2' in collider_densities and collider_densities['PH2'] !=0)):

            # this is simply not true: NH3 has just ph2 as a collider
            #if not 'PH2' in collider_densities or not 'OH2' in collider_densities:
            #    raise ValueError("If o-H2 density is specified, p-H2 must also be.")

            # TODO: look up whether RADEX uses density[0] if density[1] and [2] are specified
            # (it looks like the answer is "no" based on a quick test)
            #self.radex.cphys.density[0] = 0 # collider_densities['OH2'] + collider_densities['PH2']

            # PARA is [1], ORTHO is [2]
            # See lines 91, 92 of io.f
            if 'PH2' in collider_densities:
                self.radex.cphys.density[1] = collider_densities['PH2']
            if 'OH2' in collider_densities:
                self.radex.cphys.density[2] = collider_densities['OH2']
            self._use_thermal_opr = False
        elif 'H2' in collider_densities:
            warnings.warn("Using a default ortho-to-para ratio (which "
                          "will only affect species for which independent "
                          "ortho & para collision rates are given)")
            self._use_thermal_opr = True
            #self.radex.cphys.density[0] = collider_densities['H2']

            T = unitless(self.temperature)
            if T > 0:
                # From Faure, private communication
                opr = min(3.0,9.0*np.exp(-170.6/T))
            else:
                opr = 3.0
            fortho = opr/(1+opr)
            log.debug("Set OPR to {0} and fortho to {1}".format(opr,fortho))

            # split the total H2 density into the para [1] and ortho [2] slots
            self.radex.cphys.density[1] = collider_densities['H2']*(1-fortho)
            self.radex.cphys.density[2] = collider_densities['H2']*(fortho)

        # RADEX relies on n(H2) = n(oH2) + n(pH2)
        # We have set n(oH2) and n(pH2) above
        vc = [x.lower() for x in self.valid_colliders]
        if 'h2' in vc:
            # data file only has H2 rates: collapse o+p into the H2 slot
            self.radex.cphys.density[0] = self.radex.cphys.density[1:3].sum()
            self.radex.cphys.density[1] = 0
            self.radex.cphys.density[2] = 0
        elif 'oh2' in vc or 'ph2' in vc:
            self.radex.cphys.density[0] = 0

        self.radex.cphys.density[3] = collider_densities['E']
        self.radex.cphys.density[4] = collider_densities['H']
        self.radex.cphys.density[5] = collider_densities['HE']
        self.radex.cphys.density[6] = collider_densities['H+']

        # skip H2 when computing by assuming OPR correctly distributes ortho & para
        # It's not obvious that RADEX does this correctly in readdata.f
        self.radex.cphys.totdens = self.radex.cphys.density.sum()

        # Unfortunately,
        # must re-read molecular file and re-interpolate to new density
        log.debug("Validating colliders")
        self._validate_colliders()
        log.debug(f"Running 'readdata' from molfile={self.molpath}")
        self.radex.readdata()
        log.debug("Ran 'readdata'")

        # re-derive whichever of column/abundance depends on density; the
        # _is_locked flag prevents the other setters from recursing back here
        if not self._is_locked:
            self._is_locked = True
            assert self.locked_parameter in ('column', 'abundance', 'density')
            if self.locked_parameter == 'density': # self is locked, still need to update
                if hasattr(self, '_previous_locked_parameter'):
                    self._lock_param(self._previous_locked_parameter)
                else:
                    self._lock_param('abundance') # choose arbitrarily
            if self.locked_parameter == 'column':
                self.abundance = self.column_per_bin /(self.total_density*self.length)
            elif self.locked_parameter == 'abundance':
                self.column_per_bin = self.total_density * self.length * self.abundance
            else:
                raise ValueError("Neither column nor abundance were updated")
            self._lock_param('density')
            self._is_locked = False
            # consistency check: column = abundance * density * length
            invab = (self.total_density / (self.column / self.length)).decompose().value
            if not np.allclose(invab, 1/self.abundance):
                raise ValueError("Can not set density to %s" % collider_density)
@property
def valid_colliders(self):
return self._valid_colliders
@property
def total_density(self):
"""
The total density *by number of particles*
The *mass density* can be dramatically different!
"""
return u.Quantity(self.radex.cphys.totdens, self._u_cc)
@property
def opr(self):
return self.radex.cphys.density[1]/self.radex.cphys.density[2]
    # The fortran character buffers below are fixed-width.  Depending on how
    # f2py exposed them they appear either as arrays of 'S1' (sliceable) or as
    # a single 'S<N>' scalar (must be assigned whole, padded with spaces), so
    # every accessor tries the array form first and falls back on TypeError /
    # IndexError.
    @property
    def molpath(self):
        """Path to the molecular data file, as bytes, read back from the
        fortran common block."""
        log.debug(f"Computing molpath from molfile = {self.radex.impex.molfile}")
        try:
            result = b"".join(self.radex.impex.molfile).strip()
        except TypeError:
            result = self.radex.impex.molfile.tostring().strip()
        # this hack may be wrong; the underlying dtype appears to be corrupt
        return result.lstrip(b"b'") # strip "bytes" junk that appears to be added by numpy

    @molpath.setter
    def molpath(self, molfile):
        log.debug(f"Setting molpath to {molfile} (self.radex.impex.molfile={self.radex.impex.molfile})")
        if "~" in molfile:
            molfile = os.path.expanduser(molfile)
        # blank out the buffer first so stale characters from a longer
        # previous path cannot survive
        if PYVERSION == 3:
            try:
                self.radex.impex.molfile[:] = np.bytes_([""]*len(self.radex.impex.molfile))
            except TypeError as ex:
                self.radex.impex.molfile = " " * self.radex.impex.molfile.dtype.itemsize
        else:
            self.radex.impex.molfile[:] = ""
        log.debug(f"Verifying collision rates for molfile={molfile} from impex.molfile={self.radex.impex.molfile}")
        utils.verify_collisionratefile(molfile)
        try:
            self.radex.impex.molfile[:len(molfile)] = molfile
        except IndexError:
            self.radex.impex.molfile = molfile + " " * (self.radex.impex.molfile.dtype.itemsize - len(molfile))

    @property
    def outfile(self):
        """Output file name as stored in the fortran common block."""
        return self.radex.impex.outfile

    @outfile.setter
    def outfile(self, outfile):
        # same blank-then-write dance as molpath (see comment above)
        if PYVERSION == 3:
            try:
                self.radex.impex.outfile[:] = np.bytes_([""]*len(self.radex.impex.outfile))
            except TypeError as ex:
                self.radex.impex.outfile = " " * self.radex.impex.outfile.dtype.itemsize
        else:
            self.radex.impex.outfile[:] = ""
        try:
            self.radex.impex.outfile[:len(outfile)] = outfile
        except IndexError:
            self.radex.impex.outfile = outfile + " " * (self.radex.impex.outfile.dtype.itemsize - len(outfile))

    @property
    def logfile(self):
        """Log file name as stored in the fortran common block."""
        return self.radex.setup.logfile

    @logfile.setter
    def logfile(self, logfile):
        # same blank-then-write dance as molpath (see comment above)
        if PYVERSION == 3:
            try:
                self.radex.setup.logfile[:] = np.bytes_([""]*len(self.radex.setup.logfile))
            except TypeError as ex:
                self.radex.setup.logfile = " " * self.radex.setup.logfile.dtype.itemsize
        else:
            self.radex.setup.logfile[:] = ""
        try:
            self.radex.setup.logfile[:len(logfile)] = logfile
        except IndexError:
            self.radex.setup.logfile = logfile + " " * (self.radex.setup.logfile.dtype.itemsize - len(logfile))

    @property
    def datapath(self):
        """Directory holding the molecular data files, decoded from the
        fortran ``radat`` buffer."""
        try:
            return os.path.expanduser(b"".join(self.radex.setup.radat).strip()).decode('utf-8')
        except TypeError:
            # occurs if radat is S120 instead of array of S1
            return os.path.expanduser((self.radex.setup.radat.tostring().decode('utf-8').strip()))

    @datapath.setter
    def datapath(self, radat):
        # self.radex data path not needed if molecule given as full path
        if PYVERSION == 3:
            try:
                self.radex.setup.radat[:] = np.bytes_([""] * len(self.radex.setup.radat))
            except TypeError as ex:
                # now radat gets treated as a single S120 instead of an array of S1s
                self.radex.setup.radat = " " * self.radex.setup.radat.dtype.itemsize
        else:
            self.radex.setup.radat[:] = ""
        # there is dangerous magic here: radat needs to be interpreted as an array,
        # but you can't make it an array of characters easily...
        try:
            self.radex.setup.radat[:len(radat)] = radat
        except IndexError:
            # in python3, this might just work, where the above doesn't?
            # (this works if RADAT is an S120)
            # the added space is because the right and left side must have *exactly* the same size
            self.radex.setup.radat = radat + " " * (self.radex.setup.radat.dtype.itemsize - len(radat))
@property
def escapeProbGeom(self):
mdict = {2:'lvg',1:'sphere',3:'slab'}
return mdict[int(self.radex.setup.method)]
@escapeProbGeom.setter
def escapeProbGeom(self, escapeProbGeom):
mdict = {'lvg':2,'sphere':1,'slab':3}
if escapeProbGeom not in mdict:
raise ValueError("Invalid escapeProbGeom, must be one of "+",".join(mdict))
self.radex.setup.method = mdict[escapeProbGeom]
@property
def level_population(self):
return self.radex.collie.xpop
@property
def tex(self):
return u.Quantity(self.radex.radi.tex[self._mask], u.K)
Tex = tex
@property
def tau(self):
# taul(iline) = cddv*(xpop(n)*gstat(m)/gstat(n)-xpop(m))
#$ /(fgaus*xt/aeinst(iline))
return self.radex.radi.taul[self._mask]
@property
def frequency(self):
return u.Quantity(self.radex.radi.spfreq[self._mask], u.GHz)
    @property
    def temperature(self):
        """Kinetic temperature as a Quantity in K."""
        return u.Quantity(self.radex.cphys.tkin, u.K)

    @temperature.setter
    def temperature(self, tkin):
        if hasattr(tkin,'to'):
            tkin = unitless(u.Quantity(tkin, u.K))
        elif tkin is None:
            raise TypeError("Must specify tkin")
        if tkin <= 0 or tkin > 1e4:
            raise ValueError('Must have kinetic temperature > 0 and < 10^4 K')
        self.radex.cphys.tkin = tkin
        if not os.path.exists(self.molpath):
            raise IOError("File not found: %s" % self.molpath)
        # must re-read molecular file and re-interpolate to new temperature
        self._validate_colliders()
        #log.info("before DENS:"+str(self.radex.cphys.density))
        #log.info("before TOTDENS:"+str(self.radex.cphys.totdens))
        self.radex.readdata()
        #log.info("after DENS:"+str(self.radex.cphys.density))
        #log.info("after TOTDENS:"+str(self.radex.cphys.totdens))

        if self._use_thermal_opr:
            # Reset the density to a thermal value
            # (re-assigning density re-runs the o/p split at the new
            # temperature; preserve the lock state across that re-entry)
            lp = self._locked_parameter
            self.density = (unitless(self.density['H2']) or
                            unitless(self.density['oH2']+self.density['pH2']))
            self._locked_parameter = lp

    @property
    def column(self):
        """Alias for ``column_per_bin``."""
        return self.column_per_bin

    @column.setter
    def column(self, value):
        self.column_per_bin = value

    @property
    def column_per_bin(self):
        """Molecular column density per (deltav * 1 pc) bin, in cm^-2."""
        return u.Quantity(self.radex.cphys.cdmol, self._u_sc)

    @column_per_bin.setter
    def column_per_bin(self, col):
        if hasattr(col, 'to'):
            col = unitless(u.Quantity(col, self._u_sc))
        if col < 1e5 or col > 1e25:
            raise ValueError("Extremely low or extremely high column.")
        self.radex.cphys.cdmol = col

        col = u.Quantity(col, self._u_sc)
        # re-derive whichever of density/abundance depends on column; the
        # _is_locked flag prevents the other setters from recursing back here
        if not self._is_locked:
            self._is_locked = True
            assert self.locked_parameter in ('column', 'abundance', 'density')
            if self.locked_parameter == 'column': # self is locked, still need to update
                if hasattr(self, '_previous_locked_parameter'):
                    self._lock_param(self._previous_locked_parameter)
                else:
                    self._lock_param('density') # choose arbitrarily
            assert self.locked_parameter in ('density', 'abundance')
            if self.locked_parameter == 'density':
                ab = (col/(self.total_density * self.length))
                if hasattr(ab, 'decompose'):
                    self.abundance = ab.decompose().value
                else:
                    # ab is a bare float ratio of cm^-2 to (cm^-3 * pc)
                    self.abundance = ab / (self._u_cc*u.pc).to(self._u_sc)
            elif self.locked_parameter == 'abundance':
                self.density = col / self.length / self.abundance
            else:
                raise ValueError("Neither density nor abundance were updated")
            self._lock_param('column')
            self._is_locked = False
            # consistency check: column = abundance * density * length
            invab = (self.total_density / (self.column / self.length)).decompose().value
            if not np.allclose(invab, 1/self.abundance):
                raise ValueError("Can not set column_per_bin to %s" % col)
@property
def column_per_kms_perpc(self):
return self.column_per_bin / self.deltav
@column_per_kms_perpc.setter
def column_per_kms_perpc(self, cddv):
cddv = u.Quantity(cddv, self._u_gradient)
self.column_per_bin = cddv * u.Quantity(self.deltav, self._u_kms) * self.length()
    @property
    def abundance(self):
        """Molecular abundance relative to the total collider density
        (dimensionless)."""
        return self._abundance

    @abundance.setter
    def abundance(self, abund):
        self._abundance = abund
        # re-derive whichever of column/density depends on abundance; the
        # _is_locked flag prevents the other setters from recursing back here
        if not self._is_locked:
            assert self.locked_parameter in ('column', 'abundance', 'density')
            if self.locked_parameter == 'abundance': # self is locked, still need to update
                if hasattr(self, '_previous_locked_parameter'):
                    self._lock_param(self._previous_locked_parameter)
                else:
                    self._lock_param('density') # choose arbitrarily
            self._is_locked = True
            if self.locked_parameter == 'column':
                dens = self.column_per_bin / self.length / abund
                self.density = dens
            elif self.locked_parameter == 'density':
                col = self.total_density*self.length*abund
                self.column_per_bin = u.Quantity(col, u.cm**-2)
            else:
                raise ValueError("Neither column nor density were updated")
            self._lock_param('abundance')
            self._is_locked = False
            # consistency check: column = abundance * density * length
            invab = (self.total_density / (self.column / self.length)).decompose().value
            if not np.allclose(invab, 1/self.abundance):
                raise ValueError("Can not set abundance to %s" % abund)
@property
def deltav(self):
return self._deltav
@deltav.setter
def deltav(self, dv):
self._deltav = u.Quantity(dv, self._u_kms)
@property
def length(self):
""" Hard-coded, assumed length-scale """
return u.Quantity(1, u.pc)
@property
def debug(self):
return self.radex.dbg.debug
@debug.setter
def debug(self, debug):
self.radex.dbg.debug = debug
@property
def tbg(self):
return u.Quantity(self.radex.cphys.tbg, u.K)
@tbg.setter
def tbg(self, tbg):
if tbg is None:
# allow tbg to be not-set so that backrad() isn't triggered
return
#print("Set TBG=%f" % tbg)
if hasattr(tbg, 'value'):
tbg = unitless(u.Quantity(tbg, u.K))
self.radex.cphys.tbg = tbg
self.radex.backrad()
    def run_radex(self, silent=True, reuse_last=False, reload_molfile=True,
                  abs_convergence_threshold=1e-16, rel_convergence_threshold=1e-8,
                  validate_colliders=True):
        """
        Run the iterative matrix solution using a python loop

        Parameters
        ----------
        silent: bool
            Print a message when iteration is done?
        reuse_last: bool
            If this is True, the matrix iterator will start at iteration 1
            rather than iteration 0, and it will therefore repopulate the rate
            matrix based on the radiative background alone. In principle,
            setting this to True should result in a significantly faster
            convergence; in practice, it does not.
        reload_molfile: bool
            Re-read the molecular line file? This is needed if the collision
            rates are different and have not been updated by, e.g., changing
            the temperature (which automatically runs the `readdata` function)
        validate_colliders: bool
            Validate the colliders before running the code. This should always
            be done unless running in a grid, in which case it can cause a
            slowdown (~30%).

        Returns
        -------
        int
            The number of solver iterations performed.
        """
        if validate_colliders:
            # 100 loops, best of 3: 7.48 ms per loop
            self._validate_colliders()

        if reload_molfile or self.radex.collie.ctot.sum()==0:
            # 100 loops, best of 3: 15.3 ms per loop
            self.radex.readdata()

        #self.radex.backrad()

        # Given the properties of *this* class, set the appropriate RADEX
        # fortran function values
        # 10000 loops, best of 3: 74 micros per loop
        self._set_parameters()

        self._iter_counter = 1 if reuse_last else 0

        # 0-d array so the fortran routine can flag convergence;
        # presumably updated in place by the f2py-wrapped `matrix` — confirm
        converged = np.array(False)

        # 1000000 loops, best of 3: 1.79 micros per loop
        last = self.level_population.copy()

        while not converged:
            if self._iter_counter >= self.maxiter:
                if not silent:
                    print("Did not converge in %i iterations, stopping." % self.maxiter)
                break
            # 10000 loops, best of 3: 30.8 micros per loop
            self.radex.matrix(self._iter_counter, converged)
            level_diff = np.abs(last-self.level_population)
            frac_level_diff = level_diff/self.level_population
            # stop when either the absolute or relative population change is
            # tiny, but only after at least `miniter` iterations
            if (((level_diff.sum() < abs_convergence_threshold) or
                 (frac_level_diff.sum() < rel_convergence_threshold)) and
                self._iter_counter>self.miniter):
                if not silent:
                    print("Stopped changing after %i iterations" % self._iter_counter)
                break
            last = self.level_population.copy()
            self._iter_counter += 1

        if converged and not silent:
            print("Successfully converged after %i iterations" % self._iter_counter)

        return self._iter_counter

    @property
    def quantum_number(self):
        """Quantum-number labels for the energy levels, straight from the
        fortran common block."""
        # more recent versions of numpy/python don't require any restructuring?
        return self.radex.quant.qnum
        #return np.array([(b"".join(x)).strip() for x in
        #                 grouper(self.radex.quant.qnum.T.ravel().tolist(),6,fillvalue=b'')])
@property
def upperlevelnumber(self):
# wrong return self.radex.imolec.iupp[self._mask]
return self.quantum_number[self.upperlevelindex]
@property
def lowerlevelnumber(self):
# wrong return self.radex.imolec.ilow[self._mask]
return self.quantum_number[self.lowerlevelindex]
@property
def upperlevelindex(self):
return self.radex.imolec.iupp[self._mask]-1
@property
def upperlevelpop(self):
return self.level_population[self.upperlevelindex]
@property
def lowerlevelindex(self):
return self.radex.imolec.ilow[self._mask]-1
@property
def lowerlevelpop(self):
return self.level_population[self.lowerlevelindex]
@property
def upperstateenergy(self):
return self.radex.rmolec.eup[self._mask]
@property
def inds_frequencies_included(self):
"""
The indices of the line frequencies fitted by RADEX
(RADEX can hold up to 99999 frequencies, but usually uses ~100)
"""
return np.where(self._mask)[0]
@property
def background_brightness(self):
return u.Quantity(self.radex.radi.backi[self._mask], self._u_brightness)
@background_brightness.setter
def background_brightness(self, value):
self.radex.radi.backi[:value.size] = value.to(self._u_brightness)
self.radex.radi.totalb[:value.size] = value.to(self._u_brightness)
_thc = (2 * constants.h * constants.c).cgs / u.sr
_fk = (constants.h * constants.c / constants.k_B).cgs
_thc_value = _thc.value
_fk_value = _fk.value
@property
def source_brightness(self):
"""
RADEX compat? (check)
"""
fk = self._fk_value
thc = self._thc_value
with QuantityOff():
ftau = np.exp(-self.tau)
xt = self._xt
xnu = self._xnu
earg = fk*xnu/self.tex
bnutex = thc*xt/(np.exp(earg)-1.0)
toti = self.background_brightness*ftau+bnutex*(1.0-ftau)
return u.Quantity(toti, self._u_brightness)
@property
def source_brightness_beta(self):
fk = self._fk_value
thc = self._thc_value
with QuantityOff():
ftau = np.exp(-self.tau)
xt = self._xt
xnu = self._xnu
earg = fk*xnu/self.tex
bnutex = thc*xt/(np.exp(earg)-1.0)
toti = self.background_brightness*ftau+bnutex*(1-self.beta)
return u.Quantity(toti, self._u_brightness)
@property
def beta(self):
# this will probably be faster if vectorized (translated completely
# from fortran to python)
return np.array([self.radex.escprob(t) for t in self.tau])
@property
def _xnu(self):
"""
Line frequency in inverse cm
"""
return u.Quantity(self.radex.radi.xnu[self._mask], u.cm**-1)
@property
def _xt(self):
# xt = xnu**3 # cm^-1 -> cm^-3
return self._xnu**3
@property
def _cddv(self):
return self.column / self.deltav
@property
def _statistical_weight(self):
return self.radex.rmolec.gstat
@property
def upperlevel_statisticalweight(self):
return self._statistical_weight[self.upperlevelindex]
@property
def lowerlevel_statisticalweight(self):
return self._statistical_weight[self.lowerlevelindex]
@property
def _mask(self):
return self.radex.radi.spfreq != 0
def get_synthspec(self, fmin, fmax, npts=1000, **kwargs):
"""
Generate a synthetic spectrum of the selected molecule over the
specified frequency range. This task is good for quick-looks but has a
lot of overhead for generating models and should not be used for
fitting (unless you have a conveniently small amount of data)
Parameters
----------
fmin : `~astropy.units.Quantity`
fmax : `~astropy.units.Quantity`
Frequency-equivalent quantity
"""
wcs = synthspec.FrequencyArray(fmin, fmax, npts)
S = synthspec.SyntheticSpectrum.from_RADEX(wcs, self, **kwargs)
return S
def partition_function(self, temperature=None):
"""
Equation 46 of Mangum & Shirley 2015:
Q = Sum( g_i exp(-E_i / kT) )
"""
warnings.warn("The partition function may be very inaccurate using "
"LAMDA files because they include a small fraction of"
" the total available states.")
gi = self.upperlevel_statisticalweight
Ei = u.Quantity(self.upperstateenergy, unit=u.K)
if temperature is None:
temperature = self.temperature
if not hasattr(temperature, 'unit'):
temperature = u.Quantity(temperature, unit=u.K)
return (gi*np.exp(-Ei/(temperature))).sum()
def density_distribution(densarr, distr, moleculecolumn, tauthresh=0.8,
                         opr=None, line_ids=(), mincol=None, Radex=Radex,
                         **kwargs):
    """
    Compute the LVG model for a single zone with an assumed density
    *distribution* but other properties fixed.

    Parameters
    ----------
    densarr : array
        Array of densities corresponding to the distribution function
    distr : array
        The density distribution corresponding to the density array
        (must sum to 1)
    moleculecolumn : quantity
        The total column density of the molecule in question. It will be
        redistributed across the appropriate densities. Units: cm^-2
        [this is wrong - each density will assume a too-low optical depth]
    tauthresh : float
        Optical depth above which a warning about the validity of the
        multi-slab smoothing is emitted
    opr : float or None
        Ortho-to-para ratio of H2; if None, a single 'H2' collider is used
    line_ids : sequence of int
        Indices of the lines to extract; at least one is required.
        (Default changed from a mutable ``[]`` to an immutable ``()``;
        both are falsy, so behavior is unchanged.)
    mincol : quantity or None
        Floor applied to each slab's column density
    Radex : class
        The Radex implementation to instantiate

    Returns
    -------
    (R, linestrengths, linestrengths.sum(axis=0), texs, tau[line_ids])
    """
    if not np.allclose(distr.sum(), 1):
        raise ValueError("The distribution must be normalized.")
    if not line_ids:
        raise ValueError("Specify at least one line ID")
    meandens = (densarr*distr).mean()
    if opr is None:
        collider_densities = {'H2': meandens}
    else:
        fortho = opr/(1+opr)
        collider_densities = {'oH2':meandens*fortho,'pH2':meandens*(1-fortho)}
    # Test whether the multi-slab model is reasonable by checking:
    # if the column was all at the mean density, would any lines be
    # optically thick?
    R = Radex(collider_densities=collider_densities, column=moleculecolumn, **kwargs)
    R.run_radex()
    if np.any(R.tau > tauthresh):
        warnings.warn(("At least one line optical depth is >{tauthresh}. "
                       "Smoothing may be invalid.").format(tauthresh=tauthresh))
    # set the optical depth from the *mean* density assuming the *total* column
    tau = R.tau
    print("Mean density: {0} Optical Depth: {1}".format(meandens, tau[line_ids]))
    # Unit-stripped radiative constants: 2hc (cgs, per sr) and hc/k_B (cgs).
    _thc = (2 * constants.h * constants.c).cgs / u.sr
    _fk = (constants.h * constants.c / constants.k_B).cgs
    _thc_value = _thc.value
    _fk_value = _fk.value
    _u_brightness = (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1 * u.sr**-1)
    xnu = R.frequency.to(u.cm**-1, u.spectral()).value
    linestrengths = []
    texs = []
    for dens,prob in zip(densarr,distr):
        if opr is None:
            collider_densities = {'H2':dens}
        else:
            collider_densities = {'oH2':dens*fortho,'pH2':dens*(1-fortho)}
        R.density = collider_densities
        try:
            # Each slab carries its probability-weighted share of the column.
            R.column = moleculecolumn * prob
            if mincol is not None and R.column < mincol:
                R.column = mincol
            R.run_radex()
        except ValueError as ex:
            if ex.args[0] == "Extremely low or extremely high column.":
                if R.column > u.Quantity(1e20, u.cm**-2):
                    raise ex
                else:
                    # Negligible column: record 2.73 K (presumably the CMB
                    # temperature) and zero line strength for this slab.
                    texs.append(np.zeros_like(line_ids)+2.73)
                    linestrengths.append(np.zeros_like(line_ids))
                    continue
            else:
                raise ex
        # Overwrite the per-slab opacity with the global (mean-density,
        # total-column) value computed above.
        if hasattr(R, 'radex'):
            R.radex.radi.taul[:len(tau)] = tau
        elif hasattr(R, '_data_dict'):
            R._data_dict['tau'] = tau
        fk = _fk_value
        thc = _thc_value
        with QuantityOff():
            # I = I_bg exp(-tau) + B_nu(Tex) (1 - exp(-tau))
            ftau = np.exp(-tau)
            xt = xnu**3
            earg = fk*xnu/R.tex
            bnutex = thc*xt/(np.exp(earg)-1.0)
            toti_nounit = R.background_brightness*ftau+bnutex*(1.0-ftau)
        toti = u.Quantity(toti_nounit, _u_brightness)
        totK = ((toti*u.sr).to(u.K, u.brightness_temperature(1*u.sr,
                                                             R.frequency)))
        linestrengths.append(totK[line_ids])
        texs.append(R.tex[line_ids])
    linestrengths = np.array(linestrengths)
    texs = np.array(texs)
    return R, linestrengths, linestrengths.sum(axis=0), texs, tau[line_ids]
def grid():
    """Placeholder for a future grid-computation entry point; currently a
    no-op."""
| [
"numpy.array",
"os.path.exists",
"numpy.where",
"numpy.exp",
"astropy.units.brightness_temperature",
"subprocess.call",
"tempfile.NamedTemporaryFile",
"warnings.warn",
"os.path.expanduser",
"astropy.log.warn",
"numpy.abs",
"numpy.allclose",
"numpy.any",
"os.getenv",
"collections.defaultd... | [((1695, 1772), 'warnings.warn', 'warnings.warn', (['"""pyradex is deprecated: Use pyradex.Radex instead if you can."""'], {}), "('pyradex is deprecated: Use pyradex.Radex instead if you can.')\n", (1708, 1772), False, 'import warnings\n'), ((3675, 3736), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': 'delete_tempfile'}), "(mode='w', delete=delete_tempfile)\n", (3702, 3736), False, 'import tempfile\n'), ((3751, 3812), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': 'delete_tempfile'}), "(mode='w', delete=delete_tempfile)\n", (3778, 3812), False, 'import tempfile\n'), ((4613, 4674), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': 'delete_tempfile'}), "(mode='w', delete=delete_tempfile)\n", (4640, 4674), False, 'import tempfile\n'), ((4870, 4902), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (4885, 4902), False, 'import subprocess\n'), ((44403, 44428), 'numpy.any', 'np.any', (['(R.tau > tauthresh)'], {}), '(R.tau > tauthresh)\n', (44409, 44428), True, 'import numpy as np\n'), ((46673, 46696), 'numpy.array', 'np.array', (['linestrengths'], {}), '(linestrengths)\n', (46681, 46696), True, 'import numpy as np\n'), ((46708, 46722), 'numpy.array', 'np.array', (['texs'], {}), '(texs)\n', (46716, 46722), True, 'import numpy as np\n'), ((10012, 10055), 'astropy.log.debug', 'log.debug', (['"""Importing radex fortran module"""'], {}), "('Importing radex fortran module')\n", (10021, 10055), False, 'from astropy import log\n'), ((10602, 10644), 'astropy.log.debug', 'log.debug', (['f"""Setting species to {species}"""'], {}), "(f'Setting species to {species}')\n", (10611, 10644), False, 'from astropy import log\n'), ((12021, 12071), 'astropy.log.debug', 'log.debug', (['f"""Setting temperature to {temperature}"""'], {}), "(f'Setting temperature to 
{temperature}')\n", (12030, 12071), False, 'from astropy import log\n'), ((12233, 12284), 'astropy.log.debug', 'log.debug', (['f"""Temperature = {self.radex.cphys.tkin}"""'], {}), "(f'Temperature = {self.radex.cphys.tkin}')\n", (12242, 12284), False, 'from astropy import log\n'), ((12517, 12559), 'astropy.log.debug', 'log.debug', (['"""Setting up collider densities"""'], {}), "('Setting up collider densities')\n", (12526, 12559), False, 'from astropy import log\n'), ((13380, 13449), 'astropy.log.debug', 'log.debug', (['"""Completed collider densities; setting up outfile/logfile"""'], {}), "('Completed collider densities; setting up outfile/logfile')\n", (13389, 13449), False, 'from astropy import log\n'), ((13595, 13645), 'astropy.log.debug', 'log.debug', (['"""Setting parameters for the first time"""'], {}), "('Setting parameters for the first time')\n", (13604, 13645), False, 'from astropy import log\n'), ((18873, 18896), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (18884, 18896), False, 'from collections import defaultdict\n'), ((22108, 22141), 'astropy.log.debug', 'log.debug', (['"""Validating colliders"""'], {}), "('Validating colliders')\n", (22117, 22141), False, 'from astropy import log\n'), ((22185, 22245), 'astropy.log.debug', 'log.debug', (['f"""Running \'readdata\' from molfile={self.molpath}"""'], {}), '(f"Running \'readdata\' from molfile={self.molpath}")\n', (22194, 22245), False, 'from astropy import log\n'), ((22284, 22311), 'astropy.log.debug', 'log.debug', (['"""Ran \'readdata\'"""'], {}), '("Ran \'readdata\'")\n', (22293, 22311), False, 'from astropy import log\n'), ((23736, 23784), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.cphys.totdens', 'self._u_cc'], {}), '(self.radex.cphys.totdens, self._u_cc)\n', (23746, 23784), True, 'from astropy import units as u\n'), ((23937, 24010), 'astropy.log.debug', 'log.debug', (['f"""Computing molpath from molfile = {self.radex.impex.molfile}"""'], {}), 
"(f'Computing molpath from molfile = {self.radex.impex.molfile}')\n", (23946, 24010), False, 'from astropy import log\n'), ((24408, 24514), 'astropy.log.debug', 'log.debug', (['f"""Setting molpath to {molfile} (self.radex.impex.molfile={self.radex.impex.molfile})"""'], {}), "(\n f'Setting molpath to {molfile} (self.radex.impex.molfile={self.radex.impex.molfile})'\n )\n", (24417, 24514), False, 'from astropy import log\n'), ((24910, 25027), 'astropy.log.debug', 'log.debug', (['f"""Verifying collision rates for molfile={molfile} from impex.molfile={self.radex.impex.molfile}"""'], {}), "(\n f'Verifying collision rates for molfile={molfile} from impex.molfile={self.radex.impex.molfile}'\n )\n", (24919, 25027), False, 'from astropy import log\n'), ((28618, 28666), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.radi.tex[self._mask]', 'u.K'], {}), '(self.radex.radi.tex[self._mask], u.K)\n', (28628, 28666), True, 'from astropy import units as u\n'), ((28929, 28982), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.radi.spfreq[self._mask]', 'u.GHz'], {}), '(self.radex.radi.spfreq[self._mask], u.GHz)\n', (28939, 28982), True, 'from astropy import units as u\n'), ((29040, 29078), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.cphys.tkin', 'u.K'], {}), '(self.radex.cphys.tkin, u.K)\n', (29050, 29078), True, 'from astropy import units as u\n'), ((30480, 30526), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.cphys.cdmol', 'self._u_sc'], {}), '(self.radex.cphys.cdmol, self._u_sc)\n', (30490, 30526), True, 'from astropy import units as u\n'), ((30837, 30864), 'astropy.units.Quantity', 'u.Quantity', (['col', 'self._u_sc'], {}), '(col, self._u_sc)\n', (30847, 30864), True, 'from astropy import units as u\n'), ((32425, 32459), 'astropy.units.Quantity', 'u.Quantity', (['cddv', 'self._u_gradient'], {}), '(cddv, self._u_gradient)\n', (32435, 32459), True, 'from astropy import units as u\n'), ((34030, 34057), 'astropy.units.Quantity', 'u.Quantity', (['dv', 
'self._u_kms'], {}), '(dv, self._u_kms)\n', (34040, 34057), True, 'from astropy import units as u\n'), ((34159, 34178), 'astropy.units.Quantity', 'u.Quantity', (['(1)', 'u.pc'], {}), '(1, u.pc)\n', (34169, 34178), True, 'from astropy import units as u\n'), ((34384, 34421), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.cphys.tbg', 'u.K'], {}), '(self.radex.cphys.tbg, u.K)\n', (34394, 34421), True, 'from astropy import units as u\n'), ((36591, 36606), 'numpy.array', 'np.array', (['(False)'], {}), '(False)\n', (36599, 36606), True, 'import numpy as np\n'), ((39202, 39267), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.radi.backi[self._mask]', 'self._u_brightness'], {}), '(self.radex.radi.backi[self._mask], self._u_brightness)\n', (39212, 39267), True, 'from astropy import units as u\n'), ((40112, 40148), 'astropy.units.Quantity', 'u.Quantity', (['toti', 'self._u_brightness'], {}), '(toti, self._u_brightness)\n', (40122, 40148), True, 'from astropy import units as u\n'), ((40550, 40586), 'astropy.units.Quantity', 'u.Quantity', (['toti', 'self._u_brightness'], {}), '(toti, self._u_brightness)\n', (40560, 40586), True, 'from astropy import units as u\n'), ((40910, 40965), 'astropy.units.Quantity', 'u.Quantity', (['self.radex.radi.xnu[self._mask]', '(u.cm ** -1)'], {}), '(self.radex.radi.xnu[self._mask], u.cm ** -1)\n', (40920, 40965), True, 'from astropy import units as u\n'), ((42418, 42577), 'warnings.warn', 'warnings.warn', (['"""The partition function may be very inaccurate using LAMDA files because they include a small fraction of the total available states."""'], {}), "(\n 'The partition function may be very inaccurate using LAMDA files because they include a small fraction of the total available states.'\n )\n", (42431, 42577), False, 'import warnings\n'), ((42678, 42721), 'astropy.units.Quantity', 'u.Quantity', (['self.upperstateenergy'], {'unit': 'u.K'}), '(self.upperstateenergy, unit=u.K)\n', (42688, 42721), True, 'from astropy import units as 
u\n'), ((46385, 46423), 'astropy.units.Quantity', 'u.Quantity', (['toti_nounit', '_u_brightness'], {}), '(toti_nounit, _u_brightness)\n', (46395, 46423), True, 'from astropy import units as u\n'), ((2607, 2718), 'warnings.warn', 'warnings.warn', (['"""Assumed thermal o/p ratio since only H2 was given but collider file has o- and p- H2"""'], {}), "(\n 'Assumed thermal o/p ratio since only H2 was given but collider file has o- and p- H2'\n )\n", (2620, 2718), False, 'import warnings\n'), ((10157, 10184), 'os.getenv', 'os.getenv', (['"""RADEX_DATAPATH"""'], {}), "('RADEX_DATAPATH')\n", (10166, 10184), False, 'import os\n'), ((10230, 10257), 'os.getenv', 'os.getenv', (['"""RADEX_DATAPATH"""'], {}), "('RADEX_DATAPATH')\n", (10239, 10257), False, 'import os\n'), ((10270, 10303), 'astropy.log.debug', 'log.debug', (['f"""Datapath={datapath}"""'], {}), "(f'Datapath={datapath}')\n", (10279, 10303), False, 'from astropy import log\n'), ((10780, 10808), 'os.path.exists', 'os.path.exists', (['self.molpath'], {}), '(self.molpath)\n', (10794, 10808), False, 'import os\n'), ((18205, 18233), 'astropy.units.Quantity', 'u.Quantity', (['d[k]', 'self._u_cc'], {}), '(d[k], self._u_cc)\n', (18215, 18233), True, 'from astropy import units as u\n'), ((24554, 24581), 'os.path.expanduser', 'os.path.expanduser', (['molfile'], {}), '(molfile)\n', (24572, 24581), False, 'import os\n'), ((29464, 29492), 'os.path.exists', 'os.path.exists', (['self.molpath'], {}), '(self.molpath)\n', (29478, 29492), False, 'import os\n'), ((37078, 37114), 'numpy.abs', 'np.abs', (['(last - self.level_population)'], {}), '(last - self.level_population)\n', (37084, 37114), True, 'import numpy as np\n'), ((39111, 39131), 'numpy.where', 'np.where', (['self._mask'], {}), '(self._mask)\n', (39119, 39131), True, 'import numpy as np\n'), ((39873, 39890), 'numpy.exp', 'np.exp', (['(-self.tau)'], {}), '(-self.tau)\n', (39879, 39890), True, 'import numpy as np\n'), ((40308, 40325), 'numpy.exp', 'np.exp', (['(-self.tau)'], 
{}), '(-self.tau)\n', (40314, 40325), True, 'import numpy as np\n'), ((42868, 42901), 'astropy.units.Quantity', 'u.Quantity', (['temperature'], {'unit': 'u.K'}), '(temperature, unit=u.K)\n', (42878, 42901), True, 'from astropy import units as u\n'), ((45041, 45053), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (45051, 45053), True, 'from astropy import units as u\n'), ((46180, 46192), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (46186, 46192), True, 'import numpy as np\n'), ((46460, 46507), 'astropy.units.brightness_temperature', 'u.brightness_temperature', (['(1 * u.sr)', 'R.frequency'], {}), '(1 * u.sr, R.frequency)\n', (46484, 46507), True, 'from astropy import units as u\n'), ((3554, 3566), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (3564, 3566), True, 'from astropy import units as u\n'), ((3646, 3658), 'astropy.units.spectral', 'u.spectral', ([], {}), '()\n', (3656, 3658), True, 'from astropy import units as u\n'), ((10407, 10435), 'os.path.expanduser', 'os.path.expanduser', (['datapath'], {}), '(datapath)\n', (10425, 10435), False, 'import os\n'), ((12769, 12931), 'astropy.log.warn', 'log.warn', (['"""`total_density` was specified, but `collider_densities` was used instead. Set `collider_densities=None` if you want to use `total_density`."""'], {}), "(\n '`total_density` was specified, but `collider_densities` was used instead. 
Set `collider_densities=None` if you want to use `total_density`.'\n )\n", (12777, 12931), False, 'from astropy import log\n'), ((18743, 18786), 'astropy.log.warn', 'log.warn', (['"""Assuming the density is n(H_2)."""'], {}), "('Assuming the density is n(H_2).')\n", (18751, 18786), False, 'from astropy import log\n'), ((18984, 19027), 'astropy.units.Quantity', 'u.Quantity', (['collider_density[k]', 'self._u_cc'], {}), '(collider_density[k], self._u_cc)\n', (18994, 19027), True, 'from astropy import units as u\n'), ((20310, 20466), 'warnings.warn', 'warnings.warn', (['"""Using a default ortho-to-para ratio (which will only affect species for which independent ortho & para collision rates are given)"""'], {}), "(\n 'Using a default ortho-to-para ratio (which will only affect species for which independent ortho & para collision rates are given)'\n )\n", (20323, 20466), False, 'import warnings\n'), ((23337, 23375), 'numpy.allclose', 'np.allclose', (['invab', '(1 / self.abundance)'], {}), '(invab, 1 / self.abundance)\n', (23348, 23375), True, 'import numpy as np\n'), ((29196, 29217), 'astropy.units.Quantity', 'u.Quantity', (['tkin', 'u.K'], {}), '(tkin, u.K)\n', (29206, 29217), True, 'from astropy import units as u\n'), ((30648, 30675), 'astropy.units.Quantity', 'u.Quantity', (['col', 'self._u_sc'], {}), '(col, self._u_sc)\n', (30658, 30675), True, 'from astropy import units as u\n'), ((32109, 32147), 'numpy.allclose', 'np.allclose', (['invab', '(1 / self.abundance)'], {}), '(invab, 1 / self.abundance)\n', (32120, 32147), True, 'import numpy as np\n'), ((32498, 32534), 'astropy.units.Quantity', 'u.Quantity', (['self.deltav', 'self._u_kms'], {}), '(self.deltav, self._u_kms)\n', (32508, 32534), True, 'from astropy import units as u\n'), ((33780, 33818), 'numpy.allclose', 'np.allclose', (['invab', '(1 / self.abundance)'], {}), '(invab, 1 / self.abundance)\n', (33791, 33818), True, 'import numpy as np\n'), ((34674, 34694), 'astropy.units.Quantity', 'u.Quantity', (['tbg', 
'u.K'], {}), '(tbg, u.K)\n', (34684, 34694), True, 'from astropy import units as u\n'), ((33473, 33500), 'astropy.units.Quantity', 'u.Quantity', (['col', '(u.cm ** -2)'], {}), '(col, u.cm ** -2)\n', (33483, 33500), True, 'from astropy import units as u\n'), ((40009, 40021), 'numpy.exp', 'np.exp', (['earg'], {}), '(earg)\n', (40015, 40021), True, 'import numpy as np\n'), ((40444, 40456), 'numpy.exp', 'np.exp', (['earg'], {}), '(earg)\n', (40450, 40456), True, 'import numpy as np\n'), ((42921, 42946), 'numpy.exp', 'np.exp', (['(-Ei / temperature)'], {}), '(-Ei / temperature)\n', (42927, 42946), True, 'import numpy as np\n'), ((46278, 46290), 'numpy.exp', 'np.exp', (['earg'], {}), '(earg)\n', (46284, 46290), True, 'import numpy as np\n'), ((45647, 45676), 'astropy.units.Quantity', 'u.Quantity', (['(1e+20)', '(u.cm ** -2)'], {}), '(1e+20, u.cm ** -2)\n', (45657, 45676), True, 'from astropy import units as u\n'), ((20776, 20794), 'numpy.exp', 'np.exp', (['(-170.6 / T)'], {}), '(-170.6 / T)\n', (20782, 20794), True, 'import numpy as np\n'), ((45829, 45852), 'numpy.zeros_like', 'np.zeros_like', (['line_ids'], {}), '(line_ids)\n', (45842, 45852), True, 'import numpy as np\n'), ((45758, 45781), 'numpy.zeros_like', 'np.zeros_like', (['line_ids'], {}), '(line_ids)\n', (45771, 45781), True, 'import numpy as np\n')] |
"""
Building
========
The building module contains functions related to building acoustics.
"""
from __future__ import division
import numpy as np
#from acoustics.utils import w
def rw_curve(tl):
    """
    Shifted reference contour for :math:`R_w` from a NumPy array `tl` of
    third-octave transmission-loss data between 100 Hz and 3.15 kHz
    (16 bands).

    The reference contour (cf. ISO 717-1) is raised in 1 dB steps until the
    sum of unfavourable deviations first reaches 32 dB, then backed off one
    step.

    :param tl: Transmission Loss
    """
    reference = np.array([0, 3, 6, 9, 12, 15, 18, 19, 20, 21, 22, 23, 23, 23,
                          23, 23])
    while True:
        reference += 1
        shortfall = tl - reference
        unfavourable = np.sum(np.clip(shortfall, np.min(shortfall), 0))
        if unfavourable <= -32:
            break
    reference -= 1
    return reference
def rw(tl):
    """
    Weighted sound reduction index :math:`R_W` -- the shifted reference
    curve evaluated at index 7 (the 500 Hz band) -- from a NumPy array `tl`
    with third-octave data between 100 Hz and 3.15 kHz.

    :param tl: Transmission Loss
    """
    curve = rw_curve(tl)
    return curve[7]
def rw_c(tl):
    """
    Single-number rating :math:`R_W + C` (spectrum adaptation term C) from a
    NumPy array `tl` with third-octave data between 100 Hz and 3.15 kHz.

    :param tl: Transmission Loss
    """
    spectrum = np.array([-29, -26, -23, -21, -19, -17, -15, -13, -12, -11,
                         -10, -9, -9, -9, -9, -9])
    return -10 * np.log10(np.sum(10 ** ((spectrum - tl) / 10)))
def rw_ctr(tl):
    """
    Single-number rating :math:`R_W + C_{tr}` (traffic-noise spectrum
    adaptation term) from a NumPy array `tl` with third-octave data between
    100 Hz and 3.15 kHz.

    :param tl: Transmission Loss
    """
    spectrum = np.array([-20, -20, -18, -16, -15, -14, -13, -12, -11, -9,
                         -8, -9, -10, -11, -13, -15])
    return -10 * np.log10(np.sum(10 ** ((spectrum - tl) / 10)))
def stc_curve(tl):
    """
    Sound Transmission Class (STC) contour fitted to a NumPy array `tl` of
    third-octave transmission-loss data between 125 Hz and 4 kHz (16 bands).

    The reference contour is raised in 1 dB steps until the total deficiency
    first exceeds 32 dB; it is then lowered one step unless every band is
    already deficient by 8 dB or more.

    :param tl: Transmission Loss
    """
    contour = np.array([0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20,
                        20, 20, 20])
    while True:
        gap = tl - contour
        deficiency = np.clip(gap, np.min(gap), 0)
        total_deficiency = np.sum(deficiency)
        if total_deficiency < -32:
            if np.any(deficiency > -8):
                contour -= 1
            break
        contour += 1
    return contour
def stc(tl):
    """
    Sound Transmission Class (STC) -- the fitted contour evaluated at
    index 6 (the 500 Hz band) -- from a NumPy array `tl` with third-octave
    data between 125 Hz and 4 kHz.

    :param tl: Transmission Loss
    """
    contour = stc_curve(tl)
    return contour[6]
def mass_law(freq, vol_density, thickness, theta=0, c=343, rho0=1.225):
    """Transmission loss of a single wall according to the mass law.

    :param freq: Frequency of interest in Hz.
    :type freq: `float` or `NumPy array`
    :param vol_density: Volumetric density of the material in [kg/m^3].
    :type vol_density: `float`
    :param thickness: Thickness of the wall in [m].
    :type thickness: `float`
    :param theta: Angle of incidence in degrees; `0` means normal incidence.
    :type theta: `float`
    :param c: Speed of sound in [m/s].
    :type c: `float`
    :param rho0: Density of air in [kg/m^3].
    :type rho0: `float`
    """
    omega = 2.0 * np.pi * freq
    surface_mass = vol_density * thickness
    incidence = np.deg2rad(theta)
    # Ratio of the wall's inertial impedance to the air impedance.
    impedance_ratio = omega * surface_mass * np.cos(incidence) / (2 * rho0 * c)
    return 10 * np.log10(1 + impedance_ratio ** 2)
| [
"numpy.log10",
"numpy.any",
"numpy.array",
"numpy.deg2rad",
"numpy.sum",
"numpy.cos",
"numpy.min"
] | [((395, 465), 'numpy.array', 'np.array', (['[0, 3, 6, 9, 12, 15, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23]'], {}), '([0, 3, 6, 9, 12, 15, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23])\n', (403, 465), True, 'import numpy as np\n'), ((1078, 1167), 'numpy.array', 'np.array', (['[-29, -26, -23, -21, -19, -17, -15, -13, -12, -11, -10, -9, -9, -9, -9, -9]'], {}), '([-29, -26, -23, -21, -19, -17, -15, -13, -12, -11, -10, -9, -9, -9,\n -9, -9])\n', (1086, 1167), True, 'import numpy as np\n'), ((1443, 1534), 'numpy.array', 'np.array', (['[-20, -20, -18, -16, -15, -14, -13, -12, -11, -9, -8, -9, -10, -11, -13, -15]'], {}), '([-20, -20, -18, -16, -15, -14, -13, -12, -11, -9, -8, -9, -10, -11,\n -13, -15])\n', (1451, 1534), True, 'import numpy as np\n'), ((1847, 1917), 'numpy.array', 'np.array', (['[0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20]'], {}), '([0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20])\n', (1855, 1917), True, 'import numpy as np\n'), ((3246, 3263), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (3256, 3263), True, 'import numpy as np\n'), ((2103, 2120), 'numpy.sum', 'np.sum', (['residuals'], {}), '(residuals)\n', (2109, 2120), True, 'import numpy as np\n'), ((3356, 3376), 'numpy.log10', 'np.log10', (['(1 + a ** 2)'], {}), '(1 + a ** 2)\n', (3364, 3376), True, 'import numpy as np\n'), ((1205, 1234), 'numpy.sum', 'np.sum', (['(10 ** ((k - tl) / 10))'], {}), '(10 ** ((k - tl) / 10))\n', (1211, 1234), True, 'import numpy as np\n'), ((1578, 1610), 'numpy.sum', 'np.sum', (['(10 ** ((k_tr - tl) / 10))'], {}), '(10 ** ((k_tr - tl) / 10))\n', (1584, 1610), True, 'import numpy as np\n'), ((2068, 2080), 'numpy.min', 'np.min', (['diff'], {}), '(diff)\n', (2074, 2080), True, 'import numpy as np\n'), ((2162, 2184), 'numpy.any', 'np.any', (['(residuals > -8)'], {}), '(residuals > -8)\n', (2168, 2184), True, 'import numpy as np\n'), ((3301, 3318), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (3307, 3318), True, 'import 
numpy as np\n'), ((631, 643), 'numpy.min', 'np.min', (['diff'], {}), '(diff)\n', (637, 643), True, 'import numpy as np\n')] |
import numpy as np
from collections import defaultdict
from .loss import compute_rre, compute_rte
class Logger:
    """Accumulates scalar metrics keyed by name and reports aggregates.

    Keys containing ``"fail"`` are treated as counters (summed); all other
    keys are averaged.
    """

    def __init__(self):
        self.store = defaultdict(list)

    def reset(self):
        """Drop all accumulated values."""
        self.store = defaultdict(list)

    def add(self, key, value):
        """Append one observation for *key*."""
        self.store[key].append(value)

    def avg(self, key):
        """Mean of all values recorded under *key*."""
        return np.mean(self.store[key])

    def save_sacred(self, ex, it):
        """Log every aggregate to a sacred experiment at iteration *it*."""
        for key, values in self.store.items():
            aggregate = np.sum(values) if "fail" in key else np.mean(values)
            ex.log_scalar(key, aggregate, it)

    def show(self):
        """Pretty-print all aggregates to stdout."""
        print("\n========================")
        for key, values in self.store.items():
            if "fail" in key:
                print(key, np.sum(values))
            else:
                print(key, np.mean(values))
            # Also report the spread for accuracy/count-style metrics.
            if "attn_acc" in key or "count" in key:
                print("min " + key + ": ", np.min(values))
                print("max " + key + ": ", np.max(values))
        print("========================\n")
def save_metrics(logger, prefix, R, t, R_est, t_est, te_thres=0.6, re_thres=5):
    """Compute rotation/translation errors and record recall/failure metrics.

    Parameters
    ----------
    logger : Logger
        Metric accumulator the results are written into.
    prefix : str
        Namespace prepended to every metric key (e.g. ``"val"``).
    R, t : arrays
        Ground-truth rotations (batched along axis 0) and translations.
    R_est, t_est : arrays
        Estimated rotations and translations.
    te_thres : float
        Translation-error threshold below which a result counts as success.
    re_thres : float
        Rotation-error threshold below which a result counts as success.
    """
    bs = R.shape[0]
    rot_error = compute_rre(R_est, R)
    trans_error = compute_rte(t.reshape(bs, -1), t_est.reshape(bs, -1))
    logger.add(prefix + ".rte_all", trans_error)
    logger.add(prefix + ".rre_all", rot_error)
    if rot_error < re_thres and trans_error < te_thres:
        # Success: both errors under their thresholds.
        logger.add(prefix + ".recall", 1)
        logger.add(prefix + ".rte", trans_error)
        logger.add(prefix + ".rre", rot_error)
    else:
        # Classify the failure mode before recording the miss.
        if rot_error > re_thres and trans_error > te_thres:
            logger.add(prefix + ".fail_both", 1)
        elif rot_error > re_thres:
            # BUGFIX: was a hard-coded `5`, which disagreed with a
            # caller-supplied `re_thres`; behavior is unchanged at the default.
            logger.add(prefix + ".fail_rot", 1)
        else:
            logger.add(prefix + ".fail_trans", 1)
        logger.add(prefix + ".recall", 0)
| [
"numpy.mean",
"numpy.max",
"numpy.sum",
"collections.defaultdict",
"numpy.min"
] | [((168, 185), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (179, 185), False, 'from collections import defaultdict\n'), ((232, 249), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (243, 249), False, 'from collections import defaultdict\n'), ((370, 394), 'numpy.mean', 'np.mean', (['self.store[key]'], {}), '(self.store[key])\n', (377, 394), True, 'import numpy as np\n'), ((537, 546), 'numpy.sum', 'np.sum', (['v'], {}), '(v)\n', (543, 546), True, 'import numpy as np\n'), ((605, 615), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (612, 615), True, 'import numpy as np\n'), ((803, 812), 'numpy.sum', 'np.sum', (['v'], {}), '(v)\n', (809, 812), True, 'import numpy as np\n'), ((859, 869), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (866, 869), True, 'import numpy as np\n'), ((980, 989), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (986, 989), True, 'import numpy as np\n'), ((1033, 1042), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (1039, 1042), True, 'import numpy as np\n')] |
"""MIT License
Copyright (c) 2019, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Note: some code snippets in this file are inspired by implementations from
<NAME>, as provided for the purpose of this hackathon.
"""
import stuett
from stuett.global_config import get_setting, setting_exists
import argparse
from pathlib import Path
from datetime import datetime, date, timedelta
import plotly.graph_objects as go
import pandas as pd
import xarray as xr
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import torch
import io
class PermaRegressionDataset(Dataset):
    """A dataset that maps images and meta information (such as radiation,
    surface temperature, ...) onto the temperature below surface.

    Each sample pairs a timelapse image with sensor readings captured within
    a 20-minute window around the image timestamp; the regression target is
    the rock temperature 10 cm below the surface.
    """

    def __init__(self, local, data_path='../data', transform=None,
                 time_slice=None):
        """
        Args:
            local (bool): Whether to read the dataset from a local storage
                location or from a public Azure share.
            data_path (str, optional): If the data should be read from a local
                location, then this folder will denote the location of the
                dataset.
            transform (callable, optional): Optional transform to be applied
                on images. Not implemented yet.
            time_slice (dict, optional): Can be used to create a different
                train and test set. Defaults to the whole year 2017.
                Note, this is not a pretty solution, especially because
                time values are not interleaved. I.e., if time information is
                used as input to a network, but the network has never seen
                values from the corresponding month, then it can't make
                confident predictions.
        """
        if time_slice is None:
            # Build the default here rather than using a mutable default
            # argument (which would be shared across instantiations).
            time_slice = {"start_time": "2017-01-01",
                          "end_time": "2017-12-31"}
        if transform is not None:
            raise NotImplementedError("transform not implemented!")
        self.transform = transform

        # This sensor contains near-surface temperature readings and is on the
        # south side and therefore receives a lot of sunshine.
        rock_temperature_file_mh10 = "MH10_temperature_rock_2017.csv" # South

        radiation_file = "MH15_radiometer__conv_2017.csv"

        if not local:
            account_name = (
                get_setting("azure")["account_name"]
                if setting_exists("azure")
                else "storageaccountperma8980"
            )
            account_key = (
                get_setting("azure")["account_key"] if setting_exists(
                    "azure") else None
            )
            ts_store = stuett.ABSStore(
                container="hackathon-on-permafrost",
                prefix="timeseries_derived_data_products",
                account_name=account_name,
                account_key=account_key,
            )
            img_store = stuett.ABSStore(
                container="hackathon-on-permafrost",
                prefix="timelapse_images_fast",
                account_name=account_name,
                account_key=account_key,
            )
        else:
            timeseries_folder = Path(data_path).joinpath(
                "timeseries_derived_data_products").resolve()
            ts_store = stuett.DirectoryStore(timeseries_folder)
            # BUGFIX: these sanity checks referenced the undefined name
            # `store`, which raised a NameError whenever local=True.
            if rock_temperature_file_mh10 not in ts_store:
                raise RuntimeError('Please provide a valid path to the ' +
                                   'permafrost data!')
            img_store = stuett.DirectoryStore(Path(data_path).joinpath( \
                'timelapse_images_fast'))
            if "2017-01-01/20170101_080018.JPG" not in img_store:
                raise RuntimeError('Please provide a valid path to the ' +
                                   'permafrost images.')

        #self._ts_store = ts_store
        self._img_store = img_store

        ### Load timeseries data.
        rock_temperature_node_mh10 = stuett.data.CsvSource(
            rock_temperature_file_mh10, store=ts_store)
        rock_temp_mh10 = rock_temperature_node_mh10(time_slice)

        radiation_node = stuett.data.CsvSource(radiation_file, store=ts_store)
        radiation = radiation_node(time_slice)

        net_radiation = radiation.loc[:, ['net_radiation']]
        surface_temp = rock_temp_mh10.loc[:, ['temperature_nearsurface_t2']]
        target_temp = rock_temp_mh10.loc[:, ['temperature_10cm']]

        ### Load image filenames.
        image_node = stuett.data.MHDSLRFilenames(
            store=img_store,
            force_write_to_remote=True,
            as_pandas=False,
        )
        image_fns = image_node(time_slice)

        ### Find image filenames that were captured close to temperature
        ### measures.
        # With close we mean within a 20min window.
        # Temperature/radiation values that have no corresponding image are
        # ignored.

        # Sanity check!
        #for t1, t2 in zip(radiation['time'], rock_temp_mh10['time']):
        #    assert (t1 == t2)

        j = 0
        n = len(image_fns['time'])
        measurement_pairs = []
        for i, t in enumerate(rock_temp_mh10['time'].values):
            while j < n:
                # Translate difference in timestamps to minutes before casting
                # to int. Note: use builtin `int` — `np.int` was removed in
                # NumPy 1.24 (it was a deprecated alias for `int`).
                diff = (image_fns['time'][j] - t).values.astype( \
                    'timedelta64[m]').astype(int)
                if diff > 10:
                    # Image too far in the future, ignore sensor value.
                    break
                absdiff = np.abs(diff)
                if absdiff < 10:
                    # The image is very close, simply check whether the next
                    # picture is even closer. Otherwise, we take the current
                    # image.
                    if j + 1 < n:
                        absdiff2 = np.abs(
                            (image_fns['time'][j + 1] - t).values.astype(
                                'timedelta64[m]').astype(int))
                    else:
                        absdiff2 = None
                    if absdiff2 is None or absdiff < absdiff2:
                        measurement_pairs.append((i, j))
                        j += 1
                    else:
                        measurement_pairs.append((i, j + 1))
                        j += 2
                    break
                j += 1

        ### Build dataset (make sure that there are no None values in the
        ### timeseries measurements).
        self._img_fns = []
        self._surface_temp = []
        self._target_temp = []
        self._timestamps = []
        self._radiation = []
        # This is coarse time information that one may provide as additional
        # information. We encode the (normalized) month and daytime
        # information, as this information may be quite helpful when judging
        # temperature values.
        # Though, it might also tempt the regression system to ignore all
        # other information and solely predict based on this information
        # (as a strong local minimum).
        self._month = []
        self._daytime = []
        assert(np.all(~np.isnan(net_radiation.values)))
        assert(np.all(~np.isnan(surface_temp.values)))
        #assert(np.all(~np.isnan(target_temp.values)))
        for i, j in measurement_pairs:
            # Skip samples for which the regression target is missing.
            if np.any(np.isnan(target_temp.values[i, 0])):
                continue

            self._target_temp.append(target_temp.values[i, 0])
            self._surface_temp.append(surface_temp.values[i, 0])
            self._radiation.append(net_radiation.values[i, 0])
            self._timestamps.append(target_temp['time'].values[i])
            ts = pd.to_datetime(self._timestamps[-1])
            self._month.append(ts.month)
            self._daytime.append(ts.hour*60 + ts.minute)
            self._img_fns.append(str(image_fns.values[0, j]))

        self._target_temp = np.array(self._target_temp, dtype=np.float32)
        self._surface_temp = np.array(self._surface_temp, dtype=np.float32)
        self._radiation = np.array(self._radiation, dtype=np.float32)
        self._month = np.array(self._month, dtype=np.float32)
        self._daytime = np.array(self._daytime, dtype=np.float32)

        # Normalize regression values (z-score); the statistics are kept as
        # attributes so predictions can be de-normalized by the caller.
        self.target_temp_mean = self._target_temp.mean()
        self.target_temp_std = self._target_temp.std()
        self.surface_temp_mean = self._surface_temp.mean()
        self.surface_temp_std = self._surface_temp.std()
        self.radiation_mean = self._radiation.mean()
        self.radiation_std = self._radiation.std()

        self._target_temp = (self._target_temp - self.target_temp_mean) / \
            self.target_temp_std
        self._surface_temp = (self._surface_temp - self.surface_temp_mean) / \
            self.surface_temp_std
        self._radiation = (self._radiation - self.radiation_mean) / \
            self.radiation_std
        self._month = (self._month - self._month.mean()) / self._month.std()
        # BUGFIX: the daytime feature was previously normalized from
        # `self._month` instead of `self._daytime`.
        self._daytime = (self._daytime - self._daytime.mean()) / \
            self._daytime.std()

        print('dataset contains %d samples.' % len(self._img_fns))

    def __len__(self):
        """Number of (image, measurement) samples in the dataset."""
        return len(self._img_fns)

    def __getitem__(self, idx):
        """Return the sample at position ``idx``.

        Returns:
            dict: The image as a float32 CHW array under ``'img'``, the
            normalized scalar features/target each reshaped to (1, 1), and
            the integer sample index under ``'idx'``.
        """
        if torch.is_tensor(idx):
            idx = idx.tolist()

        if isinstance(idx, list):
            # TODO read multiple images
            raise NotImplementedError()
        else:
            img = Image.open(io.BytesIO(self._img_store[self._img_fns[idx]]))
            img = img.rotate(90, expand=1)
            data = np.array(img.convert('RGB')).transpose([2, 0, 1])
            data = data.astype(np.float32)

        sample = {
            'img': data,
            'surface_temp': self._surface_temp[idx].reshape(-1, 1),
            'target_temp': self._target_temp[idx].reshape(-1, 1),
            'radiation': self._radiation[idx].reshape(-1, 1),
            'month': self._month[idx].reshape(-1, 1),
            'daytime': self._daytime[idx].reshape(-1, 1),
            # Just for the user, not meant to be used as input to a neural
            # net; the raw timestamp is available via self._timestamps[idx].
            'idx': idx
        }

        return sample
"stuett.data.MHDSLRFilenames",
"numpy.abs",
"stuett.DirectoryStore",
"pathlib.Path",
"stuett.global_config.get_setting",
"io.BytesIO",
"stuett.ABSStore",
"stuett.global_config.setting_exists",
"numpy.array",
"torch.is_tensor",
"numpy.isnan",
"stuett.data.CsvSource",
"pandas.to_datetime"
] | [((4891, 4956), 'stuett.data.CsvSource', 'stuett.data.CsvSource', (['rock_temperature_file_mh10'], {'store': 'ts_store'}), '(rock_temperature_file_mh10, store=ts_store)\n', (4912, 4956), False, 'import stuett\n'), ((5060, 5113), 'stuett.data.CsvSource', 'stuett.data.CsvSource', (['radiation_file'], {'store': 'ts_store'}), '(radiation_file, store=ts_store)\n', (5081, 5113), False, 'import stuett\n'), ((5421, 5514), 'stuett.data.MHDSLRFilenames', 'stuett.data.MHDSLRFilenames', ([], {'store': 'img_store', 'force_write_to_remote': '(True)', 'as_pandas': '(False)'}), '(store=img_store, force_write_to_remote=True,\n as_pandas=False)\n', (5448, 5514), False, 'import stuett\n'), ((8904, 8949), 'numpy.array', 'np.array', (['self._target_temp'], {'dtype': 'np.float32'}), '(self._target_temp, dtype=np.float32)\n', (8912, 8949), True, 'import numpy as np\n'), ((8979, 9025), 'numpy.array', 'np.array', (['self._surface_temp'], {'dtype': 'np.float32'}), '(self._surface_temp, dtype=np.float32)\n', (8987, 9025), True, 'import numpy as np\n'), ((9052, 9095), 'numpy.array', 'np.array', (['self._radiation'], {'dtype': 'np.float32'}), '(self._radiation, dtype=np.float32)\n', (9060, 9095), True, 'import numpy as np\n'), ((9119, 9158), 'numpy.array', 'np.array', (['self._month'], {'dtype': 'np.float32'}), '(self._month, dtype=np.float32)\n', (9127, 9158), True, 'import numpy as np\n'), ((9183, 9224), 'numpy.array', 'np.array', (['self._daytime'], {'dtype': 'np.float32'}), '(self._daytime, dtype=np.float32)\n', (9191, 9224), True, 'import numpy as np\n'), ((10270, 10290), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (10285, 10290), False, 'import torch\n'), ((3583, 3739), 'stuett.ABSStore', 'stuett.ABSStore', ([], {'container': '"""hackathon-on-permafrost"""', 'prefix': '"""timeseries_derived_data_products"""', 'account_name': 'account_name', 'account_key': 'account_key'}), "(container='hackathon-on-permafrost', prefix=\n 'timeseries_derived_data_products', 
account_name=account_name,\n account_key=account_key)\n", (3598, 3739), False, 'import stuett\n'), ((3835, 3981), 'stuett.ABSStore', 'stuett.ABSStore', ([], {'container': '"""hackathon-on-permafrost"""', 'prefix': '"""timelapse_images_fast"""', 'account_name': 'account_name', 'account_key': 'account_key'}), "(container='hackathon-on-permafrost', prefix=\n 'timelapse_images_fast', account_name=account_name, account_key=account_key\n )\n", (3850, 3981), False, 'import stuett\n'), ((4209, 4249), 'stuett.DirectoryStore', 'stuett.DirectoryStore', (['timeseries_folder'], {}), '(timeseries_folder)\n', (4230, 4249), False, 'import stuett\n'), ((8677, 8713), 'pandas.to_datetime', 'pd.to_datetime', (['self._timestamps[-1]'], {}), '(self._timestamps[-1])\n', (8691, 8713), True, 'import pandas as pd\n'), ((3322, 3345), 'stuett.global_config.setting_exists', 'setting_exists', (['"""azure"""'], {}), "('azure')\n", (3336, 3345), False, 'from stuett.global_config import get_setting, setting_exists\n'), ((3490, 3513), 'stuett.global_config.setting_exists', 'setting_exists', (['"""azure"""'], {}), "('azure')\n", (3504, 3513), False, 'from stuett.global_config import get_setting, setting_exists\n'), ((6523, 6535), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (6529, 6535), True, 'import numpy as np\n'), ((8133, 8163), 'numpy.isnan', 'np.isnan', (['net_radiation.values'], {}), '(net_radiation.values)\n', (8141, 8163), True, 'import numpy as np\n'), ((8189, 8218), 'numpy.isnan', 'np.isnan', (['surface_temp.values'], {}), '(surface_temp.values)\n', (8197, 8218), True, 'import numpy as np\n'), ((8338, 8372), 'numpy.isnan', 'np.isnan', (['target_temp.values[i, 0]'], {}), '(target_temp.values[i, 0])\n', (8346, 8372), True, 'import numpy as np\n'), ((10481, 10528), 'io.BytesIO', 'io.BytesIO', (['self._img_store[self._img_fns[idx]]'], {}), '(self._img_store[self._img_fns[idx]])\n', (10491, 10528), False, 'import io\n'), ((3266, 3286), 'stuett.global_config.get_setting', 'get_setting', 
(['"""azure"""'], {}), "('azure')\n", (3277, 3286), False, 'from stuett.global_config import get_setting, setting_exists\n'), ((3451, 3471), 'stuett.global_config.get_setting', 'get_setting', (['"""azure"""'], {}), "('azure')\n", (3462, 3471), False, 'from stuett.global_config import get_setting, setting_exists\n'), ((4483, 4498), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (4487, 4498), False, 'from pathlib import Path\n'), ((4098, 4113), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (4102, 4113), False, 'from pathlib import Path\n')] |
import numpy as np
import torch
import math
def TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams, tau=100, sigma=200, K=10, alpha=0.2):
"""Function for the Temporal Lifting (TLift) method
TLift is a model-free temporal cooccurrence based score weighting method proposed in
<NAME> and <NAME>, "Interpretable and Generalizable Person Re-Identification with Query-Adaptive
Convolution and Temporal Lifting." In The European Conference on Computer Vision (ECCV), 23-28 August, 2020.
Inputs:
in_score: the similarity score of size [num_probs, num_gals] between the gallery and probe sets.
gal_cam_id: camera index for samples in the gallery set, starting from 0 and continuously numbered.
gal_time: time stamps of samples in the gallery set.
prob_cam_id: camera index for samples in the probe set, starting from 0 and continuously numbered.
prob_time: time stamps of samples in the probe set.
num_cams: the number of cameras.
tau: the interval threshold to define nearby persons. Default: 100.
sigma: the sensitivity parameter of the time difference. Default: 200.
K: parameter of the top K retrievals used to define the pivot set P. Default: 10.
alpha: regularizer for the multiplication fusion. Default: 0.2.
All the cam_id and time inputs are 1-dim vectors, and they are in the same order corresponding to
the first axis (probe) or second axis (gallery) of the in_score.
Outputs:
out_score: the refined score by TLift, with the same size as the in_score.
Comments:
The default alpha value works for the sigmoid or re-ranking matching scores. Otherwise, it is
suggested that your input scores are distributed in [0, 1], with an average score around 0.01-0.1
considering many negative matching pairs. To apply TLift directly on QAConv scores, please use
score = torch.sigmoid(score) instead of the scaled scores in qaconv.py.
Author:
<NAME>, reimplemented by <NAME>
<EMAIL>
Version:
V1.1
July 12, 2020
"""
out_score = torch.tensor(np.zeros_like(in_score))
if torch.cuda.is_available():
out_score = out_score.cuda()
if len(prob_time.shape) == 1:
prob_time = prob_time[np.newaxis, :]
prob_time_diff = prob_time - np.transpose(prob_time)
cooccur_mask = (abs(prob_time_diff) < tau)
g_sam_index = []
score = []
gal_time_diff = []
for g_cam in range(num_cams):
g_sam_index.append(np.where(gal_cam_id == g_cam)[0]) # camera id starting with 0.
score.append(in_score[:, g_sam_index[g_cam]])
frame_id = gal_time[g_sam_index[g_cam]]
if len(frame_id.shape) == 1:
frame_id = frame_id[np.newaxis, :]
gal_time_diff.append(
torch.tensor(frame_id - np.transpose(frame_id), dtype=out_score.dtype).to(out_score.device))
for p_cam in range(num_cams):
p_sam_index = np.where(prob_cam_id == p_cam)[0]
c_mask = cooccur_mask[p_sam_index][:, p_sam_index]
num_prob = len(p_sam_index)
for g_cam in range(num_cams):
# if p_cam == g_cam: # in some public datasets they still evaluate negative pairs in the same camera
# continue
prob_score = score[g_cam][p_sam_index, :]
for i in range(num_prob):
cooccur_index = np.where(c_mask[:, i] == True)[0]
cooccur_score = prob_score[cooccur_index, :]
sorted_score = np.sort(cooccur_score, axis=None)
if sorted_score.shape[0] > K:
thr = sorted_score[-K]
else:
thr = sorted_score[0]
mask_in_gal = np.where(cooccur_score >= thr)[1]
dt = gal_time_diff[g_cam][:, mask_in_gal]
weight = torch.mean(torch.exp(-1 * torch.pow(dt, 2).to(dtype=out_score.dtype) / math.pow(sigma, 2)),
dim=1)
out_score[p_sam_index[i], g_sam_index[g_cam]] = weight
out_score = out_score.cpu().numpy()
out_score = (out_score + alpha) * in_score
return out_score
if __name__ == '__main__':
    # Quick smoke test: random similarity scores plus random camera/time
    # metadata for a 5-camera setup.
    in_score = np.random.randn(50, 100)
    gal_cam_id = np.random.randint(0, 5, 100)
    gal_time = np.random.randint(0, 20, 100)
    prob_cam_id = np.random.randint(0, 5, 50)
    prob_time = np.random.randint(0, 20, 50)
    TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams=5)
| [
"numpy.where",
"math.pow",
"numpy.sort",
"numpy.zeros_like",
"torch.pow",
"numpy.random.randint",
"torch.cuda.is_available",
"numpy.transpose",
"numpy.random.randn"
] | [((2196, 2221), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2219, 2221), False, 'import torch\n'), ((4259, 4283), 'numpy.random.randn', 'np.random.randn', (['(50)', '(100)'], {}), '(50, 100)\n', (4274, 4283), True, 'import numpy as np\n'), ((4301, 4329), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (4318, 4329), True, 'import numpy as np\n'), ((4347, 4376), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)', '(100)'], {}), '(0, 20, 100)\n', (4364, 4376), True, 'import numpy as np\n'), ((4397, 4424), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(50)'], {}), '(0, 5, 50)\n', (4414, 4424), True, 'import numpy as np\n'), ((4443, 4471), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)', '(50)'], {}), '(0, 20, 50)\n', (4460, 4471), True, 'import numpy as np\n'), ((2164, 2187), 'numpy.zeros_like', 'np.zeros_like', (['in_score'], {}), '(in_score)\n', (2177, 2187), True, 'import numpy as np\n'), ((2373, 2396), 'numpy.transpose', 'np.transpose', (['prob_time'], {}), '(prob_time)\n', (2385, 2396), True, 'import numpy as np\n'), ((3008, 3038), 'numpy.where', 'np.where', (['(prob_cam_id == p_cam)'], {}), '(prob_cam_id == p_cam)\n', (3016, 3038), True, 'import numpy as np\n'), ((2566, 2595), 'numpy.where', 'np.where', (['(gal_cam_id == g_cam)'], {}), '(gal_cam_id == g_cam)\n', (2574, 2595), True, 'import numpy as np\n'), ((3566, 3599), 'numpy.sort', 'np.sort', (['cooccur_score'], {'axis': 'None'}), '(cooccur_score, axis=None)\n', (3573, 3599), True, 'import numpy as np\n'), ((3440, 3470), 'numpy.where', 'np.where', (['(c_mask[:, i] == True)'], {}), '(c_mask[:, i] == True)\n', (3448, 3470), True, 'import numpy as np\n'), ((3783, 3813), 'numpy.where', 'np.where', (['(cooccur_score >= thr)'], {}), '(cooccur_score >= thr)\n', (3791, 3813), True, 'import numpy as np\n'), ((2882, 2904), 'numpy.transpose', 'np.transpose', (['frame_id'], {}), '(frame_id)\n', (2894, 
2904), True, 'import numpy as np\n'), ((3971, 3989), 'math.pow', 'math.pow', (['sigma', '(2)'], {}), '(sigma, 2)\n', (3979, 3989), False, 'import math\n'), ((3926, 3942), 'torch.pow', 'torch.pow', (['dt', '(2)'], {}), '(dt, 2)\n', (3935, 3942), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import gc
import copy
import numpy as np
import pandas as pd
try:
import geopandas as gpd
import shapely.geometry
GEOPANDAS_INSTALLED = True
except ImportError:
GEOPANDAS_INSTALLED = False
from pandapower.auxiliary import get_indices
import pandapower as pp
import pandapower.networks
import pandapower.control
import pandapower.timeseries
class MemoryLeakDemo:
    """Dummy helper used to demonstrate memory leaks.

    Storing ``self`` inside a DataFrame owned by the net creates a reference
    cycle net -> DataFrame -> instance -> net.
    """

    def __init__(self, net):
        self.net = net
        # Interestingly, keeping "self" merely as a plain attribute of the net
        # causes no problems; the leak only appears once the instance is
        # stored inside a DataFrame.
        net['memory_leak_demo'] = pd.DataFrame({'object': [self]})
class MemoryLeakDemoDF:
    """Dummy helper used to demonstrate memory leaks.

    Writing ``self`` into a cell of the DataFrame it holds creates a
    reference cycle df -> instance -> df.
    """

    def __init__(self, df):
        self.df = df
        # Storing "self" inside the DataFrame is what causes the leak.
        df.loc[0, 'object'] = self
class MemoryLeakDemoDict:
    """Dummy helper used to demonstrate memory leaks.

    Storing ``self`` inside the dict it holds creates a reference cycle
    d -> instance -> d (which, unlike the DataFrame case, the gc handles).
    """

    def __init__(self, d):
        self.d = d
        d['object'] = self
def test_get_indices():
    """get_indices must translate between external bus indices and internal
    positions in both directions, with and without fused buses."""
    ext_ids = [100 + i for i in range(10)]
    lookup = {idx: pos for pos, idx in enumerate(ext_ids)}
    lookup["before_fuse"] = ext_ids
    # Without fused buses the mapping is a plain bijection.
    assert np.array_equal(get_indices([102, 107], lookup, fused_indices=True), [2, 7])
    assert np.array_equal(get_indices([2, 7], lookup, fused_indices=False), [102, 107])
    # Same setup EXCEPT buses 102 and 107 are now fused: both resolve to
    # internal position 2 after fusing ...
    lookup[107] = lookup[102]
    assert np.array_equal(get_indices([102, 107], lookup, fused_indices=True), [2, 2])
    # ... while the reverse ("before fuse") mapping is unaffected.
    assert np.array_equal(get_indices([2, 7], lookup, fused_indices=False), [102, 107])
def test_net_deepcopy():
    """copy.deepcopy of a net must duplicate controllers, data sources and
    geodata instead of sharing them with the original net."""
    net = pp.networks.example_simple()
    net.line_geodata.loc[0, 'coords'] = [[0, 1], [1, 2]]
    net.bus_geodata.loc[0, ['x', 'y']] = 0, 1
    pp.control.ContinuousTapControl(net, tid=0, vm_set_pu=1)
    ds = pp.timeseries.DFData(pd.DataFrame(data=[[0, 1, 2], [3, 4, 5]]))
    pp.control.ConstControl(net, element='load', variable='p_mw', element_index=[0], profile_name=[0], data_source=ds)
    net1 = copy.deepcopy(net)
    # the copied controller must own its own data source and DataFrame
    assert not net1.controller.object.at[1].data_source is ds
    assert not net1.controller.object.at[1].data_source.df is ds.df
    # geodata entries (mutable lists) must be copied, not shared
    assert not net1.line_geodata.coords.at[0] is net.line_geodata.coords.at[0]
    if GEOPANDAS_INSTALLED:
        # convert the geodata tables to GeoDataFrames and verify that the
        # geopandas types survive net.deepcopy()
        for tab in ('bus_geodata', 'line_geodata'):
            if tab == 'bus_geodata':
                geometry = net[tab].apply(lambda x: shapely.geometry.Point(x.x, x.y), axis=1)
            else:
                geometry = net[tab].coords.apply(shapely.geometry.LineString)
            net[tab] = gpd.GeoDataFrame(net[tab], geometry=geometry)
        net1 = net.deepcopy()
        assert isinstance(net1.line_geodata, gpd.GeoDataFrame)
        assert isinstance(net1.bus_geodata, gpd.GeoDataFrame)
        assert isinstance(net1.bus_geodata.geometry.iat[0], shapely.geometry.Point)
        assert isinstance(net1.line_geodata.geometry.iat[0], shapely.geometry.LineString)
def test_memory_leaks():
    """Deep-copying a net and attaching a controller must not leak nets."""
    net = pp.networks.example_simple()
    # Snapshot the tracked-object counts before creating any copies.
    baseline = pp.toolbox.get_gc_objects_dict()
    n_copies = 3
    for _ in range(n_copies):
        net_copy = copy.deepcopy(net)
        # Each net copy receives exactly one controller.
        pp.control.ContinuousTapControl(net_copy, tid=0, vm_set_pu=1)
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    # Only the last copy (still bound to `net_copy`) may survive collection.
    assert after[pandapower.auxiliary.pandapowerNet] - baseline[pandapower.auxiliary.pandapowerNet] == 1
    assert after[pandapower.control.ContinuousTapControl] - baseline.get(
        pandapower.control.ContinuousTapControl, 0) == 1
def test_memory_leaks_demo():
    """Demonstrate the leak that MemoryLeakDemo creates on net copies."""
    net = pp.networks.example_simple()
    baseline = pp.toolbox.get_gc_objects_dict()
    # Emulates the earlier behavior before the fix with weakref: each
    # MemoryLeakDemo instance forms a reference cycle with its net copy.
    n_copies = 3
    for _ in range(n_copies):
        net_copy = copy.deepcopy(net)
        MemoryLeakDemo(net_copy)
    # Even an explicit collection does not reclaim the cyclic objects.
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    assert after[pandapower.auxiliary.pandapowerNet] - baseline[pandapower.auxiliary.pandapowerNet] == n_copies
    assert after[MemoryLeakDemo] - baseline.get(MemoryLeakDemo, 0) == n_copies
def test_memory_leaks_no_copy():
    """Creating fresh nets with controllers in a loop must not leak them."""
    baseline = pp.toolbox.get_gc_objects_dict()
    for _ in range(3):
        net = pp.create_empty_network()
        # Every fresh net receives exactly one controller.
        pp.control.ConstControl(net, 'sgen', 'p_mw', 0)
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    # At most the last net/controller pair (still bound to `net`) survives.
    assert after[pandapower.control.ConstControl] - baseline.get(pandapower.control.ConstControl, 0) == 1
    assert after[pandapower.auxiliary.pandapowerNet] - baseline.get(pandapower.auxiliary.pandapowerNet, 0) <= 1
def test_memory_leak_no_copy_demo():
    """Demonstrate the MemoryLeakDemo leak on freshly created nets."""
    baseline = pp.toolbox.get_gc_objects_dict()
    # Emulates the earlier behavior before the fix with weakref.
    n_nets = 3
    for _ in range(n_nets):
        net = pp.networks.example_simple()
        MemoryLeakDemo(net)
    # The garbage collector cannot reclaim the cycles even when invoked
    # explicitly.
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    assert after[pandapower.auxiliary.pandapowerNet] - \
        baseline.get(pandapower.auxiliary.pandapowerNet, 0) >= n_nets - 1
    assert after[MemoryLeakDemo] - baseline.get(MemoryLeakDemo, 0) == n_nets
def test_memory_leak_df():
    """DataFrames that store their owner instance leak via the cycle."""
    baseline = pp.toolbox.get_gc_objects_dict()
    n_frames = 3
    for _ in range(n_frames):
        df = pd.DataFrame()
        MemoryLeakDemoDF(df)
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    # None of the instances is reclaimed by the collector.
    assert after[MemoryLeakDemoDF] - baseline.get(MemoryLeakDemoDF, 0) == n_frames
def test_memory_leak_dict():
    """Plain-dict cycles, in contrast, are collectable by the gc."""
    baseline = pp.toolbox.get_gc_objects_dict()
    for _ in range(3):
        d = dict()
        MemoryLeakDemoDict(d)
    gc.collect()
    after = pp.toolbox.get_gc_objects_dict()
    # At most the last instance (still referenced via `d`) may remain.
    assert after[MemoryLeakDemoDict] - baseline.get(MemoryLeakDemoDict, 0) <= 1
def test_create_trafo_characteristics():
    """create_trafo_characteristics must add the bookkeeping columns for
    tap-dependent impedance, register SplineCharacteristic objects, and
    reject inconsistent inputs."""
    net = pp.networks.example_multivoltage()
    # test 2 modes, multiple index and single index, for 2w trafo
    pp.control.create_trafo_characteristics(net, "trafo", [1], 'vk_percent', [[-2,-1,0,1,2]], [[2,3,4,5,6]])
    assert "characteristic" in net
    assert "tap_dependent_impedance" in net.trafo.columns
    assert net.trafo.tap_dependent_impedance.dtype == np.bool_
    assert net.trafo.tap_dependent_impedance.at[1]
    assert not net.trafo.tap_dependent_impedance.at[0]
    assert "vk_percent_characteristic" in net.trafo.columns
    assert net.trafo.at[1, 'vk_percent_characteristic'] == 0
    assert pd.isnull(net.trafo.at[0, 'vk_percent_characteristic'])
    # nullable integer dtype so untouched trafos can hold <NA>
    assert net.trafo.vk_percent_characteristic.dtype == pd.Int64Dtype()
    assert "vkr_percent_characteristic" not in net.trafo.columns
    # single-index mode adds a second characteristic for the same trafo
    pp.control.create_trafo_characteristics(net, "trafo", 1, 'vkr_percent', [-2,-1,0,1,2], [1.323,1.324,1.325,1.326,1.327])
    assert len(net.characteristic) == 2
    assert "vkr_percent_characteristic" in net.trafo.columns
    assert net.trafo.at[1, 'vkr_percent_characteristic'] == 1
    assert pd.isnull(net.trafo.at[0, 'vkr_percent_characteristic'])
    assert net.trafo.vkr_percent_characteristic.dtype == pd.Int64Dtype()
    assert isinstance(net.characteristic.object.at[0], pp.control.SplineCharacteristic)
    assert isinstance(net.characteristic.object.at[1], pp.control.SplineCharacteristic)
    # test for 3w trafo
    pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_hv_percent', [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
    assert "tap_dependent_impedance" in net.trafo3w.columns
    assert net.trafo3w.tap_dependent_impedance.dtype == np.bool_
    assert net.trafo3w.tap_dependent_impedance.at[0]
    assert "vk_hv_percent_characteristic" in net.trafo3w.columns
    assert net.trafo3w.at[0, 'vk_hv_percent_characteristic'] == 2
    assert net.trafo3w.vk_hv_percent_characteristic.dtype == pd.Int64Dtype()
    assert "vkr_hv_percent_characteristic" not in net.trafo3w.columns
    assert "vk_mv_percent_characteristic" not in net.trafo3w.columns
    # adding a second variable must leave the other columns untouched
    pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_mv_percent', [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
    assert net.trafo3w.tap_dependent_impedance.dtype == np.bool_
    assert net.trafo3w.tap_dependent_impedance.at[0]
    assert "vk_mv_percent_characteristic" in net.trafo3w.columns
    assert net.trafo3w.at[0, 'vk_mv_percent_characteristic'] == 3
    assert net.trafo3w.vk_hv_percent_characteristic.dtype == pd.Int64Dtype()
    assert "vkr_mv_percent_characteristic" not in net.trafo3w.columns
    assert "vk_lv_percent_characteristic" not in net.trafo3w.columns
    assert "vkr_lv_percent_characteristic" not in net.trafo3w.columns
    # this should be enough testing for adding columns
    # now let's test if it raises errors
    # invalid variable
    with pytest.raises(UserWarning):
        pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_percent',
                                                [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
    # invalid shapes
    with pytest.raises(UserWarning):
        pp.control.create_trafo_characteristics(net, "trafo3w", 0, 'vk_hv_percent',
                                                [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1])
    with pytest.raises(UserWarning):
        pp.control.create_trafo_characteristics(net, "trafo3w", [0], 'vk_hv_percent',
                                                [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])
    with pytest.raises(UserWarning):
        pp.control.create_trafo_characteristics(net, "trafo3w", [0, 1], 'vk_hv_percent',
                                                [[-8, -4, 0, 4, 8]], [[8.1, 9.1, 10.1, 11.1, 12.1]])
    with pytest.raises(UserWarning):
        pp.control.create_trafo_characteristics(net, "trafo3w", [0, 1], 'vk_hv_percent',
                                                [[-8, -4, 0, 4, 8], [-8, -4, 0, 4, 8]],
                                                [[8.1, 9.1, 10.1, 11.1, 12.1]])
if __name__ == '__main__':
    # Allow running this test module directly; "-x" stops at the first failure.
    pytest.main([__file__, "-x"])
| [
"pandas.isnull",
"pandas.DataFrame",
"pandapower.create_empty_network",
"pandapower.control.ConstControl",
"pandapower.toolbox.get_gc_objects_dict",
"pandas.Int64Dtype",
"pandapower.auxiliary.get_indices",
"pytest.main",
"numpy.array_equal",
"pytest.raises",
"gc.collect",
"copy.deepcopy",
"p... | [((1568, 1619), 'pandapower.auxiliary.get_indices', 'get_indices', (['[102, 107]', 'lookup'], {'fused_indices': '(True)'}), '([102, 107], lookup, fused_indices=True)\n', (1579, 1619), False, 'from pandapower.auxiliary import get_indices\n'), ((1631, 1661), 'numpy.array_equal', 'np.array_equal', (['result', '[2, 7]'], {}), '(result, [2, 7])\n', (1645, 1661), True, 'import numpy as np\n'), ((1694, 1742), 'pandapower.auxiliary.get_indices', 'get_indices', (['[2, 7]', 'lookup'], {'fused_indices': '(False)'}), '([2, 7], lookup, fused_indices=False)\n', (1705, 1742), False, 'from pandapower.auxiliary import get_indices\n'), ((1754, 1788), 'numpy.array_equal', 'np.array_equal', (['result', '[102, 107]'], {}), '(result, [102, 107])\n', (1768, 1788), True, 'import numpy as np\n'), ((1927, 1978), 'pandapower.auxiliary.get_indices', 'get_indices', (['[102, 107]', 'lookup'], {'fused_indices': '(True)'}), '([102, 107], lookup, fused_indices=True)\n', (1938, 1978), False, 'from pandapower.auxiliary import get_indices\n'), ((1990, 2020), 'numpy.array_equal', 'np.array_equal', (['result', '[2, 2]'], {}), '(result, [2, 2])\n', (2004, 2020), True, 'import numpy as np\n'), ((2053, 2101), 'pandapower.auxiliary.get_indices', 'get_indices', (['[2, 7]', 'lookup'], {'fused_indices': '(False)'}), '([2, 7], lookup, fused_indices=False)\n', (2064, 2101), False, 'from pandapower.auxiliary import get_indices\n'), ((2113, 2147), 'numpy.array_equal', 'np.array_equal', (['result', '[102, 107]'], {}), '(result, [102, 107])\n', (2127, 2147), True, 'import numpy as np\n'), ((2185, 2213), 'pandapower.networks.example_simple', 'pp.networks.example_simple', ([], {}), '()\n', (2211, 2213), True, 'import pandapower as pp\n'), ((2322, 2378), 'pandapower.control.ContinuousTapControl', 'pp.control.ContinuousTapControl', (['net'], {'tid': '(0)', 'vm_set_pu': '(1)'}), '(net, tid=0, vm_set_pu=1)\n', (2353, 2378), True, 'import pandapower as pp\n'), ((2456, 2575), 'pandapower.control.ConstControl', 
'pp.control.ConstControl', (['net'], {'element': '"""load"""', 'variable': '"""p_mw"""', 'element_index': '[0]', 'profile_name': '[0]', 'data_source': 'ds'}), "(net, element='load', variable='p_mw', element_index\n =[0], profile_name=[0], data_source=ds)\n", (2479, 2575), True, 'import pandapower as pp\n'), ((2583, 2601), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (2596, 2601), False, 'import copy\n'), ((3556, 3584), 'pandapower.networks.example_simple', 'pp.networks.example_simple', ([], {}), '()\n', (3582, 3584), True, 'import pandapower as pp\n'), ((3662, 3694), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (3692, 3694), True, 'import pandapower as pp\n'), ((3899, 3911), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3909, 3911), False, 'import gc\n'), ((3931, 3963), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (3961, 3963), True, 'import pandapower as pp\n'), ((4261, 4289), 'pandapower.networks.example_simple', 'pp.networks.example_simple', ([], {}), '()\n', (4287, 4289), True, 'import pandapower as pp\n'), ((4366, 4398), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (4396, 4398), True, 'import pandapower as pp\n'), ((4721, 4733), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4731, 4733), False, 'import gc\n'), ((4752, 4784), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (4782, 4784), True, 'import pandapower as pp\n'), ((5037, 5069), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (5067, 5069), True, 'import pandapower as pp\n'), ((5262, 5274), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5272, 5274), False, 'import gc\n'), ((5293, 5325), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (5323, 5325), True, 'import pandapower as pp\n'), ((5619, 5651), 
'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (5649, 5651), True, 'import pandapower as pp\n'), ((5974, 5986), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5984, 5986), False, 'import gc\n'), ((6005, 6037), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (6035, 6037), True, 'import pandapower as pp\n'), ((6306, 6338), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (6336, 6338), True, 'import pandapower as pp\n'), ((6438, 6450), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6448, 6450), False, 'import gc\n'), ((6469, 6501), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (6499, 6501), True, 'import pandapower as pp\n'), ((6638, 6670), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (6668, 6670), True, 'import pandapower as pp\n'), ((6762, 6774), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6772, 6774), False, 'import gc\n'), ((6793, 6825), 'pandapower.toolbox.get_gc_objects_dict', 'pp.toolbox.get_gc_objects_dict', ([], {}), '()\n', (6823, 6825), True, 'import pandapower as pp\n'), ((6968, 7002), 'pandapower.networks.example_multivoltage', 'pp.networks.example_multivoltage', ([], {}), '()\n', (7000, 7002), True, 'import pandapower as pp\n'), ((7074, 7191), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo"""', '[1]', '"""vk_percent"""', '[[-2, -1, 0, 1, 2]]', '[[2, 3, 4, 5, 6]]'], {}), "(net, 'trafo', [1], 'vk_percent', [[\n -2, -1, 0, 1, 2]], [[2, 3, 4, 5, 6]])\n", (7113, 7191), True, 'import pandapower as pp\n'), ((7573, 7628), 'pandas.isnull', 'pd.isnull', (["net.trafo.at[0, 'vk_percent_characteristic']"], {}), "(net.trafo.at[0, 'vk_percent_characteristic'])\n", (7582, 7628), True, 'import pandas as pd\n'), ((7771, 7902), 
'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo"""', '(1)', '"""vkr_percent"""', '[-2, -1, 0, 1, 2]', '[1.323, 1.324, 1.325, 1.326, 1.327]'], {}), "(net, 'trafo', 1, 'vkr_percent', [-2,\n -1, 0, 1, 2], [1.323, 1.324, 1.325, 1.326, 1.327])\n", (7810, 7902), True, 'import pandapower as pp\n'), ((8065, 8121), 'pandas.isnull', 'pd.isnull', (["net.trafo.at[0, 'vkr_percent_characteristic']"], {}), "(net.trafo.at[0, 'vkr_percent_characteristic'])\n", (8074, 8121), True, 'import pandas as pd\n'), ((8401, 8529), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '(0)', '"""vk_hv_percent"""', '[-8, -4, 0, 4, 8]', '[8.1, 9.1, 10.1, 11.1, 12.1]'], {}), "(net, 'trafo3w', 0, 'vk_hv_percent',\n [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])\n", (8440, 8529), True, 'import pandapower as pp\n'), ((9056, 9184), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '(0)', '"""vk_mv_percent"""', '[-8, -4, 0, 4, 8]', '[8.1, 9.1, 10.1, 11.1, 12.1]'], {}), "(net, 'trafo3w', 0, 'vk_mv_percent',\n [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])\n", (9095, 9184), True, 'import pandapower as pp\n'), ((11063, 11092), 'pytest.main', 'pytest.main', (["[__file__, '-x']"], {}), "([__file__, '-x'])\n", (11074, 11092), False, 'import pytest\n'), ((897, 942), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[self]', 'columns': "['object']"}), "(data=[self], columns=['object'])\n", (909, 942), True, 'import pandas as pd\n'), ((2409, 2450), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[0, 1, 2], [3, 4, 5]]'}), '(data=[[0, 1, 2], [3, 4, 5]])\n', (2421, 2450), True, 'import pandas as pd\n'), ((3751, 3769), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (3764, 3769), False, 'import copy\n'), ((3832, 3893), 'pandapower.control.ContinuousTapControl', 'pp.control.ContinuousTapControl', 
(['net_copy'], {'tid': '(0)', 'vm_set_pu': '(1)'}), '(net_copy, tid=0, vm_set_pu=1)\n', (3863, 3893), True, 'import pandapower as pp\n'), ((4567, 4585), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (4580, 4585), False, 'import copy\n'), ((5121, 5146), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (5144, 5146), True, 'import pandapower as pp\n'), ((5209, 5256), 'pandapower.control.ConstControl', 'pp.control.ConstControl', (['net', '"""sgen"""', '"""p_mw"""', '(0)'], {}), "(net, 'sgen', 'p_mw', 0)\n", (5232, 5256), True, 'import pandapower as pp\n'), ((5815, 5843), 'pandapower.networks.example_simple', 'pp.networks.example_simple', ([], {}), '()\n', (5841, 5843), True, 'import pandapower as pp\n'), ((6389, 6403), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6401, 6403), True, 'import pandas as pd\n'), ((7685, 7700), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (7698, 7700), True, 'import pandas as pd\n'), ((8179, 8194), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (8192, 8194), True, 'import pandas as pd\n'), ((8896, 8911), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (8909, 8911), True, 'import pandas as pd\n'), ((9491, 9506), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (9504, 9506), True, 'import pandas as pd\n'), ((9846, 9872), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (9859, 9872), False, 'import pytest\n'), ((9882, 10008), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '(0)', '"""vk_percent"""', '[-8, -4, 0, 4, 8]', '[8.1, 9.1, 10.1, 11.1, 12.1]'], {}), "(net, 'trafo3w', 0, 'vk_percent', [-\n 8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])\n", (9921, 10008), True, 'import pandapower as pp\n'), ((10083, 10109), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (10096, 10109), False, 'import pytest\n'), ((10119, 10241), 
'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '(0)', '"""vk_hv_percent"""', '[-8, -4, 0, 4, 8]', '[8.1, 9.1, 10.1, 11.1]'], {}), "(net, 'trafo3w', 0, 'vk_hv_percent',\n [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1])\n", (10158, 10241), True, 'import pandapower as pp\n'), ((10296, 10322), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (10309, 10322), False, 'import pytest\n'), ((10332, 10462), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '[0]', '"""vk_hv_percent"""', '[-8, -4, 0, 4, 8]', '[8.1, 9.1, 10.1, 11.1, 12.1]'], {}), "(net, 'trafo3w', [0],\n 'vk_hv_percent', [-8, -4, 0, 4, 8], [8.1, 9.1, 10.1, 11.1, 12.1])\n", (10371, 10462), True, 'import pandapower as pp\n'), ((10517, 10543), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (10530, 10543), False, 'import pytest\n'), ((10553, 10690), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '[0, 1]', '"""vk_hv_percent"""', '[[-8, -4, 0, 4, 8]]', '[[8.1, 9.1, 10.1, 11.1, 12.1]]'], {}), "(net, 'trafo3w', [0, 1],\n 'vk_hv_percent', [[-8, -4, 0, 4, 8]], [[8.1, 9.1, 10.1, 11.1, 12.1]])\n", (10592, 10690), True, 'import pandapower as pp\n'), ((10745, 10771), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (10758, 10771), False, 'import pytest\n'), ((10781, 10942), 'pandapower.control.create_trafo_characteristics', 'pp.control.create_trafo_characteristics', (['net', '"""trafo3w"""', '[0, 1]', '"""vk_hv_percent"""', '[[-8, -4, 0, 4, 8], [-8, -4, 0, 4, 8]]', '[[8.1, 9.1, 10.1, 11.1, 12.1]]'], {}), "(net, 'trafo3w', [0, 1],\n 'vk_hv_percent', [[-8, -4, 0, 4, 8], [-8, -4, 0, 4, 8]], [[8.1, 9.1, \n 10.1, 11.1, 12.1]])\n", (10820, 10942), True, 'import pandapower as pp\n'), ((3144, 3189), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', 
(['net[tab]'], {'geometry': 'geometry'}), '(net[tab], geometry=geometry)\n', (3160, 3189), True, 'import geopandas as gpd\n')] |
# template matching
import cv2
import numpy as np
from scipy import signal
from pprint import pprint
from matplotlib import pyplot as plt
def show_img(title, img):
cv2.namedWindow(f"{title}", cv2.WINDOW_NORMAL)
cv2.imshow(f"{title}", img)
cv2.waitKey(0)
# 需要處理成透明背景 應該就可以找到對的物體進行辨識
path1 = "t2.jpg"
path2 = "template1.jpg"
img = cv2.imread(path1, 0)
template = cv2.imread(path2, 0)
w = template.shape[1]
h = template.shape[0]
mask_img = np.where(template > 0, 100, 0)
mask_img = np.float32(mask_img)
cv2.imwrite("mask.jpg", mask_img)
res = cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED, mask=mask_img)
cv2.normalize(res, res, 0, 1, cv2.NORM_MINMAX, -1)
loc = np.where(res > 0.85)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print(len(loc[0]))
i = 0
tmp_point = []
x, y = (0, 0)
bottom_right = (max_loc[0] + w, max_loc[1] + h)
cv2.rectangle(img, max_loc, bottom_right, (0, 0, 0), 2)
imm = img[max_loc[1]:bottom_right[1], max_loc[0]:bottom_right[0]]
for pt in sorted(zip(*loc[::-1]), key=lambda s: s[0]):
if np.sqrt((x - pt[0]) ** 2 + (y - pt[1]) ** 2) < 500:
continue
x, y = pt
tmp_point.append(pt)
# print(pt, (pt[0] + w, pt[1] + h))
# cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 0, 0), 1)
x, y = (0, 0)
for pt in sorted(tmp_point, key=lambda s: s[1]):
if np.sqrt((y - pt[1]) ** 2) < 150:
continue
x, y = pt
print(pt)
# cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 0, 0), 1)
show_img("test", img)
cv2.imwrite("template.jpg", imm)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# print(min_val, max_val, min_loc, max_loc)
| [
"cv2.rectangle",
"cv2.imwrite",
"cv2.namedWindow",
"cv2.normalize",
"numpy.sqrt",
"numpy.where",
"cv2.imshow",
"cv2.minMaxLoc",
"cv2.waitKey",
"cv2.matchTemplate",
"cv2.imread",
"numpy.float32"
] | [((362, 382), 'cv2.imread', 'cv2.imread', (['path1', '(0)'], {}), '(path1, 0)\n', (372, 382), False, 'import cv2\n'), ((397, 417), 'cv2.imread', 'cv2.imread', (['path2', '(0)'], {}), '(path2, 0)\n', (407, 417), False, 'import cv2\n'), ((476, 506), 'numpy.where', 'np.where', (['(template > 0)', '(100)', '(0)'], {}), '(template > 0, 100, 0)\n', (484, 506), True, 'import numpy as np\n'), ((519, 539), 'numpy.float32', 'np.float32', (['mask_img'], {}), '(mask_img)\n', (529, 539), True, 'import numpy as np\n'), ((543, 576), 'cv2.imwrite', 'cv2.imwrite', (['"""mask.jpg"""', 'mask_img'], {}), "('mask.jpg', mask_img)\n", (554, 576), False, 'import cv2\n'), ((586, 654), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'template', 'cv2.TM_CCORR_NORMED'], {'mask': 'mask_img'}), '(img, template, cv2.TM_CCORR_NORMED, mask=mask_img)\n', (603, 654), False, 'import cv2\n'), ((658, 708), 'cv2.normalize', 'cv2.normalize', (['res', 'res', '(0)', '(1)', 'cv2.NORM_MINMAX', '(-1)'], {}), '(res, res, 0, 1, cv2.NORM_MINMAX, -1)\n', (671, 708), False, 'import cv2\n'), ((718, 738), 'numpy.where', 'np.where', (['(res > 0.85)'], {}), '(res > 0.85)\n', (726, 738), True, 'import numpy as np\n'), ((777, 795), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['res'], {}), '(res)\n', (790, 795), False, 'import cv2\n'), ((904, 959), 'cv2.rectangle', 'cv2.rectangle', (['img', 'max_loc', 'bottom_right', '(0, 0, 0)', '(2)'], {}), '(img, max_loc, bottom_right, (0, 0, 0), 2)\n', (917, 959), False, 'import cv2\n'), ((1561, 1593), 'cv2.imwrite', 'cv2.imwrite', (['"""template.jpg"""', 'imm'], {}), "('template.jpg', imm)\n", (1572, 1593), False, 'import cv2\n'), ((177, 223), 'cv2.namedWindow', 'cv2.namedWindow', (['f"""{title}"""', 'cv2.WINDOW_NORMAL'], {}), "(f'{title}', cv2.WINDOW_NORMAL)\n", (192, 223), False, 'import cv2\n'), ((229, 256), 'cv2.imshow', 'cv2.imshow', (['f"""{title}"""', 'img'], {}), "(f'{title}', img)\n", (239, 256), False, 'import cv2\n'), ((262, 276), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], 
{}), '(0)\n', (273, 276), False, 'import cv2\n'), ((1091, 1135), 'numpy.sqrt', 'np.sqrt', (['((x - pt[0]) ** 2 + (y - pt[1]) ** 2)'], {}), '((x - pt[0]) ** 2 + (y - pt[1]) ** 2)\n', (1098, 1135), True, 'import numpy as np\n'), ((1388, 1413), 'numpy.sqrt', 'np.sqrt', (['((y - pt[1]) ** 2)'], {}), '((y - pt[1]) ** 2)\n', (1395, 1413), True, 'import numpy as np\n')] |
# Copyright (c) 2021, Technische Universität Kaiserslautern (TUK) & National University of Sciences and Technology (NUST).
# All rights reserved.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.optim import *
from loss import *
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
import torch.utils.model_zoo as model_zoo
from dataset import fix, get_dataloaders_generated_data
import os
import numpy as np
import pickle as pkl
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import defaultdict
from matplotlib.colors import ListedColormap
plt.switch_backend('agg')
import torchnet as tnt
from torchnet.meter import ConfusionMeter as CM
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def train_net(model, model_topology, generated_data_path, input_dim, bands, classes, workers, pre_model, data_split_lists, save_dir, sum_dir, error_maps_path,
batch_size, lr, epochs, log_after, cuda, device):
# print(model)
if cuda:
print('log: Using GPU')
model.cuda(device=device)
###############################################################################
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if not os.path.exists(sum_dir):
os.mkdir(sum_dir)
# writer = SummaryWriter()
# define loss and optimizer
optimizer = RMSprop(model.parameters(), lr=lr)
# save our initial learning rate
lr_initial = lr
weights = torch.Tensor([10, 10]) # forest has ____ times more weight
weights = weights.cuda(device=device) if cuda else weights
focal_criterion = FocalLoss2d(weight=weights)
lr_final = lr / 10 # get to one tenth of the starting rate
LR_decay = (lr_final / lr) ** (1. / epochs)
scheduler = lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=LR_decay)
loaders = get_dataloaders_generated_data(generated_data_path=generated_data_path, data_split_lists_path=data_split_lists, model_input_size=input_dim,
bands=bands, batch_size=batch_size, num_classes=len(classes)+1, train_split=0.8, one_hot=True,
num_workers=0)
train_loader, val_dataloader, test_loader = loaders
best_evaluation = 0.0
################################################################
if pre_model == 'None':
model_number = 0
print('log: No trained model passed. Starting from scratch...')
else:
model_path = os.path.join(save_dir, pre_model)
model_number = int(pre_model.split('/')[-1].split('_')[1])
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=False)
print('log: Resuming from model {} ...'.format(model_path))
print('log: Evaluating now...')
best_evaluation = eval_net(model=model, classes=classes, criterion=focal_criterion, val_loader=val_dataloader, cuda=cuda, device=device, writer=None,
batch_size=batch_size, step=0, bands=bands)
print('LOG: Starting with best evaluation accuracy: {:.3f}%'.format(best_evaluation))
##########################################################################
# training loop
bands_for_training = [x-1 for x in bands]
for k in range(epochs):
net_loss = []
total_correct, total_examples = 0, 0
print('log: Evaluating now...')
eval_net(model=model, classes=classes, criterion=focal_criterion, val_loader=val_dataloader, cuda=cuda, device=device, writer=None,
batch_size=batch_size, step=k, bands=bands)
model_number += 1
model_path = os.path.join(save_dir, 'model_{}_topology{}_lr{}_bands{}.pt'.format(model_number, model_topology, lr_initial, len(bands)))
torch.save(model.state_dict(), model_path)
print('log: Saved best performing {}'.format(model_path))
# we will save all models for now
# del_this = os.path.join(save_dir, 'model-{}.pt'.format(model_number-10))
# if os.path.exists(del_this):
# os.remove(del_this)
# print('log: Removed {}'.format(del_this))
for idx, data in enumerate(train_loader):
model.train()
model.zero_grad()
# get the required bands for training
test_x, label = data['input'], data['label']
test_x = test_x.cuda(device=device) if cuda else test_x
label = label.cuda(device=device) if cuda else label
out_x, logits = model.forward(test_x[:,bands_for_training,:,:])
pred = torch.argmax(logits, dim=1)
not_one_hot_target = torch.argmax(label, dim=1)
loss = focal_criterion(logits, not_one_hot_target)
loss.backward()
clip_grad_norm_(model.parameters(), 0.05)
optimizer.step()
accurate = (pred == not_one_hot_target).sum().item()
numerator = float(accurate)
denominator = float(pred.view(-1).size(0))
total_correct += numerator
total_examples += denominator
if idx % log_after == 0 and idx > 0:
accuracy = float(numerator) * 100 / denominator
print('{}. ({}/{}) input size= {}, output size = {}, loss = {}, accuracy = {}/{} = {:.2f}%'.format(k, idx, len(train_loader), test_x.size(),
out_x.size(), loss.item(), numerator,
denominator, accuracy))
net_loss.append(loss.item())
# this should be done at the end of epoch only
scheduler.step() # to dynamically change the learning rate
mean_accuracy = total_correct*100/total_examples
mean_loss = np.asarray(net_loss).mean()
print('####################################')
print('LOG: epoch {} -> total loss = {:.5f}, total accuracy = {:.5f}%'.format(k, mean_loss, mean_accuracy))
print('####################################')
pass
@torch.no_grad()
def eval_net(**kwargs):
model = kwargs['model']
classes = kwargs['classes']
num_classes = len(classes)
cuda = kwargs['cuda']
device = kwargs['device']
model.eval()
all_predictions = np.array([]) # empty all predictions
all_ground_truth = np.array([])
if cuda:
model.cuda(device=device)
bands_for_testing = [x - 1 for x in kwargs['bands']]
if 'writer' in kwargs.keys():
# it means this is evaluation at training time
val_loader = kwargs['val_loader']
model = kwargs['model']
focal_criterion = kwargs['criterion']
total_examples, total_correct, net_loss = 0, 0, []
un_confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=False)
confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=True)
for idx, data in enumerate(val_loader):
test_x, label = data['input'], data['label']
test_x = test_x.cuda(device=device) if cuda else test_x
label = label.cuda(device=device) if cuda else label
out_x, softmaxed = model.forward(test_x[:,bands_for_testing,:,:])
pred = torch.argmax(softmaxed, dim=1)
# not_one_hot_target = torch.argmax(label, dim=1) + 1
not_one_hot_target = torch.argmax(label, dim=1)
not_one_hot_target_for_loss = not_one_hot_target.clone()
not_one_hot_target_for_loss[not_one_hot_target_for_loss == 0] = 1
not_one_hot_target_for_loss -= 1
loss = focal_criterion(softmaxed, not_one_hot_target_for_loss) # dice_criterion(softmaxed, label) #
label_valid_indices = (not_one_hot_target.view(-1) != 0)
# mind the '-1' fix please. This is to convert Forest and Non-Forest labels from 1, 2 to 0, 1
valid_label = not_one_hot_target.view(-1)[label_valid_indices] - 1
valid_pred = pred.view(-1)[label_valid_indices]
# Eliminate NULL pixels from testing
accurate = (valid_pred == valid_label).sum().item()
numerator = float(accurate)
denominator = float(valid_pred.view(-1).size(0))
total_correct += numerator
total_examples += denominator
net_loss.append(loss.item())
# NULL elimination
un_confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
all_predictions = np.concatenate((all_predictions, valid_pred.view(-1).cpu()), axis=0)
all_ground_truth = np.concatenate((all_ground_truth, valid_label.view(-1).cpu()), axis=0)
#################################
mean_accuracy = total_correct*100/total_examples
mean_loss = np.asarray(net_loss).mean()
# writer.add_scalar(tag='eval accuracy', scalar_value=mean_accuracy, global_step=step)
# writer.add_scalar(tag='eval loss', scalar_value=mean_loss, global_step=step)
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('LOG: validation: total loss = {:.5f}, total accuracy = ({}/{}) = {:.5f}%'.format(mean_loss, total_correct, total_examples, mean_accuracy))
print('Log: Confusion matrix')
print(confusion_meter.value())
confusion = confusion_matrix(all_ground_truth, all_predictions)
print('Confusion Matrix from Scikit-Learn\n')
print(confusion)
print('\nClassification Report\n')
print(classification_report(all_ground_truth, all_predictions, target_names=classes))
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
return mean_accuracy
else:
# model, images, labels, pre_model, save_dir, sum_dir, batch_size, lr, log_after, cuda
pre_model = kwargs['pre_model']
batch_size = kwargs['batch_size']
un_confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=False)
confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=True)
model_path = os.path.join(kwargs['save_dir'], pre_model)
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=False)
print('log: resumed model {} successfully!'.format(pre_model))
weights = torch.Tensor([10, 10]) # forest has ___ times more weight
weights = weights.cuda(device=device) if cuda else weights
focal_criterion = FocalLoss2d(weight=weights)
loaders = get_dataloaders_generated_data(generated_data_path=kwargs['generated_data_path'], data_split_lists_path=kwargs['data_split_lists'],
bands=kwargs['bands'], model_input_size=kwargs['input_dim'], num_classes=num_classes, train_split=0.8,
one_hot=True, batch_size=batch_size, num_workers=0)
net_loss = list()
train_dataloader, val_dataloader, test_dataloader = loaders
total_correct, total_examples = 0, 0
print("(LOG): Evaluating performance on test data...")
for idx, data in enumerate(test_dataloader):
test_x, label = data['input'], data['label']
test_x = test_x.cuda(device=device) if cuda else test_x
label = label.cuda(device=device) if cuda else label
out_x, softmaxed = model.forward(test_x[:,bands_for_testing,:,:])
pred = torch.argmax(softmaxed, dim=1)
# not_one_hot_target = torch.argmax(label, dim=1) + 1
not_one_hot_target = torch.argmax(label, dim=1)
#######################################################
not_one_hot_target_for_loss = not_one_hot_target.clone()
not_one_hot_target_for_loss[not_one_hot_target_for_loss == 0] = 1
not_one_hot_target_for_loss -= 1
loss = focal_criterion(softmaxed, not_one_hot_target_for_loss)
label_valid_indices = (not_one_hot_target.view(-1) != 0)
# mind the '-1' fix please. This is to convert Forest and Non-Forest labels from 1, 2 to 0, 1
valid_label = not_one_hot_target.view(-1)[label_valid_indices] - 1
valid_pred = pred.view(-1)[label_valid_indices]
# NULL elimination
accurate = (valid_pred == valid_label).sum().item()
numerator = float(accurate)
denominator = float(valid_pred.view(-1).size(0))
total_correct += numerator
total_examples += denominator
net_loss.append(loss.item())
########################################
# with NULL elimination
un_confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
all_predictions = np.concatenate((all_predictions, valid_pred.view(-1).cpu()), axis=0)
all_ground_truth = np.concatenate((all_ground_truth, valid_label.view(-1).cpu()), axis=0)
if idx % 10 == 0:
print('log: on test sample: {}/{}'.format(idx, len(test_dataloader)))
#################################
mean_accuracy = total_correct*100/total_examples
mean_loss = np.asarray(net_loss).mean()
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('log: test:: total loss = {:.5f}, total accuracy = {:.5f}%'.format(mean_loss, mean_accuracy))
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('---> Confusion Matrix:')
print(confusion_meter.value())
confusion = confusion_matrix(all_ground_truth, all_predictions)
print('Confusion Matrix from Scikit-Learn\n')
print(confusion)
print('\nClassification Report\n')
print(classification_report(all_ground_truth, all_predictions, target_names=classes))
with open('normalized.pkl', 'wb') as this:
pkl.dump(confusion_meter.value(), this, protocol=pkl.HIGHEST_PROTOCOL)
with open('un_normalized.pkl', 'wb') as this:
pkl.dump(un_confusion_meter.value(), this, protocol=pkl.HIGHEST_PROTOCOL)
pass
pass
pass
@torch.no_grad()
def generate_error_maps(**kwargs):
model = kwargs['model']
classes = kwargs['classes']
num_classes = len(classes)
cuda = kwargs['cuda']
device = kwargs['device']
model.eval()
all_predictions = np.array([]) # empty all predictions
all_ground_truth = np.array([])
# special variables
all_but_chitral_and_upper_dir_predictions = np.array([]) # empty all predictions
all_but_chitral_and_upper_dir_ground_truth = np.array([])
if cuda:
model.cuda(device=device)
# model, images, labels, pre_model, save_dir, sum_dir, batch_size, lr, log_after, cuda
pre_model = kwargs['pre_model']
batch_size = kwargs['batch_size']
un_confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=False)
confusion_meter = tnt.meter.ConfusionMeter(num_classes, normalized=True)
model_path = os.path.join(kwargs['save_dir'], pre_model)
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=False)
print('[LOG] Resumed model {} successfully!'.format(pre_model))
destination_path = os.path.join(kwargs['error_maps_path'], pre_model.split('.')[0])
if not os.path.exists(os.path.join(destination_path)):
os.mkdir(destination_path)
weights = torch.Tensor([10, 10]) # forest has ___ times more weight
weights = weights.cuda(device=device) if cuda else weights
focal_criterion = FocalLoss2d(weight=weights)
loaders = get_dataloaders_generated_data(generated_data_path=kwargs['generated_data_path'], data_split_lists_path=kwargs['data_split_lists'],
bands=kwargs['bands'], model_input_size=kwargs['input_dim'], num_classes=num_classes+1, train_split=0.8,
one_hot=True, batch_size=batch_size, num_workers=0)
net_loss = list()
train_dataloader, val_dataloader, test_dataloader = loaders
total_correct, total_examples = 0, 0
print("[LOG] Evaluating performance on test data...")
forest_cmap = ListedColormap(["yellow", "green"])
true_false_cmap = ListedColormap(['red', 'blue'])
bands_for_testing = [x - 1 for x in kwargs['bands']]
accuracy_per_district = defaultdict(lambda: [0, 0])
for idx, data in enumerate(test_dataloader):
test_x, label, sample_identifiers = data['input'], data['label'], data['sample_identifier']
test_x = test_x.cuda(device=device) if cuda else test_x
label = label.cuda(device=device) if cuda else label
out_x, softmaxed = model.forward(test_x[:,bands_for_testing,:,:])
pred = torch.argmax(softmaxed, dim=1)
not_one_hot_target = torch.argmax(label, dim=1)
for i in range(not_one_hot_target.shape[0]):
image_name = sample_identifiers[0][i].split('/')[-1].split('.')[0]
district_name = image_name.split('_')[0]
if district_name == 'upper' or district_name == 'lower':
district_name += ' dir'
# print(district_name)
rgb_image = (255*(test_x.numpy()[i].transpose(1, 2, 0)[:,:,[3, 2, 1]])).astype(np.uint8)
district_ground_truth = not_one_hot_target[i,:,:].clone()
ground_truth = not_one_hot_target[i,:,:] - 1
ground_truth[ground_truth < 0] = 0
district_prediction = pred[i,:,:]
error_map = np.array(ground_truth == district_prediction).astype(np.uint8)
# calculate accuracy for this district image (below)
district_label_valid_indices = (district_ground_truth.view(-1) != 0)
district_valid_label = district_ground_truth.view(-1)[district_label_valid_indices] - 1
district_valid_pred = district_prediction.view(-1)[district_label_valid_indices]
district_accurate = (district_valid_pred == district_valid_label).sum().item()
district_total_pixels = float(district_valid_pred.view(-1).size(0))
accuracy_per_district[district_name][0] += district_accurate
accuracy_per_district[district_name][1] += district_total_pixels
# special variables
if district_name != "upper dir" and district_name != "chitral":
all_but_chitral_and_upper_dir_predictions = np.concatenate((all_but_chitral_and_upper_dir_predictions,
district_valid_pred.view(-1).cpu()), axis=0)
all_but_chitral_and_upper_dir_ground_truth = np.concatenate((all_but_chitral_and_upper_dir_ground_truth,
district_valid_label.view(-1).cpu()), axis=0)
# # calculate accuracy for this district image (above)
# fig = plt.figure(figsize=(12,3))
# fig.suptitle("[Non-Forest: Yellow; Forest: Green;] Error: [Correct: Blue, In-correct: Red]", fontsize="x-large")
# ax1 = fig.add_subplot(1, 4, 1)
# ax1.imshow(rgb_image)
# ax1.set_title('Image')
# ax2 = fig.add_subplot(1, 4, 2)
# ax2.imshow(ground_truth, cmap=forest_cmap, vmin=0, vmax=2)
# ax2.set_title('Ground Truth')
# ax3 = fig.add_subplot(1, 4, 3)
# ax3.imshow(district_prediction, cmap=forest_cmap, vmin=0, vmax=2)
# ax3.set_title('Prediction')
# ax4 = fig.add_subplot(1, 4, 4)
# ax4.imshow(error_map, cmap=true_false_cmap, vmin=0, vmax=1)
# ax4.set_title('Error')
# fig.savefig(os.path.join(destination_path, '{}.png'.format(image_name)))
# plt.close()
#######################################################
not_one_hot_target_for_loss = not_one_hot_target.clone()
not_one_hot_target_for_loss[not_one_hot_target_for_loss == 0] = 1
not_one_hot_target_for_loss -= 1
loss = focal_criterion(softmaxed, not_one_hot_target_for_loss)
label_valid_indices = (not_one_hot_target.view(-1) != 0)
# mind the '-1' fix please. This is to convert Forest and Non-Forest labels from 1, 2 to 0, 1
valid_label = not_one_hot_target.view(-1)[label_valid_indices] - 1
valid_pred = pred.view(-1)[label_valid_indices]
# NULL elimination
accurate = (valid_pred == valid_label).sum().item()
numerator = float(accurate)
denominator = float(valid_pred.view(-1).size(0))
total_correct += numerator
total_examples += denominator
net_loss.append(loss.item())
########################################
# with NULL elimination
un_confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
confusion_meter.add(predicted=valid_pred.view(-1), target=valid_label.view(-1))
all_predictions = np.concatenate((all_predictions, valid_pred.view(-1).cpu()), axis=0)
all_ground_truth = np.concatenate((all_ground_truth, valid_label.view(-1).cpu()), axis=0)
if idx % 10 == 0:
print('log: on test sample: {}/{}'.format(idx, len(test_dataloader)))
#################################
mean_accuracy = total_correct*100/total_examples
mean_loss = np.asarray(net_loss).mean()
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('log: test:: total loss = {:.5f}, total accuracy = {:.5f}%'.format(mean_loss, mean_accuracy))
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
print('---> Confusion Matrix:')
print(confusion_meter.value())
confusion = confusion_matrix(all_ground_truth, all_predictions)
print('Confusion Matrix from Scikit-Learn\n')
print(confusion)
print('\nClassification Report\n')
print(classification_report(all_ground_truth, all_predictions, target_names=classes))
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
# Get per district test scores without Upper Dir and Chitral districts
print('[LOG] Per District Test Accuracies')
print(accuracy_per_district)
numerator_sum, denominator_sum = 0, 0
for idx, (this_district, [true, total]) in enumerate(accuracy_per_district.items(), 1):
print("{}: {} -> {}/{} = {:.2f}%".format(idx, this_district, true, total, 100*true/total))
if this_district != 'upper dir' and this_district != 'chitral':
numerator_sum += true
denominator_sum += total
else:
print("[LOG] Skipping {} district for performance testing".format(this_district))
print("[LOG] Net Test Accuracy Without Chitral and Upper Dir: {:.2f}%".format(100*numerator_sum/denominator_sum))
print('---> Confusion Matrix:')
print(confusion_meter.value())
confusion = confusion_matrix(all_but_chitral_and_upper_dir_ground_truth, all_but_chitral_and_upper_dir_predictions)
print('Confusion Matrix from Scikit-Learn\n')
print(confusion)
print('\nClassification Report\n')
print(classification_report(all_but_chitral_and_upper_dir_ground_truth, all_but_chitral_and_upper_dir_predictions,
target_names=classes))
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
pass
| [
"os.path.exists",
"torchnet.meter.ConfusionMeter",
"torch.device",
"sklearn.metrics.classification_report",
"torch.Tensor",
"os.path.join",
"dataset.get_dataloaders_generated_data",
"matplotlib.colors.ListedColormap",
"torch.argmax",
"numpy.array",
"numpy.asarray",
"collections.defaultdict",
... | [((800, 825), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (818, 825), True, 'import matplotlib.pyplot as plt\n'), ((6495, 6510), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6508, 6510), False, 'import torch\n'), ((14746, 14761), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14759, 14761), False, 'import torch\n'), ((1794, 1816), 'torch.Tensor', 'torch.Tensor', (['[10, 10]'], {}), '([10, 10])\n', (1806, 1816), False, 'import torch\n'), ((6721, 6733), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6729, 6733), True, 'import numpy as np\n'), ((6782, 6794), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6790, 6794), True, 'import numpy as np\n'), ((14983, 14995), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14991, 14995), True, 'import numpy as np\n'), ((15044, 15056), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15052, 15056), True, 'import numpy as np\n'), ((15129, 15141), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15137, 15141), True, 'import numpy as np\n'), ((15216, 15228), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15224, 15228), True, 'import numpy as np\n'), ((15466, 15521), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(False)'}), '(num_classes, normalized=False)\n', (15490, 15521), True, 'import torchnet as tnt\n'), ((15544, 15598), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(True)'}), '(num_classes, normalized=True)\n', (15568, 15598), True, 'import torchnet as tnt\n'), ((15616, 15659), 'os.path.join', 'os.path.join', (["kwargs['save_dir']", 'pre_model'], {}), "(kwargs['save_dir'], pre_model)\n", (15628, 15659), False, 'import os\n'), ((16022, 16044), 'torch.Tensor', 'torch.Tensor', (['[10, 10]'], {}), '([10, 10])\n', (16034, 16044), False, 'import torch\n'), ((16208, 16517), 'dataset.get_dataloaders_generated_data', 
'get_dataloaders_generated_data', ([], {'generated_data_path': "kwargs['generated_data_path']", 'data_split_lists_path': "kwargs['data_split_lists']", 'bands': "kwargs['bands']", 'model_input_size': "kwargs['input_dim']", 'num_classes': '(num_classes + 1)', 'train_split': '(0.8)', 'one_hot': '(True)', 'batch_size': 'batch_size', 'num_workers': '(0)'}), "(generated_data_path=kwargs[\n 'generated_data_path'], data_split_lists_path=kwargs['data_split_lists'\n ], bands=kwargs['bands'], model_input_size=kwargs['input_dim'],\n num_classes=num_classes + 1, train_split=0.8, one_hot=True, batch_size=\n batch_size, num_workers=0)\n", (16238, 16517), False, 'from dataset import fix, get_dataloaders_generated_data\n'), ((16790, 16825), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['yellow', 'green']"], {}), "(['yellow', 'green'])\n", (16804, 16825), False, 'from matplotlib.colors import ListedColormap\n'), ((16848, 16879), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['red', 'blue']"], {}), "(['red', 'blue'])\n", (16862, 16879), False, 'from matplotlib.colors import ListedColormap\n'), ((16965, 16993), 'collections.defaultdict', 'defaultdict', (['(lambda : [0, 0])'], {}), '(lambda : [0, 0])\n', (16976, 16993), False, 'from collections import defaultdict\n'), ((22311, 22362), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_ground_truth', 'all_predictions'], {}), '(all_ground_truth, all_predictions)\n', (22327, 22362), False, 'from sklearn.metrics import confusion_matrix\n'), ((23472, 23579), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_but_chitral_and_upper_dir_ground_truth', 'all_but_chitral_and_upper_dir_predictions'], {}), '(all_but_chitral_and_upper_dir_ground_truth,\n all_but_chitral_and_upper_dir_predictions)\n', (23488, 23579), False, 'from sklearn.metrics import confusion_matrix\n'), ((1494, 1518), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1508, 1518), False, 'import os\n'), ((1528, 
1546), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (1536, 1546), False, 'import os\n'), ((1558, 1581), 'os.path.exists', 'os.path.exists', (['sum_dir'], {}), '(sum_dir)\n', (1572, 1581), False, 'import os\n'), ((1591, 1608), 'os.mkdir', 'os.mkdir', (['sum_dir'], {}), '(sum_dir)\n', (1599, 1608), False, 'import os\n'), ((2820, 2853), 'os.path.join', 'os.path.join', (['save_dir', 'pre_model'], {}), '(save_dir, pre_model)\n', (2832, 2853), False, 'import os\n'), ((7196, 7251), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(False)'}), '(num_classes, normalized=False)\n', (7220, 7251), True, 'import torchnet as tnt\n'), ((7278, 7332), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(True)'}), '(num_classes, normalized=True)\n', (7302, 7332), True, 'import torchnet as tnt\n'), ((9852, 9903), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_ground_truth', 'all_predictions'], {}), '(all_ground_truth, all_predictions)\n', (9868, 9903), False, 'from sklearn.metrics import confusion_matrix\n'), ((10433, 10488), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(False)'}), '(num_classes, normalized=False)\n', (10457, 10488), True, 'import torchnet as tnt\n'), ((10515, 10569), 'torchnet.meter.ConfusionMeter', 'tnt.meter.ConfusionMeter', (['num_classes'], {'normalized': '(True)'}), '(num_classes, normalized=True)\n', (10539, 10569), True, 'import torchnet as tnt\n'), ((10591, 10634), 'os.path.join', 'os.path.join', (["kwargs['save_dir']", 'pre_model'], {}), "(kwargs['save_dir'], pre_model)\n", (10603, 10634), False, 'import os\n'), ((10826, 10848), 'torch.Tensor', 'torch.Tensor', (['[10, 10]'], {}), '([10, 10])\n', (10838, 10848), False, 'import torch\n'), ((11024, 11329), 'dataset.get_dataloaders_generated_data', 'get_dataloaders_generated_data', ([], {'generated_data_path': "kwargs['generated_data_path']", 
'data_split_lists_path': "kwargs['data_split_lists']", 'bands': "kwargs['bands']", 'model_input_size': "kwargs['input_dim']", 'num_classes': 'num_classes', 'train_split': '(0.8)', 'one_hot': '(True)', 'batch_size': 'batch_size', 'num_workers': '(0)'}), "(generated_data_path=kwargs[\n 'generated_data_path'], data_split_lists_path=kwargs['data_split_lists'\n ], bands=kwargs['bands'], model_input_size=kwargs['input_dim'],\n num_classes=num_classes, train_split=0.8, one_hot=True, batch_size=\n batch_size, num_workers=0)\n", (11054, 11329), False, 'from dataset import fix, get_dataloaders_generated_data\n'), ((14162, 14213), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_ground_truth', 'all_predictions'], {}), '(all_ground_truth, all_predictions)\n', (14178, 14213), False, 'from sklearn.metrics import confusion_matrix\n'), ((15981, 16007), 'os.mkdir', 'os.mkdir', (['destination_path'], {}), '(destination_path)\n', (15989, 16007), False, 'import os\n'), ((17356, 17386), 'torch.argmax', 'torch.argmax', (['softmaxed'], {'dim': '(1)'}), '(softmaxed, dim=1)\n', (17368, 17386), False, 'import torch\n'), ((17416, 17442), 'torch.argmax', 'torch.argmax', (['label'], {'dim': '(1)'}), '(label, dim=1)\n', (17428, 17442), False, 'import torch\n'), ((22483, 22561), 'sklearn.metrics.classification_report', 'classification_report', (['all_ground_truth', 'all_predictions'], {'target_names': 'classes'}), '(all_ground_truth, all_predictions, target_names=classes)\n', (22504, 22561), False, 'from sklearn.metrics import classification_report\n'), ((23696, 23830), 'sklearn.metrics.classification_report', 'classification_report', (['all_but_chitral_and_upper_dir_ground_truth', 'all_but_chitral_and_upper_dir_predictions'], {'target_names': 'classes'}), '(all_but_chitral_and_upper_dir_ground_truth,\n all_but_chitral_and_upper_dir_predictions, target_names=classes)\n', (23717, 23830), False, 'from sklearn.metrics import classification_report\n'), ((4925, 4952), 'torch.argmax', 
'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4937, 4952), False, 'import torch\n'), ((4986, 5012), 'torch.argmax', 'torch.argmax', (['label'], {'dim': '(1)'}), '(label, dim=1)\n', (4998, 5012), False, 'import torch\n'), ((7668, 7698), 'torch.argmax', 'torch.argmax', (['softmaxed'], {'dim': '(1)'}), '(softmaxed, dim=1)\n', (7680, 7698), False, 'import torch\n'), ((7798, 7824), 'torch.argmax', 'torch.argmax', (['label'], {'dim': '(1)'}), '(label, dim=1)\n', (7810, 7824), False, 'import torch\n'), ((10040, 10118), 'sklearn.metrics.classification_report', 'classification_report', (['all_ground_truth', 'all_predictions'], {'target_names': 'classes'}), '(all_ground_truth, all_predictions, target_names=classes)\n', (10061, 10118), False, 'from sklearn.metrics import classification_report\n'), ((11951, 11981), 'torch.argmax', 'torch.argmax', (['softmaxed'], {'dim': '(1)'}), '(softmaxed, dim=1)\n', (11963, 11981), False, 'import torch\n'), ((12081, 12107), 'torch.argmax', 'torch.argmax', (['label'], {'dim': '(1)'}), '(label, dim=1)\n', (12093, 12107), False, 'import torch\n'), ((14350, 14428), 'sklearn.metrics.classification_report', 'classification_report', (['all_ground_truth', 'all_predictions'], {'target_names': 'classes'}), '(all_ground_truth, all_predictions, target_names=classes)\n', (14371, 14428), False, 'from sklearn.metrics import classification_report\n'), ((15940, 15970), 'os.path.join', 'os.path.join', (['destination_path'], {}), '(destination_path)\n', (15952, 15970), False, 'import os\n'), ((21964, 21984), 'numpy.asarray', 'np.asarray', (['net_loss'], {}), '(net_loss)\n', (21974, 21984), True, 'import numpy as np\n'), ((6231, 6251), 'numpy.asarray', 'np.asarray', (['net_loss'], {}), '(net_loss)\n', (6241, 6251), True, 'import numpy as np\n'), ((9322, 9342), 'numpy.asarray', 'np.asarray', (['net_loss'], {}), '(net_loss)\n', (9332, 9342), True, 'import numpy as np\n'), ((13791, 13811), 'numpy.asarray', 'np.asarray', (['net_loss'], {}), 
'(net_loss)\n', (13801, 13811), True, 'import numpy as np\n'), ((15722, 15741), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15734, 15741), False, 'import torch\n'), ((2987, 3006), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2999, 3006), False, 'import torch\n'), ((10701, 10720), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10713, 10720), False, 'import torch\n'), ((18117, 18162), 'numpy.array', 'np.array', (['(ground_truth == district_prediction)'], {}), '(ground_truth == district_prediction)\n', (18125, 18162), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from powernet_model import PowerNetModel
from es import ES
from es_agent import ESAgent
from tqdm import tqdm
import copy
import numpy as np
from copy import deepcopy
from utils import process
import parl
import paddle.fluid as fluid
from parl import layers
class Track2PowerNetAgent(object):
    # Grid2Op track-2 agent.  Combines a pre-computed catalogue of topology
    # actions with an evolution-strategies (ES) network that pre-ranks them,
    # plus hand-written rules for line maintenance, reconnection and
    # substation reconfiguration.
    def __init__(self, action_space):
        """Initialize a new agent."""
        self.action_space = action_space
        self.actions = []
        # Pre-computed catalogue of candidate topology actions (stored as
        # flat action vectors, converted back to action objects here).
        actions_vec = np.load("./saved_files/top1000_actions.npz")["actions"]
        for i in range(actions_vec.shape[0]):
            act = action_space.from_vect(actions_vec[i])
            self.actions.append(act)
        self.actions = self.actions[:1000]
        self.act_num = len(self.actions)
        # sub_ids[i] is the substation touched by self.actions[i]
        # (parallel array to the catalogue).
        self.sub_ids = np.load('./saved_files/sub_id_info.npz')['sub_ids']
        self.do_nothing_action = action_space({})
        self.origin_ids = range(len(self.actions))
        # Map each substation id to its (start, end) slice inside the
        # set_bus part of the action vector (lines occupy the first
        # n_line entries).
        offset = action_space.n_line
        self.action_to_sub_topo = {}
        for sub_id, sub_elem_num in enumerate(action_space.sub_info):
            self.action_to_sub_topo[sub_id] = (offset, offset + sub_elem_num)
            offset += sub_elem_num
        self.step = 0
        # ES-trained network used to predict per-action rho and rank the
        # catalogue before simulating candidates.
        model = PowerNetModel()
        algorithm = ES(model)
        self.es_agent = ESAgent(algorithm)
        self.es_agent.restore(save_path='./saved_files', filename='model.ckpt')
        self.to_print_data = []
        self.last_disconnect_step = -100
        self.last_diconnect_line = None
        self.simulation_times = 0

    def simulate_do_nothing(self, observation):
        """Simulate this step's fallback action.

        The fallback is either do-nothing or a proactive disconnection of
        the first still-connected line whose maintenance starts within 9
        steps.  Returns (obs_simulate, done_simulate, to_check_action,
        to_maintain_lines).
        """
        init_to_maintain_lines = np.where((observation.time_next_maintenance>0) \
                                & (observation.time_next_maintenance<9))[0]
        to_check_action = self.do_nothing_action
        to_maintain_lines = []
        for line_id in init_to_maintain_lines:
            if observation.line_status[line_id]:
                to_maintain_lines.append(line_id)
        # we do not disconnect the only line in advance
        if len(to_maintain_lines) == 1:
            rest_step = observation.time_next_maintenance[to_maintain_lines[0]]
            if rest_step > 1:
                to_maintain_lines = []
        else: # we only maintain the first line in `to_maintain_lines`
            to_maintain_lines = to_maintain_lines[:1]
        # Only disconnect proactively if we have not disconnected anything
        # in the last 3 steps (cooldown heuristic).
        if len(to_maintain_lines
               ) != 0 and self.step - self.last_disconnect_step > 3:
            line_status = []
            for line_id in to_maintain_lines:
                line_status.append((line_id, -1))
            to_check_action = self.action_space({
                'set_line_status': line_status
            })
            obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                to_check_action)
            observation._obs_env._reset_to_orig_state()
        else:
            obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                to_check_action)
            observation._obs_env._reset_to_orig_state()
        return obs_simulate, done_simulate, to_check_action, to_maintain_lines

    def find_unaccessible_pos(self, to_check_action):
        """Return topology-vector positions locked by a line disconnection.

        Bus assignments at the endpoints of lines that `to_check_action`
        disconnects must not be overridden when combining it with a
        catalogued topology action.
        """
        if to_check_action == self.do_nothing_action:
            return []
        lines = to_check_action.as_dict()['set_line_status']['disconnected_id']
        arr = []
        for line_id in lines:
            arr.append((line_id, 1))
        # Build a dummy set_bus action on both endpoints of each line just
        # to discover which topo-vector slots those endpoints occupy.
        act = self.action_space({
            "set_bus": {
                "lines_ex_id": arr,
                "lines_or_id": arr
            }
        })
        pos = np.where(act._set_topo_vect != 0)[0]
        return pos

    def avoid_overflow(self, observation, reset_action=None):
        """Pick an action that minimizes the simulated max line load (rho).

        With reset_action=None the baseline is `simulate_do_nothing`;
        otherwise `reset_action` (a line reconnection) is the baseline.
        Candidate topology actions are pre-ranked by the ES network, then
        simulated; the first one bringing rho below 0.95 wins, else the
        one with the smallest simulated rho.  Returns (action, sub_id)
        where sub_id is -1 when no catalogued action was chosen.
        """
        if reset_action is None:
            obs_simulate, done_simulate, to_check_action, to_maintain_lines = self.simulate_do_nothing(
                observation)
        else:
            to_check_action = reset_action
            to_maintain_lines = []
            obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                to_check_action)
            observation._obs_env._reset_to_orig_state()
        has_overflow = False
        if observation is not None and not any(np.isnan(observation.rho)):
            has_overflow = any(observation.rho > 1.0) or any(
                obs_simulate.rho > 1.0)
        # No overflow now or in simulation: keep the baseline action.
        if not (done_simulate or has_overflow) and (
                to_check_action == self.do_nothing_action):
            return self.do_nothing_action, -1
        if to_check_action != self.do_nothing_action and obs_simulate.rho.max(
        ) < 1.0 and not done_simulate:
            return to_check_action, -1
        # action selection and rerank
        extracted_obs = process(observation).astype(np.float32)
        top_idx, pred_rho = self.es_agent.predict_unitary_actions_rho(
            extracted_obs)
        # Only simulate the 80 most promising catalogue actions.
        action_selected = [False] * len(self.actions)
        for i in range(80):
            idx = top_idx[i]
            action_selected[idx] = True
        # select_action_by_dis
        overflow_lines = np.where(observation.rho > 1.0)[0].tolist()
        if len(overflow_lines) == 0:
            overflow_lines = np.where(obs_simulate.rho > 1.0)[0].tolist()
        best_idx = -1
        least_overflow_action = self.do_nothing_action
        least_overflow = 10.0
        least_obs_simulate = obs_simulate
        if obs_simulate is not None and not any(np.isnan(obs_simulate.rho)):
            least_overflow = float(np.max(obs_simulate.rho))
        if reset_action is None:
            illegal_pos = self.find_unaccessible_pos(to_check_action)
        else:
            illegal_pos = []
        self.simulation_times += 1
        found = False
        for idx in range(self.act_num):
            if not action_selected[idx]: continue
            to_simulate_action = self.actions[idx]
            # check conflict
            if to_check_action != self.do_nothing_action:
                # Skip catalogue actions that would touch bus slots locked
                # by the baseline disconnection, then merge the two vectors.
                illegal_pos_value = to_simulate_action._set_topo_vect[
                    illegal_pos]
                if np.any(illegal_pos_value):
                    continue
                action1_vec = to_simulate_action.to_vect()
                action2_vec = to_check_action.to_vect()
                to_simulate_action = self.action_space.from_vect(action1_vec +
                                                                 action2_vec)
            legal_action = self.correct_action(observation, to_simulate_action,
                                               self.sub_ids[idx])
            if legal_action == self.do_nothing_action:
                continue
            obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                legal_action)
            observation._obs_env._reset_to_orig_state()
            max_rho = obs_simulate.rho.max()
            assert not info_simulate['is_illegal'] and not info_simulate[
                'is_ambiguous']
            if obs_simulate is not None and not any(
                    np.isnan(obs_simulate.rho)):
                if not done_simulate:
                    overflow_value = float(np.max(obs_simulate.rho))
                    if (not found) and (overflow_value < least_overflow):
                        least_overflow = overflow_value
                        least_overflow_action = legal_action
                        least_obs_simulate = obs_simulate
                        best_idx = idx
                    # Good enough: stop searching early.
                    if least_overflow < 0.95:
                        if not found:
                            pass
                        found = True
                        break
            continue
        if best_idx != -1:
            least_overflow_action = self.correct_action(
                observation, least_overflow_action, self.sub_ids[best_idx])
            # Remember when we last disconnected a line so the cooldown
            # heuristic in simulate_do_nothing can apply.
            if to_check_action != self.do_nothing_action and least_overflow_action != self.do_nothing_action and reset_action is None:
                self.last_disconnect_step = self.step - 1
                self.last_diconnect_line = to_maintain_lines[0]
            if reset_action is not None:
                pass
            return least_overflow_action, self.sub_ids[best_idx]
        else:
            return self.do_nothing_action, -1

    def correct_action(self, observation, to_simulate_action, sub_id):
        """Make a catalogued action legal for the current grid state.

        Returns do-nothing when the target substation is in cooldown, and
        strips bus assignments on elements that are currently on bus -1
        (disconnected) so the action stays unambiguous.
        """
        if sub_id != -1:
            if observation.time_before_cooldown_sub[sub_id] != 0:
                legal_action_vec = deepcopy(self.do_nothing_action.to_vect())
                return self.do_nothing_action
            else:
                legal_action_vec = deepcopy(to_simulate_action.to_vect())
                sub_topo = self.sub_topo_dict[sub_id]
                if np.any(sub_topo == -1): # line disconnected
                    start, end = self.action_to_sub_topo[sub_id]
                    action_topo = legal_action_vec[start:end].astype(
                        "int") # reference
                    action_topo[np.where(
                        sub_topo == -1)[0]] = 0 # don't change bus=-1
                    legal_action_vec[start:end] = action_topo
            legal_action = self.action_space.from_vect(legal_action_vec)
        elif sub_id == -1:
            legal_action = to_simulate_action
        else: # TODO remove
            legal_action = self.do_nothing_action
        return legal_action

    def act(self, observation, reward, done):
        """Main policy entry point: return the action for this step.

        Priority order: (1) reconnect a safely reconnectable line,
        (2) when load is low, revert substations back to bus 1,
        (3) otherwise run the overflow-avoidance search.
        """
        self.step += 1
        # Cache the per-substation slice of the current topology vector.
        offset = 0
        self.sub_topo_dict = {}
        for sub_id, sub_elem_num in enumerate(observation.sub_info):
            sub_topo = observation.topo_vect[offset:offset + sub_elem_num]
            offset += sub_elem_num
            self.sub_topo_dict[sub_id] = sub_topo
        disconnected = np.where(observation.line_status == False)[0].tolist()
        to_maintain_lines = np.where((observation.time_next_maintenance>0) \
                                & (observation.time_next_maintenance<15))[0]
        to_maintain_lines = to_maintain_lines.tolist()
        if len(disconnected) > 0:
            for line_id in disconnected:
                # Reconnect only lines out of cooldown and not about to be
                # taken down for maintenance anyway.
                if observation.time_before_cooldown_line[line_id] == 0 and \
                        line_id not in to_maintain_lines:
                    reset_action = self.action_space({
                        "set_line_status": [(line_id, +1)]
                    })
                    obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                        reset_action)
                    observation._obs_env._reset_to_orig_state()
                    # Skip the reconnection if it would create an overflow.
                    if np.max(observation.rho) < 1.0 and np.max(
                            obs_simulate.rho) >= 1.0:
                        continue
                    combined_action, sub_id = self.avoid_overflow(
                        observation, reset_action)
                    return combined_action
        if observation is not None and not any(np.isnan(observation.rho)):
            # Grid is comfortably loaded: try to revert any substation that
            # still has elements on bus 2 back to the reference topology.
            if np.max(observation.rho) < 0.94 and np.any(
                    observation.topo_vect == 2):
                offset = 0
                for sub_id, sub_elem_num in enumerate(observation.sub_info):
                    sub_topo = self.sub_topo_dict[sub_id]
                    if np.any(
                            sub_topo == 2
                    ) and observation.time_before_cooldown_sub[sub_id] == 0:
                        sub_topo = np.where(sub_topo == 2, 1,
                                            sub_topo) # bus 2 to bus 1
                        sub_topo = np.where(
                            sub_topo == -1, 0,
                            sub_topo) # don't do action in bus=-1
                        reconfig_sub = self.action_space({
                            "set_bus": {
                                "substations_id": [(sub_id, sub_topo)]
                            }
                        })
                        obs_simulate, reward_simulate, done_simulate, info_simulate = observation.simulate(
                            reconfig_sub)
                        observation._obs_env._reset_to_orig_state()
                        assert not info_simulate[
                            'is_illegal'] and not info_simulate['is_ambiguous']
                        if not done_simulate and obs_simulate is not None and not any(
                                np.isnan(obs_simulate.rho)):
                            if np.max(obs_simulate.rho) < 0.95:
                                return reconfig_sub
                        else:
                            pass
        action, sub_id = self.avoid_overflow(observation)
        return action
| [
"numpy.where",
"es_agent.ESAgent",
"numpy.any",
"numpy.max",
"powernet_model.PowerNetModel",
"numpy.isnan",
"utils.process",
"numpy.load",
"es.ES"
] | [((1829, 1844), 'powernet_model.PowerNetModel', 'PowerNetModel', ([], {}), '()\n', (1842, 1844), False, 'from powernet_model import PowerNetModel\n'), ((1865, 1874), 'es.ES', 'ES', (['model'], {}), '(model)\n', (1867, 1874), False, 'from es import ES\n'), ((1899, 1917), 'es_agent.ESAgent', 'ESAgent', (['algorithm'], {}), '(algorithm)\n', (1906, 1917), False, 'from es_agent import ESAgent\n'), ((1075, 1119), 'numpy.load', 'np.load', (['"""./saved_files/top1000_actions.npz"""'], {}), "('./saved_files/top1000_actions.npz')\n", (1082, 1119), True, 'import numpy as np\n'), ((1379, 1419), 'numpy.load', 'np.load', (['"""./saved_files/sub_id_info.npz"""'], {}), "('./saved_files/sub_id_info.npz')\n", (1386, 1419), True, 'import numpy as np\n'), ((2229, 2325), 'numpy.where', 'np.where', (['((observation.time_next_maintenance > 0) & (observation.\n time_next_maintenance < 9))'], {}), '((observation.time_next_maintenance > 0) & (observation.\n time_next_maintenance < 9))\n', (2237, 2325), True, 'import numpy as np\n'), ((4217, 4250), 'numpy.where', 'np.where', (['(act._set_topo_vect != 0)'], {}), '(act._set_topo_vect != 0)\n', (4225, 4250), True, 'import numpy as np\n'), ((9362, 9384), 'numpy.any', 'np.any', (['(sub_topo == -1)'], {}), '(sub_topo == -1)\n', (9368, 9384), True, 'import numpy as np\n'), ((10449, 10546), 'numpy.where', 'np.where', (['((observation.time_next_maintenance > 0) & (observation.\n time_next_maintenance < 15))'], {}), '((observation.time_next_maintenance > 0) & (observation.\n time_next_maintenance < 15))\n', (10457, 10546), True, 'import numpy as np\n'), ((5366, 5386), 'utils.process', 'process', (['observation'], {}), '(observation)\n', (5373, 5386), False, 'from utils import process\n'), ((6130, 6154), 'numpy.max', 'np.max', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (6136, 6154), True, 'import numpy as np\n'), ((6712, 6737), 'numpy.any', 'np.any', (['illegal_pos_value'], {}), '(illegal_pos_value)\n', (6718, 6737), True, 'import numpy as 
np\n'), ((11616, 11650), 'numpy.any', 'np.any', (['(observation.topo_vect == 2)'], {}), '(observation.topo_vect == 2)\n', (11622, 11650), True, 'import numpy as np\n'), ((4856, 4881), 'numpy.isnan', 'np.isnan', (['observation.rho'], {}), '(observation.rho)\n', (4864, 4881), True, 'import numpy as np\n'), ((5713, 5744), 'numpy.where', 'np.where', (['(observation.rho > 1.0)'], {}), '(observation.rho > 1.0)\n', (5721, 5744), True, 'import numpy as np\n'), ((6066, 6092), 'numpy.isnan', 'np.isnan', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (6074, 6092), True, 'import numpy as np\n'), ((10366, 10408), 'numpy.where', 'np.where', (['(observation.line_status == False)'], {}), '(observation.line_status == False)\n', (10374, 10408), True, 'import numpy as np\n'), ((11538, 11563), 'numpy.isnan', 'np.isnan', (['observation.rho'], {}), '(observation.rho)\n', (11546, 11563), True, 'import numpy as np\n'), ((11581, 11604), 'numpy.max', 'np.max', (['observation.rho'], {}), '(observation.rho)\n', (11587, 11604), True, 'import numpy as np\n'), ((5823, 5855), 'numpy.where', 'np.where', (['(obs_simulate.rho > 1.0)'], {}), '(obs_simulate.rho > 1.0)\n', (5831, 5855), True, 'import numpy as np\n'), ((7675, 7701), 'numpy.isnan', 'np.isnan', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (7683, 7701), True, 'import numpy as np\n'), ((7785, 7809), 'numpy.max', 'np.max', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (7791, 7809), True, 'import numpy as np\n'), ((9602, 9626), 'numpy.where', 'np.where', (['(sub_topo == -1)'], {}), '(sub_topo == -1)\n', (9610, 9626), True, 'import numpy as np\n'), ((11859, 11880), 'numpy.any', 'np.any', (['(sub_topo == 2)'], {}), '(sub_topo == 2)\n', (11865, 11880), True, 'import numpy as np\n'), ((12021, 12057), 'numpy.where', 'np.where', (['(sub_topo == 2)', '(1)', 'sub_topo'], {}), '(sub_topo == 2, 1, sub_topo)\n', (12029, 12057), True, 'import numpy as np\n'), ((12155, 12192), 'numpy.where', 'np.where', (['(sub_topo == -1)', '(0)', 
'sub_topo'], {}), '(sub_topo == -1, 0, sub_topo)\n', (12163, 12192), True, 'import numpy as np\n'), ((11200, 11223), 'numpy.max', 'np.max', (['observation.rho'], {}), '(observation.rho)\n', (11206, 11223), True, 'import numpy as np\n'), ((11234, 11258), 'numpy.max', 'np.max', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (11240, 11258), True, 'import numpy as np\n'), ((13036, 13060), 'numpy.max', 'np.max', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (13042, 13060), True, 'import numpy as np\n'), ((12976, 13002), 'numpy.isnan', 'np.isnan', (['obs_simulate.rho'], {}), '(obs_simulate.rho)\n', (12984, 13002), True, 'import numpy as np\n')] |
from __future__ import division
from scipy.stats import norm
from pandas import read_excel
import numpy as np
# Transform to normal distribution #
def allnorm(x, y):
    """Fit univariate normal distributions to x and y (MLE).

    Parameters
    ----------
    x, y : array-like
        Samples of equal length (plain lists are accepted).

    Returns
    -------
    dict with keys:
        "sigma": [sigmax, sigmay, meanx, meany] -- MLE estimates.
        "hes_norm": 4x4 block-diagonal Hessian of the log-likelihood,
            ordered (meanx, sigmax) then (meany, sigmay).
        "u", "v": normal CDF values of the standardized samples
            (pseudo-observations), clamped below 1 to avoid overflow
            in downstream copula computations.
    """
    # Coerce to float arrays so the vectorized arithmetic below works
    # even when the caller passes plain Python lists (the original
    # `x - meanx` raised TypeError on lists).
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    sample = len(x)

    # Estimate norm parameters (MLE of location and scale) #
    meanx, sigmax = norm.fit(x, loc=0, scale=1)
    meany, sigmay = norm.fit(y, loc=0, scale=1)

    # Save frequent calculations #
    x_minus_mean_x = x - meanx
    y_minus_mean_y = y - meany
    sigmax_pow_3 = sigmax ** 3
    sigmax_pow_2 = sigmax ** 2
    sigmay_pow_3 = sigmay ** 3
    sigmay_pow_2 = sigmay ** 2
    minus_sample = -sample

    # Hessian of the normal log-likelihood w.r.t. (mean, sigma):
    #   d2/dmean2       = -n / sigma^2
    #   d2/dmean dsigma = -2 * sum(x - mean) / sigma^3
    #   d2/dsigma2      =  n / sigma^2 - 3 * sum((x - mean)^2) / sigma^4
    hes_normx = np.array([[minus_sample / sigmax_pow_2,
                           -2 * np.sum(x_minus_mean_x) / sigmax_pow_3],
                          [-2 * np.sum(x_minus_mean_x) / sigmax_pow_3,
                           (sample / sigmax_pow_2) - (3 * np.sum(x_minus_mean_x ** 2) / (sigmax ** 4))]
                          ])
    hes_normy = np.array([[minus_sample / sigmay_pow_2,
                           -2 * np.sum(y_minus_mean_y) / sigmay_pow_3],
                          # BUGFIX: was np.sum(x - meany) -- mixed x into the
                          # y-block of the Hessian.
                          [-2 * np.sum(y_minus_mean_y) / sigmay_pow_3,
                           (sample / sigmay_pow_2) - (3 * np.sum(y_minus_mean_y ** 2) / sigmay ** 4)]
                          ])

    # Calculate cumulative of standardized x and y #
    u = norm.cdf(x_minus_mean_x / sigmax, loc=0, scale=1)
    v = norm.cdf(y_minus_mean_y / sigmay, loc=0, scale=1)

    # Assemble the 4x4 block-diagonal Hessian #
    zeros_tmp = np.zeros((2, 2))
    new_hes_normx = np.concatenate((hes_normx, zeros_tmp), axis=1)
    new_hes_normy = np.concatenate((zeros_tmp, hes_normy), axis=1)
    hes_norm = np.concatenate((new_hes_normx, new_hes_normy), axis=0)
    sigma = [sigmax, sigmay, meanx, meany]

    # Clamp CDF values of exactly 1 (vectorized replacement of the
    # original element-wise loop) to avoid overflow downstream.
    u = np.where(u == 1, 0.99999999, u)
    v = np.where(v == 1, 0.99999999, v)

    result = {"sigma": sigma,
              "hes_norm": hes_norm,
              "u": u, "v": v
              }
    return result
# Test #
if __name__ == "__main__":
    # Smoke test: fit the marginals of the artificial data set and print
    # the estimates, the Hessian and the first few pseudo-observations.
    df = read_excel("/home/petropoulakis/Desktop/artificial_data.xlsx", sheet_name='Sheet1')
    x = [[float(row['x'])] for _, row in df.iterrows()]
    y = [[float(row['y'])] for _, row in df.iterrows()]
    result = allnorm(x, y)
    print(result['sigma'])
    print(result['hes_norm'])
    print(result['u'][:5])
    print(result['v'][:5])
| [
"scipy.stats.norm.fit",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"pandas.read_excel",
"scipy.stats.norm.cdf"
] | [((235, 262), 'scipy.stats.norm.fit', 'norm.fit', (['x'], {'loc': '(0)', 'scale': '(1)'}), '(x, loc=0, scale=1)\n', (243, 262), False, 'from scipy.stats import norm\n'), ((319, 346), 'scipy.stats.norm.fit', 'norm.fit', (['y'], {'loc': '(0)', 'scale': '(1)'}), '(y, loc=0, scale=1)\n', (327, 346), False, 'from scipy.stats import norm\n'), ((1293, 1342), 'scipy.stats.norm.cdf', 'norm.cdf', (['(x_minus_mean_x / sigmax)'], {'loc': '(0)', 'scale': '(1)'}), '(x_minus_mean_x / sigmax, loc=0, scale=1)\n', (1301, 1342), False, 'from scipy.stats import norm\n'), ((1351, 1400), 'scipy.stats.norm.cdf', 'norm.cdf', (['(y_minus_mean_y / sigmay)'], {'loc': '(0)', 'scale': '(1)'}), '(y_minus_mean_y / sigmay, loc=0, scale=1)\n', (1359, 1400), False, 'from scipy.stats import norm\n'), ((1437, 1453), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1445, 1453), True, 'import numpy as np\n'), ((1475, 1521), 'numpy.concatenate', 'np.concatenate', (['(hes_normx, zeros_tmp)'], {'axis': '(1)'}), '((hes_normx, zeros_tmp), axis=1)\n', (1489, 1521), True, 'import numpy as np\n'), ((1542, 1588), 'numpy.concatenate', 'np.concatenate', (['(zeros_tmp, hes_normy)'], {'axis': '(1)'}), '((zeros_tmp, hes_normy), axis=1)\n', (1556, 1588), True, 'import numpy as np\n'), ((1605, 1659), 'numpy.concatenate', 'np.concatenate', (['(new_hes_normx, new_hes_normy)'], {'axis': '(0)'}), '((new_hes_normx, new_hes_normy), axis=0)\n', (1619, 1659), True, 'import numpy as np\n'), ((2039, 2127), 'pandas.read_excel', 'read_excel', (['"""/home/petropoulakis/Desktop/artificial_data.xlsx"""'], {'sheet_name': '"""Sheet1"""'}), "('/home/petropoulakis/Desktop/artificial_data.xlsx', sheet_name=\n 'Sheet1')\n", (2049, 2127), False, 'from pandas import read_excel\n'), ((753, 775), 'numpy.sum', 'np.sum', (['x_minus_mean_x'], {}), '(x_minus_mean_x)\n', (759, 775), True, 'import numpy as np\n'), ((826, 848), 'numpy.sum', 'np.sum', (['x_minus_mean_x'], {}), '(x_minus_mean_x)\n', (832, 848), True, 'import numpy as 
np\n'), ((1033, 1055), 'numpy.sum', 'np.sum', (['y_minus_mean_y'], {}), '(y_minus_mean_y)\n', (1039, 1055), True, 'import numpy as np\n'), ((1106, 1123), 'numpy.sum', 'np.sum', (['(x - meany)'], {}), '(x - meany)\n', (1112, 1123), True, 'import numpy as np\n'), ((898, 925), 'numpy.sum', 'np.sum', (['(x_minus_mean_x ** 2)'], {}), '(x_minus_mean_x ** 2)\n', (904, 925), True, 'import numpy as np\n'), ((1173, 1200), 'numpy.sum', 'np.sum', (['(y_minus_mean_y ** 2)'], {}), '(y_minus_mean_y ** 2)\n', (1179, 1200), True, 'import numpy as np\n')] |
import torch.utils.data as data
import torch
import numpy as np
import os
from os import listdir
from os.path import join
from PIL import Image, ImageOps, ImageEnhance
import random
import re
import json
import math
import torch.nn.functional as F
def is_image_file(filename):
    """Return True if *filename* has a supported image extension.

    The check is case-insensitive, so files named ".PNG"/".JPG"/".JPEG"
    are accepted too (the original lower-case-only comparison silently
    skipped them when scanning directories).
    """
    return filename.lower().endswith((".png", ".jpg", ".jpeg"))
def load_img(filepath, i):
    """Load the image at *filepath*.

    When ``i == 0`` the image is converted to YCbCr and only the
    luminance (Y) channel is returned; for any other ``i`` the full RGB
    image is returned.
    """
    source = Image.open(filepath)
    if i == 0:
        # Keep only the Y (luma) channel of the YCbCr representation.
        img = source.convert('YCbCr').getchannel(0)
    else:
        img = source.convert('RGB')
    return img
def rescale_img(img_in, scale):
    """Resize *img_in* by *scale* on both axes using bicubic resampling."""
    width, height = img_in.size
    target_size = (int(scale * width), int(scale * height))
    return img_in.resize(target_size, resample=Image.BICUBIC)
def modcrop(im):
    """Centre-crop *im* toward a wide aspect, rotate portrait images to
    landscape, and resize the result to 512x256 with bicubic resampling.
    """
    (w, h) = im.size
    # new_h = h//modulo*modulo
    # new_w = w//modulo*modulo
    # ih = h - new_h
    # iw = w - new_w
    if w >= h:
        # Landscape: trim rows from top/bottom, keeping the crop centred.
        # NOTE(review): dt targets a crop height of w//2, but
        # db = h - w - dt leaves a crop of height w instead -- presumably
        # `db = h - w//2 - dt` was intended; confirm against the data prep.
        dt = (h - w//2)//2
        db = h - w - dt
        ims = im.crop((0, dt, w, h - db))
    else:
        # Portrait: trim columns from left/right, then rotate 90 degrees
        # so the output is always landscape before the fixed resize.
        dl = (w - h//2)//2
        dr = w - h - dl
        ims = im.crop((dl, 0, w - dr, h))
        ims = ims.rotate(90)
    new_size_in = tuple([512, 256])
    ims = ims.resize(new_size_in, resample=Image.BICUBIC)
    return ims
def get_patch(img_in, img_tar, patch_size, scale, ix=-1, iy=-1):
    """Cut matching random patches from a low-res / target image pair.

    The low-res patch has edge ``patch_size`` and the target patch is
    ``scale`` times larger, taken at the corresponding scaled position.
    Pass ``ix``/``iy`` >= 0 to pin the patch corner instead of sampling.

    NOTE(review): PIL's ``size`` is (width, height), so ``ih`` below
    actually holds the width -- sampling stays in bounds on both axes,
    but confirm the intended axis convention against callers.
    """
    ih, iw = img_in.size
    tp = scale * patch_size   # target (high-res) patch edge
    ip = tp // scale          # input (low-res) patch edge
    # Sample the low-res corner unless the caller pinned it
    # (two randrange draws, in the same order as before).
    if ix == -1:
        ix = random.randrange(0, iw - ip + 1)
    if iy == -1:
        iy = random.randrange(0, ih - ip + 1)
    tx, ty = scale * ix, scale * iy  # corresponding high-res corner
    lr_patch = img_in.crop((iy, ix, iy + ip, ix + ip))
    hr_patch = img_tar.crop((ty, tx, ty + tp, tx + tp))
    return lr_patch, hr_patch
def augment(img_left, img_right, img_tar, img_mask, flip_h=True, rot=True):
    """Randomly flip all four images vertically together (50% chance).

    Returns the (possibly flipped) images plus a dict recording which
    augmentations fired.  The mirror/rotation path (*rot*) is currently
    disabled upstream, so only 'flip_h' can become True.
    """
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}
    # torch.rand is drawn before checking flip_h so the RNG stream is
    # consumed identically whether or not flipping is enabled.
    do_flip = torch.rand(1).item() < 0.5 and flip_h
    if do_flip:
        img_left = ImageOps.flip(img_left)
        img_right = ImageOps.flip(img_right)
        img_tar = ImageOps.flip(img_tar)
        img_mask = ImageOps.flip(img_mask)
        info_aug['flip_h'] = True
    return img_left, img_right, img_tar, img_mask, info_aug
class DatasetFromFolder(data.Dataset):
    """Training dataset: for each record in train_record.json, load a random
    (left, right, target, mask) view quadruple rendered at (x, y) and return
    them with the normalised position and a one-hot interpolation code."""

    def __init__(self, data_dir, data_augmentation, transform=None):
        # data_dir: root containing train_record.json, train_data/, train_mask/
        # data_augmentation: when truthy, apply a random vertical flip per sample
        super(DatasetFromFolder, self).__init__()
        self.transform = transform
        self.data_augmentation = data_augmentation
        self.data_dir = data_dir
        # NOTE(review): file handle is never closed; a `with` block would be safer.
        f = open(self.data_dir + '/train_record.json')
        self.render_params = json.load(f)

    def __getitem__(self, index):
        # (x, y) is the render position for this record.
        x = self.render_params[index]['x']
        y = self.render_params[index]['y']
        # Left view angle: a random multiple of 15 degrees in [0, 300].
        rotate_left = random.randint(0, 20) * 15 # max=300
        rotate_right = rotate_left + 60
        ratio = 12
        if rotate_right == 360:
            rotate_right = 0
        # Target lies strictly between left and right: left + 5..55 degrees.
        rotate_target = rotate_left + 5 * random.randint(1, 11)
        left_name = self.data_dir + '/train_data/' + str(x) + '_' + str(y) + '_' + str(rotate_left) + '.png'
        right_name = self.data_dir + '/train_data/' + str(x) + '_' + str(y) + '_' + str(rotate_right) + '.png'
        target_name = self.data_dir + '/train_data/' + str(x) + '_' + str(y) + '_' + str(rotate_target) + '.png'
        mask_name = self.data_dir + '/train_mask/' + str(x) + '_' + str(y) + '_' + str(rotate_target) + '.png'
        # All images are loaded as RGB and downscaled by half.
        left_img = load_img(left_name, 1)
        left_img = rescale_img(left_img, 0.5)  # 320*240
        right_img = load_img(right_name, 1)
        right_img = rescale_img(right_img, 0.5)  # 320*240
        target_img = load_img(target_name, 1)
        target_img = rescale_img(target_img, 0.5)
        mask_img = load_img(mask_name, 1)
        mask_img = rescale_img(mask_img, 0.5)
        # theta in 1..11 indexes the target's offset from the left view
        # (0.2 * degrees / one 5-degree step); encoded one-hot over 12 classes.
        theta = int(0.2 * (rotate_target - rotate_left))
        code = F.one_hot(torch.tensor(theta), num_classes=12)
        code = np.float32(code.numpy())
        # Normalise position; constants presumably match the scene's extent
        # (x in [-6700, 11000], y in [-1900, 1100]) — TODO confirm.
        x = (np.float32(x) + 6700) / 17700
        y = (np.float32(y) + 1900) / 3000
        # theta = np.float32(theta)
        if self.data_augmentation:
            left_img, right_img, target_img, mask_img, _ = augment(left_img, right_img, target_img, mask_img)
        if self.transform:
            left_img = self.transform(left_img)
            right_img = self.transform(right_img)
            target_img = self.transform(target_img)
            mask_img = self.transform(mask_img)
        return left_img, right_img, target_img, mask_img, x, y, code

    def __len__(self):
        # One sample per record in train_record.json.
        return len(self.render_params)
class DatasetFromFolderEval(data.Dataset):
    """Evaluation dataset: iterate every image file in *lr_dir*, returning
    the bicubically rescaled image and its bare file name."""

    def __init__(self, lr_dir, upscale_factor, transform=None):
        # lr_dir: directory scanned (non-recursively) for .png/.jpg/.jpeg files
        # upscale_factor: scale passed to rescale_img
        super(DatasetFromFolderEval, self).__init__()
        self.image_filenames = [join(lr_dir, x) for x in listdir(lr_dir) if is_image_file(x)]
        self.upscale_factor = upscale_factor
        self.transform = transform

    def __getitem__(self, index):
        # BUG FIX: load_img requires a mode flag; the original call passed a
        # single argument and raised TypeError.  1 selects the RGB path used
        # everywhere else in this module.
        img = load_img(self.image_filenames[index], 1)
        _, file = os.path.split(self.image_filenames[index])
        bicubic = rescale_img(img, self.upscale_factor)
        if self.transform:
            bicubic = self.transform(bicubic)
        return bicubic, file

    def __len__(self):
        return len(self.image_filenames)
def _to_radians(deg):
return deg * (np.pi / 180)
def rot_matrix_x(theta):
    """Return the 3x3 float32 rotation matrix about the x-axis.

    theta: angle in radians
    """
    c, s = np.cos(theta), np.sin(theta)
    mat = np.zeros((3, 3), dtype=np.float32)
    mat[0, 0] = 1.
    mat[1, 1] = c
    mat[1, 2] = -s
    mat[2, 1] = s
    mat[2, 2] = c
    return mat
def rot_matrix_y(theta):
    """Return the 3x3 float32 rotation matrix about the y-axis.

    theta: angle in radians
    """
    c, s = np.cos(theta), np.sin(theta)
    mat = np.zeros((3, 3), dtype=np.float32)
    mat[0, 0] = c
    mat[0, 2] = s
    mat[1, 1] = 1.
    mat[2, 0] = -s
    mat[2, 2] = c
    return mat
def rot_matrix_z(theta):
    """Return the 3x3 float32 rotation matrix about the z-axis.

    theta: angle in radians
    """
    c, s = np.cos(theta), np.sin(theta)
    mat = np.zeros((3, 3), dtype=np.float32)
    mat[0, 0] = c
    mat[0, 1] = -s
    mat[1, 0] = s
    mat[1, 1] = c
    mat[2, 2] = 1.
    return mat
def pad_rotmat(theta):
    """Append a zero translation column to a (3x3) rotation matrix -> (3x4)."""
    translation = np.zeros((3, 1))
    return np.concatenate((theta, translation), axis=1)
def get_theta(angles):
    '''Build a (3x4) pose matrix from a single angle given in degrees.

    Uses the Euler decomposition Rz * Ry * Rx with the x- and z-rotations
    fixed at zero, so only the y-axis (yaw) component of *angles* matters.
    The rotation is padded with a zero translation column.
    '''
    yaw = _to_radians(angles)
    rotation = np.dot(np.dot(rot_matrix_z(0), rot_matrix_y(yaw)), rot_matrix_x(0))
    return pad_rotmat(rotation)
"PIL.Image.open",
"os.listdir",
"numpy.float32",
"random.randrange",
"os.path.join",
"os.path.split",
"json.load",
"torch.tensor",
"numpy.zeros",
"numpy.cos",
"PIL.ImageOps.flip",
"numpy.sin",
"random.randint",
"torch.rand"
] | [((6182, 6195), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6188, 6195), True, 'import numpy as np\n'), ((6243, 6256), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6249, 6256), True, 'import numpy as np\n'), ((6273, 6286), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6279, 6286), True, 'import numpy as np\n'), ((6436, 6449), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6442, 6449), True, 'import numpy as np\n'), ((6466, 6479), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6472, 6479), True, 'import numpy as np\n'), ((6546, 6559), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6552, 6559), True, 'import numpy as np\n'), ((6709, 6722), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6715, 6722), True, 'import numpy as np\n'), ((6770, 6783), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6776, 6783), True, 'import numpy as np\n'), ((6800, 6813), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6806, 6813), True, 'import numpy as np\n'), ((1523, 1555), 'random.randrange', 'random.randrange', (['(0)', '(iw - ip + 1)'], {}), '(0, iw - ip + 1)\n', (1539, 1555), False, 'import random\n'), ((1586, 1618), 'random.randrange', 'random.randrange', (['(0)', '(ih - ip + 1)'], {}), '(0, ih - ip + 1)\n', (1602, 1618), False, 'import random\n'), ((2097, 2120), 'PIL.ImageOps.flip', 'ImageOps.flip', (['img_left'], {}), '(img_left)\n', (2110, 2120), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2141, 2165), 'PIL.ImageOps.flip', 'ImageOps.flip', (['img_right'], {}), '(img_right)\n', (2154, 2165), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2184, 2206), 'PIL.ImageOps.flip', 'ImageOps.flip', (['img_tar'], {}), '(img_tar)\n', (2197, 2206), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2226, 2249), 'PIL.ImageOps.flip', 'ImageOps.flip', (['img_mask'], {}), '(img_mask)\n', (2239, 2249), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((3215, 3227), 'json.load', 
'json.load', (['f'], {}), '(f)\n', (3224, 3227), False, 'import json\n'), ((5658, 5700), 'os.path.split', 'os.path.split', (['self.image_filenames[index]'], {}), '(self.image_filenames[index])\n', (5671, 5700), False, 'import os\n'), ((6213, 6226), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6219, 6226), True, 'import numpy as np\n'), ((6516, 6529), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6522, 6529), True, 'import numpy as np\n'), ((6740, 6753), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6746, 6753), True, 'import numpy as np\n'), ((3371, 3392), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (3385, 3392), False, 'import random\n'), ((4510, 4529), 'torch.tensor', 'torch.tensor', (['theta'], {}), '(theta)\n', (4522, 4529), False, 'import torch\n'), ((5409, 5424), 'os.path.join', 'join', (['lr_dir', 'x'], {}), '(lr_dir, x)\n', (5413, 5424), False, 'from os.path import join\n'), ((6112, 6128), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6120, 6128), True, 'import numpy as np\n'), ((6385, 6401), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6393, 6401), True, 'import numpy as np\n'), ((6658, 6674), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6666, 6674), True, 'import numpy as np\n'), ((6941, 6957), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (6949, 6957), True, 'import numpy as np\n'), ((424, 444), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (434, 444), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((3575, 3596), 'random.randint', 'random.randint', (['(1)', '(11)'], {}), '(1, 11)\n', (3589, 3596), False, 'import random\n'), ((4601, 4614), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (4611, 4614), True, 'import numpy as np\n'), ((4644, 4657), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (4654, 4657), True, 'import numpy as np\n'), ((5434, 5449), 'os.listdir', 'listdir', (['lr_dir'], {}), 
'(lr_dir)\n', (5441, 5449), False, 'from os import listdir\n'), ((491, 511), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (501, 511), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2039, 2052), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2049, 2052), False, 'import torch\n')] |
#!/usr/bin/env python
"""detector.py: module is dedicated to all detector functions."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import os
import datetime as dt
import numpy as np
import pandas as pd
import glob
from scipy.stats import median_absolute_deviation
from scipy.signal import butter, lfilter
import traceback
from get_fit_data import get_date_by_dates
import plotlib
import json
import uuid
with open("prop.json", "r") as f: properties = json.load(f)
def smooth(x, window_len=51, window="hanning"):
    """Smooth a 1-D array by convolving a window of length *window_len*.

    The signal is mirror-padded at both ends so the output has the same
    length as the input.

    x: 1-D numpy array with x.size >= window_len
    window_len: kernel length (values < 3 return x unchanged)
    window: "flat" (moving average), "hanning", "hamming", "bartlett",
        or "blackman"
    Returns the smoothed array, same length as x.
    Raises ValueError for non-1-D input, too-short input, or an unknown window.
    """
    if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3: return x
    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Mirror-pad so the "valid" convolution preserves the full length.
    s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
    # BUG FIX: the original referenced undefined ``numpy``; the module is
    # imported as ``np``, so the "flat" window raised NameError.
    if window == "flat": w = np.ones(window_len, "d")
    # eval is safe here: window was validated against the whitelist above.
    else: w = eval("np." + window + "(window_len)")
    y = np.convolve(w/w.sum(), s, mode="valid")
    d = window_len - 1
    y = y[int(d/2):-int(d/2)]
    return y
class Detector(object):
    """Generic SWF (Short-Wave Fadeout) detector.

    Concrete schemes (ZScore, CascadingNEO) inherit from this class and
    implement ``scheme``.  The workflow is: fetch/cache radar data per day,
    smooth the echo counts per beam, score each time window via ``scheme``,
    then aggregate the per-beam probabilities in ``invoke_parser``.
    """

    def __init__(self, dates, rad, kind, idx, plot=True, verbose=False):
        """
        dates: (start, end) datetime pair analyzed
        rad: radar code (e.g. "bks")
        kind: scheme name used in the result file path
        idx: integer run identifier used in the result file path
        """
        self.dates = dates
        self.rad = rad
        self.kind = kind
        self.plot = plot
        self.verbose = verbose
        self.uid = idx
        # File-name templates and tuning constants come from prop.json.
        self.data_fname = properties["data_fname"]%(dates[0].strftime("%Y-%m-%d"), rad)
        self.result_fname = (properties["result_fname"]%(dates[0].strftime("%Y-%m-%d"), rad, kind)).format(id="%04d"%self.uid)
        self.sza_th = properties["sza_th"]
        self.smoothing_window = properties["smoothing_window"]
        self.dur = properties["dur"]
        self.records = []
        self.scores = []
        self.times = []
        return

    def smooth(self, x, window_len=51, window="hanning"):
        """Smooth a 1-D array with a mirror-padded moving window; output has
        the same length as the input (see the module-level ``smooth``)."""
        if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
        if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
        if window_len < 3: return x
        if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
            raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
        s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
        # BUG FIX: the original referenced undefined ``numpy``; the module is
        # imported as ``np``, so the "flat" window raised NameError.
        if window == "flat": w = np.ones(window_len, "d")
        # eval is safe here: window was validated against the whitelist above.
        else: w = eval("np." + window + "(window_len)")
        y = np.convolve(w/w.sum(), s, mode="valid")
        d = window_len - 1
        y = y[int(d/2):-int(d/2)]
        return y

    def get_parse_radar_data(self):
        """Load the day's radar data, from the CSV cache when present,
        otherwise from the fitacf source (filtered by solar zenith angle)
        which is then cached.  Returns True on success."""
        parsed = True
        try:
            if not os.path.exists(self.data_fname):
                self.o = get_date_by_dates(self.rad, self.dates)
                print(" Length:", len(self.o))
                # Keep daylit data only (SZA below the configured threshold).
                self.o = self.o[self.o.sza <= self.sza_th]
                print(" Modified length:", len(self.o))
                self.o.to_csv(self.data_fname, header=True, index=False)
            else: self.o = pd.read_csv(self.data_fname, parse_dates=["time"])
            self.beams = self.o.beams.unique()
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C still interrupts.
            parsed = False
            if self.verbose: traceback.print_exc()
        return parsed

    def run(self):
        """Main entry point: per beam, resample/interpolate/smooth the echo
        series and score it with the subclass ``scheme``; results go to
        ``self.df`` and are aggregated by ``invoke_parser``.  Skipped when
        the result file already exists."""
        if not os.path.exists(self.result_fname):
            if self.get_parse_radar_data():
                rec = []
                load = True
                for b in self.beams:
                    x = self.o[self.o.beams==b]
                    x = x.set_index("time").resample(properties["resample_time"]).median().reset_index()\
                            .interpolate(limit=properties["max_intrepolate_gap"])
                    x.echoes = self.smooth(np.array(x.echoes), self.smoothing_window)
                    # NOTE(review): ``load`` is tracked but scheme is always
                    # called with load=True; every beam's scores are recorded.
                    out = self.scheme(x, self.dates[0], self.dates[1], b, dur=self.dur, load=True)
                    rec.extend(out)
                    load = False
                self.df = pd.DataFrame.from_records(rec)
                self.invoke_parser()
        return

    def scheme(self):
        """Needs to be overloaded by the concrete detector."""
        return

    def invoke_parser(self):
        """Aggregate per-beam window scores into per-window records:
        median probability, fraction of beams reporting, and a -10*log10(MAD)
        "jscore" (for both the spike and quantile probabilities)."""
        self.records = []
        if len(self.df) > 0:
            # NOTE(review): scipy renamed this to median_abs_deviation in 1.9;
            # this module pins the old name via its top-level import.
            stats = self.df.groupby(by=["st"]).agg({"prob":[np.median, "count", median_absolute_deviation],
                "qprob":[np.median, "count", median_absolute_deviation],
                }).reset_index()
            L = 1./len(self.beams)
            for i in range(len(stats)):
                st, et = stats.iloc[i]["st"].tolist()[0], stats.iloc[i]["st"].tolist()[0] + dt.timedelta(minutes=120)
                mprob, cprob = stats.iloc[i]["prob"]["median"], stats.iloc[i]["prob"]["count"]*L
                mad = stats.iloc[i]["prob"]["median_absolute_deviation"]
                jscore = -10*np.log10(stats.iloc[i]["prob"]["median_absolute_deviation"]) if mad > 0. else -1
                q_mprob, q_cprob = stats.iloc[i]["qprob"]["median"], stats.iloc[i]["qprob"]["count"]*L
                mad = stats.iloc[i]["qprob"]["median_absolute_deviation"]
                q_jscore = -10*np.log10(stats.iloc[i]["qprob"]["median_absolute_deviation"]) if mad > 0. else -1
                self.records.append({"st": st, "et": et, "rad": self.rad, "mprob": mprob, "jscore": jscore, "cprob": cprob,
                    "qmprob": q_mprob, "qjscore": q_jscore, "qcprob": q_cprob})
                print(" Chance of SWF between (%s, %s) observed by %s radar are (%.2f, %.2f, %.2f) (%.2f, %.2f, %.2f)"%\
                        (st.strftime("%Y-%m-%d %H:%M"), et.strftime("%Y-%m-%d %H:%M"), self.rad.upper(), mprob, cprob,
                            jscore, q_mprob, q_cprob, q_jscore))
        return

    def save(self):
        """Write the aggregated records to the result CSV (no-op when empty)."""
        if len(self.records)>0: pd.DataFrame.from_records(self.records).to_csv(self.result_fname, header=True, index=False)
        return

    def get_qprob(self, x):
        """Quantile-based probability in (0, 1): a logistic of the relative
        spread between the 10th and 90th percentiles of *x*."""
        q = qprob = 1./(1.+np.exp(((np.quantile(x, 0.1)-np.quantile(x, 0.9))/np.quantile(x, 0.1))))
        return q
return q
class ZScore(Detector):
    """Whitaker-Hayes style spike detector: modified z-scores of the
    first-differenced echo series; a large negative spike marks an SWF."""

    def __init__(self, dates, rad, idx, plot=True):
        super().__init__(dates, rad, "zscore", idx, plot)
        # Spikes with |modified z| above this count as detections.
        self.threshold = properties["z_score_threshold"]
        return

    def modified_z_score(self, intensity):
        """Modified z-score: 0.6745 * (x - median) / MAD (robust to outliers)."""
        median_int = np.median(intensity)
        mad_int = np.median([np.abs(intensity - median_int)])
        modified_z_scores = 0.6745 * (intensity - median_int) / mad_int
        return modified_z_scores

    def scheme(self, x, start, end, b, dur, load):
        """Overloaded: slide a *dur*-minute window over beam *b*'s series and
        emit one {beam, st, et, prob, qprob} record per complete window."""
        st, et = start, start + dt.timedelta(minutes=dur)
        coll = []
        while et <= end:
            u = x[(x.time >= st) & (x.time < et)]
            # Only score windows that are complete and gap-free.
            if len(u) == dur and not u.isnull().values.any():
                qprob = self.get_qprob(u.echoes)
                # First difference of the echo counts.
                delta_intensity = []
                intensity = np.array(u.echoes)
                for i in np.arange(len(u)-1):
                    dist = intensity[i+1] - intensity[i]
                    delta_intensity.append(dist)
                delta_int = np.array(delta_intensity)
                # Trailing 0 restores the original window length.
                scores = np.array(self.modified_z_score(delta_int).tolist() + [0])
                if load:
                    #scores[scores<=np.quantile(scores,0.1)] = np.nan
                    #scores[scores>=np.quantile(scores,0.9)] = np.nan
                    self.scores.extend(scores.tolist())
                    self.times.extend(u.time.tolist())
                # Only sudden drops (negative spikes) indicate an SWF.
                scores[scores > 0.] = 0.
                n_spikes = np.count_nonzero(np.abs(np.array(scores)) > self.threshold)
                p = 0.
                if self.plot:
                    self.plot_fname = "../plots/rad_summary_%s_%s_%s_%02d.png"%(st.strftime("%Y-%m-%d-%H-%M"),
                            et.strftime("%Y-%m-%d-%H-%M"), self.rad, b)
                    plotlib.plot_fit_data_with_scores(u, scores, self.plot_fname)
                # Logistic map of the strongest spike's distance past threshold.
                if n_spikes > 0: p = 1./(1.+np.exp(scores.min()-self.threshold))
                obj = {"beam":b, "st": st, "et": et, "prob": p, "qprob":qprob}
                coll.append(obj)
            st = et
            et += dt.timedelta(minutes=dur)
        return coll
class CascadingNEO(Detector):
    """Nonlinear Energy Operator spike detector, applied ``neo_order`` times
    in cascade.  Implemented from
    'Holleman2011.Chapter.SpikeDetection_Characterization'."""

    def __init__(self, dates, rad, idx, plot=True):
        super().__init__(dates, rad, "neo", idx, plot)
        # Detection threshold on the (scaled) cascaded NEO output.
        self.threshold = properties["neo_threshold"]
        # Number of times the operator is re-applied to its own output.
        self.neo_order = properties["neo_order"]
        return

    def neo(self, x):
        """
        Continuous NEO: psi(x) = (x')**2 - x * x'' (gradients approximate
        the derivatives).  Implemented from
        "Holleman2011.Chapter.SpikeDetection_Characterization".
        """
        y = np.gradient(x,edge_order=1)**2 - (np.gradient(x,edge_order=2)*x)
        return y

    def scheme(self, x, start, end, b, dur, load):
        """Overloaded: slide a *dur*-minute window over beam *b*'s series and
        emit one {beam, st, et, prob, qprob} record per complete window."""
        st, et = start, start + dt.timedelta(minutes=dur)
        coll = []
        while et <= end:
            u = x[(x.time >= st) & (x.time < et)]
            # Only score windows that are complete and gap-free.
            if len(u) == dur and not u.isnull().values.any():
                qprob = self.get_qprob(u.echoes)
                scores = np.array(u.echoes)
                # Cascade: feed the NEO output back through itself.
                for _i in range(self.neo_order):
                    scores = self.neo(scores)
                n_spikes = np.count_nonzero(np.abs(np.array(scores)) > self.threshold)
                if load:
                    #scores[scores<=np.quantile(scores,0.1)] = np.nan
                    #scores[scores>=np.quantile(scores,0.9)] = np.nan
                    # Scaled down to keep magnitudes comparable across orders.
                    self.scores.extend((scores/(10**self.neo_order)))
                    self.times.extend(u.time.tolist())
                p = 0.
                if self.plot:
                    self.plot_fname = "../plots/rad_summary_%s_%s_%s_%02d.png"%(st.strftime("%Y-%m-%d-%H-%M"),
                            et.strftime("%Y-%m-%d-%H-%M"), self.rad, b)
                    plotlib.plot_fit_data_with_scores(u, scores, self.plot_fname)
                # Logistic map of the strongest (scaled) spike past threshold.
                if n_spikes > 0: p = 1./(1.+np.exp(-((np.abs(scores).max()/(10**self.neo_order))-self.threshold)))
                obj = {"beam":b, "st": st, "et": et, "prob": p, "qprob": qprob}
                coll.append(obj)
            st = et
            et += dt.timedelta(minutes=dur)
        return coll
def algorithm_runner_helper(dates, rad, kind, idx, plot=True, save=True):
    """Build the detector named by *kind* ("zscore" or "neo"), run it over
    *dates* for radar *rad*, optionally persist the records, and return it."""
    if kind == "zscore":
        method = ZScore(dates, rad, idx, plot=plot)
    if kind == "neo":
        method = CascadingNEO(dates, rad, idx, plot=plot)
    method.run()
    if save:
        method.save()
    return method
def run_parallel_procs(dates, rad, kind="zscore", plot=True, save=True, idx=False, procs=8):
    """Run one detector day-by-day between dates[0] and dates[1] in a
    process pool (one worker per day).

    dates: (start, end) datetime pair, inclusive of both days
    rad: radar code
    kind: "zscore" or "neo"
    idx: when truthy, tag the run with a fresh UUID and create the tagged
        result directory; otherwise write flat ../results/ CSVs
    procs: pool size
    """
    from multiprocessing import Pool
    from functools import partial
    dt_args = []
    sdate, edate = dates[0], dates[1]
    if idx:
        idx = str(uuid.uuid1())
        if save:
            # BUG FIX: previously formatted with ``self.uid``, which does not
            # exist in a module-level function (NameError); use the run id.
            result_fname = (properties["result_fname"]%(dates[0].strftime("%Y-%m-%d"), rad, kind)).format(id=idx)
            _dir = "/".join(result_fname.split("/")[:-1])
            if not os.path.exists(_dir):
                os.system("mkdir "+_dir)
            # Snapshot the configuration alongside the results.
            with open(_dir+"/prop.json", "w") as f: f.write(json.dumps(properties, indent=4))
    else:
        properties["result_fname"] = "../results/%s_%s_%s.csv"
        idx=""
    pool = Pool(processes=procs)
    # One [day_start, day_end] pair per worker.
    while sdate <= edate:
        dt_args.append([sdate, sdate + dt.timedelta(1)])
        sdate = sdate + dt.timedelta(1)
    pool.map(partial(algorithm_runner_helper, rad=rad, kind=kind, idx=idx, plot=plot, save=save), dt_args)
    return
def run_batch_for_radar(rad, dates, kind, prop, plot=False, save=True, procs=8):
    """Run one detector over a list of individual days for a single radar,
    one pool worker per day.

    rad: radar code
    dates: iterable of day datetimes (each worker covers [d, d + 1 day])
    kind: "zscore" or "neo"
    prop: run-specific overrides merged into the global ``properties``;
        must contain "idx", the integer run identifier
    """
    from multiprocessing import Pool
    from functools import partial
    idx = prop["idx"]
    # Overlay the run-specific settings onto the module-wide configuration.
    properties.update(prop)
    if save:
        result_fname = (properties["result_fname"] % (dates[0].strftime("%Y-%m-%d"), rad, kind)).format(id="%04d" % idx)
        out_dir = "/".join(result_fname.split("/")[:-1])
        if not os.path.exists(out_dir):
            os.system("mkdir " + out_dir)
        # Snapshot the effective configuration alongside the results.
        with open(out_dir + "/prop.json", "w") as f:
            f.write(json.dumps(properties, indent=4))
    day_ranges = [[d, d + dt.timedelta(1)] for d in dates]
    worker = partial(algorithm_runner_helper, rad=rad, kind=kind, idx=idx, plot=plot, save=save)
    Pool(processes=procs).map(worker, day_ranges)
    return
if __name__ == "__main__":
    # Example run: NEO detector on BKS radar for one day (2015-05-05).
    # Positional arguments map to kind="neo", plot=True, save=False.
    # Alternate radar/scheme invocations are kept below for reference.
    dates = [dt.datetime(2015,5,5), dt.datetime(2015,5,5)]
    run_parallel_procs(dates, "bks", "neo", True, False)
    #run_parallel_procs(dates, "fhe", "neo", False, True)
    #run_parallel_procs(dates, "kap", "neo", False, True)
    #run_parallel_procs(dates, "bks", "zscore", False, True)
    #run_parallel_procs(dates, "fhe", "zscore", False, True)
    #run_parallel_procs(dates, "kap", "zscore", False, True)
    pass
| [
"numpy.log10",
"pandas.read_csv",
"numpy.array",
"datetime.timedelta",
"numpy.gradient",
"datetime.datetime",
"os.path.exists",
"json.dumps",
"plotlib.plot_fit_data_with_scores",
"traceback.print_exc",
"numpy.abs",
"uuid.uuid1",
"pandas.DataFrame.from_records",
"numpy.median",
"numpy.qua... | [((615, 627), 'json.load', 'json.load', (['f'], {}), '(f)\n', (624, 627), False, 'import json\n'), ((11753, 11774), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'procs'}), '(processes=procs)\n', (11757, 11774), False, 'from multiprocessing import Pool\n'), ((12629, 12650), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'procs'}), '(processes=procs)\n', (12633, 12650), False, 'from multiprocessing import Pool\n'), ((6707, 6727), 'numpy.median', 'np.median', (['intensity'], {}), '(intensity)\n', (6716, 6727), True, 'import numpy as np\n'), ((11911, 11998), 'functools.partial', 'partial', (['algorithm_runner_helper'], {'rad': 'rad', 'kind': 'kind', 'idx': 'idx', 'plot': 'plot', 'save': 'save'}), '(algorithm_runner_helper, rad=rad, kind=kind, idx=idx, plot=plot,\n save=save)\n', (11918, 11998), False, 'from functools import partial\n'), ((12733, 12820), 'functools.partial', 'partial', (['algorithm_runner_helper'], {'rad': 'rad', 'kind': 'kind', 'idx': 'idx', 'plot': 'plot', 'save': 'save'}), '(algorithm_runner_helper, rad=rad, kind=kind, idx=idx, plot=plot,\n save=save)\n', (12740, 12820), False, 'from functools import partial\n'), ((12880, 12903), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)'], {}), '(2015, 5, 5)\n', (12891, 12903), True, 'import datetime as dt\n'), ((12903, 12926), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)'], {}), '(2015, 5, 5)\n', (12914, 12926), True, 'import datetime as dt\n'), ((3593, 3626), 'os.path.exists', 'os.path.exists', (['self.result_fname'], {}), '(self.result_fname)\n', (3607, 3626), False, 'import os\n'), ((8563, 8588), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': 'dur'}), '(minutes=dur)\n', (8575, 8588), True, 'import datetime as dt\n'), ((10690, 10715), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': 'dur'}), '(minutes=dur)\n', (10702, 10715), True, 'import datetime as dt\n'), ((11265, 11277), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (11275, 11277), 
False, 'import uuid\n'), ((11882, 11897), 'datetime.timedelta', 'dt.timedelta', (['(1)'], {}), '(1)\n', (11894, 11897), True, 'import datetime as dt\n'), ((12465, 12485), 'os.path.exists', 'os.path.exists', (['_dir'], {}), '(_dir)\n', (12479, 12485), False, 'import os\n'), ((12499, 12525), 'os.system', 'os.system', (["('mkdir ' + _dir)"], {}), "('mkdir ' + _dir)\n", (12508, 12525), False, 'import os\n'), ((2949, 2980), 'os.path.exists', 'os.path.exists', (['self.data_fname'], {}), '(self.data_fname)\n', (2963, 2980), False, 'import os\n'), ((3007, 3046), 'get_fit_data.get_date_by_dates', 'get_date_by_dates', (['self.rad', 'self.dates'], {}), '(self.rad, self.dates)\n', (3024, 3046), False, 'from get_fit_data import get_date_by_dates\n'), ((3309, 3359), 'pandas.read_csv', 'pd.read_csv', (['self.data_fname'], {'parse_dates': "['time']"}), "(self.data_fname, parse_dates=['time'])\n", (3320, 3359), True, 'import pandas as pd\n'), ((4278, 4308), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['rec'], {}), '(rec)\n', (4303, 4308), True, 'import pandas as pd\n'), ((6757, 6787), 'numpy.abs', 'np.abs', (['(intensity - median_int)'], {}), '(intensity - median_int)\n', (6763, 6787), True, 'import numpy as np\n'), ((7006, 7031), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': 'dur'}), '(minutes=dur)\n', (7018, 7031), True, 'import datetime as dt\n'), ((7301, 7319), 'numpy.array', 'np.array', (['u.echoes'], {}), '(u.echoes)\n', (7309, 7319), True, 'import numpy as np\n'), ((7500, 7525), 'numpy.array', 'np.array', (['delta_intensity'], {}), '(delta_intensity)\n', (7508, 7525), True, 'import numpy as np\n'), ((9167, 9195), 'numpy.gradient', 'np.gradient', (['x'], {'edge_order': '(1)'}), '(x, edge_order=1)\n', (9178, 9195), True, 'import numpy as np\n'), ((9201, 9229), 'numpy.gradient', 'np.gradient', (['x'], {'edge_order': '(2)'}), '(x, edge_order=2)\n', (9212, 9229), True, 'import numpy as np\n'), ((9360, 9385), 'datetime.timedelta', 'dt.timedelta', ([], 
{'minutes': 'dur'}), '(minutes=dur)\n', (9372, 9385), True, 'import datetime as dt\n'), ((9615, 9633), 'numpy.array', 'np.array', (['u.echoes'], {}), '(u.echoes)\n', (9623, 9633), True, 'import numpy as np\n'), ((11492, 11512), 'os.path.exists', 'os.path.exists', (['_dir'], {}), '(_dir)\n', (11506, 11512), False, 'import os\n'), ((11530, 11556), 'os.system', 'os.system', (["('mkdir ' + _dir)"], {}), "('mkdir ' + _dir)\n", (11539, 11556), False, 'import os\n'), ((3479, 3500), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3498, 3500), False, 'import traceback\n'), ((6120, 6159), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['self.records'], {}), '(self.records)\n', (6145, 6159), True, 'import pandas as pd\n'), ((8270, 8331), 'plotlib.plot_fit_data_with_scores', 'plotlib.plot_fit_data_with_scores', (['u', 'scores', 'self.plot_fname'], {}), '(u, scores, self.plot_fname)\n', (8303, 8331), False, 'import plotlib\n'), ((10362, 10423), 'plotlib.plot_fit_data_with_scores', 'plotlib.plot_fit_data_with_scores', (['u', 'scores', 'self.plot_fname'], {}), '(u, scores, self.plot_fname)\n', (10395, 10423), False, 'import plotlib\n'), ((11840, 11855), 'datetime.timedelta', 'dt.timedelta', (['(1)'], {}), '(1)\n', (11852, 11855), True, 'import datetime as dt\n'), ((12584, 12616), 'json.dumps', 'json.dumps', (['properties'], {'indent': '(4)'}), '(properties, indent=4)\n', (12594, 12616), False, 'import json\n'), ((12702, 12717), 'datetime.timedelta', 'dt.timedelta', (['(1)'], {}), '(1)\n', (12714, 12717), True, 'import datetime as dt\n'), ((4041, 4059), 'numpy.array', 'np.array', (['x.echoes'], {}), '(x.echoes)\n', (4049, 4059), True, 'import numpy as np\n'), ((4946, 4971), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(120)'}), '(minutes=120)\n', (4958, 4971), True, 'import datetime as dt\n'), ((5171, 5231), 'numpy.log10', 'np.log10', (["stats.iloc[i]['prob']['median_absolute_deviation']"], {}), 
"(stats.iloc[i]['prob']['median_absolute_deviation'])\n", (5179, 5231), True, 'import numpy as np\n'), ((5460, 5521), 'numpy.log10', 'np.log10', (["stats.iloc[i]['qprob']['median_absolute_deviation']"], {}), "(stats.iloc[i]['qprob']['median_absolute_deviation'])\n", (5468, 5521), True, 'import numpy as np\n'), ((6333, 6352), 'numpy.quantile', 'np.quantile', (['x', '(0.1)'], {}), '(x, 0.1)\n', (6344, 6352), True, 'import numpy as np\n'), ((11619, 11651), 'json.dumps', 'json.dumps', (['properties'], {'indent': '(4)'}), '(properties, indent=4)\n', (11629, 11651), False, 'import json\n'), ((6292, 6311), 'numpy.quantile', 'np.quantile', (['x', '(0.1)'], {}), '(x, 0.1)\n', (6303, 6311), True, 'import numpy as np\n'), ((6312, 6331), 'numpy.quantile', 'np.quantile', (['x', '(0.9)'], {}), '(x, 0.9)\n', (6323, 6331), True, 'import numpy as np\n'), ((7977, 7993), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (7985, 7993), True, 'import numpy as np\n'), ((9780, 9796), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (9788, 9796), True, 'import numpy as np\n'), ((10478, 10492), 'numpy.abs', 'np.abs', (['scores'], {}), '(scores)\n', (10484, 10492), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Test for updater.kalman module"""
import pytest
import numpy as np
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.types.detection import Detection
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.prediction import (
GaussianStatePrediction, GaussianMeasurementPrediction)
from stonesoup.types.state import GaussianState, SqrtGaussianState
from stonesoup.updater.kalman import (KalmanUpdater,
ExtendedKalmanUpdater,
UnscentedKalmanUpdater,
SqrtKalmanUpdater,
IteratedKalmanUpdater)
def _kalman_scenario():
    """Build the linear-Gaussian scenario shared by every updater variant:
    a 2-state Gaussian prediction observed through a noisy 1-D position
    measurement.  (Previously this tuple was copy-pasted four times in the
    parametrize list, once per updater class.)"""
    measurement_model = LinearGaussian(ndim_state=2, mapping=[0],
                                       noise_covar=np.array([[0.04]]))
    prediction = GaussianStatePrediction(np.array([[-6.45], [0.7]]),
                                         np.array([[4.1123, 0.0013],
                                                   [0.0013, 0.0365]]))
    measurement = Detection(np.array([[-6.23]]))
    return measurement_model, prediction, measurement


@pytest.mark.parametrize(
    "UpdaterClass, measurement_model, prediction, measurement",
    [(updater_class, *_kalman_scenario())
     for updater_class in (KalmanUpdater,
                           ExtendedKalmanUpdater,
                           UnscentedKalmanUpdater,
                           IteratedKalmanUpdater)],
    ids=["standard", "extended", "unscented", "iterated"]
)
def test_kalman(UpdaterClass, measurement_model, prediction, measurement):
    """Each Kalman updater variant must reproduce the closed-form linear
    update (mean, covariance, and the hypothesis bookkeeping), both with and
    without a pre-computed measurement prediction."""
    # Calculate evaluation variables: the textbook innovation covariance,
    # Kalman gain, and posterior for this linear-Gaussian problem.
    eval_measurement_prediction = GaussianMeasurementPrediction(
        measurement_model.matrix() @ prediction.mean,
        measurement_model.matrix() @ prediction.covar
        @ measurement_model.matrix().T
        + measurement_model.covar(),
        cross_covar=prediction.covar @ measurement_model.matrix().T)
    kalman_gain = eval_measurement_prediction.cross_covar @ np.linalg.inv(
        eval_measurement_prediction.covar)
    eval_posterior = GaussianState(
        prediction.mean
        + kalman_gain @ (measurement.state_vector
                         - eval_measurement_prediction.mean),
        prediction.covar
        - kalman_gain @ eval_measurement_prediction.covar @ kalman_gain.T)

    # Initialise a kalman updater
    updater = UpdaterClass(measurement_model=measurement_model)

    # Get and assert measurement prediction
    measurement_prediction = updater.predict_measurement(prediction)
    assert(np.allclose(measurement_prediction.mean,
                       eval_measurement_prediction.mean,
                       0, atol=1.e-14))
    assert(np.allclose(measurement_prediction.covar,
                       eval_measurement_prediction.covar,
                       0, atol=1.e-14))
    assert(np.allclose(measurement_prediction.cross_covar,
                       eval_measurement_prediction.cross_covar,
                       0, atol=1.e-14))

    # Perform and assert state update (without measurement prediction)
    posterior = updater.update(SingleHypothesis(
        prediction=prediction,
        measurement=measurement))
    assert(np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
    assert(np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
    assert(np.array_equal(posterior.hypothesis.prediction, prediction))
    assert (np.allclose(
        posterior.hypothesis.measurement_prediction.state_vector,
        measurement_prediction.state_vector, 0, atol=1.e-14))
    assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
                        measurement_prediction.covar, 0, atol=1.e-14))
    assert(np.array_equal(posterior.hypothesis.measurement, measurement))
    assert(posterior.timestamp == prediction.timestamp)

    # Perform and assert state update (with a supplied measurement prediction)
    posterior = updater.update(SingleHypothesis(
        prediction=prediction,
        measurement=measurement,
        measurement_prediction=measurement_prediction))
    assert(np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
    assert(np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
    assert(np.array_equal(posterior.hypothesis.prediction, prediction))
    assert (np.allclose(
        posterior.hypothesis.measurement_prediction.state_vector,
        measurement_prediction.state_vector, 0, atol=1.e-14))
    assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
                        measurement_prediction.covar, 0, atol=1.e-14))
    assert(np.array_equal(posterior.hypothesis.measurement, measurement))
    assert(posterior.timestamp == prediction.timestamp)
def test_sqrt_kalman():
    # Verify that the square-root Kalman updater (both the plain and the
    # QR-decomposition variants) matches the standard KalmanUpdater on a
    # well-conditioned problem, and still succeeds on an ill-conditioned
    # covariance where the standard form loses precision.
    # 1D linear measurement of the first state dimension, sigma^2 = 0.04.
    measurement_model = LinearGaussian(ndim_state=2, mapping=[0],
                                     noise_covar=np.array([[0.04]]))
    prediction = GaussianStatePrediction(np.array([[-6.45], [0.7]]),
                                        np.array([[4.1123, 0.0013],
                                                  [0.0013, 0.0365]]))
    # Same prediction expressed in square-root (Cholesky-factor) form.
    sqrt_prediction = SqrtGaussianState(prediction.state_vector,
                                      np.linalg.cholesky(prediction.covar))
    measurement = Detection(np.array([[-6.23]]))
    # Calculate evaluation variables
    # Hand-computed innovation statistics: z_pred = H x, S = H P H^T + R,
    # cross-covariance P H^T — the textbook Kalman quantities.
    eval_measurement_prediction = GaussianMeasurementPrediction(
        measurement_model.matrix() @ prediction.mean,
        measurement_model.matrix() @ prediction.covar
        @ measurement_model.matrix().T
        + measurement_model.covar(),
        cross_covar=prediction.covar @ measurement_model.matrix().T)
    # K = P H^T S^-1
    kalman_gain = eval_measurement_prediction.cross_covar @ np.linalg.inv(
        eval_measurement_prediction.covar)
    # Reference posterior: x + K (z - z_pred), P - K S K^T.
    eval_posterior = GaussianState(
        prediction.mean
        + kalman_gain @ (measurement.state_vector
                         - eval_measurement_prediction.mean),
        prediction.covar
        - kalman_gain @ eval_measurement_prediction.covar @ kalman_gain.T)
    # Test Square root form returns the same as standard form
    updater = KalmanUpdater(measurement_model=measurement_model)
    sqrt_updater = SqrtKalmanUpdater(measurement_model=measurement_model, qr_method=False)
    qr_updater = SqrtKalmanUpdater(measurement_model=measurement_model, qr_method=True)
    posterior = updater.update(SingleHypothesis(prediction=prediction,
                                                measurement=measurement))
    posterior_s = sqrt_updater.update(SingleHypothesis(prediction=sqrt_prediction,
                                                       measurement=measurement))
    posterior_q = qr_updater.update(SingleHypothesis(prediction=sqrt_prediction,
                                                     measurement=measurement))
    # All three updaters must agree with the hand-computed posterior;
    # square-root covariances are compared via L L^T reconstruction.
    assert np.allclose(posterior_s.mean, eval_posterior.mean, 0, atol=1.e-14)
    assert np.allclose(posterior_q.mean, eval_posterior.mean, 0, atol=1.e-14)
    assert np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14)
    assert np.allclose(eval_posterior.covar,
                       posterior_s.sqrt_covar@posterior_s.sqrt_covar.T, 0,
                       atol=1.e-14)
    assert np.allclose(posterior.covar,
                       posterior_s.sqrt_covar@posterior_s.sqrt_covar.T, 0,
                       atol=1.e-14)
    assert np.allclose(posterior.covar,
                       posterior_q.sqrt_covar@posterior_q.sqrt_covar.T, 0,
                       atol=1.e-14)
    # I'm not sure this is going to be true in all cases. Keep in order to find edge cases
    assert np.allclose(posterior_s.covar, posterior_q.covar, 0, atol=1.e-14)
    # Next create a prediction with a covariance that will cause problems
    # Dynamic range of ~1e48 between eigenvalues breaks the standard update.
    prediction = GaussianStatePrediction(np.array([[-6.45], [0.7]]),
                                        np.array([[1e24, 1e-24],
                                                  [1e-24, 1e24]]))
    sqrt_prediction = SqrtGaussianState(prediction.state_vector,
                                      np.linalg.cholesky(prediction.covar))
    posterior = updater.update(SingleHypothesis(prediction=prediction,
                                                measurement=measurement))
    posterior_s = sqrt_updater.update(SingleHypothesis(
        prediction=sqrt_prediction, measurement=measurement))
    posterior_q = qr_updater.update(SingleHypothesis(prediction=sqrt_prediction,
                                                     measurement=measurement))
    # The new posterior will be
    eval_posterior = GaussianState(
        prediction.mean
        + kalman_gain @ (measurement.state_vector
                         - eval_measurement_prediction.mean),
        np.array([[0.04, 0],
                  [0, 1e24]]))  # Accessed by looking through the Decimal() quantities...
    # It's actually [0.039999999999 1e-48], [1e-24 1e24 + 1e-48]] ish
    # Test that the square root form succeeds where the standard form fails
    assert not np.allclose(posterior.covar, eval_posterior.covar, rtol=5.e-3)
    assert np.allclose(posterior_s.sqrt_covar@posterior_s.sqrt_covar.T,
                       eval_posterior.covar, rtol=5.e-3)
    assert np.allclose(posterior_q.sqrt_covar@posterior_s.sqrt_covar.T,
                       eval_posterior.covar, rtol=5.e-3)
| [
"stonesoup.types.state.GaussianState",
"numpy.allclose",
"stonesoup.updater.kalman.SqrtKalmanUpdater",
"numpy.array",
"numpy.linalg.inv",
"numpy.array_equal",
"stonesoup.updater.kalman.KalmanUpdater",
"numpy.linalg.cholesky",
"stonesoup.types.hypothesis.SingleHypothesis"
] | [((3165, 3365), 'stonesoup.types.state.GaussianState', 'GaussianState', (['(prediction.mean + kalman_gain @ (measurement.state_vector -\n eval_measurement_prediction.mean))', '(prediction.covar - kalman_gain @ eval_measurement_prediction.covar @\n kalman_gain.T)'], {}), '(prediction.mean + kalman_gain @ (measurement.state_vector -\n eval_measurement_prediction.mean), prediction.covar - kalman_gain @\n eval_measurement_prediction.covar @ kalman_gain.T)\n', (3178, 3365), False, 'from stonesoup.types.state import GaussianState, SqrtGaussianState\n'), ((3638, 3732), 'numpy.allclose', 'np.allclose', (['measurement_prediction.mean', 'eval_measurement_prediction.mean', '(0)'], {'atol': '(1e-14)'}), '(measurement_prediction.mean, eval_measurement_prediction.mean, \n 0, atol=1e-14)\n', (3649, 3732), True, 'import numpy as np\n'), ((3787, 3882), 'numpy.allclose', 'np.allclose', (['measurement_prediction.covar', 'eval_measurement_prediction.covar', '(0)'], {'atol': '(1e-14)'}), '(measurement_prediction.covar, eval_measurement_prediction.covar,\n 0, atol=1e-14)\n', (3798, 3882), True, 'import numpy as np\n'), ((3938, 4046), 'numpy.allclose', 'np.allclose', (['measurement_prediction.cross_covar', 'eval_measurement_prediction.cross_covar', '(0)'], {'atol': '(1e-14)'}), '(measurement_prediction.cross_covar, eval_measurement_prediction\n .cross_covar, 0, atol=1e-14)\n', (3949, 4046), True, 'import numpy as np\n'), ((4287, 4350), 'numpy.allclose', 'np.allclose', (['posterior.mean', 'eval_posterior.mean', '(0)'], {'atol': '(1e-14)'}), '(posterior.mean, eval_posterior.mean, 0, atol=1e-14)\n', (4298, 4350), True, 'import numpy as np\n'), ((4364, 4429), 'numpy.allclose', 'np.allclose', (['posterior.covar', 'eval_posterior.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior.covar, eval_posterior.covar, 0, atol=1e-14)\n', (4375, 4429), True, 'import numpy as np\n'), ((4443, 4502), 'numpy.array_equal', 'np.array_equal', (['posterior.hypothesis.prediction', 'prediction'], {}), 
'(posterior.hypothesis.prediction, prediction)\n', (4457, 4502), True, 'import numpy as np\n'), ((4516, 4641), 'numpy.allclose', 'np.allclose', (['posterior.hypothesis.measurement_prediction.state_vector', 'measurement_prediction.state_vector', '(0)'], {'atol': '(1e-14)'}), '(posterior.hypothesis.measurement_prediction.state_vector,\n measurement_prediction.state_vector, 0, atol=1e-14)\n', (4527, 4641), True, 'import numpy as np\n'), ((4669, 4780), 'numpy.allclose', 'np.allclose', (['posterior.hypothesis.measurement_prediction.covar', 'measurement_prediction.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior.hypothesis.measurement_prediction.covar,\n measurement_prediction.covar, 0, atol=1e-14)\n', (4680, 4780), True, 'import numpy as np\n'), ((4814, 4875), 'numpy.array_equal', 'np.array_equal', (['posterior.hypothesis.measurement', 'measurement'], {}), '(posterior.hypothesis.measurement, measurement)\n', (4828, 4875), True, 'import numpy as np\n'), ((5152, 5215), 'numpy.allclose', 'np.allclose', (['posterior.mean', 'eval_posterior.mean', '(0)'], {'atol': '(1e-14)'}), '(posterior.mean, eval_posterior.mean, 0, atol=1e-14)\n', (5163, 5215), True, 'import numpy as np\n'), ((5229, 5294), 'numpy.allclose', 'np.allclose', (['posterior.covar', 'eval_posterior.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior.covar, eval_posterior.covar, 0, atol=1e-14)\n', (5240, 5294), True, 'import numpy as np\n'), ((5308, 5367), 'numpy.array_equal', 'np.array_equal', (['posterior.hypothesis.prediction', 'prediction'], {}), '(posterior.hypothesis.prediction, prediction)\n', (5322, 5367), True, 'import numpy as np\n'), ((5381, 5506), 'numpy.allclose', 'np.allclose', (['posterior.hypothesis.measurement_prediction.state_vector', 'measurement_prediction.state_vector', '(0)'], {'atol': '(1e-14)'}), '(posterior.hypothesis.measurement_prediction.state_vector,\n measurement_prediction.state_vector, 0, atol=1e-14)\n', (5392, 5506), True, 'import numpy as np\n'), ((5534, 5645), 'numpy.allclose', 
'np.allclose', (['posterior.hypothesis.measurement_prediction.covar', 'measurement_prediction.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior.hypothesis.measurement_prediction.covar,\n measurement_prediction.covar, 0, atol=1e-14)\n', (5545, 5645), True, 'import numpy as np\n'), ((5679, 5740), 'numpy.array_equal', 'np.array_equal', (['posterior.hypothesis.measurement', 'measurement'], {}), '(posterior.hypothesis.measurement, measurement)\n', (5693, 5740), True, 'import numpy as np\n'), ((6857, 7057), 'stonesoup.types.state.GaussianState', 'GaussianState', (['(prediction.mean + kalman_gain @ (measurement.state_vector -\n eval_measurement_prediction.mean))', '(prediction.covar - kalman_gain @ eval_measurement_prediction.covar @\n kalman_gain.T)'], {}), '(prediction.mean + kalman_gain @ (measurement.state_vector -\n eval_measurement_prediction.mean), prediction.covar - kalman_gain @\n eval_measurement_prediction.covar @ kalman_gain.T)\n', (6870, 7057), False, 'from stonesoup.types.state import GaussianState, SqrtGaussianState\n'), ((7185, 7235), 'stonesoup.updater.kalman.KalmanUpdater', 'KalmanUpdater', ([], {'measurement_model': 'measurement_model'}), '(measurement_model=measurement_model)\n', (7198, 7235), False, 'from stonesoup.updater.kalman import KalmanUpdater, ExtendedKalmanUpdater, UnscentedKalmanUpdater, SqrtKalmanUpdater, IteratedKalmanUpdater\n'), ((7255, 7326), 'stonesoup.updater.kalman.SqrtKalmanUpdater', 'SqrtKalmanUpdater', ([], {'measurement_model': 'measurement_model', 'qr_method': '(False)'}), '(measurement_model=measurement_model, qr_method=False)\n', (7272, 7326), False, 'from stonesoup.updater.kalman import KalmanUpdater, ExtendedKalmanUpdater, UnscentedKalmanUpdater, SqrtKalmanUpdater, IteratedKalmanUpdater\n'), ((7344, 7414), 'stonesoup.updater.kalman.SqrtKalmanUpdater', 'SqrtKalmanUpdater', ([], {'measurement_model': 'measurement_model', 'qr_method': '(True)'}), '(measurement_model=measurement_model, qr_method=True)\n', (7361, 7414), False, 
'from stonesoup.updater.kalman import KalmanUpdater, ExtendedKalmanUpdater, UnscentedKalmanUpdater, SqrtKalmanUpdater, IteratedKalmanUpdater\n'), ((7897, 7962), 'numpy.allclose', 'np.allclose', (['posterior_s.mean', 'eval_posterior.mean', '(0)'], {'atol': '(1e-14)'}), '(posterior_s.mean, eval_posterior.mean, 0, atol=1e-14)\n', (7908, 7962), True, 'import numpy as np\n'), ((7975, 8040), 'numpy.allclose', 'np.allclose', (['posterior_q.mean', 'eval_posterior.mean', '(0)'], {'atol': '(1e-14)'}), '(posterior_q.mean, eval_posterior.mean, 0, atol=1e-14)\n', (7986, 8040), True, 'import numpy as np\n'), ((8053, 8118), 'numpy.allclose', 'np.allclose', (['posterior.covar', 'eval_posterior.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior.covar, eval_posterior.covar, 0, atol=1e-14)\n', (8064, 8118), True, 'import numpy as np\n'), ((8131, 8235), 'numpy.allclose', 'np.allclose', (['eval_posterior.covar', '(posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T)', '(0)'], {'atol': '(1e-14)'}), '(eval_posterior.covar, posterior_s.sqrt_covar @ posterior_s.\n sqrt_covar.T, 0, atol=1e-14)\n', (8142, 8235), True, 'import numpy as np\n'), ((8287, 8386), 'numpy.allclose', 'np.allclose', (['posterior.covar', '(posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T)', '(0)'], {'atol': '(1e-14)'}), '(posterior.covar, posterior_s.sqrt_covar @ posterior_s.\n sqrt_covar.T, 0, atol=1e-14)\n', (8298, 8386), True, 'import numpy as np\n'), ((8438, 8537), 'numpy.allclose', 'np.allclose', (['posterior.covar', '(posterior_q.sqrt_covar @ posterior_q.sqrt_covar.T)', '(0)'], {'atol': '(1e-14)'}), '(posterior.covar, posterior_q.sqrt_covar @ posterior_q.\n sqrt_covar.T, 0, atol=1e-14)\n', (8449, 8537), True, 'import numpy as np\n'), ((8680, 8744), 'numpy.allclose', 'np.allclose', (['posterior_s.covar', 'posterior_q.covar', '(0)'], {'atol': '(1e-14)'}), '(posterior_s.covar, posterior_q.covar, 0, atol=1e-14)\n', (8691, 8744), True, 'import numpy as np\n'), ((10152, 10252), 'numpy.allclose', 'np.allclose', 
(['(posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T)', 'eval_posterior.covar'], {'rtol': '(0.005)'}), '(posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T,\n eval_posterior.covar, rtol=0.005)\n', (10163, 10252), True, 'import numpy as np\n'), ((10281, 10381), 'numpy.allclose', 'np.allclose', (['(posterior_q.sqrt_covar @ posterior_s.sqrt_covar.T)', 'eval_posterior.covar'], {'rtol': '(0.005)'}), '(posterior_q.sqrt_covar @ posterior_s.sqrt_covar.T,\n eval_posterior.covar, rtol=0.005)\n', (10292, 10381), True, 'import numpy as np\n'), ((3086, 3134), 'numpy.linalg.inv', 'np.linalg.inv', (['eval_measurement_prediction.covar'], {}), '(eval_measurement_prediction.covar)\n', (3099, 3134), True, 'import numpy as np\n'), ((4193, 4257), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'prediction', 'measurement': 'measurement'}), '(prediction=prediction, measurement=measurement)\n', (4209, 4257), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((5003, 5118), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'prediction', 'measurement': 'measurement', 'measurement_prediction': 'measurement_prediction'}), '(prediction=prediction, measurement=measurement,\n measurement_prediction=measurement_prediction)\n', (5019, 5118), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((6002, 6028), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (6010, 6028), True, 'import numpy as np\n'), ((6071, 6117), 'numpy.array', 'np.array', (['[[4.1123, 0.0013], [0.0013, 0.0365]]'], {}), '([[4.1123, 0.0013], [0.0013, 0.0365]])\n', (6079, 6117), True, 'import numpy as np\n'), ((6275, 6311), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['prediction.covar'], {}), '(prediction.covar)\n', (6293, 6311), True, 'import numpy as np\n'), ((6341, 6360), 'numpy.array', 'np.array', (['[[-6.23]]'], {}), '([[-6.23]])\n', (6349, 6360), True, 'import numpy as np\n'), 
((6778, 6826), 'numpy.linalg.inv', 'np.linalg.inv', (['eval_measurement_prediction.covar'], {}), '(eval_measurement_prediction.covar)\n', (6791, 6826), True, 'import numpy as np\n'), ((7447, 7511), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'prediction', 'measurement': 'measurement'}), '(prediction=prediction, measurement=measurement)\n', (7463, 7511), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((7599, 7668), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'sqrt_prediction', 'measurement': 'measurement'}), '(prediction=sqrt_prediction, measurement=measurement)\n', (7615, 7668), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((7761, 7830), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'sqrt_prediction', 'measurement': 'measurement'}), '(prediction=sqrt_prediction, measurement=measurement)\n', (7777, 7830), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((8862, 8888), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (8870, 8888), True, 'import numpy as np\n'), ((8931, 8973), 'numpy.array', 'np.array', (['[[1e+24, 1e-24], [1e-24, 1e+24]]'], {}), '([[1e+24, 1e-24], [1e-24, 1e+24]])\n', (8939, 8973), True, 'import numpy as np\n'), ((9129, 9165), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['prediction.covar'], {}), '(prediction.covar)\n', (9147, 9165), True, 'import numpy as np\n'), ((9199, 9263), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'prediction', 'measurement': 'measurement'}), '(prediction=prediction, measurement=measurement)\n', (9215, 9263), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((9351, 9420), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'sqrt_prediction', 'measurement': 'measurement'}), '(prediction=sqrt_prediction, 
measurement=measurement)\n', (9367, 9420), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((9467, 9536), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', ([], {'prediction': 'sqrt_prediction', 'measurement': 'measurement'}), '(prediction=sqrt_prediction, measurement=measurement)\n', (9483, 9536), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((9805, 9838), 'numpy.array', 'np.array', (['[[0.04, 0], [0, 1e+24]]'], {}), '([[0.04, 0], [0, 1e+24]])\n', (9813, 9838), True, 'import numpy as np\n'), ((10078, 10140), 'numpy.allclose', 'np.allclose', (['posterior.covar', 'eval_posterior.covar'], {'rtol': '(0.005)'}), '(posterior.covar, eval_posterior.covar, rtol=0.005)\n', (10089, 10140), True, 'import numpy as np\n'), ((5941, 5959), 'numpy.array', 'np.array', (['[[0.04]]'], {}), '([[0.04]])\n', (5949, 5959), True, 'import numpy as np\n'), ((1029, 1055), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (1037, 1055), True, 'import numpy as np\n'), ((1093, 1139), 'numpy.array', 'np.array', (['[[4.1123, 0.0013], [0.0013, 0.0365]]'], {}), '([[4.1123, 0.0013], [0.0013, 0.0365]])\n', (1101, 1139), True, 'import numpy as np\n'), ((1210, 1229), 'numpy.array', 'np.array', (['[[-6.23]]'], {}), '([[-6.23]])\n', (1218, 1229), True, 'import numpy as np\n'), ((1457, 1483), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (1465, 1483), True, 'import numpy as np\n'), ((1521, 1567), 'numpy.array', 'np.array', (['[[4.1123, 0.0013], [0.0013, 0.0365]]'], {}), '([[4.1123, 0.0013], [0.0013, 0.0365]])\n', (1529, 1567), True, 'import numpy as np\n'), ((1638, 1657), 'numpy.array', 'np.array', (['[[-6.23]]'], {}), '([[-6.23]])\n', (1646, 1657), True, 'import numpy as np\n'), ((1887, 1913), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (1895, 1913), True, 'import numpy as np\n'), ((1951, 1997), 'numpy.array', 'np.array', (['[[4.1123, 0.0013], 
[0.0013, 0.0365]]'], {}), '([[4.1123, 0.0013], [0.0013, 0.0365]])\n', (1959, 1997), True, 'import numpy as np\n'), ((2068, 2087), 'numpy.array', 'np.array', (['[[-6.23]]'], {}), '([[-6.23]])\n', (2076, 2087), True, 'import numpy as np\n'), ((2315, 2341), 'numpy.array', 'np.array', (['[[-6.45], [0.7]]'], {}), '([[-6.45], [0.7]])\n', (2323, 2341), True, 'import numpy as np\n'), ((2379, 2425), 'numpy.array', 'np.array', (['[[4.1123, 0.0013], [0.0013, 0.0365]]'], {}), '([[4.1123, 0.0013], [0.0013, 0.0365]])\n', (2387, 2425), True, 'import numpy as np\n'), ((2496, 2515), 'numpy.array', 'np.array', (['[[-6.23]]'], {}), '([[-6.23]])\n', (2504, 2515), True, 'import numpy as np\n'), ((972, 990), 'numpy.array', 'np.array', (['[[0.04]]'], {}), '([[0.04]])\n', (980, 990), True, 'import numpy as np\n'), ((1400, 1418), 'numpy.array', 'np.array', (['[[0.04]]'], {}), '([[0.04]])\n', (1408, 1418), True, 'import numpy as np\n'), ((1830, 1848), 'numpy.array', 'np.array', (['[[0.04]]'], {}), '([[0.04]])\n', (1838, 1848), True, 'import numpy as np\n'), ((2258, 2276), 'numpy.array', 'np.array', (['[[0.04]]'], {}), '([[0.04]])\n', (2266, 2276), True, 'import numpy as np\n')] |
# setupPly.py ---
#
# Description:
# Author: <NAME>
# Date: 28 Jun 2019
# https://arxiv.org/abs/1904.01701
#
# Instituto Superior Técnico (IST)
# Code:
import open3d as o3d
import argparse
import os
from glob import glob
import pickle
from global_registration import preprocess_point_cloud, execute_global_registration
import copy
import numpy as np
import time
def arguments_parser():
    """Build and evaluate the command-line interface.

    Returns the parsed namespace with two string options:
    ``dataset`` (default ``'sun3d'``) and ``dataset_dir`` (default ``'test'``),
    which together select the folder holding the input pickles.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--dataset", type=str, default='sun3d',
                     help="Please write the folders name - dataset")
    cli.add_argument("--dataset_dir", type=str, default='test',
                     help="Please write the folders name - dataset_dir")
    return cli.parse_args()
def getPcRGBD(path, idx):
    """Back-project one recorded frame into an open3d point cloud.

    ``idx`` is the frame number as a string; it is spliced into the
    zero-padded ``frame-XXXXXX`` filename pattern. Only the depth image
    contributes to the returned cloud; the colour image is read but,
    as in the original pipeline, not fused into the geometry.
    """
    # Overwrite the tail of the zero-padded template with the index digits.
    frame_name = 'frame-000000'[:-len(idx)] + idx
    depth_path = os.path.join(path, frame_name + '.depth.png')
    rgb_path = os.path.join(path, frame_name + '.color.png')
    im_depth = o3d.read_image(depth_path)
    im_color = o3d.read_image(rgb_path)  # loaded but unused downstream
    # PrimeSense default intrinsics match the SUN3D capture hardware.
    pcd = o3d.create_point_cloud_from_depth_image(
        im_depth,
        o3d.PinholeCameraIntrinsic(
            o3d.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
    return pcd
def draw_registration_result(source, target, transformation):
    """Open an editable viewer showing *source* moved by *transformation*.

    The function blocks until the user closes the window. The inputs are
    deep-copied so the caller's clouds are never recoloured or moved.
    """
    moved_source = copy.deepcopy(source)
    shown_target = copy.deepcopy(target)
    red_source = copy.deepcopy(source)  # painted but never added to the viewer
    moved_source.paint_uniform_color([1, 0.706, 0])
    shown_target.paint_uniform_color([0, 0.651, 0.929])
    red_source.paint_uniform_color([1, 0, 0])
    moved_source.transform(transformation)
    vis = o3d.VisualizerWithEditing()
    vis.create_window()
    vis.add_geometry(moved_source)
    # Camera pose for the render window, stored next to the data.
    pose_file = os.path.join(os.getcwd(), 'registration', 'pos.json')
    vis.get_render_option().load_from_json(pose_file)
    vis.run()  # user picks points; returns when the window is closed
    vis.destroy_window()
def RotationError(R1, R2):
    """Compare two 3x3 rotation matrices.

    Parameters
    ----------
    R1, R2 : array_like
        Rotation matrices (any imaginary parts are discarded).

    Returns
    -------
    (float, float)
        The geodesic angle between them in degrees, and the Frobenius
        norm of ``R1 @ inv(R2) - I``.
    """
    R1 = np.real(R1)
    R2 = np.real(R2)
    R_ = np.matmul(R1, np.linalg.inv(R2))
    # Fix: floating-point error can push (trace - 1)/2 marginally outside
    # [-1, 1] (e.g. for near-identical rotations), making arccos return NaN.
    # Clamp to the valid arccos domain before converting to an angle.
    cos_angle = np.clip((np.trace(R_) - 1) / 2, -1.0, 1.0)
    ae = np.rad2deg(np.arccos(cos_angle))
    frob_norm = np.linalg.norm(R_ - np.eye(3), ord='fro')
    return ae, frob_norm
def TranslationError(t1, t2):
    """Return the Euclidean distance between two translation vectors."""
    difference = t1 - t2
    return np.linalg.norm(difference)
def refine_registration(source, target, corrs):
    """Re-estimate the rigid transform from explicit point correspondences.

    Returns the 4x4 transformation together with the wall-clock time
    (seconds) the point-to-point estimation took.
    """
    t0 = time.time()
    estimator = o3d.TransformationEstimationPointToPoint()
    transform = estimator.compute_transformation(
        source, target, o3d.Vector2iVector(corrs))
    return transform, time.time() - t0
def initializePointCoud(pts):
    """Wrap an (N, 3) array of coordinates in an open3d PointCloud."""
    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(pts)
    return cloud
def makeMatchesSet(corres):
    """Turn a per-index correspondence mask into explicit match pairs.

    Parameters
    ----------
    corres : sequence of int
        A flag per point; a value of exactly ``1`` marks a kept match.

    Returns
    -------
    list[list[int]]
        ``[i, i]`` for every flagged index — each point is matched with
        the point at the same index in the other cloud.
    """
    # Fix: the original accumulated into a local named `set`, shadowing the
    # builtin; also replaces the manual append loop with a comprehension.
    return [[idx, idx] for idx, c in enumerate(corres) if c == 1]
def createPointsfromMatches(pc1, pc2, matches):
    # Build matched point arrays from two clouds, then mark roughly half the
    # matches as outliers (flag 0) and perturb those matched points with
    # Gaussian noise. Non-deterministic: uses np.random without a fixed seed.
    #
    # Returns (x1_, x2_, flag): the FULL point arrays of both clouds and the
    # per-match inlier flag vector (1 = inlier, 0 = synthetic outlier).
    x1_ = np.asarray(pc1.points)
    x2_ = np.asarray(pc2.points)
    x1 = []
    x2 = []
    # Gather the matched coordinates: matches[i] = [idx_in_pc1, idx_in_pc2].
    for match in matches:
        x1.append(x1_[match[0]])
        x2.append(x2_[match[1]])
    x1 = np.array(x1)
    x2 = np.array(x2)
    # Start with every match flagged as an inlier.
    ones = np.ones((matches.shape[0], 1))
    flag = ones.reshape(matches.shape[0])
    # Pick a random 45-55% subset of match indices to turn into outliers.
    out = np.arange(0, matches.shape[0])
    np.random.shuffle(out)
    nb_out = int(np.random.uniform(low=0.45, high=0.55) * matches.shape[0])
    out = out[:nb_out]
    flag[out] = 0
    noise = np.random.normal(0, 0.1, size=(nb_out, 3))
    # NOTE(review): x1_noise aliases the matched-point copy x1, not the full
    # array x1_, so this perturbation never reaches the returned x1_ —
    # possibly the function was meant to return x1 / x1_noise instead.
    # TODO confirm intent before relying on the noise having any effect.
    x1_noise = x1
    x1_noise[out] = x1_noise[out] + noise
    flag = np.array(flag, dtype=int)
    return x1_, x2_, flag
def main():
    """Run RANSAC global registration on one recorded frame pair.

    Loads a pickle of frame-pair metadata (indices and ground-truth R, t),
    back-projects the two depth frames to point clouds, and sweeps a list
    of voxel sizes until RANSAC yields a match count in the target range
    (2700-3500). Too-dense results are remembered as a fallback; if no
    voxel size lands in range, the densest fallback (capped at 3000
    matches) is used, otherwise the frame is discarded.

    Side effects only: prints registration errors and timings.
    """
    voxel_list = [0.08, 0.07, 0.06, 0.05, 0.04, 0.03]
    prefix = '*.pickle'
    config = arguments_parser()
    filepath = os.path.join(os.getcwd(), 'data', config.dataset, config.dataset_dir, prefix)
    files = glob(filepath)
    # Fix: use a context manager so the pickle handle is closed even if
    # unpickling raises (the original open/close pair leaked on error).
    with open(files[1], 'rb') as f:
        data = pickle.load(f)
    # Frame pair 66 is hard-coded throughout this script.
    s_idx1 = '{}'.format(data['idx1'][66].reshape(1)[0])
    s_idx2 = '{}'.format(data['idx2'][66].reshape(1)[0])
    pc1 = getPcRGBD(os.path.join(os.getcwd(), 'registration'), s_idx1)
    pc2 = getPcRGBD(os.path.join(os.getcwd(), 'registration'), s_idx2)
    print(pc1)
    minCorres = False
    minMatchNb = 0
    minpc1 = np.array([])
    minpc2 = np.array([])
    minMatches = np.array([])
    for voxel_size in voxel_list:
        pc1_down, pc1_fpfh = preprocess_point_cloud(pc1, voxel_size)
        pc2_down, pc2_fpfh = preprocess_point_cloud(pc2, voxel_size)
        # Ground-truth transform for this pair as a 4x4 homogeneous matrix.
        T = np.eye(4)
        T[:3, :3] = data['R'][66]
        T[:3, 3] = data['t'][66]
        star_t = time.time()
        result = execute_global_registration(pc1_down, pc2_down, pc1_fpfh, pc2_fpfh, voxel_size)
        delta = time.time() - star_t
        print('RANSAC')
        print('Delta = {}'.format(delta))
        rot_error, frob_error = RotationError(T[:3, :3], result.transformation[:3, :3])
        print('Rotation Error: {} deg / {} (frob)\nTranslation Error: {}'.format(
            np.abs(rot_error), frob_error,
            np.linalg.norm(T[:3, 3] - result.transformation[:3, 3])))
        matches = np.asarray(result.correspondence_set)
        print('Number of Correspondences = {}'.format(matches.shape[0]))
        if 2700 < matches.shape[0] < 3500:
            # Match count in range: rebuild clouds from only the matched
            # points and register once more on that reduced set.
            x1, x2, flag = createPointsfromMatches(pc1_down, pc2_down, matches)
            print('Matches set created')
            pts1 = initializePointCoud(x1)
            pts2 = initializePointCoud(x2)
            pc1_down, pc1_fpfh = preprocess_point_cloud(pts1, voxel_size)
            pc2_down, pc2_fpfh = preprocess_point_cloud(pts2, voxel_size)
            result = execute_global_registration(pc1_down, pc2_down, pc1_fpfh, pc2_fpfh, voxel_size)
            matches = np.asarray(result.correspondence_set)
            print('Number of Correspondences = {}'.format(matches.shape[0]))
            rot_error, frob_error = RotationError(T[:3, :3], result.transformation[:3, :3])
            print('Rotation Error: {} deg / {} (frob)\nTranslation Error: {}'.format(
                np.abs(rot_error), frob_error,
                np.linalg.norm(T[:3, 3] - result.transformation[:3, 3])))
            minCorres = True
            break
        elif minMatchNb < matches.shape[0] and matches.shape[0] > 3500:
            # Too many matches: remember the densest candidate as a fallback.
            minpc1 = pc1_down
            minpc2 = pc2_down
            minMatches = matches
            # Fix: minMatchNb was never updated, so every over-dense result
            # overwrote the previous one regardless of density.
            minMatchNb = matches.shape[0]
    # Fix: the original accessed minpc1.points unconditionally, which raised
    # AttributeError whenever minpc1 was still the empty ndarray placeholder
    # (loop found neither an in-range nor an over-dense result). Guard with
    # hasattr so such frames fall through to the discard branch cleanly.
    # NOTE: as in the original, a successful in-range run (minCorres=True)
    # also prints 'Discard Frame' here.
    if not minCorres and hasattr(minpc1, 'points') and np.array(minpc1.points).size > 0:
        x1, x2, flag = createPointsfromMatches(minpc1, minpc2, minMatches[:3000])
        print('Matches set created')
        print('Number of Correspondences = {}'.format(minMatches[:3000].shape[0]))
    else:
        print('Discard Frame')
if __name__ == '__main__':
main() | [
"numpy.trace",
"open3d.PointCloud",
"numpy.array",
"open3d.Vector2iVector",
"numpy.linalg.norm",
"copy.deepcopy",
"global_registration.execute_global_registration",
"numpy.arange",
"open3d.TransformationEstimationPointToPoint",
"open3d.VisualizerWithEditing",
"argparse.ArgumentParser",
"numpy.... | [((407, 432), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (430, 432), False, 'import argparse\n'), ((939, 964), 'os.path.join', 'os.path.join', (['path', 'depth'], {}), '(path, depth)\n', (951, 964), False, 'import os\n'), ((1006, 1029), 'os.path.join', 'os.path.join', (['path', 'rgb'], {}), '(path, rgb)\n', (1018, 1029), False, 'import os\n'), ((1047, 1068), 'open3d.read_image', 'o3d.read_image', (['depth'], {}), '(depth)\n', (1061, 1068), True, 'import open3d as o3d\n'), ((1084, 1103), 'open3d.read_image', 'o3d.read_image', (['rgb'], {}), '(rgb)\n', (1098, 1103), True, 'import open3d as o3d\n'), ((1563, 1584), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (1576, 1584), False, 'import copy\n'), ((1603, 1624), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (1616, 1624), False, 'import copy\n'), ((1638, 1659), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (1651, 1659), False, 'import copy\n'), ((1861, 1888), 'open3d.VisualizerWithEditing', 'o3d.VisualizerWithEditing', ([], {}), '()\n', (1886, 1888), True, 'import open3d as o3d\n'), ((2212, 2223), 'numpy.real', 'np.real', (['R1'], {}), '(R1)\n', (2219, 2223), True, 'import numpy as np\n'), ((2233, 2244), 'numpy.real', 'np.real', (['R2'], {}), '(R2)\n', (2240, 2244), True, 'import numpy as np\n'), ((2339, 2353), 'numpy.rad2deg', 'np.rad2deg', (['ae'], {}), '(ae)\n', (2349, 2353), True, 'import numpy as np\n'), ((2482, 2505), 'numpy.linalg.norm', 'np.linalg.norm', (['(t1 - t2)'], {}), '(t1 - t2)\n', (2496, 2505), True, 'import numpy as np\n'), ((2568, 2579), 'time.time', 'time.time', ([], {}), '()\n', (2577, 2579), False, 'import time\n'), ((2591, 2633), 'open3d.TransformationEstimationPointToPoint', 'o3d.TransformationEstimationPointToPoint', ([], {}), '()\n', (2631, 2633), True, 'import open3d as o3d\n'), ((2833, 2849), 'open3d.PointCloud', 'o3d.PointCloud', ([], {}), '()\n', (2847, 2849), True, 'import open3d as 
o3d\n'), ((2867, 2890), 'open3d.Vector3dVector', 'o3d.Vector3dVector', (['pts'], {}), '(pts)\n', (2885, 2890), True, 'import open3d as o3d\n'), ((3119, 3141), 'numpy.asarray', 'np.asarray', (['pc1.points'], {}), '(pc1.points)\n', (3129, 3141), True, 'import numpy as np\n'), ((3152, 3174), 'numpy.asarray', 'np.asarray', (['pc2.points'], {}), '(pc2.points)\n', (3162, 3174), True, 'import numpy as np\n'), ((3302, 3314), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (3310, 3314), True, 'import numpy as np\n'), ((3324, 3336), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (3332, 3336), True, 'import numpy as np\n'), ((3349, 3379), 'numpy.ones', 'np.ones', (['(matches.shape[0], 1)'], {}), '((matches.shape[0], 1))\n', (3356, 3379), True, 'import numpy as np\n'), ((3432, 3462), 'numpy.arange', 'np.arange', (['(0)', 'matches.shape[0]'], {}), '(0, matches.shape[0])\n', (3441, 3462), True, 'import numpy as np\n'), ((3467, 3489), 'numpy.random.shuffle', 'np.random.shuffle', (['out'], {}), '(out)\n', (3484, 3489), True, 'import numpy as np\n'), ((3619, 3661), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)'], {'size': '(nb_out, 3)'}), '(0, 0.1, size=(nb_out, 3))\n', (3635, 3661), True, 'import numpy as np\n'), ((3733, 3758), 'numpy.array', 'np.array', (['flag'], {'dtype': 'int'}), '(flag, dtype=int)\n', (3741, 3758), True, 'import numpy as np\n'), ((4274, 4289), 'glob.glob', 'glob', (['filepath1'], {}), '(filepath1)\n', (4278, 4289), False, 'from glob import glob\n'), ((4302, 4316), 'glob.glob', 'glob', (['filepath'], {}), '(filepath)\n', (4306, 4316), False, 'from glob import glob\n'), ((4412, 4426), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4423, 4426), False, 'import pickle\n'), ((4769, 4781), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4777, 4781), True, 'import numpy as np\n'), ((4795, 4807), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4803, 4807), True, 'import numpy as np\n'), ((4825, 4837), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (4833, 4837), True, 'import numpy as np\n'), ((1248, 1335), 'open3d.PinholeCameraIntrinsic', 'o3d.PinholeCameraIntrinsic', (['o3d.PinholeCameraIntrinsicParameters.PrimeSenseDefault'], {}), '(o3d.PinholeCameraIntrinsicParameters.\n PrimeSenseDefault)\n', (1274, 1335), True, 'import open3d as o3d\n'), ((1970, 1981), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1979, 1981), False, 'import os\n'), ((2269, 2286), 'numpy.linalg.inv', 'np.linalg.inv', (['R2'], {}), '(R2)\n', (2282, 2286), True, 'import numpy as np\n'), ((2690, 2715), 'open3d.Vector2iVector', 'o3d.Vector2iVector', (['corrs'], {}), '(corrs)\n', (2708, 2715), True, 'import open3d as o3d\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import time\n'), ((4006, 4017), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4015, 4017), False, 'import os\n'), ((4181, 4192), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4190, 4192), False, 'import os\n'), ((4903, 4942), 'global_registration.preprocess_point_cloud', 'preprocess_point_cloud', (['pc1', 'voxel_size'], {}), '(pc1, voxel_size)\n', (4925, 4942), False, 'from global_registration import preprocess_point_cloud, execute_global_registration\n'), ((4972, 5011), 'global_registration.preprocess_point_cloud', 'preprocess_point_cloud', (['pc2', 'voxel_size'], {}), '(pc2, voxel_size)\n', (4994, 5011), False, 'from global_registration import preprocess_point_cloud, execute_global_registration\n'), ((5025, 5034), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5031, 5034), True, 'import numpy as np\n'), ((5120, 5131), 'time.time', 'time.time', ([], {}), '()\n', (5129, 5131), False, 'import time\n'), ((5149, 5228), 'global_registration.execute_global_registration', 'execute_global_registration', (['pc1_down', 'pc2_down', 'pc1_fpfh', 'pc2_fpfh', 'voxel_size'], {}), '(pc1_down, pc2_down, pc1_fpfh, pc2_fpfh, voxel_size)\n', (5176, 5228), False, 'from global_registration import preprocess_point_cloud, 
execute_global_registration\n'), ((5691, 5728), 'numpy.asarray', 'np.asarray', (['result.correspondence_set'], {}), '(result.correspondence_set)\n', (5701, 5728), True, 'import numpy as np\n'), ((2391, 2400), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2397, 2400), True, 'import numpy as np\n'), ((3507, 3545), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.45)', 'high': '(0.55)'}), '(low=0.45, high=0.55)\n', (3524, 3545), True, 'import numpy as np\n'), ((4590, 4601), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4599, 4601), False, 'import os\n'), ((4661, 4672), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4670, 4672), False, 'import os\n'), ((5245, 5256), 'time.time', 'time.time', ([], {}), '()\n', (5254, 5256), False, 'import time\n'), ((6189, 6229), 'global_registration.preprocess_point_cloud', 'preprocess_point_cloud', (['pts1', 'voxel_size'], {}), '(pts1, voxel_size)\n', (6211, 6229), False, 'from global_registration import preprocess_point_cloud, execute_global_registration\n'), ((6263, 6303), 'global_registration.preprocess_point_cloud', 'preprocess_point_cloud', (['pts2', 'voxel_size'], {}), '(pts2, voxel_size)\n', (6285, 6303), False, 'from global_registration import preprocess_point_cloud, execute_global_registration\n'), ((6326, 6405), 'global_registration.execute_global_registration', 'execute_global_registration', (['pc1_down', 'pc2_down', 'pc1_fpfh', 'pc2_fpfh', 'voxel_size'], {}), '(pc1_down, pc2_down, pc1_fpfh, pc2_fpfh, voxel_size)\n', (6353, 6405), False, 'from global_registration import preprocess_point_cloud, execute_global_registration\n'), ((6428, 6465), 'numpy.asarray', 'np.asarray', (['result.correspondence_set'], {}), '(result.correspondence_set)\n', (6438, 6465), True, 'import numpy as np\n'), ((2309, 2321), 'numpy.trace', 'np.trace', (['R_'], {}), '(R_)\n', (2317, 2321), True, 'import numpy as np\n'), ((5502, 5519), 'numpy.abs', 'np.abs', (['rot_error'], {}), '(rot_error)\n', (5508, 5519), True, 'import numpy as np\n'), 
((5614, 5669), 'numpy.linalg.norm', 'np.linalg.norm', (['(T[:3, 3] - result.transformation[:3, 3])'], {}), '(T[:3, 3] - result.transformation[:3, 3])\n', (5628, 5669), True, 'import numpy as np\n'), ((8538, 8561), 'numpy.array', 'np.array', (['minpc1.points'], {}), '(minpc1.points)\n', (8546, 8561), True, 'import numpy as np\n'), ((6721, 6738), 'numpy.abs', 'np.abs', (['rot_error'], {}), '(rot_error)\n', (6727, 6738), True, 'import numpy as np\n'), ((6837, 6892), 'numpy.linalg.norm', 'np.linalg.norm', (['(T[:3, 3] - result.transformation[:3, 3])'], {}), '(T[:3, 3] - result.transformation[:3, 3])\n', (6851, 6892), True, 'import numpy as np\n')] |
from aoc2020 import *
from aoc2020.utils import math_product
from itertools import chain
import numpy as np
def tborder(tile):
_, m = tile
return "".join(m[0])
def bborder(tile):
_, m = tile
return "".join(m[-1])
def lborder(tile):
_, m = tile
return "".join(m[:,0])
def rborder(tile):
_, m = tile
return "".join(m[:,-1])
def orientations(tile):
k, m = tile
for _ in range(2):
for i in range(4):
yield k, m
m = np.rot90(m)
m = np.fliplr(m)
class Solution(SolutionABC):
expected = 20899048083289
def solve(self) -> any:
all_tiles = self.load_tiles()
image_table = self.get_image_table(all_tiles)
return math_product([image_table[y][x][0] for x, y in [(0, 0), (0, -1), (-1, 0), (-1, -1)]])
@classmethod
def get_image_table(cls, tiles):
# Find the top most piece.
search_tile = tiles[0]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
if tborder(t0) == bborder(t):
search_tile = t
break
search_tile = t0
# Find the left most piece.
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
if lborder(t0) == rborder(t):
search_tile = t
break
search_tile = t0
assigned = set([search_tile[0]])
# Find all the left most pieces.
img = [[search_tile]]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
if bborder(t0) == tborder(t):
search_tile = t
img.append([t])
assigned.add(t[0])
break
# Find the rest of each row
for row in img:
search_tile = row[0]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
if rborder(t0) == lborder(t):
search_tile = t
row.append(t)
assigned.add(t[0])
break
#for r in img:
# print(" ".join([str(c) for c, _ in r]))
return img
def load_tiles(self):
with self.load_resource("input") as src:
return [(k, m) for k, m in self.read_tiles(src)]
def read_tiles(self, src):
while True:
tile_heading = self.read_line(src)
if tile_heading == "":
return
tile_id = int(tile_heading[5:-1])
matrix = list(self.read_until(src, xfrm=lambda s: list(s)))
yield tile_id, np.array(matrix)
| [
"numpy.fliplr",
"numpy.array",
"aoc2020.utils.math_product",
"numpy.rot90"
] | [((517, 529), 'numpy.fliplr', 'np.fliplr', (['m'], {}), '(m)\n', (526, 529), True, 'import numpy as np\n'), ((727, 817), 'aoc2020.utils.math_product', 'math_product', (['[image_table[y][x][0] for x, y in [(0, 0), (0, -1), (-1, 0), (-1, -1)]]'], {}), '([image_table[y][x][0] for x, y in [(0, 0), (0, -1), (-1, 0), (\n -1, -1)]])\n', (739, 817), False, 'from aoc2020.utils import math_product\n'), ((493, 504), 'numpy.rot90', 'np.rot90', (['m'], {}), '(m)\n', (501, 504), True, 'import numpy as np\n'), ((3058, 3074), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (3066, 3074), True, 'import numpy as np\n')] |
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
sample = np.random.normal(10, 1, 1000)
uv = UnivariateGaussian()
uv.fit(sample)
print("(", uv.mu_, ", ", uv.var_, ")")
# Question 2 - Empirically showing sample mean is consistent
diff_from_mean_array = []
sample_size = range(10, 1000, 10)
for i in sample_size:
uv.fit(sample[:i])
diff_from_mean_array.append(abs(uv.mu_-10))
# Draw plot
plot = go.Figure(data=[go.Scatter(
x=list(sample_size),
y=diff_from_mean_array,
mode='lines',
marker_color='rgba(199, 10, 165, .9)')
])
plot.update_layout(
title="Calculated Mean vs True Mean as a Function of Sample Size",
xaxis_title="Sample Size",
yaxis_title="Distance from True Mean",
)
plot.show()
# Question 3 - Plotting Empirical PDF of fitted model
sample = np.sort(sample)
pdf = uv.pdf(sample)
# Draw plot
pdf_plot = go.Figure()
pdf_plot.add_trace(go.Scatter(
x=sample,
y=pdf,
mode='lines',
marker_color='rgba(19, 200, 195, .9)')
)
pdf_plot.update_layout(
title="PDF from Estimators",
xaxis_title="Sample",
yaxis_title="Probability",
)
pdf_plot.show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mean = np.array([0, 0, 4, 0])
cov = np.array([[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]])
sample = np.random.multivariate_normal(mean, cov, 1000)
mv = MultivariateGaussian()
mv.fit(sample)
print(mv.mu_, "\n", mv.cov_)
# Question 5 - Likelihood evaluation
f1 = np.linspace(-10, 10, 200)
f3 = np.linspace(-10, 10, 200)
log_likelihood_vals = [[mv.log_likelihood(np.array([a, 0, c, 0]), cov, sample) for c in f3] for a in f1]
heatmap_plot = go.Figure(data=go.Heatmap(
z=log_likelihood_vals,
x=f1,
y=f3))
heatmap_plot.update_layout(
title="Log Likelihood of mu = [f1, 0, f3, 0]",
xaxis_title="f1",
yaxis_title="f3"
)
heatmap_plot.show()
# Question 6 - Maximum likelihood
max_val = np.amax(log_likelihood_vals)
f1_loc, f3_loc = np.unravel_index(np.argmax(log_likelihood_vals),
np.shape(log_likelihood_vals))
print("max value = ", max_val,
"\nmu = [", round(f1[f1_loc], 3),
", 0, ", round(f3[f3_loc], 3), ", 0]")
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
| [
"numpy.random.normal",
"plotly.graph_objects.Heatmap",
"numpy.random.multivariate_normal",
"numpy.sort",
"numpy.argmax",
"plotly.graph_objects.Figure",
"numpy.array",
"numpy.linspace",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"numpy.shape",
"IMLearn.learners.UnivariateGaussian",
... | [((288, 317), 'numpy.random.normal', 'np.random.normal', (['(10)', '(1)', '(1000)'], {}), '(10, 1, 1000)\n', (304, 317), True, 'import numpy as np\n'), ((327, 347), 'IMLearn.learners.UnivariateGaussian', 'UnivariateGaussian', ([], {}), '()\n', (345, 347), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((1170, 1185), 'numpy.sort', 'np.sort', (['sample'], {}), '(sample)\n', (1177, 1185), True, 'import numpy as np\n'), ((1242, 1253), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1251, 1253), True, 'import plotly.graph_objects as go\n'), ((1656, 1678), 'numpy.array', 'np.array', (['[0, 0, 4, 0]'], {}), '([0, 0, 4, 0])\n', (1664, 1678), True, 'import numpy as np\n'), ((1689, 1763), 'numpy.array', 'np.array', (['[[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]]'], {}), '([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]])\n', (1697, 1763), True, 'import numpy as np\n'), ((1810, 1856), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(1000)'], {}), '(mean, cov, 1000)\n', (1839, 1856), True, 'import numpy as np\n'), ((1866, 1888), 'IMLearn.learners.MultivariateGaussian', 'MultivariateGaussian', ([], {}), '()\n', (1886, 1888), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((1992, 2017), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(200)'], {}), '(-10, 10, 200)\n', (2003, 2017), True, 'import numpy as np\n'), ((2027, 2052), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(200)'], {}), '(-10, 10, 200)\n', (2038, 2052), True, 'import numpy as np\n'), ((2528, 2556), 'numpy.amax', 'np.amax', (['log_likelihood_vals'], {}), '(log_likelihood_vals)\n', (2535, 2556), True, 'import numpy as np\n'), ((2857, 2874), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2871, 2874), True, 'import numpy as np\n'), ((1277, 1362), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'sample', 'y': 'pdf', 
'mode': '"""lines"""', 'marker_color': '"""rgba(19, 200, 195, .9)"""'}), "(x=sample, y=pdf, mode='lines', marker_color='rgba(19, 200, 195, .9)'\n )\n", (1287, 1362), True, 'import plotly.graph_objects as go\n'), ((2595, 2625), 'numpy.argmax', 'np.argmax', (['log_likelihood_vals'], {}), '(log_likelihood_vals)\n', (2604, 2625), True, 'import numpy as np\n'), ((2665, 2694), 'numpy.shape', 'np.shape', (['log_likelihood_vals'], {}), '(log_likelihood_vals)\n', (2673, 2694), True, 'import numpy as np\n'), ((2196, 2241), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'log_likelihood_vals', 'x': 'f1', 'y': 'f3'}), '(z=log_likelihood_vals, x=f1, y=f3)\n', (2206, 2241), True, 'import plotly.graph_objects as go\n'), ((2099, 2121), 'numpy.array', 'np.array', (['[a, 0, c, 0]'], {}), '([a, 0, c, 0])\n', (2107, 2121), True, 'import numpy as np\n')] |
from math import exp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
class LR():
def __init__(self,max_iterator=200,learning_rate = 0.01):
self.max_iterator = max_iterator
self.learning_rate = learning_rate
def sigmoid(self,x):
return 1/(1+exp(-x))
def data_add_one_dim(self,data_feature): #回归系数比特征数多一个,即常数项 故原来的特征向量统统在前面添加一个1
#data_mat = []
pad = np.ones((len(data_feature),1),dtype=np.float32) #直接用numpy的水平拼接
data_mat = np.hstack((pad,data_feature))
#for d in range(len(data_feature)):
# data_mat.append([1.0,*data_feature[d]])
return data_mat
def fit(self,x,y):
data_feature = self.data_add_one_dim(x)
data_label = y
self.weights = np.zeros((len(data_feature[0]),1),dtype=np.float32)
for ite in range(self.max_iterator):
for i in range(len(data_feature)):
result = self.sigmoid(np.dot(data_feature[i],self.weights))
error = data_label[i] - result
self.weights += self.learning_rate * error * np.transpose([data_feature[i]])
print('LogisticRegression Model(learning_rate={},max_iter={})'.format(self.learning_rate, self.max_iterator))
def score(self, X_test, y_test):
right = 0
X_test = self.data_add_one_dim(X_test)
for x, y in zip(X_test, y_test):
result = np.dot(x, self.weights)
if (result > 0 and y == 1) or (result < 0 and y == 0):
right += 1
return right / len(X_test)
| [
"math.exp",
"numpy.dot",
"numpy.transpose",
"numpy.hstack"
] | [((597, 627), 'numpy.hstack', 'np.hstack', (['(pad, data_feature)'], {}), '((pad, data_feature))\n', (606, 627), True, 'import numpy as np\n'), ((1507, 1530), 'numpy.dot', 'np.dot', (['x', 'self.weights'], {}), '(x, self.weights)\n', (1513, 1530), True, 'import numpy as np\n'), ((387, 394), 'math.exp', 'exp', (['(-x)'], {}), '(-x)\n', (390, 394), False, 'from math import exp\n'), ((1047, 1084), 'numpy.dot', 'np.dot', (['data_feature[i]', 'self.weights'], {}), '(data_feature[i], self.weights)\n', (1053, 1084), True, 'import numpy as np\n'), ((1193, 1224), 'numpy.transpose', 'np.transpose', (['[data_feature[i]]'], {}), '([data_feature[i]])\n', (1205, 1224), True, 'import numpy as np\n')] |
import numpy as np
from torch.utils.data import Dataset
import jsonlines
import json
import random
from tqdm import tqdm
import pandas as pd
try:
from vocab_gen import *
except ImportError:
from datasets.vocab_gen import *
class YelpDataset(Dataset):
def __init__(self, jsonl_file:str, tokenizer=None, max_len:int = 50, is_from_partition=False, add_cls=False,
should_stem=True, using_pandas=False, using_bpe=False):
self.jsonl_file = jsonl_file
self.eval_df = None
self.reviews = []
self.tokenizer = tokenizer
self.max_len = max_len
self.should_stem = should_stem
self.add_cls = add_cls
if using_pandas:
self.train_df = pd.read_json(jsonl_file, lines=True)
self.train_df['label'] = self.train_df.iloc[:, 2]
self.train_df['text'] = self.train_df['text'].apply(clean_sentence)
self.train_df['label'] = self.train_df['label'].apply(lambda x: x-1)
self.train_df = self.train_df.drop(self.train_df.columns[[0, 2]], axis=1)
print(self.train_df)
else:
with jsonlines.open(self.jsonl_file) as reader:
for obj in reader.iter(type=dict, skip_invalid=True):
if is_from_partition:
self.reviews.append({"input": obj["input"], "label": obj["label"]})
else:
rating = obj["stars"]
review = obj["text"]
self.reviews.append({"input": review, "label": rating})
print("dataset loaded...")
def __len__(self):
return len(self.reviews)
def __getitem__(self, idx):
assert self.tokenizer is not None, "tokenizer must be passed in during instantiation"
sample = self.reviews[idx]
review, stars = sample["input"], int(sample["label"])
review = self.tokenizer.tokenize2Index(review, self.should_stem)[:self.max_len]
if (len(review) < self.max_len):
review += [PAD_TOKEN]*(self.max_len-len(review))
if self.add_cls:
review = [len(self.tokenizer.word2Index)] + [x + 1 for x in review] #SET CLS TOKEN TO 0 AND PUSH EVERYTHING DOWN BY 1
return {"input": np.array(review), "label": np.array(stars - 1)}
def getFromText(review, tokenizer, max_len=1000, should_stem=True):
review = tokenizer.tokenize2Index(review, should_stem)[:max_len]
if (len(review) < max_len):
review += [PAD_TOKEN]*(max_len-len(review))
return np.array(review)
def split_dataset(self, training_partition: float, training_file: str, validation_file: str):
assert training_partition > 0 and training_partition < 1, "Training partition must be a float between 0 and 1 non-exclusive"
num_train_examples = int(training_partition * len(self.reviews))
random.shuffle(self.reviews)
training_partition = self.reviews[:num_train_examples]
val_partition = self.reviews[num_train_examples:]
with open(training_file, "w+") as train_f:
for rev in tqdm(training_partition):
json.dump(rev, train_f)
train_f.write("\n")
with open(validation_file, "w+") as val_f:
for rev in tqdm(val_partition):
json.dump(rev, val_f)
val_f.write("\n")
def make_datasets(self, tokenizer, max_length, x_path, y_path):
x_train, y_train, x_val, y_val = [],[],[],[]
num_reviews = len(self.reviews)
for i in range(num_reviews):
rating_vector = [0,0,0,0,0]
rating_vector[int(self.reviews[i]["label"])-1] = 1
sequenced_review = tokenizer.tokenize2Index(self.reviews[i]["input"])
if len(sequenced_review) > max_length:
sequenced_review = sequenced_review[:max_length]
elif len(sequenced_review) < max_length:
sequenced_review += [PAD_TOKEN]*(max_length-len(sequenced_review))
sequenced_review = [int(x) for x in sequenced_review]
x_train.append(sequenced_review)
y_train.append(rating_vector)
np.savetxt(x_path, x_train, fmt ='%4d')
np.savetxt(y_path, y_train, fmt='%4d')
return np.asarray(x_train), np.asarray(y_train)
def make_eval_pandas(self, num):
file = "../datasets/yelp_challenge_" + str(num) + "_with_answers.jsonl"
self.eval_df = pd.read_json(file, lines=True)
self.eval_df['label'] = self.eval_df.iloc[:, 2]
self.eval_df['text'] = self.eval_df['text'].apply(clean_sentence)
self.eval_df['label'] = self.eval_df['label'].apply(lambda x: x - 1)
self.eval_df = self.eval_df.drop(self.eval_df.columns[[0, 2]], axis=1)
print(self.eval_df)
if __name__ == "__main__":
training_yelp = YelpDataset("yelp_review_training_dataset.jsonl", tokenizer=None, max_len=1000, is_from_partition=False, add_cls=False)
# with open("cleaned_reviews.txt", "w+") as f:
len_words = 0
count = 0
for rev in tqdm(training_yelp.reviews):
cleaned_review = clean_sentence(rev["input"].lower()).split()
len_words += len(cleaned_review)
count += 1
print(len_words / count)
| [
"random.shuffle",
"json.dump",
"tqdm.tqdm",
"numpy.asarray",
"jsonlines.open",
"numpy.array",
"numpy.savetxt",
"pandas.read_json"
] | [((5103, 5130), 'tqdm.tqdm', 'tqdm', (['training_yelp.reviews'], {}), '(training_yelp.reviews)\n', (5107, 5130), False, 'from tqdm import tqdm\n'), ((2582, 2598), 'numpy.array', 'np.array', (['review'], {}), '(review)\n', (2590, 2598), True, 'import numpy as np\n'), ((2914, 2942), 'random.shuffle', 'random.shuffle', (['self.reviews'], {}), '(self.reviews)\n', (2928, 2942), False, 'import random\n'), ((4208, 4246), 'numpy.savetxt', 'np.savetxt', (['x_path', 'x_train'], {'fmt': '"""%4d"""'}), "(x_path, x_train, fmt='%4d')\n", (4218, 4246), True, 'import numpy as np\n'), ((4256, 4294), 'numpy.savetxt', 'np.savetxt', (['y_path', 'y_train'], {'fmt': '"""%4d"""'}), "(y_path, y_train, fmt='%4d')\n", (4266, 4294), True, 'import numpy as np\n'), ((4492, 4522), 'pandas.read_json', 'pd.read_json', (['file'], {'lines': '(True)'}), '(file, lines=True)\n', (4504, 4522), True, 'import pandas as pd\n'), ((728, 764), 'pandas.read_json', 'pd.read_json', (['jsonl_file'], {'lines': '(True)'}), '(jsonl_file, lines=True)\n', (740, 764), True, 'import pandas as pd\n'), ((2276, 2292), 'numpy.array', 'np.array', (['review'], {}), '(review)\n', (2284, 2292), True, 'import numpy as np\n'), ((2303, 2322), 'numpy.array', 'np.array', (['(stars - 1)'], {}), '(stars - 1)\n', (2311, 2322), True, 'import numpy as np\n'), ((3140, 3164), 'tqdm.tqdm', 'tqdm', (['training_partition'], {}), '(training_partition)\n', (3144, 3164), False, 'from tqdm import tqdm\n'), ((3317, 3336), 'tqdm.tqdm', 'tqdm', (['val_partition'], {}), '(val_partition)\n', (3321, 3336), False, 'from tqdm import tqdm\n'), ((4310, 4329), 'numpy.asarray', 'np.asarray', (['x_train'], {}), '(x_train)\n', (4320, 4329), True, 'import numpy as np\n'), ((4331, 4350), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (4341, 4350), True, 'import numpy as np\n'), ((1138, 1169), 'jsonlines.open', 'jsonlines.open', (['self.jsonl_file'], {}), '(self.jsonl_file)\n', (1152, 1169), False, 'import jsonlines\n'), ((3182, 3205), 
'json.dump', 'json.dump', (['rev', 'train_f'], {}), '(rev, train_f)\n', (3191, 3205), False, 'import json\n'), ((3354, 3375), 'json.dump', 'json.dump', (['rev', 'val_f'], {}), '(rev, val_f)\n', (3363, 3375), False, 'import json\n')] |
import os, numpy as np
class CMIP6_models:
total_num = 0
instances = []
file_path = './'
def __init__(self, Name, Res, Grids, CaseList, VarLab):
self.__class__.instances.append(self)
self.Name = Name
self.Res = Res
self.Grids = Grids
self.CaseList = CaseList
self.VarLab = VarLab
CMIP6_models.total_num +=1
def get_nc_name(self, case_name, VarLab):
self_nc_name = 'tp_' + self.Name + '_' + case_name + '_' + VarLab + '.nc'
return self_nc_name
def get_timestamp(self, case_name):
if not (case_name in self.CaseList):
print (self.Name, 'does not have',case_name)
return [ ]
else:
data_path = CMIP6_models.file_path + self.Name + '/' + case_name + '/'
filelist = os.listdir(data_path)
timestamp = []
for file in filelist:
if file[:3] == 'tas':
timestamp.append(file[-16:])
timestamp_unique = np.unique(timestamp)
timestamp_unique.sort()
return timestamp_unique
######################################################################################################
# note that NESM3 does not have SSP370 results
def set_class_instance():
bcc_csm2_mr = CMIP6_models( 'BCC-CSM2-MR', [160, 320], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
bcc_esm1 = CMIP6_models( 'BCC-ESM1', [ 64, 128], 'gn', ['piControl', 'abrupt-4xCO2', 'historical' ], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
cams_csm1_0 = CMIP6_models( 'CAMS-CSM1-0', [160, 320], 'gn', [ 'historical', 'ssp585'], [['no cases'], ['r1i1p1f1' ]] )
canesm5 = CMIP6_models( 'CanESM5', [ 64, 128], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
cesm2 = CMIP6_models( 'CESM2', [192, 288], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
cesm2_waccm = CMIP6_models( 'CESM2-WACCM', [192, 288], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
cnrm_cm6_1 = CMIP6_models( 'CNRM-CM6-1', [128, 256], 'gr', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f2'], ['r1i1p1f2', 'r2i1p1f2', 'r3i1p1f2']] )
cnrm_esm2_1 = CMIP6_models( 'CNRM-ESM2-1', [128, 256], 'gr', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f2'], ['r1i1p1f2', 'r2i1p1f2', 'r3i1p1f2']] )
e3sm_1_0 = CMIP6_models( 'E3SM-1-0', [180, 360], 'gr', ['piControl', 'abrupt-4xCO2', 'historical' ], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
ec_earth3 = CMIP6_models( 'EC-Earth3', [256, 512], 'gr', ['piControl', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r3i1p1f1', 'r4i1p1f1']] )
ec_earth3_veg = CMIP6_models( 'EC-Earth3-Veg', [256, 512], 'gr', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
fgoals_g3 = CMIP6_models( 'FGOALS-g3', [ 80, 180], 'gn', ['piControl', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
gfdl_esm4 = CMIP6_models( 'GFDL-ESM4', [180, 288], 'gr1', [ 'historical', 'ssp585'], [['no cases'], ['r1i1p1f1' ]] )
giss_e2_1_g = CMIP6_models( 'GISS-E2-1-G', [ 90, 144], 'gn', ['piControl', 'abrupt-4xCO2', 'historical' ], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
giss_e1_1_h = CMIP6_models( 'GISS-E2-1-H', [ 90, 144], 'gn', ['piControl', 'abrupt-4xCO2', 'historical' ], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
hadgem3_gc31_ll = CMIP6_models( 'HadGEM3-GC31-LL', [144, 192], 'gn', [ 'abrupt-4xCO2', 'historical' ], [['r1i1p1f3'], ['r1i1p1f3', 'r2i1p1f3', 'r3i1p1f3']] )
ipsl_cm6a_lr = CMIP6_models( 'IPSL-CM6A-LR', [143, 144], 'gr', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
miroc_es2l = CMIP6_models( 'MIROC-ES2L', [ 64, 128], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f2'], ['r1i1p1f2', 'r2i1p1f2', 'r3i1p1f2']] )
miroc6 = CMIP6_models( 'MIROC6', [128, 256], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
mri_esm2_0 = CMIP6_models( 'MRI-ESM2-0', [160, 320], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
nesm3 = CMIP6_models( 'NESM3', [ 96, 192], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
norcpm1 = CMIP6_models( 'NorCPM1', [ 96, 144], 'gn', ['piControl', 'historical' ], [['r1i1p1f1'], ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1']] )
noresm2_lm = CMIP6_models( 'NorESM2-LM', [ 96, 144], 'gn', ['piControl', 'abrupt-4xCO2' ], [['r1i1p1f1'], ['no cases' ]] )
sam0_unicon = CMIP6_models( 'SAM0-UNICON', [192, 288], 'gn', ['piControl', 'abrupt-4xCO2' ], [['r1i1p1f1'], ['no cases' ]] )
ukesm1_0_ll = CMIP6_models( 'UKESM1-0-LL', [144, 192], 'gn', ['piControl', 'abrupt-4xCO2', 'historical', 'ssp585'], [['r1i1p1f2'], ['r1i1p1f2', 'r2i1p1f2', 'r3i1p1f2']] )
print (CMIP6_models.total_num, 'CMIP6_models instances has been generated: ',
bcc_csm2_mr.Name, bcc_esm1.Name, cams_csm1_0.Name, canesm5.Name, cesm2.Name, cesm2_waccm.Name, cnrm_cm6_1.Name,
cnrm_esm2_1.Name, e3sm_1_0.Name, ec_earth3.Name, ec_earth3_veg.Name, fgoals_g3.Name, gfdl_esm4.Name, giss_e2_1_g.Name,
giss_e1_1_h.Name, hadgem3_gc31_ll.Name, ipsl_cm6a_lr.Name, miroc_es2l.Name, miroc6.Name, mri_esm2_0.Name, nesm3.Name,
norcpm1.Name, noresm2_lm.Name, sam0_unicon.Name, ukesm1_0_ll.Name) | [
"os.listdir",
"numpy.unique"
] | [((840, 861), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (850, 861), False, 'import os, numpy as np\n'), ((1041, 1061), 'numpy.unique', 'np.unique', (['timestamp'], {}), '(timestamp)\n', (1050, 1061), True, 'import os, numpy as np\n')] |
# pip install pycocotools opencv-python opencv-contrib-python
# wget https://github.com/opencv/opencv_extra/raw/master/testdata/cv/ximgproc/model.yml.gz
import os
import copy
import time
import argparse
import contextlib
import multiprocessing
import numpy as np
import cv2
import cv2.ximgproc
import matplotlib.patches
import matplotlib.pyplot as plt
import torch
from torchvision.datasets import CocoDetection
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def imshow_with_boxes(img, boxes_xywh, savefig):
plt.figure()
plt.imshow(img)
plt.axis("off")
for x, y, w, h in boxes_xywh.tolist():
plt.gca().add_patch(
matplotlib.patches.Rectangle(
(x, y), w, h, linewidth=1, edgecolor="r", facecolor="none"
)
)
plt.savefig(savefig)
plt.close()
return savefig
def selective_search(img, fast, topk):
algo = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
algo.setBaseImage(img)
if fast:
algo.switchToSelectiveSearchFast()
else:
algo.switchToSelectiveSearchQuality()
boxes_xywh = algo.process().astype(np.float32)
scores = np.ones((len(boxes_xywh),))
return boxes_xywh[:topk], scores[:topk]
def edge_boxes(
img,
fast,
topk,
bgr2rgb=(2, 1, 0),
algo_edgedet=cv2.ximgproc.createStructuredEdgeDetection("model.yml.gz")
if os.path.exists("model.yml.gz")
else None,
):
edges = algo_edgedet.detectEdges(img[..., bgr2rgb].astype(np.float32) / 255.0)
orimap = algo_edgedet.computeOrientation(edges)
edges = algo_edgedet.edgesNms(edges, orimap)
algo_edgeboxes = cv2.ximgproc.createEdgeBoxes()
algo_edgeboxes.setMaxBoxes(topk)
boxes_xywh, scores = algo_edgeboxes.getBoundingBoxes(edges, orimap)
if scores is None:
boxes_xywh, scores = np.array([[0, 0.0, img.shape[1], img.shape[0]]]), np.ones(
(1,)
)
return boxes_xywh, scores.squeeze()
def process_image(
image_id,
img_extra,
fast,
resize,
algo,
rgb2bgr=(2, 1, 0),
category_other=-1,
topk=1000,
):
img = np.asarray(img_extra[0])[..., rgb2bgr]
h, w = img.shape[:2]
img_det = img if resize == 1 else cv2.resize(img, (resize, resize))
boxes_xywh, scores = algo(img_det, fast, topk)
boxes_xywh = boxes_xywh.astype(np.float32) * (
1 if resize == 1 else np.array([w, h, w, h]) / resize
)
labels = np.full((len(boxes_xywh),), category_other, dtype=int)
return image_id, dict(boxes=boxes_xywh, scores=scores, labels=labels)
def process_loaded(image_id, loaded, category_other=-1):
boxes_xyxy = loaded["pred_boxes_"].clamp(min=0)
boxes_xywh = torch.stack(
[
boxes_xyxy[:, 0],
boxes_xyxy[:, 1],
boxes_xyxy[:, 2] - boxes_xyxy[:, 0],
boxes_xyxy[:, 3] - boxes_xyxy[:, 1],
],
dim=-1,
)
labels = np.full((len(boxes_xywh),), category_other, dtype=int)
num_classes = loaded["pred_logits"].shape[-1]
scores = loaded["pred_logits"][:, 1 :: num_classes - 2][:, 0]
I = scores.argsort(descending=True)
scores = scores[I]
boxes_xywh = boxes_xywh[I]
labels = labels[I]
return image_id, dict(boxes=boxes_xywh, scores=scores, labels=labels)
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_type="bbox", useCats=0, maxDets=100):
self.coco_gt = copy.deepcopy(coco_gt)
self.coco_eval = COCOeval(coco_gt, iouType=iou_type)
if maxDets != [100]:
self.coco_eval.params.maxDets = maxDets
if not useCats:
self.coco_eval.params.useCats = useCats
self.coco_eval.params.catIds = [-1]
coco_gt.loadAnns = lambda imgIds, loadAnns=coco_gt.loadAnns: [
gt.update(dict(category_id=-1)) or gt for gt in loadAnns(imgIds)
]
self.accumulate, self.summarize = (
self.coco_eval.accumulate,
self.coco_eval.summarize,
)
@staticmethod
def call_without_stdout(func, *args):
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull):
return func(*args)
def update(self, predictions):
tolist = lambda a: [a.tolist()] if a.ndim == 0 else a.tolist()
detection_results = [
dict(image_id=image_id, bbox=bbox, score=score, category_id=category_id)
for image_id, pred in predictions.items()
if pred
for bbox, score, category_id in zip(
pred["boxes"].tolist(), tolist(pred["scores"]), pred["labels"].tolist()
)
]
self.coco_eval.cocoDt = (
self.call_without_stdout(COCO.loadRes, self.coco_gt, detection_results)
if detection_results
else COCO()
)
self.coco_eval.params.imgIds = list(predictions)
self.call_without_stdout(self.coco_eval.evaluate)
def main(args):
coco_mode = "instances"
PATHS = dict(
train=(
os.path.join(args.dataset_root, f"train{args.dataset_year}"),
os.path.join(
args.dataset_root,
"annotations",
f"{coco_mode}_train{args.dataset_year}.json",
),
),
val=(
os.path.join(args.dataset_root, f"val{args.dataset_year}"),
os.path.join(
args.dataset_root,
"annotations",
f"{coco_mode}_val{args.dataset_year}.json",
),
),
)
dataset = CocoDetection(*PATHS[args.dataset_split])
coco_evaluator = CocoEvaluator(dataset.coco, maxDets=args.max_dets)
tic = time.time()
if args.output_dir:
os.makedirs(args.output_dir, exist_ok=True)
if args.algo != "process_loaded":
preds = dict(
multiprocessing.Pool(processes=args.num_workers).starmap(
process_image,
zip(
dataset.ids,
dataset,
[args.fast] * len(dataset),
[args.resize] * len(dataset),
[globals()[args.algo]] * len(dataset),
),
)
)
else:
preds = []
for i, t in enumerate(
zip(
dataset.ids,
dataset,
[args.fast] * len(dataset),
[args.resize] * len(dataset),
[globals()[args.algo]] * len(dataset),
)
):
loaded = torch.load(
os.path.join(args.input_dir, str(t[0]) + ".pt"), map_location="cpu"
)
preds.append(process_loaded(t[0], loaded))
if args.output_dir:
imshow_with_boxes(
t[1][0],
preds[-1][1]["boxes"][:5],
os.path.join(args.output_dir, str(t[0]) + ".jpg"),
)
print(i) if i % 50 == 0 else None
preds = dict(preds)
print("proposals", time.time() - tic)
tic = time.time()
coco_evaluator.update(preds)
coco_evaluator.accumulate()
coco_evaluator.summarize()
print("evaluator", time.time() - tic)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-dir", "-i")
parser.add_argument("--output-dir", "-o")
parser.add_argument("--dataset-root")
parser.add_argument("--dataset-split", default="val", choices=["train", "val"])
parser.add_argument("--dataset-year", type=int, default=2017)
parser.add_argument("--num-workers", type=int, default=16)
parser.add_argument(
"--algo",
default="selective_search",
choices=["selective_search", "edge_boxes", "process_loaded"],
)
parser.add_argument("--fast", action="store_true")
parser.add_argument("--resize", type=int, default=128)
parser.add_argument("--max-dets", type=int, nargs="*", default=[100])
args = parser.parse_args()
print(args)
main(args)
| [
"pycocotools.cocoeval.COCOeval",
"numpy.array",
"copy.deepcopy",
"matplotlib.pyplot.imshow",
"os.path.exists",
"cv2.ximgproc.createStructuredEdgeDetection",
"argparse.ArgumentParser",
"numpy.asarray",
"pycocotools.coco.COCO",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"cv2.ximgproc.s... | [((548, 560), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (558, 560), True, 'import matplotlib.pyplot as plt\n'), ((565, 580), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (575, 580), True, 'import matplotlib.pyplot as plt\n'), ((585, 600), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (593, 600), True, 'import matplotlib.pyplot as plt\n'), ((818, 838), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savefig'], {}), '(savefig)\n', (829, 838), True, 'import matplotlib.pyplot as plt\n'), ((843, 854), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (852, 854), True, 'import matplotlib.pyplot as plt\n'), ((926, 987), 'cv2.ximgproc.segmentation.createSelectiveSearchSegmentation', 'cv2.ximgproc.segmentation.createSelectiveSearchSegmentation', ([], {}), '()\n', (985, 987), False, 'import cv2\n'), ((1671, 1701), 'cv2.ximgproc.createEdgeBoxes', 'cv2.ximgproc.createEdgeBoxes', ([], {}), '()\n', (1699, 1701), False, 'import cv2\n'), ((2729, 2864), 'torch.stack', 'torch.stack', (['[boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] - boxes_xyxy[:, 0], \n boxes_xyxy[:, 3] - boxes_xyxy[:, 1]]'], {'dim': '(-1)'}), '([boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] -\n boxes_xyxy[:, 0], boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], dim=-1)\n', (2740, 2864), False, 'import torch\n'), ((5614, 5655), 'torchvision.datasets.CocoDetection', 'CocoDetection', (['*PATHS[args.dataset_split]'], {}), '(*PATHS[args.dataset_split])\n', (5627, 5655), False, 'from torchvision.datasets import CocoDetection\n'), ((5739, 5750), 'time.time', 'time.time', ([], {}), '()\n', (5748, 5750), False, 'import time\n'), ((7117, 7128), 'time.time', 'time.time', ([], {}), '()\n', (7126, 7128), False, 'import time\n'), ((7309, 7334), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7332, 7334), False, 'import argparse\n'), ((1417, 1447), 'os.path.exists', 'os.path.exists', (['"""model.yml.gz"""'], 
{}), "('model.yml.gz')\n", (1431, 1447), False, 'import os\n'), ((1351, 1409), 'cv2.ximgproc.createStructuredEdgeDetection', 'cv2.ximgproc.createStructuredEdgeDetection', (['"""model.yml.gz"""'], {}), "('model.yml.gz')\n", (1393, 1409), False, 'import cv2\n'), ((2147, 2171), 'numpy.asarray', 'np.asarray', (['img_extra[0]'], {}), '(img_extra[0])\n', (2157, 2171), True, 'import numpy as np\n'), ((2250, 2283), 'cv2.resize', 'cv2.resize', (['img', '(resize, resize)'], {}), '(img, (resize, resize))\n', (2260, 2283), False, 'import cv2\n'), ((3451, 3473), 'copy.deepcopy', 'copy.deepcopy', (['coco_gt'], {}), '(coco_gt)\n', (3464, 3473), False, 'import copy\n'), ((3499, 3534), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco_gt'], {'iouType': 'iou_type'}), '(coco_gt, iouType=iou_type)\n', (3507, 3534), False, 'from pycocotools.cocoeval import COCOeval\n'), ((5784, 5827), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (5795, 5827), False, 'import os\n'), ((1864, 1912), 'numpy.array', 'np.array', (['[[0, 0.0, img.shape[1], img.shape[0]]]'], {}), '([[0, 0.0, img.shape[1], img.shape[0]]])\n', (1872, 1912), True, 'import numpy as np\n'), ((1914, 1927), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (1921, 1927), True, 'import numpy as np\n'), ((4864, 4870), 'pycocotools.coco.COCO', 'COCO', ([], {}), '()\n', (4868, 4870), False, 'from pycocotools.coco import COCO\n'), ((7088, 7099), 'time.time', 'time.time', ([], {}), '()\n', (7097, 7099), False, 'import time\n'), ((7248, 7259), 'time.time', 'time.time', ([], {}), '()\n', (7257, 7259), False, 'import time\n'), ((652, 661), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (659, 661), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2440), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2426, 2440), True, 'import numpy as np\n'), ((4166, 4201), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['devnull'], {}), 
'(devnull)\n', (4192, 4201), False, 'import contextlib\n'), ((5088, 5148), 'os.path.join', 'os.path.join', (['args.dataset_root', 'f"""train{args.dataset_year}"""'], {}), "(args.dataset_root, f'train{args.dataset_year}')\n", (5100, 5148), False, 'import os\n'), ((5162, 5258), 'os.path.join', 'os.path.join', (['args.dataset_root', '"""annotations"""', 'f"""{coco_mode}_train{args.dataset_year}.json"""'], {}), "(args.dataset_root, 'annotations',\n f'{coco_mode}_train{args.dataset_year}.json')\n", (5174, 5258), False, 'import os\n'), ((5356, 5414), 'os.path.join', 'os.path.join', (['args.dataset_root', 'f"""val{args.dataset_year}"""'], {}), "(args.dataset_root, f'val{args.dataset_year}')\n", (5368, 5414), False, 'import os\n'), ((5428, 5522), 'os.path.join', 'os.path.join', (['args.dataset_root', '"""annotations"""', 'f"""{coco_mode}_val{args.dataset_year}.json"""'], {}), "(args.dataset_root, 'annotations',\n f'{coco_mode}_val{args.dataset_year}.json')\n", (5440, 5522), False, 'import os\n'), ((5901, 5949), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'args.num_workers'}), '(processes=args.num_workers)\n', (5921, 5949), False, 'import multiprocessing\n')] |
import numpy as np
import os
import csv
import sys
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from keras.callbacks import EarlyStopping, Callback
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense, Dropout, Flatten
from keras.optimizers import SGD, Adam, RMSprop, Nadam
from keras.layers.core import Reshape
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D, BatchNormalization, concatenate, Add, LSTM
from keras.layers.merge import Concatenate
from keras.regularizers import l2
from keras.layers.advanced_activations import PReLU
from keras.utils import plot_model
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import KFold
import pickle
from sklearn import ensemble
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
import scipy as sp
import scipy
import h5py
from keras import backend as K
import random
"""
with open('../../../../AB-Bind_S645.csv') as csvfile:
mat = []
data = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in data:
mat.append(', '.join(row))
csvfile.close()
exp_ddg = []
for i in range(1, len(mat)):
tmp = mat[i].split(",")
exp_ddg.append(float(tmp[-1]))
np.save("exp_ddg.npy", exp_ddg)
#X = np.load("perspect_l0.npy", allow_pickle=True)
#X = np.reshape(X, (645, 48, 360))
#np.save("perspect_l0.npy", X)
#Y = np.load("exp_ddg.npy", allow_pickle=True)
"""
def scheduler(epoch, lr):
if epoch <= 1000 or epoch > 1001:
return lr
elif epoch == 1001:
return lr/10
class TestCallback(Callback):
def __init__(self, test_data):
self.test_data = test_data
def on_epoch_end(self, epoch, logs={}):
x, y = self.test_data
loss, acc = self.model.evaluate(x, y, verbose=0)
logs["test_loss"] = loss
logs["test_pcc"] = acc
print('Testing loss: {}, acc: {}'.format(loss, acc))
def rootmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def pearson_r(y_true, y_pred):
x = y_true
y = y_pred
mx = K.mean(x, axis=0)
my = K.mean(y, axis=0)
xm, ym = x - mx, y - my
r_num = K.sum(xm * ym)
x_square_sum = K.sum(xm * xm)
y_square_sum = K.sum(ym * ym)
r_den = K.sqrt(x_square_sum * y_square_sum)
r = r_num / r_den
return K.mean(r)
def normalize(X):
mean = np.mean(X,axis=0)
std = np.std(X,axis=0)
length1 = X.shape[0]
X_train_normed = X
for i in range(0,length1):
for j in range(0,X.shape[1]):
for k in range(0, X.shape[2]):
if std[j,k]!=0 :
X_train_normed[i,j,k] = (X_train_normed[i,j,k]-mean[j,k])/std[j,k]
return X_train_normed
def sub_model1(input1):
model_con1d = Conv1D(64, 3, activation=PReLU(), padding='same',kernel_initializer='he_normal')(input1)
model_con1d = Conv1D(64, 3, activation=PReLU(), padding='same',kernel_initializer='lecun_uniform')(model_con1d)
model_con1d = Dropout(0.1)(model_con1d)
model_con1d = Conv1D(64, 3, activation=PReLU(), padding='same',kernel_initializer='lecun_uniform')(model_con1d)
model_con1d = Conv1D(64, 3, activation=PReLU(), padding='same',kernel_initializer='lecun_uniform')(model_con1d)
model_con1d = Dropout(0.1)(model_con1d)
intermediate_output = Flatten()(model_con1d)
final_output = Dense(1, activation="linear")(intermediate_output)
return final_output
if not os.path.isdir("./ab_homo/"):
os.mkdir("./ab_homo/")
filename = "./ab_homo/spect_ab_cnn.log"
f = open(filename,"w+")
f.close()
outliers = np.load("outliers.npy", allow_pickle=True)
aux = np.load("X_ab_aux.npy", allow_pickle=True)
alpha_l1 = np.load("./X_ab_alpha_l1.npy", allow_pickle=True)
alpha_l1 = np.reshape(alpha_l1, (len(alpha_l1), 80, 84))
alpha_l1 = normalize(alpha_l1)
alpha_l1 = np.reshape(alpha_l1, (len(alpha_l1), 80*84))
alpha_l2 = np.load("./X_ab_alpha_l2.npy", allow_pickle=True)
alpha_l2 = np.reshape(alpha_l2, (len(alpha_l2), 80, 84))
alpha_l2 = normalize(alpha_l2)
alpha_l2 = np.reshape(alpha_l2, (len(alpha_l2), 80*84))
spect_X = np.load("./X_ab_l0.npy")
spect_X = np.asarray(spect_X, dtype = float)
Y = np.load("./Y_ab.npy")
y = np.asarray(Y, dtype = float)
idx = [0, 1, 2, 3, 4, 5, 7, 8, 12, 13, 14]
X = spect_X[:, :, :, idx]
X = np.reshape(X, (len(X), 48, 36*len(idx)))
X = normalize(X)
X = np.reshape(X, (len(X), 48*36*len(idx)))
X = np.concatenate((X, alpha_l1, alpha_l2, aux), axis=1)
X = np.delete(X, outliers, axis=0)
y = np.delete(y, outliers)
n_num = X.shape[0]
result_whole = []
rmse_whole = []
pearsonr_whole = []
iter_num = int(sys.argv[1])
bs = 8; epoch = 2000
for j in range(0, iter_num):
f = open(filename,"a+")
f.write("Iter {}\n".format(j))
f.close()
data = []
result = np.zeros((len(idx)+2, 87))
X_train, X_test = X[:-87], X[-87:]
y_train, y_test = y[:-87], y[-87:]
aux_train, alphal2_train, alphal1_train, X_train = X_train[:, 48*36*len(idx)+168*80:], X_train[:, 48*36*len(idx)+80*84:48*36*len(idx)+168*80], X_train[:, 48*36*len(idx):48*36*len(idx)+80*84], X_train[:, 0:48*36*len(idx)]
aux_test, alphal2_test, alphal1_test, X_test = X_test[:, 48*36*len(idx)+168*80:], X_test[:, 48*36*len(idx)+80*84:48*36*len(idx)+168*80], X_test[:, 48*36*len(idx):48*36*len(idx)+80*84], X_test[:, 0:48*36*len(idx)]
X_train = np.reshape(X_train, (len(X_train), 48, 36, len(idx)))
X_test = np.reshape(X_test, (len(X_test), 48, 36, len(idx)))
alphal1_train = np.reshape(alphal1_train, (len(alphal1_train), 80, 84))
alphal1_test = np.reshape(alphal1_test, (len(alphal1_test), 80, 84))
alphal2_train = np.reshape(alphal2_train, (len(alphal2_train), 80, 84))
alphal2_test = np.reshape(alphal2_test, (len(alphal2_test), 80, 84))
data.append([X_train, y_train, X_test, y_test, alphal1_train, alphal1_test, alphal2_train, alphal2_test, aux_train, aux_test])
input1 = Input(shape=(48, 36))
input2 = Input(shape=(80, 84))
cnn_model = Model(inputs=input1, outputs=sub_model1(input1))
cnn2_model = Model(inputs=input1, outputs=cnn_model.layers[-2].output)
alpha_model = Model(inputs=input2, outputs=sub_model1(input2))
alpha2_model = Model(inputs=input2, outputs=alpha_model.layers[-2].output)
plot_model(cnn_model, to_file='./ab_homo/cnn_model.png', show_shapes=True, dpi=200)
plot_model(alpha_model, to_file='./ab_homo/alpha_model.png', show_shapes=True, dpi=200)
saved_hist = []
for attr_idx in range(11):
cnn_model = Model(inputs=input1, outputs=sub_model1(input1))
cnn2_model = Model(inputs=input1, outputs=cnn_model.layers[-2].output)
cnn_model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse', metrics=[pearson_r])
history = cnn_model.fit(X_train[:, :, :, attr_idx], y_train, batch_size=bs, epochs=epoch, shuffle=True, verbose=2, callbacks=[TestCallback((X_test[:, :, :, attr_idx], y_test))])
saved_hist.append(history)
cnn_model.save("./ab_homo/spectcnn_ab_model_{}_{}.h5".format(j, attr_idx))
y_pred = cnn_model.predict(X_test[:,:,:,attr_idx])
y_pred = np.reshape(y_pred, (len(y_pred),))
mse = mean_squared_error(y_test, y_pred)
rmse = sqrt(mse)
pearcorr = sp.stats.pearsonr(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
f = open(filename,"a+")
f.write("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}\n".format(attr_idx, rmse, pearcorr[0], mae))
f.close()
print("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}".format(attr_idx, rmse, pearcorr[0], mae))
result[attr_idx] = y_pred
### Alpha L1 Laplacian
alpha_model = Model(inputs=input2, outputs=sub_model1(input2))
alpha2_model = Model(inputs=input2, outputs=alpha_model.layers[-2].output)
alpha_model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse', metrics=[pearson_r])
history = alpha_model.fit(alphal1_train, y_train, batch_size=bs, epochs=epoch, shuffle=True, verbose=2, callbacks=[TestCallback((alphal1_test, y_test))])
saved_hist.append(history)
alpha_model.save("./ab_homo/spectcnn_ab_model_{}_11.h5".format(j))
y_pred = alpha_model.predict(alphal1_test)
y_pred = np.reshape(y_pred, (len(y_pred),))
mse = mean_squared_error(y_test, y_pred)
rmse = sqrt(mse)
pearcorr = sp.stats.pearsonr(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
f = open(filename,"a+")
f.write("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}\n".format(11, rmse, pearcorr[0], mae))
f.close()
print("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}".format(11, rmse, pearcorr[0], mae))
result[11] = y_pred
### Alpha L2 Laplacian
alpha_model = Model(inputs=input2, outputs=sub_model1(input2))
alpha2_model = Model(inputs=input2, outputs=alpha_model.layers[-2].output)
alpha_model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse', metrics=[pearson_r])
history = alpha_model.fit(alphal2_train, y_train, batch_size=bs, epochs=epoch, shuffle=True, verbose=2, callbacks=[TestCallback((alphal2_test, y_test))])
saved_hist.append(history)
alpha_model.save("./ab_homo/spectcnn_ab_model_{}_12.h5".format(j))
y_pred = alpha_model.predict(alphal2_test)
y_pred = np.reshape(y_pred, (len(y_pred),))
mse = mean_squared_error(y_test, y_pred)
rmse = sqrt(mse)
pearcorr = sp.stats.pearsonr(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
f = open(filename,"a+")
f.write("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}\n".format(12, rmse, pearcorr[0], mae))
f.close()
print("Index {:0>2d}: RMSE: {:.4f}, PCC: {:.4f}, MAE: {:.4f}".format(12, rmse, pearcorr[0], mae))
result[12] = y_pred
# Plot All Training Loss
print("spect_ab_model saved")
plt.figure()
for attr_idx in range(13):
plt.plot(saved_hist[attr_idx].history['loss'], linewidth=0.7, label=str(attr_idx))
#plt.plot(history.history['pearson_r'], linewidth=0.7, label='PCC')
plt.legend()
plt.savefig("./ab_homo/spectnettree_ab_loss_{}.png".format(j), dpi=200)
np.save("./ab_homo/ab_data_{}.npy".format(j), data)
for index in range(13):
rmse = np.sqrt(mean_squared_error(y_test,result[index]))
pearsonr = scipy.stats.pearsonr(y_test,result[index])
print('RMSE =', rmse)
print('pearsonr =', pearsonr)
f = open(filename,"a+")
f.write("Iter {:0>2d}, Index {:0>2d}: RMSE: {:.4f} PCC: {:.4f} {}\n\n".format(j, index, rmse, pearsonr[0], pearsonr[1]))
f.close()
rmse_whole.append(rmse)
pearsonr_whole.append(pearsonr[0])
result_whole.append(result)
np.save("./ab_homo/spectcnn_ab_result_whole_{}.npy".format(j), result_whole)
np.save("./ab_homo/spectcnn_ab_rmse_whole_{}.npy".format(j), rmse_whole)
np.save("./ab_homo/spectcnn_ab_pearsonr_whole_{}.npy".format(j),pearsonr_whole) | [
"keras.backend.sum",
"math.sqrt",
"scipy.stats.pearsonr",
"keras.layers.Dense",
"numpy.mean",
"numpy.delete",
"keras.utils.plot_model",
"keras.backend.square",
"numpy.asarray",
"os.path.isdir",
"os.mkdir",
"numpy.concatenate",
"keras.models.Model",
"sklearn.metrics.mean_absolute_error",
... | [((3941, 3983), 'numpy.load', 'np.load', (['"""outliers.npy"""'], {'allow_pickle': '(True)'}), "('outliers.npy', allow_pickle=True)\n", (3948, 3983), True, 'import numpy as np\n'), ((3991, 4033), 'numpy.load', 'np.load', (['"""X_ab_aux.npy"""'], {'allow_pickle': '(True)'}), "('X_ab_aux.npy', allow_pickle=True)\n", (3998, 4033), True, 'import numpy as np\n'), ((4048, 4097), 'numpy.load', 'np.load', (['"""./X_ab_alpha_l1.npy"""'], {'allow_pickle': '(True)'}), "('./X_ab_alpha_l1.npy', allow_pickle=True)\n", (4055, 4097), True, 'import numpy as np\n'), ((4259, 4308), 'numpy.load', 'np.load', (['"""./X_ab_alpha_l2.npy"""'], {'allow_pickle': '(True)'}), "('./X_ab_alpha_l2.npy', allow_pickle=True)\n", (4266, 4308), True, 'import numpy as np\n'), ((4469, 4493), 'numpy.load', 'np.load', (['"""./X_ab_l0.npy"""'], {}), "('./X_ab_l0.npy')\n", (4476, 4493), True, 'import numpy as np\n'), ((4505, 4537), 'numpy.asarray', 'np.asarray', (['spect_X'], {'dtype': 'float'}), '(spect_X, dtype=float)\n', (4515, 4537), True, 'import numpy as np\n'), ((4547, 4568), 'numpy.load', 'np.load', (['"""./Y_ab.npy"""'], {}), "('./Y_ab.npy')\n", (4554, 4568), True, 'import numpy as np\n'), ((4574, 4600), 'numpy.asarray', 'np.asarray', (['Y'], {'dtype': 'float'}), '(Y, dtype=float)\n', (4584, 4600), True, 'import numpy as np\n'), ((4792, 4844), 'numpy.concatenate', 'np.concatenate', (['(X, alpha_l1, alpha_l2, aux)'], {'axis': '(1)'}), '((X, alpha_l1, alpha_l2, aux), axis=1)\n', (4806, 4844), True, 'import numpy as np\n'), ((4852, 4882), 'numpy.delete', 'np.delete', (['X', 'outliers'], {'axis': '(0)'}), '(X, outliers, axis=0)\n', (4861, 4882), True, 'import numpy as np\n'), ((4888, 4910), 'numpy.delete', 'np.delete', (['y', 'outliers'], {}), '(y, outliers)\n', (4897, 4910), True, 'import numpy as np\n'), ((2384, 2401), 'keras.backend.mean', 'K.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2390, 2401), True, 'from keras import backend as K\n'), ((2412, 2429), 'keras.backend.mean', 'K.mean', 
(['y'], {'axis': '(0)'}), '(y, axis=0)\n', (2418, 2429), True, 'from keras import backend as K\n'), ((2472, 2486), 'keras.backend.sum', 'K.sum', (['(xm * ym)'], {}), '(xm * ym)\n', (2477, 2486), True, 'from keras import backend as K\n'), ((2507, 2521), 'keras.backend.sum', 'K.sum', (['(xm * xm)'], {}), '(xm * xm)\n', (2512, 2521), True, 'from keras import backend as K\n'), ((2542, 2556), 'keras.backend.sum', 'K.sum', (['(ym * ym)'], {}), '(ym * ym)\n', (2547, 2556), True, 'from keras import backend as K\n'), ((2570, 2605), 'keras.backend.sqrt', 'K.sqrt', (['(x_square_sum * y_square_sum)'], {}), '(x_square_sum * y_square_sum)\n', (2576, 2605), True, 'from keras import backend as K\n'), ((2641, 2650), 'keras.backend.mean', 'K.mean', (['r'], {}), '(r)\n', (2647, 2650), True, 'from keras import backend as K\n'), ((2684, 2702), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2691, 2702), True, 'import numpy as np\n'), ((2713, 2730), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2719, 2730), True, 'import numpy as np\n'), ((3790, 3817), 'os.path.isdir', 'os.path.isdir', (['"""./ab_homo/"""'], {}), "('./ab_homo/')\n", (3803, 3817), False, 'import os\n'), ((3824, 3846), 'os.mkdir', 'os.mkdir', (['"""./ab_homo/"""'], {}), "('./ab_homo/')\n", (3832, 3846), False, 'import os\n'), ((6334, 6355), 'keras.layers.Input', 'Input', ([], {'shape': '(48, 36)'}), '(shape=(48, 36))\n', (6339, 6355), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((6370, 6391), 'keras.layers.Input', 'Input', ([], {'shape': '(80, 84)'}), '(shape=(80, 84))\n', (6375, 6391), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((6478, 6535), 'keras.models.Model', 'Model', ([], {'inputs': 'input1', 'outputs': 'cnn_model.layers[-2].output'}), '(inputs=input1, outputs=cnn_model.layers[-2].output)\n', (6483, 6535), False, 'from keras.models import Model, Sequential, load_model\n'), ((6626, 6685), 'keras.models.Model', 'Model', ([], 
{'inputs': 'input2', 'outputs': 'alpha_model.layers[-2].output'}), '(inputs=input2, outputs=alpha_model.layers[-2].output)\n', (6631, 6685), False, 'from keras.models import Model, Sequential, load_model\n'), ((6693, 6780), 'keras.utils.plot_model', 'plot_model', (['cnn_model'], {'to_file': '"""./ab_homo/cnn_model.png"""', 'show_shapes': '(True)', 'dpi': '(200)'}), "(cnn_model, to_file='./ab_homo/cnn_model.png', show_shapes=True,\n dpi=200)\n", (6703, 6780), False, 'from keras.utils import plot_model\n'), ((6782, 6874), 'keras.utils.plot_model', 'plot_model', (['alpha_model'], {'to_file': '"""./ab_homo/alpha_model.png"""', 'show_shapes': '(True)', 'dpi': '(200)'}), "(alpha_model, to_file='./ab_homo/alpha_model.png', show_shapes=\n True, dpi=200)\n", (6792, 6874), False, 'from keras.utils import plot_model\n'), ((8218, 8277), 'keras.models.Model', 'Model', ([], {'inputs': 'input2', 'outputs': 'alpha_model.layers[-2].output'}), '(inputs=input2, outputs=alpha_model.layers[-2].output)\n', (8223, 8277), False, 'from keras.models import Model, Sequential, load_model\n'), ((8747, 8781), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8765, 8781), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((8794, 8803), 'math.sqrt', 'sqrt', (['mse'], {}), '(mse)\n', (8798, 8803), False, 'from math import sqrt\n'), ((8820, 8853), 'scipy.stats.pearsonr', 'sp.stats.pearsonr', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8837, 8853), True, 'import scipy as sp\n'), ((8865, 8900), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8884, 8900), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((9301, 9360), 'keras.models.Model', 'Model', ([], {'inputs': 'input2', 'outputs': 'alpha_model.layers[-2].output'}), '(inputs=input2, outputs=alpha_model.layers[-2].output)\n', (9306, 9360), False, 'from 
keras.models import Model, Sequential, load_model\n'), ((9830, 9864), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9848, 9864), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((9877, 9886), 'math.sqrt', 'sqrt', (['mse'], {}), '(mse)\n', (9881, 9886), False, 'from math import sqrt\n'), ((9903, 9936), 'scipy.stats.pearsonr', 'sp.stats.pearsonr', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9920, 9936), True, 'import scipy as sp\n'), ((9948, 9983), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9967, 9983), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((10337, 10349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10347, 10349), True, 'import matplotlib.pyplot as plt\n'), ((10556, 10568), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10566, 10568), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3335), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3330, 3335), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((3602, 3614), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3609, 3614), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((3657, 3666), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3664, 3666), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((3702, 3731), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (3707, 3731), False, 'from keras.layers import Input, Dense, Dropout, Flatten\n'), ((7021, 7078), 'keras.models.Model', 'Model', ([], {'inputs': 'input1', 'outputs': 'cnn_model.layers[-2].output'}), '(inputs=input1, outputs=cnn_model.layers[-2].output)\n', (7026, 7078), False, 'from keras.models import Model, Sequential, load_model\n'), ((7614, 7648), 
'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7632, 7648), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((7665, 7674), 'math.sqrt', 'sqrt', (['mse'], {}), '(mse)\n', (7669, 7674), False, 'from math import sqrt\n'), ((7695, 7728), 'scipy.stats.pearsonr', 'sp.stats.pearsonr', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7712, 7728), True, 'import scipy as sp\n'), ((7744, 7779), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7763, 7779), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((10822, 10865), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['y_test', 'result[index]'], {}), '(y_test, result[index])\n', (10842, 10865), False, 'import scipy\n'), ((2271, 2296), 'keras.backend.square', 'K.square', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (2279, 2296), True, 'from keras import backend as K\n'), ((8313, 8339), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (8317, 8339), False, 'from keras.optimizers import SGD, Adam, RMSprop, Nadam\n'), ((9396, 9422), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (9400, 9422), False, 'from keras.optimizers import SGD, Adam, RMSprop, Nadam\n'), ((10760, 10801), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'result[index]'], {}), '(y_test, result[index])\n', (10778, 10801), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((3123, 3130), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (3128, 3130), False, 'from keras.layers.advanced_activations import PReLU\n'), ((3231, 3238), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (3236, 3238), False, 'from keras.layers.advanced_activations import PReLU\n'), ((3393, 3400), 
'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (3398, 3400), False, 'from keras.layers.advanced_activations import PReLU\n'), ((3510, 3517), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {}), '()\n', (3515, 3517), False, 'from keras.layers.advanced_activations import PReLU\n'), ((7116, 7142), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (7120, 7142), False, 'from keras.optimizers import SGD, Adam, RMSprop, Nadam\n')] |
# coding: utf-8
# In[7]:
import numpy as np
from sklearn import cluster
from scipy.cluster.vq import whiten
k = 50
kextra = 10
num_recs = 645
seed = 2
segment_file = open('bird_data/supplemental_data/segment_features.txt',
'r')
##clean
line = segment_file.readline()
line = segment_file.readline()
index = 0
while line != '':
tokens = line.split(',')
nums = map(float, tokens)
nums = nums[2:len(line)] # Omit recid and segid
if index == 0:
segfeatures = nums
else:
segfeatures = np.vstack((segfeatures, nums))
line = segment_file.readline()
index += 1
#Before running k-means, it is beneficial to rescale each feature dimension of the observation set with whitening.
#Each feature is divided by its standard deviation across all observations to give it unit variance.
#From documentation in scikit-learn
segfeatures = whiten(segfeatures)
# In[8]:
segfeatures
# In[14]:
kmeans1 = cluster.KMeans(n_clusters=k, init='k-means++', n_init=k,
max_iter=300, random_state=seed)
kmeans2 = cluster.KMeans(n_clusters=kextra, init='k-means++', n_init=k,
max_iter=300, random_state=seed)
clusters1 = kmeans1.fit_predict(segfeatures)
clusters2 = kmeans2.fit_predict(segfeatures)
segment_file = open('bird_data/supplemental_data/segment_features.txt',
'r')
segment_file.seek(0)
line = segment_file.readline()
line = segment_file.readline()
index = 0
prev_rec_id = -1
hist = np.zeros((num_recs, k + kextra))
while line != '':
while 1:
tokens = line.split(',')
rec_id = int(tokens[0])
if rec_id != prev_rec_id:
prev_rec_id = rec_id
break
hist[rec_id][clusters1[index]] += 1
hist[rec_id][k + clusters2[index]] += 1
line = segment_file.readline()
if line == '':
break
index += 1
segment_file.close()
histfilename = 'hist.txt'
histfile = open(histfilename, 'w')
histfile.write('rec_id,[hist]\n')
for rec_id in range(num_recs):
histfile.write('%d,' % rec_id)
for col in range(k + kextra - 1):
histfile.write('%f,' % hist[rec_id][col])
histfile.write('%f\n' % hist[rec_id][col + 1])
histfile.close()
# In[ ]:
| [
"scipy.cluster.vq.whiten",
"numpy.zeros",
"sklearn.cluster.KMeans",
"numpy.vstack"
] | [((915, 934), 'scipy.cluster.vq.whiten', 'whiten', (['segfeatures'], {}), '(segfeatures)\n', (921, 934), False, 'from scipy.cluster.vq import whiten\n'), ((982, 1075), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k', 'init': '"""k-means++"""', 'n_init': 'k', 'max_iter': '(300)', 'random_state': 'seed'}), "(n_clusters=k, init='k-means++', n_init=k, max_iter=300,\n random_state=seed)\n", (996, 1075), False, 'from sklearn import cluster\n'), ((1107, 1205), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'kextra', 'init': '"""k-means++"""', 'n_init': 'k', 'max_iter': '(300)', 'random_state': 'seed'}), "(n_clusters=kextra, init='k-means++', n_init=k, max_iter=300,\n random_state=seed)\n", (1121, 1205), False, 'from sklearn import cluster\n'), ((1527, 1559), 'numpy.zeros', 'np.zeros', (['(num_recs, k + kextra)'], {}), '((num_recs, k + kextra))\n', (1535, 1559), True, 'import numpy as np\n'), ((560, 590), 'numpy.vstack', 'np.vstack', (['(segfeatures, nums)'], {}), '((segfeatures, nums))\n', (569, 590), True, 'import numpy as np\n')] |
import numpy as np
import cmath
from matplotlib import pyplot as plt
def f(x):
return 10/(1+(10*x - 5)**2)
def reverse_bit(n):
return int('{:08b}'.format(n)[::-1], 2)
def fft(f_k):
N = len(f_k)
if N >= 2:
first_half = f_k[0:N//2]
second_half = f_k[N//2:N]
first = fft(first_half)
second = fft(second_half)
z = np.array([np.exp(-1j * 2 * np.pi * k / N)
for k in range(N//2)])
k = z * second
return np.append((first + k), (first - k))
else:
return f_k
n = 2**8
f_j = np.array([reverse_bit(x) for x in np.arange(n)])
f_j = np.array([f(x/n) for x in f_j])
f_hat = np.array(fft(f_j))/np.sqrt(n)
print(f_hat)
real = [x.real for x in f_hat]
imag = [x.imag for x in f_hat]
plt.plot(real, label='Real part')
plt.plot(imag, label='Imaginary part')
plt.legend()
plt.show()
| [
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.exp",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((784, 817), 'matplotlib.pyplot.plot', 'plt.plot', (['real'], {'label': '"""Real part"""'}), "(real, label='Real part')\n", (792, 817), True, 'from matplotlib import pyplot as plt\n'), ((818, 856), 'matplotlib.pyplot.plot', 'plt.plot', (['imag'], {'label': '"""Imaginary part"""'}), "(imag, label='Imaginary part')\n", (826, 856), True, 'from matplotlib import pyplot as plt\n'), ((857, 869), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (867, 869), True, 'from matplotlib import pyplot as plt\n'), ((870, 880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (878, 880), True, 'from matplotlib import pyplot as plt\n'), ((698, 708), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (705, 708), True, 'import numpy as np\n'), ((500, 531), 'numpy.append', 'np.append', (['(first + k)', '(first - k)'], {}), '(first + k, first - k)\n', (509, 531), True, 'import numpy as np\n'), ((617, 629), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (626, 629), True, 'import numpy as np\n'), ((384, 417), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * k / N)'], {}), '(-1.0j * 2 * np.pi * k / N)\n', (390, 417), True, 'import numpy as np\n')] |
import argparse
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
def plot_controller_data(data_file):
data = pd.read_csv(data_file, skiprows=[1])
font = {'family': 'Source Sans Pro', 'size': 12, 'weight': 'light'}
matplotlib.rc('font', **font)
matplotlib.rcParams["axes.titlepad"] = 15
matplotlib.rcParams['figure.dpi'] = 300
colors = plt.cm.viridis(np.linspace(0, 1, 1))
make_plot(data, 1, 'Distance', colors[0], data_file)
make_plot(data, 2, 'Bearing', colors[0], data_file)
def make_plot(data, col_index, label, color, filename):
fig = plt.figure(figsize=(12, 8))
ax = fig.gca()
sim_time = data.iloc[:, 0].tolist()
data_to_plot = data.iloc[:, col_index].tolist()
ax.plot(sim_time, data_to_plot, label=label, color=color)
ax.set_title('Controller ', size=20, fontweight='normal')
ax.set_xlabel('Simulation Time', labelpad=15)
ax.set_ylabel(label+' Value', labelpad=10)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlim(3, sim_time[-1])
ax.set_ylim(min(data_to_plot)-1, max(data_to_plot) + 1)
ax.margins(0)
ax.grid()
ax.legend(loc=8, borderaxespad=1, ncol=3, frameon=False)
out_file = filename[0:filename.rfind('.')] + '_' + label.lower() + '.pdf'
os.makedirs(os.path.dirname(os.path.normpath(out_file)), exist_ok=True)
plt.savefig(out_file, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script for plotting controller inputs (distance and bearing) '
'vs time.')
parser.add_argument('controller_data_file', metavar='controller_data_file', type=str, nargs='?',
default='controller_data.csv',
help='the controller data file')
args = parser.parse_args()
plot_controller_data(args.controller_data_file)
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"os.path.normpath",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.rc"
] | [((165, 201), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'skiprows': '[1]'}), '(data_file, skiprows=[1])\n', (176, 201), True, 'import pandas as pd\n'), ((279, 308), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (292, 308), False, 'import matplotlib\n'), ((631, 658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (641, 658), True, 'import matplotlib.pyplot as plt\n'), ((1421, 1463), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_file'], {'bbox_inches': '"""tight"""'}), "(out_file, bbox_inches='tight')\n", (1432, 1463), True, 'import matplotlib.pyplot as plt\n'), ((1468, 1479), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1477, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1635), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for plotting controller inputs (distance and bearing) vs time."""'}), "(description=\n 'Script for plotting controller inputs (distance and bearing) vs time.')\n", (1545, 1635), False, 'import argparse\n'), ((427, 447), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (438, 447), True, 'import numpy as np\n'), ((1373, 1399), 'os.path.normpath', 'os.path.normpath', (['out_file'], {}), '(out_file)\n', (1389, 1399), False, 'import os\n')] |
# coding: utf-8
# In[2]:
import keras
import scipy as sp
import scipy.misc, scipy.ndimage.interpolation
from medpy import metric
import numpy as np
import os
from keras import losses
import tensorflow as tf
from keras.models import Model
from keras.layers import Input,merge, concatenate, Conv2D, MaxPooling2D, Activation, UpSampling2D,Dropout,Conv2DTranspose,add,multiply,Flatten,Dense
from keras.layers.normalization import BatchNormalization as bn
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import RMSprop
from keras import regularizers
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import numpy as np
import nibabel as nib
CUDA_VISIBLE_DEVICES = [0]
os.environ['CUDA_VISIBLE_DEVICES']=','.join([str(x) for x in CUDA_VISIBLE_DEVICES])
#oasis files 1-457
import h5py
path='/home/bahaa/oasis_mri/OAS1_'
# In[3]:
import numpy as np
import cv2
from keras.models import load_model
model = load_model('basic_dense_net_dsp_round2.h5')
print(model.summary())
import csv
# In[62]:
fields=['id','landmarks']
with open(r'name.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
import glob
import os
import numpy as np
import csv
import cv2
a=glob.glob('/home/rdey/dsp_final/test/*.jpg')
X_test=[]
print(len(a))
#print(a[0].replace('/home/rdey/dsp_final/train/','').replace('.jpg',''))
for i in range (0,len(a)):
if(i%1000==0):
print(i)
#print(('/home/rdey/dsp_final/train/'+str(a[i].replace('/home/rdey/dsp_final/train/','').strip())))
temp_x=cv2.imread(('/home/rdey/dsp_final/test/'+str(a[i].replace('/home/rdey/dsp_final/test/','').strip())),1)
temp_x=cv2.resize(temp_x,(64,64)).astype('float32')
predicted=model.predict(np.array(temp_x).reshape((1,)+temp_x.shape))
#print(predicted.shape)
max_value=0
max_loc=0
for j in range(0,len(predicted[0])):
if(predicted[0][j]>max_value):
max_value=predicted[0][j]
max_loc=j
fields=[str(a[i].replace('/home/rdey/dsp_final/test/','').strip()).replace('.jpg',''),str(max_loc)+' '+str(max_value)]
with open(r'name.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
#except:
# print('error',i)
| [
"keras.models.load_model",
"csv.writer",
"numpy.array",
"cv2.resize",
"glob.glob"
] | [((1034, 1077), 'keras.models.load_model', 'load_model', (['"""basic_dense_net_dsp_round2.h5"""'], {}), "('basic_dense_net_dsp_round2.h5')\n", (1044, 1077), False, 'from keras.models import load_model\n'), ((1318, 1362), 'glob.glob', 'glob.glob', (['"""/home/rdey/dsp_final/test/*.jpg"""'], {}), "('/home/rdey/dsp_final/test/*.jpg')\n", (1327, 1362), False, 'import glob\n'), ((1203, 1216), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1213, 1216), False, 'import csv\n'), ((2242, 2255), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2252, 2255), False, 'import csv\n'), ((1765, 1793), 'cv2.resize', 'cv2.resize', (['temp_x', '(64, 64)'], {}), '(temp_x, (64, 64))\n', (1775, 1793), False, 'import cv2\n'), ((1837, 1853), 'numpy.array', 'np.array', (['temp_x'], {}), '(temp_x)\n', (1845, 1853), True, 'import numpy as np\n')] |
# GR2 test from Liv Rev
import numpy
from models import sr_mf
from bcs import outflow
from simulation import simulation
from methods import fvs_method
from rk import rk3
from grid import grid
from matplotlib import pyplot
# Grid: Npoints cells on [-L, L] with Ngz ghost zones per side.
Ngz = 3
Npoints = 800
L = 0.5
interval = grid([-L, L], Npoints, Ngz)
# Left/right shock-tube states: density, pressure, transverse velocity
# and magnetic field on each side of the initial discontinuity.
rhoL = 1
pL = 1
rhoR = 0.125
pR = 0.1
vyL = 0
vyR = 0
vzL = 0
vzR = 0
Bx = 0.5
ByL = 1.0
ByR =-1.0
BzL = 0
BzR = 0
gamma = 4.0 / 3.0
# Specific internal energy from the gamma-law (ideal gas) EOS: eps = p / (rho (gamma-1)).
epsL = pL / rhoL / (gamma - 1)
epsR = pR / rhoR / (gamma - 1)
# Two-fluid split: protons and electrons share the total density in
# proportion to their masses (equal masses here, so a 50/50 split).
m_p = 1
m_e = 1
rhoL_p = m_p / (m_p + m_e) * rhoL
rhoL_e = m_e / (m_p + m_e) * rhoL
rhoR_p = m_p / (m_p + m_e) * rhoR
rhoR_e = m_e / (m_p + m_e) * rhoR
# 18-component state vectors; layout appears to be
# [rho_e, v_e(3), eps_e, rho_p, v_p(3), eps_p, B(3), then six more slots]
# -- assumed from the assignments below; TODO confirm against sr_mf's ordering.
qL = numpy.array([rhoL_e, 0, 0, 0, epsL, rhoL_p, 0, 0, 0, epsL, Bx , ByL , BzL, 0, 0, 0, 0, 0 ])
qR = numpy.array([rhoR_e, 0, 0, 0, epsR, rhoR_p, 0, 0, 0, epsR, Bx , ByR , BzR, 0, 0, 0, 0, 0 ])
model_mf = sr_mf.sr_mf_gamma_law(initial_data = sr_mf.initial_riemann(qL, qR),
                      gamma=gamma,
                      kappa_m = 0.01, kappa_f = 0.01, kappa_q = 0.001)
#sim = simulation(model, interval, fvs_method(2), rk3, outflow, cfl=0.5)
# Random primitive data for a prim -> cons -> prim round-trip test:
# densities/energies drawn from [1, 1.8), fields normally distributed.
rho_e = 1 + 0.8 * numpy.random.rand(1000)
rho_p = 1 + 0.8 * numpy.random.rand(1000)
eps_e = 1 + 0.8 * numpy.random.rand(1000)
eps_p = 1 + 0.8 * numpy.random.rand(1000)
Bx = 1 + 5 * numpy.random.randn(1000)
By = -1 + 5 * numpy.random.randn(1000)
Bz = 5 * numpy.random.randn(1000)
Ex = 1 + 5 * numpy.random.randn(1000)
Ey = -1 + 5 * numpy.random.randn(1000)
Ez = 5 * numpy.random.randn(1000)
# Electron velocity built from spherical components (magnitude, azimuth, polar).
vm_e = 0.3 * numpy.random.randn(1000)
theta_e = 2 * numpy.pi * numpy.random.rand(1000)
phi_e = numpy.pi * numpy.random.randn(1000)
vx_e = vm_e * numpy.cos(theta_e) * numpy.sin(phi_e)
vy_e = vm_e * numpy.sin(theta_e) * numpy.sin(phi_e)
vz_e = vm_e * numpy.cos(phi_e)
# Proton velocity, same construction.
vm_p = 0.3 * numpy.random.randn(1000)
theta_p = 2 * numpy.pi * numpy.random.rand(1000)
phi_p = numpy.pi * numpy.random.randn(1000)
vx_p = vm_p * numpy.cos(theta_p) * numpy.sin(phi_p)
vy_p = vm_p * numpy.sin(theta_p) * numpy.sin(phi_p)
vz_p = vm_p * numpy.cos(phi_p)
# Stack into an (18, 1000) primitive-variable array; the last two rows are zero.
prim = numpy.vstack( (rho_e, vx_e, vy_e, vz_e, eps_e,
                rho_p, vx_p, vy_p, vz_p, eps_p,
                Bx, By, Bz, Ex, Ey, Ez, numpy.zeros_like(Bx), numpy.zeros_like(Bx)) )
# Round trip: prim -> cons -> prim, seeding the recovery with a slightly
# perturbed guess; both printed norms should be ~0 if cons2all converges.
cons, aux = model_mf.prim2all(prim)
prim_old = prim + 1e-4 * numpy.random.rand(18, 1000)
prim_check, aux_check = model_mf.cons2all(cons, prim_old)
print(numpy.linalg.norm(prim_check - prim))
print(numpy.linalg.norm(aux_check - aux))
"numpy.random.rand",
"grid.grid",
"numpy.zeros_like",
"numpy.array",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"models.sr_mf.initial_riemann",
"numpy.random.randn"
] | [((265, 292), 'grid.grid', 'grid', (['[-L, L]', 'Npoints', 'Ngz'], {}), '([-L, L], Npoints, Ngz)\n', (269, 292), False, 'from grid import grid\n'), ((648, 740), 'numpy.array', 'numpy.array', (['[rhoL_e, 0, 0, 0, epsL, rhoL_p, 0, 0, 0, epsL, Bx, ByL, BzL, 0, 0, 0, 0, 0]'], {}), '([rhoL_e, 0, 0, 0, epsL, rhoL_p, 0, 0, 0, epsL, Bx, ByL, BzL, 0,\n 0, 0, 0, 0])\n', (659, 740), False, 'import numpy\n'), ((745, 837), 'numpy.array', 'numpy.array', (['[rhoR_e, 0, 0, 0, epsR, rhoR_p, 0, 0, 0, epsR, Bx, ByR, BzR, 0, 0, 0, 0, 0]'], {}), '([rhoR_e, 0, 0, 0, epsR, rhoR_p, 0, 0, 0, epsR, Bx, ByR, BzR, 0,\n 0, 0, 0, 0])\n', (756, 837), False, 'import numpy\n'), ((1374, 1398), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1392, 1398), False, 'import numpy\n'), ((1485, 1509), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1503, 1509), False, 'import numpy\n'), ((1523, 1547), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1541, 1547), False, 'import numpy\n'), ((1573, 1596), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1590, 1596), False, 'import numpy\n'), ((1616, 1640), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1634, 1640), False, 'import numpy\n'), ((1676, 1692), 'numpy.sin', 'numpy.sin', (['phi_e'], {}), '(phi_e)\n', (1685, 1692), False, 'import numpy\n'), ((1728, 1744), 'numpy.sin', 'numpy.sin', (['phi_e'], {}), '(phi_e)\n', (1737, 1744), False, 'import numpy\n'), ((1759, 1775), 'numpy.cos', 'numpy.cos', (['phi_e'], {}), '(phi_e)\n', (1768, 1775), False, 'import numpy\n'), ((1789, 1813), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1807, 1813), False, 'import numpy\n'), ((1839, 1862), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1856, 1862), False, 'import numpy\n'), ((1882, 1906), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1900, 1906), False, 
'import numpy\n'), ((1942, 1958), 'numpy.sin', 'numpy.sin', (['phi_p'], {}), '(phi_p)\n', (1951, 1958), False, 'import numpy\n'), ((1994, 2010), 'numpy.sin', 'numpy.sin', (['phi_p'], {}), '(phi_p)\n', (2003, 2010), False, 'import numpy\n'), ((2025, 2041), 'numpy.cos', 'numpy.cos', (['phi_p'], {}), '(phi_p)\n', (2034, 2041), False, 'import numpy\n'), ((2397, 2433), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(prim_check - prim)'], {}), '(prim_check - prim)\n', (2414, 2433), False, 'import numpy\n'), ((2441, 2475), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(aux_check - aux)'], {}), '(aux_check - aux)\n', (2458, 2475), False, 'import numpy\n'), ((886, 915), 'models.sr_mf.initial_riemann', 'sr_mf.initial_riemann', (['qL', 'qR'], {}), '(qL, qR)\n', (907, 915), False, 'from models import sr_mf\n'), ((1138, 1161), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1155, 1161), False, 'import numpy\n'), ((1180, 1203), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1197, 1203), False, 'import numpy\n'), ((1222, 1245), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1239, 1245), False, 'import numpy\n'), ((1264, 1287), 'numpy.random.rand', 'numpy.random.rand', (['(1000)'], {}), '(1000)\n', (1281, 1287), False, 'import numpy\n'), ((1301, 1325), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1319, 1325), False, 'import numpy\n'), ((1340, 1364), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1358, 1364), False, 'import numpy\n'), ((1412, 1436), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1430, 1436), False, 'import numpy\n'), ((1451, 1475), 'numpy.random.randn', 'numpy.random.randn', (['(1000)'], {}), '(1000)\n', (1469, 1475), False, 'import numpy\n'), ((1655, 1673), 'numpy.cos', 'numpy.cos', (['theta_e'], {}), '(theta_e)\n', (1664, 1673), False, 'import numpy\n'), ((1707, 1725), 'numpy.sin', 'numpy.sin', 
(['theta_e'], {}), '(theta_e)\n', (1716, 1725), False, 'import numpy\n'), ((1921, 1939), 'numpy.cos', 'numpy.cos', (['theta_p'], {}), '(theta_p)\n', (1930, 1939), False, 'import numpy\n'), ((1973, 1991), 'numpy.sin', 'numpy.sin', (['theta_p'], {}), '(theta_p)\n', (1982, 1991), False, 'import numpy\n'), ((2198, 2218), 'numpy.zeros_like', 'numpy.zeros_like', (['Bx'], {}), '(Bx)\n', (2214, 2218), False, 'import numpy\n'), ((2220, 2240), 'numpy.zeros_like', 'numpy.zeros_like', (['Bx'], {}), '(Bx)\n', (2236, 2240), False, 'import numpy\n'), ((2305, 2332), 'numpy.random.rand', 'numpy.random.rand', (['(18)', '(1000)'], {}), '(18, 1000)\n', (2322, 2332), False, 'import numpy\n')] |
# Compartments are created here.
# NOT USED YET
from collections import deque
import tkinter as tk
from tkinter import messagebox

import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

import test
class CreateCompartmentWindow():
    '''
    Window used to define tank compartments on the application grid.

    Barrier lines drawn in the parent application are transferred onto a
    local grid, the grid is rendered with matplotlib inside a Tk frame,
    and the search helpers label connected empty regions as numbered
    compartments.
    '''

    def __init__(self, master, app=None):
        super().__init__()
        if __name__ == '__main__':
            # Stand-alone test mode: build grid and line data from the
            # local `test` module instead of a running application.
            base_canvas_dim = [1000, 720]
            self.canvas_origo = [50, base_canvas_dim[1] - 50]
            self.grid = test.get_grid(origo=self.canvas_origo,
                                      base_canvas_dim=base_canvas_dim)
            self.parent_dimensions = base_canvas_dim
            self.to_draw = test.get_to_draw()
        else:
            # Normal mode: reuse the parent application's grid/geometry.
            self.app = app
            self.grid = app._main_grid
            self.parent_dimensions = app._canvas_dim
            self.to_draw = app._pending_grid_draw
            self.parent_origo = app._canvas_origo

        frame_dim = (1500, 980)
        self.canvas_origo = (50, 720 - 50)
        self.canvas_dim = (1000, 720)
        self.frame = master
        self.frame.wm_title("Load properties")
        self.frame.geometry(str(frame_dim[0]) + 'x' + str(frame_dim[1]))
        self.frame.grab_set()

        # Convert the pending line coordinates to integer end points.
        self.points_child = {}
        # NOTE(review): the first component is always 1
        # (parent[0] - parent[0] + 1); search_bfs therefore only probes
        # startcol == 0 -- confirm whether parent[0] + 1 was intended.
        self.child_dimensions = (self.parent_dimensions[0] - self.parent_dimensions[0] + 1,
                                 self.parent_dimensions[1] + 1)
        for line, point in self.to_draw.items():
            point1 = (int(point[0][0]), int(point[0][1]))
            point2 = (int(point[1][0]), int(point[1][1]))
            self.points_child[line] = [point1, point2]

        # Mark every grid cell along each drawn line as a barrier.
        for line, points in self.points_child.items():
            for point in self.grid.get_points_along_line(points[0], points[1]):
                self.grid.set_barrier(point[0], point[1])

        fig = plt.figure()
        self.draw_grid()
        self.canvas_plt = FigureCanvasTkAgg(fig, self.frame)
        # FigureCanvasTkAgg.show() was deprecated and later removed from
        # matplotlib; draw() is the supported way to render the canvas.
        self.canvas_plt.draw()
        self.canvas_plt.get_tk_widget().place(relx=0.5, rely=0.5)
        tk.Button(self.frame, text='DRAW',
                  command=self.draw_grid).place(relx=0.1, rely=0.1)

    def __str__(self):
        return 'class CreateCompartmentWindow(): Compartment string not implemented'

    def draw_grid(self):
        '''
        Draw the grid matrix with one discrete color band per integer value.
        EMPTY = yellow
        FULL = red
        :return: the matplotlib.pyplot module holding the current figure
        '''
        # TODO make a better plot of the tanks
        def discrete_matshow(data):
            # one colormap entry per integer present in the data
            cmap = plt.get_cmap('RdBu', np.max(data) - np.min(data) + 1)
            # set limits .5 outside the true range so every integer gets
            # a full-width band
            mat = plt.matshow(data, cmap=cmap,
                              vmin=np.min(data) - .5, vmax=np.max(data) + .5)
            # tick the colorbar at integers only
            plt.colorbar(mat, ticks=np.arange(np.min(data), np.max(data) + 1))

        discrete_matshow(self.grid.get_matrix())
        plt.suptitle('Tanks defined by numbers from 2 and up.')
        return plt

    def search_dfs(self):
        '''
        Depth-first flood fill from cell (0, 0), marking empty cells full.

        NOTE(review): `make_stack` is not imported anywhere in this module,
        so calling this method raises NameError -- confirm the intended
        stack helper (collections.deque would work).
        :return: None
        '''
        start = (0, 0)
        stack = make_stack.Stack()
        stack.push_item(start)
        while len(stack) != 0:
            cell = stack.pop_item()
            if self.grid.is_empty(cell[0], cell[1]):
                self.grid.set_full(cell[0], cell[1])
                for item in self.grid.four_neighbors(cell[0], cell[1]):
                    stack.push_item(item)

    def search_bfs(self):
        '''
        Flood fill that labels connected empty regions as compartments.

        Every 20th row/column is probed for an empty cell; each hit seeds
        a fill that labels its connected region with the next compartment
        number while counting cells, tracking row extremes and collecting
        corner cells.

        Grid convention: grid(row, col) == grid(y, x); points use
        point(x, y) == grid(col, row).

        NOTE(review): boundary.pop() takes from the right end of the
        deque, so the expansion order is actually LIFO (depth-first)
        despite the method name.

        :return: dict mapping compartment number -> (cell count, corners)
        '''
        compartment_count = 1
        cells = 0
        el_max = ''
        el_min = ''
        compartments = {}
        for startrow in range(0, self.child_dimensions[1], 20):
            for startcol in range(0, self.child_dimensions[0], 20):
                if self.grid.is_empty(startrow, startcol):
                    el_max = ''
                    el_min = ''
                    cells = 0
                    boundary = deque()
                    boundary.append((startrow, startcol))
                    # BUGFIX: label the seed immediately so it cannot be
                    # re-appended (and double counted) as a neighbor of an
                    # expanded cell later in the fill.
                    self.grid.set_value(startrow, startcol, compartment_count)
                    corners = []
                    while len(boundary) != 0:
                        current_cell = boundary.pop()
                        # track the extreme row indices and the cell count
                        # NOTE(review): el_max stores the *smaller* row
                        # index (higher on the canvas) -- confirm naming.
                        if el_max == '':
                            el_max = current_cell[0]
                            el_min = current_cell[0]
                        else:
                            if current_cell[0] < el_max:
                                el_max = current_cell[0]
                            if current_cell[0] > el_min:
                                el_min = current_cell[0]
                        cells += 1
                        neighbors = self.grid.eight_neighbors(current_cell[0], current_cell[1])
                        # expand through the four edge neighbors, counting
                        # adjacent barriers for corner detection
                        no_of_barriers = 0
                        for neighbor in neighbors[0:4]:
                            if self.grid.get_value(neighbor[0], neighbor[1]) == -1:
                                no_of_barriers += 1
                            if self.grid.is_empty(neighbor[0], neighbor[1]):
                                self.grid.set_value(neighbor[0], neighbor[1], compartment_count)
                                boundary.append(neighbor)
                        # diagonal neighbors only contribute to corner
                        # detection, never to expansion
                        for neighbor in neighbors[4:]:
                            if self.grid.get_value(neighbor[0], neighbor[1]) == -1:
                                no_of_barriers += 1
                            if no_of_barriers > 4:
                                corners.append((neighbor[0], neighbor[1]))
                    # store the finished region and move to the next label
                    compartments[compartment_count] = cells, corners
                    compartment_count += 1
        return compartments
if __name__ == '__main__':
    # Manual test entry point: open the compartment window stand-alone
    # (the class then pulls its grid from the local `test` module).
    root = tk.Tk()
    my_app = CreateCompartmentWindow(master=root)
    root.mainloop()
"test.get_to_draw",
"test.get_grid",
"tkinter.Button",
"numpy.max",
"matplotlib.pyplot.figure",
"tkinter.Tk",
"numpy.min",
"matplotlib.pyplot.suptitle",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((6467, 6474), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (6472, 6474), True, 'import tkinter as tk\n'), ((1844, 1856), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1854, 1856), True, 'from matplotlib import pyplot as plt\n'), ((1908, 1942), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig', 'self.frame'], {}), '(fig, self.frame)\n', (1925, 1942), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((3003, 3058), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Tanks defined by numbers from 2 and up."""'], {}), "('Tanks defined by numbers from 2 and up.')\n", (3015, 3058), True, 'from matplotlib import pyplot as plt\n'), ((584, 655), 'test.get_grid', 'test.get_grid', ([], {'origo': 'self.canvas_origo', 'base_canvas_dim': 'base_canvas_dim'}), '(origo=self.canvas_origo, base_canvas_dim=base_canvas_dim)\n', (597, 655), False, 'import test\n'), ((735, 753), 'test.get_to_draw', 'test.get_to_draw', ([], {}), '()\n', (751, 753), False, 'import test\n'), ((2074, 2132), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""DRAW"""', 'command': 'self.draw_grid'}), "(self.frame, text='DRAW', command=self.draw_grid)\n", (2083, 2132), True, 'import tkinter as tk\n'), ((2554, 2566), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2560, 2566), True, 'import numpy as np\n'), ((2569, 2581), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2575, 2581), True, 'import numpy as np\n'), ((2686, 2698), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2692, 2698), True, 'import numpy as np\n'), ((2710, 2722), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2716, 2722), True, 'import numpy as np\n'), ((2833, 2845), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2839, 2845), True, 'import numpy as np\n'), ((2847, 2859), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2853, 2859), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2017-2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import joblib
# Resolve HTTPError so the rest of the module works on both Python 3
# (urllib.error) and Python 2 (urllib2).  The original
# `import urllib; urllib.error.HTTPError` is unreliable on Python 3:
# the `urllib.error` submodule is not guaranteed to be imported as a
# side effect of `import urllib`.
try:
    from urllib.error import HTTPError as urllib_HTTP_Error  # Python 3
except ImportError:
    from urllib2 import HTTPError as urllib_HTTP_Error  # Python 2
from .version import __version__ # noqa F401
from tindetheus import config
from tindetheus import export_embeddings
from tindetheus import tindetheus_align
from tindetheus.tinder_client import client
import tindetheus.facenet_clone.facenet as facenet
import tindetheus.image_processing as imgproc
import tindetheus.machine_learning as ml
# Python 2 compatibility: FileNotFoundError only exists on Python 3,
# so fall back to its Python 2 superset, IOError.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError
def catch_http_error(e, args, retries, facebook_token, x_auth_token):
    """Report an HTTP error, then re-run main() while retries remain."""
    print(e)
    print("Closing session.")
    retries -= 1
    # Guard clause: give up once the retry budget is exhausted.
    if retries <= 0:
        print("Out of retries. Exiting.")
        return
    print("Let's try again. \n Retries left:", retries)
    main(args, facebook_token, x_auth_token=x_auth_token,
         retries=retries)
def main(args, facebook_token, x_auth_token=None, retries=20):
    """Dispatch one tindetheus sub-command selected by ``args.function``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (see ``parse_arguments``).
    facebook_token : str or None
        Facebook auth token used to open the Tinder session.
    x_auth_token : str or None, optional
        Tinder XAuthToken alternative to the Facebook token.
    retries : int, optional
        Remaining retry budget handed to ``catch_http_error`` when a
        network request fails.
    """
    # There are three function choices: browse, build, like
    # browse: review new tinder profiles and store them in your database
    # train: use machine learning to create a new model that likes and dislikes
    # profiles based on your historical preference
    # like: use your machine leanring model to like new tinder profiles
    if args.function == 'browse':
        try:
            my_sess = client(facebook_token, args.distance, args.model_dir,
                             likes_left=args.likes, x_auth_token=x_auth_token)
            my_sess.browse()
        except urllib_HTTP_Error as e:
            # network hiccup: close the session and retry with a
            # decremented budget
            catch_http_error(e, args, retries, facebook_token, x_auth_token)
    elif args.function == 'train':
        # align the database
        tindetheus_align.main()
        # export the embeddings from the aligned database
        export_embeddings.main(model_dir=args.model_dir,
                               image_batch=args.image_batch)
        # calculate the n average embedding per profiles
        X, y = ml.calc_avg_emb()
        # fit and save a logistic regression model to the database
        ml.fit_log_reg(X, y)
    elif args.function == 'validate':
        print('\n\nAttempting to validate the dataset...\n\n')
        valdir = 'validation'
        # align the validation dataset
        tindetheus_align.main(input_dir=valdir,
                              output_dir=valdir+'_aligned')
        # export embeddings
        # y is the image list, X is the embedding_array
        image_list, emb_array = export_embeddings.main(model_dir=args.model_dir,  # noqa E501
                                data_dir=valdir+'_aligned',  # noqa E501
                                image_batch=args.image_batch,  # noqa E501
                                embeddings_name='val_embeddings.npy',  # noqa E501
                                labels_name='val_labels.npy',  # noqa E501
                                labels_strings_name='val_label_strings.npy',  # noqa E501
                                return_image_list=True)  # noqa E501
        # print(image_list)
        # convert the image list to a numpy array to take advantage of
        # numpy array slicing
        image_list = np.array(image_list)
        print('\n\nEvaluating trained model\n \n')
        model = joblib.load('log_reg_model.pkl')
        yhat = model.predict(emb_array)
        # print(yhat)
        # 0 should be dislike, and 1 should be like
        # if this is backwards, there is probably a bug...
        dislikes = yhat == 0
        likes = yhat == 1
        imgproc.show_images(image_list[dislikes], holdon=True, title='Dislike')
        print('\n\nGenerating plots...\n\n')
        plt.title('Dislike')
        imgproc.show_images(image_list[likes], holdon=True, title='Like')
        plt.title('Like')
        cols = ['Image name', 'Model prediction (0=Dislike, 1=Like)']
        results = np.array((image_list, yhat)).T
        print('\n\nSaving results to validation.csv\n\n')
        my_results_DF = pd.DataFrame(results, columns=cols)
        my_results_DF.to_csv('validation.csv')
        plt.show()
    elif args.function == 'like':
        try:
            print('... Loading the facenet model ...')
            print('... be patient this may take some time ...')
            with tf.Graph().as_default():
                with tf.Session() as sess:
                    # pass the tf session into client object
                    my_sess = client(facebook_token, args.distance,
                                     args.model_dir, likes_left=args.likes,
                                     tfsess=sess, x_auth_token=x_auth_token)
                    # Load the facenet model
                    facenet.load_model(my_sess.model_dir)
                    print('Facenet model loaded successfully!!!')
                    # automatically like users
                    my_sess.like()
        except urllib_HTTP_Error as e:
            catch_http_error(e, args, retries, facebook_token, x_auth_token)
    elif args.function == 'like_folder':
        print('Copying al_database profiles into either al/like or al/dislike')
        # make folders
        if not os.path.exists('al'):
            os.makedirs('al')
        if not os.path.exists('al/like'):
            os.makedirs('al/like')
        if not os.path.exists('al/dislike'):
            os.makedirs('al/dislike')
        # load the auto like database
        al_data = np.load('al_database.npy', allow_pickle=True)
        # copy profile images to either al/like or al/dislike
        for user in al_data:
            imgproc.al_copy_images(user[8], user[0], user[-1])
    else:
        text = '''You must specify a function. Your choices are either
        tindetheus browse
        tindetheus train
        tindetheus like
        tindetheus validate'''
        print(text)
def parse_arguments(argv, defaults):
    """Build the argparse parser and parse *argv*.

    Parameters
    ----------
    argv : list of str
        Command-line arguments, typically ``sys.argv[1:]``.
    defaults : dict
        Default option values; must contain the keys ``distance``,
        ``image_batch``, ``model_dir`` and ``likes``.

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    help_text = '''There are four function choices: browse, train, like, or validate.
\n
1) tindetheus browse
-- Let's you browse tinder profiles to add to your database.
-- Browses tinder profiles in your distance until you run out.
-- Asks if you'd like to increase the distance by 5 miles.
-- Use to build a database of the tinder profiles you look at.
\n
2) tindetheus train
-- Trains a model to your Tinder database.
-- Uses facenet implementation for facial detection and classification.
-- Saves logistic regression model to classify which faces you like and
-- dislike.
\n
3) tindetheus like
-- Automatically like and dislike Tinder profiles based on your historical
-- preference. First run browse, then run train, then prosper with like.
-- Uses the trained model to automatically like and dislike profiles.
-- Profiles where a face isn't detected are automatically disliked.
\n
4) tindetheus validate
-- This validate functions applies your personally trained tinder model on
-- an external set of images. Place images you'd like to run tindetheus on
-- withing a folder within the validation directory. See README for more
-- details. The results are saved in validation.csv.
\n
5) tindetheus like_folder
-- Creates al/like and al/dislike folders based on the profiles you have
-- automatically liked. This copies the profile images from al_database
-- into al/like or al/disliked based on whether the model liked or
-- disliked the profile.
\n
You can now store all default optional parameters in your environment
variables! This means you can set your starting distance, number of likes, and
image_batch size without manually specifying the options each time. You can
set local environment variables for a local folder using a .env file. This is
an example .env file:
FACEBOOK_AUTH_TOKEN="TODO" # your facebook token hash
# alternatively you can use the XAuthToken
# TINDER_AUTH_TOKEN="TODO"
TINDETHEUS_MODEL_DIR="/models/20170512-110547"
# the location of your facenet model directory
# see https://github.com/davidsandberg/facenet#pre-trained-models for other
# pretrained facenet models
TINDETHEUS_IMAGE_BATCH=1000 # number of images to load in a batch during train
# the larger the image_batch size, the faster the training process, at the
# cost of additional memory. A 4GB machine may struggle with 1000 images.
TINDETHEUS_DISTANCE=5 # Set the starting distance in miles
TINDETHEUS_LIKES=100 # set the number of likes you want to use
# note that free Tinder users only get 100 likes in 24 hours
TINDETHEUS_RETRIES=20
\n
Optional arguments will overide config.txt settings.
'''
    parser = argparse.ArgumentParser(description=help_text,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)  # noqa: E501
    parser.add_argument('function', type=str,
                        help='browse, train, like, validate, or like_folder')
    parser.add_argument('--distance', type=int,
                        help='Set the starting distance in miles.'
                        'Tindetheus will crawl in 5 mile increments from here'
                        '. Default=5.', default=defaults['distance'])
    parser.add_argument('--image_batch', type=int,
                        help='The number of images to load in the facenet'
                        ' model at one time. This only affects the train '
                        'functionality. A larger number will be faster at the'
                        ' cost for larger memory. Default=1000.',
                        default=defaults['image_batch'])
    parser.add_argument('--model_dir', type=str, help='Location of your '
                        'pretrained facenet model. Default="20170512-110547"',
                        default=defaults['model_dir'])
    parser.add_argument('--likes', type=int, help='Set the number of likes to '
                        'use. Note that free Tinder users only get 100 likes '
                        'in 24 hour period', default=defaults['likes'])
    parser.add_argument('--version', action='version', version=__version__)
    return parser.parse_args(argv)
def command_line_run():
    """Console entry point: load defaults from config, parse argv, run main().

    Raises
    ------
    RuntimeError
        If neither a Facebook token nor a Tinder XAuthToken is configured.
    """
    # settings to look for in the environment / config module
    defaults = {'facebook_token': config.FACEBOOK_AUTH_TOKEN,
                'XAuthToken': config.TINDER_AUTH_TOKEN,
                'model_dir': config.TINDETHEUS_MODEL_DIR,
                'image_batch': config.TINDETHEUS_IMAGE_BATCH,
                'distance': config.TINDETHEUS_DISTANCE,
                'likes': config.TINDETHEUS_LIKES,
                'retries': config.TINDETHEUS_RETRIES}
    # parse the supplied arguments
    args = parse_arguments(sys.argv[1:], defaults)
    if defaults['facebook_token'] is None and defaults['XAuthToken'] is None:
        # BUGFIX: the original `raise('...')` raised a plain string, which
        # is a TypeError on Python 3; raise a real exception instead.
        raise RuntimeError('ERROR: No facebook or tinder token has been set. '
                           'You must supply an auth token in order to use '
                           'tindetheus!')
    # run the main function with parsed arguments
    main(args, defaults['facebook_token'], defaults['XAuthToken'],
         retries=defaults['retries'])
if __name__ == '__main__':
    # Allow running the module directly as well as via the console script.
    command_line_run()
| [
"tindetheus.machine_learning.calc_avg_emb",
"tindetheus.image_processing.show_images",
"numpy.array",
"tindetheus.facenet_clone.facenet.load_model",
"os.path.exists",
"tensorflow.Graph",
"argparse.ArgumentParser",
"tensorflow.Session",
"tindetheus.image_processing.al_copy_images",
"tindetheus.tind... | [((9981, 10086), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'help_text', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=help_text, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (10004, 10086), False, 'import argparse\n'), ((2763, 2870), 'tindetheus.tinder_client.client', 'client', (['facebook_token', 'args.distance', 'args.model_dir'], {'likes_left': 'args.likes', 'x_auth_token': 'x_auth_token'}), '(facebook_token, args.distance, args.model_dir, likes_left=args.likes,\n x_auth_token=x_auth_token)\n', (2769, 2870), False, 'from tindetheus.tinder_client import client\n'), ((3113, 3136), 'tindetheus.tindetheus_align.main', 'tindetheus_align.main', ([], {}), '()\n', (3134, 3136), False, 'from tindetheus import tindetheus_align\n'), ((3203, 3281), 'tindetheus.export_embeddings.main', 'export_embeddings.main', ([], {'model_dir': 'args.model_dir', 'image_batch': 'args.image_batch'}), '(model_dir=args.model_dir, image_batch=args.image_batch)\n', (3225, 3281), False, 'from tindetheus import export_embeddings\n'), ((3385, 3402), 'tindetheus.machine_learning.calc_avg_emb', 'ml.calc_avg_emb', ([], {}), '()\n', (3400, 3402), True, 'import tindetheus.machine_learning as ml\n'), ((3478, 3498), 'tindetheus.machine_learning.fit_log_reg', 'ml.fit_log_reg', (['X', 'y'], {}), '(X, y)\n', (3492, 3498), True, 'import tindetheus.machine_learning as ml\n'), ((3678, 3749), 'tindetheus.tindetheus_align.main', 'tindetheus_align.main', ([], {'input_dir': 'valdir', 'output_dir': "(valdir + '_aligned')"}), "(input_dir=valdir, output_dir=valdir + '_aligned')\n", (3699, 3749), False, 'from tindetheus import tindetheus_align\n'), ((3894, 4153), 'tindetheus.export_embeddings.main', 'export_embeddings.main', ([], {'model_dir': 'args.model_dir', 'data_dir': "(valdir + '_aligned')", 'image_batch': 'args.image_batch', 'embeddings_name': '"""val_embeddings.npy"""', 'labels_name': '"""val_labels.npy"""', 
'labels_strings_name': '"""val_label_strings.npy"""', 'return_image_list': '(True)'}), "(model_dir=args.model_dir, data_dir=valdir +\n '_aligned', image_batch=args.image_batch, embeddings_name=\n 'val_embeddings.npy', labels_name='val_labels.npy', labels_strings_name\n ='val_label_strings.npy', return_image_list=True)\n", (3916, 4153), False, 'from tindetheus import export_embeddings\n'), ((4709, 4729), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (4717, 4729), True, 'import numpy as np\n'), ((4797, 4829), 'joblib.load', 'joblib.load', (['"""log_reg_model.pkl"""'], {}), "('log_reg_model.pkl')\n", (4808, 4829), False, 'import joblib\n'), ((5066, 5137), 'tindetheus.image_processing.show_images', 'imgproc.show_images', (['image_list[dislikes]'], {'holdon': '(True)', 'title': '"""Dislike"""'}), "(image_list[dislikes], holdon=True, title='Dislike')\n", (5085, 5137), True, 'import tindetheus.image_processing as imgproc\n'), ((5191, 5211), 'matplotlib.pyplot.title', 'plt.title', (['"""Dislike"""'], {}), "('Dislike')\n", (5200, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5221, 5286), 'tindetheus.image_processing.show_images', 'imgproc.show_images', (['image_list[likes]'], {'holdon': '(True)', 'title': '"""Like"""'}), "(image_list[likes], holdon=True, title='Like')\n", (5240, 5286), True, 'import tindetheus.image_processing as imgproc\n'), ((5295, 5312), 'matplotlib.pyplot.title', 'plt.title', (['"""Like"""'], {}), "('Like')\n", (5304, 5312), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5550), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': 'cols'}), '(results, columns=cols)\n', (5527, 5550), True, 'import pandas as pd\n'), ((5607, 5617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5615, 5617), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5430), 'numpy.array', 'np.array', (['(image_list, yhat)'], {}), '((image_list, yhat))\n', (5410, 5430), True, 'import numpy as np\n'), ((6948, 6993), 'numpy.load', 
'np.load', (['"""al_database.npy"""'], {'allow_pickle': '(True)'}), "('al_database.npy', allow_pickle=True)\n", (6955, 6993), True, 'import numpy as np\n'), ((6679, 6699), 'os.path.exists', 'os.path.exists', (['"""al"""'], {}), "('al')\n", (6693, 6699), False, 'import os\n'), ((6713, 6730), 'os.makedirs', 'os.makedirs', (['"""al"""'], {}), "('al')\n", (6724, 6730), False, 'import os\n'), ((6746, 6771), 'os.path.exists', 'os.path.exists', (['"""al/like"""'], {}), "('al/like')\n", (6760, 6771), False, 'import os\n'), ((6785, 6807), 'os.makedirs', 'os.makedirs', (['"""al/like"""'], {}), "('al/like')\n", (6796, 6807), False, 'import os\n'), ((6823, 6851), 'os.path.exists', 'os.path.exists', (['"""al/dislike"""'], {}), "('al/dislike')\n", (6837, 6851), False, 'import os\n'), ((6865, 6890), 'os.makedirs', 'os.makedirs', (['"""al/dislike"""'], {}), "('al/dislike')\n", (6876, 6890), False, 'import os\n'), ((7098, 7148), 'tindetheus.image_processing.al_copy_images', 'imgproc.al_copy_images', (['user[8]', 'user[0]', 'user[-1]'], {}), '(user[8], user[0], user[-1])\n', (7120, 7148), True, 'import tindetheus.image_processing as imgproc\n'), ((5848, 5860), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5858, 5860), True, 'import tensorflow as tf\n'), ((5961, 6081), 'tindetheus.tinder_client.client', 'client', (['facebook_token', 'args.distance', 'args.model_dir'], {'likes_left': 'args.likes', 'tfsess': 'sess', 'x_auth_token': 'x_auth_token'}), '(facebook_token, args.distance, args.model_dir, likes_left=args.likes,\n tfsess=sess, x_auth_token=x_auth_token)\n', (5967, 6081), False, 'from tindetheus.tinder_client import client\n'), ((6217, 6254), 'tindetheus.facenet_clone.facenet.load_model', 'facenet.load_model', (['my_sess.model_dir'], {}), '(my_sess.model_dir)\n', (6235, 6254), True, 'import tindetheus.facenet_clone.facenet as facenet\n'), ((5802, 5812), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5810, 5812), True, 'import tensorflow as tf\n')] |
import sys
import csv
import MeCab
import numpy as np
class CalcSim_MeCab:
    def __init__(self):
        # No state is required; all methods operate on their arguments only.
        pass
def cos_sim(self, x, y):
val = np.sqrt(np.sum(x**2)) * np.sqrt(np.sum(y**2))
return np.dot(x, y) / val if val != 0 else 0
def WordFrequencyCount(self, word, wordFreq_dict):
if word in wordFreq_dict:
wordFreq_dict[word] +=1
else:
wordFreq_dict.setdefault(word, 1)
return wordFreq_dict
"""
@fn calcTranslationSimilarity_normal()
@brief calculate a similarity between two sentences
@param original_translation first sentence for reference (string)
@param other_translations second, third,... sentences for similarity calculation (string array)
@retval similarity array (float array)
@details "Wakati"で分けた形態素をそのまま使用して類似度を計算
@warning
@note https://www.creativevillage.ne.jp/84849
"""
def calcTranslationSimilarity_normal(self, original_translation, other_translations):
sentence_list = []
sentence_wakati_list = []
# words separation with "wakati"
# wakati = MeCab.Tagger('-Owakati')
wakati = MeCab.Tagger(r'-O wakati -d D:\\MeCab\\dic\\ipadic')
sentence_list.append(original_translation)
for i in other_translations:
if i != "":
sentence_list.append(i)
sentence_wakati_list = [wakati.parse(i).split() for i in sentence_list]
# print(sentence_wakati_list)
# create Bag of Words table
word_to_index = {}
index_to_word = {}
for s in sentence_wakati_list:
for w in s:
if w not in word_to_index:
new_index = len(word_to_index)
word_to_index[w] = new_index
index_to_word[new_index] = w
corpus = np.zeros((len(sentence_wakati_list), len(word_to_index)))
for i, s in enumerate(sentence_wakati_list):
for w in s:
corpus[i, word_to_index[w]] = 1
sentence_wakati_list_2 = sentence_wakati_list
sentence_wakati_list_2.pop(0)
# calculate sentence similarity
similarity_score = []
for i, v in enumerate(sentence_wakati_list_2):
per = self.cos_sim(corpus[0], corpus[i + 1])
print(str(v) + ": " + str(per))
similarity_score.append(per)
return similarity_score
"""
@fn calcTranslationSimilarity_important()
@brief calculate a similarity between two sentences
@param original_translation first sentence for reference (string)
@param other_translations second, third,... sentences for similarity calculation (string array)
@retval similarity array (float array)
@details "名詞"や"動詞"などの重要な形態素のみを抽出して類似度を計算
@warning
@note https://your-3d.com/pytho-mecab-frequencywords/
"""
def calcTranslationSimilarity_important(self, original_translation, other_translations):
sentence_list = []
sentence_wakati_list = []
wakati = MeCab.Tagger(r'-O chasen -d D:\\MeCab\\dic\\ipadic')
sentence_list.append(original_translation)
for i in other_translations:
if i != "":
sentence_list.append(i)
wordFreq_dict = {}
for i in sentence_list:
node = wakati.parseToNode(i)
temp_list = []
while node:
if node.feature.split(",")[0] == "名詞" or node.feature.split(",")[0] == "動詞" or node.feature.split(",")[0] == "形容詞" or node.feature.split(",")[0] == "形容動詞":
word = node.surface
# print(word)
self.WordFrequencyCount(word, wordFreq_dict)
temp_list.append(word)
node = node.next
sentence_wakati_list.append(temp_list)
# print(sentence_wakati_list)
# create Bag of Words table
word_to_index = {}
index_to_word = {}
for s in sentence_wakati_list:
for w in s:
if w not in word_to_index:
new_index = len(word_to_index)
word_to_index[w] = new_index
index_to_word[new_index] = w
corpus = np.zeros((len(sentence_wakati_list), len(word_to_index)))
for i, s in enumerate(sentence_wakati_list):
for w in s:
corpus[i, word_to_index[w]] = 1
sentence_wakati_list_2 = sentence_wakati_list
sentence_wakati_list_2.pop(0)
similarity_score = []
for i, v in enumerate(sentence_wakati_list_2):
per = self.cos_sim(corpus[0], corpus[i + 1])
print(str(v) + ": " + str(per))
similarity_score.append(per)
return similarity_score
if __name__ == '__main__':
args = sys.argv
# 引数チェック
if len(args) != 3:
print('使い方が間違っています。引数の個数: ' + str(len(args)))
print('usage: python <*.py> <input_filename> <output_filename>')
print('yours: ')
for i in range(len(args)):
print('args[' + i + ']= ' + str(args[i]))
exit()
# 必要なファイルを開く
try:
f_in = open(args[1], mode='r')
f_out = open(args[2], mode='w')
except FileNotFoundError as err:
print("ファイルが存在しないため、読み込めませんでした。")
exit()
except Exception as other:
print("ファイルが読み込めませんでした。")
exit()
print("Input File: " + args[1])
reader = csv.reader(f_in, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
# object for calculating sentence similarity
o_mecab = CalcSim_MeCab() # MeCab Lib
count = 0
for line in reader:
original_translation = line.pop(0)
f_out.write(original_translation + ",")
other_translations = line
for ot in other_translations:
f_out.write(ot + ",")
if count == 0: # CSVのヘッダー定義
f_out.write("\n")
else: # 文章の類似度計算
####################################################
# similarity_score = o_mecab.calcTranslationSimilarity_normal(original_translation, other_translations)
similarity_score = o_mecab.calcTranslationSimilarity_important(original_translation, other_translations)
####################################################
for i in similarity_score:
f_out.write(str(i) + ",")
f_out.write("\n")
print('===========================\n')
count += 1
f_in.close()
f_out.close() | [
"MeCab.Tagger",
"numpy.sum",
"numpy.dot",
"csv.reader"
] | [((5678, 5792), 'csv.reader', 'csv.reader', (['f_in'], {'delimiter': '""","""', 'doublequote': '(True)', 'lineterminator': "'\\r\\n'", 'quotechar': '"""\\""""', 'skipinitialspace': '(True)'}), '(f_in, delimiter=\',\', doublequote=True, lineterminator=\'\\r\\n\',\n quotechar=\'"\', skipinitialspace=True)\n', (5688, 5792), False, 'import csv\n'), ((1204, 1261), 'MeCab.Tagger', 'MeCab.Tagger', (['"""-O wakati -d D:\\\\\\\\MeCab\\\\\\\\dic\\\\\\\\ipadic"""'], {}), "('-O wakati -d D:\\\\\\\\MeCab\\\\\\\\dic\\\\\\\\ipadic')\n", (1216, 1261), False, 'import MeCab\n'), ((3154, 3211), 'MeCab.Tagger', 'MeCab.Tagger', (['"""-O chasen -d D:\\\\\\\\MeCab\\\\\\\\dic\\\\\\\\ipadic"""'], {}), "('-O chasen -d D:\\\\\\\\MeCab\\\\\\\\dic\\\\\\\\ipadic')\n", (3166, 3211), False, 'import MeCab\n'), ((179, 193), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (185, 193), True, 'import numpy as np\n'), ((203, 217), 'numpy.sum', 'np.sum', (['(y ** 2)'], {}), '(y ** 2)\n', (209, 217), True, 'import numpy as np\n'), ((233, 245), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (239, 245), True, 'import numpy as np\n')] |
import h5py
import numpy as np
def load_stdata(fname):
f = h5py.File(fname, 'r')
data = f['data'].value
timestamps = f['date'].value
f.close()
return data, timestamps
data,timestamps = load_stdata('NYC14_M16x8_T60_NewEnd.h5')
# print(data,timestamps)
data = np.ndarray.tolist(data)
timestamps = np.ndarray.tolist(timestamps)
# print(timestamps)
for i in range(16):
print(data[1][0][i]) | [
"numpy.ndarray.tolist",
"h5py.File"
] | [((279, 302), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['data'], {}), '(data)\n', (296, 302), True, 'import numpy as np\n'), ((316, 345), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['timestamps'], {}), '(timestamps)\n', (333, 345), True, 'import numpy as np\n'), ((63, 84), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (72, 84), False, 'import h5py\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.